id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4855916 | <reponame>thomasjpfan/ansible-docker-role
import os
import json
# Locations of the Docker daemon's TLS material on the managed host.
ssl_path = "/etc/docker/ssl"
# Server-side certificate/key plus the CA certificate the daemon trusts.
cert_path = os.path.join(ssl_path, "server-cert.pem")
key_path = os.path.join(ssl_path, "server-key.pem")
ca_path = os.path.join(ssl_path, "ca.pem")
# Client credentials used when talking to the daemon over TCP+TLS.
client_cert_path = os.path.join(ssl_path, "client-cert.pem")
client_key_path = os.path.join(ssl_path, "client-key.pem")
def test_docker_installed(host):
    """The docker service must be enabled at boot and currently running."""
    docker_service = host.service("docker")
    assert docker_service.is_enabled
    assert docker_service.is_running
def test_user_in_group(host):
    """The deploy user must be a member of the docker group."""
    assert "docker" in host.user("deploy").groups
# check certs are there
def test_certs_installed(host):
    """TLS directory and server cert/key/CA must exist with strict modes."""
    tls_dir = host.file(ssl_path)
    assert tls_dir.is_directory
    assert tls_dir.mode == 0o0444
    # (path, expected mode) for each credential; the key is owner-read only.
    expected = [(cert_path, 0o444), (key_path, 0o400), (ca_path, 0o444)]
    with host.sudo():
        for file_path, file_mode in expected:
            credential = host.file(file_path)
            assert credential.exists
            assert credential.mode == file_mode
def test_daemon_json(host):
    """Validate the daemon configuration written to /etc/docker/daemon.json."""
    cfg_file = host.file("/etc/docker/daemon.json")
    assert cfg_file.is_file
    assert cfg_file.mode == 0o0644
    cfg = json.loads(cfg_file.content_string)
    # TLS must be enforced and point at the installed credentials.
    assert cfg["tlsverify"]
    assert "tcp://ansible-local:2376" in cfg["hosts"]
    assert cfg["tlscert"] == cert_path
    assert cfg["tlskey"] == key_path
    assert cfg["tlscacert"] == ca_path
    # Google DNS servers, IPv6 disabled, bounded json-file logging.
    for server in ("8.8.8.8", "8.8.4.4"):
        assert server in cfg["dns"]
    assert not cfg["ipv6"]
    assert cfg["log-driver"] == "json-file"
    assert cfg["log-opts"]["max-size"] == "10m"
    assert cfg["log-opts"]["max-file"] == "1000"
def test_systemd_override(host):
    """Systemd drop-in directory and override.conf must be installed.

    Checks that /etc/systemd/system/docker.service.d exists (0755) and
    contains an override.conf file (0644).
    """
    override_dir_path = "/etc/systemd/system/docker.service.d"
    override_dir = host.file(override_dir_path)
    assert override_dir.is_directory
    assert override_dir.mode == 0o0755
    # NOTE(review): the original rebound ``override_path`` from the
    # directory-path string to the file object, shadowing it; renamed
    # for clarity (behavior unchanged).
    override_file = host.file(os.path.join(override_dir_path, "override.conf"))
    assert override_file.is_file
    assert override_file.mode == 0o0644
def test_docker_tls_verify(host):
    """`docker version` over TLS against the daemon must succeed."""
    command_template = ("docker --tlsverify --tlscacert=%s --tlscert=%s "
                        "--tlskey=%s -H=ansible-local:2376 version")
    result = host.run(command_template, ca_path, client_cert_path,
                      client_key_path)
    assert result.rc == 0
def test_docker_crontab(host):
    """A crontab entry that prunes unused Docker resources must exist."""
    result = host.run("crontab -l")
    assert result.rc == 0
    assert "docker system prune -af" in result.stdout
| StarcoderdataPython |
4875594 | """ This module has functions for liquid layer detection.
"""
import numpy as np
import numpy.ma as ma
import scipy.signal
from cloudnetpy import utils
from cloudnetpy.constants import T0
def ind_base(dprof, p, dist, lim):
    """Find the base index of a peak in a profile.

    Scans the 1st-order differences *dprof* over at most *dist* elements
    below the peak index *p* and returns the lowermost index where the
    difference exceeds (max difference within the window) / *lim*.
    Masked values of the original profile should already be 0 in
    *dprof*, e.g. ``dprof = np.diff(masked_prof).filled(0)``.

    Args:
        dprof (ndarray): 1-D array of 1st discrete differences.
        p (int): Index of a (possibly local) peak in the original
            profile, found beforehand by some other method.
        dist (int): Number of elements inspected below *p*; the search
            never goes below index 0.
        lim (float): Threshold divisor, valid for values > 1. Values
            close to 1 pick the point right below the steepest rise;
            larger values can accept a point lower in the profile.

    Returns:
        int: Base index of the peak.

    Raises:
        IndexError: If no difference in the window exceeds the threshold.

    See also:
        droplet.ind_top()
    """
    lowest = max(p - dist, 0)
    window = dprof[lowest:p]
    threshold = np.max(window) / lim
    return lowest + np.where(window > threshold)[0][0]
def ind_top(dprof, p, nprof, dist, lim):
    """Find the top index of a peak in a profile.

    Scans the 1st-order differences *dprof* over at most *dist* elements
    above the peak index *p* and returns the uppermost index where the
    difference is below (most negative difference within the window) /
    *lim*. Masked values of the original profile should already be 0 in
    *dprof*, e.g. ``dprof = np.diff(masked_prof).filled(0)``.

    Args:
        dprof (ndarray): 1-D array of 1st discrete differences.
        p (int): Index of a (possibly local) peak in the profile, found
            beforehand by some other method.
        nprof (int): Length of the profile; the top index can't exceed it.
        dist (int): Number of elements inspected above *p*.
        lim (float): Threshold divisor, valid for values > 1. Values
            close to 1 pick the point right above the steepest drop;
            larger values can accept a point higher in the profile.

    Returns:
        int: Top index of the peak.

    Raises:
        IndexError: If no difference in the window crosses the threshold.

    See also:
        droplet.ind_base()
    """
    highest = min(p + dist, nprof)
    window = dprof[p:highest]
    threshold = np.min(window) / lim
    return p + np.where(window < threshold)[0][-1] + 1
def find_liquid(obs, peak_amp=2e-5, max_width=300, min_points=3,
                min_top_der=2e-7):
    """Estimate liquid layers from SNR-screened attenuated backscattering.

    Args:
        obs (ClassData): Observations container.
        peak_amp (float, optional): Minimum value of peak. Default is 2e-5.
        max_width (float, optional): Maximum width of peak. Default is 300 (m).
        min_points (int, optional): Minimum number of valid points in peak.
            Default is 3.
        min_top_der (float, optional): Minimum derivative above peak,
            defined as (beta_peak-beta_top) / (alt_top-alt_peak), which
            is always positive. Default is 2e-7.

    Returns:
        dict: Dict containing 'presence', 'bases' and 'tops' boolean arrays.

    """
    beta = obs.beta
    height = obs.height
    is_liquid, liquid_top, liquid_base = utils.init(3, beta.shape, dtype=bool,
                                                    masked=False)
    # Search windows: ~200 m below and ~150 m above each candidate peak.
    base_below_peak = utils.n_elements(height, 200)
    top_above_peak = utils.n_elements(height, 150)
    beta_diff = np.diff(beta, axis=1).filled(0)
    beta = beta.filled(0)
    # Local maxima along the altitude axis, then keep the strong ones.
    pind = scipy.signal.argrelextrema(beta, np.greater, order=4, axis=1)
    strong_peaks = np.where(beta[pind] > peak_amp)
    pind = (pind[0][strong_peaks], pind[1][strong_peaks])
    for n, peak in zip(*pind):
        lprof = beta[n, :]
        dprof = beta_diff[n, :]
        try:
            base = ind_base(dprof, peak, base_below_peak, 4)
            top = ind_top(dprof, peak, height.shape[0], top_above_peak, 4)
        except IndexError:
            # No point crossed the threshold below/above this peak.
            # FIX: this was a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit and unrelated errors.
            continue
        npoints = np.count_nonzero(lprof[base:top+1])
        peak_width = height[top] - height[base]
        top_der = (lprof[peak] - lprof[top]) / (height[top] - height[peak])
        conds = (npoints > min_points,
                 peak_width < max_width,
                 top_der > min_top_der)
        if all(conds):
            is_liquid[n, base:top+1] = True
            liquid_top[n, top] = True
            liquid_base[n, base] = True
    return {'presence': is_liquid, 'bases': liquid_base, 'tops': liquid_top}
def correct_liquid_top(obs, liquid, is_freezing):
    """Corrects lidar detected liquid cloud top using radar data.

    Args:
        obs (ClassData): Observations container.
        liquid (dict): Dictionary for liquid clouds.
        is_freezing (ndarray): 2-D boolean array of sub-zero temperature,
            derived from the model temperature and melting layer based
            on radar data.

    Returns:
        ndarray: Corrected liquid cloud array.

    See also:
        droplet.find_liquid()

    """
    # ~750 m worth of altitude gates used to extend the search window.
    top_above = utils.n_elements(obs.height, 750)
    for prof, top in zip(*np.where(liquid['tops'])):
        # Window: from the detected top up to the first freezing gate
        # above it, plus top_above extra gates.
        ind = np.where(is_freezing[prof, top:])[0][0] + top_above
        rad = obs.z[prof, top:top+ind+1]
        # Only extend when radar has BOTH valid and masked gates in the
        # window; the first masked radar gate marks the corrected top.
        if not (rad.mask.all() or ~rad.mask.any()):
            first_masked = ma.where(rad.mask)[0][0]
            liquid['presence'][prof, top:top+first_masked+1] = True
    # Remove liquid anywhere the wet-bulb temperature is below T0-40
    # (homogeneous freezing: no supercooled liquid below about -40 C).
    liquid['presence'][obs.tw < (T0-40)] = False
    return liquid['presence']
| StarcoderdataPython |
1814537 | #!/usr/bin/env python
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from os import path
# CLI description / developer notes shown by argparse.
_README="""
A script to generate confusion matrix and evaluate accuracy of rfmix.
-<NAME> (magu[at]stanford[dot]edu)
"""
_TODO="""
1. modify output directory (currently the output text file is stored in the directory that this script is called from).
"""
# quick check if we're on galangal
import platform
if platform.uname()[1]=='galangal.stanford.edu':
    print('error: script must be modified to be run on galangal')
else:
    # assume we're on sherlock -- load modules and check versions
    print('Assuming we are on sherlock')
# define functions
def load_data(args_dict):
    """Load ground-truth labels and raw rfmix output from scratch storage.

    Returns a ``(y, yhat_raw, abort)`` triple. ``abort`` is True (and
    the other two values are empty strings) when the output file named
    in ``args_dict`` already exists, in which case nothing is loaded.
    """
    print('Loading in data')
    # Refuse to clobber an existing results file.
    if path.exists(args_dict['output-filename']):
        print('Error: output file already exists. Aborting script.')
        return '', '', True
    base_dir = '/scratch/users/magu/deepmix/data/simulated_chr20/'
    rfmix_path = base_dir + 'vcf/rf_out/' + args_dict['rfmix-result-filename']
    label_path = base_dir + 'label/' + args_dict['gt-filename']
    yhat_raw = pd.read_table(rfmix_path, skiprows=1)
    y = np.load(label_path)
    return y, yhat_raw, False
def expand_rfmix_windows(y, yhat_raw, S):
    """Broadcast per-window rfmix calls onto every variant in each window.

    Each row of ``yhat_raw`` describes one genomic window (start in
    column 1, end in column 2, per-haplotype calls from column 6 on).
    Every variant whose position falls inside a window receives that
    window's calls, shifted by +1.
    """
    print('Expanding rfmix windows')
    positions = y['V'][:, 1].astype(int)
    variant_ids = ['_'.join(v) for v in y['V']]
    yhat = pd.DataFrame(index=variant_ids, columns=S)
    for row in range(yhat_raw.shape[0]):
        lo, hi = yhat_raw.iloc[row, 1], yhat_raw.iloc[row, 2]
        in_window = (lo <= positions) & (positions <= hi)
        block = np.vstack([yhat_raw.iloc[row, 6:]
                           for _ in range(sum(in_window))])
        yhat.iloc[in_window, :] = block.astype(int) + 1
    return yhat
def evaluate_model(y, yhat, args_dict):
    """Compute confusion matrices and accuracy; write them to a text file.

    Writes row-normalized, column-normalized and raw (bp) confusion
    matrices plus the overall accuracy to ``args_dict['output-filename']``.
    """
    print('Evaluating model and creating text file')
    # Confusion matrix over all flattened haplotype calls.
    cm = confusion_matrix(y['L'].flatten(), yhat.T.values.flatten().astype(int))
    # Overall accuracy = trace / total count.
    acc = np.sum(np.diag(cm))/np.sum(cm)
    anc_label = ['AFR', 'EAS', 'EUR', 'NAT', 'SAS']
    row_normalized_df = pd.DataFrame(cm, index=anc_label, columns=anc_label).divide(cm.sum(axis=1), axis=0)
    col_normalized_df = pd.DataFrame(cm, index=anc_label, columns=anc_label).divide(cm.sum(axis=0), axis=1)
    bp_df = pd.DataFrame(cm, index=anc_label, columns=anc_label)
    # FIX: use a context manager instead of an explicit open()/close()
    # pair so the handle is closed even if a write fails.
    with open(args_dict['output-filename'], "w") as output_file_handle:
        output_file_handle.writelines('Row-normalized confusion matrix:\n\n')
        output_file_handle.writelines(row_normalized_df.to_string())
        output_file_handle.writelines('\n\n\n\nCol-normalized confusion matrix:\n\n')
        output_file_handle.writelines(col_normalized_df.to_string())
        output_file_handle.writelines('\n\n\n\nBP confusion matrix:\n\n')
        output_file_handle.writelines(bp_df.to_string())
        output_file_handle.writelines('\n\n\n\nmodel accuracy = '+str(acc))
def get_args():
    """Parse the three positional command-line arguments for this script."""
    import argparse
    parser = argparse.ArgumentParser(description=_README)
    positionals = (
        ('rfmix-result-filename', 'rfmix_result',
         'filename of rfmix output (.msp.tsv file)'),
        ('gt-filename', 'groundtruth',
         'filename of ground truth labels (.npz file)'),
        ('output-filename', 'output',
         'output filename (.txt file)'),
    )
    for name, metavar, help_text in positionals:
        parser.add_argument(name, metavar=metavar, type=str, help=help_text)
    return parser.parse_args()
def main():
    """Entry point: load data, expand rfmix windows, evaluate, save results."""
    args = get_args()
    # print(args)
    y, yhat_raw, abort = load_data(vars(args))
    if abort: return
    # Rename samples so rfmix haplotype suffixes (_S1/_S2) match the
    # ground-truth naming convention (.0/.1).
    S = np.array([s.replace('_S1','.0').replace('_S2','.1') for s in y['S']]) # match samples
    yhat = expand_rfmix_windows(y, yhat_raw, S) # expand rfmix windows
    evaluate_model(y, yhat, vars(args)) # evaluate model and save accuracy & confusion matrices to text file
    return
if __name__=='__main__':
    main()
| StarcoderdataPython |
1686698 |
import sys
import datetime
def capitalize(string):
    """Return *string* with only its first character upper-cased.

    Unlike ``str.capitalize`` the remainder of the string is left
    untouched (so camelCase identifiers keep their casing).
    Generalized to return the empty string unchanged instead of
    raising IndexError.
    """
    if not string:
        return string
    return string[0].upper() + string[1:]
# Positional CLI arguments (no argparse): action, target path, project
# name and namespace. Extra args from position 6 on are type/name pairs
# consumed by new_class().
action = sys.argv[1]
file_path = sys.argv[2]
project_name = sys.argv[3]
namespace = sys.argv[4]
# Timestamp substituted into the generated file headers.
now = datetime.datetime.now()
date = now.strftime("%m-%d-%Y %H:%M:%S")
# NOTE(review): sys.argv[5] is skipped here on purpose -- it is read
# inside new_class() as the class file name.
args = sys.argv[6:]
username = "Logan Rickert"
def new_class():
    """Fill in C++ class templates (src/<name>.cpp, include/<name>.h).

    Reads pre-created template files, substitutes the {{...}}
    placeholders (class name, namespace, date, username) and, when
    type/name argument pairs were given on the command line, generates
    the constructor, getters/setters and field declarations.
    NOTE: this file is Python 2 (print statement, xrange).
    """
    file_name = sys.argv[5]
    cpp_file_path = file_path + "src/" + file_name + ".cpp"
    h_file_path = file_path + "include/" + file_name + ".h"
    # Remaining args must come in (type, name) pairs.
    if len(args) % 2 != 0:
        print "You must have an even amount of arguments!"
        sys.exit()
    parse = []
    for arg in xrange(0,len(args),2):
        parse.append([args[arg], args[arg + 1]])
    cpp_file_contents = None
    h_file_contents = None
    with open(cpp_file_path, 'r') as f:
        cpp_file_contents = f.read()
    with open(h_file_path, 'r') as f:
        h_file_contents = f.read()
    # Simple placeholder substitutions common to every generated class.
    cpp_file_contents = cpp_file_contents.replace(
        "{{class_name}}", file_name
    )
    cpp_file_contents = cpp_file_contents.replace(
        "{{namespace}}", namespace
    )
    cpp_file_contents = cpp_file_contents.replace(
        "{{date}}", date
    )
    cpp_file_contents = cpp_file_contents.replace(
        "{{username}}", username
    )
    if len(args) > 0:
        # Constructor signature: Name::Name(Type sField, ...) {
        construct_init = file_name + "::" + file_name + "("
        for key, value in parse:
            construct_init += key + " s" + capitalize(value) + ", "
        construct_init = construct_init[:-2] + ") {"
        cpp_file_contents = cpp_file_contents.replace(
            "{{construct_init}}", construct_init
        )
        # Constructor body: field = sField; assignments.
        construct_init_equals = ""
        for key, value in parse:
            construct_init_equals += "\t" + value + " = s" + capitalize(value) + ";\n"
        construct_init_equals += "}"
        cpp_file_contents = cpp_file_contents.replace(
            "{{construct_init_equals}}", construct_init_equals
        )
        # Getter/setter definitions for each (type, name) pair.
        getters_setters = ""
        for key, value in parse:
            getters_setters += """%s %s::get%s() {
return %s;
}
void %s::set%s(%s s%s) {
%s = s%s;
}
""" % (
                key,
                file_name,
                capitalize(value),
                value,
                file_name,
                capitalize(value),
                key,
                capitalize(value),
                value,
                capitalize(value)
            )
        getters_setters = getters_setters[:-2]
        cpp_file_contents = cpp_file_contents.replace(
            "{{getters_setters}}", getters_setters
        )
    else:
        # No fields: strip the placeholders (and their newlines) entirely.
        cpp_file_contents = cpp_file_contents.replace(
            "\n{{construct_init}}\n", ""
        )
        cpp_file_contents = cpp_file_contents.replace(
            "{{construct_init_equals}}\n", ""
        )
        cpp_file_contents = cpp_file_contents.replace(
            "\n{{getters_setters}}\n", ""
        )
    with open(cpp_file_path, 'w') as f:
        f.write(cpp_file_contents)
    # Header file: include-guard macro plus the same common placeholders.
    h_file_contents = h_file_contents.replace(
        "{{class_name_caps}}", file_name.upper()
    )
    h_file_contents = h_file_contents.replace(
        "{{class_name}}", file_name
    )
    h_file_contents = h_file_contents.replace(
        "{{username}}", username
    )
    h_file_contents = h_file_contents.replace(
        "{{namespace}}", namespace
    )
    h_file_contents = h_file_contents.replace(
        "{{date}}", date
    )
    if len(args) > 0:
        # Constructor declaration: Name(Type, ...);
        class_construct_full = file_name + "("
        for key, value in parse:
            class_construct_full += key + ", "
        class_construct_full = class_construct_full[:-2] + ");"
        h_file_contents = h_file_contents.replace(
            "{{class_construct_full}}", class_construct_full
        )
        # Getter declarations, then setter declarations.
        getters_setters = ""
        for key, value in parse:
            getters_setters += "\t\t" + key + " get" + capitalize(value) + "();\n"
        getters_setters += '\n'
        for key, value in parse:
            getters_setters += "\t\tvoid set" + capitalize(value) + "(" + key + " s" + capitalize(value) + ");\n"
        h_file_contents = h_file_contents.replace(
            "{{getters_setters}}", getters_setters
        )
        # Member field declarations.
        class_fields = ""
        for key, value in parse:
            class_fields += "\t\t" + key + " " + value + ";\n"
        h_file_contents = h_file_contents.replace(
            "{{class_fields}}", class_fields
        )
    else:
        h_file_contents = h_file_contents.replace(
            "\n\t\t{{class_construct_full}}", ""
        )
        h_file_contents = h_file_contents.replace(
            "{{getters_setters}}\n", ""
        )
        h_file_contents = h_file_contents.replace(
            "{{class_fields}}", ""
        )
    with open(h_file_path, 'w') as f:
        f.write(h_file_contents)
def new_main():
    """Fill in the Main.cpp template placeholders in place."""
    main_path = file_path + "/src/Main.cpp"
    with open(main_path, 'r') as source:
        contents = source.read()
    # Fixed placeholder -> value substitutions for the entry-point file.
    replacements = (
        ("{{class_name}}", "Main"),
        ("{{namespace}}", namespace),
        ("{{username}}", username),
        ("{{date}}", date),
    )
    for placeholder, substitute in replacements:
        contents = contents.replace(placeholder, substitute)
    with open(main_path, 'w') as target:
        target.write(contents)
# Dispatch on the requested action; "namespace" and "project" both
# just regenerate Main.cpp.
if action == "class":
    new_class()
elif action == "namespace" or action == "project":
    new_main()
8020404 | <gh_stars>1-10
from typing import AsyncGenerator
import pygame
import random
from car_simulation import Car
from dqn import Agent
import numpy as np
# Window and sprite dimensions for the pygame simulation.
WIDTH = 1920
HEIGHT = 1080
CAR_SIZE_X = 60
CAR_SIZE_Y = 60
BORDER_COLOR = (255, 255, 255, 255) # Color To Crash on Hit
current_generation = 0 # Generation counter
def train():
    """Train the DQN agent on the car simulation, rendering each step."""
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    game_map = pygame.image.load('map.png').convert() # Convert Speeds Up A Lot
    clock = pygame.time.Clock()
    # NOTE(review): "epsilson" matches the dqn.Agent keyword spelling --
    # confirm against dqn.py before renaming.
    agent = Agent(gamma=0.99, epsilson=1.0, batch_size=64, n_actions=4, eps_end=0.01, input_dims=[5], lr = 0.01)
    scores, eps_history = [], []
    n_games = 1000
    for i in range(n_games):
        car = Car()
        done = False
        score = 0
        observation = car.get_data()
        while not done:
            # Actions: 0 = steer left, 1 = steer right, 2 = slow down
            # (only above the minimum speed), anything else = speed up.
            # NOTE(review): indentation was lost in this source; the
            # final else is reconstructed as belonging to the outer
            # if/elif chain (action 3 = speed up) -- confirm upstream.
            action = agent.choose_action(observation)
            if action == 0:
                car.angle += 10 # Left
            elif action == 1:
                car.angle -= 10 # Right
            elif action == 2:
                if(car.speed - 2 >= 12):
                    car.speed -= 2 # Slow Down
            else:
                car.speed += 2 # Speed Up
            screen.blit(game_map, (0, 0))
            car.update(game_map)
            car.draw(screen)
            pygame.display.flip()
            clock.tick(30)
            # Standard DQN transition: next observation, reward, done flag.
            observation_, reward, done = car.get_data(), car.get_reward(), not car.is_alive()
            score += reward
            agent.store_transition(observation, action, reward, observation_, done)
            agent.learn()
            observation = observation_
        scores.append(score)
        eps_history.append(agent.epsilon)
        # Rolling average over the last 100 episodes for progress logging.
        avg_score = np.mean(scores[-100:])
        print(f'episode: {i}, score = {round(score,2)}, epsilon= {round(agent.epsilon,3)}, avg_score = {round(avg_score,2)}')
def random_simulation():
    """Drive the car with uniformly random actions (baseline, no learning)."""
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    game_map = pygame.image.load('map.png').convert() # Convert Speeds Up A Lot
    clock = pygame.time.Clock()
    n_games = 1000
    for i in range(n_games):
        car = Car()
        done = False
        # NOTE(review): score and observation are set but never used here.
        score = 0
        observation = car.get_data()
        while not done:
            # Same action semantics as train(); see the NOTE there about
            # the reconstructed else attachment.
            action = random.choice([0,1,2,3])
            if action == 0:
                car.angle += 10 # Left
            elif action == 1:
                car.angle -= 10 # Right
            elif action == 2:
                if(car.speed - 2 >= 12):
                    car.speed -= 2 # Slow Down
            else:
                car.speed += 2 # Speed Up
            screen.blit(game_map, (0, 0))
            car.update(game_map)
            car.draw(screen)
            pygame.display.flip()
            clock.tick(30)
            done = not car.is_alive()
# Entry point: train by default; swap the call to run the random baseline.
if __name__ == '__main__':
    train()
    # random_simulation()
| StarcoderdataPython |
3256175 | <reponame>chuta2323/Pythonista
import appex
from urllib.parse import quote
from webbrowser import open
# Twitter account handle inserted into the feather:// URL scheme.
TWITTER_ID = 'Input your ID'
if __name__ == '__main__':
    # Only meaningful when run as a Pythonista share-sheet extension.
    if appex.is_running_extension():
        # No argument
        info = appex.get_web_page_info()
        title = info['title']
        url = info['url']
        # Create scheme
        scheme = 'feather://' + TWITTER_ID + '/post?text='
        text = title + ' - ' + url
        scheme = scheme + quote(text)
        # NOTE(review): ``open`` here is webbrowser.open (imported above),
        # which shadows the builtin -- it launches the feather:// URL.
        open(scheme)
| StarcoderdataPython |
4951690 | import numpy as np
import matplotlib.animation
import matplotlib.pyplot as plt
import mpl_toolkits.axes_grid1
import scipy.interpolate
pi = np.pi
def rebin_1D(a, shape):
    """Downsample a 1-D array to length ``shape[0]`` by block-averaging.

    ``a.shape[0]`` must be an integer multiple of ``shape[0]``.
    """
    factor = a.shape[0] // shape[0]
    return a.reshape(shape[0], factor).mean(-1)
def rebin_2D(a, shape):
    """Downsample a 2-D array to *shape* by averaging over rectangular blocks.

    Each dimension of ``a`` must be an integer multiple of the
    corresponding entry of *shape*.
    """
    rows, cols = shape
    blocks = a.reshape(rows, a.shape[0] // rows, cols, a.shape[1] // cols)
    return blocks.mean(-1).mean(1)
def log_bin(array, x, x_min, x_max, n_bin):
    """Average *array* in *n_bin* logarithmically spaced bins of *x*.

    Bins are half-open ``[edge_i, edge_{i+1})``; an empty bin yields NaN
    (mean of an empty selection). Returns ``(binned_array, mean_x)``:
    the per-bin means of *array* and of *x*.
    """
    edges = np.logspace(np.log10(x_min), np.log10(x_max), n_bin+1,
                        endpoint=True)
    binned = np.zeros(n_bin, dtype=array.dtype)
    centers = np.zeros(n_bin, dtype=array.dtype)
    for b, (lo, hi) in enumerate(zip(edges[:-1], edges[1:])):
        selection = np.logical_and(lo <= x, x < hi)
        binned[b] = np.mean(array[selection])
        centers[b] = np.mean(x[selection])
    return binned, centers
def create_Gaussian_field_1d(P, n_grid, box_size, mean=0, output_FT=False, precision=np.float32):
    """Draw a 1-D Gaussian random field realisation with power spectrum P.

    FIX: removed the large block of commented-out (superseded)
    implementation that previously preceded this code.

    Args:
        P (callable): Power spectrum; called on an array of k values.
        n_grid (int): Number of real-space grid points (assumed even).
        box_size (float): Physical size of the box.
        mean (float): Mean of the field. When non-zero the k=0 mode is
            fixed to it; otherwise the k=0 mode is drawn randomly.
        output_FT (bool): If True, also return the Fourier modes and
            their k values.
        precision: dtype used for the k grid (default np.float32).

    Returns:
        ndarray: The field ``m``; or ``(m, m_ft, k)`` if *output_FT*.
    """
    k_grid = np.fft.rfftfreq(n_grid).astype(precision)
    k_min = 2*pi/(box_size/n_grid)
    V = box_size/(n_grid)**2
    P_grid = np.atleast_1d(P(k_grid*k_min))
    # Gaussian amplitude times a uniformly random phase per mode.
    m_ft = np.random.normal(scale=np.sqrt(1/V*P_grid))*np.exp(2j*pi*np.random.random(k_grid.shape))
    if mean != 0:
        m_ft[k_grid == 0] = mean
    else:
        m_ft[k_grid == 0] = np.random.normal(scale=np.sqrt(1/V*P_grid[k_grid==0]))
    m = np.fft.irfft(m_ft)
    if output_FT:
        return m, m_ft, k_grid*k_min
    else:
        return m
def calculate_pseudo_P_k_1d(m1, m2, box_size, n_k_bin=None, k_min=None, k_max=None, logspaced=False):
    """Estimate the 1-D (cross) power spectrum of maps *m1* and *m2*.

    With ``n_k_bin=None`` every Fourier mode above k=0 is returned
    individually; otherwise the modes are averaged in *n_k_bin* linear
    (or log-spaced) bins between *k_min* and *k_max*.

    Returns:
        tuple: (Pk_real, Pk_err, k_mean, bin_edges, n_mode), with the
        power rescaled by the mode volume V = box_size / n^2 and k in
        physical units.
    """
    if m1.shape != m2.shape:
        # FIX: the original formatted "{}x{} vs {}x{}" with the two 1-D
        # shapes (only 2 values), so the error path itself raised
        # IndexError instead of this ValueError.
        raise ValueError("Map dimensions don't match: {} vs {}".format(m1.shape, m2.shape))
    m1m2 = np.fft.rfft(m1)*np.conj(np.fft.rfft(m2))
    k_grid = np.fft.rfftfreq(m1.shape[0])
    k_min_box = 2*pi/(box_size/m1.shape[0])
    if n_k_bin is None:  # FIX: identity comparison, not == None
        # Unbinned: one entry per non-zero mode; no error estimate.
        # NOTE(review): Pk_imag is computed but not returned -- confirm
        # whether the imaginary part was meant to be exposed.
        bin_edges = k_grid + k_min_box/2
        Pk_real = m1m2[1:].real
        Pk_imag = m1m2[1:].imag
        Pk_err = np.zeros_like(Pk_real)
        k_mean = k_grid[1:]
        n_mode = np.ones(Pk_real.size, dtype=int)
    else:
        # Bin edges in units of the fundamental mode k_min_box.
        if logspaced:
            bin_edges = np.logspace(np.log10(k_min/k_min_box), np.log10(k_max/k_min_box), n_k_bin+1, endpoint=True)
        else:
            bin_edges = np.linspace(k_min/k_min_box, k_max/k_min_box, n_k_bin+1, endpoint=True)
        n_bin = n_k_bin
        Pk_real = np.zeros(n_bin)
        Pk_err = np.zeros(n_bin)
        Pk_imag = np.zeros(n_bin)
        k_mean = np.zeros(n_bin)
        n_mode = np.zeros(n_bin)
        bin_idx = np.searchsorted(k_grid, bin_edges)
        for i in range(n_bin):
            if bin_idx[i+1] - bin_idx[i] == 0:
                # Empty bin: report the (geometric) bin center, zero power.
                if logspaced:
                    k_mean[i] = np.sqrt(bin_edges[i]*bin_edges[i+1])
                else:
                    k_mean[i] = (bin_edges[i]+bin_edges[i+1])/2
            else:
                P = m1m2[bin_idx[i]:bin_idx[i+1]]
                Pk_real[i] = np.mean(P.real)
                Pk_imag[i] = np.mean(P.imag)
                Pk_err[i] = np.sqrt(np.var(P.real)/len(P))
                k_mean[i] = np.mean(k_grid[bin_idx[i]:bin_idx[i+1]])
                n_mode[i] = len(P)
    V = box_size/(m1.shape[0])**2
    return Pk_real*V, Pk_err*V, k_mean*k_min_box, bin_edges*k_min_box, n_mode
def interpolated_powerspectrum_from_file(filename):
    """Load (k, P) columns from *filename* and return a callable P(k).

    log P is interpolated linearly in log k (extrapolating outside the
    tabulated range, ext=0) and the returned callable gives the
    dimensionless form k^2 P(k) / (2 pi), with P(k<=0) = 0.
    """
    k_grid, P_grid = np.loadtxt(filename, unpack=True)
    log_P_intp = scipy.interpolate.InterpolatedUnivariateSpline(np.log(k_grid), np.log(P_grid), k=1, ext=0)
    def P(k):
        # NOTE(review): assumes k is a float ndarray; an integer array
        # would make np.zeros_like truncate the output -- confirm callers.
        P_k = np.zeros_like(k)
        P_k[k>0] = 1/(2*pi)*k[k>0]**2*np.exp(log_P_intp(np.log(k[k>0])))
        return P_k
    return P
def correlation_coefficient(cov):
    """Convert a covariance matrix into the correlation matrix.

    Scales row i and column j of *cov* by 1/sqrt(cov[i,i]*cov[j,j]),
    so the diagonal of the result is 1.
    """
    sigma = np.sqrt(np.diag(cov))
    scale = np.diag(1.0 / sigma)
    return scale @ cov @ scale
def subplot_colorbar(im, axes, **kwargs):
    """Attach a colorbar to *axes* in a narrow axis appended on its right.

    Extra keyword arguments are forwarded to ``plt.colorbar``.
    """
    cax = mpl_toolkits.axes_grid1.make_axes_locatable(axes).append_axes("right", size = "5%", pad = 0.05)
    plt.colorbar(im, cax=cax, **kwargs)
class AnimatePhaseSpace:
    """Animate one or more phase-space snapshot arrays with matplotlib.

    Each snapshot is indexed as ``[timestep, component, ...]`` where
    component 0 is plotted on x and component 1 on y; all snapshots must
    share the same number of timesteps.
    """
    def __init__(self, snapshots, fig, ax, xlim=None, ylim=None,
                 trails=False,
                 formats=None,
                 anim_kwargs=None):
        """Set up one scatter artist per snapshot and start the animation.

        FIX: ``formats``/``anim_kwargs`` were mutable default arguments;
        replaced with None sentinels (same effective defaults).
        """
        if formats is None:
            formats = []
        if anim_kwargs is None:
            anim_kwargs = {"interval": 50}
        self.points = []
        self.ax = ax
        self.xlim = xlim
        self.ylim = ylim
        self.trails = trails
        # FIX: isinstance instead of type(x) == list.
        if isinstance(snapshots, list):
            self.snapshots = snapshots
        else:
            self.snapshots = [snapshots, ]
        self.n_timesteps = self.snapshots[0].shape[0]
        for i, s in enumerate(self.snapshots):
            if s.shape[0] != self.n_timesteps:
                raise RuntimeError("Snapshots with unequal number of timesteps!")
            # FIX: bare ``except:`` replaced with the specific lookup
            # failures (missing/short/odd formats stay best-effort);
            # copy so the caller's dict is not mutated below.
            try:
                f = dict(formats[i])
            except (IndexError, KeyError, TypeError):
                f = {}
            if "marker" not in f:
                f["marker"] = "."
            self.points.append(ax.plot([], [], ls="none", **f)[0])
        self.animation = matplotlib.animation.FuncAnimation(
            fig, func=self.update_animation, init_func=self.init_animation,
            frames=self.n_timesteps, **anim_kwargs)

    def init_animation(self):
        """Apply fixed axis limits (when given) and enable the grid."""
        if self.xlim:
            self.ax.set_xlim(*self.xlim)
        if self.ylim:
            self.ax.set_ylim(*self.ylim)
        self.ax.grid()

    def update_animation(self, timestep_idx):
        """Draw frame *timestep_idx*; autoscale y to 3 sigma of velocity."""
        std_v = 0
        for i, p in enumerate(self.points):
            if self.trails:
                # Trails: draw every timestep up to the current one.
                p.set_data(self.snapshots[i][:timestep_idx, 0], self.snapshots[i][:timestep_idx, 1])
            else:
                p.set_data(self.snapshots[i][timestep_idx, 0], self.snapshots[i][timestep_idx, 1])
            std_v = max(std_v, np.std(self.snapshots[i][timestep_idx, 1]))
        if not self.ylim:
            self.ax.set_ylim(-3*std_v, 3*std_v)
        return self.points
# Smoke test: generate one realisation of a 1/k field when run directly.
if __name__ == "__main__":
    print("hello")
    def P(k):
        # 1/k spectrum with the k=0 mode zeroed out.
        p = np.zeros_like(k)
        p[k!=0] = k[k!=0]**-1
        return p
    n_grid = 100
    L = 1
    d = create_Gaussian_field_1d(P, n_grid, L)
11370591 | <gh_stars>1-10
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
class GaiaImageCompareArguments(object):
    """Argument definitions and validation for the Gaia image-compare run."""

    name = 'Gaia Image Compare'

    # argparse-style (flags, options) pairs consumed by the harness.
    args = [
        [['--store-reference-image'],
         {'action': 'store_true',
          'default': False,
          'help': 'Store the captured screenshots as reference images',
          }],
        [['--fuzz-factor'],
         {'type': int,
          'default': 15,
          'metavar': int,
          'help': 'fuzz value supplied to ImageMagick call, in percentage. Default value is %(default)s percent.',
          }],
        [['--reference-path'],
         {'default': 'reference_images',
          'help': 'Location of reference images, relative to the current location, Default folder is %(default)s',
          }],
        [['--screenshots-path'],
         {'default': 'screenshots',
          'help': 'Path of screenshot images, relative to the current location, Default folder is %(default)s',
          }]
    ]

    def verify_usage_handler(self, args):
        """Reject fuzz factors outside the 0-100 percent range."""
        if args.fuzz_factor < 0 or args.fuzz_factor > 100:
            raise ValueError('fuzz_factor must be between 0 and 100')
| StarcoderdataPython |
3428886 | from django.contrib import admin
from .models import Exam, Room, ExamVenue, InvigilatorResponse
from .forms import RestrictedResponseForm
# register without modification
admin.site.register(Exam)
admin.site.register(Room)
admin.site.register(ExamVenue)
# custom admin classes
# restrict venue options
class ResponseAdmin(admin.ModelAdmin):
    """Admin for invigilator responses using the venue-restricted form."""
    form = RestrictedResponseForm
admin.site.register(InvigilatorResponse,ResponseAdmin)
| StarcoderdataPython |
388713 | from django.contrib import admin
from .models import Node, Author, Post, Comment, Likes, FriendRequest, Inbox
# Expose every social-app model in the Django admin with default options.
admin.site.register(Node)
admin.site.register(Author)
admin.site.register(Post)
admin.site.register(Comment)
admin.site.register(Likes)
admin.site.register(FriendRequest)
admin.site.register(Inbox)
4963932 | from flask_login import login_required
from flasky.decorators import admin_required
from flasky.decorators import permission_required
from flasky.main import main
from flasky.models import Permission
@main.route('/secret')
@login_required
def secret():
    """Page visible to any authenticated user."""
    return 'Only authenticated users are allowed!'
@main.route('/admin', endpoint='administrator')
@login_required
@admin_required
def for_admin_only():
    """Page restricted to administrators."""
    return "only for administrator!"
@main.route('/moderator', endpoint='moderator')
@login_required
@permission_required(Permission.MODERATE_COMMENTS)
def for_moderators_only():
    """Page restricted to users with the comment-moderation permission."""
    return "For comment moderators!"
| StarcoderdataPython |
8191314 | import numpy as np
# Cell codes used in the board matrix. Note FOOD shares the value of
# UNOCCUPIED, so food cells read as safe.
UNOCCUPIED = 1
OCCUPIED = -1
FOOD = 1
HEAD = -2

def update_board(state):
    """Build a 2-D occupancy grid (rows = y, cols = x) from a game state.

    Cell values: UNOCCUPIED/FOOD (1) for safe cells, OCCUPIED (-1) for
    snake bodies, HEAD (-2) for snake heads, and 4 for our own tail
    (unless the game just started or the tail is stacked).

    Args:
        state (dict): Battlesnake-style game state with a "board" entry
            ("height", "food", "snakes" and optionally "width") and a
            "you" entry.

    Returns:
        list[list[int]]: height x width grid of cell codes.
    """
    board_state = state["board"]
    height = board_state["height"]
    # FIX: columns must span the board *width*; the original used
    # ``height`` for both axes and only worked on square boards.
    # Falls back to ``height`` when no width is present.
    width = board_state.get("width", height)
    Matrix = [[UNOCCUPIED for x in range(width)] for y in range(height)]
    food_coords = board_state['food']
    snakes = board_state['snakes']
    my_body = state['you']['body']
    for coord in food_coords:
        Matrix[coord['y']][coord['x']] = FOOD
    for snake in snakes:
        snake_body = snake['body']
        # Everything except the head is occupied.
        for coord in snake_body[1:]:
            Matrix[coord['y']][coord['x']] = OCCUPIED
        tail_coord = snake_body[-1]
        one_coord = snake_body[-2]
        # A tail cell is normally vacated next turn, so mark it safe...
        Matrix[tail_coord['y']][tail_coord['x']] = UNOCCUPIED
        # ...unless the tail is stacked on the previous segment (snake
        # just ate / game start). FIX: the original compared
        # ``Tail_coord['y'] == Tail_coord['y']`` (always True), so any
        # x-match wrongly marked the tail occupied.
        if tail_coord['x'] == one_coord['x'] and tail_coord['y'] == one_coord['y']:
            Matrix[tail_coord['y']][tail_coord['x']] = OCCUPIED
        head_coord = snake_body[0]
        Matrix[head_coord['y']][head_coord['x']] = HEAD
    # Our own body (head included) is simply occupied.
    for coord in my_body:
        Matrix[coord['y']][coord['x']] = OCCUPIED
    tail = my_body[-1]
    oneback = my_body[-2]
    # 4 marks our own (chaseable) tail cell.
    Matrix[tail['y']][tail['x']] = 4
    if state['turn'] < 3:
        # Early turns: the body is still stacked, keep the tail occupied.
        Matrix[tail['y']][tail['x']] = OCCUPIED
    if tail['x'] == oneback['x'] and tail['y'] == oneback['y']:
        # Stacked tail (we just ate): it will not move next turn.
        Matrix[tail['y']][tail['x']] = OCCUPIED
    return Matrix
5003800 | <reponame>EthanCarragher/anesthetic
"""Tools for reading from polychord chains files."""
import numpy as np
from anesthetic.read.getdistreader import GetDistReader
class PolyChordReader(GetDistReader):
    """Read polychord files."""
    def samples(self):
        """Read ``<root>_dead-birth.txt`` in polychord format.

        Returns (samples, logL, logL_birth): the parameter samples plus
        the flattened likelihood and birth-likelihood columns (the last
        two columns of the file).
        """
        data = np.loadtxt(self.birth_file)
        try:
            # Merge in any remaining physical live points, drop exact
            # duplicates and sort by logL (second-to-last column).
            _data = np.loadtxt(self.phys_live_birth_file)
            data = np.concatenate([data, _data])
            data = np.unique(data, axis=0)
            i = np.argsort(data[:, -2])
            data = data[i, :]
        except (OSError, IOError):
            # No live-points file: the dead points alone are sufficient.
            pass
        samples, logL, logL_birth = np.split(data, [-2, -1], axis=1)
        return samples, logL.flatten(), logL_birth.flatten()
    @property
    def birth_file(self):
        """File containing dead and birth contours."""
        return self.root + '_dead-birth.txt'
    @property
    def phys_live_birth_file(self):
        """File containing physical live points."""
        return self.root + '_phys_live-birth.txt'
4876077 | import csv
import numpy as np
from sklearn import metrics
import sed_eval
import inference
import utils
def evaluate_audio_tagging(y_true, y_pred, threshold=0.5):
    """Evaluate audio tagging performance.

    Computes class-wise, macro-averaged and micro-averaged scores for
    2-D arrays of ground truth values and predictions, where the first
    dimension is the sample axis and the second is the class axis.
    Predictions are binarized at *threshold* before scoring.

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of predictions.
        threshold (float): Threshold used to binarize predictions.

    Returns:
        tuple: (class_scores, macro_scores, micro_scores), each a list
        as produced by :func:`compute_audio_tagging_scores`.

    Notes:
        The element ordering of `y_true` and `y_pred` must be the same.
    """
    binarized = inference.binarize_predictions_2d(y_pred, threshold)
    per_class = compute_audio_tagging_scores(y_true, y_pred, binarized)
    micro = compute_audio_tagging_scores(
        y_true, y_pred, binarized, average='micro')
    # Macro average = unweighted mean of the per-class values.
    macro = [np.mean(metric_values) for metric_values in per_class]
    return per_class, macro, micro
def compute_audio_tagging_scores(y_true, y_pred, y_pred_b, average=None):
    """Compute prediction scores using several performance metrics.

    The following metrics are used:

    * F1 Score
    * Precision
    * Recall
    * Equal error rate (EER)
    * Receiver operator characteristic (ROC) area under curve (AUC)

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of prediction probabilities.
        y_pred_b (np.ndarray): 2D array of binary predictions.
        average (str): The averaging method. Either ``'macro'``,
            ``'micro'``, or ``None``, where the latter is used to
            disable averaging.

    Returns:
        tuple: List of scores corresponding to the metrics used (in
        the order listed above).

    Notes:
        The element ordering of `y_true`, `y_pred`, and `y_pred_b` must
        be the same.
    """
    # Compute precision and recall scores
    precision, recall, f1_score, _ = metrics.precision_recall_fscore_support(
        y_true, y_pred_b, average=average)
    # Compute equal error rate.
    # NOTE(review): compute_eer is not defined in this view of the file;
    # presumably a module-level helper elsewhere -- confirm.
    if average is None:
        # Per-class EER, one value per column of y_true/y_pred.
        eer = np.array([compute_eer(y_true[:, i].flatten(),
                                    y_pred[:, i].flatten())
                        for i in range(y_true.shape[1])])
    else:
        eer = compute_eer(y_true.flatten(), y_pred.flatten())
    # Compute area under curve (AUC) score
    auc = metrics.roc_auc_score(y_true, y_pred, average=average)
    return [f1_score, precision, recall, eer, auc]
def write_audio_tagging_results(results, output_path, print_results=True):
    """Write audio tagging results to a CSV file.

    Args:
        results (tuple): Tuple containing class scores, macro-averaged
            scores, and micro-averaged scores in that order.
        output_path (str): File path of the output CSV file.
        print_results (bool): Whether to also print results to console.
    """
    class_scores, macro_scores, micro_scores = results
    with open(output_path, 'w') as f:
        writer = csv.writer(f, quoting=csv.QUOTE_NONNUMERIC)

        # Convenience function for writing and printing a row of data
        def _write_row(row, is_header=False, use_separator=False):
            """Write and (optionally) print a row of data."""
            writer.writerow(row)
            if print_results:
                # NOTE(review): _print_row is not defined in this view of
                # the file; presumably defined further down -- confirm.
                _print_row(row, is_header, use_separator)

        header = ['Class', 'F1 Score', 'Precision', 'Recall', 'EER', 'AUC']
        _write_row(header, is_header=True, use_separator=True)
        # Write class-specific scores
        # Transpose so each row holds one class's metric values.
        class_scores = np.array(class_scores).T
        for i, class_i_scores in enumerate(class_scores):
            _write_row([utils.LABELS[i]] + class_i_scores.tolist(),
                       use_separator=(i == len(class_scores) - 1))
        # Write macro-averaged scores
        _write_row(['Macro Average'] + macro_scores)
        # Write micro-averaged scores
        _write_row(['Micro Average'] + micro_scores)
def evaluate_sed(ground_truth, predictions, names, time_resolution=1.0):
    """Evaluate sound event detection performance using sed_eval [1]_.

    The ground truth values and predictions are assumed to be a list of
    *event lists*, where each event list corresponds to a particular
    audio clip. An event list is a list of events, and an event is a
    ``(label, onset, offset)`` tuple.

    Args:
        ground_truth (list): List of ground truth event lists.
        predictions (list): List of predicted event lists.
        names (list): File names of the audio clips.
        time_resolution (float): Resolution of event times.

    Returns:
        An ``sed_eval.SoundEventMetrics`` instance.

    Notes:
        The element ordering of `ground_truth`, `predictions`, and
        `names` must be the same.

    References:
        .. [1] <NAME>, <NAME>, and <NAME>,
               "Metrics for polyphonic sound event detection",
               Applied Sciences, 6(6):162, 2016
    """
    segment_based_metrics = sed_eval.sound_event.SegmentBasedMetrics(
        event_label_list=utils.LABELS, time_resolution=time_resolution)
    # Evaluate the performance for each example
    for i, name in enumerate(names):
        # `name` is captured by the closure so every event of this clip is
        # tagged with the clip's file name.
        def _event_list(entries):
            """Create an sed_eval-compatible event list."""
            return [{'file': name,
                     'event_label': label,
                     'event_onset': onset,
                     'event_offset': offset}
                    for label, onset, offset in entries]
        gt_event_list = _event_list(ground_truth[i])
        pred_event_list = _event_list(predictions[i])
        # Metrics accumulate across calls; the instance holds the totals.
        segment_based_metrics.evaluate(
            reference_event_list=gt_event_list,
            estimated_event_list=pred_event_list)
    return segment_based_metrics
def compute_eer(y_true, y_pred):
    """Compute the equal error rate (EER).

    The EER is the point on the ROC curve where the false positive rate
    equals the false negative rate (1 - TPR).

    Args:
        y_true (np.ndarray): Array of ground truth values.
        y_pred (np.ndarray): Array of predictions.

    Returns:
        float: The equal error rate.

    Note:
        NOTE(review): the original docstring said "2D array", but
        ``metrics.roc_curve`` expects 1D inputs; callers in this file
        flatten before calling — confirm intended contract.
    """
    fpr, tpr, _ = metrics.roc_curve(y_true, y_pred)
    # Find the points closest to the true EER point
    points = list(zip(fpr, tpr))
    # First index where FPR exceeds FNR (= 1 - TPR); the EER lies on the
    # segment between this point and the previous one.  `(i or 1) - 1`
    # guards against i == 0 (would otherwise index points[-1]).
    i = np.argmax(fpr > 1-tpr)
    p1 = points[(i or 1) - 1]
    p2 = points[i]
    # Interpolate between p1 and p2
    if abs(p2[0] - p1[0]) < 1e-6:
        # Near-vertical segment: the shared FPR is the EER.
        rate = p1[0]
    else:
        # Solve for the intersection of the segment with the FPR == 1 - TPR
        # line: rate = (1 - offset) / (1 + gradient).
        gradient = (p2[1] - p1[1]) / (p2[0] - p1[0])
        offset = p1[1] - gradient * p1[0]
        rate = (1 - offset) / (1 + gradient)
    return rate
def compute_map(y_true, y_pred, k=3):
    """Compute the mean average precision at k (MAP@k).

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of predictions.
        k (int): The maximum number of predicted elements.

    Returns:
        float: The mean average precision at k.

    Note:
        This function accepts a 2D array for `y_true`, but it assumes
        the ground truths are single-label.
    """
    # Double argsort converts probabilities into descending ranks:
    # a value of 0 in `descending.argsort()` means "highest probability".
    descending = y_pred.argsort()[:, ::-1]
    ranks = descending.argsort()[y_true.astype(bool)] + 1
    # Single-label assumption: exactly one true label per example.
    if len(ranks) > len(y_true):
        raise Exception('Multi-label classification not supported')
    # True labels ranked beyond k contribute nothing to the score.
    reciprocal = np.where(ranks <= k, 1 / ranks, 0.0)
    return np.sum(reciprocal) / len(y_true)
def compute_thresholds(y_true, y_pred):
    """Compute the optimal probability thresholds for each class.

    This function computes the precision-recall curve for each class,
    and selects the threshold corresponding to the highest F1 score.

    Args:
        y_true (np.ndarray): 2D array of ground truth values.
        y_pred (np.ndarray): 2D array of predictions.

    Returns:
        list: The optimal per-class probability thresholds.
    """
    thresholds = []
    for i in range(y_true.shape[1]):
        # precision_recall_curve already returns ndarrays, so the previous
        # redundant np.array() re-wrapping has been removed.  p and r have
        # one more element than t: a final (precision=1, recall=0) sentinel
        # whose F1 is 0 and therefore never wins argmax (ties resolve to
        # the first index), so t[np.argmax(...)] stays in bounds.
        p, r, t = metrics.precision_recall_curve(y_true[:, i],
                                                 y_pred[:, i])
        # Small epsilon guards against 0/0 when precision == recall == 0.
        f1_score = 2 * p * r / (p + r + 1e-9)
        thresholds.append(t[np.argmax(f1_score)])
    return thresholds
def _print_row(row, is_header=False, use_separator=False):
"""Print the given row in a tabulated format.
Args:
row (list): List of row cells to print.
is_header (bool): Whether row is a header (non-numeric).
use_separator (bool): Whether to print a horizontal rule after.
"""
cell_format = '{:<%d}' if is_header else '{:<%d.3f}'
row_format = '{:<%d}' + (cell_format * 5)
row_widths = (32, 11, 11, 11, 11, 5)
print((row_format % row_widths).format(*row))
if use_separator:
print('=' * sum(row_widths))
| StarcoderdataPython |
150069 | <reponame>passiopeia/passiopeia-hub<gh_stars>0
"""
Clean up burned OTPs from database
You should use a cron job to trigger this regularly.
"""
import datetime
from django.core.management import BaseCommand
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
from hub_app.models import BurnedOtp
class Command(BaseCommand):
    """
    Management Command for cleaning up old burned OTPs
    """
    help = _('Delete burned OTPs older than 2 hours')

    def handle_clean_burned_otp(self):
        """
        Remove burned OTPs that are older than 2 hours
        """
        cutoff = now() - datetime.timedelta(hours=2)
        stale = BurnedOtp.objects.filter(burned_timestamp__lt=cutoff)
        total = stale.count()
        # Report before deleting so the count reflects what gets removed.
        self.stdout.write(_('Deleting %(count)s burned OTPs.') % {'count': total})
        if total > 0:
            stale.delete()
        self.stdout.write(self.style.SUCCESS(_('Done')))

    def handle(self, *args, **options):
        """Entry point invoked by Django's management framework."""
        self.handle_clean_burned_otp()
| StarcoderdataPython |
8171733 | <filename>huaweicloud-sdk-meeting/huaweicloudsdkmeeting/v1/model/user_dto.py
# coding: utf-8
import pprint
import re
import six
class UserDTO:
    """User model from the huaweicloud Meeting SDK.

    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute names whose scalar values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'id': 'str',
        'status_code': 'int',
        'account': 'str',
        'name': 'str',
        'english_name': 'str',
        'email': 'str',
        'phone': 'str',
        'dept_name': 'str',
        'number': 'str',
        'update_time': 'float',
        'is_hard_terminal': 'bool',
        'vmr_id': 'str',
        'signature': 'str',
        'title': 'str',
        'description': 'str',
        'hide_phone': 'bool',
        'type': 'str'
    }

    attribute_map = {
        'id': 'id',
        'status_code': 'statusCode',
        'account': 'account',
        'name': 'name',
        'english_name': 'englishName',
        'email': 'email',
        'phone': 'phone',
        'dept_name': 'deptName',
        'number': 'number',
        'update_time': 'updateTime',
        'is_hard_terminal': 'isHardTerminal',
        'vmr_id': 'vmrId',
        'signature': 'signature',
        'title': 'title',
        'description': 'description',
        'hide_phone': 'hidePhone',
        'type': 'type'
    }

    def __init__(self, id=None, status_code=None, account=None, name=None, english_name=None, email=None, phone=None, dept_name=None, number=None, update_time=None, is_hard_terminal=None, vmr_id=None, signature=None, title=None, description=None, hide_phone=None, type=None):
        """UserDTO - a model defined in huaweicloud sdk"""
        # Every backing field starts at None so unset attributes still resolve.
        for attr in self.openapi_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        provided = {
            'id': id, 'status_code': status_code, 'account': account,
            'name': name, 'english_name': english_name, 'email': email,
            'phone': phone, 'dept_name': dept_name, 'number': number,
            'update_time': update_time, 'is_hard_terminal': is_hard_terminal,
            'vmr_id': vmr_id, 'signature': signature, 'title': title,
            'description': description, 'hide_phone': hide_phone, 'type': type,
        }
        # Assign only values the caller actually supplied, going through the
        # generated property setters (matches the original per-attribute
        # `if x is not None: self.x = x` blocks).
        for attr, value in provided.items():
            if value is not None:
                setattr(self, attr, value)

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr in self.openapi_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recurse into nested model elements; plain values pass through.
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            elif attr in self.sensitive_list:
                # Mask sensitive scalar values.
                result[attr] = "****"
            else:
                result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, UserDTO):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other


def _user_dto_accessor(attr):
    """Build the trivial get/set property backing the private '_<attr>' slot."""
    private = '_' + attr

    def _get(self):
        return getattr(self, private)

    def _set(self, value):
        setattr(self, private, value)

    return property(_get, _set,
                    doc="Gets or sets the %s of this UserDTO." % attr)


# The original file hand-wrote 17 identical getter/setter property pairs
# (~300 lines of boilerplate) and depended on py2-era `six`; generate the
# properties instead.  Attribute access (u.name / u.name = ...) and all
# serialization behavior are unchanged.
for _attr in UserDTO.openapi_types:
    setattr(UserDTO, _attr, _user_dto_accessor(_attr))
del _attr
| StarcoderdataPython |
11325258 | from sympy import (legendre, Symbol, hermite, chebyshevu, chebyshevt,
chebyshevt_root, chebyshevu_root, assoc_legendre, Rational,
roots, sympify, S, laguerre_l, laguerre_poly)
# Shared symbolic variable used by all polynomial tests below.
x = Symbol('x')
def test_legendre():
    """Check explicit Legendre polynomials, endpoint values and roots."""
    assert legendre(0, x) == 1
    assert legendre(1, x) == x
    assert legendre(2, x) == ((3*x**2-1)/2).expand()
    assert legendre(3, x) == ((5*x**3-3*x)/2).expand()
    assert legendre(4, x) == ((35*x**4-30*x**2+3)/8).expand()
    assert legendre(5, x) == ((63*x**5-70*x**3+15*x)/8).expand()
    assert legendre(6, x) == ((231*x**6-315*x**4+105*x**2-5)/16).expand()
    # P_n(1) = 1, P_n(-1) = (-1)**n; P_n(0) vanishes only for odd n.
    assert legendre(10, -1) == 1
    assert legendre(11, -1) == -1
    assert legendre(10, 1) == 1
    assert legendre(11, 1) == 1
    assert legendre(10, 0) != 0
    assert legendre(11, 0) == 0
    assert roots(legendre(4,x), x) == {
        (Rational(3, 7) - Rational(2, 35)*30**S.Half)**S.Half: 1,
        -(Rational(3, 7) - Rational(2, 35)*30**S.Half)**S.Half: 1,
        (Rational(3, 7) + Rational(2, 35)*30**S.Half)**S.Half: 1,
        -(Rational(3, 7) + Rational(2, 35)*30**S.Half)**S.Half: 1,
    }
def test_assoc_legendre():
    """Check associated Legendre functions P_l^m, including negative m."""
    Plm=assoc_legendre
    # Q = sqrt(1 - x**2), the factor appearing for odd m.
    Q=(1-x**2)**Rational(1,2)
    assert Plm(0, 0, x) == 1
    assert Plm(1, 0, x) == x
    assert Plm(1, 1, x) == -Q
    assert Plm(2, 0, x) == (3*x**2-1)/2
    assert Plm(2, 1, x) == -3*x*Q
    assert Plm(2, 2, x) == 3*Q**2
    assert Plm(3, 0, x) == (5*x**3-3*x)/2
    assert Plm(3, 1, x).expand() == (( 3*(1-5*x**2)/2 ).expand() * Q).expand()
    assert Plm(3, 2, x) == 15*x * Q**2
    assert Plm(3, 3, x) == -15 * Q**3
    # negative m
    assert Plm(1,-1, x) == -Plm(1, 1, x)/2
    assert Plm(2,-2, x) == Plm(2, 2, x)/24
    assert Plm(2,-1, x) == -Plm(2, 1, x)/6
    assert Plm(3,-3, x) == -Plm(3, 3, x)/720
    assert Plm(3,-2, x) == Plm(3, 2, x)/120
    assert Plm(3,-1, x) == -Plm(3, 1, x)/12
def test_chebyshev():
    """Check Chebyshev polynomials and that their computed roots vanish."""
    assert chebyshevt(0, x) == 1
    assert chebyshevt(1, x) == x
    assert chebyshevt(2, x) == 2*x**2-1
    assert chebyshevt(3, x) == 4*x**3-3*x
    # Each root returned by chebyshevt_root must be a zero of T_n.
    for n in range(1, 4):
        for k in range(n):
            z = chebyshevt_root(n, k)
            assert chebyshevt(n, z) == 0
    # Likewise for the second-kind polynomials U_n.
    for n in range(1, 4):
        for k in range(n):
            z = chebyshevu_root(n, k)
            assert chebyshevu(n, z) == 0
def test_hermite():
    """Check the explicit expansion of the 6th Hermite polynomial."""
    assert hermite(6, x) == 64*x**6 - 480*x**4 + 720*x**2 - 120
def test_laguerre():
    """Check generalized and plain Laguerre polynomials against laguerre_poly."""
    alpha = Symbol("alpha")
    # generalized Laguerre polynomials:
    assert laguerre_l(0, alpha, x) == 1
    assert laguerre_l(1, alpha, x) == -x + alpha + 1
    assert laguerre_l(2, alpha, x) == x**2/2 - (alpha+2)*x + (alpha+2)*(alpha+1)/2
    assert laguerre_l(3, alpha, x) == -x**3/6 + (alpha+3)*x**2/2 - (alpha+2)*(alpha+3)*x/2 + (alpha+1)*(alpha+2)*(alpha+3)/6
    # Laguerre polynomials:
    assert laguerre_l(0, 0, x) == 1
    assert laguerre_l(1, 0, x) == 1 - x
    assert laguerre_l(2, 0, x) == 1 - 2*x + x**2/2
    assert laguerre_l(3, 0, x) == 1 - 3*x + 3*x**2/2 - x**3/6
    # Test the lowest 10 polynomials with laguerre_poly, to make sure that it
    # works:
    for i in range(10):
        assert laguerre_l(i, 0, x) == laguerre_poly(i, x)
| StarcoderdataPython |
6558988 | <filename>tests/tests.py
from pygibson import PyGibsonError
from nose.tools import eq_, ok_
from django.conf import settings
settings.configure()
def set_connection():
    """Build a gibson-backed SessionStore, trying IPC first, then TCP.

    Returns:
        SessionStore on success, or None when neither transport connects.
    """
    try:
        settings.SESSION_GIBSON_TIMEOUT = 100
        try:
            # set IPC connection settings
            settings.SESSION_GIBSON_UNIX_SOCKET = '/var/run/gibson.sock'
            from gibson_sessions.sessions import SessionStore
            ss = SessionStore()
        except PyGibsonError:
            # set TCP connection settings
            # NOTE(review): '127.0.0.0' looks like a typo for '127.0.0.1' —
            # confirm against the test environment.
            settings.SESSION_GIBSON_HOST = '127.0.0.0'
            settings.SESSION_GIBSON_PORT = 10128
            from gibson_sessions.sessions import SessionStore
            ss = SessionStore()
        # Verify the server is actually reachable before handing it out.
        ss.session_backend.ping()
        return ss
    except PyGibsonError:
        return None
def modification_test():
    """A stored key marks the session modified and is readable back."""
    storage = set_connection()
    ok_(storage)
    eq_(storage.modified, False)
    storage['gibson'] = 'modified'
    eq_(storage.modified, True)
    eq_(storage['gibson'], 'modified')
def create_and_delete_test():
    """Saving creates the session key; deleting removes it."""
    storage = set_connection()
    ok_(storage)
    storage.save()
    eq_(storage.exists(storage.session_key), True)
    storage.delete()
    eq_(storage.exists(storage.session_key), False)
def create_and_delete_locked_test():
    """Deleting a session succeeds even while its key is locked."""
    storage = set_connection()
    ok_(storage)
    storage.save()
    eq_(storage.exists(storage.session_key), True)
    # Lock the key for 60 seconds, then verify delete still removes it.
    storage.session_backend.lock(storage.prefixed_key_name(storage.session_key), 60)
    storage.delete()
    eq_(storage.exists(storage.session_key), False)
def update_locked_test():
    """Saving overwrites a value even while the session key is locked."""
    storage = set_connection()
    ok_(storage)
    storage.save()
    eq_(storage.exists(storage.session_key), True)
    storage['gibson'] = 'before_lock'
    # Lock the key, then confirm a save still updates the stored value.
    storage.session_backend.lock(storage.prefixed_key_name(storage.session_key), 60)
    storage['gibson'] = 'ignore_lock'
    storage.save()
    eq_(storage['gibson'], 'ignore_lock')
def expiration_test():
    """A session saved with a 1-second expiry is gone after 2 seconds."""
    import time
    storage = set_connection()
    ok_(storage)
    storage.set_expiry(1)
    storage.save()
    eq_(storage.exists(storage.session_key), True)
    time.sleep(2)
    eq_(storage.exists(storage.session_key), False)
6521736 | <reponame>zztkm/cvoverlayimg
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import cv2 as cv
from cvoverlayimg import CvOverlayImage
def main():
    """Overlay a resized transparent PNG onto a background image and show it."""
    cv_background_image = cv.imread("image/bg_takeyabu.jpg")
    cv_overlay_image = cv.imread(
        "image/ninja_hashiru.png",
        cv.IMREAD_UNCHANGED)  # specify IMREAD_UNCHANGED to load including the alpha channel
    cv_overlay_image = cv.resize(cv_overlay_image, (100, 100))
    # Position passed to CvOverlayImage.overlay; axis order not verified here.
    point = (550, 250)
    image = CvOverlayImage.overlay(cv_background_image, cv_overlay_image,
                                   point)
    cv.imshow("sample", image)
    cv.waitKey(0)  # wait for any key press before exiting


if __name__ == '__main__':
    main()
| StarcoderdataPython |
8112619 | <filename>{{cookiecutter.project_slug}}/{{cookiecutter.project_slug}}/apps/main/actions.py
# Actions are mainly ajax requests/responses to feed into views.
from django.core.files.base import ContentFile
from notifications.signals import notify
from {{cookiecutter.project_slug}}.apps.main.models import *
from {{cookiecutter.project_slug}}.apps.main.utils import (
get_collection,
get_entity,
get_image,
get_text
)
from {{cookiecutter.project_slug}}.settings import (
BASE_DIR,
MEDIA_ROOT
)
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.contrib import messages
from django.http import HttpResponse, JsonResponse
from django.http.response import (
HttpResponseRedirect,
HttpResponseForbidden,
Http404
)
from django.shortcuts import (
get_object_or_404,
render_to_response,
render,
redirect
)
from django.utils import timezone
from django.urls import reverse
import json
import os
import pickle
import re
media_dir = os.path.join(BASE_DIR,MEDIA_ROOT)
###############################################################################################
# Entities ####################################################################################
###############################################################################################
@login_required
def update_entity_status(request,eid):
    '''update_entity_status will change the status of an entity, usually from active to inactive
    or vice versa
    :param eid: the unique id of the entity to change
    '''
    entity = get_entity(eid)
    # Only the owner of the entity's collection may toggle its status.
    if request.user == entity.collection.owner:
        if request.method == 'POST':
            # Flip the active flag (active <-> inactive) and persist it.
            entity.active = not entity.active
            entity.save()
            response_data = {'result':'Entity changed successfully!',
                             'status':entity.active }
            return JsonResponse(response_data)
        else:
            # Owner but not a POST: playful placeholder response.
            return JsonResponse({"Unicorn poop cookies...": "I will never understand the allure."})
    return JsonResponse({"message":"You are not authorized to annotate this collection."})
###############################################################################################
# Collections #################################################################################
###############################################################################################
@login_required
def collection_update_instruction(request,cid):
    '''update the instruction for a particular annotation or markup task
    :param cid: the unique id of the collection to update
    '''
    collection = get_collection(cid)
    # Only the collection owner may change instructions.
    if request.user == collection.owner:
        if request.method == 'POST':
            instruction = request.POST.get("instruction",None)
            fieldtype = request.POST.get("fieldtype",None)
            # Update only when an instruction was supplied and the field type
            # is one the collection's status dict actually tracks.
            if instruction not in ["",None] and fieldtype in collection.status:
                collection.status[fieldtype]['instruction'] = instruction
                collection.save()
                response_data = {'result':'Instruction updated',
                                 'status': instruction }
                return JsonResponse(response_data)
    # Fallthrough (not owner / not POST / invalid input): placeholder response.
    return JsonResponse({"Unicorn poop cookies...": "I will never understand the allure."})
def serve_image_metadata(request,uid):
    '''return image metadata as json
    :param uid: the unique id of the image
    '''
    image = get_image(uid)
    return JsonResponse(image.metadata)
def serve_text_metadata(request,uid):
    '''return text metadata as json
    :param uid: the unique id of the text
    '''
    text = get_text(uid)
    return JsonResponse(text.metadata)
def serve_text(request,uid):
    '''return raw text
    :param uid: the unique id of the text
    '''
    text = get_text(uid)
    return JsonResponse({"original":text.original})
###############################################################################################
# Annotations #################################################################################
###############################################################################################
def update_annotation(user,allowed_annotation,instance):
    '''update_annotation will take a user, and an annotation, some instance
    (text or image) and call the appropriate function to update it.
    :param user: the user object or user id
    :param allowed_annotation: the allowed annotation object or id
    :param instance: the Image or text instance
    '''
    # Accept either model instances or primary keys for user/annotation.
    if not isinstance(user,User):
        user = User.objects.get(id=user)
    if not isinstance(allowed_annotation,Annotation):
        allowed_annotation = Annotation.objects.get(id=allowed_annotation)
    # Dispatch on instance type; implicitly returns None for other types.
    if isinstance(instance,Image):
        return update_image_annotation(user,allowed_annotation,instance)
    elif isinstance(instance,Text):
        return update_text_annotation(user,allowed_annotation,instance)
def update_image_annotation(user,allowed_annotation,image):
    '''update_image_annotation is called from update_annotation given that the
    user has provided an image
    :param user: the user making the annotation
    :param allowed_annotation: the Annotation (name/label) being applied
    :param image: the Image instance or its primary key
    '''
    # Accept either an Image instance or its primary key.
    if not isinstance(image,Image):
        image = Image.objects.get(id=image)
    # Remove annotations done previously by the user for the image
    previous_annotations = ImageAnnotation.objects.filter(creator=user,
                                                          image_id=image.id,
                                                          annotation__name=allowed_annotation.name)
    annotation,created = ImageAnnotation.objects.get_or_create(creator=user,
                                                               image_id=image.id,
                                                               annotation=allowed_annotation)
    # If the annotation was just created, save it, and add report
    if created == True:
        annotation.save()
    # finalize_annotation deletes the stale duplicates and returns the keeper.
    return finalize_annotation(previous_annotations,annotation)
def update_text_annotation(user,allowed_annotation,text):
    '''update_text_annotation is called from update_annotation given that the
    user has provided text
    :param user: the user making the annotation
    :param allowed_annotation: the Annotation (name/label) being applied
    :param text: the Text instance or its primary key
    '''
    # Accept either a Text instance or its primary key.
    if not isinstance(text,Text):
        text = Text.objects.get(id=text)
    # Remove annotations done previously by the user for the image
    previous_annotations = TextAnnotation.objects.filter(creator=user,
                                                         text__id=text.id,
                                                         annotation__name=allowed_annotation.name)
    annotation,created = TextAnnotation.objects.get_or_create(creator=user,
                                                              text=text,
                                                              annotation=allowed_annotation)
    # If the annotation was just created, save it, and add report
    if created == True:
        annotation.save()
    # finalize_annotation deletes the stale duplicates and returns the keeper.
    return finalize_annotation(previous_annotations,annotation)
def finalize_annotation(previous_annotations, annotation):
    """Make *annotation* the single annotation of its name for the instance.

    Every entry in ``previous_annotations`` that is not the kept annotation
    itself is deleted, so only one choice per annotation name survives.

    :param previous_annotations: prior annotations sharing the same name
    :param annotation: the annotation to set/keep
    :return: the kept annotation
    """
    for stale in previous_annotations:
        if stale.id != annotation.id:
            stale.delete()
    return annotation
def clear_user_annotations(user, instance):
    '''clear_user_annotations will remove all annotations for a user for
    an instance, whether an image or text.
    :param user: the user
    :param instance: the image or text to clear annotations for
    :return: True on success, False on failure or unsupported instance type
    '''
    try:
        if isinstance(instance, Text):
            # Bug fix: text annotations are keyed by text__id (the original
            # filtered TextAnnotation by image__id), matching
            # update_text_annotation above.
            previous_annotations = TextAnnotation.objects.filter(creator=user,
                                                                 text__id=instance.id)
        elif isinstance(instance, Image):
            # Bug fix: image annotations are keyed by image_id (the original
            # filtered ImageAnnotation by text__id), matching
            # update_image_annotation above.
            previous_annotations = ImageAnnotation.objects.filter(creator=user,
                                                                  image_id=instance.id)
        else:
            # Unsupported instance type: preserve the original best-effort
            # contract of returning False rather than raising.
            return False
        for annotation in previous_annotations:
            annotation.delete()
        return True
    except Exception:
        # Best-effort: report failure instead of propagating.
        return False
@login_required
def update_annotations(request,instance):
    '''update_annotation_view is a general view to handle update of an annotation for a
    text or image instance
    :param instance: the Image or Text instance being annotated
    '''
    if request.method == 'POST':
        try:
            new_annotations = json.loads(request.POST.get('annotations'))
        except:
            return JsonResponse({"error": "error parsing array!"})
        # Update the annotations
        for new_annotation in new_annotations:
            # Only checked inputs (value "on") represent selected annotations.
            if new_annotation['value'] == "on":
                # Names arrive encoded as "<name>||<label>".
                aname,alabel = new_annotation['name'].split('||')
                annotation_object = Annotation.objects.get(name=aname,
                                                           label=alabel)
                annot = update_annotation(user=request.user,
                                          allowed_annotation=annotation_object,
                                          instance=instance)
        response_data = {'result':'Create post successful!'}
        return JsonResponse(response_data)
    return JsonResponse({"have you ever seen...": "a radiologist ravioli?"})
def clear_annotations(request, instance):
    '''clear annotations view clears all annotations for a text or image instance
    :param instance: the text or image instance
    '''
    if request.method == 'POST':
        try:
            # Bug fix: the original passed the undefined name `image`, which
            # raised NameError (masked by the broad except) for every request.
            status = clear_user_annotations(request.user, instance)
            response_data = {'result':'Annotations successfully cleared',
                             'status': status}
        except Exception:
            response_data = {'result':'Error clearing annotations.'}
        return JsonResponse(response_data)
    return JsonResponse({"have you ever seen...": "a researcher rigatoni?"})
###############################################################################################
# Markup ######################################################################################
###############################################################################################
@login_required
def update_text_markup(request,uid):
    '''update_text_markup stores the user's markup locations for a text
    :param uid: the unique id of the text being marked up
    '''
    if request.method == 'POST':
        try:
            markups = json.loads(request.POST.get('markup'))
        except:
            return JsonResponse({"error": "error parsing markup!"})
        # One markup record per (user, text); reuse it if it exists.
        text_markup,created = TextMarkup.objects.get_or_create(creator=request.user,
                                                               text_id=uid)
        # Replace any previous locations wholesale with the new ones.
        text_markup.locations = markups
        text_markup.save()
        response_data = {'result':markups}
        return JsonResponse(response_data)
    return JsonResponse({"nope...": "nopenope"})
| StarcoderdataPython |
1974420 | <filename>cmdb-backend/resources/idc.py<gh_stars>10-100
from flask import Flask, request, render_template, redirect, url_for, session, g, abort, flash, make_response, jsonify, Response
from datetime import timedelta
from models import IDC, User, db
from common.utility import auth_login_required, hashpass, login_required
from common.restful import Serialization
from common.token_manage import Token_Manager
from app import app
from config import cross_origin
# Shared serializer used to build JSON responses in this module.
task = Serialization()
# Token manager for API token auth; not referenced in the code visible here.
tokenauth = Token_Manager()
@app.route('/api/v1/idc',methods=['GET','POST'])
@cross_origin()
@auth_login_required
def idc():
    '''
    Query information for all IDCs (data centers).
    '''
    if request.method == 'POST':
        #data = [{'idc':'天地祥云','address':'北京某地','manager':'Mr 张','contacts':'1234567'},{'idc':'济南某机房','address':'济南某地','manager':'Mr 张','contacts':'1234567'}]
        data = [ {'idc': idc.name, 'address': idc.address,'manager': idc.contact, 'contacts':idc.phone} for idc in IDC.query.all() ]
        return task.json_message_200(data), 200
    # NOTE(review): GET requests fall through and return None, which Flask
    # turns into a 500 — confirm whether GET should be supported or removed
    # from the route's methods.
4955501 | from stg.api import PulseFile, STG4000
# Demo script exercising an MCS STG4000 stimulator via the stg.api bindings.
# we initialize the STG and print information about it
stg = STG4000()
print(stg, stg.version)
# create a pulsefile with default parameters
p = PulseFile()
# compile the pulsefile and expand the tuple to positional arguments
# and download it into channel 1
# As you can see, indexing starts at zero
stg.download(0, *p())
# start stimulation at channel 1
stg.start_stimulation([0])
# sleep for 500ms
stg.sleep(500)
# create a new pulsefile consisting of 600 repetitve pulses
p = PulseFile(intensity_in_mA=1, burstcount=600)
stg.download(0, *p())
# start and immediatly stop it again
# this shows that an ongoing stimulation can be aborted
stg.start_stimulation([0])
stg.stop_stimulation()
# create a biphasic pulse with 1mA amplitude and a pulse-width of 2ms
# and trigger it every 250 ms
# timing is here determined by python and therefore necessarily not as exact
p = PulseFile(intensity_in_mA=1, pulsewidth_in_ms=2)
stg.download(0, *p())
# NOTE(review): this loop never terminates (stop the script manually), and it
# triggers channel index 1 although nothing was downloaded there — confirm.
while True:
    stg.start_stimulation([0, 1])
    stg.sleep(duration_in_ms=250)
9631598 | <gh_stars>0
import tensorflow as tf
import __main__
import numpy as np
from matplotlib import pyplot as plt
from spsql import spsql
import os
from dotenv import load_dotenv
from termcolor import colored
load_dotenv()
import inspect
# Module-level SQL connection, opened as a side effect of importing this module.
s = spsql()
# Database schema name, overridable via the SCHEMA environment variable.
SCHEMA = os.getenv("SCHEMA", "gw")
# gwtools is optional; without it SNR-related functionality is unavailable.
try:
    from gwtools import mismatch
    from gwtools import gwutils
except:
    print("gwtools not installed, cant calculate SNR")
def tp(bo):
    """Simple decorator-style helper for unit-test predicates.

    *bo* must be a zero-argument callable returning a bool. It is executed
    immediately: a truthy result prints a green "pass"; a falsy result prints
    a red failure report including the callable's name, the last names it
    references and the tail of its source; any exception is caught and
    reported as an "ACTUAL FAIL".

    NOTE(review): the inner function returns ``lambda: 1`` only on the
    exception path and ``None`` otherwise, so ``tp``'s return value is
    inconsistent — callers should not rely on it.
    """
    def testcolor(bo):
        try:
            if bo():
                print(colored('pass','green'))
            else:
                # Failing predicate: show name, referenced names, source tail.
                print(colored('FAIL CONDITION','red'),colored(bo.__name__,'magenta'),colored(bo.__code__.co_names[-5::],'red'),colored(inspect.getsource(bo).split("\n")[-2::],'yellow'))
        except Exception as e:
            print(colored('ACTUAL FAIL','red'), e,colored(bo.__name__,'red'),colored(bo.__code__.co_names[-5::],'red'))
            return lambda : 1
    return testcolor(bo)
def serialize(x):
    """Recursively convert nested arrays to plain lists of Python floats.

    Accepts any mix of (nested) sequences / numpy arrays with numpy or
    native numeric leaves, producing a structure that is JSON serializable.

    Parameters
    ----------
    x : iterable
        Nested iterable of numeric values.

    Returns
    -------
    list
        Nested lists mirroring the input structure, every leaf a float.
    """
    out = []
    for k in x:
        try:
            items = list(k)  # raises TypeError for scalar leaves
        except TypeError:
            out.append(float(k))
        else:
            # BUG FIX: the original only recursed when len(k) > 0, so empty
            # sub-sequences were silently dropped; keep them as [].
            out.append(serialize(items))
    return out
def savetda(end=False):
    """Snapshot the driver script's state to ``<outfile>[_part].npy``.

    All values are read from the ``__main__`` module. ``end=True`` writes
    the final file; otherwise a "_part" suffix marks an intermediate
    checkpoint that later runs overwrite.
    """
    suffix = "" if end else "_part"
    state = (
        __main__.loadfile,
        __main__.Nchop,
        __main__.Ndattotal,
        __main__.ncoeff,
        __main__.xsig,
        __main__.bettiout,
        __main__.pdout,
        __main__.swout,
        __main__.yout,
    )
    np.save(__main__.outfile + suffix, state)
def plot_signals(filen, show=True):
    """Plot signals 1..29 from a saved tda archive: green = label 1, red otherwise.

    NOTE(review): signals are read starting at index 1 while labels are read
    from index 0 — confirm this one-off pairing is intended.
    """
    dump = np.load(filen, allow_pickle=True)
    signals, labels = dump[0], dump[1]
    for idx, sig in enumerate(signals[1:30]):
        colour = "g" if labels[idx][0] == 1 else "r"
        plt.plot(sig, colour)
    if show:
        plt.show()
def prep4Classifier(loadfile, embedi):
    """Load a saved tda archive and build the arrays used for classification.

    Parameters
    ----------
    loadfile : str
        Path of the .npy archive produced by ``savetda``.
    embedi : str
        Which embedding to use; must be one of ``modes`` below, otherwise
        ``xx`` is never assigned and a NameError is raised.

    Returns
    -------
    tuple of ndarray
        (xconcat, y, signals, xembed): embedding concatenated with the raw
        signal, labels, max-normalized raw signals, and normalized embeddings.
    """
    modes = ["pd", "sw", "bv", "bv1", "bv2", "pd1", "pd2"] if False else ["pd", "sw", "bv", "all", "bv1", "bv2", "pd1", "pd2"]
    """prepares the data for classification, input is file and embedding type, output is embedded signals and raw signals"""
    (
        filename_original,
        Nchop,
        Ndattotal,
        ncoeff,
        xsig,
        bettiout,
        pdout,
        swout,
        yout,
    ) = np.load(loadfile, allow_pickle=True)
    y = []
    signals = []  # time domain raw signal
    xembed = []  # embeddings
    xconcat = []  # combined
    # daN is the embedding length; "pd" concatenates two 50-element halves.
    if embedi == "pd":
        daN = 100
    else:
        daN = 50
    Ndat = len(yout)
    # Global max over all signals so every signal shares one normalization.
    maxsig = 0.0
    for xn in xsig:
        maxsig = max(maxsig, np.abs(xn).max())
    for j in range(Ndat):
        if embedi == modes[0]:  # pd
            b = pdout[j]
            xx = np.reshape(np.array(list(b[0]) + list(b[1])), (daN, 1))
        elif embedi == modes[1]:  # sw
            b = np.array(swout[j])
            daN = b.size
            xx = np.reshape(b, (daN, 1))
        elif embedi == modes[2]:  # bv
            bettiout[j][0][0] = 0
            b = bettiout[j]
            xx = np.reshape(np.array(list(b[0]) + list(b[1])), (daN, 1))
        elif embedi == modes[3]:  # all
            bettiout[j][0][0] = 0
            b = np.array(
                list(pdout[j][0])
                + list(pdout[j][1])
                + list(swout[j])
                + list(bettiout[j][0])
                + list(bettiout[j][1])
            )
            daN = b.size
            xx = np.reshape(b, (daN, 1))
        elif embedi == modes[4]:  # bv
            bettiout[j][0][0] = 0
            b = np.array(bettiout[j][0])
            daN = b.size
            xx = np.reshape(np.array(b), (daN, 1))
        elif embedi == modes[5]:  # bv
            bettiout[j][0][0] = 0
            b = np.array(bettiout[j][1])
            daN = b.size
            xx = np.reshape(np.array(b), (daN, 1))
        # NOTE(review): the two branches below use plain `if`, breaking the
        # elif chain; harmless for valid modes but confirm it is intended.
        if embedi == modes[6]:  # pd
            b = np.array(pdout[j][0])
            daN = b.size
            xx = np.reshape(b, (daN, 1))
        if embedi == modes[7]:  # pd
            b = np.array(pdout[j][1])
            daN = b.size
            xx = np.reshape(b, (daN, 1))
        signals.append(np.reshape(xsig[j] / maxsig, (len(xsig[j]), 1)))
        # raw normalized signal
        xembed.append(xx / xx.max())
        y.append(yout[j])
        xconcat.append(
            np.concatenate(
                ((xx / xx.max()), np.reshape(xsig[j] / maxsig, (len(xsig[j]), 1)))
            )
        )
    # embedding
    return (np.array(xconcat), np.array(y), np.array(signals), np.array(xembed))
def loadmodel(model_json, weights):
    """Rebuild and compile a keras model from stored config + weights.

    Parameters
    ----------
    model_json : str or dict
        Model architecture, as accepted by ``loadfromjson``.
    weights : dict
        Mapping containing a flat "weights" entry.

    Returns
    -------
    tf.keras.Model or None
        The compiled model, or None when *weights* is not a dict
        (this preserves the original implicit-None behavior).
    """
    # Guard clause + isinstance instead of `type(...) is dict`; the original
    # fell off the end and returned None implicitly for non-dict input.
    if not isinstance(weights, dict):
        return None
    weightsa = np.array(weights["weights"])
    model = loadfromjson(model_json, weightsa)
    model.compile()
    return model
def loadfromjson(CONFIGJSON, serialweights):
    """Rebuild a Sequential keras model from a JSON/dict config and flat weights.

    ``CONFIGJSON`` may be a JSON string or an already-parsed dict, optionally
    wrapped in a ``{"config": ...}`` envelope. ``serialweights`` is a flat
    sequence of per-layer weight arrays, reshaped here to each layer's
    expected shape. Returns the tensorflow model with weights set.
    """
    import json  # BUG FIX: json was used here but never imported in this module

    if isinstance(CONFIGJSON, str):
        _json_cfg = json.loads(CONFIGJSON)
    else:  # already a dict
        _json_cfg = CONFIGJSON
    try:
        json_cfg = _json_cfg["config"]
    except KeyError:
        json_cfg = _json_cfg
    try:
        model = tf.keras.models.Sequential.from_config(json_cfg)
    except Exception:
        # Dump the offending config before propagating, for easier debugging.
        print(json_cfg)
        raise
    theshapes = []
    for layer in model.layers:
        for w in layer.weights:
            theshapes.append(tuple(w.shape.as_list()))
    theweights = [
        np.reshape(flat, shape) for flat, shape in zip(serialweights, theshapes)
    ]
    model.set_weights(theweights)
    return model
def getlastN(N):
    """Return the weights hashes of the *N* most recent runs, newest first."""
    query = f"select weightshash from {SCHEMA}.runs order by id desc limit %s"
    s.curs.execute(query, (N,))
    rows = s.curs.fetchall()
    return np.ravel(rows)
def getmodel(weightshash):
    """Load the persisted model identified by *weightshash*.

    Looks up the most recent run with that (lower-cased) weights hash,
    fetches the matching architecture JSON and rebuilds the model.

    Returns
    -------
    tuple
        (compiled keras model, loadfile, cmdline_args) for that run.
    """
    # Consistency: build both queries with f-strings (the original mixed
    # '+'-concatenation and f-strings); values stay parameterized.
    s.curs.execute(
        f"select modelhash, weights, cmdline_args, loadfile from {SCHEMA}.runs "
        "where weightshash=%s order by id desc limit 1",
        (weightshash.lower(),),
    )
    modelhash, weights, cmdline_args, loadfile = s.curs.fetchall()[0]
    s.curs.execute(
        f"select model_json from {SCHEMA}.models where modelhash=%s",
        (modelhash,),
    )
    _model_json = s.curs.fetchall()[0][0]
    return loadmodel(_model_json, weights), loadfile, cmdline_args
| StarcoderdataPython |
11334563 | <filename>gyp/resources.gyp<gh_stars>0
# Copyright 2014 Google Inc.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # Static library wrapping the shared test-resource helpers
      # (tools/Resources.cpp/.h).
      'target_name': 'resources',
      'type': 'static_library',
      'sources': [
        '../tools/Resources.cpp',
        '../tools/Resources.h',
      ],
      # flags: command-line flag parsing; skia_lib: core Skia types.
      'dependencies': [
        'flags.gyp:flags',
        'skia_lib.gyp:skia_lib',
      ],
      # Dependents can #include "Resources.h" without extra -I settings.
      'direct_dependent_settings': {
        'include_dirs': [
          '../tools/',
        ],
      },
    },
  ]
}
| StarcoderdataPython |
6647799 | <filename>pygamelearning/line.py
# Minimal pygame demo: draw one thick line on a white 500x500 window and
# keep the window open until the user closes it.
import pygame, time  # NOTE(review): `time` is imported but unused here
import sys
pygame.init()
screen = pygame.display.set_mode([500,500])
WHITE = [255,255,255]
BLACK = [0,0,0]
screen.fill(WHITE)
# 5px-wide open polyline from (50,50) to (250,100).
pygame.draw.lines(screen, BLACK, False, [[50,50],[250,100]], 5)
pygame.display.update()
# Event loop: only handles window close; busy-waits (no clock throttling).
while True:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
| StarcoderdataPython |
1959515 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
IRC import handler...
"""
import imp
import pkgutil
import importlib
class ImportHandler(object):
    """Manage dynamic loading and hot-reloading of command modules.

    Modules are discovered under ``src/modules`` and kept in
    ``importedModules`` as {lowercased package name: module object}.
    Status messages are sent through the command handler's
    ``replyWithMessage`` callable.
    """

    def __init__(self, cmdHandler):
        super(ImportHandler, self).__init__()
        self.reply = cmdHandler.replyWithMessage
        self.importedModules = {}
        self.loadTheModules()

    def update(self, mod=None):
        """Re-import previously loaded modules.

        When *mod* is given, only that module (by lowercased name) is
        reloaded; otherwise every known module is. Replies with an error
        message if *mod* names no known module.
        """
        self.loadTheModules()
        moduleNotFound = True
        for name, module in self.importedModules.items():
            if mod is None or name == mod:
                # BUG FIX: `imp.reload` relied on the deprecated `imp`
                # module (removed in Python 3.12); importlib is already
                # imported by this file and provides the same reload.
                importlib.reload(module)
                moduleNotFound = False
        if moduleNotFound:
            self.reply("No module named %s." % (mod,))
        else:
            self.reply("Modules have been updated!")

    def loadTheModules(self):
        """
        Dynamically load all the packages/modules in src/modules and add
        them to ``importedModules`` in the format:
            name: moduleObject
        So we can easily refer to them later.
        """
        for importer, package_name, _ in pkgutil.iter_modules(['src/modules']):
            module = importlib.import_module(
                'src.modules.%s.%s' % (package_name, package_name)
            )
            self.importedModules[package_name.lower()] = module
5103450 | <reponame>andrey-zotov/ib_console<gh_stars>10-100
"""Module to generate ascii charts.
This module provides a single function `plot` that can be used to generate an
ascii chart from a series of numbers. The chart can be configured via several
options to tune the output.
"""
from math import ceil, floor, isnan
# ANSI SGR escape codes used to colorize chart lines; `reset` restores the
# terminal's default attributes after each colored character.
black = "\033[30m"
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
magenta = "\033[35m"
cyan = "\033[36m"
lightgray = "\033[37m"
default = "\033[39m"
darkgray = "\033[90m"
lightred = "\033[91m"
lightgreen = "\033[92m"
lightyellow = "\033[93m"
lightblue = "\033[94m"
lightmagenta = "\033[95m"
lightcyan = "\033[96m"
white = "\033[97m"
reset = "\033[0m"
# Public API: the plot function plus every color constant.
__all__ = [
    'plot', 'black', 'red',
    'green', 'yellow', 'blue',
    'magenta', 'cyan', 'lightgray',
    'default', 'darkgray', 'lightred',
    'lightgreen', 'lightyellow', 'lightblue',
    'lightmagenta', 'lightcyan', 'white', 'reset',
]
# Python 3.2 has math.isfinite, which could have been used, but to support older
# versions, this little helper is shorter than having to keep doing not isnan(),
# plus the double-negative of "not is not a number" is confusing, so this should
# help with readability.
def _isnum(n):
return not isnan(n)
def colored(char, color):
    """Wrap *char* in an ANSI color code; pass it through when color is falsy."""
    return color + char + reset if color else char
def plot(series, cfg=None):
    """Generate an ascii chart for a series of numbers.
    `series` should be a list of ints or floats. Missing data values in the
    series can be specified as a NaN. In Python versions less than 3.5, use
    float("nan") to specify an NaN. With 3.5 onwards, use math.nan to specify a
    NaN.
    >>> series = [1,2,3,4,float("nan"),4,3,2,1]
    >>> print(plot(series))
        4.00  ┤  ╭╴╶╮
        3.00  ┤ ╭╯  ╰╮
        2.00  ┤╭╯    ╰╮
        1.00  ┼╯      ╰
    `series` can also be a list of lists to support multiple data series.
    >>> series = [[10,20,30,40,30,20,10], [40,30,20,10,20,30,40]]
    >>> print(plot(series, {'height': 3}))
       40.00  ┤╮ ╭╮ ╭
       30.00  ┤╰╮╯╰╭╯
       20.00  ┤╭╰╮╭╯╮
       10.00  ┼╯ ╰╯ ╰
    `cfg` is an optional dictionary of various parameters to tune the appearance
    of the chart. `min` and `max` will clamp the y-axis and all values:
    >>> series = [1,2,3,4,float("nan"),4,3,2,1]
    >>> print(plot(series, {'min': 0}))
        4.00  ┼  ╭╴╶╮
        3.00  ┤ ╭╯  ╰╮
        2.00  ┤╭╯    ╰╮
        1.00  ┼╯      ╰
        0.00  ┤
    >>> print(plot(series, {'min': 2}))
        4.00  ┤  ╭╴╶╮
        3.00  ┤ ╭╯  ╰╮
        2.00  ┼─╯    ╰─
    >>> print(plot(series, {'min': 2, 'max': 3}))
        3.00  ┤ ╭─╴╶─╮
        2.00  ┼─╯    ╰─
    `height` specifies the number of rows the graph should occupy. It can be
    used to scale down a graph with large data values:
    >>> series = [10,20,30,40,50,40,30,20,10]
    >>> print(plot(series, {'height': 4}))
       50.00  ┤   ╭╮
       40.00  ┤  ╭╯╰╮
       30.00  ┤ ╭╯  ╰╮
       20.00  ┤╭╯    ╰╮
       10.00  ┼╯      ╰
    `format` specifies a Python format string used to format the labels on the
    y-axis. The default value is "{:8.2f} ". This can be used to remove the
    decimal point:
    >>> series = [10,20,30,40,50,40,30,20,10]
    >>> print(plot(series, {'height': 4, 'format':'{:8.0f}'}))
          50 ┤   ╭╮
          40 ┤  ╭╯╰╮
          30 ┤ ╭╯  ╰╮
          20 ┤╭╯    ╰╮
          10 ┼╯      ╰
    """
    if len(series) == 0:
        return ''
    # Normalize a single flat series into the list-of-series form.
    if not isinstance(series[0], list):
        if all(isnan(n) for n in series):
            return ''
        else:
            series = [series]
    cfg = cfg or {}
    colors = cfg.get('colors', [None])
    # Default y-range spans all finite values across every series.
    minimum = cfg.get('min', min(filter(_isnum, [j for i in series for j in i])))
    maximum = cfg.get('max', max(filter(_isnum, [j for i in series for j in i])))
    default_symbols = ['┼', '┤', '╶', '╴', '─', '╰', '╭', '╮', '╯', '│']
    symbols = cfg.get('symbols', default_symbols)
    if minimum > maximum:
        raise ValueError('The min value cannot exceed the max value.')
    interval = maximum - minimum
    offset = cfg.get('offset', 3)
    height = cfg.get('height', interval)
    # ratio maps data units to character rows; min2/max2 are the row bounds.
    ratio = height / interval if interval > 0 else 1
    min2 = int(floor(minimum * ratio))
    max2 = int(ceil(maximum * ratio))
    def clamp(n):
        # Clip a data value into the configured [minimum, maximum] range.
        return min(max(n, minimum), maximum)
    def scaled(y):
        # Row index (0-based from the bottom of the chart) for value y.
        return int(round(clamp(y) * ratio) - min2)
    rows = max2 - min2
    # Canvas width: longest series plus the label offset.
    width = 0
    for i in range(0, len(series)):
        width = max(width, len(series[i]))
    width += offset
    placeholder = cfg.get('format', '{:8.2f} ')
    # result is a rows+1 x width grid of characters, filled in below.
    result = [[' '] * width for i in range(rows + 1)]
    # axis and labels
    for y in range(min2, max2 + 1):
        label = placeholder.format(maximum - ((y - min2) * interval / (rows if rows else 1)))
        result[y - min2][max(offset - len(label), 0)] = label
        result[y - min2][offset - 1] = symbols[0] if y == 0 else symbols[1]  # zero tick mark
    # first value is a tick mark across the y-axis
    d0 = series[0][0]
    if _isnum(d0):
        result[rows - scaled(d0)][offset - 1] = symbols[0]
    for i in range(0, len(series)):
        color = colors[i % len(colors)]
        # plot the line
        for x in range(0, len(series[i]) - 1):
            d0 = series[i][x + 0]
            d1 = series[i][x + 1]
            # Both endpoints missing: draw nothing for this segment.
            if isnan(d0) and isnan(d1):
                continue
            # One endpoint missing: draw a half-tick at the finite end.
            if isnan(d0) and _isnum(d1):
                result[rows - scaled(d1)][x + offset] = colored(symbols[2], color)
                continue
            if _isnum(d0) and isnan(d1):
                result[rows - scaled(d0)][x + offset] = colored(symbols[3], color)
                continue
            y0 = scaled(d0)
            y1 = scaled(d1)
            if y0 == y1:
                result[rows - y0][x + offset] = colored(symbols[4], color)
                continue
            # Rising/falling segment: corner glyphs at both rows, then a
            # vertical run between them.
            result[rows - y1][x + offset] = colored(symbols[5], color) if y0 > y1 else colored(symbols[6], color)
            result[rows - y0][x + offset] = colored(symbols[7], color) if y0 > y1 else colored(symbols[8], color)
            start = min(y0, y1) + 1
            end = max(y0, y1)
            for y in range(start, end):
                result[rows - y][x + offset] = colored(symbols[9], color)
    return '\n'.join([''.join(row).rstrip() for row in result])
| StarcoderdataPython |
4804276 | <filename>ctapipe/core/component.py
""" Class to handle configuration for algorithms """
from traitlets.config import Configurable
from abc import ABCMeta
from logging import getLogger
class AbstractConfigurableMeta(type(Configurable), ABCMeta):
    '''
    Metaclass combining traitlets' Configurable metaclass with ABCMeta so
    that Component subclasses can declare abstract methods.
    see: http://stackoverflow.com/a/7314847/3838691
    '''
    pass
class Component(Configurable, metaclass=AbstractConfigurableMeta):
    """Base class of all Components (sometimes called
    workers, makers, etc). Components are classes that do some sort
    of processing and contain user-configurable parameters, which are
    implemented using `traitlets`.
    `traitlets` can validate values and provide defaults and
    descriptions. These will be automatically translated into
    configuration parameters (command-line, config file, etc). Note
    that any parameter that should be externally configurable must
    have its `config` attribute set to `True`, e.g. defined like
    `myparam = Integer(0, help='the parameter').tag(config=True)`.
    All components also contain a `logger` instance in their `log`
    attribute, that you must use to output info, debugging data,
    warnings, etc (do not use `print()` statements, instead use
    `self.log.info()`, `self.log.warn()`, `self.log.debug()`, etc).
    Components are generally used within `ctapipe.core.Tool`
    subclasses, which provide configuration handling and command-line
    tool generation.
    For example:
    .. code:: python
    from ctapipe.core import Component
    from traitlets import (Integer, Float)
    class MyComponent(Component):
        \"\"\" Does something \"\"\"
        some_option = Integer(default_value=6,
                              help='a value to set').tag(config=True)
    comp = MyComponent(None)
    comp.some_option = 6      # ok
    comp.some_option = 'test' # will fail validation
    """
    def __init__(self, parent=None, **kwargs):
        """
        Parameters
        ----------
        parent: Tool or Component
            Tool or component that is the Parent of this one
        kwargs: type
            other parameters
        """
        super().__init__(parent=parent, **kwargs)
        # set up logging: children inherit their parent's logger namespace
        # so messages show the full Tool/Component hierarchy.
        if self.parent:
            self.log = self.parent.log.getChild(self.__class__.__name__)
        else:
            self.log = getLogger(self.__class__.__name__)
| StarcoderdataPython |
1838041 | <reponame>TS-at-WS/cloudify-manager
import os
import sys
import shutil
import tempfile
import subprocess
from manager_rest.utils import mkdirs
S3_URI = 's3://cloudify-tests-logs/'
LINK_PREFIX = 'http://cloudify-tests-logs.s3.amazonaws.com/'
SKIP_FILES = ['journalctl.log']
def _make_links_file(edition, build, root_dir, links):
    """Write a links.html per test directory pointing at the public S3 logs."""
    for test in links:
        lines = ['<ul>\n']
        for link in links[test]:
            address = LINK_PREFIX + link
            if address.endswith('.log'):
                # .log files are uploaded with a .txt suffix (see staging)
                address += '.txt'
            title = os.path.join(*link.split('/')[4:])
            lines.append(' <li><a href="{0}">{1}</a></li>\n'.format(
                address, title))
        lines.append('</ul>\n')
        out_path = os.path.join(root_dir, edition, build, test, 'links.html')
        with open(out_path, 'w') as links_file:
            links_file.writelines(lines)
def _set_local_dir(target_dir, logs_dir, build, edition):
    """Copy log files from *logs_dir* into the S3 staging layout.

    Files are re-rooted as <edition>/<build>/<build>-<suite>-<test>/... and
    ``.log`` files get a ``.txt`` suffix so browsers render them inline.
    Files listed in SKIP_FILES are ignored.

    Returns a dict mapping each test directory name to the list of relative
    target paths staged for it (consumed by _make_links_file).
    """
    links = {}
    for root, dirs, files in os.walk(logs_dir):
        for log_file in files:
            if log_file in SKIP_FILES:
                continue
            abs_path = os.path.join(root, log_file)
            # Path components relative to logs_dir: [suite, test, ...rest].
            rel_path = abs_path.replace(logs_dir, '').strip('/').split('/')
            test_dir = '{0}-{1}-{2}'.format(build, rel_path[0], rel_path[1])
            rel_target = os.path.join(edition, build, test_dir, *rel_path[2:])
            abs_target = os.path.join(target_dir, rel_target)
            if abs_target.endswith('.log'):
                abs_target += '.txt'
            # Ensure the parent directory chain exists before copying.
            mkdirs(os.path.join('/', *abs_target.split('/')[:-1]))
            shutil.copy(abs_path, abs_target)
            links.setdefault(test_dir, []).append(rel_target)
    return links
def _upload_to_s3(s3_uri, local_dir):
    """Sync *local_dir* to S3, forcing text/plain so logs render in-browser."""
    cmd = ['aws', 's3', 'sync', local_dir, s3_uri,
           '--content-type', 'text/plain']
    # Discard the aws-cli progress chatter; failures still raise.
    with open(os.devnull, 'w') as null_out:
        subprocess.check_call(cmd, stdout=null_out)
def main(s3_uri, logs_dir, build, edition):
    """Stage logs into a temp dir, build the link pages, upload to S3.

    The temporary staging directory is always removed, whether the upload
    succeeds or raises.
    """
    tmp_dir = tempfile.mkdtemp()
    # The original `except BaseException: raise` was a no-op re-raise;
    # try/finally alone gives the same cleanup guarantee.
    try:
        links = _set_local_dir(tmp_dir, logs_dir, build, edition)
        _make_links_file(edition, build, tmp_dir, links)
        _upload_to_s3(s3_uri, tmp_dir)
    finally:
        shutil.rmtree(tmp_dir)
if __name__ == '__main__':
    # CLI entry point (Python 2 script): expects the Jenkins build number
    # and the Cloudify edition as the two positional arguments, plus the
    # CFY_LOGS_PATH_REMOTE environment variable pointing at the logs.
    if len(sys.argv) != 3:
        print 'Usage: python upload_manager_logs.py JENKINS_BUILD_NUMBER ' \
            'CLOUDIFY_EDITION'
        exit(1)
    logs_path = os.environ.get('CFY_LOGS_PATH_REMOTE')
    if not logs_path:
        print 'The environment variable "CFY_LOGS_PATH_REMOTE" is not ' \
            'specified'
        exit(1)
    main(s3_uri=S3_URI,
         logs_dir=os.path.expanduser(logs_path),
         build=sys.argv[1],
         edition=sys.argv[2])
| StarcoderdataPython |
8057539 | from collections import namedtuple
from pathlib import Path
import pytest
# Pair of paths: the input .webidl file and its expected-output baseline.
IdlFixtures = namedtuple('IdlFixtures', 'idl baseline')
# Valid-syntax fixtures live next to this file under syntax/{idl,baseline}.
SYNTAX_FIXTURES = (Path(__file__).parent / 'syntax').resolve()
SYNTAX_IDL_FIXTURES = SYNTAX_FIXTURES / 'idl'
SYNTAX_BASELINE_FIXTURES = SYNTAX_FIXTURES / 'baseline'
@pytest.fixture(params=[
    fixture.name for fixture in SYNTAX_IDL_FIXTURES.glob('*.webidl')
])
def syntax_fixture(request):
    """Parametrized over every syntax/idl/*.webidl file; yields the idl path
    together with its same-named .json baseline."""
    return IdlFixtures(
        idl=SYNTAX_IDL_FIXTURES / request.param,
        baseline=(SYNTAX_BASELINE_FIXTURES / request.param).with_suffix('.json')
    )
# Invalid-syntax fixtures live under invalid/{idl,baseline}.
INVALID_FIXTURES = (Path(__file__).parent / 'invalid').resolve()
INVALID_IDL_FIXTURES = INVALID_FIXTURES / 'idl'
INVALID_BASELINE_FIXTURES = INVALID_FIXTURES / 'baseline'
@pytest.fixture(params=[
    fixture.name for fixture in INVALID_IDL_FIXTURES.glob('*.webidl')
])
def invalid_fixture(request):
    """Parametrized over every invalid/idl/*.webidl file; yields the idl path
    together with its same-named .json baseline."""
    return IdlFixtures(
        idl=INVALID_IDL_FIXTURES / request.param,
        baseline=(INVALID_BASELINE_FIXTURES / request.param).with_suffix('.json')
    )
| StarcoderdataPython |
6569700 | from typing import Any, Dict, List, Optional
from aws_lambda_powertools.utilities.data_classes.common import BaseProxyEvent, DictWrapper
class APIGatewayEventIdentity(DictWrapper):
    """Caller identity information, read from requestContext["identity"]."""
    @property
    def access_key(self) -> Optional[str]:
        """The caller's AWS access key, when the request was signed with IAM credentials."""
        return self["requestContext"]["identity"].get("accessKey")
    @property
    def account_id(self) -> Optional[str]:
        """The AWS account ID associated with the request."""
        return self["requestContext"]["identity"].get("accountId")
    @property
    def api_key(self) -> Optional[str]:
        """For API methods that require an API key, this variable is the API key associated with the method request.
        For methods that don't require an API key, this variable is null."""
        return self["requestContext"]["identity"].get("apiKey")
    @property
    def api_key_id(self) -> Optional[str]:
        """The API key ID associated with an API request that requires an API key."""
        return self["requestContext"]["identity"].get("apiKeyId")
    @property
    def caller(self) -> Optional[str]:
        """The principal identifier of the caller making the request."""
        return self["requestContext"]["identity"].get("caller")
    @property
    def cognito_authentication_provider(self) -> Optional[str]:
        """A comma-separated list of the Amazon Cognito authentication providers used by the caller
        making the request. Available only if the request was signed with Amazon Cognito credentials."""
        return self["requestContext"]["identity"].get("cognitoAuthenticationProvider")
    @property
    def cognito_authentication_type(self) -> Optional[str]:
        """The Amazon Cognito authentication type of the caller making the request.
        Available only if the request was signed with Amazon Cognito credentials."""
        return self["requestContext"]["identity"].get("cognitoAuthenticationType")
    @property
    def cognito_identity_id(self) -> Optional[str]:
        """The Amazon Cognito identity ID of the caller making the request.
        Available only if the request was signed with Amazon Cognito credentials."""
        return self["requestContext"]["identity"].get("cognitoIdentityId")
    @property
    def cognito_identity_pool_id(self) -> Optional[str]:
        """The Amazon Cognito identity pool ID of the caller making the request.
        Available only if the request was signed with Amazon Cognito credentials."""
        return self["requestContext"]["identity"].get("cognitoIdentityPoolId")
    @property
    def principal_org_id(self) -> Optional[str]:
        """The AWS organization ID."""
        return self["requestContext"]["identity"].get("principalOrgId")
    @property
    def source_ip(self) -> str:
        """The source IP address of the TCP connection making the request to API Gateway."""
        return self["requestContext"]["identity"]["sourceIp"]
    @property
    def user(self) -> Optional[str]:
        """The principal identifier of the user making the request."""
        return self["requestContext"]["identity"].get("user")
    @property
    def user_agent(self) -> Optional[str]:
        """The User Agent of the API caller."""
        return self["requestContext"]["identity"].get("userAgent")
    @property
    def user_arn(self) -> Optional[str]:
        """The Amazon Resource Name (ARN) of the effective user identified after authentication."""
        return self["requestContext"]["identity"].get("userArn")
class APIGatewayEventAuthorizer(DictWrapper):
    """Authorizer output for the request, read from requestContext["authorizer"]."""
    @property
    def claims(self) -> Optional[Dict[str, Any]]:
        """Token claims forwarded by the authorizer, when present."""
        return self["requestContext"]["authorizer"].get("claims")
    @property
    def scopes(self) -> Optional[List[str]]:
        """OAuth scopes forwarded by the authorizer, when present."""
        return self["requestContext"]["authorizer"].get("scopes")
class APIGatewayEventRequestContext(DictWrapper):
    """Request context for REST (payload v1) and WebSocket API Gateway events."""
    @property
    def account_id(self) -> str:
        """The AWS account ID associated with the request."""
        return self["requestContext"]["accountId"]
    @property
    def api_id(self) -> str:
        """The identifier API Gateway assigns to your API."""
        return self["requestContext"]["apiId"]
    @property
    def authorizer(self) -> APIGatewayEventAuthorizer:
        """Authorizer output wrapped in an APIGatewayEventAuthorizer view."""
        return APIGatewayEventAuthorizer(self._data)
    @property
    def connected_at(self) -> Optional[int]:
        """The Epoch-formatted connection time. (WebSocket API)"""
        return self["requestContext"].get("connectedAt")
    @property
    def connection_id(self) -> Optional[str]:
        """A unique ID for the connection that can be used to make a callback to the client. (WebSocket API)"""
        return self["requestContext"].get("connectionId")
    @property
    def domain_name(self) -> Optional[str]:
        """A domain name"""
        return self["requestContext"].get("domainName")
    @property
    def domain_prefix(self) -> Optional[str]:
        """The first label of the domain name, when present."""
        return self["requestContext"].get("domainPrefix")
    @property
    def event_type(self) -> Optional[str]:
        """The event type: `CONNECT`, `MESSAGE`, or `DISCONNECT`. (WebSocket API)"""
        return self["requestContext"].get("eventType")
    @property
    def extended_request_id(self) -> Optional[str]:
        """An automatically generated ID for the API call, which contains more useful information
        for debugging/troubleshooting."""
        return self["requestContext"].get("extendedRequestId")
    @property
    def protocol(self) -> str:
        """The request protocol, for example, HTTP/1.1."""
        return self["requestContext"]["protocol"]
    @property
    def http_method(self) -> str:
        """The HTTP method used. Valid values include: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT."""
        return self["requestContext"]["httpMethod"]
    @property
    def identity(self) -> APIGatewayEventIdentity:
        """Caller identity wrapped in an APIGatewayEventIdentity view."""
        return APIGatewayEventIdentity(self._data)
    @property
    def message_direction(self) -> Optional[str]:
        """Message direction (WebSocket API)"""
        return self["requestContext"].get("messageDirection")
    @property
    def message_id(self) -> Optional[str]:
        """A unique server-side ID for a message. Available only when the `eventType` is `MESSAGE`."""
        return self["requestContext"].get("messageId")
    @property
    def path(self) -> str:
        """The request path from the request context."""
        return self["requestContext"]["path"]
    @property
    def stage(self) -> str:
        """The deployment stage of the API request"""
        return self["requestContext"]["stage"]
    @property
    def request_id(self) -> str:
        """The ID that API Gateway assigns to the API request."""
        return self["requestContext"]["requestId"]
    @property
    def request_time(self) -> Optional[str]:
        """The CLF-formatted request time (dd/MMM/yyyy:HH:mm:ss +-hhmm)"""
        return self["requestContext"].get("requestTime")
    @property
    def request_time_epoch(self) -> int:
        """The Epoch-formatted request time."""
        return self["requestContext"]["requestTimeEpoch"]
    @property
    def resource_id(self) -> str:
        """The identifier of the matched API Gateway resource."""
        return self["requestContext"]["resourceId"]
    @property
    def resource_path(self) -> str:
        """The path template of the matched API Gateway resource."""
        return self["requestContext"]["resourcePath"]
    @property
    def route_key(self) -> Optional[str]:
        """The selected route key."""
        return self["requestContext"].get("routeKey")
    @property
    def operation_name(self) -> Optional[str]:
        """The name of the operation being performed"""
        return self["requestContext"].get("operationName")
class APIGatewayProxyEvent(BaseProxyEvent):
    """AWS Lambda proxy V1
    Documentation:
    --------------
    - https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html
    """
    @property
    def version(self) -> str:
        """The payload format version."""
        return self["version"]
    @property
    def resource(self) -> str:
        """The resource path template that matched the request."""
        return self["resource"]
    @property
    def multi_value_headers(self) -> Dict[str, List[str]]:
        """All request headers, each mapped to the list of its values."""
        return self["multiValueHeaders"]
    @property
    def multi_value_query_string_parameters(self) -> Optional[Dict[str, List[str]]]:
        """All query string parameters, each mapped to the list of its values."""
        return self.get("multiValueQueryStringParameters")
    @property
    def request_context(self) -> APIGatewayEventRequestContext:
        """The request context wrapped in an APIGatewayEventRequestContext view."""
        return APIGatewayEventRequestContext(self._data)
    @property
    def path_parameters(self) -> Optional[Dict[str, str]]:
        """Values captured by path template placeholders, when any."""
        return self.get("pathParameters")
    @property
    def stage_variables(self) -> Optional[Dict[str, str]]:
        """Stage variables configured for the deployment stage, when any."""
        return self.get("stageVariables")
class RequestContextV2Http(DictWrapper):
    """HTTP details of a payload v2.0 request, from requestContext["http"]."""
    @property
    def method(self) -> str:
        """The HTTP method used for the request."""
        return self["requestContext"]["http"]["method"]
    @property
    def path(self) -> str:
        """The request path."""
        return self["requestContext"]["http"]["path"]
    @property
    def protocol(self) -> str:
        """The request protocol, for example, HTTP/1.1."""
        return self["requestContext"]["http"]["protocol"]
    @property
    def source_ip(self) -> str:
        """The source IP address of the TCP connection making the request to API Gateway."""
        return self["requestContext"]["http"]["sourceIp"]
    @property
    def user_agent(self) -> str:
        """The User Agent of the API caller."""
        return self["requestContext"]["http"]["userAgent"]
class RequestContextV2AuthorizerIam(DictWrapper):
    """IAM authorization details of a payload v2.0 request.

    NOTE(review): the cognito_* properties index self["cognitoIdentity"]
    directly and will raise KeyError when that key is absent — confirm
    callers only use them for Cognito-signed requests.
    """
    @property
    def access_key(self) -> Optional[str]:
        """The IAM user access key associated with the request."""
        return self.get("accessKey")
    @property
    def account_id(self) -> Optional[str]:
        """The AWS account ID associated with the request."""
        return self.get("accountId")
    @property
    def caller_id(self) -> Optional[str]:
        """The principal identifier of the caller making the request."""
        return self.get("callerId")
    @property
    def cognito_amr(self) -> Optional[List[str]]:
        """This represents how the user was authenticated.
        AMR stands for Authentication Methods References as per the openid spec"""
        return self["cognitoIdentity"].get("amr")
    @property
    def cognito_identity_id(self) -> Optional[str]:
        """The Amazon Cognito identity ID of the caller making the request.
        Available only if the request was signed with Amazon Cognito credentials."""
        return self["cognitoIdentity"].get("identityId")
    @property
    def cognito_identity_pool_id(self) -> Optional[str]:
        """The Amazon Cognito identity pool ID of the caller making the request.
        Available only if the request was signed with Amazon Cognito credentials."""
        return self["cognitoIdentity"].get("identityPoolId")
    @property
    def principal_org_id(self) -> Optional[str]:
        """The AWS organization ID."""
        return self.get("principalOrgId")
    @property
    def user_arn(self) -> Optional[str]:
        """The Amazon Resource Name (ARN) of the effective user identified after authentication."""
        return self.get("userArn")
    @property
    def user_id(self) -> Optional[str]:
        """The IAM user ID of the effective user identified after authentication."""
        return self.get("userId")
class RequestContextV2Authorizer(DictWrapper):
    """Authorizer details of a payload v2.0 request (JWT, Lambda or IAM)."""
    @property
    def jwt_claim(self) -> Dict[str, Any]:
        """Claims from the JWT authorizer token."""
        return self["jwt"]["claims"]
    @property
    def jwt_scopes(self) -> List[str]:
        """Scopes from the JWT authorizer token."""
        return self["jwt"]["scopes"]
    @property
    def get_lambda(self) -> Optional[Dict[str, Any]]:
        """Lambda authorization context details"""
        return self.get("lambda")
    @property
    def iam(self) -> Optional[RequestContextV2AuthorizerIam]:
        """IAM authorization details used for making the request."""
        iam = self.get("iam")
        return None if iam is None else RequestContextV2AuthorizerIam(iam)
class RequestContextV2(DictWrapper):
    """Request context of an HTTP API payload v2.0 event."""
    @property
    def account_id(self) -> str:
        """The AWS account ID associated with the request."""
        return self["requestContext"]["accountId"]
    @property
    def api_id(self) -> str:
        """The identifier API Gateway assigns to your API."""
        return self["requestContext"]["apiId"]
    @property
    def authorizer(self) -> Optional[RequestContextV2Authorizer]:
        """Authorizer details, when the route is protected by an authorizer."""
        authorizer = self["requestContext"].get("authorizer")
        return None if authorizer is None else RequestContextV2Authorizer(authorizer)
    @property
    def domain_name(self) -> str:
        """A domain name"""
        return self["requestContext"]["domainName"]
    @property
    def domain_prefix(self) -> str:
        """The first label of the domain name."""
        return self["requestContext"]["domainPrefix"]
    @property
    def http(self) -> RequestContextV2Http:
        """HTTP details wrapped in a RequestContextV2Http view."""
        return RequestContextV2Http(self._data)
    @property
    def request_id(self) -> str:
        """The ID that API Gateway assigns to the API request."""
        return self["requestContext"]["requestId"]
    @property
    def route_key(self) -> str:
        """The selected route key."""
        return self["requestContext"]["routeKey"]
    @property
    def stage(self) -> str:
        """The deployment stage of the API request"""
        return self["requestContext"]["stage"]
    @property
    def time(self) -> str:
        """The CLF-formatted request time (dd/MMM/yyyy:HH:mm:ss +-hhmm)."""
        return self["requestContext"]["time"]
    @property
    def time_epoch(self) -> int:
        """The Epoch-formatted request time."""
        return self["requestContext"]["timeEpoch"]
class APIGatewayProxyEventV2(BaseProxyEvent):
    """AWS Lambda proxy V2 event
    Notes:
    -----
    Format 2.0 doesn't have multiValueHeaders or multiValueQueryStringParameters fields. Duplicate headers
    are combined with commas and included in the headers field. Duplicate query strings are combined with
    commas and included in the queryStringParameters field.
    Format 2.0 includes a new cookies field. All cookie headers in the request are combined with commas and
    added to the cookies field. In the response to the client, each cookie becomes a set-cookie header.
    Documentation:
    --------------
    - https://docs.aws.amazon.com/apigateway/latest/developerguide/http-api-develop-integrations-lambda.html
    """
    @property
    def version(self) -> str:
        """The payload format version ("2.0")."""
        return self["version"]
    @property
    def route_key(self) -> str:
        """The route key that matched the request."""
        return self["routeKey"]
    @property
    def raw_path(self) -> str:
        """The raw, un-decoded request path."""
        return self["rawPath"]
    @property
    def raw_query_string(self) -> str:
        """The raw query string of the request."""
        return self["rawQueryString"]
    @property
    def cookies(self) -> Optional[List[str]]:
        """All cookie headers combined into a single list, when present."""
        return self.get("cookies")
    @property
    def request_context(self) -> RequestContextV2:
        """The request context wrapped in a RequestContextV2 view."""
        return RequestContextV2(self._data)
    @property
    def path_parameters(self) -> Optional[Dict[str, str]]:
        """Values captured by path template placeholders, when any."""
        return self.get("pathParameters")
    @property
    def stage_variables(self) -> Optional[Dict[str, str]]:
        """Stage variables configured for the deployment stage, when any."""
        return self.get("stageVariables")
    @property
    def path(self) -> str:
        """Alias for raw_path, mirroring the v1 event interface."""
        return self.raw_path
    @property
    def http_method(self) -> str:
        """The HTTP method used. Valid values include: DELETE, GET, HEAD, OPTIONS, PATCH, POST, and PUT."""
        return self.request_context.http.method
| StarcoderdataPython |
38565 | #coding: utf-8
'''
@Time: 2019/4/25 11:15
@Author: fangyoucai
'''
| StarcoderdataPython |
9786136 | <filename>iavm/management/commands/create_iavmcpe.py
from django.core.management.base import BaseCommand, CommandError
from iavm.libs.IAVM import *
class Command(BaseCommand):
    """Management command that generates the IAVM-to-CPE mapping document."""
    help = 'Create IAVM-to-CPE Document'

    def handle(self, *args, **options):
        """Run the generator, wrapping any failure in a CommandError."""
        try:
            iavm_to_cpe_doc()
        except Exception as exc:
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit) and chained so the original
            # traceback is preserved.
            raise CommandError('iavm_to_cpe_doc failed') from exc
        self.stdout.write('Successfully created IAVM-to-CPE Document')
6585330 | <filename>test/test_datamodel.py
#!/usr/bin/env python
# $Id$
#------------------------------------------------------------------------
# NAME: test_datamodel.py -
# HISTORY: -
# 2016-08-18 <EMAIL> -
#------------------------------------------------------------------------
import os, sys, traceback, unittest
import pdb # set_trace()
sys.path.insert( 0, os.path.join( os.path.dirname( __file__ ), os.pardir ) )
from data.datamodel import *
#------------------------------------------------------------------------
# CLASS: TestDataModel -
#------------------------------------------------------------------------
class TestDataModel( unittest.TestCase ):
    """Checks that DataModel's two list-search implementations agree.

    Requires the fixture HDF5 file ``~/study/casl/andrew/c1.h5``.
    """

    def setUp( self ):
        # Open the fixture data set once per test.
        h5_path = os.path.join(
            os.environ[ 'HOME' ], 'study', 'casl', 'andrew', 'c1.h5' )
        self.dataModel = DataModel( h5_path )

    def tearDown( self ):
        self.dataModel.Close()

    def test_FindListIndex( self ):
        """FindListIndex and FindListIndex1 must agree on ascending and
        descending mesh lists over a sweep of probe values."""
        core = self.dataModel.GetCore()
        asc_one = core.axialMeshCenters
        asc_two = core.detectorMesh
        cases = (
            ( asc_one, 'ascending list one' ),
            ( asc_two, 'ascending list two' ),
            ( asc_one[ :: -1 ], 'descending list one' ),
            ( asc_two[ :: -1 ], 'descending list two' ),
            )
        # Sweep from below both lists to a bit past the larger maximum.
        max_value = max( asc_one[ -1 ], asc_two[ -1 ] ) + 10.0
        x = -5.0
        while x <= max_value:
            for lst, label in cases:
                self.assertEqual(
                    self.dataModel.FindListIndex( lst, x ),
                    self.dataModel.FindListIndex1( lst, x ),
                    '%s: %f' % ( label, x )
                    )
            x += 0.1
#------------------------------------------------------------------------
# NAME: main() -
#------------------------------------------------------------------------
if __name__ == '__main__':
    # Discover every test_* method on TestDataModel and run with verbose output.
    loader = unittest.TestLoader()
    runner = unittest.TextTestRunner( verbosity = 2 )
    runner.run( loader.loadTestsFromTestCase( TestDataModel ) )
| StarcoderdataPython |
9686417 | import unittest
from fn.immutable import Deque
class FingerTreeDequeTestCase(unittest.TestCase):
    """Behavioural tests for the persistent finger-tree backed Deque."""

    def test_deque_basic_operations(self):
        # Push 1, 2, 3 at the back, then 10 and 20 at the front: [20,10,1,2,3].
        back_only = Deque().push_back(1).push_back(2).push_back(3)
        both_ends = back_only.push_front(10).push_front(20)
        self.assertEqual(1, back_only.head())
        self.assertEqual(3, back_only.last())
        self.assertEqual(20, both_ends.head())
        self.assertEqual(3, both_ends.last())

    # FIXME: write this test.
    def test_deque_num_of_elements(self):
        pass

    def test_deque_is_empty(self):
        self.assertTrue(Deque().is_empty())
        self.assertFalse(Deque().push_back(1).is_empty())
        # Dropping the only element must yield an empty deque again.
        self.assertTrue(Deque().push_back(1).tail().is_empty())

    def test_iterator(self):
        self.assertEqual([], list(Deque()))
        ordered = Deque().push_back(1).push_back(2).push_back(3)
        self.assertEqual([1, 2, 3], list(ordered))
        mixed = Deque().push_back(10).push_front(20).push_back(30)
        self.assertEqual(60, sum(mixed))
        self.assertEqual(
            sum(range(1, 20)),
            sum(Deque.from_iterable(range(1, 20)))
        )
if __name__ == '__main__':
    # Run the deque tests when this module is executed directly.
    unittest.main()
| StarcoderdataPython |
1772997 | import unittest
import sys
import random
import os
dirname, filename = os.path.split(os.path.abspath(__file__))
sys.path.append(dirname + '/..')
from src import populations
class TestPopulationGA(unittest.TestCase):
    """Tests for populations.PopulationGA.

    NOTE(review): every expected chromosome/value below is tied to
    random.seed(1) set in setUp and to the exact order of random draws
    inside the library; do not reorder calls within a test.
    """
    @classmethod
    def setUpClass(cls):
        # Objective function: x0 + x1 (minimised); limiting function: -x0 * x1.
        def of1(*vars):
            return vars[0] + vars[1]
        def l1(*vars):
            return -vars[0] * vars[1]
        print('Running tests for Population class and it\'s derived classes of GAs...')
        cls.pop_size = 5
        cls.n_var = 2
        cls.n_gen_var = 5
        cls.n_of = 1
        cls.n_lim = 1
        cls.var_range = [[0., 1.], [-2., 3.5]]
        cls.lim_range = [['greater', -0.1]]
        cls.max_min = ['min']
        cls.objective_functions = [of1]
        cls.limiting_functions = [l1]
    def setUp(self):
        # Fixed seed makes the GA's random initialisation reproducible.
        random.seed(1)
        self.pop = populations.PopulationGA(self.pop_size, self.n_var, self.n_gen_var, self.n_of, self.n_lim)
        self.pop.initialize()
    def tearDown(self):
        del self.pop
    def test_population_GA_with_random_generation(self):
        # Expected chromosomes for seed(1): 2 variables x 5 bits per individual.
        chromosomes_individuals = [[0, 1, 1, 0, 0, 0, 1, 1, 0, 0],
                                   [1, 0, 1, 0, 0, 1, 0, 1, 1, 0],
                                   [0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                                   [0, 0, 0, 0, 0, 0, 1, 1, 1, 0],
                                   [1, 1, 0, 0, 1, 1, 1, 0, 1, 1]]
        for indv in range(self.pop_size):
            self.assertEqual(self.pop.population[indv].getChromosome(), chromosomes_individuals[indv])
    def test_population_GA_mutate(self):
        # Bit-flip mutation with a 30% per-gene rate under the fixed seed.
        mutation_rate = 0.3
        self.pop.mutate(self.pop.population, mutation_rate)
        chromosomes_mutated = [[0, 1, 1, 0, 0, 0, 0, 0, 0, 0],
                               [0, 0, 1, 0, 0, 1, 0, 1, 1, 0],
                               [0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
                               [0, 0, 0, 1, 0, 0, 1, 1, 0, 0],
                               [1, 0, 0, 0, 1, 1, 1, 0, 1, 1]]
        for indv in range(self.pop_size):
            self.assertEqual(self.pop.population[indv].getChromosome(), chromosomes_mutated[indv])
    def test_population_GA_decode_chromosome(self):
        # Decoding maps each 5-bit gene onto its variable range.
        self.pop.decode(self.pop.population, self.var_range)
        variables = [[0.3870, 0.1290],
                     [0.6451, 1.9032],
                     [0.3870, -2.000],
                     [0.0000, 0.4838],
                     [0.8064, 2.7903]]
        for indv in range(self.pop_size):
            for var in range(self.n_var):
                self.assertAlmostEqual(self.pop.population[indv].getVar()[var], variables[indv][var], places=3)
    def test_population_GA_evaluate_and_feasibility(self):
        self.pop.decode(self.pop.population, self.var_range)
        self.pop.evaluate(self.pop.population, self.objective_functions, self.limiting_functions)
        objective_functions = [0.5161, 2.5483, -1.6129, 0.4838, 3.5967]
        limiting_functions = [-0.0499, -1.2278, 0.7741, 0.000, -2.2502]
        for indv in range(self.pop_size):
            self.assertAlmostEqual(self.pop.population[indv].getOf()[0], objective_functions[indv], places=3)
            self.assertAlmostEqual(self.pop.population[indv].getLimVar()[0], limiting_functions[indv], places=3)
        # Feasible iff limiting value satisfies the 'greater than -0.1' constraint.
        self.pop.checkFeasibility(self.pop.population, self.lim_range)
        feasibility_individuals = [True, False, True, True, False]
        for indv in range(self.pop_size):
            self.assertEqual(self.pop.population[indv].getFeasibility(), feasibility_individuals[indv])
    def test_population_GA_tournament_selection(self):
        # Two consecutive tournaments consume the random stream in order,
        # so the expected winners (4 then 3) depend on call order.
        tournament_selection_param = 0.9
        self.pop.decode(self.pop.population, self.var_range)
        self.pop.evaluate(self.pop.population, self.objective_functions, self.limiting_functions)
        winner_1 = 4
        winner_2 = 3
        self.assertEqual(self.pop.tournamentSelection(tournament_selection_param, self.max_min), winner_1)
        self.assertEqual(self.pop.tournamentSelection(tournament_selection_param, self.max_min), winner_2)
    def test_population_GA_crossover(self):
        # Single-point crossover at the seed-determined cut point (after gene 5).
        chromosome_1 = [0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1]
        chromosome_2 = [1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0]
        cross_chromo_1 = [0, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0]
        cross_chromo_2 = [1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1]
        [cross_chromo_1_computed, cross_chromo_2_computed] = self.pop.crossover(chromosome_1, chromosome_2)
        self.assertEqual(cross_chromo_1, cross_chromo_1_computed)
        self.assertEqual(cross_chromo_2, cross_chromo_2_computed)
if __name__ == '__main__':
    # Run the GA population tests when executed directly.
    unittest.main()
| StarcoderdataPython |
5182632 | import sys,re, os
from collections import defaultdict
from urllib2 import Request, urlopen, URLError
import urllib2
from random import randint
from time import sleep
''''
This class:
takes the files (uploads) and store them for next processing.
store the files in lists and dictionary:
1. NTDS into lists
2. Passwords Only file into lists
3. Hash_Password file
'''
# NOTE(review): legacy Python 2 code (print statements, urllib2,
# `except URLError, e`). Kept byte-identical; comments only.
class CLASS_upload_files_edit():
    ''' Input: files: NTDS, Password-Only File, Hash-Password File.
        ------------------------------------------------------
    '''
    # NOTE(review): these are *class* attributes, shared across all
    # instances — TODO confirm this class is only ever instantiated once.
    list_NTDS_file = [] # List => [['user1', '500', 'aad3b43..', '3dd97e0fd...'], ['use2', '502', 'aad3...', '2b7...'],...
    list_passwords = []
    list_passwords_updated = []
    dict_passwords = {}
    list_Hash_pass = [] # hash:hash
    list_Hash_pass_PasswordOnly = [] # passwordsOnly
    list_user_hash_pass = [] # User:Hash:Password
    Client_Name = ''
    file_name_NTDS = ''
    '''
    Getting List Functions:
    '''
    # NOTE(review): despite the name, this *sets* the client name, then
    # returns it; the name is later used as an upload-file prefix and domain.
    def show_client_name(self,client_name):
        self.Client_Name = client_name
        return self.Client_Name
    def get_ntds_list(self):
        return self.list_NTDS_file
    def make_password_list(self):
        for i in self.list_passwords: #the list is: ['xx].['xx'],['xx'] List of lists
            self.list_passwords_updated.append(i[0]) #making the list: ['pass', 'pass1', 'pass2', 'pass3']
    def get_password_list_updated(self):
        return self.list_passwords_updated #returning the list: ['pass', 'pass1', 'pass2', 'pass3']
    def get_password_dict(self):
        return self.dict_passwords
    # Collect only the password column (index 1) of each hash:password row.
    def make_passwords_of_hash_pass_file(self):
        for x in self.list_Hash_pass:
            self.list_Hash_pass_PasswordOnly.append(x[1])
    def get_passwords_of_hash_pass_file_PASSWORDSonly(self):
        return self.list_Hash_pass_PasswordOnly
    def get_user_hash_pass(self): # Get ['USER:HASH:PASSWORD', 'USER:HASH:PASSWORD']
        return self.list_user_hash_pass
    ''' Saving NTDS File into list '''
    # Scans ./uploads for <Client>_NTDS_*.txt and splits each line on ':'.
    # NOTE(review): if several matching files exist, only the last one wins.
    def NTDS_into_List(self):
        uploads_directory = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'
        for file_name in os.listdir(uploads_directory):
            if file_name.startswith(self.Client_Name+'_NTDS_') and file_name.endswith(".txt"):
                self.file_name_NTDS = file_name
        NTDS_file = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'+self.file_name_NTDS
        with open(NTDS_file, 'r') as f:
            for i in f:
                data = i.strip().split(':');
                if data[0] != '':
                    self.list_NTDS_file.append(data) # Into List
    ''' Saving Password Only file into list '''
    # Same scan pattern for <Client>_passwords_*.txt; also mirrors rows
    # into dict_passwords keyed by the raw (un-stripped) line.
    def PASSWORDS_into_list(self):
        uploads_directory = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'
        for file_name in os.listdir(uploads_directory):
            if file_name.startswith(self.Client_Name+'_passwords_') and file_name.endswith(".txt"):
                file_name_passwords = file_name
        Password_file = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'+file_name_passwords
        with open(Password_file,'r') as f:
            for i in f:
                data = i.strip().split(':')
                if data[0] != '':
                    self.list_passwords.append(data)
                    self.dict_passwords[i] = data
    ''' Saving Hash_Pasword file into list '''
    # Reads <Client>_hash_pass_*.txt rows of the form hash:password.
    def Hash_Pass_into_list(self):
        Hash_Pass_file = ''
        NTDS_file = ''
        uploads_directory = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'
        for file_name in os.listdir(uploads_directory):
            if file_name.startswith(self.Client_Name+'_hash_pass_') and file_name.endswith(".txt"):
                Hash_Pass_file=file_name
        Hash_dict_1 = dict();
        file_open = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'+''+Hash_Pass_file
        with open(file_open, 'r') as f:
            for i in f:
                #print i
                data = i.strip().split(':');
                #print data
                #Hash_dict_1[data[0]] = data[1].strip();
                self.list_Hash_pass.append(data)
        #for key,value in Hash_dict_1.iteritems():
        # print key, value
        #print Hash_dict_1.items()
    # Joins NTDS rows (user at index 0, NTLM hash at index 3) against
    # cracked hash:password rows to produce 'USER:HASH:PASSWORD' strings.
    # NOTE(review): O(n*m) nested scan — fine for small dumps, TODO confirm.
    def merge_ntds_with_hashPass_file(self):
        ''' Merging the lists '''
        for user_hash in self.list_NTDS_file:
            #print user_hash[0],user_hash[3], 'NTLM_HASH' #username:NTLM
            for hash_pass in self.list_Hash_pass:
                #print hash_pass[0], hash_pass[1], 'hash_pass'
                if user_hash[3] == hash_pass[0]:
                    #all += user_hash[0]+':'+user_hash[3]+':'+hash_pass[1]+ '\n'
                    self.list_user_hash_pass.append(user_hash[0]+':'+user_hash[3]+':'+hash_pass[1])
                    #print 'HERE'
        #print self.list_user_hash_pass
    '''
    simple user names exists: user, archive, backup, abc, events, security, hr,
    payroll, finance, student, tablet, feedback,servicedesk, ithelpdesk, support, training, temp,infodesk,employment, internet,info
    '''
    '''
    Overall Risk based on number of LMS, week passwords...
    '''
    '''
    Hacked Account:
    https://haveibeenpwned.com/api/v2/breachedaccount/<<EMAIL>>@<EMAIL>.<EMAIL>?truncateResponse=true
    security issue:
    cannot test all users:
    should test only users with week passwords?
    should ask the client to confirm before uploading.
    should ask the user to input which user to check!
    '''
    # Queries the HaveIBeenPwned v2 API for <username>@<Client_Name>.
    # NOTE(review): result is printed, not returned — TODO confirm intended.
    def get_hacked_user(self,username):
        req = urllib2.Request('https://haveibeenpwned.com/api/v2/breachedaccount/'+username+'@'+self.Client_Name+'?truncateResponse=true')
        req.add_header('Accept', 'application/vnd.haveibeenpwned.v2+json')
        req.add_header('User-Agent', 'Mozilla/5.0 (Linux; <Android Version>; <Build Tag etc.>) AppleWebKit/<WebKit Rev> (KHTML, like Gecko) Chrome/<Chrome Rev> Mobile Safari/<WebKit Rev>')
        try:
            resp = urllib2.urlopen(req)
            content = resp.read()
            print content
            # sleep(randint(5,20))
        except URLError, e:
            print 'No kittez. Got an error code:', e
| StarcoderdataPython |
4835243 | <filename>backend/users/migrations/0001_initial.py
# Generated by Django 3.2.8 on 2022-01-08 10:45
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated initial migration (Django 3.2.8): creates the CustomUser
    # auth model and its one-to-one UserParameters profile. Once applied,
    # this file should not be edited by hand.
    initial = True
    # Depends on the stock auth migration that widens first_name.
    dependencies = [
        ('auth', '0012_alter_user_first_name_max_length'),
    ]
    operations = [
        # Custom user model authenticated by a unique email address.
        migrations.CreateModel(
            name='CustomUser',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
                ('email', models.EmailField(max_length=254, unique=True, verbose_name='email address')),
                ('date_joined', models.DateTimeField(default=django.utils.timezone.now)),
                ('is_staff', models.BooleanField(default=False)),
                ('is_active', models.BooleanField(default=True)),
                ('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
                ('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
            ],
            options={
                'abstract': False,
            },
        ),
        # Per-user physical parameters; shares its primary key with the user.
        migrations.CreateModel(
            name='UserParameters',
            fields=[
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, primary_key=True, serialize=False, to='users.customuser', verbose_name='User')),
                ('firstname', models.TextField(default='', max_length=50)),
                ('height', models.FloatField()),
                ('weight', models.FloatField()),
                ('sex', models.TextField(max_length=10)),
                ('age', models.IntegerField()),
            ],
            options={
                'ordering': ['user'],
            },
        ),
    ]
| StarcoderdataPython |
1891884 | <reponame>Gabydelgado/Invest-Simulator
from rest_framework import status
from rest_framework.test import APITestCase
class AccountTests(APITestCase):
    """Integration test: a freshly registered, logged-in user can list assets."""

    def test_asset_list(self):
        # Register a new user. The response is intentionally unchecked here:
        # the login below (and the final assert) fails if registration failed.
        # (The original bound these responses to a dead `response` variable.)
        self.client.post('/api/v1/rest-auth/registration/', {
            'username': 'francisco213422',
            'password1': '<PASSWORD>',
            'password2': '<PASSWORD>',
            'email': '<EMAIL>',
            'email2': '<EMAIL>',
            'first_name': 'fanasdasd',
            'last_name': 'asddasdasj',
            'avatar': 1,
        }, format='json')
        # Authenticate so the asset list endpoint is reachable.
        self.client.post('/api/v1/rest-auth/login/',
                         {'username': 'francisco213422',
                          'password': '<PASSWORD>'},
                         format='json')
        # The authenticated asset listing must succeed.
        response = self.client.get('/api/v1/assets/')
        self.assertEqual(response.status_code, status.HTTP_200_OK)
| StarcoderdataPython |
8181784 | <reponame>Scottx86-64/dotfiles-1
$NetBSD: patch-electrum-ecc_fast.py,v 1.1 2020/11/04 20:45:46 js Exp $
Don't assume libsecp256k1 is in site-packages/electrum.
--- electrum/ecc_fast.py.orig 2000-11-11 11:11:11.000000000 +0000
+++ electrum/ecc_fast.py
@@ -9,6 +9,7 @@ from ctypes import (
byref, c_byte, c_int, c_uint, c_char_p, c_size_t, c_void_p, create_string_buffer,
CFUNCTYPE, POINTER, cast
)
+import sysconfig
from .logging import get_logger
@@ -38,16 +39,13 @@ class LibModuleMissing(Exception): pass
def load_library():
if sys.platform == 'darwin':
- library_paths = (os.path.join(os.path.dirname(__file__), 'libsecp256k1.0.dylib'),
- 'libsecp256k1.0.dylib')
+ library_paths = (os.path.join(sysconfig.get_config_var('LIBDIR'), 'libsecp256k1.0.dylib'),)
elif sys.platform in ('windows', 'win32'):
- library_paths = (os.path.join(os.path.dirname(__file__), 'libsecp256k1-0.dll'),
- 'libsecp256k1-0.dll')
+ library_paths = (os.path.join(sysconfig.get_config_var('LIBDIR'), 'libsecp256k1-0.dll'),)
elif 'ANDROID_DATA' in os.environ:
library_paths = ('libsecp256k1.so',)
else: # desktop Linux and similar
- library_paths = (os.path.join(os.path.dirname(__file__), 'libsecp256k1.so.0'),
- 'libsecp256k1.so.0')
+ library_paths = (os.path.join(sysconfig.get_config_var('LIBDIR'), 'libsecp256k1.so.0'),)
exceptions = []
secp256k1 = None
| StarcoderdataPython |
11232823 | import numpy as np
from imutils import face_utils
import cv2
import dlib
from scipy.spatial import distance as dist
import time
from firebase import firebase
FBconn = firebase.FirebaseApplication('https://ed-workshop.firebaseio.com/', None)
def MAR(mouth):
    """Mouth aspect ratio: mean of the three vertical lip gaps divided by the
    horizontal mouth width (points are (top, bottom) pairs then the corners)."""
    vertical_gaps = (dist.euclidean(mouth[0], mouth[1])
                     + dist.euclidean(mouth[2], mouth[3])
                     + dist.euclidean(mouth[4], mouth[5]))
    horizontal_width = dist.euclidean(mouth[6], mouth[7])
    return vertical_gaps / (3 * horizontal_width)
def N_MID(nose):
    """Centroid of the nose landmark points (column-wise mean of an (N, 2) array)."""
    return np.mean(nose, axis=0)
# --- One-time setup: face detector, landmark predictor, landmark slices ---
detect = dlib.get_frontal_face_detector()
file = "/media/khurshed2504/Data/ED Workshop/shape_predictor_68_face_landmarks.dat"
predictor = dlib.shape_predictor(file)
# Index ranges of the mouth and nose inside the 68-point landmark array.
(mst, mend) = face_utils.FACIAL_LANDMARKS_68_IDXS["mouth"]
(ns, nend) = face_utils.FACIAL_LANDMARKS_68_IDXS["nose"]
total_devices = 4
# Initialise each device's Firebase state; values encode device*10 + on/off bit.
for i in range(1,total_devices+1):
    FBconn.put('/state', '/', int(10*i))
rad = 70  # pixel radius around the calibrated nose position (dead zone)
nose_pts_x = []
nose_pts_y = []
mars = []
nose_pose_x = 0
nose_pose_y = 0
state_space = ['OFF', 'ON']
start = time.time()
cap = cv2.VideoCapture(0)
# --- Calibration: for ~10 s, average the nose position and open-mouth MAR ---
while time.time() - start < 10:
    _, image = cap.read()
    image = cv2.flip(image,1)
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV_FULL)  # NOTE(review): unused
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detect(gray, 0)
    for (i,rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        nose = shape[31:36]
        nose_pt = N_MID(nose)
        # Running mean of the nose centroid over all calibration frames.
        nose_pts_x.append(nose_pt[0])
        nose_pts_y.append(nose_pt[1])
        nose_pose_x = np.mean(nose_pts_x)
        nose_pose_y = np.mean(nose_pts_y)
        nose_roi = shape[ns: nend]
        nose_hull = cv2.convexHull(nose_roi)
        cv2.drawContours(image, [nose_hull], -1, (0, 255, 0), 1)
        # Hand-picked (top, bottom) lip pairs plus the two mouth corners.
        m_ind = [50, 58, 51, 57, 52, 56, 48, 54]
        mouth = shape[m_ind]
        mouth_roi = shape[mst: mend]
        mouth_hull = cv2.convexHull(mouth_roi)
        cv2.drawContours(image, [mouth_hull], -1, (0, 255, 0), 1)
        mars.append(MAR(mouth))
        mar_mean = np.mean(mars)
    # Instruction banner stacked under the camera frame.
    board = np.zeros((200, 640, 3), dtype=np.uint8)
    cv2.putText(board, "Open your mouth and keep your nose stable", (25,50), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))
    cv2.putText(board, "Calibration ON", (225,100), cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255))
    res = np.vstack((image, board))
    cv2.imshow('Calibration', res)
    k = cv2.waitKey(5) & 0xff
    if k == 27:  # ESC aborts calibration early
        break
cap.release()
cv2.destroyAllWindows()
print("Mean Nose Position: ", nose_pose_x, nose_pose_y)
# NOTE(review): mar_mean is only bound inside the face loop — this raises
# NameError if no face was ever detected during calibration. TODO confirm.
print("Mean Mouth Aspect Ratio: ", mar_mean)
# --- Main loop: head tilt selects a device, mouth-open toggles it ---
cap = cv2.VideoCapture(0)
rcnt = 0  # consecutive frames with nose right of the dead zone
lcnt = 0  # consecutive frames with nose left of the dead zone
dev_no = 1
ptr = 1
nose_pts_x = []
nose_pts_y = []
dev_arr = np.arange(1,total_devices+1)
dev_states = np.zeros(total_devices)  # 0 = OFF, 1 = ON per device
ut = 0.8  # fraction of calibrated MAR that counts as "mouth open"
fcnt = 0  # consecutive "mouth open" frames
min_device_change_frames = 12
min_toggle_frames = 15
while True:
    _, image = cap.read()
    image = cv2.flip(image,1)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detect(gray, 0)
    for (i,rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = face_utils.shape_to_np(shape)
        m_ind = [50, 58, 51, 57, 52, 56, 48, 54]
        mouth = shape[m_ind]
        nose = shape[31:36]
        nose_pt = N_MID(nose)
        nose_x = nose_pt[0]
        # Count frames spent outside the calibrated dead zone on each side.
        if nose_x > nose_pose_x + rad:
            rcnt += 1
        if nose_x < nose_pose_x - rad:
            lcnt += 1
        # Sustained right tilt: cycle to the next device.
        if rcnt > min_device_change_frames:
            rcnt = 0
            ptr += 1
            dev_no = dev_arr[(ptr%total_devices) - 1]
            print("Selected Device: ",dev_no)
            print("Current State: ", state_space[int(dev_states[dev_no-1])])
        # Sustained left tilt: cycle to the previous device.
        if lcnt > min_device_change_frames:
            lcnt = 0
            ptr -= 1
            dev_no = dev_arr[(ptr%total_devices) - 1]
            print("Selected Device: ", dev_no)
            print("Current State: ", state_space[int(dev_states[dev_no-1])])
        mouth_roi = shape[mst: mend]
        nose_roi = shape[ns: nend]
        mouth_hull = cv2.convexHull(mouth_roi)
        nose_hull = cv2.convexHull(nose_roi)
        cv2.drawContours(image, [mouth_hull], -1, (0, 255, 0), 1)
        cv2.drawContours(image, [nose_hull], -1, (0, 255, 0), 1)
        mar = MAR(mouth)
        # NOTE(review): fcnt is never reset when the mouth closes, so brief
        # openings accumulate across time toward a toggle. TODO confirm intended.
        if mar > mar_mean*ut:
            fcnt += 1
        # Sustained open mouth: flip the selected device and push to Firebase.
        if fcnt > min_toggle_frames:
            fcnt = 0
            dev_states[dev_no-1] = 1 - dev_states[dev_no-1]
            print("Device Number: {}, State: {}".format(dev_no, state_space[int(dev_states[dev_no-1])]))
            data = int(10*dev_no + dev_states[dev_no-1])
            FBconn.put('/state', '/', data)
    # Visual feedback: the calibrated dead zone circle.
    cv2.circle(image, (int(nose_pose_x), int(nose_pose_y)), rad, (255,0,0), 1)
    cv2.imshow('Image', image)
    k = cv2.waitKey(5) & 0xff
    if k == 27:  # ESC quits
        cap.release()
        break
cv2.destroyAllWindows()
cap.release() | StarcoderdataPython |
3528027 | '''
Please move this file to src/ before running the tests
'''
import unittest
from inverse_text_normalization.run_predict import inverse_normalize_text
class TamilInverseTextNormalization(unittest.TestCase):
    """Tests that spoken-form Tamil numbers are inverse-normalised to digits."""

    def test_single_digit_numbers_are_converted_to_numerals(self):
        data = ['நான்கு', 'எட்டு', 'என்னிடம் ஐந்து பேனாக்கள் உள்ளன']
        expected_output = ['4', '8', 'என்னிடம் 5 பேனாக்கள் உள்ளன']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_double_digit_numbers_are_converted_to_numerals(self):
        # FIX: commas were missing after the third element of both lists,
        # so implicit string concatenation silently merged two sentences
        # into one in both data and expected_output.
        data = ['பதினெட்டு', 'என்னிடம் ஐம்பது பூனைகள் உள்ளன', 'என்னிடம் எண்பத்தியொன்று பேனாக்கள் உள்ளன',
                'என்னிடம் இருபத்து நான்கு பேனாக்கள் உள்ளன']
        expected_output = ['18', 'என்னிடம் 50 பூனைகள் உள்ளன', 'என்னிடம் 81 பேனாக்கள் உள்ளன',
                           'என்னிடம் 24 பேனாக்கள் உள்ளன']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_upto_nine_with_hundreds_are_converted_to_numerals(self):
        #TODO: spelling of 900/90
        '''
        Numbers from 200 to 900 do not follow normal grammar
        '''
        data = ['நூறு',
                'இருநூறு',
                'முந்நூறு',
                'நானூறு',
                'ஐநூறு',
                'அறுநூறு',
                'எழுநூறு',
                'எண்ணூறு',
                'தொண்ணூறு' #90 not 900 tulayeram
                ]
        expected_output = ['100',
                           '200',
                           '300',
                           '400',
                           '500',
                           '600',
                           '700',
                           '800',
                           '900']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_with_hundreds_are_converted_to_numerals(self):
        data = ['இருநூறு',
                'இருநூறு மூன்று',
                'ஒன்று நூறு முப்பத்து ஒன்பது படங்கள் பார்த்திருக்கிறேன்',
                'அவளிடம் எண்ணூறு நாற்பத்து நான்கு அட்டைகள் உள்ளன'
                ]
        expected_output = ['200',
                           '203',
                           '139 படங்கள் பார்த்திருக்கிறேன்',
                           'அவளிடம் 844 அட்டைகள் உள்ளன'
                           ]
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_with_tens_of_hundreds_are_converted_to_numerals(self):
        data = ['பதினொன்று நூறு',
                'பன்னிரண்டு நூறு தொண்ணூற்றிஒன்பது',
                'முப்பத்து ஆறு நூறு அறுபத்து ஏழு',
                'எண்பத்தியொன்பது நூறு இருபத்து மூன்று']
        expected_output = ['1100',
                           '1299',
                           '3667',
                           '8923']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_with_thousands_are_converted_to_numerals(self):
        data = ['ஆயிரம்',
                'ஒன்று ஆயிரம்',
                'ஒன்று ஆயிரம் முப்பத்து ஒன்று',
                'இருபத்து ஆறு ஆயிரம்'
                ]
        expected_output = ['1000', '1000', '1031', '26,000']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_with_lakhs_are_converted_to_numerals(self):
        data = ['இலட்சம்','ஒன்று இலட்சம்', 'ஒன்பது இலட்சம்',
                'நாற்பத்து ஆறு இலட்சம் இருபத்து மூன்று ஆயிரம் ஒன்பது நூறு ஐம்பத்து இரண்டு',
                'நாற்பத்து ஆறு இலட்சம் இருபத்து மூன்று ஆயிரம் தொண்ணூறு ஐம்பத்து இரண்டு'
                ]
        expected_output = ['1,00,000', '1,00,000', '9,00,000', '46,23,952', '46,23,952']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_num_with_crores_are_converted_to_numerals(self):
        data = ['கோடி',
                'ஒன்று கோடி',
                'ஏழு கோடி',
                'தொண்ணூற்றிநான்கு கோடி ஐந்து இலட்சம் முந்நூறு இருபத்து இரண்டு'
                ]
        expected_output = ['1,00,00,000',
                           '1,00,00,000',
                           '7,00,00,000',
                           '94,05,00,322']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
    def test_money_is_converted_to_corresponding_numerals(self):
        data = ['ஆறு கோடி ரூபாய்',#'கோடி ரூபாய்',
                'முந்நூறு ரூபாய்',
                'வங்கியில் பத்து ரூபாய் கடன் வாங்கினார்',
                'அவளிடம் இருபது டாலர்கள் உள்ளன',
                'அவளிடம் ஒன்று நூறு பவுண்டுகள் உள்ளன'
                ]
        expected_output = ['₹ 6,00,00,000',
                           '₹ 300',
                           'வங்கியில் ₹ 10 கடன் வாங்கினார்',
                           'அவளிடம் $ 20 உள்ளன',
                           'அவளிடம் £ 100 உள்ளன']
        inverse_normalizer_prediction = inverse_normalize_text(data, lang='ta')
        self.assertEqual(expected_output, inverse_normalizer_prediction)
if __name__ == '__main__':
    # Run the Tamil inverse-text-normalisation tests directly.
    unittest.main()
| StarcoderdataPython |
354663 | <gh_stars>1-10
"""
Run this example to check if cvxpy is working properly with external solvers such as CVXOPT and Gurobi"""
import cvxpy as cp
import numpy as np
# Problem data: random least-squares system with 30 equations, 20 unknowns.
m = 30
n = 20
np.random.seed(1)  # deterministic data so repeated runs print the same value
A = np.random.randn(m, n)
b = np.random.randn(m)
# Construct the problem: box-constrained least squares min ||Ax - b||^2, 0 <= x <= 1.
x = cp.Variable(n)
objective = cp.Minimize(cp.sum_squares(A@x - b))
constraints = [0 <= x, x <= 1]
prob = cp.Problem(objective, constraints)
# Solve with GUROBI. Both solvers should report (approximately) the same optimum.
prob.solve(solver=cp.GUROBI)
print("optimal value with GUROBI:", prob.value)
# Solve with CVXOPT.
prob.solve(solver=cp.CVXOPT)
print("optimal value with CVXOPT:", prob.value)
| StarcoderdataPython |
5125695 | itm_no_item = 0
itm_french_cav_pistol = 1
itm_french_officer_pistol = 2
itm_french_pistol_1766 = 3
itm_french_pistol_1777 = 4
itm_russian_pistol = 5
itm_british_pistol = 6
itm_french_mousquiton = 7
itm_french_mousquiton_melee = 8
itm_french_mousquiton_light = 9
itm_french_mousquiton_light_melee = 10
itm_french_dragoon_musket = 11
itm_french_dragoon_musket_melee = 12
itm_russian_dragoon_musket = 13
itm_russian_dragoon_musket_melee = 14
itm_russian_cavalry_stutzer_1803 = 15
itm_russian_gusarskiy_karabin = 16
itm_russian_gusarskiy_karabin_melee = 17
itm_british_carbine = 18
itm_british_carbine_melee = 19
itm_independent_kentucky_rifle = 20
itm_russian_rifle_1805 = 21
itm_russian_rifle_1805_melee = 22
itm_british_baker_rifle = 23
itm_british_baker_rifle_melee = 24
itm_sniper_rifle = 25
itm_blunderbluss = 26
itm_cannon_ball_dummy = 27
itm_cannon_canister_dummy = 28
itm_cannon_explosion_dummy = 29
itm_drown_dummy = 30
itm_admin_kill_dummy = 31
itm_french_charleville = 32
itm_french_charleville_melee = 33
itm_french_versailles = 34
itm_french_versailles_melee = 35
itm_british_brown_bess = 36
itm_british_brown_bess_melee = 37
itm_russian_musket_1808 = 38
itm_russian_musket_1808_melee = 39
itm_austrian_musket = 40
itm_austrian_musket_melee = 41
itm_prussian_potsdam = 42
itm_prussian_potsdam_melee = 43
itm_french_art_off_sword = 44
itm_french_carabineer_sword = 45
itm_french_briquet_garde = 46
itm_french_light_cav_sabre_garde = 47
itm_french_heavy_cav_sabre_garde = 48
itm_french_inf_off_sabre_garde = 49
itm_french_heavy_cav_off_sabre = 50
itm_french_inf_off_sabre = 51
itm_french_light_cav_off_sabre = 52
itm_french_light_inf_off_sabre = 53
itm_french_line_cav_sabre = 54
itm_french_briquet = 55
itm_french_light_cav_sabre = 56
itm_french_sappeur_sword = 57
itm_russian_sabre_1798 = 58
itm_russian_sabre_1809 = 59
itm_russian_sword_1810 = 60
itm_russian_guard_sword_1799 = 61
itm_russian_briquet_1807 = 62
itm_russian_jaeger_bayonet = 63
itm_russian_jaeger_bayonet_jaeger = 64
itm_russian_officer_sword = 65
itm_russian_officer_sword_jaeger = 66
itm_russian_peasant_axe = 67
itm_brokenbottle = 68
itm_brokenbottle_melee = 69
itm_russian_kindjal = 70
itm_russian_guard_off_sword = 71
itm_russian_sappeur_dagger = 72
itm_russian_sappeur_dagger_invis = 73
itm_russian_peasant_knife = 74
itm_russian_peasant_serp = 75
itm_british_highlander_officer_sword = 76
itm_british_heavy_cav_sword = 77
itm_british_light_cav_sabre = 78
itm_british_baker_bayonet = 79
itm_british_musician_sword = 80
itm_british_musician_sword_invis = 81
itm_british_officer_sword = 82
itm_native_iroquois_tomahawk = 83
itm_native_iroquois_tomahawk_melee = 84
itm_native_cherokee_tomahawk = 85
itm_native_cherokee_tomahawk_melee = 86
itm_native_headache_club = 87
itm_austrian_infantry_briquet = 88
itm_austrian_infantry_briquet_black = 89
itm_austrian_jaeger_bayonet = 90
itm_austrian_jaeger_bayonet_invis = 91
itm_russian_briquet_1807_black = 92
itm_russian_briquet_1807_black_blackbelt = 93
itm_russian_briquet_1807_landwehr = 94
itm_russian_peasant_axe_landwehr = 95
itm_french_briquet_garde_fake = 96
itm_french_briquet_fake = 97
itm_russian_briquet_1807_fake = 98
itm_russian_briquet_1807_black_fake = 99
itm_russian_briquet_1807_black_blackbelt_fake = 100
itm_russian_briquet_1807_landwehr_fake = 101
itm_russian_peasant_axe_landwehr_fake = 102
itm_austrian_infantry_briquet_fake = 103
itm_banhammer = 104
itm_drumstick_right = 105
itm_flute = 106
itm_horn = 107
itm_trumpet = 108
itm_bugle = 109
itm_bagpipe = 110
itm_bullets = 111
itm_pistol_ammo = 112
itm_canister_ammo = 113
itm_shell_fragment = 114
itm_explosive_bullets = 115
itm_cannon_cartridge_round = 116
itm_cannon_cartridge_shell = 117
itm_cannon_cartridge_canister = 118
itm_cannon_cartridge_bomb = 119
itm_rockets = 120
itm_french_lance = 121
itm_prussian_lance = 122
itm_austrian_lance = 123
itm_russian_cossack_pike = 124
itm_russian_lancer_pike = 125
itm_russian_opolcheniye_pike = 126
itm_russian_peasant_kosa = 127
itm_russian_peasant_fork = 128
itm_russian_peasant_pitchfork = 129
itm_russian_peasant_sap = 130
itm_birch_trunk = 131
itm_russian_peasant_kosa2 = 132
itm_russian_peasant_club = 133
itm_russian_peasant_birch_club = 134
itm_russian_peasant_pike = 135
itm_russian_peasant_kuvalda = 136
itm_russian_peasant_2handed_axe = 137
itm_russian_peasant_rogatina = 138
itm_flag_france_45e = 139
itm_flag_france_84e = 140
itm_flag_france_vistula = 141
itm_flag_france_grenadiers = 142
itm_flag_france_15 = 143
itm_flag_france_hussard = 144
itm_flag_france_carab = 145
itm_flag_france_cuirassier = 146
itm_flag_france_cheval = 147
itm_flag_france_dragon = 148
itm_flag_france_lancier = 149
itm_flag_russia_opolcheniye = 150
itm_flag_russia_pavlovsk_color = 151
itm_flag_russia_pavlovsk_white = 152
itm_flag_russia_preobragenskiy_color = 153
itm_flag_russia_preobragenskiy_white = 154
itm_flag_russia_simbirsk_color = 155
itm_flag_russia_simbirsk_white = 156
itm_flag_russia_kyiv_dragoon = 157
itm_britain_colour_33rd_regt = 158
itm_britain_colour_33rd_king = 159
itm_britain_colour_42nd_regt = 160
itm_britain_colour_42nd_king = 161
itm_britain_colour_2nd_regt = 162
itm_britain_colour_2nd_king = 163
itm_britain_colour_51st_regt = 164
itm_britain_colour_51st_king = 165
itm_britain_colour_kgl_regt = 166
itm_britain_colour_kgl_king = 167
itm_britain_colour_blues = 168
itm_prussia_colour_infantry = 169
itm_prussia_colour_infantry2 = 170
itm_prussia_colour_guard = 171
itm_prussia_colour_landwehr = 172
itm_prussia_colour_hussard = 173
itm_austria_colour_leibfahne = 174
itm_austria_colour_ordinarfahne = 175
itm_austria_colour_ordinarfahne_cav = 176
itm_cannon_lighter = 177
itm_ramrod = 178
itm_rocket_placement = 179
itm_spyglass = 180
itm_grenade = 181
itm_sapper_axe = 182
itm_sapper_axe_rus = 183
itm_construction_hammer = 184
itm_shovel = 185
itm_shovel_undig = 186
itm_french_bomb = 187
itm_french_voltigeur_body_officer = 188
itm_french_voltigeur_body_ranker = 189
itm_french_voltigeur_body_sarge = 190
itm_french_45e_body_officer = 191
itm_french_45e_body_colours = 192
itm_french_45e_body_ranker = 193
itm_french_84e_body_officer = 194
itm_french_84e_body_ranker = 195
itm_french_84e_body_sarge = 196
itm_french_dragoon_body_ranker = 197
itm_french_dragoon_body_officer = 198
itm_french_general_body_boney = 199
itm_french_grenadiers_a_cheval_body_colours = 200
itm_french_grenadiers_a_cheval_body_ranker = 201
itm_french_grenadiers_a_cheval_body_officer = 202
itm_french_gap_body_colours = 203
itm_french_gap_body_ranker = 204
itm_french_gap_body_officer = 205
itm_french_art_ranker_body = 206
itm_french_art_ranker_body_alt = 207
itm_french_art_officer_body = 208
itm_french_vistula_body_ranker = 209
itm_french_vistula_body_colours = 210
itm_french_vistula_body_officer = 211
itm_french_lancer_body_ranker = 212
itm_french_lancer_body_colours = 213
itm_french_lancer_body_officer = 214
itm_french_cuirassier_ranker = 215
itm_french_cuirassier_colours = 216
itm_french_cuirassier_officer = 217
itm_french_carabineer_ranker = 218
itm_french_carabineer_colours = 219
itm_french_carabineer_officer = 220
itm_french_hussards_ranker = 221
itm_french_hussards_nco = 222
itm_french_hussards_officer = 223
itm_french_hussards_trumpeter = 224
itm_french_sapper = 225
itm_french_grap_body_drummer = 226
itm_french_grap_body_flautist = 227
itm_french_voltigeur_body_hornist = 228
itm_french_45e_body_drummer = 229
itm_french_45e_body_flautist = 230
itm_french_84e_body_drummer = 231
itm_french_84e_body_flautist = 232
itm_fr_carabineer_trumpeter = 233
itm_fr_cuirassier_body_trumpeter = 234
itm_french_dragoon_body_trumpeter = 235
itm_french_lancer_trumpeter_body = 236
itm_french_vistula_body_drummer = 237
itm_french_vistula_body_flautist = 238
itm_french_dragoon_body_sargent = 239
itm_french_arty_train = 240
itm_french_grenadier_a_cheval_body_trumpeter = 241
itm_british_infantry_ranker = 242
itm_british_infantry_sarge = 243
itm_british_infantry_officer = 244
itm_british_infantry_drum = 245
itm_british_infantry_flute = 246
itm_british_kgl_ranker = 247
itm_british_kgl_sarge = 248
itm_british_kgl_officer = 249
itm_british_kgl_drum = 250
itm_british_kgl_flute = 251
itm_british_highland_ranker = 252
itm_british_highland_sarge = 253
itm_british_highland_officer = 254
itm_british_highland_drummer = 255
itm_british_highland_piper = 256
itm_british_highland_piper_2 = 257
itm_british_guard_ranker = 258
itm_british_guard_sarge = 259
itm_british_guard_officer = 260
itm_british_guard_drum = 261
itm_british_guard_flute = 262
itm_british_light_ranker = 263
itm_british_light_sarge = 264
itm_british_light_officer = 265
itm_british_light_bugler = 266
itm_british_rifle_ranker = 267
itm_british_rifle_sarge = 268
itm_british_rifle_captain = 269
itm_british_rifle_bugler = 270
itm_british_artillery_ranker = 271
itm_british_artillery_officer = 272
itm_british_artillery_train = 273
itm_british_light_dragoon = 274
itm_british_light_dragoon_officer = 275
itm_british_inniskilling_ranker = 276
itm_british_inniskilling_officer = 277
itm_british_blue_ranker = 278
itm_british_blue_officer = 279
itm_british_wellington = 280
itm_british_rocketeer = 281
itm_british_sapper = 282
itm_prussian_infantry_ranker = 283
itm_prussian_infantry_nco = 284
itm_prussian_infantry_officer = 285
itm_prussian_infantry_drum = 286
itm_prussian_infantry_flute = 287
itm_prussian_dragoon_ranker = 288
itm_prussian_dragoon_nco = 289
itm_prussian_dragoon_officer = 290
itm_prussian_dragoon_trumpet = 291
itm_prussian_jaeger_ranker = 292
itm_prussian_jaeger_ranker_alt = 293
itm_prussian_jaeger_nco = 294
itm_prussian_jaeger_officer = 295
itm_prussian_jaeger_hornist = 296
itm_prussian_guard_ranker = 297
itm_prussian_guard_nco = 298
itm_prussian_guard_officer = 299
itm_prussian_guard_drummer = 300
itm_prussian_guard_flute = 301
itm_prussian_arty_ranker = 302
itm_prussian_arty_train = 303
itm_prussian_arty_officer = 304
itm_prussian_hussar_ranker = 305
itm_prussian_hussar_nco = 306
itm_prussian_hussar_officer = 307
itm_prussian_hussar_officer_variant = 308
itm_prussian_hussar_trumpet = 309
itm_prussian_infantry2_ranker = 310
itm_prussian_infantry2_nco = 311
itm_prussian_infantry2_officer = 312
itm_prussian_infantry2_drum = 313
itm_prussian_infantry2_flute = 314
itm_prussian_landwehr_ranker = 315
itm_prussian_landwehr_ranker_alt = 316
itm_prussian_landwehr_nco = 317
itm_prussian_landwehr_officer = 318
itm_prussian_landwehr_drum = 319
itm_prussian_landwehr_flute = 320
itm_prussian_landwehr_cav_ranker = 321
itm_prussian_landwehr_cav_nco = 322
itm_prussian_landwehr_cav_officer = 323
itm_prussian_landwehr_cav_trumpet = 324
itm_prussian_blucher = 325
itm_prussian_cuirassier_ranker = 326
itm_prussian_cuirassier_nco = 327
itm_prussian_cuirassier_officer = 328
itm_prussian_cuirassier_trumpet = 329
itm_prussian_freikorps_ranker = 330
itm_prussian_freikorps_nco = 331
itm_prussian_freikorps_officer = 332
itm_prussian_freikorps_drum = 333
itm_prussian_freikorps_flute = 334
itm_prussian_pioneer = 335
itm_russian_infantry1 = 336
itm_russian_infantry_nco = 337
itm_russian_infantry_officer = 338
itm_russian_drummer = 339
itm_russian_flautist = 340
itm_russian_hussar_officer = 341
itm_russian_hussar_nco = 342
itm_russian_hussar_ranker = 343
itm_russian_hussar_trumpeter = 344
itm_russian_horse_guard = 345
itm_russian_horse_guard_nco = 346
itm_russian_horse_guard_officer = 347
itm_russian_horse_guard_trumpeter = 348
itm_russian_cossack_officer = 349
itm_russian_cossack = 350
itm_kutuzov = 351
itm_russian_militia_officer = 352
itm_russian_militia_ranker = 353
itm_russian_guard_officer = 354
itm_russian_guard_nco = 355
itm_russian_guard_ranker = 356
itm_russian_guard_drummer = 357
itm_russian_guard_flautist = 358
itm_russian_jaeger_officer = 359
itm_russian_jaeger_nco = 360
itm_russian_jaeger_ranker = 361
itm_russian_jaeger_musician = 362
itm_russian_gren_officer = 363
itm_russian_gren_nco = 364
itm_russian_gren_ranker = 365
itm_russian_arty_officer = 366
itm_russian_arty_nco = 367
itm_russian_arty_ranker = 368
itm_russian_dragoon_officer = 369
itm_russian_dragoon_nco = 370
itm_russian_dragoon_ranker = 371
itm_russian_dragoon_trumpeter = 372
itm_rus_partizan1 = 373
itm_rus_partizan2 = 374
itm_russian_uhlan_officer = 375
itm_russian_uhlan_nco = 376
itm_russian_uhlan_ranker = 377
itm_russian_uhlan_trumpeter = 378
itm_rus_pioneer = 379
itm_austrian_infantry = 380
itm_austrian_infantry_nco = 381
itm_austrian_infantry_officer = 382
itm_austrian_infantry_drummer = 383
itm_austrian_infantry_fifer = 384
itm_austrian_infantry2 = 385
itm_austrian_infantry2_nco = 386
itm_austrian_infantry2_officer = 387
itm_austrian_infantry2_drummer = 388
itm_austrian_infantry2_fifer = 389
itm_austrian_grenadier = 390
itm_austrian_grenadier_alt = 391
itm_austrian_grenadier_nco = 392
itm_austrian_grenadier_officer = 393
itm_austrian_grenadier_drummer = 394
itm_austrian_grenadier_fifer = 395
itm_austrian_grenzer = 396
itm_austrian_grenzer_nco = 397
itm_austrian_grenzer_officer = 398
itm_austrian_grenzer_drummer = 399
itm_austrian_grenzer_fifer = 400
itm_austrian_jaeger = 401
itm_austrian_jaeger_nco = 402
itm_austrian_jaeger_officer = 403
itm_austrian_jaeger_hornist = 404
itm_austrian_lightcav = 405
itm_austrian_lightcav_nco = 406
itm_austrian_lightcav_trumpet = 407
itm_austrian_lightcav_officer = 408
itm_austrian_dragoon = 409
itm_austrian_dragoon_nco = 410
itm_austrian_dragoon_trumpet = 411
itm_austrian_dragoon_officer = 412
itm_austrian_uhlan = 413
itm_austrian_uhlan_nco = 414
itm_austrian_uhlan_trumpet = 415
itm_austrian_uhlan_officer = 416
itm_austrian_arty = 417
itm_austrian_arty_train = 418
itm_austrian_arty_officer = 419
itm_austrian_pioneer = 420
itm_austrian_hussar = 421
itm_austrian_hussar_nco = 422
itm_austrian_hussar_trumpet = 423
itm_austrian_hussar_officer = 424
itm_austrian_cuiraisser = 425
itm_austrian_cuiraisser_nco = 426
itm_austrian_cuiraisser_trumpet = 427
itm_austrian_cuiraisser_officer = 428
itm_austrian_schwarzenberg = 429
itm_character_uniform = 430
itm_rus_pavlovsk_ranker = 431
itm_rus_kyiv_dragoons_all = 432
itm_rus_kyiv_dragoons_trumpeter = 433
itm_rus_drummer_shako = 434
itm_rus_chevalier_hat = 435
itm_rus_chevalier_hat_officer = 436
itm_rus_chevalier_hat_trumpeter = 437
itm_rus_arty_shako_officer = 438
itm_rus_arty_shako_nco = 439
itm_rus_arty_shako_ranker = 440
itm_rus_cossack_hat_officer = 441
itm_rus_cossack_hat_nco = 442
itm_rus_cossack_hat_ranker = 443
itm_rus_hussar_shako_officer = 444
itm_rus_hussar_shako_nco = 445
itm_rus_hussar_shako_ranker = 446
itm_rus_hussar_shako_trumpeter = 447
itm_rus_jaeger_shako_nco = 448
itm_rus_jaeger_shako_ranker = 449
itm_kutuzov_hat = 450
itm_rus_guard_shako_officer = 451
itm_rus_guard_shako_nco = 452
itm_rus_guard_shako_ranker = 453
itm_rus_guard_shako_musician = 454
itm_rus_infantry_officer_shako = 455
itm_rus_infantry_shako_nco = 456
itm_rus_pioneer_shako = 457
itm_rus_partisan_hat1 = 458
itm_rus_partisan_hat2 = 459
itm_rus_partisan_hat3 = 460
itm_rus_partisan_hat4 = 461
itm_rus_partisan_hat5 = 462
itm_rus_uhlan_czapka_officer = 463
itm_rus_uhlan_czapka_nco = 464
itm_rus_uhlan_czapka_ranker = 465
itm_rus_uhlan_czapka_trumpeter = 466
itm_french_dragoon_helmet = 467
itm_french_dragoon_helmet_officer = 468
itm_french_dragoon_helmet_trumpeter = 469
itm_french_cuirassier_helmet = 470
itm_french_cuirassier_helmet_officer = 471
itm_french_cuirassier_helmet_trumpeter = 472
itm_french_carabineer_helmet = 473
itm_french_carabineer_helmet_officer = 474
itm_french_carabineer_helmet_trumpeter = 475
itm_french_hussar_shako_colours = 476
itm_french_hussar_shako_ranker = 477
itm_french_hussar_shako_trumpeter = 478
itm_french_inf_shako_45_ranker = 479
itm_french_vistula_shako_colours = 480
itm_french_vistula_shako_ranker = 481
itm_french_vistula_shako_officer = 482
itm_french_inf_shako_84_colours = 483
itm_french_inf_shako_84_ranker = 484
itm_french_inf_shako_84_officer = 485
itm_french_voltigeur_shako_officer = 486
itm_french_voltigeur_shako_ranker = 487
itm_french_ldlg_czapka_officer = 488
itm_french_ldlg_czapka_ranker = 489
itm_french_ldlg_czapka_trumpeter = 490
itm_french_artillery_bearskin_officer = 491
itm_french_artillery_bearskin_ranker = 492
itm_french_gap_bearskin_officer = 493
itm_french_gap_bearskin_colours = 494
itm_french_gap_bearskin_ranker = 495
itm_french_sapper_bearskin = 496
itm_french_inf_bicorne_45_officer = 497
itm_french_hussar_bearskin_officer = 498
itm_french_artillery_train_shako = 499
itm_french_nappy_hat = 500
itm_grach_bearskin_ranker = 501
itm_grach_bearskin_officer = 502
itm_grach_bearskin_trumpeter = 503
itm_aus_arty_bicorn = 504
itm_aus_arty_cap_bicorn = 505
itm_aus_arty_train_hat = 506
itm_aus_cavalry_helmet_officer = 507
itm_aus_cavalry_helmet_ranker = 508
itm_aus_cavalry_helmet_trumpeter = 509
itm_aus_uhlan_czapka = 510
itm_aus_uhlan_czapka_officer = 511
itm_aus_uhlan_czapka_trumpeter = 512
itm_aus_shwarzenberg_bicorn = 513
itm_aus_grenadier_bearskin = 514
itm_aus_tyrol_hat = 515
itm_aus_tyrol_hat_officer = 516
itm_aus_pioneer_hat = 517
itm_aus_grenadier_bearskin_officer = 518
itm_aus_grenzer_officer = 519
itm_aus_grenzer_ranker = 520
itm_aus_infantry_nco = 521
itm_aus_infantry_officer = 522
itm_aus_infantry_ranker = 523
itm_aus_hussard_shako_nco = 524
itm_aus_hussard_shako_officer = 525
itm_aus_hussard_shako_ranker = 526
itm_aus_hussard_shako_trumpeter = 527
itm_british_artillery_shako_officer = 528
itm_british_artillery_shako_ranker = 529
itm_british_rocket_tarleton = 530
itm_british_kgl_shako_officer = 531
itm_british_kgl_shako_ranker = 532
itm_british_light_shako_ranker = 533
itm_british_light_shako_officer = 534
itm_british_rifle_shako_officer = 535
itm_british_rifle_shako_ranker = 536
itm_british_rifle_beret = 537
itm_british_rifle_beret_2 = 538
itm_british_highland_bonnet_ranker = 539
itm_british_highland_bonnet_ensign = 540
itm_british_highland_bonnet_officer = 541
itm_british_highland_bonnet_drummer = 542
itm_british_guard_shako_officer = 543
itm_british_guard_shako_ranker = 544
itm_33_stovepipe = 545
itm_british_lightdragoon_shako_ranker = 546
itm_british_lightdragoon_shako_officer = 547
itm_british_lightdragoon_shako_trumpeter = 548
itm_british_iniskilling_helmet_ranker = 549
itm_british_iniskilling_helmet_officer = 550
itm_british_iniskilling_helmet_trumpeter = 551
itm_british_lifeguard_helmet_ranker = 552
itm_british_lifeguard_helmet_officer = 553
itm_british_lifeguard_helmet_trumpeter = 554
itm_british_wellington_bicorne = 555
itm_prussian_landwehr_hat = 556
itm_prussian_landwehr_hat_2 = 557
itm_prussian_landwehr_hat_3 = 558
itm_prussian_shako = 559
itm_prussian_shako_2 = 560
itm_prussian_infantry_hat_officer = 561
itm_prussian_hussar_shako = 562
itm_prussian_hussar_officer_shako = 563
itm_prussian_blucher_hat = 564
itm_prussian_arty_shako_officer = 565
itm_prussian_arty_shako_ranker = 566
itm_prussian_dragoon_shako_officer = 567
itm_prussian_dragoon_shako_ranker = 568
itm_prussian_guard_musician_shako = 569
itm_prussian_guard_colours_shako = 570
itm_prussian_guard_officer_shako = 571
itm_prussian_guard_ranker_shako = 572
itm_prussian_inf_off_shako = 573
itm_prussian_jaeger_officer_shako = 574
itm_prussian_landwehr_cav_shako = 575
itm_prussian_landwehr_cav_shako_officer = 576
itm_prussian_pioneer_shako = 577
itm_prussian_shako_colours = 578
itm_prussian_shako_colours_2 = 579
itm_prussian_cuirassier_helmet = 580
itm_prussian_cuirassier_helmet_trumpet = 581
itm_prussian_freikorps_officer_hat = 582
itm_prussian_infantry2_hat = 583
itm_rus_opol_hat_ranker = 584
itm_rus_opol_hat_officer = 585
itm_pirate_hat = 586
itm_french_voltigeur_pants = 587
itm_french_voltigeur_officer_pants = 588
itm_french_dragoon_pants = 589
itm_french_general_boots = 590
itm_french_grenadiers_a_cheval_pants = 591
itm_french_vistula_pants = 592
itm_french_lancer_pants = 593
itm_french_lancer_officer_pants = 594
itm_french_hussards_ranker_pants = 595
itm_french_hussards_officer_pants = 596
itm_french_hussards_trumpeter_pants = 597
itm_french_sappeur_pants = 598
itm_french_art_ranker_pants = 599
itm_french_grap_pants = 600
itm_fr_arty_captain_bottefortes = 601
itm_fr_arty_train_bottefortes = 602
itm_fr_officer_bottefortes = 603
itm_french_captain_pants = 604
itm_french_basic_infantry_pants = 605
itm_rus_infantry_pants1 = 606
itm_rus_infantry_pants2 = 607
itm_rus_hussar_pants1 = 608
itm_rus_hussar_pants2 = 609
itm_rus_hussar_pants_nco = 610
itm_rus_hussar_pants_officer = 611
itm_rus_horse_guard_pants = 612
itm_rus_cossack_off_pants = 613
itm_rus_kutuzov_pants = 614
itm_rus_militia_off_pants = 615
itm_rus_militia_ranker_pants = 616
itm_rus_militia_ranker_pants1 = 617
itm_rus_jaeger_pants = 618
itm_rus_arty_pants = 619
itm_rus_dragoon_pants1 = 620
itm_rus_dragoon_pants2 = 621
itm_rus_uhlan_pants = 622
itm_austrian_infantry_pants = 623
itm_hungarian_pants = 624
itm_hungarian_pants_officer = 625
itm_austrian_jaeger_pants = 626
itm_austrian_jaeger_pants_officer = 627
itm_hungarian_hussar_pants = 628
itm_hungarian_uhlan_pants = 629
itm_austrian_schwarzenberg_pants = 630
itm_austrian_officer_boots = 631
itm_austrian_cavalry_boots = 632
itm_british_highland_kilt = 633
itm_british_highland_kilt_2 = 634
itm_british_highland_kilt_officer = 635
itm_british_rifle_captain_pants = 636
itm_british_rifle_pants_green = 637
itm_british_rifle_pants_grey = 638
itm_british_rifle_pants_rolled = 639
itm_british_coldstream_pants = 640
itm_british_sapper_pants = 641
itm_british_wellington_pants = 642
itm_british_light_dragoon_pants = 643
itm_british_rocketeer_pants = 644
itm_british_cav_pants = 645
itm_prussian_infantry_pants = 646
itm_prussian_jaeger_pants = 647
itm_prussian_landwehr_pants = 648
itm_prussian_landwehr_pants2 = 649
itm_prussian_blucher_pants = 650
itm_prussian_cavalry_pants = 651
itm_prussian_cavalry_pants2 = 652
itm_prussian_hussar_pants = 653
itm_prussian_hussar_pants_officer = 654
itm_prussian_freikorps_pants = 655
itm_officer_gloves = 656
itm_drummer_gloves = 657
itm_rus_chevgarde_gloves = 658
itm_fr_cuirassier_gloves = 659
itm_fr_artillery_gloves = 660
itm_br_cavalry_gloves = 661
itm_br_horseguard_gloves = 662
itm_br_cavalry_gloves_short = 663
itm_br_horseguard_gloves_short = 664
itm_hussar_horse_french = 665
itm_hussar_horse_french_trumpet = 666
itm_lancer_horse_french = 667
itm_lancer_horse_french_trumpet = 668
itm_dragoon_horse_french = 669
itm_dragoon_horse_french_trumpet = 670
itm_cuirassier_horse_french = 671
itm_cuirassier_horse_french_officer = 672
itm_cuirassier_horse_french_trumpet = 673
itm_carabineer_horse_french = 674
itm_carabineer_horse_french_officer = 675
itm_carabineer_horse_french_trumpet = 676
itm_heavy_horse_french = 677
itm_heavy_horse_french_trumpet = 678
itm_lightdragoon_horse_britain_1 = 679
itm_lightdragoon_horse_britain_2 = 680
itm_lightdragoon_horse_britain_3 = 681
itm_lightdragoon_horse_britain_4 = 682
itm_heavydragoon_horse_britain = 683
itm_heavy_horse_britain = 684
itm_lancer_horse_prussia_1 = 685
itm_lancer_horse_prussia_2 = 686
itm_hussar_horse_prussia_1 = 687
itm_hussar_horse_prussia_2 = 688
itm_dragoon_horse_prussia_1 = 689
itm_dragoon_horse_prussia_2 = 690
itm_heavy_horse_prussia_1 = 691
itm_heavy_horse_prussia_2 = 692
itm_cossack_horse_russia_1 = 693
itm_cossack_horse_russia_2 = 694
itm_cossack_horse_russia_3 = 695
itm_cossack_horse_russia_4 = 696
itm_lancer_horse_russia = 697
itm_hussar_horse_russia = 698
itm_dragoon_horse_russia = 699
itm_heavy_horse_russia = 700
itm_lancer_horse_austria = 701
itm_hussar_horse_austria = 702
itm_lightcav_horse_austria = 703
itm_dragoon_horse_austria = 704
itm_heavy_horse_austria = 705
itm_lancer_horse_placeholder = 706
itm_hussar_horse_placeholder = 707
itm_dragoon_horse_placeholder = 708
itm_heavy_horse_placeholder = 709
itm_admin_horse = 710
itm_arty_horse_cannon_french = 711
itm_arty_horse_cannon_british = 712
itm_arty_horse_cannon_russian = 713
itm_arty_horse_cannon_austrian = 714
itm_arty_horse_cannon_prussian = 715
itm_arty_horse_howitzer_french = 716
itm_arty_horse_howitzer_british = 717
itm_arty_horse_howitzer_russian = 718
itm_arty_horse_howitzer_austrian = 719
itm_arty_horse_howitzer_prussian = 720
itm_items_end = 721
| StarcoderdataPython |
1673696 | <filename>src/bxcommon/rpc/requests/subscribe_rpc_request.py
import fnmatch
from typing import TYPE_CHECKING, Callable, Any, List, Optional
from bxcommon import constants
from bxcommon.feed.feed import FeedKey
from bxcommon.feed.feed_manager import FeedManager
from bxcommon.feed.subscriber import Subscriber
from bxcommon.models.bdn_account_model_base import BdnAccountModelBase
from bxcommon.models.bdn_service_model_config_base import BdnFeedServiceModelConfigBase
from bxcommon.rpc.bx_json_rpc_request import BxJsonRpcRequest
from bxcommon.rpc.json_rpc_response import JsonRpcResponse
from bxcommon.rpc.requests.abstract_rpc_request import AbstractRpcRequest
from bxcommon.rpc.rpc_errors import RpcInvalidParams, RpcAccountIdError, RpcError
from bxutils import logging
from bxutils.logging.log_record_type import LogRecordType
if TYPE_CHECKING:
# noinspection PyUnresolvedReferences
# pylint: disable=ungrouped-imports,cyclic-import
from bxcommon.connections.abstract_node import AbstractNode
logger = logging.get_logger(__name__)
logger_filters = logging.get_logger(LogRecordType.TransactionFiltering, __name__)
class SubscribeRpcRequest(AbstractRpcRequest["AbstractNode"]):
    """Handle a ``subscribe`` JSON-RPC request.

    Validates the requested feed name, the caller's account entitlements
    (feed service access, permitted include fields, filtering permission)
    and the subscription options, then registers a subscriber with the
    feed manager and returns its subscription id.
    """

    help = {
        "params":
            '[feed_name, '
            '{'
            '"include": [field_1, field_2], "duplicates": false, "include_from_blockchain": true, '
            # Fixed: closing quote was missing after blockchain_protocol.
            '"blockchain_network": "Mainnet", "blockchain_protocol": "Ethereum"}].\n'
            "Available feeds: newTxs, pendingTxs, newBlocks, txReceipts, onBlock\n"
            "Available fields for transaction feeds: tx_hash, tx_contents (default: all)\n"
            "Available fields for block feed: hash, block (default: all)\n"
            "duplicates: false (filter out duplicates from feed, typically low fee "
            "transactions, default), true (include all duplicates)\n"
            "include_from_blockchain: include transactions received from the connected blockchain node (default: true)\n",
        "description": "Subscribe to a named feed for notifications",
    }

    def __init__(
        self,
        request: BxJsonRpcRequest,
        node: "AbstractNode",
        feed_manager: FeedManager,
        subscribe_handler: Callable[[Subscriber, FeedKey, Optional[str]], None],
        feed_network: int = 0,
        account_details: Optional[BdnAccountModelBase] = None
    ) -> None:
        # Placeholder values; validate_params (invoked by the base class
        # constructor — confirm in AbstractRpcRequest) fills these in from
        # the request params.
        self.feed_name = ""
        self.feed_network = feed_network
        self.feed_key = FeedKey(self.feed_name, self.feed_network)
        self.feed_manager = feed_manager
        self.subscribe_handler = subscribe_handler
        self.options = {}
        self.available_fields = []
        self.all_fields = []
        self.account_details = account_details
        self.service_model: Optional[BdnFeedServiceModelConfigBase] = None
        super().__init__(request, node)
        # By this point validation must have populated the feed name.
        assert self.feed_name != ""

    def validate_params(self) -> None:
        """Validate the whole request, raising an ``RpcError`` subtype on failure."""
        try:
            if not self.feed_manager.feeds:
                raise RpcAccountIdError(
                    self.request_id,
                    "Account does not have access to the transaction streaming service.",
                )
            self.validate_params_get_options()
            self.validate_params_feed_details()
            self.validate_params_service_details()
            self.validate_params_include_fields()
            self.validate_params_filters()
            assert self.feed_name != ""
        except RpcError as e:
            logger.debug({"msg": "Failed to validate subscribe request", "params": self.params, **e.to_json()})
            raise e

    def validate_params_get_options(self) -> None:
        """Unpack ``[feed_name, options]`` from the request params."""
        params = self.params
        if not isinstance(params, list) or len(params) != 2:
            raise RpcInvalidParams(
                self.request_id,
                "Subscribe RPC request params must be a list of length 2.",
            )
        feed_name, options = params
        # ALL_NETWORK_NUM is a wildcard; resolve it to this node's network.
        if self.feed_network == constants.ALL_NETWORK_NUM:
            self.feed_network = self.node.network_num
        self.feed_name = feed_name
        self.feed_key = FeedKey(self.feed_name, self.feed_network)
        logger.debug("Got new subscribe request for {}", self.feed_key)
        self.options = options

    def validate_params_feed_details(self) -> None:
        """Check the feed exists and record its field sets."""
        feed = self.feed_manager.get_feed(self.feed_key)
        if feed is None:
            raise RpcInvalidParams(
                self.request_id,
                f"{self.feed_name} is an invalid feed. "
                f"Available feeds: {[key.name for key in self.feed_manager.get_feed_keys(self.feed_network)]}",
            )
        self.available_fields = feed.FIELDS
        self.all_fields = feed.ALL_FIELDS

    def validate_params_include_fields(self) -> None:
        """Validate the ``include`` option against the account's allowed fields."""
        # Intersect the feed's fields with what the account plan allows.
        if self.service_model and self.service_model.available_fields:
            self.available_fields = [
                field for field in self.available_fields if allowed_field(field, self.service_model.available_fields)
            ]
        invalid_options = RpcInvalidParams(
            self.request_id,
            f"{self.options} Invalid feed include parameter. "
            "Your plan does not support all requested include parameters "
            'Valid format/fields: {"include": '
            f"{self.available_fields}"
            "}.",
        )
        if not isinstance(self.options, dict):
            raise invalid_options
        include = self.options.get("include", self.all_fields)
        if not isinstance(include, list):
            raise invalid_options
        # An explicit empty list means "everything".
        if not include:
            include = self.all_fields
        if self.available_fields:
            if any(
                included_field not in self.available_fields for included_field in include
            ):
                raise invalid_options
            # update options["include"] to support if was not specified
            self.options["include"] = include
        else:
            self.options["include"] = self.available_fields

    def validate_params_filters(self) -> None:
        """Validate and normalize the optional ``filters`` expression."""
        if "filters" in self.options and (not self.service_model or not self.service_model.allow_filtering):
            raise RpcAccountIdError(
                self.request_id,
                f"Account does not have filtering enabled for {self.feed_name} service.",
            )
        filters = self.options.get("filters", None)
        if filters:
            logger_filters.debug(filters)
            formatted_filters = self.format_filters(filters)
            logger_filters.debug("Validated filters: {}", formatted_filters)
            self.options["filters"] = formatted_filters

    def validate_params_service_details(self) -> None:
        """Check the account is entitled to this feed's service, if account info is present."""
        if self.account_details is None:
            return
        service = self.account_details.get_feed_service_config_by_name(self.feed_name)
        if service:
            service_model = service.feed
        else:
            service_model = None
        if not service or not service.is_service_valid():
            raise RpcAccountIdError(
                self.request_id,
                f"Account does not have access to the {self.feed_name} service.",
            )
        self.service_model = service_model

    async def process_request(self) -> JsonRpcResponse:
        """Register the subscriber and return its subscription id."""
        params = self.params
        assert isinstance(params, list)
        subscriber = self.feed_manager.subscribe_to_feed(self.feed_key, self.options)
        assert subscriber is not None  # already validated
        account_id = None
        if self.account_details is not None:
            account_details = self.account_details
            assert account_details is not None
            account_id = account_details.account_id
        self.subscribe_handler(subscriber, self.feed_key, account_id)
        return JsonRpcResponse(self.request_id, subscriber.subscription_id)

    def format_filters(self, filters: Any) -> str:
        """Parse and validate a filter expression, returning the normalized form."""
        valid_filters = self.feed_manager.get_valid_feed_filters(self.feed_key)
        invalid_filters = RpcInvalidParams(
            self.request_id,
            f"{filters} is not a valid set of filters. "
            'Valid format/filters: {"include": '
            f"{valid_filters}"
            "}.",
        )
        if not isinstance(filters, str):
            logger.error("Wrong filter type")
            raise invalid_filters
        if not valid_filters:
            raise invalid_filters
        logger_filters.debug("Validating filters")
        try:
            filters, keys = self.feed_manager.validate_feed_filters(self.feed_key, filters)
        except Exception as e:
            # Chain the parse error so the root cause is visible in logs.
            raise invalid_filters from e
        # Every key referenced by the expression must be a known filter.
        for key in keys:
            if key not in valid_filters:
                raise RpcInvalidParams(
                    self.request_id,
                    f"{key} is not a valid filter. "
                    'Valid format/filters: {"include": '
                    f"{valid_filters}"
                    "}.",
                )
        return filters
def allowed_field(field: str, available_fields: List[str]):
    """Return True if ``field`` matches any glob pattern in ``available_fields``.

    The literal pattern ``"all"`` acts as a wildcard that admits every field.
    """
    return any(
        fnmatch.fnmatch(field, pattern) or pattern == "all"
        for pattern in available_fields
    )
| StarcoderdataPython |
4939485 | <filename>full_pipe_line.py
import numpy as np
import pandas as pd
import cv2
import math
import os
import argparse
import pytesseract
import operator
from PIL import Image
from darknet import *
from tqdm import tqdm
# Load the trained YOLOv3 table-detection network and its class metadata once
# at import time; both globals are reused by every call to sakta().
# NOTE(review): these are machine-specific absolute paths — consider making
# them configurable via CLI arguments or environment variables.
net = load_net(b"/home/sasuke/Downloads/All_detection/yolov3-table.cfg",
               b"/home/sasuke/Downloads/All_detection/yolov3-table_18000.weights", 0)
meta = load_meta(b"/home/sasuke/Downloads/All_detection/data/table.data")
def cropping(image_path_str, r):
    """Crop every detected region out of the source image and save each crop.

    Each crop is upscaled 2x and written to
    ``cropped_images/<basename>_<i>.jpg`` (1-based index, detection order).

    :param image_path_str: path of the original image on disk
    :param r: darknet detections; each entry's third element is a
        (center_x, center_y, width, height) box — TODO confirm against detect()
    """
    # Read the image once instead of once per detection (loop-invariant).
    img = cv2.imread(image_path_str)
    base_name = image_path_str.split('/')[-1].split('.')[0]
    for i, detection in enumerate(r):
        cx, cy, w, h = (int(v) for v in detection[2])
        # Convert the center-based box to corner coordinates and crop.
        crop = img[cy - h // 2:cy + h // 2, cx - w // 2:cx + w // 2]
        # Upscale 2x with bicubic interpolation to help downstream OCR.
        crop = cv2.resize(crop, None, fx=2, fy=2, interpolation=cv2.INTER_CUBIC)
        cv2.imwrite('cropped_images/' + base_name + '_' + str(i + 1) + '.jpg', crop)
def sort_contours(cnts, method="left-to-right"):
    """Sort contours by the position of their bounding boxes.

    :param cnts: iterable of OpenCV contours
    :param method: one of "left-to-right", "right-to-left",
        "top-to-bottom", "bottom-to-top"
    :return: (sorted contours, matching bounding boxes) as two tuples
    """
    # Right-to-left / bottom-to-top are just the reversed sort order.
    reverse = method in ("right-to-left", "bottom-to-top")
    # Vertical orderings compare the y coordinate, horizontal ones the x.
    axis = 1 if method in ("top-to-bottom", "bottom-to-top") else 0
    boundingBoxes = [cv2.boundingRect(c) for c in cnts]
    ordered = sorted(
        zip(cnts, boundingBoxes),
        key=lambda pair: pair[1][axis],
        reverse=reverse,
    )
    (cnts, boundingBoxes) = zip(*ordered)
    return (cnts, boundingBoxes)
def row_detect(give_images):
    """OCR one cropped table image row by row.

    Binarises the image, extracts vertical/horizontal ruling lines with
    morphological open operations, combines them into a grid mask, finds the
    row boxes from that mask, saves each box (raw crop in ``kaata/``, a 2.5x
    upscaled copy in ``resized/``), runs Tesseract on the upscaled crop, and
    collects the non-empty OCR strings.

    :param give_images: path to a cropped table image
    :return: single-column pandas DataFrame with one OCR'd string per row
    """
    # Load as grayscale (flag 0).
    img = cv2.imread(give_images, 0)
    # Otsu threshold, then invert so ink becomes white on black.
    (thresh, img_bin) = cv2.threshold(img, 128, 255,cv2.THRESH_BINARY|cv2.THRESH_OTSU)
    img_bin = 255-img_bin
    # Kernel lengths are proportional to image width.
    # NOTE(review): both use shape[1] (width) — the vertical kernel would
    # normally scale with height; confirm this is intentional for these scans.
    kernel_length = np.array(img).shape[1]//50
    kernel_length_ver = np.array(img).shape[1]//30
    verticle_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, kernel_length_ver))
    hori_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (kernel_length, 1))
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    # Erode then dilate to keep only long vertical lines.
    img_temp1 = cv2.erode(img_bin, verticle_kernel, iterations=3)
    verticle_lines_img = cv2.dilate(img_temp1, verticle_kernel, iterations=3)
    # Same open operation for horizontal lines.
    img_temp2 = cv2.erode(img_bin, hori_kernel, iterations=3)
    horizontal_lines_img = cv2.dilate(img_temp2, hori_kernel, iterations=3)
    # Blend the two line masks equally, clean up, and re-threshold to get
    # the final table-grid image.
    alpha = 0.5
    beta = 1.0 - alpha
    img_final_bin = cv2.addWeighted(verticle_lines_img, alpha, horizontal_lines_img, beta, 0.0)
    img_final_bin = cv2.erode(~img_final_bin, kernel, iterations=2)
    (thresh, img_final_bin) = cv2.threshold(img_final_bin, 128,255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    # NOTE(review): the 3-value unpack requires OpenCV 3.x; OpenCV 4 returns
    # only (contours, hierarchy).
    im2, contours, hierarchy = cv2.findContours(img_final_bin, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    (contours, boundingBoxes) = sort_contours(contours, method="top-to-bottom")
    idx = 0
    l = []
    for c in contours:
        # Bounding box (x, y, width, height) for every contour.
        x, y, w, h = cv2.boundingRect(c)
        # Keep only wide, row-shaped boxes (wider than tall, above minimums).
        if (w > 80 and h > 20) and w > 1*h:
            idx += 1
            new_img = img[y:y+h, x:x+w]
            cv2.imwrite('kaata/' + give_images.split('/')[-1].split('.')[0] + '_' + str(idx) + '.jpg', new_img)
            im = Image.open('kaata/' + give_images.split('/')[-1].split('.')[0] + '_' + str(idx) + '.jpg')
            nx, ny = im.size
            # Upscale 2.5x and save at 300 dpi to improve Tesseract accuracy.
            im = im.resize((int(nx*2.5), int(ny*2.5)), Image.BICUBIC)
            im.save("resized/resize_" + give_images.split('/')[-1].split('.')[0] + '_' + str(idx) + '.jpg'
                    , dpi=(300,300))
            text = pytesseract.image_to_string(Image.open("resized/resize_" +
                                                          give_images.split('/')[-1].split('.')[0] + '_' +
                                                          str(idx) + '.jpg'))
            # Skip rows where OCR produced nothing.
            if text == (''):
                pass
            else:
                l.append(text)
    df_l = pd.DataFrame(l)
    return df_l
def sakta(image_path):
    """Run the full table-extraction pipeline on one image.

    Detects tables with the module-level YOLO net, crops each detection,
    OCRs every crop row by row, joins the per-table columns side by side,
    promotes the first OCR row to the header, and writes the result to
    ``csv_results/<basename>.csv``.

    :param image_path: image path as ``bytes`` (darknet's expected type)
    :return: None (``DataFrame.to_csv`` with a path returns None)
    """
    image_path_str = image_path.decode("utf-8")
    base_name = image_path_str.split('/')[-1].split('.')[0]
    detections = detect(net, meta, image_path)
    # Sort detections by their (cx, cy, w, h) box so columns come out in a
    # stable left-to-right/top-to-bottom order.
    detections = sorted(detections, key=operator.itemgetter(2))
    # Writes one crop per detection into cropped_images/.
    cropping(image_path_str, detections)
    tables = []
    for i in tqdm(range(len(detections))):
        crop_path = 'cropped_images/' + base_name + '_' + str(i + 1) + '.jpg'
        tables.append(row_detect(crop_path))
    combined = pd.concat(tables, axis=1)
    # Promote the first OCR row to be the column headers.
    header = combined.iloc[0]
    combined = combined[1:]
    combined.columns = header
    return combined.to_csv('csv_results/' + base_name + '.csv', index=None)
if __name__ == "__main__":
    # TODO(review): accept the image path via argparse (the argparse import
    # at the top of the file is currently unused) instead of hard-coding it.
    image_path = b"/home/sasuke/Downloads/All_detection/test_data/time.png"
    sakta(image_path)
6520697 | <gh_stars>0
import tensorflow as tf
from models.base_go_model import GoModel
from models import rnn_cells
class GoModelRNN(GoModel):
    """Base RNN Go Model.

    AlphaGo-Zero-style two-headed network (policy + value) operating on
    padded sequences of board positions; per-game sequence lengths are
    carried in ``features["game_length"]`` and used to mask losses/metrics.
    """

    def bottom(self, features):
        """Record the longest game in the batch; features pass through unchanged."""
        self.max_game_length = tf.reduce_max(features["game_length"])
        return features

    def top(self, body_output, features):
        """Build the policy and value heads on top of the body output.

        :param body_output: convolutional feature map over board positions
        :param features: must contain "legal_moves" (used to zero out
            logits of illegal moves)
        :return: (policy logits [batch, time, num_moves],
            value output [batch, time] in [-1, 1])
        """
        hp = self._hparams
        board_size = hp.board_size
        num_moves = hp.num_moves
        is_training = hp.mode == tf.estimator.ModeKeys.TRAIN
        legal_moves = features["legal_moves"]
        # Policy Head: 1x1 conv -> dense -> mask out illegal moves.
        with tf.variable_scope('policy_head'):
            p_conv = self.my_conv2d(body_output, filters=2, kernel_size=1)
            p_conv = self.my_batchnorm(p_conv, center=False, scale=False, training=is_training)
            p_conv = tf.nn.relu(p_conv)
            p_logits = tf.reshape(p_conv, [-1, self.max_game_length, 2 * board_size * board_size])
            p_logits = tf.layers.dense(p_logits, num_moves)
            # Multiplying by the legal-move mask zeroes logits of illegal moves.
            p_logits = tf.multiply(p_logits, legal_moves, name='policy_logits')
        # Value Head: 1x1 conv -> dense(256) -> scalar -> tanh to [-1, 1].
        with tf.variable_scope('value_head'):
            v_conv = self.my_conv2d(body_output, filters=1, kernel_size=1)
            v_conv = self.my_batchnorm(v_conv, center=False, scale=False, training=is_training)
            v_conv = tf.nn.relu(v_conv)
            v_fc = tf.reshape(v_conv, [-1, self.max_game_length, board_size * board_size])
            v_fc = tf.layers.dense(v_fc, 256)
            v_fc = tf.nn.relu(v_fc)
            v_output = tf.layers.dense(v_fc, 1)
            v_output = tf.reshape(v_output, [-1, self.max_game_length])
            v_output = tf.nn.tanh(v_output, name='value_output')
        return p_logits, v_output

    def loss(self, logits, features):
        """Compute masked policy cross-entropy, masked value MSE, and L2 terms.

        Padding positions beyond each game's length are excluded via a
        sequence mask before the means are taken.

        :return: ([policy_loss, value_loss, l2_loss],
            [per-position policy losses, per-position value losses])
        """
        game_lengths = features["game_length"]
        mask = tf.sequence_mask(game_lengths)
        p_logits, v_output = logits
        with tf.variable_scope('policy_loss'):
            p_targets = features["p_targets"]
            p_losses = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=p_logits,
                                                                      labels=tf.stop_gradient(p_targets))
            p_losses_masked = tf.boolean_mask(p_losses, mask)
            p_loss = tf.reduce_mean(p_losses_masked)
        with tf.variable_scope('value_loss'):
            v_targets = features['v_targets']
            v_losses = tf.square(v_targets - v_output)
            v_losses_masked = tf.boolean_mask(v_losses, mask)
            v_loss = tf.reduce_mean(v_losses_masked)
        with tf.variable_scope('l2_loss'):
            # Exclude biases and batch-norm betas from weight decay.
            reg_vars = [v for v in tf.trainable_variables()
                        if 'bias' not in v.name and 'beta' not in v.name]
            l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in reg_vars])
        return [p_loss, v_loss, l2_loss], [p_losses, v_losses]

    def policy_accuracy(self, features, predictions, mask=None):
        """Fraction of masked positions where the predicted move matches the target.

        :param mask: optional boolean mask; defaults to the game-length
            sequence mask.
        """
        with tf.variable_scope('policy_accuracy'):
            p_targets = features["p_targets"]
            game_lengths = features["game_length"]
            p_correct = tf.equal(p_targets, predictions)
            if mask is None:
                mask = tf.sequence_mask(game_lengths)
            p_correct = tf.boolean_mask(p_correct, mask)
            p_acc = tf.reduce_mean(tf.cast(p_correct, tf.float32))
        return p_acc

    def split_to_min_length(self, inputs, features):
        """Truncate inputs and all per-position features to hp.min_length steps.

        Also overwrites ``features["game_length"]`` so every game in the
        batch is treated as exactly min_length moves long.
        """
        hp = self.hparams
        self.max_game_length = hp.min_length
        inputs = inputs[:, :hp.min_length]
        features["game_length"] = tf.constant([hp.min_length] * hp.batch_size, tf.int64)
        features["p_targets"] = features["p_targets"][:, :hp.min_length]
        features["v_targets"] = features["v_targets"][:, :hp.min_length]
        features["legal_moves"] = features["legal_moves"][:, :hp.min_length]
        return inputs, features
class VanillaRNNModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a vanilla RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        with tf.variable_scope("VanillaRNN"):
            # Flatten each position's feature maps and go time-major, as
            # required by the cuDNN RNN ops.
            rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters * board_size * board_size])
            rnn_in = tf.transpose(rnn_in, [1, 0, 2])
            num_units = hp.num_dense_filters * board_size * board_size
            rnn = tf.contrib.cudnn_rnn.CudnnRNNTanh(num_layers=1, num_units=num_units)
            rnn_outputs, _ = rnn(rnn_in)
            # Back to batch-major, then to per-position NCHW images for the heads.
            rnn_outputs = tf.transpose(rnn_outputs, [1, 0, 2])
            rnn_outputs = tf.reshape(rnn_outputs,
                                     [-1, self.max_game_length, hp.num_dense_filters, board_size, board_size])
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class LSTMModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a LSTM RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        with tf.variable_scope("lstm"):
            # Flatten feature maps and go time-major for the cuDNN LSTM.
            rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters * board_size * board_size])
            rnn_in = tf.transpose(rnn_in, [1, 0, 2])
            num_units = hp.num_dense_filters * board_size * board_size
            lstm = tf.contrib.cudnn_rnn.CudnnLSTM(num_layers=1, num_units=num_units)
            rnn_outputs, _ = lstm(rnn_in)
            # Back to batch-major, then to per-position NCHW images for the heads.
            rnn_outputs = tf.transpose(rnn_outputs, [1, 0, 2])
            rnn_outputs = tf.reshape(rnn_outputs,
                                     [-1, self.max_game_length, hp.num_dense_filters, board_size, board_size])
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class GRUModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a GRU RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        with tf.variable_scope("gru"):
            # Flatten feature maps and go time-major for the cuDNN GRU.
            rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters * board_size * board_size])
            rnn_in = tf.transpose(rnn_in, [1, 0, 2])
            num_units = hp.num_dense_filters * board_size * board_size
            gru = tf.contrib.cudnn_rnn.CudnnGRU(num_layers=1, num_units=num_units)
            rnn_outputs, _ = gru(rnn_in)
            # Back to batch-major, then to per-position NCHW images for the heads.
            rnn_outputs = tf.transpose(rnn_outputs, [1, 0, 2])
            rnn_outputs = tf.reshape(rnn_outputs,
                                     [-1, self.max_game_length, hp.num_dense_filters, board_size, board_size])
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class ConvRNNModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        game_length = features["game_length"]
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        with tf.variable_scope("conv_rnn"):
            # Restore the time axis; the convolutional cell keeps the
            # channels-first spatial layout.
            rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
            cell = rnn_cells.ConvRNNCell(input_shape=[hp.num_filters, board_size, board_size],
                                         output_channels=hp.num_filters,
                                         kernel_shape=[3, 3],
                                         activation=tf.nn.relu)
            # dynamic_rnn stops each sequence at its true game length.
            rnn_outputs, _ = tf.nn.dynamic_rnn(cell, rnn_in, sequence_length=game_length,
                                               time_major=False, dtype=tf.float32)
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_filters, board_size, board_size])
        return rnn_outputs
class ConvLSTMModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv LSTM RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        game_length = features["game_length"]
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i + 1)):
                out = self.residual_block(out)
        with tf.variable_scope("conv_lstm"):
            rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
            # Conv2DLSTMCell expects channels-last (its input_shape below ends
            # with the channel dim), so move channels to the last axis here...
            rnn_in = tf.transpose(rnn_in, perm=[0, 1, 3, 4, 2])
            cell = tf.contrib.rnn.Conv2DLSTMCell(input_shape=[board_size, board_size, hp.num_filters],
                                                 kernel_shape=[3, 3],
                                                 output_channels=hp.num_filters,
                                                 use_bias=False,
                                                 skip_connection=False)
            # dynamic_rnn stops each sequence at its true game length.
            rnn_outputs, _ = tf.nn.dynamic_rnn(cell, rnn_in, sequence_length=game_length,
                                               time_major=False, dtype=tf.float32)
            # ...and move them back to channels-first for the output heads.
            rnn_outputs = tf.transpose(rnn_outputs, perm=[0, 1, 4, 2, 3])
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_filters, board_size, board_size])
        return rnn_outputs
class ConvGRUModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv GRU RNN layer."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        game_length = features["game_length"]
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        # Restore the time axis; the cell is configured channels-first below,
        # so no transpose is needed.
        rnn_in = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
        with tf.variable_scope("conv_gru"):
            cell = rnn_cells.ConvGRUCell(input_shape=[board_size, board_size],
                                         kernel_shape=[3, 3],
                                         output_channels=hp.num_filters,
                                         use_bias=False,
                                         normalize=False,
                                         data_format='channels_first')
            # dynamic_rnn stops each sequence at its true game length.
            rnn_outputs, _ = tf.nn.dynamic_rnn(cell, rnn_in, sequence_length=game_length,
                                               time_major=False, dtype=tf.float32)
            rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_filters, board_size, board_size])
        return rnn_outputs
def static_rnn(cell, inputs, init_state, min_length, name):
    """Statically unrolled RNN using only the first min_length positions.

    Args:
        cell: instance of a tf.nn.rnn_cell
        inputs: (tf.Tensor) input of the rnn
        init_state: (tf.Tensor) initial state of the rnn cell
        min_length: (int) minimal length in the dataset
        name: (str) name of the static rnn variable scope

    Returns:
        (tf.Tensor) output of the rnn, stacked along the time axis
    """
    # One tensor per time step, taken from axis 1 (the time axis).
    step_inputs = tf.unstack(inputs, min_length, axis=1)

    step_outputs = []
    state = init_state
    with tf.variable_scope(name) as scope:
        for step, step_in in enumerate(step_inputs):
            # Every step after the first reuses the cell's variables.
            if step > 0:
                scope.reuse_variables()
            out, state = cell(step_in, state)
            step_outputs.append(out)

    return tf.stack(step_outputs, axis=1)
class MyConvRNNModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv RNN layer statically unrolled using only the first
    min_length positions."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        rnn_ins = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
        # Truncate everything to hp.min_length so static unrolling is possible.
        rnn_ins, features = self.split_to_min_length(rnn_ins, features)
        cell = rnn_cells.ConvRNNCell(input_shape=[hp.num_filters, board_size, board_size],
                                     output_channels=hp.num_dense_filters,
                                     kernel_shape=[3, 3],
                                     activation=tf.nn.relu)
        init_state = cell.zero_state(hp.batch_size, tf.float32)
        rnn_outputs = static_rnn(cell, rnn_ins, init_state, hp.min_length, "my_conv_rnn")
        rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class MyConvLSTMModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv LSTM RNN layer statically unrolled using only the first
    min_length positions."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        rnn_ins = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
        # Truncate everything to hp.min_length so static unrolling is possible.
        rnn_ins, features = self.split_to_min_length(rnn_ins, features)
        # Conv2DLSTMCell expects channels-last (see input_shape below), so move
        # channels to the last axis before the unroll and back afterwards.
        rnn_ins = tf.transpose(rnn_ins, perm=[0, 1, 3, 4, 2])
        cell = tf.contrib.rnn.Conv2DLSTMCell(input_shape=[board_size, board_size, hp.num_filters],
                                             kernel_shape=[3, 3],
                                             output_channels=hp.num_dense_filters,
                                             use_bias=False,
                                             skip_connection=False)
        init_state = cell.zero_state(hp.batch_size, tf.float32)
        rnn_outputs = static_rnn(cell, rnn_ins, init_state, hp.min_length, "my_conv_lstm")
        rnn_outputs = tf.transpose(rnn_outputs, perm=[0, 1, 4, 2, 3])
        rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class MyConvGRUModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv GRU RNN layer statically unrolled using only the first
    min_length positions."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        rnn_ins = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
        # Truncate everything to hp.min_length so static unrolling is possible.
        rnn_ins, features = self.split_to_min_length(rnn_ins, features)
        # Cell is configured channels-first, matching rnn_ins -- no transpose.
        cell = rnn_cells.ConvGRUCell(input_shape=[board_size, board_size],
                                     kernel_shape=[3, 3],
                                     output_channels=hp.num_dense_filters,
                                     use_bias=False,
                                     normalize=False,
                                     data_format='channels_first')
        init_state = cell.zero_state(hp.batch_size, tf.float32)
        rnn_outputs = static_rnn(cell, rnn_ins, init_state, hp.min_length, "my_conv_gru")
        rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
class BNConvGRUModel(GoModelRNN):
    """Model as in AlphaGo Zero paper but adding a Conv GRU RNN layer statically unrolled using only the first
    min_length positions."""

    def body(self, features):
        hp = self.hparams
        board_size = hp.board_size
        is_training = hp.mode == tf.estimator.ModeKeys.TRAIN
        inputs = features["inputs"]
        # Collapse batch and time: each position is a 3-plane board image.
        inputs = tf.reshape(inputs, [-1, 3, board_size, board_size])
        with tf.variable_scope("conv_block"):
            out = self.conv_block_in(inputs)
        for i in range(hp.num_res_blocks):
            with tf.variable_scope("residual_block_{}".format(i+1)):
                out = self.residual_block(out)
        rnn_ins = tf.reshape(out, [-1, self.max_game_length, hp.num_filters, board_size, board_size])
        # Truncate everything to hp.min_length so static unrolling is possible.
        rnn_ins, features = self.split_to_min_length(rnn_ins, features)
        # This cell works channels-last, so move channels to the last axis.
        rnn_ins = tf.transpose(rnn_ins, perm=[0, 1, 3, 4, 2])
        cell = rnn_cells.BNConvGRUCell(input_shape=[board_size, board_size],
                                       kernel_shape=[3, 3],
                                       output_channels=hp.num_dense_filters,
                                       use_bias=True,
                                       max_bn_steps=hp.min_length,
                                       training=is_training,
                                       activation=tf.nn.relu)
        init_state = cell.zero_state(hp.batch_size, tf.float32)
        # The appended 0 is presumably a time-step counter consumed by the
        # cell's per-step batch norm (cf. max_bn_steps) -- confirm in rnn_cells.
        init_state = (init_state, 0)
        rnn_outputs = static_rnn(cell, rnn_ins, init_state, hp.min_length, "bn_conv_gru")
        rnn_outputs = tf.transpose(rnn_outputs, perm=[0, 1, 4, 2, 3])
        rnn_outputs = tf.reshape(rnn_outputs, [-1, hp.num_dense_filters, board_size, board_size])
        return rnn_outputs
| StarcoderdataPython |
6436600 | <reponame>Xrenya/algorithms
# Read the unit price (whole roubles, then kopecks) and the quantity.
rub = int(input())
kop = int(input())
num = int(input())
def price(rub, kop, num):
    """Compute the total cost of `num` items priced at `rub` roubles and
    `kop` kopecks each.

    Args:
        rub: rouble part of the unit price (non-negative int).
        kop: kopeck part of the unit price (non-negative int).
        num: number of items (non-negative int).

    Returns:
        str: total price formatted as "<roubles> <kopecks>", where kopecks
        is the 0-99 remainder (not zero-padded, matching the original output).
    """
    # Work in kopecks and let divmod do the carry; the original computed the
    # remainder as `kops - kops // 100 * 100`, a convoluted spelling of `% 100`.
    rubles, kops = divmod((rub * 100 + kop) * num, 100)
    return f"{rubles} {kops}"
# Print the total price for the requested quantity.
print(price(rub, kop, num))
| StarcoderdataPython |
256455 | import os
from djangobench.base_settings import * # NOQA
# Internationalisation: translations off, locale-aware formatting (l10n) on,
# so this benchmark isolates the cost of localized template rendering.
USE_I18N = False
USE_L10N = True

# NOTE(review): TEMPLATE_DIRS is the legacy (pre-TEMPLATES) setting and the
# same directory is listed again in TEMPLATES['DIRS'] below -- presumably kept
# for compatibility with older Django versions; confirm before removing.
TEMPLATE_DIRS = (
    os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')),
)

INSTALLED_APPS = ['l10n_render', 'django.contrib.auth', 'django.contrib.contenttypes']

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.abspath(os.path.join(os.path.dirname(__file__), 'templates')),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
| StarcoderdataPython |
8169183 | # Approach 1 - Ordered Dictionary
# Time: O(1)
# Space: O(capacity)
from collections import OrderedDict
class LRUCache(OrderedDict):
    """Least-recently-used cache of fixed capacity built on OrderedDict.

    The ordered dict's insertion order doubles as the recency order: the
    front holds the least recently used key, the back the most recent.
    Both operations run in O(1).
    """

    def __init__(self, capacity: int):
        # Maximum number of entries retained before eviction.
        self.capacity = capacity

    def get(self, key: int) -> int:
        """Return the cached value for `key` (marking it most recent), or -1."""
        if key in self:
            self.move_to_end(key)
            return self[key]
        return -1

    def put(self, key: int, value: int) -> None:
        """Insert or update `key`, evicting the least recent entry if full."""
        if key in self:
            self.move_to_end(key)
        self[key] = value
        # Drop entries from the least-recent end until within capacity.
        while len(self) > self.capacity:
            self.popitem(last=False)
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value) | StarcoderdataPython |
6650577 | #!/usr/bin/env python3
"""
Author : <NAME> <<EMAIL>>
Date : 2019-04-08
Purpose: Graph through sequences
"""
import argparse
import logging
import os
import sys
from collections import defaultdict
from Bio import SeqIO
# --------------------------------------------------
def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Graph through sequences',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # Required positional: the FASTA file to read records from.
    parser.add_argument('file', metavar='str', help='FASTA file')

    # Size of the k-mer overlap used to join sequences.
    parser.add_argument('-k',
                        '--overlap',
                        metavar='int',
                        type=int,
                        default=3,
                        help='K size of overlap')

    # Flag: write DEBUG-level messages to the log file.
    parser.add_argument('-d', '--debug', action='store_true', help='Debug')

    return parser.parse_args()
# --------------------------------------------------
def warn(msg):
    """Write *msg*, followed by a newline, to standard error."""
    sys.stderr.write(f'{msg}\n')
# --------------------------------------------------
def die(msg='Something bad happened'):
    """Report *msg* on standard error, then exit with status 1."""
    # Inlined the warn() helper: same stderr message, no sibling dependency.
    print(msg, file=sys.stderr)
    sys.exit(1)
# --------------------------------------------------
def find_kmers(seq, k):
    """Find k-mers in string"""
    # Coerce first: Bio sequences are accepted as well as plain strings.
    text = str(seq)
    # Every window of length k; empty when the text is shorter than k.
    return [text[start:start + k] for start in range(len(text) - k + 1)]
# --------------------------------------------------
def main():
    """Make a jazz noise here"""
    args = get_args()
    file = args.file
    k = args.overlap

    # Validate inputs before doing any work.
    if not os.path.isfile(file):
        die('"{}" is not a file'.format(file))

    if k < 1:
        die('-k "{}" must be a positive integer'.format(k))

    # Debug messages go to a hidden log file rather than the console.
    logging.basicConfig(
        filename='.log',
        filemode='w',
        level=logging.DEBUG if args.debug else logging.CRITICAL
    )

    # Map each record's first k-mer -> ids starting with it, and
    # last k-mer -> ids ending with it.
    beginning = defaultdict(list)
    end = defaultdict(list)
    for rec in SeqIO.parse(file, 'fasta'):
        kmers = find_kmers(rec.seq, k)
        beginning[kmers[0]].append(rec.id)
        end[kmers[-1]].append(rec.id)

    logging.debug('beginnings = {}'.format(beginning))
    logging.debug('ends = {}'.format(end))

    # A record ending with k-mer X can be followed by any *other* record
    # beginning with X -- print each such overlapping pair.
    for kmer in end:
        if kmer in beginning:
            for seq_id in end[kmer]:
                for other in beginning[kmer]:
                    if seq_id != other:
                        print(seq_id, other)
# --------------------------------------------------
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
11303236 | <reponame>yut23/Microphysics<filename>python_library/StarKiller/setup.py<gh_stars>10-100
from setuptools import setup, find_packages
# Package metadata for the StarKiller Python bindings.
setup(name='StarKiller',
      version='0.1',
      description='Python interfaces to StarKiller Microphysics',
      url='https://github.com/starkiller-astro/Microphysics',
      author='<NAME>',
      author_email='<EMAIL>',
      license='BSD',
      packages=find_packages(),
      # Ship the non-Python interface/model/example files with the package.
      package_data={"StarKiller": ["burner/*", "eos/*", "network/*", "interfaces/*", "integration/*", "models/*", "examples/*"]},
      install_requires=['numpy', 'matplotlib'],
      zip_safe=False)
| StarcoderdataPython |
9794827 |
import re
from urllib.parse import urlparse
from flask import Blueprint, request
from bs4 import BeautifulSoup
from utils.http import Req, parse_request_parameter
from novels.models import NovelModel
from episodes.models import EpisodeModel, EpisodeTextModel
from sites.narou.parser import NarouNovelIndexParser, NarouNovelEpisodeParser
application = Blueprint('tasks/narou', __name__)
@application.route("/tasks/narou/get_novel_index", methods=['POST'])
def get_novel_index():
    """Fetch a novel index page and upsert the novel plus its episode list.

    Expects a `url` request parameter pointing at the novel's index page.
    Returns the parsed episodes info as a string, or an empty 503 response
    when the page could not be fetched.
    """
    url = parse_request_parameter(request, 'url')

    resp = Req.get(url)
    if resp.status_code != 200:
        # todo: logging
        return '', 503

    parser = NarouNovelIndexParser(url, html=resp.text)

    novel = parser.parse_novel_info()
    # todo: create attr class
    _upsert(NovelModel, novel["novel_id"], novel)

    episodes = parser.parse_episodes_info()
    # todo: use batch write
    # todo: enqueue get_body when updated
    # todo: create attr class
    # todo: single episode
    # todo: maybe cause DeadlineExceededException
    for episode in episodes["episodes"]:
        _upsert(EpisodeModel, episode["episode_id"], episode)

    # Reuse the already-parsed result instead of parsing the page a second
    # time (the original called parse_episodes_info() again here).
    return str(episodes)


def _upsert(model_cls, entity_id, data):
    """Create a `model_cls` entity from `data`, or update the existing one
    only when its contents actually changed (avoids redundant writes)."""
    entity = model_cls.get(entity_id)
    if entity:
        entity.assign(data)
        if entity.is_changed():
            entity.put()
    else:
        entity = model_cls.from_dict(data)
        entity.put()
@application.route("/tasks/narou/get_episode_text", methods=['POST'])
def get_episode_text():
    """Fetch one episode page and upsert its parsed text, keyed by episode_id."""
    episode_id = parse_request_parameter(request, 'episode_id')
    url = parse_request_parameter(request, 'url')

    resp = Req.get(url)
    if resp.status_code != 200:
        # todo: logging
        return '', 503

    # Parse the page and tag the result with the id it will be stored under.
    episode = NarouNovelEpisodeParser(url, html=resp.text).parse()
    episode["episode_id"] = episode_id

    record = EpisodeTextModel.get(episode_id)
    if not record:
        # First time we see this episode: create and persist it.
        EpisodeTextModel.from_dict(episode).put()
    else:
        # Existing record: write back only when the contents changed.
        record.assign(episode)
        if record.is_changed():
            record.put()

    return str(episode)
| StarcoderdataPython |
9714976 | from tests.graph_case import GraphTestCase
class TestOutlookContacts(GraphTestCase):
    """Integration tests for Outlook contacts via the Microsoft Graph client.

    Methods are numbered (test0..test4) because they depend on running in
    order: create first, then read, update and finally delete.
    """

    def test0_ensure_user_context(self):
        # Sanity check that the client is authenticated as a real user.
        whoami = self.client.me.get().execute_query()
        self.assertIsNotNone(whoami.id)

    def test1_create_contacts(self):
        contact_info = {
            "givenName": "Pavel",
            "surname": "Bansky",
            "emailAddresses": [
                {
                    "address": "<EMAIL>",
                    "name": "<NAME>"
                }
            ],
            "businessPhones": [
                "+1 732 555 0102"
            ]
        }
        contact = self.client.me.contacts.add_from_json(contact_info).execute_query()
        self.assertIsNotNone(contact.properties["givenName"])

    def test2_get_contacts(self):
        # At least the contact created in test1 should be present.
        contacts = self.client.me.contacts.get().execute_query()
        self.assertGreaterEqual(len(contacts), 1)

    def test3_update_contact(self):
        results = self.client.me.contacts.top(1).get().execute_query()
        if len(results) == 1:
            contact = results[0]
            self.assertIsNotNone(contact.id)
            contact.set_property("department", "Media").update().execute_query()

    def test4_delete_contact(self):
        results = self.client.me.contacts.top(1).get().execute_query()
        if len(results) == 1:
            contact = results[0]
            contact.delete_object().execute_query()
            # verify the contact is no longer returned
            contacts = self.client.me.contacts.get().execute_query()
            results = [c for c in contacts if c.id == contact.id]
            self.assertEqual(len(results), 0)
| StarcoderdataPython |
1812844 | <reponame>konstantinKim/vd-backend<gh_stars>0
from flask import Flask, Response
from flask_restful import Resource, Api
from flask_cors import CORS, cross_origin
from flask.ext.sqlalchemy import SQLAlchemy
class MyResponse(Response):
    """Flask response class whose default mimetype is JSON, so views can
    return raw JSON strings without setting headers themselves."""
    default_mimetype = 'application/json'
# http://flask.pocoo.org/docs/0.10/patterns/appfactories/
def create_app(config_filename):
    """Application factory: build and configure the Flask app.

    Args:
        config_filename: import path of the configuration object to load.

    Returns:
        The configured Flask application with all API blueprints registered
        under /api/v1/.
    """
    app = Flask(__name__)
    CORS(app)
    app.config.from_object(config_filename)
    # All responses default to JSON (see MyResponse above).
    app.response_class = MyResponse

    # from app.users.models import db
    # db.init_app(app)
    # NOTE(review): this SQLAlchemy instance is never used or returned --
    # presumably models create their own binding; confirm before removing.
    db = SQLAlchemy(app)

    # Blueprints
    from app.auth.views import auth
    app.register_blueprint(auth, url_prefix='/api/v1/auth')

    from app.projects.views import projects
    app.register_blueprint(projects, url_prefix='/api/v1/projects')

    from app.haulers.views import haulers
    app.register_blueprint(haulers, url_prefix='/api/v1/haulers')

    from app.ticketsRd.views import tickets_rd_bp
    app.register_blueprint(tickets_rd_bp, url_prefix='/api/v1/tickets_rd')

    from app.ticketsSr.views import tickets_sr_bp
    app.register_blueprint(tickets_sr_bp, url_prefix='/api/v1/tickets_sr')

    from app.materials.views import materials
    app.register_blueprint(materials, url_prefix='/api/v1/materials')

    from app.facilities.views import facilities
    app.register_blueprint(facilities, url_prefix='/api/v1/facilities')

    from app.statistics.views import statistics
    app.register_blueprint(statistics, url_prefix='/api/v1/statistics')

    from app.haulersImages.views import haulers_images_bp
    app.register_blueprint(haulers_images_bp, url_prefix='/api/v1/haulers_images')

    return app
| StarcoderdataPython |
3329884 | import random
from audiomate.corpus.subset import utils
def run_split_identifiers():
    """Benchmark workload: split 10k identifiers into 20/20/60 subsets."""
    ids = list(range(10000))
    shares = {'a': 0.2, 'b': 0.2, 'c': 0.6}
    utils.split_identifiers(ids, shares)
def test_split_identifiers(benchmark):
    # pytest-benchmark fixture: repeatedly times the workload above.
    benchmark(run_split_identifiers)
def run_get_identifiers_splitted_by_weights():
    """Benchmark workload: weighted 20/20/60 split of 100k identifiers."""
    # Each identifier gets a random weight per category (iteration order
    # 'a', 'b', 'c' matches the original dict literal, so the random
    # sequence consumed is identical).
    identifiers = {
        str(index): {category: random.randint(2, 10) for category in ('a', 'b', 'c')}
        for index in range(100000)
    }
    proportions = {'a': 0.2, 'b': 0.2, 'c': 0.6}
    utils.get_identifiers_splitted_by_weights(identifiers, proportions)
def test_get_identifiers_splitted_by_weights(benchmark):
    # pytest-benchmark fixture: repeatedly times the workload above.
    benchmark(run_get_identifiers_splitted_by_weights)
| StarcoderdataPython |
5142602 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
# Lint as: python3
"""Executor for BenchmarkResultPublisher."""
from typing import Any, Dict, List, Text
from nitroml.benchmark import result as br
from nitroml.benchmark.result_publisher import serialize
import tensorflow.compat.v2 as tf
import tensorflow_model_analysis as tfma
from tfx.dsl.components.base import base_executor
from tfx.types import artifact_utils
from tfx.types.artifact import Artifact
class BenchmarkResultPublisherExecutor(base_executor.BaseExecutor):
  """Executor for BenchmarkResultPublisher."""

  def Do(self, input_dict: Dict[Text, List[Artifact]],
         output_dict: Dict[Text, List[Artifact]],
         exec_properties: Dict[Text, Any]) -> None:
    """Take evaluator output and publish results to MLMD.

    It updates custom properties of BenchmarkResult artifact to contain
    benchmark results.

    Args:
      input_dict: Input dict from input key to a list of artifacts, including:
        - evaluation: Model evaluation results.
      output_dict: Output dict from key to a list of artifacts, including:
        - benchmark_result: `BenchmarkResult` artifact.
      exec_properties: A dict of execution properties, including either one of:
        - benchmark_name: An unique name of a benchmark.

    Raises:
      ValueError: If evaluation uri doesn't exists.
    """
    uri = artifact_utils.get_single_uri(input_dict['evaluation'])
    if not tf.io.gfile.exists(uri):
      raise ValueError('The uri="{}" does not exist.'.format(uri))

    benchmark_result = artifact_utils.get_single_instance(
        output_dict['benchmark_result'])
    # Record which benchmark and which run produced this artifact.
    benchmark_result.set_string_custom_property(
        br.BenchmarkResult.BENCHMARK_NAME_KEY,
        exec_properties['benchmark_name'])
    benchmark_result.set_int_custom_property(
        br.BenchmarkResult.BENCHMARK_RUN_KEY, exec_properties['run'])
    benchmark_result.set_int_custom_property(
        br.BenchmarkResult.RUNS_PER_BENCHMARK_KEY, exec_properties['num_runs'])

    # Publish evaluation metrics
    evals = self._load_evaluation(uri)
    for name, val in evals.items():
      # TODO(b/151723291): Use correct type instead of string.
      benchmark_result.set_string_custom_property(name, str(val))

    context_properties = serialize.decode(exec_properties['additional_context'])
    # TODO(b/175802446): Add additional properties storing
    # `additional_context` and `metric` keys so user can distinguish between
    # custom properties.
    for name, val in context_properties.items():
      # TODO(b/151723291): Use correct type instead of string.
      benchmark_result.set_string_custom_property(name, str(val))

  def _load_evaluation(self, file_path: Text) -> Dict[str, Any]:
    """Returns evaluations for a benchmark run.

    This method makes following assumptions:
      1. `tf.enable_v2_behavior()` was called beforehand.
      2. file_path points to a dir containing artifacts of single output model.

    Args:
      file_path: A root directory where pipeline's evaluation artifacts are
        stored.

    Returns:
      An evaluation metrics dictionary. If no evaluations found then returns an
      empty dictionary.
    """
    # We assume this is a single output model, hence the following keys are "".
    output_name = ''
    multi_class_key = ''
    eval_result = tfma.load_eval_result(file_path)

    # Slicing_metric is a tuple, index 0 is slice, index 1 is its value.
    _, metrics_dict = eval_result.slicing_metrics[0]

    if output_name not in metrics_dict or multi_class_key not in metrics_dict[
        output_name]:
      raise ValueError('Evaluation can only be loaded for single output model.')
    metrics_dict = metrics_dict[output_name][multi_class_key]

    return {k: v.get('doubleValue') for k, v in metrics_dict.items()}
| StarcoderdataPython |
1866969 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Implement CBC mode
#
# CBC mode is a block cipher mode that allows us to encrypt irregularly-sized
# messages, despite the fact that a block cipher natively only transforms
# individual blocks.
#
# In CBC mode, each ciphertext block is added to the next plaintext block
# before the next call to the cipher core.
#
# The first plaintext block, which has no associated previous ciphertext
# block, is added to a "fake 0th ciphertext block" called the initialization
# vector, or IV.
#
# Implement CBC mode by hand by taking the ECB function you wrote earlier,
# making it encrypt instead of decrypt (verify this by decrypting whatever
# you encrypt to test), and using your XOR function from the previous
# exercise to combine them.
#
# The file here:
#
# http://cryptopals.com/static/challenge-data/10.txt
#
# is intelligible (somewhat) when CBC decrypted against "YELLOW SUBMARINE"
# with an IV of all ASCII 0 (\x00\x00\x00 &c).
#
import inspect
import os
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(inspect.getfile(lambda: 0)))))
from util.aes_wrappers import aes_cbc_decrypt
from util.loader import loader
def main():
    """Decrypt the challenge ciphertext with AES-CBC and print the plaintext."""
    key = b"YELLOW SUBMARINE"

    # Challenge data is base64-encoded; loader returns raw ciphertext bytes.
    ctext = loader("10.txt", "base64", split=False)
    # Per the printed note below, decryption uses a null (all-zero) IV --
    # presumably aes_cbc_decrypt's default; confirm in util.aes_wrappers.
    ptext = aes_cbc_decrypt(ctext, key)

    print("Decrypted a ciphertext AES-CBC.")
    print("Used a null IV and the following key:", key)
    print()
    print(ptext.decode())
    print()
try:
main()
except KeyboardInterrupt:
pass
# Output:
#
# Decrypted a ciphertext AES-CBC.
# Used a null IV and the following key: b'YELLOW SUBMARINE'
#
# I'm back and I'm ringin' the bell
# A rockin' on the mike while the fly girls yell
# In ecstasy in the back of me
# Well that's my DJ Deshay cuttin' all them Z's
# Hittin' hard and the girlies goin' crazy
# Vanilla's on the mike, man I'm not lazy.
#
# <remainder of output omitted>
#
| StarcoderdataPython |
9759903 | # pyright: reportMissingModuleSource=false
"""
Simple Truss Calculator
Version: 1.5
Source: https://github.com/lorcan2440/Simple-Truss-Calculator
By: <NAME>
Contact: <EMAIL>
Tests: test_TrussCalc.py
Calculator and interactive program for finding internal/reaction forces,
stresses and strains of a pin-jointed, straight-membered, plane truss.
Intended for personal use only; documented but not in a module form.
Soon I hope to make it more user-friendly and interactive.
"""
# builtin utility modules
from typing import Hashable, Union, Optional
from enum import Enum, auto, unique
import warnings
# builtin core modules
import math
import functools
import os
# auto install missing modules, least likely to be already installed first
try:
    import sigfig  # for rounding values nicely
    from scipy.sparse import (csr_matrix, linalg as linsolver)  # for faster solving
    from matplotlib import pyplot as plt  # to display graphical output
    import numpy as np  # to do matrix operations

except ImportError:
    # Best-effort bootstrap: pip-install the missing packages into the
    # running interpreter, then import them for real in `finally`.
    import subprocess
    import sys
    subprocess.check_call([sys.executable, "-m", "pip", "install", "sigfig", "scipy", "numpy", "matplotlib"])
    print(' \t ~~ All dependencies succesfully installed. ~~ \n\n')

finally:
    import numpy as np
    from matplotlib import pyplot as plt
    from scipy.sparse import (csr_matrix, linalg as linsolver)
    import sigfig

# globals: `active_truss: Truss; LEN: float`
# NOTE(review): a `global` statement at module level is a no-op -- these names
# are module globals anyway; kept only as documentation of intent.
global active_truss, LEN
# Utility/helper classes
class ClassIter(type):
    """Metaclass that makes a class iterable over its recorded instances.

    Classes built with this metaclass keep their instances in a class-level
    `_ClassRegistry` list; `iter(SomeClass)` and `len(SomeClass)` then
    delegate to that list. For reference see
    https://codereview.stackexchange.com/questions/126100/recording-all-instances-of-a-class-python
    https://stackoverflow.com/questions/28676399/iteration-over-class-instances-using-iter
    """

    def __iter__(cls):
        yield from cls._ClassRegistry

    def __len__(cls):
        return len(cls._ClassRegistry)
@unique
class SolveMethod(Enum):
    """
    A class to contain the different methods for solving the truss force balance equation
    Ax = B. Can see the different methods using get_constants(SolveMethod).
    """
    # Opaque tags (auto() assigns 1, 2, 3); the solver elsewhere presumably
    # dispatches on which member it receives.
    NUMPY_STD = auto()
    NUMPY_SOLVE = auto()
    SCIPY = auto()
@unique
class Unit(Enum):
    # units of force
    NEWTONS = "N"
    KILONEWTONS = "kN"
    POUND_FORCE = "lbf"

    # units of length
    METRES = "m"
    CENTIMETRES = "cm"
    MILLIMETRES = "mm"
    INCHES = "in"

    # conversion lookup table, all units are converted to metric N-m-Pa internally.
    # value in N-m-Pa = value given * _CONV[unit given]
    # NOTE(review): because this dict is built inside the class body, its keys
    # are the *string values* ("N", "kN", ...), not the Unit members; lookups
    # must therefore use `unit.value`, not the member itself -- confirm call sites.
    _CONV = {
        NEWTONS: 1, KILONEWTONS: 1e3, POUND_FORCE: 0.224809,
        METRES: 1, CENTIMETRES: 1e-2, MILLIMETRES: 1e-3, INCHES: 0.0254,
    }
# MAIN CLASS FOR TRUSSES
@functools.total_ordering
class Truss(metaclass=ClassIter):
"""
A class containing the truss to be worked with.
"""
_ClassRegistry = []
def __init__(self, name: str = "My Truss", bar_params: Optional[dict] = None,
units: tuple[Unit] = (Unit.KILONEWTONS, Unit.MILLIMETRES),
var_name: Optional[str] = None):
"""
Initialise a truss by setting the units system to be used
and the default properties (thickness, modulus etc) which
bars will have when added.
"""
self._ClassRegistry.append(self) # add the new truss object to the list of trusses
# TODO: rewrite this to use class-defined constants
if bar_params is None: # set the units that the calculations should be done in
if units == 'N, m':
# some default values. symbols defined on databook pg. 8
self.default_params = {"b": 0.016, "t": 0.004, "D": 0.020, "E": 2.1e11}
elif units == 'kN, mm':
# same values as above but in other units
self.default_params = {"b": 1.6, "t": 4, "D": 20, "E": 210}
else:
raise ValueError('Units must be either "N, m" or "kN, mm".')
else:
self.default_params = bar_params
self.name = name
self.var_name = var_name
self.results = None
self.units = units
# PARTS OF THE TRUSS (INNER CLASSES)
class Joint(metaclass=ClassIter):
"""
Joints define the locations where other objects can go.
Bars go between two joints.
Each joint can have loads and supports applied.
"""
_ClassRegistry = []
def __init__(self, truss: object, name: str, x: float, y: float,
var_name: Optional[str] = None):
self.name = name
self.var_name = var_name
if self.name not in (i.name for i in self._ClassRegistry):
self._ClassRegistry.append(self)
self.truss = truss
self.x = x
self.y = y
self.loads = {}
class Bar(metaclass=ClassIter):
"""
Bars go between the `first_joint` and the `second_joint`.
Each bar can have different thickness, strength, etc in `my_params`.
"""
_ClassRegistry = []
def __init__(self, truss: object, name: str, first_joint: object, second_joint: object,
my_params: Optional[dict] = None, var_name: Optional[str] = None):
"""
Initialise a bar with a given name, which joints it should connect between, and
its physical properties.
"""
self.name = name # The user-defined name
self.var_name = var_name # the name of the variable this instance is assigned to
if self.name not in (i.name for i in self._ClassRegistry):
self._ClassRegistry.append(self)
# the class which this bar belongs to
self.truss = truss
# the object and name of the connected joints
self.first_joint, self.first_joint_name = first_joint, first_joint.name
self.second_joint, self.second_joint_name = second_joint, second_joint.name
# take the truss's default if bar not given any
self.params = truss.default_params if my_params is None else my_params
# physical and geometric properties of the bar, as defined on databook pg. 8
[setattr(self, attr, self.params[attr]) for attr in ["b", "t", "D", "E", "strength_max"]]
self.length = math.sqrt((self.first_joint.x - self.second_joint.x)**2 + # noqa \
(self.first_joint.y - self.second_joint.y)**2)
self.section_area = (self.b ** 2 - (self.b - self.t) ** 2) * 1.03
self.effective_area = (1.5 * self.b - self.D) * 0.9 * self.t
self.buckling_ratio = self.length / self.b
def get_direction(self, origin_joint: Optional[object] = None, as_degrees: bool = False) -> float:
"""
Calculates the (polar) angle this bar makes with the horizontal,
with the origin taken as the origin_joint. 0 = horizontal right, +pi/2 = vertical up,
-pi/2 = vertical down, pi = horizontal left, etc. (-pi < angle <= pi).
"""
if origin_joint in (connected_joints := self.truss.get_all_joints_connected_to_bar(self)):
other_joint_index = 1 - connected_joints.index(origin_joint)
angle = math.atan2(connected_joints[other_joint_index].y - origin_joint.y,
connected_joints[other_joint_index].x - origin_joint.x)
elif origin_joint is None:
# if no joint is specified, the joint is chosen such that the angle
# is not upside-down (used to allign the text along the bars)
angle_from_first = self.get_direction(self.first_joint, as_degrees=as_degrees)
if as_degrees and -90 < angle_from_first <= 90 or \
not as_degrees and -1 * math.pi / 2 < angle_from_first <= math.pi / 2:
return angle_from_first
else:
return self.get_direction(self.second_joint, as_degrees=as_degrees)
else:
raise SyntaxError(f'The bar "{self.name}" has an invalid origin joint when finding \n'
f'its direction. It should be the objects associated with either \n'
f'{self.first_joint_name} or {self.second_joint_name}.')
return angle if not as_degrees else math.degrees(angle)
class Load(metaclass=ClassIter):
"""
Loads can be applied at any joint.
Their components are specified as (x_comp, y_comp)
aligned with the coordinate system used to define the joints.
"""
_ClassRegistry = []
def __init__(self, name: str, joint: object, x_comp: float = 0.0, y_comp: float = 0.0,
var_name: Optional[str] = None):
"""
Initialise a load with a name, a joint to be applied at, and a force value in terms
of x and y components (as defined by the coordinate and unit systems).
"""
self.name = name
self.var_name = var_name
if self.name not in (i.name for i in self._ClassRegistry):
self._ClassRegistry.append(self)
self.joint = joint
self.x, self.y = x_comp, y_comp
# magnitude of the force in the chosen units system
self.magnitude = math.sqrt(self.x ** 2 + self.y ** 2)
# direction of the force clockwise from the positive x-axis in radians
self.direction = math.atan2(self.y, self.x)
# add this load's components to the joint's dict attribute
joint.loads[self.name] = (self.x, self.y)
class Support(metaclass=ClassIter):
"""
Supports are points from which external reaction forces can be applied.
"""
_ClassRegistry = []
def __init__(self, name: str, joint: object, support_type: str = 'pin',
roller_normal: np.array = None, pin_rotation: float = 0,
var_name: Optional[str] = None):
"""
Initialise a support with a name, a joint object to convert to a support, the type of support
and a direction if a roller joint is chosen.
support_type: can be 'pin' or 'roller' or 'encastre'
roller_normal: only relevant with roller joints, sets the direction of their reaction force
pin_rotation: only relevant with pin joints, sets the direction which they are displayed
"""
self.name = name
self.var_name = var_name
if self.name not in (i.name for i in self._ClassRegistry):
self._ClassRegistry.append(self)
self.joint = joint
self.support_type = support_type
self.pin_rotation = pin_rotation
if roller_normal not in [None, (0, 0)]:
self.roller_normal = np.array(roller_normal) / np.linalg.norm(roller_normal)
self.reaction_direction = math.atan2(*reversed(self.roller_normal))
else:
self.roller_normal = None
self.reaction_direction = None
if self.support_type in {'encastre', 'pin', 'roller'}:
joint.loads[f'Reaction @ {self.name}'] = (None, None)
else:
raise ValueError('Support type must be "encastre", "pin" or "roller".')
# TRUSS RESULTS CLASS
class Result:
"""
Allows the results to be analysed and manipulated.
"""
def __init__(self, truss: object, sig_figs: Optional[int] = None,
solution_method: SolveMethod = SolveMethod.NUMPY_SOLVE,
_delete_truss_after: bool = False, _override_res: Optional[tuple[dict]] = None):
self.truss = truss
self.sig_figs = sig_figs
warnings.filterwarnings('ignore')
if _override_res is None:
self.results = truss.calculate(solution_method=solution_method)
self.tensions, self.reactions, self.stresses, self.strains = {}, {}, {}, {}
self.buckling_ratios = {}
# populate the tensions, reactions, etc. dictionaries from the results
self.get_data(truss)
else:
self.tensions, self.reactions, self.stresses, self.strains, self.buckling_ratios = \
(*_override_res,)
# set the truss's results before rounding but after zeroing small numbers
self.truss.results = {'internal_forces': self.tensions.copy(),
'reaction_forces': self.reactions.copy(),
'stresses': self.stresses.copy(), 'strains': self.strains.copy(),
'buckling_ratios': self.buckling_ratios.copy()}
# round these results to the required precision
self.round_data()
# HACK: clear the truss registry to avoid issues if building another truss
if _delete_truss_after:
truss._delete_truss()
def __repr__(self):
repr_str = f'\n Axial forces are: '\
f'(positive = tension; negative = compression) \n \t {str(self.tensions)}'
repr_str += f'\n Axial stresses are: \n \t {str(self.stresses)}'
repr_str += f'\n Reaction forces are (horizontal, vertical) components (signs '\
f'consistent with coordinate system): \n \t {str(self.reactions)}'
repr_str += f'\n Buckling ratios are: \n \t {str(self.buckling_ratios)}'
repr_str += f'\n Strains are: \n \t {str(self.strains)}'
repr_str += f'\n\n Units are {self.truss.units.split(",")[0]}, values '\
f'{f"not rounded" if self.sig_figs is None else f"rounded to {self.sig_figs} s.f."}'
return repr_str
def round_data(self) -> None:
"""
Replaces the calculated data with rounded values, to precision given by Truss.Result.sig_figs.
"""
for item in list(self.tensions.keys()):
try:
self.tensions[item] = sigfig.round(self.tensions[item], self.sig_figs)
self.stresses[item] = sigfig.round(self.stresses[item], self.sig_figs)
self.strains[item] = sigfig.round(self.strains[item], self.sig_figs)
self.buckling_ratios[item] = sigfig.round(self.buckling_ratios[item], self.sig_figs)
except KeyError:
continue
for item in list(self.reactions.keys()):
try:
self.reactions[item] = (sigfig.round(self.reactions[item][0], self.sig_figs),
sigfig.round(self.reactions[item][1], self.sig_figs))
except KeyError:
continue
def get_data(self, truss: object) -> None:
"""
Calculate tensions, stresses, strains, reaction forces and buckling ratios
from the calculate() function.
"""
# any forces smaller than `SMALL_NUM` will be set to zero (assumed to be due to rounding
# errors in the solver function). Currently set to 10 times smaller than the least
# significant digit of the smallest internal force value.
# NOTE: maybe move this functionality into `round_data()`.
# SMALL_NUM = 1e-8
SMALL_NUM = 0.1 * 10 ** (-1 * self.sig_figs) * min(
[abs(f) for f in self.results.values()
if type(f) is not tuple and f > (0.1 * 10 ** (-1 * self.sig_figs))])
print(SMALL_NUM)
for item in self.results:
if isinstance(self.results[item], float):
if abs(self.results[item]) < SMALL_NUM:
self.tensions.update({item: 0})
else:
self.tensions.update({item: self.results[item]})
self.stresses.update({
item: self.tensions[item] / truss.get_bar_by_name(item).effective_area})
self.strains.update({item: self.stresses[item] / truss.get_bar_by_name(item).E})
self.buckling_ratios.update({item: truss.get_bar_by_name(item).buckling_ratio})
# NOTE: could check if the bar is in compression using: if self.results[item] < 0:
elif isinstance(self.results[item], tuple):
self.reactions.update({item: (
self.results[item][0] if abs(self.results[item][0]) > SMALL_NUM else 0,
self.results[item][1] if abs(self.results[item][1]) > SMALL_NUM else 0)})
else:
warnings.warn(f'''A result appears to have been formed incorrectly. This is an internal
error. Bad value ignored: {self.results[item]}''', RuntimeWarning)
continue
# TRUSS METHODS
    def calculate(self, solution_method: SolveMethod = SolveMethod.SCIPY) -> dict[str, Union[float, tuple]]:
        """
        The main part of the program. Calculates the forces in the truss's bars and supports
        in order to maintain force equilibrium with the given loads. Outputs as a dictionary in the form
        `{bar_name: axial_force_value} + {support_name: (reaction_force_value_x, reaction_force_value_y)}`

        Builds two equilibrium equations (x and y) per joint, assembles them into
        a matrix equation M * x = B, and solves it with the chosen `solution_method`.
        """
        # List of dictionaries for unknowns, given default zero values
        wanted_vars = []
        for bar in self.get_all_bars():
            wanted_vars.append('Tension in ' + bar.name)
        for support in self.get_all_supports():
            # a pin or encastre contributes two unknown reaction components;
            # a roller contributes a single unknown magnitude along its normal
            if support.support_type in {'pin', 'encastre'}:
                wanted_vars.append('Horizontal reaction at ' + support.joint.name)
                wanted_vars.append('Vertical reaction at ' + support.joint.name)
            elif support.support_type == 'roller':
                wanted_vars.append('Magnitude of reaction at ' + support.joint.name)
            else:
                continue
        # map of joint name -> {unknown/load name: direction angle in radians}
        all_directions = {}
        for joint in self.get_all_joints():
            # Reset the directions dictionary for this joint
            directions = {}
            connected_bars = self.get_all_bars_connected_to_joint(joint)
            # Get the anticlockwise (polar) angle of each connected joint relative to this joint which have bars
            for bar in connected_bars:
                angle = bar.get_direction(joint)
                directions['Tension in ' + bar.name] = angle
            # If there are reactions at this joint, store their directions too
            if any([s.joint.name == joint.name for s in self.get_all_supports()]):
                if self.get_support_by_joint(joint).support_type == 'roller':
                    directions['Magnitude of reaction at ' + joint.name] = math.atan2(
                        *reversed(list(self.get_support_by_joint(joint).roller_normal)))
                else:
                    # pin/encastre reactions resolved into horizontal (0) and vertical (pi/2)
                    directions['Horizontal reaction at ' + joint.name] = 0
                    directions['Vertical reaction at ' + joint.name] = math.pi / 2
            # If there are external loads at this joint, store their directions too
            for load in self.get_all_loads_at_joint(joint):
                directions['Horizontal component of {} at {}'.format(load.name, joint.name)] = 0
                directions['Vertical component of {} at {}'.format(load.name, joint.name)] = math.pi / 2
            all_directions[joint.name] = directions
        # Populate the coefficients and constants matrices (initially lists of lists)
        # in preparation to solve the matrix equation M * x = B
        coefficients, constants = [], []
        for joint_name in self.get_all_joints(str_names_only=True):
            # get the coefficients (matrix M), representing the unknown internal/reaction forces
            # (the .get() defaults of pi/2 and 0 make absent unknowns contribute a zero
            # coefficient in the x- and y-equations respectively)
            current_line = [round(math.cos(all_directions[joint_name].get(var, math.pi / 2)), 10)
                            for var in wanted_vars]
            coefficients.append(current_line)
            current_line = [round(math.sin(all_directions[joint_name].get(var, 0)), 10) for var in wanted_vars]
            coefficients.append(current_line)
            # get the constants (vector B), representing the external loads, -ve since on other side of eqn
            loads_here = self.get_all_loads_at_joint_by_name(joint_name)
            constants.append([-1 * sum([load.x for load in loads_here])])
            constants.append([-1 * sum([load.y for load in loads_here])])
        # Sanitise load data
        for i in range(len(constants)):
            if constants[i] == [] or constants[i] == [None]:
                constants[i] = [0]
        # Solve the system - both coefficient and constant matrices are sparse (for most practical cases)
        # so ideally the SCIPY method is faster. NOTE: However testing showed that the difference is not huge,
        # possibly because the solution itself is not sparse.
        if solution_method is SolveMethod.NUMPY_STD:
            m, b = np.matrix(coefficients), np.matrix(constants)
            x = np.linalg.inv(m) * b
        elif solution_method is SolveMethod.NUMPY_SOLVE:
            m, b = np.matrix(coefficients), np.matrix(constants)
            x = np.linalg.solve(m, b)
        elif solution_method is SolveMethod.SCIPY:
            m, b = csr_matrix(coefficients), csr_matrix(constants)
            x = linsolver.spsolve(m, b)
        else:
            raise SyntaxError(f"The solution method {solution_method} is not supported. \n"
                              f"The allowed methods are (either using constants or string literals): \n"
                              f"{get_constants(SolveMethod)}\n"
                              f"For example: \t solution_method=SolveMethod.NUMPY_SOLVE \t or \t"
                              f"solution_method='numpy_solve'")
        # Match values back to variable names
        # NOTE(review): the for/else leaves `_i` at the LAST bar's index, so the first
        # support initially reads x[i] (the last bar's tension) — looks like an
        # off-by-one (`i + 1` expected). It is masked because all support reactions
        # are overwritten by the correction loop below; also NameError if there are
        # no bars at all. Confirm before relying on these intermediate values.
        output_dict = {}
        for i, bar in enumerate(self.get_all_bars()):
            output_dict[bar.name] = float(x[i])
        else:
            _i = i
        for support in self.get_all_supports():
            output_dict[support.name] = (float(x[_i]), float(x[_i + 1]))
            _i += 2
        # HACK: For whatever reason, sometimes the pin jointed reaction forces are wrong.
        # Couldn't be bothered fixing the root cause so correct them here by resolving at the supports.
        for support in self.get_all_supports():
            reaction_corrected = [0, 0]
            for bar in self.get_all_bars_connected_to_joint(support.joint):
                angle = bar.get_direction(support.joint)
                reaction_corrected[0] -= output_dict[bar.name] * math.cos(angle)
                reaction_corrected[1] -= output_dict[bar.name] * math.sin(angle)
            output_dict[support.name] = tuple(reaction_corrected)
        # Return the values in dict form
        return output_dict
def is_statically_determinate(self) -> bool:
"""
Does a simple arithmetic check to estimate if the truss
is statically determinate (b + F = 2j). Also stores attributes for later quick use.
"""
# b: number of bars in the truss
# F: number of degrees of freedom for the reactions at the supports
# j: number of joints in the truss
# if b + F > 2j, the truss is overconstrained, while if b + F < 2j, the truss is a mechanism
self.b = len(self.get_all_bars(str_names_only=True))
self.F = sum([2 if support.support_type in {'encastre', 'pin'}
else 1 if support.support_type == 'roller'
else 0 for support in Truss.Support])
self.j = len(self.get_all_joints(str_names_only=True))
return self.b + self.F == 2 * self.j
    def classify_error_in_truss(self, e: np.linalg.LinAlgError) -> None:
        """
        If there was an exception raised when solving, attempt to find the cause and raise
        a more user-friendly exception message.

        e: the original linear-algebra error from the solver.
        Always raises (ArithmeticError or TypeError); never returns normally.
        """
        valid = self.is_statically_determinate()
        if not valid:
            # the global b + F = 2j count check failed
            raise ArithmeticError(f'''The truss is not statically determinate.
            It cannot be solved. \nBars: {self.b} \t Reactions: {self.F} \t Joints: {self.j}.
            \n b + F = {self.b + self.F}, 2j = {2 * self.j}''')
        elif str(e) == "Singular matrix":
            # globally determinate but locally ill-conditioned
            raise TypeError('''
            The truss contains mechanistic and/or overconstrained components despite
            being globally statically determinate. It cannot be solved.''')
        else:
            raise TypeError("Something else went wrong. Requires attention.")
    def dump_truss_to_json(self, filedir: Optional[str] = None, filename: Optional[str] = None) -> None:
        """
        Writes the details of the truss, with the results if available, to
        a JSON file which can be read using `load_truss_from_json()`.
        NOTE: If this truss is deleted before this function is called, only the results
        will be available.

        filedir: optional output directory, created if it does not already exist.
        filename: optional output file name; defaults to `<var_name>.json`.
        """
        import json
        import os
        # create the output directory if specified and it does not already exist
        if filedir is not None:
            if not os.path.exists(filedir):
                os.mkdir(filedir)
        # set the file name as the truss's var_name
        out_file_dir = os.path.join('' if filedir is None else filedir,
                                    (str(self.var_name) + '.json') if filename is None else filename)
        # fill out the dictionary, using dict.get() where values may be unavailable (will appear as nulls)
        json_dict = {
            'truss': {
                'name': self.name,
                'var_name': self.var_name,
                'default_bar_params': {'b': self.default_params.get('b'), 't': self.default_params.get('t'),
                                       'D': self.default_params.get('D'), 'E': self.default_params.get('E'),
                                       'strength_max': self.default_params.get('strength_max')},
                'units': self.units
            },
            'joints': [
                {'name': j.name, 'var_name': j.var_name, 'x': j.x, 'y': j.y} for j in self.get_all_joints()
            ],
            'bars': [
                {'name': b.name, 'var_name': b.var_name,
                 'connected_joint_names': [b.first_joint_name, b.second_joint_name],
                 'bar_params': {'b': b.params.get('b'), 't': b.params.get('t'),
                                'D': b.params.get('D'), 'E': b.params.get('E'),
                                'strength_max': b.params.get('strength_max')}
                 } for b in self.get_all_bars()
            ],
            'loads': [
                {'name': load.name, 'var_name': load.var_name, 'joint_name': load.joint.name,
                 'x': load.x, 'y': load.y} for load in self.get_all_loads()
            ],
            'supports': [
                {'name': s.name, 'var_name': s.var_name, 'joint_name': s.joint.name,
                 'support_type': s.support_type,
                 'roller_normal': tuple(s.roller_normal) if s.roller_normal is not None else None,
                 'pin_rotation': s.pin_rotation} for s in self.get_all_supports()
            ],
            'results': {
                'internal_forces': self.results.get('internal_forces'),
                'reaction_forces': self.results.get('reaction_forces'),
                'stresses': self.results.get('stresses'),
                'strains': self.results.get('strains'),
                'buckling_ratios': self.results.get('buckling_ratios')
            } if self.results is not None else None,
        }
        # write to the chosen JSON file location
        with open(out_file_dir, 'w') as f:
            json.dump(json_dict, f, indent=4)
    @classmethod
    def _delete_truss(cls) -> None:
        """
        Delete the truss and clear the _ClassRegistry when the calculation for a truss
        is done. Required to prevent the _ClassRegistry adding duplicate objects.

        Clears the registries of the Truss class itself and of every inner class
        (Joint, Bar, Load, Support) that defines a `_ClassRegistry`.
        """
        from inspect import isclass
        # resets all of Truss.Joint._class_registry, Truss.Bar._class_registry etc to empty lists
        # (the class dict plus the Truss class itself, filtered down to registry-bearing classes)
        all_objs = {**cls.__dict__, 'Truss': cls}.values()
        current_classes = filter(lambda c: isclass(c) and hasattr(c, '_ClassRegistry'), all_objs)
        for c in current_classes:
            # rebind a fresh list rather than clearing in place
            setattr(c, '_ClassRegistry', [])
    '''
    Allow ordering of the trusses by their position in the _ClassRegistry
    which represents the order they were created in. Used by @functools.total_ordering.
    '''
    def __le__(self, other):
        # NOTE(review): list.index raises ValueError if `other` is not a registered
        # Truss; consider returning NotImplemented for foreign types.
        return self._ClassRegistry.index(self) <= self._ClassRegistry.index(other)
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes Truss instances
        # unhashable — confirm no caller stores trusses in sets or as dict keys.
        return self._ClassRegistry.index(self) == self._ClassRegistry.index(other)
"""
Object/name getters
"""
@staticmethod
def get_all_bars(str_names_only: bool = False) -> Union[list[Bar], set[str]]:
"""
Returns a list of bar objects or set of string names in this truss.
"""
if not str_names_only:
return [bar for bar in Truss.Bar]
else:
return {bar.name for bar in Truss.Bar}
@staticmethod
def get_all_joints(str_names_only: bool = False) -> Union[list[Joint], set[str]]:
"""
Returns a list of all joint objects or strings in this truss.
"""
if not str_names_only:
return [joint for joint in Truss.Joint]
else:
return {joint.name for joint in Truss.Joint}
@staticmethod
def get_all_bars_connected_to_joint(joint: Joint, str_names_only: bool = False) -> Union[list, set]:
"""
Returns a list of bar objects or names which are connected to a given joint object.
"""
if not str_names_only:
return [bar for bar in Truss.Bar
if joint.name in {bar.first_joint.name, bar.second_joint.name}]
else:
return {bar.name for bar in Truss.Bar
if joint.name in {bar.first_joint.name, bar.second_joint.name}}
@staticmethod
def get_all_joints_connected_to_bar(bar: Bar, str_names_only: bool = False) -> tuple[Union[Bar, str]]:
"""
Returns a list of joint objects or names which are connected to a given bar object.
The order is arbitrary but consistent.
"""
if not str_names_only:
return (bar.first_joint, bar.second_joint)
else:
return (bar.first_joint.name, bar.second_joint.name)
@staticmethod
def get_all_loads() -> list[Load]:
"""
Returns a list of load objects in the truss.
"""
return list(Truss.Load)
@staticmethod
def get_all_loads_at_joint(joint: Joint) -> list[Load]:
"""
Returns a list of load objects which are applied at a given joint object.
"""
return list(filter(lambda load: load.joint is joint, Truss.Load))
@staticmethod
def get_all_loads_at_joint_by_name(joint_name: str) -> list[Load]:
"""
Returns a list of load objects which are applied at a given joint name.
"""
return list(filter(lambda load: load.joint.name is joint_name, Truss.Load))
@staticmethod
def get_all_supports(str_names_only: bool = False) -> Union[list[Support], set[str]]:
"""
Returns a list of support objects in the truss.
"""
if not str_names_only:
return list(Truss.Support)
else:
return {s.name for s in Truss.Support}
@staticmethod
def get_support_by_joint(joint: Joint) -> Optional[Support]:
"""
Returns the support object placed at a given joint, or None if there is no support there.
FIXME: if there are multiple supports, returns only the first one, which may be inconsistent.
"""
_supports = list(filter(lambda s: s.joint is joint, Truss.Support))
return _supports[0] if len(_supports) >= 1 else None
@staticmethod
def get_bar_by_name(bar_name: str) -> Bar:
"""
Returns the corresponding bar object given a bar name.
"""
try:
bar = next(b for b in Truss.Bar if b.name is bar_name)
return bar
except StopIteration:
raise ValueError(f'The bar with name {bar_name} does not exist in the truss.')
# TRUSS INNER CLASSES END HERE, MAIN RESULTS FUNCTIONS START HERE
def plot_diagram(truss: Truss, results: Truss.Result,
                 show_reactions: bool = True, _delete_truss_after: bool = True) -> None:
    """
    Create a matplotlib output image showing the truss geometry, annotated with arrows, labels and supports.

    truss: the (solved) truss to draw.
    results: the Truss.Result whose tensions/reactions annotate the figure.
    show_reactions: if True, draw a red arrow per support reaction.
    _delete_truss_after: clear all class registries once the figure is closed.
    """
    global LEN
    # Find a suitable length-scale to make the annotations look nicer.
    # All drawing dimensions are relative to this. As a rough value, this is 10% of the average bar length.
    LEN = [x.length for x in truss.get_all_bars()]
    LEN = np.average(LEN) * 0.1
    # Plot all joints without supports
    # NOTE(review): if every joint has a support this zip-unpacking raises
    # ValueError (nothing to unpack) — confirm that case cannot occur.
    _xjl, _yjl = map(list, zip(*[(joint.x, joint.y) for joint in truss.get_all_joints()
                                 if truss.get_support_by_joint(joint) is None]))
    plt.plot(_xjl, _yjl, 'o', color='black', markersize=5)
    plt.plot(_xjl, _yjl, 'o', color='white', markersize=3.5)  # small circle with white centre
    # Plot all bars
    for bar in truss.get_all_bars():
        rot = bar.get_direction(as_degrees=True)
        norm = math.radians(rot + 90)
        # connect the two joints with a line
        plt.plot([bar.first_joint.x, bar.second_joint.x], [bar.first_joint.y, bar.second_joint.y],
                 label=bar.name + ': ' + str(results.tensions[bar.name]) + ' ' + truss.units.split(',')[0],
                 zorder=0)
        # label the bar with its name
        plt.text((bar.first_joint.x + bar.second_joint.x) / 2 + LEN / 3 * math.cos(norm),
                 (bar.first_joint.y + bar.second_joint.y) / 2 + LEN / 3 * math.sin(norm),
                 bar.name, ha='center', va='center', rotation=rot, rotation_mode='anchor',
                 transform_rotates_text=True)
    # Plot all supports
    # (zero-size markers: drawn only so the reactions appear in the legend)
    for support in truss.get_all_supports():
        plt.plot(support.joint.x, support.joint.y, '*', markersize=0,
                 label=support.name + ': ' + str(results.reactions[support.name]) + ' ' +  # noqa \
                 truss.units.split(',')[0])
    for support in truss.get_all_supports():
        if show_reactions:
            reaction_direction = math.atan2(*reversed(list(results.reactions[support.name])))
            # draw an arrow of fixed length to show the direction of the reaction
            plt.arrow(support.joint.x, support.joint.y,
                      LEN * math.cos(reaction_direction),
                      LEN * math.sin(reaction_direction),
                      head_width=LEN / 5, head_length=LEN / 4, facecolor='red')
        # TODO: if there is another support at this `support.joint`,
        # label it at an angle of `180 + pin_rotation` instead
        label_angle = find_free_space_around_joint(support.joint, results, show_reactions=show_reactions)
        plt.text(support.joint.x + 0.9 * LEN * math.cos(label_angle),
                 support.joint.y + 0.9 * LEN * math.sin(label_angle),
                 support.name, va='center', ha='left' if -90 < math.degrees(label_angle) <= 90 else 'right',
                 label=f'{support.name}: {str(results.reactions[support.name])} {truss.units.split(",")[0]}')
        # draw a icon-like symbol representing the type of support
        # NOTE(review): `draw_support` is a module-level helper defined outside this
        # chunk — verify its signature matches this call.
        # TODO: maybe make this into a matplotlib patch to use it in the legend
        draw_support(support.joint.x, support.joint.y, LEN * 0.9,
                     support_type=support.support_type, roller_normal=support.roller_normal,
                     pin_rotation=support.pin_rotation)
    # Plot all loads
    for load in truss.get_all_loads():
        # draw an arrow of fixed length to show the direction of the load force
        plt.arrow(load.joint.x, load.joint.y, LEN * math.cos(load.direction), LEN * math.sin(load.direction),
                  head_width=LEN / 5, head_length=LEN / 4)
        # TODO: if there is another load at this `load.joint`, label it at the arrow midpoint + normal a bit
        label_angle = find_free_space_around_joint(load.joint, results=results)
        plt.text(load.joint.x + LEN / 3 * math.cos(label_angle),
                 load.joint.y + LEN / 3 * math.sin(label_angle),
                 f'{load.name}: ({str(load.x)}, {str(load.y)}) {truss.units.split(",")[0]}',
                 va='center', ha='left' if -90 < math.degrees(label_angle) <= 90 else 'right')
    # Graphical improvements
    AXES_COLOUR = '#BBBBBB'  # light grey
    plt.title(truss.name)
    plt.legend(loc='upper right')
    plt.autoscale()
    plt.axis('equal')
    plt.xlabel(f'$x$-position / {truss.units.split(",")[1]}')
    plt.ylabel(f'$y$-position / {truss.units.split(",")[1]}')
    ax = plt.gca()
    spines = ax.spines
    spines['right'].set_visible(False)  # make upper-right spines disappear
    spines['top'].set_visible(False)
    spines['left'].set_color(AXES_COLOUR)  # axis lines
    spines['bottom'].set_color(AXES_COLOUR)
    ax.tick_params(axis='x', colors=AXES_COLOUR, grid_alpha=0.5)  # axis ticks and their number labels
    ax.tick_params(axis='y', colors=AXES_COLOUR, grid_alpha=0.5)
    ax.xaxis.label.set_color(AXES_COLOUR)  # axis name labels
    ax.yaxis.label.set_color(AXES_COLOUR)
    set_matplotlib_fullscreen()
    plt.show()
    # HACK: Clear the truss registry to avoid issues if building another truss
    if _delete_truss_after:
        truss._delete_truss()
def load_truss_from_json(file: str, show_if_results: bool = True, set_as_active_truss: bool = True,
                         _delete_truss_after: Optional[bool] = False) -> object:
    """
    Builds a truss from a JSON file provided by `dump_truss_to_json()`.
    If the results are available, they can be showed.

    Rebuilds the truss via the module-level factory helpers (`init_truss`,
    `create_joint`, `create_bar`, `create_load`, `create_support` — defined
    elsewhere in this file), then, if the JSON carries results and
    `show_if_results` is True, prints and plots them without re-solving.
    Returns the active truss, or None if `set_as_active_truss` is False.
    """
    import json
    with open(file) as json_file:
        f = json.load(json_file)
        truss_attr = f['truss']
        init_truss(truss_attr.get('name'), truss_attr.get('default_bar_params'), truss_attr.get('units'),
                   set_as_active_truss, truss_attr.get('var_name'))
        for joint_attr in f['joints']:
            create_joint(joint_attr.get('name'), joint_attr.get('x'), joint_attr.get('y'), active_truss)
        for bar_attr in f['bars']:
            create_bar(bar_attr.get('name'), *bar_attr.get('connected_joint_names'),
                       bar_attr.get('bar_params'), active_truss, bar_attr.get('var_name'))
        for load_attr in f['loads']:
            create_load(load_attr.get('name'), load_attr.get('joint_name'), load_attr.get('x'),
                        load_attr.get('y'), active_truss, load_attr.get('var_name'))
        for supp_attr in f['supports']:
            create_support(supp_attr['name'], supp_attr['joint_name'], supp_attr['support_type'],
                           supp_attr['roller_normal'], supp_attr['pin_rotation'], active_truss,
                           supp_attr['var_name'])
    if show_if_results and (res := f['results']) is not None:
        bar_names = active_truss.get_all_bars(str_names_only=True)
        support_names = active_truss.get_all_supports(str_names_only=True)
        # build a Result directly from the stored values (no re-solve: _override_res)
        truss_results = active_truss.Result(active_truss, sig_figs=3, solution_method=None,
                                            _override_res=(
                                                {bn: res['internal_forces'][bn] for bn in bar_names},
                                                {sn: res['reaction_forces'][sn] for sn in support_names},
                                                {bn: res['stresses'][bn] for bn in bar_names},
                                                {bn: res['strains'][bn] for bn in bar_names},
                                                {bn: res['buckling_ratios'][bn] for bn in bar_names}
                                            )
                                            )
        print(truss_results)
        plot_diagram(active_truss, truss_results, show_reactions=True,
                     _delete_truss_after=_delete_truss_after)
    return get_active_truss() if set_as_active_truss else None
# HELPER AND UTILITY FUNCTIONS
def validate_var_name(var_name: str, allow_existing_vars: bool = True, raise_error: bool = True) -> bool:
    """
    Checks if a var_name, which is used internally to instantiate the
    subclass objects (Joint, Bars, Load, Support) is as valid as if it
    were declared explicitly i.e. var_name = Class(...). They are set using
    globals() where the key is var_name and the object reference is the value.

    Returns True when valid; otherwise either raises NameError (the default)
    or returns False when `raise_error` is False.
    """
    import keyword
    # a name already bound at module level is only a problem when existing
    # variables are disallowed
    if not allow_existing_vars and var_name in globals():
        if raise_error:
            raise NameError(f'A global variable {var_name} (with the value {globals()[var_name]}) is already '
                            f'in use, possibly because it is a builtin. \nIt cannot be used in the truss.')
        return False
    # the name must be a legal, non-reserved identifier without a dunder prefix
    is_bad_identifier = (not var_name.isidentifier()
                         or keyword.iskeyword(var_name)
                         or var_name.startswith('__'))
    if is_bad_identifier:
        if raise_error:
            raise NameError(f'{var_name} is not a valid variable name. \n'
                            'It can only contain alphanumerics and underscores \n'
                            'and cannot start with double underscore (__).')
        return False
    return True
def convert_to_valid_var_name(name: str, allow_existing_vars=True) -> str:
    """
    Given a user-defined name, converts it to a similar looking valid variable name.
    e.g. `convert_to_valid_var_name("My First Truss")` -> "my_first_truss"
    If this already exists and `allow_existing_vars = False`, a number is appended to the name
    to make it distinct, e.g. "my_first_truss_2", "my_first_truss_3", etc.
    """
    import re
    # remove trailing whitespace, convert to lowercase and replace spaces with underscores
    new_name = name.strip().lower().replace(' ', '_')
    # remove non-alphanumeric characters except underscores
    pattern = re.compile(r'[\W]+', re.UNICODE)
    new_name = pattern.sub('', new_name)
    # add a number at the end of the name if it already exists and is needed
    # BUGFIX: the computed suffix was previously discarded (the loop found a free
    # suffix but never appended it to new_name), and the loop tested only general
    # validity instead of whether the suffixed name was actually unused.
    if not allow_existing_vars and new_name in globals():
        suffix = 2
        while not validate_var_name(f'{new_name}_{suffix}',
                                    allow_existing_vars=False, raise_error=False):
            suffix += 1
        new_name = f'{new_name}_{suffix}'
    # double-check the new name is valid
    if validate_var_name(new_name):
        return new_name
    else:
        raise SyntaxError(f'Unable to convert the name {name} to a suitable internal variable name'
                          f'(attempt was {new_name}). Please change to a simpler name and try again.')
def get_constants(cls: type) -> dict[str, Hashable]:
    """
    Return a dict {const_name: const_value} of the constants defined on the
    utility class `cls`. A "constant" is a non-callable, non-underscore
    attribute whose name is entirely upper-case.
    """
    # Instantiate once instead of once per attribute lookup (the original
    # called cls() repeatedly inside the filter and again per value).
    instance = cls()
    return {
        name: getattr(instance, name)
        for name in dir(instance)
        if not name.startswith('_')
        and name == name.upper()
        and not callable(getattr(instance, name))
    }
def set_active_truss(var_name: str) -> None:
    """
    Make the truss stored under the internal name `var_name` the one that
    subsequent building functions operate on.
    """
    # Writing through globals() is equivalent to
    # `global active_truss; active_truss = globals()[var_name]`.
    globals()['active_truss'] = globals()[var_name]
def get_active_truss() -> Optional[Truss]:
    """
    Gets the truss which is currently being built on, or None if there is none.
    NOTE: active_truss is a global var.
    """
    # globals().get() covers both the "exists" and "not set yet" cases in one
    # lookup, replacing the has_active_truss() check plus a second lookup.
    return globals().get('active_truss')
def is_active_truss(var_name: str) -> bool:
    """
    Check whether the truss stored under `var_name` is the one currently
    being built on (identity comparison with the `active_truss` global).
    """
    candidate = globals()[var_name]
    return candidate is active_truss
def has_active_truss() -> bool:
    """
    Determines whether an active truss has been set yet (i.e. whether the
    module-level `active_truss` global exists), returning True or False.
    """
    # `in globals()` tests the keys directly; calling .keys() was redundant.
    return 'active_truss' in globals()
def set_matplotlib_fullscreen() -> None:
    """
    Maximise the current matplotlib figure window, using whichever
    mechanism the active backend supports.
    """
    import os
    from matplotlib import pyplot as plt

    backend = str(plt.get_backend())
    manager = plt.get_current_fig_manager()
    if backend == 'TkAgg':
        # Tk exposes window maximisation differently on Windows vs elsewhere.
        if os.name == 'nt':
            manager.window.state('zoomed')
        else:
            manager.resize(*manager.window.maxsize())
    elif backend == 'wxAgg':
        manager.frame.Maximize(True)
    elif backend in ('Qt4Agg', 'Qt5Agg'):
        manager.window.showMaximized()
    else:
        # NOTE: this raises (not warns) for unsupported backends, matching
        # the original behaviour.
        raise RuntimeWarning(f'The backend in use, {backend}, is not supported in fullscreen mode.')
def find_free_space_around_joint(joint: Truss.Joint, results: Truss.Result = None,
                                 truss: Optional[Truss] = None, show_reactions: bool = True,
                                 as_degrees: bool = False) -> float:
    """
    Helper function to find a place to label text around a joint. Finds a location
    at a fixed small distance from the joint, such that the surrounding bars, loads
    and supports/reaction arrows are as far away as possible.

    Returns the most-free angle in radians (or degrees if `as_degrees`).
    NOTE(review): `results` is only read when the joint has a support and
    `show_reactions` is True; calling with results=None in that case would
    raise AttributeError — confirm callers always pass results then.
    """
    truss = active_truss if truss is None else truss
    support = truss.get_support_by_joint(joint)
    # find the angles occupied due to bars being there
    used_angles = [bar.get_direction(origin_joint=joint)
                   for bar in truss.get_all_bars_connected_to_joint(joint)]
    # find the angles occupied due to load arrows being there
    used_angles += [load.direction for load in truss.get_all_loads_at_joint(joint)]
    # find the angles occupied due to support icons and/or reaction arrows being there
    # TODO: don't add if the reaction force is zero
    if support is not None:
        if show_reactions:
            if support.support_type == 'roller':
                used_angles.append(math.pi + support.reaction_direction)
            # reaction arrow direction from the solved reaction components
            used_angles.append(math.atan2(*reversed(results.reactions[support.name])))
        else:
            if support.support_type == 'pin':
                used_angles.append(math.pi / 2 - support.pin_rotation)
    # sort ascending from 0 to 360 (through 2 * pi)
    used_angles = sorted([i % (2 * math.pi) for i in used_angles])
    # find the angular sizes of the gaps (each entry is the gap ending at index i,
    # wrapping around, hence the modulo)
    differences = [(used_angles[i] - used_angles[i - 1]) % (2 * math.pi) for i in range(len(used_angles))]
    # determine at what angle is the most free
    max_i = differences.index(max(differences))
    most_free_angle = np.average([used_angles[max_i], used_angles[max_i - 1]])
    # if the widest gap wraps past 0/2*pi, the naive midpoint lands on the
    # opposite side, so rotate it back by half a turn
    if used_angles[max_i] < used_angles[max_i - 1]:
        most_free_angle -= math.pi
    return math.degrees(most_free_angle) if as_degrees else most_free_angle
def draw_support(x: float, y: float, size: float, support_type: str = 'pin', pin_rotation: float = 0,
                 roller_normal: np.array = None) -> None:
    """
    Draw a particular type of support, using the standard conventional symbols, on
    the matplotlib truss diagram. If roller is chosen, its direction is
    shown by rotating the drawing. Optional pin rotation in clockwise degrees from vertical.

    NOTE(review): the rotation helper `rot` is only defined when exactly one
    of (pin_rotation != 0, roller_normal is not None) holds; a 'roller' call
    with roller_normal=None would hit a NameError on `rot` below — confirm
    callers always supply roller_normal for rollers.
    """
    # Helper function to rotate the drawing
    if (pin_rotation != 0) ^ (roller_normal is not None):  # either but not both: cannot be encastre
        if support_type == 'roller':
            # angle between the roller normal and the vertical
            a = math.pi / 2 - math.atan2(*reversed(roller_normal))
        elif support_type == 'pin':
            a = math.radians(pin_rotation)
        else:
            raise TypeError(f'''
'The combination of supplied information: support type ({support_type}), pin rotation angle'
'({pin_rotation}) and roller direction ({roller_normal}) is invalid.''')
        # function for rotating a given coordinate tuple _p = (_x, _y) by a radians clockwise about (x, y)
        rot = lambda _p: (x + (_p[0] - x) * math.cos(a) + (_p[1] - y) * math.sin(a),  # noqa
                          y - (_p[0] - x) * math.sin(a) + (_p[1] - y) * math.cos(a))
    if support_type == 'encastre':
        # Encastre symbol: solid line and hashed lines representing ground
        plt.plot((x - size / 2, x + size / 2), (y, y),  # horizontal line
                 linewidth=1, color='black', zorder=0)
        for x_pos in np.linspace(x - 0.3 * size, x + 0.5 * size, 5):
            plt.plot((x_pos, x_pos - size / 5), (y, y - size / 5),  # hashed lines
                     linewidth=1, color='black', zorder=0)
    if (support_type == 'pin' and pin_rotation != 0) or support_type == 'roller':
        # Pre-compute the un-rotated symbol geometry, to be passed through `rot`.
        # NOTE: element indices are
        # 0: triangle top left, 1: triangle bottom left, 2: triangle bottom right, 3: triangle top right
        # 4,5,6,7,8: ground top right diagonal points, 9,10,11,12,13: ground bottom left diagonal points
        # 14: ground left point, 15: ground right point
        _old_pts = [
            (x - size / 20, y - math.sqrt(3) * size / 20),
            (x - (1 / (3 * math.sqrt(3))) * size, y - size / 3),
            (x + (1 / (3 * math.sqrt(3))) * size, y - size / 3),
            (x + size / 20, y - math.sqrt(3) * size / 20)
        ] + [(x_pos, y - (size / 3 if support_type == 'pin' else 8 / 15 * size))
             for x_pos, y_pos in zip(list(np.linspace(x - 0.3 * size, x + 0.5 * size, 5)), [y] * 5)
        ] + [(x_pos - size / 5, y - (8/15 * size if support_type == 'pin' else 11/15 * size))  # noqa \
             for x_pos, y_pos in zip(list(np.linspace(x - 0.3 * size, x + 0.5 * size, 5)), [y] * 5)
        ] + [(x - size / 2, y - (size / 3 if support_type == 'pin' else 8 / 15 * size)),  # noqa
             (x + size / 2, y - (size / 3 if support_type == 'pin' else 8 / 15 * size))]
    if support_type == 'pin':
        if pin_rotation == 0:
            # Pin symbol: triangle resting on ground
            plt.plot((x - size / 20, x - (1 / (3 * math.sqrt(3))) * size,  # equilateral triangle
                      x + (1 / (3 * math.sqrt(3))) * size, x + size / 20),
                     (y - math.sqrt(3) * size / 20, y - size / 3,
                      y - size / 3, y - math.sqrt(3) * size / 20),
                     linewidth=1, color='black', zorder=0)
            plt.gca().add_patch(  # circle pin
                plt.Circle((x, y), size / 10, color='black', linewidth=1, zorder=1))
            plt.gca().add_patch(
                plt.Circle((x, y), size / 14, color='white', linewidth=1, zorder=1))
            plt.plot((x - size / 2, x + size / 2), (y - size / 3, y - size / 3),  # ground
                     linewidth=1, color='black', zorder=0)
            for x_pos in np.linspace(x - 0.3 * size, x + 0.5 * size, 5):
                plt.plot((x_pos, x_pos - size / 5), (y - size / 3, y - 8 / 15 * size),
                         linewidth=1, color='black', zorder=0)
        else:
            # Transform the important points to be plotted
            _new_pts = list(map(rot, _old_pts))
            xtl, ytl = map(list, zip(*_new_pts))
            plt.plot(xtl[0:4], ytl[0:4], linewidth=1, color='black', zorder=0)  # triangle
            plt.gca().add_patch(  # circle pin
                plt.Circle((x, y), size / 10, linewidth=1, zorder=1,
                           color='black'))
            plt.gca().add_patch(
                plt.Circle((x, y), size / 14, linewidth=1, zorder=1,
                           color='white'))
            plt.plot(xtl[14:], ytl[14:], linewidth=1, color='black', zorder=0)  # ground
            # hashed ground lines: pair each top point (4..8) with the
            # diagonally-offset bottom point five indices later (9..13)
            for i, (x_tr, y_tr) in enumerate(_new_pts[4:9]):
                n = i + 4
                plt.plot([x_tr, _new_pts[n + 5][0]], [y_tr, _new_pts[n + 5][1]],
                         linewidth=1, color='black', zorder=0)
    if support_type == 'roller':
        # Roller symbol: pin with wheels, rotated about pin circle to show direction
        # Transform the important points to be plotted
        # NOTE: element indices are (0-15 unchanged) from pin
        # 16: wheel left centre point, 17: wheel right centre point
        _old_pts += [(x - (0.7 / (3 * math.sqrt(3))) * size, y - 13 / 30 * size),
                     (x + (0.7 / (3 * math.sqrt(3))) * size, y - 13 / 30 * size)]
        _new_pts = list(map(rot, _old_pts))
        xtl, ytl = map(list, zip(*_new_pts))
        plt.plot(xtl[0:4], ytl[0:4], linewidth=1, color='black', zorder=0)  # triangle
        plt.gca().add_patch(  # circle pin
            plt.Circle((x, y), size / 10, linewidth=1, zorder=1,
                       color='black'))
        plt.gca().add_patch(
            plt.Circle((x, y), size / 14, linewidth=1, zorder=1,
                       color='white'))
        plt.plot(xtl[14:16], ytl[14:16], linewidth=1, color='black', zorder=0)  # ground
        for i, (x_tr, y_tr) in enumerate(_new_pts[4:9]):
            n = i + 4
            plt.plot([x_tr, _new_pts[n + 5][0]], [y_tr, _new_pts[n + 5][1]],
                     linewidth=1, color='black', zorder=0)
        plt.gca().add_patch(  # wheels
            plt.Circle((xtl[16], ytl[16]), size / 10, color='black', linewidth=1, zorder=1))
        plt.gca().add_patch(
            plt.Circle((xtl[16], ytl[16]), size / 14, color='white', linewidth=1, zorder=1))
        plt.gca().add_patch(
            plt.Circle((xtl[17], ytl[17]), size / 10, color='black', linewidth=1, zorder=1))
        plt.gca().add_patch(
            plt.Circle((xtl[17], ytl[17]), size / 14, color='white', linewidth=1, zorder=1))
# OBJECT BUILDING FUNCTIONS: use from the module
'''
Allows trusses to be constructed with user-defined names instead of fixed variable names.
Objects are still stored internally with names given by `var_name` but displayed to the user as
`joint_name`, `bar_name`, `load_name`, `support_name`.
This is done by directly accessing the globals() dictionary and adding `{var_name : object}` to it.
'''
def init_truss(truss_name: str, bar_params: dict = None, units: str = 'kN, mm',
               set_as_active_truss: bool = True, var_name: str = None, print_info: bool = False) -> None:
    """
    Initialise an empty truss with display name `truss_name`, optionally
    setting the default `bar_params` and the `units` used for calculation
    and display. If a truss with the same display name already exists, its
    internal variable name is reused (the truss is recreated in place).
    """
    if var_name is None:
        var_name = convert_to_valid_var_name(truss_name)
    # Reuse the internal name of an existing truss with the same display name.
    for existing_var, existing_obj in list(globals().items()):
        if isinstance(existing_obj, Truss) and getattr(existing_obj, 'name', None) == truss_name:
            var_name = existing_var
    if validate_var_name(var_name):
        globals()[var_name] = Truss(name=truss_name, bar_params=bar_params, units=units, var_name=var_name)
        if print_info:
            print(f'The truss with name "{globals()[var_name].name}", internally stored as "{var_name}", '
                  f'has been created with bar parameters {bar_params} and units {units}.')
        if set_as_active_truss:
            set_active_truss(var_name)
def create_joint(joint_name: str, x: float, y: float,
                 truss: Optional[Truss] = None, var_name: str = None, print_info: bool = False) -> None:
    """
    Create a joint named `joint_name` at position (x, y) in `truss`
    (defaults to the active truss), storing it globally under `var_name`.
    """
    if truss is None:
        truss = active_truss
    if var_name is None:
        var_name = convert_to_valid_var_name(joint_name)
    if validate_var_name(var_name):
        globals()[var_name] = truss.Joint(truss, joint_name, x, y, var_name=var_name)
        if print_info:
            print(f'The joint with name "{globals()[var_name].name}", internally stored as "{var_name}", '
                  f'has been assigned the location ({globals()[var_name].x}, {globals()[var_name].y})')
def create_bar(bar_name: str, first_joint_name: str, second_joint_name: str,
               params: Optional[dict] = None, truss: Optional[Truss] = None,
               var_name: str = None, print_info: bool = False) -> None:
    """
    Create a bar named `bar_name` between the two joints identified by their
    user-facing names, with optional bar `params`, storing it globally under
    `var_name`.
    """
    if truss is None:
        truss = active_truss
    if var_name is None:
        var_name = convert_to_valid_var_name(bar_name)
    # Map the user-facing joint names back to their internal variable names.
    for candidate_var, candidate_obj in list(globals().items()):
        if isinstance(candidate_obj, Truss.Joint):
            if getattr(candidate_obj, 'name', None) == first_joint_name:
                first_joint_var_name = candidate_var
            if getattr(candidate_obj, 'name', None) == second_joint_name:
                second_joint_var_name = candidate_var
    if validate_var_name(var_name):
        globals()[var_name] = truss.Bar(truss, bar_name, globals()[first_joint_var_name],
                                        globals()[second_joint_var_name], params, var_name=var_name)
        if print_info:
            print(f'The bar with name "{globals()[var_name].name}", internally stored as "{var_name}", '
                  f'has been placed between joints named ({globals()[first_joint_var_name].name}, '
                  f'{globals()[second_joint_var_name].name}), internally stored as '
                  f'({first_joint_var_name}, {second_joint_var_name}).')
def create_load(load_name: str, joint_name: str, x: float, y: float,
                truss: Optional[Truss] = None, var_name: str = None, print_info: bool = False) -> None:
    """
    Create a load named `load_name` with components (x, y), applied at the
    joint identified by its user-facing name, storing it globally under
    `var_name`.
    """
    if truss is None:
        truss = active_truss
    if var_name is None:
        var_name = convert_to_valid_var_name(load_name)
    # Resolve the user-facing joint name to its internal variable name.
    for candidate_var, candidate_obj in list(globals().items()):
        if isinstance(candidate_obj, Truss.Joint) and getattr(candidate_obj, 'name', None) == joint_name:
            joint_var_name = candidate_var
    if validate_var_name(var_name):
        globals()[var_name] = truss.Load(load_name, globals()[joint_var_name], x, y, var_name=var_name)
        if print_info:
            print(f'The load with name "{globals()[var_name].name}", internally stored as "{var_name}", '
                  f'has been applied at joint named {globals()[joint_var_name].name}, '
                  f'internally stored as "{joint_var_name}", with components ({x}, {y}).')
def create_support(support_name: str, joint_name: str, support_type: str,
                   roller_normal: np.array = None, pin_rotation: float = 0,
                   truss: Optional[Truss] = None, var_name: str = None, print_info: bool = False) -> None:
    """
    Create a support named `support_name` of the given `support_type`
    ('encastre', 'pin' or 'roller') at the joint identified by its
    user-facing name, storing it globally under `var_name`.
    """
    if truss is None:
        truss = active_truss
    if var_name is None:
        var_name = convert_to_valid_var_name(support_name)
    # Resolve the user-facing joint name to its internal variable name.
    for candidate_var, candidate_obj in list(globals().items()):
        if isinstance(candidate_obj, Truss.Joint) and getattr(candidate_obj, 'name', None) == joint_name:
            joint_var_name = candidate_var
    if validate_var_name(var_name):
        globals()[var_name] = truss.Support(support_name,
                                            globals()[joint_var_name], support_type=support_type,
                                            roller_normal=roller_normal,
                                            pin_rotation=pin_rotation, var_name=var_name)
        if print_info:
            print(f'The support with name "{globals()[var_name].name}", internally stored as "{var_name}", '
                  f'has been applied at joint named {globals()[joint_var_name].name}, internally stored as '
                  f'"{joint_var_name}", with type "{support_type}" in direction {roller_normal}, '
                  f'and pin rotation {pin_rotation} degrees.')
"""---------------------------------------------------------------------------------------------------------"""
#####################################################
# PROGRAM EXECUTION STARTS HERE #
#####################################################
"""---------------------------------------------------------------------------------------------------------"""
# Fix issue with warning appearing when run from .exe
if os.path.basename(__file__).endswith('.exe'):
warnings.filterwarnings("ignore", "(?s).*MATPLOTLIBDATA.*",
category=UserWarning) # deprecation warning inherits from UserWarning
'''
load_truss_from_json('./Saved Trusses/bridge.json')
'''
if __name__ == "__main__":
# -- An example truss - cantilever used in SDC --
# Define some example bar parameters, four choices of bar
weak = {"b": 12.5, "t": 0.7, "D": 5, "E": 210, "strength_max": 0.216}
medium_1 = {"b": 16, "t": 0.9, "D": 5, "E": 210, "strength_max": 0.216}
medium_2 = {"b": 16, "t": 1.1, "D": 5, "E": 210, "strength_max": 0.216}
strong = {"b": 19, "t": 1.1, "D": 5, "E": 210, "strength_max": 0.216}
# Define some custom bar parameters and initialise the truss
custom_params = weak
init_truss('SDC: Steel Cantilever', bar_params=custom_params, units='kN, mm')
# Step 1. Create the joints
create_joint('Joint A', 0, 0)
create_joint('Joint B', 290, -90)
create_joint('Joint C', 815, 127.5)
create_joint('Joint D', 290, 345)
create_joint('Joint E', 0, 255)
create_joint('Joint F', 220.836, 127.5)
# Step 2. Create the bars
create_bar('Bar AB', 'Joint A', 'Joint B', medium_2)
create_bar('Bar BC', 'Joint B', 'Joint C', strong)
create_bar('Bar CD', 'Joint C', 'Joint D', medium_1)
create_bar('Bar DE', 'Joint D', 'Joint E', medium_1)
create_bar('Bar EF', 'Joint E', 'Joint F', medium_1)
create_bar('Bar AF', 'Joint F', 'Joint A', medium_2)
create_bar('Bar DF', 'Joint F', 'Joint D', medium_1)
create_bar('Bar BF', 'Joint F', 'Joint B', weak)
# Step 3. Create the loads
create_load('W', 'Joint C', 0, -0.675 * 1)
# Step 4. Create the supports
create_support('Support A', 'Joint A', support_type='encastre')
create_support('Support E', 'Joint E', support_type='pin', pin_rotation=90)
# Get the results of the truss calculation and display graphic
try:
my_results = active_truss.Result(active_truss, sig_figs=3, solution_method=SolveMethod.NUMPY_STD)
print(my_results)
except np.linalg.LinAlgError as e:
# The truss was badly made, so could not be solved
active_truss.classify_error_in_truss(e)
# Save truss as a JSON file
active_truss.dump_truss_to_json(filedir='../Saved Trusses')
# Show in a matplotlib window
plot_diagram(active_truss, my_results, show_reactions=True)
# --- file boundary (dataset separator removed) ---
import csv
"""
File: generator_category.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/pceuropa/generator-category-id-and-parent-id
Description: Get list of categories and return name, id, parent_id
"""
class CsvRead(object):
    """Thin CSV helper: stream the first column of a file, or write rows
    (prefixed with a 'name,id,parent_id' header) back out."""

    # Default path; replaced per instance in __init__.
    filename_csv = ''

    def __init__(self, filename=None):
        self.filename_csv = filename

    def toList(self):
        """Yield the first field of every row in the CSV file.

        Any error (missing file, short row, ...) is printed and quietly
        ends the stream, matching the original best-effort behaviour.
        """
        try:
            with open(self.filename_csv, newline='') as handle:
                for record in csv.reader(handle, delimiter=',', quotechar='"'):
                    yield record[0]
        except Exception as exc:
            print(exc)

    def save(self, cat):
        """Write `cat` (an iterable of rows) to the CSV file, preceded by
        a 'name,id,parent_id' header row.

        :cat: iterable of row lists
        """
        with open(self.filename_csv, 'w', newline='') as handle:
            writer = csv.writer(handle, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            writer.writerow(['name', 'id', 'parent_id'])
            writer.writerows(cat)
def generatorIDCategory(list_categories):
    """
    Yield [name, id] for top-level categories and [name, id, parent_id]
    for nested ones, assigning sequential ids starting at 1.

    Each input item is a '/'-separated path, e.g. 'clothes/shoes/boots';
    a child is linked to the id of the most recently seen entry one level
    shallower (0 if no such entry has been seen yet).

    Fixes vs the original: the unused `data = []` initialiser is removed,
    and the fixed-size parent list (which raised IndexError beyond 6
    nesting levels) is replaced by a depth-keyed dict, so arbitrary
    nesting depth is supported.
    """
    next_id = 1
    # depth -> id of the most recent category seen at that depth
    last_id_at_depth = {}
    for category_path in list_categories:
        parts = category_path.split("/")
        depth = len(parts) - 1
        entry = [parts[-1], next_id]
        last_id_at_depth[depth] = next_id
        if depth:
            # 0 when no shallower entry exists, matching original behaviour
            entry.append(last_id_at_depth.get(depth - 1, 0))
        next_id += 1
        yield entry
if __name__ == "__main__":
c = CsvRead('csv/categories_example.csv').toList()
categories = generatorIDCategory(c)
CsvRead('csv/export_categories.csv').save(categories)
# --- file boundary: mmfashion/models/__init__.py ---
from .attr_predictor import *
from .backbones import *
from .builder import (build_attr_predictor, build_backbone, build_concat,
build_embed_extractor, build_global_pool,
build_landmark_detector,
build_landmark_feature_extractor,
build_landmark_regression, build_loss, build_predictor,
build_retriever, build_roi_pool,
build_visibility_classifier,
build_triplet_net,
build_type_specific_net,
build_fashion_recommender)
from .concats import *
from .embed_extractor import *
from .global_pool import *
from .landmark_detector import *
from .landmark_feature_extractor import *
from .landmark_regression import *
from .losses import *
from .predictor import *
from .registry import (ATTRPREDICTOR, BACKBONES, CONCATS, EMBEDEXTRACTOR,
GLOBALPOOLING, LANDMARKDETECTOR, LOSSES, PREDICTOR,
RETRIEVER, ROIPOOLING, RECOMMENDER)
from .retriever import *
from .roi_pool import *
from .visibility_classifier import *
from .type_specific_net import *
from .triplet_net import *
from .fashion_recommender import *
# Explicit public API of mmfashion.models: registry singletons, builder
# helpers, and the classes re-exported by the star-imports above.
__all__ = [
    'BACKBONES', 'GLOBALPOOLING', 'ROIPOOLING', 'CONCATS', 'LOSSES',
    'PREDICTOR', 'RETRIEVER', 'ATTRPREDICTOR', 'EMBEDEXTRACTOR',
    'LANDMARKDETECTOR', 'RECOMMENDER',
    'build_backbone', 'build_global_pool',
    'build_roi_pool', 'build_concat', 'build_attr_predictor',
    'build_embed_extractor', 'build_predictor', 'build_retriever',
    'build_landmark_feature_extractor', 'build_landmark_regression',
    'build_visibility_classifier', 'build_landmark_detector', 'build_loss',
    'build_triplet_net', 'build_type_specific_net',
    'build_fashion_recommender'
]
# --- file boundary (dataset separator removed) ---
import cv2
import numpy as np
from copy_paste import CopyPaste
from coco import CocoDetectionCP
from visualize import display_instances
import albumentations as A
import random
from matplotlib import pyplot as plt
#%%
def xywh2xyxy(x):
    """
    Convert an nx5 array of boxes from [x, y, w, h, cls] to
    [x1, y1, x2, y2, cls], where (x1, y1) is the top-left corner and
    (x2, y2) the bottom-right.

    Fix vs the original: the comment claimed nx4 input, but the code reads
    and copies a 5th column (the class id), so nx5 is required.
    """
    y = np.zeros_like(x)
    y[:, :2] = x[:, :2]                 # top-left corner is unchanged
    y[:, 2:4] = x[:, :2] + x[:, 2:4]    # bottom-right = top-left + size
    y[:, 4] = x[:, 4]                   # class id passes through
    return y
# Augmentation pipeline: large-scale jitter + pad/crop to 512x512, then
# Copy-Paste augmentation (paste objects from one image into another).
transform = A.Compose([
    A.RandomScale(scale_limit=(-0.9, 1), p=1),  # LargeScaleJitter from scale of 0.1 to 2
    A.PadIfNeeded(512, 512, border_mode=0),  # pads with image in the center, not the top left like the paper
    A.RandomCrop(512, 512),
    CopyPaste(blend=True, sigma=1, pct_objects_paste=0.8, p=1.)  # pct_objects_paste is a guess
], bbox_params=A.BboxParams(format="coco", min_visibility=0.05)
)
# COCO-format dataset with Copy-Paste support; paths are relative to the
# working directory — TODO confirm they exist before running.
data = CocoDetectionCP(
    '../data/foof_full_coco/',
    '../data/foof_full_coco/annotations.json',
    transform
)
#%%
path = '../copy-paste-aug/fake_image/'
# Generate 100 augmented images and append each image path plus its boxes
# (" x1,y1,x2,y2,cls" per box) as one line of the label file.
for i in range(100):
    #f, ax = plt.subplots(1, 1, figsize=(16, 16))
    index = random.randint(0, len(data)-1)  # sample a random dataset item
    img_data = data[index]
    image = img_data['image']
    masks = img_data['masks']
    bboxes = img_data['bboxes']
    # keep the first 5 fields (x, y, w, h, cls) and convert to corner form
    bbox = xywh2xyxy(np.array(bboxes)[:, :5]).astype(int)
    cv2.imwrite(path+'{}.jpg'.format(i), cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
    with open('lable.txt', 'a', encoding='utf-8') as f:
        # NOTE(review): 'lable.txt' looks like a typo for 'label.txt', and
        # the inner loop below rebinds `i` (shadowing the image index) —
        # harmless here since `i` is reassigned each outer iteration, but
        # fragile; confirm before extending.
        f.write(path+'{}.jpg'.format(i)+'')
        for i in bbox:
            f.write(" " + ",".join([str(a) for a in i[:5]]))
        f.write('\n')
    # empty = np.array([])
    # #display_instances(image, empty, empty, empty, empty, show_mask=False, show_bbox=False, ax=ax)
    # f1, ax1 = plt.subplots(1, 1, figsize=(16, 16))
    # if len(bboxes) > 0:
    #     boxes = np.stack([b[:4] for b in bboxes], axis=0)
    #     box_classes = np.array([b[-2] for b in bboxes])
    #     mask_indices = np.array([b[-1] for b in bboxes])
    #     show_masks = np.stack(masks, axis=-1)[..., mask_indices]
    #     class_names = {k: data.coco.cats[k]['name'] for k in data.coco.cats.keys()}
    #display_instances(image, boxes, show_masks, box_classes, class_names, show_bbox=True, ax=ax1)
    #else:
    #display_instances(image, empty, empty, empty, empty, show_mask=False, show_bbox=False, ax=ax1)
# --- file boundary (dataset separator removed) ---
import logging
import platform
def createLogger(name, debug: bool = False):
    """
    Create (or reconfigure) a logger that writes DEBUG-level records to a
    temp-directory log file and INFO (or DEBUG) records to stdout.

    :param name: logger name, also used for the log file name.
    :param debug: when True, the logger and the stdout handler run at DEBUG.
    :return: the configured logging.Logger.
    """
    logger = logging.getLogger(name)
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    # Close any handlers from a previous call before discarding them —
    # dropping them without close() leaks the open file descriptors.
    for handler in logger.handlers:
        handler.close()
    logger.handlers = []
    # file handler: always records at DEBUG, regardless of `debug`
    logpath = "/tmp/" + name + ".log"
    if platform.system() == "Windows":
        # NOTE(review): assumes C:\Temp exists on Windows — confirm.
        logpath = "C:\\Temp\\" + name + ".log"
    fhandle = logging.FileHandler(logpath)
    fhandle.setLevel(logging.DEBUG)
    fhandle.setFormatter(
        logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    )
    logger.addHandler(fhandle)
    # stdout handler at the requested level
    stdout = logging.StreamHandler()
    stdout.setLevel(level)
    stdout.setFormatter(logging.Formatter("[+] %(levelname)s - %(name)s - %(message)s"))
    logger.addHandler(stdout)
    logger.debug("Log file for {} :: {}".format(name, logpath))
    return logger
# --- file boundary: tests/unit/test_world_manager.py ---
# # coding: utf-8
# import pytest
# import typing
# from rolling.game.world import WorldManager
# from rolling.kernel import Kernel
# # NOTE: test are based on tile_clutter_capacity config
# class TestWorldManager:
# def _find_available_place_where_drop(
# self,
# world_manager: WorldManager,
# resource_id: typing.Optional[str] = None,
# resource_quantity: typing.Optional[float] = None,
# stuff_id: typing.Optional[str] = None,
# ) -> typing.List[typing.Tuple[typing.Tuple[int, int], typing.Optional[float]]]:
# return world_manager.find_available_place_where_drop(
# resource_id=resource_id,
# resource_quantity=resource_quantity,
# stuff_id=stuff_id,
# world_row_i=1,
# world_col_i=1,
# start_from_zone_row_i=69,
# start_from_zone_col_i=40,
# allow_fallback_on_start_coordinates=False,
# )
# @pytest.mark.parametrize(
# "resource_id,quantity,expected",
# [
# ("WOOD", 0.005, [((69, 40), 0.005)]),
# ("WOOD", 0.05, [((69, 40), 0.02), ((70, 39), 0.02), ((71, 39), 0.01)]),
# ],
# )
# def test_find_available_place_where_drop_when_place_resource_on_full_free_space(
# self,
# worldmapc_kernel: Kernel,
# resource_id: str,
# quantity: float,
# expected: typing.List[
# typing.Tuple[typing.Tuple[int, int], typing.Optional[float]]
# ],
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager,
# resource_id=resource_id,
# resource_quantity=quantity,
# )
# # Then
# assert places == expected
# @pytest.mark.parametrize(
# "resource_id,quantity,expected",
# [
# ("WOOD", 0.005, [((69, 40), 0.002), ((70, 39), 0.003)]),
# (
# "WOOD",
# 0.05,
# [
# ((69, 40), 0.002),
# ((70, 39), 0.02),
# ((71, 39), 0.02),
# ((68, 39), 0.008),
# ],
# ),
# ],
# )
# def test_find_available_place_where_drop_when_place_resource_on_occupied_space(
# self,
# worldmapc_kernel: Kernel,
# resource_id: str,
# quantity: float,
# expected: typing.List[
# typing.Tuple[typing.Tuple[int, int], typing.Optional[float]]
# ],
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# kernel.resource_lib.add_resource_to(
# resource_id="STONE",
# quantity=9,
# ground=True,
# world_row_i=1,
# world_col_i=1,
# zone_row_i=69,
# zone_col_i=40,
# )
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager,
# resource_id=resource_id,
# resource_quantity=quantity,
# )
# # Then
# assert places == expected
# @pytest.mark.parametrize(
# "resource_id,quantity,expected",
# [
# ("WOOD", 0.005, [((69, 40), 0.005)]),
# ("WOOD", 0.05, [((69, 40), 0.02), ((70, 39), 0.02), ((71, 39), 0.01)]),
# ],
# )
# def test_find_available_place_where_drop_when_place_resource_on_walled_space(
# self,
# worldmapc_kernel: Kernel,
# resource_id: str,
# quantity: float,
# expected: typing.List[
# typing.Tuple[typing.Tuple[int, int], typing.Optional[float]]
# ],
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# kernel.build_lib.place_build(
# world_row_i=1,
# world_col_i=1,
# zone_row_i=69,
# zone_col_i=41,
# build_id="STONE_WALL",
# under_construction=False,
# )
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager,
# resource_id=resource_id,
# resource_quantity=quantity,
# )
# # Then
# assert places == expected
# def test_find_available_place_where_drop_when_place_stuff_on_full_free_space(
# self, worldmapc_kernel: Kernel
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager, stuff_id="STONE_HAXE"
# )
# # Then
# assert places == [((69, 40), 1.0)]
# def test_find_available_place_where_drop_when_place_stuff_on_occupied_space(
# self, worldmapc_kernel: Kernel
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# kernel.resource_lib.add_resource_to(
# resource_id="STONE",
# quantity=9,
# ground=True,
# world_row_i=1,
# world_col_i=1,
# zone_row_i=69,
# zone_col_i=40,
# )
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager, stuff_id="STONE_HAXE"
# )
# # Then
# assert places == [((69, 40), 1.0)]
# def test_find_available_place_where_drop_when_place_stuff_on_walled_space(
# self, worldmapc_kernel: Kernel
# ) -> None:
# # Given
# kernel = worldmapc_kernel
# kernel.build_lib.place_build(
# world_row_i=1,
# world_col_i=1,
# zone_row_i=69,
# zone_col_i=41,
# build_id="STONE_WALL",
# under_construction=False,
# )
# # When
# places = self._find_available_place_where_drop(
# kernel.game.world_manager, stuff_id="STONE_HAXE"
# )
# # Then
# assert places == [((69, 40), 1.0)]
# --- file boundary: obsolete/reports/pipeline_chipseq/trackers/PeakCalling.py ---
from ChipseqReport import *
class MacsSummary(DefaultTracker):
    """Tracker reporting the per-track MACS peak-calling summary table,
    plus a link to the peak-shape model PDF when the export dir exists."""
    pattern = "(macs_summary)"

    def getTracks(self, subset=None):
        # One track per row of the summary table.
        return self.getValues("SELECT track FROM macs_summary ORDER BY track")

    def __call__(self, track, slice=None):
        resultsdir = os.path.abspath(os.path.join(EXPORTDIR, "MACS"))
        fields = (
            "called_positive", "called_negative",
            "scan_window", "shift",
            "tag_treatment_total", "tag_treatment_filtered",
            "tag_control_total", "tag_control_filtered",
            "ncandidates_positive", "ncandidates_negative",
            "min_tags",
            "paired_peaks", )
        columns = ",".join(fields)
        row = self.getFirstRow(
            'SELECT %s FROM macs_summary WHERE track="%s"' % (columns, track))
        result = odict(zip(fields, row))
        if os.path.exists(resultsdir):
            result["peakshape"] = "`pdf <%s/%s_model.pdf>`_" % (resultsdir, track)
        return result
class MacsDiagnostics(ChipseqTracker):
    """Report MACS diagnostic curves: for each fold-change cutoff, the peak
    count and the proportion of peaks recovered at increasing read subsets
    (20% .. 90%)."""
    pattern = "(.*)_macsdiag"

    def __call__(self, track, slice=None):
        rows = self.get(
            "SELECT fc,npeaks,p20,p30,p40,p50,p60,p70,p80,p90 FROM %(track)s_macsdiag" % locals())
        result = odict()
        for row in rows:
            fc, npeaks = row[0], row[1]
            proportions = row[2:]
            result[fc] = odict()
            result[fc]["npeaks"] = npeaks
            result[fc]["proportion of reads"] = list(range(20, 100, 10))
            result[fc]["proportion of peaks"] = [float(p) for p in proportions]
        return result
class MacsFiltering(ChipseqTracker, SingleTableTrackerColumns):
    # Expose the macs_fdr table, with one report column per FDR threshold.
    column = "fdr"
    table = "macs_fdr"
# --- file boundary: ORC-RIS/beiwe-backend (Django migration) ---
# Generated by Django 2.2.14 on 2020-10-08 21:02
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration (2.2.14): re-declares
    # ParticipantFcmHistory.token as a unique, indexed CharField(max_length=256)
    # with a minimum-length-1 validator.
    # NOTE: applied migration files should not be hand-edited.

    dependencies = [
        ('database', '0040_fileastext'),
    ]

    operations = [
        migrations.AlterField(
            model_name='participantfcmhistory',
            name='token',
            field=models.CharField(db_index=True, max_length=256, unique=True, validators=[django.core.validators.MinLengthValidator(1)]),
        ),
    ]
# --- file boundary: yocto/poky/bitbake/lib/bb/ui/crumbs/hobcolor.py ---
#
# BitBake Graphical GTK User Interface
#
# Copyright (C) 2012 Intel Corporation
#
# Authored by <NAME> <<EMAIL>>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
class HobColors:
    """Named colour constants (hex strings) used by the Hob GTK UI."""
    # base palette
    WHITE = "#ffffff"
    PALE_GREEN = "#aaffaa"
    ORANGE = "#eb8e68"
    PALE_RED = "#ffaaaa"
    GRAY = "#aaaaaa"
    LIGHT_GRAY = "#dddddd"
    SLIGHT_DARK = "#5f5f5f"
    DARK = "#3c3b37"
    BLACK = "#000000"
    PALE_BLUE = "#53b8ff"
    DEEP_RED = "#aa3e3e"
    KHAKI = "#fff68f"

    # semantic aliases for build-status display
    OK = WHITE
    RUNNING = PALE_GREEN
    WARNING = ORANGE
    ERROR = PALE_RED
# --- file boundary (dataset separator removed) ---
import os
import signal
import subprocess
from contextlib import contextmanager
from dagster import execute_pipeline, seven
from dagster.core.definitions.reconstructable import ReconstructablePipeline
from dagster.core.instance import DagsterInstance
from dagster.core.test_utils import instance_for_test
BUILDKITE = os.getenv("BUILDKITE")
REPO_FILE = os.path.join(os.path.dirname(__file__), "repo.py")
@contextmanager
def tempdir_wrapper(tempdir=None):
    """Yield `tempdir` unchanged when one is supplied (truthy); otherwise
    create a fresh temporary directory for the duration of the context."""
    if not tempdir:
        with seven.TemporaryDirectory() as created:
            yield created
    else:
        yield tempdir
@contextmanager
def _instance_wrapper(instance):
    """Yield the given instance when provided (truthy); otherwise create a
    throwaway test instance via `instance_for_test` for the duration."""
    if not instance:
        with instance_for_test() as created:
            yield created
    else:
        yield instance
@contextmanager
def execute_pipeline_on_celery(
    pipeline_name, instance=None, run_config=None, tempdir=None, tags=None, subset=None
):
    """
    Execute `pipeline_name` (loaded from REPO_FILE) on the Celery executor
    and yield the pipeline result. A temporary directory and a test
    instance are created when none are supplied.
    """
    with tempdir_wrapper(tempdir) as working_dir:
        recon = ReconstructablePipeline.for_file(REPO_FILE, pipeline_name)
        pipeline_def = recon.subset_for_execution(subset)
        with _instance_wrapper(instance) as wrapped_instance:
            # Preserve original `run_config or {...}` semantics: any falsy
            # value (None or {}) gets the default config.
            if not run_config:
                run_config = {
                    "intermediate_storage": {"filesystem": {"config": {"base_dir": working_dir}}},
                    "execution": {"celery": {}},
                }
            yield execute_pipeline(
                pipeline_def, run_config=run_config, instance=wrapped_instance, tags=tags,
            )
@contextmanager
def execute_eagerly_on_celery(pipeline_name, instance=None, tempdir=None, tags=None, subset=None):
    """Execute *pipeline_name* with Celery in task_always_eager mode and yield the result.

    Bug fix: the *tempdir* argument was previously ignored — a fresh temporary
    directory was always created. It is now honored via tempdir_wrapper, so a
    caller-supplied directory is actually used for intermediate storage.
    """
    with tempdir_wrapper(tempdir) as tempdir:
        run_config = {
            "intermediate_storage": {"filesystem": {"config": {"base_dir": tempdir}}},
            "execution": {"celery": {"config": {"config_source": {"task_always_eager": True}}}},
        }
        with execute_pipeline_on_celery(
            pipeline_name,
            instance=instance,
            run_config=run_config,
            tempdir=tempdir,
            tags=tags,
            subset=subset,
        ) as result:
            yield result
def execute_on_thread(pipeline_name, done, instance_ref, tempdir=None, tags=None):
    """Run *pipeline_name* on Celery against the instance behind *instance_ref*,
    then set the *done* event so the spawning thread can observe completion."""
    with DagsterInstance.from_ref(instance_ref) as instance, execute_pipeline_on_celery(
        pipeline_name, instance=instance, tempdir=tempdir, tags=tags
    ):
        done.set()
@contextmanager
def start_celery_worker(queue=None):
    """Launch a single-concurrency dagster-celery worker (optionally bound to
    *queue*) for the duration of the context, terminating it on exit."""
    cmd = ["dagster-celery", "worker", "start", "-A", "dagster_celery.app"]
    if queue:
        cmd += ["-q", queue]
    cmd += ["--", "--concurrency", "1"]
    process = subprocess.Popen(cmd)
    try:
        yield
    finally:
        # Interrupt the worker, wait for it, then make sure it is really gone.
        os.kill(process.pid, signal.SIGINT)
        process.wait()
        subprocess.check_output(["dagster-celery", "worker", "terminate"])
def events_of_type(result, event_type):
    """Return the events in *result* whose event_type_value equals *event_type*."""
    matching = []
    for event in result.event_list:
        if event.event_type_value == event_type:
            matching.append(event)
    return matching
| StarcoderdataPython |
336464 | <filename>src/frameworks_and_drivers/healthchecks/healthchecks.py
from healthcheck import HealthCheck, EnvironmentDump
from src.frameworks_and_drivers.healthchecks.postgres import postgres_healthcheck
from src.frameworks_and_drivers.healthchecks.redis import redis_healthcheck
from src.frameworks_and_drivers.healthchecks.info import application_data
def init_app(app):
    """Register the /healthcheck and /environment endpoints on the Flask *app*."""
    checker = HealthCheck(app, '/healthcheck')
    for check in (redis_healthcheck, postgres_healthcheck):
        checker.add_check(check)
    dump = EnvironmentDump(app, '/environment')
    dump.add_section("application", application_data)
| StarcoderdataPython |
3331513 | import logging
from queue import Queue
import concurrent.futures
"""
Post processing decorater logic for FtpDownloader
"""
class FtpDownloaderPostProcess:
    """Decorator-style wrapper around an FtpDownloader: every item produced by
    the wrapped downloader is handed to *post_processor*."""

    def __init__(self, ftp_downloader, post_processor, num_workers=None, config_dict=None):
        """
        :param ftp_downloader: downloader exposing iterate() and __call__()
        :param post_processor: callable applied to each downloaded item
        :param num_workers: worker-thread count; falls back to config, then 5
        :param config_dict: optional dict with a 'FtpDownloaderPostProcess' section
        """
        self.post_processor = post_processor
        self.ftp_downloader = ftp_downloader
        # Explicit argument wins; otherwise consult the config, then default to 5.
        self.num_workers = num_workers or self._get_from_config(config_dict, "num_workers", 5)

    @staticmethod
    def _get_from_config(config_dict, key, default_value):
        """Return config_dict['FtpDownloaderPostProcess'][key], or *default_value*
        when the dict, section or key is absent."""
        value = default_value
        if config_dict is not None:
            cls_name = "FtpDownloaderPostProcess"
            if config_dict.get(cls_name, None) is not None:
                # Bug fix: previously fell back to a hard-coded 5 here,
                # ignoring the caller-supplied default_value.
                value = config_dict[cls_name].get(key, default_value)
        return value

    @property
    def logger(self):
        return logging.getLogger(__name__)

    def iterate(self, *args, **kwargs):
        """
        Yield items from the wrapped downloader while post-processing them
        concurrently on a pool of worker threads fed through a queue.

        :param args: forwarded to ftp_downloader.iterate
        :param kwargs: forwarded to ftp_downloader.iterate
        """
        q = Queue()
        max_workers = self.num_workers
        with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
            # Set up workers
            futures = []
            for i in range(max_workers):
                futures.append(executor.submit(self._worker, q))
            # Wrap the main task in a try block so that the queue completes
            # regardless of success/failure of the main job.
            try:
                for f in self.ftp_downloader.iterate(*args, **kwargs):
                    q.put(f)
                    yield f
            finally:
                # Poison pill: one None per worker stops every thread.
                # Not using q.join() -- if all workers fail it would hang
                # with items still left in the queue.
                for i in range(max_workers):
                    q.put(None)
                # Propagate any worker exception to the caller.
                for future in futures:
                    future.result()

    def _worker(self, read_queue):
        """Consume items from *read_queue* until a None poison pill arrives,
        post-processing each one; re-raises on failure."""
        while True:
            item = read_queue.get()
            if item is None:
                return
            try:
                self.post_processor(item)
            except Exception as e:
                self.logger.warning("The task has failed with error ..{}".format(e))
                raise e
            read_queue.task_done()

    def __call__(self, *args, **kwargs):
        """Download synchronously, post-process each item sequentially, and
        return the downloaded items."""
        items = self.ftp_downloader(*args, **kwargs)
        for item in items:
            self.post_processor(item)
        return items
| StarcoderdataPython |
6538505 | from bittrex_api import Bittrex
# Usage sample: configure a Bittrex API client, then query v3 market endpoints.
bittrex = Bittrex(
    api_key='', # YOUR API KEY
    secret_key='', # YOUR API SECRET
    max_request_try_count=3, # Max tries for a request to succeed
    sleep_time=2, # sleep seconds between failed requests
    debug_level=3
)
v3 = bittrex.v3
# or
# from bittrex_api import *
# v3 = BittrexV3(
#     api_key='', # YOUR API KEY
#     secret_key='', # YOUR API SECRET
#     max_request_try_count=3, # Max tries for a request to succeed
#     sleep_time=2, # sleep seconds between failed requests
#     debug_level=3,
#     reverse_market_names=True
# )
# V3 Usage samples
from kcu import kjson
MARKET_NAME = 'BTC-XRP'
# Pretty-print market metadata, the 24h summary, and a depth-1 order book.
kjson.print(v3.get_market(market=MARKET_NAME))
kjson.print(v3.get_market_summary(market=MARKET_NAME))
kjson.print(v3.get_orderbook(market=MARKET_NAME, depth=1))
9786544 | <gh_stars>1000+
# Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
import pytest
from molecule import config
from molecule.dependency.ansible_galaxy import collections
@pytest.fixture
def _patched_ansible_galaxy_has_requirements_file(mocker):
    """Patch Collections._has_requirements_file so it reports that a
    requirements file exists; returns the mock for call inspection."""
    m = mocker.patch(
        (
            "molecule.dependency.ansible_galaxy.collections."
            "Collections._has_requirements_file"
        )
    )
    m.return_value = True
    return m
@pytest.fixture
def _dependency_section_data():
    """Sample 'dependency' config section with custom options and env vars."""
    return {
        "dependency": {
            "name": "galaxy",
            "options": {"foo": "bar", "v": True, "role-file": "bar.yml"},
            "env": {"FOO": "bar"},
        }
    }
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_dependency_section_data, patched_config_validate, config_instance):
    """Collections dependency built from the (validation-patched) config."""
    return collections.Collections(config_instance)
@pytest.fixture
def role_file(_instance):
    """Path of the scenario's collections.yml requirements file."""
    return os.path.join(_instance._config.scenario.directory, "collections.yml")
@pytest.fixture
def roles_path(_instance):
    """Path of the ephemeral directory collections are installed into."""
    return os.path.join(_instance._config.scenario.ephemeral_directory, "collections")
def test_config_private_member(_instance):
    """The dependency object must hold a config.Config instance."""
    assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance, role_file, roles_path):
    """Default options point at the scenario requirements file and collections path."""
    expected = {
        "requirements-file": role_file,
        "collections-path": roles_path,
        "force": True,
    }
    assert _instance.default_options == expected
def test_default_env_property(_instance):
    """The default env exposes the standard MOLECULE_* variables."""
    env = _instance.default_env
    for var in (
        "MOLECULE_FILE",
        "MOLECULE_INVENTORY_FILE",
        "MOLECULE_SCENARIO_DIRECTORY",
        "MOLECULE_INSTANCE_CONFIG",
    ):
        assert var in env
def test_name_property(_instance):
    """The dependency reports its name as 'galaxy'."""
    assert _instance.name == "galaxy"
def test_enabled_property(_instance):
    """The dependency is enabled by default."""
    assert _instance.enabled
@pytest.mark.parametrize("config_instance", ["_dependency_section_data"], indirect=True)
def test_options_property(_instance, role_file, roles_path):
    """Options merge the defaults with the dependency section's custom options."""
    expected = {
        "force": True,
        "requirements-file": role_file,
        "collections-path": roles_path,
        "foo": "bar",
        "v": True,
    }
    assert _instance.options == expected
@pytest.mark.parametrize("config_instance", ["_dependency_section_data"], indirect=True)
def test_options_property_handles_cli_args(role_file, roles_path, _instance):
    """--debug on the CLI upgrades verbosity from -v to -vvv."""
    _instance._config.args = {"debug": True}
    expected = {
        "force": True,
        "requirements-file": role_file,
        "collections-path": roles_path,
        "foo": "bar",
        "vvv": True,
    }
    assert _instance.options == expected
@pytest.mark.parametrize("config_instance", ["_dependency_section_data"], indirect=True)
def test_env_property(_instance):
    """Custom env entries from the dependency section are exposed."""
    assert _instance.env["FOO"] == "bar"
@pytest.mark.parametrize("config_instance", ["_dependency_section_data"], indirect=True)
def test_collections_bake(_instance, role_file, roles_path):
    """bake() assembles the full ansible-galaxy collection install command."""
    _instance.bake()
    expected_cmd = [
        "ansible-galaxy",
        "collection",
        "install",
        "--collections-path",
        roles_path,
        "--foo",
        "bar",
        "--force",
        "--requirements-file",
        role_file,
        "-v",
    ]
    assert expected_cmd == _instance._sh_command.cmd
def test_execute(
    patched_run_command,
    _patched_ansible_galaxy_has_requirements_file,
    patched_logger_info,
    _instance,
):
    """execute() creates the collections dir, runs the baked command once, and logs success."""
    _instance._sh_command = "patched-command"
    _instance.execute()
    # The collections directory must have been created as a side effect.
    role_directory = os.path.join(
        _instance._config.scenario.directory, _instance.options["collections-path"]
    )
    assert os.path.isdir(role_directory)
    patched_run_command.assert_called_once_with(
        "patched-command", debug=False, check=True
    )
    msg = "Dependency completed successfully."
    patched_logger_info.assert_called_once_with(msg)
def test_execute_does_not_execute_when_disabled(
    patched_run_command, patched_logger_warning, _instance
):
    """A disabled dependency is skipped with a warning and no command runs."""
    _instance._config.config["dependency"]["enabled"] = False
    _instance.execute()
    assert not patched_run_command.called
    patched_logger_warning.assert_called_once_with("Skipping, dependency is disabled.")
def test_execute_does_not_execute_when_no_requirements_file(
    patched_run_command,
    _patched_ansible_galaxy_has_requirements_file,
    patched_logger_warning,
    _instance,
):
    """Without a requirements file the dependency is skipped with a warning."""
    _patched_ansible_galaxy_has_requirements_file.return_value = False
    _instance.execute()
    assert not patched_run_command.called
    patched_logger_warning.assert_called_once_with("Skipping, missing the requirements file.")
def test_execute_bakes(
    patched_run_command,
    _instance,
    role_file,
    _patched_ansible_galaxy_has_requirements_file,
    roles_path,
):
    """execute() bakes a command implicitly and runs it exactly once.

    The role_file/roles_path fixtures are requested only for their setup side
    effects; they are not referenced directly.
    """
    _instance.execute()
    assert _instance._sh_command is not None
    assert 1 == patched_run_command.call_count
def test_collections_executes_catches_and_exits_return_code(
    patched_run_command, _patched_ansible_galaxy_has_requirements_file, _instance
):
    """A SystemExit raised by the galaxy command propagates its exit code."""
    patched_run_command.side_effect = SystemExit(1)
    with pytest.raises(SystemExit) as excinfo:
        _instance.execute()
    assert excinfo.value.code == 1
def test_setup(_instance):
    """_setup() creates the collections directory when it does not yet exist."""
    collections_dir = os.path.join(
        _instance._config.scenario.directory, _instance.options["collections-path"]
    )
    assert not os.path.isdir(collections_dir)
    _instance._setup()
    assert os.path.isdir(collections_dir)
def test_role_file(role_file, _instance):
    """requirements_file points at the scenario's collections.yml."""
    assert _instance.requirements_file == role_file
def test_has_requirements_file(_instance):
    """A fresh scenario has no collections requirements file on disk."""
    assert not _instance._has_requirements_file()
| StarcoderdataPython |
8120468 | <filename>utils.py
import re,pprint
from nltk.corpus import stopwords
import nltk
import json
import os
import numpy as np
import copy
import xlwt
from collections import defaultdict
def meaningless_words():
    '''
    Return English stopwords plus their word-tokenized pieces (duplicates from
    the base list included). Deleting these can sharpen the bag-of-words
    feature for SVM and LR.
    '''
    pieces = []
    for word in stopwords.words('english'):
        pieces.extend(nltk.word_tokenize(word))
    return list(set(pieces)) + stopwords.words('english')
def normalize(text):
    '''
    Clean a raw post: fix common misspellings, lower-case, strip odd symbols,
    collapse numbers to <NUM>, normalize punctuation, and re-tokenize.
    '''
    # deal with some spell error
    text = re.sub(r'dien\'t', 'did not', text)
    text = re.sub(r' y/o ', ' year old ', text)
    text = re.sub(r' dayy ', ' day ', text)
    text = re.sub(r' sumhow ', ' somehow ', text)
    text = re.sub(r' juss ', ' just ', text)
    text = re.sub(r' wiil ', ' will ', text)
    text = re.sub(r' kry ', ' cry ', text)
    text = re.sub(r' messeges ', ' messages ', text)
    text = re.sub(r' rigjt ', ' right ', text)
    text = re.sub(r' girlfrined ', ' girlfriend ', text)
    text = re.sub(r' mounths ', ' months ', text)
    text = re.sub(r' togheter ', ' together ', text)
    text = re.sub(r' bieng ', ' being ', text)
    text = re.sub(r' evryone ', ' everyone ', text)
    text = re.sub(r' ingnore ', ' ignore ', text)
    text = re.sub(r'ppppppplllllllleeeeeeeeeeeaaaaaaaaaaassssassseeeeeee', ' please ', text)
    text = re.sub(r' veryyyy ', ' very ', text)
    # Bug fix: restore the surrounding spaces so ' realllly ' no longer fuses
    # its neighbouring words together (the pattern consumes both spaces).
    text = re.sub(r' realllly ', ' really ', text)
    text = re.sub(r' [wW]hyyyyy', ' why ', text)
    text = re.sub(r' othr ', ' other ', text)
    text = re.sub(r'T\'was', 'i was', text)
    # Bug fix: the replacement itself was misspelled ('tomarrow').
    text = re.sub(r' tommarow ', ' tomorrow ', text)
    text = re.sub(r' funnily ', ' funny ', text)
    # lower case all words and clean strange symbols
    text = text.lower()
    text = text.replace('\n',' . ')
    text = re.sub(r'[^A-z0-9!?.,\':&]', ' ', text)
    text = text.replace('_', ' ')
    # deal with num
    text = re.sub(r'(^|\s)\d*?[\'.:]\d+[A-z]*?(\s|$)', ' <NUM> ', text)
    text = re.sub(r'(^|\s)\d*[$£]\d+(\s|$)', ' <NUM> ', text)
    text = re.sub(r'\d+', ' <NUM> ', text)
    # deal with special mark
    text = text.replace('&', ' and ')
    text = re.sub(r':\'\(', ' , ', text)
    text = re.sub(r'[([)\]]', ' ', text)
    text = re.sub(r':[A-Z]', ' ', text)
    text = re.sub(r':','', text)
    text = re.sub(r'\*', '', text)
    text = re.sub(r'[/\\]', ' ', text)
    text = re.sub(r', \' \.', ' . ', text)
    text = re.sub(r'&+', ' and ', text)
    text = re.sub(r'(,\s*\.)+', ' . ', text)
    text = re.sub(r'(\.\s*,)+', ' . ', text)
    # add space to marks
    text = re.sub(r',', ' , ', text)
    text = re.sub(r'\.', ' . ', text)
    text = re.sub(r'!', ' ! ', text)
    text = re.sub(r'\?', ' ? ', text)
    text = re.sub(r'\n', ' . ', text)
    # deal with repeating marks
    text = re.sub(r'(!\s+)+', ' ! ', text)
    text = re.sub(r'(\?\s*)+', ' ? ', text)
    text = re.sub(r'(\.\s*)+', ' . ', text)
    text = re.sub(r'(,\s*)+', ' , ', text)
    # join together
    text = nltk.word_tokenize(text) # split original sent
    text = ' '.join(text)
    text = text.replace('< NUM >', '<NUM>')
    return text
def F1_score(pred_prob, true_prob):
    '''
    Compute F1 and related metrics treating label 0 as the positive class:
    a predicted probability <= 0.5 counts as predicting the positive class.

    :param pred_prob: predicted probability list
    :param true_prob: true label list (0 = positive, 1 = negative)
    :return: (f1_score, (precision, recall, accuracy, TP%, FP%, TN%, FN%))
    '''
    TP, FP, FN, TN = 0, 0, 0, 0
    for i, label in enumerate(true_prob):
        if label == 0 and pred_prob[i] <= 0.5:
            TP += 1
        elif label == 0 and pred_prob[i] > 0.5:
            FN += 1
        elif label == 1 and pred_prob[i] <= 0.5:
            FP += 1
        elif label == 1 and pred_prob[i] > 0.5:
            TN += 1
    total_num = len(true_prob)
    assert TP + TN + FP + FN == len(true_prob)
    if TP + FP == 0:
        precision = 0
    else:
        precision = TP / (TP + FP)
    # Bug fix: guard against ZeroDivisionError when there are no positive
    # ground-truth labels (TP + FN == 0), mirroring the precision guard above.
    if TP + FN == 0:
        recall = 0
    else:
        recall = TP / (TP + FN)
    accu = (TP + TN) / (TP + TN + FP + FN)
    if precision + recall == 0:
        f1_score = 0
    else:
        f1_score = 2 * precision * recall / (precision + recall)
    other_metrics = precision, recall, accu, TP / total_num, FP / total_num, TN / total_num, FN / total_num
    return f1_score, other_metrics
def sub_UNK(sent, word_dict):
    """Replace every whitespace-separated token of *sent* that is absent from
    *word_dict* with the '<UNK>' placeholder."""
    return ' '.join(w if w in word_dict else '<UNK>' for w in sent.split())
def generate_configuration(config):
    '''
    Transform string config values into int, float or list where they look
    like one (ints inside a bracketed list when possible, strings otherwise).

    :param config: class configparser (or any mapping of section mappings)
    :return: dict of parsed sections
    '''
    configuration = {}
    for sec_name, section in config.items():
        parsed = {}
        for k, v in section.items():
            if re.match(r'\d+\.\d+', v):
                parsed[k] = float(v)
            elif re.match(r'\d+', v):
                parsed[k] = int(v)
            elif re.match(r'\[.+\]', v):
                items = v.replace("[", "").replace("]", "").replace(" ", "").split(",")
                try:
                    parsed[k] = [int(i) for i in items]
                except ValueError:
                    parsed[k] = list(items)
            else:
                parsed[k] = v
        configuration[sec_name] = parsed
    return configuration
def find_super_category(ontology, sub_category):
    """Return the first ontology key whose value list contains *sub_category*,
    or None when no category matches."""
    for category, members in ontology.items():
        if sub_category in members:
            return category
    return None
def generate_results(model, vector_size, **kwargs):
    '''
    Aggregate every per-run results.txt for *model* into one Excel workbook.

    Expects num_of_seeds * num_of_labels * num_oversampling_ratios *
    num_of_rounds result files under the model's results directory;
    please use this function only when you have all the results.

    :param model: model name ('LR_BOW', 'SVM_BOW' or an embedding model)
    :param vector_size: embedding dimension (ignored for the *_BOW models)
    :param kwargs: must provide 'save_dir' and 'cross_validation'
    '''
    with open('Data/CBT_ontology.json') as f:
        CBT_ontology = json.load(f)
    all_labels = CBT_ontology['emotions'] + \
        CBT_ontology['situations'] + CBT_ontology['thinking_errors']
    # Static per-label example counts used for the frequency column and
    # the weighted-average rows of the output sheet.
    label_count = {'Anger': 595,
                   'Anxiety': 2547,
                   'Bereavement': 107,
                   'Black_and_white': 840,
                   'Blaming': 325,
                   'Catastrophising': 479,
                   'Comparing': 132,
                   'Depression': 836,
                   'Disqualifying_the_positive': 248,
                   'Emotional_reasoning': 537,
                   'Existential': 885,
                   'Fortune_telling': 1037,
                   'Grief': 230,
                   'Guilt': 136,
                   'Health': 428,
                   'Hurt': 802,
                   'Inflexibility': 326,
                   'Jealousy': 126,
                   'Jumping_to_negative_conclusions': 1782,
                   'Labelling': 424,
                   'Loneliness': 299,
                   'Low_frustration_tolerance': 647,
                   'Mental_filtering': 222,
                   'Mind-reading': 589,
                   'Other': 223,
                   'Over-generalising': 512,
                   'Personalising': 236,
                   'Relationships': 2727,
                   'School_College': 334,
                   'Shame': 229,
                   'Work': 246}
    # metric -> label -> oversampling ratio -> list of per-seed mean scores.
    complete_results = {}
    for metric in ['Precision', 'Recall', 'F1_score', 'Accuracy', 'TP', 'FP', 'TN', 'FN']:
        complete_results[metric] = {}
        for label in all_labels:
            complete_results[metric][label] = {'oversampling_ratio1': [],
                                               'oversampling_ratio3': [],
                                               'oversampling_ratio5': [],
                                               'oversampling_ratio7': [],
                                               'oversampling_ratio0': []}
    save_dir = kwargs['save_dir']
    if model in ['LR_BOW', 'SVM_BOW']:
        saved_results_dir = os.path.join(save_dir, '%s_Results' % model)
        output_metrics_filename = 'Complete_results_for_%s.xls' % model
    else:
        saved_results_dir = os.path.join(save_dir, '%s_%dd_Results'%(model, vector_size))
        output_metrics_filename = 'Complete_results_for_%s_%dd.xls' % (model, vector_size)
    for seed in os.listdir(saved_results_dir):
        for label in all_labels:
            for ratio in [0,1,3,5,7]:
                # Per-round metric accumulators, averaged across CV rounds below.
                tmp_pre, tmp_rec, tmp_F1, tmp_acc = [], [], [], []
                tmp_TP, tmp_FP, tmp_TN, tmp_FN = [], [], [], []
                for round_id in range(1,1+kwargs['cross_validation']):
                    filepath = os.path.join(
                        saved_results_dir,
                        seed,
                        label,
                        'oversampling_ratio%d' % ratio,
                        'round%d' % round_id, 'results.txt')
                    try:
                        with open(filepath) as f:
                            for line in f:
                                # Scores are stored as text; regexes split each
                                # value into integer and 4-digit decimal parts.
                                m = re.match('.*? test F1 score: (\d)\.(\d{4})===.*', line)
                                if m:
                                    tmp_F1.append(int(m.group(1)) + int(m.group(2)) / 10000)
                                n = re.match(
                                    '.* other test_metrics: pre=(\d)\.(\d+) recall=(\d)\.(\d+) accu=(\d)\.(\d+) TP=(\d)\.(\d+) FP=(\d)\.(\d+) TN=(\d)\.(\d+) FN=(\d)\.(\d+)===.*',
                                    line)
                                if n:
                                    tmp_pre.append(int(n.group(1)) + int(n.group(2)) / 10000)
                                    tmp_rec.append(int(n.group(3)) + int(n.group(4)) / 10000)
                                    tmp_acc.append(int(n.group(5)) + int(n.group(6)) / 10000)
                                    tmp_TP.append(int(n.group(7)) + int(n.group(8)) / 10000)
                                    tmp_FP.append(int(n.group(9)) + int(n.group(10)) / 10000)
                                    tmp_TN.append(int(n.group(11)) + int(n.group(12)) / 10000)
                                    tmp_FN.append(int(n.group(13)) + int(n.group(14)) / 10000)
                    except:
                        print('The results is not complete !')
                        print('can not find file %s'%filepath)
                        exit(0)
                complete_results['F1_score'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_F1))
                complete_results['Precision'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_pre))
                complete_results['Recall'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_rec))
                complete_results['Accuracy'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_acc))
                complete_results['TP'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_TP))
                complete_results['FP'][label]['oversampling_ratio%d'%ratio].append(np.mean(tmp_FP))
                complete_results['TN'][label]['oversampling_ratio%d' % ratio].append(np.mean(tmp_TN))
                complete_results['FN'][label]['oversampling_ratio%d' % ratio].append(np.mean(tmp_FN))
    # One worksheet per metric, written via write_excel.
    wb = xlwt.Workbook()
    for k, item in complete_results.items():
        ws = wb.add_sheet(k)
        write_excel(ws, all_labels, label_count, item, CBT_ontology)
    wb.save(os.path.join(save_dir, output_metrics_filename))
def write_excel(ws, all_labels, label_count, complete_results_metric, CBT_ontology):
    """Fill worksheet *ws* with one row per label (mean±std per oversampling
    ratio) plus average / weighted-average / per-category summary rows.

    NOTE(review): the summary rows are addressed both as len(all_labels)+4..8
    (column 0 headers) and as hard-coded rows 35..39 (values); these only
    agree because there are exactly 31 labels — confirm before changing the
    label set.
    """
    # Header row: label name, frequency, then one column per oversampling ratio.
    ws.write(0, 0, 'label')
    ws.write(0, 1, 'Freq')
    ws.write(0, 2, 'ratio 1')
    ws.write(0, 3, 'ratio 3')
    ws.write(0, 4, 'ratio 5')
    ws.write(0, 5, 'ratio 7')
    ws.write(0, 6, 'no ratio')
    # Accumulators keyed by oversampling ratio for the summary rows.
    AVG_F1_mean, AVG_F1_std = defaultdict(list), defaultdict(list)
    weighted_AVG_F1_mean, weighted_AVG_F1_std = defaultdict(list), defaultdict(list)
    Emotion_mean, Emotion_std = defaultdict(list), defaultdict(list)
    Situation_mean, Situation_std = defaultdict(list), defaultdict(list)
    ThinkingError_mean, ThinkingError_std = defaultdict(list), defaultdict(list)
    for i, label in enumerate(all_labels):
        ws.write(i + 1, 0, label)
        ws.write(i + 1, 1, label_count[label])
    for i, label in enumerate(all_labels):
        for ratio, number in complete_results_metric[label].items():
            # Map each ratio key to its column; also accumulate summary stats.
            if ratio == 'oversampling_ratio0':
                ws.write(i + 1, 6, '%0.3f±%0.3f' % (np.mean(number), np.std(number)))
            elif ratio == 'oversampling_ratio1':
                ws.write(i + 1, 2, '%0.3f±%0.3f' % (np.mean(number), np.std(number)))
            elif ratio == 'oversampling_ratio3':
                ws.write(i + 1, 3, '%0.3f±%0.3f' % (np.mean(number), np.std(number)))
            elif ratio == 'oversampling_ratio5':
                ws.write(i + 1, 4, '%0.3f±%0.3f' % (np.mean(number), np.std(number)))
            else:
                ws.write(i + 1, 5, '%0.3f±%0.3f' % (np.mean(number), np.std(number)))
            AVG_F1_mean[ratio].append(np.mean(number))
            AVG_F1_std[ratio].append(np.std(number))
            weighted_AVG_F1_mean[ratio].append(np.mean(number) * label_count[label])
            weighted_AVG_F1_std[ratio].append(np.std(number) * label_count[label])
            if label in CBT_ontology['emotions']:
                Emotion_mean[ratio].append(np.mean(number))
                Emotion_std[ratio].append(np.std(number))
            elif label in CBT_ontology['situations']:
                Situation_mean[ratio].append(np.mean(number))
                Situation_std[ratio].append(np.std(number))
            else:
                ThinkingError_mean[ratio].append(np.mean(number))
                ThinkingError_std[ratio].append(np.std(number))
    # Summary rows: plain average, frequency-weighted average, then per-category.
    ws.write(len(all_labels) + 4, 0, 'AVG F1')
    for ratio, idx in zip([0, 1, 3, 5, 7], [6, 2, 3, 4, 5]):
        ws.write(35, idx, '%0.3f±%0.3f' % (np.mean(AVG_F1_mean['oversampling_ratio%d' % ratio]),
                                           np.mean(AVG_F1_std['oversampling_ratio%d' % ratio])))
    ws.write(len(all_labels) + 5, 0, 'weighted AVG F1')
    for ratio, idx in zip([0, 1, 3, 5, 7], [6, 2, 3, 4, 5]):
        ws.write(36, idx, '%0.3f±%0.3f' % (
            np.sum(weighted_AVG_F1_mean['oversampling_ratio%d' % ratio]) / np.sum(list(label_count.values())),
            np.sum(weighted_AVG_F1_std['oversampling_ratio%d' % ratio]) / np.sum(list(label_count.values()))))
    ws.write(len(all_labels) + 6, 0, 'Emotion')
    for ratio, idx in zip([0, 1, 3, 5, 7], [6, 2, 3, 4, 5]):
        ws.write(37, idx, '%0.3f±%0.3f' % (np.mean(Emotion_mean['oversampling_ratio%d' % ratio]),
                                           np.mean(Emotion_std['oversampling_ratio%d' % ratio])))
    ws.write(len(all_labels) + 7, 0, 'Situation')
    for ratio, idx in zip([0, 1, 3, 5, 7], [6, 2, 3, 4, 5]):
        ws.write(38, idx, '%0.3f±%0.3f' % (np.mean(Situation_mean['oversampling_ratio%d' % ratio]),
                                           np.mean(Situation_std['oversampling_ratio%d' % ratio])))
    ws.write(len(all_labels) + 8, 0, 'ThinkingError')
    for ratio, idx in zip([0, 1, 3, 5, 7], [6, 2, 3, 4, 5]):
        ws.write(39, idx, '%0.3f±%0.3f' % (np.mean(ThinkingError_mean['oversampling_ratio%d' % ratio]),
                                           np.mean(ThinkingError_std['oversampling_ratio%d' % ratio])))
def generate_predictions(model, vector_size, **kwargs):
    '''
    Collect per-seed predictions for every label (using the 1:1 oversampling
    ratio, the best performing one), majority-vote across seeds, attach the
    gold labels and per-example F1, and dump a JSON sorted by F1 ascending.

    :param kwargs: must provide 'save_dir', 'cross_validation' and
        'labelled_data_filepath'
    '''
    def F1_SCORE(true_labels, predict_labels):
        # Set-based F1 over the union of predicted vs. gold label names.
        TP = len(true_labels & predict_labels)
        if len(predict_labels) == 0:
            precision = 0
        else:
            precision = TP / len(predict_labels)
        if len(true_labels) == 0:
            recall = 0
        else:
            recall = TP / len(true_labels)
        if precision + recall == 0:
            F1 = 0
        else:
            F1 = 2 * precision * recall / (precision + recall)
        return F1
    with open('Data/CBT_ontology.json') as f:
        CBT_ontology = json.load(f)
    all_labels = CBT_ontology['emotions'] + \
        CBT_ontology['situations'] + CBT_ontology['thinking_errors']
    save_dir = kwargs['save_dir']
    if model in ['LR_BOW', 'SVM_BOW']:
        saved_results_dir = os.path.join(save_dir, '%s_Results' % model)
        output_predictions_filename = 'Predictions_for_%s.json' % model
    else:
        saved_results_dir = os.path.join(save_dir, '%s_%dd_Results' % (model, vector_size))
        output_predictions_filename = 'Predictions_for_%s_%dd.json' % (model, vector_size)
    # predictions: example id -> label -> list of 0/1 votes (one per seed/round).
    predictions = {}
    for seed in os.listdir(saved_results_dir):
        for label in all_labels:
            for round_id in range(1,1+kwargs['cross_validation']):
                filepath = os.path.join(
                    saved_results_dir,
                    seed,
                    label,
                    'oversampling_ratio1',
                    'round%d' % round_id, 'results.txt')
                try:
                    with open(filepath) as f:
                        # Lines after the 'predictions' marker hold
                        # '<24-char id> <0|1>' pairs.
                        flag = False
                        for line in f:
                            if 'predictions' in line:
                                flag = True
                                continue
                            m = re.match('(\w{24}) ([01]).*', line)
                            if m and flag:
                                if m.group(1) not in predictions:
                                    predictions[m.group(1)] = {}
                                if label not in predictions[m.group(1)]:
                                    predictions[m.group(1)][label] = []
                                predictions[m.group(1)][label].append(int(m.group(2)))
                except:
                    print('The results is not complete !')
                    print('can not find file %s' % filepath)
                    exit(0)
    with open(kwargs['labelled_data_filepath'], 'r') as f:
        labelled_data = json.load(f)
    predicted_labelled_data = {}
    for ID, pred in predictions.items():
        predicted_labelled_data[ID] = {}
        predicted_labelled_data[ID]['prediction'] = {'emotions':[], 'situations':[], 'thinking_errors':[]}
        for l, count in pred.items():
            # Majority vote: mean of the 0/1 votes must exceed 0.5.
            if np.mean(count) > 0.5:
                if l in CBT_ontology['emotions']:
                    predicted_labelled_data[ID]['prediction']['emotions'].append(l)
                elif l in CBT_ontology['situations']:
                    predicted_labelled_data[ID]['prediction']['situations'].append(l)
                else:
                    predicted_labelled_data[ID]['prediction']['thinking_errors'].append(l)
    for ID in predicted_labelled_data.keys():
        for this_data in labelled_data:
            if ID != this_data['id']:
                continue
            else:
                predicted_labelled_data[ID]['label'] = copy.deepcopy(this_data['label'])
                predicted_labelled_data[ID]['problem'] = this_data['problem']
                predicted_labelled_data[ID]['negative_take'] = this_data['negative_take']
                predicted_labelled_data[ID]['F1 score'] = F1_SCORE(
                    set(predicted_labelled_data[ID]['label']['emotions']+
                        predicted_labelled_data[ID]['label']['situations'] +
                        predicted_labelled_data[ID]['label']['thinking_errors']),
                    set(predicted_labelled_data[ID]['prediction']['emotions'] +
                        predicted_labelled_data[ID]['prediction']['situations'] +
                        predicted_labelled_data[ID]['prediction']['thinking_errors']))
    # Sort worst-predicted examples first for easier error analysis.
    predicted_labelled_data = list(predicted_labelled_data.values())
    predicted_labelled_data.sort(key=lambda x: x['F1 score'])
    with open(os.path.join(save_dir,output_predictions_filename), 'w') as f:
        json.dump(predicted_labelled_data, f, indent=2)
    print('mean F1 score for labelled data:', np.mean([x['F1 score'] for x in predicted_labelled_data]))
| StarcoderdataPython |
11211891 | <gh_stars>1-10
"""
View logic used in catalog app
"""
import traceback
from django.http import HttpRequest, HttpResponse
from django.db import transaction
from django.shortcuts import render, redirect, reverse
from seev.apps.utils.country import UnoCountry
from seev.apps.utils.generators import *
from seev.apps.utils.codetable import getGeneralTranslation
from seev.apps.utils.messages import get_app_message, addSnackDataToContext
from seev.apps.utils.session import *
from seev.apps.utils.process import *
from seev.apps.core.views import go_error, go_success
from seev.apps.core.models import UnoClient
from .models import *
def go_ord_home(request, context=None):
    """Render the order home page with any context carried over in the session."""
    session_context = get_context_in_session(request)
    return render(request, 'order/index.html', context=session_context or {})
def find_oppo_by_num(request, context=None):
    """Look up an opportunity by number (POST) and render its details;
    redirect home with a snack message on any failure."""
    if request.method == 'POST':
        try:
            oppoNumber = request.POST['opportunity-number']
            opportunity = UnoOpportunity.objects.get(
                opportunity_number=oppoNumber)
            # Treat inactive or fully-consumed opportunities as expired.
            if not opportunity.active or opportunity.deal_count >= opportunity.deal_limit:
                raise AssertionError
            # Get client and customer
            client = UnoClient.objects.get(client_id=opportunity.client_id)
            customer = UnoCustomer.objects.get(
                customer_id=opportunity.customer_id)
            # Fill opportunity details
            oppoData = {}
            # Discounts are shown via their translated display values.
            opportunity.discount_nrc = getGeneralTranslation(
                opportunity.discount_nrc)
            opportunity.discount_mrc = getGeneralTranslation(
                opportunity.discount_mrc)
            opportunity.opportunity_number = str(
                opportunity.opportunity_number).replace('-', '')
            oppoData['opportunity'] = opportunity
            # Remaining deals available on this opportunity.
            oppoData['reDeal'] = int(
                opportunity.deal_limit) - int(opportunity.deal_count)
            oppoData['clientName'] = client.entity_name
            oppoData['clientEml'] = client.contact_email
            oppoData['clientPh'] = client.contact_phone
            oppoData['clientCty'] = UnoCountry.get_country_by_code(
                client.country)
            oppoData['custName'] = customer.customer_name
            oppoData['custEml'] = customer.contact_email
            context = {}
            context['oppoData'] = oppoData
            return render(request, 'order/index.html', context=context)
        except ObjectDoesNotExist:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Opportunity not found'))
            return redirect('go_ord_home')
        except AssertionError:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Opportunity has expired'))
            return redirect('go_ord_home')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown Error'))
            return redirect('go_ord_home')
    else:
        return redirect('go_ord_home')
@transaction.atomic
def create_order(request, context=None):
    """Create a basket and order instance for a valid opportunity (POST),
    store order metadata in the session, and go to order configuration.

    The whole handler runs in one DB transaction, so a failure rolls back
    both the basket and the order.
    """
    if request.method == 'POST':
        try:
            oppoId = request.POST['opportunity-id']
            ordName = request.POST['order-name']
            ordSecret = request.POST['order-secret']
            # Fetch data
            opportunity = UnoOpportunity.objects.get(opportunity_id=oppoId)
            # Re-validate the opportunity at creation time.
            if not opportunity.active or opportunity.deal_count >= opportunity.deal_limit:
                raise AssertionError
            client = UnoClient.objects.get(client_id=opportunity.client_id)
            customer = UnoCustomer.objects.get(
                customer_id=opportunity.customer_id)
            # Create basket and order
            basket = PtaBasket(
                client=client,
                customer=customer
            )
            order = PtaOrderInstance(
                order_name=ordName,
                secret=ordSecret,
                client=client,
                customer=customer,
                opportunity=opportunity,
                basket=basket,
                status='IN'
            )
            basket.save()
            order.save()
            # Store order in session
            meta = generateOrderMeta(order)
            save_ord_meta_to_session(request, meta)
            return redirect('go_ord_config_home')
        except AssertionError:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Opportunity invalid or expired'))
            return redirect('go_ord_home')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order creation failed'))
            return redirect('go_ord_home')
    else:
        return redirect('go_ord_home')
def go_ord_config_home(request, context=None):
    """Render the order configuration page for the order referenced by the
    session's order metadata; redirect home when no metadata is present."""
    context = get_context_in_session(request)
    if not context:
        context = {}
    # Fill context with order metadata
    # NOTE(review): load_ord_meta_to_context appears to replace the context
    # (falsy return means 'no metadata') — confirm it preserves prior keys.
    context = load_ord_meta_to_context(request, context)
    if not context:
        store_context_in_session(request, addSnackDataToContext(
            context, 'Order request failed'))
        return redirect('go_ord_home')
    order = PtaOrderInstance.objects.get(
        order_number=context['ordMeta']['order_number'])
    context['numSites'] = len(getAllSitesInOrder(order))
    context['numPrs'] = len(getAllProductsInOrder(order))
    # Orders are flagged valid once in status VA (validated) or FL (failed?).
    context['isValid'] = True if order.status in ('VA', 'FL') else False
    # Order validation request
    if 'ord_valid_count' in request.session:
        context['validCnt'] = request.session['ord_valid_count']
        del request.session['ord_valid_count']
    return render(request, 'order/order-home.html', context=context)
def find_ord_by_num(request, context=None):
    """Look up an order by its public order number and render its landing page.

    Only orders in an accessible status ('IN', 'IP', 'VA', 'FL') can be
    viewed; anything else (and any lookup failure) redirects back to the
    order home with a snack message.
    """
    if request.method == 'POST':
        try:
            ordNumber = request.POST['order-number']
            # Check order
            order = PtaOrderInstance.objects.get(order_number=ordNumber)
            if order.status not in ('IN', 'IP', 'VA', 'FL'):
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Order cannot be accessed'))
                return redirect('go_ord_home')
            # Get order data
            ordData = generateOrderData(order)
            context = {}
            context['ordData'] = ordData
            return render(request, 'order/index.html', context=context)
        except ObjectDoesNotExist:
            # No order with that number exists.
            store_context_in_session(
                request, addSnackDataToContext(context, 'Order not found'))
            return redirect('go_ord_home')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown Error'))
            return redirect('go_ord_home')
    else:
        # Only POST submissions are meaningful here.
        return redirect('go_ord_home')
def auth_access_order(request, context=None):
    """Authenticate access to an order using its shared secret.

    On a matching secret the order metadata is saved to the session and the
    user is sent to the order configuration home.  On a mismatch any stale
    session metadata is cleared and the order landing page is re-rendered
    with an "access denied" snack message.
    """
    if request.method == 'POST':
        try:
            ordId = request.POST['order-id']
            ordSec = request.POST['order-secret']
            order = PtaOrderInstance.objects.get(order_instance_id=ordId)
            context = {}
            if ordSec == order.secret:
                meta = generateOrderMeta(order)
                save_ord_meta_to_session(request, meta)
                return redirect('go_ord_config_home')
            else:
                clear_ord_meta(request)
                ordData = generateOrderData(order)
                context['ordData'] = ordData
                context['snack_data'] = 'Invalid secret, access denied'
                return render(request, 'order/index.html', context=context)
        except Exception:
            # traceback.print_exc()
            logError(request)
            clear_ord_meta(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unexpected error'))
            return redirect('go_ord_home')
    else:
        # Bug fix: non-POST requests previously fell off the end of the
        # function and returned None (an HTTP 500 in Django).  Redirect to
        # the order home like every sibling view in this module.
        return redirect('go_ord_home')
def exit_order(request, context=None):
    """Drop the current order metadata from the session and go to the landing page."""
    clear_ord_meta(request)
    return redirect('go_landing')
def go_site_config(request, context=None):
    """Render the site (location) configuration page for the session order.

    Requires order metadata in the session; locked orders are bounced back
    to the order configuration home.  Builds a summary row per existing
    site (id, name, validity flag, short address, product count).
    """
    try:
        context = get_context_in_session(request)
        if not context:
            context = {}
        # Get order metadata
        ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
        if not ordMeta:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order request failed'))
            return redirect('go_ord_home')
        else:
            context = load_ord_meta_to_context(request, context)
            context['mapApi'] = getGoogleMapApiSource()
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if isOrderLocked(order):
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Order is locked'))
                return redirect('go_ord_config_home')
            # Load existing sites
            siteData = []
            sites = getAllSitesInOrder(order)
            for site in sites:
                data = {}
                # site.site is the underlying UnoSite address record
                # (see add_new_site, which links PtaSite to a UnoSite).
                doc = site.site
                data['id'] = str(site.pta_site_id)
                data['name'] = site.site_name
                data['valid'] = '1' if site.is_valid else '0'
                data['addr'] = ', '.join([doc.address_1, doc.city, doc.country])
                data['state'] = doc.state
                data['prCount'] = len(getAllProductsInSite(site))
                siteData.append(data)
            context['siteData'] = siteData
            context['siteCount'] = len(siteData)
            return render(request, 'order/order-site.html', context=context)
    except Exception:
        # traceback.print_exc()
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Redirect error'))
        return redirect('go_ord_home')
@transaction.atomic
def add_new_site(request, context=None):
    """Create a new location for the session order.

    Reuses an existing UnoSite with the same address for this customer, or
    creates one, then links it to the order via a new PtaSite.

    NOTE(review): built-in exceptions are (ab)used as control-flow signals:
    TabError   => the location name is already used in this order,
    AssertionError => the same address is already attached to this order.
    Both are caught below and turned into snack messages.
    """
    if request.method == 'POST':
        try:
            context = {}
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                return redirect('go_site_config')
            # Get address form data
            siteName = request.POST['site_name']
            addrL1 = request.POST['address_line_1']
            addrL2 = request.POST['address_line_2']
            addrL3 = request.POST['address_line_3']
            city = request.POST['address_city']
            state = request.POST['address_state']
            zipcode = request.POST['address_postal']
            country = request.POST['address_country']
            # Get order
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            customer = order.customer
            # Validation
            dupSite = PtaSite.objects.filter(
                order_instance=order, site_name=siteName)
            if len(dupSite) > 0:
                raise TabError
            site = UnoSite.objects.filter(address_1=addrL1, address_2=addrL2, address_3=addrL3,
                                          city=city, state=state, zipcode=zipcode, country=country, customer=customer)
            if len(site) > 0:
                # Address already known for this customer: reuse it, but only
                # if it is not already attached to this order.
                site = site[0]
                extSite = PtaSite.objects.filter(
                    site=site, order_instance=order)
                if len(extSite) > 0:
                    raise AssertionError
            else:
                site = UnoSite(
                    customer=customer,
                    address_1=addrL1,
                    address_2=addrL2,
                    address_3=addrL3,
                    city=city,
                    state=state,
                    zipcode=zipcode,
                    country=country
                )
                site.save()
            ordSite = PtaSite(
                site_name=siteName,
                site=site,
                order_instance=order,
            )
            ordSite.save()
            # Adding a site invalidates any previous order validation.
            invalidateOrder(order)
            refreshOrdSessionData(order, request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'New location added'))
            return redirect('go_site_config')
        except AssertionError:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Site already exists'))
            return redirect('go_site_config')
        except TabError:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Location name already exists'))
            return redirect('go_site_config')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_site_config')
@transaction.atomic
def rm_site(request, context=None):
    """Remove a location (and every product attached to it) from the order."""
    if request.method != 'POST':
        return redirect('go_site_config')
    try:
        context = {}
        ord_meta = request.session.get('order_meta')
        if not ord_meta:
            return redirect('go_site_config')
        target = PtaSite.objects.get(pta_site_id=request.POST['site-id'])
        parent_order = target.order_instance
        # Cascade-delete the site's products before removing the site itself.
        for item in getAllProductsInSite(target):
            deleteProductItem(item)
        target.delete()
        invalidateOrder(parent_order)
        refreshOrdSessionData(parent_order, request)
        store_context_in_session(request, addSnackDataToContext(
            context, 'Location has been removed'))
        return redirect('go_site_config')
    except Exception:
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Unknown error'))
        return redirect('go_ord_config_home')
def go_build_pr(request, context=None):
    """Render the product-builder page for one site of the session order.

    Loads the client's catalog products, the order's sites, and the basket
    items of the currently selected site (``?site_id=`` query parameter, or
    the first site by default).
    """
    try:
        context = get_context_in_session(request)
        if not context:
            context = {}
        # Metadata
        ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
        if not ordMeta:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order request failed'))
            return redirect('go_ord_home')
        else:
            context = load_ord_meta_to_context(request, context)
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if isOrderLocked(order):
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Order is locked'))
                return redirect('go_ord_config_home')
            client = order.client
            # Load catalog products
            ctgList = getAllClientProducts(client)
            ctgData = []
            if not ctgList or len(ctgList) == 0:
                # NOTE(review): a fresh HttpRequest() is passed here, so the
                # error view does not see the current session — confirm this
                # is intentional.
                return go_error(HttpRequest(), {'error': get_app_message('catalog_error'), 'message': get_app_message('catalog_error_message')})
            for pr in ctgList:
                prDoc = {}
                prDoc['id'] = str(pr.ctg_doc_id)
                prDoc['code'] = getDefCatalogCode(pr.itemcode)
                prDoc['name'] = pr.name
                ctgData.append(prDoc)
            context['prData'] = ctgData
            # Load Sites
            sites = getAllSitesInOrder(order)
            if len(sites) == 0:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'No sites found'))
                return redirect('go_site_config')
            siteData = []
            for site in sites:
                data = {}
                # NOTE(review): doc is assigned but unused in this loop.
                doc = site.site
                data['id'] = str(site.pta_site_id)
                data['name'] = site.site_name
                data['valid'] = '1' if site.is_valid else '0'
                siteData.append(data)
            context['siteData'] = siteData
            # Load current site and products
            site = sites[0]
            siteId = request.GET.get('site_id')
            if siteId:
                sites = sites.filter(pta_site_id=siteId)
                if len(sites) > 0:
                    site = sites[0]
                else:
                    store_context_in_session(request, addSnackDataToContext(
                        context, 'Requested site not found'))
                    return redirect('go_site_config')
            siteDoc = site.site
            context['selId'] = str(site.pta_site_id)
            context['siteDoc'] = siteDoc
            products = getAllProductsInSite(site)
            biData = []
            for bi in products:
                biDoc = {}
                biDoc['id'] = str(bi.basket_item_id)
                biDoc['name'] = getBasketItemName(bi)
                biDoc['serial'] = zeroPrepender(bi.serial, 5)
                biDoc['valid'] = '1' if bi.is_valid else '0'
                biData.append(biDoc)
            context['biData'] = biData
            context['prCount'] = len(biData)
            return render(request, 'order/order-product.html', context=context)
    except Exception:
        # traceback.print_exc()
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Redirect error'))
        return redirect('go_ord_home')
@transaction.atomic
def add_pr_to_basket(request, context=None):
    """Add catalog products to one site of the session order.

    POST['ctg_add_data'] is a JSON mapping of catalog id -> quantity;
    POST['ord_site_id'] selects the target site.  Serial numbers are
    allocated from the site's current lead serial, and the number of
    products actually added is reported via the snack message.
    """
    if request.method == 'POST':
        try:
            context = {}
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                return redirect('go_build_pr')
            prData = parseJson(request.POST['ctg_add_data'])
            siteId = request.POST['ord_site_id']
            # Get order details
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            site = PtaSite.objects.get(pta_site_id=siteId)
            # Redirect target: product-builder page of the same site
            # (UUID dashes stripped for the query string).
            redir = reverse('go_build_pr') + '?site_id=' + \
                str(site.pta_site_id).replace('-', '')
            leadSerial = getLeadSerialInOrderSite(order, site)
            tempSerial = leadSerial
            if not prData or not leadSerial:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Invalid order data'))
                return redirect(redir)
            # Add products to basket
            for ctgId, count in prData.items():
                tempSerial = addNewProductsToSite(
                    order, site, ctgId, count, tempSerial)
            invalidateSite(site)
            invalidateOrder(order)
            refreshOrdSessionData(order, request)
            # The serial advance tells how many products were created.
            if tempSerial > leadSerial:
                diff = tempSerial - leadSerial
                if diff == 1:
                    store_context_in_session(request, addSnackDataToContext(
                        context, '1 product has been added'))
                else:
                    store_context_in_session(request, addSnackDataToContext(
                        context, str(diff) + ' products have been added'))
            else:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'No product is added'))
            return redirect(redir)
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_build_pr')
@transaction.atomic
def del_pr_in_site(request, context=None):
    """Delete a single product (basket item) from a site of the current order."""
    if request.method != 'POST':
        return redirect('go_build_pr')
    try:
        context = {}
        if not request.session.get('order_meta'):
            return redirect('go_build_pr')
        target_site = PtaSite.objects.get(
            pta_site_id=request.POST['ord_site_id'])
        parent_order = target_site.order_instance
        target_item = PtaBasketItem.objects.get(
            basket_item_id=request.POST['bi_rm_id'])
        # Afterwards, return to the product-builder page of the same site.
        redir = '{}?site_id={}'.format(
            reverse('go_build_pr'),
            str(target_site.pta_site_id).replace('-', ''))
        deleteProductItem(target_item)
        invalidateSite(target_site)
        invalidateOrder(parent_order)
        refreshOrdSessionData(parent_order, request)
        store_context_in_session(request, addSnackDataToContext(
            context, 'Product has been deleted'))
        return redirect(redir)
    except Exception:
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Unknown error'))
        return redirect('go_ord_config_home')
def go_svc_config(request, context=None):
    """Render the service configuration page for one basket item.

    Builds the ordered list of top-level services (basket items with no
    parent) across all sites, resolves the requested ``?svc_id=`` (or the
    first service), computes previous/next navigation ids, and loads the
    catalog product- and feature-level specification definitions together
    with the currently saved values and any stored validation errors.
    """
    try:
        context = get_context_in_session(request)
        if not context:
            context = {}
        # Metadata
        ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
        if not ordMeta:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order request failed'))
            return redirect('go_ord_home')
        else:
            context = load_ord_meta_to_context(request, context)
            serviceList = []
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if isOrderLocked(order):
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Order is locked'))
                return redirect('go_ord_config_home')
            # Get all sites and products
            sites = PtaSite.objects.filter(
                order_instance=order).order_by('site_name')
            if not sites or len(sites) < 1:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'No sites found'))
                return redirect('go_ord_config_home')
            else:
                for site in sites:
                    # parent_id=None keeps only top-level products
                    # (features hang off a parent item).
                    products = PtaBasketItem.objects.filter(
                        pta_site=site, parent_id=None).order_by('serial')
                    if products and len(products) > 0:
                        for pr in products:
                            serviceList.append(
                                str(pr.basket_item_id).replace('-', ''))
            if len(serviceList) < 1:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'No services found'))
                return redirect('go_ord_config_home')
            serviceId = request.GET.get('svc_id') if request.GET.get(
                'svc_id') else serviceList[0]
            if serviceId not in serviceList:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Invalid service'))
                return redirect('go_ord_config_home')
            # Previous/next service ids for page navigation.
            preSvcId = None
            nxtSvcId = None
            if serviceList.index(serviceId) > 0:
                preSvcId = serviceList[serviceList.index(serviceId) - 1]
            if serviceList.index(serviceId) < len(serviceList) - 1:
                nxtSvcId = serviceList[serviceList.index(serviceId) + 1]
            context['preId'] = preSvcId
            context['nxtId'] = nxtSvcId
            service = PtaBasketItem.objects.get(basket_item_id=serviceId)
            siteDoc = service.pta_site
            addrDoc = siteDoc.site
            svcData = {}
            svcData['id'] = str(service.basket_item_id)
            svcData['name'] = getBasketItemName(service)
            svcData['serial'] = zeroPrepender(service.serial, 5)
            svcData['valid'] = '1' if service.is_valid else '0'
            context['siteDoc'] = siteDoc
            context['addrDoc'] = addrDoc
            context['svcData'] = svcData
            svcBasketDoc = populateServiceDoc(service)
            # Load catalog definitions
            # Product level
            prSpecList = []
            prCtg = CtgProduct.objects.get(ctg_doc_id=service.ctg_doc_id)
            prSpecs = CtgSpecification.objects.filter(
                parent_ctg_id=service.ctg_doc_id, active=1)
            pspCnt = 0
            for psp in prSpecs:
                val = getLeafValueFromSvcDoc(
                    svcBasketDoc, service.itemcode, psp.leaf_name)
                # The 'SP_BASE' spec is always shown first.
                if psp.leaf_name == 'SP_BASE':
                    prSpecList.insert(0, buildSpecInfo(psp, val))
                else:
                    prSpecList.append(buildSpecInfo(psp, val))
                pspCnt += 1
            # Feature level
            prFetList = []
            fetCtg = CtgFeature.objects.filter(
                product=prCtg, active=1).order_by('creation_time')
            fspCnt = 0
            for fet in fetCtg:
                fetDoc = {}
                fetDoc['id'] = str(fet.ctg_doc_id)
                fetDoc['itemcode'] = fet.itemcode
                fetDoc['name'] = fet.name
                fetSpList = []
                fetSpecs = CtgSpecification.objects.filter(
                    parent_ctg_id=fet.ctg_doc_id, active=1)
                for fsp in fetSpecs:
                    val = getLeafValueFromSvcDoc(
                        svcBasketDoc, fet.itemcode, fsp.leaf_name)
                    if fsp.leaf_name == 'SP_BASE':
                        fetSpList.insert(0, buildSpecInfo(fsp, val))
                    else:
                        fetSpList.append(buildSpecInfo(fsp, val))
                    fspCnt += 1
                fetDoc['specs'] = fetSpList
                prFetList.append(fetDoc)
            context['prCtgData'] = prSpecList
            context['pspCnt'] = pspCnt
            context['fetCtgData'] = prFetList
            context['fspCnt'] = fspCnt
            # Populate error
            errorList = getOrCreateSvcError(request, str(service.basket_item_id))
            context['errList'] = errorList
            context['errLen'] = len(errorList)
            return render(request, 'order/order-service.html', context=context)
    except Exception:
        # traceback.print_exc()
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Redirect error'))
        return redirect('go_ord_config_home')
@transaction.atomic
def save_svc_config(request, context=None):
    """Persist a service's product/feature specification values.

    POST['svc_json'] carries ``svcId``, product-level specs (``pspList``)
    and feature entries (``fetList``, each with ``addFlag`` and
    ``fspList``).  After saving, the product item is validated and any
    errors are stored in the per-session error map.
    """
    if request.method == 'POST':
        try:
            context = {}
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                return redirect('go_svc_config')
            svcDataStruct = parseJson(request.POST['svc_json'])
            svcId = svcDataStruct['svcId']
            pspList = svcDataStruct['pspList']
            fetList = svcDataStruct['fetList']
            # Check order
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            # NOTE(review): basket is fetched but unused below.
            basket = order.basket
            productItem = PtaBasketItem.objects.get(basket_item_id=svcId)
            site = productItem.pta_site
            redir = reverse('go_svc_config') + '?svc_id=' + \
                str(svcId).replace('-', '')
            if isOrderLocked(order):
                store_context_in_session(
                    request, addSnackDataToContext(context, 'Order is locked'))
                return redirect('go_ord_config_home')
            # Save product level specs
            saveBaseSpec(productItem)
            for psp in pspList:
                createOrUpdateSpec(psp['id'], productItem, psp['value'])
            # Save features
            for fet in fetList:
                if fet['addFlag']:
                    featureItem = createOrGetFeature(fet['id'], productItem)
                    # Save feature level specs
                    saveBaseSpec(featureItem)
                    for fsp in fet['fspList']:
                        createOrUpdateSpec(
                            fsp['id'], featureItem, fsp['value'])
                else:
                    # Feature deselected: remove it if it exists.
                    existFeature = getExistingFeature(productItem, fet['id'])
                    if existFeature:
                        deleteFeatureItem(existFeature)
            # Any config change invalidates previously computed pricing.
            clearSitePrice(site)
            # Validation
            valid = True
            errorList = []
            valid = validateProductItem(productItem, errorList)
            saveErrorInMap(request, str(productItem.basket_item_id), errorList)
            if valid:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Configuration is saved'))
            else:
                invalidateSite(site)
                invalidateOrder(order)
                refreshOrdSessionData(order, request)
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Error(s) detected in service'))
            return redirect(redir)
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_svc_config')
@transaction.atomic
def do_ord_valid(request, context=None):
    """Validate the session order and stash the result count in the session."""
    if request.method != 'POST':
        return redirect('go_ord_config_home')
    try:
        ord_meta = request.session.get('order_meta')
        if not ord_meta:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order request failed'))
            return redirect('go_ord_home')
        context = load_ord_meta_to_context(request, context)
        order = PtaOrderInstance.objects.get(
            order_number=ord_meta['order_number'])
        valid = validateOrder(order)
        refreshOrdSessionData(order, request)
        # Picked up (and cleared) once by go_ord_config_home.
        request.session['ord_valid_count'] = valid
        return redirect('go_ord_config_home')
    except Exception:
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Unknown error'))
        return redirect('go_ord_config_home')
def go_ord_summary(request, context=None):
    """Render the order summary page (sites with their product trees).

    Only available once the order has been validated ('VA') or finalized
    ('FL'); otherwise the user is sent back to the configuration home.
    """
    try:
        context = get_context_in_session(request)
        if not context:
            context = {}
        # Metadata
        ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
        if not ordMeta:
            store_context_in_session(request, addSnackDataToContext(
                context, 'Order request failed'))
            return redirect('go_ord_home')
        else:
            context = load_ord_meta_to_context(request, context)
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if order.status not in ('VA', 'FL'):
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Summary is not available'))
                return redirect('go_ord_config_home')
            # Load product/service tree data
            siteDataList = []
            sites = PtaSite.objects.filter(
                order_instance=order).order_by('creation_time')
            if sites and len(sites) > 0:
                for site in sites:
                    # Appends this site's summary into siteDataList in place.
                    populateSiteSummary(siteDataList, site)
            context['siteDataList'] = siteDataList
            return render(request, 'order/order-summary.html', context=context)
    except Exception:
        # traceback.print_exc()
        logError(request)
        store_context_in_session(
            request, addSnackDataToContext(context, 'Unknown error'))
        return redirect('go_ord_config_home')
@transaction.atomic
def do_site_price(request, context=None):
    """Request pricing for the selected sites of the session order.

    POST['site_array'] is a comma-separated list of PtaSite ids; only sites
    that actually belong to the current session order are priced.
    """
    if request.method == 'POST':
        try:
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                return redirect('go_ord_summary')
            siteIds = request.POST['site_array']
            siteList = str(siteIds).split(',') if siteIds else None
            sites = []
            # Check sites: keep only those belonging to this order.
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if siteList:
                for sid in siteList:
                    site = PtaSite.objects.get(pta_site_id=sid)
                    if site.order_instance.order_instance_id == order.order_instance_id:
                        sites.append(site)
            if sites:
                for site in sites:
                    # Do pricing
                    priceSite(site)
            else:
                store_context_in_session(
                    request, addSnackDataToContext(context, 'No site to price'))
                return redirect('go_ord_summary')
            store_context_in_session(request, addSnackDataToContext(
                context, 'Pricing is received'))
            return redirect('go_ord_summary')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_ord_summary')
@transaction.atomic
def do_ord_submit(request, context=None):
    """Submit a validated ('VA') order: finalize it and archive a UnoOrder.

    Marks the order 'FL', locks its basket, creates an archive UnoOrder
    (status 'SM') if one does not already exist for this order number,
    increments the opportunity's deal count, and clears the session order.
    """
    if request.method == 'POST':
        try:
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                return redirect('go_ord_summary')
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            # Validation
            if order.status == 'VA':
                order.status = 'FL'
                basket = order.basket
                basket.is_locked = True
                # Send external request here
                # No code is provided due to the nature of this project
                # Archive record
                oldAr = UnoOrder.objects.filter(
                    order_number=order.order_number)
                if not oldAr or len(oldAr) < 1:
                    arOrder = UnoOrder(
                        order_number=order.order_number,
                        client=order.client,
                        customer=order.customer,
                        opportunity=order.opportunity,
                        status='SM'
                    )
                    arOrder.save()
                # Increase deal count
                oppo = order.opportunity
                oppo.deal_count += 1
                oppo.save()
                basket.save()
                order.save()
                clear_ord_meta(request)
                # NOTE(review): a fresh HttpRequest() is passed to the
                # success view, so it does not see the current session.
                return go_success(HttpRequest(), {'message': get_app_message('order_submit_message')})
            else:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Cannot submit this order'))
                return redirect('go_ord_config_home')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_ord_summary')
@transaction.atomic
def do_ord_cancel(request, context=None):
    """Cancel the session order (status -> 'VD') and lock its basket.

    Cancellation is only allowed while the order is in 'IN', 'IP' or 'VA'
    state; the session order metadata is cleared afterwards.
    """
    if request.method == 'POST':
        try:
            ordMeta = request.session['order_meta'] if 'order_meta' in request.session else None
            if not ordMeta:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Order request failed'))
                return redirect('go_ord_home')
            order = PtaOrderInstance.objects.get(
                order_number=ordMeta['order_number'])
            if order.status in ('IN', 'IP', 'VA'):
                order.status = 'VD'
                basket = order.basket
                basket.is_locked = True
                basket.save()
                order.save()
                clear_ord_meta(request)
                return go_success(HttpRequest(), {'message': get_app_message('order_cancel_message')})
            else:
                store_context_in_session(request, addSnackDataToContext(
                    context, 'Invalid cancellation request'))
                return redirect('go_ord_config_home')
        except Exception:
            # traceback.print_exc()
            logError(request)
            store_context_in_session(
                request, addSnackDataToContext(context, 'Unknown error'))
            return redirect('go_ord_config_home')
    else:
        return redirect('go_ord_config_home')
| StarcoderdataPython |
9708598 | # -*- coding: utf-8 -*-
# 版权所有 2019 深圳米筐科技有限公司(下称“米筐科技”)
#
# 除非遵守当前许可,否则不得使用本软件。
#
# * 非商业用途(非商业用途指个人出于非商业目的使用本软件,或者高校、研究所等非营利机构出于教育、科研等目的使用本软件):
# 遵守 Apache License 2.0(下称“Apache 2.0 许可”),您可以在以下位置获得 Apache 2.0 许可的副本:http://www.apache.org/licenses/LICENSE-2.0。
# 除非法律有要求或以书面形式达成协议,否则本软件分发时需保持当前许可“原样”不变,且不得附加任何条件。
#
# * 商业用途(商业用途指个人出于任何商业目的使用本软件,或者法人或其他组织出于任何目的使用本软件):
# 未经米筐科技授权,任何个人不得出于任何商业目的使用本软件(包括但不限于向第三方提供、销售、出租、出借、转让本软件、本软件的衍生产品、引用或借鉴了本软件功能或源代码的产品或服务),任何法人或其他组织不得出于任何目的使用本软件,否则米筐科技有权追究相应的知识产权侵权责任。
# 在此前提下,对本软件的使用同样需要遵守 Apache 2.0 许可,Apache 2.0 许可与本许可冲突之处,以本许可为准。
# 详细的授权流程,请联系 <EMAIL> 获取。
import re
from typing import Optional, Sequence, List, Set, Tuple
from PySide2.QtWidgets import QMainWindow, QComboBox
from PySide2.QtCore import Signal, Qt
from rqams_client.client import RQAMSClient
from rqams_client.models import AssetUnit, Portfolio, Broker, Product, Account
from rqams_client.utils import ReqestException
from rqams_helper.utils.slot import slot
from rqams_helper.utils.widgets import add_enter_press_event, disable_widget, enabled_widget
from rqams_helper.utils.future import Future
from .ui.createAccountWindow import Ui_CreateAccountWIndow
from .resources import get_icon
def _disable_combo_box(box: QComboBox):
    """Disable *box* with a "data is loading" hint and clear its items."""
    disable_widget(box, "数据正在加载")
    box.clear()
def _enable_combo_box(box: QComboBox, items: List[str]):
    """Populate *box* with *items* and re-enable it."""
    box.addItems(items)
    enabled_widget(box)
class CreateAccountWindow(QMainWindow):
    """Always-on-top window for creating a trading Account.

    Brokers and asset units are fetched asynchronously (via Future) from the
    RQAMS client; selecting an asset unit loads its portfolios and product.
    Entries appearing in the black-list sets passed to :meth:`setup` are
    filtered out of the choices.  The form is continuously re-validated and
    the finished Account is emitted through :attr:`account_created`.
    """
    # Emitted with the fully populated Account when the user clicks create.
    account_created = Signal(Account)
    def __init__(self):
        super(CreateAccountWindow, self).__init__(None, Qt.WindowStaysOnTopHint | Qt.WindowCloseButtonHint)
        self.ui = Ui_CreateAccountWIndow()
        self.ui.setupUi(self)
        self.ui.lineEdit_products.setReadOnly(True)
        self.ui.pushButton_reset.clicked.connect(self._reset)
        self.ui.lineEdit_account.textChanged[str].connect(self._check_form)
        self.ui.lineEdit_name.textChanged[str].connect(self._check_form)
        self.ui.comboBox_assetUnits.currentIndexChanged.connect(self._on_asset_unit_changed)
        self.ui.pushButton_create.clicked.connect(self._on_create_button_clicked)
        self.setWindowIcon(get_icon())
        self._client: Optional[RQAMSClient]= None
        self._brokers: Optional[Sequence[Broker]] = None
        self._asset_units: Optional[Sequence[AssetUnit]] = None
        self._portfolios: Optional[Sequence[Portfolio]] = None
        self._product: Optional[Product] = None
        self._account_black_list: Set[str] = set()
        self._portfolio_black_list: Set[str] = set()
        self._asset_unit_black_list: Set[str] = set()
        # Pressing Enter in either line edit triggers "create" when enabled.
        add_enter_press_event(
            (self.ui.lineEdit_account, self.ui.lineEdit_name),
            lambda: self._on_create_button_clicked() if self.ui.pushButton_create.isEnabled() else None
        )
    def setup(
        self, client: RQAMSClient, account_black_list: Set[str], portfolio_black_list: Set[str],
        asset_unit_black_list: Set[str]
    ):
        """Reset the form state and start loading brokers/asset units."""
        self._client = client
        self._brokers = self._asset_units = self._portfolios = self._product = None
        self._account_black_list = account_black_list
        self._portfolio_black_list = portfolio_black_list
        self._asset_unit_black_list = asset_unit_black_list
        for lineEdit in (self.ui.lineEdit_name, self.ui.lineEdit_account, self.ui.lineEdit_products):
            lineEdit.clear()
        self.ui.lineEdit_products.setReadOnly(True)
        for comboBox in (self.ui.comboBox_assetUnits, self.ui.comboBox_portfolios, self.ui.comboBox_broker):
            _disable_combo_box(comboBox)
        disable_widget(self.ui.pushButton_create, "请输入/选择所有字段")
        # Kick off both fetches in the background; the callbacks re-enable
        # the corresponding combo boxes.
        Future(self, lambda c: list(c.brokers.values()), (client, ), self._update_brokers).run()
        Future(self, lambda c: list(c.asset_units.values()), (client, ), self._update_asset_units).run()
    @slot
    def _on_asset_unit_changed(self, index):
        """Reload the portfolios/product of the newly selected asset unit."""
        self.ui.lineEdit_products.setText("")
        if self._asset_units:
            _disable_combo_box(self.ui.comboBox_portfolios)
            Future(
                self, lambda a: (list(a.portfolios.values()), a.product), (self._asset_units[index], ),
                self._update_portfolios_and_product
            ).run()
    @slot
    def _on_create_button_clicked(self):
        """Assemble an Account from current form values and emit it."""
        account = Account(
            name=self.ui.lineEdit_name.text(),
            account=self.ui.lineEdit_account.text(),
            broker=self._brokers[self.ui.comboBox_broker.currentIndex()],
            portfolio=self._portfolios[self.ui.comboBox_portfolios.currentIndex()],
            asset_unit=self._asset_units[self.ui.comboBox_assetUnits.currentIndex()],
            product=self._product,
            client=self._client,
        )
        self.account_created.emit(account)
    def _update_brokers(self, brokers: Sequence[Broker]):
        """Future callback: fill the broker combo box."""
        self._brokers = brokers
        _enable_combo_box(self.ui.comboBox_broker, [b.name for b in brokers])
        self._check_form()
    def _update_asset_units(self, asset_units: Sequence[AssetUnit]):
        """Future callback: fill the asset-unit combo box (black-list filtered)."""
        self._asset_units = [a for a in asset_units if a.id not in self._asset_unit_black_list]
        _enable_combo_box(self.ui.comboBox_assetUnits, [a.name for a in self._asset_units])
        self._check_form()
    def _update_portfolios_and_product(self, result: Tuple[Sequence[Portfolio], Optional[Product]]):
        """Future callback: fill the portfolio box and the product field."""
        portfolios, product = result
        self._portfolios = [p for p in portfolios if p.id not in self._portfolio_black_list]
        self._product = product
        _enable_combo_box(self.ui.comboBox_portfolios, [p.name for p in self._portfolios])
        if product:
            self.ui.lineEdit_products.setText(product.name)
        self._check_form()
    @slot
    def _check_form(self, *_):
        """Enable the create button only when every field is valid."""
        account_text = self.ui.lineEdit_account.text()
        if not (
            self.ui.lineEdit_name.text() and
            account_text and
            self.ui.comboBox_broker.isEnabled() and
            self.ui.comboBox_assetUnits.currentIndex() >= 0 and
            self.ui.comboBox_portfolios.currentIndex() >= 0
        ):
            disable_widget(self.ui.pushButton_create, "请输入/选择所有字段")
        elif account_text in self._account_black_list:
            disable_widget(self.ui.pushButton_create, "资金账号重复")
        elif not re.match(r"^[0-9a-zA-Z]+$", account_text):
            disable_widget(self.ui.pushButton_create, "资金账号不合法")
        else:
            enabled_widget(self.ui.pushButton_create)
    @slot
    def _reset(self):
        """Re-run setup() with the current client and black lists."""
        self.setup(self._client, self._account_black_list, self._portfolio_black_list, self._asset_unit_black_list)
| StarcoderdataPython |
1755122 | import csv
import json
import random
# Convert a TSV of (prompt, response) pairs into a JSONL file of Mechanical
# Turk fluency-rating tasks.  NOTE(review): paths are hard-coded to a local
# machine.
input_file = "/home/justin/Eloquent/Datasets/idk/idkdatasettest_small.tsv"
output_file = "/home/justin/Eloquent/Datasets/idk/idk_dataset_fluency_turk_in.jsonl"
#task = "sentiment"
#taskVerb = "understood what emotional sentiment was conveyed by"
#task = "Respond"
#taskVerb = "understood what was asked by "
# Per-HIT payment parameters and time estimate (units presumably dollars
# and seconds — confirm against the Turk task definition).
bonus = 0.2
reward = 0.24
estimatedTime = 60
responses = []
# Keep only column 1 (the response text) of each TSV row; the prompt in
# column 0 is intentionally dropped for this fluency task.
with open(input_file, "r") as f:
    reader = csv.reader(f, delimiter="\t")
    i = 0
    for line in reader:
        #responses.append({"id": i, "prompt": line[0], "response": line[1]})
        responses.append({"id": i, "value": line[1]})
        i += 1
random.shuffle(responses)
# Emit one JSON object per batch of 16 shuffled responses.
# NOTE(review): integer division silently drops any trailing partial batch.
with open(output_file, "w+") as f:
    for i in range(len(responses)//16):
        inputs = responses[16*i: 16*i+16].copy()
        #input_line = {"inputs": inputs, "task": task, "taskVerb": taskVerb}
        json_object = {"input": inputs, "bonus": bonus, "reward": reward, "estimatedTime": estimatedTime}
        f.write(json.dumps(json_object, separators=(',', ':')) + "\n")
| StarcoderdataPython |
1734808 | #!/usr/bin/env python3
from os import listdir
from os.path import isfile, join
import argparse
import re
import string
from PIL import Image, ImageFont, ImageDraw
import pystache
def print_hex(val):
    """Format *val* as a zero-padded two-digit hex literal, e.g. 10 -> '0x0a'."""
    return format(val, '#04x')
def process_char(font, char):
    """Render *char* with *font* into a new 1-bit (mode '1') PIL image.

    The canvas is sized to the glyph's reported bounding box and drawn
    with fontmode '1' so no antialiasing is applied (pure 0/1 pixels).
    Removed: an unused ``font.getmask(char)`` call (dead computation).
    """
    char_size = font.getsize(char)
    char_image = Image.new('1', char_size, 0)
    d = ImageDraw.Draw(char_image)
    d.fontmode = '1'
    d.text((0, 0), char, font=font, fill=(1))
    return char_image
def generate_bin_image(image):
    """Convert a 1-bit image into a 2-D list of '0'/'1' character strings.

    *image* only needs ``getdata()`` (flat, row-major pixel sequence) and
    ``size`` ((width, height)).  Zero pixels map to '0', anything else to
    '1'.  Rewritten as a comprehension: the original pre-filled the grid
    with integer 0s and then overwrote every cell with a string anyway.
    """
    data = image.getdata()
    width, height = image.size
    return [['0' if data[row * width + col] == 0 else '1'
             for col in range(width)]
            for row in range(height)]
def bin_to_byte_data(min_width_bytes, min_height, image_bin):
    """Pack per-pixel rows of 0/1 values into rows of hex byte strings.

    Bits are grouped 8 at a time (the last group zero-padded on the
    right), each row is right-padded to ``min_width_bytes`` bytes, and
    all-zero rows are appended until there are ``min_height`` rows.
    """
    data = []
    for row in image_bin:
        bits = ''.join(str(px) for px in row)
        row_data = [print_hex(int(bits[k:k + 8].ljust(8, '0'), 2))
                    for k in range(0, len(bits), 8)]
        while len(row_data) < min_width_bytes:
            row_data.append(print_hex(0))
        data.append(row_data)
    while len(data) < min_height:
        data.append([print_hex(0) for _ in range(min_width_bytes)])
    return data
def bytes_to_string(image_bytes):
    """Render rows of hex-byte strings as comma-separated, CRLF-terminated lines."""
    return ''.join(', '.join(row) + ',\r\n' for row in image_bytes)
def generate_file(template, name, data):
    """Render mustache file *template* with *data* and write the result to *name*.

    Fix: use ``with`` blocks so both file handles are closed even if
    reading, rendering, or writing raises.
    """
    with open(template, 'r') as template_file:
        template_text = template_file.read()
    output_data = pystache.render(template_text, data)
    with open(name, 'w') as output_file:
        output_file.write(output_data)
# Setup arguments
parser = argparse.ArgumentParser(description='Process open-iconic png files to c sources')
parser.add_argument('--size', nargs=1, type=int, default=[16],
                    help='font size')
parser.add_argument('--font', nargs=1, default=["../resources/RobotoMono-Regular.ttf"],
                    help='ttf font file to generate from')
parser.add_argument('--template', nargs=1, default=['font-template.c'],
                    help='mustache template to fill')
parser.add_argument('--output', nargs=1, default=['font.c'],
                    help='output file name')
parser.add_argument('--start', nargs=1, type=int, default=[32],
                    help='start character')
parser.add_argument('--end', nargs=1, type=int, default=[127],
                    help='end character')
# Parse arguments
args = parser.parse_args()
font_file = args.font[0];
# Derive a C identifier from the font file name, e.g.
# "RobotoMono-Regular.ttf" -> "robotomono_regular".
font_path = font_file.split('/');
font_name = font_path[len(font_path) - 1].split('.')[0].replace('-', '_').lower();
font_size = args.size[0];
# Generate char list (end character is exclusive, like range()).
chars = [chr(c) for c in range(args.start[0], args.end[0])];
font = ImageFont.truetype(font=font_file, size=font_size);
images = {};
# Generate character images
for c in chars:
    images[c] = process_char(font, c);
#print("Created: " + str(len(images)) + " sprites");
# Determine minimum height and width
# NOTE(review): despite the names, these track the MAXIMUM glyph width
# and height seen across all characters (the common cell size).
min_width = 0;
min_height = 0;
for i in images:
    if min_width < images[i].size[0]:
        min_width = images[i].size[0];
    if min_height < images[i].size[1]:
        min_height = images[i].size[1];
#print("Minimum width: " + str(min_width) + " pixels");
#print("Minimum height: " + str(min_height) + " pixels");
# Calculate minimum common image width, rounded up to whole bytes.
min_width_bytes = int(min_width / 8);
if min_width % 8 != 0:
    min_width_bytes += 1;
# Generate image data
image_data = {};
for i in chars:
    image_data[i] = generate_bin_image(images[i]);
# Convert into bytes
image_bytes = {};
for i in chars:
    image_bytes[i] = bin_to_byte_data(min_width_bytes, min_height, image_data[i]);
# Generate character data strings
image_strings = {};
for i in chars:
    image_strings[i] = bytes_to_string(image_bytes[i]);
# Combine into structure for template use
template_data = {};
template_data['chars'] = [];
template_data['size'] = font_size;
template_data['name'] = font_name;
template_data['NAME'] = font_name.upper();
template_data['start'] = ord(chars[0]);
template_data['end'] = ord(chars[len(chars) - 1]);
template_data['count'] = args.end[0] - args.start[0];
template_data['char_height'] = min_height;
# char_width is in BYTES, not pixels.
template_data['char_width'] = min_width_bytes;
for i in chars:
    char_data = {};
    char_data['char'] = i;
    char_data['code'] = ord(i);
    char_data['bin'] = image_data[i];
    char_data['byte'] = image_bytes[i];
    char_data['string'] = image_strings[i];
    char_data['width'] = images[i].size[0];
    char_data['height'] = images[i].size[1];
    template_data['chars'].append(char_data);
#print(template_data);
generate_file(args.template[0], args.output[0], template_data);
| StarcoderdataPython |
5133096 | """Producer base-class providing common utilites and functionality"""
import logging
import time
from confluent_kafka import avro
from confluent_kafka.admin import AdminClient, NewTopic
from confluent_kafka.avro import AvroProducer
logger = logging.getLogger(__name__)
BROKER_URL = "PLAINTEXT://localhost:9092"
SCHEMA_REGISTRY_URL = "http://localhost:8081"
class Producer:
"""Defines and provides common functionality amongst Producers"""
# Tracks existing topics across all Producer instances
existing_topics = set([])
def __init__(
self,
topic_name,
key_schema,
value_schema=None,
num_partitions=1,
num_replicas=1,
):
"""Initializes a Producer object with basic settings"""
self.topic_name = topic_name
self.key_schema = key_schema
self.value_schema = value_schema
self.num_partitions = num_partitions
self.num_replicas = num_replicas
#
#
# TODO: Configure the broker properties below. Make sure to reference the project README
# and use the Host URL for Kafka and Schema Registry!
#
#
self.broker_properties = {
"bootstrap.servers": BROKER_URL,
"linger.ms": 1000,
"batch.num.messages": 100,
"compression.type": "lz4"
}
# If the topic does not already exist, try to create it
if self.topic_name not in Producer.existing_topics:
self.create_topic()
Producer.existing_topics.add(self.topic_name)
# TODO: Configure the AvroProducer
self.producer = AvroProducer({
'bootstrap.servers': BROKER_URL,
'schema.registry.url': SCHEMA_REGISTRY_URL,
'default.topic.config': {'acks': 'all'}},
default_value_schema=value_schema)
def create_topic(self):
"""Creates the producer topic if it does not already exist"""
#
#
# TODO: Write code that creates the topic for this producer if it does not already exist on
# the Kafka Broker.
client = AdminClient({"bootstrap.servers": BROKER_URL})
futures = client.create_topics([NewTopic(
topic=self.topic_nname,
num_partitions=self.num_partitions,
replication_factor=self.num_replicas
)]
)
for topic, future in futures.items():
try:
future.result()
logger.info(f"topic {topic} was successfully created")
except Exception as e:
print(e)
logger.info(f"topic '{topic}' creation kafka integration incomplete - skipping")
def time_millis(self):
return int(round(time.time() * 1000))
def close(self):
"""Prepares the producer for exit by cleaning up the producer"""
try:
self.producer.close()
except RuntimeError as re:
logger.info(f"ERROR: {re}\nproducer close incomplete - skipping")
def time_millis(self):
"""Use this function to get the key for Kafka Events"""
return int(round(time.time() * 1000))
| StarcoderdataPython |
104105 | """This is the Solution for Year 2021 Day 05"""
import itertools
from collections import Counter
from dataclasses import dataclass
from aoc.abstracts.solver import Answers, StrLines
@dataclass(frozen=True)
class Point:
"""Immutable point that will define x and y on 2D plane"""
x: int
y: int
@dataclass
class LineSegment:
"""Define a line object that takes a start and end point"""
start: Point
end: Point
@property
def slope(self) -> int:
return int((self.start.y - self.end.y) / (self.start.x - self.end.x))
@property
def intercept(self) -> int:
return int(self.start.y - (self.start.x * self.slope))
def is_vertical(self) -> bool:
return self.start.x == self.end.x
def is_horizontal(self) -> bool:
return self.start.y == self.end.y
def y_range(self) -> range:
coords = self.start.y, self.end.y
return range(min(coords), max(coords) + 1)
def x_range(self) -> range:
coords = self.start.x, self.end.x
return range(min(coords), max(coords) + 1)
def calculate_y(self, x: int) -> int:
return int(self.slope * x + self.intercept)
def parse_point(raw_point: str) -> Point:
"""Parse point from raw string"""
x, y = raw_point.split(",")
return Point(x=int(x), y=int(y))
def parse_lines(lines: StrLines) -> list[LineSegment]:
"""Parse raw lines into Lines and Points"""
parsed_lines = []
for raw_line in lines:
raw_start, raw_end = raw_line.split(" -> ")
start_point = parse_point(raw_start)
end_point = parse_point(raw_end)
line = LineSegment(start=start_point, end=end_point)
parsed_lines.append(line)
return parsed_lines
def get_horizontal_vertical_lines(lines: list[LineSegment]) -> list[LineSegment]:
"""Filter for only horizontal or vertical lines"""
return [line for line in lines if line.is_horizontal() or line.is_vertical()]
def get_point_segment(line: LineSegment) -> list[Point]:
"""Get a list of points in a given line"""
if line.is_vertical():
return [Point(x=line.start.x, y=y) for y in line.y_range()]
return [Point(x=x, y=line.calculate_y(x)) for x in line.x_range()]
def get_point_occurences(lines: list[LineSegment]) -> dict[Point, int]:
"""Count up the number of occurences for a given point"""
segment_points = (get_point_segment(line) for line in lines)
return Counter(itertools.chain.from_iterable(segment_points))
class Solver:
def __init__(self, data: str) -> None:
self.data = data
def _preprocess(self) -> StrLines:
return self.data.splitlines()
def _solve_part_one(self, lines: StrLines) -> int:
parsed_lines = parse_lines(lines)
filtered_lines = get_horizontal_vertical_lines(parsed_lines)
point_count = get_point_occurences(filtered_lines)
return sum(1 for n_occurences in point_count.values() if n_occurences >= 2)
def _solve_part_two(self, lines: StrLines) -> int:
parsed_lines = parse_lines(lines)
point_count = get_point_occurences(parsed_lines)
return sum(1 for n_occurences in point_count.values() if n_occurences >= 2)
def solve(self) -> Answers:
lines = self._preprocess()
ans_one = self._solve_part_one(lines)
ans_two = self._solve_part_two(lines)
return Answers(part_one=ans_one, part_two=ans_two)
| StarcoderdataPython |
6407897 | <reponame>newtonkiragu/asset-manager
from django.apps import AppConfig
class AssetManagerConfig(AppConfig):
name = 'asset_manager'
| StarcoderdataPython |
254572 | from pymel.core import *
import maya.cmds as cmds
import webbrowser
def standardWindow(windowName, title, buttons):
if len(buttons) == 0:
error('This window should have at least one button!')
if windowName == '': windowName = window(w=300, h=150, title=title)
elif window(windowName, exists=1):
showWindow(windowName)
return(windowName)
else: window(windowName, w=300, h=150, title=title, menuBar=True)
result = []
result.append(windowName)
form = formLayout(nd=100)
tab = tabLayout(tv=0, scr=0, cr=1)
result.append(columnLayout(adj=1))
setParent(form)
sep = separator(h=10)
for b in buttons: result.append(button(label=b))
formLayout(form, edit=1,
attachForm = [(tab, 'top', 10),
(tab, 'left', 5),
(tab, 'right', 5),
(sep, 'left', 5),
(sep, 'right', 5)],
attachControl = [(tab, 'bottom', 5, sep),
(sep, 'bottom', 5, result[2])],
attachNone = [(sep, 'top')])
formLayout(form, edit=1,
attachForm = [(result[2], 'left', 5),
(result[2], 'bottom', 5),
(result[-1], 'right', 5),
(result[-1], 'bottom', 5)],
attachNone = [(result[2], 'top'),
(result[-1], 'top')])
gapStep = 100 / len(buttons)
for i in range(3, len(result)):
formLayout(form, edit=1,
attachPosition = [(result[i-1], 'right', 2, gapStep*(i-2)),
(result[i], 'left', 2, gapStep*(i-2))],
attachForm = [(result[i], 'bottom', 5)])
return result
class CircularizeWindow(object):
def __init__(self):
self.flatten = False
self.autoEstimateRadius = True
self.radius = 1.0
self.twist = 0.0
self.strength = 1.0
# Load default values
if optionVar(exists='addCircularizeNodeFlatten'):
self.flatten = optionVar(q='addCircularizeNodeFlatten')
if optionVar(exists='addCircularizeNodeAuto'):
self.autoEstimateRadius = optionVar(q='addCircularizeNodeAuto')
if optionVar(exists='addCircularizeNodeRadius'):
self.radius = optionVar(q='addCircularizeNodeRadius')
if optionVar(exists='addCircularizeNodeTwist'):
self.twist = optionVar(q='addCircularizeNodeTwist')
if optionVar(exists='addCircularizeNodeStrength'):
self.strength = optionVar(q='addCircularizeNodeStrength')
self.hWin, cLayout, createButton, applyButton, closeButton = standardWindow('CircularizeWin', 'Circularize Options', ('Circularize', 'Apply', 'Close'))
menu(label='Edit')
menuItem(label='Reset Settings', c='win.reset()')
menu(label='Help', hm=True)
menuItem(label='View plugin documentation', c='webbrowser.open_new_tab("https://github.com/AndersElmholdt/AFETools")')
setParent(cLayout)
frameLayout(label='Settings')
columnLayout(adj=1)
self.flattenRadioGrp = radioButtonGrp(label='Depth control', nrb=2, labelArray2=('Flatten', 'Maintain depth'), sl=1 if self.flatten==True else 2)
self.strengthSlider = floatSliderGrp(f=True, label='Strength', min=0, fieldMinValue=0, max=1, fieldMaxValue=1, v=self.strength)
self.twistSlider = floatSliderGrp(f=True, label='Twist', min=-360, fieldMinValue=-360, max=360, fieldMaxValue=360, v=self.twist)
self.radiusSlider = floatSliderGrp(f=True, label='Radius', min=-0, fieldMinValue=0, max=5, fieldMaxValue=10000, v=self.radius, en=not self.autoEstimateRadius)
self.autoCheckbox = checkBoxGrp(label='Auto Estimate Radius', l1='', ncb=1, v1=self.autoEstimateRadius, onc='floatSliderGrp(win.radiusSlider, e=True, en=False)', ofc='floatSliderGrp(win.radiusSlider, e=True, en=True)')
button(createButton, e=1, c='win.createAction()')
button(applyButton, e=1, c='win.applyAction()')
button(closeButton, e=1, c='win.closeAction()')
def reset(self):
floatSliderGrp(self.strengthSlider, e=1, v=1)
floatSliderGrp(self.twistSlider, e=1, v=0)
floatSliderGrp(self.radiusSlider, e=1, v=1, max=5, fieldMaxValue=10000, en=False)
checkBoxGrp(self.autoCheckbox, e=1, v1=True)
radioButtonGrp(self.flattenRadioGrp, e=1, sl=2)
def createAction(self):
self.applyAction()
self.closeAction()
def applyAction(self):
self.updateValues()
optionVar(iv = ('addCircularizeNodeFlatten', 0 if self.flatten == False else 1))
optionVar(iv = ('addCircularizeNodeAuto', 0 if self.autoEstimateRadius == False else 1))
optionVar(fv = ('addCircularizeNodeRadius', self.radius))
optionVar(fv = ('addCircularizeNodeTwist', self.twist))
optionVar(fv = ('addCircularizeNodeStrength', self.strength))
if self.autoEstimateRadius == True:
cmds.addCircularizeNode(f=self.flatten, t=self.twist, s=self.strength)
else:
cmds.addCircularizeNode(f=self.flatten, t=self.twist, s=self.strength, r=self.radius)
def closeAction(self):
deleteUI(self.hWin)
def updateValues(self):
self.twist = floatSliderGrp(self.twistSlider, q=True, v=True)
self.radius = floatSliderGrp(self.radiusSlider, q=True, v=True)
self.strength = floatSliderGrp(self.strengthSlider, q=True, v=True)
self.autoEstimateRadius = checkBoxGrp(self.autoCheckbox, q=True, v1=True)
self.flatten = True if radioButtonGrp(self.flattenRadioGrp, q=True, sl=True) == 1 else False
if not window('CircularizeWin', exists=1):
global win
win = CircularizeWindow()
showWindow('CircularizeWin') | StarcoderdataPython |
201567 | <filename>cognitive/urls.py
# Copyright 2015 Cisco Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import patterns, include, url
from app import views, urls
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^admin/', include(admin.site.urls)),
url(r'^$', views.index),
url(r'^api/v1/', include(urls)),
url(r'^docs/', include('rest_framework_swagger.urls')),
)
| StarcoderdataPython |
1971997 | #!/usr/bin/env python3
from tkinter import *
import time
__author__ = '<NAME>'
__version__ = '0.1.0'
def guiInput(tkObj, promptText):
c = Canvas(tkObj)
c.pack()
confirmButton = Button(c, text='Confirm')
confirmButton.pack()
l = Label(c, text=promptText)
l.pack()
time.sleep(10)
print(guiInput(Tk(), 'Prompt')) | StarcoderdataPython |
8167775 | <reponame>rpratap-bot/ceph-qe-scripts
"""
Boot a VM, attach volume to the VM, detach from the VM. Cleanup the VM, volume instances
"""
from lib.nova import NovaAuth, NovaActions
from lib.glance import GlanceAuth, GlanceActions
from lib.cinder import CinderAuth, CinderVolumes
import lib.log as log
from lib.test_desc import AddTestInfo
from utils import wait, uuid
import time
import sys
class GlanceCycle(object):
def __init__(self, glance):
self.timer = wait.Wait()
self.glance_image = GlanceActions(glance.glance)
self.img = None
def image_create(self, name):
add_test_info.sub_test_info('1', 'Create glance image')
self.image = self.glance_image.upload_images(name=name)
assert self.image.status, 'Image creation failure'
log.info('image name: %s' % self.image.image.name)
self.timer.wait_for_state_change(self.image.image.status, 'queued')
img = self.glance_image.get_image(self.image.image)
self.img = img.image
log.info('Image created')
add_test_info.sub_test_completed_info()
return self.img
def image_delete(self):
add_test_info.sub_test_info('8', 'Delete glance image')
image_to_delete = self.glance_image.delete_image(self.img.id)
assert image_to_delete.execute, 'Image deletion failure'
self.timer.wait_for_state_change(self.image.image.status, 'active')
image_exists = self.glance_image.get_image(self.image.image)
if not image_exists.status:
log.info('Image deleted')
else:
log.error('Image status: %s' % image_exists.image.status)
raise AssertionError("Image still exists")
add_test_info.sub_test_completed_info()
class CinderCycle(object):
def __init__(self, cinder):
self.timer = wait.Wait()
self.cinder_vol = CinderVolumes(cinder.cinder)
self.volume = None
def vol_create(self, name, size):
add_test_info.sub_test_info('2', 'Create volume')
init_create_volume = self.cinder_vol.create_volume(name, size)
assert init_create_volume.status, "Volume create initialize error"
log.info('volume name: %s' % init_create_volume.vol.name)
self.timer.wait_for_state_change(init_create_volume.vol.status, 'creating')
volume = self.cinder_vol.get_volume(init_create_volume.vol)
self.volume = volume.volume
log.info('Volume exists')
add_test_info.sub_test_completed_info()
return self.volume
def delete_vol(self):
add_test_info.sub_test_info('6', 'Delete volume')
vol_delete = self.cinder_vol.delete_volume(self.volume)
assert vol_delete.execute, "volume delete initialize error"
time.sleep(10)
volume_exists = self.cinder_vol.get_volume(self.volume)
if not volume_exists.status:
log.info('volume deleted')
else:
log.error('volume status: %s' % volume_exists.volume.status)
raise AssertionError("volume still exists")
add_test_info.sub_test_completed_info()
class NovaCycle(object):
def __init__(self, nova):
self.timer = wait.Wait()
self.nova_server = NovaActions(nova.nova)
self.vm = None
self.attached_volume = None
def boot_server(self, image, name):
add_test_info.sub_test_info('3', 'Create VM')
vm = self.nova_server.boot_vm(image=image, name=name)
assert vm.status, 'Vm creation initialization error'
log.info('server name: %s' % vm.server.name)
self.timer.wait_for_state_change(vm.server.status, 'BUILD')
time.sleep(10)
self.vm = self.nova_server.vm_details(vm.server)
log.debug('status: %s' % self.vm.vm.status)
log.info('VM created')
add_test_info.sub_test_completed_info()
def attach_vol(self, volume, device):
add_test_info.sub_test_info('4', 'Attach volume to VM')
self.attached_volume = self.nova_server.attach_volume(self.vm.vm.id, volume=volume.id, device=device)
time.sleep(10)
assert self.attached_volume.status, "volume attach failed"
log.debug('volume %s attached to server %s' % (self.attached_volume.vol.id, self.vm.vm.name))
log.info('Volume attached to VM successfully')
add_test_info.sub_test_completed_info()
def detach_vol(self, volume):
add_test_info.sub_test_info('5', 'Detach volume to VM')
self.nova_server.detach_volume(self.vm.vm.id, volume=volume.id)
time.sleep(10)
log.debug('volume %s detached from server %s' %(self.attached_volume.vol.volumeId, self.vm.vm.name))
log.info('Volume detached successfully')
add_test_info.sub_test_completed_info()
def delete_server(self):
add_test_info.sub_test_info('7', 'Delete server')
vm_delete = self.nova_server.vm_delete(self.vm.vm.id)
assert vm_delete.execute, "Server delete initialize error"
time.sleep(5)
vm_exists = self.nova_server.vm_details(self.vm.vm)
if not vm_exists.status:
log.info('Server deleted')
else:
log.error('Server status: %s' % vm_exists.vm.status)
raise AssertionError("Server still exists")
add_test_info.sub_test_completed_info()
def exec_test():
uuid.set_env()
global add_test_info
add_test_info = AddTestInfo(9, 'Nova server create test')
try:
add_test_info.started_info()
nova = NovaAuth()
nova = nova.auth()
glance = GlanceAuth()
glance = glance.auth()
cinder = CinderAuth()
cinder = cinder.auth()
assert nova.status, "Nova authentication failed"
assert glance.status, "Glance authentication failed"
assert cinder.status, "Cinder authentication failed"
nova_cycle = NovaCycle(nova)
glance_cycle = GlanceCycle(glance)
volume_cycle = CinderCycle(cinder)
image = glance_cycle.image_create(name='testimg')
volume = volume_cycle.vol_create('testvol', 2)
nova_cycle.boot_server(image=image, name='testvm')
nova_cycle.attach_vol(volume, device='/dev/vdc')
nova_cycle.detach_vol(volume)
volume_cycle.delete_vol()
nova_cycle.delete_server()
glance_cycle.image_delete()
add_test_info.success_status('ok')
except AssertionError, e:
log.error(e)
add_test_info.failed_status('error')
sys.exit(1)
add_test_info.completed_info()
if __name__ == "__main__":
exec_test()
| StarcoderdataPython |
4814800 | <filename>drafts/models.py
import uuid
from random import shuffle
from typing import Optional, List
from django.contrib.auth.models import User
from django.db import models
from cubes.models import Cube
class Draft(models.Model):
uuid = models.UUIDField(unique=True, default=uuid.uuid4)
name = models.CharField(max_length=100)
current_round = models.IntegerField(default=0)
max_players = models.IntegerField(default=8)
cube = models.ForeignKey(Cube, null=True, default=None, on_delete=models.SET_NULL)
def __str__(self):
return self.name
def get_absolute_url(self):
from django.shortcuts import reverse
return reverse('draft-detail', args=[self.uuid])
def join(self, user: User) -> Optional['DraftEntry']:
try:
return self.entries.get(player_id=user.id)
except DraftEntry.DoesNotExist:
entries_count = self.entries.count()
if entries_count >= self.max_players:
return None
return DraftEntry.objects.create(draft=self, player=user)
def begin(self, add_bots=True) -> bool:
if self.entries.count() == 0 or self.current_round != 0:
return False
players: List[Optional[User]] = []
for entry in self.entries.all():
players.append(entry.player)
if add_bots:
while len(players) < self.max_players:
players.append(None)
shuffle(players)
for seat, player in enumerate(players):
DraftSeat.objects.create(draft=self, user=player, position=seat)
self.entries.all().delete()
if self.cube is not None:
packs = self.cube.generate_packs()
for seat in self.seats.all():
for i in range(1, self.cube.default_pack_count + 1):
pack = DraftPack.objects.create(draft=self, round_number=i, seat_number=seat.position)
self.current_round = 1
return True
def is_user_in_draft(self, user: User) -> bool:
for seat in self.seats.all():
if user == seat.user:
return True
return False
def get_seat_for_user(self, user: User) -> Optional['DraftSeat']:
try:
return self.seats.get(user=user)
except DraftSeat.DoesNotExist:
return None
class DraftEntry(models.Model):
"""
Represents a player's entry in a draft that has not yet started
"""
draft = models.ForeignKey(Draft, on_delete=models.CASCADE, related_name='entries')
player = models.ForeignKey(User, on_delete=models.CASCADE, related_name='entries')
def __str__(self):
return f'Entry for {self.player.username} in {self.draft}'
class DraftSeat(models.Model):
draft = models.ForeignKey(Draft, on_delete=models.CASCADE, related_name='seats')
user = models.ForeignKey(User, null=True, on_delete=models.SET_NULL)
position = models.IntegerField()
def __str__(self):
if self.user is None:
seat_name = 'Bot'
else:
seat_name = self.user.username
return f'Seat #{self.position} of {self.draft}: {seat_name}'
def get_pack_count(self) -> int:
return self.draft.packs.filter(seat_number=self.position).count()
def get_current_pack(self) -> Optional['DraftPack']:
packs = self.draft.packs.filter(seat_number=self.position).order_by('pick_number')
if packs.count() == 0:
return None
return packs[0]
def make_selection(self, card_id: str) -> bool:
current_pack = self.get_current_pack()
if current_pack is None:
return False
try:
selected_card = current_pack.cards.get(uuid=card_id)
except DraftCard.DoesNotExist:
return False
# Take the card out of its pack and place is in this seat's pool
selected_card.pack = None
selected_card.seat = self
# Now, move the pack and increment the pick number
if self.draft.current_round % 2 == 1: # Odd rounds pass left (inc)
current_pack.seat_number = (current_pack.seat_number + 1) % self.draft.max_players
else: # Even rounds pass right (decrement seat number)
current_pack.seat_number = (current_pack.seat_number - 1) % self.draft.max_players
current_pack.pick_number += 1
selected_card.save()
current_pack.save()
return True
class DraftPack(models.Model):
draft = models.ForeignKey(Draft, on_delete=models.CASCADE, related_name='packs')
round_number = models.IntegerField()
pick_number = models.IntegerField(default=1)
seat_number = models.IntegerField()
class DraftCard(models.Model):
uuid = models.UUIDField(db_index=True, default=uuid.uuid4)
pack = models.ForeignKey(DraftPack, null=True, on_delete=models.CASCADE, related_name='cards')
seat = models.ForeignKey(DraftSeat, null=True, default=None, on_delete=models.CASCADE, related_name='picks')
card_name = models.CharField(max_length=20)
| StarcoderdataPython |
6431910 | """Module grouping tests for the pydov.util.caching module."""
import datetime
import gzip
import os
import tempfile
from io import open
import time
import pytest
import pydov
from pydov.util.caching import (
PlainTextFileCache,
GzipTextFileCache,
)
from pydov.util.dovutil import build_dov_url
@pytest.fixture
def mp_remote_xml(monkeypatch):
"""Monkeypatch the call to get the remote Boring XML data.
Parameters
----------
monkeypatch : pytest.fixture
PyTest monkeypatch fixture.
"""
def _get_remote_data(*args, **kwargs):
with open('tests/data/types/boring/boring.xml', 'r') as f:
data = f.read()
if type(data) is not bytes:
data = data.encode('utf-8')
return data
monkeypatch.setattr(pydov.util.caching.AbstractFileCache,
'_get_remote', _get_remote_data)
@pytest.fixture
def plaintext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
Paramaters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
plaintext_cache = PlainTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = plaintext_cache
yield plaintext_cache
plaintext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def gziptext_cache(request):
"""Fixture for a temporary cache.
This fixture should be parametrized, with a list of parameters in the
order described below.
Paramaters
----------
max_age : datetime.timedelta
The maximum age to use for the cache.
"""
orig_cache = pydov.cache
if len(request.param) == 0:
max_age = datetime.timedelta(seconds=1)
else:
max_age = request.param[0]
gziptext_cache = GzipTextFileCache(
cachedir=os.path.join(tempfile.gettempdir(), 'pydov_tests'),
max_age=max_age)
pydov.cache = gziptext_cache
yield gziptext_cache
gziptext_cache.remove()
pydov.cache = orig_cache
@pytest.fixture
def nocache():
"""Fixture to temporarily disable caching."""
orig_cache = pydov.cache
pydov.cache = None
yield
pydov.cache = orig_cache
class TestPlainTextFileCacheCache(object):
"""Class grouping tests for the pydov.util.caching.PlainTextFileCache
class."""
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_clean(self, plaintext_cache, mp_remote_xml):
"""Test the clean method.
Test whether the cached file and the cache directory are nonexistent
after the clean method has been called.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
plaintext_cache.clean()
assert os.path.exists(cached_file)
assert os.path.exists(plaintext_cache.cachedir)
time.sleep(1.5)
plaintext_cache.clean()
assert not os.path.exists(cached_file)
assert os.path.exists(plaintext_cache.cachedir)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_remove(self, plaintext_cache, mp_remote_xml):
"""Test the remove method.
Test whether the cache directory is nonexistent after the remove
method has been called.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
plaintext_cache.remove()
assert not os.path.exists(cached_file)
assert not os.path.exists(plaintext_cache.cachedir)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_save(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_reuse(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache and reused in a
second function call.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(0.5)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we didn't redownload the file:
assert os.path.getmtime(cached_file) == first_download_time
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_get_invalid(self, plaintext_cache, mp_remote_xml):
"""Test the get method.
Test whether the document is saved in the cache not reused if the
second function call is after the maximum age of the cached file.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
first_download_time = os.path.getmtime(cached_file)
time.sleep(1.5)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
# assure we did redownload the file, since original is invalid now:
assert os.path.getmtime(cached_file) > first_download_time
@pytest.mark.parametrize('plaintext_cache', [[]],
indirect=['plaintext_cache'])
def test_save_content(self, plaintext_cache, mp_remote_xml):
"""Test whether the data is saved in the cache.
Test if the contents of the saved document are the same as the
original data.
Parameters
----------
plaintext_cache : pytest.fixture providing
pydov.util.caching.PlainTextFileCache
PlainTextFileCache using a temporary directory and a maximum age
of 1 second.
mp_remote_xml : pytest.fixture
Monkeypatch the call to the remote DOV service returning an XML
document.
"""
cached_file = os.path.join(
plaintext_cache.cachedir, 'boring', '2004-103984.xml')
plaintext_cache.clean()
assert not os.path.exists(cached_file)
plaintext_cache.get(
build_dov_url('data/boring/2004-103984.xml'))
assert os.path.exists(cached_file)
with open('tests/data/types/boring/boring.xml', 'r',
encoding='utf-8') as ref:
ref_data = ref.read()
with open(cached_file, 'r', encoding='utf-8') as cached:
cached_data = cached.read()
assert cached_data == ref_data
@pytest.mark.parametrize('plaintext_cache', [[]],
                         indirect=['plaintext_cache'])
def test_reuse_content(self, plaintext_cache, mp_remote_xml):
    """Test whether the saved data is reused.

    Test if the contents returned by the cache are the same as the
    original data.

    Parameters
    ----------
    plaintext_cache : pytest.fixture providing
            pydov.util.caching.PlainTextFileCache
        PlainTextFileCache using a temporary directory and a maximum age
        of 1 second.
    mp_remote_xml : pytest.fixture
        Monkeypatch the call to the remote DOV service returning an XML
        document.
    """
    cached_file = os.path.join(
        plaintext_cache.cachedir, 'boring', '2004-103984.xml')
    plaintext_cache.clean()
    assert not os.path.exists(cached_file)
    plaintext_cache.get(
        build_dov_url('data/boring/2004-103984.xml'))
    assert os.path.exists(cached_file)
    # read the reference document explicitly as UTF-8 (consistent with
    # test_save_content; the platform default encoding may differ)
    with open('tests/data/types/boring/boring.xml', 'r',
              encoding='utf-8') as ref:
        ref_data = ref.read().encode('utf-8')
    # a second get() must serve the cached bytes, equal to the reference
    cached_data = plaintext_cache.get(
        build_dov_url('data/boring/2004-103984.xml'))
    assert cached_data == ref_data
@pytest.mark.parametrize('plaintext_cache', [[]],
                         indirect=['plaintext_cache'])
def test_return_type(self, plaintext_cache, mp_remote_xml):
    """Test the return type of the get method.

    Test whether the get method returns the data in the same datatype (
    i.e. bytes) regardless of the data was cached or not.

    Parameters
    ----------
    plaintext_cache : pytest.fixture providing
            pydov.util.caching.PlainTextFileCache
        PlainTextFileCache using a temporary directory and a maximum age
        of 1 second.
    mp_remote_xml : pytest.fixture
        Monkeypatch the call to the remote DOV service returning an XML
        document.
    """
    cached_file = os.path.join(
        plaintext_cache.cachedir, 'boring', '2004-103984.xml')
    plaintext_cache.clean()
    assert not os.path.exists(cached_file)
    # first call downloads the document: must return bytes
    ref_data = plaintext_cache.get(
        build_dov_url('data/boring/2004-103984.xml'))
    assert type(ref_data) is bytes
    assert os.path.exists(cached_file)
    # second call serves from the cache: must also return bytes
    cached_data = plaintext_cache.get(
        build_dov_url('data/boring/2004-103984.xml'))
    assert type(cached_data) is bytes
class TestGzipTextFileCacheCache(object):
    """Class grouping tests for the pydov.util.caching.GzipTextFileCache
    class."""

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_clean(self, gziptext_cache, mp_remote_xml):
        """Test the clean method.

        Test whether the cached file is nonexistent (while the cache
        directory remains) after the clean method has been called on an
        expired entry.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        # a fresh entry is younger than the maximum age: clean() keeps it
        gziptext_cache.clean()
        assert os.path.exists(cached_file)
        assert os.path.exists(gziptext_cache.cachedir)
        # wait past the 1 second maximum age: clean() now removes the entry
        time.sleep(1.5)
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        assert os.path.exists(gziptext_cache.cachedir)

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_remove(self, gziptext_cache, mp_remote_xml):
        """Test the remove method.

        Test whether the cache directory is nonexistent after the remove
        method has been called.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        # remove() deletes the whole cache directory, regardless of age
        gziptext_cache.remove()
        assert not os.path.exists(cached_file)
        assert not os.path.exists(gziptext_cache.cachedir)

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_get_save(self, gziptext_cache, mp_remote_xml):
        """Test the get method.

        Test whether the document is saved in the cache.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_get_reuse(self, gziptext_cache, mp_remote_xml):
        """Test the get method.

        Test whether the document is saved in the cache and reused in a
        second function call.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        first_download_time = os.path.getmtime(cached_file)
        # stay within the 1 second maximum age
        time.sleep(0.5)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        # assure we didn't redownload the file:
        assert os.path.getmtime(cached_file) == first_download_time

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_get_invalid(self, gziptext_cache, mp_remote_xml):
        """Test the get method.

        Test whether the document saved in the cache is not reused if the
        second function call is after the maximum age of the cached file.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        first_download_time = os.path.getmtime(cached_file)
        # wait past the 1 second maximum age so the entry expires
        time.sleep(1.5)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        # assure we did redownload the file, since original is invalid now:
        assert os.path.getmtime(cached_file) > first_download_time

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_save_content(self, gziptext_cache, mp_remote_xml):
        """Test whether the data is saved in the cache.

        Test if the contents of the saved document are the same as the
        original data.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        with open('tests/data/types/boring/boring.xml', 'r',
                  encoding='utf-8') as ref:
            ref_data = ref.read()
        # the cached file is gzip-compressed UTF-8 text
        with gzip.open(cached_file, 'rb') as cached:
            cached_data = cached.read().decode('utf-8')
        assert cached_data == ref_data

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_reuse_content(self, gziptext_cache, mp_remote_xml):
        """Test whether the saved data is reused.

        Test if the contents returned by the cache are the same as the
        original data.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert os.path.exists(cached_file)
        # read the reference document explicitly as UTF-8 (consistent with
        # test_save_content; the platform default encoding may differ)
        with open('tests/data/types/boring/boring.xml', 'r',
                  encoding='utf-8') as ref:
            ref_data = ref.read().encode('utf-8')
        cached_data = gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert cached_data == ref_data

    @pytest.mark.parametrize('gziptext_cache', [[]],
                             indirect=['gziptext_cache'])
    def test_return_type(self, gziptext_cache, mp_remote_xml):
        """Test the return type of the get method.

        Test whether the get method returns the data in the same datatype (
        i.e. bytes) regardless of the data was cached or not.

        Parameters
        ----------
        gziptext_cache : pytest.fixture providing
                pydov.util.caching.GzipTextFileCache
            GzipTextFileCache using a temporary directory and a maximum age
            of 1 second.
        mp_remote_xml : pytest.fixture
            Monkeypatch the call to the remote DOV service returning an XML
            document.
        """
        cached_file = os.path.join(
            gziptext_cache.cachedir, 'boring', '2004-103984.xml.gz')
        gziptext_cache.clean()
        assert not os.path.exists(cached_file)
        # first call downloads the document: must return bytes
        ref_data = gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert type(ref_data) is bytes
        assert os.path.exists(cached_file)
        # second call serves from the cache: must also return bytes
        cached_data = gziptext_cache.get(
            build_dov_url('data/boring/2004-103984.xml'))
        assert type(cached_data) is bytes
| StarcoderdataPython |
5101618 | <filename>pipeline/reach-es-extractor/refparse/utils/file_manager.py
import pandas as pd
import gzip
import logging
import os
import pickle
import tempfile
import json
from .s3 import S3
from refparse.settings import settings
class FileManager:
    """Load csv, json and pickle files from local disk or from S3.

    Parameters
    ----------
    mode : str
        'LOCAL' (default) reads from the local filesystem, 'S3' reads from
        the given S3 bucket.
    bucket : str
        Name of the S3 bucket (only used when mode == 'S3').
    """

    def __init__(self, mode='LOCAL', bucket=settings.BUCKET):
        self.mode = mode
        self.logger = settings.logger
        self.logger.setLevel(logging.INFO)
        if self.mode == 'S3':
            self.s3 = S3(bucket)
        # dispatch table mapping file type to the matching loader method
        self.loaders = {
            'csv': self.load_csv_file,
            'json': self.load_json_file,
            'pickle': self.load_pickle_file,
        }

    def to_row(self, line, lineno, scraping_columns):
        """Parse one JSON line, keeping only the keys in scraping_columns.

        Logs and re-raises any parsing error together with the offending
        line number and content.
        """
        try:
            return {
                k: v for k, v in json.loads(line).items()
                if k in scraping_columns
            }
        except Exception as e:
            self.logger.error(
                'Error on line %d: exception=%s line=%r',
                lineno, e, line)
            raise

    def get_scraping_results(
            self, file_name, file_prefix,
            scraping_columns=('title', 'file_hash', 'sections', 'uri', 'metadata')
    ):
        """Takes a scraping result-json to return it cleared of its unused
        parts, as a pandas DataFrame. This function is used instead of the
        others because of the size of the scraper result files, which requires
        a tempfile and some field filtering.

        In: file_name: the name of the json file
            file_prefix: the path to the file (excluding the file name)
        Out: A pandas.DataFrame containing the json's interesting items
        """
        if self.mode == 'S3':
            with tempfile.TemporaryFile() as tf:
                # If we don't have the filename, take the last file
                if not file_name:
                    file_path = self.s3._get_last_modified_file_key(
                        file_prefix
                    )
                else:
                    file_path = os.path.join(file_prefix, file_name)
                self.s3.get(file_path, tf)
                tf.seek(0)
                # Detect gzip from the resolved path: file_name may be
                # empty/None when we fell back to the last-modified key,
                # in which case checking file_name would be wrong (or raise).
                if file_path.endswith('.gz'):
                    with gzip.GzipFile(fileobj=tf, mode='r') as text_tf:
                        rows = (
                            self.to_row(line, lineno, scraping_columns)
                            for lineno, line in enumerate(text_tf)
                        )
                        # consume the generator while the file is still open
                        return pd.DataFrame(rows)
                else:
                    rows = (
                        self.to_row(
                            line, lineno, scraping_columns
                        ) for lineno, line in enumerate(tf)
                    )
                    return pd.DataFrame(rows)
        return self._get_from_local(file_prefix, file_name, 'json')

    def get_file(self, file_name, file_prefix, file_type):
        """Load a file of the given type ('csv', 'json' or 'pickle') from
        S3 or local storage depending on the configured mode."""
        if self.mode == 'S3':
            with tempfile.TemporaryFile() as tf:
                return self._get_from_s3(file_prefix, file_name, file_type, tf)
        return self._get_from_local(file_prefix, file_name, file_type)

    def _get_from_s3(self, file_prefix, file_name, file_type, tf):
        """Download the file into the given temporary file and load it."""
        # If we don't have the filename, take the last file
        if not file_name:
            file_path = self.s3._get_last_modified_file_key(file_prefix)
        else:
            file_path = os.path.join(file_prefix, file_name)
        self.s3.get(file_path, tf)
        tf.seek(0)
        self.logger.info('Using %s file from S3', file_path)
        # check the resolved path (file_name may be empty/None, see above)
        if file_path.endswith('.gz'):
            with gzip.GzipFile(fileobj=tf) as text_tf:
                return self.loaders[file_type](text_tf)
        else:
            return self.loaders[file_type](tf)

    def _get_from_local(self, file_prefix, file_name, file_type):
        """Load the file from the local filesystem."""
        file_path = os.path.join(file_prefix, file_name)
        with open(file_path, 'rb') as file_content:
            self.logger.info('Using %s file from local storage', file_path)
            data = self.loaders[file_type](file_content)
        return data

    def load_csv_file(self, file_content):
        """Takes the path and name of a csv file and returns its content."""
        raw_text_data = pd.read_csv(file_content)
        return raw_text_data

    def load_json_file(self, temp_file):
        """Takes the path and name of a json file and returns its content."""
        raw_text_data = pd.read_json(temp_file, lines=True)
        return raw_text_data

    def load_pickle_file(self, file_content):
        """Load a pickle file from a given path and file name and returns the
        unpickled file.
        """
        unpickled_file = pickle.loads(file_content.read())
        return unpickled_file
| StarcoderdataPython |
1764081 | from jumping_number import jumping_number
from jumping_number import jumping_number2
def test(benchmark):
    # Benchmark the original implementation and verify its result.
    result = benchmark(jumping_number, 1)
    assert result == "Jumping!!"
def test2(benchmark):
    # Benchmark the alternative implementation and verify its result.
    result = benchmark(jumping_number2, 1)
    assert result == "Jumping!!"
# pytest-benchmark results for the two implementations, kept for reference.
# (Previously embedded between unbalanced quote runs, which left an
# unterminated string literal and made the file fail to parse.)
#
# -------------------------------------------------------------------------------------- benchmark: 2 tests -----------------------------------------------------------------------------------
# Name (time in us)         Min                 Max               Mean             StdDev             Median                IQR            Outliers  OPS (Kops/s)            Rounds  Iterations
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
# test2                  2.0526 (1.0)      813.2575 (1.0)       3.7276 (1.0)       3.9965 (1.0)       2.8737 (1.0)       2.4632 (1.0)      1079;926      268.2676 (1.0)      105908           1
# test                  19.7054 (9.60)   2,278.4346 (2.80)     33.8293 (9.08)     33.6839 (8.43)     32.4318 (11.29)    17.2422 (7.00)      165;182       29.5602 (0.11)       5679           1
# ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
#
# Legend:
#   Outliers: 1 Standard Deviation from Mean; 1.5 IQR (InterQuartile Range) from 1st Quartile and 3rd Quartile.
#   OPS: Operations Per Second, computed as 1 / Mean
# ================================================================================= 2 passed in 3.48 seconds =================================================================================
3229957 | from guizero import App, ButtonGroup
def selected():
    # Echo the current selection of both button groups, space-separated.
    print(f"{choice.value} {choice2.value}")
# Build the GUI: two button groups that both print the combined selection
# whenever either one changes.
app = App()
choice = ButtonGroup(app, options=["cheese", "ham", "salad"], command=selected)
# You can use specific values for the button group by passing them as a 2d list.
# choice = ButtonGroup(app, options=[["cheese", "c"], ["ham", "h"], ["salad", "s"]], selected="h", command=selected)
# Options can also be added one at a time after construction.
choice2 = ButtonGroup(app, command=selected)
choice2.append("sandwich")
choice2.append("salad")
# Enter the GUI event loop (blocks until the window is closed).
app.display()
9787973 | <filename>stimuli/Python/one_file_per_item/en/56_# str_seq 2.py
# Print the extension of a file name: the component after the final '.'.
filename = "alphabet.java"
modified = filename.rsplit(".")
extension = modified[-1]
print(extension)
| StarcoderdataPython |
1997266 | # Copyright 2021 VMware, Inc.
# SPDX-License-Identifier: Apache-2.0
from unittest.mock import MagicMock
import pytest
from vdk.api.plugin.connection_hook_spec import (
ConnectionHookSpec,
)
from vdk.internal.builtin_plugins.connection.impl.router import ManagedConnectionRouter
from vdk.internal.builtin_plugins.connection.managed_connection_base import (
ManagedConnectionBase,
)
from vdk.internal.builtin_plugins.connection.pep249.interfaces import PEP249Connection
from vdk.internal.core.config import Configuration
from vdk.internal.core.errors import VdkConfigurationError
def managed_connection_router():
    """Build a ManagedConnectionRouter with a single 'TEST_DB' factory.

    Returns a tuple of (router, the underlying PEP249 connection mock,
    the configuration mock) so tests can adjust configuration and verify
    which connection is handed back.
    """
    config = MagicMock(spec=Configuration)
    underlying = MagicMock(spec=PEP249Connection)

    class _TestManagedConnection(ManagedConnectionBase):
        # Managed connection whose _connect always yields the shared mock.
        def _connect(self) -> PEP249Connection:
            return underlying

    router = ManagedConnectionRouter(config, MagicMock(spec=ConnectionHookSpec))
    router.add_open_connection_factory_method(
        "TEST_DB", lambda: _TestManagedConnection()
    )
    return router, underlying, config
def test_router_open_connection():
    """Opening a registered database type yields the underlying connection."""
    router, expected, _ = managed_connection_router()
    managed = router.open_connection("TEST_DB")
    assert managed.connect() == expected
def test_router_raw_connection():
    """A factory returning a raw (unmanaged) PEP249 connection still works."""
    config = MagicMock(spec=Configuration)
    router = ManagedConnectionRouter(config, MagicMock(spec=ConnectionHookSpec))
    raw = MagicMock(spec=PEP249Connection)
    router.add_open_connection_factory_method("RAW_DB", lambda: raw)
    managed = router.open_connection("RAW_DB")
    assert managed.connect() == raw
def test_router_open_connection_closed():
    """A connection can be opened again after it has been closed."""
    router, expected, _ = managed_connection_router()
    router.open_connection("TEST_DB").close()
    reopened = router.open_connection("TEST_DB")
    assert reopened.connect() == expected
def test_router_no_such_connection():
    """Requesting an unregistered database type raises VdkConfigurationError."""
    router, _, _ = managed_connection_router()
    with pytest.raises(VdkConfigurationError):
        router.open_connection("NO_SUCH")
def test_router_open_default_connection():
    """open_default_connection uses the configured default database type."""
    router, expected, config = managed_connection_router()
    config.get_value.return_value = "TEST_DB"
    default_conn = router.open_default_connection()
    assert default_conn.connect() == expected
def test_router_open_default_connection_no_conf():
    """With no configured default, open_default_connection still resolves
    the single registered connection."""
    router, expected, config = managed_connection_router()
    config.get_value.return_value = None
    default_conn = router.open_default_connection()
    assert default_conn.connect() == expected
| StarcoderdataPython |
4818940 | import os
import argparse
import warnings
import numpy as np
import tensorflow as tf
import scipy.stats as sps
import tensorflow_probability as tfp
import seaborn as sns
from matplotlib import pyplot as plt
from callbacks import RegressionCallback
from regression_data import generate_toy_data
# workaround: https://github.com/tensorflow/tensorflow/issues/34888
gpus = tf.config.experimental.list_physical_devices('GPU')
if len(gpus) > 0:
    # Enable memory growth on the first GPU so TF allocates memory on demand
    # instead of reserving all of it up front.
    tf.config.experimental.set_memory_growth(gpus[0], True)
def softplus_inverse(x):
    """Inverse of tf.nn.softplus: returns y such that softplus(y) == x.

    Uses the identity log(exp(x) - 1) = x + log(1 - exp(-x)), which is
    numerically stable for large x, where the naive form overflows exp(x).
    Valid for x > 0 (the range of softplus).
    """
    return x + tf.math.log(-tf.math.expm1(-x))
def neural_network(d_in, d_hidden, f_hidden, d_out, f_out=None, name=None):
    """Build a dense network with a single hidden layer.

    Parameters
    ----------
    d_in : int
        Input dimensionality.
    d_hidden : int
        Width of the hidden layer.
    f_hidden : str or callable
        Activation for the hidden layer.
    d_out : int
        Output dimensionality.
    f_out : str or callable, optional
        Activation for the output layer (None = linear).
    name : str, optional
        Name for the Sequential model; also suffixes the layer names.

    Returns
    -------
    tf.keras.Sequential
    """
    nn = tf.keras.Sequential(name=name)
    nn.add(tf.keras.layers.InputLayer(d_in))
    nn.add(tf.keras.layers.Dense(d_hidden, f_hidden, name=f'layer1{name}'))
    nn.add(tf.keras.layers.Dense(d_out, f_out, name=f'layer2{name}'))
    return nn
class VariationalNormalRegression(tf.keras.Model):
    """Base class for heteroscedastic Normal regression with a variational
    posterior over the per-point noise precision lambda.

    Subclasses must provide the mean network ``self.mu``, the variational
    posterior ``qp(x)``, the prior/KL computation ``variational_family(x)``
    and ``expected_log_lambda(x)``.
    """

    def __init__(self, prior_type, y_mean, y_var, num_mc_samples):
        super(VariationalNormalRegression, self).__init__()
        assert isinstance(prior_type, str)
        # NOTE(review): a '_poops' suffix on prior_type toggles appending the
        # fixed (a, b) prior component to the mixture in the subclasses —
        # confirm the intended meaning of the flag.
        poops = len(prior_type.split('_poops')) > 1
        prior_type = prior_type.split('_poops')[0]
        assert prior_type in {'mle', 'standard', 'vamp',
                              'vamp_uniform', 'vamp_trainable', 'vbem'}
        assert not poops or prior_type in {'vamp', 'vamp_trainable', 'vbem'}
        assert isinstance(num_mc_samples, int) and num_mc_samples > 0
        # save configuration
        self.prior_type = prior_type
        self.poops = poops
        # whitening statistics of the training targets
        self.y_mean = tf.constant(y_mean, dtype=tf.float32)
        self.y_var = tf.constant(y_var, dtype=tf.float32)
        self.y_std = tf.sqrt(self.y_var)
        # number of MC samples used for KL / log-likelihood estimates
        self.num_mc_samples = num_mc_samples
        # numerical-stability offsets (currently disabled: both zero)
        self.epsilon_p = tf.constant(0.0, dtype=tf.float32)
        self.epsilon_q = tf.constant(0.0, dtype=tf.float32)

    def px(self, mean, precision):
        """Likelihood p(y|mean, precision) as an independent Normal."""
        px = tfp.distributions.Normal(
            loc=mean, scale=1 / (tf.sqrt(precision) + self.epsilon_p))
        return tfp.distributions.Independent(px)

    @staticmethod
    def ll(y, mu, expected_lambda, expected_log_lambda):
        """Expected Normal log likelihood under q(lambda), summed over the
        output dimensions."""
        ll = 0.5 * (expected_log_lambda - tf.math.log(2 * np.pi)
                    - (y - mu) ** 2 * expected_lambda)
        return tf.reduce_sum(ll, axis=-1)

    def whiten(self, y, mu, expected_lambda, expected_log_lambda):
        """Map y to the whitened (zero-mean, unit-variance) space; the model
        parameters are already in that space and pass through unchanged."""
        y = (y - self.y_mean) / self.y_std
        return y, mu, expected_lambda, expected_log_lambda

    def de_whiten(self, y, mu, expected_lambda, expected_log_lambda):
        """Map the model parameters back to the original y scale; y passes
        through unchanged."""
        mu = mu * self.y_std + self.y_mean
        expected_lambda = expected_lambda / self.y_var
        expected_log_lambda = expected_log_lambda - tf.math.log(self.y_var)
        return y, mu, expected_lambda, expected_log_lambda

    def variational_objective(self, x, y):
        """Compute the ELBO, register it as the (negated) training loss and
        attach reporting metrics."""
        # run mean network
        mu = self.mu(x)
        # run variational family
        qp, dkl = self.variational_family(x)
        # variational variance log likelihood E_{q(lambda|alpha(x), beta(x))}[log p(y|mu(x), lambda)]
        expected_log_lambda = self.expected_log_lambda(x)
        ll = self.ll(*self.whiten(y, mu, qp.mean(), expected_log_lambda))
        # debug prints, kept for reference:
        # tf.print('mu\n', self.mu(x).dtype)
        # tf.print('alpha\n', self.alpha(x))
        # tf.print('beta\n', self.beta(x))
        # tf.print('dkl\n', tf.math.reduce_mean(dkl))
        # tf.print('ll\n', tf.math.reduce_mean(ll))
        # # fixed-variance log likelihood
        # ll_fv = self.px(mu, 0.25).log_prob((y - self.y_mean) / self.y_std)
        # evidence lower bound
        elbo = ll - dkl
        # compute adjusted log likelihood of non-scaled y using de-whitened model parameter
        ll_adjusted = self.ll(
            *self.de_whiten(y, mu, qp.mean(), expected_log_lambda))
        # compute squared error for reporting purposes
        # NOTE(review): 'MAE' below is the per-sample Euclidean error norm,
        # not a per-dimension mean absolute error.
        error_dist = tf.norm(y - (mu * self.y_std + self.y_mean), axis=-1)
        squared_error = error_dist ** 2
        # add metrics for call backs
        self.add_metric(elbo, name='ELBO', aggregation='mean')
        self.add_metric(ll, name='LL', aggregation='mean')
        self.add_metric(dkl, name='KL', aggregation='mean')
        self.add_metric(ll_adjusted, name='LL (adjusted)', aggregation='mean')
        self.add_metric(error_dist, name='MAE', aggregation='mean')
        self.add_metric(squared_error, name='MSE', aggregation='mean')
        # add minimization objective
        self.add_loss(-tf.reduce_mean(elbo))

    def posterior_predictive_mean(self, x):
        """Posterior predictive mean on the original (de-whitened) y scale."""
        return self.mu(x) * self.y_std + self.y_mean

    def posterior_predictive_std(self, x, num_mc_samples=2000):
        """MC estimate of the posterior predictive standard deviation on the
        original y scale: 1/sqrt(E_q[lambda]) * y_std."""
        # Correct
        prec = tf.reduce_mean(self.qp(x).sample(num_mc_samples), axis=0)
        return 1 / tf.sqrt(prec + self.epsilon_p) * self.y_std
        # NOTE(review): the return below is unreachable dead code. It is the
        # earlier formulation that averaged 1/sqrt(lambda) samples instead of
        # averaging lambda first (and added epsilon_p outside the sqrt) —
        # remove it or re-enable deliberately.
        # Original way
        return tf.reduce_mean(1 / (tf.sqrt(self.qp(x).sample(num_mc_samples)) + self.epsilon_p), axis=0) * self.y_std

    def posterior_predictive_sample(self, x):
        """Draw one sample from the posterior predictive distribution.

        NOTE(review): the noise is shaped like the input x, which assumes
        d_in == d_out (true for this 1-D toy script) — confirm for other uses.
        """
        return self.posterior_predictive_mean(x) + self.posterior_predictive_std(x) * tf.random.normal(tf.shape(x))

    def posterior_predictive_log_likelihood(self, x, y, exact=True):
        """Posterior predictive log likelihood of (x, y) on the original
        y scale, either via the closed-form expectation (exact=True) or an
        MC estimate over precision samples."""
        qp = self.qp(x)
        if exact:
            if self.epsilon_p != 0:
                warnings.warn(
                    'exact method is approximate since it doesnt account for the eps > 0 in p(x|mu,lambda)')
            ll = tf.reduce_mean(
                self.ll(*self.de_whiten(y, self.mu(x), qp.mean(), self.expected_log_lambda(x))))
        else:
            precision_samples = qp.sample(
                sample_shape=self.num_mc_samples) / self.y_var
            mu = self.mu(x) * self.y_std + self.y_mean
            ll = tf.reduce_mean(tf.map_fn(lambda p: self.px(
                mu, p).log_prob(y), precision_samples))
        return ll

    def call(self, inputs, **kwargs):
        """Keras entry point: registers the loss/metrics via add_loss and
        returns a dummy scalar (the real objective is added as a loss)."""
        self.variational_objective(x=inputs['x'], y=inputs['y'])
        return tf.constant(0.0, dtype=tf.float32)
class GammaNormalRegression(VariationalNormalRegression):
    """Normal likelihood with a Gamma variational posterior over the noise
    precision: q(lambda|x) = Gamma(alpha(x), beta(x))."""

    def __init__(self, d_in, d_hidden, f_hidden, d_out, prior, y_mean, y_var, a=None, b=None, u=None, k=None, n_mc=1):
        super(GammaNormalRegression, self).__init__(prior, y_mean, y_var, n_mc)
        assert isinstance(d_in, int) and d_in > 0
        assert isinstance(d_hidden, int) and d_hidden > 0
        assert isinstance(d_out, int) and d_out > 0
        # give the model a name
        self.type = 'Gamma-Normal'
        # save fixed prior parameters
        self.a = tf.constant([a] * d_out, dtype=tf.float32)
        self.b = tf.constant([b] * d_out, dtype=tf.float32)
        if self.prior_type == 'standard':
            # set prior for precision
            self.pp = tfp.distributions.Gamma(
                concentration=self.a, rate=self.b)
            self.pp = tfp.distributions.Independent(
                self.pp, reinterpreted_batch_ndims=1)
        elif 'vamp' in self.prior_type:
            # pseudo-inputs (trainable only for the 'vamp_trainable' variant)
            trainable = 'trainable' in self.prior_type
            self.u = tf.Variable(
                initial_value=u, dtype=tf.float32, trainable=trainable, name='u')
        elif self.prior_type == 'vbem':
            # trainable prior parameters for precision
            u = tf.random.uniform(
                shape=(k, d_out), minval=-3, maxval=3, dtype=tf.float32)
            v = tf.random.uniform(
                shape=(k, d_out), minval=-3, maxval=3, dtype=tf.float32)
            self.u = tf.Variable(
                initial_value=u, dtype=tf.float32, trainable=True, name='u')
            self.v = tf.Variable(
                initial_value=v, dtype=tf.float32, trainable=True, name='v')
        # build parameter networks
        self.mu = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out=None, name='mu')
        self.alpha = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out='softplus', name='alpha')
        self.beta = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out='softplus', name='beta')
        if self.prior_type in {'vamp', 'vamp_trainable', 'vbem'}:
            # mixture-weight network over the prior components (+1 fixed
            # component when self.poops is set)
            d_out = self.u.shape[0] + int(self.poops)
            self.pi = neural_network(
                d_in, d_hidden, f_hidden, d_out, f_out='softmax', name='pi')
            self.pc = tfp.distributions.Categorical(
                logits=[1] * self.u.shape[0] + [self.u.shape[0]] * self.poops)

    def qp(self, x):
        """Variational posterior q(lambda|x) = Gamma(alpha(x)+eps, beta(x)+eps)."""
        qp = tfp.distributions.Gamma(self.alpha(
            x) + self.epsilon_q, self.beta(x) + self.epsilon_q)
        return tfp.distributions.Independent(qp)

    def variational_family(self, x):
        """Return (q(lambda|x), KL[q || prior]) for the configured prior."""
        # variational family q(precision|x)
        qp = self.qp(x)
        # compute kl-divergence depending on prior type
        if self.prior_type == 'standard':
            # closed-form Gamma-Gamma KL
            dkl = qp.kl_divergence(self.pp)
        elif self.prior_type in {'vamp', 'vamp_trainable', 'vamp_uniform', 'vbem'}:
            # mixture prior: components come from pseudo-inputs (vamp*) or
            # free parameters (vbem)
            if self.prior_type in {'vamp', 'vamp_trainable', 'vamp_uniform'}:
                alpha = self.alpha(self.u)
                beta = self.beta(self.u)
            else:
                alpha = tf.nn.softplus(self.u)
                beta = tf.nn.softplus(self.v)
            if self.poops:
                # append the fixed (a, b) component to the mixture
                alpha = tf.concat(
                    (alpha, tf.expand_dims(self.a, axis=0)), axis=0)
                beta = tf.concat(
                    (beta, tf.expand_dims(self.b, axis=0)), axis=0)
            # compute VAMP prior's mixing densities
            priors = tfp.distributions.Gamma(alpha, beta)
            priors = tfp.distributions.Independent(
                priors, reinterpreted_batch_ndims=1)
            # MC estimate kl-divergence due to pesky log-sum
            if self.prior_type == 'vamp_uniform':
                pi_x = tf.ones(self.u.shape[0])
            else:
                pi_x = tf.clip_by_value(
                    self.pi(x), clip_value_min=1e-6, clip_value_max=tf.float32.max)
            p = qp.sample(self.num_mc_samples)
            log_qp = qp.log_prob(p)
            p = tf.tile(tf.expand_dims(p, axis=-2),
                        [1, 1] + priors.batch_shape.as_list() + [1])
            log_pp = tf.reduce_logsumexp(priors.log_prob(
                p) + tf.math.log(tf.expand_dims(pi_x, axis=0)), axis=-1)
            dkl = tf.reduce_mean(log_qp - log_pp, axis=0)
            if self.prior_type != 'vamp_uniform':
                # regularize the mixture weights towards the fixed categorical
                dkl += tfp.distributions.Categorical(
                    logits=pi_x).kl_divergence(self.pc)
        else:
            # 'mle' prior type: no KL regularization
            dkl = tf.constant(0.0)
        return qp, dkl

    def expected_log_lambda(self, x):
        """E[log lambda] under Gamma(alpha, beta): digamma(alpha) - log(beta)."""
        return tf.math.digamma(self.alpha(x) + self.epsilon_q) - tf.math.log(self.beta(x) + self.epsilon_q)
class LogNormalNormalRegression(VariationalNormalRegression):
    """Normal likelihood with a LogNormal variational posterior over the
    noise precision: q(lambda|x) = LogNormal(alpha(x), beta(x))."""

    def __init__(self, d_in, d_hidden, f_hidden, d_out, prior, y_mean, y_var, a=None, b=None, u=None, k=None, n_mc=1):
        super(LogNormalNormalRegression, self).__init__(
            prior, y_mean, y_var, n_mc)
        assert isinstance(d_in, int) and d_in > 0
        assert isinstance(d_hidden, int) and d_hidden > 0
        assert isinstance(d_out, int) and d_out > 0
        # give the model a name
        self.type = 'LogNormal-Normal'
        # save fixed prior parameters (here a = location, b = scale)
        self.a = tf.constant([a] * d_out, dtype=tf.float32)
        self.b = tf.constant([b] * d_out, dtype=tf.float32)
        if self.prior_type == 'standard':
            # set prior for precision
            self.pp = tfp.distributions.LogNormal(loc=self.a, scale=self.b)
            self.pp = tfp.distributions.Independent(
                self.pp, reinterpreted_batch_ndims=1)
        elif 'vamp' in self.prior_type:
            # pseudo-inputs (trainable only for the 'vamp_trainable' variant)
            trainable = 'trainable' in self.prior_type
            self.u = tf.Variable(
                initial_value=u, dtype=tf.float32, trainable=trainable, name='u')
        elif self.prior_type == 'vbem':
            # trainable prior parameters for precision
            u = tf.random.uniform(
                shape=(k, d_out), minval=-3, maxval=3, dtype=tf.float32)
            v = tf.random.uniform(
                shape=(k, d_out), minval=-3, maxval=3, dtype=tf.float32)
            self.u = tf.Variable(
                initial_value=u, dtype=tf.float32, trainable=True, name='u')
            self.v = tf.Variable(
                initial_value=v, dtype=tf.float32, trainable=True, name='v')
        # build parameter networks (alpha is the LogNormal location, so its
        # output activation is linear; beta is the scale, so softplus)
        self.mu = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out=None, name='mu')
        self.alpha = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out=None, name='alpha')
        self.beta = neural_network(
            d_in, d_hidden, f_hidden, d_out, f_out='softplus', name='beta')
        if self.prior_type in {'vamp', 'vamp_trainable', 'vbem'}:
            # mixture-weight network over the prior components (+1 fixed
            # component when self.poops is set)
            d_out = self.u.shape[0] + int(self.poops)
            self.pi = neural_network(
                d_in, d_hidden, f_hidden, d_out, f_out='softmax', name='pi')
            self.pc = tfp.distributions.Categorical(
                logits=[1] * self.u.shape[0] + [self.u.shape[0]] * self.poops)

    def qp(self, x):
        """Variational posterior q(lambda|x) = LogNormal(alpha(x), beta(x)+eps)."""
        qp = tfp.distributions.LogNormal(
            self.alpha(x), self.beta(x) + self.epsilon_q)
        return tfp.distributions.Independent(qp)

    def variational_family(self, x):
        """Return (q(lambda|x), KL[q || prior]) for the configured prior."""
        # variational family q(precision|x)
        qp = self.qp(x)
        # compute kl-divergence depending on prior type
        if self.prior_type == 'standard':
            # closed-form LogNormal-LogNormal KL
            dkl = qp.kl_divergence(self.pp)
        elif self.prior_type in {'vamp', 'vamp_trainable', 'vamp_uniform', 'vbem'}:
            # mixture prior: components come from pseudo-inputs (vamp*) or
            # free parameters (vbem)
            if self.prior_type in {'vamp', 'vamp_trainable', 'vamp_uniform'}:
                alpha = self.alpha(self.u)
                beta = self.beta(self.u)
            else:
                alpha = self.u
                beta = tf.nn.softplus(self.v)
            if self.poops:
                # append the fixed (a, b) component to the mixture
                alpha = tf.concat(
                    (alpha, tf.expand_dims(self.a, axis=0)), axis=0)
                beta = tf.concat(
                    (beta, tf.expand_dims(self.b, axis=0)), axis=0)
            # compute VAMP prior's mixing densities
            priors = tfp.distributions.LogNormal(alpha, beta)
            priors = tfp.distributions.Independent(
                priors, reinterpreted_batch_ndims=1)
            # MC estimate kl-divergence due to pesky log-sum
            if self.prior_type == 'vamp_uniform':
                pi_x = tf.ones(self.u.shape[0])
            else:
                pi_x = tf.clip_by_value(
                    self.pi(x), clip_value_min=1e-6, clip_value_max=tf.float32.max)
            p = qp.sample(self.num_mc_samples)
            log_qp = qp.log_prob(p)
            p = tf.tile(tf.expand_dims(p, axis=-2),
                        [1, 1] + priors.batch_shape.as_list() + [1])
            log_pp = tf.reduce_logsumexp(priors.log_prob(
                p) + tf.math.log(tf.expand_dims(pi_x, axis=0)), axis=-1)
            dkl = tf.reduce_mean(log_qp - log_pp, axis=0)
            if self.prior_type != 'vamp_uniform':
                # regularize the mixture weights towards the fixed categorical
                dkl += tfp.distributions.Categorical(
                    logits=pi_x).kl_divergence(self.pc)
        else:
            # 'mle' prior type: no KL regularization
            dkl = tf.constant(0.0)
        return qp, dkl

    def expected_log_lambda(self, x):
        """E[log lambda] under LogNormal(alpha, beta) is simply alpha."""
        return self.alpha(x)
def fancy_plot(x_train, y_train, x_eval, true_mean, true_std, mdl_mean, mdl_std, title):
    """Plot data plus true/model predictive moments on a two-panel figure.

    Top panel: data scatter, true mean (dashed) +/- std (dotted), and the
    model mean with a shaded +/- 1 std band. Bottom panel: predicted vs.
    true conditional standard deviation.

    All array arguments are squeezed to 1-D. Returns the Figure.
    """
    # squeeze everything
    x_train = np.squeeze(x_train)
    y_train = np.squeeze(y_train)
    x_eval = np.squeeze(x_eval)
    true_mean = np.squeeze(true_mean)
    true_std = np.squeeze(true_std)
    mdl_mean = np.squeeze(mdl_mean)
    mdl_std = np.squeeze(mdl_std)
    # get a new figure
    fig, ax = plt.subplots(2, 1)
    fig.suptitle(title)
    # plot the data (keyword args: positional x/y were deprecated in
    # seaborn 0.12 and removed in later releases)
    sns.scatterplot(x=x_train, y=y_train, ax=ax[0])
    # plot the true mean and standard deviation
    ax[0].plot(x_eval, true_mean, '--k')
    ax[0].plot(x_eval, true_mean + true_std, ':k')
    ax[0].plot(x_eval, true_mean - true_std, ':k')
    # plot the model's mean and standard deviation
    line = ax[0].plot(x_eval, mdl_mean)[0]
    ax[0].fill_between(x_eval, mdl_mean - mdl_std,
                       mdl_mean + mdl_std, color=line.get_color(), alpha=0.5)
    # re-draw the true mean so it stays visible on top of the shaded band
    ax[0].plot(x_eval, true_mean, '--k')
    # clean it up
    ax[0].set_ylim([-20, 20])
    ax[0].set_ylabel('y')
    # plot the std
    ax[1].plot(x_eval, mdl_std, label='predicted')
    ax[1].plot(x_eval, true_std, '--k', label='truth')
    ax[1].set_ylim([0, 5])
    ax[1].set_xlabel('x')
    ax[1].set_ylabel('std(y|x)')
    # attach the legend to the bottom axes explicitly instead of relying
    # on plt.legend() picking the "current" axes
    ax[1].legend()
    return fig
if __name__ == '__main__':
    # enable background tiles on plots
    sns.set(color_codes=True)
    # random number seeds
    np.random.seed(123)
    tf.random.set_seed(123)
    # unit test: softplus_inverse must round-trip with tf.nn.softplus
    test = np.random.uniform(-10, 10, 100)
    assert (np.abs(softplus_inverse(tf.nn.softplus(test)) - test) < 1e-6).all()
    test = np.random.uniform(0, 10, 100)
    assert (np.abs(tf.nn.softplus(softplus_inverse(test)) - test) < 1e-6).all()
    # script arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('--prior', type=str, default='vamp_uniform',
                        help="{mle, standard, vamp, vamp_uniform, vamp_trainable, vbem}")
    args = parser.parse_args()
    # set configuration
    D_HIDDEN = 50
    PRIOR_TYPE = args.prior
    N_MC_SAMPLES = 20
    LEARNING_RATE = 1e-2
    EPOCHS = int(6e3)
    # load data (single full-size batch, since the toy set is small)
    x_train, y_train, x_eval, true_mean, true_std = generate_toy_data()
    ds_train = tf.data.Dataset.from_tensor_slices(
        {'x': x_train, 'y': y_train}).batch(x_train.shape[0])
    # VAMP prior pseudo-input initializers: evenly spaced over x_eval
    u = np.expand_dims(np.linspace(
        np.min(x_eval), np.max(x_eval), 20), axis=-1)
    # declare Gamma-Normal model; Gamma prior fit to the true precisions
    # over the region covered by the training inputs
    a, _, b_inv = sps.gamma.fit(
        1 / true_std[(np.min(x_train) <= x_eval) * (x_eval <= np.max(x_train))] ** 2, floc=0)
    print('Gamma Prior:', a, 1 / b_inv)
    mdl = GammaNormalRegression(d_in=x_train.shape[1],
                                d_hidden=D_HIDDEN,
                                f_hidden='sigmoid',
                                d_out=y_train.shape[1],
                                prior=PRIOR_TYPE,
                                y_mean=0.0,
                                y_var=1.0,
                                a=a,
                                b=1 / b_inv,
                                k=20,
                                u=u,
                                n_mc=N_MC_SAMPLES)
    # build the model. loss=[None] avoids warning "Output output_1 missing from loss dictionary".
    mdl.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=LEARNING_RATE), loss=[None], run_eagerly=False)
    # train, evaluate on test points, and plot results
    hist = mdl.fit(ds_train, epochs=EPOCHS, verbose=0,
                   callbacks=[RegressionCallback(EPOCHS)])
    plt.figure()
    plt.plot(hist.history['LL (adjusted)'])
    # print and plot results (more MC samples for tighter estimates)
    mdl.num_mc_samples = 2000
    print(mdl.posterior_predictive_log_likelihood(x_train, y_train, exact=True))
    print(mdl.posterior_predictive_log_likelihood(
        x_train, y_train, exact=False))
    mdl_mean, mdl_std = mdl.posterior_predictive_mean(
        x_eval), mdl.posterior_predictive_std(x_eval)
    fig = fancy_plot(x_train, y_train, x_eval, true_mean,
                     true_std, mdl_mean, mdl_std, mdl.type)
    if PRIOR_TYPE == 'vamp_uniform':
        fig.savefig(os.path.join('assets', 'fig_vamp_uniform_gamma.pdf'))
    # declare LogNormal-Normal model; LogNormal prior moment-matched to
    # the log of the true precisions over the training region
    precisions = 1 / true_std[(np.min(x_train) <= x_eval)
                              * (x_eval <= np.max(x_train))] ** 2
    a, b = np.mean(np.log(precisions)), np.std(np.log(precisions))
    print('LogNormal Prior:', a, b)
    mdl = LogNormalNormalRegression(d_in=x_train.shape[1],
                                    d_hidden=D_HIDDEN,
                                    f_hidden='sigmoid',
                                    d_out=y_train.shape[1],
                                    prior=PRIOR_TYPE,
                                    y_mean=0.0,
                                    y_var=1.0,
                                    a=a,
                                    b=b,
                                    k=20,
                                    u=u,
                                    n_mc=N_MC_SAMPLES)
    # build the model. loss=[None] avoids warning "Output output_1 missing from loss dictionary".
    mdl.compile(optimizer=tf.keras.optimizers.Adam(
        learning_rate=LEARNING_RATE), loss=[None], run_eagerly=False)
    # train, evaluate on test points, and plot results
    hist = mdl.fit(ds_train, epochs=EPOCHS, verbose=0,
                   callbacks=[RegressionCallback(EPOCHS)])
    plt.figure()
    plt.plot(hist.history['LL (adjusted)'])
    # print and plot results
    mdl.num_mc_samples = 2000
    print(mdl.posterior_predictive_log_likelihood(x_train, y_train, exact=True))
    print(mdl.posterior_predictive_log_likelihood(
        x_train, y_train, exact=False))
    mdl_mean, mdl_std = mdl.posterior_predictive_mean(
        x_eval), mdl.posterior_predictive_std(x_eval)
    fig = fancy_plot(x_train, y_train, x_eval, true_mean,
                     true_std, mdl_mean, mdl_std, mdl.type)
    if PRIOR_TYPE == 'vamp_uniform':
        fig.savefig(os.path.join('assets', 'fig_vamp_uniform_log_normal.pdf'))
    # hold the plots
    plt.show()
| StarcoderdataPython |
1872847 | <reponame>ToonKBC/mlflow
class ViewType(object):
    """Enum qualifying a `ListExperiments` API query by experiment state."""
    ACTIVE_ONLY = 1
    DELETED_ONLY = 2
    ALL = 3
| StarcoderdataPython |
3498412 | import torch
from .stiefel import Stiefel
from .positive_definite import PositiveDefinite
from .euclidean import Euclidean
from .hyperbolic import Hyperbolic
from .doublystochastic import DoublyStochastic
from ..parameter import Parameter
class ManifoldShapeFactory(object):
    """
    Registry of per-manifold shape factories.

    Torch modules use this to turn a raw weight-tensor shape into a
    manifold-constrained Parameter. Each factory decides, from the shape
    (plus a `transpose` hint for shapes where both orientations are
    valid), how to instantiate its manifold.

    To register a new factory, implement a subclass and construct one
    instance with the manifold class as its argument.
    """
    factories = {}

    @staticmethod
    def _addFactory(manifold, factory):
        ManifoldShapeFactory.factories[manifold] = factory

    @staticmethod
    def create_manifold_parameter(manifold, shape, transpose=False):
        try:
            factory = ManifoldShapeFactory.factories[manifold]
        except KeyError:
            raise NotImplementedError
        return factory.create(shape, transpose)

    def __init__(self, manifold):
        self.manifold = manifold
        ManifoldShapeFactory._addFactory(manifold, self)

    # TODO: change return of create to manifold param and modified tensor if any
    def create(self, shape, transpose=False):
        raise NotImplementedError
class StiefelLikeFactory(ManifoldShapeFactory):
    """
    Shape factory for Stiefel-like tensor constraints.

    Accepted shapes:
        3D (k, h, w): k > 1 and h > w > 1
        2D (h, w):    h > w > 1
    When h == w both orientations are valid, so the caller's `transpose`
    hint decides whether the manifold is treated as (h x w) or (w x h).
    """

    def create(self, shape, transpose=False):
        ndim = len(shape)
        if ndim == 3:
            k, h, w = shape
        elif ndim == 2:
            k = 1
            h, w = shape
        else:
            raise ValueError(("Invalid shape {}, length of shape "
                              "tuple should be 2 or 3").format(shape))
        if h > w:
            return False, Parameter(manifold=self.manifold(h, w, k=k))
        if h < w:
            return True, Parameter(manifold=self.manifold(w, h, k=k))
        # h == w: shape alone is ambiguous, honour the caller's hint
        return transpose, Parameter(manifold=self.manifold(w, h, k=k))
class SquareManifoldFactory(ManifoldShapeFactory):
    """
    Manifold shape factory for manifold constrained parameter which
    allows only for square shapes. For example PositiveDefinite manifold
    Constraints:
        if 3D tensor (k,n,n):
            k > 1 and n > 1
        if 2D tensor (n,n):
            n > 1
    """

    def create(self, shape, transpose=False):
        if len(shape) == 3:
            k, n, m = shape
        elif len(shape) == 2:
            k = 1
            n, m = shape
        else:
            # BUGFIX: the implicit string concatenation was missing a
            # space, producing "of shapetuple" in the error message.
            raise ValueError(("Invalid shape {}, length of shape "
                              "tuple should be 2 or 3").format(shape))
        if n != m:
            raise ValueError(("Invalid shape {} dimensions should "
                              "be equal").format(shape))
        return transpose, Parameter(manifold=self.manifold(n=n, k=k))
class EuclideanManifoldFactory(ManifoldShapeFactory):
    """
    Unconstrained (Euclidean) shape factory: forwards the shape straight
    to the manifold constructor; `transpose` is passed through untouched.
    """

    def create(self, shape, transpose=False):
        if len(shape) == 0:
            raise ValueError("Shape length cannot be 0")
        return transpose, Parameter(manifold=self.manifold(*shape))
class HyperbolicManifoldFactory(ManifoldShapeFactory):
    """
    Shape factory for the hyperbolic manifold.

    Accepted shapes: 1D (n,) gives a single manifold of dimension n;
    2D (k, n) gives k copies of dimension n.
    """

    def create(self, shape, transpose=False):
        if len(shape) == 1:
            # BUGFIX: previously `k, n = 1, shape` bound the whole tuple
            # to n; unpack the single dimension instead.
            k, n = 1, shape[0]
        elif len(shape) == 2:
            k, n = shape
        else:
            # BUGFIX: implicit concatenation was missing a space
            # ("of shapetuple") in the error message.
            raise ValueError(("Invalid shape {}, length of shape "
                              "tuple should be 1 or 2").format(shape))
        return transpose, Parameter(manifold=self.manifold(n=n, k=k))
class DSManifoldFactory(ManifoldShapeFactory):
    """
    Shape factory for the DoublyStochastic manifold.

    Accepted shapes: 2D (n, m) for one manifold, 3D (k, n, m) for k copies.
    """

    def create(self, shape, transpose=False):
        ndim = len(shape)
        if ndim not in (2, 3):
            # BUGFIX: previously validated with `assert ..., ValueError(...)`,
            # which raises AssertionError (and vanishes under `python -O`);
            # raise ValueError like the sibling factories do.
            raise ValueError("Shape should be in [2, 3]")
        n, m = shape[-2], shape[-1]
        # BUGFIX: `cond and shape[0] or 1` silently yielded 1 when
        # shape[0] was falsy; use a real conditional expression.
        k = shape[0] if ndim == 3 else 1
        return transpose, Parameter(manifold=self.manifold(n=n, m=m, k=k))
# Module-level convenience alias for the registry lookup.
create_manifold_parameter = ManifoldShapeFactory.create_manifold_parameter
# Instantiating each factory registers it for its manifold class
# (see ManifoldShapeFactory.__init__).
StiefelLikeFactory(Stiefel)
SquareManifoldFactory(PositiveDefinite)
EuclideanManifoldFactory(Euclidean)
HyperbolicManifoldFactory(Hyperbolic)
DSManifoldFactory(DoublyStochastic)
def manifold_random_(tensor):
    """In-place re-sample of a manifold-constrained tensor.

    If the tensor carries a manifold, copy a random point from it into
    the tensor (under no_grad); otherwise return the tensor unchanged.
    """
    manifold = getattr(tensor, 'manifold', None)
    if manifold is None:
        return tensor
    with torch.no_grad():
        return tensor.copy_(manifold.rand())
| StarcoderdataPython |
302895 | <reponame>titos-carrasco/MindWave-BB8-Python
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import threading
import serial
import time
class MindWaveData:
    """Snapshot of the values parsed from the MindWave ThinkGear stream."""

    def __init__(self):
        # Signal quality: 0 = good contact, 200 = electrode off the skin.
        self.poorSignalQuality = 200
        # eSense meters, 1..100 (0 means the reading is unreliable).
        self.attentionESense = 0
        self.meditationESense = 0
        # Eye-blink strength, 1..255.
        self.blinkStrength = 0
        # Latest raw EEG sample, signed 16-bit (-32768..32767).
        self.rawWave16Bit = 0
        # ASIC EEG power bands, unsigned 24-bit each (0..16777215).
        self.delta = 0
        self.theta = 0
        self.lowAlpha = 0
        self.highAlpha = 0
        self.lowBeta = 0
        self.highBeta = 0
        self.lowGamma = 0
        self.midGamma = 0
class MindWave():
    """Driver for a NeuroSky MindWave headset behind a serial RF dongle.

    connect() performs the pairing handshake, then starts a background
    thread (_TParser) that parses the ThinkGear byte stream and keeps a
    MindWaveData snapshot current; fillMindWaveData() copies that
    snapshot out under a mutex.
    """
    def __init__( self, port, timeout, ghid ):
        # port: serial device of the dongle; timeout: stored but not
        # otherwise used in this class — TODO confirm; ghid: Global
        # Headset ID to pair with (0x0000 means auto-search).
        self.port = port
        self.timeout = timeout
        self.ghid = ghid
        self.mutex = threading.Lock()
        self.connected = False
        self.mwd = MindWaveData()
        self.conn = None
        self.tRunning = False
        self.tParser = None
        self.queue = None
        # counters: bytes read / bytes discarded while re-synchronizing
        self.bytesLeidos = 0
        self.bytesPerdidos = 0

    def connect( self ):
        """Open the serial port, pair with the headset and start the
        parser thread. Returns True on success, False otherwise."""
        if( self.connected ):
            print( "MindWave Connect(): Ya se encuentra conectado a", self.port )
            return True

        self.mwd = MindWaveData()
        self.conn = None
        self.tRunning = False
        self.tParser = None
        self.queue = bytearray()
        self.bytesLeidos = 0
        self.bytesPerdidos = 0

        print( "MindWave Connect(): Intentando conectar a", self.port, " ...", end='' )
        try:
            self.conn = serial.Serial( self.port, baudrate=115200, bytesize=8,
                parity='N', stopbits=1, timeout=0.1 )
            self.conn.flushInput()
            self.conn.flushOutput()
            self.connected = True
        except Exception as e:
            self.conn = None
            print( e )
            return False
        print( "OK" )

        # reset any previous connection
        print( "MindWave Connect(): Limpiando conexión previa ...", end='' )
        try:
            # request "Disconnect"
            self.conn.write( bytearray( [ 0xc1 ] ) )
            time.sleep( 1 )
            self.conn.flushInput()
        except Exception as e:
            self.conn.close()
            self.conn = None
            self.connected = False
            print( e )
            return False
        print( "OK" )

        # connect to the headset
        try:
            # pair with a specific Global Headset Unique Identifier (ghid)
            if( self.ghid != 0x0000 ):
                print( "MindWave Connect(): Enlazando headset ", end='' )
                # request "Connect"
                self.conn.write( bytearray( [ 0xc0, ( self.ghid >> 8 ) & 0xFF, self.ghid & 0xFF ] ) )
                self.conn.flush()
            # otherwise search for any Global Headset Unique Identifier (ghid)
            else:
                print( "MindWave Connect(): Buscando headset ", end='' )
                # request "Auto-Connect"
                self.conn.write( bytearray( [ 0xc2 ] ) )
                self.conn.flush()
        except Exception as e:
            self.conn.close()
            self.conn = None
            self.connected = False
            print( e )
            return False

        # wait for the dongle's response
        while True:
            print( ".", end = '' )

            # read the next response packet
            payload, err = self._getPayload()
            if( err != None ):
                break

            # inspect the response code
            cmd = payload[0]
            if( cmd == 0xd0 ): # headset found and connected
                self.ghid = ( payload[2] << 8 ) + payload[3]
                break
            if( cmd == 0xd1 ): # headset not found
                if( payload[1] == 0x00 ):
                    err = "ErrNoHeadsetFound"
                else:
                    err = "ErrHeadsetNotFound"
                break
            if( cmd == 0xd2 ): # headset disconnected
                err = "ErrDisconnected"
                break
            if( cmd == 0xd3 ): # request denied
                err = "ErrRequestDenied"
                break
            if( cmd == 0xd4 ):
                if( payload[2] == 0x00 ): # dongle in stand by mode
                    break
                else: # searching
                    time.sleep( 0.0001 )
            else:
                err = "ErrInvResponse"
                break

        if( err != None ):
            self.conn.close()
            self.conn = None
            self.connected = False
            print( err )
            return False
        print( "OK" )

        # start the background parser thread
        print( "MindWave Connect(): Levantando tarea de lectura de datos ...", end='' )
        self.tParser = threading.Thread( target=self._TParser, args=(), name="_TParser" )
        self.tParser.start()
        while ( not self.tRunning ):
            time.sleep( 0.0001 )
        print( "OK" )
        return True

    def disconnect( self ):
        """Stop the parser thread, request Disconnect and close the
        serial port. No-op when not connected."""
        if( self.connected ):
            print( "MindWave Disconnect(): Deteniendo Tarea ...", end='' )
            self.tRunning = False
            self.tParser.join()
            self.tParser = None
            self.queue = bytearray()
            print( "OK" )

            # request "Disconnect"
            print( "MindWave Disconnect(): Desconectando headset y cerrando puerta ...", end='' )
            try:
                self.conn.write( bytearray( [ 0xc1 ] ) )
                time.sleep( 1 )
                self.conn.close()
            except Exception as e:
                pass
            self.connected = False
            self.conn = None
            print( "OK" )
            print( "Bytes Leidos   :", self.bytesLeidos )
            print( "Bytes Perdidos :", self.bytesPerdidos )
            print( threading.enumerate() )

    def isConnected( self ):
        """True when a headset session is currently open."""
        return self.connected

    def getGlobalHeadsetID( self ):
        """Paired headset ID formatted as 4 uppercase hex digits."""
        return "%04X" % self.ghid

    def fillMindWaveData( self, mwd ):
        """Copy the latest parsed values into *mwd* under the mutex."""
        self.mutex.acquire()
        mwd.poorSignalQuality = self.mwd.poorSignalQuality
        mwd.attentionESense = self.mwd.attentionESense
        mwd.meditationESense = self.mwd.meditationESense
        mwd.blinkStrength = self.mwd.blinkStrength
        mwd.rawWave16Bit = self.mwd.rawWave16Bit
        mwd.delta = self.mwd.delta
        mwd.theta = self.mwd.theta
        mwd.lowAlpha = self.mwd.lowAlpha
        mwd.highAlpha = self.mwd.highAlpha
        mwd.lowBeta = self.mwd.lowBeta
        mwd.highBeta = self.mwd.highBeta
        mwd.lowGamma = self.mwd.lowGamma
        mwd.midGamma = self.mwd.midGamma
        self.mutex.release()

    # private helpers
    def _getByte( self ):
        """Block until a byte is available; drains the serial buffer into
        the internal queue and pops bytes from its front."""
        while( True ):
            if( self.conn.in_waiting > 0 ):
                data = self.conn.read( self.conn.in_waiting )
                if( type( data ) == str ):
                    self.queue = self.queue + bytearray( data )
                else:
                    self.queue = self.queue + data
                self.bytesLeidos = self.bytesLeidos + len( data )
            if( len( self.queue ) > 0 ):
                return self.queue.pop( 0 )
            time.sleep( 0.0001 )

    def _getPayload( self ):
        """Scan for the next ThinkGear packet and return (payload, err).

        Packet framing: 0xaa 0xaa [0xaa]* <plength> <payload...> <checksum>.
        Returns (None, message) on invalid length or checksum; discarded
        bytes are accounted in bytesPerdidos.
        """
        # 0xaa 0xaa [0xaa]*
        scanning = True
        while( scanning ):
            b = self._getByte()
            if( b == 0xaa ):
                b = self._getByte()
                if( b == 0xaa ):
                    while( scanning ):
                        plength = self._getByte()
                        if( plength != 0xaa ):
                            scanning = False
                        else:
                            self.bytesPerdidos = self.bytesPerdidos + 1
                else:
                    self.bytesPerdidos = self.bytesPerdidos + 2
            else:
                self.bytesPerdidos = self.bytesPerdidos + 1

        # packet length
        if( plength <= 0 or plength >= 0xaa ):
            self.bytesPerdidos = self.bytesPerdidos + 1
            return None, "ErrInvPLength (%02X)" % plength

        # payload
        payload = bytearray( plength )
        for i in range( plength ):
            payload[i] = self._getByte()

        # checksum: ones-complement of the low byte of the payload sum
        checksum = self._getByte()
        suma = 0
        for i in range( plength ):
            suma = suma + payload[i]
        suma = ( ~( suma & 0xff ) ) & 0xff
        if( checksum != suma ):
            self.bytesPerdidos = self.bytesPerdidos + 1 + plength + 1
            return None, "ErrChecksum (%02X/%02X)" % (checksum, suma)

        # ok
        return payload, None

    def _TParser( self, *args ):
        """Thread body: parse packets until tRunning is cleared."""
        self.bytesLeidos = 0
        self.bytesPerdidos = 0
        self.queue = bytearray()
        self.conn.flushInput()
        self.tRunning = True
        while( self.tRunning ):
            err = self._parsePayload()
            if( err != None ):
                print( "TParser: ", err )

    def _parsePayload( self ):
        """Parse one packet's data rows into self.mwd (mutex-guarded).

        Rows are <excode>* <code> [<vlength>] <value...>; codes >= 0x80
        carry an explicit value length. Returns an error string or None.
        """
        payload, err = self._getPayload()
        if( err != None ):
            return err
        if( payload[0] == 0xd2 ): # disconnected
            return "ErrDisconnected"
        if( payload[0] == 0xd4 ): # alive message in stand by mode
            return None
        pos = 0
        self.mutex.acquire()
        while pos < len( payload ):
            exCodeLevel = 0
            while( payload[pos] == 0x55 ):
                exCodeLevel = exCodeLevel + 1
                pos = pos + 1
            code = payload[pos]
            pos = pos + 1
            if( code >= 0x80 ):
                vlength = payload[pos]
                pos = pos + 1
            else:
                vlength = 1
            data = bytearray( vlength )
            for i in range( vlength ):
                data[i] = payload[pos + i]
            pos = pos + vlength

            if( exCodeLevel == 0 ):
                if( code == 0x02 ): # poor signal quality (0 to 255) 0=>OK; 200 => no skin contact
                    self.mwd.poorSignalQuality = data[0]
                elif( code == 0x04 ): # attention eSense (0 to 100) 40-60 => neutral, 0 => result is unreliable
                    self.mwd.attentionESense = data[0]
                elif( code == 0x05 ): # meditation eSense (0 to 100) 40-60 => neutral, 0 => result is unreliable
                    self.mwd.meditationESense = data[0]
                elif( code == 0x16 ): # blink strength (1 to 255)
                    self.mwd.blinkStrength = data[0]
                elif( code == 0x80 ): # raw wave value (-32768 to 32767) - big endian
                    n = ( data[0]<<8 ) + data[1]
                    if( n >= 32768 ):
                        n = n - 65536
                    self.mwd.rawWave16Bit = n
                elif( code == 0x83 ): # asic eeg power struct (8, 3 bytes unsigned int big indian)
                    self.mwd.delta = ( data[0]<<16 ) + ( data[1]<<8 ) + data[2]
                    self.mwd.theta = ( data[3]<<16 ) + ( data[4]<<8 ) + data[5]
                    self.mwd.lowAlpha = ( data[6]<<16 ) + ( data[7]<<8 ) + data[8]
                    self.mwd.highAlpha = ( data[9]<<16 ) + ( data[10]<<8 ) + data[11]
                    self.mwd.lowBeta = ( data[12]<<16 ) + ( data[13]<<8 ) + data[14]
                    self.mwd.highBeta = ( data[15]<<16 ) + ( data[16]<<8 ) + data[17]
                    self.mwd.lowGamma = ( data[18]<<16 ) + ( data[19]<<8 ) + data[20]
                    self.mwd.midGamma = ( data[21]<<16 ) + ( data[22]<<8 ) + data[23]
                # elif( code == 0x01 ): # code battery - battery low (0x00)
                # elif( code == 0x03 ): # heart rate (0 to 255)
                # elif( code == 0x06 ): # 8bit raw wave value (0 to 255)
                # elif( code == 0x07 ): # raw marker section start (0)
                # elif( code == 0x81 ): # eeg power struct (legacy float)
                # elif( code == 0x86 ): # rrinterval (0 to 65535)
                else:
                    print( "ExCodeLevel: %02x, Code: %02x, Data: [%s]" % ( exCodeLevel, code, ''.join(format(x, '02X') for x in data) ) )
        self.mutex.release()
        return None
| StarcoderdataPython |
class Expansion(object):
    """DES-style bit permutations over strings of 8-bit characters.

    ``expand`` maps a 4-char (32-bit) half-block to its 48-bit expansion,
    returned as comma-separated hex byte strings (e.g. '0x40,0x0,...');
    ``re2`` applies the 32-entry P-box to a 48-bit value given in that
    same hex format and returns the 4-char result.
    """

    # E-table: output bit j is input bit __expansion_table[j] (0-based).
    __expansion_table = [
        31, 0, 1, 2, 3, 4,
        3, 4, 5, 6, 7, 8,
        7, 8, 9, 10, 11, 12,
        11, 12, 13, 14, 15, 16,
        15, 16, 17, 18, 19, 20,
        19, 20, 21, 22, 23, 24,
        23, 24, 25, 26, 27, 28,
        27, 28, 29, 30, 31, 0
    ]

    # P-box for re2: output bit j is input bit __p[j] of the 48-bit block.
    # NOTE(review): these values look 1-based relative to a classic DES
    # P-table; kept as-is since callers depend on the current mapping.
    __p = [
        1, 2, 3, 4, 5, 8, 9, 10,
        11, 14, 15, 16, 17, 20, 21, 22,
        23, 26, 27, 28, 29, 32, 33, 34,
        35, 38, 39, 40, 41, 44, 45, 46
    ]

    def __BitList_to_String(self, data):
        """Pack a list of bits (MSB first) into a string of 8-bit chars.

        Trailing bits that do not fill a whole byte are dropped, matching
        the original behaviour.
        """
        # BUGFIX: the original guarded a Python-2/3 split with the
        # always-true expression `2.7 < 3` (a mangled version check);
        # only the str branch was ever live, so it is the one kept.
        result = []
        byte = 0
        for pos, bit in enumerate(data):
            byte += bit << (7 - (pos % 8))
            if pos % 8 == 7:
                result.append(byte)
                byte = 0
        return ''.join(chr(c) for c in result)

    def __permutate(self, table, block):
        """Permutate this block with the specified table."""
        return list(map(lambda x: block[x], table))

    def __String_to_BitList(self, data):
        """Turn the string data into a list of bits (1/0), MSB first."""
        # Same dead `2.7 < 3` version check removed here; ord() works on
        # str characters in both Python 2 and 3.
        result = []
        for ch in data:
            code = ord(ch)
            for i in range(7, -1, -1):
                result.append(1 if code & (1 << i) else 0)
        return result

    def __String_to_hex(self, data):
        """'AB' -> '0x41,0x42': comma-separated hex codes of each char."""
        return ','.join(hex(ord(c)) for c in data)

    def __Hex_to_str(self, data):
        """Inverse of __String_to_hex, applied to a list of hex strings."""
        return ''.join(chr(int(hx, 16)) for hx in data)

    def expand(self, fbits):
        """Return the 6 bytes of expansion of *fbits* in hex format."""
        bitlist = self.__String_to_BitList(fbits)
        expansion = self.__permutate(self.__expansion_table, bitlist)
        expansion_str = self.__BitList_to_String(expansion)
        return self.__String_to_hex(expansion_str)

    def re2(self, hexbytes):
        """Reduce a comma-separated 6-byte hex string through the P-box,
        returning the resulting 4-char (32-bit) string."""
        data = hexbytes.split(',')
        hex_data = self.__Hex_to_str(data)
        expanded_bytes = self.__String_to_BitList(hex_data)
        reduced = self.__permutate(self.__p, expanded_bytes)
        return self.__BitList_to_String(reduced)
5102310 | <reponame>aming/Paper-Board
import csv
import pyowm
import yfinance
import urllib
from datetime import datetime
from . import config
from . import gsheet
# Base path for the numbered data files ("<config_dir>/data-<index>").
DATA_FILE_PATH = config.config_dir + '/data'
# Field separator used by the CSV data files.
DELIMITER = ','
def write_csv_data(data=(), index=0):
    """Write each item of *data* as a one-column CSV row to
    DATA_FILE_PATH-<index>, overwriting any existing file.

    BUGFIX: the default was the mutable `data = []`; an immutable empty
    tuple avoids the shared-mutable-default pitfall (the argument is only
    iterated, so callers see no difference).
    """
    with open(DATA_FILE_PATH + "-" + str(index), 'w+') as file:
        writer = csv.writer(file, delimiter=DELIMITER)
        for d in data:
            writer.writerow([d])
def read_csv_data(index=0):
    """Read DATA_FILE_PATH-<index> and return one joined string per row."""
    with open(DATA_FILE_PATH + "-" + str(index), 'r') as file:
        reader = csv.reader(file, delimiter=DELIMITER)
        return [' '.join(row) for row in reader]
def get_data(index = 0):
    """Return the stored rows for the given data-file index."""
    return read_csv_data(index)
def get_price(ticker):
    """Format a one-line quote for a yfinance-style ticker:
    'SYMBOL: last (change change%)'."""
    info = ticker.info
    previous_close = info['previousClose']
    last_price = info['regularMarketPrice']
    delta = last_price - previous_close
    percent = delta / previous_close * 100
    return '{}: {} ({:.2f} {:.2f}%)'.format(
        info['symbol'],
        last_price,
        delta,
        percent,
    )
def get_gsheet_data():
    """Fetch the configured cell range from Google Sheets.

    Returns a placeholder string when no 'gsheet' section exists in the
    app configuration.
    """
    if not 'gsheet' in config.config:
        return "No GSheet data"
    gsheet_config=config.config['gsheet']
    gsheet.init(gsheet_config['credentials_file'])
    return gsheet.get_range(
        gsheet_config['spreadsheet_id'],
        gsheet_config['sheet_name'],
        gsheet_config['cell_id'],
    )
def get_weather(index=0):
    """Download the wttr.in plain-text forecast into DATA_FILE_PATH-<index>."""
    target = "{}-{}".format(DATA_FILE_PATH, index)
    urllib.request.urlretrieve('https://wttr.in/?0ATmQ', filename=target)
def update():
    """Refresh all data files: current-weather summary (index 0),
    wttr.in forecast (index 1), and the Google-Sheet cell (index 2)."""
    place=config.config['weather']['location']
    OpenWMap=pyowm.OWM(config.config['weather']['api_token'])
    manager=OpenWMap.weather_manager()
    weather=manager.weather_at_place(place)
    forecast = manager.forecast_at_place(place, 'daily')
    # index 0: timestamped summary lines for the display
    data0 = [
        'data-pull',
        datetime.now().strftime("%b-%d %H:%M:%S"),
        '{}C.'.format(weather.weather.temperature(unit='celsius')['temp']),
        '{}'.format(weather.weather.detailed_status),
        'Tomorrow: {}'.format(forecast.get_weather_at(pyowm.utils.timestamps.tomorrow()).detailed_status),
    ]
    write_csv_data(data0, 0)
    get_weather(1)
    data2 = get_gsheet_data()
    write_csv_data(data2, 2)
| StarcoderdataPython |
210546 | from collections import defaultdict
# Shared module-level state; presumably populated by a tracer/profiler
# elsewhere in the package (func name -> data, func name -> list of
# ranges) — TODO confirm against the callers.
func_stack = {}
func_ranges = defaultdict(list)
| StarcoderdataPython |
8192551 | import copy
import numpy as np
import logging
from bnpy.allocmodel.hmm import HMMUtil
from bnpy.allocmodel.hmm.HDPHMMUtil import ELBOTermDimMap, calcELBO
from bnpy.allocmodel.hmm.HDPHMMUtil import calcELBO_LinearTerms, calcELBO_NonlinearTerms
from bnpy.allocmodel import AllocModel
from bnpy.suffstats import SuffStatBag
from bnpy.util import digamma, gammaln
from bnpy.util import StickBreakUtil
from bnpy.allocmodel.topics import OptimizerRhoOmega
from bnpy.allocmodel.topics.HDPTopicUtil import c_Beta, c_Dir, L_top
from bnpy.util import sharedMemToNumpyArray, numpyToSharedMemArray
Log = logging.getLogger('bnpy')
class HDPHMM(AllocModel):
""" Hierarchical Dirichlet process Hidden Markov model (HDP-HMM)
Truncated to finite number of K active states.
Attributes
-------
inferType : string {'VB', 'moVB', 'soVB'}
indicates which updates to perform for local/global steps
K : int
number of states
startAlpha : float
scalar pseudo-count
used in Dirichlet prior on starting state probabilities.
transAlpha : float
scalar pseudo-count
used in Dirichlet prior on state-to-state transition probabilities.
kappa : float
scalar pseudo-count
adds mass to probability of self-transition
Attributes for VB
---------
startTheta : 1D array, K
Vector parameterizes Dirichlet posterior q(\pi_{0})
transTheta : 2D array, K x K
Vector that parameterizes Dirichlet posterior q(\pi_k), k>0
Local Parameters
--------
resp : 2D array, T x K
q(z_t=k) = resp[t,k]
respPair : 3D array, T x K x K
q(z_t=k, z_t-1=j) = respPair[t,j,k]
"""
def __init__(self, inferType, priorDict=dict()):
if inferType == 'EM':
raise ValueError('EM is not supported for HDPHMM')
self.set_prior(**priorDict)
self.inferType = inferType
self.K = 0
    def set_prior(self, gamma=10,
                  transAlpha=0.5,
                  startAlpha=5.0, hmmKappa=0.0,
                  nGlobalIters=1, nGlobalItersBigChange=10, **kwargs):
        """Store prior hyperparameters on the model.

        gamma : top-level stick-breaking concentration.
        transAlpha : concentration of each row's transition Dirichlet.
        startAlpha : concentration of the starting-state Dirichlet.
        hmmKappa : sticky self-transition bias (stored as self.kappa).
        nGlobalIters / nGlobalItersBigChange : rho/omega/theta passes per
            global update (more when the truncation K just changed).
        Extra keyword args are ignored.
        """
        self.gamma = gamma
        self.transAlpha = transAlpha
        self.startAlpha = startAlpha
        self.kappa = hmmKappa
        self.nGlobalIters = nGlobalIters
        self.nGlobalItersBigChange = nGlobalItersBigChange
    def get_active_comp_probs(self):
        ''' Return K vector of appearance probabilities for each of the K comps

        Converts the stick-breaking fractions rho into the active portion
        of the global beta weights (remainder mass excluded).
        '''
        return StickBreakUtil.rho2beta_active(self.rho)
    def get_init_prob_vector(self):
        ''' Get vector of initial probabilities for all K active states

        Computes exp(E[log pi0]) under the Dirichlet posterior
        q(pi0 | startTheta); the final slice drops the remainder entry,
        so the result is unnormalized over the K active states.
        '''
        expELogPi0 = digamma(
            self.startTheta) - digamma(np.sum(self.startTheta))
        np.exp(expELogPi0, out=expELogPi0)
        return expELogPi0[0:self.K]
    def get_trans_prob_matrix(self):
        ''' Get matrix of transition probabilities for all K active states

        Row j holds exp(E[log pi_{jk}]) under q(pi_j | transTheta[j]);
        the slice drops the remainder column/rows beyond K, so rows are
        unnormalized over the active states.
        '''
        digammaSumVec = digamma(np.sum(self.transTheta, axis=1))
        expELogPi = digamma(self.transTheta) - digammaSumVec[:, np.newaxis]
        np.exp(expELogPi, out=expELogPi)
        return expELogPi[0:self.K, 0:self.K]
    def calc_local_params(self, Data, LP, **kwargs):
        ''' Calculate local parameters for each data item and each component.

        This is part of the E-step; delegates to HMMUtil.calcLocalParams
        (forward-backward) with the current Dirichlet posterior
        parameters transTheta/startTheta.

        Args
        -------
        Data : bnpy data object with Data.nObs observations
        LP : local param dict with fields
            * E_log_soft_ev : Data.nObs x K array where
            E_log_soft_ev[n,k] = log p(data obs n | comp k)

        Returns
        -------
        LP : dict of local parameters.
        '''
        return HMMUtil.calcLocalParams(Data, LP,
                                       transTheta=self.transTheta,
                                       startTheta=self.startTheta,
                                       **kwargs)
def initLPFromResp(self, Data, LP, limitMemoryLP=1):
''' Fill in remaining local parameters given resp.
Returns
--------
LP : dict, with fields
* respPair
'''
K = LP['resp'].shape[1]
if limitMemoryLP:
LP['TransCount'] = np.zeros((Data.nDoc, K, K))
else:
LP['respPair'] = np.zeros((Data.doc_range[-1], K, K))
for n in range(Data.nDoc):
start = Data.doc_range[n]
stop = Data.doc_range[n + 1]
if limitMemoryLP:
for t in range(start + 1, stop):
respPair_t = np.outer(
LP['resp'][
t - 1,
:],
LP['resp'][
t,
:])
LP['TransCount'][n] += respPair_t
else:
R = LP['resp']
LP['respPair'][start + 1:stop] = \
R[start:stop - 1][:, :, np.newaxis] \
* R[start + 1:stop][:, np.newaxis, :]
return LP
    def selectSubsetLP(self, Data, LP, relIDs):
        ''' Create local parameter dict for subset of sequences in Data

        Handles both LP layouts: full respPair arrays (sliced per
        sequence) or the memory-limited TransCount/Htable matrices
        (copied per selected sequence).

        Returns
        -------
        subsetLP : local params dict
        '''
        relIDs = np.asarray(relIDs, dtype=np.int32)
        # Fast path: selecting every sequence is just a deep copy.
        if relIDs.size == Data.nDoc:
            if np.allclose(relIDs, np.arange(Data.nDoc)):
                return copy.deepcopy(LP)
        T_all = np.sum(Data.doc_range[relIDs + 1] - Data.doc_range[relIDs])
        K = LP['resp'].shape[1]
        resp = np.zeros((T_all, K))
        if 'respPair' in LP:
            respPair = np.zeros((T_all, K, K))
        else:
            TransCount = np.zeros((len(relIDs), K, K))
            Htable = np.zeros((len(relIDs), K, K))
        # Copy each selected sequence into the packed output arrays.
        nstart = 0
        for ii, n in enumerate(relIDs):
            start = Data.doc_range[n]
            stop = Data.doc_range[n + 1]
            nstop = nstart + stop - start
            resp[nstart:nstop] = LP['resp'][start:stop]
            if 'respPair' in LP:
                respPair[nstart:nstop] = LP['respPair'][start:stop]
            else:
                TransCount[ii] = LP['TransCount'][n]
                Htable[ii] = LP['Htable'][n]
            nstart = nstop
        if 'respPair' in LP:
            subsetLP = dict(resp=resp, respPair=respPair)
        else:
            subsetLP = dict(resp=resp, TransCount=TransCount, Htable=Htable)
        return subsetLP
    def fillSubsetLP(self, Data, LP, targetLP, targetIDs):
        ''' Fill in local parameters for a subset of sequences/documents.

        Args
        -----
        LP : dict of local params
            represents K states and nDoc sequences
        targetLP : dict of local params
            represents K+Kx states and a subset of nDoc sequences
        targetIDs : sequence indices (into Data) that targetLP covers,
            in the same order as its entries.

        Returns
        -------
        newLP : dict of local params, with K + Kx components
            Non-target sequences keep their old values (zero-padded in
            the Kx new columns); target sequences take targetLP's values.
        '''
        nAtom = LP['resp'].shape[0]
        Knew = targetLP['resp'].shape[1]
        Kold = LP['resp'].shape[1]
        # Allocate K+Kx-wide arrays, seeded with the old K-wide values.
        newResp = np.zeros((nAtom, Knew))
        newResp[:, :Kold] = LP['resp']
        newTransCount = np.zeros((Data.nDoc, Knew, Knew))
        newTransCount[:, :Kold, :Kold] = LP['TransCount']
        newHtable = np.zeros((Data.nDoc, Knew, Knew))
        newHtable[:, :Kold, :Kold] = LP['Htable']
        # Overwrite the target sequences with targetLP's packed entries.
        start_t = 0
        for ii, n in enumerate(targetIDs):
            assert n >= 0
            assert n < Data.nDoc
            start = Data.doc_range[n]
            stop = Data.doc_range[n+1]
            stop_t = start_t + (stop-start)
            newResp[start:stop] = targetLP['resp'][start_t:stop_t]
            newTransCount[n] = targetLP['TransCount'][ii]
            newHtable[n] = targetLP['Htable'][ii]
            start_t = stop_t
        return dict(resp=newResp, TransCount=newTransCount, Htable=newHtable)
    def getSummaryFieldNames(self):
        """Names of the count summary fields tracked by this allocModel."""
        return ['StartStateCount', 'TransStateCount']
    def getSummaryFieldDims(self):
        """Dims of each summary field: StartStateCount is K-vector,
        TransStateCount is K x K. Note ('K') is just the string 'K',
        not a tuple — kept as downstream code expects this form."""
        return [('K'), ('K', 'K')]
    def get_global_suff_stats(self, Data, LP, **kwargs):
        """Summarize local params into sufficient statistics; delegates
        to calcSummaryStats (defined elsewhere in this module)."""
        return calcSummaryStats(Data, LP, **kwargs)
    def forceSSInBounds(self, SS):
        ''' Force TransStateCount and StartStateCount to be >= 0.

        This avoids numerical issues in memoized updates
        where SS "chunks" are added and subtracted incrementally
        such as:
            x = 10
            x += 1e-15
            x -= 10
            x -= 1e-15
        resulting in x < 0.

        Returns
        -------
        Nothing. SS is updated in-place.
        '''
        # clamp in place; out= avoids allocating new arrays
        np.maximum(SS.TransStateCount, 0, out=SS.TransStateCount)
        np.maximum(SS.StartStateCount, 0, out=SS.StartStateCount)
def find_optimum_rhoOmega(self, **kwargs):
''' Performs numerical optimization of rho and omega for M-step update.
Note that the optimizer forces rho to be in [EPS, 1-EPS] for
the sake of numerical stability
Returns
-------
rho : 1D array, size K
omega : 1D array, size K
Info : dict of information about optimization.
'''
# Calculate expected log transition probability
# using theta vectors for all K states plus initial state
ELogPi = digamma(self.transTheta) \
- digamma(np.sum(self.transTheta, axis=1))[:, np.newaxis]
sumELogPi = np.sum(ELogPi, axis=0)
startELogPi = digamma(self.startTheta) \
- digamma(np.sum(self.startTheta))
# Select initial rho, omega values for gradient descent
if hasattr(self, 'rho') and self.rho.size == self.K:
initRho = self.rho
else:
initRho = None
if hasattr(self, 'omega') and self.omega.size == self.K:
initOmega = self.omega
else:
initOmega = None
# Do the optimization
try:
rho, omega, fofu, Info = \
OptimizerRhoOmega.find_optimum_multiple_tries(
sumLogPi=sumELogPi,
sumLogPiActiveVec=None,
sumLogPiRemVec=None,
startAlphaLogPi=self.startAlpha * startELogPi,
nDoc=self.K + 1,
gamma=self.gamma,
alpha=self.transAlpha,
kappa=self.kappa,
initrho=initRho,
initomega=initOmega)
self.OptimizerInfo = Info
self.OptimizerInfo['fval'] = fofu
except ValueError as error:
if hasattr(self, 'rho') and self.rho.size == self.K:
Log.error(
'***** Optim failed. Remain at cur val. ' +
str(error))
rho = self.rho
omega = self.omega
else:
Log.error('***** Optim failed. Set to prior. ' + str(error))
omega = (self.gamma + 1) * np.ones(SS.K)
rho = 1 / float(1 + self.gamma) * np.ones(SS.K)
return rho, omega
    def update_global_params_EM(self, SS, **kwargs):
        """Not supported: HDPHMM requires variational inference."""
        raise ValueError('HDPHMM does not support EM')
    def update_global_params_VB(self, SS,
                                mergeCompA=None, mergeCompB=None,
                                **kwargs):
        ''' Update global parameters.

        Alternates theta and rho/omega updates for several iterations
        (more when the truncation level K just changed). When a merge of
        components (mergeCompA, mergeCompB) is proposed, rho/omega are
        first re-initialized with a fast heuristic instead of from
        scratch.
        '''
        self.K = SS.K
        if not hasattr(self, 'rho') or self.rho.size != SS.K:
            # Big change from previous model is being proposed.
            # We'll init rho from scratch, and need more iters to improve.
            nGlobalIters = self.nGlobalItersBigChange
        else:
            # Small change required. Current rho is good initialization.
            nGlobalIters = self.nGlobalIters

        # Special update case for merges:
        # Fast, heuristic update for new rho given original value
        if mergeCompA is not None:
            # Merge in beta space (add the two sticks), then map back.
            beta = OptimizerRhoOmega.rho2beta_active(self.rho)
            beta[mergeCompA] += beta[mergeCompB]
            beta = np.delete(beta, mergeCompB, axis=0)
            self.rho = OptimizerRhoOmega.beta2rho(beta, SS.K)
            omega = self.omega
            omega[mergeCompA] += omega[mergeCompB]
            self.omega = np.delete(omega, mergeCompB, axis=0)
            # TODO think about smarter init for rho/omega??

        # Update theta with recently updated info from suff stats
        self.transTheta, self.startTheta = self._calcTheta(SS)

        for giter in range(nGlobalIters):
            # Update rho, omega through numerical optimization
            self.rho, self.omega = self.find_optimum_rhoOmega(**kwargs)
            # Update theta again to reflect the new rho, omega
            self.transTheta, self.startTheta = self._calcTheta(SS)
def update_global_params_soVB(self, SS, rho, **kwargs):
''' Updates global parameters when learning with stochastic online VB.
Note that the rho here is the learning rate parameter, not
the global stick weight parameter rho
'''
self.K = SS.K
# Update theta (1/2), incorporates recently updated suff stats
transThetaStar, startThetaStar = self._calcTheta(SS)
self.transTheta = rho * transThetaStar + (1 - rho) * self.transTheta
self.startTheta = rho * startThetaStar + (1 - rho) * self.startTheta
# Update rho/omega
rhoStar, omegaStar = self.find_optimum_rhoOmega(**kwargs)
g1 = (1 - rho) * (self.rho * self.omega) + rho * (rhoStar * omegaStar)
g0 = (1 - rho) * ((1 - self.rho) * self.omega) + \
rho * ((1 - rhoStar) * omegaStar)
self.rho = g1 / (g1 + g0)
self.omega = g1 + g0
# TODO: update theta (2/2)?? incorporates recent rho/omega updates
def _calcTheta(self, SS):
''' Update parameters theta to maximize objective given suff stats.
Returns
---------
transTheta : 2D array, size K x K+1
startTheta : 1D array, size K
'''
K = SS.K
if not hasattr(self, 'rho') or self.rho.size != K:
self.rho = OptimizerRhoOmega.create_initrho(K)
# Calculate E_q[alpha * Beta_l] for l = 1, ..., K+1
Ebeta = StickBreakUtil.rho2beta(self.rho)
alphaEBeta = self.transAlpha * Ebeta
# transTheta_kl = M_kl + E_q[alpha * Beta_l] + kappa * 1_{k==l}
transTheta = np.zeros((K, K + 1))
transTheta += alphaEBeta[np.newaxis, :]
transTheta[:K, :K] += SS.TransStateCount + self.kappa * np.eye(self.K)
# startTheta_k = r_1k + E_q[alpha * Beta_l] (where r_1,>K = 0)
startTheta = self.startAlpha * Ebeta
startTheta[:K] += SS.StartStateCount
return transTheta, startTheta
def init_global_params(self, Data, K=0, **initArgs):
''' Initialize rho, omega, and theta to reasonable values.
This is only called by "from scratch" init routines.
'''
self.K = K
self.rho = OptimizerRhoOmega.create_initrho(K)
self.omega = (1.0 + self.gamma) * np.ones(K)
# To initialize theta, perform standard update given rho, omega
# but with "empty" sufficient statistics.
SS = SuffStatBag(K=self.K, D=Data.dim)
SS.setField('StartStateCount', np.ones(K), dims=('K'))
SS.setField('TransStateCount', np.ones((K, K)), dims=('K', 'K'))
self.transTheta, self.startTheta = self._calcTheta(SS)
def set_global_params(self, hmodel=None,
rho=None, omega=None,
startTheta=None, transTheta=None,
**kwargs):
''' Set rho, omega to provided values.
'''
if hmodel is not None:
self.K = hmodel.allocModel.K
if hasattr(hmodel.allocModel, 'rho'):
self.rho = hmodel.allocModel.rho
self.omega = hmodel.allocModel.omega
else:
raise AttributeError('Unrecognized hmodel. No field rho.')
if hasattr(hmodel.allocModel, 'startTheta'):
self.startTheta = hmodel.allocModel.startTheta
self.transTheta = hmodel.allocModel.transTheta
else:
raise AttributeError(
'Unrecognized hmodel. No field startTheta.')
elif rho is not None \
and omega is not None \
and startTheta is not None:
self.rho = rho
self.omega = omega
self.startTheta = startTheta
self.transTheta = transTheta
self.K = omega.size
assert self.K == self.startTheta.size - 1
else:
self._set_global_params_from_scratch(**kwargs)
def _set_global_params_from_scratch(self, beta=None,
Data=None, nDoc=None, **kwargs):
''' Set rho, omega to values that reproduce provided appearance probs
Args
--------
beta : 1D array, size K
beta[k] gives top-level probability for active comp k
'''
if nDoc is None:
nDoc = Data.nDoc
if nDoc is None:
raise ValueError('Bad parameters. nDoc not specified.')
if beta is not None:
beta = beta / beta.sum()
if beta is None:
raise ValueError('Bad parameters. Vector beta not specified.')
Ktmp = beta.size
rem = np.minimum(0.05, 1. / (Ktmp))
beta = np.hstack([np.squeeze(beta), rem])
beta = beta / np.sum(beta)
self.K = beta.size - 1
self.rho, self.omega = self._convert_beta2rhoomega(beta)
assert self.rho.size == self.K
assert self.omega.size == self.K
def _convert_beta2rhoomega(self, beta, nDoc=10):
''' Find vectors rho, omega that are probable given beta
Returns
--------
rho : 1D array, size K
omega : 1D array, size K
'''
assert abs(np.sum(beta) - 1.0) < 0.001
rho = OptimizerRhoOmega.beta2rho(beta, self.K)
omega = (nDoc + self.gamma) * np.ones(rho.size)
return rho, omega
def calc_evidence(self, Data, SS, LP, todict=False, **kwargs):
''' Calculate ELBO objective function value for provided state.
Returns
-------
L : float
'''
assert hasattr(self, 'rho')
return calcELBO(Data=Data, SS=SS, LP=LP,
startAlpha=self.startAlpha, alpha=self.transAlpha,
kappa=self.kappa, gamma=self.gamma,
rho=self.rho, omega=self.omega,
transTheta=self.transTheta, startTheta=self.startTheta,
todict=todict, **kwargs)
def calcELBO_LinearTerms(self, **kwargs):
''' Compute sum of ELBO terms that are linear/const wrt suff stats
Returns
-------
L : float
'''
return calcELBO_LinearTerms(
startAlpha=self.startAlpha, alpha=self.transAlpha,
kappa=self.kappa, gamma=self.gamma,
rho=self.rho, omega=self.omega,
transTheta=self.transTheta, startTheta=self.startTheta,
**kwargs)
    def calcELBO_NonlinearTerms(self, **kwargs):
        ''' Compute sum of ELBO terms that are NONlinear wrt suff stats

        Pure passthrough to the module-level function of the same name;
        kept as a method so callers can use a uniform interface.

        Returns
        -------
        L : float
        '''
        return calcELBO_NonlinearTerms(**kwargs)
    def calcHardMergeGap(self, SS, kA, kB):
        ''' Calculate scalar improvement in ELBO for hard merge of comps kA, kB

        Builds a candidate merged model (suff stats, rho/omega, theta with
        one fewer state) and compares its linear ELBO terms against the
        current model's. Does *not* include any entropy.

        Returns
        ---------
        L : scalar
        '''
        # Candidate model has one fewer active state.
        m_K = SS.K - 1
        # Copy only the count fields needed by the linear terms, then merge
        # kA and kB inside the copy so the caller's SS stays untouched.
        m_SS = SuffStatBag(K=SS.K, D=0)
        m_SS.setField('StartStateCount', SS.StartStateCount.copy(), dims='K')
        m_SS.setField('TransStateCount', SS.TransStateCount.copy(),
                      dims=('K', 'K'))
        m_SS.mergeComps(kA, kB)
        # Create candidate beta vector: kB's mass moves onto kA.
        m_beta = StickBreakUtil.rho2beta(self.rho)
        m_beta[kA] += m_beta[kB]
        m_beta = np.delete(m_beta, kB, axis=0)
        # Create candidate rho and omega vectors
        m_rho = StickBreakUtil.beta2rho(m_beta, m_K)
        m_omega = np.delete(self.omega, kB)
        # Create candidate startTheta (same closed form as _calcTheta).
        m_startTheta = self.startAlpha * m_beta.copy()
        m_startTheta[:m_K] += m_SS.StartStateCount
        # Create candidate transTheta
        m_transTheta = self.transAlpha * np.tile(m_beta, (m_K, 1))
        if self.kappa > 0:
            # Sticky self-transition bonus on the diagonal.
            m_transTheta[:, :m_K] += self.kappa * np.eye(m_K)
        m_transTheta[:, :m_K] += m_SS.TransStateCount
        # Evaluate objective func. for both candidate and current model
        Lcur = calcELBO_LinearTerms(
            SS=SS, rho=self.rho, omega=self.omega,
            startTheta=self.startTheta, transTheta=self.transTheta,
            alpha=self.transAlpha, startAlpha=self.startAlpha,
            gamma=self.gamma, kappa=self.kappa)
        Lprop = calcELBO_LinearTerms(
            SS=m_SS, rho=m_rho, omega=m_omega,
            startTheta=m_startTheta, transTheta=m_transTheta,
            alpha=self.transAlpha, startAlpha=self.startAlpha,
            gamma=self.gamma, kappa=self.kappa)
        # Note: This gap relies on fact that all nonlinear terms are entropies,
        # which are tracked separately; only linear terms are compared here.
        return Lprop - Lcur
def calcHardMergeGap_AllPairs(self, SS):
''' Calc matrix of improvement in ELBO for all possible pairs of comps
'''
Gap = np.zeros((SS.K, SS.K))
for kB in range(1, SS.K):
for kA in range(0, kB):
Gap[kA, kB] = self.calcHardMergeGap(SS, kA, kB)
return Gap
def calcHardMergeGap_SpecificPairs(self, SS, PairList):
''' Calc matrix of improvement in ELBO for all possible pairs of comps
'''
Gaps = np.zeros(len(PairList))
for ii, (kA, kB) in enumerate(PairList):
Gaps[ii] = self.calcHardMergeGap(SS, kA, kB)
return Gaps
def to_dict(self):
return dict(transTheta=self.transTheta,
startTheta=self.startTheta,
omega=self.omega, rho=self.rho)
def from_dict(self, myDict):
self.inferType = myDict['inferType']
self.K = myDict['K']
self.transTheta = myDict['transTheta']
self.startTheta = myDict['startTheta']
self.omega = myDict['omega']
self.rho = myDict['rho']
def get_prior_dict(self):
return dict(gamma=self.gamma, alpha=self.transAlpha, K=self.K,
hmmKappa=self.kappa, startAlpha=self.startAlpha)
def getSerializableParamsForLocalStep(self):
""" Get compact dict of params for parallel local step.
Returns
-------
Info : dict
"""
return dict(inferType=self.inferType,
K=self.K)
def fillSharedMemDictForLocalStep(self, ShMem=None):
""" Get dict of shared mem arrays needed for parallel local step.
Returns
-------
ShMem : dict of RawArray objects
"""
# No shared memory required here.
if not isinstance(ShMem, dict):
ShMem = dict()
K = self.K
if 'startTheta' in ShMem:
shared_startTheta = sharedMemToNumpyArray(ShMem['startTheta'])
assert shared_startTheta.size >= K + 1
shared_startTheta[:K + 1] = self.startTheta
shared_transTheta = sharedMemToNumpyArray(ShMem['transTheta'])
assert shared_transTheta.shape[0] >= K
assert shared_transTheta.shape[1] >= K + 1
shared_transTheta[:K, :K + 1] = self.transTheta
else:
ShMem['startTheta'] = numpyToSharedMemArray(self.startTheta)
ShMem['transTheta'] = numpyToSharedMemArray(self.transTheta)
return ShMem
    def getLocalAndSummaryFunctionHandles(self):
        """ Get function handles for local step and summary step

        Useful for parallelized algorithms.

        Returns
        -------
        calcLocalParams : f handle
            HMMUtil.calcLocalParams (defined in the HMMUtil module).
        calcSummaryStats : f handle
            The module-level calcSummaryStats defined in this file.
        """
        return HMMUtil.calcLocalParams, calcSummaryStats
# .... end class HDPHMM
def calcSummaryStats(Data, LP,
                     doPrecompEntropy=0,
                     doPrecompMergeEntropy=0,
                     mPairIDs=None,
                     trackDocUsage=0,
                     **kwargs):
    ''' Calculate summary statistics for given data slice and local params.

    Args
    -------
    Data : dataset object with fields doc_range, nDoc, dim
    LP : dict of local params, with fields
        resp : 2D array, nAtoms x K
        respPair or TransCount : pairwise transition responsibilities
    doPrecompEntropy : if nonzero, also store entropy ELBO terms in SS
    doPrecompMergeEntropy : if nonzero, precompute entropies for each
        candidate merge pair in mPairIDs
    mPairIDs : optional list of (kA, kB) candidate merge pairs
    trackDocUsage : if nonzero, count per-state usage across sequences

    Returns
    -------
    SS : SuffStatBag
        With fields StartStateCount, TransStateCount, N, nDoc, plus
        optional ELBO / merge / selection terms.
    '''
    if mPairIDs is None:
        M = 0
    else:
        M = len(mPairIDs)
    resp = LP['resp']
    K = resp.shape[1]
    # doc_range[:-1] holds the index of each sequence's first timestep,
    # so these rows of resp give the start-state responsibilities.
    startLocIDs = Data.doc_range[:-1]
    StartStateCount = np.sum(resp[startLocIDs], axis=0)
    N = np.sum(resp, axis=0)
    # Prefer the precomputed TransCount field when the local step provides
    # it; otherwise sum the pairwise responsibilities over time.
    if 'TransCount' in LP:
        TransStateCount = np.sum(LP['TransCount'], axis=0)
    else:
        respPair = LP['respPair']
        TransStateCount = np.sum(respPair, axis=0)
    SS = SuffStatBag(K=K, D=Data.dim, M=M)
    SS.setField('StartStateCount', StartStateCount, dims=('K'))
    SS.setField('TransStateCount', TransStateCount, dims=('K', 'K'))
    SS.setField('N', N, dims=('K'))
    SS.setField('nDoc', Data.nDoc, dims=None)
    if doPrecompEntropy or 'Htable' in LP:
        # Compute entropy terms!
        # 'Htable', 'Hstart' will both be in Mdict
        Mdict = calcELBO_NonlinearTerms(Data=Data,
                                        LP=LP, returnMemoizedDict=1)
        SS.setELBOTerm('Htable', Mdict['Htable'], dims=('K', 'K'))
        SS.setELBOTerm('Hstart', Mdict['Hstart'], dims=('K'))
    if doPrecompMergeEntropy:
        # Entropies for each candidate merge pair, used to score merges
        # without revisiting the data.
        subHstart, subHtable = HMMUtil.PrecompMergeEntropy_SpecificPairs(
            LP, Data, mPairIDs)
        SS.setMergeTerm('Hstart', subHstart, dims=('M'))
        SS.setMergeTerm('Htable', subHtable, dims=('M', 2, 'K'))
        SS.mPairIDs = np.asarray(mPairIDs)
    if trackDocUsage:
        # Track how often topic appears in a seq. with mass > thresh.
        DocUsage = np.zeros(K)
        for n in range(Data.nDoc):
            start = Data.doc_range[n]
            stop = Data.doc_range[n + 1]
            DocUsage += np.sum(LP['resp'][start:stop], axis=0) > 0.01
        SS.setSelectionTerm('DocUsageCount', DocUsage, dims='K')
    return SS
from enum import Enum
class MessageType(Enum):
    """Enumerates the kinds of messages exchanged between peers.

    NOTE(review): the explicit integer values look like serialized wire
    codes — keep them stable across versions; confirm against the
    message-encoding code.
    """
    BLOCK_HEADER = 1
    BLOCK_INV = 2
    UNCONFIRMED_TRANSACTION = 3
    UNCONFIRMED_TRANSACTION_INV = 4
    BLOCK_TRANSACTION_INV = 5
    SYNCHRONIZE = 6
class TransactionType(Enum):
    """Enumerates the kinds of transactions the system recognizes.

    NOTE(review): explicit integer values presumably appear in serialized
    transactions — keep them stable; confirm against the transaction
    encoding/decoding code.
    """
    GENESIS = 1
    COINBASE = 2
    STANDARD = 3
    ASSET_CREATION = 4
    ASSET_ADDENDUM = 5
    ORDER = 6
    FILL = 7
    CANCEL_ORDER = 8
    REGISTRATION = 9