text string | size int64 | token_count int64 |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 11:55:27 2021
Snow-Hydrology Repo for Evaluation, Analysis, and Decision-making Dashboard (shread_dash.py) Database Initialization
This is part of dashboard loading database and other data into memory. The data for the database relies on a series of
retrieval scripts (/database/SUBS) that retrieve hydrometeorological data from online and store the data in local
databases. Part of the retrieval process is dependent on the SHREAD repository (https://github.com/tclarkin/shread).
The databases are built in SQLite.
@author: tclarkin, buriona (2020-2022)
"""
import os
import datetime as dt
from pathlib import Path
import pandas as pd
from flask_sqlalchemy import SQLAlchemy
import dash_bootstrap_components as dbc
import dash
### Launch SQLite DB Server ###
# Define directories and app
this_dir = os.path.dirname(os.path.realpath(__file__))
#this_dir = Path('C:/Programs/shread_dash/database')
app_dir = os.path.dirname(this_dir)
# define functions
def create_app():
    """
    Launch the Dash app and configure the SQLAlchemy SQLite binds.

    Returns
    -------
    dash.Dash
        The configured Dash application; its underlying Flask server
        carries SQLALCHEMY_BINDS pointing at the local SQLite files under
        <app_dir>/database.
    """
    assets_path = Path(app_dir, 'assets')
    app = dash.Dash(
        __name__,
        external_stylesheets=[dbc.themes.BOOTSTRAP],
        update_title="Updating...",
        # suppress_callback_exceptions=True,
        assets_folder=assets_path
    )
    app.title = "WCAO Dashboard"
    db_path = Path(app_dir, 'database')
    # Map each SQLAlchemy bind name to its (subdirectory, db file) pair.
    bind_files = {
        'swe': ('SHREAD', 'swe.db'),
        'sd': ('SHREAD', 'sd.db'),
        'csas_iv': ('CSAS', 'csas_iv.db'),
        'csas_dv': ('CSAS', 'csas_dv.db'),
        'snotel_dv': ('SNOTEL', 'snotel_dv.db'),
        'usgs_dv': ('FLOW', 'usgs_dv.db'),
        'usgs_iv': ('FLOW', 'usgs_iv.db'),
        'rfc_dv': ('FLOW', 'rfc_dv.db'),
        'rfc_iv': ('FLOW', 'rfc_iv.db'),
        'mint': ('SHREAD', 'mint.db'),
        'maxt': ('SHREAD', 'maxt.db'),
        #'rhm': ('SHREAD', 'rhm.db'),
        'pop12': ('SHREAD', 'pop12.db'),
        'qpf': ('SHREAD', 'qpf.db'),
        'snow': ('SHREAD', 'snow.db'),
        'sky': ('SHREAD', 'sky.db'),
    }
    app.server.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # BUG FIX (consistency): the original used .as_posix() for some paths
    # but interpolated raw Path objects for the NDFD ones, which produces
    # backslash connection strings on Windows. Build every bind the same way.
    app.server.config['SQLALCHEMY_BINDS'] = {
        bind: f'sqlite:///{Path(db_path, sub, fname).as_posix()}'
        for bind, (sub, fname) in bind_files.items()
    }
    return app
# Launch server
app = create_app()
db = SQLAlchemy(app.server)
# Build SQLAlchemy metadata from the tables already present in the bound
# SQLite files (no ORM models are declared in code).
db.reflect()
### Load in other Data ###
# Define working (data) directory
os.chdir(os.path.join(app_dir, 'database'))
# Identify files in database
csas_dir = os.path.join(app_dir, 'database', 'CSAS')
csas_files = os.listdir(csas_dir)
res_dir = os.path.join(app_dir, 'resources')
#switch working dir back to main dir so dash app can function correctly
os.chdir(app_dir)
print('Calculating bounds of SNODAS.db')
# Create list of basins
#TODO call from .csv for future user input
basin_list = [
    {'label': 'NONE', 'value': None},
    {'label': 'SAN JUAN - NAVAJO RES NR ARCHULETA', 'value': 'NVRN5L_F'},
    {'label': 'ANIMAS - DURANGO', 'value': 'DRGC2H_F'},
    {'label': 'DOLORES - MCPHEE RESERVOIR', 'value': 'MPHC2L_F'},
    {'label': 'FLORIDA - LEMON RES NR DURANGO', 'value': 'LEMC2H_F'},
    {'label': 'LOS PINOS - NR BAYFIELD VALLECITO RES', 'value': 'VCRC2H_F'}
]
# Set ranges of variables for use in dashboard
elevrange = [5000, 15000]
print(f' Elevations from {elevrange[0]} to {elevrange[-1]}')
# Slider tick labels: "1,000'" ... "19,000'", keyed by elevation as string.
elevdict = {str(e * 1000): f"{e * 1000:,}'" for e in range(1, 20)}
sloperange = [0.0, 100]
print(f' Slopes from {sloperange[0]} to {sloperange[-1]}')
# Slider tick labels: "0°" ... "100°", keyed by slope as string.
slopedict = {str(s * 10): f'{s * 10}°' for s in range(0, 11)}
# Compass labels for aspect in degrees (duplicated endpoints wrap around N/W).
_aspect_degrees = [-90, -45, 0, 45, 90, 135, 180, 225, 270, 315, 360]
_aspect_names = ["W", "NW", "N", "NE", "E", "SE", "S", "SW", "W", "NW", "N"]
aspectdict = dict(zip(_aspect_degrees, _aspect_names))
# Define colors:
# https://colorbrewer2.org/?type=qualitative&scheme=Set1&n=9
color8 = ['#e41a1c','#377eb8','#4daf4a','#984ea3','#ff7f00','#a65628','#f781bf','#999999']
# Import FLOW gages and define list for dashboard drop down & add colors
usgs_gages = pd.read_csv(os.path.join(this_dir,"FLOW", "usgs_gages.csv"))
usgs_gages.index = usgs_gages.site_no
# Tile the 8-color palette until it covers every gage, then assign one
# color per gage.
colorg = color8
while len(colorg)<len(usgs_gages):
    colorg = colorg*2
usgs_gages["color"] = colorg[0:len(usgs_gages)]
# Add list for dropdown menu
usgs_list = list()
for g in usgs_gages.index:
    # NOTE(review): the "0" prefix presumably restores a leading zero that is
    # lost when site numbers are parsed as integers — confirm every site ID
    # in usgs_gages.csv really begins with 0.
    usgs_list.append({"label": "0" + str(usgs_gages.site_no[g]) + " " + usgs_gages.name[g] + " (" + str(
        usgs_gages.elev_ft[g]) + " ft | " + str(usgs_gages.area[g]) + " sq.mi.)", "value": "0" + str(g)})
# Create list of SNOTEL sites & add colors
snotel_sites = pd.read_csv(os.path.join(this_dir,"SNOTEL","snotel_sites.csv"))
snotel_sites.index = snotel_sites.triplet
# Tile the 8-color palette to cover every site; both the "color" and
# "prcp_color" columns receive the same palette slice.
colors = color8
while len(colors)<len(snotel_sites):
    colors = colors*2
snotel_sites["color"] = snotel_sites["prcp_color"] = colors[0:len(snotel_sites)]
# Add list for dropdown menu
snotel_list = list()
for s in snotel_sites.index:
    # Label: "<site_no> <name> (<elev> ft)"; value is the station triplet.
    snotel_list.append({"label": str(snotel_sites.site_no[s]) + " " + snotel_sites.name[s] + " (" + str(
        round(snotel_sites.elev_ft[s], 0)) + " ft)", "value": s})
# Create list of CSAS sites & add colors
# Hard-coded CSAS station metadata (site code, display name, elevation).
_csas_site_info = {
    "site": ["SASP", "SBSP", "PTSP", "SBSG"],
    "name": ["Swamp Angel", "Senator Beck", "Putney [Meteo]", "Senator Beck Gage [Flow]"],
    "elev_ft": [11060, 12186, 12323, 11030],
}
csas_gages = pd.DataFrame(_csas_site_info)
# Repeat the 8-color palette until it is at least as long as the site list.
colorc = list(color8)
while len(colorc) < len(csas_gages):
    colorc = colorc * 2
csas_gages["color"] = csas_gages["prcp_color"] = colorc[:len(csas_gages)]
csas_gages.index = csas_gages["site"]
# Dropdown options labelled "<name> (<elev> ft)", keyed by site code.
csas_list = [
    {"label": f"{csas_gages.name[c]} ({round(csas_gages.elev_ft[c], 0)} ft)",
     "value": c}
    for c in csas_gages.index
]
# Generate NDFD list
# (label, value) pairs for the forecast-product dropdown.
_forecast_options = [
    ("Flow (RFC)", "flow"),
    ("Min. Temp", "mint"),
    ("Max. Temp", "maxt"),
    ("Precip (QPF)", "qpf"),
    ("Precip Prob.", "pop12"),
    ("Snow", "snow"),
    #("Relative Humidity", "rhm"),
    ("Sky Coverage", "sky"),
]
forecast_list = [{"label": lbl, "value": val} for lbl, val in _forecast_options]
# Import CSAS dust on snow data
try:
    dust = pd.read_csv(os.path.join(csas_dir, "csas_dust.csv"))
except FileNotFoundError:
    # A missing file is tolerated; the dust panel is simply disabled below.
    dust = pd.DataFrame()
if dust.empty:
    dust_disable = True
else:
    dust_disable = False
    # Drop the first data row (index 0) — presumably a units/metadata row;
    # TODO confirm against csas_dust.csv.
    dust_ts = dust.loc[1:len(dust),]
    dust_ts = dust_ts.reset_index(drop=True)
    # NOTE(review): dates parsed day-first with a 2-digit year (%d-%m-%y) —
    # confirm the CSV is not month-first.
    dust_ts["Date"] = pd.to_datetime(dust_ts["Date"],format="%d-%m-%y")
    dust_ts.index = dust_ts.Date
    dust_ts = dust_ts.drop("Date",axis=1)
    # Divide by 2.54 — assumes source values are in cm, converting to inches;
    # TODO confirm units.
    dust_ts = (dust_ts.apply(pd.to_numeric)/2.54)
    dust_layers = pd.DataFrame(index=dust_ts.columns)
    colord = color8
    while len(colord) < len(dust_layers):
        colord = colord * 2
    dust_layers["color"] = colord[0:len(dust_layers)]
# set initial start and end date
# Default dashboard window: ten days either side of today.
_window = dt.timedelta(days=10)
start_date = dt.datetime.now().date() - _window
end_date = dt.datetime.now().date() + _window
| 9,018 | 3,831 |
import cloudscraper
import json
from bs4 import BeautifulSoup
from dotenv import load_dotenv
import os
# Requests-compatible session that solves Cloudflare anti-bot challenges.
scraper = cloudscraper.create_scraper()
# NOTE(review): load_dotenv() normally takes a dotenv *file path*; passing
# 'lang_code' means the variables are read from a file literally named
# "lang_code" — confirm this is intentional and not meant to be ".env".
load_dotenv('lang_code')
language = os.getenv("lang_code")
def simsimi(question):
    '''
    Query the SimSimi API with the message typed by the user and return
    the reply text.

    Parameters
    ----------
    question : str
        The message typed by the user.

    Returns
    -------
    str
        The reply found under the "success" key of the API response.
    '''
    # BUG FIX: the original built
    #   f'https://api.simsimi.net/v1/?text={question}={language}'
    # which glued the language onto the *text* parameter. The language must
    # be sent as the separate "lc" parameter (vi, en, ph, zh, ch, ru, id,
    # ko, ar, fr, ja, es, de, pt, ...). Using params= also URL-encodes the
    # user's message.
    #bypass cloudflare antibot fetch
    response = scraper.get('https://api.simsimi.net/v1/',
                           params={'text': question, 'lc': language}).text
    soup = BeautifulSoup(response, 'html.parser')
    #debug stuff
    #print("Code: " + str(soup))
    text = json.loads(soup.text)
    return text["success"] # Return the reply message
import sys
def write():
    """Create (or open for append) 'test.txt', then close it.

    Exits the interpreter with a non-zero status if the file cannot be
    opened.
    """
    print('Creating new text file')
    name = 'test.txt'  # Name of text file to create
    try:
        # 'a' creates the file if missing without truncating an existing one;
        # the context manager guarantees it is closed even on error.
        with open(name, 'a'):
            pass
    except OSError:
        # BUG FIX: was a bare ``except`` (also caught KeyboardInterrupt /
        # SystemExit) and exited with status 0, signalling success on failure.
        print('Something went wrong! Can\'t tell what?')
        sys.exit(1)  # quit Python with a failure status
write()
from rest_framework import serializers
from .models import Bid, Item, User
class ItemSerializer(serializers.ModelSerializer):
    """Serialize Item instances, exposing id, name, product code,
    description and sold status."""
    class Meta:
        model = Item
        fields = ('id', 'name', 'product_code', 'description', 'sold')
class UserSerializer(serializers.ModelSerializer):
    """Serialize User instances, exposing id, username and email only."""
    class Meta:
        model = User
        fields = ('id','username', 'email')
class BidSerializer(serializers.ModelSerializer):
    """Serialize Bid instances with their related item and user ids."""
    class Meta:
        model = Bid
        fields = ('id', 'item', 'user', 'bid_time', 'bid_amount')
| 523 | 147 |
import requests
from a01.auth import A01Auth
# Module-level HTTP session shared by importers; every request made through
# it is authenticated by the A01 auth handler.
session = requests.Session()  # pylint: disable=invalid-name
session.auth = A01Auth()
"""
Construct, train neural-SDE models and simulate trajectories from the learnt
models.
"""
# Copyright 2021 Sheng Wang.
# Affiliation: Mathematical Institute, University of Oxford
# Email: sheng.wang@maths.ox.ac.uk
import numpy as np
import os
import pandas as pd
import tensorflow as tf
import tensorflow_probability as tfp
import tensorflow_model_optimization as tfmot
import marketmodel.utils as utils
from glob import glob
from tqdm import tqdm
from marketmodel.factors import PrepTrainData
class Loss(object):
    """
    Library of loss functions for neural SDE models.
    """
    @staticmethod
    def loss_S(dt):
        """
        Loss function for the neural SDE model of S.
        Parameters
        __________
        dt: float
            Time increment.
        Returns
        _______
        loss: method
            Loss function.
        """
        def loss(y_true, y_pred):
            # extract data
            # Network outputs: alpha (negative log of the variance scale)
            # and beta (relative drift); targets: increment dS and level S.
            alpha = y_pred[:, 0]
            beta = y_pred[:, 1]
            dS = y_true[:, 0]
            S = y_true[:, 1]
            # compute drift
            mu = beta * S # drift term
            # compute log-likelihood
            # Negative Gaussian log-likelihood (up to constants) of the Euler
            # increment dS ~ N(mu*dt, dt * S^2 * exp(-alpha)).
            l = tf.reduce_sum(2*tf.math.log(S)-alpha + tf.square(dS - mu*dt) *
                              tf.exp(alpha) / dt / S**2)
            return l
        return loss
    @staticmethod
    def loss_xi(dt, n_dim, n_varcov, mask_diagonal, W, G,
                lbd_penalty_eq, lbd_penalty_sz):
        """
        Loss function for the neural SDE model of xi.
        """
        def loss(y_true, y_pred):
            # get diffusion terms in the predicted values; in particular,
            # diagonal terms of the diffusion matrix are taken exponentials
            sigma_term = tf.transpose(
                tf.where(tf.constant(mask_diagonal),
                         tf.transpose(tf.exp(y_pred)), tf.transpose(y_pred)))[:,
                :n_varcov]
            # construct the transposed diffusion matrix
            sigma_tilde_T = tfp.math.fill_triangular(sigma_term, upper=True)
            # get diagonal terms of the diffusion matrix
            sigma_term_diagonal = tf.where(tf.constant(mask_diagonal),
                                           tf.transpose(y_pred), 0.)
            # get drift terms in the predicted values
            mu_residuals = y_pred[:, n_varcov:]
            # get pre-calculated terms from the inputs
            # y_true is a packed tensor; the running offsets n1/n2/n3 below
            # carve it into: projected increments, Omega matrices, det(Omega),
            # correction directions, epsmu margins, baseline drifts and zed.
            ## regarding diffusion scaling
            proj_dX = y_true[:, :n_dim]
            Omega = tf.reshape(y_true[:, n_dim:n_dim+n_dim**2],
                               shape=[-1, n_dim, n_dim])
            det_Omega = y_true[:, n_dim+n_dim**2:n_dim+n_dim**2+1]
            n1 = n_dim+n_dim**2+1
            ## regarding drift correction
            n_bdy = W.shape[0]
            corr_dirs = tf.reshape(y_true[:, n1:n1+n_dim*n_bdy],
                                   shape=[-1, n_bdy, n_dim])
            epsmu = y_true[:, n1+n_dim*n_bdy:n1+n_dim*n_bdy+n_bdy]
            n2 = n1+n_dim*n_bdy+n_bdy
            ## regarding baseline drift
            mu_base = y_true[:, n2:n2+n_dim]
            n3 = n2+n_dim
            ## regarding MPR penalty
            zed = tf.expand_dims(y_true[:, n3:], axis=-1)
            # compute corrected drifts
            ## compute drift
            mu_term = mu_base * mu_residuals
            ## compute weights assigned to each correction direction
            mu_tilde_inner_W = tf.matmul(
                mu_term, tf.constant(W.T, dtype=tf.float32))
            corr_dir_inner_W = tf.reduce_sum(
                corr_dirs * tf.constant(W, dtype=tf.float32), axis=-1)
            # gamma is only positive where the uncorrected drift would
            # violate the margin epsmu on a boundary.
            gamma = tf.maximum(-mu_tilde_inner_W - epsmu, 0.) / corr_dir_inner_W
            ## compute corrected drift
            mu_tf = mu_term + tf.reduce_sum(
                tf.expand_dims(gamma, axis=-1) * corr_dirs, axis=1)
            mu_tf = tf.expand_dims(mu_tf, axis=-1)
            # compute log likelihood
            Omega_T = tf.transpose(Omega, perm=[0, 2, 1])
            sigma_tilde = tf.transpose(sigma_tilde_T, perm=[0, 2, 1])
            proj_mu = tf.linalg.solve(Omega_T, mu_tf)
            sol_mu = tf.linalg.triangular_solve(
                sigma_tilde, proj_mu, lower=True)
            sol_mu = tf.squeeze(sol_mu)
            proj_dX_tf = tf.expand_dims(proj_dX, axis=-1)
            sol_dX = tf.linalg.triangular_solve(
                sigma_tilde, proj_dX_tf, lower=True)
            sol_dX = tf.squeeze(sol_dX)
            # l1..l4 assemble the Gaussian negative log-likelihood of the
            # projected increments under the corrected drift and diffusion.
            l1 = 2 * tf.reduce_sum(tf.math.log(det_Omega)) + \
                 2 * tf.reduce_sum(sigma_term_diagonal)
            l2 = 1./dt * tf.reduce_sum(tf.square(sol_dX))
            l3 = dt * tf.reduce_sum(tf.square(sol_mu))
            l4 = -2 * tf.reduce_sum(sol_mu * sol_dX)
            # compute the penalty term
            ## evaluate the X variable in the regression problem
            sigma = tf.matmul(Omega_T, sigma_tilde)
            G_tf = tf.expand_dims(tf.constant(G[1:], dtype=tf.float32), axis=0)
            reg_Xt = tf.matmul(sigma, G_tf, transpose_a=True)
            ## evaluate the Y variable in the regression problem
            reg_Y = tf.matmul(G_tf, mu_tf, transpose_a=True) - zed
            ## evaluate the OLS estimates of the regression problem
            reg_XtY = tf.matmul(reg_Xt, reg_Y)
            reg_XtX = tf.matmul(reg_Xt, reg_Xt, transpose_b=True)
            reg_psi = tf.linalg.solve(reg_XtX, reg_XtY) #
            reg_err = reg_Y - tf.matmul(reg_Xt, reg_psi, transpose_a=True)
            # Penalty: lbd_penalty_eq weights the regression residual,
            # lbd_penalty_sz weights the size of the OLS coefficients.
            pnty = lbd_penalty_eq * tf.reduce_sum(tf.square(reg_err)) + \
                   lbd_penalty_sz * tf.reduce_sum(tf.square(reg_psi))
            return l1 + l2 + l3 + l4 + pnty
        return loss
class Model(object):
    """
    Library of constructing neural network models.
    """
    @staticmethod
    def construct_S(dim_input, n_obs,
                    pruning_sparsity, validation_split, batch_size, epochs):
        """Build the pruned fully-connected network for the S model.

        The two output units are consumed by ``Loss.loss_S`` as
        (alpha, beta).
        """
        # construct the fully connected model
        dim_output = 2
        model_S = tf.keras.Sequential([
            tf.keras.layers.Dense(128, input_shape=(dim_input,),
                                  activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(128, activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(128, activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(dim_output)])
        # prune the model
        # Pruning ramps from 0 to the target sparsity over the full training
        # horizon: steps per epoch (after the validation split) * epochs.
        n_obs_train = n_obs * (1 - validation_split)
        end_step = np.ceil(n_obs_train / batch_size).astype(np.int32) * epochs
        pruning_schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0, final_sparsity=pruning_sparsity,
            begin_step=0, end_step=end_step
        )
        model_S_pruning = tfmot.sparsity.keras.prune_low_magnitude(
            model_S, pruning_schedule
        )
        return model_S_pruning
    @staticmethod
    def construct_mu(dim_input):
        """Build the small unpruned network used for the baseline drift
        (fit against mu_base in ``Train.train_mu``)."""
        # construct the fully connected model
        dim_output = 2
        model_mu = tf.keras.Sequential([
            tf.keras.layers.Dense(128, input_shape=(dim_input,),
                                  activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(128, activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(dim_output)])
        return model_mu
    @staticmethod
    def construct_xi(dim_input, dim_output, n_obs,
                     pruning_sparsity, validation_split, batch_size, epochs):
        """Build the pruned fully-connected network for the xi model."""
        # construct the fully connected model
        model_xi = tf.keras.Sequential([
            tf.keras.layers.Dense(256, input_shape=(dim_input,),
                                  activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(256, activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(256, activation=tf.nn.relu),
            tf.keras.layers.Dropout(rate=0.1),
            tf.keras.layers.Dense(dim_output)])
        # prune the model
        n_obs_train = n_obs * (1 - validation_split)
        end_step = np.ceil(n_obs_train / batch_size).astype(np.int32) * epochs
        pruning_schedule = tfmot.sparsity.keras.PolynomialDecay(
            initial_sparsity=0.0, final_sparsity=pruning_sparsity,
            begin_step=0, end_step=end_step
        )
        model_xi_pruning = tfmot.sparsity.keras.prune_low_magnitude(
            model_xi, pruning_schedule
        )
        return model_xi_pruning
class Train(object):
    """
    Library of training methods for neural SDE models.
    """
    @staticmethod
    def train_S(X_S, Y_S,
                pruning_sparsity=0.5, validation_split=0.1,
                batch_size=512, epochs=500, rand_seed=0,
                force_fit=False, model_name='model_S',
                out_dir='output/checkpoint/'):
        """Train (or reload from checkpoint) the pruned S model.

        If a checkpoint matching ``model_name`` and ``rand_seed`` already
        exists and ``force_fit`` is False, weights are loaded instead of
        re-training.
        """
        n_obs, dim_input = X_S.shape
        # construct the neural network model
        model_S = Model.construct_S(
            dim_input, n_obs,
            pruning_sparsity, validation_split, batch_size, epochs)
        # compile the neural network model
        # NOTE(review): dt is hard-coded to 1e-3 — confirm it matches the
        # sampling frequency of the training data.
        model_S.compile(
            loss=Loss.loss_S(1e-3),
            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4)
        )
        # set up I/O
        tag = out_dir + model_name + '_' + str(rand_seed)
        checkpoint_filepath_model_S = tag
        checkpoint_filepath_model_S_all = tag + '*'
        csv_fname = tag + '_history.csv'
        pruning_dir = out_dir + 'pruning_summary/'
        if not os.path.exists(pruning_dir):
            os.mkdir(pruning_dir)
        # train the pruned model
        tf.random.set_seed(rand_seed)
        if glob(checkpoint_filepath_model_S_all) and not force_fit:
            model_S.load_weights(checkpoint_filepath_model_S)
        else:
            model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_filepath_model_S,
                save_weights_only=True,
                monitor='loss',
                mode='min',
                save_best_only=True)
            csv_logger = tf.keras.callbacks.CSVLogger(
                filename=csv_fname,
                separator=',',
                append=False
            )
            history = model_S.fit(
                X_S, Y_S,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=True,
                verbose=True,
                callbacks=[
                    model_checkpoint_callback,
                    csv_logger,
                    tfmot.sparsity.keras.UpdatePruningStep(),
                    tfmot.sparsity.keras.PruningSummaries(log_dir=pruning_dir)]
            )
            # plot training loss history
            plot_fname = tag + '_history.png'
            utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)
        return model_S
    @staticmethod
    def train_mu(X_S, mu_base,
                 validation_split=0.1, batch_size=512,
                 epochs=200, rand_seed=0, force_fit=False,
                 model_name='model_mu', out_dir='output/checkpoint/'):
        """Train (or reload) the baseline-drift model.

        Regresses ``mu_base`` on ``X_S`` under mean absolute error.
        """
        dim_input = X_S.shape[1]
        # construct the neural network model
        model_mu = Model.construct_mu(dim_input)
        model_mu.compile(loss='mean_absolute_error', optimizer='adam')
        # set up I/O
        tag = out_dir + model_name + '_' + str(rand_seed)
        checkpoint_filepath_model_mu = tag
        checkpoint_filepath_model_mu_all = tag + '*'
        csv_fname = tag + '_history.csv'
        # train the model
        if glob(checkpoint_filepath_model_mu_all) and not force_fit:
            model_mu.load_weights(checkpoint_filepath_model_mu)
        else:
            model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_filepath_model_mu,
                save_weights_only=True,
                monitor='loss',
                mode='min',
                save_best_only=True)
            csv_logger = tf.keras.callbacks.CSVLogger(
                filename=csv_fname,
                separator=',',
                append=False
            )
            history = model_mu.fit(
                X_S, mu_base,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=True, verbose=True,
                callbacks=[model_checkpoint_callback,
                           csv_logger]
            )
            # plot training loss history
            plot_fname = tag + '_history.png'
            utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)
        return model_mu
    @staticmethod
    def train_xi(X_xi, Y_xi, W, G,
                 lbd_penalty_eq, lbd_penalty_sz,
                 pruning_sparsity=0.5, validation_split=0.1,
                 batch_size=512, epochs=20000, rand_seed=0,
                 force_fit=False, model_name='model_xi',
                 out_dir='output/checkpoint/'):
        """Train (or reload from checkpoint) the pruned xi model under
        ``Loss.loss_xi`` with the given penalty weights."""
        n_bdy, n_dim = W.shape
        n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
        # construct the neural network model
        model_xi_pruning = Model.construct_xi(
            n_dim + 1, n_dim + n_varcov, X_xi.shape[0],
            pruning_sparsity, validation_split, batch_size, epochs)
        # NOTE(review): dt hard-coded to 1e-3, as in train_S — confirm.
        model_xi_pruning.compile(
            loss=Loss.loss_xi(1e-3, n_dim, n_varcov, mask_diagonal, W, G,
                              lbd_penalty_eq, lbd_penalty_sz),
            optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
        )
        # set up I/O
        tag = out_dir + model_name + '_' + str(rand_seed)
        checkpoint_filepath = tag
        checkpoint_filepath_all = tag + '*'
        csv_fname = tag + '_history.csv'
        pruning_dir = out_dir + 'pruning_summary/'
        if not os.path.exists(pruning_dir):
            os.mkdir(pruning_dir)
        # train the pruned model
        tf.random.set_seed(rand_seed)
        if glob(checkpoint_filepath_all) and not force_fit:
            model_xi_pruning.load_weights(checkpoint_filepath)
        else:
            model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
                filepath=checkpoint_filepath,
                save_weights_only=True,
                monitor='loss',
                mode='min',
                save_best_only=True)
            csv_logger = tf.keras.callbacks.CSVLogger(
                filename=csv_fname,
                separator=',',
                append=False
            )
            history = model_xi_pruning.fit(
                X_xi, Y_xi,
                epochs=epochs,
                batch_size=batch_size,
                validation_split=validation_split,
                shuffle=True,
                verbose=True,
                callbacks=[
                    model_checkpoint_callback,
                    csv_logger,
                    tfmot.sparsity.keras.UpdatePruningStep(),
                    tfmot.sparsity.keras.PruningSummaries(log_dir=pruning_dir)]
            )
            # plot training loss history
            plot_fname = tag + '_history.png'
            utils.PlotLib.plot_loss_over_epochs(history, True, plot_fname)
        return model_xi_pruning
    @staticmethod
    def predict_in_sample_model_xi(model_xi, X_xi, Y_xi, W, G):
        """Evaluate the fitted xi model on its training inputs.

        Returns the uncorrected drift, the raw transposed triangular
        diffusion, the corrected drift, the scaled transposed diffusion,
        and the Cholesky factor of the implied covariance.
        """
        n_dim = X_xi.shape[1] - 1
        n_bdy = W.shape[0]
        n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
        # predict underlying functions using the learnt NN
        y_pred_nn = model_xi.predict(X_xi)
        # get diffusion terms
        mask_diagonal_np = [m[0] for m in mask_diagonal]
        sigma_term = y_pred_nn.copy()
        sigma_term[:, mask_diagonal_np] = np.exp(sigma_term[:, mask_diagonal_np])
        sigma_term = sigma_term[:, :n_varcov]
        sigma_tilde_T = Train._fill_triu(sigma_term, n_dim)
        # get drift terms
        mu_residuals = y_pred_nn[:, n_varcov:]
        # get inputs for scaling diffusions and correcting drifts
        # (offsets mirror the packing used in Loss.loss_xi)
        ## regarding diffusions
        Omega = np.reshape(Y_xi[:, n_dim:n_dim+n_dim**2],
                           newshape=[-1, n_dim, n_dim])
        ## regarding drifts
        n1 = n_dim+n_dim**2+1
        corr_dirs = np.reshape(Y_xi[:, n1:n1+n_dim*n_bdy],
                               newshape=[-1, n_bdy, n_dim])
        epsmu = Y_xi[:, n1+n_dim*n_bdy:n1+n_dim*n_bdy+n_bdy]
        n2 = n1+n_dim*n_bdy+n_bdy
        mu_base = Y_xi[:, n2:n2+n_dim]
        # compute drift term
        mu_tilde = mu_base * mu_residuals
        # scale diffusion
        sigma_T = np.matmul(sigma_tilde_T, Omega)
        # correct drift
        mu_tilde_inner_W = mu_tilde.dot(W.T)
        corr_dir_inner_W = np.sum(corr_dirs * W[None, :, :], axis=-1)
        gamma = np.maximum(- mu_tilde_inner_W - epsmu, 0.) / corr_dir_inner_W
        mu = mu_tilde + np.sum(gamma[:, :, None] * corr_dirs, axis=1)
        # Cholesky factorisation of the diffusion covariance matrices
        mat_cov = np.matmul(np.transpose(sigma_T, axes=[0, 2, 1]), sigma_T)
        sigma_L = np.linalg.cholesky(mat_cov)
        return mu_tilde, sigma_tilde_T, mu, sigma_T, sigma_L
    @staticmethod
    def _identify_diagonal_entries(n_dim):
        """
        Return the Boolean logical mask array that indicates diagonal terms in
        a diffusion matrix.
        """
        # get the number of unknowns in the diffusion matrix
        n_varcov = int(n_dim*(n_dim+1)/2)
        # construct the diagonal entry mask
        # Index layout follows tfp.math.fill_triangular's packing order.
        x = np.arange(n_varcov)
        xc = np.concatenate([x, x[n_dim:][::-1]])
        idxs_diagonal = [xc[i * (n_dim + 1)] for i in range(n_dim)]
        mask_diagonal = np.zeros(n_varcov + n_dim, dtype=bool)
        mask_diagonal[idxs_diagonal] = True
        mask_diagonal = [[m] for m in mask_diagonal]
        return n_varcov, mask_diagonal
    @staticmethod
    def _fill_triu(arrs_sigma, n_dim):
        """
        Return a list of upper triangular diffusion matrices, given a list of
        flat arrays that contain non-zero elements of the diffusion matrices.
        """
        n_obs = arrs_sigma.shape[0]
        mats_sigma = np.zeros((n_obs, n_dim, n_dim))
        for i in range(n_obs):
            arr_sigma = arrs_sigma[i]
            # Mirror tfp.math.fill_triangular(upper=True) with NumPy ops.
            xc = np.concatenate([arr_sigma, arr_sigma[n_dim:][::-1]])
            g = np.reshape(xc, [n_dim, n_dim])
            mats_sigma[i] = np.triu(g, k=0)
        return mats_sigma
class Simulate(object):
    """
    Library of forward-simulation methods.
    """
    @staticmethod
    def simulate_S_xi_lite(dt, N, model_S, model_xi, model_mu,
                           S0, X0, W, b, factor_multiplier,
                           dist_multiplier, proj_scale,
                           rho_star, epsmu_star, X_interior, reflect=False):
        """
        Simulate one joint (S, xi) path of length N with a tamed Euler scheme.

        Returns the simulated S path, the xi paths, the per-step drifts and
        volatility matrices actually used, and the number of reflections.
        """
        # simulate innovations
        n_dim = X0.shape[0]
        dW = np.random.normal(0, np.sqrt(dt), (n_dim + 1, N + 1))
        # initialise
        st = np.ones(N+1) * np.nan
        xit = np.ones((n_dim, N+1)) * np.nan
        st[0] = S0
        xit[:, 0] = X0
        mus_sim = []
        vols_sim = []
        n_varcov, mask_diagonal = Train._identify_diagonal_entries(n_dim)
        n_reflect = 0
        for i in tqdm(range(1, N+1)):
            try:
                # get drift and diffusion of S
                xi = xit[:, i-1]
                x_S = np.hstack((st[i-1]/factor_multiplier, xi))
                pred_S = model_S.predict(x_S.reshape(1, -1))[0]
                vol_S = np.sqrt(np.exp(-pred_S[0])) * st[i-1]
                mu_S = pred_S[1] * st[i-1]
                # simulate S
                S_ = st[i-1] + mu_S * dt + vol_S * dW[0, i]
                # get baseline drift
                x_mu = np.hstack((st[i-1]/factor_multiplier, xi))
                pred_mu_base = model_mu.predict(x_mu.reshape(1, -1))[0]
                # get drift and diffusion of xi
                x_xi = np.hstack((st[i-1]/factor_multiplier, xi))
                gamma_nn = model_xi.predict(x_xi.reshape(1,-1))[0]
                # diagonal diffusion entries are predicted on log scale
                gamma_nn[np.array(mask_diagonal).ravel()] = np.exp(
                    gamma_nn[np.array(mask_diagonal).ravel()])
                sigma_term = gamma_nn[:n_varcov]
                xc = np.concatenate([sigma_term, sigma_term[n_dim:][::-1]])
                g = np.reshape(xc, [n_dim, n_dim])
                sigma_tilde = np.triu(g, k=0).T
                mu_residual = gamma_nn[n_varcov:]
                # scale diffusion and correct drift
                mu, mat_vol = Simulate.scale_drift_diffusion(
                    xi, mu_residual, sigma_tilde, W, b,
                    dist_multiplier, proj_scale,
                    rho_star, epsmu_star, X_interior, pred_mu_base)
                # tame coefficients (bounds the increment size)
                mu_norm = 1. + np.linalg.norm(mu) * np.sqrt(dt)
                vol_norm = 1. + np.linalg.norm(mat_vol) * np.sqrt(dt)
                # simulate xi using Euler-scheme
                xi_ = xi + mu / mu_norm * dt + \
                    mat_vol.dot(dW[1:, i].reshape((-1, 1))).flatten()/vol_norm
                if reflect:
                    if np.any(W.dot(xi_) - b < 0):
                        n_reflect += 1
                        print(f'Reflect simulated data point at index {i}.')
                        xi_ = Simulate.reflect_data(xi, xi_, W, b)
                st[i] = S_
                xit[:, i] = xi_
                mus_sim.append(mu)
                vols_sim.append(mat_vol)
            except Exception:
                # BUG FIX: was a bare ``except:``, which also swallowed
                # KeyboardInterrupt/SystemExit. A numerical failure simply
                # truncates the simulated path (remaining entries stay NaN).
                break
        return st, xit, mus_sim, vols_sim, n_reflect
    @staticmethod
    def simulate_S_xi(dt, N,
                      model_S, model_xi, model_mu,
                      S, X, W, b, factor_multiplier,
                      dist_multiplier, proj_scale,
                      rho_star, epsmu_star, X_interior,
                      train_rand_seed, sim_rand_seed,
                      force_simulate=False, reflect=False,
                      out_dir='output/checkpoint/'):
        """
        Simulate, save and plot one (S, xi) path seeded from the first
        historical observation.

        Returns ``None`` early if the output CSV already exists and
        ``force_simulate`` is False; otherwise returns the simulated path
        and coefficients.
        """
        print(f'Simulation number: {str(train_rand_seed)}_{str(sim_rand_seed)}')
        # set I/O
        plot_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
            f'_{str(sim_rand_seed)}.png'
        data_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
            f'_{str(sim_rand_seed)}.csv'
        if os.path.exists(data_fname) and not force_simulate:
            return
        # simulate
        np.random.seed(sim_rand_seed)
        S0 = S[0]
        X0 = X[0, :]
        st, xit, mus_sim, vols_sim, n_reflect = Simulate.simulate_S_xi_lite(
            dt, N, model_S, model_xi, model_mu,
            S0, X0, W, b, factor_multiplier,
            dist_multiplier, proj_scale,
            rho_star, epsmu_star, X_interior, reflect)
        if reflect:
            # include the reflection count in the output file names
            plot_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
                f'_{str(sim_rand_seed)}_reflect_{str(n_reflect)}.png'
            data_fname = f'{out_dir}simulation_{str(train_rand_seed)}' + \
                f'_{str(sim_rand_seed)}_reflect_{str(n_reflect)}.csv'
        # save simulated data
        out_data = np.vstack((st, xit))
        columns = ['S'] + ['xi' + str(i) for i in range(1, len(X0)+1)]
        out_data = pd.DataFrame(data=out_data.T, columns=columns)
        out_data.to_csv(data_fname, index=False)
        # plot
        utils.PlotLib.plot_simulated_xi(st, xit, X, plot_fname)
        return st, xit, mus_sim, vols_sim
    @staticmethod
    def scale_drift_diffusion(x, mu_residual, sigma_tilde, W, b,
                              dist_multiplier, proj_scale,
                              rho_star, epsmu_star, x_interior, mu_base):
        """
        Scale the drift and diffusion functions.

        The raw triangular diffusion is rescaled by boundary-distance
        factors, and the drift is corrected towards the interior whenever
        it would push the state through a boundary of {x : W x - b >= 0}.

        Parameters
        __________
        x: np.ndarray
            Current xi state.
        mu_residual, sigma_tilde:
            Raw network outputs (drift residual and triangular diffusion).
        W, b: np.ndarray
            Boundary normal vectors and offsets.
        dist_multiplier, proj_scale, rho_star, epsmu_star:
            Normalisation hyper-parameters passed through to PrepTrainData.
        x_interior: np.ndarray
            Interior points defining the drift-correction directions.
        mu_base: np.ndarray
            Baseline drift multiplying the residual.

        Returns
        _______
        (mu, mat_vol): corrected drift vector and scaled volatility matrix.
        """
        n_dim = W.shape[1]
        # calculate the distance of the data point to each boundary
        dist_x = np.abs(W.dot(x) - b) / np.linalg.norm(W, axis=1)
        # calculate the normalised distance indicators
        epsilon_sigma = PrepTrainData.normalise_dist_diffusion(
            dist_x, dist_multiplier, proj_scale)
        # sort by distance and get first n_dim closest ones
        idxs_sorted_eps = np.argsort(epsilon_sigma)
        idxs_used_eps = idxs_sorted_eps[:n_dim]
        Wd = W[idxs_used_eps]
        epsilond_sigma = epsilon_sigma[idxs_used_eps]
        # scale the diffusions
        if np.max(epsilond_sigma) < 1e-8: # if the anchor point is on a corner
            Omega = np.zeros((n_dim, n_dim))
        else: # if the anchor point is not on the corner
            # compute new bases
            V = np.linalg.qr(Wd.T)[0].T
            Omega = np.diag(np.sqrt(epsilond_sigma)).dot(V)
        mat_a = Omega.T.dot(sigma_tilde).dot(sigma_tilde.T).dot(Omega)
        mat_vol = np.linalg.cholesky(mat_a)
        # scale the drifts
        ## compute drift
        mu_tilde = mu_base * mu_residual
        ## compute correction directions
        corr_dirs_x = x_interior - x[None, :]
        epsmu_x = PrepTrainData.normalise_dist_drift(
            dist_x, rho_star, epsmu_star)
        mu_tilde_inner_W = W.dot(mu_tilde)
        corr_dir_inner_W = np.sum(corr_dirs_x * W, axis=-1)
        weights_corr_dir = np.maximum(-mu_tilde_inner_W-epsmu_x, 0.) /\
            corr_dir_inner_W
        ## compute the corrected drift
        mu = mu_tilde + np.sum(corr_dirs_x * weights_corr_dir[:, None], axis=0)
        return mu, mat_vol
    @staticmethod
    def reflect_data(x0, x1, W, b):
        """
        Reflect a proposed step x0 -> x1 back inside the feasible region
        {x : W x - b >= 0}; fall back to stopping on the boundary when the
        reflection itself lands outside.
        """
        mask_arb = W.dot(x1) - b < 0
        # reflect data if there is arbitrage
        if np.any(mask_arb):
            if np.sum(mask_arb) > 1:
                print('Break more than one boundaries, move to the closest '
                      'boundary.')
                wi = W[mask_arb]
                bi = b[mask_arb]
                # candidate points just inside each violated boundary along
                # the segment x0 -> x1
                candidates = ((bi + 1e-6 - wi.dot(x0))/wi.dot((x1-x0))).\
                    reshape((-1, 1)) * (x1 - x0) + x0
                idx_first_qualified = np.where(
                    np.all(candidates.dot(W.T) - b[None,:] >= 0, axis=1))[0][0]
                x2 = candidates[idx_first_qualified]
            else:
                wi = W[mask_arb]
                bi = b[mask_arb]
                # mirror x1 across the single violated hyperplane
                t = bi - wi.dot(x1)
                x2 = x1 + 2 * t * wi
                # if the reflected data point breaks any arbitrage bounds
                if np.any(x2.dot(W.T) - b < 0):
                    print('Reflect failed, move back to boundary.')
                    t = (bi - wi.dot(x1)) / (wi.dot(x1 - x0))
                    x2 = x0 + t * (x1-x0)
            return x2
        else:
            return x1
| 27,025 | 9,166 |
import random
pedra = '''
_______
---' ____)
(_____)
(_____)
(____)
---.__(___)
'''
papel = '''
_______
---' ____)____
______)
_______)
_______)
---.__________)
'''
tesoura = '''
_______
---' ____)____
______)
__________)
(____)
---.__(___)
'''
# Write your code below this line 👇
# Player choice plus win/loss/draw counters (mutated by the handlers below)
escolha = None
v = 0  # wins
d = 0  # losses
e = 0  # draws
#Outcomes
# rock (pedra) handlers
def pedra_empate():
    """Rock vs rock: print the tie message and count a draw."""
    global e
    print(
        f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {pedra} \n\n Vocês empataram!")
    e += 1
def pedra_derrota():
    """Rock vs paper: print the loss message and count a loss."""
    global d
    print(
        f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {papel} \n\n Você perdeu!")
    d += 1
def pedra_vitoria():
    """Rock vs scissors: print the win message and count a win."""
    global v
    print(
        f"Você escolheu:\n\n {pedra} \n\n O jogo escolheu: {tesoura} \n\n Você ganhou!")
    v += 1
# paper (papel) handlers
def papel_empate():
    """Paper vs paper: print the tie message and count a draw."""
    global e
    print(
        f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {papel} \n\n Vocês empataram!")
    e += 1
def papel_derrota():
    """Paper vs scissors: print the loss message and count a loss."""
    global d
    print(
        f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {tesoura} \n\n Você perdeu!")
    d += 1
def papel_vitoria():
    """Paper vs rock: print the win message and count a win."""
    global v
    print(
        f"Você escolheu:\n\n {papel} \n\n O jogo escolheu: {pedra} \n\n Você ganhou!")
    v += 1
# scissors (tesoura) handlers
def tesoura_empate():
    """Scissors vs scissors: print the tie message and count a draw."""
    global e
    print(
        f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {tesoura} \n\n Vocês empataram!")
    e += 1
def tesoura_derrota():
    """Scissors vs rock: print the loss message and count a loss."""
    global d
    print(
        f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {pedra} \n\n Você perdeu!")
    d += 1
def tesoura_vitoria():
    """Scissors vs paper: print the win message and count a win."""
    global v
    print(
        f"Você escolheu:\n\n {tesoura} \n\n O jogo escolheu: {papel} \n\n Você ganhou!")
    v += 1
while escolha != "sair":
escolha = input(
"Pedra, papel ou tesoura? Digite sair para terminar ").lower()
computador = random.randint(0, 2)
if escolha == "pedra":
escolha = 0
elif escolha == "papel":
escolha = 1
elif escolha == "tesoura":
escolha = 2
elif escolha == "sair":
print(
f"\nVocê ganhou {v} vezes, perdeu {d} vezes e empatou {e} com o computador, parabéns!")
break
#Lista e resultado final
pedra_resultados = [pedra_empate, pedra_derrota, pedra_vitoria]
papel_resultados = [papel_vitoria, papel_empate, papel_vitoria]
tesoura_resultados = [tesoura_derrota, tesoura_vitoria, tesoura_empate]
resultados = [pedra_resultados, papel_resultados, tesoura_resultados]
fim = resultados[escolha][computador]()
print("")
| 2,641 | 1,157 |
def even_square_sum(list):
    """Return the sum of the squares of the even values in *list*."""
    return sum(value * value for value in list if value % 2 == 0)
print(even_square_sum([1, 2, 3, 4, 5]))
import json
# Read the NDJSON export, keep only prefecture "08" rows, and strip fields we
# no longer need before aggregating.
file_content = []
with open('vaccine_data.ndjson', 'r', encoding="UTF-8") as f:
    for row in f.readlines():
        rowJson = json.loads(row.replace('\n',''))
        if rowJson['prefecture'] == '08':
            # prefecture is constant for the kept rows, so drop it
            del rowJson['prefecture']
            # drop falsy medical_worker flags instead of storing them
            if not rowJson['medical_worker']:
                del rowJson['medical_worker']
            file_content.append(rowJson)
# Merge consecutive rows that share a date into one record with per-status
# subtotals. Assumes the input rows are already grouped by date — TODO confirm.
new_file_content = []
i = 0
while i < len(file_content):
    new_content = {
        'date': file_content[i]['date'],
        'count': file_content[i]['count'],
        'status_1': file_content[i]['count'] if file_content[i]['status']==1 else 0,
        'status_2': file_content[i]['count'] if file_content[i]['status']==2 else 0,
    }
    # Scan forward over rows carrying the same date, accumulating counts.
    for j in range(1,len(file_content)-i):
        print(i, j);
        if file_content[i]['date'] == file_content[i+j]['date']:
            new_content['count'] += file_content[i+j]['count']
            # NOTE(review): any status other than 1 is counted as status 2 here.
            if file_content[i+j]['status'] == 1:
                new_content['status_1'] += file_content[i+j]['count']
            else:
                new_content['status_2'] += file_content[i+j]['count']
        else:
            # Advance i to the last row of the current group; the trailing
            # i += 1 below then lands on the first row of the next date.
            i += j-1
            break
        # Last row consumed: skip the whole group on the next outer iteration.
        if i+j+1 == len(file_content):
            i += j
    # NOTE(review): if the final group has a single row the for-body never runs,
    # so `j` is stale (or unbound on a 1-row input) — verify these edge cases.
    new_file_content.append(new_content)
    i += 1
with open('vaccine_data.json', 'w', encoding="UTF-8") as f:
    json.dump(new_file_content, f, ensure_ascii=False,indent=2,separators=(',',': '))
| 1,304 | 488 |
from .system import System
from logcat import LogCat
class Widget(System):
    """Base UI widget: a System subclass that subscribes to render commands."""
    def __init__(self):
        super().__init__()
        # Register the render handler for "cmd_render" events.
        # NOTE(review): self._render is not defined in this class here —
        # presumably supplied by a subclass or by System; verify.
        self.on("cmd_render", self._render)
# widget.py
| 187 | 61 |
import smtplib
# Interactive one-shot mailer: prompts for credentials and a message, then
# sends it through Gmail's SMTP relay with STARTTLS.
user = input('Enter your gmail ')
password = input('Enter your password ')
receiver = input('Enter the receiver ')
msg = input('Enter the message ')
server = smtplib.SMTP('smtp.gmail.com', 587)
try:
    server.starttls()
    server.login(user, password)
    server.sendmail(user, receiver, msg)
finally:
    # Bug fix: the connection was previously leaked whenever login or send
    # raised; always close the SMTP session.
    server.quit()
print("Successfully sent email")
| 461 | 160 |
'''
Created on Feb 4, 2015
@author: nirmal
'''
from scheme import current_timestamp
from spire.schema import *
from spire.mesh import Surrogate
__all__ = ('Notification',)
# All models in this module live in the 'narrative' schema.
schema = Schema('narrative')
class Notification(Model):
    """A notification."""
    class meta:
        schema = schema
        tablename = 'notification'
    id = Identifier()
    # Creation timestamp, defaulted server-side to the current time.
    created = DateTime(timezone=True, nullable=False, default=current_timestamp)
    # Whether the owner has seen this notification yet.
    read = Boolean(default=False)
    # Mesh surrogate pointing at the resource this notification is about.
    resource = Surrogate(nullable=False)
    # NOTE(review): nullable owner token — presumably ownerless notifications
    # are broadcast/system-wide; verify.
    ownerid = Token(nullable=True, index=True)
    type = Token(nullable=False)
    entity = Token(nullable=False)
# Secondary indexes for the common "unread notifications for owner" queries.
NotificationOwnerIDIndex = Index('notification_ownerid_idx', Notification.ownerid)
NotificationReadIndex = Index('notification_read_idx', Notification.read)
| 820 | 255 |
from entity.message import Message
from .databaserepo import DatabaseRepo
class MessageDbRepo(DatabaseRepo):
    """Repository over the "Messages" table: list, fetch by id, and find the
    newest message of a conversation."""
    def __init__(self):
        super().__init__("Messages")
    def all(self, cid):
        """Return all Message objects for conversation *cid*, or None if empty.

        SECURITY NOTE(review): cid is interpolated straight into the SQL text;
        if it can come from user input this is an injection risk — prefer a
        parameterized query if self.db.select supports one.
        """
        query = "SELECT * FROM " + self.table + " WHERE conversation_id = '" + str(cid) + "'"
        m_results = self.db.select(query)
        if m_results:
            # row[0] looks like the id column and the trailing 3 columns like
            # created/modified/accessed timestamps — TODO confirm schema.
            m = [Message(*m_res[1:-3], m_res[0]) for m_res in m_results]
            return m
        else:
            return None
    def get(self, id):
        """Fetch one Message by id, attaching its timestamp columns."""
        current = super().get(id)
        # NOTE(review): unlike all()/last() this slices [:-3] without skipping a
        # leading id column — presumably super().get returns the row without the
        # id; verify against DatabaseRepo.
        m = Message(*current[:-3], id)
        m.created, m.modified, m.accessed = current[3], current[4], current[5]
        return m
    def last(self, cid):
        """Return the newest Message in conversation *cid*, or None.

        SECURITY NOTE(review): same string-built SQL as all(); also note that
        "LIMIT 0,1" is MySQL-style offset,count syntax.
        """
        query = "SELECT * FROM " + self.table + " WHERE conversation_id = '" + str(
            cid) + "' ORDER BY created DESC LIMIT 0,1"
        m_results = self.db.select(query)
        if m_results:
            m_results = m_results[0]
            m = Message(*m_results[1:-3], m_results[0])
            return m
        else:
            return None
| 1,061 | 335 |
from tkinter import *
from tkinter import messagebox
from database import db
from client import client
from pprint import *
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
class mainUI(Frame):
    """Main screen: greeting, logout button, the user's sales-timeline plot and,
    for admins, an overall sales pie chart."""
    def logout(self):
        # Replace the active user with an anonymous client and return to login.
        self.controller.user = client(-1)
        self.controller.show_frame("LoginFrame")
    def __init__(self,parent,controller):
        Frame.__init__(self,parent)
        self.controller = controller
        self.welcome_msg = StringVar(parent)
        Label (self,textvariable = self.welcome_msg).grid(row=1,column=0,sticky='NW')
        Button (self, text="Logout", command=self.logout).grid(row=1,column=1,sticky='NE')
        self.content = StringVar()
        Label (self,textvariable = self.content).grid(row=2,column=0,columnspan=2,sticky='NSEW')
    def refresh(self):
        """Rebuild the charts and welcome text each time this frame is raised."""
        #add graph to column three
        f = Figure(figsize = (5,5), dpi = 100)
        a = f.add_subplot(111)
        # (dates, values) series for the logged-in user's sales over time
        plot = db.user_sales_timeline(self.controller.user.user_id)
        a.plot(plot[0],plot[1])
        f.gca().xaxis.set_major_formatter(mdates.DateFormatter('%m/%d/%Y'))
        f.gca().xaxis.set_major_locator(mdates.DayLocator())
        f.autofmt_xdate()
        #bring up the canvas
        # NOTE(review): canvas.show() and NavigationToolbar2TkAgg come from old
        # matplotlib; modern releases use canvas.draw()/NavigationToolbar2Tk.
        canvas = FigureCanvasTkAgg(f,self)
        canvas.show()
        canvas.get_tk_widget().grid(row=3,columnspan = 2, sticky = 'NSEW')
        #navigation toolbar
        # NOTE(review): this line grids a stray empty Frame and stores None
        # (grid() returns None); it is superseded by the two lines below.
        self.toolbar_frame = Frame(self).grid(row=4, columnspan = 2, sticky = 'NSEW')
        toolbar_frame = Frame(self)
        toolbar_frame.grid(row=4,columnspan = 2, sticky = S+E+W)
        toolbar = NavigationToolbar2TkAgg( canvas, toolbar_frame )
        #toolbar = NavigationToolbar2TkAgg(self, self.toolbar_frame)
        toolbar.update()
        canvas._tkcanvas.grid()
        self.welcome_msg.set("Hello %s!" %self.controller.user.username)
        self.columnconfigure(0, weight = 1)
        self.columnconfigure(1, weight = 1)
        if(self.controller.user.is_admin):
            self.content.set("You are an admin!")
        else:
            self.content.set("You are a user.")
        if(self.controller.user.is_admin):
            # Admins additionally get a company-wide sales pie chart.
            pie = db.sales_pie_chart()
            # Plot
            f = Figure(figsize = (5,5), dpi = 100)
            a = f.add_subplot(111)
            a.pie(pie[1], autopct='%1.1f%%', shadow=True, labels = pie[0])
            a.axis('equal')
            #plt.show()
            canvas = FigureCanvasTkAgg(f,self)
            canvas.show()
            canvas.get_tk_widget().grid(row=5,columnspan = 2, sticky = 'NSEW')
class RegisterFrame(Frame):
    """Account-creation screen: username plus a double password entry."""
    def refresh(self):
        """Clear all entry fields before the frame is shown."""
        self.pass1.set('')
        self.pass2.set('')
        self.usEntry_reg.set('')
    def create_account(self):
        """Validate the two passwords, create the account, then return to login.

        Bug fix: the bare ``except:`` around db.register also swallowed
        SystemExit/KeyboardInterrupt; narrowed to ``except Exception``.
        """
        if self.pass1.get() != self.pass2.get():
            self.pass1.set('')
            self.pass2.set('')
            messagebox.showwarning("Password not match.","Please verify your password again.")
        elif self.pass1.get() == '':
            messagebox.showwarning("Blank fields.","Please do not leave any fields blank.")
        else:
            try:
                db.register(self.usEntry_reg.get(),self.pass1.get())
                messagebox.showinfo("Account created.","Please login using new credentials. :)")
            except Exception:
                messagebox.showwarning("Error.","Please try another username or contact a technician")
        # Always return to the login frame, pre-filling the username field.
        self.controller.show_frame("LoginFrame")
        self.controller.frames['LoginFrame'].usEntry.set(self.usEntry_reg.get())
    def __init__(self,parent,controller):
        Frame.__init__(self, parent)
        self.controller = controller
        self.usEntry_reg = StringVar(parent)
        Label(self, text="Username").grid(row=0,column=0) #create the username label
        Entry(self, textvariable = self.usEntry_reg).grid(row=0,column=1) #position the username box
        self.pass1 = StringVar(parent)
        self.pass1.set('')
        self.pass2 = StringVar(parent)
        self.pass2.set('')
        Label(self, text="Password").grid(row=1,column=0)
        Entry(self, show="*", textvariable=self.pass1).grid(row=1,column=1)
        Label(self, text="re-enter Password").grid(row=2,column=0)
        Entry(self, show="*", textvariable=self.pass2).grid(row=2,column=1)
        Button(self, borderwidth=4, text="Register", width=10, pady=4, command=self.create_account).grid(row=3,column=1)
        Button(self, borderwidth=4, text="Return", width=10, pady=4, command=lambda: self.controller.show_frame("LoginFrame")).grid(row=4,column=1)
class LoginFrame(Frame):
    """Login screen: credential entry, three-strikes lockout, and a link to registration."""
    def refresh(self):
        """Reset both fields and the status line before this frame is shown."""
        self.pwEntry.set('')
        self.lbl_status.set("IDLE.")
        self.usEntry.set('')
    def check_password(self):
        """Look up the entered credentials and branch to success/failure handling."""
        self.user_id = db.getuserid(self.usEntry.get(), self.pwEntry.get())
        self.pwEntry.set('')
        if self.user_id == -1:
            self.login_failure()
            return
        self.usEntry.set('')
        self.login_success()
    def login_success(self):
        """Store the logged-in client on the controller and open the main screen."""
        self.lbl_status.set("Login succeed.")
        self.controller.user = client(self.user_id)
        self.controller.show_frame("mainUI")
    def login_failure(self):
        """Count the failed attempt; after three, disable the login button."""
        self.lbl_status.set("Authentication failed.")
        self.wrongpass += 1
        if self.wrongpass >= 3:
            self.btn_login.configure(state=DISABLED)
            self.lbl_status.set("Denied access.")
    def __init__(self, parent, controller):
        Frame.__init__(self, parent)
        self.controller = controller
        self.wrongpass = 0
        self.grid(row=0, column=0)  # place this frame in its parent's grid
        self.usEntry = StringVar()
        self.pwEntry = StringVar()
        Label(self, text="Username").grid(row=0, column=0)
        Entry(self, textvariable=self.usEntry).grid(row=0, column=1)
        Label(self, text="Password").grid(row=1, column=0)
        Entry(self, show="*", textvariable=self.pwEntry).grid(row=1, column=1)
        self.btn_login = Button(self, borderwidth=4, text="Login", width=10, pady=4, command=self.check_password)
        self.btn_login.grid(row=2, column=1, columnspan=2)
        self.lbl_status = StringVar(parent)
        self.lbl_status.set("waiting input...")
        Button(self, borderwidth=4, text="Register", width=10, pady=4, command=lambda: self.controller.show_frame("RegisterFrame")).grid(row=3, column=1, columnspan=2)
        Label(self, textvariable=self.lbl_status).grid(row=4, column=0, columnspan=2, sticky='W')
class SampleApp(Tk):
    """Root window: a scrollable canvas hosting the stacked Login/Register/main frames."""
    def onFrameConfigure(self,canvas):
        # Keep the scrollable region in sync with the inner frame's size.
        canvas.configure(scrollregion=canvas.bbox("all"))
    def FrameWidth(self, event):
        # Stretch the embedded frame to match the canvas's current width.
        canvas_width = event.width
        self.canvas.itemconfig(self.canvas_frame, width = canvas_width)
    def __init__(self, *args, **kwargs):
        Tk.__init__(self, *args, **kwargs)
        self.canvas = Canvas(self, borderwidth=0, background="#ffffff")
        # the container is where we'll stack a bunch of frames
        # on top of each other, then the one we want visible
        # will be raised above the others
        self.user = client(-1)
        container = Frame(self.canvas)
        vsb = Scrollbar(self, orient="vertical", command=self.canvas.yview)
        self.canvas.configure(yscrollcommand=vsb.set)
        vsb.pack(side="right", fill="y")
        self.canvas.pack(side="left", fill="both", expand=True)
        self.canvas_frame = self.canvas.create_window((4,4), window=container, anchor="nw")
        container.bind("<Configure>", lambda event, canvas=self.canvas: self.onFrameConfigure(canvas))
        self.canvas.bind('<Configure>', self.FrameWidth)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        self.frames = {}
        for F in (LoginFrame, RegisterFrame,mainUI):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            # put all of the pages in the same location;
            # the one on the top of the stacking order
            # will be the one that is visible.
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("LoginFrame")
    def show_frame(self, page_name):
        """Raise the named frame, invoking its refresh() hook when present."""
        frame = self.frames[page_name]
        try:
            frame.refresh()
        except AttributeError:
            # NOTE(review): this also swallows AttributeErrors raised *inside*
            # refresh(), not just a missing refresh attribute — verify intent.
            pass
        #create canvas
        frame.tkraise()
class Login(Tk):
    """Stub window class; registration handling was never implemented."""
    def register(self):
        pass
def main():
    """Build the Tk application window and run its event loop."""
    application = SampleApp()
    application.mainloop()
if __name__ == '__main__':
    main()
| 8,896 | 2,788 |
import torch
import numpy as np
import os
import sys
from fairseq.data.data_utils import collate_tokens
from fairseq.models.roberta import RobertaModel
import time
# Load the fine-tuned RoBERTa checkpoint once at import time and switch it to
# inference mode (disables dropout).
roberta = RobertaModel.from_pretrained('checkpoints/', checkpoint_file='ck.pt', data_name_or_path='data/processed_RACE/')
roberta.eval()
def eval_one_example():
    """Score one hard-coded RACE-style example: encode each (question+answer,
    context) pair, batch them, and print the four classifier logits
    (higher = more likely) plus the wall-clock time taken."""
    # context = 'I was not very happy. Because he did some bad things to me. But I am fine after he apologized to me.'
    # qa1 = 'What\'s my mood right now? Pleased'
    # qa2 = 'What\'s my mood right now? Sad'
    # qa3 = 'What\'s my mood right now? Angry'
    # qa4 = 'What\'s my mood right now? Cool'
    context = 'The Sunset Pasta Cruise to Emerald Bay Saturday evening, September 25, 2010 You will cruise to Emerald Bay at Sunset, one of the most beautiful places in the world while dining on a Pasta Buffet and listening to live light dance music. Buses will pick up Sunset Pasta Cruise diners from the main entrance to the Horizon Casino Resort at: 4:40pm and 5:05pm on Saturday and take you the 1.5 miles to Ski Run Marina for boarding. Boarding is at Ski Run Marina at 5:15 p.m. (with departure at 5:45 p.m.), located in South Lake Tahoe. The cost for the cruise, pasta buffet, live music, and the 2.5-hour cruise to Emerald Bay is $55 (normally $75). The cost for children between 3-11 is $41 and under 3 is free. Must register the under 3 as well for the coast guard count. The Sunset Pasta Cruise will be limited to 200 guests. Large parties will be seated first to insure seating together. Pick up your Sunset Pasta Cruise tickets at the Expo at the Horizon Casino Resort before 3 p.m. on Saturday. Those unclaimed will be sold to those on the waiting list at that time. At approximately 5:45 pm any extra spaces will be sold to passengers on the dock. Children who require a seat must have a ticket as well. Closest lodging to the Pasta Cruise is: Super 8, Lakeland Village. Please note that our sponsor , the Riva Grill, is on the Lake close to the boarding area for the Tahoe Queen. A great gathering place to meet or to have dinner. Call Riva Grill (530) 542-2600 for lunch or dinner reservations while you are visiting Lake Tahoe.'
    qas = ['When will the cruise to Emerald Bay end? At about 7:00 pm.', 'When will the cruise to Emerald Bay end? At about 8:20 pm.', 'When will the cruise to Emerald Bay end? At about 9:20 pm.', 'When will the cruise to Emerald Bay end? On Sunday morning.']
    t1 = time.time()
    ans = 1  # index of the correct option (used by the commented-out sanity check)
    encoded = [roberta.encode(qa, context) for qa in qas]
    batch = collate_tokens(encoded, pad_idx=1)
    logits = roberta.predict('sentence_classification_head', batch, return_logits=True).tolist()
    logits = np.asarray(logits).flatten()
    print(logits)
    # assert np.argmax(logits) == ans
    t2 = time.time()
    print("Time cost: {}s".format(t2 - t1))
def eval_on_test_set(testset='high'):
    """Evaluate on the extracted RACE test split ('high' or 'middle'):
    input0 holds contexts, input1-4 the four answer options, .label the gold
    index. Examples with any encoded pair longer than 512 tokens are skipped.
    Prints progress every 1000 samples, then predictions, labels and accuracy."""
    dirpath = 'data/extracted_RACE'
    with open(os.path.join(dirpath, 'test-{}.input0').format(testset)) as fr0, open(os.path.join(dirpath, 'test-{}.input1').format(testset)) as fr1, open(os.path.join(dirpath, 'test-{}.input2').format(testset)) as fr2, open(os.path.join(dirpath, 'test-{}.input3').format(testset)) as fr3, open(os.path.join(dirpath, 'test-{}.input4').format(testset)) as fr4, open(os.path.join(dirpath, 'test-{}.label').format(testset)) as fr5:
        preds = []
        labels = []
        i = 0
        for context, qa1, qa2, qa3, qa4, label in zip(fr0, fr1, fr2, fr3, fr4, fr5):
            ts = []
            for qa in [qa1, qa2, qa3, qa4]:
                inp = roberta.encode(qa.strip(), context.strip().replace('\n', ' '))
                if len(inp) > 512:
                    break
                ts.append(inp)
            # One over-long option causes the whole example to be skipped.
            if len(ts) != 4:
                continue
            batch = collate_tokens(ts, pad_idx=1)
            logits = roberta.predict('sentence_classification_head', batch, return_logits=True).tolist()
            logits = np.asarray(logits).flatten()
            pred = np.argmax(logits)
            labels.append(int(label.strip()))
            preds.append(pred)
            i += 1
            if i % 1000 == 0:
                print("Finished {} samples.".format(i))
        print(preds)
        print(labels)
        print('Accuracy:', np.mean(np.asarray(preds) == np.asarray(labels)))
# Run the single-example smoke test at import/startup; the full test-set
# evaluations are left disabled below.
eval_one_example()
# eval_on_test_set()
# eval_on_test_set('middle')
| 4,431 | 1,490 |
import soundfile as sf
from tqdm import tqdm
import src.utils.interface_file_io as io
import librosa
import wave
import multiprocessing
import src.utils.interface_multiprocessing as mi
import torchaudio
import numpy as np
import torch.nn.functional as F
import torch
torchaudio.set_audio_backend("sox_io")
def audio_loader(audio_file):
    """Load an audio file via torchaudio, returning (waveform, sample_rate)."""
    loaded = torchaudio.load(audio_file)
    return loaded
def cutoff(waveform, sample_rate, start, end):
    """Return channel 0 of *waveform* between *start* and *end* seconds
    (end sample inclusive), keeping a leading channel dimension."""
    first = int(start * sample_rate)
    last = int(end * sample_rate + 1)
    segment = waveform[0][first:last]
    return segment.unsqueeze(0)
def random_cutoff(waveform, audio_window, index=None):
    """Slice an *audio_window*-sample window from (channels, time) *waveform*;
    the start is random unless an explicit *index* is supplied."""
    total = waveform.shape[1]
    start = index if index is not None else np.random.randint(total - audio_window + 1)
    return waveform[:, start:start + audio_window]
def audio_adjust_length(x, audio_window, fit=False):
    """Zero-pad (channels, time) tensor *x* symmetrically up to *audio_window*
    samples; if *fit*, also crop a random *audio_window*-long slice.

    Bug fix: audio_length was only assigned inside the padding branch, so
    fit=True raised NameError whenever x was already long enough.
    """
    length_adj = audio_window - len(x[0])
    if length_adj > 0:
        half_adj = length_adj // 2
        x = F.pad(x, (half_adj, length_adj - half_adj))
    audio_length = len(x[0])
    if fit:
        random_index = np.random.randint(audio_length - audio_window + 1)
        x = x[:, random_index: random_index + audio_window]
    return x
def audio_auto_trim(waveform, vad, audio_window=None):
    """Trim silence from both ends by running *vad* forwards and on the
    time-reversed signal, then (optionally) tile the result until it is at
    least *audio_window* samples long."""
    trimmed = vad(waveform)
    trimmed = torch.flip(trimmed, [0, 1])
    trimmed = vad(trimmed)
    trimmed = torch.flip(trimmed, [0, 1])
    if audio_window is not None:
        while trimmed.shape[1] < audio_window:
            trimmed = torch.cat((trimmed, trimmed), 1)
    return trimmed
def resampling_audio(file, original_sampling_rate=44100, resampling_rate=16000):
    """Load *file* at its original rate and resample it to *resampling_rate*.

    Bug fix: the original called the librosa module itself —
    ``librosa(file, ...)`` — which raises TypeError; librosa.load is the
    intended loader.
    """
    waveform, sampling_rate = librosa.load(file, sr=original_sampling_rate)
    resample_waveform = librosa.resample(waveform, original_sampling_rate, resampling_rate)
    return resample_waveform
def resampling_audio_list(directory_list, new_file_path, file_extension, original_sampling_rate, resampling_rate):
    """Resample every matching audio file under each directory in
    *directory_list* and write the results into *new_file_path*.

    Bug fix: both loops unpacked plain lists as (index, item) pairs, which
    raises ValueError on the string elements; enumerate() supplies the indices
    the loop variables expected.
    """
    for dir_index, directory in enumerate(directory_list):
        file_list = io.get_all_file_path(directory, file_extension=file_extension)
        for file_index, file in enumerate(tqdm(file_list, desc=directory)):
            resample_waveform = resampling_audio(file, original_sampling_rate=original_sampling_rate,
                                                 resampling_rate=resampling_rate)
            filename = io.get_pure_filename(file)
            file_path = "{}/{}".format(new_file_path, filename)
            sf.write(file_path, resample_waveform, resampling_rate)
# The parameters are prerequisite information. More specifically,
# channels, bit_depth, sampling_rate must be known to use this function.
def pcm2wav(pcm_file, wav_file=None, channels=1, bit_depth=16, sampling_rate=16000):
    """Wrap the raw PCM samples in *pcm_file* in a WAV container.

    :param wav_file: output path; defaults to the input path with a .wav suffix
    :raises ValueError: if bit_depth is not a multiple of 8
    """
    # Check if the options are valid.
    if bit_depth % 8 != 0:
        raise ValueError("bit_depth " + str(bit_depth) + " must be a multiple of 8.")
    if wav_file is None:
        # Bug fix: replace("pcm", "wav") rewrote *every* occurrence in the path
        # (e.g. a "pcm_data/" directory component); only swap the extension.
        if pcm_file.endswith(".pcm"):
            wav_file = pcm_file[:-4] + ".wav"
        else:
            wav_file = pcm_file + ".wav"
    # Read the .pcm file as a binary file and store the data to pcm_data
    with open(pcm_file, 'rb') as opened_pcm_file:
        pcm_data = opened_pcm_file.read()
    # Context manager guarantees the writer is closed even if a write fails.
    with wave.open(wav_file, 'wb') as obj2write:
        obj2write.setnchannels(channels)
        obj2write.setsampwidth(bit_depth // 8)
        obj2write.setframerate(sampling_rate)
        obj2write.writeframes(pcm_data)
def distributed_pcm2wav(pcm_file):
    """Convert each .pcm path in the list *pcm_file* to .wav, with progress logs.
    (Designed to be handed one sub-list per worker process.)"""
    print("start data distribution...")
    for pcm_path in pcm_file:
        pcm2wav(pcm_path)
    print("end data distribution...")
class MelSpectrogramLibrosa:
    """Mel power spectrogram computed with librosa (STFT + mel filterbank),
    returned as a torch tensor."""
    def __init__(self, fs=16000, n_fft=1024, shift=160, n_mels=64, fmin=60, fmax=7800):
        self.fs = fs
        self.n_fft = n_fft
        self.shift = shift
        self.n_mels = n_mels
        self.fmin = fmin
        self.fmax = fmax
        # Mel filterbank matrix, shape (n_mels, 1 + n_fft // 2).
        self.mfb = librosa.filters.mel(sr=fs, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax)
    def __call__(self, audio):
        spectrum = librosa.stft(np.array(audio), n_fft=self.n_fft, hop_length=self.shift)
        # eps keeps downstream log() calls finite on silent frames.
        power = np.abs(spectrum) ** 2 + np.finfo(float).eps
        return torch.tensor(np.matmul(self.mfb, power))
if __name__ == '__main__':
    # Select a maintenance task by editing this string ("resampling" or
    # "pcm2wav"); as committed (empty) the script does nothing.
    task = ""
    if task == "resampling":
        directory_path = ['../../dataset/UrbanSound8K/audio']
        new_save_directory = '../../dataset/UrbanSound8K/audio_16k/'
        resampling_audio_list(directory_path, new_save_directory, 'wav', 44100, 16000)
    elif task == 'pcm2wav':
        input_dir = "../../dataset/KsponSpeech/train"
        file_extension = "pcm"
        # Leave one core free for the parent process.
        divide_num = multiprocessing.cpu_count() - 1
        file_list = io.get_all_file_path(input_dir, file_extension)
        # Split the file list into one chunk per worker.
        file_list = io.list_divider(divide_num, file_list)
        print(len(file_list))
        processes = mi.setup_multiproceesing(distributed_pcm2wav, data_list=file_list)
        mi.start_multiprocessing(processes)
| 5,043 | 1,801 |
import math
from typing import MutableSequence, Optional, TypeVar, Union
import torch
from torch import nn
from torch import Tensor
from torch.types import Number
from einops import repeat
T = TypeVar("T")
def exists(val: Optional[T]) -> bool:
    """Return True when *val* actually holds a value (is not None)."""
    present = val is not None
    return present
def default(val: Optional[T], d: T) -> T:
    """Return *val* when it is present, otherwise the fallback *d*."""
    if val is None:
        return d
    return val
def cast_tuple(val, depth: int = 1):
    """Return *val* unchanged if it is already a tuple, else a tuple repeating
    it *depth* times."""
    if isinstance(val, tuple):
        return val
    return (val,) * depth
class DropKwargs(nn.Module):
    """Adapter that silently discards all keyword arguments before calling the
    wrapped module (for modules whose forward() takes positional args only)."""
    def __init__(self, inner: nn.Module):
        super().__init__()
        self.inner = inner
    def forward(self, *args, **kwargs):
        # kwargs intentionally ignored
        return self.inner(*args)
class SequentialKwargs(nn.Module):
    """Like nn.Sequential, but forwards **kwargs to every stage."""
    def __init__(self, *modules: nn.Module):
        super().__init__()
        self.inner = nn.ModuleList(modules)
    def forward(self, x, **kwargs):
        result = x
        for stage in self.inner:
            result = stage(result, **kwargs)
        return result
TensorSeq = MutableSequence[Tensor]
class PushBack(nn.Module):
    """Apply *inner* to the unpacked sequence, append the result, and also
    overwrite slot 0 with it (so the next stage sees the newest tensor first)."""
    def __init__(self, inner: nn.Module):
        super().__init__()
        self.inner = inner
    def forward(
        self,
        xtup: TensorSeq,
    ) -> TensorSeq:
        result = self.inner(*xtup)
        xtup.append(result)
        xtup[0] = result
        return xtup
class PopBack(nn.Module):
    """Pop the sequence's last tensor and pass it to *inner* as the keyword
    argument named *key*; the result replaces slot 0."""
    def __init__(self, inner: nn.Module, key: str):
        super().__init__()
        self.inner = inner
        self.key = key
    def forward(self, xtup: TensorSeq) -> TensorSeq:
        keyword = {self.key: xtup.pop()}
        xtup[0] = self.inner(*xtup, **keyword)
        return xtup
class ApplyMods(nn.Module):
    """Apply the i-th stored module to the i-th sequence element, in place."""
    def __init__(self, *mods):
        super().__init__()
        self.inner = nn.ModuleList(mods)
    def forward(self, tup: TensorSeq) -> TensorSeq:
        for position, module in enumerate(self.inner):
            tup[position] = module(tup[position])
        return tup
class ApplyMod(nn.Module):
    """Apply *inner* to the sequence element at index *ix*, in place."""
    def __init__(self, inner: nn.Module, ix: int = 0):
        super().__init__()
        self.inner = inner
        self.ix = ix
    def forward(self, tup: TensorSeq) -> TensorSeq:
        position = self.ix
        tup[position] = self.inner(tup[position])
        return tup
class RetIndex(nn.Module):
    """Collapse a tensor sequence to the single element at index *ix*."""
    def __init__(self, ix: int = 0):
        super().__init__()
        self.ix = ix
    def forward(self, tup: TensorSeq) -> Tensor:
        return tup[self.ix]
class ClampWithGrad(torch.autograd.Function):
    """clamp(input, min, max) with a masked gradient: inside the interval the
    gradient passes through unchanged; outside, only gradient components that
    would move the input back toward [min, max] are kept."""
    @staticmethod
    def forward(ctx, input, min, max):
        ctx.min = min
        ctx.max = max
        ctx.save_for_backward(input)
        return input.clamp(min, max)
    @staticmethod
    def backward(ctx, grad_in):
        (input,) = ctx.saved_tensors
        # (input - clamped) is zero inside the range, so the mask is all-True
        # there; outside, it keeps grad_in only when its sign pushes the value
        # back into the range under a gradient-descent update.
        return (
            grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0),
            None,
            None,
        )
# Convenience callable form of the Function.
clamp_with_grad = ClampWithGrad.apply
def clamp_exp(
    t: torch.Tensor,
    low: float = math.log(1e-2),
    high: float = math.log(100),
):
    """Exponentiate *t* after clamping it (with pass-through-style gradients)
    to [low, high]; defaults bound the result to [1e-2, 100]."""
    clamped = clamp_with_grad(t, low, high)
    return clamped.exp()
def mk_full(d: int, init: Union[torch.Tensor, Number]):
    """Return *init* itself when it is already a tensor; otherwise a length-*d*
    tensor filled with the scalar *init*."""
    if isinstance(init, torch.Tensor):
        return init
    return torch.full([d], init)
@torch.no_grad()
def ema_update(model, averaged_model, decay):
    """Incorporates updated model parameters into an exponential moving averaged
    version of a model. It should be called after each optimizer step."""
    src_params = dict(model.named_parameters())
    avg_params = dict(averaged_model.named_parameters())
    assert src_params.keys() == avg_params.keys()
    for name, src in src_params.items():
        # avg <- avg + (1 - decay) * (src - avg)
        avg_params[name].lerp_(src, 1 - decay)
    src_buffers = dict(model.named_buffers())
    avg_buffers = dict(averaged_model.named_buffers())
    assert src_buffers.keys() == avg_buffers.keys()
    for name, buf in src_buffers.items():
        # Buffers are copied verbatim rather than averaged.
        avg_buffers[name].copy_(buf)
def get_ddpm_schedule(t):
    """Returns log SNRs for the noise schedule from the DDPM paper."""
    log_snr = -torch.log(torch.expm1(1e-4 + 10 * t ** 2))
    return log_snr
def get_alphas_sigmas(log_snrs):
    """Returns the scaling factors for the clean image and for the noise, given
    the log SNR for a timestep."""
    alpha_sq = log_snrs.sigmoid()
    sigma_sq = 1 - alpha_sq
    return alpha_sq.sqrt(), sigma_sq.sqrt()
def calculate_stats(e):
    """Return (mean, variance, skewness, kurtosis) of tensor *e*; the variance
    used for the higher moments is floored at 1e-5 for numerical stability."""
    mean = e.mean()
    centered = e - mean
    variance = centered.pow(2).mean()
    stable_var = max(variance, 1e-5)
    skewness = centered.pow(3).mean() / stable_var ** 1.5
    kurtosis = centered.pow(4).mean() / stable_var ** 2
    return mean, variance, skewness, kurtosis
def measure_perf(f):
    """Return the GPU wall-clock time of calling f(), in milliseconds.
    Requires a CUDA device (uses CUDA events for timing)."""
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    start_event.record()
    f()
    # Run some things here
    end_event.record()
    torch.cuda.synchronize()  # Wait for the events to be recorded!
    elapsed_time_ms = start_event.elapsed_time(end_event)
    return elapsed_time_ms
def calc_delta(t_in, t_out):
    """Angle swept between timesteps *t_in* and *t_out* on the cosine schedule."""
    return math.pi / 2 * (t_in - t_out)
def diffusion_step(z, v, t_in, t_out):
    """Rotate latent *z* by the schedule angle using velocity *v* (DDIM-style)."""
    delta = calc_delta(t_in, t_out)
    z = torch.cos(delta) * z - torch.sin(delta) * v
    return z
def calc_v_with_distillation_errors(net, z, t_in, t_out, *args, **kwargs):
    """Predict velocity v = net(z, t_in, ...) plus the per-sample squared error
    between v and the two-half-step distillation target.

    Returns (v, e) where e has shape (batch,) (mean over dims 1-3). The target
    construction runs under no_grad; only v carries gradients.
    """
    v = net(z, t_in, *args, **kwargs)
    with torch.no_grad():
        delta = calc_delta(t_in, t_out)
        t_mid = (t_in + t_out) / 2
        z_1 = diffusion_step(z, v, t_in, t_mid)
        v_2 = net(z_1, t_mid, *args, **kwargs)
        # Bug fix: the original called diffusion_step(z_1 < v_2, t_mid, t_out),
        # which passed a boolean comparison as z and dropped an argument; the
        # intended call is the second half-step with (z_1, v_2).
        z_2 = diffusion_step(z_1, v_2, t_mid, t_out)
    targets = z / torch.tan(delta) - z_2 / torch.sin(delta)
    e = v.sub(targets).pow(2).mean(dim=[1, 2, 3])
    return v, e
def factor_int(n):
    """Return an integer factor pair (a, b) of *n* with a * b == n, starting
    the search at ceil(sqrt(n)) and walking a downward until b = n / a is exact."""
    a = math.ceil(math.sqrt(n))
    b = int(n / a)
    while a * b != float(n):
        a -= 1
        b = int(n / a)
    return a, b
def compute_channel_change_mat(io_ratio):
    """Build a mixing matrix that converts between channel counts differing by
    the factor *io_ratio* (averaging when shrinking, duplicating when growing)."""
    identity = torch.eye(1)
    if io_ratio < 1:
        # reduce channels: average groups of 1/io_ratio input channels
        group = int(1 / io_ratio)
        return repeat(identity * io_ratio, "i1 i2 -> i1 (i2 m)", m=group)
    # expand channels: copy each input channel io_ratio times
    copies = int(io_ratio)
    return repeat(identity, "i1 i2 -> (i1 m) i2", m=copies)
def max_neg_value(tensor):
    """Most negative finite value representable in *tensor*'s dtype."""
    finfo = torch.finfo(tensor.dtype)
    return -finfo.max
| 6,318 | 2,343 |
class Blog:
    """Simple value object holding one blog post's data."""
    def __init__(self, title, photo, name, date, content):
        self.title = title
        self.photo = photo
        self.name = name
        self.date = date
        self.content = content
# Three sample posts sharing the same placeholder title/photo.
blog1= Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
            name='Yasser',date='10-06-2019',content='Hello How r u ?')
blog2= Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
            name='Mohammed',date='11-16-1979',content='Hello How r u ?')
blog3= Blog(title='python bisics', photo='https://images.pexels.com/photos/837140/pexels-photo-837140.jpeg',
            name='Sara',date='10-06-2019',content='Hi ')
print(blog1.name)
print(blog2.date)
print(blog3.content)
# blogs[2] is the same object as blog3, so the rename below is visible through
# the blog3 reference in the final print.
blogs=[blog1,blog2,blog3]
blogs[2].name='Ali'
blogs.remove(blogs[0])
print(blog3.name)
| 828 | 360 |
import logging
from modules.common.GoogleBucketResource import GoogleBucketResource
from modules.common.Utils import Utils
from modules.common import create_output_dir, remove_output_dir
from yapsy.PluginManager import PluginManager
from definitions import PIS_OUTPUT_DIR
logger = logging.getLogger(__name__)
class RetrieveResource(object):
    """Drives the yapsy plugin pipeline: prepares output directories, selects
    the requested steps, runs each plugin, and copies results to Google Storage."""
    def __init__(self, args, yaml):
        self.simplePluginManager = PluginManager()
        self.args = args
        # Fall back to the package-wide default output directory.
        self.output_dir = args.output_dir if args.output_dir is not None else PIS_OUTPUT_DIR
        self.yaml = yaml
    # Warning the user about the gc credential needs for access to GC itself
    def checks_gc_service_account(self):
        """Warn when no GCS credential key was supplied, otherwise validate it."""
        if self.args.google_credential_key is None:
            logger.info("Some of the steps might be not work properly due the lack of permissions to access to GCS. "
                        "Eg. Evidence")
        else:
            GoogleBucketResource.has_valid_auth_key(self.args.google_credential_key)
    # Copy the local files to the Google Storage
    def copy_to_gs(self):
        """Upload outputs to the configured bucket; log an error if none is set."""
        if self.args.google_bucket is not None:
            Utils(self.yaml.config, self.yaml.outputs).gsutil_multi_copy_to(self.args.google_bucket)
        else:
            logger.error("Destination bucket info missing")
    # This function normalise the input inserted by the user. Lower and Upper cases can break the code if
    # not managed. Eg. SO/so/So -> SO Plugin
    def normalise_steps(self, steps, all_plugins_available):
        """Map user-entered step names (any case) onto canonical plugin names;
        names left unmatched are logged as not found."""
        normalise_steps = []
        lowercase_steps = [each_step.lower() for each_step in steps]
        for plugin in all_plugins_available:
            if plugin.lower() in lowercase_steps:
                normalise_steps.append(plugin)
                lowercase_steps.remove(plugin.lower())
        logger.info("Steps not found:\n" + ','.join(lowercase_steps))
        return normalise_steps
    # Extract and check the steps to run
    def steps(self):
        """Return the plugin names to execute: all minus exclusions when no
        explicit steps were requested, otherwise just the requested ones.
        NOTE(review): the set() operations make the execution order
        non-deterministic — verify that ordering does not matter here."""
        all_plugins_available = []
        for plugin in self.simplePluginManager.getAllPlugins():
            all_plugins_available.append(plugin.name)
        steps_requested = self.normalise_steps(self.args.steps, all_plugins_available)
        excluded_requested = self.normalise_steps(self.args.exclude, all_plugins_available)
        if len(self.args.steps) == 0:
            plugin_order = list(set(all_plugins_available) - set(excluded_requested))
        else:
            plugin_order = list(set(steps_requested))
        logger.info("Steps selected:\n" + ','.join(plugin_order))
        return plugin_order
    # Init yapsy plugin manager
    def init_plugins(self):
        """Point yapsy at the plugins/ directory and load everything it finds."""
        # Tell it the default place(s) where to find plugins
        self.simplePluginManager.setPluginPlaces(["plugins"])
        # Load all plugins
        self.simplePluginManager.collectPlugins()
    # noinspection PyBroadException
    # Given a list of steps to run, this procedure executes the selected plugins/step
    def run_plugins(self):
        """Run each selected plugin; a failing plugin is logged and skipped so
        the remaining steps still run."""
        steps_to_execute = self.steps()
        for plugin_name in steps_to_execute:
            plugin = self.simplePluginManager.getPluginByName(plugin_name)
            try:
                plugin.plugin_object.process(self.yaml[plugin_name.lower()], self.yaml.outputs, self.yaml.config)
            except Exception as e:
                logger.info("WARNING Plugin not available {}".format(plugin_name))
                logger.info(e)
    def create_output_structure(self, output_dir):
        """By default the directories prod and staging are created"""
        remove_output_dir(output_dir) if self.args.force_clean else logger.info("Warning: Output not deleted.")
        self.yaml.outputs.prod_dir = create_output_dir(output_dir + '/prod')
        self.yaml.outputs.staging_dir = create_output_dir(output_dir + '/staging')
    # Retrieve the resources requested.
    def run(self):
        """Full pipeline: dirs -> plugin init -> credential check -> execute -> upload."""
        self.create_output_structure(self.output_dir)
        self.init_plugins()
        self.checks_gc_service_account()
        self.run_plugins()
        self.copy_to_gs()
| 4,075 | 1,148 |
import importlib
import os
# Import the config module named by the CONFIGURATION environment variable
# (e.g. CONFIGURATION=prod -> conf/prod.py). Raises KeyError when unset.
conf_module = importlib.import_module("conf.%s" % os.environ['CONFIGURATION'])
# Collect every ALL-CAPS attribute of that module into a plain settings dict.
settings = {
    key: getattr(conf_module, key)
    for key in dir(conf_module)
    if key.isupper()
}
| 211 | 74 |
from django.db import models
# Create your models here.
class User(models.Model):
    """App user profile, keyed by a unique 11-digit phone number."""
    SEXS = ((0, '未知'), (1, '男'), (2, '女'))
    LOCATIONS = (('gz', '广州'), ('sz', '深圳'), ('sh', '上海'), ('bj', '北京'), ('cq', '重庆'))
    phonenum = models.CharField(max_length=11, unique=True)
    nickname = models.CharField(max_length=16)
    sex = models.IntegerField(choices=SEXS, default=0)
    birth_year = models.IntegerField(default=2000)
    birth_month = models.IntegerField(default=1)
    birth_day = models.IntegerField(default=1)
    # URL/path of the avatar image.
    avatar = models.CharField(max_length=256)
    # Bug fix: the LOCATIONS choice keys and the default 'cq' are strings, so
    # this must be a CharField — an IntegerField fails validation/migration.
    location = models.CharField(choices=LOCATIONS, default='cq', max_length=12)
    class Meta:
        db_table = 'users'
from dataclasses import dataclass
from scipy.stats import nbinom # type: ignore[import]
from probs.discrete.rv import DiscreteRV
@dataclass(eq=False)
class NegativeBinomial(DiscreteRV):
    """
    The negative binomial distribution is a discrete probability distribution
    that models the number of failures k in a sequence of independent and
    identically distributed Bernoulli trials before a specified (non-random)
    number of successes (denoted r) occurs.
    For example, we can define rolling a 6 on a die as a success, and rolling
    any other number as a failure, and ask how many failed rolls will occur
    before we see the third success (r = 3). In such a case, the probability
    distribution of the number of non-6s that appear will be a negative binomial
    distribution.
    https://en.wikipedia.org/wiki/Negative_binomial_distribution
    :param r: Number of successes we want.
    :param p: Probability of a failure.
    """
    r: float = 0
    p: float = 1
    def __post_init__(self) -> None:
        # NOTE(review): p is never validated; the default p = 1 makes
        # expectation()/variance() divide by zero — consider enforcing 0 < p < 1.
        if self.r <= 0:
            raise ValueError("r must be greater than 0.")
    def median(self) -> float:
        # No simple closed form; intentionally unimplemented.
        raise NotImplementedError
    def mode(self) -> float:
        if self.r <= 1:
            return 0
        return self.p * (self.r - 1) / (1 - self.p)
    def expectation(self) -> float:
        # Mean p*r/(1-p) treats p as the failure probability, matching the docstring.
        return self.p * self.r / (1 - self.p)
    def variance(self) -> float:
        return self.p * self.r / (1 - self.p) ** 2
    def pdf(self, x: float) -> float:
        # NOTE(review): scipy's nbinom takes the SUCCESS probability as its
        # third argument, but self.p is documented (and used above) as the
        # failure probability — these calls may need 1 - self.p; verify the
        # intended convention before relying on pdf/cdf.
        k = int(x)
        return float(nbinom.pmf(k, self.r, self.p))
    def cdf(self, x: float) -> float:
        k = int(x)
        return float(nbinom.cdf(k, self.r, self.p))
| 1,696 | 534 |
#!python3
"""
A simple script that uses Baidu Place API to search certain kinds of place
in a range of circular space.
This API can be called maximum 2000 times per day.
"""
import requests, json
# import psycopg2
class ConvertFailure(Exception):
    """Raised when the geocoder cannot convert an address to coordinates."""
    def __str__(self):
        return "Convertion Failed."
mykey = "IniXfqhsWAyZQpkmh5FtEVv0" # my developer key
city = "韶关"
place = "公园"
coor1 = (39.915, 116.404)
coor2 = (39.975, 116.414)
radius = 500 # meters

# parameters for a city-wide place search
city_params = {
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
    'region': city
}

# parameters for a rectangular (bounding-box) place search
rect_params = {
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
    'bounds': "%s, %s, %s, %s" % (*coor1, *coor2)
}

# parameters for a circular place search: center point plus radius.
# Fixed: 'location' and 'radius' were previously attached to rect_params,
# leaving this dict without any spatial constraint at all.
circ_params = {
    'ak': mykey,
    'output': 'json',
    'query': place,
    'page_size': 10,
    'page_num': 0,
    'scope': 2,
    'location': "%s,%s" % coor1,  # API expects a "lat,lng" string, not a tuple
    'radius': radius
}

geocoder_params = {
    # parameters for geocoder api
    'ak': mykey,
    'output': 'json',
    'address': None
}

placeAPI = "http://api.map.baidu.com/place/v2/search"
geocoder = "http://api.map.baidu.com/geocoder/v2/"

res_city = requests.get(placeAPI, params=city_params)
res_rect = requests.get(placeAPI, params=rect_params)
res_circ = requests.get(placeAPI, params=circ_params)

# print(res_city.url)
jsonobj = json.loads(res_city.text)
print(type(jsonobj))
print(type(res_city.text))
# print(json.dumps(jsonobj, sort_keys=False, indent=4))
# Below this line defines a series of Baidu geo-data API calling functions
def addr2coor(addresses):
    '''
    Generator: for each address in ``addresses`` (an iterable of strings),
    resolve it through the Baidu geocoder API and yield a
    ``(address, longitude, latitude)`` tuple.

    Raises ConvertFailure when the API reports a non-zero status, and
    requests.HTTPError on an HTTP-level failure.
    '''
    for address in addresses:
        geocoder_params['address'] = address
        res = requests.get(geocoder, params=geocoder_params)
        res.raise_for_status()
        # Reuse the response we already have; the original issued the
        # exact same request a second time just to read the body.
        coor = json.loads(res.text)
        # print(coor)
        if coor['status'] == 0:
            location = coor['result']['location']
            yield (address, location['lng'], location['lat'])
        else:
            raise ConvertFailure
def rescounter(function)->tuple:
    """Unimplemented stub: intended as a wrapper around addr2coor
    (the name suggests counting API responses/calls) — TODO implement."""
    pass
if __name__ == '__main__':
    # Demo: resolve a few well-known landmarks and print each result.
    address_list = ["天安门", "故宫", "奥林匹克公园", "广州塔"]
    for resolved in addr2coor(address_list):
        print(resolved)
| 2,513 | 948 |
# -*- coding: utf-8 -*-
import logging
from pathlib import Path
import click
import torch
from classifier import Classifier
from torchvision import datasets, transforms
@click.command()
@click.argument('data_filepath', type=click.Path(), default='data')
@click.argument('trained_model_filepath', type=click.Path(),
                default='models/trained_model.pth')
def main(data_filepath, trained_model_filepath):
    """ Evaluates the neural network using MNIST test data """
    logger = logging.getLogger(__name__)
    logger.info('Evaluating a neural network using MNIST test data')

    project_root = Path(__file__).resolve().parents[2]

    # Restore the trained network weights
    model = Classifier()
    model.load_state_dict(torch.load(project_root.joinpath(trained_model_filepath)))

    # Normalize images with mean/std 0.5 (same transform as training)
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5,), (0.5,)),
    ])
    test_set = datasets.MNIST(project_root.joinpath(data_filepath),
                              download=False, train=False,
                              transform=transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=64,
                                              shuffle=True)

    correct = 0
    # Gradients are not needed for evaluation; saves memory and compute
    with torch.no_grad():
        model.eval()  # evaluation mode (disables dropout etc.)
        for images, labels in test_loader:
            probabilities = torch.exp(model(images))
            _, predicted = probabilities.topk(1, dim=1)
            hits = predicted == labels.view(*predicted.shape)
            correct += hits.type(torch.FloatTensor).sum().item()

    test_accuracy = correct / len(test_set)
    logger.info(str("Test Accuracy: {:.3f}".format(test_accuracy)))
if __name__ == '__main__':
    # Configure root logging before handing control to the CLI entry point.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    main()
| 2,293 | 692 |
__author__ = 'luigolas'
import numpy as np
from scipy.stats import cumfreq
class Statistics():
    """
    Accumulates ranking statistics for a re-identification experiment:
    matching order, mean rank, CMC curve and its AUC.

    Position List: for each element in probe, find its same ids in gallery. Format: np.array([[2,14],[1,2],...])
    Mean List: Calculate means of position list by axis 0. Format: np.array([1.52, 4.89])
    Mode_list: TODO******* calculate statistical mode along axis 0. Format: (np.array([[2., 4.]]), np.array([[10, 12]]))
    Prob admissible range: For each row in Position List calculates if it is lower than a value. Then sum and calculates
    percentage
    """

    def __init__(self):
        # 1-based rank at which each probe's true match appears.
        self.matching_order = None
        # Mean of matching_order.
        self.mean_value = None
        # Cumulative Match Characteristic curve, normalized to 0..100.
        self.CMC = None
        # Area under the (normalized) CMC curve.
        self.AUC = None

    def dict_name(self, ranges=None):
        """
        Return a summary dict with AUC, mean rank, and CMC values at the
        requested rank positions.

        :param ranges: rank positions to report (defaults to [1, 5, 10, 20, 50])
        :return: dict mapping statistic names to values
        """
        if not ranges:
            ranges = [1, 5, 10, 20, 50]
        name = {"AUC": self.AUC, "MeanValue": self.mean_value}
        for r in ranges:
            name.update({"Range%02d" % r: self.CMC[r - 1]})
        return name

    def run(self, dataset, ranking_matrix):
        """
        Compute all statistics for ``ranking_matrix`` against ``dataset``.

        :param dataset: object exposing ``test_indexes`` and ``test_size``
        :param ranking_matrix: per-probe ranked gallery indexes
        """
        # Filter ranking matrix to tests values of dataset
        if ranking_matrix.shape[0] != len(dataset.test_indexes):
            ranking_matrix = self._ranking_matrix_reshape(ranking_matrix, dataset.test_indexes)
        self._calc_matching_order(ranking_matrix)
        self._calc_mean_value()
        self._calcCMC(dataset.test_size)
        self._calcAUC(dataset.test_size)

    def _calc_matching_order(self, ranking_matrix):
        """
        For each probe row, record the 1-based column at which the true
        match (gallery element with the same index as the probe) appears.

        :param ranking_matrix: per-probe ranked gallery indexes
        """
        matching_order = []
        for elemp, rank_list in enumerate(ranking_matrix):
            for column, elemg in enumerate(rank_list):
                if elemp == elemg:
                    matching_order.append(column + 1)  # CMC count from position 1
                    break
        self.matching_order = np.asarray(matching_order, np.uint16)

    def _calc_mean_value(self):
        """Mean rank at which the true match is found."""
        self.mean_value = np.mean(self.matching_order)

    def _calcAUC(self, test_size):
        """
        Area under the CMC curve.

        :param test_size: number of probe elements
        """
        self.AUC = self.CMC.sum() / test_size  # CMC already normalized to 0:100

    def _calcCMC(self, size):
        """Cumulative Match Characteristic: percentage of probes whose
        true match appears at rank <= k, for k = 1..size."""
        cumfreqs = (cumfreq(self.matching_order, numbins=size)[0] / size) * 100.
        self.CMC = cumfreqs.astype(np.float32)

    @staticmethod
    def _ranking_matrix_reshape(ranking_matrix, test_indexes):
        """Restrict the ranking matrix to test probes/gallery entries and
        renumber gallery ids to their positions within ``test_indexes``."""
        # TODO Optimize or use matching matrix directly
        ranking_matrix = ranking_matrix[test_indexes]
        length = ranking_matrix.shape[0]
        # np.isin replaces the deprecated np.in1d (removed in NumPy 2.0);
        # it already preserves the input shape, so no reshape is needed.
        elems = np.isin(ranking_matrix, test_indexes)
        ranking_matrix = ranking_matrix[elems]
        ranking_matrix = ranking_matrix.reshape(length, length)
        rm = np.empty_like(ranking_matrix)
        for pos, val in enumerate(test_indexes):
            rm[ranking_matrix == val] = pos
        return rm
| 4,681 | 1,553 |
from rhqmetrics_handler import RHQMetricsHandler
| 49 | 16 |
from tkinter import*
#=====importing self created module which will show the registartion form=======#
import registrationform
#=====importing self created module which will help in deleting student record from data base======#
import deletestudent
#=============importing selfcreated update student record ==============#
import updatestudent
#to import jpg image
import allotment #it will import module
#importing view database
import tables
from PIL import ImageTk #it will import Pillow librart
import smtplib
from email.message import EmailMessage
import sqlite3
import allotedstudentrecords
def student():
    """Open the admin window of the registration/allotment system.

    Each button dispatches to its feature module (register, delete,
    update, seat allotment, record views); blocks in Tk's mainloop
    until the window is closed.
    """
    admin_window=Tk()
    admin_window.iconbitmap("student.ico")
    # Full-window background image; bg is kept alive by this scope so the
    # PhotoImage is not garbage-collected while the window is shown.
    bg=ImageTk.PhotoImage(file="./images/login.jpg")
    bg_image=Label(admin_window,image=bg)
    bg_image.place(x=0,y=0,relwidth=1,relheight=1)
    # Size the window to the screen, leaving 100px of height free
    width = admin_window.winfo_screenwidth()
    height = admin_window.winfo_screenheight()
    admin_window.geometry(f'{width}x{height-100}+0+0')
    admin_window.resizable(FALSE,FALSE)
    admin_window.title("Student Registration system")
    # Typo fixed in the heading: "Counslling" -> "Counselling".
    # (pack() returns None, so admin_text is None — kept as-is.)
    admin_text=Label(text="Spot Counselling Registration And Allotment System",font=("bold",30)).pack(side='top',pady=40)
    # admin_text.place(x=450,y=40)
    #======================registration window-=====================#
    Register_button=Button(admin_window,text="Register Student",relief=GROOVE,width=15,height=5,font=("bold", 10),command=registrationform.register,bg='#BB001B',fg='white')
    Register_button.place(x=70,y=150)
    #-=====================student record====================#
    Delete_student=Button(admin_window,text="Delete Student",relief=GROOVE,width=15,height=5,font=("bold", 10),command=deletestudent.delete_student,bg='#BB001B',fg='white')
    Delete_student.place(x=70,y=350)
    #============================view databasetable=========================#
    # Typo fixed on the button label: "Registerd" -> "Registered"
    View_table_button=Button(admin_window,text="View Registered \n\n Students Records",relief=GROOVE,width=15,height=5,font=("bold", 10),command=tables.viewdatabase,bg='#BB001B',fg='white')
    View_table_button.place(x=70,y=550)
    #=================================update student ==================#
    update_button=Button(admin_window,text="Update Student",relief=GROOVE,width=15,height=5,font=("bold", 10),command=updatestudent.updatefunc,bg='#BB001B',fg='white')
    update_button.place(x=1150,y=350)
    #===========================student selection table================#
    Student_selection_button=Button(admin_window,text="Seat Allotment",relief=GROOVE,width=15,height=5,font=("bold", 10),command=allotment.selection,bg='#BB001B',fg='white')
    Student_selection_button.place(x=1150,y=150)
    #========================view alloted student records======================#
    View_Alloted_button=Button(admin_window,text="View Alloted \n\n Students Records",relief=GROOVE,width=15,height=5,font=("bold", 10),command=allotedstudentrecords.viewdatabase,bg='#BB001B',fg='white')
    View_Alloted_button.place(x=1150,y=550)
    copy=Label(admin_window,text='Developed By Gaurav And Team ©',font=('bold',8),fg='white',bg='#01796F')
    copy.pack(side='bottom',fill='x')
    # admin_window.destroy()
    admin_window.mainloop()
# student()
| 3,253 | 1,117 |
from django import forms
from django.contrib.auth.models import User
from django.contrib.auth.forms import UserCreationForm
from .models import Customer, Seller, Product, Order
class SignUpForm(UserCreationForm):
    """Customer registration form: Django's UserCreationForm plus required
    first/last name fields and an email address."""

    first_name = forms.CharField(max_length=30, required=True)
    last_name = forms.CharField(max_length=30, required=True)
    email = forms.EmailField(
        max_length=254, help_text='Required. Enter a valid email address.')

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name',
                  'email', 'password1', 'password2']
        # NOTE(review): Meta.labels only affects fields auto-generated from
        # the model; labels for the explicitly declared fields above and
        # for password1/password2 (declared by UserCreationForm) are likely
        # ignored — verify in the rendered form.
        labels = {'username': 'Username', 'first_name': 'First Name', 'last_name': 'Last Name',
                  'email': 'Email', 'password1': 'Password', 'password2': 'Confirm Password'}
class DateInput(forms.DateInput):
    """Date widget rendered as an HTML5 <input type="date"> picker."""
    # Overrides the default 'text' input type of forms.DateInput.
    input_type = 'date'
class ProfileEditForm(forms.ModelForm):
    """Customer profile edit form with explicitly declared name/email/dob
    fields (overriding the model-generated versions) and address fields
    taken from the Customer model."""

    first_name = forms.CharField(
        max_length=50, required=True, label='First Name')
    last_name = forms.CharField(
        max_length=50, label='Last Name', required=False)
    email = forms.EmailField(
        max_length=50, help_text='characters not exceeding 50 chars')
    # Rendered with the HTML5 date picker defined by DateInput above.
    dob = forms.DateField(required=True, label='DoB',
                          widget=DateInput)

    class Meta:
        model = Customer
        # NOTE(review): 'delievery_address' is misspelled but must match the
        # Customer model's field name — fix it in the model first, if at all.
        fields = ['image', 'first_name', 'last_name', 'email', 'phone', 'dob', 'residential_address',
                  'permanent_address', 'delievery_address']
        labels = {'residential_address': 'Residential Address',
                  'permanent_address': 'Permanent Address', 'delievery_address': 'Delievery Address'}
class SellerSignUpForm(UserCreationForm):
    """Seller registration form: UserCreationForm plus name, email, and a
    mandatory terms-and-conditions checkbox stored in ``is_staff``."""

    first_name = forms.CharField(max_length=30, required=True)
    last_name = forms.CharField(max_length=30, required=True)
    email = forms.EmailField(
        max_length=254, help_text='Required. Enter a valid email address.')
    is_staff = forms.BooleanField(required=True)

    class Meta:
        model = User
        fields = ['username', 'first_name', 'last_name',
                  'email', 'password1', 'password2', 'is_staff']
        labels = {'username': 'Username', 'first_name': 'First Name', 'last_name': 'Last Name',
                  'email': 'Email', 'password1': 'Password', 'password2': 'Confirm Password',
                  'is_staff': 'Agree All T&Cs'}
        # Fixed: Django's ModelForm Meta option is ``error_messages``
        # (plural); the previous ``error_message`` attribute was silently
        # ignored by Django.
        error_messages = {'is_staff': {
            'required': 'Must Agree All Terms&Conditions'}}
class SellerProfileEditForm(forms.ModelForm):
    """Seller profile edit form with explicitly declared name/email/dob
    fields plus Seller model fields (GSTIN, category, addresses)."""

    first_name = forms.CharField(
        max_length=50, required=True, label='First Name')
    last_name = forms.CharField(
        max_length=50, label='Last Name', required=False)
    email = forms.EmailField(
        max_length=50, help_text='characters not exceeding 50 chars')
    # Rendered with the HTML5 date picker defined by DateInput above.
    dob = forms.DateField(required=True, label='DoB',
                          widget=DateInput)

    class Meta:
        model = Seller
        fields = ['image', 'first_name', 'last_name', 'email', 'phone', 'dob', 'gstin', 'category', 'subcategory', 'residential_address',
                  'permanent_address', 'shop_address']
        labels = {'gstin':'GSTIN', 'residential_address': 'Residential Address',
                  'permanent_address': 'Permanent Address', 'shop_address': 'Shop Address'}
class ProductForm(forms.ModelForm):
    """Form for creating/editing a Product."""

    class Meta:
        model = Product
        # Include every model field except the system-managed ones.
        # (Specifying both ``fields = '__all__'`` and ``exclude`` was
        # redundant — ``exclude`` alone has the same effect.)
        exclude = ['product_id', 'seller']
"""Implementation of AvoidBlastMatches."""
from ..Specification import Specification, SpecEvaluation
# from .VoidSpecification import VoidSpecification
from ..biotools import blast_sequence
from ..Location import Location
class AvoidBlastMatches(Specification):
    """Enforce that the sequence has no BLAST matches with a given database.
    WARNING: try using AvoidMatches instead, it is much better!!
    Uses NCBI Blast+. Only local BLAST is supported/tested as for now
    Parameters
    ----------
    blast_db
      Path to a local BLAST database. These databases can be obtained with
      NCBI's `makeblastdb`. Omit the extension, e.g. `ecoli_db/ecoli_db`.
    sequences
      Sequences to BLAST against, forwarded to ``blast_sequence`` as
      ``subject_sequences`` (alternative to ``blast_db``).
    word_size
      Word size used by the BLAST algorithm
    perc_identity
      Minimal percentage of identity for BLAST matches. 100 means that only
      perfect matches are considered.
    num_alignments
      Number alignments
    num_threads
      Number of threads/CPU cores to use for the BLAST algorithm.
    min_align_length
      Minimal length that an alignment should have to be considered.
    ungapped
      Forwarded to ``blast_sequence`` (ungapped BLAST when true).
    e_value
      Forwarded to ``blast_sequence``. NOTE(review): the default of 1e80 is
      so large that it effectively disables e-value filtering — confirm
      this is intentional.
    culling_limit
      Forwarded to ``blast_sequence``.
    location
      Restrict the evaluation to this region of the sequence (whole
      sequence when omitted).
    """
    priority = -2
    best_possible_score = 0
    blasts_paths = {}
    def __init__(
        self,
        blast_db=None,
        sequences=None,
        word_size=4,
        perc_identity=100,
        num_alignments=100000,
        num_threads=3,
        min_align_length=20,
        ungapped=True,
        e_value=1e80,
        culling_limit=1,
        location=None,
    ):
        """Initialize."""
        self.blast_db = blast_db
        self.sequences = sequences
        self.word_size = word_size
        self.perc_identity = perc_identity
        self.num_alignments = num_alignments
        self.num_threads = num_threads
        self.min_align_length = min_align_length
        # Location.from_data accepts several location representations and
        # returns a Location (or None when location is None).
        self.location = Location.from_data(location)
        self.e_value = e_value
        self.ungapped = ungapped
        self.culling_limit = culling_limit
    def initialized_on_problem(self, problem, role=None):
        # Default the location to the problem's full sequence span.
        return self._copy_with_full_span_if_no_location(problem)
    def evaluate(self, problem):
        """Score as (-total number of blast identities in matches)."""
        location = self.location
        if location is None:
            location = Location(0, len(problem.sequence))
        sequence = location.extract_sequence(problem.sequence)
        blast_record = blast_sequence(
            sequence,
            blast_db=self.blast_db,
            subject_sequences=self.sequences,
            word_size=self.word_size,
            perc_identity=self.perc_identity,
            num_alignments=self.num_alignments,
            num_threads=self.num_threads,
            ungapped=self.ungapped,
            e_value=self.e_value,
            culling_limit=self.culling_limit,
            task="megablast"
        )
        # blast_sequence may return a single record or a list of records;
        # flatten to one list of alignments either way.
        if isinstance(blast_record, list):
            alignments = [
                alignment
                for rec in blast_record
                for alignment in rec.alignments
            ]
        else:
            alignments = blast_record.alignments
        # Convert each HSP's query coordinates to (start, end, strand,
        # identities) in full-sequence coordinates; the third element is
        # +1/-1 depending on hit orientation.
        # NOTE(review): the asymmetric "-1" on the start but not the end
        # looks like a 1-based -> 0-based coordinate conversion — confirm
        # against blast_sequence's coordinate convention.
        query_hits = [
            (
                min(hit.query_start, hit.query_end) + location.start - 1,
                max(hit.query_start, hit.query_end) + location.start,
                1 - 2 * (hit.query_start > hit.query_end),
                hit.identities,
            )
            for alignment in alignments
            for hit in alignment.hsps
        ]
        # Keep only matches at least min_align_length long, sorted by position.
        locations = sorted(
            [
                (start, end, ids)
                for (start, end, strand, ids) in query_hits
                if (end - start) >= self.min_align_length
            ]
        )
        score = -sum([ids for start, end, ids in locations])
        locations = [Location(start, end) for start, end, ids in locations]
        if locations == []:
            # NOTE(review): returns score=1 on success even though
            # best_possible_score is 0 — confirm this is intentional.
            return SpecEvaluation(
                self, problem, score=1, message="Passed: no BLAST match found"
            )
        return SpecEvaluation(
            self,
            problem,
            score=score,
            locations=locations,
            message="Failed - %s matches at %s" % (len(locations), locations),
        )
    def localized(self, location, problem=None, with_righthand=True):
        """Localize the evaluation."""
        new_location = self.location.overlap_region(location)
        if new_location is None:
            return None
        # NOTE(review): the overlap region computed above is discarded —
        # the localized spec uses ``location`` extended by
        # min_align_length - 1 instead. Confirm this is intended.
        new_location = location.extended(
            self.min_align_length - 1, right=with_righthand
        )
        return self.copy_with_changes(location=new_location)
    def feature_label_parameters(self):
        # Parameters displayed in feature labels for this specification.
        return [self.blast_db]
| 4,665 | 1,336 |
from aadict import aadict
from cachetools import LRUCache
import ujson as json
import regex
from shortuuid import uuid
from functools import wraps
from glob import glob
from time import time
import logging
import os
import shutil
import unicodedata
_logger = logging.getLogger(__name__)
# Module-level database state, configured by prepare():
_basepath = None      # root directory holding one folder per collection
_serialize = None     # callable: object -> file contents
_deserialize = None   # callable: file contents -> object
_ext = None           # file extension for object files
_db = aadict()        # collection name -> aadict(cache=..., indexes={})
class UniqueConstraintError(ValueError):
    """Raised when adding an object to a unique index that already
    contains another object with the same indexed value."""
    pass
def normalize_text(text, lcase=True):
    """Normalize *text* to plain ASCII: trim surrounding whitespace,
    optionally lower-case, apply NFKD decomposition, strip all Unicode
    punctuation, and drop any remaining non-ASCII characters."""
    result = str(text).strip()
    if lcase:
        result = result.lower()
    result = unicodedata.normalize('NFKD', result)
    result, _ = regex.subn(r'\p{P}+', '', result)
    return result.encode('ascii', 'ignore').decode()
def bench(fn):
    """Decorator that logs (at debug level) how long each call takes."""
    @wraps(fn)
    def timed(*args, **kwargs):
        began = time()
        result = fn(*args, **kwargs)
        finished = time()
        _logger.debug('function %s took %g secs',
                      fn.__name__, finished - began)
        return result
    return timed
def object_path(collection, id):
    """Returns the path of the file backing the object with the given
    ``id`` in ``collection``. The ``id``'s string representation is
    "normalized" to make it filesystem-safe; a dict with an 'id' key is
    accepted in place of a bare id."""
    _logger.debug(type(id))
    _logger.debug(id)
    if isinstance(id, dict) and 'id' in id:
        id = id['id']
    safe_id = normalize_text(str(id), lcase=False)
    filename = '%s.%s' % (safe_id, _ext)
    return os.path.join(_basepath, collection, filename)
def collection_path(collection):
    """Returns the base path to the ``collection`` (a directory under
    the module's configured ``_basepath``)."""
    return os.path.join(_basepath, collection)
def load_object_at_path(path):
    """Read and deserialize the object stored at the explicit ``path``,
    returning it as an attribute-accessible aadict."""
    with open(path, 'r') as fh:
        raw = fh.read()
    return aadict(_deserialize(raw))
def load_object(collection, id):
    """Load an object from disk, locating its file from ``collection``
    and ``id``."""
    return load_object_at_path(object_path(collection, id))
def get_object(collection, id):
    """Get an object by its ``collection``-unique ``id``.

    Served from the collection's cache; on a miss the cache's ``missing``
    callback (set up in add_collection) loads the object from disk."""
    return _db[collection].cache[id]
def add_collection(collection,
                   cache_size=1000,
                   cache_cls=LRUCache,
                   **cache_args):
    """Add a collection named ``collection``.

    :param cache_size: maximum number of objects kept in the in-memory cache
    :param cache_cls: cachetools cache class to instantiate
    :param cache_args: extra keyword arguments forwarded to ``cache_cls``

    The cache's ``missing`` callback lazily loads objects from disk on a
    cache miss. NOTE(review): the ``missing`` keyword was removed in newer
    cachetools releases — presumably this relies on a pinned old version;
    confirm against the project's requirements.
    """
    assert collection not in _db
    cache = cache_cls(maxsize=cache_size,
                      missing=lambda id: load_object(collection, id),
                      **cache_args)
    _db[collection] = aadict(cache=cache, indexes={})
def _clear():
    """Forget all registered collections, their caches and indexes."""
    _db.clear()
def prepare(base_path='data',
            serialize=json.dumps,
            deserialize=json.loads,
            file_ext='json'):
    """After you have added your collections, prepare the database
    for use.

    :param base_path: directory under which each collection gets a folder
    :param serialize: callable turning an object into file contents
    :param deserialize: callable turning file contents into an object
    :param file_ext: file extension used for object files

    Raises TypeError if serialize/deserialize are not callable, and
    RuntimeError if no collections have been added yet.
    """
    global _basepath, _deserialize, _serialize, _ext
    _basepath = base_path
    # Explicit raises instead of asserts: asserts vanish under `python -O`.
    if not callable(serialize):
        raise TypeError('serialize must be callable')
    if not callable(deserialize):
        raise TypeError('deserialize must be callable')
    _serialize = serialize
    _deserialize = deserialize
    _ext = file_ext
    _logger.debug('preparing with base path %s and file ext %s',
                  _basepath, _ext)
    if not len(_db):
        raise RuntimeError('no collections registered; call add_collection first')
    for collection in _db.keys():
        c_path = collection_path(collection)
        os.makedirs(c_path, exist_ok=True)
        _logger.info('collection "%s": %d objects',
                     collection, object_count(collection))
def object_count(collection):
    """Returns the number of objects in the given ``collection``."""
    pattern = '%s/*.%s' % (collection_path(collection), _ext)
    return len(glob(pattern))
def each_object(collection):
    """Yields every object in ``collection``, each loaded directly from
    its backing file on disk."""
    pattern = '%s/*.%s' % (collection_path(collection), _ext)
    for object_file in glob(pattern):
        yield load_object_at_path(object_file)
def each_object_id(collection):
    """Yields each object ID in the given ``collection``.
    The objects are not loaded."""
    c_path = collection_path(collection)
    paths = glob('%s/*.%s' % (c_path, _ext))
    for path in paths:
        # Derive the id from the filename directly; the previous regex
        # (r'.+/(.+)\.ext$') hard-coded '/' separators and would break
        # on Windows paths.
        yield os.path.splitext(os.path.basename(path))[0]
@bench
def save_object(collection, obj):
    """Save an object ``obj`` to the given ``collection``.

    ``obj.id`` must be unique across all other existing objects in the
    collection; when the object has no ``id``, a *UUID* is assigned.
    Indexes already defined on the ``collection`` are refreshed after the
    object is written. Returns the object.
    """
    if 'id' not in obj:
        obj.id = uuid()
    target = object_path(collection, obj.id)
    staging = '%s.temp' % target
    # Write to a temp file then rename, so readers never observe a
    # partially written object file.
    with open(staging, 'w') as fh:
        fh.write(_serialize(obj))
    shutil.move(staging, target)
    cache = _db[collection].cache
    if obj.id in cache:
        cache[obj.id] = obj
    _update_indexes_for_mutated_object(collection, obj)
    return obj
@bench
def delete_object(collection, obj):
    """Best-effort removal of ``obj`` from ``collection``: delete its
    backing file, evict it from the cache, then update the collection's
    indexes."""
    try:
        os.remove(object_path(collection, obj))
        del _db[collection].cache[obj.id]
    except (OSError, KeyError):
        # Narrowed from a bare ``except``: still best-effort for a missing
        # file (OSError) or an un-cached object (KeyError), but no longer
        # swallows KeyboardInterrupt/SystemExit or genuine bugs.
        pass
    _update_indexes_for_deleted_object(collection, obj)
def indexed_value(index, obj):
    """Build the index key for ``obj``: the JSON-encoded list of the
    object's values at the index's fields, optionally transformed, and
    lower-cased for case-insensitive indexes."""
    extracted = [obj.get(field) for field in index.fields]
    if callable(index.transformer):
        extracted = index.transformer(extracted)
    key = json.dumps(extracted)
    if index.case_insensitive:
        key = key.lower()
    return key
@bench
def add_index(collection,
              name,
              fields,
              transformer=None,
              unique=False,
              case_insensitive=False):
    """
    Create a secondary index named ``name`` on ``collection`` over one or
    more ``fields``.

    All existing objects in the collection are scanned and indexed
    immediately; ``save_object`` and ``delete_object`` keep the index up
    to date afterwards. Iterate an index's objects with
    ``each_indexed_object``.

    If ``transformer`` is callable it receives the list of values
    extracted from each object and returns the list of values that
    actually go into the index.

    ``unique`` enforces at most one object per distinct combination of
    field values. ``case_insensitive`` stores and compares index keys
    lower-cased.
    """
    assert len(name) > 0
    assert len(fields) > 0
    index = _db[collection].indexes.setdefault(name, aadict())
    index.transformer = transformer
    index.value_map = {}  # json([value]) => set(object_id)
    index.unique = unique
    index.case_insensitive = case_insensitive
    index.fields = fields
    for existing in each_object(collection):
        _add_to_index(index, existing)
    uniqueness = 'unique' if unique else 'non-unique'
    sensitivity = 'case-insensitive' if case_insensitive else 'case-sensitive'
    _logger.info('added %s, %s index to collection %s on fields: %s',
                 uniqueness, sensitivity, collection, ', '.join(fields))
def _add_to_index(index, obj):
    """Record ``obj.id`` in ``index`` under the object's indexed value,
    raising UniqueConstraintError on a unique-index collision."""
    key = indexed_value(index, obj)
    members = index.value_map.setdefault(key, set())
    if index.unique and members:
        raise UniqueConstraintError()
    members.add(obj.id)
def _remove_from_index(index, obj):
    """Drop ``obj.id`` from ``index``; silently ignores objects that
    were never indexed (missing key or missing member)."""
    key = indexed_value(index, obj)
    try:
        members = index.value_map[key]
        members.remove(obj.id)
    except KeyError:
        pass
def each_indexed_object(collection, index_name, **where):
    """Yields every object in ``collection`` whose indexed field values
    match the ``where`` keyword arguments, via the index ``index_name``."""
    index = _db[collection].indexes[index_name]
    matching_ids = index.value_map.get(indexed_value(index, where), [])
    for object_id in matching_ids:
        yield get_object(collection, object_id)
def _update_indexes_for_mutated_object(collection, obj):
    """Re-index an updated object: remove it from, then re-add it to,
    every index defined on the collection."""
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
        _add_to_index(idx, obj)
def _update_indexes_for_deleted_object(collection, obj):
    """A deleted object must not stay indexed: remove it from every
    index defined on the collection."""
    for idx in _db[collection].indexes.values():
        _remove_from_index(idx, obj)
| 8,769 | 2,698 |
# Multiplication-table ("tabuada") demo: banner, prompt, then 1..10 rows.
banner = "x" * 25
print(banner)
print("Bem-Vindo a Tabuada v2.0")
print(banner)
n = int(input("Digite um número para a tabuada: "))
for multiplicador in range(1, 11):
    print(f"{n} x {multiplicador:2} = {n*multiplicador}")
n1 = 17
n2 = 28
# Report which of the two values is larger, or that they are equal.
if n1 > n2:
    print('n1 bozorgtar az n2')
elif n1 < n2:
    print('n2 bozorgtar az n1')
else:
    # Only equality remains once both strict inequalities are ruled out.
    print('n1 va n2 barabar hastand')
"""Adds table for scicrunch rrids
Revision ID: 39fa67f45cc0
Revises: 3452ca7b13e9
Create Date: 2020-12-15 18:16:03.581479+00:00
"""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "39fa67f45cc0"        # this migration's id
down_revision = "3452ca7b13e9"   # migration applied immediately before this one
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``scicrunch_resources`` table: rrid primary key, name,
    optional description, and server-side creation/change timestamps."""
    # ### commands auto generated by Alembic - please adjust! ###
    columns = [
        sa.Column("rrid", sa.String(), nullable=False),
        sa.Column("name", sa.String(), nullable=False),
        sa.Column("description", sa.String(), nullable=True),
        sa.Column("creation_date", sa.DateTime(),
                  server_default=sa.text("now()"), nullable=False),
        sa.Column("last_change_date", sa.DateTime(),
                  server_default=sa.text("now()"), nullable=False),
    ]
    op.create_table("scicrunch_resources", *columns,
                    sa.PrimaryKeyConstraint("rrid"))
    # ### end Alembic commands ###
def downgrade():
    """Drop the ``scicrunch_resources`` table, reverting ``upgrade``."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("scicrunch_resources")
    # ### end Alembic commands ###
| 1,192 | 421 |
import pymysql
from tkinter import messagebox
class Socios():
    """Data-access helper for the ``Socios`` table of the ``ejemplo1``
    MySQL database."""

    def abrir(self):
        """Open and return a new connection to the database."""
        bbdd = pymysql.connect(host="localhost", user="root", passwd="", db="ejemplo1")
        return bbdd

    def alta(self, datos):
        '''
        Insert a new member row.

        datos[0]: NOMBRE
        datos[1]: CUOTAPAGA
        (NOTE(review): the original comment said id/nombre, but the INSERT
        fills NOMBRE and CUOTAPAGA — documented the actual mapping.)
        '''
        bbdd = self.abrir()
        cursor = bbdd.cursor()
        # Parameterized query: the previous str.format() version
        # concatenated user input into the SQL string and was vulnerable
        # to SQL injection.
        sql = "INSERT INTO Socios (NOMBRE, CUOTAPAGA) values(%s, %s)"
        cursor.execute(sql, (datos[0], datos[1]))
        bbdd.commit()
        messagebox.showinfo(message = "registro exitoso", title = "Aviso")
        bbdd.close()

    def mostrarlistadosocio(self):
        """Return all rows from the ``socios`` table."""
        bbdd = self.abrir()
        cursor = bbdd.cursor()
        cursor.execute("SELECT * FROM socios")
        datoslistadocompleto = cursor.fetchall()
        bbdd.commit()
        bbdd.close()
        return datoslistadocompleto
# def editarTabla(self, a_editar):
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# sql="ALTER TABLE SOCIOS AUTO_INCREMENT = 1"
# bbdd.commit()
# cursor.execute(sql)
# print(sql)
# bbdd.close()
# sql = "INSERT INTO SOCIOS (nombre, sexo )\
# values( '{}','{}')".format(datos[0],datos[1] )
# print (sql)
# #sql="insert into articulos(descripcion, precio) values (%s,%s)"
# try:
# cursor.execute(sql)
# bbdd.commit()
# #messagebox.showinfo(message = "registro exitoso", title = "Aviso")
# except:
# bbdd.rollback()
# #messagebox.showinfo(message= "No registrado", title = "Aviso" )
# bbdd.close()
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# cursor.execute("DELETE FROM SOCIOS WHERE ID= 3")
# bbdd.commit()
# bbdd.close()
# bbdd= pymysql.connect( host= "localhost", user= "root", passwd="", db= "ejemplo1")
# cursor= bbdd.cursor()
# cursor.execute("ALTER TABLE SOCIOS AUTO_INCREMENT = 1")
# # "CREATE TABLE Socios (id INT PRIMARY KEY AUTO_INCREMENT, NOMBRE VARCHAR(50), CUOTAPAGA VARCHAR(2))")
# bbdd.commit()
# bbdd.close()
| 2,541 | 938 |
import sys
import json
import das_client
def dasFileQuery(dataset):
    """Resolve a dataset pattern through DAS and return the sorted list
    of file names it contains. Writes an error to stderr and exits the
    process when the pattern matches zero or multiple datasets."""
    host  = 'https://cmsweb.cern.ch'  # default
    idx   = 0                         # default
    limit = 0                         # unlimited
    debug = 0                         # default
    thr   = 300                       # default
    ckey  = ""                        # default
    cert  = ""                        # default

    query = 'dataset dataset=%s' % dataset
    jsondict = das_client.get_data(host, query, idx, limit, debug, thr, ckey, cert)

    # check if the pattern matches none, many, or one dataset
    matches = jsondict['data']
    if not matches or not matches[0]['dataset']:
        sys.stderr.write('Error: the pattern "%s" does not match any dataset\n' % dataset)
        sys.exit(1)
        return []  # unreachable after sys.exit; kept for parity
    if len(matches) > 1:
        sys.stderr.write('Error: the pattern "%s" matches multiple datasets\n' % dataset)
        for d in matches:
            sys.stderr.write(' %s\n' % d['dataset'][0]['name'])
        sys.exit(1)
        return []  # unreachable after sys.exit; kept for parity

    # expand the dataset name, then list its files
    dataset = matches[0]['dataset'][0]['name']
    jsondict = das_client.get_data(host, 'file dataset=%s' % dataset,
                                   idx, limit, debug, thr, ckey, cert)
    # parse the results in JSON format, and extract the list of files
    return sorted(f['file'][0]['name'] for f in jsondict['data'])
| 1,420 | 441 |
def append_file(password, file_path):
    """Append ``password`` to the file at ``file_path`` (CRLF-terminated),
    creating the file if it does not exist."""
    # ``with`` guarantees the handle is closed even if write() raises;
    # the original leaked the handle on error.
    with open(file_path, "a+") as password_file:
        password_file.write(password + "\r\n")
| 153 | 56 |
import pickle
# pickle can serialize python objects
data = {1: "hi", 2: "there"}

# convert to byte
byte_data = pickle.dumps(data)
# convert back to python object
data2 = pickle.loads(byte_data)

# ----------using with files----------
# Use a real temporary file: the original set filename = "" which made
# every open() below fail with FileNotFoundError.
import os
import tempfile
filename = os.path.join(tempfile.mkdtemp(), "data.pickle")

# write to a file
pickle.dump(data, open(filename, "wb"))
with open(filename, "wb") as f:
    pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)

# read from file
unpickled_object = pickle.load(open(filename, "rb"))
from os import sys, path
from resolver.models import *
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
sys.path.append('/home/app')
from client.lib.cactus_client import CactusClient
def run():
    """Reset the resolver database and seed it with demonstration data.

    WARNING: destructive — deletes every row of the listed models, then
    recreates media types, organizations, publishers, entry points and
    end points, and finally loads sample InChI strings fetched from the
    Cactus service.
    """
    # Wipe all existing rows so the seed data starts from a clean slate.
    Organization.objects.all().delete()
    Inchi.objects.all().delete()
    Publisher.objects.all().delete()
    EntryPoint.objects.all().delete()
    EndPoint.objects.all().delete()
    MediaType.objects.all().delete()
    client = CactusClient()
    # --- media types ---
    m1 = MediaType.create(
        name="text/plain",
        description="plain text media type"
    )
    m1.save()
    m2 = MediaType.create(
        name="image/gif",
        description="GIF image",
    )
    m2.save()
    # --- organizations (o2 is a child of o1) ---
    o1 = Organization.create(
        name="National Institutes of Health",
        abbreviation="NIH",
        href="https://www.nih.gov",
        category="government",
        parent=None
    )
    o1.save()
    o2 = Organization.create(
        name="National Cancer Institute",
        abbreviation="NCI",
        href="https://www.cancer.gov",
        category="government",
        parent=o1
    )
    o2.save()
    # --- publishers (p2 is a person under the p1 group) ---
    p1 = Publisher.create(
        name="NCI Computer-Aided Drug Design (CADD) Group",
        category="group",
        organization=o2
    )
    p1.save()
    p2 = Publisher.create(
        name="Marc Nicklaus",
        category="person",
        email="marc.nicklaus@email.com",
        address="Frederick, MD 21702-1201, USA",
        href="https://ccr2.cancer.gov/resources/CBL/Scientists/Nicklaus.aspx",
        orcid="https://orcid.org/0000-0002-4775-7030",
        organization=o2,
        parent=p1
    )
    p2.save()
    # --- entry points ---
    e0 = EntryPoint.create(
        name="NCI/CADD InChI Resolver",
        description="Demonstration InChI Resolver of the NCI/CADD group",
        category="self",
        href="https://cactus.inchi-resolver.org",
        entrypoint_href="https://cactus.inchi-resolver.org/_self",
        publisher=p1
    )
    e0.save()
    e1 = EntryPoint.create(
        name="Chemical Identifier Resolver",
        description="This service works as a resolver for different chemical structure identifiers and allows "
                    "the conversion of a given structure identifier into another representation or structure "
                    "identifier. It can be used via a web form or a simple URL API.",
        category="api",
        href="http://cactus.nci.nih.gov/chemical/structure",
        publisher=p2,
        parent=e0
    )
    e1.save()
    e2 = EntryPoint.create(
        name="InChI Trust Root Resolver",
        description="Root InChI Resolver at InChI Trust",
        category="resolver",
        href="http://root.inchi-resolver.org"
    )
    e2.save()
    # --- end points hanging off the e1 entry point ---
    x1 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi|+stdinchikey}/smiles",
        description="Standard InChI to SMILES conversion",
        request_methods=['GET']
    )
    x1.save()
    x1.accept_header_media_types.add(m1)
    x1.content_media_types.add(m1)
    x2 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi,+stdinchikey}/iupac_name",
        description="Standard InChI to IUPAC name conversion",
        request_methods=['GET']
    )
    x2.save()
    x2.accept_header_media_types.add(m1)
    x2.content_media_types.add(m1)
    x3 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+stdinchi,+stdinchikey}/image",
        description="InChI to SMILES conversion",
        request_methods=['GET']
    )
    x3.save()
    x3.accept_header_media_types.add(m1)
    x3.content_media_types.add(m1)
    x4 = EndPoint.create(
        entrypoint=e1,
        category="uritemplate",
        uri="{+smiles}/stdinchi",
        description="SMILES to stdinchi conversion",
    )
    x4.save()
    x4.accept_header_media_types.add(m1)
    x4.content_media_types.add(m1)
    # x5 = EndPoint.create(
    #     entrypoint=e1,
    #     category="uritemplate",
    #     uri="{+smiles}/stdinchikey",
    #     description="SMILES to stdinchikey conversion",
    # )
    # x5.save()
    # x5.accept_header_media_types.add(m1)
    # x5.content_media_types.add(m1)
    # Fetch sample InChIs in batches of 10 (compound ids 10..99) and attach
    # each one to the e1 entry point; failures are logged and skipped.
    for j in range(1, 10):
        ilist = client.fetch_inchi(range(j * 10, j * 10 + 10))
        for cid, i in ilist:
            print("Loading: %s" % (i,))
            try:
                inchi = Inchi.create(
                    string=i
                )
                print('{} {}'.format(inchi, inchi.added))
                inchi.save()
                inchi.entrypoints.add(e1)
            except Exception as e:
                print(e)
| 4,676 | 1,560 |
import subprocess
def launch_app(path_of_app):
    """Launch the executable at *path_of_app* and wait for it to finish.

    Returns True when the subprocess call completed, False (after printing
    the error) when launching raised an exception.
    """
    try:
        subprocess.call([path_of_app])
    except Exception as err:
        print(err)
        return False
    return True
from characters.models import Character
from characters.serializers import CharacterSerializer
from rest_framework import generics
class CharacterListView(generics.ListCreateAPIView):
    """DRF view: GET lists all characters, POST creates a new one."""
    queryset = Character.objects.all()
    serializer_class = CharacterSerializer
class CharacterDetailView(generics.RetrieveUpdateDestroyAPIView):
    """DRF view: GET/PUT/PATCH/DELETE for a single character by pk."""
    queryset = Character.objects.all()
    serializer_class = CharacterSerializer
| 418 | 100 |
from django.conf.urls import url
from django.utils.translation import ugettext_lazy as _
from .views import DeleteRole, EditRole, NewRole, RolesList, RoleUsers
class MisagoAdminExtension(object):
    """Registers the permissions admin section: its URL patterns and the
    navigation nodes that link to them from the admin site."""
    def register_urlpatterns(self, urlpatterns):
        """Mount the role management views under the permissions namespace."""
        # Permissions section
        urlpatterns.namespace(r'^permissions/', 'permissions')
        # Roles
        urlpatterns.namespace(r'^users/', 'users', 'permissions')
        urlpatterns.patterns(
            'permissions:users',
            url(r'^$', RolesList.as_view(), name='index'),
            url(r'^new/$', NewRole.as_view(), name='new'),
            url(r'^edit/(?P<pk>\d+)/$', EditRole.as_view(), name='edit'),
            url(r'^users/(?P<pk>\d+)/$', RoleUsers.as_view(), name='users'),
            url(r'^delete/(?P<pk>\d+)/$', DeleteRole.as_view(), name='delete'),
        )
    def register_navigation_nodes(self, site):
        """Add the "Permissions" section and its "User roles" child to the
        admin navigation tree."""
        site.add_node(
            name=_("Permissions"),
            icon='fa fa-adjust',
            parent='misago:admin',
            after='misago:admin:users:accounts:index',
            namespace='misago:admin:permissions',
            link='misago:admin:permissions:users:index',
        )
        site.add_node(
            name=_("User roles"),
            icon='fa fa-th-large',
            parent='misago:admin:permissions',
            namespace='misago:admin:permissions:users',
            link='misago:admin:permissions:users:index',
        )
| 1,454 | 450 |
from base import *
import utils.neuralrenderer_render as nr
class Visualizer(object):
    """Renders predicted mesh vertices over source images and writes
    comparison images / videos.

    Uses a CUDA neural renderer created at construction time.
    """
    def __init__(self, high_resolution=False):
        # high_resolution switches the renderer (and keypoint scaling) to a
        # fixed 500px canvas instead of the input image size.
        self.high_resolution = high_resolution
        self.renderer = nr.get_renderer(high_resolution=self.high_resolution).cuda()
    def visualize_renderer(self, verts, images):
        """Render *verts* and composite the result over *images*.

        Returns uint8 BGR images with the same layout as *images*.
        """
        #verts = torch.from_numpy(verts).cuda()
        #verts = self.batch_orth_proj_verts(verts,cam)
        #verts = torch.cat((verts[:,:,1].unsqueeze(-1),\
        #    -verts[:,:,2].unsqueeze(-1),verts[:,:,0].unsqueeze(-1)),dim=-1)
        results = self.renderer.forward(verts)
        renders = (results.detach().cpu().numpy().transpose((0,2,3,1))*256).astype(np.uint8)[:,:,:,::-1]
        # Remove the black jagged fringe of the render (on white backgrounds):
        # pixels below the threshold are replaced by the source image.
        render_mask = ~(renders>100)
        renders[render_mask] = images[render_mask]
        return renders
    def visulize_result(self,outputs,kps,data,name,vnum = 6, white_background=False,rtype='',nokp=False,org_name=True,org_img=False,keep_name=False):
        """Write side-by-side comparison images (input / keypoints / render).

        Name kept as-is ("visulize") for caller compatibility.
        """
        if not keep_name:
            if 'name' in data:
                img_names = data['name']
            else:
                img_names = data['imgpath']
        imgs = data['image_org'].contiguous().numpy().astype(np.uint8)[:vnum,:,:,::-1]
        vnum = imgs.shape[0]
        # Map keypoints from [-1, 1] back to pixel coordinates.
        if self.high_resolution:
            kps = ((kps.detach().contiguous().cpu().numpy()+1)/2 * 500).reshape(-1,14,2)[:vnum]
        else:
            kps = ((kps.detach().contiguous().cpu().numpy()+1)/2 * imgs.shape[1]).reshape(-1,14,2)[:vnum]
        kp_imgs = []
        for idx in range(vnum):
            if white_background:
                kp_imgs.append(draw_lsp_14kp__bone(np.ones_like(imgs[idx])*255, kps[idx]))
            else:
                kp_imgs.append(draw_lsp_14kp__bone(imgs[idx].copy(), kps[idx]))
        ((cam,pose,shape), predict_verts, predict_j2d, predict_j3d, predict_Rs,verts_camed,j3d_camed) = outputs
        if white_background:
            rendered_imgs = self.visualize_renderer(verts_camed[:vnum], np.ones_like(imgs)*255)
        else:
            rendered_imgs = self.visualize_renderer(verts_camed[:vnum], imgs)
        if org_img:
            # Paste the rendered crop back into the full original frames using
            # the stored crop offsets.
            offsets = data['offsets'].numpy()
            org_image_names = data['imgpath']
            imgs = []
            org_image = []
            for n in range(rendered_imgs.shape[0]):
                org_imge = cv2.imread(org_image_names[n])
                imgs.append(org_imge.copy())
                resized_images = cv2.resize(rendered_imgs[n], (offsets[n,0]+1, offsets[n,1]+1), interpolation = cv2.INTER_CUBIC)
                org_imge[offsets[n,2]:(offsets[n,3]-1),offsets[n,4]:(offsets[n,5]-1),:] = resized_images[offsets[n,6]:(offsets[n,7]-1+offsets[n,6]),offsets[n,8]:(offsets[n,9]+offsets[n,8]-1),:]
                org_image.append(org_imge)
        for idx in range(vnum):
            if nokp:
                if org_img:
                    if len(org_image[idx].shape)<3:
                        print(org_image_names[idx],org_image[idx].shape)
                        continue
                    result_img = np.hstack((imgs[idx], org_image[idx]))
                else:
                    result_img = np.hstack((imgs[idx], rendered_imgs[idx]))
            else:
                result_img = np.hstack((imgs[idx],kp_imgs[idx], rendered_imgs[idx]))
            if keep_name:
                cv2.imwrite(name[idx],result_img)
            elif org_name:
                cv2.imwrite('{}{}-{}'.format(name.split(os.path.basename(name))[0],img_names[idx].split('/')[-2],os.path.basename(img_names[idx])),result_img)
            else:
                cv2.imwrite(name+'_{}_{}.jpg'.format(idx,rtype),result_img)
    def render_video(self,verts,params,images,org_image,offsets,name):
        """Render every frame's vertices, paste them into the original
        frames and write the result to <name>.mp4.

        *params* is kept for caller compatibility but is no longer used:
        the original code passed (verts, params[:,:3], images) to the
        two-argument visualize_renderer, which always raised TypeError.
        """
        rendered_images = self.visualize_renderer(verts, images)
        for n in range(verts.shape[0]):
            resized_images = cv2.resize(rendered_images[n], (offsets[n,0]+1, offsets[n,1]+1), interpolation = cv2.INTER_CUBIC)
            org_image[n,offsets[n,2]:(offsets[n,3]-1),offsets[n,4]:(offsets[n,5]-1),:] = resized_images[offsets[n,6]:(offsets[n,7]-1+offsets[n,6]),offsets[n,8]:(offsets[n,9]+offsets[n,8]-1),:]
        self.make_mp4(org_image,name)
    def make_mp4(self,images,name):
        """Write *images* (N x H x W x 3, uint8 BGR) to <name>.mp4 at 50 fps."""
        num = images.shape[0]
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        output_movie = cv2.VideoWriter(name+'.mp4', fourcc, 50, (images.shape[2], images.shape[1]))
        for i in range(num):
            if i%100==0:
                print('Writing frame: ',i,'/',num)
            output_movie.write(images[i])
        # Finalize the container; without release() the file may be truncated.
        output_movie.release()
import wsgiref.util
import flask
from proxy import proxy
# pylint: disable=W0212
def test_happy_path():
    # Success criterion: proxying a well-formed forecast request must not
    # raise; no assertions are made on the response itself.
    wsgi_env = {
        "REQUEST_METHOD": "GET",
        "PATH_INFO": "/locationforecast/1.9/",
        "QUERY_STRING": "lat=59.31895603;lon=18.0517762",
        "HTTP_REFERER": "https://walles.github.io/weatherclock",
    }
    wsgiref.util.setup_testing_defaults(wsgi_env)
    proxy._proxy_request(flask.Request(wsgi_env))
| 455 | 179 |
import sys
def main():
    """Hadoop-streaming style mapper: emit "<group>\\t1" per input line.

    Each stdin line is expected to look like "value,group"; the value part
    is discarded and a count of 1 is emitted for the group.
    """
    for record in sys.stdin:
        _, group = record.strip().split(',')
        print(group, 1, sep='\t')
if __name__ == '__main__':
    main()
| 186 | 66 |
import os
import numpy as np
import argparse
import logging
import random
import pickle
from pprint import pformat
from exps.data import ParticleNetDataset
from settree.set_data import SetDataset, OPERATIONS, merge_init_datasets
import exps.eval_utils as eval
from exps.eval_utils import create_logger
data_root = '/home/royhir/projects/data/physics/top_quark/proc'
def pre_process(dataset, limit=None):
    """Convert a ParticleNet-style dataset into a list of per-jet records.

    Each record concatenates the (points, features, mask) columns of one
    jet's leading particle rows into a single 2-D array. Jets whose point
    block is entirely zero are dropped.

    Args:
        dataset: object with ``X`` (dict of 'points'/'features'/'mask'
            sample-major arrays) and one-hot labels ``y``.
        limit: optional number of samples to draw at random; all by default.

    Returns:
        (records, labels): list of 2-D arrays and the matching 1-D labels.
    """
    x = dataset.X
    y = dataset.y
    if limit is None:
        limit = len(y)
    # Random subsample (without replacement) of the requested size.
    inds = random.sample(range(len(y)), limit)
    x_points = x['points'].take(inds, axis=0)
    x_features = x['features'].take(inds, axis=0)
    x_mask = x['mask'].take(inds, axis=0)
    y = y.take(inds, axis=0)
    y = y.argmax(1)  # one-hot -> class index
    records = []
    ys = []
    # Loop variable renamed from ``y`` (it used to shadow the label array).
    for p, f, m, label in zip(x_points, x_features, x_mask, y):
        try:
            # Index of the last row with any non-zero point coordinate.
            m_row = np.where(p.any(axis=1))[0].max()
        except ValueError:
            # No non-zero rows: jet has no valid particles -> skip it.
            # (Was a bare ``except: pass`` that also swallowed
            # KeyboardInterrupt and real bugs.)
            continue
        # NOTE(review): [:m_row] excludes the last non-empty row itself;
        # kept as-is to preserve the original behaviour — confirm intent.
        records.append(np.concatenate((p[:m_row, :], f[:m_row, :], m[:m_row, :]), axis=1))
        ys.append(label)
    return records, np.array(ys)
def get_top_quark_datset(train=None, val=None, test=None):
    """Load the top-quark tagging splits and convert them to SetDatasets.

    Args:
        train/val/test: optional per-split sample limits forwarded to
            pre_process (None = use the whole split).

    Returns:
        (train_ds, train_y, val_ds, val_y, test_ds, test_y).

    NOTE(review): "datset" typo kept in the name for caller compatibility.
    """
    train_dataset = ParticleNetDataset(os.path.join(data_root, 'train_file_0.awkd'), data_format='channel_last')
    val_dataset = ParticleNetDataset(os.path.join(data_root, 'val_file_0.awkd'), data_format='channel_last')
    test_dataset = ParticleNetDataset(os.path.join(data_root, 'test_file_0.awkd'), data_format='channel_last')
    logging.info('Loaded raw data')
    train_records, train_y = pre_process(train_dataset, limit=train)
    val_records, val_y = pre_process(val_dataset, limit=val)
    test_records, test_y = pre_process(test_dataset, limit=test)
    logging.info('Finish pre-processing')
    logging.info('train: {} val: {} test: {}'.format(len(train_y), len(val_y), len(test_y)))
    return SetDataset(records=train_records, is_init=True), train_y, \
           SetDataset(records=val_records, is_init=True), val_y, \
           SetDataset(records=test_records, is_init=True), test_y
if __name__ == '__main__':
    # Train and compare an XGBoost baseline vs. a set-GBDT on the top-quark
    # tagging data, optionally pickling the fitted set-GBDT model.
    parser = argparse.ArgumentParser()
    parser.add_argument("--exp_name", type=str, default='test')
    parser.add_argument("--splits", type=int, nargs="+", default=[1200000, 400000, 400000])
    parser.add_argument("--attention_set_limit", type=int, default=6)
    parser.add_argument("--use_attention_set", action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument("--log", action='store_true')
    args = parser.parse_args()
    # Fix seeds so the random subsampling in pre_process is reproducible.
    np.random.seed(42)
    random.seed(42)
    log_dir = os.path.join(os.path.abspath('__file__' + '/../'), 'outputs', 'top_quark')
    create_logger(log_dir=log_dir,
                  log_name=args.exp_name,
                  dump=args.log)
    logging.info(args)
    train, val, test = args.splits
    ds_train, y_train, ds_val, y_val, ds_test, y_test = get_top_quark_datset(train, val, test)
    # Hyperparameters shared by all three model flavours below.
    shared_gbdt_params = {'n_estimators': 50,
                          'learning_rate': 0.1,
                          'max_depth': 8,
                          'max_features': None,
                          'subsample': 0.5,
                          'criterion': 'mse',
                          'early_stopping_rounds': 5,
                          'random_state': 42}
    logging.info('Shared params:\n' + pformat(shared_gbdt_params))
    # Set-GBDT configuration (attention-set variant).
    set_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                  'operations': OPERATIONS,
                  'splitter': 'sklearn',
                  'use_attention_set': True,
                  'use_attention_set_comp': False,
                  'attention_set_limit': args.attention_set_limit,
                  'max_depth': shared_gbdt_params['max_depth'],
                  'max_features': shared_gbdt_params['max_features'],
                  'subsample': shared_gbdt_params['subsample'],
                  'random_state': shared_gbdt_params['random_state'],
                  'save_path': None,
                  'validation_fraction': 0.25,
                  'tol': 1e-4,
                  'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                  'verbose': 3}
    # sklearn GradientBoosting configuration (defined for parity; not used below).
    sklearn_params = {'n_estimators': shared_gbdt_params['n_estimators'],
                      'criterion': 'mse',
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'max_depth': shared_gbdt_params['max_depth'],
                      'max_features': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'validation_fraction': 0.25,
                      'tol': 1e-4,
                      'n_iter_no_change': shared_gbdt_params['early_stopping_rounds'],
                      'random_state': shared_gbdt_params['random_state']}
    # XGBoost baseline configuration (GPU settings left commented out).
    xgboost_params = {#'tree_method': 'gpu_hist',
                      #'gpu_id': 7,
                      #'objective': 'binary:logistic',
                      'max_depth': shared_gbdt_params['max_depth'],
                      'n_jobs': 10,
                      'eval_metric': ['error'],
                      'learning_rate': shared_gbdt_params['learning_rate'],
                      'n_estimators': shared_gbdt_params['n_estimators'],
                      'colsample_bytree': shared_gbdt_params['max_features'],
                      'subsample': shared_gbdt_params['subsample'],
                      'reg_lambda': 0,
                      'verbosity': 0,
                      'random_state': shared_gbdt_params['random_state'],
                      'seed': shared_gbdt_params['random_state']}
    # Flatten the set datasets into fixed-size feature vectors for XGBoost.
    x_train, x_test, x_val = eval.flatten_datasets(ds_train, ds_test,
                                                   operations_list=set_params['operations'],
                                                   ds_val=ds_val)
    xgboost_gbtd = eval.train_and_predict_xgboost(xgboost_params,
                                                  x_train, y_train,
                                                  x_test, y_test,
                                                  val_x=None, val_y=None,
                                                  early_stopping_rounds=None)
    # The set-GBDT trains on train+val merged (it handles validation internally).
    ds_train_val = merge_init_datasets(ds_train, ds_val)
    set_gbtd = eval.train_and_predict_set_gbdt(set_params,
                                               ds_train_val, np.concatenate([y_train, y_val]),
                                               ds_test, y_test,
                                               resume=None)
    if args.save:
        pkl_filename = os.path.join(log_dir, '{}_model.pkl'.format(args.exp_name))
        with open(pkl_filename, 'wb') as file:
            pickle.dump(set_gbtd, file)
| 6,756 | 2,187 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from .blnet_web import BLNETWeb, test_blnet
from .blnet_conn import BLNETDirect
from .blnet import BLNET
| 152 | 61 |
from psycopg2 import connect
import requests
import json
# DESTRUCTIVE maintenance script: drops and recreates the public schema of
# the metadatabase. The database host is discovered from a local container
# registry service.
r = requests.get(
    "http://localhost:9091/api/getDatabaseContainerByContainerID",
    params={"containerID": "Metadatabase"},
)
# Parse the container-info response once and reuse it (the original parsed
# the body twice and discarded the first result).
container = r.json()
conn = connect(
    dbname="metadatabase",
    user="postgres",
    host=container['IpAddress'],
    password="postgres",
)
try:
    cursor = conn.cursor()
    # Plain string: the original used an f-string with nothing to interpolate.
    cursor.execute("""DROP SCHEMA public CASCADE;
    CREATE SCHEMA public;""")
    conn.commit()
finally:
    # Always release the connection, even if the DDL fails.
    conn.close()
from setuptools import setup
# Package metadata for the dcgenerator distribution (pip-installable).
setup(name='dcgenerator',
      version='0.1',
      description='Generate dc events from time series',
      url='https://github.com/JurgenPalsma/dcgenerator',
      author='Flying Circus',
      author_email='jurgen.palsma@gmail.com',
      license='MIT',
      packages=['dcgenerator'],
      install_requires=[
          'pandas',
      ],
      zip_safe=False)
"""Manage Wonk's configuration."""
import pathlib
from typing import Any, Dict, List
import yaml
from pydantic import BaseModel
from toposort import toposort_flatten # type: ignore
from wonk.exceptions import UnknownParentError
class PolicySet(BaseModel):
    """Describes a policy set."""

    name: str
    managed: List[str] = []
    local: List[str] = []
    inherits: List[str] = []

    def __ior__(self, other):
        """Merge another policy set's values into this one.

        Deliberately a simple O(n^2) scan: it preserves insertion order,
        which keeps the generated output files stable, and the lists are
        almost always tiny, so clarity wins over efficiency here.
        """
        for item in other.managed:
            if item not in self.managed:
                self.managed.append(item)
        for item in other.local:
            if item not in self.local:
                self.local.append(item)
        return self
class Config(BaseModel):
    """Describes a Wonk configuration file (its top-level policy_sets map)."""
    # Policy set name -> parsed PolicySet definition.
    policy_sets: Dict[str, PolicySet]
def load_config(config_path: pathlib.Path = None) -> Config:
    """Load a configuration file and return its parsed contents.

    Args:
        config_path: path to the YAML configuration; defaults to ./wonk.yaml.

    Returns:
        The parsed Config object.
    """
    if config_path is None:
        config_path = pathlib.Path("wonk.yaml")
    data = yaml.load(config_path.read_text(), Loader=yaml.SafeLoader)
    return parse_config(data)
def parse_config(block_all_config: Dict[str, Any]) -> Config:
    """Parse the dictionary containing all Wonk configuration into a Config object."""
    if "policy_sets" in block_all_config:
        # A present-but-falsy value (e.g. an empty YAML section) counts as {}.
        policy_sets = parse_policy_sets(block_all_config["policy_sets"] or {})
    else:
        policy_sets = {}
    return Config(policy_sets=policy_sets)  # type: ignore
def parse_policy_sets(block_policy_sets: Dict[str, Any]) -> Dict[str, PolicySet]:
    """Parse the dictionary containing policy set definitions into a dict of PolicySets.

    Inherited values are folded in parent-first: the sets are topologically
    sorted by their ``inherits`` edges, then each set absorbs its parents'
    managed/local lists via ``|=``.

    Raises:
        UnknownParentError: when a set inherits from an undefined name.
    """
    policy_sets = {}
    deps = {}
    for name, definition in block_policy_sets.items():
        # Inject the mapping key as the set's name field.
        with_name = {**definition, **{"name": name}}
        policy_set = PolicySet(**with_name)
        policy_sets[name] = policy_set
        for parent_name in policy_set.inherits:
            if parent_name not in block_policy_sets:
                raise UnknownParentError(name, parent_name)
        # Build a dependency graph from the set of inheritance definitions from the classes.
        deps[name] = set(policy_set.inherits)
    # Parents are merged before children thanks to the topological order.
    for name in toposort_flatten(deps):
        policy_set = policy_sets[name]
        for parent_name in policy_set.inherits:
            policy_set |= policy_sets[parent_name]
    return policy_sets
| 2,801 | 810 |
# Copyright 2021 Arm Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import Callable, List, Tuple, Union
import tensorflow as tf
def compute_ranges(kernel: tf.Tensor, per_channel: bool, symmetric: bool) -> Tuple[tf.Tensor, tf.Tensor]:
    """Compute (max, min) quantization ranges for *kernel*.

    per_channel reduces over all axes except the last; symmetric makes the
    range centred on zero. Gradients are stopped through the ranges.
    """
    reduce_axes = None
    if per_channel:
        # Reduce every axis but the last (the channel axis).
        reduce_axes = tf.range(tf.rank(kernel) - 1)
    if symmetric:
        peak = tf.stop_gradient(tf.math.reduce_max(tf.math.abs(kernel), axis=reduce_axes))
        return peak, -peak
    upper = tf.stop_gradient(tf.math.reduce_max(kernel, axis=reduce_axes))
    lower = tf.stop_gradient(tf.math.reduce_min(kernel, axis=reduce_axes))
    return upper, lower
@tf.custom_gradient
def floor_ste(x: tf.Tensor) -> Tuple[tf.Tensor, Callable[[tf.Tensor], List[tf.Tensor]]]:
    """Floor with a straight-through estimator (STE) gradient.

    Forward pass: tf.floor(x). Backward pass: the incoming gradient is
    passed through unchanged, as if floor were the identity.
    """
    y = tf.floor(x)
    def grad(dy: tf.Tensor) -> List[tf.Tensor]:
        # Straight-through: treat d(floor)/dx as 1.
        return [dy]
    return y, grad
def get_nudged_ranges_scale(
        min: tf.Tensor,
        max: tf.Tensor,
        num_bits: int,
        narrow_range: bool = False) -> Tuple[tf.Tensor, tf.Tensor, tf.Tensor]:
    """Compute the quantization scale and min/max ranges "nudged" so that
    real zero lands exactly on an integer quantized value.

    Args:
        min: lower end of the float range (shadows the builtin; kept for
            interface compatibility).
        max: upper end of the float range (same note as above).
        num_bits: bit width of the integer grid (e.g. 8 -> 0..255).
        narrow_range: if True the lowest quantized value is 1 instead of 0.

    Returns:
        (nudged_min, nudged_max, scale).
    """
    # Integer grid limits, e.g. 255 for 8 bits.
    quant_max = tf.math.pow(2., tf.cast(num_bits, dtype=tf.dtypes.float32)) - 1.
    quant_min = tf.constant(1.) if narrow_range else tf.constant(0.)
    scale = (max - min) / (quant_max - quant_min)
    # Rounding the zero-point to ensure one of the quantized values snap to zero
    zero_point_from_min = quant_min - min / scale
    nudged_zero_point = tf.round(zero_point_from_min)
    # Clamp the zero-point into the representable integer range.
    nudged_zero_point = tf.where(zero_point_from_min < quant_min,
                                 quant_min * tf.ones(shape=tf.shape(nudged_zero_point)),
                                 nudged_zero_point)
    nudged_zero_point = tf.where(zero_point_from_min > quant_max,
                                 quant_max * tf.ones(shape=tf.shape(nudged_zero_point)),
                                 nudged_zero_point)
    # adjust/nudge the min/max to ensure zero-point snaps to real zero.
    nudged_min = (quant_min - nudged_zero_point) * scale
    nudged_max = (quant_max - nudged_zero_point) * scale
    return nudged_min, nudged_max, scale
def fake_quant_with_min_max_vars(
        inputs: tf.Tensor,
        min: tf.Tensor,
        max: tf.Tensor,
        num_bits: int,
        narrow_range: bool = False) -> tf.Tensor:
    """
    This is differentiable equivalent of the utility in tf.quantization.
    tf.quantization.fake_quant* utilities only allows the min/max ranges
    to increase through gradients, but we would have to rely on l2_loss
    to decrease the min/max ranges. This updated utility allows the gradients
    to both increase and decrease the min/max ranges.

    Args:
        inputs: tensor to fake-quantize.
        min/max: trainable range variables (names shadow the builtins; kept
            for interface compatibility).
        num_bits: bit width of the quantization grid.
        narrow_range: forwarded to get_nudged_ranges_scale.

    Returns:
        The fake-quantized tensor, same shape as *inputs*.
    """
    nudged_min, nudged_max, scale = get_nudged_ranges_scale(min, max, num_bits, narrow_range)
    clipped_data = tf.clip_by_value(inputs, nudged_min, nudged_max)
    shifted_data = clipped_data - nudged_min
    # floor(x + 0.5) == round-half-up, with a straight-through gradient.
    quant_data = floor_ste(shifted_data / scale + 0.5)
    quant_data = quant_data * scale + nudged_min
    return quant_data
# Per-channel ranges broadcast naturally, so the same implementation serves both.
fake_quant_with_min_max_vars_per_channel = fake_quant_with_min_max_vars
class ActivationQuantizationBlock(tf.keras.layers.Layer):
    """Keras layer applying 8-bit fake quantization to its input activations.

    mode='train': uses the differentiable fake-quant above (gradients can
        move the ranges both ways) and seeds the ranges from the first batch.
    mode='infer': uses the stock tf.quantization fake-quant op.
    When *enabled* is False the layer is a pass-through.
    """
    def __init__(self,
                 enabled: bool,
                 mode: str):
        super().__init__()
        self.enabled = enabled
        self.mode = mode
        if self.mode == 'train':
            self.fake_quant_with_min_max_vars_fn = \
                fake_quant_with_min_max_vars
        elif self.mode == 'infer':
            self.fake_quant_with_min_max_vars_fn = \
                tf.quantization.fake_quant_with_min_max_vars
        else:
            # Fail fast: an unknown mode previously surfaced only much later
            # as an AttributeError inside call().
            raise ValueError("mode must be 'train' or 'infer', got %r" % (mode,))
    def build(self, input_shape):
        # Scalar trainable range variables, created only when quantizing.
        if self.enabled:
            self.quant_min = self.add_weight(
                name='act_quant_min',
                trainable=True)
            self.quant_max = self.add_weight(
                name='act_quant_max',
                trainable=True)
            if self.mode == 'train':
                # Flipped to True once the first batch initializes the ranges.
                self.quant_initialized = tf.Variable(False, trainable=False)
    def init_quant_ranges(self, inputs: tf.Tensor) -> None:
        """Seed the min/max range variables from the first observed batch."""
        quant_max, quant_min = compute_ranges(inputs, per_channel=False, symmetric=False)
        self.quant_max.assign(quant_max)
        self.quant_min.assign(quant_min)
        self.quant_initialized.assign(True)
    def call(self, inputs):
        if self.enabled:
            if self.mode == "train":
                if not self.quant_initialized:
                    self.init_quant_ranges(inputs)
            return self.fake_quant_with_min_max_vars_fn(
                inputs,
                min=self.quant_min,
                max=self.quant_max,
                num_bits=8,
                narrow_range=False)
        else:
            return inputs
| 5,325 | 1,697 |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Trains a model that sorts numbers, keeping loss summaries in tensorboard.
The flag hparam has to be passed as a string of comma separated statements of
the form hparam=value, where the hparam's are any of the listed in the
dictionary DEFAULT_HPARAMS.
See the README.md file for further compilation and running instructions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import sorting_model
# Shorthand aliases for the TF1-style flag and file APIs.
flags = tf.app.flags
gfile = tf.gfile
FLAGS = flags.FLAGS
# Command-line flags controlling training length, checkpointing and logging.
flags.DEFINE_string('hparams', '', 'Hyperparameters')
flags.DEFINE_integer('num_iters', 500, 'Number of iterations')
flags.DEFINE_integer(
    'save_summaries_secs', 30,
    'The frequency with which summaries are saved, in seconds.')
flags.DEFINE_integer(
    'save_interval_secs', 30,
    'The frequency with which the model is saved, in seconds.')
flags.DEFINE_string('exp_log_dir', '/tmp/sorting/',
                    'Directory where to write event logs.')
flags.DEFINE_integer('max_to_keep', 1, 'Maximum number of checkpoints to keep')
# Baseline hyperparameters; individual values can be overridden via the
# --hparams flag ("name=value,name=value" format).
DEFAULT_HPARAMS = tf.contrib.training.HParams(n_numbers=50,
                                              lr=0.1,
                                              temperature=1.0,
                                              batch_size=10,
                                              prob_inc=1.0,
                                              samples_per_num=5,
                                              n_iter_sinkhorn=10,
                                              n_units=32,
                                              noise_factor=1.0,
                                              optimizer='adam',
                                              keep_prob=1.)
def main(_):
    """Build the sorting model and run TF-Slim training.

    Summaries and checkpoints are written to FLAGS.exp_log_dir at the
    intervals configured by the flags above.
    """
    hparams = DEFAULT_HPARAMS
    hparams.parse(FLAGS.hparams)  # apply command-line overrides
    if not gfile.Exists(FLAGS.exp_log_dir):
        gfile.MakeDirs(FLAGS.exp_log_dir)
    tf.reset_default_graph()
    g = tf.Graph()
    model = sorting_model.SortingModel(g, hparams)
    with g.as_default():
        # Assemble the graph: inputs -> network -> loss -> optimizer -> summaries.
        model.set_input()
        model.build_network()
        model.build_l2s_loss()
        model.build_optimizer()
        model.add_summaries_train()
        with tf.Session():
            tf.contrib.slim.learning.train(
                train_op=model.train_op,
                logdir=FLAGS.exp_log_dir,
                global_step=model.global_step,
                saver=tf.train.Saver(max_to_keep=FLAGS.max_to_keep),
                number_of_steps=FLAGS.num_iters,
                save_summaries_secs=FLAGS.save_summaries_secs,
                save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
    tf.app.run(main)
| 3,192 | 969 |
#!/user/bin/env python
# -*- coding: utf-8 -*-
"""
------------------------------------
@Project : opensourcetest
@Time : 2020/11/12 15:01
@Auth : chineseluo
@Email : 848257135@qq.com
@File : cli_test.py
@IDE : PyCharm
------------------------------------
"""
import os
import sys
import unittest
from opensourcetest.cli import main
class TestCli(unittest.TestCase):
    """Each OST CLI entry point is expected to terminate via SystemExit(0)."""

    def _assert_cli_exits_zero(self, argv):
        # Run main() with the given argv and verify it exits cleanly.
        sys.argv = argv
        with self.assertRaises(SystemExit) as cm:
            main()
        self.assertEqual(cm.exception.code, 0)

    def test_show_version(self):
        self._assert_cli_exits_zero(["OST", "-V"])

    def test_show_help(self):
        self._assert_cli_exits_zero(["OST", "-h"])

    def test_show_create_http_project(self):
        self._assert_cli_exits_zero(["OST", "start_http_project"])

    def test_show_create_ui_project(self):
        self._assert_cli_exits_zero(["OST", "start_ui_project"])

    def test_show_create_app_project(self):
        self._assert_cli_exits_zero(["OST", "start_app_project"])

    def test_show_online_docs_address(self):
        self._assert_cli_exits_zero(["OST", "onlinedocs"])
| 1,578 | 539 |
import socket
import struct
import config
import json
import threading
import random
def multicast_handler(client_port: int):
    """Send a one-shot server-discovery datagram to the multicast group.

    Binds a UDP socket to *client_port* (presumably so the server can reply
    to that port — TODO confirm against the server side), sends the
    discovery message, and always closes the socket.
    """
    # create the datagram socket
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(('', client_port))
    # set a timeout so the socket does not block indefinitely when trying to receive data.
    sock.settimeout(0.2)
    # Set the time-to-live for messages to 1 so they do not go past the local network segment.
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    try:
        # send request to the multicast group
        print(f'CLIENT: Sending multicast message to {config.MULTICAST_IP}')
        message = 'SERVER DISCOVERY'
        multicast_group = (config.MULTICAST_IP, config.MULTICAST_PORT)
        sock.sendto(bytes(message, encoding='utf-8'), multicast_group)
    finally:
        sock.close()
def tcp_handler(port: int):
    """Accept one TCP connection, send a nickname, and receive the client base.

    Listens on *port*, accepts a single connection, prompts the user for a
    nickname on stdin and sends it, then reads the server's JSON "client
    base" payload until EOF. Both loops break after the first connection.
    """
    sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock_tcp.bind(('', port))
    sock_tcp.listen(5)
    # empty buffer
    buff = b''
    while True:
        print(f'CLIENT: Waiting for a TCP connection')
        connection, client_address = sock_tcp.accept()
        try:
            print(f'CLIENT: Connection from {client_address}')
            username = input('=== Provide Your Nickname === ')
            connection.sendall(bytes(username, encoding='utf8'))
            # receive the data in chunks and add to the buffer
            while True:
                print(f'CLIENT: Waiting for the server to send client base')
                data = connection.recv(512)
                buff += data
                if not data:
                    break
                break
        finally:
            # NOTE(review): this finally-block runs even when the try body
            # raises, in which case buff may not hold valid JSON and
            # json.loads would mask the original error — confirm intent.
            print(f'CLIENT: Client base received')
            res_dict = json.loads(buff.decode('utf-8'))
            # print(res_dict)
            print(f'CLIENT: Closing TCP connection')
            # clean up the connection
            connection.close()
            break
if __name__ == '__main__':
    # Random high port shared by the TCP listener and the discovery request.
    port = random.randint(50_000, 65_000)
    # pass selected port to the TCP thread, in order to listen on the same port
    # thread in the background as daemon
    # Fix: the original passed the undefined name `client_port` here, which
    # raised NameError inside the thread; the chosen `port` is intended.
    th = threading.Thread(target=tcp_handler, args=(port,), daemon=True)
    th.start()
    multicast_handler(port)
    th.join()
| 2,364 | 727 |
import numpy as np
import torch
from collections import defaultdict, deque, OrderedDict
import heapq
from data_structures import DoublyLinkedList, UndirectedGraph,Fragment
import time
import sys
# Shared Gaussian negative-log-likelihood criterion used by the fragment
# stitching cost functions below.
loss = torch.nn.GaussianNLLLoss()
def _compute_stats(track):
t,x,y = track['t'],track['x'],track['y']
ct = np.nanmean(t)
if len(t)<2:
v = np.sign(x[-1]-x[0]) # assume 1/-1 m/frame = 30m/s
b = x-v*ct # recalculate y-intercept
fitx = np.array([v,b[0]])
fity = np.array([0,y[0]])
else:
xx = np.vstack([t,np.ones(len(t))]).T # N x 2
fitx = np.linalg.lstsq(xx,x, rcond=None)[0]
fity = np.linalg.lstsq(xx,y, rcond=None)[0]
track['t'] = t
track['x'] = x
track['y'] = y
track['fitx'] = fitx
track['fity'] = fity
return track
def stitch_objects_tsmn_online_2(o, THRESHOLD_MAX=3, VARX=0.03, VARY=0.03, time_out = 500):
    '''
    Dan's online version.
    Can potentially "under" stitch if interior fragments have higher matching cost.
    THRESHOLD_MAX: above which pairs should never be matched
    online version of stitch_objects_tsmn_ll
    track: dict with keys: id, t, x, y
        {"id": 20,
         "t": [frame1, frame2,...],
         "x":[x1,x2,...],
         "y":[y1,y2...],
         "fitx": [vx, bx], least square fit
         "fity": [vy, by]}
    tracks come incrementally as soon as they end
    Returns `o` with o.df re-labelled (ID column) and debugging attributes attached.
    '''
    # define cost
    def _getCost(track1, track2):
        '''
        track1 always ends before track2 ends
        999: mark as conflict (temporal overlap)
        -1: invalid (gap longer than time_out)
        otherwise: the smaller Gaussian NLL of predicting each track from the
        other's least-squares motion fit (both directions are evaluated).
        '''
        if track2["t"][0] < track1['t'][-1]: # if track2 starts before track1 ends
            return 999
        if track2['t'][0] - track1['t'][-1] > time_out: # if track2 starts TIMEOUT after track1 ends
            return -1
        # predict from track1 forward to time of track2
        xx = np.vstack([track2['t'],np.ones(len(track2['t']))]).T # N x 2
        targetx = np.matmul(xx, track1['fitx'])
        targety = np.matmul(xx, track1['fity'])
        pt1 = track1['t'][-1]
        # prediction variance grows linearly with the time horizon
        varx = (track2['t']-pt1) * VARX
        vary = (track2['t']-pt1) * VARY
        input = torch.transpose(torch.tensor([track2['x'],track2['y']]),0,1)
        target = torch.transpose(torch.tensor([targetx, targety]),0,1)
        var = torch.transpose(torch.tensor([varx,vary]),0,1)
        nll1 = loss(input,target,var).item()
        # predict from track2 backward to time of track1
        xx = np.vstack([track1['t'],np.ones(len(track1['t']))]).T # N x 2
        targetx = np.matmul(xx, track2['fitx'])
        targety = np.matmul(xx, track2['fity'])
        pt1 = track2['t'][-1]
        varx = (track1['t']-pt1) * VARX
        vary = (track1['t']-pt1) * VARY
        input = torch.transpose(torch.tensor([track1['x'],track1['y']]),0,1)
        target = torch.transpose(torch.tensor([targetx, targety]),0,1)
        var = torch.transpose(torch.tensor([varx,vary]),0,1)
        # backward horizon is negative, hence abs() on the variance
        nll2 = loss(input,target,np.abs(var)).item()
        return min(nll1, nll2)
        # return nll1
    def _first(s):
        '''Return the first element from an ordered collection
        or an arbitrary element from an unordered collection.
        Raise StopIteration if the collection is empty.
        '''
        return next(iter(s.values()))
    df = o.df
    # sort tracks by start/end time - not for real deployment
    groups = {k: v for k, v in df.groupby("ID")}
    ids = list(groups.keys())
    ordered_tracks = deque() # list of dictionaries
    all_tracks = {}
    S = []   # min-heap of (start frame, id)
    E = []   # min-heap of (end frame, id)
    for id, car in groups.items():
        t = car["Frame #"].values
        # vehicle center: midpoint of the two rear bbox corners
        x = (car.bbr_x.values + car.bbl_x.values)/2
        y = (car.bbr_y.values + car.bbl_y.values)/2
        notnan = ~np.isnan(x)
        t,x,y = t[notnan], x[notnan],y[notnan]
        if len(t)>1: # ignore empty or only has 1 frame
            S.append([t[0], id])
            E.append([t[-1], id])
            track = {"id":id, "t": t, "x": x, "y": y}
            # ordered_tracks.append(track)
            all_tracks[id] = track
    heapq.heapify(S) # min heap (frame, id)
    heapq.heapify(E)
    EE = E.copy()
    # emit tracks in order of their END frame, simulating the online setting
    while EE:
        e, id = heapq.heappop(EE)
        ordered_tracks.append(all_tracks[id])
    # Initialize
    X = UndirectedGraph() # exclusion graph
    TAIL = defaultdict(list) # id: [(cost, head)]
    HEAD = defaultdict(list) # id: [(cost, tail)]
    curr_tracks = deque() # tracks in view. list of tracks. should be sorted by end_time
    path = {} # oldid: newid. to store matching assignment
    past_tracks = DoublyLinkedList() # set of ids indicate end of track ready to be matched
    TAIL_MATCHED = set()
    HEAD_MATCHED = set()
    matched = 0 # count matched pairs
    running_tracks = OrderedDict() # tracks that start but not end at e
    start = time.time()
    for i,track in enumerate(ordered_tracks):
        # print("\n")
        # print('Adding new track {}/{}'.format(i, len(ordered_tracks)))
        # print("Out of view: {}".format(past_tracks.size))
        curr_id = track['id'] # last_track = track['id']
        path[curr_id] = curr_id
        right = track['t'][-1] # right pointer: current time
        # get tracks that started but not end - used to define the window left pointer
        while S and S[0][0] < right: # append all the tracks that already starts
            started_time, started_id = heapq.heappop(S)
            running_tracks[started_id] = started_time
        # compute track statistics (adds fitx/fity least-squares fits)
        track = _compute_stats(track)
        try:
            left = max(0,_first(running_tracks) - time_out)
        except: left = 0
        # print("window size :", right-left)
        # remove out of sight tracks: they are now "ready" for final matching
        while curr_tracks and curr_tracks[0]['t'][-1] < left:
            past_tracks.append(curr_tracks.popleft()['id'])
        # compute score from every track in curr to track, update Cost
        for curr_track in curr_tracks:
            cost = _getCost(curr_track, track)
            if cost > THRESHOLD_MAX:
                # too expensive: record as a hard conflict so they never merge
                X._addEdge(curr_track['id'], track['id'])
            elif cost > 0:
                heapq.heappush(TAIL[curr_track['id']], (cost, track['id']))
                heapq.heappush(HEAD[track['id']], (cost, curr_track['id']))
        # print("TAIL {}, HEAD {}".format(len(TAIL), len(HEAD)))
        # start matching from the first ready tail
        tail_node = past_tracks.head
        if not tail_node: # no ready tail available: keep waiting
            curr_tracks.append(track)
            running_tracks.pop(curr_id) # remove tracks that ended
            continue # go to the next track in ordered_tracks
        while tail_node is not None:
            tail = tail_node.data # tail is ready (time-wise)
            # remove already matched heads from this tail's candidate heap
            while TAIL[tail] and TAIL[tail][0][1] in HEAD_MATCHED:
                heapq.heappop(TAIL[tail])
            if not TAIL[tail]: # if tail does not have candidate match
                TAIL.pop(tail)
                tail_node = tail_node.next # go to the next ready tail
                continue
            _, head = TAIL[tail][0] # best head for tail
            # symmetrically, drop already matched tails from the head's heap
            while HEAD[head] and HEAD[head][0][1] in TAIL_MATCHED:
                heapq.heappop(HEAD[head])
            if not HEAD[head]:
                HEAD.pop(head)
                tail_node = tail_node.next
                continue
            else: _, tail2 = HEAD[head][0]
            # tail and head agree with each other: mutual best match
            if tail==tail2:
                if head in X[tail]: # conflicts
                    HEAD.pop(head)
                    TAIL.pop(tail)
                else: # match tail and head
                    # print("matching {} & {}".format(tail, head))
                    path[head] = path[tail]
                    X._union(head, tail)
                    HEAD.pop(head)
                    TAIL.pop(tail)
                    HEAD_MATCHED.add(head)
                    TAIL_MATCHED.add(tail)
                    matched += 1
                    past_tracks.delete_element(tail)
                    X._remove(tail)
            # match or not, process the next ready tail
            tail_node = tail_node.next
        curr_tracks.append(track)
        running_tracks.pop(curr_id) # remove tracks that ended
    # print("matched:", matched)
    # delete IDs that are empty
    # print("\n")
    # print("{} Ready: ".format(past_tracks.printList()))
    # print("{} Processsed: ".format(len(processed)))
    print("{} pairs matched".format(matched))
    # print("Deleting {} empty tracks".format(len(empty_id)))
    # df = df.groupby("ID").filter(lambda x: (x["ID"].iloc[0] not in empty_id))
    end = time.time()
    print('run time online stitching:', end-start)
    # for debugging only
    o.path = path
    # o.C = C
    o.X = X
    o.groupList = ids
    o.past_tracks = past_tracks.convert_to_set()
    o.TAIL = TAIL
    o.HEAD = HEAD
    # replace IDs: every fragment is renamed to the root id of its chain
    newids = [v for _,v in path.items()]
    m = dict(zip(path.keys(), newids))
    df = df.replace({'ID': m})
    df = df.sort_values(by=['Frame #','ID']).reset_index(drop=True)
    print("Before DA: {} unique IDs".format(len(ids)))
    print("After DA: {} unique IDs".format(df.groupby("ID").ngroups))
    print("True: {} unique IDs".format(len([id for id in ids if id<1000])))
    o.df = df
    return o
# define cost
def _getCost(track1, track2, time_out, VARX, VARY):
    '''
    track1 always ends before track2 ends
    999: mark as conflict (temporal overlap)
    -1: invalid (gap longer than time_out)
    otherwise: the smaller Gaussian NLL of predicting each fragment from the
    other's least-squares motion fit.
    '''
    start2 = track2.t[0]
    end1 = track1.t[-1]
    if start2 < end1:  # track2 starts before track1 ends -> conflict
        return 999
    if start2 - end1 > time_out:  # gap exceeds the timeout -> not a candidate
        return -1

    def _directional_nll(obs, fit, use_abs):
        # Evaluate fit-track's linear motion model at obs-track's timestamps.
        design = np.vstack([obs.t, np.ones(len(obs.t))]).T  # N x 2
        pred_x = np.matmul(design, fit.fitx)
        pred_y = np.matmul(design, fit.fity)
        anchor = fit.t[-1]
        # Variance grows linearly with the prediction horizon.
        var_x = (obs.t - anchor) * VARX
        var_y = (obs.t - anchor) * VARY
        measured = torch.transpose(torch.tensor([obs.x, obs.y]), 0, 1)
        predicted = torch.transpose(torch.tensor([pred_x, pred_y]), 0, 1)
        variance = torch.transpose(torch.tensor([var_x, var_y]), 0, 1)
        if use_abs:
            # Backward horizon is negative, so take |variance|.
            variance = np.abs(variance)
        return loss(measured, predicted, variance).item()

    # Forward: track1's fit predicts track2; backward: track2's fit predicts track1.
    nll_forward = _directional_nll(track2, track1, False)
    nll_backward = _directional_nll(track1, track2, True)
    return min(nll_forward, nll_backward)
def _first(s):
'''Return the first element from an ordered collection
or an arbitrary element from an unordered collection.
Raise StopIteration if the collection is empty.
'''
return next(iter(s.values()))
def stitch_objects_tsmn_online_3(o, THRESHOLD_MAX=3, VARX=0.03, VARY=0.03, time_out = 500):
    '''
    Build on ver2 - with object-oriented data structure (Fragment).

    Greedy online stitching of track fragments: fragments are consumed in
    order of their end frame; each new fragment is scored (via the
    module-level _getCost) against fragments still "in view", and mutual-best
    tail/head pairs that have left the window are merged.

    Fix applied: removed extraction residue that was fused onto the final
    `return o` line and broke the syntax.

    Returns `o` with o.df re-labelled (ID column) and debug attributes attached.
    '''
    df = o.df
    # sort tracks by start/end time - not for real deployment
    groups = {k: v for k, v in df.groupby("ID")}
    ids = list(groups.keys())
    ordered_tracks = deque() # Fragments in order of end frame
    all_tracks = {}
    S = []   # min-heap of (start frame, id)
    E = []   # min-heap of (end frame, id)
    for id, car in groups.items():
        t = car["Frame #"].values
        # vehicle center: midpoint of the two rear bbox corners
        x = (car.bbr_x.values + car.bbl_x.values)/2
        y = (car.bbr_y.values + car.bbl_y.values)/2
        notnan = ~np.isnan(x)
        t,x,y = t[notnan], x[notnan],y[notnan]
        if len(t)>1: # ignore empty or only has 1 frame
            S.append([t[0], id])
            E.append([t[-1], id])
            track = Fragment(id, t,x,y)
            all_tracks[id] = track
    heapq.heapify(S) # min heap (frame, id)
    heapq.heapify(E)
    # emit fragments in order of their END frame, simulating the online setting
    while E:
        e, id = heapq.heappop(E)
        ordered_tracks.append(all_tracks[id])
    del all_tracks
    # Initialize
    running_tracks = OrderedDict() # tracks that start but not end at e
    curr_tracks = deque() # tracks in view. list of tracks. should be sorted by end_time
    past_tracks = OrderedDict() # ids of tracks whose end left the window: ready to be matched
    path = {} # oldid: newid. to store matching assignment
    matched = 0 # count matched pairs
    start = time.time()
    for i,track in enumerate(ordered_tracks):
        curr_id = track.id
        path[curr_id] = curr_id
        right = track.t[-1] # right pointer: current time
        # get tracks that started but not end - defines the window left pointer
        while S and S[0][0] < right: # append all the tracks that already started
            started_time, started_id = heapq.heappop(S)
            running_tracks[started_id] = started_time
        # compute track statistics (least-squares motion fit)
        track._computeStats()
        try:
            left = max(0,_first(running_tracks) - time_out)
        except: left = 0
        # move out-of-sight tracks into the ready pool
        while curr_tracks and curr_tracks[0].t[-1] < left:
            past_track = curr_tracks.popleft()
            past_tracks[past_track.id] = past_track
        # compute score from every track in curr to track
        for curr_track in curr_tracks:
            cost = _getCost(curr_track, track, time_out, VARX, VARY)
            if cost > THRESHOLD_MAX:
                curr_track._addConflict(track)
            elif cost > 0:
                curr_track._addSuc(cost, track)
                track._addPre(cost, curr_track)
        # repeatedly resolve mutual-best matches in the ready pool until no
        # progress is made (the pool stops shrinking)
        prev_size = 0
        curr_size = len(past_tracks)
        while curr_size > 0 and curr_size != prev_size:
            prev_size = len(past_tracks)
            remove_keys = set()
            for ready_id, ready in past_tracks.items():
                best_head = ready._getFirstSuc()
                if not best_head or not best_head.pre: # no match, or head taken
                    remove_keys.add(ready.id)
                else:
                    try: best_tail = best_head._getFirstPre()
                    except: best_tail = None
                    # mutual best and not in conflict -> merge
                    if best_head and best_tail and best_tail.id == ready.id and best_tail.id not in ready.conflicts_with:
                        path[best_head.id] = path[best_tail.id]
                        remove_keys.add(ready.id)
                        Fragment._matchTailHead(best_tail, best_head)
                        matched += 1
            [past_tracks.pop(key) for key in remove_keys]
            curr_size = len(past_tracks)
        curr_tracks.append(track)
        running_tracks.pop(track.id) # remove tracks that ended
    print("{} pairs matched".format(matched))
    end = time.time()
    print('run time online stitching:', end-start)
    # for debugging only
    o.path = path
    o.groupList = ids
    o.past_tracks = past_tracks.keys()
    # replace IDs: every fragment is renamed to the root id of its chain
    newids = [v for _,v in path.items()]
    m = dict(zip(path.keys(), newids))
    df = df.replace({'ID': m})
    df = df.sort_values(by=['Frame #','ID']).reset_index(drop=True)
    print("Before DA: {} unique IDs".format(len(ids)))
    print("After DA: {} unique IDs".format(df.groupby("ID").ngroups))
    print("True: {} unique IDs".format(len([id for id in ids if id<1000])))
    o.df = df
    return o
#Copyright (c) 2010 harkon.kr
#
# ***** BEGIN MIT LICENSE BLOCK *****
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
#
# ***** END MIT LICENSE BLOCK *****
import bpy
from bpy.types import Operator, AddonPreferences
from bpy.props import StringProperty, IntProperty, BoolProperty
import os, os.path
class GamekitAddonPreferences(AddonPreferences):
    """Addon preferences UI: where the Gamekit runtime lives and is run from."""
    # this must match the addon name, use '__package__'
    # when defining this in a submodule of a python package.
    bl_idname = __package__

    # Path to the Gamekit runtime executable.
    runtime_path = StringProperty(
        name="Runtime File Path",
        subtype='FILE_PATH',
    )
    # Directory the runtime is launched from.
    working_dir = StringProperty(
        name="Working Directory",
        subtype='FILE_PATH',
    )

    def draw(self, context):
        """Draw the two path fields in Blender's addon preferences panel."""
        layout = self.layout
        layout.label(text="Gamekit Runtime options")
        layout.prop(self, "runtime_path")
        layout.prop(self, "working_dir")
class GamekitConfig:
    """Simple ``key = value`` config store for the Gamekit addon.

    NOTE(review): ``cfg`` is a class attribute, so instances share one dict
    until ``read_config(clear_cfg=True)`` rebinds it per instance — kept
    as-is for backward compatibility.
    """
    cfg = dict()
    # Per-platform fallback settings.
    defaultswin= {
        'runtime':'./OgreKit/OgreKit-NoDX.exe',
        'workingdir':'//'
    }
    defaultsmac= {
        'runtime':'./OgreKit/AppOgreKit',
        'workingdir':'//'
    }
    defaultslinux= {
        'runtime':'./OgreKit/AppOgreKit',
        'workingdir':'//'
    }

    def load_defaults(self):
        """Merge the platform-appropriate defaults into the config."""
        if os.name == "nt":
            self.cfg.update(self.defaultswin)
        elif os.name == "mac":
            self.cfg.update(self.defaultsmac)
        else:
            self.cfg.update(self.defaultslinux)
        return True

    def read_config(self, fn, clear_cfg = True):
        """Load ``key = value`` lines from *fn*; return True on success.

        Lines that are blank or start with '#' are skipped.
        """
        if clear_cfg: self.cfg = {}
        try:
            # with-block guarantees the handle is closed; the original leaked
            # the file object when a line failed to parse.
            with open(fn) as f:
                for s in f:
                    s = s.strip()
                    if len(s) > 0 and s[0] != '#':
                        kv = s.split('=', 1)
                        self.cfg[kv[0].strip()] = kv[1].strip()
        except (OSError, IndexError, UnicodeDecodeError):
            return False
        return True

    def write_config(self, fn):
        """Write all entries as ``key = value`` lines; return True on success."""
        try:
            with open(fn, 'w') as file:
                for k, v in self.cfg.items():
                    file.write(k + " = " + v + "\n")
        except IOError as er:
            print(str(er))
            return False
        return True

    def get(self, key, defvalue = ""):
        """Return the stored string for *key*, or *defvalue* if missing or empty."""
        try:
            v = self.cfg[str(key)]
            if not v: return defvalue
            return v
        except KeyError:
            return defvalue

    def set(self, key, value):
        """Store *value* under *key*; both are coerced to strings."""
        self.cfg[str(key)] = str(value)

    def get_bool(self, key, defvalue = "False"):
        """Interpret the stored value as a bool ("", "false", "0" -> False)."""
        v = self.get(key, defvalue)
        if v == "" or v.lower() == "false" or v == "0": return False
        return bool(v)

    def get_int(self, key, defvalue = "0"):
        """Return the value parsed as an int, or 0 if it cannot be parsed."""
        try:
            return int(self.get(key, defvalue))
        except ValueError:
            return 0

    def get_float(self, key, defvalue = "0.0"):
        """Return the value parsed as a float, or 0.0 if it cannot be parsed."""
        try:
            return float(self.get(key, defvalue))
        except ValueError:
            return 0.0

    def get_color(self, key, defvalue = "(0.0, 0.0, 0.0)"):
        """Return the value parsed as a color tuple, e.g. "(1.0, 0.5, 0.0)".

        Uses ast.literal_eval instead of eval() so a malicious config file
        cannot execute arbitrary code.
        """
        import ast
        try:
            return ast.literal_eval(self.get(key, defvalue))
        except (ValueError, SyntaxError):
            # Original returned a 4-tuple here by mistake; a 3-tuple matches
            # the documented default value.
            return (0.0, 0.0, 0.0)
from enum import Enum
class StatusType(Enum):
    """Field names stripped from Status payloads by default."""
    DEFAULT = ["dflag", "updated_at", "created_at"]
class UnidadesType(Enum):
    """Field names stripped from Unidade payloads by default."""
    DEFAULT = ["dflag", "updated_at", "created_at"]
class AssuntoType(Enum):
    """Field names stripped from Assunto payloads by default."""
    DEFAULT = ["dflag", "updated_at", "created_at"]
class PersonsType(Enum):
    """Field names stripped from Person payloads by default."""
    DEFAULT = ["dflag", "updated_at", "created_at"]
class AppointmentsType(Enum):
    """Field names stripped from Appointment payloads by default."""
    DEFAULT = ["dflag", "updated_at", "created_at", "id_person", "id_subject"]
class Status:
    """Build a plain dict from a status payload, dropping unwanted keys.

    Note: __new__ returns vars(instance), so Status(...) yields a dict,
    not a Status instance.
    """

    def __new__(cls, status_json, remove_keys=StatusType.DEFAULT.value):
        instance = super().__new__(cls)
        instance.__init__(status_json, remove_keys)
        return vars(instance)

    def __init__(self, status_json, remove_keys: list):
        payload = dict(status_json)
        for field in ("name", "code", "description"):
            setattr(self, field, payload.get(field))
        self.__remove_unwanted_keys(remove_keys)

    def __remove_unwanted_keys(self, keys):
        for key in keys:
            if hasattr(self, key):
                delattr(self, key)
class Assunto:
    """Build a plain dict from a subject payload, dropping unwanted keys.

    Note: __new__ returns vars(instance), so Assunto(...) yields a dict,
    not an Assunto instance.
    """

    def __new__(cls, assunto_json, remove_keys=AssuntoType.DEFAULT.value):
        instance = super().__new__(cls)
        instance.__init__(assunto_json, remove_keys)
        return vars(instance)

    def __init__(self, assunto_json, remove_keys: list):
        payload = dict(assunto_json)
        for field in ("name", "code", "description", "active"):
            setattr(self, field, payload.get(field))
        self.__remove_unwanted_keys(remove_keys)

    def __remove_unwanted_keys(self, keys):
        for key in keys:
            if hasattr(self, key):
                delattr(self, key)
class Unidade:
    """Build a plain dict from a unit payload, dropping unwanted keys.

    Note: __new__ returns vars(instance), so Unidade(...) yields a dict,
    not a Unidade instance.
    """

    def __new__(cls, unidade_json, remove_keys=UnidadesType.DEFAULT.value):
        instance = super().__new__(cls)
        instance.__init__(unidade_json, remove_keys)
        return vars(instance)

    def __init__(self, unidade_json, remove_keys: list):
        payload = dict(unidade_json)
        for field in ("name", "code", "attendants_number", "description",
                      "phone", "email", "active"):
            setattr(self, field, payload.get(field))
        self.__remove_unwanted_keys(remove_keys)

    def __remove_unwanted_keys(self, keys):
        for key in keys:
            if hasattr(self, key):
                delattr(self, key)
class Person:
    """Build a plain dict from a person payload, dropping unwanted keys.

    Note: __new__ returns vars(instance), so Person(...) yields a dict,
    not a Person instance.
    """

    def __new__(cls, person_json, remove_keys=PersonsType.DEFAULT.value):
        instance = super().__new__(cls)
        instance.__init__(person_json, remove_keys)
        return vars(instance)

    def __init__(self, person_json, remove_keys: list):
        payload = dict(person_json)
        for field in ("email", "national_registration"):
            setattr(self, field, payload.get(field))
        self.__remove_unwanted_keys(remove_keys)

    def __remove_unwanted_keys(self, keys):
        for key in keys:
            if hasattr(self, key):
                delattr(self, key)
class Appointment:
    """Build a plain dict from an appointment payload, dropping unwanted keys.

    Note: __new__ returns vars(instance), so Appointment(...) yields a dict,
    not an Appointment instance.
    """

    def __new__(cls, appointments_json, remove_keys=AppointmentsType.DEFAULT.value):
        instance = super().__new__(cls)
        instance.__init__(appointments_json, remove_keys)
        return vars(instance)

    def __init__(self, appointments_json, remove_keys: list):
        payload = dict(appointments_json)
        for field in ("unit", "formatted_date", "formatted_day",
                      "formatted_time", "attendance_number"):
            setattr(self, field, payload.get(field))
        self.__remove_unwanted_keys(remove_keys)

    def __remove_unwanted_keys(self, keys):
        for key in keys:
            if hasattr(self, key):
                delattr(self, key)
class Message:
    """SNS-style message envelope pairing a payload with routing attributes."""

    def __init__(self, message, marketplace_id, type="notification.sms", event="send.sms", resource="teste"):
        # Payload published to the topic.
        self.topic_message = {
            "type": type,
            "resource": resource,
            "description": "",
            "object": message,
        }
        # Attribute map in the AWS DataType/StringValue shape.
        string_attrs = {
            "event": event,
            "marketplace_id": marketplace_id,
            "resource": resource,
            "source": "api",
            "type": type,
        }
        self.message_attributes = {
            key: {"DataType": "String", "StringValue": value}
            for key, value in string_attrs.items()
        }

    def __eq__(self, obj):
        if not isinstance(obj, Message):
            return False
        return (self.topic_message == obj.topic_message
                and self.message_attributes == obj.message_attributes)
import os

# Read the puzzle input (one signed integer per line) from day01.txt next to
# this script; both part1() and part2() consume `inputdata`.
path = os.path.join(os.path.dirname(__file__), 'day01.txt')
with open(path) as f:
    inputdata = f.readlines()
def part1(data=None):
    """Sum all frequency changes and return the resulting frequency.

    data: optional iterable of numeric strings; defaults to the puzzle input
    read at module load. (The unused `freqlist` dict was removed.)
    """
    lines = inputdata if data is None else data
    return sum(int(line) for line in lines)
def part2(data=None):
    """Return the first cumulative frequency reached twice, cycling the input.

    data: optional iterable of numeric strings; defaults to the puzzle input.
    Fix: the starting frequency 0 counts as already seen per the puzzle
    statement — the original omitted it and returned a later duplicate for
    inputs whose running total returns to 0.
    """
    lines = inputdata if data is None else data
    total = 0
    seen = {total}
    while True:
        for line in lines:
            total += int(line)
            if total in seen:
                return total
            seen.add(total)
# Print both puzzle answers. (Fix: stray extraction residue fused onto the
# last print line broke the syntax.)
print(f"\nAOC 2018 Day 01: \n")
print(f"Part 1: {part1()}")
print(f"Part 2: {part2()}")
#! /usr/bin/env python
#
# Copyright (C) 2018 Mikko Kotila
import os

# Package metadata consumed by the setup() call below.
DESCRIPTION = "Signs Text Processing for Deep Learning"
LONG_DESCRIPTION = """\
Signs is a utility for text preprocessing, vectorizing, and analysis
such as semantic similarity, mainly for the purpose of using unstructured
data in deep learning models.
"""
DISTNAME = 'signs'
MAINTAINER = 'Mikko Kotila'
MAINTAINER_EMAIL = 'mailme@mikkokotila.com'
URL = 'http://autonom.io'
LICENSE = 'MIT'
DOWNLOAD_URL = 'https://github.com/autonomio/signs/'
VERSION = '0.3.2'

# Prefer setuptools; fall back to distutils on very old environments.
try:
    from setuptools import setup
    _has_setuptools = True
except ImportError:
    from distutils.core import setup

# Runtime dependencies installed alongside the package.
install_requires = ['kerasplotlib',
                    'wrangle',
                    'pandas',
                    'numpy',
                    'cython',
                    'spacy',
                    'gensim',
                    'keras',
                    'ipython']
# Run setup() only when executed as a script (pip/setup.py invocation).
if __name__ == "__main__":
    setup(name=DISTNAME,
          author=MAINTAINER,
          author_email=MAINTAINER_EMAIL,
          maintainer=MAINTAINER,
          maintainer_email=MAINTAINER_EMAIL,
          description=DESCRIPTION,
          long_description=LONG_DESCRIPTION,
          license=LICENSE,
          url=URL,
          version=VERSION,
          download_url=DOWNLOAD_URL,
          install_requires=install_requires,
          packages=['signs',
                    'signs.commands',
                    'signs.preprocess',
                    'signs.vectorize',
                    'signs.grams',
                    'signs.utils',
                    'signs.models',
                    'signs.similarity'],
          classifiers=[
              'Intended Audience :: Science/Research',
              'Programming Language :: Python :: 3.6',
              'License :: OSI Approved :: MIT License',
              'Topic :: Scientific/Engineering :: Human Machine Interfaces',
              'Topic :: Scientific/Engineering :: Artificial Intelligence',
              'Topic :: Scientific/Engineering :: Mathematics',
              'Operating System :: POSIX',
              'Operating System :: Unix',
              'Operating System :: MacOS'])

    # Post-install step: fetch the default spaCy English model.
    # NOTE(review): the "en" shortcut is deprecated in spaCy 3.x (use
    # "en_core_web_sm") — confirm against the spaCy version pinned here.
    os.system("python -m spacy download en")
class NotProvided:
    """Sentinel type used to distinguish "argument omitted" from None."""
    pass
def nullable(value):
    """Map the literal string 'null' to None; pass any other value through."""
    is_null_marker = isinstance(value, str) and value == 'null'
    return None if is_null_marker else value
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 08:46:08 2020
@author: Jon
"""
from numba import jit
import numpy as np
@jit(nopython=True)
def get_adjusted(state, K, W, ms2_coeff):
    """Split the first row of ms2_coeff into two sums according to the W
    low-order bits of `state`: bit==1 adds the coefficient to the "one"
    accumulator, bit==0 to the "zero" accumulator.

    Returns [one_sum, zero_sum]. (`K` is unused but kept for the caller's
    signature; kept numba nopython-compatible: plain loop, list return.)
    """
    coeffs = ms2_coeff
    ones_total = 0
    zeros_total = 0
    bits = state
    for idx in np.arange(0, W):
        if bits & 1 == 1:
            ones_total = ones_total + coeffs[0, idx]
        else:
            zeros_total = zeros_total + coeffs[0, idx]
        bits = bits >> 1
    return [ones_total, zeros_total]
"""Object name: Resistance
Function name: serial_sum(R,nori,nend), performs serial sum of a resistance object list from nori to nend
Function name: parallel_sum(R,nori,nend), performs parallel sum of a resistance object list from nori to nend
"""
### definition of thermal resistance ###
from sympy.interactive import printing
printing.init_printing(use_latex='mathjax')
from IPython.display import display,Image, Latex
import numpy as np
import math
import scipy.constants as sc
import sympy as sym
#from sympy import *
class Resistance(object):
    """Thermal resistance for conduction, convection, radiation or contact.

    Create with a display name and units ('W', 'W/m' or 'W/m^2'), then call
    one of the mode methods to populate ``self.R``:

      cond_plane(k, L, A=1.0)                       planar conduction
      cond_cylinder(k, ra, rb, L=1.0, angle=2*pi)   cylindrical shell (ra < rb)
      cond_sphere(k, ra, rb, scale=1.0)             spherical shell; scale is
                                                    the fraction of a full shell
      convection(h, A=1.0)                          surface convection
      radiation(eps, T_s, T_sur, A=1.0)             linearized radiation
      contact(R, A=1.0)                             contact resistance over A

    Combine lists of Resistance objects with the module-level serial_sum /
    parallel_sum helpers.
    """

    def __init__(self, name, units):
        self.name = name    # label used in the thermal circuit (LaTeX ok)
        self.units = units  # 'W', 'W/m' or 'W/m^2'

    def cond_plane(self, k, L, A = 1.0):
        """Planar conduction: R = L / (k A)."""
        self.mode = "conduction"
        self.geometry = "planar"
        self.k = k
        if k <= 0.:
            print("problem with the definition of thermal conductivity")
        self.L = L
        self.A = A
        self.R = self.L / (self.k * self.A)

    def cond_cylinder(self, k , ra, rb, L = 1.0, angle = 2.*math.pi):
        """Cylindrical-shell conduction: R = ln(rb/ra) / (angle L k)."""
        self.mode = "conduction"
        self.geometry = "cylindrical"
        self.k = k
        if k <= 0.:
            print("problem with the definition of thermal conductivity")
        self.ra = ra
        self.rb = rb
        if ra*rb <= 0.:
            print("problem with the definition of radii")
        self.L = L
        self.angle = angle
        self.R = np.log(rb/ra)/(angle*L*k)

    def cond_sphere(self, k, ra, rb, scale = 1.0):
        """Spherical-shell conduction: R = (1/ra - 1/rb) / (scale 4 pi k)."""
        self.mode = "conduction"
        self.geometry = "spherical"
        self.k = k
        if k <= 0.:
            print("problem with the definition of thermal conductivity")
        self.ra = ra
        self.rb = rb
        if ra*rb <= 0.:
            print("problem with the definition of radii")
        # BUG FIX: original referenced undefined names r_a/r_b, raising
        # NameError on every call; the parameters are ra and rb.
        self.R = (1./ra-1./rb)/(scale*4.*math.pi*k)

    def convection(self, h, A = 1.0):
        """Convective resistance: R = 1 / (h A)."""
        self.mode = 'convection'
        self.geometry = "whatever"
        self.R = 1./(h*A)
        self.A = A
        self.h = h

    def radiation(self,eps,T_s,T_sur, A = 1.0):
        """Linearized radiative resistance.

        h_r = eps * sigma * (T_s + T_sur) * (T_s^2 + T_sur^2), R = 1/(h_r A);
        sigma is the Stefan-Boltzmann constant from scipy.constants.
        """
        self.h = eps*sc.sigma*(T_s+T_sur)*(T_s**2+T_sur**2)
        self.R = 1./(self.h*A)
        self.mode = 'radiation'
        self.geometry = "whatever"
        self.A = A

    def contact(self, R, A=1.0):
        """Contact resistance: R = R''_contact / A (R typically from a table)."""
        self.R = R/A
        self.geometry = 'whatever'
        self.mode = 'contact'
### summation of thermal resistance (R is a vector) ###
def serial_sum(R, nori, nend):
    """Combine resistances R[nori..nend] (inclusive) in series.

    R_tot = sum(R_i). (Rewritten with builtin sum(); the original shadowed
    the `sum` builtin with a local accumulator.)
    """
    return sum(R[i].R for i in range(nori, nend + 1))
def parallel_sum(R, nori, nend):
    """Combine resistances R[nori..nend] (inclusive) in parallel.

    1/R_tot = sum(1/R_i). (Rewritten with builtin sum(); the original
    shadowed the `sum` builtin with a local accumulator.)
    """
    return 1. / sum(1. / R[i].R for i in range(nori, nend + 1))
import csv
import MySQLdb
from datetime import datetime
from tabulate import tabulate
#should be changed to sys/argv but this is temporary anyway
FILENAME = '/tmp/buttons.csv'
NUM_ROWS = 2
def tabulate_dataset(table):
    """Pretty-print rows as a grid table with Mode/Datetime headers.

    Fix: the original used the Python 2 `print` statement, which is a
    SyntaxError under Python 3; print() with a single argument behaves
    identically on both.
    """
    headers = ["Mode", "Datetime"]
    print(tabulate(table, headers, tablefmt="grid") + '\n')
def line_prepender(filename, line):
    """Insert *line* (newline-normalized) as the new first line of *filename*."""
    with open(filename, 'r+') as handle:
        existing = handle.read()
        handle.seek(0, 0)
        normalized = line.rstrip('\r\n') + '\n'
        handle.write(normalized + existing)
def read_csv(filename):
    """Read the first non-empty CSV row of *filename* and consume it.

    NOTE(review): destructive — after returning, the first line of the file
    has been removed on disk, so repeated calls drain the file row by row.
    Presumably the stripped line is the row just read; if the file starts
    with blank lines the two can diverge — verify against the writer.
    Returns a list containing that single row (or [] if no non-empty row).
    """
    the_file = open(filename, 'r')
    the_reader = csv.reader(the_file, dialect='excel')
    table = []
    for row in the_reader:
        if len(row) > 0:
            table.append(row)
            the_file.close()
            # Rewrite the file without its first line.
            with open(filename, 'r') as fin:
                data = fin.read().splitlines(True)
            with open(filename, 'w') as fout:
                fout.writelines(data[1:])
            break;
    return table
def commit_to_table():
    """Drain NUM_ROWS rows from the CSV and insert them into MySQL.

    Each CSV row is (button, 'Www Mmm dd HH:MM:SS YYYY'); on a failed insert
    the transaction is rolled back and the row is pushed back to the top of
    the CSV for a later retry. Commits once at the end.
    """
    # MySQL DATETIME format: YYYY-MM-DD HH:MM:SS
    db = MySQLdb.connect("hostname", "username", "password", "db_name")
    cursor = db.cursor()
    for i in range(NUM_ROWS):
        row = read_csv(FILENAME)
        datetime_obj = datetime.strptime(row[0][1], "%a %b %d %H:%M:%S %Y")
        datetime_string = datetime_obj.strftime("%Y-%m-%d %H:%M:%S")
        button_pressed = row[0][0]
        # SECURITY FIX: use a parameterized query. The original interpolated
        # the values straight into the SQL string (injection risk, and it
        # breaks on values containing quotes).
        query = ("INSERT INTO button_presses(button_pressed, pressed_datetime) "
                 "VALUES (%s, %s)")
        try:
            cursor.execute(query, (button_pressed, datetime_string))
        except MySQLdb.Error:
            # Rollback and requeue the row for retry.
            db.rollback()
            # BUG FIX: row is [[button, datestr]]; ''.join(row) joined a list
            # of lists (TypeError). Rebuild the original CSV line instead.
            line_prepender(FILENAME, ','.join(row[0]))
    # commit and disconnect from server
    db.commit()
    db.close()
def main():
    """Entry point: flush queued button presses from the CSV into MySQL."""
    commit_to_table()

if __name__ == '__main__':
    main()
from flask import session
class Session():
    """Thin static wrapper around the Flask request session."""

    @staticmethod
    def set_session(key: str, value):
        """Store *value* under *key* in the current Flask session."""
        session[key] = value
import doctest

# Run the doctest examples embedded in addd.rst; prints failures, if any.
doctest.testfile("addd.rst")
from flask import Flask, request, url_for, redirect, session, flash, jsonify, abort
import json
# from flask_login import LoginManager, UserMixin, login_required, logout_user
from flask_login import login_user, logout_user, login_required
import os
from flask_restful import Resource, Api
from passlib.apps import custom_app_context as pwd_context
import jwt
from .lib.utils import *
__all__ =['Login', 'CreateAdmin', 'Logout']
# Lowercase aliases so JSON-style literals pasted into this module still parse.
false = False
true = True
# User registry loaded once at import time from the 'users' store.
users = load_data('users')
#admins = load_data('admins')
class Login(Resource):
    """Login endpoint backed by the `users` store."""

    def get(self, userid):
        """Return the stored record for *userid*.

        BUG FIX: the original also referenced undefined local names
        `username` and `password`, raising NameError on every request.
        """
        return jsonify({userid: users[userid]})

    def post(self):
        """Authenticate a user from the form fields username/id/password."""
        username = request.form.get('username')
        userid = request.form.get('id')
        password = request.form.get('password')
        if not all([username, password, userid]):
            msg = "Remplissez tous les champs, s'il vous plaît!"
            abort(400, msg)
        user = User.query.filter_by(id = userid).first()
        if not user or not user.verify_password(password):
            msg = "Vérifiez votre nom, votre id ou votre mot de passe, s'il vous plaît !"
            abort(400, msg)
        else:
            login_user(user, remember = False)
            return jsonify({"message" : "Bienvenu."})
class Logout(Resource):
    """Logout endpoint; requires an authenticated session."""

    @login_required
    def get(self):
        """End the current user's session."""
        logout_user()
        return jsonify({'message':"Vous êtes sorti le système."})
class CreateAdmin(Resource):
    """Registration endpoint restricted to known collaborators."""

    def get(self):
        # Placeholder for the signup form template.
        return "formulaire d'inscrire admin template"

    def post(self):
        """Create an admin account after validating the form fields.

        Rejects: missing fields, unknown collaborator id, already-registered
        username, id/name mismatch, and mismatched password confirmation.
        """
        username = request.form.get('username')
        #print(username)
        userid = request.form.get('id')
        #print(userid)
        password = request.form.get('password')
        password2 = request.form.get('passwordconfirm')
        if not all([username, userid, password, password2]):
            abort(400, "Veuillez remplir tous les champs.") #missing arguments
        elif userid not in users.keys():
            abort(400, "Désolée, vous n'êtes pas notre collaborateur, vous ne pouvez pas créer un compte.")
        elif User.query.filter_by(username = username).first() is not None:
            abort(400, 'Vous avez déjà un compte.') #existing user
        elif username != users[userid]["nom"] :
            abort(400, "Votre id ne conforme pas à votre nom. ")
        elif password != password2 :
            abort(400, "Les deux mots de passe remplis doivent être identiques.")
        user = User(username = username, id = userid)
        user.hash_password(password)
        db.session.add(user)
        db.session.commit()
        msg = "Votre compte admin a bien été créé."
        # print(admins)
        # dict_to_json(admins, "admins")
        # NOTE(review): response key is spelled "massage"; clients may already
        # depend on the typo, so it is flagged here rather than changed.
        return jsonify({"massage" : msg})
from core_tests_base import CoreTestsBase, FakeTessagon, FakeTileSubClass
from tessagon.core.tile_generator import TileGenerator
class TestTileGenerator(CoreTestsBase):
    def test_non_cyclic(self):
        """2x3 tile grid with no wrap-around: edge tiles have None neighbors."""
        tessagon = FakeTessagon()
        tile_generator = TileGenerator(tessagon,
                                       u_range=[0.5, 1.0], v_range=[2.5, 4.0],
                                       u_num=2, v_num=3,
                                       u_cyclic=False, v_cyclic=False)
        tiles = tile_generator.initialize_tiles(FakeTileSubClass)
        # tiles is indexed [u][v]: 2 columns of 3 tiles
        assert len(tiles) == 2
        assert len(tiles[0]) == 3
        assert len(tiles[1]) == 3
        tile_generator.initialize_neighbors(tiles)
        # corner (0,0): no left/bottom neighbor without wrap-around
        assert(tiles[0][0].get_neighbor_tile(['left']) is None)
        assert(tiles[0][0].get_neighbor_tile(['bottom']) is None)
        assert(tiles[0][0].get_neighbor_tile(['right']) is tiles[1][0])
        assert(tiles[0][0].get_neighbor_tile(['top']) is tiles[0][1])
        # opposite corner (1,2): no right/top neighbor without wrap-around
        assert(tiles[1][2].get_neighbor_tile(['left']) is tiles[0][2])
        assert(tiles[1][2].get_neighbor_tile(['bottom']) is tiles[1][1])
        assert(tiles[1][2].get_neighbor_tile(['right']) is None)
        assert(tiles[1][2].get_neighbor_tile(['top']) is None)
def test_u_cyclic(self):
tessagon = FakeTessagon()
tile_generator = TileGenerator(tessagon,
u_range=[0.5, 1.0], v_range=[2.5, 4.0],
u_num=2, v_num=3,
u_cyclic=True, v_cyclic=False)
tiles = tile_generator.initialize_tiles(FakeTileSubClass)
assert len(tiles) == 2
assert len(tiles[0]) == 3
assert len(tiles[1]) == 3
tile_generator.initialize_neighbors(tiles)
assert(tiles[0][0].get_neighbor_tile(['left']) is tiles[1][0])
assert(tiles[0][0].get_neighbor_tile(['bottom']) is None)
assert(tiles[0][0].get_neighbor_tile(['right']) is tiles[1][0])
assert(tiles[0][0].get_neighbor_tile(['top']) is tiles[0][1])
assert(tiles[1][2].get_neighbor_tile(['left']) is tiles[0][2])
assert(tiles[1][2].get_neighbor_tile(['bottom']) is tiles[1][1])
assert(tiles[1][2].get_neighbor_tile(['right']) is tiles[0][2])
assert(tiles[1][2].get_neighbor_tile(['top']) is None)
def test_v_cyclic(self):
tessagon = FakeTessagon()
tile_generator = TileGenerator(tessagon,
u_range=[0.5, 1.0], v_range=[2.5, 4.0],
u_num=2, v_num=3,
u_cyclic=False, v_cyclic=True)
tiles = tile_generator.initialize_tiles(FakeTileSubClass)
assert len(tiles) == 2
assert len(tiles[0]) == 3
assert len(tiles[1]) == 3
tile_generator.initialize_neighbors(tiles)
assert(tiles[0][0].get_neighbor_tile(['left']) is None)
assert(tiles[0][0].get_neighbor_tile(['bottom']) is tiles[0][2])
assert(tiles[0][0].get_neighbor_tile(['right']) is tiles[1][0])
assert(tiles[0][0].get_neighbor_tile(['top']) is tiles[0][1])
assert(tiles[1][2].get_neighbor_tile(['left']) is tiles[0][2])
assert(tiles[1][2].get_neighbor_tile(['bottom']) is tiles[1][1])
assert(tiles[1][2].get_neighbor_tile(['right']) is None)
assert(tiles[1][2].get_neighbor_tile(['top']) is tiles[1][0])
def test_u_v_cyclic(self):
tessagon = FakeTessagon()
tile_generator = TileGenerator(tessagon,
u_range=[0.5, 1.0], v_range=[2.5, 4.0],
u_num=2, v_num=3,
u_cyclic=True, v_cyclic=True)
tiles = tile_generator.initialize_tiles(FakeTileSubClass)
assert len(tiles) == 2
assert len(tiles[0]) == 3
assert len(tiles[1]) == 3
tile_generator.initialize_neighbors(tiles)
assert(tiles[0][0].get_neighbor_tile(['left']) is tiles[1][0])
assert(tiles[0][0].get_neighbor_tile(['bottom']) is tiles[0][2])
assert(tiles[0][0].get_neighbor_tile(['right']) is tiles[1][0])
assert(tiles[0][0].get_neighbor_tile(['top']) is tiles[0][1])
assert(tiles[1][2].get_neighbor_tile(['left']) is tiles[0][2])
assert(tiles[1][2].get_neighbor_tile(['bottom']) is tiles[1][1])
assert(tiles[1][2].get_neighbor_tile(['right']) is tiles[0][2])
assert(tiles[1][2].get_neighbor_tile(['top']) is tiles[1][0])
| 4,554 | 1,691 |
# Python 2 script. Reads an n x n grid of single digits (heights), then a
# target "x y", and computes via dynamic programming the minimum total ascent
# (sum of positive height increases only; descents are free) over any
# right/down path from cell (0, 0) to cell (y-1, x-1).
n = input();
mp = [[int(i) for i in raw_input()] for j in xrange(n)]
# pnt[i][j] = cheapest accumulated ascent to reach cell (i, j).
pnt = [[0 for i in xrange(n)] for j in xrange(n)]
x, y = [int(i) for i in raw_input().split()]
# First row: cells are only reachable from the left.
for i in xrange(1,x):
    pnt[0][i] = pnt[0][i-1]+mp[0][i]-mp[0][i-1] if mp[0][i]>mp[0][i-1] else pnt[0][i-1]
# First column: cells are only reachable from above.
for i in xrange(1,y):
    pnt[i][0] = pnt[i-1][0]+mp[i][0]-mp[i-1][0] if mp[i][0]>mp[i-1][0] else pnt[i-1][0]
t = 0
l = 0
# Interior cells: take the cheaper of arriving from the top (t) or left (l).
for i in xrange(1,y):
    for j in xrange(1,x):
        t = pnt[i-1][j]+mp[i][j]-mp[i-1][j] if mp[i][j]>mp[i-1][j] else pnt[i-1][j];
        l = pnt[i][j-1]+mp[i][j]-mp[i][j-1] if mp[i][j]>mp[i][j-1] else pnt[i][j-1];
        pnt[i][j] = l if t>l else t;
print pnt[y-1][x-1]
from . import config as config_module
from .resource import resource_config, Resource
from .view import ResourceView
# Names re-exported as the package's public API.
__all__ = [
    "resource_config",
    "Resource",
    "ResourceView",
]
# PEP 440 pre-release version string.
__version__ = "1.0a2"
def includeme(config):
    """Pyramid inclusion hook.

    Registers every callable exported by the sibling ``config`` module
    (via its ``__all__``) as a Pyramid config directive of the same name.
    """
    for directive_name in config_module.__all__:
        config.add_directive(directive_name, getattr(config_module, directive_name))
| 368 | 115 |
def decryped_data(text, key):
    """Decrypt a Caesar cipher: shift every ASCII letter back by `key`.

    Case is preserved and non-letter characters pass through unchanged.

    Generalized from the original lookup-table version: the shift is taken
    modulo 26, so any integer key works (the original raised IndexError for
    negative keys and for keys greater than 78).

    Parameters:
        text (str): ciphertext to decrypt.
        key (int): shift that was applied during encryption.

    Returns:
        str: the decrypted text.
    """
    shift = key % 26
    decryped = ""
    for ch in text:
        if "a" <= ch <= "z":
            decryped += chr((ord(ch) - ord("a") - shift) % 26 + ord("a"))
        elif "A" <= ch <= "Z":
            decryped += chr((ord(ch) - ord("A") - shift) % 26 + ord("A"))
        else:
            decryped += ch
    return decryped
# Script driver: read the ciphertext and the integer shift from stdin,
# then print the decrypted result.
text = input()
key = int(input())
print(decryped_data(text,key))
| 518 | 192 |
#-*- coding: utf-8 -*-
from setuptools import setup
# Use the README as the long description shown on PyPI.
with open("README.md", "r") as fh:
    readme = fh.read()
# Package metadata for the fala_assis speech-assistant distribution.
setup(name='fala_assis',
    version='0.0.1',
    url='https://github.com/OseiasBeu/AssistenteDeFala',
    license='MIT License',
    author='Oseias Beu',
    long_description=readme,
    long_description_content_type="text/markdown",
    author_email='oseiasbeu@outlook.com',
    keywords='Assistente de Fala',
    description=u'Assistente de fala que avisa um portador de deficiência visual quando o programa executou',
    packages=['fala_assis'],
    install_requires=['gtts','IPython'],)
# Demo script: several ways of formatting/concatenating values in print().
num_int = 5
num_dec = 7.3
val_str = "texto qualquer "
print("Primeiro número é:", num_int)
# %i formats the float as an integer, truncating 7.3 to 7.
print("O poder do Kakaroto é mais de %i mil" %num_dec)
print("Olá mundo " + val_str + str(num_int))
print("Concatenando decimal:", num_dec)
# %.10f pads the value to ten decimal places.
print("Concatenando decimal: %.10f" %num_dec)
print("Concatenando decimal: " + str(num_dec))
print("Concatenando strings:", val_str)
print("Concatenando strings: %s" %val_str)
print("Concatenando strings: " + val_str)
#%%
#md
"""
This script downloads the dataset used in the analysis.
__It requires 2 inputs to be specified__
repo_directory and email (see first cell block).
"""
#%%
# Where is the main directory of the repo
repo_directory = './'
# PubMed requires you to identify yourself with an email address
email = ''
#%%
import os
os.chdir(repo_directory)
import numpy as np
import pandas as pd
import functions.dataminingfunctions as dmf
import functions.readabilityFunctions as rf
#%%
#Load journal info; the `search` column holds the PubMed query per journal
journalInfo=pd.read_csv('./JournalSelection/JournalSelection.csv')
#%%
#md
"""
Specify the search data that you want to get from pubmeddata
"""
#%%
#What to get. "all" saves a txt. Otherwise the xml tags wanted (see https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html). Separated by a comma
#"Trees" are possible to specify the column you want. (e.g. <year> occurs) in several
#places so pubdate_year takes the <year> tag in <pubdate>
dataOfInterest = 'abstracttext,pubdate_year,pmid,articletitle,journal_title,keyword,doi'
#If dataframe, what is the index column (usually article or author)
dfId = 'article'
#%%
#md
"""
Download the data
"""
#%%
# Download article-level data once per journal search term.
for n in range(0, len(journalInfo)):
    #Parameters needed (if left blank, get_pubmeddata asks for response)
    #What to search pubmed with
    searchString = journalInfo.search[n]
    print(' ---Running search: ' + searchString + ' (' + str(n) + ')' + ' ---')
    #Run get data ('ignore' presumably skips already-downloaded searches — confirm in dmf)
    dmf.get_pubmeddata(searchString.lower(), dataOfInterest, dfId, email, 'ignore')
#%%
#md
"""
Sometimes the pubdate year tags were missing in articles. The next cell finds those instances and fills in the missing years.
"""
#%%
# Patch records whose pubdate_year tag was empty: look the year up by PMID
# and write it back into the saved search results.
for n in range(0, len(journalInfo)):
    searchString = journalInfo.search[n].lower()
    #make path to data (always this, if dataframe)
    mDir = os.getcwd() + '/data/abstracts/' + searchString + '/' + 'id_' + dfId + '/' + dataOfInterest + '/'
    mDir = mDir.replace(' ','_')
    mDir = mDir.replace(',','_')
    mDir = mDir.replace('\"','')
    dat=pd.read_json(mDir + 'searchresults')
    dat.sort_index(inplace=True)
    # Positions of rows with an empty year field.
    idMissing = [i for i,x in enumerate(dat.pubdate_year) if x == '']
    if len(idMissing)>0:
        #Make a list of strings
        pmidMissing=list(map(str,list(dat.pmid[idMissing])))
        print(' ---Finding missing years (' + str(len(pmidMissing)) + ' found): ' + searchString + '. term: ' + str(n) + ' ---')
        missingYears = dmf.get_medlineyear(list(pmidMissing))
        # Fix: single .loc assignment instead of the original chained
        # indexing (dat['pubdate_year'].loc[...] = ...), which assigns to a
        # possibly temporary Series and is not guaranteed to update `dat`
        # (SettingWithCopy; a hard error under pandas copy-on-write).
        # NOTE(review): idMissing holds positions, used here as labels —
        # equivalent to the original only while the index is 0..n-1 after
        # sort_index(); confirm against the saved JSON's index.
        dat.loc[idMissing, 'pubdate_year'] = missingYears
        dat.to_json(mDir + 'searchresults')
#%%
#md
"""
For the "nr authors" measure, the author info also has to be downloaded.
"""
#%%
#What to get. "all" saves a txt. Otherwise the xml tags wanted (see https://www.nlm.nih.gov/bsd/licensee/elements_alphabetical.html). Separated by a comma
#"Trees" are possible to specify the column you want. (e.g. <year> occurs) in several
#places so pubdate_year takes the <year> tag in <pubdate>
dataOfInterest = 'forename,lastname,affiliation'
#If dataframe, what is the index column (usually article or author)
dfId = 'author'
# Same download loop as above, but now retrieving author-level records.
for n in range(0, len(journalInfo)):
    #Parameters needed (if left blank, get_pubmeddata asks for response)
    #What to search pubmed with
    searchString = journalInfo.search[n]
    print(' ---Running search: ' + searchString + ' (' + str(n) + ')' + ' ---')
    #Run get data
    dmf.get_pubmeddata(searchString.lower(), dataOfInterest, dfId, email, 'ignore')
#dataOfInterest = 'forename,lastname,affiliation'
#dfId = 'author'
#dmf.get_pubmeddata(searchString.lower(),dataOfInterest,dfId,email,'ignore')
| 3,603 | 1,227 |
# -*- coding: utf-8 -*-
# Python 2 example: segment raw documents into contiguous text chunks.
from sbdc.preprocessing import ContiguousSegmentSet
from sbdc.datasets import bbc_load
X = bbc_load()
# Segments shorter than 100 chars are handled by the "top" vanish strategy
# (semantics defined by ContiguousSegmentSet — confirm in sbdc docs).
cs = ContiguousSegmentSet(
    min_segment_length=100,
    small_segment_vanish_strategy="top")
# NOTE(review): assumes column 2 of the loaded array holds the document
# text — confirm against bbc_load's return layout.
cs.fit(X[:, 2])
text_segments = cs.transform()
print text_segments[:2]
| 297 | 120 |
import xml.etree.ElementTree as ET
from pathlib import Path
from argparse import ArgumentParser
import dateutil.parser
def main():
    """Parse the FIXM XML file named on the command line and print, for each
    flight, its center, identification, timestamp, and (when present) its
    en-route position."""
    arg_parser = ArgumentParser(
        description="An example script demonstrating how to parse a few "
                    "values out of a FIXM XML file.")
    arg_parser.add_argument("xml_file",
                            type=Path,
                            help="The XML file to parse")
    cli_args = arg_parser.parse_args()

    message_collection = ET.parse(cli_args.xml_file).getroot()
    for message in message_collection:
        for flight in message:
            ident = flight.find("flightIdentification")
            flight_number = ident.attrib["aircraftIdentification"]
            # Timestamps arrive as ISO-like strings; dateutil handles them.
            timestamp = dateutil.parser.parse(flight.attrib["timestamp"])
            print(f"Center: {flight.attrib['centre']}\n"
                  f"Flight Number: {flight_number}\n"
                  f"Timestamp: {timestamp}")

            en_route = flight.find("enRoute")
            if en_route is None:
                print("Data does not have en-route information")
                continue
            # Lat/long live in a deeply nested <pos> text node: "lat lon".
            pos_node = (en_route
                        .find("position")
                        .find("position")
                        .find("location")
                        .find("pos"))
            latitude, longitude = pos_node.text.split(" ")
            print(f"  Lat: {latitude}, Long: {longitude}")


if __name__ == "__main__":
    main()
| 1,586 | 408 |
# Base imports
from os import environ
# Third party imports
from graphene import Field, ObjectType, String, Int
# Project imports
from graphql_api.tenor.schemas.hacker_gif.result import Result
from graphql_api.tenor.resolvers.hacker_gif import resolve_hacker_gifs
# Tenor API key read from the environment at import time (None if unset).
API_KEY = environ.get('TENOR_API_KEY')

class HackerGifQuery(ObjectType):
    """GraphQL query type exposing a `hacker_gifs` field.

    Arguments default to the Tenor API key, a limit of 20 results, and the
    search query 'hacker'; resolution is delegated to resolve_hacker_gifs.
    """
    hacker_gifs = Field(
        Result,
        key=String(default_value=API_KEY),
        limit=Int(default_value=20),
        query=String(default_value='hacker'),
        resolver=resolve_hacker_gifs
    )
| 554 | 182 |
# coding: latin-1
# Flask example: https://realpython.com/flask-by-example-part-1-project-setup/
from flask import Flask
from waitress import serve
app = Flask(__name__)
from functions.date import get_date
from functions.connect import connect
# Static HTML scaffolding; the page is assembled by string concatenation below.
header = '<html>\n\t<header>\n\t\t<title>\n\t\t\tHome control panel\n\t\t</title>\n\t</header>\n\n\t<body>\n\t\t'
footer = '\n\n\t\t<small>\n\t\t\tLast modified: April 28, 2019\n\t\t</small>\n\t</body>\n</html>'
# Read a certain temperature from the database
def read(room):
    """Return the current temperature of `room` as a string.

    Relies on the module-global `reader` cursor, which is (re)created in
    hello() on each request.
    NOTE(review): `room` is interpolated directly into the SQL; this is only
    safe because callers pass hard-coded column names — never user input.
    """
    reader.execute(f"SELECT {room} FROM TEMPERATURES_CURRENT")
    # fetchone() yields e.g. (Decimal('21.5'),); strip the wrapper textually.
    var = str(reader.fetchone()).replace("(Decimal('", "").replace("'),)","")
    return var
def page_add(page, stanza, temp):
    """Append one two-column table row (room name, temperature) to `page`
    and return the extended HTML string."""
    return page + f"\n\t\t\t\t<tr>\n\t\t\t\t\t<td>{stanza}</td>\n\t\t\t\t\t<td>{temp}</td>\n\t\t\t\t</tr>"
@app.route('/')
def hello():
    """Render the home page: a table of current room temperatures.

    Opens a fresh DB connection per request and publishes its cursor as the
    module-global `reader` used by read().
    """
    global reader
    date = get_date()
    connection = connect(date)
    reader = connection.cursor()
    page = header
    page = page + "\n\t\t<h1>\n\t\t\tTemperature " + get_date() + "\n\t\t</h1>\n\t\t<hr>"
    page = page + "\n\n\t\t<table>\n\t\t\t<thead>\n\t\t\t\t<tr>\n\t\t\t\t\t<th align = \"left\">Stanza</th>\n\t\t\t\t\t<th align = \"left\">Temperatura</th>\n\t\t\t\t</tr>\n\t\t\t</thead>\n\t\t\t<tbody>"
    page = page_add(page, "Camera Francesco", read("Camera_Francesco"))
    page = page_add(page, "Camera Valentina", read("Camera_Valentina"))
    page = page_add(page, "Camera Genitori", read("Camera_genitori"))
    page = page_add(page, "Studio", read("Studio"))
    page = page_add(page, "Salone", read("Salone"))
    page = page + "\n\t\t\t</tbody>\n\t\t</table>\n\t\t<hr>\n\t\t<p>Per vedere il grafico con gli storici di tutte le temperature <a href =\"https://frisso.grafana.net/d/ri1HUveiz/stanze?orgId=1&refresh=5m&from=now-6h&to=now\">clicca qui</a><hr>"
    page = page + footer
    # Fix: the original `connection.close` only referenced the method without
    # calling it, so the DB connection was never closed.
    connection.close()
    return page
if __name__ == '__main__':
    # https://stackoverflow.com/questions/7023052/configure-flask-dev-server-to-be-visible-across-the-network
    # Note: since port 80 is a privileged port, this program has to be started with root permissions.
    # date = get_date()
    # connection = connect(date)
    # reader = connection.cursor()
    # Serve the Flask app with waitress on all interfaces, port 80.
    serve(app, listen='*:80')
    #app.run(host= '0.0.0.0', port=80)
    # Default call to this app
    # app.run()
| 2,367 | 962 |
# Handlers
import json
import logging
from aiohttp import web
def tape_library_handler_wrapper(
    request,
    action_name,
    required_params=None,
    optional_params=None,
    skip_lock_check=False,
):
    """Validate a request, dispatch it to the tape library, and wrap errors.

    Checks required query parameters, the library lock, and the task-queue
    depth, then calls ``library.action_<action_name>(**request.query)``.
    Every failure is raised as an aiohttp HTTP exception whose body is a
    JSON error document.

    Raises
    ------
    Multiple exceptions
    see: https://docs.aiohttp.org/en/latest/web_exceptions.html
    """

    def _error_text(description, reason, error_type, parameter=None):
        # Build the JSON error body; key insertion order matches the
        # document layout the API has always produced.
        details = {"description": description}
        if parameter is not None:
            details["parameter"] = parameter
        details["reason"] = reason
        details["type"] = error_type
        return json.dumps({"error": details})

    # Reject requests with missing or empty required query parameters.
    for param in required_params or ():
        if param not in request.query:
            raise web.HTTPUnprocessableEntity(
                text=_error_text("missing parameter", "undefined",
                                 "parameter", parameter=param)
            )
        if not request.query[param]:
            raise web.HTTPUnprocessableEntity(
                text=_error_text("empty parameter", "empty",
                                 "parameter", parameter=param)
            )

    library = request.app["tape_library"]

    # A locked (non-running) library refuses actions unless explicitly allowed.
    if not library.running and not skip_lock_check:
        raise web.HTTPForbidden(
            text=_error_text("Library is locked", "locked", "lock")
        )

    # Apply backpressure when the internal work queue is already full.
    if library.check_queue_max_depth_reached():
        raise web.HTTPTooManyRequests(
            text=_error_text("to many requests in progress", "full", "taskqueue")
        )

    # Dispatch to the named library action; translate unexpected failures
    # into HTTP 500 while letting deliberate HTTP exceptions pass through.
    if not hasattr(library, "action_" + action_name):
        raise web.HTTPNotImplemented(
            text=_error_text("no such method", "nosuch", "method")
        )
    try:
        data = getattr(library, "action_" + action_name)(**request.query)
    except web.HTTPException:
        raise
    except Exception as excpt:
        logging.exception(action_name)
        raise web.HTTPInternalServerError(
            text=_error_text(str(excpt), "internal", "server")
        )
    return web.json_response(data)
# Handlers that represent the system we simulate.
# NOTE(review): the YAML docstrings below look like OpenAPI route specs
# consumed by a swagger generator (e.g. aiohttp-swagger) — treat them as part
# of the API contract and keep them in sync with the referenced components.

# Move a tape from a storage slot into a drive.
async def load_handle(request):
    """
    ---
    description: Load media from slot to drive.
    tags:
    - mtx
    parameters:
    - in: query
      name: drive
      schema:
        type: string
      required: true
      description: The ID of the drive.
    - in: query
      name: slot
      schema:
        type: string
      required: true
      description: The ID of the slot.
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(
        request, "load", required_params=["slot", "drive"]
    )

# Move a tape from a drive back into a storage slot.
async def unload_handle(request):
    """
    ---
    description: Unload media from drive to slot.
    tags:
    - mtx
    parameters:
    - in: query
      name: drive
      schema:
        type: string
      required: true
      description: The ID of the drive.
    - in: query
      name: slot
      schema:
        type: string
      required: true
      description: The ID of the slot.
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(
        request, "unload", required_params=["drive", "slot"]
    )

# Move a tape between two storage slots.
async def transfer_handle(request):
    """
    ---
    description: Move media from source-slot to target-slot.
    tags:
    - mtx
    parameters:
    - in: query
      name: source
      schema:
        type: string
      required: true
      description: The ID of the source slot.
    - in: query
      name: target
      schema:
        type: string
      required: true
      description: The ID of the target slot.
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(
        request, "transfer", required_params=["source", "target"]
    )

# Park the picker head; takes no parameters.
async def park_handle(request):
    """
    ---
    description: Move the picker head to a safe position and lock the unit.
    tags:
    - mtx
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(request, "park")

# Barcode-scan a single slot.
async def scan_handle(request):
    """
    ---
    description: Perform inventory scan on a slot. Move the picker to the slot
        and barcode scan the tape.
    tags:
    - mtx
    parameters:
    - in: query
      name: slot
      schema:
        type: string
      required: true
      description: The ID of the slot to scan.
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(request, "scan", required_params=["slot"])

# Read-only: report the known inventory. Allowed even while locked.
async def inventory_handle(request):
    """
    ---
    description: Return the known inventory. Use scan command to scan a slot.
        For each slot either the tapeid, true, false, or null is returned. null
        indicates that the slot has not been scanned. false indicate that the
        slot has no tape and true that the slot has a tape but we dont know the ID.
        A real tape library might remember a tapeid as it moves from slot to drive, but the
        simulator is kept dump to simulate the bare minimum required.
    tags:
    - info
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
    """
    return tape_library_handler_wrapper(request, "inventory", skip_lock_check=True)

# Read-only: report sensor values. Allowed even while locked.
async def sensors_handle(request):
    """
    ---
    summary: sensor values
    description: Return sensor values.
    tags:
    - info
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    # TODO(MS): Maybe allow some filter. It could be quite a bit of info.
    return tape_library_handler_wrapper(request, "sensors", skip_lock_check=True)

# Get (and possibly set) configuration. Allowed even while locked.
async def config_handle(request):
    """
    ---
    summary: get/set config
    description: Return configuration, configuration can also be set.
    tags:
    - info
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
        "421":
            $ref: '#/components/responses/HTTPMisdirectedRequest'
        "422":
            $ref: '#/components/responses/HTTPUnprocessableEntity'
    """
    return tape_library_handler_wrapper(request, "config", skip_lock_check=True)

# Read-only: report the library state. Allowed even while locked.
async def state_handle(request):
    """
    ---
    summary: state
    description: Return the library state.
    tags:
    - info
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
    """
    return tape_library_handler_wrapper(request, "state", skip_lock_check=True)

# Lock the library; must bypass the lock check to work on a locked unit.
async def lock_handle(request):
    """
    ---
    summary: lock tape library
    description: Lock the tape library. No actions will be allowed until unlocked.
        This action clears the internal work queue.
    tags:
    - mtx
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
    """
    return tape_library_handler_wrapper(request, "lock", skip_lock_check=True)

# Unlock the library; idempotent when already unlocked.
async def unlock_handle(request):
    """
    ---
    summary: Unlock tape library
    description: Unlock the tape library. Has no side effect if already unlocked.
    tags:
    - mtx
    responses:
        "200":
            $ref: '#/components/responses/Reply200Ack'
        "405":
            $ref: '#/components/responses/HTTPMethodNotAllowed'
    """
    # TODO: Should unlock have a clear_queue argument?
    return tape_library_handler_wrapper(request, "unlock", skip_lock_check=True)
| 10,371 | 2,965 |
import logging
from datetime import datetime, timedelta
from robit.core.health import Health
class Alert:
    """Fires a notification callable when a monitored Health value drops
    below a threshold, rate-limited to one message per configured interval.
    """

    def __init__(
        self,
        **kwargs,
    ):
        """Accepted keyword arguments:

        alert_method: callable invoked with **alert_method_kwargs plus an
            'alert_message' entry when the threshold is crossed.
        alert_method_kwargs: extra kwargs for alert_method (default: {}).
        alert_health_threshold: percentage below which to alert (default 95.0).
        alert_hours_between_messages: minimum hours between alerts (default 24).
        """
        # Fix: the original only assigned self.method when 'alert_method' was
        # present, leaving the attribute undefined and deferring the failure
        # to the first check_health_threshold() call.
        self.method = kwargs.get('alert_method')
        self.method_kwargs = kwargs.get('alert_method_kwargs', dict())
        self.health_threshold = kwargs.get('alert_health_threshold', 95.0)
        self.hours_between_messages = kwargs.get('alert_hours_between_messages', 24)
        # Backdate the last-message time so the very first alert can fire.
        self.last_message_datetime = datetime.now() - timedelta(hours=self.hours_between_messages)

    def check_health_threshold(self, name, health: Health):
        """Alert if `health` is at/below the threshold and the rate limit
        allows it; failures of the alert method are logged, not raised."""
        if datetime.now() < self.last_message_datetime + timedelta(hours=self.hours_between_messages):
            return  # still inside the quiet period
        if health.percentage_hundreds > self.health_threshold:
            return  # healthy enough; nothing to report
        alert_message = f'ALERT: {name} dropped below the {self.health_threshold} percentage health threshold.'
        self.method_kwargs['alert_message'] = alert_message
        try:
            self.method(**self.method_kwargs)
            # Only start a new quiet period after a successful delivery.
            self.last_message_datetime = datetime.now()
            logging.warning(alert_message)
        except Exception as e:
            failed_message = f'ERROR: Alert method failed on exception "{e}"'
            logging.warning(failed_message)
| 1,677 | 469 |
import datetime
import pytz
from utils.timezones import time_zones
from utils.CalculateHours import calc_hours
def validate_schedule(resource, this_tag, customTagName):
    """Parse one scheduling tag on a cloud resource into start/stop decisions.

    The tag value holds one or more schedules joined by '&', each of the form
    'HHMM;HHMM;timezone;daysActive' (stop time first, then start time).
    For each schedule, decides — based on the resource's current state and
    the current time in the schedule's timezone — whether the resource should
    be stopped or started now, and returns a list of schedule dicts.

    NOTE(review): returns the list from inside the tag-match branch; when the
    tag key does not match customTagName the function returns None — callers
    must handle that.
    """
    t = this_tag
    if t['Key'].lower() == customTagName.lower():
        state = resource.state['Name']
        stop_instance = False
        start_instance = False
        all_schedules = []
        # Normalize the tag value: strip spaces, lowercase, split schedules.
        replace_white_space = t['Value'].replace(" ", "")
        replace_white_space = replace_white_space.lower()
        multiSchedule = replace_white_space.split('&')
        for grammar in multiSchedule:
            if grammar == '':
                continue
            ptag = grammar.split(";")
            # Disabled schedules are skipped entirely.
            if ptag[0].lower() in ['inactive', 'alternative']:
                continue
            # Bare 'followthesun' expands to a default global schedule.
            if len(ptag) == 1 and ptag == ['followthesun']:
                ptag = ['1900', '1500', 'pt', 'followthesun']
            elif len(ptag) < 4 \
                    or len(ptag[0]) != 4 \
                    or len(ptag[1]) != 4 \
                    or not ptag[0].isdigit() \
                    or not ptag[1].isdigit():
                print("Invalid expression: '{}' must be of the form '%H%M;%H%M;timezone;<daysActive>' ".format(ptag))
                continue
            stopTime = ptag[0]
            startTime = ptag[1]
            timeZone = ptag[2].lower()
            daysActive = ptag[3].lower()
            isActiveDay = False
            isWeekend = False
            isGlobalWeekend = False
            isLogging = False
            tz = time_zones(timeZone)
            if tz == 'UTC':
                ptz = pytz.UTC
            else:
                ptz = pytz.timezone(tz)
            # Current time in the schedule's zone; nowMax is a 45-minute
            # grace window so a run within 45 min of the trigger still fires.
            now = datetime.datetime.now(tz=ptz).strftime("%H%M")
            nowMax = datetime.datetime.now(tz=ptz) - datetime.timedelta(minutes=45)
            nowMax = nowMax.strftime("%H%M")
            nowDay = datetime.datetime.now(tz=ptz).strftime("%a").lower()
            # Days Interpreter
            if daysActive == "all":
                isActiveDay = True
            elif daysActive == "weekdays":
                weekdays = ['mon', 'tue', 'wed', 'thu', 'fri']
                if nowDay in weekdays:
                    isActiveDay = True
            elif daysActive == "weekends":
                # fri/mon included so the weekend stop/start edges are seen.
                weekends = ["fri", "sat", "sun", "mon"]
                if nowDay in weekends:
                    isActiveDay = True
                    isWeekend = True
            # 1900;1500;pst;followthesun
            elif daysActive == 'followthesun':
                global_weekend = ['fri', 'sat', 'sun']
                if nowDay in global_weekend:
                    isActiveDay = True
                    isGlobalWeekend = True
            else:
                # Explicit comma-separated day list, e.g. 'mon,wed,fri'.
                daysActive = daysActive.split(",")
                for d in daysActive:
                    if d.lower() == nowDay:
                        isActiveDay = True
            if daysActive == 'followthesun':
                # Weekend Stop/Start taking into account all timezones across th globe
                if nowDay in ['fri'] and stopTime >= str(nowMax) and stopTime <= str(now) \
                        and isActiveDay and isGlobalWeekend and state == "running":
                    stop_instance = True
                    isLogging = True
                    print " Global Weekend STOP list", resource.id
                if nowDay in ['sun'] and startTime >= str(nowMax) and startTime <= str(now) \
                        and isActiveDay and isGlobalWeekend and state == "stopped":
                    start_instance = True
                    isLogging = False
                    print " Global Weekend START list", resource.id
            elif daysActive == 'weekends':
                # Basic Weekend Stop
                if nowDay in ['fri'] and stopTime >= str(nowMax) and stopTime <= str(now) \
                        and isActiveDay and isWeekend and state == "running":
                    stop_instance = True
                    isLogging = True
                    print " Weekend STOP list", resource.id
                # Basic Weekend Start
                if nowDay in ['mon'] and startTime >= str(nowMax) and startTime <= str(now) \
                        and isActiveDay and isWeekend and state == "stopped":
                    start_instance = True
                    isLogging = False
                    print " Weekend START list", resource.id
            else:
                # Append to stop list
                if stopTime >= str(nowMax) and stopTime <= str(now) and \
                        isActiveDay and state == "running":
                    stop_instance = True
                    isLogging = True
                    print " added to STOP list", resource.id
                # Append to start list
                if startTime >= str(nowMax) and startTime <= str(now) and \
                        isActiveDay and state == "stopped":
                    start_instance = True
                    isLogging = False
                    print " added to START list", resource.id
            # For logging the implicit weekend
            if daysActive == 'weekdays' and nowDay == 'fri':
                daysActive = 'weekends'
            totalhours = calc_hours(daysActive, stopTime, startTime)
            single_schedule = {
                'resource_id': resource.id,
                'start_instance': start_instance,
                'stop_instance': stop_instance,
                'stop_time': stopTime,
                'start_time': startTime,
                'tz': tz,
                'daysActive': daysActive,
                'grammar': grammar,
                'isLogging': isLogging,
                'TotalHours': totalhours
            }
            all_schedules.append(single_schedule)
        return all_schedules
| 5,823 | 1,547 |
class Solution:
    def isHappy(self, n: int) -> bool:
        """Return True if iterating 'sum of squared digits' from n reaches 1.

        Unhappy numbers fall into a cycle; we detect that by remembering
        every value seen so far.
        """
        seen = {n}
        current = n
        while current > 1:
            current = sum(int(digit) ** 2 for digit in str(current))
            if current in seen:
                return False  # entered a cycle that never reaches 1
            seen.add(current)
        return True
from django.contrib import admin
from .models import PreQuestionnaire
admin.site.register(PreQuestionnaire)
| 110 | 29 |
import numpy as np
from collections import defaultdict
from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union
def _voc_ap(
rec,
prec,
use_07_metric=False,
):
"""Compute VOC AP given precision and recall. If use_07_metric is true, uses
the VOC 07 11-point method (default:False).
"""
if use_07_metric:
# 11 point metric
ap = 0.0
for t in np.arange(0.0, 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap = ap + p / 11.0
else:
# correct AP calculation
# first append sentinel values at the end
mrec = np.concatenate(([0.0], rec, [1.0]))
mpre = np.concatenate(([0.0], prec, [0.0]))
# compute the precision envelope
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# to calculate area under PR curve, look for points
# where X axis (recall) changes value
i = np.where(mrec[1:] != mrec[:-1])[0]
# and sum (\Delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def voc_ap_score(
p_bboxes: List[np.ndarray],
p_scores: List[np.ndarray],
p_classes: List[np.ndarray],
gt_bboxes: List[np.ndarray],
gt_classes: List[np.ndarray],
class_id: int = None,
threshold: float = 0.5,
):
"""
Args:
p_bboxes: a list of predict bboxes
p_scores: a list of predict score for bbox
p_classes: a list of predict class id for bbox
gt_bboxes: a list of ground truth bboxes
gt_classes: a list of true class id for each true bbox
class_id: the class id to compute ap score
threshold: the threshold to ap score
"""
if class_id is not None:
gt_bboxes = [gt_bbox[gt_class == class_id] for gt_class, gt_bbox in zip(gt_classes, gt_bboxes)]
p_bboxes = [p_bbox[p_class == class_id] for p_class, p_bbox in zip(p_classes, p_bboxes)]
p_scores = [p_score[p_class == class_id] for p_class, p_score in zip(p_classes, p_scores)]
p_indexes = [np.array([i] * len(p_bboxes[i])) for i in range(len(p_bboxes))]
p_bboxes, p_scores, p_indexes = (
np.concatenate(p_bboxes),
np.concatenate(p_scores),
np.concatenate(p_indexes),
)
p_sort_indexes = np.argsort(-p_scores)
tp = np.zeros(p_scores.shape[0])
fp = np.zeros(p_scores.shape[0])
gt_bbox_status = defaultdict(set)
for idx, p_sort_index in enumerate(p_sort_indexes):
p_index = int(p_indexes[p_sort_index])
gt_bbox = gt_bboxes[p_index]
p_bbox = p_bboxes[p_sort_index]
vmax = -float("inf")
jmax = -1
if gt_bbox.size > 0:
ixmin = np.maximum(gt_bbox[:, 0], p_bbox[0])
iymin = np.maximum(gt_bbox[:, 1], p_bbox[1])
ixmax = np.minimum(gt_bbox[:, 2], p_bbox[2])
iymax = np.minimum(gt_bbox[:, 3], p_bbox[3])
iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
ih = np.maximum(iymax - iymin + 1.0, 0.0)
inters = iw * ih
uni = (
(p_bbox[2] - p_bbox[0] + 1.0) * (p_bbox[3] - p_bbox[1] + 1.0)
+ (gt_bbox[:, 2] - gt_bbox[:, 0] + 1.0) * (gt_bbox[:, 3] - gt_bbox[:, 1] + 1.0)
- inters
)
overlaps = inters / uni
vmax = np.max(overlaps)
jmax = np.argmax(overlaps)
if vmax > threshold:
if jmax not in gt_bbox_status[p_index]:
tp[idx] = 1
gt_bbox_status[p_index].add(jmax)
else:
fp[idx] = 1
else:
fp[idx] = 1
fp = np.cumsum(fp, axis=0)
tp = np.cumsum(tp, axis=0)
rec = tp / float(sum([len(gt) for gt in gt_bboxes]))
prec = tp / np.maximum(tp + fp, np.finfo(np.float).eps)
ap = _voc_ap(rec, prec)
return ap
def voc_map_score(
    p_bboxes: List[np.ndarray],
    p_scores: List[np.ndarray],
    p_classes: List[np.ndarray],
    gt_bboxes: List[np.ndarray],
    gt_classes: List[np.ndarray],
):
    """Mean AP over all ground-truth classes and IoU thresholds 0.50:0.05:0.95.

    Args:
        p_bboxes: a list of predict bboxes
        p_scores: a list of predict score for bbox
        p_classes: a list of predict class id for bbox
        gt_bboxes: a list of ground truth bboxes
        gt_classes: a list of true class id for each true bbox
    Returns:
        the AP averaged first over classes, then over IoU thresholds
    """
    classes = set(list(np.concatenate(gt_classes)))
    # Per-threshold list of per-class AP values (thresholds as percentages).
    ap_scores = {
        thres: [
            voc_ap_score(
                p_bboxes,
                p_scores,
                p_classes,
                gt_bboxes,
                gt_classes,
                c,
                thres / 100,
            )
            for c in classes
        ]
        for thres in range(50, 100, 5)
    }
    mAP = {iou: np.mean(x) for iou, x in ap_scores.items()}
    return np.mean(list(mAP.values()))
| 5,014 | 1,962 |
import asyncio
from callable import Callable
from signature import signature
import pytest
# Fixture: a plain annotated function used to check rendered signatures.
def f1(a: int, b: str = 'x') -> None:
    pass

# Fixture: a generator-based coroutine.
# NOTE(review): asyncio.coroutine was deprecated in Python 3.8 and removed in
# 3.11, so this module only imports on older interpreters.
@asyncio.coroutine
def f2(a: int, b: str = 'x') -> None:
    pass

# Each case pairs a callable with the exact string signature() must render.
@pytest.mark.parametrize('f,sig', [
    (lambda: None, 'lambda'),
    (lambda a, b: None, 'lambda a, b'),
    (lambda a, b, *args: None, 'lambda a, b, *args'),
    (lambda a, b='x', *args, **kw: None, "lambda a, b='x', *args, **kw"),
    (lambda a, b, *args, c=10, **kw: None, 'lambda a, b, *args, c=10, **kw'),
    (lambda a, b='x', *args, c=10, **kw: None, "lambda a, b='x', *args, c=10, **kw"),
    (f1, "def f1(a:int, b:str='x') -> None"),
    (f2, "def f2(a:int, b:str='x') -> None"),
])
def test_signature_py3(f, sig):
    """signature() renders lambdas and defs back into readable source form."""
    s = signature(f)
    assert s == sig, s

# kwargify() merges positional and keyword arguments into one dict, filling
# in defaults for parameters that were not supplied.
@pytest.mark.parametrize('f,args,kwargs,merged', [
    (lambda x, *args, y=10, **kw: None, (1,), {}, dict(x=1, args=(), y=10, kw={})),
    (lambda x, *, y, z=20: None, (1,), dict(y=10), dict(x=1, y=10, z=20)),
])
def test_kwargify_py3(f, args, kwargs, merged):
    kwargified = Callable(f).kwargify(args, kwargs)
    assert kwargified == merged

# Invalid combinations must raise TypeError.
# NOTE(review): in the first case `(2)` is the int 2, not the tuple (2,) —
# presumably intended as (2,); the TypeError expectation may pass for the
# wrong reason. Confirm against Callable.kwargify's contract.
@pytest.mark.parametrize('f,args,kwargs,exc', [
    (lambda x, *args, y=30: None, (2), {'x': 1}, TypeError),
    (lambda x, *, y, z=20: None, (1,), {}, TypeError),
])
def test_kwargify_exc_py3(f, args, kwargs, exc):
    with pytest.raises(exc):
        Callable(f).kwargify(args, kwargs)
| 1,418 | 629 |
import sys; print('%s %s' % (sys.executable or sys.platform, sys.version))
| 75 | 27 |
# coding: utf-8
##########################################################################
# NSAp - Copyright (C) CEA, 2020
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
This module defines the png dataset loader.
"""
# Third party import
import imageio
# Package import
from .loader_base import LoaderBase
class PNG(LoaderBase):
    """ PNG image loader/saver backed by imageio.
    """
    allowed_extensions = [".png"]

    def load(self, path):
        """ Read a PNG image from disk.

        Parameters
        ----------
        path: str
            location of the PNG file to read.

        Returns
        -------
        data: imageio numpy array
            the image content.
        """
        data = imageio.imread(path)
        return data

    def save(self, data, outpath):
        """ Write an image to disk in PNG format.

        Parameters
        ----------
        data: imageio numpy array
            the image to write.
        outpath: str
            destination path of the PNG file.
        """
        imageio.imwrite(outpath, data)
| 1,293 | 367 |
from src.auth.adapter import *
from src.auth.domain import *
from src.auth.service import *
| 92 | 28 |
class Planning:
    """Represents the Planning phase of a turn.

    Each pilot, visited in skill order, is briefly marked active while its
    owning player chooses a maneuver dial for it.
    """

    def __init__(self, game):
        """Store the game this phase operates on.

        game: The game under way.
        """
        self._game = game

    def execute(self):
        """Run the Planning phase: collect a dial choice for every pilot."""
        for pilot in self._game.pilots_by_skill():
            player = self._game.player(pilot.faction)
            # The pilot is active only for the duration of its decision.
            pilot.active = True
            pilot.chosen_maneuver = player.choose_dial(pilot)
            pilot.active = False
import json
import random
import re
import subprocess
import tempfile
from datetime import timedelta
import cv2
import numpy as np
import requests
from vidaug import augmentors as va
# this is a static build from https://www.johnvansickle.com/ffmpeg/old-releases/ffmpeg-4.4.1-i686-static.tar.xz
# requires new ffmpeg version for:
# - duration of extracted audio == video
# - contains x264 codec in build required for clean video frames
FFMPEG_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffmpeg'
FFPROBE_PATH = '/opt/lip2wav/ffmpeg-4.4.1-i686-static/ffprobe'
OLD_FFMPEG_PATH = 'ffmpeg-2.8.15'
FFMPEG_OPTIONS = '-hide_banner -loglevel panic'
VIDEO_CROP_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -ss {{start_time}} -to {{end_time}} -async 1 {{output_video_path}}'
VIDEO_INFO_COMMAND = f'{FFMPEG_PATH} -i {{input_video_path}}'
VIDEO_DURATION_COMMAND = f'{FFPROBE_PATH} {FFMPEG_OPTIONS} -show_entries format=duration -of default=noprint_wrappers=1:nokey=1 {{video_path}}'
VIDEO_TO_AUDIO_COMMAND = f'{{ffmpeg_path}} {FFMPEG_OPTIONS} -threads 1 -y -i {{input_video_path}} -async 1 -ac 1 -vn -acodec pcm_s16le -ar 16000 {{output_audio_path}}'
VIDEO_CONVERT_FPS_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -strict -2 -filter:v fps=fps={{fps}} {{output_video_path}}' # copies original codecs and metadata (rotation)
VIDEO_SPEED_ALTER_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -filter_complex "[0:v]setpts={{video_speed}}*PTS[v];[0:a]atempo={{audio_speed}}[a]" -map "[v]" -map "[a]" {{output_video_path}}'
VIDEO_REMOVE_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -c copy -an {{output_video_path}}'
VIDEO_ADD_AUDIO_COMMAND = f'{FFMPEG_PATH} {FFMPEG_OPTIONS} -y -i {{input_video_path}} -i {{input_audio_path}} -strict -2 -c:v copy -c:a aac {{output_video_path}}'
def get_num_frames(video_path):
    """Return the frame count OpenCV reports for the given video file."""
    capture = cv2.VideoCapture(video_path)
    try:
        return int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
    finally:
        # Always free the underlying decoder handle.
        capture.release()
def get_video_frame(video_path, index):
    """Return frame number ``index`` of the video, or None if unavailable.

    Frames are decoded sequentially from the start until the requested
    position is reached.
    """
    capture = cv2.VideoCapture(video_path)
    selected = None
    position = 0
    success, frame = capture.read()
    while success:
        if position == index:
            selected = frame
            break
        position += 1
        success, frame = capture.read()
    capture.release()
    return selected
def get_video_duration(video_path):
    """Return the container duration in seconds, as reported by ffprobe."""
    command = VIDEO_DURATION_COMMAND.format(video_path=video_path)
    output = subprocess.check_output(command.split(' '), stderr=subprocess.STDOUT)
    return float(output.decode())
def get_video_rotation(video_path):
    """Return the rotation metadata (degrees) of a video, or 0 if absent.

    Runs ffmpeg's info command and scrapes the ``rotate`` field from the
    stderr output it produces.
    """
    cmd = VIDEO_INFO_COMMAND.format(input_video_path=video_path)
    p = subprocess.Popen(
        cmd.split(' '),
        stderr=subprocess.PIPE,
        close_fds=True
    )
    stdout, stderr = p.communicate()
    try:
        # BUG FIX: the pattern must be a raw string — '\s' and '\d' are
        # invalid escape sequences in a plain string literal and raise a
        # SyntaxWarning (an error-to-be) on modern Python.
        reo_rotation = re.compile(r'rotate\s+:\s(\d+)')
        match_rotation = reo_rotation.search(str(stderr))
        rotation = match_rotation.groups()[0]
    except AttributeError:
        # search() returned None: no rotation metadata in the container.
        # print(f'Rotation not found: {video_path}')
        return 0
    return int(rotation)
def fix_frame_rotation(image, rotation):
    """Rotate ``image`` according to its rotation metadata (90/180/270).

    Any other rotation value leaves the image untouched.
    """
    if rotation == 90:
        return cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    if rotation == 180:
        return cv2.rotate(image, cv2.ROTATE_180)
    if rotation == 270:
        return cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    return image
def get_fps(video_path):
    """Return the (truncated, integer) frames-per-second reported by OpenCV."""
    capture = cv2.VideoCapture(video_path)
    try:
        return int(capture.get(cv2.CAP_PROP_FPS))
    finally:
        capture.release()
def get_video_frames(video_path, rotation):
    """Decode every frame of the video, applying the given rotation fix."""
    reader = cv2.VideoCapture(video_path)
    frames = []
    success, frame = reader.read()
    while success:
        frames.append(fix_frame_rotation(frame, rotation))
        success, frame = reader.read()
    reader.release()
    return frames
def show_frames(video_frames, delay, title):
    """Display frames one at a time in an OpenCV window.

    Parameters:
        video_frames: iterable of image frames to display.
        delay: per-frame wait in milliseconds, passed to cv2.waitKey.
        title: window title used by cv2.imshow.
    """
    for frame in video_frames:
        cv2.imshow(title, frame)
        cv2.waitKey(delay)
def run_video_augmentation(video_path, new_video_path, random_prob=0.5):
    """With probability ``random_prob``, re-encode the video at a random speed.

    The speed factor is drawn uniformly from [0.5, 1.5]; video PTS and audio
    tempo are changed together so A/V stay in sync. Returns the path to use
    afterwards: the new file if augmentation ran, the original otherwise.
    """
    if random.random() < random_prob:
        # https://trac.ffmpeg.org/wiki/How%20to%20speed%20up%20/%20slow%20down%20a%20video
        # speed required between 0 and 2
        # < 1 = slow down
        # > 1 = speed up
        speed = round(random.uniform(0.5, 1.5), 2)
        subprocess.call(VIDEO_SPEED_ALTER_COMMAND.format(
            input_video_path=video_path,
            output_video_path=new_video_path,
            video_speed=round(1. / speed, 2),  # setpts factor is the inverse of the speed
            audio_speed=float(speed)
        ), shell=True)
        return new_video_path
    return video_path
class RandomRotate:
    """Video augmentor that rotates every frame of a clip by a fixed angle."""
    def __init__(self, degrees):
        # Rotation angle in degrees, applied identically to all frames.
        self.degrees = degrees
    def __call__(self, clip):
        # Rotate around the centre of the first frame; frames are assumed to
        # share one shape — TODO confirm against callers.
        image_center = tuple(np.array(clip[0].shape[1::-1]) / 2)
        rot_mat = cv2.getRotationMatrix2D(image_center, self.degrees, 1.0)
        return [cv2.warpAffine(frame, rot_mat, frame.shape[1::-1], flags=cv2.INTER_LINEAR)
                for frame in clip]
def run_frame_augmentation(frames, method, random_prob=0.5, rotation_range=10, intensity_range=30):
    """Apply random augmentations to a list of frames.

    method:
        'full'  - rotate all frames by a random angle in (-rotation_range, rotation_range).
        'mouth' - maybe horizontal-flip and maybe add a random intensity offset.
    Returns frames normalized to uint8 0-255, or None for an unknown method.
    """
    sometimes = lambda aug: va.Sometimes(random_prob, aug)
    random_int = lambda max: np.random.randint(-max, max) # inclusive
    # TODO: Zoom in/out
    if method == 'full':
        seq = va.Sequential([
            RandomRotate(degrees=random_int(rotation_range)), # random rotate of angle between (-degrees, degrees)
        ])
    elif method == 'mouth':
        seq = va.Sequential([
            sometimes(va.HorizontalFlip()), # flip video horizontally
            sometimes(va.Add(random_int(intensity_range))), # add random value to pixels between (-max, max)
        ])
    else:
        print(f'{method} does not exist')
        return
    # normalize frames to 0-255 uint8 dtype
    return [cv2.normalize(src=frame, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)
            for frame in seq(frames)]
def extract_audio(video_path, use_old_ffmpeg=False):
    """Extract a 16 kHz mono PCM WAV track from a video.

    Returns an open NamedTemporaryFile holding the audio; the file is
    removed when the returned handle is closed.
    """
    audio_file = tempfile.NamedTemporaryFile(suffix='.wav')
    ffmpeg_path = OLD_FFMPEG_PATH if use_old_ffmpeg else FFMPEG_PATH
    subprocess.call(VIDEO_TO_AUDIO_COMMAND.format(
        ffmpeg_path=ffmpeg_path,
        input_video_path=video_path,
        output_audio_path=audio_file.name
    ), shell=True)
    return audio_file
def convert_fps(video_path, new_video_path, fps):
    """Re-encode the video at the requested frame rate; return the new path."""
    command = VIDEO_CONVERT_FPS_COMMAND.format(
        input_video_path=video_path,
        output_video_path=new_video_path,
        fps=fps,
    )
    subprocess.call(command, shell=True)
    return new_video_path
def replace_audio(video_path, audio_path, output_video_path):
    """Replace the audio track of a video.

    First copies the video stream without audio into a temp file, then muxes
    ``audio_path`` (AAC-encoded) onto that silent video.
    """
    with tempfile.NamedTemporaryFile(suffix='.mp4') as f:
        # Step 1: strip the original audio (-an) while copying the video stream.
        subprocess.call(VIDEO_REMOVE_AUDIO_COMMAND.format(
            input_video_path=video_path,
            output_video_path=f.name
        ), shell=True)
        # Step 2: add the replacement audio without re-encoding the video.
        subprocess.call(VIDEO_ADD_AUDIO_COMMAND.format(
            input_video_path=f.name,
            input_audio_path=audio_path,
            output_video_path=output_video_path
        ), shell=True)
def get_lip_embeddings(video_path):
    """POST the video to the local lip-embedding service.

    Returns the decoded JSON payload, or None on a non-200 response
    (the raw error body is printed for debugging).
    """
    with open(video_path, 'rb') as f:
        response = requests.post('http://127.0.0.1:6002/lip_embeddings', files={'video': f.read()})
        if response.status_code != 200:
            print(response.content)
            return
        return json.loads(response.content)
def crop(video_path, start, end):
    """Crop ``video_path`` to the time window [start, end] (seconds).

    Writes the result to a fixed path under /tmp (same container format as
    the input) and returns that path.
    """
    # BUG FIX: take the extension after the *last* dot; the original
    # ``split('.')[1]`` produced the wrong suffix for names containing
    # extra dots (e.g. ``clip.v2.mp4`` -> ``v2``).
    suffix = video_path.split('/')[-1].rsplit('.', 1)[-1]
    output_video_path = f'/tmp/cropped_video.{suffix}'
    subprocess.call(VIDEO_CROP_COMMAND.format(
        input_video_path=video_path,
        # timedelta renders H:MM:SS.ffffff; the '0' prefix and [:-3] trim
        # produce 0H:MM:SS.mmm — assumes start/end are floats (TODO confirm).
        start_time='0' + str(timedelta(seconds=start))[:-3],
        end_time='0' + str(timedelta(seconds=end))[:-3],
        output_video_path=output_video_path
    ), shell=True)
    return output_video_path
| 8,098 | 2,997 |
from bingraphvis.base import Content
class AflCovInfo(Content):
    """bingraphvis content element rendering AFL coverage hit counts.

    For each graph node it renders "Hit: <hits> / <total paths>" using the
    coverage data at ``project.kb.cov``.
    """
    def __init__(self, project):
        super(AflCovInfo, self).__init__('aflcovinfo', ['text'])
        # Project whose knowledge base (kb.cov) provides per-node hit counts.
        self.project = project
    def gen_render(self, n):
        """Fill ``n.content`` with a bold, left-aligned hit-count text cell."""
        node = n.obj
        n.content[self.name] = {
            'data': [{
                'text': {
                    'content': "Hit: %d / %d " % (self.project.kb.cov.node_hit_count(node.addr), self.project.kb.cov.nr_of_paths),
                    'style':'B',
                    'align':'LEFT'
                }
            }],
            'columns': self.get_columns()
        }
| 621 | 195 |
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.dbaccessors.couchapps.all_docs import \
get_all_doc_ids_for_domain_grouped_by_db, get_doc_count_by_type, \
delete_all_docs_by_doc_type, get_doc_count_by_domain_type
from dimagi.utils.couch.database import get_db
from django.test import TestCase
class AllDocsTest(TestCase):
    """Tests for the all-docs couch accessors across the main and users DBs."""
    maxDiff = None
    @classmethod
    def setUpClass(cls):
        """Seed one Application doc per domain in the main DB and one
        CommCareUser doc per domain in the users DB."""
        super(AllDocsTest, cls).setUpClass()
        cls.main_db = get_db(None)
        cls.users_db = get_db('users')
        cls.doc_types = ('Application', 'CommCareUser')
        # Start from a clean slate in both databases.
        delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
        delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
        cls.domain1 = 'all-docs-domain1'
        cls.domain2 = 'all-docs-domain2'
        # NOTE(review): these two attributes appear unused by the tests below
        # (the loop creates per-domain docs with different ids) — confirm.
        cls.main_db_doc = {'_id': 'main_db_doc', 'doc_type': 'Application'}
        cls.users_db_doc = {'_id': 'users_db_doc', 'doc_type': 'CommCareUser'}
        for doc_type in cls.doc_types:
            for domain in (cls.domain1, cls.domain2):
                # Applications live in the main DB, users in the users DB.
                db_alias = 'main' if doc_type == 'Application' else 'users'
                doc_id = '{}_db_doc_{}'.format(db_alias, domain)
                doc = {'_id': doc_id, 'doc_type': doc_type, 'domain': domain}
                if doc_type == 'Application':
                    cls.main_db.save_doc(doc)
                else:
                    cls.users_db.save_doc(doc)
    @classmethod
    def tearDownClass(cls):
        """Remove every doc created by setUpClass."""
        delete_all_docs_by_doc_type(cls.main_db, cls.doc_types)
        delete_all_docs_by_doc_type(cls.users_db, cls.doc_types)
        super(AllDocsTest, cls).tearDownClass()
    def test_get_all_doc_ids_for_domain_grouped_by_db(self):
        """Grouping by DB returns domain1's ids under their home DB and
        empty lists for the unrelated DBs."""
        self.assertEqual(
            {key.uri: list(value) for key, value in
             get_all_doc_ids_for_domain_grouped_by_db(self.domain1)},
            {get_db(None).uri: ['main_db_doc_all-docs-domain1'],
             get_db('users').uri: ['users_db_doc_all-docs-domain1'],
             get_db('meta').uri: [],
             get_db('fixtures').uri: [],
             get_db('domains').uri: [],
             get_db('apps').uri: []}
        )
    def test_get_doc_count_by_type(self):
        """Counts are per-DB and per-type: 2 where seeded, 0 elsewhere."""
        self.assertEqual(get_doc_count_by_type(get_db(None), 'Application'), 2)
        self.assertEqual(get_doc_count_by_type(get_db('users'), 'CommCareUser'), 2)
        self.assertEqual(get_doc_count_by_type(get_db(None), 'CommCareUser'), 0)
        self.assertEqual(get_doc_count_by_type(get_db('users'), 'Application'), 0)
    def test_get_doc_count_by_domain_type(self):
        """Counts restricted by (domain, type): 1 per seeded pair, else 0."""
        self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'Application'), 1)
        self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain2, 'Application'), 1)
        self.assertEqual(get_doc_count_by_domain_type(get_db(None), 'other', 'Application'), 0)
        self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'CommCareUser'), 1)
        self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain2, 'CommCareUser'), 1)
        self.assertEqual(get_doc_count_by_domain_type(get_db('users'), 'other', 'CommCareUser'), 0)
        self.assertEqual(get_doc_count_by_domain_type(get_db(None), self.domain1, 'CommCareUser'), 0)
        self.assertEqual(get_doc_count_by_domain_type(get_db('users'), self.domain1, 'Application'), 0)
| 3,418 | 1,232 |
#=======================================================================
# instruction.py
#=======================================================================
from pydgin.utils import r_uint
class Instruction( object ):
  """Wrapper over a 32-bit MIPS instruction word.

  Exposes the standard encoding fields as read-only properties; ``str``
  keeps the disassembly/format string alongside the raw bits.
  """

  def __init__( self, bits, str ):
    self.bits = r_uint( bits )
    self.str  = str

  def _field( self, shift, mask ):
    # Generic field extractor: (bits >> shift) & mask.
    return ( self.bits >> shift ) & mask

  @property
  def rd( self ):
    return self._field( 11, 0x1F )
  @property
  def rt( self ):
    return self._field( 16, 0x1F )
  @property
  def rs( self ):
    return self._field( 21, 0x1F )
  @property
  def fd( self ):
    return self._field( 6, 0x1F )
  @property
  def ft( self ):
    return self._field( 16, 0x1F )
  @property
  def fs( self ):
    return self._field( 11, 0x1F )
  @property
  def imm( self ):
    return self._field( 0, 0xFFFF )
  @property
  def jtarg( self ):
    return self._field( 0, 0x3FFFFFF )
  @property
  def shamt( self ):
    return self._field( 6, 0x1F )
| 913 | 348 |
from autogoal.contrib import find_classes
from autogoal.kb import *
from autogoal.kb import build_pipelines, build_pipeline_graph
from autogoal.contrib.spacy import SpacyNLP
from autogoal.contrib._wrappers import FlagsMerger
import logging
logging.basicConfig(level=logging.INFO)
# Build the space of all pipelines able to map a list of sentences to a
# dense continuous matrix, using every registered autogoal algorithm.
pipeline_space = build_pipeline_graph(
    input=List(Sentence()),
    output=MatrixContinuousDense(),
    registry=find_classes(),
    # registry=[SpacyNLP, FlagsMerger],
    # max_list_depth=1,
)
# Sample a handful of candidate pipelines to inspect the search space.
for i in range(10):
    pipeline = pipeline_space.sample()
    print(pipeline)
| 564 | 189 |
"""Customizable space module that provides different search spaces
implementations.
"""
from opytimizer.spaces.boolean import BooleanSpace
from opytimizer.spaces.grid import GridSpace
from opytimizer.spaces.hyper_complex import HyperComplexSpace
from opytimizer.spaces.search import SearchSpace
from opytimizer.spaces.tree import TreeSpace
| 341 | 90 |
# coding: utf-8
#
# This code is part of lattpy.
#
# Copyright (c) 2021, Dylan Jones
#
# This code is licensed under the MIT License. The copyright notice in the
# LICENSE file in the root directory and this permission notice shall
# be included in all copies or substantial portions of the Software.
"""Spatial algorithms and data structures."""
import math
import numpy as np
import itertools
import matplotlib.pyplot as plt
from scipy.spatial import cKDTree, Voronoi
from typing import Iterable, Sequence, Optional, Union
from .utils import ArrayLike, min_dtype, chain
from .plotting import draw_points, draw_vectors, draw_lines, draw_surfaces
__all__ = [
"distance", "interweave", "vindices", "vrange", "cell_size", "cell_volume",
"compute_vectors", "compute_neighbors", "KDTree", "VoronoiTree", "WignerSeitzCell",
"rx", "ry", "rz", "rotate2d", "rotate3d", "build_periodic_translation_vector"
]
def distance(r1: ArrayLike, r2: ArrayLike, decimals: Optional[int] = None) -> float:
    """ Calculates the euclidian distance bewteen two points.

    Parameters
    ----------
    r1: array_like
        First input point.
    r2: array_like
        Second input point of matching size.
    decimals: int, optional
        Optional decimals to round distance to.

    Returns
    -------
    distance: float
    """
    squared = np.square(r1 - r2)
    dist = math.sqrt(np.sum(squared))
    return dist if decimals is None else round(dist, decimals)
def interweave(arrays: Sequence[np.ndarray]) -> np.ndarray:
    """ Interweaves multiple arrays along the first axis

    Example
    -------
    >>> arr1 = np.array([[1, 1], [3, 3], [5, 5]])
    >>> arr2 = np.array([[2, 2], [4, 4], [6, 6]])
    >>> interweave([arr1, arr2])
    array([[1, 1], [2, 2], [3, 3], [4, 4], [5, 5], [6, 6]])

    Parameters
    ----------
    arrays: (M) Sequence of (N, ...) array_like
        The input arrays to interwave. The shape of all arrays must match.

    Returns
    -------
    interweaved: (M*N, ....) np.ndarray
    """
    num = len(arrays)
    total_rows = sum(arr.shape[0] for arr in arrays)
    out_shape = (total_rows,) + tuple(arrays[0].shape[1:])
    interweaved = np.empty(out_shape, dtype=arrays[0].dtype)
    # Strided assignment places array i at rows i, i+num, i+2*num, ...
    for offset, arr in enumerate(arrays):
        interweaved[offset::num] = arr
    return interweaved
def vindices(limits: Iterable[Sequence[int]], sort_axis: Optional[int] = 0,
             dtype: Optional[Union[int, str, np.dtype]] = None) -> np.ndarray:
    """ Return an array representing the indices of a d-dimensional grid.

    Parameters
    ----------
    limits: (D, 2) array_like
        The limits of the indices for each axis (half-open, like ``range``).
    sort_axis: int, optional
        Optional axis that is used to sort indices (stable sort; None skips it).
    dtype: int or str or np.dtype, optional
        Optional data-type for storing the lattice indices. By default the given limits
        are checked to determine the smallest possible data-type.

    Returns
    -------
    vectors: (N, D) np.ndarray
    """
    if dtype is None:
        dtype = min_dtype(limits, signed=True)
    limits = np.asarray(limits)
    dim = limits.shape[0]
    # Create meshgrid reshape grid to array of indices
    # version 1:
    # axis = np.meshgrid(*(np.arange(*lim, dtype=dtype) for lim in limits))
    # nvecs = np.asarray([np.asarray(a).flatten("F") for a in axis]).T
    # version 2:
    # slices = [slice(lim[0], lim[1], 1) for lim in limits]
    # nvecs = np.mgrid[slices].astype(dtype).reshape(dim, -1).T
    # version 3:
    # np.indices builds zero-based indices; adding the lower limits shifts
    # each axis into its requested range.
    size = limits[:, 1] - limits[:, 0]
    nvecs = np.indices(size, dtype=dtype).reshape(dim, -1).T + limits[:, 0]
    # Optionally sort indices along given axis
    if sort_axis is not None:
        nvecs = nvecs[np.lexsort(nvecs.T[[sort_axis]])]
    return nvecs
def vrange(start=None, *args,
           dtype: Optional[Union[int, str, np.dtype]] = None,
           sort_axis: Optional[int] = 0, **kwargs) -> np.ndarray:
    """ Return evenly spaced vectors within a given interval.

    Positional arguments follow the ``range``/``np.arange`` convention:
    ``vrange(stop)``, ``vrange(start, stop)`` or ``vrange(start, stop, step)``.

    Parameters
    ----------
    start: array_like, optional
        The starting value of the interval. The interval includes this value.
        The default start value is 0.
    stop: array_like
        The end value of the interval.
    step: array_like, optional
        Spacing between values. If `start` and `stop` are sequences and the `step`
        is a scalar the given step size is used for all dimensions of the vectors.
        The default step size is 1.
    sort_axis: int, optional
        Optional axis that is used to sort indices.
    dtype: dtype, optional
        The type of the output array. If `dtype` is not given, infer the data
        type from the other input arguments.

    Returns
    -------
    vectors: (N, D) np.ndarray
    """
    # parse arguments: mimic range()'s positional-argument overloading
    if len(args) == 0:
        stop = start
        start = np.zeros_like(stop)
        step = kwargs.get("step", 1.0)
    elif len(args) == 1:
        stop = args[0]
        step = kwargs.get("step", 1.0)
    else:
        stop, step = args
    start = np.atleast_1d(start)
    stop = np.atleast_1d(stop)
    # Broadcast a scalar step to all dimensions.
    if step is None:
        step = np.ones_like(start)
    elif not hasattr(step, "__len__"):
        step = np.ones_like(start) * step
    # Create grid and reshape to array of vectors
    slices = [slice(i, f, s) for i, f, s in zip(start, stop, step)]
    array = np.mgrid[slices].reshape(len(slices), -1).T
    # Optionally sort array along given axis
    if sort_axis is not None:
        array = array[np.lexsort(array.T[[sort_axis]])]
    return array if dtype is None else array.astype(dtype)
def cell_size(vectors: ArrayLike) -> np.ndarray:
    """ Computes the shape of the box spawned by the given vectors.

    Parameters
    ----------
    vectors: array_like
        The basis vectors defining the cell.

    Returns
    -------
    size: np.ndarray
    """
    upper = np.max(vectors, axis=0)
    # Clamp positive minima to zero so the box always contains the origin.
    lower = np.minimum(np.min(vectors, axis=0), 0)
    return upper - lower
def cell_volume(vectors: ArrayLike) -> float:
    r""" Computes the volume of the unit cell defined by the primitive vectors.

    The volume of the unit-cell in two and three dimensions is defined by

    .. math::
        V_{2d} = \abs{a_1 \cross a_2}, \quad V_{3d} = a_1 \cdot \abs{a_2 \cross a_3}

    For higher dimensions the volume is computed using the determinant:

    .. math::
        V_{d} = \sqrt{\det{A A^T}}

    where .math:`A` is the array of vectors.

    Parameters
    ----------
    vectors: array_like
        The basis vectors defining the cell.

    Returns
    -------
    vol: float
    """
    vectors = np.asarray(vectors)
    dim = len(vectors)
    if dim == 1:
        # BUG FIX: ``float(vectors)`` relied on implicit size-1 ndarray
        # conversion (deprecated in NumPy) and failed for plain nested
        # lists; ``item()`` extracts the single element explicitly.
        v = float(vectors.item())
    elif dim == 2:
        v = np.cross(vectors[0], vectors[1])
    elif dim == 3:
        cross = np.cross(vectors[1], vectors[2])
        v = np.dot(vectors[0], cross)
    else:
        v = np.sqrt(np.linalg.det(np.dot(vectors.T, vectors)))
    return abs(v)
def build_periodic_translation_vector(indices, axs):
    """Build the translation vector (in cell indices) for the periodic axes.

    Parameters
    ----------
    indices: (N, D+1) array_like
        Lattice indices; the last column is excluded from the result
        (presumably the sublattice/alpha index — confirm against callers).
    axs: int or array_like of int
        Axis or axes along which periodicity is applied.

    Returns
    -------
    nvec: (D,) np.ndarray
        Zero everywhere except the periodic axes, where it is max_index + 1.
    """
    limits = np.array([np.min(indices, axis=0), np.max(indices, axis=0)])
    # BUG FIX: ``np.int`` was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin ``int`` is the drop-in replacement.
    nvec = np.zeros(indices.shape[1] - 1, dtype=int)
    for ax in np.atleast_1d(axs):
        nvec[ax] = np.floor(limits[1][ax]) + 1
    return nvec
def compute_vectors(a: float, b: Optional[float] = None, c: Optional[float] = None,
                    alpha: Optional[float] = None, beta: Optional[float] = None,
                    gamma: Optional[float] = None,
                    decimals: Optional[int] = 0) -> np.ndarray:
    """ Computes lattice vectors by the lengths and angles.

    Angles are given in degrees. With only ``a`` a 1D lattice is built; with
    ``a``, ``b`` and ``alpha`` a 2D one; with all six parameters a 3D cell.
    ``decimals`` optionally rounds the resulting vectors.
    """
    if b is None and c is None:
        # 1D: a single lattice constant.
        vectors = [a]
    elif c is None:
        # 2D: place a on the x-axis, b at angle alpha from it.
        alpha = np.deg2rad(alpha)
        ax = a
        bx = b * np.cos(alpha)
        by = b * np.sin(alpha)
        vectors = np.array([
            [ax, 0],
            [bx, by]
        ])
    else:
        # 3D: standard crystallographic construction with a on the x-axis
        # and b in the xy-plane; c follows from the three angles.
        alpha = np.deg2rad(alpha)
        beta = np.deg2rad(beta)
        gamma = np.deg2rad(gamma)
        ax = a
        bx = b * np.cos(gamma)
        by = b * np.sin(gamma)
        cx = c * np.cos(beta)
        cy = (abs(c) * abs(b) * np.cos(alpha) - bx * cx) / by
        cz = np.sqrt(c ** 2 - cx ** 2 - cy ** 2)
        vectors = np.array([
            [ax, 0, 0],
            [bx, by, 0],
            [cx, cy, cz]
        ])
    if decimals:
        vectors = np.round(vectors, decimals=decimals)
    return vectors
# noinspection PyUnresolvedReferences
class KDTree(cKDTree):
    """Simple wrapper of scipy's cKTree with global query settings."""
    def __init__(self, points, k=1, max_dist=np.inf, eps=0., p=2):
        # Persist the query defaults (neighbor count k, distance cutoff,
        # approximation tolerance eps and Minkowski norm p) for reuse.
        super().__init__(points)
        self.max_dist = max_dist
        self.k = k
        self.p = p
        self.eps = eps
    def query_ball_point(self, x, r):
        """Ball query around point(s) x using the stored p/eps settings."""
        return super().query_ball_point(x, r, self.p, self.eps)
    def query_ball_tree(self, other, r):
        """Ball query against another tree using the stored p/eps settings."""
        return super().query_ball_tree(other, r, self.p, self.eps)
    def query_pairs(self, r):
        """Pair query within this tree using the stored p/eps settings."""
        return super().query_pairs(r, self.p, self.eps)
    def query(self, x=None, num_jobs=1, decimals=None, include_zero=False, compact=True):
        """Query the tree and return ``(neighbors, distances)`` arrays.

        Invalid/missing neighbors are marked with index ``self.n`` and
        distance ``np.inf``. Querying defaults to the tree's own points.
        """
        x = self.data if x is None else x
        distances, neighbors = super().query(x, self.k, self.eps, self.p, self.max_dist, num_jobs)
        # Remove zero-distance neighbors and convert dtype
        if not include_zero and np.all(distances[:, 0] == 0):
            distances = distances[:, 1:]
            neighbors = neighbors[:, 1:]
        neighbors = neighbors.astype(min_dtype(self.n, signed=False))
        # Remove neighbors with distance larger than max_dist
        if self.max_dist < np.inf:
            invalid = distances > self.max_dist
            neighbors[invalid] = self.n
            distances[invalid] = np.inf
        # Remove all invalid columns
        if compact:
            mask = np.any(distances != np.inf, axis=0)
            neighbors = neighbors[:, mask]
            distances = distances[:, mask]
        # Round distances
        if decimals is not None:
            distances = np.round(distances, decimals=decimals)
        return neighbors, distances
def compute_neighbors(positions, k=20, max_dist=np.inf, num_jobs=1, decimals=None, eps=0.,
                      include_zero=False, compact=True, x=None):
    """Compute neighbor indices and distances for the given positions.

    Builds a KDTree over ``positions`` and queries it (at ``x``, or at the
    positions themselves by default). Returns ``(neighbors, distances)``.
    """
    # Build tree and query neighbors
    x = positions if x is None else x
    tree = KDTree(positions, k=k, max_dist=max_dist, eps=eps)
    # BUG FIX: KDTree.query returns ``(neighbors, distances)``; the previous
    # unpacking swapped the two names, so callers received the distance array
    # in place of the neighbor indices and vice versa.
    neighbors, distances = tree.query(x, num_jobs, decimals, include_zero, compact)
    return neighbors, distances
class VoronoiTree:
    """Voronoi diagram of a point set with a KD-tree for nearest-cell queries.

    For 1D input the "vertices" are the midpoints of the points (minus the
    origin); for higher dimensions scipy's Voronoi construction is used and
    only ridges with all-finite vertices are kept as edges.
    """
    def __init__(self, points):
        points = np.asarray(points)
        dim = points.shape[1]
        edges = list()
        if dim == 1:
            # 1D special case: cell boundaries are the half-way points.
            vertices = points / 2
            idx = np.where((vertices == np.zeros(vertices.shape[1])).all(axis=1))[0]
            vertices = np.delete(vertices, idx)
            vertices = np.atleast_2d(vertices).T
        else:
            vor = Voronoi(points)
            # Save only finite vertices
            vertices = vor.vertices  # noqa
            for pointidx, simplex in zip(vor.ridge_points, vor.ridge_vertices):  # noqa
                simplex = np.asarray(simplex)
                if np.all(simplex >= 0):
                    edges.append(simplex)
        self.dim = dim
        self.points = points
        self.edges = edges
        self.vertices = vertices
        self.tree = cKDTree(points)  # noqa
        # Cache which input point owns the origin.
        self.origin = self.query(np.zeros(dim))
    def query(self, x, k=1, eps=0):
        """Return (distance, index) of the k nearest input points to x."""
        return self.tree.query(x, k, eps)  # noqa
    def draw(self, ax=None, color="C0", size=3, lw=1, alpha=0.15, point_color="k", point_size=3,
             draw_data=True, points=True, draw=True, fill=True):
        """Plot the diagram (points, vertices, edges and, in 3D, surfaces)."""
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(111, projection="3d" if self.dim == 3 else None)
        if draw_data:
            draw_points(ax, self.points, size=point_size, color=point_color)
            if self.dim > 1:
                draw_vectors(ax, self.points, lw=0.5, color=point_color)
        if points:
            draw_points(ax, self.vertices, size=size, color=color)
        if self.dim == 2 and draw:
            segments = np.array([self.vertices[i] for i in self.edges])
            draw_lines(ax, segments, color=color, lw=lw)
        elif self.dim == 3:
            if draw:
                segments = np.array([self.vertices[np.append(i, i[0])] for i in self.edges])
                draw_lines(ax, segments, color=color, lw=lw)
            if fill:
                surfaces = np.array([self.vertices[i] for i in self.edges])
                draw_surfaces(ax, surfaces, color=color, alpha=alpha)
        if self.dim == 3:
            ax.set_aspect("equal")
        else:
            ax.set_aspect("equal", "box")
        return ax
    def __repr__(self):
        return f"{self.__class__.__name__}(vertices: {len(self.vertices)})"
    def __str__(self):
        return f"vertices:\n{self.vertices}\n" \
               f"egdes:\n{self.edges}"
class WignerSeitzCell(VoronoiTree):
    """Voronoi cell around the origin (the Wigner-Seitz cell of a lattice)."""
    def __init__(self, points):
        super().__init__(points)
        # Index of the input point whose Voronoi cell contains the origin.
        self._root = self.query(np.zeros(self.dim))[1]
    @property
    def limits(self):
        """(D, 2) array of per-axis min/max over the cell vertices."""
        return np.array([np.min(self.vertices, axis=0), np.max(self.vertices, axis=0)]).T
    @property
    def size(self):
        """Extent of the cell's bounding box along each axis."""
        return self.limits[1] - self.limits[0]
    def check(self, points):
        """Boolean mask: which of ``points`` fall inside this cell."""
        cells = np.asarray(self.query(points)[1])
        return cells == self._root
    def arange(self, steps, offset=0.):
        """Per-axis np.arange over the (optionally enlarged) cell limits."""
        limits = self.limits * (1 + offset)
        steps = [steps] * self.dim if not hasattr(steps, "__len__") else steps
        return [np.arange(*lims, step=step) for lims, step in zip(limits, steps)]
    def linspace(self, nums, offset=0.):
        """Per-axis np.linspace over the (optionally enlarged) cell limits."""
        limits = self.limits * (1 + offset)
        nums = [nums] * self.dim if not hasattr(nums, "__len__") else nums
        return [np.linspace(*lims, num=num) for lims, num in zip(limits, nums)]
    def meshgrid(self, nums=None, steps=None, offset=0., check=True):
        """Build a meshgrid over the cell; points outside become NaN if ``check``."""
        if nums is not None:
            grid = np.array(np.meshgrid(*self.linspace(nums, offset)))
        elif steps is not None:
            grid = np.array(np.meshgrid(*self.arange(steps, offset)))
        else:
            raise ValueError("Either the number of points or the step size muste be specified")
        if check:
            # Mask out every grid point lying outside the Wigner-Seitz cell.
            lengths = grid.shape[1:]
            dims = range(len(lengths))
            for item in itertools.product(*[range(n) for n in lengths]):
                point = np.array([grid[d][item] for d in dims])
                if not self.check(point):
                    for d in dims:
                        grid[d][item] = np.nan
        return grid
    def symmetry_points(self):
        """Return (origin, corners, edge_centers, face_centers) of the cell.

        Unavailable entries are None (e.g. edge/face centers in 1D).
        """
        origin = np.zeros((1,))
        corners = self.vertices.copy()
        face_centers = None
        if self.dim == 1:
            return origin, corners, None, None
        elif self.dim == 2:
            edge_centers = np.zeros((len(self.edges), 2))
            for i, simplex in enumerate(self.edges):
                p1, p2 = self.vertices[simplex]
                edge_centers[i] = p1 + (p2 - p1) / 2
        elif self.dim == 3:
            edge_centers = list()
            face_centers = list()
            for i, simplex in enumerate(self.edges):
                edges = self.vertices[simplex]
                # compute face centers
                face_centers.append(np.mean(edges, axis=0))
                # compute edge centers
                for p1, p2 in chain(edges, cycle=True):
                    edge_centers.append(p1 + (p2 - p1) / 2)
            edge_centers = np.asarray(edge_centers)
            face_centers = np.asarray(face_centers)
        else:
            raise NotImplementedError()
        return origin, corners, edge_centers, face_centers
def rx(theta: float) -> np.ndarray:
    """Rotation matrix about the x-axis by angle ``theta`` (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [1, 0, 0],
        [0, c, -s],
        [0, s, c],
    ])
def ry(theta: float) -> np.ndarray:
    """Rotation matrix about the y-axis by angle ``theta`` (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, 0, s],
        [0, 1, 0],
        [-s, 0, c],
    ])
def rz(theta: float) -> np.ndarray:
    """Rotation matrix about the z-axis by angle ``theta`` (radians)."""
    c, s = np.cos(theta), np.sin(theta)
    return np.array([
        [c, -s, 0],
        [s, c, 0],
        [0, 0, 1],
    ])
def rot(thetax: float = 0., thetay: float = 0., thetaz: float = 0.) -> np.ndarray:
    """General rotation matrix, built as R = Rz(thetaz) · Ry(thetay) · Rx(thetax).

    Angles are in radians; zero angles contribute nothing.
    """
    r = np.eye(3)
    if thetaz:
        r = np.dot(r, rz(thetaz))
    if thetay:
        r = np.dot(r, ry(thetay))
    if thetax:
        # BUG FIX: the x-rotation previously applied ``rz`` instead of ``rx``,
        # so any nonzero thetax produced an extra z-rotation.
        r = np.dot(r, rx(thetax))
    return r
def rotate2d(a, theta):
    """Apply the 2x2 block of the z-rotation matrix to 2D point(s) ``a``."""
    rotation = rz(theta)[:2, :2]
    return np.dot(a, rotation)
def rotate3d(a, thetax=0., thetay=0., thetaz=0.):
    """Apply the general 3D rotation matrix to point(s) ``a``."""
    rotation = rot(thetax, thetay, thetaz)
    return np.dot(a, rotation)
| 16,913 | 5,734 |
# SPDX-License-Identifier: BSD
#
# This file is part of Pyosmium.
#
# Copyright (C) 2022 Sarah Hoffmann.
import pytest
import osmium as o
def _run_file(fn):
    """Feed the OSM file ``fn`` through a no-op SimpleHandler, closing the reader."""
    rd = o.io.Reader(fn)
    try:
        o.apply(rd, o.SimpleHandler())
    finally:
        rd.close()
# Smoke tests: each case builds a minimal synthetic OSM file via the
# ``test_data`` fixture and checks that reading it raises nothing.
def test_node_only(test_data):
    _run_file(test_data('n1'))
def test_way_only(test_data):
    _run_file(test_data('w1 Nn1,n2,n3'))
def test_relation_only(test_data):
    _run_file(test_data('r573 Mw1@'))
def test_node_with_tags(test_data):
    _run_file(test_data('n32 Tbar=xx'))
def test_way_with_tags(test_data):
    _run_file(test_data('w5666 Nn1,n2,n3 Tbar=xx'))
def test_relation_with_tags(test_data):
    _run_file(test_data('r573 Mw1@ Tbar=xx'))
def test_broken_timestamp(test_data):
    """A malformed timestamp ('tx') must surface as a RuntimeError on apply."""
    fn = test_data('n1 tx')
    try:
        rd = o.io.Reader(fn)
        with pytest.raises(RuntimeError):
            o.apply(rd, o.SimpleHandler())
    finally:
        rd.close()
def test_file_header(tmp_path):
    """The header of a bodyless OSM file exposes its bounding box and flags."""
    fn = tmp_path / 'empty.xml'
    fn.write_text("""<?xml version='1.0' encoding='UTF-8'?>
<osm version="0.6" generator="test-pyosmium" timestamp="2014-08-26T20:22:02Z">
<bounds minlat="-90" minlon="-180" maxlat="90" maxlon="180"/>
</osm>
""")
    rd = o.io.Reader(str(fn))
    try:
        h = rd.header()
        assert not h.has_multiple_object_versions
        assert h.box().valid()
        # Whole-world box: 360 x 180 = 64800 square degrees.
        assert h.box().size() == 64800.0
    finally:
        rd.close()
| 1,471 | 625 |
def printMatrix(m):
    """Debug helper: print each row of ``m``, then a blank separator line."""
    for i in range(0, len(m)):
        print(m[i])
    print("\n")
def convertInputToReq(data):
    """Rearrange a transition-count matrix into canonical absorbing form.

    Each nonzero count becomes a fraction [count, row_sum]; all-zero
    (terminal) rows and their columns are moved to the front, and identity
    fractions [1, 1] are placed on the terminal diagonal.
    Returns (matrix, number_of_terminal_states).

    NOTE(review): ``matrix1 = data`` aliases the input, so the caller's
    matrix is modified in place — confirm callers do not reuse ``data``.
    """
    matrix1 = data
    width = len(data)
    terminalStates = []
    for i in range(0, width):
        #are all in the row 0?
        all0 = True  # NOTE(review): unused leftover flag
        rowSum = sum(data[i])
        if (rowSum==0):
            terminalStates.append(i)
        else:
            for j in range(0, width):
                if (data[i][j] != 0):
                    matrix1[i][j] = [data[i][j], rowSum]
    #Move each terminal state row to the beginning
    matrix2 = []
    for i in terminalStates:
        matrix2.append(matrix1[i])
    for i in range(0, width):
        if not i in terminalStates:
            matrix2.append(matrix1[i])
    #Move each terminal state column to the beginning
    matrix3 = []
    for i in range(0, width):
        matrix3.append([])
        for j in terminalStates:
            matrix3[i].append(matrix2[i][j])
        for j in range(0, width):
            if not j in terminalStates:
                matrix3[i].append(matrix2[i][j])
    #Add identity elements to the first len(terminalStates) elements
    for i in range(len(terminalStates)):
        matrix3[i][i] = [1, 1]
    return matrix3, len(terminalStates)
def identityMatrix(x):
    """Return an x-by-x matrix with fraction [1, 1] on the diagonal, 0 elsewhere.

    Fractions are represented as [numerator, denominator]; 0 marks a zero entry.
    """
    return [[[1, 1] if row == col else 0 for col in range(x)]
            for row in range(x)]
def gcd(a, b):
    """Euclidean greatest common divisor (returns ``a`` when ``b`` is 0)."""
    while b != 0:
        a, b = b, a % b
    return a
def simplify(c):
    """Reduce fraction ``c`` = [num, den] to lowest terms; pass 0 through."""
    if c == 0:
        return 0
    divisor = gcd(c[0], c[1])
    return [int(c[0] / divisor), int(c[1] / divisor)]
def commonDenomAdd(a, b):
    """Add two fractions ([num, den] or 0), returning a simplified result."""
    if a == 0:
        return b
    if b == 0:
        return a
    # num1/den1 + num2/den2 = (num1*den2 + den1*num2) / (den1*den2)
    return simplify([a[0] * b[1] + a[1] * b[0], a[1] * b[1]])
def simplifyMultiply(a, b):
    """Multiply two fractions ([num, den] or 0); zero absorbs."""
    if a == 0 or b == 0:
        return 0
    return simplify([a[0] * b[0], a[1] * b[1]])
def simplifyDivide(a, b):
    """Divide fraction ``a`` by fraction ``b`` ([num, den] or 0).

    Returns 0 when ``a`` is 0. Division by zero prints "ERROR" and returns
    None (behavior preserved from the original implementation).
    """
    if a == 0:
        return 0
    if b == 0:
        print("ERROR")
        return None
    # (a0/a1) / (b0/b1) = (a0*b1) / (a1*b0)
    return simplify([a[0] * b[1], a[1] * b[0]])
def matrixSubtract(a, b):
    """Return the element-wise difference a - b of two square fraction matrices.

    Entries are fractions [num, den] or 0; neither input is modified.
    """
    returnMat = []
    for i in range(len(a)):
        returnMat.append([])
        for j in range(len(a)):
            entry = b[i][j]
            # BUG FIX: the original negated ``b[i][j]`` in place, silently
            # mutating the caller's matrix; negate a fresh copy instead.
            bNegated = 0 if entry == 0 else [-entry[0], entry[1]]
            returnMat[i].append(commonDenomAdd(a[i][j], bNegated))
    return returnMat
def matrixMinor(a, m, n):
    """Return a copy of square matrix ``a`` with row ``m`` and column ``n`` removed."""
    size = len(a)
    return [[a[i][j] for j in range(size) if j != n]
            for i in range(size) if i != m]
def matrixDeterminant(a):
    """Determinant of a fraction matrix via Laplace expansion along the top row.

    Entries are fractions [num, den] or 0; the result is a fraction or 0.
    """
    if (len(a) == 1):
        return a[0][0]
    else:
        determinant = 0
        for i in range(len(a)):
            #Add contribution to determinant from top row of matrix a
            cofactorMultiplier = (-1)**(i)
            minorMat = matrixMinor(a, 0, i)
            minorDet = matrixDeterminant(minorMat)
            minorDet = simplifyMultiply(minorDet, a[0][i])
            # Apply the alternating cofactor sign in place; minorDet is a
            # freshly built list here, so the mutation is local.
            if (minorDet != 0):
                minorDet[0]*=cofactorMultiplier
            determinant = commonDenomAdd(determinant, minorDet)
        return determinant
def matrixTranspose(a):
    """Return the transpose of the square matrix ``a``."""
    size = len(a)
    return [[a[j][i] for j in range(size)] for i in range(size)]
def matrixInverse(a):
    """Invert a square fraction matrix via the adjugate:
    inverse = transpose(cofactor matrix) / det(a).

    Assumes det(a) != 0; a zero determinant reaches simplifyDivide's
    division-by-zero path.
    """
    #Find cofactor matrix of a
    cofactors = []
    for i in range(0, len(a)):
        cofactors.append([])
        for j in range(0, len(a)):
            #Create submatrix without row i or column j
            subMatrix = matrixMinor(a, i, j)
            #Find determinant of subMatrix
            determinant = matrixDeterminant(subMatrix)
            #Append
            # Apply the (-1)^(i+j) cofactor sign (determinant is a fresh list).
            if (determinant != 0):
                determinant[0]*=((-1)**(i+j))
            cofactors[i].append(determinant)
    cofactorTranspose = matrixTranspose(cofactors)
    aDeterminant = matrixDeterminant(a)
    # Divide every adjugate entry by det(a).
    for i in range(0, len(a)):
        for j in range(0, len(a)):
            cofactorTranspose[i][j] = simplifyDivide(cofactorTranspose[i][j], aDeterminant)
    return cofactorTranspose
def matrixProduct(a, b):
    """Multiply fraction matrices a (m x n) and b (n x p), returning m x p."""
    rows = len(a)
    cols = len(b[0])
    inner = len(b)
    product = []
    for r in range(rows):
        row = []
        for c in range(cols):
            # Dot product of row r of a with column c of b, in fraction form.
            acc = 0
            for k in range(inner):
                acc = commonDenomAdd(acc, simplifyMultiply(a[r][k], b[k][c]))
            row.append(acc)
        product.append(row)
    return product
def getFirstNonzeroElement(a):
    """Return the denominator of the first nonzero fraction in a, else 0."""
    for entry in a:
        if entry != 0:
            return entry[1]
    return 0
def scrapeTopRow(a):
    """Convert the first row of fraction matrix a to integers over a common
    denominator.

    Returns the scaled numerators followed by their sum (which equals the
    common denominator when the row's fractions sum to 1). An empty matrix
    yields [1, 1].
    """
    if not a:
        return [1, 1]
    top = a[0]
    # LCM of all denominators present in the top row.
    common = 1
    for entry in top:
        if entry != 0:
            common = common * entry[1] // gcd(common, entry[1])
    scaled = [
        int(entry[0] * common / entry[1]) if entry != 0 else 0
        for entry in top
    ]
    scaled.append(sum(scaled))
    return scaled
def findR(data, numTerminal):
    """Extract the R sub-matrix: transient-state rows (index >= numTerminal)
    restricted to the terminal-state columns (index < numTerminal)."""
    return [
        [data[row][col] for col in range(numTerminal)]
        for row in range(numTerminal, len(data))
    ]
def findQ(data, numTerminal):
    """Extract the Q sub-matrix: transient-state rows and columns
    (both indices >= numTerminal)."""
    return [
        [data[row][col] for col in range(numTerminal, len(data))]
        for row in range(numTerminal, len(data))
    ]
def solution(m):
    """Absorbing-Markov-chain solver.

    Given the transition matrix m, returns the long-run probabilities of
    ending in each terminal (absorbing) state, expressed as integer
    numerators followed by their common denominator. Uses the standard
    fundamental-matrix construction F = (I - Q)^-1, B = F R.
    """
    # Canonical form: terminal states occupy the first numTerminal rows
    # (findQ/findR slice transient rows starting at numTerminal).
    reqInput = convertInputToReq(m)
    reqMatrix = reqInput[0]
    numTerminal = reqInput[1]
    qMatrix = findQ(reqMatrix, numTerminal)  # transient -> transient
    rMatrix = findR(reqMatrix, numTerminal)  # transient -> terminal
    iminusq = matrixSubtract(identityMatrix(len(reqMatrix)-numTerminal),qMatrix)
    fMatrix = matrixInverse(iminusq)  # fundamental matrix F
    frMatrix = matrixProduct(fMatrix, rMatrix)  # absorption probabilities B
    # First row of B, scaled to integers over a common denominator.
    topRow = scrapeTopRow(frMatrix)
    return topRow
| 6,615 | 2,456 |
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Static analysis only: import the real displayio types so the aliases
    # below are precise for type checkers.
    from displayio import Shape, Group, TileGrid, Palette, Bitmap, OnDiskBitmap
    NativeElement = Group | Shape | TileGrid | Palette | Bitmap | OnDiskBitmap
    NativeContainer = Group
else:
    # At runtime the aliases degrade to `object` so annotations and
    # isinstance checks still work without importing displayio.
    NativeElement = object
    NativeContainer = object
| 302 | 90 |
##############################################################################
#copyright 2013, Hamid MEDJAHED (hmedjahed@prologue.fr) Prologue #
#Licensed under the Apache License, Version 2.0 (the "License"); #
#you may not use this file except in compliance with the License. #
#You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
#Unless required by applicable law or agreed to in writing, software #
#distributed under the License is distributed on an "AS IS" BASIS, #
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
#See the License for the specific language governing permissions and #
#limitations under the License. #
##############################################################################
import HTMLParser
data = '''
<table cellspacing="0" class="table table-bordered table-hover table-condensed" id="data">
<thead>
<tr>
<th class="name">Name</th>
<th class="memory">Memory</th>
<th class="computeunits">
<abbr title="One EC2 Compute Unit provides the equivalent CPU capacity of a 1.0-1.2 GHz 2007 Opteron or 2007 Xeon processor.">Compute Units</abbr>
</th>
<th class="storage">Storage</th>
<th class="architecture">Architecture</th>
<th class="ioperf">I/O Performance</th>
<th class="maxips">
<abbr title="Adding additional IPs requires launching the instance in a VPC.">Max IPs</abbr>
</th>
<th class="apiname">API Name</th>
<th class="cost">Linux cost</th>
<th class="cost">Windows cost</th>
</tr>
</thead>
<tbody>
<tr>
<td class="name">M1 Small</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="1">1</span></td>
<td class="storage"><span sort="160">160 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">8</td>
<td class="apiname">m1.small</td>
<td class="cost" hour_cost="0.060">$0.060 per hour</td>
<td class="cost" hour_cost="0.115">$0.115 per hour</td>
</tr>
<tr>
<td class="name">M1 Medium</td>
<td class="memory"><span sort="3.75">3.75 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="410">410 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">m1.medium</td>
<td class="cost" hour_cost="0.12">$0.12 per hour</td>
<td class="cost" hour_cost="0.23">$0.23 per hour</td>
</tr>
<tr>
<td class="name">M1 Large</td>
<td class="memory"><span sort="7.5">7.50 GB</span></td>
<td class="computeunits"><span sort="4">4</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">30</td>
<td class="apiname">m1.large</td>
<td class="cost" hour_cost="0.24">$0.24 per hour</td>
<td class="cost" hour_cost="0.46">$0.46 per hour</td>
</tr>
<tr>
<td class="name">M1 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="8">8</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">60</td>
<td class="apiname">m1.xlarge</td>
<td class="cost" hour_cost="0.48">$0.48 per hour</td>
<td class="cost" hour_cost="0.92">$0.92 per hour</td>
</tr>
<tr>
<td class="name">Micro</td>
<td class="memory"><span sort="0.6">0.60 GB</span></td>
<td class="computeunits"><span sort="2">2</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="0">Low</span></td>
<td class="maxips">1</td>
<td class="apiname">t1.micro</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
<td class="cost" hour_cost="0.02">$0.02 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Extra Large</td>
<td class="memory"><span sort="17.10">17.10 GB</span></td>
<td class="computeunits"><span sort="6.5">6.5</span></td>
<td class="storage"><span sort="420">420 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m2.xlarge</td>
<td class="cost" hour_cost="0.41">$0.41 per hour</td>
<td class="cost" hour_cost="0.57">$0.57 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Double Extra Large</td>
<td class="memory"><span sort="34.2">34.20 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="850">850 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m2.2xlarge</td>
<td class="cost" hour_cost="0.82">$0.82 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">High-Memory Quadruple Extra Large</td>
<td class="memory"><span sort="68.4">68.40 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="3">High</span></td>
<td class="maxips">240</td>
<td class="apiname">m2.4xlarge</td>
<td class="cost" hour_cost="1.64">$1.64 per hour</td>
<td class="cost" hour_cost="2.28">$2.28 per hour</td>
</tr>
<tr>
<td class="name">M3 Extra Large</td>
<td class="memory"><span sort="15">15.00 GB</span></td>
<td class="computeunits"><span sort="13">13</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">60</td>
<td class="apiname">m3.xlarge</td>
<td class="cost" hour_cost="0.50">$0.50 per hour</td>
<td class="cost" hour_cost="0.98">$0.98 per hour</td>
</tr>
<tr>
<td class="name">M3 Double Extra Large</td>
<td class="memory"><span sort="30">30.00 GB</span></td>
<td class="computeunits"><span sort="26">26</span></td>
<td class="storage"><span sort="0">0 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">120</td>
<td class="apiname">m3.2xlarge</td>
<td class="cost" hour_cost="1.00">$1.00 per hour</td>
<td class="cost" hour_cost="1.96">$1.96 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Medium</td>
<td class="memory"><span sort="1.7">1.70 GB</span></td>
<td class="computeunits"><span sort="5">5</span></td>
<td class="storage"><span sort="350">350 GB</span></td>
<td class="architecture">32/64-bit</td>
<td class="ioperf"><span sort="1">Moderate</span></td>
<td class="maxips">12</td>
<td class="apiname">c1.medium</td>
<td class="cost" hour_cost="0.145">$0.145 per hour</td>
<td class="cost" hour_cost="0.285">$0.285 per hour</td>
</tr>
<tr>
<td class="name">High-CPU Extra Large</td>
<td class="memory"><span sort="7">7.00 GB</span></td>
<td class="computeunits"><span sort="20">20</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="2">High</span></td>
<td class="maxips">60</td>
<td class="apiname">c1.xlarge</td>
<td class="cost" hour_cost="0.58">$0.58 per hour</td>
<td class="cost" hour_cost="1.14">$1.14 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Quadruple Extra Large</td>
<td class="memory"><span sort="23">23.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cc1.4xlarge</td>
<td class="cost" hour_cost="1.30">$1.30 per hour</td>
<td class="cost" hour_cost="1.61">$1.61 per hour</td>
</tr>
<tr>
<td class="name">Cluster Compute Eight Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="3370">3370 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">240</td>
<td class="apiname">cc2.8xlarge</td>
<td class="cost" hour_cost="2.40">$2.40 per hour</td>
<td class="cost" hour_cost="2.97">$2.97 per hour</td>
</tr>
<tr>
<td class="name">Cluster GPU Quadruple Extra Large</td>
<td class="memory"><span sort="22">22.00 GB</span></td>
<td class="computeunits"><span sort="33.5">33.5</span></td>
<td class="storage"><span sort="1690">1690 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cg1.4xlarge</td>
<td class="cost" hour_cost="2.10">$2.10 per hour</td>
<td class="cost" hour_cost="2.60">$2.60 per hour</td>
</tr>
<tr>
<td class="name">High I/O Quadruple Extra Large</td>
<td class="memory"><span sort="60.5">60.50 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="2048">2048 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">hi1.4xlarge</td>
<td class="cost" hour_cost="3.10">$3.10 per hour</td>
<td class="cost" hour_cost="3.58">$3.58 per hour</td>
</tr>
<tr>
<td class="name">High Storage Eight Extra Large</td>
<td class="memory"><span sort="117.00">117.00 GB</span></td>
<td class="computeunits"><span sort="35">35</span></td>
<td class="storage"><span sort="49152">48 TB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">hs1.8xlarge</td>
<td class="cost" hour_cost="4.600">$4.600 per hour</td>
<td class="cost" hour_cost="4.931">$4.931 per hour</td>
</tr>
<tr>
<td class="name">High Memory Cluster Eight Extra Large</td>
<td class="memory"><span sort="244.00">244.00 GB</span></td>
<td class="computeunits"><span sort="88">88</span></td>
<td class="storage"><span sort="240">240 GB</span></td>
<td class="architecture">64-bit</td>
<td class="ioperf"><span sort="4">Very High</span></td>
<td class="maxips">1</td>
<td class="apiname">cr1.8xlarge</td>
<td class="cost" hour_cost="3.500">$3.500 per hour</td>
<td class="cost" hour_cost="3.831">$3.831 per hour</td>
</tr>
</tbody>
</table> '''
class TableParser(HTMLParser.HTMLParser):
    """Collect the text content of every <td> cell into self.flavors.

    Fix: handle_endtag previously cleared in_td on ANY closing tag, so any
    cell text appearing after a nested element's close tag (e.g. after a
    </span> inside a <td>) was dropped; capture now ends only at </td>.
    (Python 2 HTMLParser module; on Python 3 this would be html.parser.)
    """
    def __init__(self):
        HTMLParser.HTMLParser.__init__(self)
        self.in_td = False    # True while between <td> and </td>
        self.flavors = []     # accumulated cell text fragments, in order
    def handle_starttag(self, tag, attrs):
        if tag == 'td':
            self.in_td = True
    def handle_data(self, data):
        # Whitespace runs between nested tags inside a cell are captured
        # too, matching the original behaviour for simple cells.
        if self.in_td:
            self.flavors.append(data)
    def handle_endtag(self, tag):
        # Only a closing </td> ends cell capture (was: any end tag).
        if tag == 'td':
            self.in_td = False
| 13,279 | 4,775 |
from django.contrib import admin
from django.http import HttpResponseRedirect
from django.urls import path
from faker import Faker
from .models import ProjectRequest
from .utils import create_project_request
@admin.register(ProjectRequest)
class ProjectRequestAdmin(admin.ModelAdmin):
    """Admin for ProjectRequest with an extra endpoint that seeds a fake
    request (useful for manual testing from the changelist page)."""

    change_list_template = "project_requests/admin/project_requests_changelist.html"

    def get_urls(self):
        """Prepend the create-fake endpoint to the default admin URLs."""
        extra_urls = [
            path("create-fake/", self.create_fake),
        ]
        return extra_urls + super().get_urls()

    def create_fake(self, request):
        """Create one Faker-populated ProjectRequest, advance its task to
        'receive', then bounce back to the changelist."""
        fake = Faker()
        # Field values are generated in this exact order (each call consumes
        # the Faker RNG stream).
        fields = {
            "location": fake.country(),
            "description": fake.text(),
            "changemaker_name": fake.name(),
            "date_of_birth": fake.date(),
            "project_name": fake.word(),
            "email": fake.email(),
            "google_doc_url": fake.url(),
            "description_url": fake.url(),
        }
        new_request = create_project_request(**fields)
        new_request.task.transition("receive", {})
        return HttpResponseRedirect("../")
| 1,105 | 299 |
import numpy as np
from gym import wrappers
from reinforcement_learning.utils.utils import decrement_eps, EPS_DEC_LINEAR, pickle_save
from reinforcement_learning.tabular_RL.utils import init_v, init_q, init_q1_q2, \
max_action_q, max_action_q1_q2, eps_greedy_q, eps_greedy_q1_q2, print_v
class TD0PredictionModel:
    """Tabular TD(0) policy evaluation over a discretized gym environment.

    Estimates the state-value function V for a fixed policy by running
    `episodes` episodes and applying the one-step update
    V[s] += alpha * (r + gamma * V[s'] - V[s]).
    """

    def __init__(self, custom_env, episodes=50000, alpha=0.1, gamma=None):
        # custom_env wraps a gym env and supplies get_state() discretization
        # plus defaults such as GAMMA.
        self.custom_env = custom_env
        self.env = custom_env.env
        self.action_space_size = self.env.action_space.n
        self.states = custom_env.states
        self.episodes = episodes
        # Per-episode bookkeeping, filled in during evaluation.
        self.totalSteps = np.zeros(episodes)
        self.totalScores = np.zeros(episodes)
        self.totalAccumulatedScores = np.zeros(episodes)
        self.ALPHA = alpha
        # Discount factor: explicit argument wins, then the env's own, then 0.9.
        if gamma is not None:
            self.GAMMA = gamma
        elif custom_env.GAMMA is not None:
            self.GAMMA = custom_env.GAMMA
        else:
            self.GAMMA = 0.9

    def perform_td0_policy_evaluation(self, policy, print_info=False, visualize=False, record=False):
        """Run TD(0) evaluation of `policy` (a state -> action callable).

        Returns (V, totalScores, totalAccumulatedScores). Optionally renders
        the last episode and/or records the first and last episodes.
        """
        if record:
            # Record video for the first and last episodes only.
            self.env = wrappers.Monitor(
                self.env, 'recordings/TD0-PE/', force=True,
                video_callable=lambda episode_id: episode_id == 0 or episode_id == (self.episodes - 1)
            )
        V = init_v(self.states)
        accumulated_scores = 0
        print('\n', 'Game Started', '\n')
        for i in range(self.episodes):
            done = False
            ep_steps = 0
            ep_score = 0
            observation = self.env.reset()
            s = self.custom_env.get_state(observation)
            if visualize and i == self.episodes - 1:
                self.env.render()
            while not done:
                a = policy(s)
                # print(observation, s, a) # for debugging purposes
                observation_, reward, done, info = self.env.step(a)
                ep_steps += 1
                ep_score += reward
                accumulated_scores += reward
                s_ = self.custom_env.get_state(observation_)
                # One-step TD(0) update toward the bootstrapped target.
                V[s] += self.ALPHA * (reward + self.GAMMA * V[s_] - V[s])
                # option: instead of the (V[s] += ...) line:
                # value = weights.dot(s)
                # value_ = weights.dot(s_)
                # weights += self.ALPHA / dt * (reward + self.GAMMA * value_ - value) * s
                observation, s = observation_, s_
                if visualize and i == self.episodes - 1:
                    self.env.render()
            # Progress report roughly 10 times over the run.
            if self.episodes < 10 or (i + 1) % (self.episodes // 10) == 0:
                print('episode %d - score: %d, steps: %d' % (i + 1, ep_score, ep_steps))
            self.totalSteps[i] = ep_steps
            self.totalScores[i] = ep_score
            self.totalAccumulatedScores[i] = accumulated_scores
            if visualize and i == self.episodes - 1:
                self.env.close()
        if print_info:
            print_v(V)
        print('\n', 'Game Ended', '\n')
        return V, self.totalScores, self.totalAccumulatedScores
class TD0ControlModel:
    """Tabular TD(0) control over a discretized gym environment.

    On-policy:
        SARSA
        Expected SARSA
    Off-policy:
        Q Learning
        Double Q Learning

    All four trainers share the same episode loop, epsilon-greedy schedule,
    and per-episode bookkeeping; they differ only in the Q-table update.
    """

    def __init__(self, custom_env, episodes=50000, alpha=0.1, gamma=None,
                 eps_max=1.0, eps_min=None, eps_dec=None, eps_dec_type=EPS_DEC_LINEAR):
        # custom_env wraps a gym env and supplies get_state() discretization
        # plus defaults such as GAMMA and EPS_MIN.
        self.custom_env = custom_env
        self.env = custom_env.env
        self.action_space_size = self.env.action_space.n
        self.states = custom_env.states
        self.episodes = episodes
        # Per-episode bookkeeping, filled in during training.
        self.totalSteps = np.zeros(episodes)
        self.totalScores = np.zeros(episodes)
        self.totalAccumulatedScores = np.zeros(episodes)
        self.ALPHA = alpha
        # Discount factor: explicit argument wins, then the env's own, then 0.9.
        if gamma is not None:
            self.GAMMA = gamma
        elif custom_env.GAMMA is not None:
            self.GAMMA = custom_env.GAMMA
        else:
            self.GAMMA = 0.9
        # Epsilon-greedy exploration schedule (EPS decays each episode).
        self.EPS = eps_max
        self.eps_max = eps_max
        if eps_min is not None:
            self.eps_min = eps_min
        elif custom_env.EPS_MIN is not None:
            self.eps_min = custom_env.EPS_MIN
        else:
            self.eps_min = 0.0
        if eps_dec is not None:
            self.eps_dec = eps_dec
        else:
            # will arrive to eps_min after half the episodes:
            self.eps_dec = (self.eps_max - self.eps_min) * 2 / self.episodes
        self.eps_dec_type = eps_dec_type

    def perform_sarsa(self, visualize=False, record=False, pickle=False):
        """Train with on-policy SARSA; returns (Q, scores, accumulated scores).

        With pickle=True the Q table is loaded from / saved to disk via the
        custom_env file name.
        """
        if record:
            # Record video for the first and last episodes only.
            self.env = wrappers.Monitor(
                self.env, 'recordings/SARSA/', force=True,
                video_callable=lambda episode_id: episode_id == 0 or episode_id == (self.episodes - 1)
            )
        Q = init_q(self.states, self.action_space_size, self.custom_env.file_name, pickle)
        accumulated_scores = 0
        print('\n', 'Game Started', '\n')
        for i in range(self.episodes):
            done = False
            ep_steps = 0
            ep_score = 0
            observation = self.env.reset()
            s = self.custom_env.get_state(observation)
            # SARSA picks the first action before the loop; subsequent actions
            # come from the same eps-greedy policy (on-policy).
            a = eps_greedy_q(Q, s, self.action_space_size, self.EPS, self.env)
            if visualize and i == self.episodes - 1:
                self.env.render()
            while not done:
                observation_, reward, done, info = self.env.step(a)
                ep_steps += 1
                ep_score += reward
                accumulated_scores += reward
                s_ = self.custom_env.get_state(observation_)
                a_ = eps_greedy_q(Q, s_, self.action_space_size, self.EPS, self.env)
                # SARSA update: target uses the action actually taken next.
                Q[s, a] += self.ALPHA * (reward + self.GAMMA * Q[s_, a_] - Q[s, a])
                observation, s, a = observation_, s_, a_
                if visualize and i == self.episodes - 1:
                    self.env.render()
            # Progress report roughly 10 times over the run.
            if self.episodes < 10 or (i + 1) % (self.episodes // 10) == 0:
                print('episode %d - eps: %.2f, score: %d, steps: %d' % (i + 1, self.EPS, ep_score, ep_steps))
            self.EPS = decrement_eps(self.EPS, self.eps_min, self.eps_dec, self.eps_dec_type)
            self.totalSteps[i] = ep_steps
            self.totalScores[i] = ep_score
            self.totalAccumulatedScores[i] = accumulated_scores
            if visualize and i == self.episodes - 1:
                self.env.close()
        print('\n', 'Game Ended', '\n')
        if pickle:
            pickle_save(Q, self.custom_env.file_name + '-q-table')
        return Q, self.totalScores, self.totalAccumulatedScores

    def perform_expected_sarsa(self, visualize=False, record=False, pickle=False):
        """Train with Expected SARSA; returns (Q, scores, accumulated scores)."""
        if record:
            # Record video for the first and last episodes only.
            self.env = wrappers.Monitor(
                self.env, 'recordings/E-SARSA/', force=True,
                video_callable=lambda episode_id: episode_id == 0 or episode_id == (self.episodes - 1)
            )
        Q = init_q(self.states, self.action_space_size, self.custom_env.file_name, pickle)
        accumulated_scores = 0
        print('\n', 'Game Started', '\n')
        for i in range(self.episodes):
            done = False
            ep_steps = 0
            ep_score = 0
            observation = self.env.reset()
            s = self.custom_env.get_state(observation)
            if visualize and i == self.episodes - 1:
                self.env.render()
            while not done:
                a = eps_greedy_q(Q, s, self.action_space_size, self.EPS, self.env)
                observation_, reward, done, info = self.env.step(a)
                ep_steps += 1
                ep_score += reward
                accumulated_scores += reward
                s_ = self.custom_env.get_state(observation_)
                # NOTE(review): this is a uniform average over actions; true
                # Expected SARSA weights Q[s_, a] by the eps-greedy policy
                # probabilities — confirm whether the uniform mean is intended.
                expected_value = np.mean(np.array([Q[s_, a] for a in range(self.action_space_size)]))
                Q[s, a] += self.ALPHA * (reward + self.GAMMA * expected_value - Q[s, a])
                observation, s = observation_, s_
                if visualize and i == self.episodes - 1:
                    self.env.render()
            # Progress report roughly 10 times over the run.
            if self.episodes < 10 or (i + 1) % (self.episodes // 10) == 0:
                print('episode %d - eps: %.2f, score: %d, steps: %d' % (i + 1, self.EPS, ep_score, ep_steps))
            self.EPS = decrement_eps(self.EPS, self.eps_min, self.eps_dec, self.eps_dec_type)
            self.totalSteps[i] = ep_steps
            self.totalScores[i] = ep_score
            self.totalAccumulatedScores[i] = accumulated_scores
            if visualize and i == self.episodes - 1:
                self.env.close()
        print('\n', 'Game Ended', '\n')
        if pickle:
            pickle_save(Q, self.custom_env.file_name + '-q-table')
        return Q, self.totalScores, self.totalAccumulatedScores

    def perform_q_learning(self, visualize=False, record=False, pickle=False):
        """Train with off-policy Q-Learning; returns (Q, scores, accumulated
        scores)."""
        if record:
            # Record video for the first and last episodes only.
            self.env = wrappers.Monitor(
                self.env, 'recordings/Q-L/', force=True,
                video_callable=lambda episode_id: episode_id == 0 or episode_id == (self.episodes - 1)
            )
        Q = init_q(self.states, self.action_space_size, self.custom_env.file_name, pickle)
        accumulated_scores = 0
        print('\n', 'Game Started', '\n')
        for i in range(self.episodes):
            done = False
            ep_steps = 0
            ep_score = 0
            observation = self.env.reset()
            s = self.custom_env.get_state(observation)
            if visualize and i == self.episodes - 1:
                self.env.render()
            while not done:
                a = eps_greedy_q(Q, s, self.action_space_size, self.EPS, self.env)
                observation_, reward, done, info = self.env.step(a)
                ep_steps += 1
                ep_score += reward
                accumulated_scores += reward
                s_ = self.custom_env.get_state(observation_)
                # Off-policy target: greedy action in s_, regardless of what
                # the behaviour policy will actually do next.
                a_ = max_action_q(Q, s_, self.action_space_size)
                Q[s, a] += self.ALPHA * (reward + self.GAMMA * Q[s_, a_] - Q[s, a])
                # Q[s, a] += self.ALPHA * (reward + self.GAMMA * np.max(Q[s_, :]) - Q[s, a]) # if Q is a numpy.ndarray
                observation, s = observation_, s_
                if visualize and i == self.episodes - 1:
                    self.env.render()
            # Progress report roughly 10 times over the run.
            if self.episodes < 10 or (i + 1) % (self.episodes // 10) == 0:
                print('episode %d - eps: %.2f, score: %d, steps: %d' % (i + 1, self.EPS, ep_score, ep_steps))
            self.EPS = decrement_eps(self.EPS, self.eps_min, self.eps_dec, self.eps_dec_type)
            self.totalSteps[i] = ep_steps
            self.totalScores[i] = ep_score
            self.totalAccumulatedScores[i] = accumulated_scores
            if visualize and i == self.episodes - 1:
                self.env.close()
        print('\n', 'Game Ended', '\n')
        if pickle:
            pickle_save(Q, self.custom_env.file_name + '-q-table')
        return Q, self.totalScores, self.totalAccumulatedScores

    def perform_double_q_learning(self, visualize=False, record=False):
        """Train with Double Q-Learning using two tables Q1, Q2; returns
        (Q1, Q2, scores, accumulated scores). No pickle support here."""
        if record:
            # Record video for the first and last episodes only.
            self.env = wrappers.Monitor(
                self.env, 'recordings/D-Q-L/', force=True,
                video_callable=lambda episode_id: episode_id == 0 or episode_id == (self.episodes - 1)
            )
        Q1, Q2 = init_q1_q2(self.states, self.action_space_size)
        accumulated_scores = 0
        print('\n', 'Game Started', '\n')
        for i in range(self.episodes):
            done = False
            ep_steps = 0
            ep_score = 0
            observation = self.env.reset()
            s = self.custom_env.get_state(observation)
            if visualize and i == self.episodes - 1:
                self.env.render()
            while not done:
                # Behaviour policy acts on both tables combined.
                a = eps_greedy_q1_q2(Q1, Q2, s, self.action_space_size, self.EPS, self.env)
                observation_, reward, done, info = self.env.step(a)
                ep_steps += 1
                ep_score += reward
                accumulated_scores += reward
                s_ = self.custom_env.get_state(observation_)
                # Randomly pick which table to update (classic Double Q).
                rand = np.random.random()
                if rand <= 0.5:
                    # NOTE(review): (Q1, Q1) presumably makes the helper's
                    # argmax depend on Q1 alone; the chosen action is then
                    # evaluated with Q2 — confirm max_action_q1_q2 semantics.
                    a_ = max_action_q1_q2(Q1, Q1, s_, self.action_space_size)
                    Q1[s, a] += self.ALPHA * (reward + self.GAMMA * Q2[s_, a_] - Q1[s, a])
                else: # elif rand > 0.5
                    a_ = max_action_q1_q2(Q2, Q2, s_, self.action_space_size)
                    Q2[s, a] += self.ALPHA * (reward + self.GAMMA * Q1[s_, a_] - Q2[s, a])
                observation, s = observation_, s_
                if visualize and i == self.episodes - 1:
                    self.env.render()
            # Progress report roughly 10 times over the run.
            if self.episodes < 10 or (i + 1) % (self.episodes // 10) == 0:
                print('episode %d - eps: %.2f, score: %d, steps: %d' % (i + 1, self.EPS, ep_score, ep_steps))
            self.EPS = decrement_eps(self.EPS, self.eps_min, self.eps_dec, self.eps_dec_type)
            self.totalSteps[i] = ep_steps
            self.totalScores[i] = ep_score
            self.totalAccumulatedScores[i] = accumulated_scores
            if visualize and i == self.episodes - 1:
                self.env.close()
        print('\n', 'Game Ended', '\n')
        return Q1, Q2, self.totalScores, self.totalAccumulatedScores
| 13,477 | 4,537 |