text string | size int64 | token_count int64 |
|---|---|---|
#!/usr/bin/env python
import subprocess
import socket
import ssl
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
def write_config(filename, port1, port2):
    """Write a mosquitto broker config that bridges out over SSL.

    Args:
        filename: path of the config file to create (overwritten if present).
        port1: port of the fake "remote" broker this bridge connects to.
        port2: local listener port for the broker under test.
    """
    config_lines = [
        "port %d\n" % (port2),
        "\n",
        "connection bridge_test\n",
        "address 127.0.0.1:%d\n" % (port1),
        "topic bridge/# both 0\n",
        "notifications false\n",
        "restart_timeout 2\n",
        "\n",
        "bridge_cafile ../ssl/all-ca.crt\n",
        "bridge_insecure true\n",
    ]
    with open(filename, 'w') as f:
        f.writelines(config_lines)
# Allocate two free ports: port1 = fake "remote" TLS listener we run here,
# port2 = the local mosquitto broker under test.
(port1, port2) = mosq_test.get_port(2)
conf_file = os.path.basename(__file__).replace('.py', '.conf')
write_config(conf_file, port1, port2)

rc = 1
keepalive = 60
client_id = socket.gethostname()+".bridge_test"
# proto_ver 128+4: protocol version 4 with the high "bridge" bit set.
connect_packet = mosq_test.gen_connect(client_id, keepalive=keepalive, clean_session=False, proto_ver=128+4)
connack_packet = mosq_test.gen_connack(rc=0)

mid = 1
subscribe_packet = mosq_test.gen_subscribe(mid, "bridge/#", 0)
suback_packet = mosq_test.gen_suback(mid, 0)
publish_packet = mosq_test.gen_publish("bridge/ssl/test", qos=0, payload="message")

# Act as the remote end of the bridge: a TLS server socket on port1.
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
# NOTE(review): ssl.wrap_socket is deprecated (removed in Python 3.12);
# SSLContext.wrap_socket is the replacement -- confirm the Python versions
# this test suite must support.
ssock = ssl.wrap_socket(sock, ca_certs="../ssl/all-ca.crt", keyfile="../ssl/server.key", certfile="../ssl/server.crt", server_side=True, ssl_version=ssl.PROTOCOL_TLSv1)
ssock.settimeout(20)
ssock.bind(('', port1))
ssock.listen(5)

broker = mosq_test.start_broker(filename=os.path.basename(__file__), port=port2, use_conf=True)

try:
    # Accept the broker's bridge connection and walk it through the
    # handshake: CONNECT/CONNACK, SUBSCRIBE/SUBACK, then expect the
    # message published by the helper script to be forwarded to us.
    (bridge, address) = ssock.accept()
    bridge.settimeout(20)

    if mosq_test.expect_packet(bridge, "connect", connect_packet):
        bridge.send(connack_packet)

        if mosq_test.expect_packet(bridge, "subscribe", subscribe_packet):
            bridge.send(suback_packet)

            pub = subprocess.Popen(['./08-ssl-bridge-helper.py', str(port2)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # NOTE(review): wait() before communicate() can deadlock if the
            # helper fills its stdout/stderr pipe buffers -- presumably its
            # output is small; confirm.
            pub.wait()
            (stdo, stde) = pub.communicate()

            # rc = 0 only if the full bridge round-trip succeeded.
            if mosq_test.expect_packet(bridge, "publish", publish_packet):
                rc = 0

    bridge.close()
finally:
    os.remove(conf_file)
    # bridge only exists if accept() succeeded; ignore the NameError otherwise.
    try:
        bridge.close()
    except NameError:
        pass
    broker.terminate()
    broker.wait()
    (stdo, stde) = broker.communicate()
    if rc:
        print(stde)

ssock.close()
exit(rc)
| 2,731 | 1,047 |
import numpy as np
import torch
import torch.nn.functional as F
from torch.autograd import Variable
from .distributions import rand_circle2d
from ot import gromov_wasserstein2, unif
def rand_projections(embedding_dim, num_samples=50):
    """Draw `num_samples` random directions on the latent space's unit sphere.

    Args:
        embedding_dim (int): dimensionality of the latent space
        num_samples (int): number of random projection directions to draw
    Return:
        torch.FloatTensor of shape (num_samples, embedding_dim); each row
        has unit L2 norm.
    """
    directions = np.random.normal(size=(num_samples, embedding_dim))
    # Normalise every row onto the unit sphere in one vectorised step.
    directions /= np.sqrt((directions ** 2).sum(axis=1, keepdims=True))
    return torch.from_numpy(directions).type(torch.FloatTensor)
def _sliced_wasserstein_distance(encoded_samples, distribution_samples, num_projections=50, p=2):
    """Sliced Wasserstein distance between encoded samples and prior samples.

    Projects both sample sets onto `num_projections` random directions,
    sorts each 1-D projection, and averages the element-wise p-th power
    differences of the sorted projections.

    Args:
        encoded_samples (torch.Tensor): embedded training samples
        distribution_samples (torch.Tensor): samples drawn from the prior
        num_projections (int): number of random projections used
        p (int): power of the distance metric
    Return:
        torch.Tensor (scalar)
    """
    # Latent dimensionality is read off the prior samples.
    latent_dim = distribution_samples.size(1)
    directions = rand_projections(latent_dim, num_projections)
    # Project both clouds onto every random direction at once.
    proj_encoded = encoded_samples.matmul(directions.transpose(0, 1))
    proj_prior = distribution_samples.matmul(directions.transpose(0, 1))
    # Per projection, the 1-D Wasserstein coupling is sort-vs-sort.
    sorted_encoded = torch.sort(proj_encoded.transpose(0, 1), dim=1)[0]
    sorted_prior = torch.sort(proj_prior.transpose(0, 1), dim=1)[0]
    differences = sorted_encoded - sorted_prior
    return torch.pow(differences, p).mean()
def sliced_wasserstein_distance(encoded_samples, distribution_fn=rand_circle2d, num_projections=50, p=2):
    """Sliced Wasserstein distance between encoded samples and a prior.

    Args:
        encoded_samples (torch.Tensor): embedded training samples
        distribution_fn (callable): draws `batch_size` prior samples
        num_projections (int): number of random projections used
        p (int): power of the distance metric
    Return:
        torch.Tensor (scalar)
    """
    # Draw as many prior samples as there are encoded samples, then
    # delegate to the projection-based estimator.
    prior_samples = distribution_fn(encoded_samples.size(0))
    return _sliced_wasserstein_distance(encoded_samples, prior_samples, num_projections, p)
def _topology_persistence(encoded_samples, distribution_samples, num_projections=50, p=2):
    """Sliced Wasserstein distance between encoded samples and prior samples.

    Fixes over the previous version:
    - removed the dead/broken adversarial-learner block: it referenced an
      undefined name (`AdversariallearnerBatchTrainer`), called
      F.binary_cross_entropy without a target, and its result was unused;
    - replaced the unconditional `.cuda()` calls (which crashed on
      CPU-only machines) with device-agnostic placement on the device of
      `encoded_samples`.

    Args:
        encoded_samples (torch.Tensor): embedded training samples
        distribution_samples (torch.Tensor): samples drawn from the prior
        num_projections (int): number of random projections used
        p (int): power of the distance metric
    Return:
        torch.Tensor (scalar)
    """
    # derive latent space dimension size from the prior samples
    embedding_dim = distribution_samples.size(1)
    device = encoded_samples.device
    # generate random projections in latent space, on the right device
    projections = rand_projections(embedding_dim, num_projections).to(device)
    # project both sample clouds onto every random direction
    encoded_projections = encoded_samples.matmul(projections.transpose(0, 1))
    distribution_projections = distribution_samples.to(device).matmul(projections.transpose(0, 1))
    # per projection, the 1-D Wasserstein coupling is sort-vs-sort
    wasserstein_distance = (torch.sort(encoded_projections.transpose(0, 1), dim=1)[0]
                            - torch.sort(distribution_projections.transpose(0, 1), dim=1)[0])
    wasserstein_distance_p = torch.pow(wasserstein_distance, p)
    # average over projections and sample positions
    return wasserstein_distance_p.mean()
def topology_persistence(encoded_samples, distribution_fn=rand_circle2d, num_projections=50, p=2):
    """Sliced-Wasserstein-based divergence between encodings and a prior.

    Fixes over the previous version: the default argument was misspelled
    (`rand_cirlce2d`, a NameError evaluated at definition time), and the
    body passed nonexistent `self.*` attributes instead of this function's
    own parameters (also discarding the drawn prior samples `z`).

    Args:
        encoded_samples (torch.Tensor): embedded training samples
        distribution_fn (callable): draws `batch_size` prior samples
        num_projections (int): number of random projections used
        p (int): power of the distance metric
    Return:
        torch.Tensor (scalar)
    """
    batch_size = encoded_samples.size(0)
    z = distribution_fn(batch_size)
    return _topology_persistence(encoded_samples, z, num_projections, p)
def gromov_wasserstein_distance(X, Y, device):
    """Sum of per-sample Gromov-Wasserstein distances between X and Y.

    Each row of X and Y is reshaped to a 28x28 image; the GW distance is
    computed between the (max-normalised) pairwise row-distance matrices
    of the two images, with uniform marginals over the 28 rows.

    Fixes over the previous version: `sp` (scipy) was never imported, and
    `executor.map(range(mb_size))` was invalid -- ProcessPoolExecutor.map
    requires a callable; the loop is now a plain sequential loop.

    Args:
        X (torch.Tensor): batch of flattened 28x28 images (mb_size, 784)
        Y (torch.Tensor): batch of flattened 28x28 images (mb_size, 784)
        device: unused here; kept for interface compatibility
    Return:
        torch.Tensor: scalar sum of distances (requires_grad=True, but the
        values come from numpy so no real gradient flows -- by design of
        the original code).
    """
    import scipy.spatial

    mb_size = X.size(0)
    gw_dist = np.zeros(mb_size)
    Tensor = torch.FloatTensor
    for i in range(mb_size):
        # Convert each flattened sample back to a 28x28 array.
        img_x = X[i, :].reshape(28, 28).data.cpu().numpy()
        img_y = Y[i, :].reshape(28, 28).data.cpu().numpy()
        # Intra-image pairwise row distances, normalised to [0, 1].
        C1 = scipy.spatial.distance.cdist(img_x, img_x)
        C2 = scipy.spatial.distance.cdist(img_y, img_y)
        C1 /= C1.max()
        C2 /= C2.max()
        # Uniform marginals over the 28 rows.
        p = unif(28)
        q = unif(28)
        gw_dist[i] = gromov_wasserstein2(C1, C2, p, q, loss_fun='square_loss', epsilon=5e-4)
    print("*" * 100)
    return Variable(Tensor(gw_dist), requires_grad=True).sum()
class SWAEBatchTrainer:
    """Sliced Wasserstein Autoencoder Batch Trainer.

    Args:
        autoencoder (torch.nn.Module): module which implements the autoencoder framework
        optimizer (torch.optim.Optimizer): torch optimizer
        distribution_fn (callable): callable to draw random prior samples
        num_projections (int): number of projections to approximate sliced wasserstein distance
        p (int): power of distance metric
        weight_swd (float): weight of divergence metric compared to reconstruction in loss
        device (torch.Device): torch device
    """
    def __init__(self, autoencoder, optimizer, distribution_fn,
                 num_projections=50, p=2, weight_swd=10.0, device=None):
        self.model_ = autoencoder
        self.optimizer = optimizer
        self._distribution_fn = distribution_fn
        # Latent dimensionality is read off the wrapped encoder.
        self.embedding_dim_ = self.model_.encoder.embedding_dim_
        self.num_projections_ = num_projections
        self.p_ = p
        self.weight_swd = weight_swd
        # Default to CPU when no device is supplied.
        self._device = device if device else torch.device('cpu')

    def __call__(self, x):
        # Calling the trainer is a forward evaluation only (no optimizer step).
        return self.eval_on_batch(x)

    def train_on_batch(self, x):
        """Forward pass, backprop and one optimizer step; returns the evals dict."""
        # reset gradients
        self.optimizer.zero_grad()
        # autoencoder forward pass and loss
        evals = self.eval_on_batch(x)
        # backpropagate loss
        evals['loss'].backward()
        # update encoder and decoder parameters
        self.optimizer.step()
        return evals

    def test_on_batch(self, x):
        """Forward pass without an optimizer step; returns the evals dict."""
        # reset gradients (no step is taken, but gradients are cleared)
        self.optimizer.zero_grad()
        # autoencoder forward pass and loss
        evals = self.eval_on_batch(x)
        return evals

    def eval_on_batch(self, x):
        """Compute loss terms for one batch.

        Returns a dict with the total loss, the Gromov-Wasserstein
        reconstruction term ('gw'), the weighted latent divergence
        ('entropy'), and the encodings/decodings.
        """
        x = x.to(self._device)
        recon_x, z = self.model_(x)
        # Reconstruction term via Gromov-Wasserstein distance between
        # reconstruction and input ("Equation 4" in the original comments).
        gw = gromov_wasserstein_distance(recon_x, x, self._device)
        # Weighted latent divergence between encodings and the prior
        # ("Equation 15"). NOTE(review): named 'entropy' but computed as a
        # sliced-Wasserstein-style divergence -- confirm intended naming.
        entropy = float(self.weight_swd) * topology_persistence(z, self._distribution_fn, self.num_projections_, self.p_)
        # Equation 16: but why there is a bce. Following the original implementation with Keras
        # it is said that (bce and l1) is the first term for equation 16, and w2 for the second term.
        loss = gw + entropy
        return {'loss': loss, 'gw': gw, 'entropy': entropy, 'encode': z, 'decode': recon_x}
| 8,658 | 2,575 |
#!/usr/bin/python
import cv2
# Load the source image from a hard-coded path.  NOTE(review): cv2.imread
# returns None when the file is missing, which would make every drawing
# call below raise -- confirm the path exists on the target machine.
img = cv2.imread("/home/abhishek/Desktop/tracks.jpeg")
# Diagonal line from the top-left corner; colours are BGR tuples.
cv2.line(img,(0,0),(236,236),(100,54,255),3)
# Red rectangle with a circle roughly centred inside it.
cv2.rectangle(img,(199,112),(325,238),(0,0,255),2)
cv2.circle(img,(262,175),60,(255,200,0),3)
font = cv2.FONT_HERSHEY_SIMPLEX
# NOTE(review): cv2.LINE_4 is passed in the *thickness* position of putText
# (org, fontFace, fontScale, color, thickness, lineType) -- it was likely
# intended as the lineType argument; confirm against the OpenCV signature.
cv2.putText(img,'TRAIN',(210,270),font,1,(90,200,140),cv2.LINE_4)
# Display the annotated image, save a copy, and wait for a key press.
cv2.imshow("actions",img)
cv2.imwrite("/home/abhishek/Desktop/lines.jpeg",img)
cv2.waitKey(0)
cv2.destroyAllWindows()
| 475 | 256 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri May 24 09:08:48 2019
@author: uiet_mac1
"""
import numpy as np
import random as rd
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
#import hungarian as hg
def random_parameters(data, K):
    """Initialise GMM parameters for K components.

    Means are copied from K randomly chosen data rows, every covariance is
    the covariance of the full dataset, and mixing weights are uniform.

    Args:
        data: (N, D) array of observations
        K: number of Gaussian components
    Return:
        (mu, sigma, pi): (K, D) means, list of K (D, D) covariances,
        length-K mixing weights summing to 1.
    """
    cols = data.shape[1]
    mu = np.zeros((K, cols))  # mean of K clusters, K x D
    for k in range(K):
        # Pick one random data row and copy it into mu[k].
        idx = np.floor(rd.random() * len(data))
        mu[k] += data[int(idx)]
    # Every component starts with the covariance of the whole dataset.
    sigma = [np.cov(data.T) for _ in range(K)]
    # Uniform mixing coefficients.
    pi = np.ones(K) * 1.0 / K
    print(mu)
    print(sigma)
    return mu, sigma, pi
def e_step(data, K, mu, sigma, pi):
    """E-step: compute responsibilities resp[i][k] = p(z=k | x_i).

    Args:
        data: (N, D) observations
        K: number of components
        mu, sigma, pi: current mixture parameters
    Return:
        (N, K) array of responsibilities; each row sums to 1.
    """
    idvs = data.shape[0]
    resp = np.zeros((idvs, K))
    for i in range(idvs):
        # The normaliser is the same for every k -- compute it once per row.
        denom = likelihood(data[i], K, mu, sigma, pi)
        for k in range(K):
            resp[i][k] = pi[k] * gaussian(data[i], mu[k], sigma[k]) / denom
    return resp
def log_likelihood(data, K, mu, sigma, pi):
    """Total log marginal likelihood of the dataset under the mixture.

    Return:
        sum over all rows x of log p(x); same container type as the
        per-sample likelihoods (a 1x1 np.matrix when gaussian() returns one).
    """
    total = 0.0
    for x in data:
        total += np.log(likelihood(x, K, mu, sigma, pi))
    return total
def likelihood(x, K, mu, sigma, pi):
    """Marginal density p(x) = sum_k pi_k * N(x | mu_k, sigma_k)."""
    contributions = [pi[k] * gaussian(x, mu[k], sigma[k]) for k in range(K)]
    return sum(contributions, 0.0)
def m_step(data, K, resp):
    """M-step: re-estimate means, covariances and mixing weights.

    Args:
        data: (N, D) observations
        K: number of components
        resp: (N, K) responsibilities from the E-step
    Return:
        (mu, sigma, pi): (K, D) means, (K, D, D) covariances,
        length-K mixing weights.
    """
    resp = np.asarray(resp, dtype=float)
    idvs, cols = data.shape
    mu = np.zeros((K, cols))
    sigma = np.zeros((K, cols, cols))
    pi = np.zeros(K)
    for k in range(K):
        weights = resp[:, k]
        # Effective number of points assigned to component k.
        nk = weights.sum()
        # Responsibility-weighted mean.
        mu[k] = (weights[:, None] * data).sum(axis=0) / nk
        # Responsibility-weighted covariance (outer products of centred rows).
        centered = data - mu[k]
        for i in range(idvs):
            d = centered[i][None, :]
            sigma[k] += (weights[i] / nk) * (d * d.T)
        pi[k] = nk / idvs
    return mu, sigma, pi
def gaussian(x, mu, sigma):
    """Multivariate normal pdf N(x | mu, sigma).

    Args:
        x: length-D observation
        mu: length-D mean
        sigma: (D, D) covariance matrix
    Return:
        the density value as a 1x1 np.matrix (a side effect of the
        np.matrix arithmetic used for the quadratic form).
    """
    dim = len(x)
    # Normalising constant: sqrt((2*pi)^D * |sigma|).
    denom = np.sqrt(((2 * np.pi) ** dim) * np.linalg.det(sigma))
    diff = np.matrix(x - mu)
    # Quadratic form -(1/2) (x-mu) sigma^-1 (x-mu)^T via matrix products.
    exponent = -0.5 * diff * np.linalg.inv(sigma) * diff.T
    return np.exp(exponent) / denom
def EM(data, rst, K, threshold):
    """Run EM for a K-component GMM from one random initialisation.

    Iterates E/M steps (up to max_iter) until the absolute log-likelihood
    gain drops below `threshold`. `rst` is only used in progress printing.
    Plots the per-iteration log-likelihood before returning.

    Returns:
        (current_log_likelihood, mu, sigma, pi, resp)
    """
    converged = False
    mu, sigma, pi = random_parameters(data, K)
    likelihood_list=[]
    current_log_likelihood = log_likelihood(data, K, mu, sigma, pi)
    max_iter = 100
    for it in range(max_iter):
        # NOTE(review): the [0][0] indexing assumes the accumulated
        # likelihood is a 1x1 np.matrix (gaussian() returns one) -- confirm.
        likelihood_list.append(float(current_log_likelihood[0][0]))
        print(rst, " | ", it, " | ", current_log_likelihood[0][0])
        #print("Mixing proportion is ", pi )
        resp = e_step(data, K, mu, sigma, pi)
        mu, sigma, pi = m_step(data, K, resp)
        new_log_likelihood = log_likelihood(data, K, mu, sigma, pi)
        # Stop once the improvement falls below the convergence threshold.
        if (abs(new_log_likelihood-current_log_likelihood) < threshold):
            converged = True
            break
        current_log_likelihood = new_log_likelihood
    print(converged)
    # Plot the optimisation trace (blocks until the window is closed).
    plt.plot(likelihood_list)
    plt.ylabel('log likelihood')
    plt.show()
    return current_log_likelihood, mu, sigma, pi, resp
#######################################################################
def assign_clusters(K, resp):
    """Hard-assign each sample to the cluster with maximal responsibility.

    Fixes the previous implementation, which unconditionally executed
    `resp[i][clss] = resp[i][k]` on every inner iteration: that overwrote
    the current best responsibility with the candidate's value, corrupting
    `resp` and allowing a non-argmax cluster to be returned
    (e.g. resp row [0.5, 0.1, 0.3] yielded cluster 2 instead of 0).

    Args:
        K: number of clusters
        resp: (N, K) responsibilities (indexable as resp[i][k])
    Return:
        length-N int array of argmax cluster indices; `resp` is not modified.
    """
    idvs = len(resp)
    clusters = np.zeros(idvs, dtype=int)
    for i in range(idvs):
        best = 0
        for k in range(1, K):
            if resp[i][k] > resp[i][best]:
                best = k
        clusters[i] = best
    return clusters
'''
def compute_statistics(clusters, ref_clusters, K):
mat = make_ce_matrix(clusters, ref_clusters, K)
#hung_solver = hg.Hungarian()
rs = hung_solver.compute(mat, False)
tmp_clusters = np.array(clusters)
for old, new in rs:
clusters[np.where(tmp_clusters == old)] = new
#print old, new
#print clusters, ref_clusters
nbrIts = 0
for k in range(K):
ref = np.where(ref_clusters == k)[0]
clust = np.where(clusters == k)[0]
nbrIts += len(np.intersect1d(ref, clust))
print(len(np.intersect1d(ref, clust)))
return nbrIts
def make_ce_matrix(clusters, ref_clusters, K):
mat = np.zeros((K, K), dtype=int)
for i in range(K):
for j in xrange(K):
ref_i = np.where(ref_clusters == i)[0]
clust_j = np.where(clusters == j)[0]
its = np.intersect1d(ref_i, clust_j)
mat[i,j] = len(ref_i) + len(clust_j) -2*len(its)
return mat
'''
########################################################################
def read_data(file_name):
    """Read a 4-feature CSV with a class label in the fifth column.

    Args:
        file_name: path to a comma-separated file; columns 0-3 are floats,
            column 4 is a class label string.
    Return:
        (data, ref_clusters): (N, 4) float array and a length-N int array
        mapping each label to its index in the sorted unique labels.
    """
    with open(file_name) as f:
        data = np.loadtxt(f, delimiter=",", dtype="float",
                          skiprows=0, usecols=(0, 1, 2, 3))
    with open(file_name) as f:
        ref_classes = np.loadtxt(f, delimiter=",", dtype="str",
                                 skiprows=0, usecols=[4])
    # Map each label string to its position in the sorted unique labels.
    class_to_idx = {c: i for i, c in enumerate(np.unique(ref_classes))}
    ref_clusters = np.array([class_to_idx[c] for c in ref_classes])
    return data, ref_clusters
def f(t):
    # Identity helper: returns its argument unchanged.
    # NOTE(review): no caller is visible in this file -- possibly a leftover
    # placeholder; confirm before removing.
    return t
def plot_ellipse(ax, mu, sigma, color="k"):
    """Draw a translucent covariance ellipse centred at `mu` on axis `ax`.

    Axis lengths come from the eigendecomposition of `sigma`; the tilt is
    the angle of the first eigenvector.

    Based on
    http://stackoverflow.com/questions/17952171/not-sure-how-to-fit-data-with-a-gaussian-python.
    """
    # Compute eigenvalues and associated eigenvectors
    X_embeddeds, vecs = np.linalg.eigh(sigma)
    # Compute "tilt" of ellipse using first eigenvector
    x, y = vecs[:, 0]
    theta = np.degrees(np.arctan2(y, x))
    # Eigenvalues give the length of the ellipse along each eigenvector
    w, h = 2 * np.sqrt(X_embeddeds)
    ax.tick_params(axis='both', which='major', labelsize=20)
    ellipse = Ellipse(mu, w, h, theta, color=color)  # color="k")
    ellipse.set_clip_box(ax.bbox)
    # Translucent fill so underlying scatter points stay visible.
    ellipse.set_alpha(0.2)
    ax.add_artist(ellipse)
def error_ellipse(mu, cov, ax=None, factor=1.0, **kwargs):
    """
    Plot the error ellipse at a point given its covariance matrix.

    `factor` scales the ellipse radii; remaining keyword arguments are
    forwarded to the matplotlib Ellipse patch. Returns the patch.
    """
    # some sane defaults
    facecolor = kwargs.pop('facecolor', 'none')
    edgecolor = kwargs.pop('edgecolor', 'k')

    x, y = mu
    # SVD gives the principal axis lengths (singular values) and rotation.
    U, S, V = np.linalg.svd(cov)
    theta = np.degrees(np.arctan2(U[1, 0], U[0, 0]))
    ellipsePlot = Ellipse(xy=[x, y],
                          width=2 * np.sqrt(S[0]) * factor,
                          height=2 * np.sqrt(S[1]) * factor,
                          angle=theta,
                          facecolor=facecolor, edgecolor=edgecolor, **kwargs)

    if ax is None:
        ax = plt.gca()
    ax.add_patch(ellipsePlot)
    return ellipsePlot
def _plot_gaussian(mean, covariance, color, zorder=0):
    """Plots the mean and 2-std ellipse of a given Gaussian on the current axes."""
    plt.plot(mean[0], mean[1], color[0] + ".", zorder=zorder)

    # Allow a diagonal covariance given as a 1-D vector of variances.
    if covariance.ndim == 1:
        covariance = np.diag(covariance)

    # sqrt(5.991): radius of the 95% region of a 2-D Gaussian (chi-square, 2 dof).
    radius = np.sqrt(5.991)
    eigX_embeddeds, eigvecs = np.linalg.eig(covariance)
    axis = np.sqrt(eigX_embeddeds) * radius
    slope = eigvecs[1][0] / eigvecs[1][1]
    angle = 180.0 * np.arctan(slope) / np.pi

    # NOTE(review): plt.axes() with no args is deprecated in newer
    # matplotlib -- plt.gca() is the usual spelling; confirm the pinned version.
    plt.axes().add_artist(Ellipse(
        mean, 2 * axis[0], 2 * axis[1], angle=angle,
        fill=False, color=color, linewidth=1, zorder=zorder
    ))
    plt.show()
def _plot_cov_ellipse(cov, pos, nstd=2, ax=None, **kwargs):
    """
    Plots an `nstd` sigma error ellipse based on the specified covariance
    matrix (`cov`). Additional keyword arguments are passed on to the
    ellipse patch artist.

    Parameters
    ----------
    cov : The 2x2 covariance matrix to base the ellipse on
    pos : The location of the center of the ellipse. Expects a 2-element
        sequence of [x0, y0].
    nstd : The radius of the ellipse in numbers of standard deviations.
        Defaults to 2 standard deviations.
    ax : The axis that the ellipse will be plotted on. Defaults to the
        current axis.
    Additional keyword arguments are pass on to the ellipse patch.

    Returns
    -------
    A matplotlib ellipse artist
    """
    # Local imports shadow the module-level ones; kept as in the original.
    from matplotlib import pyplot as plt
    from matplotlib.patches import Ellipse

    def eigsorted(cov):
        # Eigen-pairs sorted by descending eigenvalue (major axis first).
        X_embeddeds, vecs = np.linalg.eigh(cov)
        order = X_embeddeds.argsort()[::-1]
        return X_embeddeds[order], vecs[:, order]

    if ax is None:
        ax = plt.gca()

    X_embeddeds, vecs = eigsorted(cov)
    theta = np.degrees(np.arctan2(*vecs[:, 0][::-1]))

    # Width and height are "full" widths, not radius
    width, height = 2 * nstd * np.sqrt(X_embeddeds)
    ellip = Ellipse(xy=pos, width=width, height=height, angle=theta,
                    **kwargs)

    ax.add_artist(ellip)
    plt.show()
    return ellip
def main():
    """Fit a 3-component GMM to iris.data with random restarts, then
    visualise the clustering in 3-D and via a t-SNE embedding with
    covariance ellipses around the embedded component means.

    Returns the best mean matrix found.
    """
    print("begining...")
    file_name = "iris.data"
    nbr_restarts = 5
    threshold = 0.001
    K = 3
    data, ref_clusters = read_data(file_name)
    print("#restart | EM iteration | log likelihood")
    print("----------------------------------------")
    # Keep the parameters of the restart with the best log-likelihood.
    max_likelihood_score = float("-inf")
    for rst in range(nbr_restarts):
        log_likelihood, mu, sigma, pi, resp = EM(data, rst, K, threshold)
        if log_likelihood > max_likelihood_score:
            max_likelihood_score = log_likelihood
            max_mu, max_sigma, max_pi, max_resp = mu, sigma, pi, resp
    #print("Iteration is"+ str(rst))
    #print("mixing is ")
    #print(max_pi)
    #print("mean is ")
    #print(max_mu)
    #print("sigma is ")
    #print(max_sigma)
    #print(max_mu, max_sigma, max_pi)
    print("mean matrix is ")
    print(max_mu)
    clusters = assign_clusters(K, max_resp)
    #cost = compute_statistics(clusters, ref_clusters, K)
    print(clusters)
    print(ref_clusters)
    #print(cost*1.0/len(data))

    # 3-D scatter of the first three features, coloured by the fourth.
    from mpl_toolkits.mplot3d import Axes3D
    #with first three variables are on the axis and the fourth being color:
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(15, 12))
    ax = fig.add_subplot(111, projection='3d')
    sp = ax.scatter(data[:,0],data[:,1],data[:,2], s=20, c=data[:,3])
    fig.colorbar(sp)
    plt.show()

    # Append the component means to the data so t-SNE embeds them jointly.
    # NOTE(review): this uses `mu` (the last restart), not `max_mu` (the
    # best restart) -- likely unintended; confirm which was meant.
    from sklearn.manifold import TSNE
    data = np.concatenate((data,mu),axis = 0)
    print(data)
    X = np.array(data)
    #means = np.array(mu)
    '''
    X_embedded = TSNE(n_components=1).fit_transform(X)
    print("!!!!")
    figs = plt.figure(figsize=(15, 12))
    plt.plot(X_embedded,'ro')
    plt.plot( X_embedded[150:153],'g^')
    t1 = np.linspace(0, 140, 100)
    plt.plot(t1,[X_embedded[150]]*100 , 'g^')
    plt.plot(t1,[X_embedded[151]]*100 , 'g^')
    plt.plot(t1,[X_embedded[152]]*100 , 'g^')
    plt.ylabel('some numbers')
    plt.show()
    '''
    # 2-D t-SNE: first 150 rows are the iris samples, rows 150-152 the means.
    X_embedded = TSNE(n_components=2).fit_transform(X)
    print(X_embedded)
    print("!!!!")
    figs = plt.figure(figsize=(15, 12))
    plt.plot(X_embedded[0:150,0], X_embedded[0:150,1],'ro')
    plt.plot( X_embedded[150:153,0],X_embedded[150:153,1] ,'g^')
    plt.ylabel('some numbers')

    # Reduce each 4x4 covariance to 2x2 by summing 2x2 sub-blocks, so an
    # ellipse can be drawn in the 2-D embedding.
    # NOTE(review): block-summing a covariance is a heuristic, not the
    # covariance of the embedded points -- confirm this is intended.
    A = np.matrix(max_sigma[0])
    N, M = A.shape
    assert N % 2 == 0
    assert M % 2 == 0
    A0 = np.empty((N//2, M//2))
    for i in range(N//2):
        for j in range(M//2):
            A0[i,j] = A[2*i:2*i+2, 2*j:2*j+2].sum()
    A = np.matrix(max_sigma[1])
    N, M = A.shape
    assert N % 2 == 0
    assert M % 2 == 0
    A1 = np.empty((N//2, M//2))
    for i in range(N//2):
        for j in range(M//2):
            A1[i,j] = A[2*i:2*i+2, 2*j:2*j+2].sum()
    A = np.matrix(max_sigma[2])
    N, M = A.shape
    assert N % 2 == 0
    assert M % 2 == 0
    A2 = np.empty((N//2, M//2))
    for i in range(N//2):
        for j in range(M//2):
            A2[i,j] = A[2*i:2*i+2, 2*j:2*j+2].sum()
    print(A0)
    print(A1)
    print(A2)
    print(X_embedded[150,:])
    #_plot_cov_ellipse(A0,X_embedded[150,:] )

    # Draw a 95% ellipse (sqrt(5.991) ~ chi-square 2 dof) around each
    # embedded component mean. The three stanzas below are copy-pasted for
    # A0/A1/A2 at rows 150/151/152.
    mean = X_embedded[150,:]
    covariance = A0
    plt.plot(mean[0], mean[1], 'g' + ".", zorder=0)
    if covariance.ndim == 1:
        covariance = np.diag(covariance)
    radius = np.sqrt(5.991)
    eigX_embeddeds, eigvecs = np.linalg.eig(covariance)
    axis = np.sqrt(eigX_embeddeds) * radius
    slope = eigvecs[1][0] / eigvecs[1][1]
    angle = 180.0 * np.arctan(slope) / np.pi
    plt.axes().add_artist(Ellipse(
        mean, 2 * axis[0], 2 * axis[1], angle=angle,
        fill=False, color='g', linewidth=1, zorder=0
    ))
    mean = X_embedded[151,:]
    covariance = A1
    plt.plot(mean[0], mean[1], 'g' + ".", zorder=0)
    if covariance.ndim == 1:
        covariance = np.diag(covariance)
    radius = np.sqrt(5.991)
    eigX_embeddeds, eigvecs = np.linalg.eig(covariance)
    axis = np.sqrt(eigX_embeddeds) * radius
    slope = eigvecs[1][0] / eigvecs[1][1]
    angle = 180.0 * np.arctan(slope) / np.pi
    plt.axes().add_artist(Ellipse(
        mean, 2 * axis[0], 2 * axis[1], angle=angle,
        fill=False, color='g', linewidth=1, zorder=0
    ))
    mean = X_embedded[152,:]
    covariance = A2
    plt.plot(mean[0], mean[1], 'g' + ".", zorder=0)
    if covariance.ndim == 1:
        covariance = np.diag(covariance)
    radius = np.sqrt(5.991)
    eigX_embeddeds, eigvecs = np.linalg.eig(covariance)
    axis = np.sqrt(eigX_embeddeds) * radius
    slope = eigvecs[1][0] / eigvecs[1][1]
    angle = 180.0 * np.arctan(slope) / np.pi
    plt.axes().add_artist(Ellipse(
        mean, 2 * axis[0], 2 * axis[1], angle=angle,
        fill=False, color='g', linewidth=1, zorder=0
    ))
    plt.show()
    #_plot_gaussian(X_embedded[150,:], A0,'r')
    #error_ellipse(X_embedded[150,:], A0)
    #plot_ellipse(plt, X_embedded[150,:], A0 )
    #np.savetxt("mu.txt",max_mu)
    return max_mu
# Script entry point.
if __name__ == '__main__':
    main()
"""Implementation of a stack in python."""
class Stack:
    """Simple LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        # Internal storage; the last element is the top of the stack.
        self.items = []

    def push(self, item):
        """Add an item to the top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the most recent item from the stack.

        Raises:
            IndexError: if the stack is empty.
        """
        # list.pop() removes and returns the last element in one step,
        # replacing the previous peek-then-del implementation.
        if not self.items:
            raise IndexError("pop from empty stack")
        return self.items.pop()

    def peek(self):
        """Return (without removing) the most recent item pushed to the stack.

        Raises:
            IndexError: if the stack is empty.
        """
        return self.items[-1]

    def isEmpty(self):
        """Return True if the stack is empty."""
        return not self.items

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
| 775 | 228 |
import unittest
import logging
import numpy
from knn import KNN
class MyTestCase(unittest.TestCase):
    """Smoke test for the project's KNN classifier."""

    def test_something(self):
        """A 1-NN query near the positive examples should be classified as 1."""
        logging.basicConfig()
        # Each entry is [[x, y], label]; labels are +1 / -1.
        dataset = numpy.array([
            [[5, 4], 1],
            [[9, 6], 1],
            [[4, 7], 1],
            [[2, 3], -1],
            [[8, 1], -1],
            [[7, 2], -1]
        ])
        # k=1: prediction is the label of the single nearest neighbour.
        knn = KNN(dataset, 1)
        test_point = numpy.array([5, 3])
        # Nearest neighbour of (5, 3) is (5, 4), which has label 1.
        self.assertEqual(knn.predict(test_point), 1)
if __name__ == '__main__':
unittest.main()
| 535 | 196 |
from basic_op import *
from ld8a import *
from tab_ld8a import *
L_exc_err = [0] * 4
def Init_exc_err() -> None:
    """Reset the taming excitation-error memory to its maximum Q14 value."""
    # Mutate the shared list in place so other modules holding a reference
    # to L_exc_err observe the reset.
    L_exc_err[:] = [MAX_INT_14] * 4  # Q14
def test_err(T0: int, T0_frac: int) -> int:
    """
    # (o) flag set to 1 if taming is necessary
    # (i) T0 - integer part of pitch delay
    # (i) T0_frac - fractional part of pitch delay

    Fixes over the previous version: the scan loop referenced the undefined
    name `zone` (NameError) and used bounds that never included zone1; the
    ITU-T G.729 reference (TAMING.C) iterates i from zone2 down to zone1
    inclusive.
    """
    # Round the delay up when there is a fractional part.
    if T0_frac > 0:
        t1 = add(T0, 1)
    else:
        t1 = T0

    # Map the delay span onto table zones (clamped at 0).
    i = sub(t1, (L_SUBFR + L_INTER10))
    if i < 0:
        i = 0
    zone1 = tab_tab_zone[i]

    i = add(t1, (L_INTER10 - 2))
    zone2 = tab_tab_zone[i]

    L_maxloc = -1
    flag = 0
    # Scan from zone2 down to zone1 (inclusive) for the largest stored error.
    for i in range(zone2, zone1 - 1, -1):
        L_acc = L_sub(L_exc_err[i], L_maxloc)
        if L_acc > 0:
            L_maxloc = L_exc_err[i]
    # Taming is needed when the worst accumulated error exceeds the threshold.
    L_acc = L_sub(L_maxloc, L_THRESH_ERR)
    if L_acc > 0:
        flag = 1
    return flag
def update_exc_err(gain_pit: int, T0: int) -> None:
    """
    # (i) gain_pit - pitch gain
    # (i) T0 - integer part of pitch delay

    Update the taming excitation-error memory L_exc_err after a subframe:
    compute the worst-case propagated error for the current pitch delay,
    then shift the 4-entry history and store the new worst value at [0].
    All arithmetic uses the fixed-point basic ops (L_Extract/Mpy_32_16/
    L_shl/L_add/L_sub) mirroring the ITU-T G.729 reference.
    """
    L_worst = -1
    n = sub(T0, L_SUBFR)
    if n < 0:
        # Pitch delay shorter than one subframe: the new excitation depends
        # on itself, so the gain recursion is applied twice from err[0].
        hi, lo = L_Extract(L_exc_err[0])
        L_temp = Mpy_32_16(hi, lo, gain_pit)
        L_temp = L_shl(L_temp, 1)
        L_temp = L_add(MAX_INT_14, L_temp)
        L_acc = L_sub(L_temp, L_worst)
        if L_acc > 0:
            L_worst = L_temp
        # Second application of the recursion on the intermediate value.
        hi, lo = L_Extract(L_temp)
        L_temp = Mpy_32_16(hi, lo, gain_pit)
        L_temp = L_shl(L_temp, 1)
        L_temp = L_add(MAX_INT_14, L_temp)
        L_acc = L_sub(L_temp, L_worst)
        if L_acc > 0:
            L_worst = L_temp
    else:
        # Delay spans earlier subframes: take the worst propagated error
        # over every history zone the delay can reach.
        zone1 = tab_tab_zone[n]

        i = sub(T0, 1)
        zone2 = tab_tab_zone[i]

        for i in range(zone1, zone2 + 1):
            hi, lo = L_Extract(L_exc_err[i])
            L_temp = Mpy_32_16(hi, lo, gain_pit)
            L_temp = L_shl(L_temp, 1)
            L_temp = L_add(MAX_INT_14, L_temp)
            L_acc = L_sub(L_temp, L_worst)
            if L_acc > 0:
                L_worst = L_temp

    # Shift the error history and insert the new worst value at the front.
    for i in range(3, 0, -1):
        L_exc_err[i] = L_exc_err[i-1]
    L_exc_err[0] = L_worst
| 2,130 | 990 |
import AppKit
from PyObjCTools.TestSupport import TestCase
import objc
class TestNSPageLayout(TestCase):
    """Metadata checks for the NSPageLayout sheet API bridged from AppKit."""

    def testMethods(self):
        # Argument 3 (didEndSelector) must be a selector with signature
        # v@:@<NSInteger>^v (returns void; takes object, NSInteger, void*).
        self.assertArgIsSEL(
            AppKit.NSPageLayout.beginSheetWithPrintInfo_modalForWindow_delegate_didEndSelector_contextInfo_,  # noqa: B950
            3,
            b"v@:@" + objc._C_NSInteger + b"^v",
        )
        # Argument 4 (contextInfo) is an untyped pointer (void*).
        self.assertArgHasType(
            AppKit.NSPageLayout.beginSheetWithPrintInfo_modalForWindow_delegate_didEndSelector_contextInfo_,  # noqa: B950
            4,
            b"^v",
        )
| 558 | 176 |
from datetime import datetime
from django.db import models
class TypeEntry(models.Model):
    """Lookup table for entry types, referenced by FullCommand/Records/Goal."""
    name = models.CharField(max_length=70, verbose_name="Nome", default="", blank=False, null=False)

    def __str__(self):
        return self.name
class FamilyMember(models.Model):
    """Lookup table of family members that records and goals can be tied to."""
    name = models.CharField(max_length=70, verbose_name="Nome", default="", blank=False, null=False)

    def __str__(self):
        return self.name
class Category(models.Model):
    """Spending/income category; `enable` soft-disables it without deleting."""
    name = models.CharField(max_length=70, verbose_name="Nome", default="", blank=False, null=False)
    enable = models.BooleanField(default=True, verbose_name="Enable?")

    def __str__(self):
        return self.name
class FullCommand(models.Model):
    """A preset command that pre-fills a financial record (dates, values,
    category, member and type) when triggered by its `command` string."""

    # How the payment date is derived when the command runs.
    PAYMENT_DATE_OPTIONS = (
        (1, "Data do Dia"),
        (2, "Data do Cartão (15)"),
        (3, "Perguntar"),
        (4, "Dia Seguinte"),
        (5, "Mês Seguinte"),
        (6, "Dia 5 mês vigente"),
        (7, "Dia 5 mês que vem"),
        (8, "Crédito Parcelado"),
    )
    command = models.CharField(max_length=70, verbose_name="Comando", default="", blank=False, null=False)
    # True: use today's date as the entry date; False presumably means ask -- confirm.
    entry_date = models.BooleanField(verbose_name="Data de Lançamento: Usa data do dia?")
    payment_date = models.IntegerField(choices=PAYMENT_DATE_OPTIONS, verbose_name="Data de Pagamento", default=1)
    debit = models.DecimalField(max_digits=6, verbose_name="Débito", decimal_places=2, blank=True, null=True)
    credit = models.DecimalField(max_digits=6, verbose_name="Crédito", decimal_places=2, blank=True, null=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name="Categoria", blank=True, null=True)
    name = models.ForeignKey(FamilyMember, on_delete=models.CASCADE, blank=True, verbose_name="Nome", null=True)
    description = models.CharField(max_length=400, verbose_name="Descrição", blank=True, null=True)
    type_entry = models.ForeignKey(TypeEntry, on_delete=models.CASCADE, verbose_name="Tipo", blank=True, null=True)

    def __str__(self):
        return self.command
class Records(models.Model):
    """A single financial record (debit or credit) with its dates,
    category, family member and type; heavily indexed for reporting."""

    # NOTE(review): auto_now=True updates this field on *every* save; for an
    # "inserted at" timestamp auto_now_add=True is the usual choice -- confirm.
    db_included_date_time = models.DateTimeField(auto_now=True, null=False, verbose_name="Inclusão no Bando de Dados")
    create_date_time = models.DateTimeField(
        default=datetime.now, null=False, blank=False, verbose_name="Data do Lançamento"
    )
    payment_date_time = models.DateTimeField(
        default=datetime.now, null=True, blank=False, verbose_name="Data da Execução"
    )
    debit = models.DecimalField(max_digits=6, verbose_name="Débito", decimal_places=2, blank=True, null=True)
    credit = models.DecimalField(max_digits=6, verbose_name="Crédito", decimal_places=2, blank=True, null=True)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name="Categoria", blank=True, null=True)
    name = models.ForeignKey(FamilyMember, on_delete=models.CASCADE, blank=True, verbose_name="Nome", null=True)
    type_entry = models.ForeignKey(TypeEntry, on_delete=models.CASCADE, verbose_name="Tipo", blank=True, null=True)
    description = models.CharField(max_length=400, verbose_name="Descrição", default="", blank=True, null=True)

    class Meta:
        # Indexes for the date/dimension columns used when filtering reports.
        indexes = [
            models.Index(fields=["db_included_date_time"], name="db_included_date_time_idx"),
            models.Index(fields=["create_date_time"], name="create_date_time_idx"),
            models.Index(fields=["payment_date_time"], name="payment_date_time_idx"),
            models.Index(fields=["category"], name="category_idx"),
            models.Index(fields=["name"], name="name_idx"),
            models.Index(fields=["type_entry"], name="type_entry_idx"),
        ]
class Goal(models.Model):
    """A spending goal (value per period) optionally scoped to a category,
    family member and entry type."""

    PERIOD_CHOICES = ((1, "This Week"),)
    category = models.ForeignKey(Category, on_delete=models.CASCADE, verbose_name="Categoria", blank=True, null=True)
    name_family = models.ForeignKey(FamilyMember, on_delete=models.CASCADE, blank=True, verbose_name="Nome", null=True)
    type_entry = models.ForeignKey(TypeEntry, on_delete=models.CASCADE, verbose_name="Tipo", blank=True, null=True)
    name = models.CharField(max_length=40, verbose_name="Name")
    value = models.DecimalField(max_digits=6, verbose_name="Value", decimal_places=2)
    # NOTE(review): verbose_name "Data de Pagamento" looks copy-pasted from
    # FullCommand; this field is the goal period -- confirm the label.
    period = models.IntegerField(choices=PERIOD_CHOICES, verbose_name="Data de Pagamento", default=1)
    enable = models.BooleanField(default=True, verbose_name="Enable")

    def __str__(self):
        return self.name
| 4,458 | 1,473 |
# Copyright 2020 The TensorStore Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//third_party:repo.bzl",
"third_party_http_archive",
)
load("@bazel_tools//tools/build_defs/repo:utils.bzl", "maybe")
def repo():
    """Registers the com_google_boringssl external repository (a pinned
    commit of BoringSSL's master-with-bazel branch) unless it has already
    been defined by an earlier WORKSPACE entry (`maybe` makes it a no-op
    in that case)."""
    maybe(
        third_party_http_archive,
        name = "com_google_boringssl",
        urls = [
            # When updating, always use commit from master-with-bazel branch.
            "https://github.com/google/boringssl/archive/34693f02f6cf9ac7982778b761c16a27f32433c1.tar.gz",  # 2019-09-25
        ],
        sha256 = "633e2e806d01a07a20725d1e68fff0be96db18344ed4389c00de042dcd874cac",
        strip_prefix = "boringssl-34693f02f6cf9ac7982778b761c16a27f32433c1",
        # Allows building against a system-provided BoringSSL/OpenSSL instead.
        system_build_file = Label("//third_party/com_google_boringssl:system.BUILD.bazel"),
    )
| 1,306 | 509 |
# Public submodules re-exported by `from <package> import *`.
__all__ = [
    'stats', 'utils', 'linalg'
]
| 45 | 22 |
# Distribution name and version (PEP 440 development release).
name = "skforecast"
__version__ = "0.5.dev1"
| 45 | 22 |
# Generated by Django 3.1.2 on 2020-10-04 04:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add single-column db_index to AirNowReportingArea.name and
    .state_code, plus a composite (name, state_code) index for lookups
    filtering on both."""

    dependencies = [
        ('api', '0003_auto_20201004_0401'),
    ]

    operations = [
        migrations.AlterField(
            model_name='airnowreportingarea',
            name='name',
            field=models.CharField(db_index=True, max_length=45),
        ),
        migrations.AlterField(
            model_name='airnowreportingarea',
            name='state_code',
            field=models.CharField(db_index=True, max_length=2),
        ),
        migrations.AddIndex(
            model_name='airnowreportingarea',
            index=models.Index(fields=['name', 'state_code'], name='api_airnowr_name_3a07e3_idx'),
        ),
    ]
| 777 | 263 |
from app import db
from models import *
import datetime

# One-off seed script: creates the schema and inserts a demo user,
# portfolio, stock and two trades into an empty database.

# create the db and tables
db.create_all()

# prepare reference dates
birthday = datetime.date(1982, 4, 3)
now = datetime.datetime.now()
today = datetime.date(now.year, now.month, now.day)
# BUG FIX: "yesterday" was built with a hard-coded day of 13
# (datetime.date(now.year, now.month, 13)); derive it arithmetically so
# it is correct on any date, including month boundaries.
yesterday = today - datetime.timedelta(days=1)

# insert the demo user
adam = User("adam", "abmorton@gmail.com", "testpw", yesterday)
# BUG FIX: was db.session.add(User(adam)) -- that wrapped an existing
# User instance in another User() constructor call; add the instance.
db.session.add(adam)
db.session.commit()

# make a Portfolio owned by the demo user
port = Portfolio(adam.id)
db.session.add(port)
db.session.commit()

# add a stock
db.session.add(Stock("XOMA", "XOMA Corporation", "NGM", "0.9929", None, None, None, "117.74M", 1))
db.session.commit()

# get a stock instance for later use creating other records
stock = Stock.query.get(1)

# make some trades (symbol, price, quantity, date, ...)
db.session.add(Trade(stock.symbol, 1, 10, yesterday, None, None, None))
db.session.add(Trade(stock.symbol, 1.20, -5, today, None, None, None))

# commit the records that require ForeignKeys & relationship()
db.session.commit()
import logging
from django.http import HttpResponse
from django.utils.datastructures import MultiValueDictKeyError
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.renderers import TemplateHTMLRenderer
from .handler import CsvHandler
from .validator import CsvValidator
from .models import CsvJob
logger = logging.getLogger(__name__)
class CsvUploader(APIView):
    """Serves the CSV-upload page (GET) and processes an uploaded CSV (POST).

    Both handlers render the ``csv_uploader.html`` template with the set of
    available actions supplied by ``CsvValidator``.
    """
    renderer_classes = (TemplateHTMLRenderer,)
    def get(self, request, *args, **kwargs):
        # Render the upload form with the available action choices.
        # NOTE(review): is_authenticated is *called* here, which matches
        # older Django versions where it was a method, not a property --
        # confirm against the project's Django version.
        if not request.user.is_authenticated():
            return HttpResponse("Access Denied..", content_type='text/plain')
        return Response(data=dict(action_names= CsvValidator.available_actions()
                                  , action_json=CsvValidator.available_actions_json())
                        , template_name='csv_uploader.html')
    def post(self, request, *args, **kwargs):
        # Validate and hand the uploaded file to CsvHandler; any failure is
        # reported back on the same template via 'status_message'.
        template_name = 'csv_uploader.html'
        if not request.user.is_authenticated():
            return HttpResponse("Access Denied..", content_type='text/plain')
        try:
            # Raises MultiValueDictKeyError if 'action_name' or 'csv_file'
            # is missing from the request.
            handler = CsvHandler(str(request.POST['action_name']), request.FILES['csv_file'])
            handler.process(request.user)
            return Response(data=dict(status_message= handler.display_message()
                                      ,action_names=CsvValidator.available_actions()
                                      , action_json=CsvValidator.available_actions_json()),
                            template_name='csv_uploader.html')
        except MultiValueDictKeyError as e:
            # Missing form field -- most commonly the file input.
            logger.exception("Error in upload file: %s", str(e))
            return Response(data={'status_message': 'csv file missing', 'action_names': CsvValidator.available_actions()
                                  , 'action_json': CsvValidator.available_actions_json()}, template_name=template_name)
        except Exception as e:
            # Catch-all: surface the error message to the user and log it.
            logger.exception("Error in upload file: %s", str(e))
            return Response(data={'status_message': str(e), 'action_names': CsvValidator.available_actions()
                                  , 'action_json': CsvValidator.available_actions_json()}, template_name=template_name)
class CsvUploaderCallback(APIView):
    """Receives per-item status callbacks for a running CSV job."""
    def post(self, request, *args, **kwargs):
        payload = request.data
        # Message is capped at 199 characters before being stored.
        truncated_message = payload['message'][:199]
        CsvHandler.callback(payload['job_item_id'], payload['status'], truncated_message)
        return Response('OK')
class CsvUploaderCleanup(APIView):
    """Purges stale CsvJob records on demand."""
    def post(self, request, *args, **kwargs):
        # Delegates the actual cleanup policy to the CsvJob model.
        CsvJob.purge()
        return Response('OK')
"""
@file Array-SingleNumber.py
Log:
2020/10/13
8:31
9:07 working implementation
"""
# Worked examples from the problem statement: each input list holds every
# value twice except one; the matching output is that unpaired value.
example_input_1 = [2, 2, 1]
example_input_2 = [4, 1, 2, 1, 2]
example_input_3 = [1]
example_output_1 = 1
example_output_2 = 4
example_output_3 = 1
"""
Given a non-empty array of integers nums, every element appears twice except
for one.
Let N be total size of the array.
For 0, 1, ... N - 1 (finite set), there exists I in 0, 1, ... N -1 such that
a_I != a_i for all i != I.
If i != I, there exists unique j != I, j != i such that a_i = a_j and
a_k != a_i for all k != i, j
For all i in 0, 1, ... N - 1, either
a. exists unique j != i such that a_i = a_j and for all k != j, a_k != a_j, or
b. a_i != a_j for all j != i.
For any i, j in 0, 1, ... N - 1, i != j, either
a. a_i != a_j so either
A. a_i or a_j is unique or
B. a_i and a_j "have other matching pairs"
b. a_i == a_j and for all k != i, k != j, a_k != a_i
"""
# This is the base case.
def find_single_number_from_3(nums3):
    """Return the unpaired value of a 3-element list (mutates its input).

    Inputs
        nums3 - assumed to have length 3.
    """
    candidate = nums3.pop()
    if candidate not in nums3:
        # The popped value has no partner left, so it is the unique one.
        return candidate
    # The popped value's pair is among the remaining two entries; the
    # answer is whichever of them differs from it.
    return nums3[1] if candidate == nums3[0] else nums3[0]
def check_pair_from_3_nums(nums, traversed_numbers):
    """Resolve the unique element once only three numbers remain in nums.

    Suppose the unique value lies in nums + traversed_numbers.  Then one
    of the following holds:
      a. all 3 entries of nums pair up inside traversed_numbers, so
         traversed_numbers has size 4 and is left holding the answer;
      b. nums contains the unique number, and either
         A. traversed_numbers has size 2 (two entries of nums pair there), or
         B. traversed_numbers has size 0 (a pair sits inside nums itself).
    """
    unmatched_count = len(traversed_numbers)
    if unmatched_count == 4:
        # Cancel the three pairs; the survivor is the answer.
        for value in nums:
            traversed_numbers.remove(value)
        return traversed_numbers[0]
    if unmatched_count == 2:
        # The entry of nums with no partner in traversed_numbers is unique.
        for value in nums:
            if value not in traversed_numbers:
                return value
    if unmatched_count == 0:
        return find_single_number_from_3(nums)
def check_pair(nums, traversed_numbers):
    """Recursively pop pairs off nums, tracking still-unmatched values.

    Two values are popped per step: if they match they simply cancel;
    otherwise each one is toggled in or out of traversed_numbers.  The
    recursion bottoms out when three numbers remain.
    """
    if len(nums) == 3:
        return check_pair_from_3_nums(nums, traversed_numbers)
    first = nums.pop()
    second = nums.pop()
    if first == second:
        # Immediate cancellation; nothing to record.
        return check_pair(nums, traversed_numbers)
    for value in (first, second):
        if value in traversed_numbers:
            traversed_numbers.remove(value)
        else:
            traversed_numbers.append(value)
    return check_pair(nums, traversed_numbers)
def find_single_number(nums):
    """Return the element appearing exactly once in nums (mutates nums)."""
    size = len(nums)
    if size == 1:
        return nums[0]
    if size == 3:
        return find_single_number_from_3(nums)
    # General case: pair-cancellation recursion with an empty scratch list.
    return check_pair(nums, [])
class Solution:
    """LeetCode-style entry point for the single-number problem."""

    def singleNumber(self, nums) -> int:
        """Return the element that appears exactly once; all others appear twice.

        BUG FIX: the stub always returned 0 regardless of input.  Uses the
        XOR fold: pairs cancel (x ^ x == 0), so folding the whole list
        leaves only the unique element.  O(n) time, O(1) extra space, and
        unlike the helper functions above it does not mutate its input.
        """
        result = 0
        for value in nums:
            result ^= value
        return result
# Script entry point: currently just prints a banner; the example inputs
# defined above can be fed through find_single_number for manual checks.
if __name__ == "__main__":
    print("\nArray-SingleNumber\n")
from typing import Any, Dict
from h1st_ml import MLModeler
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import r2_score
from my_ml_model import MyMLModel
class MyMLModeler(MLModeler):
    """Builds a MyMLModel: loads iris.csv, scales features, trains a
    LogisticRegression, and evaluates it on a held-out split."""
    def __init__(self):
        # Fraction of rows held out for evaluation.
        self.example_test_data_ratio = 0.2
    def load_data(self) -> Dict:
        """Read the raw iris CSV from the working directory."""
        df_raw = pd.read_csv('iris.csv')
        return {'df_raw': df_raw}
    def preprocess(self, data):
        """Standard-scale the feature columns.

        The fitted scaler is kept on self so build() copies it onto the
        finished model along with the rest of the modeler state.
        """
        self.scaler = StandardScaler()
        return self.scaler.fit_transform(data)
    def generate_training_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
        """Encode labels, shuffle, scale, and split into train/test sets."""
        df_raw = data['df_raw']
        # Map species names to integer class ids (sorted for determinism).
        self.targets = sorted(df_raw['species'].unique())
        self.targets_dict = {k: v for v, k in enumerate(self.targets)}
        df_raw['species'] = df_raw['species'].apply(lambda x: self.targets_dict[x])
        # Shuffle all the df_raw (fixed seed, so splits are reproducible)
        df_raw = df_raw.sample(frac=1, random_state=5).reset_index(drop=True)
        # Preprocess data (assumes the feature columns run contiguously
        # from 'sepal_length' to 'petal_width' -- TODO confirm CSV layout)
        df_raw.loc[:, 'sepal_length':'petal_width'] = self.preprocess(
            df_raw.loc[:, 'sepal_length':'petal_width'])
        # Split to training and testing data
        n = df_raw.shape[0]
        n_test = int(n * self.example_test_data_ratio)
        training_data = df_raw.iloc[n_test:, :].reset_index(drop=True)
        test_data = df_raw.iloc[:n_test, :].reset_index(drop=True)
        # Split the data to features and labels
        train_data_x = training_data.loc[:, 'sepal_length':'petal_width']
        train_data_y = training_data['species']
        test_data_x = test_data.loc[:, 'sepal_length':'petal_width']
        test_data_y = test_data['species']
        # When returning many variables, it is a good practice to give them names:
        return {
            'train_x':train_data_x,
            'train_y':train_data_y,
            'test_x':test_data_x,
            'test_y':test_data_y,
        }
    def train(self, data: Dict[str, Any]) -> Any:
        """Fit a LogisticRegression on the training split and return it."""
        X, y = data['train_x'], data['train_y']
        model = LogisticRegression(random_state=0)
        model.fit(X, y)
        return model
    # TODO: need to check model instance type
    def evaluate(self, data: Dict, model: Any) -> Dict:
        """Score the trained model on the test split.

        NOTE(review): r2_score over integer class labels is unusual for a
        classifier -- accuracy_score may have been intended; confirm.
        """
        X, y_true = data['test_x'], data['test_y']
        y_pred = model.predict(X)
        return {'r2_score': r2_score(y_true, y_pred)}
    def build(self) -> MyMLModel:
        """Run the full pipeline and return the finished MyMLModel."""
        data = self.load_data()
        training_data = self.generate_training_data(data)
        base_model = self.train(training_data)
        ml_model = MyMLModel(base_model)
        # pass all property of modeler to model
        for k, v in self.__dict__.copy().items():
            ml_model.__dict__[k] = v
        ml_model.metrics = self.evaluate(training_data, ml_model.base_model)
        return ml_model
"""The imap component."""
| 26 | 9 |
from tik_manager import assetLibrary
# NOTE(review): bare reload() implies Python 2 (or a star-imported
# importlib.reload) -- keep in mind when porting.
reload(assetLibrary)
import pprint
import time
# Library roots to migrate: local backup copies and the network share.
pathList = ["E:\\backup\\_CharactersLibrary", "E:\\backup\\_BalikKrakerAssetLibrary", "E:\\backup\\_AssetLibrary", "M:\\Projects\\_CharactersLibrary", "M:\\Projects\\_BalikKrakerAssetLibrary", "M:\\Projects\\_AssetLibrary"]
# One-off maintenance pass: for every asset in every library, blank out the
# "notes" field of its metadata.  The commented lines record earlier
# migrations that patched other fields the same way.
for path in pathList:
    lib = assetLibrary.AssetLibrary(path)
    lib.scanAssets()
    for item in lib.assetsList:
        data = lib._getData(item)
        # data["sourceProject"] = "Maya(ma)"
        # data["notes"] = "N/A"
        # data["version"] = "N/A"
        # if data["Faces/Triangles"] == "Nothing counted : no polygonal object is selected./Nothing counted : no polygonal object is selected.":
        # data["Faces/Triangles"] = "N/A"
        data["notes"]=""
        # data["Faces/Triangles"] = data["Faces/Trianges"]
        # write the modified metadata back to the library
        lib._setData(item, data)
#!/usr/bin/env python
import sys, os, random, unittest, itertools
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# pylint: disable=import-error
from libpv.pv_generation import PvGenerator, weather
from libpv.time_of_day import TimeOfDay
def generate_360_times():
    """Yield TimeOfDay samples at 240 s intervals covering one full day."""
    current = TimeOfDay(0)
    yield current
    current += 240
    # Keep stepping until the time-of-day wraps past midnight, i.e. its
    # seconds value drops back below one step.
    while current.seconds() >= 240:
        yield current
        current += 240
class TestPvGeneration(unittest.TestCase):
    # Exercises PvGenerator (8:00-20:00 window, 3500 W peak) sampled every
    # 240 s across a whole day by generate_360_times().
    def testEquality(self):
        # Golden-value test: the generator must reproduce this exact curve
        # (zeros outside the window, ramp up, plateau at 3500, ramp down).
        gen = PvGenerator(TimeOfDay.from_hms(8), TimeOfDay.from_hms(20), 3500)
        self.assertEqual(
            [round(gen.get_value(x)) for x in generate_360_times()],
            [
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 11,
                22, 32, 43, 54, 65, 76, 86, 97, 108, 119, 130, 140, 151,
                162, 173, 184, 194, 205, 216, 229, 304, 378, 451, 523,
                595, 665, 735, 803, 871, 938, 1004, 1069, 1134, 1197,
                1260, 1322, 1383, 1443, 1502, 1560, 1618, 1674, 1730,
                1785, 1839, 1892, 1944, 1996, 2046, 2096, 2145, 2193,
                2240, 2286, 2332, 2376, 2420, 2463, 2504, 2545, 2586,
                2625, 2663, 2701, 2738, 2774, 2809, 2843, 2876, 2908,
                2940, 2971, 3000, 3029, 3058, 3085, 3111, 3137, 3161,
                3185, 3208, 3230, 3251, 3271, 3291, 3309, 3327, 3344,
                3360, 3375, 3389, 3403, 3415, 3427, 3438, 3448, 3457,
                3465, 3472, 3479, 3484, 3489, 3493, 3496, 3498, 3500,
                3500, 3500, 3498, 3496, 3493, 3489, 3484, 3479, 3472,
                3465, 3457, 3448, 3438, 3427, 3415, 3403, 3389, 3375,
                3360, 3344, 3327, 3309, 3291, 3271, 3251, 3230, 3208,
                3185, 3161, 3137, 3111, 3085, 3058, 3029, 3000, 2971,
                2940, 2908, 2876, 2843, 2809, 2774, 2738, 2701, 2663,
                2625, 2586, 2545, 2504, 2463, 2420, 2376, 2332, 2286,
                2240, 2193, 2145, 2096, 2046, 1996, 1944, 1892, 1839,
                1785, 1730, 1674, 1618, 1560, 1502, 1443, 1383, 1322,
                1260, 1197, 1134, 1069, 1004, 938, 871, 803, 735, 665,
                595, 523, 451, 378, 304, 229, 216, 205, 194, 184, 173, 162,
                151, 140, 130, 119, 108, 97, 86, 76, 65, 54, 43, 32, 22, 11,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            ])
    def testContinuityAndBounds(self):
        # The curve must stay within [0, 3500] and never jump more than
        # 80 W between consecutive 240 s samples.
        gen = PvGenerator(TimeOfDay.from_hms(8), TimeOfDay.from_hms(20), 3500)
        last = gen.get_value(TimeOfDay(0))
        for time in generate_360_times():
            power = gen.get_value(time)
            self.assertGreaterEqual(power, 0)
            self.assertLessEqual(power, 3500)
            self.assertLess(abs(power - last), 80)
            last = power
class TestWeatherGeneration(unittest.TestCase):
    # Exercises the weather(noise_factor, rng) generator.
    def testEquality(self):
        # Golden-value test with a fixed RNG seed: the first 100 samples
        # (scaled by 10 and rounded to 3 places) must match exactly.
        w = weather(0.6, random.Random(4))
        self.assertEqual(
            [round(x * 10, 3) for x in itertools.islice(w, 100)],
            [
                9.201, 9.197, 9.193, 9.19, 9.186, 9.183, 9.179, 9.175, 9.172, 9.168,
                9.165, 9.161, 9.158, 9.154, 9.15, 9.147, 9.143, 9.14, 9.137, 9.133,
                9.13, 9.126, 9.123, 9.119, 9.116, 9.113, 9.109, 9.106, 9.102, 9.099,
                9.096, 9.102, 9.107, 9.113, 9.119, 9.124, 9.13, 9.136, 9.141, 9.147,
                9.152, 9.158, 9.164, 9.169, 9.175, 9.18, 9.186, 9.191, 9.197, 9.202,
                9.208, 9.213, 9.219, 9.224, 9.23, 9.235, 9.24, 9.246, 9.251, 9.257,
                9.253, 9.25, 9.247, 9.243, 9.24, 9.237, 9.234, 9.23, 9.227, 9.224,
                9.221, 9.218, 9.215, 9.211, 9.208, 9.205, 9.202, 9.199, 9.196, 9.193,
                9.19, 9.187, 9.183, 9.18, 9.177, 9.174, 9.171, 9.168, 9.165, 9.162,
                9.159, 9.157, 9.154, 9.151, 9.148, 9.145, 9.142, 9.139, 9.136, 9.133,
            ])
    def testNoiseFactor(self):
        # With noise factor 0.4, every sample must stay within [0.6, 1].
        w = weather(0.4, random.Random(3))
        for n in itertools.islice(w, 200_000):
            self.assertLessEqual(0.6, n)
            self.assertLessEqual(n, 1)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| 4,552 | 2,952 |
#!/usr/bin python
# -*- coding: utf-8 -*-
from __future__ import print_function
from unittest import (TestCase, skip, skipIf)
from uvmod.stats import LnLike, LS_estimates, LnPrior, LnPost, hdi_of_mcmc
from uvmod.models import Model_1d, Model_2d_isotropic, Model_2d_anisotropic
# TODO: Use ``np.random.uniform`` instead
try:
from scipy.stats import uniform
is_scipy = True
except ImportError:
is_scipy = False
try:
import emcee
is_emcee = True
except ImportError:
is_emcee = False
import numpy as np
import math
# TODO: Add tests for data wo uncertainties
# TODO: Add tests for not installed packages
# TODO: Fix random state to guarantee passing
class Test_1D(TestCase):
    # Tests the 1-D model pipeline (likelihood, least squares, prior,
    # posterior, MCMC) against synthetic data generated from known
    # parameters p = [2, 0.3].  NOTE(review): no RNG seed is set here, so
    # the noisy data differ per run (see the module-level TODO).
    def setUp(self):
        self.p = [2, 0.3]
        self.x = np.array([0., 0.1, 0.2, 0.4, 0.6])
        self.model_1d = Model_1d
        self.model_1d_detections = Model_1d(self.x)
        # Detections: model values plus gaussian noise, with noisy sigmas.
        self.y = self.model_1d_detections(self.p) + np.random.normal(0, 0.1,
                                                                     size=5)
        self.sy = np.random.normal(0.15, 0.025, size=5)
        # Upper-limit (non-detection) points and their sigmas.
        self.xl = np.array([0.5, 0.7])
        self.yl = np.array([0.6, 0.2])
        self.syl = np.random.normal(0.1, 0.03, size=2)
        # Perturbed parameter vectors; the likelihood/posterior at the true
        # p must exceed all of them.
        self.p1 = np.asarray(self.p) + np.array([1., 0.])
        self.p2 = np.asarray(self.p) + np.array([-1., 0.])
        self.p3 = np.asarray(self.p) + np.array([0., 0.2])
        self.p4 = np.asarray(self.p) + np.array([0., -0.2])
        # Uniform prior supports for the two parameters.
        self.p0_range = [0., 10.]
        self.p1_range = [0., 2.]
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnLike(self):
        # The total log-likelihood must equal the sum of its detection and
        # limit components, and peak near the true parameters.
        lnlike = LnLike(self.x, self.y, self.model_1d, sy=self.sy,
                        x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnlik0 = lnlike._lnprob[0].__call__(self.p)
        lnlik1 = lnlike._lnprob[1].__call__(self.p)
        self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
        self.assertGreater(lnlike(self.p), lnlike(self.p1))
        self.assertGreater(lnlike(self.p), lnlike(self.p2))
        self.assertGreater(lnlike(self.p), lnlike(self.p3))
        self.assertGreater(lnlike(self.p), lnlike(self.p4))
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LS_estimates(self):
        # Least squares must recover the true parameters within a few sigma.
        lsq = LS_estimates(self.x, self.y, self.model_1d, sy=self.sy)
        p, pcov = lsq.fit([1., 1.])
        delta0 = 3. * np.sqrt(pcov[0, 0])
        delta1 = 5. * np.sqrt(pcov[1, 1])
        self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
        self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnPrior(self):
        # Points outside the uniform supports must have -inf log-prior.
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),)
        lnpr = LnPrior(lnprs)
        self.assertTrue(np.isinf(lnpr([-1., 1.])))
        self.assertTrue(np.isinf(lnpr([1., -1.])))
        self.assertTrue(np.isinf(lnpr([15., 1.])))
        self.assertTrue(np.isinf(lnpr([1., 5.])))
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnPost(self):
        # The posterior must decompose into prior + likelihood and peak
        # near the true parameters.
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnlike = LnLike(self.x, self.y, self.model_1d, sy=self.sy,
                        x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnpost = LnPost(self.x, self.y, self.model_1d, sy=self.sy,
                        x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
        self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
        self.assertGreater(lnpost(self.p), lnpost(self.p1))
        self.assertGreater(lnpost(self.p), lnpost(self.p2))
        self.assertGreater(lnpost(self.p), lnpost(self.p3))
        self.assertGreater(lnpost(self.p), lnpost(self.p4))
    @skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
                                              " installed")
    def test_MCMC(self):
        # After burn-in, the HDI of the thinned chains must bracket the
        # true parameter values.
        nwalkers = 250
        ndim = 2
        p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
                               size=(nwalkers, ndim))
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnpost = LnPost(self.x, self.y, self.model_1d, sy=self.sy,
                        x_limits=self.xl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
        pos, prob, state = sampler.run_mcmc(p0, 250)
        sampler.reset()
        sampler.run_mcmc(pos, 500)
        sample_vec0 = sampler.flatchain[::10, 0]
        sample_vec1 = sampler.flatchain[::10, 1]
        p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
        p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
        self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
        self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
class Test_2D_isoptopic(TestCase):
    # Same pipeline as Test_1D but for the isotropic 2-D model; the RNG is
    # seeded, so the synthetic data are reproducible.
    def setUp(self):
        np.random.seed(1)
        self.p = [2, 0.3]
        self.x1 = np.random.uniform(low=-1, high=1, size=10)
        self.x2 = np.random.uniform(low=-1, high=1, size=10)
        self.xx = np.column_stack((self.x1, self.x2))
        self.model_2d = Model_2d_isotropic
        self.model_2d_detections = Model_2d_isotropic(self.xx)
        # Detections: model values plus gaussian noise.
        self.y = self.model_2d_detections(self.p) + np.random.normal(0, 0.1,
                                                                     size=10)
        self.sy = np.random.normal(0.15, 0.025, size=10)
        # Limit points drawn from the outer annulus of the domain.
        self.x1l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
                              np.random.uniform(low=0.5, high=1, size=2),))
        self.x2l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
                              np.random.uniform(low=0.5, high=1, size=2),))
        self.xxl = np.column_stack((self.x1l, self.x2l))
        self.model_2d_limits = Model_2d_isotropic(self.xxl)
        # Upper limits sit above the model curve (abs of the noise).
        self.yl = self.model_2d_limits(self.p) + abs(np.random.normal(0, 0.1,
                                                                      size=4))
        self.syl = np.random.normal(0.1, 0.03, size=4)
        # Perturbed parameter vectors for peakedness checks.
        self.p1 = np.asarray(self.p) + np.array([1., 0.])
        self.p2 = np.asarray(self.p) + np.array([-1., 0.])
        self.p3 = np.asarray(self.p) + np.array([0., 0.2])
        self.p4 = np.asarray(self.p) + np.array([0., -0.2])
        self.p0_range = [0., 10.]
        self.p1_range = [0., 2.]
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnLike(self):
        # Likelihood decomposes into its two components and peaks at p.
        lnlike = LnLike(self.xx, self.y, self.model_2d, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnlik0 = lnlike._lnprob[0].__call__(self.p)
        lnlik1 = lnlike._lnprob[1].__call__(self.p)
        self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
        self.assertGreater(lnlike(self.p), lnlike(self.p1))
        self.assertGreater(lnlike(self.p), lnlike(self.p2))
        self.assertGreater(lnlike(self.p), lnlike(self.p3))
        self.assertGreater(lnlike(self.p), lnlike(self.p4))
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LS_estimates(self):
        lsq = LS_estimates(self.xx, self.y, self.model_2d, sy=self.sy)
        p, pcov = lsq.fit([1., 1.])
        delta0 = 3. * np.sqrt(pcov[0, 0])
        delta1 = 5. * np.sqrt(pcov[1, 1])
        self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
        # FIXME: use variance as parameter so p[1] > 0
        self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnPost(self):
        # Posterior = prior + likelihood, and peaks at the true p.
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnlike = LnLike(self.xx, self.y, self.model_2d, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnpost = LnPost(self.xx, self.y, self.model_2d, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
        self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
        self.assertGreater(lnpost(self.p), lnpost(self.p1))
        self.assertGreater(lnpost(self.p), lnpost(self.p2))
        self.assertGreater(lnpost(self.p), lnpost(self.p3))
        self.assertGreater(lnpost(self.p), lnpost(self.p4))
    @skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
                                              " installed")
    def test_MCMC(self):
        # The HDI of the thinned chains must bracket the true values.
        nwalkers = 250
        ndim = 2
        p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
                               size=(nwalkers, ndim))
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnpost = LnPost(self.xx, self.y, self.model_2d, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
        pos, prob, state = sampler.run_mcmc(p0, 250)
        sampler.reset()
        sampler.run_mcmc(pos, 500)
        sample_vec0 = sampler.flatchain[::10, 0]
        sample_vec1 = sampler.flatchain[::10, 1]
        p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
        p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
        self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
        self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
class Test_2D_anisoptopic(TestCase):
    # Same pipeline as Test_2D_isoptopic but for the anisotropic model
    # with four parameters p = [amp, sigma, axis ratio, angle].
    def setUp(self):
        # CONSISTENCY FIX: seed the RNG like Test_2D_isoptopic does (and
        # as the module-level TODO "Fix random state to guarantee
        # passing" requests), so the synthetic data are reproducible.
        np.random.seed(1)
        self.p = [2, 0.7, 0.3, 1.]
        self.x1 = np.random.uniform(low=-1, high=1, size=10)
        self.x2 = np.random.uniform(low=-1, high=1, size=10)
        self.xx = np.column_stack((self.x1, self.x2))
        self.model_2d_anisotropic = Model_2d_anisotropic
        self.model_2d_detections = Model_2d_anisotropic(self.xx)
        # Detections: model values plus gaussian noise.
        self.y = self.model_2d_detections(self.p) + np.random.normal(0, 0.05,
                                                                     size=10)
        self.sy = np.random.normal(0.15, 0.025, size=10)
        # Limit points drawn from the outer parts of the domain.
        self.x1l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
                              np.random.uniform(low=0.5, high=1, size=2),))
        self.x2l = np.hstack((np.random.uniform(low=-1, high=-0.5, size=2),
                              np.random.uniform(low=0.5, high=1, size=2),))
        self.xxl = np.column_stack((self.x1l, self.x2l))
        self.model_2d_limits = Model_2d_anisotropic(self.xxl)
        # Upper limits sit above the model curve (abs of the noise).
        self.yl = self.model_2d_limits(self.p) + abs(np.random.normal(0, 0.05,
                                                                      size=4))
        self.syl = np.random.normal(0.1, 0.03, size=4)
        # Perturbations of each parameter for peakedness checks.
        self.p1 = np.asarray(self.p) + np.array([1., 0., 0., 0.])
        self.p2 = np.asarray(self.p) + np.array([-1., 0., 0., 0.])
        self.p3 = np.asarray(self.p) + np.array([0., 0.2, 0., 0.])
        self.p4 = np.asarray(self.p) + np.array([0., -0.2, 0., 0.])
        self.p5 = np.asarray(self.p) + np.array([0., 0., 0.4, 0.])
        self.p6 = np.asarray(self.p) + np.array([0., 0., -0.4, 0.])
        self.p7 = np.asarray(self.p) + np.array([0., 0., 0., math.pi / 2.])
        self.p8 = np.asarray(self.p) + np.array([0., 0., 0., -math.pi / 2.])
        # Uniform prior supports for the four parameters.
        self.p0_range = [0., 10.]
        self.p1_range = [0., 2.]
        self.p2_range = [0., 1.]
        self.p3_range = [0., math.pi]
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LS_estimates(self):
        # Least squares must recover all four parameters within a few sigma.
        lsq = LS_estimates(self.xx, self.y, self.model_2d_anisotropic,
                           sy=self.sy)
        p, pcov = lsq.fit([1., 0.5, 0.5, 1.])
        delta0 = 3. * np.sqrt(pcov[0, 0])
        delta1 = 5. * np.sqrt(pcov[1, 1])
        delta2 = 5. * np.sqrt(pcov[2, 2])
        delta3 = 5. * np.sqrt(pcov[3, 3])
        self.assertAlmostEqual(self.p[0], p[0], delta=delta0)
        # FIXME: use variance as parameter so p[1] > 0
        self.assertAlmostEqual(self.p[1], abs(p[1]), delta=delta1)
        self.assertAlmostEqual(self.p[2], p[2], delta=delta2)
        self.assertAlmostEqual(self.p[3], p[3], delta=delta3)
    # CONSISTENCY FIX: the sibling classes guard test_LnLike with the same
    # scipy skip decorator; this one was missing it.
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnLike(self):
        # Likelihood decomposes into its two components and peaks at p
        # against perturbations of every parameter.
        lnlike = LnLike(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnlik0 = lnlike._lnprob[0].__call__(self.p)
        lnlik1 = lnlike._lnprob[1].__call__(self.p)
        self.assertEqual(lnlike(self.p), lnlik0 + lnlik1)
        self.assertGreater(lnlike(self.p), lnlike(self.p1))
        self.assertGreater(lnlike(self.p), lnlike(self.p2))
        self.assertGreater(lnlike(self.p), lnlike(self.p3))
        self.assertGreater(lnlike(self.p), lnlike(self.p4))
        self.assertGreater(lnlike(self.p), lnlike(self.p5))
        self.assertGreater(lnlike(self.p), lnlike(self.p6))
        self.assertGreater(lnlike(self.p), lnlike(self.p7))
        self.assertGreater(lnlike(self.p), lnlike(self.p8))
    @skipIf(not is_scipy, "``scipy`` is not installed")
    def test_LnPost(self):
        # Posterior = prior + likelihood, and peaks at the true p.
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),
                 (uniform.logpdf, self.p2_range, dict(),),
                 (uniform.logpdf, self.p3_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnlike = LnLike(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        jitter=False, outliers=False)
        lnpost = LnPost(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        self.assertEqual(lnpost._lnpr(self.p), lnpr(self.p))
        self.assertEqual(lnpost._lnlike(self.p), lnlike(self.p))
        self.assertGreater(lnpost(self.p), lnpost(self.p1))
        self.assertGreater(lnpost(self.p), lnpost(self.p2))
        self.assertGreater(lnpost(self.p), lnpost(self.p3))
        self.assertGreater(lnpost(self.p), lnpost(self.p4))
    @skipIf((not is_emcee) or (not is_scipy), "``emcee`` and/or ``scipy`` not"
                                              " installed")
    def test_MCMC(self):
        # The HDI of every thinned chain must bracket the true value.
        nwalkers = 250
        ndim = 4
        p0 = np.random.uniform(low=self.p1_range[0], high=self.p1_range[1],
                               size=(nwalkers, ndim))
        lnprs = ((uniform.logpdf, self.p0_range, dict(),),
                 (uniform.logpdf, self.p1_range, dict(),),
                 (uniform.logpdf, self.p2_range, dict(),),
                 (uniform.logpdf, self.p3_range, dict(),),)
        lnpr = LnPrior(lnprs)
        lnpost = LnPost(self.xx, self.y, self.model_2d_anisotropic, sy=self.sy,
                        x_limits=self.xxl, y_limits=self.yl, sy_limits=self.syl,
                        lnpr=lnpr, jitter=False, outliers=False)
        sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost)
        pos, prob, state = sampler.run_mcmc(p0, 250)
        sampler.reset()
        sampler.run_mcmc(pos, 500)
        sample_vec0 = sampler.flatchain[::10, 0]
        sample_vec1 = sampler.flatchain[::10, 1]
        sample_vec2 = sampler.flatchain[::10, 2]
        sample_vec3 = sampler.flatchain[::10, 3]
        p0_hdi_min, p0_hdi_max = hdi_of_mcmc(sample_vec0)
        p1_hdi_min, p1_hdi_max = hdi_of_mcmc(sample_vec1)
        p2_hdi_min, p2_hdi_max = hdi_of_mcmc(sample_vec2)
        p3_hdi_min, p3_hdi_max = hdi_of_mcmc(sample_vec3)
        self.assertTrue((p0_hdi_min < self.p[0] < p0_hdi_max))
        self.assertTrue((p1_hdi_min < self.p[1] < p1_hdi_max))
        self.assertTrue((p2_hdi_min < self.p[2] < p2_hdi_max))
        self.assertTrue((p3_hdi_min < self.p[3] < p3_hdi_max))
| 16,601 | 6,748 |
import os
import numpy as np
import time
from numpy.testing import (assert_array_almost_equal,
assert_array_equal)
from nose.tools import assert_true, assert_equal, assert_raises, raises
from tempfile import mktemp
from eegpy.formats.loc3dmarker import Loc3dMarkers
# Four-marker fixture in loc3d CSV format: label,x,y,z,size,r,g,b.
test_data = """A,-23.58,-18.00,-20.74,5.0,0.0,0.0,1.0
B,-25.50,-18.00,25.94,5.0,0.0,0.0,1.0
C,-57.27,-18.00,-1.49,5.0,0.0,0.0,1.0
D,-9.14,-18.00,59.63,5.0,0.0,0.0,1.0"""
# Path of the temporary fixture file shared by setup/teardown.
TMP_FN = None
def setup():
    """Write the marker fixture to a fresh temp file; returns its path.

    BUG FIX: mktemp() only *names* a file without creating it, which is
    race-prone and deprecated; mkstemp() atomically creates the file with
    safe permissions.
    """
    global TMP_FN
    import tempfile
    fd, TMP_FN = tempfile.mkstemp()
    with os.fdopen(fd, "w") as fh:
        fh.write(test_data)
    return TMP_FN
def teardown():
    """Remove the fixture file if it was created."""
    if TMP_FN is not None and os.path.exists(TMP_FN):
        os.unlink(TMP_FN)
def test_load_marker():
    # The fixture file must parse into four labelled markers.
    loaded = Loc3dMarkers(TMP_FN)
    assert_equal(4, loaded.count)
    # Spot-check the first marker's label and size.
    assert_equal("A", loaded.labels[0])
    assert_equal(5.0, loaded.sizes[0])
#@raises(ValueError)
#def test_nextpow2_negative_x():
# nextpow2(-1)
| 942 | 447 |
# import necessary packages
import cv2
import config
# Module-wide debug flag, taken from the project config.
DEBUG = config.DEBUG
class Reader:
    """Thin wrapper around cv2.VideoCapture reading from a file or camera."""

    def __init__(self, source):
        if DEBUG:
            print('[INFO, reader]: reader module loaded')
        self.vs = None
        self.set_source(source)
        self.start_frame_number = 0

    def set_start_frame_no(self, frame_no):
        """Seek the capture to the given frame index."""
        self.vs.set(cv2.CAP_PROP_POS_FRAMES, frame_no)

    def set_source(self, source):
        # set SOURCE_VID file as source otherwise use camera
        if source:
            self.vs = cv2.VideoCapture(source)
            # BUG FIX: the success message was printed unconditionally,
            # even when the file failed to open; check isOpened() first.
            if DEBUG:
                if self.vs.isOpened():
                    print('[INFO, reader]: videofile ' + source + ' succesfully opened')
                else:
                    print('[ERR, reader]: could not open videofile ' + source)
        else:
            print('[ERR, reader]: no source file provided, using camera as source')
            # BUG FIX: the message promised a camera fallback but self.vs
            # was left as None, crashing on the first read(); open the
            # default camera as announced.
            self.vs = cv2.VideoCapture(0)

    def read(self):
        """Return (ret, frame) from the underlying capture."""
        ret, frame = self.vs.read()
        return ret, frame
import cv2
import numpy as np
# No-op callback required by cv2.createTrackbar; trackbar values are
# polled inside the main loop instead of being handled here.
def empty(a):
    pass
# * creating the Window
windowName = 'Color Detection in HSV Space' # Window Name
cv2.namedWindow(windowName) # Window Creation

# * Adding trackbars for the HSV threshold ranges
cv2.createTrackbar('HUE min',windowName,0,179,empty)
cv2.createTrackbar('HUE max',windowName,179,179,empty)
cv2.createTrackbar('SAT min',windowName,0,255,empty)
cv2.createTrackbar('SAT max',windowName,255,255,empty)
cv2.createTrackbar('Value min',windowName,0,255,empty)
cv2.createTrackbar('Value max',windowName,255,255,empty)

# * Creating the Webcam Instance
cam = cv2.VideoCapture(0)

# Grab one reference "background" frame once the camera delivers data;
# the masked region of the live feed is later replaced with this frame
# (an "invisibility cloak" effect).
while True:
    cv2.waitKey(1000)
    isTrue, initial_frame = cam.read()
    if isTrue:
        break

# * Start Video Rolling
while True:
    isTrue, frame = cam.read() # Reading the Frames
    # * Converting the frame into HSV color space
    framehsv = cv2.cvtColor(frame,cv2.COLOR_BGR2HSV)
    # * Getting the track bar Values
    h_min = cv2.getTrackbarPos('HUE min',windowName)
    h_max = cv2.getTrackbarPos('HUE max',windowName)
    s_min = cv2.getTrackbarPos('SAT min',windowName)
    s_max = cv2.getTrackbarPos('SAT max',windowName)
    v_min = cv2.getTrackbarPos('Value min',windowName)
    v_max = cv2.getTrackbarPos('Value max',windowName)
    # creating the lower and upper range
    lower = np.array([h_min,s_min,v_min])
    upper = np.array([h_max,s_max,v_max])
    # creating the mask of pixels inside the selected HSV range
    mask = cv2.inRange(framehsv, lower, upper)
    mask = cv2.medianBlur(mask, 3)
    kernel = np.ones((3,3),np.uint8)
    # BUG FIX: cv2.dilate's third positional argument is dst, not the
    # iteration count; pass iterations explicitly.
    mask = cv2.dilate(mask, kernel, iterations=5)
    # BUG FIX: the inverse mask was computed as 255 + mask, which wraps
    # around on uint8 (255 + 255 == 254) instead of inverting, and it was
    # taken before dilation; use the proper complement of the final mask.
    mask_inv = cv2.bitwise_not(mask)
    # keep the live frame everywhere outside the masked (cloak) region
    black_blanket_frame = cv2.bitwise_and(frame, frame, mask=mask_inv)
    # take the pre-recorded background inside the masked region
    initial_blanket_frame = cv2.bitwise_and(initial_frame, initial_frame, mask=mask)
    # result output: composite of live feed and background
    result = cv2.bitwise_or(black_blanket_frame,initial_blanket_frame)
    # stacking the input and output side by side
    stackimgs = np.hstack([frame,result])
    # * Display
    cv2.imshow(windowName,stackimgs)
    # * Exit on the ESC key
    if cv2.waitKey(1) & 0xFF == 27:
        break

cam.release() # Releasing the instance
cv2.destroyAllWindows() # Destroying the windows
import os
from threading import RLock
ENVIRONMENT_VAR_NAME: str = "QSET_ENVIRONMENT"
ENVIRONMENT_NAME_DEFAULT: str = 'Development'
ENVIRONMENT_NAME_PRODUCTION: str = 'Production'


class Environment:
    """Holds the active environment name, resolved from an explicit
    argument or the QSET_ENVIRONMENT variable (default: Development)."""

    __Default = None
    __Lock = RLock()

    def __init__(self, environment_name: str = None):
        # Explicit argument wins; otherwise fall back to the env var.
        resolved = environment_name or os.getenv(ENVIRONMENT_VAR_NAME, ENVIRONMENT_NAME_DEFAULT)
        self._environment_name: str = resolved.strip()

    @property
    def environment_name(self) -> str:
        """The trimmed environment name."""
        return self._environment_name

    @classmethod
    def get_default(cls):
        """Lazily create and return the shared default Environment.

        Double-checked locking keeps creation thread-safe while making
        subsequent reads lock-free.
        """
        if cls.__Default is not None:
            return cls.__Default
        with cls.__Lock:
            if cls.__Default is None:
                cls.__Default = Environment()
        return cls.__Default
| 759 | 234 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import subprocess
import requests
import argparse
import base64
import sys
import json
import codecs
def dec_data(byte_data: bytes):
    """Decode subprocess output: try UTF-8 first, fall back to GB18030."""
    try:
        return byte_data.decode('UTF-8')
    except UnicodeDecodeError:
        # Legacy Chinese-locale output; let this raise if it also fails.
        pass
    return byte_data.decode('GB18030')
def get_files(path):
    """Return the names of all regular files under *path* (recursive).

    BUG FIX: the original reassigned ``all_files = files`` on every
    os.walk() step, so it returned only the files of the *last* directory
    visited; accumulate across the whole walk instead.  For a flat
    directory (the only case used in this script) the result is unchanged.
    """
    all_files = []
    for root, dirs, files in os.walk(path):
        all_files.extend(files)
    return all_files
def automation():
    """Run each payload PoC script against its matching fofa target list.

    A payload named ``X`` in ./payload/ is paired with ./fofa_file/X.txt;
    output lines that do not contain '不' (i.e. vulnerable hits) are
    echoed and appended to ./results/X_OK.txt.
    """
    get_payload_dir = get_files("./payload/")
    get_result_dir = get_files("./fofa_file/")
    for i in get_payload_dir:
        print("\033[1;32m ================================================================\033[0m")
        print("\033[1;32m 开始 %s 漏洞检查\033[0m" % (i))
        print("\033[1;32m 正在检查请稍等......\033[0m")
        print("\033[1;32m ================================================================\033[0m")
        for j in get_result_dir:
            if j != i + ".txt":
                continue
            p = subprocess.Popen('python3 "./payload/%s" -f "./fofa_file/%s"' % (i, j), shell=True,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            while p.poll() is None:
                line = p.stdout.readline().strip()
                if not line:
                    continue
                line = dec_data(line)
                # Lines containing '不' report "not vulnerable"; skip them.
                if line.find('不') != -1:
                    continue
                # Strip ANSI color codes and the "[o] " marker.
                # BUG FIX: the original's last two .replace() calls were
                # mis-parenthesised onto the literal " " (a no-op), so the
                # "\033[36m[o] " marker was never removed from the line.
                result = (line.replace("\033[1;36m", "")
                              .replace("\033[0m", " ")
                              .replace("\033[1;32m", " ")
                              .replace("\033[36m[o] ", " "))
                print(result)
                # BUG FIX: the result file was opened per hit and never
                # closed; use a context manager.
                with open("./results/" + i + "_OK.txt", 'a', encoding='utf-8') as f:
                    f.write(result + "\n")
def banner():
    """Print the tool's ASCII-art banner and usage instructions."""
    # The art below is byte-exact; do not reformat it.
    print("""
\033[1;36m ___ \033[0m
\033[1;36m ,--.'|_ \033[0m
\033[1;36m ,--, | | :,' ,---. __ ,-. \033[0m
\033[1;36m ,'_ /| : : ' : ' ,'\ .--.--. ,' ,'/ /| \033[0m
\033[1;36m ,--.--. .--. | | :.;__,' / / / | / / ' ' | |' | ,---. \033[0m
\033[1;36m / \ ,'_ /| : . || | | . ; ,. :| : /`./ | | ,'/ \ \033[0m
\033[1;36m .--. .-. | | ' | | . .:__,'| : ' | |: :| : ;_ ' : / / / ' \033[0m
\033[1;36m \__\/: . . | | ' | | | ' : |__' | .; : \ \ `. | | ' . ' / \033[0m
\033[1;36m ," .--.; | : | : ; ; | | | '.'| : | `----. \; : | ' ; :__ \033[0m
\033[1;36m / / ,. | ' : `--' \ ; : ;\ \ / / /`--' /| , ; ' | '.'| \033[0m
\033[1;36m; : .' \: , .-./ | , / `----' '--'. / ---' | : : \033[0m
\033[1;36m| , .-./ `--`----' ---`-' `--'---' \ \ / \033[0m
\033[1;36m `--`---' `----' \033[0m
""")
    # Usage help (kept in Chinese: user-facing runtime strings).
    print('\033[1;36m 工具使用方法\033[0m')
    print('\033[1;36m python3 autosrc.py -e/--email email -k/--key key\033[0m')
    print('\033[1;36m python3 autosrc.py -h/--help\033[0m')
# Entry point: without arguments, show the banner/usage and exit.
if len(sys.argv) == 1:
    banner()
    sys.exit()
parser = argparse.ArgumentParser(description='autosrcfofaapi help')
parser.add_argument('-e', '--email', help='Please Input a email!', default='')
parser.add_argument('-k', '--key', help='Please Input a key!', default='')
args = parser.parse_args()
email = args.email
key = args.key
# Validate the FOFA API credentials via the account-info endpoint.
url = "https://fofa.so/api/v1/info/my?email=" + email + "&key=" + key
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.93 Safari/537.36",
    "Content-Type": "application/x-www-form-urlencoded"
}
response = requests.get(url, headers=header)
# FOFA returns an 'errmsg' field in the body on failure.
if 'errmsg' not in response.text:
    print("\033[1;32memail和key均正确\033[0m")
    get_payload_dir = get_files("./payload/")
    print(get_payload_dir)
    for i in get_payload_dir:
        # The first line of each payload script holds its FOFA query,
        # prefixed with '#' characters that are stripped here.
        f = codecs.open("./payload/" + i, mode='r', encoding='utf-8')
        line = f.readline()
        sentence = line.strip("#")
        print(sentence)
        print("\033[1;36mfofa语句 >>>\033[0m" + sentence)
        # The search endpoint expects the query base64-encoded.
        sentence = base64.b64encode(sentence.encode('utf-8')).decode("utf-8")
        url = "https://fofa.so/api/v1/search/all?email=" + email + "&key=" + key + "&qbase64=" + sentence
        response = requests.get(url, headers=header)
        if 'errmsg' not in response.text:
            print("\033[1;36m已保存到\033[0m\033[1;32mfofa_file目录下\033[0m")
            r1 = json.loads(response.text)
            # NOTE(review): k[0] is presumably the host/URL column of each
            # result row — confirm against the FOFA API response format.
            for k in r1['results']:
                s = k[0]
                print(s)
                # NOTE(review): file handles here (and `f` above) are never
                # closed; `f` is also rebound, leaking the codecs handle.
                f = open("./fofa_file/" + i + ".txt", 'a', encoding='utf-8')
                f.write(s + "\n")
        else:
            print("\033[1;31mfofa语句不正确\033[0m")
else:
    print("\033[1;31memail或key不正确\033[0m")
print("\033[1;34m[INFO]\033[0m Success")
print("\033[1;32m ================================================================\033[0m")
print("\033[1;32m FOFA采集完成 开始漏洞检查\033[0m")
print("\033[1;32m ================================================================\033[0m")
automation()
| 5,575 | 2,339 |
from direct.directnotify import DirectNotifyGlobal
from pirates.battle.DistributedBattleNPCAI import *
from pirates.pirate import AvatarTypes
class DistributedCreatureAI(DistributedBattleNPCAI):
    # Server-side (AI) distributed object for creatures; currently inherits
    # all battle behaviour from DistributedBattleNPCAI unchanged.
    notify = DirectNotifyGlobal.directNotify.newCategory('DistributedCreatureAI')
    def __init__(self, air):
        # air: the AI repository this distributed object is registered with.
        DistributedBattleNPCAI.__init__(self, air)
| 360 | 108 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Dec 13 09:59:13 2019
@author: orochi
"""
import numpy as np
import csv
from classifier_network import LinearNetwork
from classifier_network import ReducedLinearNetwork
import torch
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
def calc_velocity(start, end):
    """Finite-difference velocity between two samples taken 0.05 s apart.

    Works element-wise when given numpy arrays.
    """
    dt = 0.05
    return (end - start) / dt
def normalize_vector(vector):
    """Min-max normalize *vector* into [0, 1].

    A constant vector (max == min) would divide by zero, so it is mapped
    to all 0.5 instead.
    """
    lo = np.min(vector)
    hi = np.max(vector)
    if hi - lo == 0:
        return np.ones(np.shape(vector)) * 0.5
    return (vector - lo) / (hi - lo)
# Training script: loads grasp-classifier CSV data, engineers velocity
# features, normalizes, drops roll/pitch/yaw columns, then trains the
# network with plain SGD and plots the running loss.
filenames=['Classifier_Data_Big_Cube.csv','Classifier_Data_Med_Cube.csv','Classifier_Data_Small_Cube.csv', \
'Classifier_Data_Big_Cylinder.csv','Classifier_Data_Med_Cylinder.csv','Classifier_Data_Small_Cylinder.csv']
a=[]
column_names=[]
#load in the data to one massive matrix called a
for k in range(6):
    with open('Classifier_Data/'+filenames[k]) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        line_count = 0
        for row in csv_reader:
            if line_count == 0:
                # First row of every file is the header; kept for the
                # column-deletion sanity check below.
                column_names.append(row)
                #print(f'Column names are {", ".join(row)}')
                #print(row[6],row[48])
                line_count += 1
            else:
                a.append(row)
                line_count += 1
                #print('here')
        #print(f'Processed {line_count} lines.')
#print(np.shape(a))
network=ReducedLinearNetwork()
network.zero_grad()
network.double()
b=np.shape(a)
print(b)
a=np.array(a,dtype='f')
#create a list of numbers that correspond to the columns to be removed. This arrangement removes the roll, pitch and yaw from the matrix a
# NOTE(review): assumes pose columns repeat every 6 entries starting at
# column 9 (first pose block) and 51 (second block) — confirm against the
# CSV header layout.
c=np.arange(9,42,6)
d=np.arange(10,42,6)
e=np.arange(11,42,6)
f=np.arange(51,87,6)
g=np.arange(52,87,6)
h=np.arange(53,87,6)
#obj_pose=np.array([84,85,86])
c=np.concatenate((c,d,e,f,g,h))
#calculate the velocity of the fingers.
# Columns i+6 (start pose) and i+48 (end pose) form each velocity pair;
# the start-pose column is overwritten with the computed velocity.
for i in range(36):
    velocity=calc_velocity(a[:,i+6],a[:,i+48])
    a[:,i+6]=velocity
#normalize the entire table so that all the inputs and outputs lie on a spectrum from 0-1
for i in range(b[1]):
    a[:,i]=normalize_vector(a[:,i])
#remove the columns that are unwanted, described by the array c
new_a=np.zeros([b[0],69])
for i in range(b[0]):
    new_a[i,:]=np.delete(a[i,:],c)
#check to make sure the right columns got deleted
column_names=np.delete(column_names,c)
print(column_names[0])
a=new_a
#print(a[:,-1])
running_loss=0
learning_rate=0.1
total_loss=[]
total_time=[]
num_epocs=100
# NOTE(review): network.double() above is undone here; the model trains in
# float32 to match dtype='f' inputs.
network= network.float()
for j in range(num_epocs):
    print(j)
    # Linear learning-rate decay from 0.1 down to 0.01 over all epochs.
    learning_rate=0.1-j/num_epocs*0.09
    np.random.shuffle(a)
    running_loss=0
    for i in range(b[0]):
        #network=network.float()
        #state = ego.convert_world_state_to_front()
        #ctrl_delta, ctrl_vel, err, interr, differr = controller.calc_steer_control(t[i],state,x_true,y_true, vel, network)
        # Last column is the label; the rest are features.
        input1=a[i,:-1]
        #print(input1)
        network_input=torch.tensor(input1)
        #print(network_input)
        #print(a[i,-1])
        network_target=torch.tensor(a[i,-1])
        #network_target.reshape(1)
        network_input=network_input.float()
        #print(network_input)
        out=network(network_input)
        out.reshape(1)
        network.zero_grad()
        criterion = nn.MSELoss()
        loss = criterion(out, network_target)
        loss.backward()
        running_loss += loss.item()
        #print(out.data,network_target.data, out.data-network_target.data)
        #print(loss.item())
        # Manual SGD step. NOTE(review): this loop variable `f` shadows the
        # earlier column-index array `f` defined above.
        for f in network.parameters():
            f.data.sub_(f.grad.data * learning_rate)
        if i % 1000 ==999: # keep a tally of the loss and time so that the training can be plotted
            print(running_loss)
            #print(loss.item(),out[0])
            total_loss.append(running_loss)
            total_time.append((i+1)/1000+j*b[0]/1000)
            running_loss=0
plt.plot(total_time,total_loss)
plt.show()
torch.save(network.state_dict(),'./full_trained_classifier_no_rpw_obj_pose.pth')
"""
Wrapper for sudo
https://linux.die.net/man/8/sudo
"""
import os
import shlex
from typing import List, Mapping
from nspawn import CONFIG
from nspawn.wrapper.base import Base
from nspawn.support.parser import parse_text2dict
class Sudo(Base):
    """
    Privileged file-system helpers executed through the sudo wrapper.
    """

    def __init__(self):
        super().__init__('wrapper/sudo')

    def script(self, script:str) -> None:
        """Run a whitespace-delimited command line, asserting success."""
        self.execute_unit_sert(script.split())

    def folder_check(self, path:str) -> bool:
        """Report whether *path* is an existing directory."""
        return self.has_success(['test', '-d', path])

    def folder_assert(self, path:str) -> None:
        """Fail loudly when *path* is not an existing directory."""
        assert self.folder_check(path), f"missing path '{path}'"

    def folder_ensure(self, path:str) -> None:
        """Create *path* (and any missing parents) if absent."""
        self.execute_unit_sert(['mkdir', '--parents', path])

    def parent_ensure(self, path:str) -> None:
        """Make sure the directory containing *path* exists."""
        self.folder_ensure(os.path.dirname(path))

    def file_check(self, path:str) -> bool:
        """Report whether *path* is an existing regular file."""
        return self.has_success(['test', '-f', path])

    def file_assert(self, path:str):
        """Fail loudly when *path* is not an existing regular file."""
        assert self.file_check(path), f"missing path '{path}'"

    def file_load(self, path) -> str:
        """Read and return the text content of *path*."""
        return self.execute_unit_sert(['cat', path]).stdout

    def file_save(self, path:str, text:str) -> None:
        """Write *text* to *path*, creating parent folders as needed."""
        self.parent_ensure(path)
        self.execute_unit_sert(['dd', f"of={path}"], stdin=text)

    def files_copy(self, source:str, target:str) -> None:
        """Copy a file, creating the target's parent folder first."""
        self.parent_ensure(target)
        self.execute_unit_sert(['cp', '--force', source, target])

    def files_move(self, source:str, target:str) -> None:
        """Move a file, replacing any existing target."""
        self.files_delete(target)
        self.parent_ensure(target)
        self.execute_unit_sert(['mv', '--force', source, target])

    def files_delete(self, path:str) -> None:
        """Remove *path* recursively, ignoring absence."""
        self.execute_unit_sert(['rm', '--force', '--recursive', path])

    #
    # rsync-based transfers
    #
    def files_sync_any(self, source:str, target:str, opts_line:str) -> None:
        "invoke rsync"
        if self.folder_check(source):
            # A trailing slash makes rsync transfer the folder's contents.
            source = os.path.join(source, '')
            self.folder_ensure(target)
        else:
            self.parent_ensure(target)
        options = shlex.split(opts_line)
        self.execute_unit_sert(['rsync'] + options + [source, target])

    def files_sync_base(self, source:str, target:str) -> None:
        "options for DSL.COPY, DSL.CAST"
        self.files_sync_any(source, target, CONFIG['wrapper/sudo']['rsync_base'])

    def files_sync_full(self, source:str, target:str) -> None:
        "options for DSL.PULL, DSL.PUSH"
        self.files_sync_any(source, target, CONFIG['wrapper/sudo']['rsync_full'])

    def files_sync_time(self, source:str, target:str):
        "transfer file time only"
        self.execute_unit_sert(['touch', '-r', source, target])

    #
    # store file meta data in xattr
    #
    def xattr_space(self) -> str:
        "attribute name space used by this package"
        return CONFIG['wrapper/sudo']['xattr_space']

    def xattr_regex(self) -> str:
        "regular expression used to match package attributes"
        return CONFIG['wrapper/sudo']['xattr_regex']

    def xattr_name(self, key:str) -> str:
        "produce package-specific attribute name"
        return f"{self.xattr_space()}{key}"

    def xattr_get(self, path:str, key:str) -> str:
        "load single extendend path attribute"
        # -n name, --name=name Dump the value of the named extended attribute
        # --only-values Dump out the extended attribute value(s) only
        outcome = self.execute_unit(['getfattr', '-n', self.xattr_name(key), '--only-values', path])
        # Missing attributes are reported as None rather than an error.
        return outcome.stdout if outcome.rc == 0 else None

    def xattr_set(self, path:str, key:str, value:str) -> None:
        "save single extendend file attribute"
        # -n name, --name=name Specifies the name of the extended attribute to set
        # -v value, --value=value Specifies the new value of the extended attribute
        self.execute_unit_sert(['setfattr', '-n', self.xattr_name(key), '-v', value, path])

    def xattr_load(self, path:str) -> Mapping[str, str]:
        "retrieve extended file attributes as dictionary"
        # -d, --dump Dump the values of all extended attributes
        # -m pattern, --match=pattern Only include attributes with names matching the regular expression
        outcome = self.execute_unit(['getfattr', '-d', '-m', self.xattr_regex(), path])
        attributes = dict()
        for raw_name, raw_value in parse_text2dict(outcome.stdout).items():
            # Deserialize: drop the name-space prefix and surrounding quotes.
            attributes[raw_name.replace(self.xattr_space(), '')] = raw_value[1:-1]
        return attributes

    def xattr_save(self, path:str, data_dict:Mapping[str, str]) -> None:
        "persist dictionary as extended file attributes"
        for key, value in data_dict.items():
            self.xattr_set(path, key, value)


SUDO = Sudo()
| 5,144 | 1,608 |
from Board import Board, code_to_list
from sys import argv
def print_layout():
    """Show the pit-numbering reference for both players."""
    rows = (
        "╔══╦══╦══╦══╦══╦══╦══╦══╗",
        "║ ║ 6║ 5║ 4║ 3║ 2║ 1║ ║ <- Player 2",
        "║ ╠══╬══╬══╬══╬══╬══╣ ║",
        "║ ║ 1║ 2║ 3║ 4║ 5║ 6║ ║ <- Player 1",
        "╚══╩══╩══╩══╩══╩══╩══╩══╝",
    )
    print("\n".join(rows))
def lpad(str, length=2):
    """Left-pad *str* with spaces to *length* characters (no truncation)."""
    # rjust is exactly "pad on the left with spaces, never shorten".
    return str.rjust(length)
def render(field):
    """Draw the board with the stone counts from *field* (14 slots:
    index 0 and 7 are the Mancalas, 1-6 and 8-13 the pits)."""
    # Template slots A..N map one-to-one onto field[0..13].
    cells = {key: lpad(str(count)) for key, count in zip("ABCDEFGHIJKLMN", field)}
    print("""
╔══╦══╦══╦══╦══╦══╦══╦══╗
║ ║{N}║{M}║{L}║{K}║{J}║{I}║ ║
║{A}╠══╬══╬══╬══╬══╬══╣{H}║
║ ║{B}║{C}║{D}║{E}║{F}║{G}║ ║
╚══╩══╩══╩══╩══╩══╩══╩══╝
""".format(**cells))
def get_index(board):
    """Prompt the current player for a pit (1-6) and return the board index.

    Returns 0 immediately when the game has already ended. Typing "exit"
    prints a resume code and terminates the program.

    BUG FIX: non-numeric input other than "exit" used to crash the game
    with an uncaught ValueError at int(i); it is now treated like an
    out-of-range choice and re-prompted.
    """
    if board.game_ended():
        return 0
    while True:
        i = input(f"Player {board.current_player+1}:")
        if i == "exit":
            # Print board representation so the game can be resumed later.
            print(f"Continue with code \"{board.get_code()}\"")
            print("> python3 ./app.py <code>")
            exit()
        try:
            choice = int(i)
        except ValueError:
            choice = None
        if choice is not None and 0 < choice < 7:
            # Player 2's pits live at offset 7 in the board array.
            return choice + board.current_player * 7
        print("Please select number from 1 to 6 or exit via \"exit\"")
def game_loop(b):
    """Run the interactive Mancala loop on board *b* until the game ends."""
    # Fixed feedback for the simple result codes returned by Board.play.
    static_messages = {
        1: "You can only play your own side.",
        2: "You cannot play your Mancala.",
        3: "The position you want to play must have a stone count higher than 0!",
        4: "You ended in your Mancala. You may play again.",
    }
    while not b.ended:
        render(b.state)
        i = get_index(b)
        code = b.play(i)
        if code == 6:
            # Terminal state: announce the winner and stop.
            print("Game Ended\n\n")
            winner = b.finalize()
            print(f"Player {winner+1} won!")
            render(b.state)
            break
        if code in static_messages:
            print(static_messages[code], end="")
        elif code == 5:
            print(f"Player {(1-b.current_player)+1} took.", end="")
        elif code == -1:
            print(f"ERROR: Index {i} not on board....", end="")
        else:
            # No message: pad so a previous longer message is overwritten.
            print(" "*90, end="")
        print(" "*30)
    #print("\r\033[A\033[A\033[A\033[A\033[A\033[A\033[A\033[A\033[A\033[A") #Return to start
def main():
    """Entry point: build a Board (optionally from a resume code given as
    the first CLI argument) and run the game."""
    if len(argv) > 1:
        b = Board(code_to_list(argv[1]))
    else:
        b = Board()
    print("Layout")
    print_layout()
    print("\nGAME")
    game_loop(b)


if __name__ == '__main__':
    main()
| 3,052 | 1,192 |
from skyfield.api import Topos, load
from astropy import units as u
from astropy import time
import matplotlib.pyplot as plt
import numpy as np
from datetime import datetime
import sys
# Shared Skyfield timescale used for all epoch/time conversions below.
ts = load.timescale(builtin=True)
#This program is able to calculate the closest pass between two given satellites over the next 5 days.
#While it cannot predict collisions (yet), it can be used to verify already predicted ones.
#I used the Skyfield library's documentation to write this first part of my code.
#See here: https://rhodesmill.org/skyfield/earth-satellites.html
#Load all satellite Two-Line Element sets from the given file(s)
#I used the satellite data provided by the celestrak.com website, since it is vast, accurate and easily accessible.
sat_url = 'https://celestrak.com/NORAD/elements/active.txt'
# NOTE(review): reload=True re-downloads the full active-satellite catalogue
# on every run — this requires network access at module import time.
tle_satellites = load.tle_file(sat_url, reload = True)
print("Loaded", len(tle_satellites), "satellites")
def calculate():
    """Interactively pick two satellites by NORAD ID, find their closest
    approach over the next 5 days, print it and save a separation plot
    to 'sat-separation.png'."""
    # Search the file(s) and display names and epochs of the desired satellites
    NORAD_ID1 = input("\nPlease enter the NORAD ID of the first desired object: ")
    NORAD_ID2 = input("\nPlease enter the NORAD ID of the second desired object: ")
    by_number = {sat.model.satnum: sat for sat in tle_satellites}
    satellite1 = by_number[int(NORAD_ID1)]
    satellite2 = by_number[int(NORAD_ID2)]
    print("\n", satellite1)
    print("\n", satellite2)
    print("\nCurrent epoch of sat1: ", satellite1.epoch.utc_jpl())
    print("\nCurrent epoch of sat2: ", satellite2.epoch.utc_jpl())
    # Compute the positions of both satellites every 0.864 seconds (0.00001 days) for the next 5 days using the SGP4 perturbation model.
    tcompute = ts.tt_jd(np.arange(satellite1.epoch.tt, satellite1.epoch.tt + 5.0, 0.00001))
    # From hereon, I did not reference the documentation and everything is my original code.
    # Initialize the necessary arrays for the calculations.
    distancearray = []
    timearray = []
    closepassdistance = []
    closepasstime = []
    print("setup complete")
    # Compute the distance between the satellites from the tcompute array for each element, by subtracting the vectors of their current positions.
    # Enter the distance into distancearray and the time into timearray.
    # NOTE(review): ~500k iterations with a print() per step — the print
    # dominates runtime and could be removed or throttled.
    for x in tcompute:
        y = (satellite2.at(x) - satellite1.at(x)).distance().km
        distancearray.append(y)
        timearray.append(x)
        print(x)
    print("done x in tcompute")
    # Initialize two more arrays, that contain the first and second derivatives of the distance between the satellites.
    # This is necessary to determine the minimum distance between them during the next 5 days.
    derivativearray = np.gradient(distancearray)
    secondderivativearray = np.gradient(derivativearray)
    print("done derivative")
    # This for-loop finds all relative minima using single-variable calculus, appending them to another array.
    # The loop first finds all instances where the derivative crosses 0.
    # Note that I use the intermediate value theorem to do this, since the tcompute array only has datapoints every 0.864s.
    # Then, the for loop finds which of these points have a second derivative greater than 0, i.e. they are relative minima.
    # NOTE(review): the +-1.5 km/step window assumes the derivative changes
    # slowly enough between 0.864 s samples — confirm for fast conjunctions.
    i = 0
    for z in derivativearray:
        if z < 1.5 and z > -1.5:
            if secondderivativearray[i] > 0:
                j = distancearray[i]
                k = timearray[i]
                closepassdistance.append(j)
                closepasstime.append(k)
        i += 1
    print("\n -----------------------------")
    print("\n")
    for e in closepasstime:
        print(e)
    # Find the distance and time of the closest pass between the two satellites.
    cpd = closepassdistance.index(min(closepassdistance))
    cpt = closepasstime[cpd]
    # Print out the distance and time of the closest pass.
    print("\n", cpt.utc_datetime())
    print("\n", closepassdistance[cpd], "kilometers")
    # Graph out a distance vs. time graph of the closest pass
    tgraph = ts.tt_jd(np.arange(cpt.tt - 0.001, cpt.tt + 0.001, 0.00001))
    g1 = satellite1.at(tgraph)
    g2 = satellite2.at(tgraph)
    fig, ax = plt.subplots()
    a = tgraph.utc_datetime()
    b = (g2 - g1).distance().km
    ax.plot(a, b)
    ax.grid(which='both')
    ax.set(title='Closest pass/collision between the satellites', xlabel = 'UTC')
    fig.savefig('sat-separation.png', bbox_inches='tight')
    fig.show()
# Simple loop to restart the program if desired.
# BUG FIX: previously this was a one-shot if/elif — the prompt answer was
# read only after the single calculate() call and then ignored, so the
# program always exited after one run and 'q'/'r' had no effect. Now the
# answer actually controls the loop.
quit = 'r'
while quit != 'q':
    calculate()
    quit = input("Type 'q' to quit, or 'r' to restart: ")
| 4,346 | 1,488 |
import pytest
from pytest_django.asserts import assertRedirects
from django.urls import reverse
from django.contrib.auth.models import User
from webdev.fornecedores.models import Fornecedor, Fornecimento, Email, Telefone, Local, DadosBancarios
# Novo Fornecedor
@pytest.fixture
def fornecimento(db):
    """A basic Fornecimento ("Programador") used by the new-supplier tests."""
    return Fornecimento.objects.create(nome="Programador")
@pytest.fixture
def resposta_autenticada(client, fornecimento):
    """POST a new supplier as an authenticated user; return the response."""
    User.objects.create_user(username='TestUser', password='MinhaSenha123')
    client.login(username='TestUser', password='MinhaSenha123')
    form_data = {
        'nome': 'Isaac Newton',
        'fornecimento': [fornecimento.id],
    }
    return client.post(reverse('fornecedores:novo_fornecedor'), data=form_data)
def test_redireciona_editar_fornecedor(resposta_autenticada):
    """The create view should redirect to the new supplier's edit page."""
    # NOTE(review): assumes the created Fornecedor gets pk == 1 (fresh test
    # DB) — verify this still holds if fixtures ever pre-populate suppliers.
    assertRedirects(resposta_autenticada, reverse(
        'fornecedores:editar_fornecedor', kwargs={'fornecedor_id': 1}))
def test_fornecedor_existe_no_bd(resposta_autenticada):
    """The POST must persist a Fornecedor in the database."""
    existe = Fornecedor.objects.exists()
    assert existe
# Novo Fornecimento
@pytest.fixture
def criar_fornecedor(db):
    """Persist and return a bare Fornecedor for the contact-info fixtures."""
    fornecedor = Fornecedor.objects.create(nome='Zé Comédia')
    return fornecedor
# Novo Email
@pytest.fixture
def resposta_novo_email(client, criar_fornecedor):
    """POST a new e-mail address for the supplier; return the response."""
    User.objects.create_user(username='TestUser', password='MinhaSenha123')
    client.login(username='TestUser', password='MinhaSenha123')
    destino = reverse('fornecedores:novo_email',
                      kwargs={'fornecedor_id': criar_fornecedor.id})
    return client.post(destino, data={
        'fornecedor': criar_fornecedor.id,
        'email': 'testEmail@gmail.com'
    })
def test_email_existe_no_bd(resposta_novo_email):
    """The POST must persist an Email in the database."""
    existe = Email.objects.exists()
    assert existe
# Novo Telefone
@pytest.fixture
def resposta_novo_telefone(client, criar_fornecedor):
    """POST a new phone number for the supplier; return the response."""
    # The User instance itself is not needed (the unused `usr` local was
    # removed); only the authenticated session matters.
    User.objects.create_user(username='TestUser', password='MinhaSenha123')
    client.login(username='TestUser', password='MinhaSenha123')
    resp = client.post(
        reverse(
            'fornecedores:novo_telefone',
            kwargs={'fornecedor_id':criar_fornecedor.id}
        ), data={
            'fornecedor': criar_fornecedor.id,
            'telefone': 11944647420
        }
    )
    return resp
def test_telefone_existe_no_bd(resposta_novo_telefone):
    """The POST must persist a Telefone in the database."""
    existe = Telefone.objects.exists()
    assert existe
# Nova Localização
@pytest.fixture
def resposta_novo_local(client, criar_fornecedor):
    """POST a new address for the supplier; return the response."""
    # The User instance itself is not needed (the unused `usr` local was
    # removed); only the authenticated session matters.
    User.objects.create_user(username='TestUser', password='MinhaSenha123')
    client.login(username='TestUser', password='MinhaSenha123')
    resp = client.post(
        reverse(
            'fornecedores:novo_local',
            kwargs={'fornecedor_id':criar_fornecedor.id}
        ), data={
            'fornecedor': criar_fornecedor.id,
            'pais': 'Brasil',
            'estado': 'SP',
            'cidade': 'São Paulo',
            'bairro': 'Campo Belo',
            'endereco': 'Av Barão de Vali, 240',
            'cep': '04613-030',
        }
    )
    return resp
def test_local_existe_no_bd(resposta_novo_local):
    """The POST must persist a Local in the database."""
    existe = Local.objects.exists()
    assert existe
# Novos Dados Bancários
@pytest.fixture
def resposta_novos_dados_bancarios(client, criar_fornecedor):
    """POST new banking details for the supplier; return the response."""
    # The User instance itself is not needed (the unused `usr` local was
    # removed); only the authenticated session matters.
    User.objects.create_user(username='TestUser', password='MinhaSenha123')
    client.login(username='TestUser', password='MinhaSenha123')
    resp = client.post(
        reverse(
            'fornecedores:novos_dados_bancarios',
            kwargs={'fornecedor_id':criar_fornecedor.id}
        ), data={
            'fornecedor': criar_fornecedor.id,
            'tipo_de_transacao': 'px',
            'numero': '0000030',
        }
    )
    return resp
def test_dados_bancarios_existe_no_bd(resposta_novos_dados_bancarios):
    """The POST must persist a DadosBancarios row in the database."""
    existe = DadosBancarios.objects.exists()
    assert existe
| 3,738 | 1,360 |
"""humimp URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from jobs.views import ApplicationList, ApplicationRetrieveDestroy
from django.conf import settings
from django.conf.urls.i18n import i18n_patterns
from django.utils.translation import gettext_lazy as _
urlpatterns = [
    # django rest api
    path('api/applications', ApplicationList.as_view()),
    path('api/applications/<int:pk>', ApplicationRetrieveDestroy.as_view()),
    path('api-auth/', include('rest_framework.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# NOTE(review): static() serves files only while DEBUG is on; in production
# the web server is expected to serve MEDIA/STATIC — confirm deployment.
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
# The routes below are wrapped in i18n_patterns, so they get a language
# prefix (e.g. /en/, /de/) based on the active translation.
urlpatterns += i18n_patterns(
    path('admin/', admin.site.urls),
    path('', include('jobs.urls', namespace='jobs')),
    path('blogs/', include('blogs.urls', namespace='blogs')),
    path('careers/', include('careers.urls', namespace='careers')),
)
# Admin branding: hide the "View site" link and set a custom header.
admin.site.site_url = None
admin.site.site_header = 'HuminImp Administration'
| 1,656 | 521 |
from typing import List
def rotate(matrix: List[List[int]]) -> None:
    """Rotate a square matrix 90 degrees clockwise, in place.

    Works ring by ring from the outside in, cycling four elements at a
    time; the inner row lists are mutated (no new rows are allocated).
    """
    n = len(matrix)
    for ring in range(n // 2):
        lo = ring
        hi = n - ring - 1
        for col in range(lo, hi):
            off = col - lo
            saved = matrix[lo][col]              # top
            matrix[lo][col] = matrix[hi - off][lo]       # left  -> top
            matrix[hi - off][lo] = matrix[hi][hi - off]  # bottom -> left
            matrix[hi][hi - off] = matrix[col][hi]       # right -> bottom
            matrix[col][hi] = saved                      # top   -> right
| 499 | 146 |
from collections.abc import MutableMapping
import warnings
import json
from configparser import *
from configparser import __all__
# Expose JSONConfigParser as part of this module's public API.
__all__.append("JSONConfigParser")
# Keep a handle on the stdlib parser before the name is shadowed below.
_ConfigParser = ConfigParser
class ConfigParser:
    """Accepts extra keyword config_type and returns the instance based on it"""

    def __new__(cls, *args, **kwds):
        # Dispatch on the (popped) config_type keyword; everything else is
        # forwarded to the stdlib parser for the default "ini" flavour.
        if kwds.pop("config_type", "ini") == "json":
            return JSONConfigParser()
        return _ConfigParser(*args, **kwds)
class JSONConfigParser(MutableMapping):
    """A ConfigParser-like mapping backed by a JSON document.

    Sections are the top-level keys of the JSON object; options are the
    keys inside each section's object. Implements the familiar
    configparser API where it maps cleanly onto JSON and raises
    NotImplementedError elsewhere.
    """

    def __init__(self):
        # Nested mapping: {section: {option: value}}.
        self._dict = {}

    def _lookup(self, section, option):
        """Return the raw stored value for (section, option).

        Maps KeyError onto NoSectionError / NoOptionError. BUG FIX: the
        old code compared str(KeyError) — which includes quotes — against
        the section name (never equal), and built NoOptionError with one
        argument although it requires (option, section), so every error
        path raised TypeError instead of the intended exception.
        """
        try:
            options = self._dict[section]
        except KeyError:
            raise NoSectionError(section) from None
        try:
            return options[option]
        except KeyError:
            raise NoOptionError(option, section) from None

    def defaults(self):
        raise NotImplementedError

    def sections(self):
        """Return a list of section names"""
        return list(self._dict)

    def add_section(self, section):
        """Create a new section in the configuration.
        Raise DuplicateSectionError if a section by the specified name
        already exists.
        """
        if self.has_section(section):
            raise DuplicateSectionError(section)
        self._dict[section] = {}

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration."""
        return section in self._dict

    def options(self, section):
        """Return a list of option names for the given section name."""
        try:
            return list(self._dict[section])
        except KeyError:
            # Pass the plain section name, not str(KeyError) with quotes.
            raise NoSectionError(section) from None

    def read(self, filenames, encoding=None):
        """Read and parse a filename or a list of filenames.
        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read. A single
        filename may also be given.
        Return list of successfully read files.
        """
        if isinstance(filenames, str):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                with open(filename, encoding=encoding) as f:
                    self.read_file(f)
            except OSError:
                continue
            read_ok.append(filename)
        return read_ok

    def read_file(self, f, **kwds):
        """Like read() but the argument must be a file-like object.
        The 'f' argument must be a json document.
        """
        self.read_dict(json.load(f))

    def read_string(self, string, **kwds):
        """Read configuration from a given string that contain json document."""
        self._dict.update(json.loads(string))

    def read_dict(self, dictionary, **kwds):
        """Read configuration from a dictionary."""
        self._dict.update(dictionary)

    def readfp(self, fp, **kwds):
        """Deprecated, use read_file instead."""
        warnings.warn(
            "This method will be removed in future versions. "
            "Use 'parser.read_file()' instead.",
            DeprecationWarning, stacklevel=2
        )
        self.read_file(fp, **kwds)

    def has_option(self, section, option):
        """Check for the existence of a given option in a given section."""
        try:
            return option in self._dict[section]
        except KeyError:
            raise NoSectionError(section) from None

    def set(self, section, option, value=None):
        """Set an option."""
        self._dict[section][option] = value

    def write(self, fp, **kwds):
        """Write an .json-format representation of the configuration state."""
        json.dump(self._dict, fp)

    def remove_option(self, section, option):
        """Remove an option; raise if the section or the option is missing."""
        try:
            options = self._dict[section]
        except KeyError:
            raise NoSectionError(section) from None
        try:
            del options[option]
        except KeyError:
            raise NoOptionError(option, section) from None

    def remove_section(self, section):
        """Remove a file section."""
        try:
            del self._dict[section]
        except KeyError:
            raise NoSectionError(section) from None

    def __getitem__(self, key):
        return self._dict[key]

    def __setitem__(self, key, value):
        self._dict[key] = value

    def __delitem__(self, key):
        del self._dict[key]

    def __iter__(self):
        return iter(self._dict)

    def __len__(self):
        # BUG FIX: was len(self._dict.keys) — a bound method object, which
        # made len() raise TypeError; the dict length is the section count.
        return len(self._dict)

    # These methods provided directly for compatibility with original ConfigParser
    def getint(self, section, option, **kwds):
        """Return the named option coerced to int."""
        return int(self._lookup(section, option))

    def getfloat(self, section, option, **kwds):
        """Return the named option coerced to float."""
        return float(self._lookup(section, option))

    def getboolean(self, section, option, **kwds):
        """Return the named option; raise ValueError unless it is a bool."""
        val = self._lookup(section, option)
        if isinstance(val, bool):
            return val
        raise ValueError("value is not boolean")

    # To do: implement for this class
    def optionxform(self, optionstr):
        raise NotImplementedError

    @property
    def converters(self):
        raise NotImplementedError
| 6,176 | 1,627 |
import unittest
class ValidatorTest(unittest.TestCase):
    """Tests for the invest_validator decorator's contract enforcement:
    argument/limit_to types, return-value shape and n_workers handling.

    The `validation` import is deliberately done inside each test so a
    broken import fails the individual test, not module collection.
    """
    def test_args_wrong_type(self):
        """Validation: check for error when args is the wrong type."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            pass
        with self.assertRaises(AssertionError):
            validate(args=123)
    def test_limit_to_wrong_type(self):
        """Validation: check for error when limit_to is the wrong type."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            pass
        with self.assertRaises(AssertionError):
            validate(args={}, limit_to=1234)
    def test_limit_to_not_in_args(self):
        """Validation: check for error when limit_to is not a key in args."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            pass
        with self.assertRaises(AssertionError):
            validate(args={}, limit_to='bar')
    def test_args_keys_must_be_strings(self):
        """Validation: check for error when args keys are not all strings."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            pass
        with self.assertRaises(AssertionError):
            validate(args={1: 'foo'})
    def test_invalid_return_value(self):
        """Validation: check for error when the return value type is wrong."""
        from natcap.invest import validation
        # Each candidate bad return value gets its own decorated function;
        # the closure reads the current loop value when called.
        for invalid_value in (1, True, None):
            @validation.invest_validator
            def validate(args, limit_to=None):
                return invalid_value
            with self.assertRaises(AssertionError):
                validate({})
    def test_invalid_keys_iterable(self):
        """Validation: check for error when return keys not an iterable."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            return [('a', 'error 1')]
        with self.assertRaises(AssertionError):
            validate({'a': 'foo'})
    def test_return_keys_in_args(self):
        """Validation: check for error when return keys not all in args."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            return [(('a',), 'error 1')]
        with self.assertRaises(AssertionError):
            validate({})
    def test_error_string_wrong_type(self):
        """Validation: check for error when error message not a string."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            return [(('a',), 1234)]
        with self.assertRaises(AssertionError):
            validate({'a': 'foo'})
    def test_wrong_parameter_names(self):
        """Validation: check for error when wrong function signature used."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(foo):
            pass
        with self.assertRaises(AssertionError):
            validate({})
    def test_return_value(self):
        """Validation: validation errors should be returned from decorator."""
        from natcap.invest import validation
        errors = [(('a', 'b'), 'Error!')]
        @validation.invest_validator
        def validate(args, limit_to=None):
            return errors
        validation_errors = validate({'a': 'foo', 'b': 'bar'})
        self.assertEqual(validation_errors, errors)
    def test_n_workers(self):
        """Validation: validation error returned on invalid n_workers."""
        from natcap.invest import validation
        @validation.invest_validator
        def validate(args, limit_to=None):
            return []
        # The decorator itself must flag a non-integer n_workers even when
        # the wrapped validator reports nothing.
        validation_errors = validate({'n_workers': 1.5})
        self.assertEqual(len(validation_errors), 1)
        self.assertTrue(validation_errors[0][0] == ['n_workers'])
        self.assertTrue('must be an integer' in validation_errors[0][1])
class ValidationContextTests(unittest.TestCase):
    """Tests for validation.ValidationContext helper behaviour."""

    def test_is_arg_complete_require(self):
        """Validation: context returns a warning for incomplete args."""
        from natcap.invest import validation
        ctx = validation.ValidationContext(args={}, limit_to=None)
        self.assertEqual(ctx.is_arg_complete('some_key', require=True), False)
        self.assertEqual(len(ctx.warnings), 1)

    def test_is_arg_complete_require_and_present(self):
        """Validation: context ok when arg complete."""
        from natcap.invest import validation
        ctx = validation.ValidationContext(args={'some_key': 'foo'}, limit_to=None)
        self.assertEqual(ctx.is_arg_complete('some_key', require=True), True)
        self.assertEqual(ctx.warnings, [])

    def test_warn_single_key(self):
        """Validation: check warnings when single key is given."""
        from natcap.invest import validation
        ctx = validation.ValidationContext(args={'some_key': 'foo'}, limit_to=None)
        ctx.warn('some error', 'some_key')
        self.assertEqual(ctx.warnings, [(('some_key',), 'some error')])

    def test_warn_iterable_keys(self):
        """Validation: check warnings when keys are iterable."""
        from natcap.invest import validation
        ctx = validation.ValidationContext(args={'some_key': 'foo'}, limit_to=None)
        ctx.warn('some error', keys=['some_key'])
        self.assertEqual(ctx.warnings, [(('some_key',), 'some error')])
| 5,859 | 1,636 |
# Build configuration for the uWSGI 'zergpool' plugin; these module-level
# variables are read by uWSGI's plugin build machinery.
NAME='zergpool'
# No extra compiler flags, linker flags, or libraries are required.
CFLAGS = []
LDFLAGS = []
LIBS = []
# C source files (without extension) compiled into the plugin.
GCC_LIST = ['zergpool']
| 77 | 40 |
import numpy as np
import matplotlib.pyplot as plt
import cv2
import math
def grayscale(img):
    """Convert an RGB image to a single-channel grayscale image.

    NOTE: display the result with plt.imshow(gray, cmap='gray').
    Use cv2.COLOR_BGR2GRAY instead when the image was read with cv2.imread().
    """
    return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
def canny(img, low_threshold, high_threshold):
    """Run the Canny edge detector with the given hysteresis thresholds."""
    return cv2.Canny(img, low_threshold, high_threshold)
def gaussian_blur(img, kernel_size):
    """Smooth the image with a square Gaussian kernel of side kernel_size."""
    return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)
def region_of_interest(img, vertices):
    """Black out everything outside the polygon formed from `vertices`.

    Only pixels inside the polygon survive; the rest of the image is set
    to black.
    """
    mask = np.zeros_like(img)
    # The fill color must match the channel count of the input image
    # (3 or 4 channels for color input, a scalar for grayscale).
    if len(img.shape) > 2:
        fill_color = (255,) * img.shape[2]
    else:
        fill_color = 255
    cv2.fillPoly(mask, vertices, fill_color)
    # Keep the image only where the mask pixels are nonzero.
    return cv2.bitwise_and(img, mask)
def find_aggregated_line(lines_x, lines_y, y_bottom, y_top):
    """
    Find two end-points (bottom and top) of aggregated line for given line collection.
    The endpoints are determined by the given y coordinate range.

    :param lines_x: x coordinates of lines
    :param lines_y: y coordinates of lines
    :param y_bottom: bottom end y coordinate of aggregated line segment
    :param y_top: top end y coordinate of aggregated line segment
    :return: (x, y) coordinates of two end-points of aggregated line segment
    """
    # The collections must be non-empty and of equal length.
    assert len(lines_x) > 0 and len(lines_x) == len(lines_y)
    # Least-squares straight line y = slope * x + intercept through all points.
    slope, intercept = np.polyfit(lines_x, lines_y, 1)

    def x_at(y):
        # Invert the fit to find the x coordinate for a given y.
        return int(round((y - intercept) / slope))

    return [(x_at(y_bottom), y_bottom), (x_at(y_top), y_top)]
def draw_lines(img, lines, color=[255, 0, 0], thickness=10):
    """Aggregate Hough segments into one left and one right lane line and
    draw them onto img (mutates img in place).

    Segments are classified by slope range and by which half of the image
    they lie in; everything else is treated as an outlier and ignored.

    :param img: image the lines are drawn on (numpy array, HxWx3).
    :param lines: Hough output, an iterable of [[x1, y1, x2, y2]] segments.
    :param color: color triple used for the drawn lines.
    :param thickness: line thickness in pixels.
    """
    lines_left_x = []
    lines_left_y = []
    lines_right_x = []
    lines_right_y = []
    xsize = img.shape[1]
    x_middle = int(round(xsize / 2))
    for line in lines:
        for x1, y1, x2, y2 in line:
            # BUG FIX: vertical segments have undefined slope; with plain-int
            # coordinates this division raised ZeroDivisionError.
            if x2 == x1:
                continue
            slope = (y2 - y1) / (x2 - x1)
            if (slope > -0.9 and slope < -0.5) and (x1 < x_middle and x2 < x_middle):
                lines_left_x.extend([x1, x2])
                lines_left_y.extend([y1, y2])
            elif (slope > 0.4 and slope < 0.8) and (x1 > x_middle and x2 > x_middle):
                lines_right_x.extend([x1, x2])
                lines_right_y.extend([y1, y2])
            # anything else is an outlier segment and is ignored

    # BUG FIX: min() over an empty sequence raised ValueError when every
    # segment was filtered out; there is simply nothing to draw then.
    if not lines_left_y and not lines_right_y:
        return

    # Y range for the aggregated lines: image bottom up to the highest
    # endpoint seen in either collection.
    ysize = img.shape[0]
    y_bottom, y_top = ysize - 1, min(lines_left_y + lines_right_y)
    # Find and draw aggregated lines for left and right collections.
    if len(lines_left_x) > 0:
        point_bottom, point_top = find_aggregated_line(lines_left_x, lines_left_y, y_bottom, y_top)
        cv2.line(img, point_bottom, point_top, color, thickness)
    if len(lines_right_x) > 0:
        point_bottom, point_top = find_aggregated_line(lines_right_x, lines_right_y, y_bottom, y_top)
        cv2.line(img, point_bottom, point_top, color, thickness)
def draw_raw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """Draw every raw Hough segment directly onto img (mutates img).

    This is the original draw_lines function provided in the project.
    """
    for segment in lines:
        for x1, y1, x2, y2 in segment:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)
def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    Run the probabilistic Hough transform on a Canny edge image and return a
    fresh RGB image with the aggregated lane lines drawn on black.

    `img` should be the output of a Canny transform.
    """
    segments = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]),
                               minLineLength=min_line_len, maxLineGap=max_line_gap)
    canvas = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)
    draw_lines(canvas, segments)
    return canvas
# Python 3 has support for cool math symbols.
def weighted_img(img, initial_img, α=0.8, β=1., γ=0.):
    """Blend the line image onto the original image.

    `img` is the output of hough_lines(): a black image with lines drawn on it.
    `initial_img` is the image before any processing.

    The result is computed as: initial_img * α + img * β + γ

    NOTE: initial_img and img must have the same shape.
    """
    return cv2.addWeighted(initial_img, α, img, β, γ)
| 6,279 | 2,091 |
##############################################################################
#
# Copyright (c) 2001, 2002 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import warnings
from zope.traversing.publicationtraverse import PublicationTraverse # noqa: F401 E501 (BBB and long line)
from zope.traversing.publicationtraverse import PublicationTraverser # noqa: F401 E501 (BBB and long line)
# Module-level deprecation shim: anyone importing this module gets warned and
# should import PublicationTraverse(r) from zope.traversing instead.
warnings.warn("""%s is deprecated
If you want PublicationTraverser, it's now in
zope.traversing.publicationtraverse. Anything else that was here is
deprecated.
""" % __name__, DeprecationWarning, stacklevel=1)
# BBB: do not use
# Kept only so old code that imports/catches this exception keeps working.
class DuplicateNamespaces(Exception):
    """More than one namespace was specified in a request"""
# BBB: do not use
# Kept only so old code that imports/catches this exception keeps working.
class UnknownNamespace(Exception):
    """A parameter specified an unknown namespace"""
| 1,314 | 422 |
from . import tables
from . import database
| 44 | 11 |
from django.contrib import admin
from .models import Timestep, TemporalDistribution
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(TemporalDistribution)
admin.site.register(Timestep)
| 158 | 48 |
from typing import List, Tuple
import h2o
import pandas as pd
from sklearn.model_selection import train_test_split
from .config import Config
def get_train_valid(
    df: pd.DataFrame,
) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Split *df* into (full_train, train, valid, test) DataFrames.

    The test split is carved off first; the remaining rows are split again
    into train and valid.  All splits are stratified on Config.stratify_col
    and seeded from Config for reproducibility.

    :param df: the complete dataset.
    :return: (full_train_df, train_df, valid_df, test_df), where
        full_train_df is train_df and valid_df combined.
    """
    # BUG FIX (annotation only): the return type previously declared a
    # 1-tuple (Tuple[pd.DataFrame]) while four frames are returned.
    full_train_df, test_df = train_test_split(
        df,
        test_size=Config.test_percent,
        random_state=Config.test_seed,
        stratify=df[Config.stratify_col].values,
    )
    train_df, valid_df = train_test_split(
        full_train_df,
        test_size=Config.valid_percent,
        random_state=Config.valid_seed,
        stratify=full_train_df[Config.stratify_col].values,
    )
    return full_train_df, train_df, valid_df, test_df
def get_h2o_train_valid(dfs: Tuple[pd.DataFrame]) -> Tuple[h2o.H2OFrame]:
    """Convert the split DataFrames into H2OFrames.

    Config.use_full_train decides whether the training frame is built from the
    full training split or the smaller train split.
    """
    full_train_df, train_df, valid_df, test_df = dfs
    source_train = full_train_df if Config.use_full_train else train_df
    train = h2o.H2OFrame(source_train)
    valid = h2o.H2OFrame(valid_df)
    test = h2o.H2OFrame(test_df)
    return train, valid, test
def treat_categorical_cols(
    dfs: Tuple[h2o.H2OFrame], cat_cols: List[str]
) -> Tuple[h2o.H2OFrame, List[str], str]:
    """Mark the target and the given categorical columns as H2O factors.

    Returns the three frames plus the feature-column list x and target name y.
    """
    train, valid, test = dfs
    y = Config.target_col
    # Feature columns are everything except the target.
    x = train.columns
    x.remove(y)
    # The target must be a factor on the training frame.
    train[y] = train[y].asfactor()
    for col in cat_cols:
        for frame in (train, valid, test):
            frame[col] = frame[col].asfactor()
    return train, valid, test, x, y
| 1,581 | 583 |
import csv
import plistlib as plist
SOURCE_FILE = "tech-names.csv"

# Build one {"phrase", "shortcut"} entry per CSV row: each common misspelling
# becomes a shortcut that expands to the correct spelling.
# (The unused 'firstline' flag from the original loop has been removed.)
with open(SOURCE_FILE, "rt") as csvfile:
    reader = csv.DictReader(csvfile)
    snippets_array = [
        {"phrase": row["correct_spelling"], "shortcut": row["common_misspelling"]}
        for row in reader
    ]

with open("tech-names.plist", "wb") as fp:
    plist.dump(snippets_array, fp)
| 419 | 151 |
from cjax.continuation._arc_len_continuation import PseudoArcLenContinuation
from cjax.continuation.states.state_variables import StateWriter
from cjax.continuation.methods.predictor.secant_predictor import SecantPredictor
from jax.experimental.optimizers import l2_norm
from cjax.continuation.methods.corrector.perturbed_constrained_corrector import (
PerturbedCorrecter,
)
import copy
from cjax.utils.profiler import profile
import gc
from cjax.utils.math_trees import pytree_relative_error
# TODO: make **kwargs availible
class PerturbedPseudoArcLenContinuation(PseudoArcLenContinuation):
    """Noisy Pseudo Arc-length Continuation strategy.
    Composed of secant predictor and noisy constrained corrector"""

    def __init__(
        self,
        state,
        bparam,
        state_0,
        bparam_0,
        counter,
        objective,
        dual_objective,
        hparams,
        key_state,
    ):
        # All shared setup is delegated to the deterministic arc-length base
        # class; only key_state is new here (presumably a PRNG key/seed for
        # the perturbation noise -- TODO confirm against PerturbedCorrecter).
        super().__init__(
            state,
            bparam,
            state_0,
            bparam_0,
            counter,
            objective,
            dual_objective,
            hparams,
        )
        self.key_state = key_state

    @profile(sort_by="cumulative", lines_to_print=10, strip_dirs=True)
    def run(self):
        """Runs the continuation strategy.
        A continuation strategy that defines how predictor and corrector components of the algorithm
        interact with the states of the mathematical system.
        """
        # One JSON trace file per key_state so separate runs do not collide.
        self.sw = StateWriter(f"{self.output_file}/version_{self.key_state}.json")
        for i in range(self.continuation_steps):
            print(self._value_wrap.get_record(), self._bparam_wrap.get_record())
            # Stamp every wrapped quantity with the current step index before
            # persisting its record.
            self._state_wrap.counter = i
            self._bparam_wrap.counter = i
            self._value_wrap.counter = i
            self.sw.write(
                [
                    self._state_wrap.get_record(),
                    self._bparam_wrap.get_record(),
                    self._value_wrap.get_record(),
                ]
            )
            # Predictor phase: extrapolate along the secant through the
            # current and previous (state, bparam) points.
            concat_states = [
                (self._state_wrap.state, self._bparam_wrap.state),
                (self._prev_state, self._prev_bparam),
                self.prev_secant_direction,
            ]
            predictor = SecantPredictor(
                concat_states=concat_states,
                delta_s=self._delta_s,
                omega=self._omega,
                net_spacing_param=self.hparams["net_spacing_param"],
                net_spacing_bparam=self.hparams["net_spacing_bparam"],
                hparams=self.hparams,
            )
            predictor.prediction_step()

            self.prev_secant_direction = predictor.secant_direction
            # Scale the perturbation sphere with the secant length so the
            # noise magnitude tracks the local step size.
            self.hparams["sphere_radius"] = (
                0.005 * self.hparams["omega"] * l2_norm(predictor.secant_direction)
            )
            concat_states = [
                predictor.state,
                predictor.bparam,
                predictor.secant_direction,
                predictor.get_secant_concat(),
            ]
            # The predictor holds references to large pytrees; free eagerly.
            del predictor
            gc.collect()
            # Corrector phase: project the prediction back onto the solution
            # manifold with the perturbed constrained corrector.
            corrector = PerturbedCorrecter(
                optimizer=self.opt,
                objective=self.objective,
                dual_objective=self.dual_objective,
                lagrange_multiplier=self._lagrange_multiplier,
                concat_states=concat_states,
                delta_s=self._delta_s,
                ascent_opt=self.ascent_opt,
                key_state=self.key_state,
                compute_min_grad_fn=self.compute_min_grad_fn,
                compute_max_grad_fn=self.compute_max_grad_fn,
                compute_grad_fn=self.compute_grad_fn,
                hparams=self.hparams,
                pred_state=[self._state_wrap.state, self._bparam_wrap.state],
                pred_prev_state=[self._state_wrap.state, self._bparam_wrap.state],
                counter=self.continuation_steps,
            )
            # Remember the pre-correction point for the next secant.
            self._prev_state = copy.deepcopy(self._state_wrap.state)
            self._prev_bparam = copy.deepcopy(self._bparam_wrap.state)

            state, bparam, quality = corrector.correction_step()

            value = self.value_func(state, bparam)
            print(
                "How far ....", pytree_relative_error(self._bparam_wrap.state, bparam)
            )
            self._state_wrap.state = state
            self._bparam_wrap.state = bparam
            self._value_wrap.state = value
            del corrector
            gc.collect()
| 4,524 | 1,277 |
# SPDX-License-Identifier: GPL-2.0+
from .exceptions import DependencyTrackApiError
class Bom:
    """Class dedicated to all "bom" related endpoints"""

    def upload_bom(
        self,
        file_name,
        project_id=None,
        project_name=None,
        project_version=None,
        auto_create=False,
    ):
        """Upload a supported bill of material format document

        API Endpoint: POST /bom

        :param file_name: path of the BOM document to upload
        :param project_id: UUID of the target project (optional)
        :param project_name: name of the target project (optional)
        :param project_version: version of the target project (optional)
        :param auto_create: create the project if it does not exist
        :return: UUID-Token
        :rtype: string
        :raises DependencyTrackApiError: if the REST call failed
        """
        # BUG FIX: the file handle was previously opened and never closed;
        # the with-block keeps it open for the duration of the upload only.
        with open(file_name, "r") as bom_file:
            multipart_form_data = {"bom": ("bom", bom_file)}
            if project_id:
                multipart_form_data["project"] = project_id
            if project_name:
                multipart_form_data["projectName"] = project_name
            if project_version:
                multipart_form_data["projectVersion"] = project_version
            multipart_form_data["autoCreate"] = auto_create
            response = self.session.post(
                self.api + "/bom",
                params=self.paginated_param_payload,
                files=multipart_form_data,
            )
        if response.status_code == 200:
            return response.json()
        # BUG FIX: was an f-string with no placeholders.
        description = "Unable to upload BOM file"
        raise DependencyTrackApiError(description, response)
| 1,345 | 387 |
"""By Ian Davis for Bootcampers Collective Coders Workshop on 2/19/20"""
""" This program evaluates a string and determines if it its a real sentence """
validString = 'This is a valid sentence.'
twoSpaces = "This isn't valid"
firstCharacterNotCapitalized = 'not capitalized'
containsProperNoun = 'Only the firs character can be capitalized Colorado'
lastCharNotTerminator = 'last not terminator'
validCharacters = [',', ';', ':', '.', '?', '!', "'", ' ']
def loopSentence(sentence):
    """Scan every character after the first; reject invalid characters,
    two spaces in a row, and capital letters past position zero."""
    for index, current in enumerate(sentence[1:]):
        if not (current.isalpha() or current in validCharacters):
            print(f'char {current} is not valid')
            return False
        print(f'char {current} is valid')
        # 'current' is sentence[index + 1], so sentence[index] is the
        # character immediately before it.
        if sentence[index] == ' ' and sentence[index + 1] == ' ':
            print(f'two spaces in a row')
            return False
        if current.isupper():
            print('no propper nouns!')
            return False
    return True
def checkLastLetterTerminator(sentence):
    """Return True when the sentence ends in '.', '!' or '?'."""
    if sentence[-1] in ('.', '!', '?'):
        return True
    print(f'last character is not a sentence terminator')
    return False
def checkFirstLetterUppercase(sentence):
    """Report and return whether the sentence starts with an uppercase letter."""
    starts_upper = sentence[0].isupper()
    if starts_upper:
        print('First letter of the sentence is Uppercase')
    else:
        print('First letter of the sentence is NOT Uppercase')
    return starts_upper
def combineTests(sentence):
    """Run every sentence check and print an overall PASSED/FAILED verdict."""
    # BUG FIX: checkLastLetterTerminator was defined (and a sample input,
    # lastCharNotTerminator, exists specifically to exercise it) but was
    # never called here.
    if (not checkFirstLetterUppercase(sentence)
            or not loopSentence(sentence)
            or not checkLastLetterTerminator(sentence)):
        print(f'TESTS FAILED on {sentence}\n')
    else:
        print(f'TESTS PASSED on {sentence}\n')
def main():
    """Exercise combineTests against every sample sentence, in order."""
    samples = (
        validString,
        twoSpaces,
        firstCharacterNotCapitalized,
        containsProperNoun,
        lastCharNotTerminator,
    )
    for sentence in samples:
        combineTests(sentence)


if __name__ == "__main__":
    main()
| 1,895 | 567 |
# -*- coding:utf-8 -*-
"""
@author: RubanSeven
@project: MirrorMirror
"""
from PyQt5.QtWidgets import *
class CodeTextEdit(QTextEdit):
    """Multi-line text editor styled with a dark theme (grey background,
    light text, no border)."""

    def __init__(self, *__args):
        super().__init__(*__args)
        # Style is applied per-widget via a Qt stylesheet string.
        self.setStyleSheet(
            """
            QTextEdit {
                background-color: rgb(83, 83, 83);
                border:0px;
                font-size: 15px;
                color: rgb(214, 214, 214);
            }
            """
        )
class ParamLineEdit(QLineEdit):
    """Single-line parameter input styled to match the dark theme (fixed
    30px height, subtle border)."""

    def __init__(self, *__args):
        super().__init__(*__args)
        # Style is applied per-widget via a Qt stylesheet string.
        self.setStyleSheet(
            """
            QLineEdit {
                background-color: rgb(46, 46, 46);
                border:1px rgb(62, 62, 62);
                font-size: 15px;
                color: rgb(205, 205, 205);
                height: 30px;
            }
            """
        )
class LabelText(QLabel):
    """Small borderless caption label in the theme's muted grey."""

    def __init__(self, *__args):
        super().__init__(*__args)
        # Style is applied per-widget via a Qt stylesheet string.
        self.setStyleSheet(
            """
            QLabel {
                border: none;
                font-size: 13px;
                color: rgb(153, 153, 153);
            }
            """
        )
| 1,269 | 440 |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from Bio import Entrez
import MySQLdb as mySQLDB
Entrez.email="A.N.Other@example.com"
def savePID():
    """Fetch PubMed ids matching "lncRNA" via Entrez and insert them into MySQL."""
    # Entrez.esearch returns at most 100,000 ids per request.
    returnCount = 100000
    handle = Entrez.esearch(db="pubmed", term="lncRNA", RetMax=returnCount)
    # These parameter values are sufficient for now, but not guaranteed to
    # stay so; consult the current Entrez API docs if this call starts failing.
    record = Entrez.read(handle)
    print(record)
    idList = record["IdList"]
    count = record["Count"]
    print("Count" + count)
    # Open the database connection.
    db = mySQLDB.connect(host='127.0.0.1', user='root', passwd='11223366',
                         db='ncrna', charset='utf8')
    print()
    cursor = db.cursor()
    # BUG FIX: previously iterated range(int(count)) while indexing idList,
    # which raised IndexError whenever Count exceeded RetMax.  Iterate the ids
    # actually returned instead.  The query is parameterized rather than
    # string-concatenated, and only database errors are caught.
    # Duplicate pmids are rejected by the table's UNIQUE KEY, e.g.:
    #   CREATE TABLE `lncrna_pid` (`Id` int(11) NOT NULL AUTO_INCREMENT,
    #     `pid` int(11) NOT NULL DEFAULT '0', PRIMARY KEY (`Id`),
    #     UNIQUE KEY `pid` (`pid`)) ENGINE=InnoDB DEFAULT CHARSET=utf8;
    sql = "insert into lncrna_pmid (pmid) values (%s)"
    for pmid in idList:
        try:
            cursor.execute(sql, (pmid,))
            db.commit()
        except mySQLDB.Error:
            # Roll back on any insert error (e.g. duplicate pmid).
            db.rollback()
            print("Error,can't insert data. " + str(sql))
    db.close()
if __name__ == "__main__":
savePID() | 1,429 | 661 |
import os
from argparse import ArgumentParser
import requests
import json
import traceback
LOCATIONS = [
"Aruba",
"Afghanistan",
"Africa",
"Angola",
"Albania",
"Andorra",
"Andean Region",
"Arab World",
"United Arab Emirates",
"Argentina",
"Armenia",
"American Samoa",
"Antigua and Barbuda",
"Australia",
"Austria",
"Azerbaijan",
"Burundi",
"East Asia & Pacific (IBRD-only countries)",
"Europe & Central Asia (IBRD-only countries)",
"Belgium",
"Benin",
"Burkina Faso",
"Bangladesh",
"Bulgaria",
"IBRD countries classified as high income",
"Bahrain",
"Bahamas, The",
"Bosnia and Herzegovina",
"Latin America & the Caribbean (IBRD-only countries)",
"Belarus",
"Belize",
"Middle East & North Africa (IBRD-only countries)",
"Bermuda",
"Bolivia",
"Brazil",
"Barbados",
"Brunei Darussalam",
"Sub-Saharan Africa (IBRD-only countries)",
"Bhutan",
"Botswana",
"Sub-Saharan Africa (IFC classification)",
"Central African Republic",
"Canada",
"East Asia and the Pacific (IFC classification)",
"Central Europe and the Baltics",
"Europe and Central Asia (IFC classification)",
"Switzerland",
"Channel Islands",
"Chile",
"China",
"Cote d'Ivoire",
"Latin America and the Caribbean (IFC classification)",
"Middle East and North Africa (IFC classification)",
"Cameroon",
"Congo, Dem. Rep.",
"Congo, Rep.",
"Colombia",
"Comoros",
"Cabo Verde",
"Costa Rica",
"South Asia (IFC classification)",
"Caribbean small states",
"Cuba",
"Curacao",
"Cayman Islands",
"Cyprus",
"Czech Republic",
"East Asia & Pacific (IDA-eligible countries)",
"Europe & Central Asia (IDA-eligible countries)",
"Germany",
"IDA countries classified as Fragile Situations",
"Djibouti",
"Latin America & the Caribbean (IDA-eligible countries)",
"Dominica",
"Middle East & North Africa (IDA-eligible countries)",
"IDA countries not classified as Fragile Situations",
"Denmark",
"IDA countries in Sub-Saharan Africa not classified as fragile situations ",
"Dominican Republic",
"South Asia (IDA-eligible countries)",
"IDA countries in Sub-Saharan Africa classified as fragile situations ",
"Sub-Saharan Africa (IDA-eligible countries)",
"IDA total, excluding Sub-Saharan Africa",
"Algeria",
"East Asia & Pacific (excluding high income)",
"Early-demographic dividend",
"East Asia & Pacific",
"Europe & Central Asia (excluding high income)",
"Europe & Central Asia",
"Ecuador",
"Egypt, Arab Rep.",
"Euro area",
"Eritrea",
"Spain",
"Estonia",
"Ethiopia",
"European Union",
"Fragile and conflict affected situations",
"Finland",
"Fiji",
"France",
"Faroe Islands",
"Micronesia, Fed. Sts.",
"IDA countries classified as fragile situations, excluding Sub-Saharan Africa",
"Gabon",
"United Kingdom",
"Georgia",
"Ghana",
"Gibraltar",
"Guinea",
"Gambia, The",
"Guinea-Bissau",
"Equatorial Guinea",
"Greece",
"Grenada",
"Greenland",
"Guatemala",
"Guam",
"Guyana",
"High income",
"Hong Kong SAR, China",
"Honduras",
"Heavily indebted poor countries (HIPC)",
"Croatia",
"Haiti",
"Hungary",
"IBRD, including blend",
"IBRD only",
"IDA & IBRD total",
"IDA total",
"IDA blend",
"Indonesia",
"IDA only",
"Isle of Man",
"India",
"Not classified",
"Ireland",
"Iran, Islamic Rep.",
"Iraq",
"Iceland",
"Israel",
"Italy",
"Jamaica",
"Jordan",
"Japan",
"Kazakhstan",
"Kenya",
"Kyrgyz Republic",
"Cambodia",
"Kiribati",
"St. Kitts and Nevis",
"Korea, Rep.",
"Kuwait",
"Latin America & Caribbean (excluding high income)",
"Lao PDR",
"Lebanon",
"Liberia",
"Libya",
"St. Lucia",
"Latin America & Caribbean ",
"Latin America and the Caribbean",
"Least developed countries,ssification",
"Low income",
"Liechtenstein",
"Sri Lanka",
"Lower middle income",
"Low & middle income",
"Lesotho",
"Late-demographic dividend",
"Lithuania",
"Luxembourg",
"Latvia",
"Macao SAR, China",
"St. Martin (French part)",
"Morocco",
"Central America",
"Monaco",
"Moldova",
"Middle East (developing only)",
"Madagascar",
"Maldives",
"Middle East & North Africa",
"Mexico",
"Marshall Islands",
"Middle income",
"Macedonia, FYR",
"Mali",
"Malta",
"Myanmar",
"Middle East & North Africa (excluding high income)",
"Montenegro",
"Mongolia",
"Northern Mariana Islands",
"Mozambique",
"Mauritania",
"Mauritius",
"Malawi",
"Malaysia",
"North America",
"North Africa",
"Namibia",
"New Caledonia",
"Niger",
"Nigeria",
"Nicaragua",
"Netherlands",
"Non-resource rich Sub-Saharan Africa countries, of which landlocked",
"Norway",
"Nepal",
"Non-resource rich Sub-Saharan Africa countries",
"Nauru",
"IDA countries not classified as fragile situations, excluding Sub-Saharan Africa",
"New Zealand",
"OECD members",
"Oman",
"Other small states",
"Pakistan",
"Panama",
"Peru",
"Philippines",
"Palau",
"Papua New Guinea",
"Poland",
"Pre-demographic dividend",
"Puerto Rico",
"Korea, Dem. People’s Rep.",
"Portugal",
"Paraguay",
"West Bank and Gaza",
"Pacific island small states",
"Post-demographic dividend",
"French Polynesia",
"Qatar",
"Romania",
"Resource rich Sub-Saharan Africa countries",
"Resource rich Sub-Saharan Africa countries, of which oil exporters",
"Russian Federation",
"Rwanda",
"South Asia",
"Saudi Arabia",
"Southern Cone",
"Sudan",
"Senegal",
"Singapore",
"Solomon Islands",
"Sierra Leone",
"El Salvador",
"San Marino",
"Somalia",
"Serbia",
"Sub-Saharan Africa (excluding high income)",
"South Sudan",
"Sub-Saharan Africa ",
"Small states",
"Sao Tome and Principe",
"Suriname",
"Slovak Republic",
"Slovenia",
"Sweden",
"Eswatini",
"Sint Maarten (Dutch part)",
"Sub-Saharan Africa excluding South Africa",
"Seychelles",
"Syrian Arab Republic",
"Turks and Caicos Islands",
"Chad",
"East Asia & Pacific (IDA & IBRD countries)",
"Europe & Central Asia (IDA & IBRD countries)",
"Togo",
"Thailand",
"Tajikistan",
"Turkmenistan",
"Latin America & the Caribbean (IDA & IBRD countries)",
"Timor-Leste",
"Middle East & North Africa (IDA & IBRD countries)",
"Tonga",
"South Asia (IDA & IBRD)",
"Sub-Saharan Africa (IDA & IBRD countries)",
"Trinidad and Tobago",
"Tunisia",
"Turkey",
"Tuvalu",
"Taiwan, China",
"Tanzania",
"Uganda",
"Ukraine",
"Upper middle income",
"Uruguay",
"United States",
"Uzbekistan",
"St. Vincent and the Grenadines",
"Venezuela, RB",
"British Virgin Islands",
"Virgin Islands (U.S.)",
"Vietnam",
"Vanuatu",
"World",
"Samoa",
"Kosovo",
"Sub-Saharan Africa excluding South Africa and Nigeria",
"Yemen, Rep.",
"South Africa",
"Zambia",
"Zimbabwe"
]
def getAllIndicatorList():
    """Fetch the complete World Bank indicator list.

    A first one-page request reads the total indicator count; a second
    request then fetches all indicators in a single page.
    """
    first_page = requests.get(
        "https://api.worldbank.org/v2/indicators?format=json&page=1"
    ).json()
    total = first_page[0]['total']
    full_url = (
        "https://api.worldbank.org/v2/indicators?format=json&page=1&per_page="
        + str(total)
    )
    full_page = requests.get(full_url).json()
    # Element 0 is pagination metadata; element 1 is the indicator list.
    return full_page[1]
def generate_json_schema(dst_path):
    """Write one dataset-description JSON file per World Bank indicator.

    Files are written to <dst_path>/worldbank_schema/ (or ./worldbank_schema/
    when dst_path is falsy).  Failures for individual indicators are logged
    and skipped so one bad record cannot abort the whole run.

    :param dst_path: parent directory for the generated schema folder.
    """
    unique_urls_str = getAllIndicatorList()
    for commondata in unique_urls_str:
        try:
            urldata = "https://api.worldbank.org/v2/countries/indicators/" + commondata['id'] + "?format=json"
            resdata = requests.get(urldata)
            data_ind = resdata.json()
            print("Generating schema for Trading economics", commondata['name'])
            schema = {}
            schema["title"] = commondata['name']
            schema["description"] = commondata['sourceNote']
            schema["url"] = "https://api.worldbank.org/v2/indicators/" + commondata['id'] + "?format=json"
            schema["keywords"] = commondata['name'].split()
            # data_ind[0] is the API's pagination/metadata header.
            schema["date_updated"] = data_ind[0]["lastupdated"] if data_ind else None
            schema["license"] = None
            schema["provenance"] = {"source": "http://worldbank.org"}
            schema["original_identifier"] = commondata['id']
            schema["materialization"] = {
                "python_path": "worldbank_materializer",
                "arguments": {
                    "url": "https://api.worldbank.org/v2/indicators/" + commondata['id'] + "?format=json"
                }
            }
            # All eight column descriptions in one literal instead of eight
            # separately named dicts appended one by one.
            schema['variables'] = [
                {
                    "name": "indicator_id",
                    "description": "id is identifier of an indicator in worldbank datasets",
                    "semantic_type": ["https://metadata.datadrivendiscovery.org/types/CategoricalData"]
                },
                {
                    "name": "indicator_value",
                    "description": "name of an indicator in worldbank datasets",
                    "semantic_type": ["http://schema.org/Text"]
                },
                {
                    "name": "unit",
                    "description": "unit of value returned by this indicator for a particular country",
                    "semantic_type": ["https://metadata.datadrivendiscovery.org/types/CategoricalData"]
                },
                {
                    "name": "sourceNote",
                    "description": "Long description of the indicator",
                    "semantic_type": ["http://schema.org/Text"]
                },
                {
                    "name": "sourceOrganization",
                    "description": "Source organization from where Worldbank acquired this data",
                    "semantic_type": ["http://schema.org/Text"]
                },
                {
                    "name": "country_value",
                    "description": "Country for which idicator value is returned",
                    "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Location"],
                    "named_entity": LOCATIONS
                },
                {
                    "name": "countryiso3code",
                    "description": "Country iso code for which idicator value is returned",
                    "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Location"]
                },
                {
                    "name": "date",
                    "description": "date for which indictor value is returned for a particular country",
                    "semantic_type": ["https://metadata.datadrivendiscovery.org/types/Time"],
                    "temporal_coverage": {"start": "1950", "end": "2100"}
                },
            ]
            # BUG FIX: the fallback branch previously created
            # 'WorldBank_schema' but wrote into 'worldbank_schema', which
            # fails on case-sensitive filesystems.  Both now agree.
            if dst_path:
                os.makedirs(os.path.join(dst_path, 'worldbank_schema'), exist_ok=True)
                file = os.path.join(dst_path, 'worldbank_schema',
                                    "{}_description.json".format(commondata['id']))
            else:
                os.makedirs('worldbank_schema', exist_ok=True)
                file = os.path.join('worldbank_schema',
                                    "{}_description.json".format(commondata['id']))
            with open(file, "w") as fp:
                json.dump(schema, fp, indent=2)
        except Exception:
            traceback.print_exc()
if __name__ == '__main__':
    # -o/--dst selects the parent directory for the generated schema folder;
    # parse_known_args ignores any unrecognized extra arguments.
    parser = ArgumentParser()
    parser.add_argument("-o", "--dst", action="store", type=str, dest="dst_path")
    args, _ = parser.parse_known_args()
    generate_json_schema(args.dst_path)
| 11,867 | 4,125 |
#! /usr/bin/env python
from zplot import *
import sys
import sys
# Output format defaults to EPS; the first CLI argument overrides it.
ctype = 'eps' if len(sys.argv) < 2 else sys.argv[1]
c = canvas(ctype, title='devol', dimensions=['400','340'])
t = table(file='devol.data')
# Derive month and year columns from the 'date' column (characters 1-2 and
# 4-5 respectively -- presumably MM?YY formatted; TODO confirm in devol.data).
t.addcolumns(['month','year'])
t.update(set='month = substr(date, 1, 2)')
t.update(set='year = substr(date, 4, 2)')
# One x position per data row; y fixed to [0, 2000] inquiries.
d = drawable(canvas=c, xrange=[-1,t.getmax(column='rownumber') + 1],
             yrange=[0,2000], coord=[40,40], dimensions=[350,270])
grid(drawable=d, ystep=200, xstep=1, linecolor='lightgrey')
axis(drawable=d, style='y', yauto=['','',200])
# Month labels, rotated to fit under every data point.
axis(drawable=d, style='x', xmanual=t.getaxislabels(column='month'),
     xlabelrotate=90, xlabelanchor='r,c', xlabelfontsize=7,
     title='Number of Inquiries Per Month', titlesize=8,
     titlefont='Courier-Bold', xtitle='Year and Month',
     xtitleshift=[0,-15])
# Just pick out the unique years that show up and use them to label the axis
years, xlabels = [], []
for label in t.getaxislabels(column='year'):
    if label[0] not in years:
        years.append(label[0])
        xlabels.append(label)
# Second, invisible x axis (linewidth=0) carrying only the year labels.
axis(drawable=d, style='x', xmanual=xlabels, linewidth=0, xlabelshift=[5,-15],
     xlabelrotate=0, xlabelanchor='r,c', xlabelfontsize=7, xlabelformat='\'%s')
p = plotter()
# Stairstep line of inquiry counts, each point labeled with its value.
p.line(drawable=d, table=t, xfield='rownumber', yfield='value', stairstep=True,
       linecolor='purple', labelfield='value', labelsize=7, labelcolor='purple',
       labelshift=[6,0], labelrotate=90, labelanchor='l,c')
# Highlight the data point near x=10.5, y=463 -- presumably a notable spike;
# TODO confirm intent.
c.circle(coord=d.map([10.5,463]), radius=20, linecolor='red')
c.render()
| 1,546 | 601 |
from DLplatform.aggregating import Aggregator
from DLplatform.parameters import Parameters
from typing import List
import numpy as np
from scipy.spatial.distance import cdist, euclidean
class GeometricMedian(Aggregator):
    '''
    Aggregates n individual models into their geometric median: the point
    minimizing the sum of Euclidean distances to all model parameter vectors.
    (The previous docstring incorrectly said "arithmetic mean".)
    '''

    def __init__(self, name="Geometric median"):
        '''
        Parameters
        ----------
        name    display name of this aggregator

        Returns
        -------
        None
        '''
        Aggregator.__init__(self, name=name)

    def calculateDivergence(self, param1, param2):
        # Euclidean norm for raw numpy arrays; Parameters objects supply
        # their own distance metric.
        if type(param1) is np.ndarray:
            return np.linalg.norm(param1 - param2)
        else:
            return param1.distance(param2)

    def __call__(self, params: List[Parameters]) -> Parameters:
        '''
        Takes n parameter sets and returns their geometric median.

        Parameters
        ----------
        params  A list of Parameters objects supporting toVector/fromVector.

        Returns
        -------
        A new Parameters object holding the geometric median of params.
        '''
        # One row per model: flatten each parameter set into a vector.
        Z = np.array([param.toVector() for param in params])
        gm = self.calcGeometricMedian(Z)
        # Copying an existing parameters object preserves shape information.
        newParam = params[0].getCopy()
        newParam.fromVector(gm)
        return newParam

    def calcGeometricMedian(self, X, eps=1e-5, mat_iter=10e6):
        """Weiszfeld-type fixed-point iteration (with the Vardi-Zhang
        correction for iterates landing on a sample point) for the geometric
        median of the rows of X.

        :param X: 2-D array, one point per row.
        :param eps: stop once the iterate moves less than this distance.
        :param mat_iter: iteration budget.
        :return: the best estimate found.
        """
        y = np.mean(X, 0)
        iterCount = 0
        while iterCount <= mat_iter:
            D = cdist(X, [y])
            nonzeros = (D != 0)[:, 0]
            Dinv = 1 / D[nonzeros]
            Dinvs = np.sum(Dinv)
            W = Dinv / Dinvs
            T = np.sum(W * X[nonzeros], 0)
            num_zeros = len(X) - np.sum(nonzeros)
            if num_zeros == 0:
                y1 = T
            elif num_zeros == len(X):
                # y coincides with every sample point; it is the median.
                return y
            else:
                # Vardi-Zhang step: y sits exactly on one of the samples.
                R = (T - y) * Dinvs
                r = np.linalg.norm(R)
                rinv = 0 if r == 0 else num_zeros / r
                y1 = max(0, 1 - rinv) * T + min(1, rinv) * y
            if euclidean(y, y1) < eps:
                return y1
            y = y1
            iterCount += 1
        # BUG FIX: previously fell off the loop and implicitly returned None
        # when the iteration budget was exhausted before convergence; return
        # the current best estimate instead.
        return y

    def __str__(self):
        return "Geometric median"
| 4,208 | 1,271 |
from flask import Flask
from flask import render_template, request, jsonify
from source import StockPredictor as sp
from source import ModelsParametersTunning as mpt
from datetime import datetime
import json
from plotly.graph_objs import Scatter
from pandas.tseries.offsets import BDay
# Flask application instance; the routes below are registered against it.
app = Flask(__name__)
def install_and_import(package):
    """Import *package*, installing it with pip first if it is missing.

    On success the imported module is bound into this module's globals
    under the package's name so later code can use it directly.
    """
    import importlib
    try:
        importlib.import_module(package)
    except ImportError:
        # pip.main() was removed from pip's public API (pip >= 10);
        # invoking pip through the current interpreter is the supported way.
        import subprocess
        import sys
        subprocess.check_call([sys.executable, '-m', 'pip', 'install', package])
    finally:
        globals()[package] = importlib.import_module(package)
# Ensure plotly is importable at startup, installing it on first run if needed.
install_and_import('plotly')
@app.route('/')
def index():
    """Render the landing page."""
    print('index')  # lightweight request trace
    template = 'master.html'
    return render_template(template)
@app.route('/go', methods=['GET', 'POST'])
def go():
    """Train a regressor for the requested ticker and render prediction plots.

    Reads ticker/date-range parameters from the request, fits and tunes the
    best regressor on the historical data, predicts the next
    ``number_of_days`` closing prices and renders them (with actuals and
    percentage error) as plotly graphs.
    """
    # save user input in query so the template can echo it back
    query = request.values
    print('go')

    ticker_of_interest = request.values.get('ticker')
    # SPY is always included as a market benchmark feature.
    tickers = [ticker_of_interest, 'SPY']

    start_date = datetime.strptime(request.values.get('start_date'), '%Y-%m-%d').date()
    end_date = datetime.strptime(request.values.get('end_date'), '%Y-%m-%d').date()
    prediction_date = datetime.strptime(request.values.get('prediction_date'), '%Y-%m-%d').date()

    # Fix: normalize number_of_days to an int once.  The form delivers a
    # string (or ""), but the default branch produced an int and downstream
    # arithmetic (BDay below) needs an int anyway.
    number_of_days = request.values.get('number_of_days')
    number_of_days = int(number_of_days) if number_of_days else 5

    # NOTE(review): the data end date is hard-coded to '2019-07-08'
    # (presumably the extent of the available dataset) - confirm whether
    # this should be end_date instead.
    df = sp.getData(tickers, start_date.strftime("%Y-%m-%d"), '2019-07-08')
    more_features = sp.introduce_features(df, ticker_of_interest, tickers, number_of_days)
    data_dict = sp.split_data(more_features, ticker_of_interest, end_date)

    all_features_normed = data_dict['all_features_normed']
    all_target = data_dict['all_target']
    small_features_normed = data_dict["small_features_normed"]
    small_target = data_dict["small_target"]
    features_validation_normed = data_dict["features_validation_normed"]
    future_price_validation = data_dict["future_price_validation"]

    # Model selection: pick the best baseline regressor, tune its
    # hyper-parameters, then fit the tuned model on all data.
    highest_model, _highest_score = sp.pick_best_regressor(
        small_features_normed, small_target,
        features_validation_normed, future_price_validation)
    tunned_model = mpt.tune_parameters(
        highest_model.__class__.__name__,
        small_features_normed, small_target,
        features_validation_normed, future_price_validation)
    model = tunned_model.fit(all_features_normed, all_target)
    predictions = sp.predict_n_days(model, all_features_normed, prediction_date, number_of_days)

    # Actual prices over the predicted window (business days only).
    real_data = df[predictions['Date'][0]:
                   predictions['Date'][0] + BDay(number_of_days - 1)][ticker_of_interest]
    # Absolute percentage error per day; assumes real prices are non-zero.
    pct = [abs(float(r) - float(p)) / float(r) * 100
           for r, p in zip(real_data, predictions['Predicted Price'])]

    # Plot closing prices and the prediction overlay.
    graphs = [
        {
            'data': [
                Scatter(
                    x=df[ticker_of_interest].index,
                    y=df[ticker_of_interest],
                )
            ],
            'layout': {
                'title': 'Adjusted Close Price',
                'yaxis': {
                    'title': "Price"
                },
                'xaxis': {
                    'title': "Date"
                }
            }
        },
        {
            'data': [
                Scatter(
                    x=predictions['Date'],
                    y=predictions['Predicted Price'],
                    name='Predicted Price',
                ),
                Scatter(
                    x=predictions['Date'],
                    y=real_data,
                    name='Actual Price',
                ),
                Scatter(
                    x=predictions['Date'],
                    y=pct,
                    name='PCT',
                    yaxis='y2',
                    line=dict(
                        width=1,
                        dash='dash')
                )
            ],
            'layout': {
                'title': 'Predicted Adjusted Close Price',
                'xaxis': {
                    'title': "Date"
                },
                'yaxis': {
                    'title': "Price"
                },
                'yaxis2': {
                    'title': 'Actual vs. Predicted',
                    'overlaying': 'y',
                    'side': 'right'
                }
            }
        }]
    ids = ["graph-{}".format(i) for i, _ in enumerate(graphs)]
    graphJSON = json.dumps(graphs, cls=plotly.utils.PlotlyJSONEncoder)
    return render_template('go.html', query=query, df=data_dict, ids=ids, graphJSON=graphJSON)
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(debug=True)
| 4,670 | 1,453 |
# Generated by Django 3.2.11 on 2022-01-18 12:43
from django.db import migrations, models
import generator.models
class Migration(migrations.Migration):
    """Initial migration for the generator app: creates the Vault model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Vault',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Application-level unique identifier, produced by
                # generator.models.generate_uuid on insert.
                ('uuid', models.CharField(default=generator.models.generate_uuid, max_length=40, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                # Newest vaults first by default.
                'ordering': ['-created_at'],
            },
        ),
    ]
| 720 | 213 |
from __future__ import unicode_literals
import vmraid
def execute():
    """Patch migration: rename legacy DocType.document_type values.

    'Transaction' becomes 'Document' and 'Master' becomes 'Setup',
    matching the current document_type vocabulary.
    """
    vmraid.db.sql("""update tabDocType set document_type='Document'
		where document_type='Transaction'""")
    vmraid.db.sql("""update tabDocType set document_type='Setup'
		where document_type='Master'""")
| 274 | 89 |
# Generated by Django 3.0.2 on 2020-02-17 22:42
from django.db import migrations, models
import hjson
import toml
def init_toml(apps, schema_editor):
    """Convert every Analysis recipe's json_text from hjson to TOML."""
    analysis_model = apps.get_model('recipes', 'Analysis')
    for analysis in analysis_model.objects.all():
        # Load the stored hjson and re-serialize it as TOML.
        toml_text = toml.dumps(hjson.loads(analysis.json_text))
        # Queryset update bypasses save(), so auto-dates are not bumped.
        analysis_model.objects.filter(id=analysis.id).update(json_text=toml_text)
def init_label(apps, schema_editor):
    """Seed each Project's new label field from its uid."""
    project_model = apps.get_model('recipes', 'Project')
    for proj in project_model.objects.all():
        # Queryset update bypasses save(), so auto-dates are not bumped.
        project_model.objects.filter(id=proj.id).update(label=proj.uid)
class Migration(migrations.Migration):
    """Recipes 0008: drop diff fields, add project labels, convert recipes to TOML."""

    dependencies = [
        ('recipes', '0007_counts'),
    ]

    operations = [
        # The diff_author/diff_date bookkeeping fields are no longer used.
        migrations.RemoveField(
            model_name='analysis',
            name='diff_author',
        ),
        migrations.RemoveField(
            model_name='analysis',
            name='diff_date',
        ),
        # New unique, nullable label; back-filled from uid by init_label below.
        migrations.AddField(
            model_name='project',
            name='label',
            field=models.CharField(max_length=32, null=True, unique=True),
        ),
        migrations.AlterField(
            model_name='analysis',
            name='json_text',
            field=models.TextField(default='', max_length=10000),
        ),
        # Data migrations defined at the top of this file.
        # NOTE(review): no reverse_code is supplied, so this migration is
        # irreversible - confirm that is intended.
        migrations.RunPython(init_toml),
        migrations.RunPython(init_label),
    ]
| 1,529 | 480 |
import unittest
from cybercaptain.processing.filter import processing_filter
class ProcessingFilterLTTest(unittest.TestCase):
    """
    Test the filters for LT (less-than) rules.
    """

    def setUp(self):
        # Fixture moved from __init__ to setUp: the unittest idiom for
        # per-test construction (behaviour is unchanged - both run once
        # per test method).
        arguments = {'src': '.',
                     'filterby': 'LT',
                     'rule': 'LT 500',
                     'target': '.'}
        self.processing = processing_filter(**arguments)

    def test_LT_positive(self):
        """
        Test if the filter passes LT correctly.
        """
        # border line test
        self.assertTrue(self.processing.filter({"LT": 499}), 'should not be filtered')
        # deep test
        self.assertTrue(self.processing.filter({"LT": 400}), 'should not be filtered')

    def test_LT_negative(self):
        """
        Test if the filter fails LT correctly.
        """
        # border line test
        self.assertFalse(self.processing.filter({"LT": 500}), 'should be filtered')
        # deep test
        self.assertFalse(self.processing.filter({"LT": 600}), 'should be filtered')
| 1,100 | 313 |
import logging
import numpy as np
import pandas as pd
from sklearn.neighbors.kde import KernelDensity
from scipy.optimize import minimize
from src.utils import cov2corr
class MarcenkoPastur:
    """Marcenko-Pastur utilities for covariance/correlation denoising.

    Fits the MP density to an empirical eigenvalue spectrum to separate
    signal eigenvalues from random-noise ones, then denoises or detones
    correlation matrices rebuilt from their eigen-decomposition.
    """

    def __init__(self, points=1000):
        """
        :param points: number of grid points used when evaluating the pdf
        :type points: int
        """
        self.points = points
        # Largest noise eigenvalue; populated by fit().
        self.eigen_max = None

    def pdf(self, var, q):
        """
        The Marcenko-Pastur probability density function.

        :param var: The variance
        :type var: float
        :param q: N/T, number of observations over the number of dates
        :type q: float
        :return: pdf values indexed by eigenvalue
        :rtype: pd.Series
        """
        if isinstance(var, np.ndarray):
            # scipy.optimize passes a 1-element array; unwrap it.
            var = var.item()
        eigen_min = var * (1 - (1. / q) ** .5) ** 2
        eigen_max = var * (1 + (1. / q) ** .5) ** 2
        eigen_values = np.linspace(eigen_min,
                                   eigen_max,
                                   self.points)
        pdf = q / (2 * np.pi * var * eigen_values) * (
            (eigen_max - eigen_values) * (
                eigen_values - eigen_min)) ** .5
        pdf = pd.Series(pdf, index=eigen_values)
        return pdf

    def err_pdfs(self, var, eigenvalues, q, bandwidth):
        """Sum of squared errors between the theoretical MP pdf and a KDE
        fit of the empirical eigenvalues (objective function for fit())."""
        pdf0 = self.pdf(var, q)
        pdf1 = fit_kde(
            eigenvalues, bandwidth,
            x=pdf0.index.values.reshape(-1, 1)
        )
        sse = np.sum((pdf1 - pdf0) ** 2)
        return sse

    def fit(self, eigenvalues, q, bandwidth):
        """Fit the MP distribution to empirical eigenvalues.

        Stores the fitted noise cut-off in ``self.eigen_max``.

        :return: (largest noise eigenvalue, fitted variance)
        """
        # Pass the bound method directly; the previous lambda wrapper
        # added nothing.
        out = minimize(self.err_pdfs, 0.5,
                       args=(eigenvalues, q, bandwidth),
                       bounds=((1E-5, 1 - 1E-5),))
        # Fall back to unit variance if the optimizer fails.
        var = out['x'][0] if out['success'] else 1
        eigen_max = var * (1 + (1. / q) ** 0.5) ** 2
        self.eigen_max = eigen_max
        return eigen_max, var

    def facts_number(self, eigenvalues):
        """Number of signal (non-noise) eigenvalues.

        :raises ValueError: if fit() has not been run yet.
        """
        if self.eigen_max is None:
            raise ValueError("Eigen max is not calculated. Please "
                             "run the fit method before calculating the "
                             "facts number")
        # eigenvalues is a diagonal matrix sorted descending; reverse to
        # ascending order for searchsorted.
        return eigenvalues.shape[0] - \
            np.diag(eigenvalues)[::-1].searchsorted(self.eigen_max)

    def _denoise_constant_residual(self, eigenvalues, eigenvectors):
        """Denoise by averaging the noise eigenvalues, then rescale."""
        facts_number = self.facts_number(eigenvalues)
        eigenvalues_ = eigenvalues.diagonal().copy()
        # Denoising by making constant the eigenvalues past facts_number.
        eigenvalues_[facts_number:] = eigenvalues_[
            facts_number:].sum() / float(
            eigenvalues_.shape[0] - facts_number)
        eigenvalues_ = np.diag(eigenvalues_)
        cov = eigenvectors @ eigenvalues_ @ eigenvectors.T
        # Rescaling back to a correlation matrix.
        return cov2corr(cov)

    def _denoise_shrink(self, eigenvalues, eigenvectors, alpha=0):
        """Denoise by shrinking the noise part of the spectrum by alpha."""
        facts_number = self.facts_number(eigenvalues)
        # Eigenvalues/eigenvectors of the signal part (above the cut-off).
        eigenvalues_l = eigenvalues[:facts_number, :facts_number]
        eigenvectors_l = eigenvectors[:, :facts_number]
        # Eigenvalues/eigenvectors of the noise part (below the cut-off).
        eigenvalues_r = eigenvalues[facts_number:, facts_number:]
        eigenvectors_r = eigenvectors[:, facts_number:]
        corr_l = eigenvectors_l @ eigenvalues_l @ eigenvectors_l.T
        corr_r = eigenvectors_r @ eigenvalues_r @ eigenvectors_r.T
        return corr_l + alpha * corr_r + (1 - alpha) * np.diag(
            corr_r.diagonal())

    def denoise(self, eigenvalues, eigenvectors, method="constant", alpha=0):
        """
        Remove noise from corr by fixing random eigenvalues.

        :param eigenvalues: diagonal matrix of eigenvalues (descending)
        :param eigenvectors: matching eigenvector columns
        :param method: "constant" or "shrink"
        :type method: str
        :param alpha: shrinkage factor used by the "shrink" method
        :return: denoised correlation matrix
        :raises ValueError: for an unknown method
        """
        if method == "constant":
            return self._denoise_constant_residual(eigenvalues, eigenvectors)
        elif method == "shrink":
            return self._denoise_shrink(eigenvalues, eigenvectors, alpha=alpha)
        else:
            raise ValueError(f"The only available denoising methods are "
                             f"'constant' or 'shrink'. The method provided is "
                             f"{method}")

    def detone(self, eigenvalues, eigenvectors):
        """Remove the market component (largest eigenvalue) and rescale.

        Bug fix: the previous code indexed with ``[0, 0]`` / ``[:, 0]``,
        producing a scalar and a 1-D vector, so the ``@`` products either
        raised (matmul rejects scalars) or collapsed to a dot product.
        Keeping 2-D slices makes the subtraction a proper rank-1 update.
        """
        eigenvalues_m = eigenvalues[:1, :1]
        eigenvectors_m = eigenvectors[:, :1]
        cov = (eigenvectors @ eigenvalues @ eigenvectors.T) - \
              (eigenvectors_m @ eigenvalues_m @ eigenvectors_m.T)
        return cov2corr(cov)
def get_pca(matrix):
    """
    Eigen-decompose a Hermitian matrix, sorted by descending eigenvalue.

    :param matrix: Hermitian matrix
    :type matrix: np.matrix or np.ndarray
    :return: (eigenvalues as a diagonal matrix, eigenvector columns)
    :rtype: tuple
    """
    values, vectors = np.linalg.eigh(matrix)
    order = np.argsort(values)[::-1]  # largest eigenvalue first
    vectors = vectors[:, order]
    diag_values = np.diagflat(values[order])
    return diag_values, vectors
def fit_kde(obs, bandwidth=0.25, kernel='gaussian', x=None):
    """
    Fit a kernel density estimate to *obs* and evaluate its pdf.

    :param obs: observations (1-D array or column vector)
    :param bandwidth: KDE bandwidth
    :param kernel: kernel name understood by sklearn's KernelDensity
    :param x: points at which the fitted KDE is evaluated; defaults to
        the unique observation values
    :type x: array like
    :return: pdf values indexed by the evaluation points
    :rtype: pd.Series
    """
    samples = obs.reshape(-1, 1) if len(obs.shape) == 1 else obs
    kde = KernelDensity(kernel=kernel, bandwidth=bandwidth).fit(samples)
    grid = np.unique(samples).reshape(-1, 1) if x is None else x
    densities = np.exp(kde.score_samples(grid))
    return pd.Series(densities, index=grid.flatten())
| 6,129 | 1,870 |
from flows.builtin.webserver.factory import simple_http_server_flow_factory
from jobrunner.utils import list_tasks_in_flow
from tests.testcase import TestCase
class TestSimpleHTTPServerFlowFactory(TestCase):
    """Unit tests for simple_http_server_flow_factory."""

    def test_simple_http_server_flow_factory_creates_flow_with_name(self):
        """The factory names the flow 'simple_http_server_flow'."""
        flow = simple_http_server_flow_factory()
        self.assertEqual(flow.name, 'simple_http_server_flow')

    def test_simple_http_server_has_correct_tasks(self):
        """The flow contains exactly the single webserver task."""
        flow = simple_http_server_flow_factory()
        self.assertCountEqual(
            list_tasks_in_flow(flow),
            ('run_simple_webserver',),
        )

    def test_simple_http_server_is_retried(self):
        """The flow's retry policy allows 10 attempts."""
        flow = simple_http_server_flow_factory()
        self.assertEqual(flow.retry._attempts, 10)
| 803 | 251 |
from django.contrib.sites.models import Site
from django.templatetags.static import static
from ..core.utils import build_absolute_uri
def get_email_context():
    """Build kwargs for send_email plus the shared email template context.

    Returns a ``(send_email_kwargs, email_template_context)`` pair based
    on the current Site: its domain, name and an absolute logo URL.
    """
    site: Site = Site.objects.get_current()
    send_email_kwargs = {"from_email": site.settings.default_from_email}
    template_context = {
        "domain": site.domain,
        "logo_url": build_absolute_uri(static("images/logo-light.jpg")),
        "site_name": site.name,
    }
    return send_email_kwargs, template_context
| 530 | 164 |
from app.models import *
import pytest
from app.forms import *
from werkzeug.datastructures import MultiDict
from flask import url_for
import json
from flask.ext.login import current_user, login_user, logout_user
@pytest.fixture
def profession(session):
    """A persisted Profession row."""
    prof = Profession(name='Software Engineer in Test')
    session.add(prof)
    session.commit()
    return prof
@pytest.fixture
def password():
    """The plaintext password shared by the monkey fixtures."""
    return '123qwe'
@pytest.fixture
def monkey(session, profession, password):
    """A persisted Monkey user with a hashed password."""
    user = Monkey(
        email='crazy@jungles.com',
        fullname='Jack London',
        about='Struggling hard in jungles',
        profession_id=profession.id
    )
    user.set_password(password)
    session.add(user)
    session.commit()
    return user
@pytest.fixture
def idea(session, monkey):
    """A persisted Idea authored by the monkey fixture."""
    new_idea = Idea(
        title='This is test idea',
        body='Body of the test idea',
        author_id=monkey.id
    )
    session.add(new_idea)
    session.commit()
    return new_idea
@pytest.fixture
def field(session):
    """A persisted Field row."""
    fld = Field(name='Communications')
    session.add(fld)
    session.commit()
    return fld
@pytest.fixture
def idea_status(session):
    """A persisted IdeaStatus row."""
    status = IdeaStatus(name='Demonstration ready')
    session.add(status)
    session.commit()
    return status
@pytest.fixture
def monkey2(session, monkey, password):
    """Returns a monkey independent from idea fixture
    """
    user = Monkey(
        email='fast@jungles.com',
        fullname='Tom Sawyer',
        about='Jungles sharpen skills',
        profession_id=monkey.profession_id
    )
    user.set_password(password)
    session.add(user)
    session.commit()
    return user
def post_login(client, email, password):
    """POST the login form for the given credentials."""
    client.post(url_for('auth.login'), data={
        'email': email,
        'password': password
    })
class TestAuthViews:
    """Integration tests for the auth blueprint (login/logout/register)."""

    def test_login(self, client, monkey, password):
        with client:
            post_login(client, monkey.email, password)
            assert current_user == monkey

    def test_logout(self, client, request_ctx, monkey, password):
        # Fix: `password` was referenced in the body but missing from the
        # fixture list, which made this test fail with a NameError.
        with client:
            post_login(client, monkey.email, password)
            assert current_user == monkey
            client.post(url_for('auth.logout'))
            assert not current_user.is_authenticated()

    def test_register(self, client, profession):
        data = {
            'email': 'test@siroca.com',
            'fullname': 'Testing Registration',
            'password': '123qwe',
            'about': 'a' * 21,
            'profession_id': profession.id
        }
        assert Monkey.query.count() == 0
        r = client.post(url_for('auth.register'), data=data)
        assert Monkey.query.count() == 1
class TestJoinRequestViews:
    """Integration tests for the join_requests blueprint."""

    def test_request_to_join(self, client, idea, monkey2, password):
        # monkey2 (not the idea's author) asks to join the idea.
        with client:
            post_login(client, monkey2.email, password)
            assert current_user.is_authenticated()
            assert JoinRequest.query.count() == 0
            data = {
                'message': 'Please, accept this test join request'
            }
            r = client.post(
                url_for('join_requests.request_to_join', idea_id=idea.id),
                data=data
            )
            assert JoinRequest.query.count() == 1
class TestIdeaViews:
    """Integration tests for the ideas blueprint."""

    def test_add_new_idea(self, client, monkey,
                          password, field, idea_status):
        with client:
            post_login(client, monkey.email, password)
            data = {
                'title': 'This is test idea',
                'body': 'This is test body',
                'is_public': True,
                'fields': [field.id],
                'status_id': idea_status.id
            }
            r = client.post(url_for('ideas.add_idea'), data=data)
            # Fix: removed leftover `print r` / `print r.data` debug
            # statements (Python-2 print syntax, a SyntaxError under Python 3).
            assert Idea.query.count() == 1

    def test_accept_request(self, client, session, idea, monkey2, password):
        # Seed a pending join request, then accept it as the idea's author.
        jr = JoinRequest(monkey2, idea)
        session.add(jr)
        session.commit()
        with client:
            post_login(client, idea.author.email, password)
            r = client.post(
                url_for(
                    'join_requests.accept_decline_request',
                    id=jr.id,
                    action='accept'
                )
            )
            assert r.status_code == 200
class TestSuggestionViews:
    """Integration tests for the suggestions blueprint."""

    def test_suggest_to_user(self, client, idea, monkey2, password):
        # The idea's author suggests the idea to monkey2.
        with client:
            post_login(client, idea.author.email, password)
            data = {
                'idea_id': idea.id
            }
            r = client.post(
                url_for(
                    'suggestions.suggest_to_user',
                    monkey_id=monkey2.id
                ),
                data=data,
                follow_redirects=True
            )
            assert r.status_code == 200
            assert Suggestion.query.count() == 1
| 5,045 | 1,502 |
# -*- coding: utf-8 -*-
from src import app
from flask import render_template
from .controladores.blog import *
from .controladores.about import *
from .controladores.post import *
from .interpreter import get_all_proy, get_all_posts
# Fix: the route was registered twice ("@app.route('/')" stacked on
# "@app.route('/', methods=['GET'])"); one registration suffices and GET
# is explicit here.
@app.route('/', methods=['GET'])
def index():
    """Render the home page with the three most recent projects and posts."""
    title = "BLG0"
    proys = get_all_proy(limit=3)
    posts = get_all_posts(limit=3)
    return render_template('_views/index.html', webTitle=title, proys=proys, posts=posts)
| 482 | 172 |
# Read the operator and two 3-D vectors (one per line, space-separated).
operand = input()
x1, y1, z1 = (float(v) for v in input().split(' '))
x2, y2, z2 = (float(v) for v in input().split(' '))

if operand == "+":
    res = (x1 + x2, y1 + y2, z1 + z2)
elif operand == "-":
    res = (x1 - x2, y1 - y2, z1 - z2)
elif operand == "@":
    # component-wise (Hadamard) product
    res = (x1 * x2, y1 * y2, z1 * z2)
else:
    # any other operator: cross product
    res = (float(y1 * z2 - y2 * z1), float(-x1 * z2 + x2 * z1), float(x1 * y2 - x2 * y1))
print("%.2f %.2f %.2f" % res)
| 447 | 242 |
# MIT License
#
# Copyright (c) 2019 Creative Commons
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# from __future__ imports must occur at the beginning of the file. DO NOT CHANGE!
from __future__ import annotations
import os
import stat
import unittest
from unittest.mock import call
from unittest.mock import Mock
from unittest.mock import patch
from linkedin import Driver
from lib import DRIVER_PATH
from errors import WebDriverPathNotGivenException
from errors import WebDriverNotExecutableException
class TestDriverClass(unittest.TestCase):
    """Tests for linkedin.Driver construction and webdriver option wiring."""

    @unittest.skipIf(not os.getuid() == 0, "Requires root privileges!")
    def test_constructor_method_with_invalid_executable_path(
            self: TestDriverClass) -> None:
        """Driver must reject non-string paths and non-executable binaries."""
        # Non-string path objects must raise.
        paths = [1, (1, 2, 3), [1, 2, 3], {1: 1, 2: 2}]
        for path in paths:
            with self.assertRaises(WebDriverPathNotGivenException):
                driver = Driver(path)
        # Remember the chromedriver's permissions so they can be restored
        # after the non-executable check below.
        original_file_permissions = stat.S_IMODE(
            os.lstat(DRIVER_PATH).st_mode)

        def remove_execute_permissions(path):
            """Remove write permissions from this path, while keeping all other
            permissions intact.

            Params:
                path: The path whose permissions to alter.
            """
            NO_USER_EXECUTE = ~stat.S_IXUSR
            NO_GROUP_EXECUTE = ~stat.S_IXGRP
            NO_OTHER_EXECUTE = ~stat.S_IXOTH
            NO_EXECUTE = NO_USER_EXECUTE & NO_GROUP_EXECUTE & NO_OTHER_EXECUTE
            current_permissions = stat.S_IMODE(os.lstat(path).st_mode)
            os.chmod(path, current_permissions & NO_EXECUTE)

        # A chromedriver without execute permission must be rejected.
        remove_execute_permissions(DRIVER_PATH)
        with self.assertRaises(WebDriverNotExecutableException):
            driver = Driver(driver_path=DRIVER_PATH)
        # place the original file permissions back
        os.chmod(DRIVER_PATH, original_file_permissions)

    @patch("linkedin.Driver.enable_webdriver_chrome")
    def test_constructor_method_with_valid_chromedriver_path(self: TestDriverClass, mock_enable_webdriver_chrome: Mock) -> None:
        """A valid driver path must trigger webdriver start-up."""
        driver = Driver(driver_path=DRIVER_PATH)
        mock_enable_webdriver_chrome.assert_called()

    @patch("selenium.webdriver.ChromeOptions.add_argument")
    def test_constructor_method_add_argument_internal_calls(
            self: TestDriverClass, mock_add_argument: Mock) -> None:
        """Every requested Chrome option must be forwarded to add_argument."""
        calls = [
            call(Driver.HEADLESS),
            call(Driver.INCOGNITO),
            call(Driver.NO_SANDBOX),
            call(Driver.DISABLE_GPU),
            call(Driver.START_MAXIMIZED),
            call(Driver.DISABLE_INFOBARS),
            call(Driver.ENABLE_AUTOMATION),
            call(Driver.DISABLE_EXTENSIONS),
            call(Driver.DISABLE_NOTIFICATIONS),
            call(Driver.DISABLE_SETUID_SANDBOX),
            call(Driver.IGNORE_CERTIFICATE_ERRORS)]
        driver = Driver(driver_path=DRIVER_PATH, options=[
            Driver.HEADLESS, Driver.INCOGNITO, Driver.NO_SANDBOX, Driver.DISABLE_GPU, Driver.START_MAXIMIZED,
            Driver.DISABLE_INFOBARS, Driver.ENABLE_AUTOMATION, Driver.DISABLE_EXTENSIONS, Driver.DISABLE_NOTIFICATIONS,
            Driver.DISABLE_SETUID_SANDBOX, Driver.IGNORE_CERTIFICATE_ERRORS])
        mock_add_argument.assert_has_calls(calls)
| 4,109 | 1,361 |
from flask import Flask
from flask_bootstrap import Bootstrap
from flask_nav import Nav
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask_mail import Mail
from Config import Config
# Application wiring: create the Flask app and bind all extensions to it.
app = Flask(__name__)
app.config.from_object(Config)
Bootstrap(app)
nav = Nav(app)
db = SQLAlchemy(app)
migrate = Migrate(app, db)
mail = Mail(app)
# Imported at the bottom (after `app`/`db` exist) to avoid circular imports.
from app import views, forms, models, errors, tasks
import sys
sys.path.append("../bin/")
import pyHiChi as hichi
import numpy as np
def valueE(x, y, z):
    """Initial E field as a vector: E = (0, cos(z), 0)."""
    return hichi.Vector3d(0, np.cos(z), 0)
def valueEx(x, y, z):
    """x-component of the initial E field (identically zero)."""
    return 0
def valueEy(x, y, z):
    """y-component of the initial E field: cos(z)."""
    return np.cos(z)
def valueEz(x, y, z):
    """z-component of the initial E field (identically zero)."""
    return 0
def valueB(x, y, z):
    """Initial B field as a vector: B = (-cos(z), 0, 0)."""
    return hichi.Vector3d(-np.cos(z), 0, 0)
def valueBx(x, y, z):
    """x-component of the initial B field: -cos(z)."""
    return -np.cos(z)
def valueBy(x, y, z):
    """y-component of the initial B field (identically zero)."""
    return 0
def valueBz(x, y, z):
    """z-component of the initial B field (identically zero)."""
    return 0
# --- Grid / solver configuration -------------------------------------------
field_size = hichi.Vector3d(5, 10, 11)
min_coords = hichi.Vector3d(0.0, 1.0, 0.0)
max_coords = hichi.Vector3d(3.5, 7.0, 2*np.pi)
field_step = (max_coords - min_coords) / field_size
time_step = 1e-16

# Two identical Yee grids, initialised through the two set_E/set_B overloads
# (vector-valued callback vs. per-component callbacks) so the panels below
# can be compared visually.
field1 = hichi.YeeField(field_size, min_coords, field_step, time_step)
field2 = hichi.YeeField(field_size, min_coords, field_step, time_step)

field1.set_E(valueE)
field1.set_B(valueB)
field2.set_E(valueEx, valueEy, valueEz)
field2.set_B(valueBx, valueBy, valueBz)

# --- Sample both fields on an (x, z) slice at y = 0 -------------------------
import matplotlib.pyplot as plt

N = 37
x = np.arange(0, 3.5, 3.5/N)
z = np.arange(0, 2*np.pi, 2*np.pi/N)

Ex1 = np.zeros(shape=(N, N))
Ex2 = np.zeros(shape=(N, N))
Ey1 = np.zeros(shape=(N, N))
Ey2 = np.zeros(shape=(N, N))
Bx1 = np.zeros(shape=(N, N))
Bx2 = np.zeros(shape=(N, N))

for ix in range(N):
    for iy in range(N):
        coord_xz = hichi.Vector3d(x[ix], 0.0, z[iy])
        E1 = field1.get_E(coord_xz)
        Ex1[ix, iy] = E1.x
        Ey1[ix, iy] = E1.y
        Bx1[ix, iy] = field1.get_B(coord_xz).x
        E2 = field2.get_E(coord_xz)
        Ex2[ix, iy] = E2.x
        Ey2[ix, iy] = E2.y
        Bx2[ix, iy] = field2.get_B(coord_xz).x

# --- Plot the six panels -----------------------------------------------------
# Improvement: the original repeated the same imshow/colorbar/label stanza
# six times; one loop over (title, data) pairs produces an identical figure.
fig, axes = plt.subplots(ncols=3, nrows=2)
panels = [
    ("Ex1", Ex1), ("Ey1", Ey1), ("Bx1", Bx1),
    ("Ex2", Ex2), ("Ey2", Ey2), ("Bx2", Bx2),
]
for ax, (title, data) in zip(axes.flat, panels):
    bar = ax.imshow(data, cmap='RdBu', interpolation='none',
                    extent=(0, 2*np.pi, 0, 3.5))
    fig.colorbar(bar, ax=ax)
    ax.set_title(title)
    ax.set_xlabel("x")
    ax.set_ylabel("z")

plt.tight_layout()
plt.show()
| 3,037 | 1,608 |
import scrapy
import json
from douyu.items import DouyuItem
class SpiderSpider(scrapy.Spider):
    """Crawl Douyu's vertical-room API, yielding anchor name + cover image."""
    name = 'douyu'
    # Fix: allowed_domains must be bare domain names, not full URLs
    # (scrapy's offsite middleware cannot match a URL).  The CDN API host
    # actually being crawled is included as well.
    allowed_domains = ['douyu.com', 'douyucdn.cn']
    base_url = 'http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset='
    offset = 0
    start_urls = [base_url + str(offset)]

    def parse(self, response):
        """Parse one API page; yield items and schedule the next page."""
        # Extract the `data` array from the JSON payload.
        data_list = json.loads(response.body)['data']
        if len(data_list) == 0:
            # Empty page: pagination is exhausted, stop recursing.
            return
        for data in data_list:
            item = DouyuItem()
            item['nickname'] = data['nickname'].encode('utf-8')
            item['vertical_src'] = data['vertical_src']
            yield item
        self.offset += 20
        url = self.base_url + str(self.offset)
        # callback hands the next page's response back to parse itself;
        # dont_filter bypasses the dupe filter for these API URLs.
        yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
| 850 | 306 |
from datetime import date, time, datetime
from dateutil.parser import parse
import dateutil.tz
# Accepted spellings for boolean values (case-insensitive, whitespace-tolerant).
TRUE_STRINGS = ("true", "t", "yes", "y", "1")
FALSE_STRINGS = ("false", "f", "no", "n", "0")


def string_to_boolean(val):
    """
    A very dumb string to boolean converter. Will fail hard
    if the conversion doesn't succeed.
    """
    if val is None:
        return False
    if isinstance(val, bool):
        return val
    normalized = val.lower().strip()
    if normalized in TRUE_STRINGS:
        return True
    if normalized in FALSE_STRINGS:
        return False
    raise ValueError("%s is not a parseable boolean!" % val)
def string_to_datetime(val):
    """
    Coerce *val* into a datetime: datetimes pass through, dates become
    midnight of that day, and strings are parsed with dateutil.
    """
    if isinstance(val, datetime):
        return val
    if isinstance(val, date):
        return datetime.combine(val, time())
    return parse(val)
def string_to_utc_datetime(val):
    """Convert *val* to a naive UTC datetime; naive inputs pass through."""
    dt = string_to_datetime(val)
    if dt.tzinfo is not None:
        dt = dt.astimezone(dateutil.tz.tzutc()).replace(tzinfo=None)
    return dt
# Wire formats: ISO-8601 with mandatory microseconds + trailing 'Z', and plain date.
ISO_DATETIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
ISO_DATE_FORMAT = '%Y-%m-%d'


def json_format_datetime(dt):
    """
    Serialize an offset-naive datetime to the JSON wire format;
    includes microseconds (always)

    >>> json_format_datetime(datetime.datetime(2015, 4, 8, 12, 0, 1))
    '2015-04-08T12:00:01.000000Z'
    """
    from dimagi.ext.jsonobject import _assert
    _assert(isinstance(dt, datetime),
            'json_format_datetime expects a datetime: {!r}'.format(dt))
    # NOTE(review): this isinstance guard is redundant if _assert raises on
    # failure, but _assert's failure mode isn't visible here - left as-is.
    if isinstance(dt, datetime):
        _assert(dt.tzinfo is None,
                'json_format_datetime expects offset-naive: {!r}'.format(dt))
    return dt.strftime(ISO_DATETIME_FORMAT)
def json_format_date(date_):
    """Serialize a date (or datetime) to the 'YYYY-MM-DD' wire format."""
    return format(date_, ISO_DATE_FORMAT)
| 1,716 | 601 |
import nltk
from nltk.corpus import wordnet
class Keyword:
    """Interactive helper that prints WordNet synonyms/antonyms for a word."""

    def synonymn_generator(self):
        """Prompt for a word, then print its synonym and antonym sets."""
        synonyms = []
        antonyms = []
        word = input("enter the word : ")
        for syn in wordnet.synsets(word):
            for lemma in syn.lemmas():
                synonyms.append(lemma.name())
                # Only the first antonym of each lemma is collected.
                if lemma.antonyms():
                    antonyms.append(lemma.antonyms()[0].name())
        print(set(synonyms))
        print(set(antonyms))
# Script entry: run the interactive synonym generator once.
p1 = Keyword()
p1.synonymn_generator()
"""Entry point for linking dotfiles.
"""
from __future__ import annotations
import argparse
import os
import subprocess
import sys
from dataclasses import dataclass
from pathlib import Path
from typing import Optional
from . import log
from .link import Linker
from .resolver import Resolver
from .scan import Scanner
from .schema import DotfilesJson, PrettyPath
def main() -> None:
    """Entry point.

    Loads dotfiles.json, resolves the link mappings, then either scans for
    untracked dotfiles (--scan) or creates the links.
    """
    args = Args.parse_args()
    if args.dotfiles is None:
        repo_root = _get_repo_root()
        dotfiles_path = repo_root / "dotfiles.json"
    else:
        repo_root = args.dotfiles.parent.absolute()
        dotfiles_path = args.dotfiles
    # Fix: the file handle was opened without a context manager and leaked
    # if load_from_file raised; `with` guarantees it is closed.
    with dotfiles_path.open() as dotfiles_fh:
        dotfiles = DotfilesJson.load_from_file(dotfiles_fh)
    link_root = Path.home() if args.link_root is None else args.link_root
    resolver = Resolver(
        repo_root=repo_root, link_root=link_root, relative=not args.absolute
    )
    resolved = resolver.resolve_all(dotfiles)
    if args.scan:
        log.warn("Scanning for dotfiles is an experimental feature.")
        scanner = Scanner(link_root, resolved.ignored, resolved.dotfiles)
        for p in scanner.find_dotfiles():
            # TODO: Fill in scanner processing.
            # Actions: skip / quit / ignore the path / move it to dotfiles;
            # if it's a directory, recurse; if a file, cat it / show stat.
            p_disp = str(PrettyPath.from_path(p).disp)
            if p.is_dir():
                log.info("📁 " + p_disp)
            else:
                log.info(p_disp)
        # TODO: Offer to commit new files...?
    else:
        linker = Linker(verbose=args.verbose)
        linker.link_all(resolved.dotfiles)
@dataclass
class Args:
    """Parsed command-line arguments; see ``_argparser``."""

    dotfiles: Optional[Path]    # explicit dotfiles.json, or None to autodetect
    link_root: Optional[Path]   # where links are created; None -> home dir
    absolute: bool              # create absolute instead of relative links
    scan: bool                  # scan for untracked dotfiles instead of linking
    verbose: bool               # chattier output

    @classmethod
    def parse_args(cls) -> Args:
        """Build an Args instance from ``sys.argv``."""
        ns = _argparser().parse_args()
        return cls(
            dotfiles=ns.dotfiles,
            link_root=ns.link_root,
            absolute=ns.absolute,
            scan=ns.scan,
            verbose=ns.verbose,
        )
def _argparser() -> argparse.ArgumentParser:
"""Command-line argument parser.
"""
parser = argparse.ArgumentParser(description="links dotfiles")
parser.add_argument(
"-d", "--dotfiles", type=Path, help="The dotfiles.json file to load",
)
parser.add_argument(
"-l",
"--link-root",
type=Path,
help="Where to create links from; defaults to your home directory",
)
parser.add_argument(
"-a",
"--absolute",
action="store_true",
help="Create absolute links, rather than relative ones",
)
parser.add_argument(
"-s", "--scan", action="store_true", help="Scan for untracked dotfiles",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Make output more verbose",
)
return parser
def _get_repo_root() -> Path:
    """Locate the git repository root via `git rev-parse`.

    Logs a fatal message and exits if git is missing or the command fails.
    """
    failure = None
    try:
        proc = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            capture_output=True,
            text=True,
            check=False,
        )
        if proc.returncode != 0:
            # Not inside a work tree, or some other git failure.
            failure = "Couldn't get repo root from git; pass --dotfiles explicitly."
    except FileNotFoundError:
        # The git binary itself is missing.
        failure = "Couldn't run `git` to determine repo root; pass --dotfiles explicitly."
    if failure is not None:
        log.fatal(failure)
        sys.exit(1)
    return Path(proc.stdout.strip()).absolute()
if __name__ == "__main__":
    # Allow running the module directly as a script.
    main()
| 3,846 | 1,169 |
import sys
import json
import requests
from veracode_api_signing.plugin_requests import RequestsAuthPluginVeracodeHMAC
from pprint import pprint
from datetime import datetime
from app_definition import AppDefinition
from verified_check import VerifiedStandard, VerifiedTeam, VerifiedContinuous
from verified_report import VerifiedReport, ConsoleReport
from pprint import pprint
# Veracode REST API root for the AppSec endpoints.
url_base = 'https://api.veracode.com/appsec'
min_severity = 3 # findings api only returns medium +
def main():
    """Check every Veracode application against the selected Verified level(s).

    Usage: script [API Key] [API Secret Key] [s|t|c|a]
    Prints a console report; exceptions are logged then re-raised.
    """
    if len(sys.argv) != 4:
        print('Usage: [API Key] [API Secret Key] [Check Type s=Standard t=Team c=Continuous a=All]')
        exit(1)
    auth = RequestsAuthPluginVeracodeHMAC(api_key_id=sys.argv[1],
                                          api_key_secret=sys.argv[2])
    # Process:
    #   make the Verified checks and the reporter,
    #   fetch all policies and all applications,
    #   then for each app: pull its findings, run the checks against the
    #   policies, and report any failures.
    try:
        checks = make_checks(sys.argv[3])
        report = ConsoleReport()
        policies_dict = get_policies_dict(auth)
        apps_list = get_applications_list(auth)
        apps_size = len(apps_list)
        print('%d apps found' % (apps_size))
        # enumerate replaces the manual counter.
        for count, app in enumerate(apps_list, start=1):
            print('Checking %s (%d/%d)' % (app.name, count, apps_size))
            add_findings_to_app(auth, app)
            check(app, policies_dict, report, checks)
        report.output()
    except Exception as e:
        print('Error while scanning or uploading. ' + str(e))
        # Fix: bare `raise` preserves the original traceback
        # (`raise e` re-anchors it at this line).
        raise
def get_policies_dict(auth):
    """Fetch every policy visible to the user.

    :return: dict mapping 'policy_name' -> policy JSON
    """
    policies = {}
    page = 0
    while True:
        r = requests.get(url_base + '/v1/policies', auth=auth,
                         params={'size': 500, 'page': page})
        if not r.ok:
            print(r.text)
            raise Exception('ERROR: Received status code %s while trying to get applications' % r.status_code)
        payload = r.json()
        policies.update({policy['name']: policy
                         for policy in payload['_embedded']['policy_versions']})
        # Stop once the last page has been consumed.
        page += 1
        if page == payload['page']['total_pages']:
            break
    return policies
def make_checks(check_type):
    """Return the list of Verified Check classes for the given check_type."""
    mapping = {
        's': [VerifiedStandard],
        't': [VerifiedTeam],
        'c': [VerifiedContinuous],
        'a': [VerifiedStandard, VerifiedTeam, VerifiedContinuous],
    }
    try:
        return mapping[check_type]
    except KeyError:
        raise Exception('Unknown case. Must be one of %s' % (', '.join(mapping.keys())))
def get_applications_list(auth):
    """Return every application visible to the user as a list of AppDefinition."""
    done = False
    apps_list = []
    page_count = 0
    while not done:
        r = requests.get(url_base + '/v1/applications', auth=auth, params={'size': 500, 'page': page_count})
        if not r.ok:
            print(r.text)
            raise Exception('ERROR: Received status code %s while trying to get applications' % r.status_code)
        # Check pagination; '>=' also terminates when total_pages == 0
        # (the original '==' comparison could loop forever on an empty result).
        total_pages = r.json()['page']['total_pages']
        page_count = page_count + 1
        if page_count >= total_pages:
            done = True
        apps_list.extend([AppDefinition(application) for application in r.json()['_embedded']['applications']])
    return apps_list
def add_findings_to_app(auth, app):
    """Fetch the findings JSON for ``app`` and attach it to the app object."""
    endpoint = url_base + ('/v2/applications/%s/findings' % app.guid)
    response = requests.get(endpoint, auth=auth, params={'severity_gte': min_severity})
    if not response.ok:
        print(response.text)
        raise Exception('ERROR: Received status code %s while trying to get findings' % response.status_code)
    app.add_findings(response.json())
def check(app, policies_dict, report, checks):
    """Run every Verified check class against ``app`` and its policies."""
    for check_cls in checks:
        # instantiate the check, then let it record its results on the report
        check_cls(app, policies_dict).do_check(report)
# Script entry point; main()'s return value becomes the process exit code.
if __name__ == '__main__':
    sys.exit(main())
import os
from .settings import LAYOUT_DIR
from gi import require_version as gi_require_version
gi_require_version('Gtk', '3.0')
from gi.repository import Gtk,Gdk
class EqPopover():
    """Popover holding a 15-band equalizer built from the glade layout.

    Loads the adjustment objects for every band, restores the saved band
    values from the pulse config, and wires the apply/reset buttons.
    """

    # glade object ids of the 15 band adjustments, low to high frequency
    BAND_IDS = [
        'eq_50_hz_adjust', 'eq_100_hz_adjust', 'eq_156_hz_adjust',
        'eq_220_hz_adjust', 'eq_311_hz_adjust', 'eq_440_hz_adjust',
        'eq_622_hz_adjust', 'eq_880_hz_adjust', 'eq_1_25_khz_adjust',
        'eq_1_75_khz_adjust', 'eq_2_5_khz_adjust', 'eq_3_5_khz_adjust',
        'eq_5_khz_adjust', 'eq_10_khz_adjust', 'eq_20_khz_adjust',
    ]

    def __init__(self, button, pulse, index):
        self.builder = Gtk.Builder()
        self.pulse = pulse
        self.layout = pulse.config['layout']
        try:
            self.builder.add_objects_from_file(
                os.path.join(LAYOUT_DIR, f'{self.layout}.glade'),
                ['eq_popup', 'apply_eq_button', 'reset_eq_button'] + self.BAND_IDS
            )
        except Exception as ex:
            print('Error building main window!\n{}'.format(ex))
            # bug fix: the original called sys.exit(1) but never imported sys;
            # raising SystemExit is equivalent and needs no import
            raise SystemExit(1)

        # put a neutral (0 dB) mark on each band's scale widget
        for i in range(1, 16):
            mark = self.builder.get_object(f'eq_{i}')
            mark.add_mark(0, Gtk.PositionType.TOP, '')

        # one adjustment object per band, in BAND_IDS order
        self.eq = [self.builder.get_object(band_id) for band_id in self.BAND_IDS]
        self.Apply_EQ_Button = self.builder.get_object('apply_eq_button')
        self.Reset_EQ_Button = self.builder.get_object('reset_eq_button')

        # restore previously saved band values (comma-separated floats), if any
        control = self.pulse.config[index[0]][index[1]]['eq_control']
        if control != '':
            for band, value in zip(self.eq, control.split(',')):
                band.set_value(float(value))

        self.Apply_EQ_Button.connect('pressed', self.apply_eq, index)
        self.Reset_EQ_Button.connect('pressed', self.reset_eq)

        self.EQ_Popup = self.builder.get_object('eq_popup')
        self.EQ_Popup.set_relative_to(button)
        self.EQ_Popup.popup()
        self.builder.connect_signals(self)

    def apply_eq(self, widget, index):
        """Apply the current band values; no-op if EQ is disabled for this device."""
        control = ','.join(str(band.get_value()) for band in self.eq)
        if self.pulse.config[index[0]][index[1]]['use_eq'] == False:
            return
        self.pulse.apply_eq(index, control=control)

    def disable_eq(self, widget, index):
        """Remove the EQ from the given device."""
        self.pulse.remove_eq(index)

    def reset_eq(self, widget):
        """Reset every band back to 0 dB."""
        for band in self.eq:
            band.set_value(0)

    def reset_value(self, widget, event):
        """Right-click on a band scale resets it to 0."""
        # bug fix: 'gtk.gdk' is an undefined PyGTK name in this GI-based module;
        # use the imported Gdk namespace instead
        if event.type == Gdk.EventType.BUTTON_PRESS and event.button == 3:
            widget.set_value(0)
| 3,850 | 1,382 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: idolized22
"""
import PIL.Image
from matplotlib import pyplot
import numpy as np
def resize_image(Img, DesieredSize=(1300, 1300)):
    """Halve an image repeatedly until its pixel count fits within DesieredSize.

    Parameters
    ----------
    Img : PIL.Image.Image
        Image to shrink (never enlarged).
    DesieredSize : sequence of two ints, optional
        Target (width, height); only the product width*height is compared.

    Returns
    -------
    list
        [resized image, (width, height) of the resized image]
    """
    # fix: the default was a mutable list ([1300, 1300]); a tuple avoids the
    # shared-mutable-default pitfall and is backward compatible.
    target_pixels = DesieredSize[0] * DesieredSize[1]
    while (Img.size[0] * Img.size[1]) > target_pixels:
        # halve each dimension; LANCZOS gives the best downscaling quality
        Img = Img.resize((Img.size[0] // 2, Img.size[1] // 2), resample=PIL.Image.LANCZOS)
    return [Img, Img.size]
def resize_binary_mask(array, new_size):
    """Resize a boolean mask to ``new_size`` (adapted from pycocotools)."""
    as_grayscale = PIL.Image.fromarray(array.astype(np.uint8) * 255)
    resized = as_grayscale.resize(new_size)
    return np.asarray(resized).astype(np.bool_)
| 717 | 291 |
# Read a number from the user and report its integer (truncated) value.
x = input("Enter a no. I will convert to integer")
z = 1
try:
    # float() first so inputs like "3.7" are accepted, then truncate to int
    y = int(float(x))
    z = "float"
except (ValueError, OverflowError):
    # fix: narrowed from a bare except — only conversion failures
    # (bad text, or float('inf') overflowing int) mean bad input
    z = "wrong"
if z == "wrong":
    print("fix your input")
else:
    print("int of your input is:", y)
| 206 | 82 |
#!/usr/bin/python
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=8
#SBATCH --nodes=1
#SBATCH --mem=8192
#
# This script merges SNP and Indel Files called with Samtool and Freebayes that are called individually.
#
import sys
import os
from utils import *
from utils.configuration import *
from commands import *
import glob
from pprint import pprint as pp
#====================#
# Load Configuration #
#====================#
# NOTE: Python 2 script (bare print statements below); runs under SLURM, see header.
# cf: pipeline configuration loaded from the config file given as argv[1].
cf = config(sys.argv[1])
sf = cf.get_sample_file()
eav = cf.eav
#=========#
# Command #
#=========#
# Merging requires that filters within individual vcfs have passed.
merge_vcfs = """bcftools merge --apply-filters PASS -O z {vcf_set} > {merged_vcf_name};
bcftools index -f {merged_vcf_name}"""
#=====================#
# Merge SNP VCF Files #
#=====================#
# If a union variant file does not exist, merge vcfs and generate.
for caller in cf.snp_callers:
    union_variant_all = cf.union_variants[caller]["ALL"]
    if not file_exists(union_variant_all):
        # First pass: merge per-sample "individual" VCFs per variant type.
        for variant_type in ["SNP", "INDEL"]:
            union_variant_file = cf.union_variants[caller][variant_type]
            vcf_set = []
            merged_vcf_name = "{cf.vcf_dir}/{cf.config_name}.{variant_type}.{caller}.individual.vcf.gz".format(**locals())
            # Collect every sample's VCF for this caller/variant type.
            for SM, data in sf.SM_Group_set.items():
                vcf_file = data["vcf_files"][caller + "_individual"][variant_type]
                assert(file_exists(vcf_file))
                vcf_set.append(vcf_file)
            vcf_set = ' '.join(vcf_set)
            comm = merge_vcfs.format(**locals())
            cf.command(comm)
            # Create union variant set.
            comm = r"""bcftools query -f '%CHROM\t%POS\t%REF,%ALT\n' {merged_vcf_name} > {union_variant_file}""".format(**locals())
            cf.command(comm)
            # Remove individual VCFs (and their .csi indices) once the union
            # file and merged VCF have been verified.
            if file_exists(union_variant_file) and check_seq_file(merged_vcf_name):
                vcf_set = ' '.join([x for x in vcf_set.split(" ")] + [x + ".csi" for x in vcf_set.split(" ")])
                comm = "rm " + vcf_set
                cf.command(comm)
        # Concatenate the per-type union files into one, chromosome by
        # chromosome, sorted by position within each chromosome.
        merge_varsets = """
for i in `cat {cf.config_name}.SNP.{caller}.union_variants.txt | cut -f 1 | uniq`; do
touch {union_variant_all}
for f in `ls {cf.config_name}.*.{caller}.union_variants.txt`; do
egrep "^$i\t" $f >> $i.{cf.config_name}.union_variant_temp.txt
done;
cat $i.{cf.config_name}.union_variant_temp.txt | sort -k2,2n >> {union_variant_all}
rm $i.{cf.config_name}.union_variant_temp.txt
done;
""".format(**locals())
        cf.command(merge_varsets)
    else:
        # Union files already exist: merge the per-sample "union" VCFs instead.
        for variant_type in ["SNP", "INDEL"]:
            union_variant_file = cf.union_variants[caller][variant_type]
            vcf_set = []
            print "RUNNING UNION"
            # Run Union
            merged_vcf_name = "{cf.vcf_dir}/{cf.config_name}.{caller}.union.vcf.gz".format(**locals())
            for SM, data in sf.SM_Group_set.items():
                print data["vcf_files"][caller + "_union"][variant_type]
                vcf_file = data["vcf_files"][caller + "_union"][variant_type]
                assert(file_exists(vcf_file))
                vcf_set.append(vcf_file)
            vcf_set = ' '.join(vcf_set)
            comm = merge_vcfs.format(**locals())
            cf.command(comm)
| 3,426 | 1,169 |
import zipfile
import os
def download_and_prepare():
    """Extract the reid dataset and copy the model/code files from Drive into CWD."""
    reid_path = "/content/drive/My Drive/Colab/datasets/reid.zip"
    # fix: context manager guarantees the zip handle is closed
    # (the original opened it and never closed it)
    with zipfile.ZipFile(reid_path, 'r') as file_zip:
        file_zip.extractall(r'.')
    # copy the network definition and the trained weights next to the notebook
    for src, dst in [
        ("/content/drive/My Drive/Colab/ReID works/CVPR fintuning/resnet_ibn_b.py",
         './resnet_ibn_b.py'),
        ("/content/drive/My Drive/Colab/ReID works/CVPR fintuning/net_149.pth",
         './net_149.pth'),
    ]:
        with open(src, "rb") as f, open(dst, 'wb') as fw:
            fw.write(f.read())
# Only bootstrap once: the presence of resnet_ibn_b.py marks a prepared workspace.
if not os.path.exists('./resnet_ibn_b.py'):
    download_and_prepare()
| 769 | 277 |
from typing import Iterable, Dict
import copy
from collections.abc import Mapping
from .exceptions import DictException
def validate_config(cfg: Dict, cfg_spec: Dict):
    """Validate ``cfg`` against ``cfg_spec``.

    Each spec entry may declare ``optional`` (bool) and ``type``.  Raises
    DictException for a missing required key, a wrongly-typed value, or any
    key in ``cfg`` not covered by the spec.
    """
    _cfg = copy.deepcopy(cfg)
    for k, spec in cfg_spec.items():
        exist = k in _cfg
        val = _cfg.pop(k, None)
        if not spec.get('optional'):
            if not exist:
                raise DictException(f'expected key "{k}" in configuration dict as per config spec: "{cfg_spec}"')
        if exist:
            # bug fix: only type-check when the spec declares a type —
            # previously a spec entry without 'type' raised KeyError
            # (the commented-out guard in the original shows this intent)
            if 'type' in spec:
                if not isinstance(val, spec['type']):
                    raise DictException(f'expected key "{k}" value to be type "{spec["type"]}", got "{type(val)}"')
    if _cfg:
        raise DictException(f'configuration dictionary has unexpected values: "{_cfg}"')
def is_dict_subset(x, y):
    """Recursively determine whether the key/value pairs of x are a subset of y."""
    for key, value in x.items():
        if key not in y:
            return False
        if type(value) is dict:
            # nested dict: the counterpart must be iterable and contain it
            if not isinstance(y[key], Iterable) or not is_dict_subset(value, y[key]):
                return False
        elif value != y[key]:
            return False
    return True
def dict_update(updates: Mapping, base_dict: Mapping):
    """Recursively update key/value pairs of base_dict with updates (in place)."""
    for key, new_val in updates.items():
        if type(new_val) is dict and key in base_dict and isinstance(base_dict[key], Iterable):
            # both sides are containers: merge the nested dict recursively
            dict_update(new_val, base_dict[key])
        else:
            # plain value, missing key, or non-iterable target: overwrite
            base_dict[key] = new_val
def dict_sort(d: dict, key=lambda item: item[1]) -> Dict:
    """Sort a dictionary's items (by value, unless another key is given)."""
    return dict(sorted(d.items(), key=key))
from BaraaValidator.transactionValidators import validateTransactionsFolder, validateTransactionsFile
import os
def test_mainMethods():
    """Validate the sample transaction.json shipped next to this test module."""
    base_dir = os.path.dirname(__file__)
    transactions = os.path.join(base_dir, 'transaction.json')
    assert validateTransactionsFile(transactions) == True

test_mainMethods()
# -*- coding: utf-8 -*-
# supports for XEP 0012
__all__ = ['Activity']
from headstock.api import Entity
from headstock.api.jid import JID
from headstock.lib.utils import generate_unique
from bridge import Element as E
from bridge import Attribute as A
from bridge.common import XMPP_LAST_NS, XMPP_CLIENT_NS
class Activity(Entity):
    """XEP-0012 (Last Activity) iq stanza wrapper.

    NOTE: Python 2 module (uses ``long`` and ``unicode``).
    """
    def __init__(self, from_jid, to_jid, type=u'get',
                 stanza_id=None, seconds=None, message=None):
        Entity.__init__(self, from_jid, to_jid)
        # seconds of idle time reported by the peer (None until parsed)
        self.seconds = seconds
        # free-form status text carried in the <query> element
        self.message = message
        self.type = type
        # generate a fresh id when the caller does not supply one
        self.stanza_id = stanza_id or generate_unique()

    def __repr__(self):
        return '<Activity (%s) at %s>' % (self.stanza_id, hex(id(self)),)

    @staticmethod
    def from_element(e):
        """Build an Activity from an incoming iq bridge Element."""
        activity = Activity(JID.parse(e.get_attribute_value('from')),
                            JID.parse(e.get_attribute_value('to')),
                            e.get_attribute_value('type'),
                            e.get_attribute_value('id'))
        # scan children for the jabber:iq:last <query> payload
        for child in e.xml_children:
            if not isinstance(child, E):
                continue
            if child.xml_ns == XMPP_LAST_NS:
                seconds = child.get_attribute_value('seconds')
                if seconds != None:
                    activity.seconds = long(seconds)
                activity.message = child.xml_text
        return activity

    @staticmethod
    def to_element(a):
        """Serialize an Activity into an <iq> Element with a <query> child."""
        attrs = {}
        if a.from_jid:
            attrs[u'from'] = unicode(a.from_jid)
        if a.to_jid:
            attrs[u'to'] = unicode(a.to_jid)
        if a.type:
            attrs[u'type'] = a.type
        if a.stanza_id:
            attrs[u'id'] = a.stanza_id
        iq = E(u'iq', attributes=attrs, namespace=XMPP_CLIENT_NS)
        attr = {}
        if a.seconds != None:
            attr[u'seconds'] = unicode(a.seconds)
        # the <query> element attaches itself to iq via parent=
        E(u'query', namespace=XMPP_LAST_NS, parent=iq,
          content=a.message, attributes=attr)
        return iq
| 2,020 | 653 |
import os
from abc import ABC, abstractmethod
from typing import Callable, Optional, Tuple
import torch
from torch import nn, optim
from ..base import MetaLearner
class MBML(MetaLearner, ABC):
    """
    A base class for metric-based meta-learning algorithms.

    Parameters
    ----------
    model : torch.nn.Module
        Model to be wrapped
    optim : torch.optim.Optimizer
        Optimizer
    root : str
        Root directory to save checkpoints
    save_basename : str, optional
        Base name of the saved checkpoints
    lr_scheduler : callable, optional
        Learning rate scheduler
    loss_function : callable, optional
        Loss function
    device : optional
        Device on which the model is defined. If `None`, device will be
        detected automatically.
    """

    def __init__(
        self,
        model: nn.Module,
        optim: optim.Optimizer,
        root: Optional[str] = None,
        save_basename: Optional[str] = None,
        lr_scheduler: Optional[Callable] = None,
        loss_function: Optional[Callable] = None,
        device: Optional = None
    ) -> None:
        super(MBML, self).__init__(
            model = model,
            root = root,
            save_basename = save_basename,
            lr_scheduler = lr_scheduler,
            loss_function = loss_function,
            device = device
        )
        self.optim = optim

    @classmethod
    def load(cls, model_path: str, **kwargs):
        """Load a trained model (and its optimizer) from ``model_path``."""
        state = torch.load(model_path)
        # load model and optimizers
        kwargs['model'] = state['model']
        kwargs['optim'] = state['optim']
        # model name and save path default to where the checkpoint lives
        if 'root' not in kwargs:
            kwargs['root'] = os.path.dirname(model_path)
        if 'save_basename' not in kwargs:
            kwargs['save_basename'] = os.path.basename(model_path)
        return cls(**kwargs)

    def save(self, prefix: Optional[str] = None) -> str:
        """Save the trained model; returns the checkpoint path."""
        if self.root is None or self.save_basename is None:
            # fix: the concatenated message was missing a space ("thecheckpoints")
            raise RuntimeError('The root directory or save basename of the '
                               'checkpoints is not defined.')
        state = {
            'model': self.model,
            'optim': self.optim
        }
        name = self.save_basename
        if prefix is not None:
            name = prefix + name + '.pth.tar'
        path = os.path.join(self.root, name)
        # fix: reuse the already-computed path instead of joining twice
        torch.save(state, path)
        return path

    def step(self, batch: dict, meta_train: bool = True) -> Tuple[float, float]:
        """Run one meta-batch; returns (mean loss, mean accuracy) over tasks."""
        if meta_train:
            self.model.train()
        else:
            self.model.eval()
        task_batch, n_tasks = self.get_tasks(batch)
        losses, accuracies = 0., 0.
        self.optim.zero_grad()
        for task_data in task_batch:
            loss, accuracy = self.single_task(task_data)
            losses += loss.detach().item()
            accuracies += accuracy.item()
            if meta_train:
                # divide by task count so accumulated gradients average
                # over the meta-batch; optimizer steps once after the loop
                (loss / n_tasks).backward()
        self.optim.step()
        # average the losses and accuracies
        losses /= n_tasks
        accuracies /= n_tasks
        return losses, accuracies

    @abstractmethod
    def single_task(
        self, task: Tuple[torch.Tensor], meta_train: bool = True
    ) -> Tuple[float]:
        """Process a single task; must return (loss tensor, accuracy tensor)."""
        pass
| 3,406 | 991 |
"""
Django settings for app project.
Generated by 'django-admin startproject' using Django 2.2.5.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
from datetime import timedelta
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get('SECRET_KEY', 'you-will-never-guess')

# SECURITY WARNING: don't run with debug turned on in production!
# To disable debug, remove the variable from the environment instead of trying to type cast
DEBUG = int(os.environ.get("DEBUG", default=0))

# 'DJANGO_ALLOWED_HOSTS' should be a single string of hosts with a space between each.
# For example: 'DJANGO_ALLOWED_HOSTS=localhost 127.0.0.1 [::1]'
ALLOWED_HOSTS = os.environ.get("DJANGO_ALLOWED_HOSTS", "localhost 127.0.0.1 [::1]").split(" ")

# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'corsheaders',
    'solo.apps.SoloAppConfig',
    'rest_framework',
    'drf_yasg',
    'djoser',
    'django_celery_beat',
    'django_celery_results',
    'djcelery_email',
    # Local
    'users.apps.UsersConfig',
    'education.apps.EducationConfig',
    'pages.apps.PagesConfig',
    'django_cleanup.apps.CleanupConfig',
]

AUTH_USER_MODEL = 'users.CustomUser'

MIDDLEWARE = [
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'app.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'app.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
    "default": {
        "ENGINE": os.environ.get("SQL_ENGINE", "django.db.backends.sqlite3"),
        "NAME": os.environ.get("SQL_DATABASE", os.path.join(BASE_DIR, "db.sqlite3")),
        "USER": os.environ.get("SQL_USER", "user"),
        "PASSWORD": os.environ.get("SQL_PASSWORD", "password"),
        "HOST": os.environ.get("SQL_HOST", "localhost"),
        "PORT": os.environ.get("SQL_PORT", "5432"),
    }
}

# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'ru-RU'
TIME_ZONE = 'Europe/Moscow'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATIC_URL = '/staticfiles/'
# Extra places for collectstatic to find static files.
STATICFILES_DIRS = (
    os.path.join(BASE_DIR, 'static'),
)

MEDIA_URL = '/mediafiles/'
# bug fix: os.path.join(BASE_DIR, "/mediafiles/") discards BASE_DIR because an
# absolute second component resets the join; use a relative component instead.
MEDIA_ROOT = os.path.join(BASE_DIR, "mediafiles")

REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': [
        'rest_framework_simplejwt.authentication.JWTAuthentication',
        'rest_framework.authentication.SessionAuthentication',
    ],
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
    'DEFAULT_PAGINATION_CLASS': 'rest_framework.pagination.PageNumberPagination',
    'PAGE_SIZE': 20,
    'DEFAULT_SCHEMA_CLASS': 'rest_framework.schemas.coreapi.AutoSchema',
}

SIMPLE_JWT = {
    'AUTH_HEADER_TYPES': ('JWT', 'Bearer'),
    'ACCESS_TOKEN_LIFETIME': timedelta(hours=1),  # timedelta(minutes=15),
    'REFRESH_TOKEN_LIFETIME': timedelta(days=1),
}

DJOSER = {
    'TOKEN_MODEL': None,  # needed for JWT
    'PERMISSIONS': {
        'user_delete': ['users.permissions.IsAdminUser'],
    },
    'SERIALIZERS': {
        'user_create': 'users.serializers.CustomUserCreateSerializer',
        'user': 'users.serializers.CustomUserWithProfileSerializer',
        'current_user': 'users.serializers.CustomUserSerializer',
    },
    # 'HIDE_USERS': If set to True, listing /users/ enpoint by normal user will return only that user’s
    #               profile in the list. Beside that, accessing /users/<id>/ endpoints by user without
    #               proper permission will result in HTTP 404 instead of HTTP 403.
    'HIDE_USERS': True,
    'ACTIVATION_URL': 'users/activation/{uid}/{token}',  # TODO: urls in frontend, POST to back
    'PASSWORD_RESET_CONFIRM_URL': 'users/password/reset/confirm/{uid}/{token}',  # TODO: urls in frontend, POST to back
    'USERNAME_RESET_CONFIRM_URL': 'users/reset/confirm/{uid}/{token}',  # TODO: urls in frontend, POST to back
    'SEND_ACTIVATION_EMAIL': False,
    'SEND_CONFIRMATION_EMAIL': False,
    'PASSWORD_CHANGED_EMAIL_CONFIRMATION': False,
    'USERNAME_CHANGED_EMAIL_CONFIRMATION': False,
    'EMAIL': {
        'activation': 'users.emails.CustomActivationEmail',
        'confirmation': 'users.emails.CustomConfirmationEmail',
        'password_reset': 'users.emails.CustomPasswordResetEmail',
        'password_changed_confirmation': 'users.emails.CustomPasswordChangedConfirmationEmail',
        'username_changed_confirmation': 'users.emails.CustomUsernameChangedConfirmationEmail',
        'username_reset': 'users.emails.CustomUsernameResetEmail',
    }
}

SWAGGER_SETTINGS = {
    'SECURITY_DEFINITIONS': {
        'basic': {
            'type': 'basic'
        }
    },
}

CELERY_BROKER_URL = os.environ.get('REDIS_URL')
CELERY_RESULT_BACKEND = os.environ.get('REDIS_URL')
CELERY_ACCEPT_CONTENT = ['application/json']
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_TIMEZONE = TIME_ZONE

EMAIL_BACKEND = 'djcelery_email.backends.CeleryEmailBackend'
EMAIL_HOST = os.environ.get('EMAIL_HOST', 'smtp.sendgrid.net')
EMAIL_PORT = os.environ.get('EMAIL_PORT', 587)
EMAIL_HOST_USER = os.environ.get('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.environ.get('EMAIL_HOST_PASSWORD')
# bug fix: os.environ.get returns a string, so any non-empty value (including
# "False") was truthy; parse the flag explicitly. Default remains enabled.
EMAIL_USE_TLS = os.environ.get('EMAIL_USE_TLS', 'True').lower() in ('1', 'true', 'yes')
DEFAULT_FROM_EMAIL = os.environ.get('DEFAULT_FROM_EMAIL', 'noreply@webmaster')

CORS_ORIGIN_ALLOW_ALL = False
CORS_ORIGIN_WHITELIST = (
    # can be like r'^https://\w+\.example\.com$'
    os.environ.get('CORS_ORIGIN_WHITELIST', 'http://localhost:3000'),
)

if 'SENTRY_DSN' in os.environ:
    import sentry_sdk
    from sentry_sdk.integrations.django import DjangoIntegration
    sentry_sdk.init(
        dsn=os.environ['SENTRY_DSN'], integrations=[DjangoIntegration()]
    )

# try:
#     from local_settings import *
# except ImportError as e:
#     # No local settings was found, skipping.
#     pass
# if not DEBUG and len(SECRET_KEY) < 25:
#     print(f'The value of DJANGO_SECRET_KEY does not contain enough characters ({len(SECRET_KEY)} characters)')
#     raise RuntimeError(f'DJANGO_SECRET_KEY is not long enough (in environment variable "DJANGO_SECRET_KEY")')
| 8,331 | 2,954 |
'''
Groups the 0s and 1s together from a random array
Reference: http://www.geeksforgeeks.org/segregate-0s-and-1s-in-an-array-by-traversing-array-once/
'''
from __future__ import print_function
def rearrange(arr):
    """Segregate ``arr`` in place so all 0s precede all 1s; returns ``arr``.

    Single pass, two pointers: swap only when a 1 on the left faces a 0 on
    the right.  (The original swapped unconditionally whenever the pointers
    had not crossed, performing redundant 0<->0 / already-ordered swaps.)
    """
    lo = 0
    hi = len(arr) - 1
    while lo < hi:
        if arr[lo] == 0:
            # already in place on the left
            lo += 1
        elif arr[hi] == 1:
            # already in place on the right
            hi -= 1
        else:
            # arr[lo] == 1 and arr[hi] == 0: swap the pair into place
            arr[lo], arr[hi] = arr[hi], arr[lo]
            lo += 1
            hi -= 1
    return arr
# Demo: each call prints the 0s-then-1s arrangement of the sample array.
print(rearrange([0, 0, 1, 1]))
print(rearrange([1, 0, 0, 1, 1]))
print(rearrange([1, 0, 0, 0, 1, 0, 0]))
print(rearrange([0, 1, 0, 1, 0, 1, 0, 1]))
| 593 | 283 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
from scipy.signal import savgol_filter
import sys
def Interpolate(time, mask, y):
    """Fill the masked samples of ``y`` by linear interpolation over ``time``."""
    result = np.array(y)
    # unmasked samples are the interpolation anchors
    good_t = np.delete(time, mask)
    good_y = np.delete(y, mask, axis=0)
    if result.ndim == 1:
        result[mask] = np.interp(time[mask], good_t, good_y)
    elif result.ndim == 2:
        # interpolate each column independently
        for col in range(result.shape[1]):
            result[mask, col] = np.interp(time[mask], good_t, good_y[:, col])
    else:
        raise Exception("Array ``y`` must be either 1- or 2-d.")
    return result
def Chunks(l, n, all = False):
    """Yield successive ``n``-sized chunks of ``l``.

    With ``all=False`` (default) the sequence is chunked once from offset 0
    and the final short piece absorbs the remainder; with ``all=True`` every
    starting offset 0..n-2 is chunked and remainders are dropped.
    """
    starts = range(0, n - 1) if all else [0]
    for offset in starts:
        for i in range(offset, len(l), n):
            if i + 2 * n <= len(l):
                yield l[i:i + n]
            else:
                if not all:
                    # absorb the tail into the final chunk
                    yield l[i:]
                break
def Smooth(x, window_len = 100, window = 'hanning'):
    """Smooth ``x`` with a reflected-edge moving window.

    ``window`` names a numpy window function ('hanning', 'hamming', ...) or
    'flat' for a boxcar.  Returns an array of the same length as ``x``;
    ``window_len == 0`` returns zeros.
    """
    if window_len == 0:
        return np.zeros_like(x)
    # mirror the signal at both ends to suppress edge artifacts
    s = np.r_[2 * x[0] - x[window_len - 1::-1], x, 2 * x[-1] - x[-1:-window_len:-1]]
    if window == 'flat':
        w = np.ones(window_len, 'd')
    else:
        # fix: look the window function up on the numpy module instead of
        # eval()'ing an arbitrary string (same AttributeError on bad names)
        w = getattr(np, window)(window_len)
    y = np.convolve(w / w.sum(), s, mode = 'same')
    # trim the mirrored padding back off
    return y[window_len:-window_len + 1]
def Scatter(y, win = 13, remove_outliers = False):
    """Median per-chunk scatter estimate of ``y`` in ppm (NaN for empty input)."""
    if remove_outliers:
        # detrend first (when there's enough data) so the outlier cut is fair
        if len(y) >= 50:
            detrended = y - Smooth(y, 50)
        else:
            detrended = y
        med = np.nanmedian(detrended)
        mad = 1.4826 * np.nanmedian(np.abs(detrended - med))
        # indices falling outside a 5-sigma band around the median
        bad = [i for i in range(len(y))
               if detrended[i] > med + 5 * mad or detrended[i] < med - 5 * mad]
        y = np.delete(y, np.array(bad, dtype = int))
    if len(y):
        return 1.e6 * np.nanmedian([np.std(yi) / np.sqrt(win) for yi in Chunks(y, win, all = True)])
    else:
        return np.nan
def SavGol(y, win = 49):
    """Remove a quadratic Savitzky-Golay trend from ``y``, keeping its median level."""
    if len(y) < win:
        # too short to filter; return unchanged
        return y
    return y - savgol_filter(y, win, 2) + np.nanmedian(y)
def _float(s):
try:
res = float(s)
except:
res = np.nan
return res
def Downbin(x, newsize, axis = 0, operation = 'mean'):
    """Downbin ``x`` to ``newsize`` bins along ``axis``.

    ``operation`` selects how each bin is reduced: 'mean', 'sum',
    'quadsum' (sqrt of sum of squares), or 'median'.  Samples that do not
    fill a complete bin are discarded from the end of ``axis``.
    """
    assert newsize < x.shape[axis], "The new size of the array must be smaller than the current size."
    oldsize = x.shape[axis]
    newshape = list(x.shape)
    newshape[axis] = newsize
    newshape.insert(axis + 1, oldsize // newsize)
    trim = oldsize % newsize
    if trim:
        # bug fix: trim along the requested axis — the original always sliced
        # axis 0 (x[:-trim]), which broke the reshape whenever axis != 0
        slicer = [slice(None)] * x.ndim
        slicer[axis] = slice(0, oldsize - trim)
        xtrim = x[tuple(slicer)]
    else:
        xtrim = x
    reshaped = xtrim.reshape(newshape)
    if operation == 'mean':
        xbin = np.nanmean(reshaped, axis = axis + 1)
    elif operation == 'sum':
        xbin = np.nansum(reshaped, axis = axis + 1)
    elif operation == 'quadsum':
        xbin = np.sqrt(np.nansum(reshaped ** 2, axis = axis + 1))
    elif operation == 'median':
        xbin = np.nanmedian(reshaped, axis = axis + 1)
    else:
        raise ValueError("`operation` must be either `mean`, `sum`, `quadsum`, or `median`.")
    return xbin
| 3,039 | 1,172 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# requirements.txt supplies the runtime dependency pins, one per line.
with open('requirements.txt') as f:
    install_requires = f.read().strip().split('\n')

# get version from __version__ variable in pav_bsc/__init__.py
from pav_bsc import __version__ as version

setup(
    name='pav_bsc',
    version=version,
    description='Partner ERPNext - Add Value On Balanced Scorecard',
    author='Farouk Muharram',
    author_email='farouk1dev@gmail.com',
    packages=find_packages(),
    zip_safe=False,
    include_package_data=True,
    # install everything listed in requirements.txt
    install_requires=install_requires
)
| 544 | 196 |
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import argparse
import os
from time import gmtime, strftime
import time
import subprocess
import logging
logging.getLogger().setLevel(logging.INFO)
def parse_arguments():
    """Parse command line arguments."""
    parser = argparse.ArgumentParser()
    # optional flags: (name, default, help)
    for flag, default, help_text in [
        ('--model', 'flowers_model', 'What to name your ml-engine model'),
        ('--version', 'resnet', 'What to name the version of the model'),
        ('--region', 'us-central1', 'Region to use.'),
        ('--TFVERSION', '1.8', 'Version of TensorFlow to use.'),
    ]:
        parser.add_argument(flag, type=str, default=default, help=help_text)
    # required arguments
    parser.add_argument('--model_dir', type=str, required=True,
                        help='The model directory generated by the train component.')
    parser.add_argument('--project_id', type=str, required=True, default='',
                        help='Pass in your project id.')
    return parser.parse_args()
if __name__== "__main__":
    args = parse_arguments()
    # 'export' holds the timestamped SavedModel directories produced by training
    model_export_dir = os.path.join(args.model_dir, 'export')
    logging.info('Writing latest model directory name: ' + model_export_dir)
    # pick the most recent export (last line of `gsutil ls`)
    subprocess.call('gsutil ls ' + model_export_dir + ' | tail -1 > model.txt', shell=True)
    with open("./model.txt", "r") as model_path_file:
        # [:-1] strips the trailing newline written by the shell pipeline
        model_location = model_path_file.read()[:-1]
    logging.info('Deploying ' + args.model + ' ' + args.version + ' from ' + model_location + ' ... this will take a few minutes')
    # NOTE(review): shell=True with string-interpolated args is shell-injection
    # prone; args come from the pipeline here, but a list argv would be safer.
    # Delete any existing version first (ignores failure), then (re)create.
    subprocess.call('gcloud ml-engine versions delete ' + args.version + ' --model=' + args.model + ' --quiet', shell=True)
    subprocess.call('gcloud ml-engine models create ' + args.model + ' --regions ' + args.region, shell=True)
    subprocess.check_call('gcloud ml-engine versions create ' + args.version + ' --model ' + args.model +
                          ' --origin ' + str(model_location) + ' --runtime-version=' + args.TFVERSION, shell=True)
# -*- coding: utf-8 -*-
"""
tests/test_quickbooks_payroll.py
"""
import csv
import tempfile
class TestQuickBooksPayroll:
    """Tryton test suite for the quickbooks_payroll module.

    NOTE: Python 2 test module (uses the ``buffer`` builtin below);
    ``self.POOL`` is provided by the surrounding test fixtures.
    """
    def test_views(self, install_module):
        "Test all tryton views"
        from trytond.tests.test_tryton import test_view
        test_view('quickbooks_payroll')

    def test_depends(self, install_module):
        "Test missing depends on fields"
        from trytond.tests.test_tryton import test_depends
        test_depends()

    def test_import_payroll_item(self, test_dataset, transaction):
        "Test import payroll item wizard"
        Date = self.POOL.get('ir.date')
        Account = self.POOL.get('account.account')
        Move = self.POOL.get('account.move')
        Employee = self.POOL.get('company.employee')
        QuickBooksPayroll = self.POOL.get('quickbooks.payroll_account')
        ImportPayrollItem = self.POOL.get(
            'quickbooks.wizard_import_payroll_item', type='wizard'
        )
        # Map quickbooks payroll item to tryton
        main_expense, = Account.search([('name', '=', 'Main Expense')])
        main_expense.party_required = True
        main_expense.save()
        main_tax, = Account.search([('name', '=', 'Main Tax')])
        main_tax.party_required = True
        main_tax.save()
        main_cash, = Account.search([('name', '=', 'Main Cash')])
        QuickBooksPayroll.create([{
            'account': main_expense.id,
            'payroll_item': 'Salary Expense',
        }, {
            'account': main_tax.id,
            'payroll_item': 'Federal Income Taxes Payable',
        }, {
            'account': main_tax.id,
            'payroll_item': 'State Income Taxes Payable',
        }, {
            'account': main_tax.id,
            'payroll_item': 'FICA Taxes Payable',
        }])
        # Map employee to quickbooks source name
        employee, = Employee.search([])
        employee.quickbooks_source_name = 'Pandey, Prakash'
        employee.save()
        credit_account, = Account.search([], limit=1)
        import_payroll_item = ImportPayrollItem(
            ImportPayrollItem.create()[0]
        )
        import_payroll_item.start.credit_account = main_cash
        # Build a sample QuickBooks payroll CSV: gross salary plus three
        # tax withholdings and the resulting net pay line.
        with tempfile.NamedTemporaryFile(delete=False) as csv_file:
            csv_writer = csv.writer(csv_file, quoting=csv.QUOTE_ALL)
            csv_writer.writerow([
                'Date', 'Num', 'Type', 'Source Name', 'Payroll Item',
                'Wage Base', 'Amount',
            ])
            csv_writer.writerow([
                Date.today(), '309333', 'Cash', "Pandey, Prakash",
                'Salary Expense', '', '-100000',
            ])
            csv_writer.writerow([
                '', '', '', "Pandey, Prakash", 'Federal Income Taxes Payable',
                '', 15000,
            ])
            csv_writer.writerow([
                '', '', '', "Pandey, Prakash", 'State Income Taxes Payable',
                '', 5000,
            ])
            csv_writer.writerow([
                '', '', '', "Pandey, Prakash", 'FICA Taxes Payable', '', 7650,
            ])
            csv_writer.writerow([
                '', '', '', '', '', '', 72350
            ])
            csv_file.flush()
        # feed the CSV bytes to the wizard (py2 buffer object)
        import_payroll_item.start.csv_file = \
            buffer(open(csv_file.name).read())
        _, res = import_payroll_item.do_import_(action=None)
        # one account move with five lines (salary, 3 taxes, net cash)
        move, = Move.search([])
        assert move.id in res['res_id']
        assert len(move.lines) == 5
        Move.post([move])
| 3,506 | 1,101 |
import pandas as pd
import os
import requests
from pathlib import Path
from PIL import Image
from tqdm import tqdm
from multiprocessing import Pool
import gc
import glob
# Source TSV of (url, caption) pairs for Conceptual 12M.
cc_url = 'https://storage.googleapis.com/conceptual_12m/cc12m.tsv'
root_folder = './'
# total rows in cc12m.tsv (informational)
total = 12423374
# images are scaled so both dimensions reach at least this size
maxwidth = 256
maxheight = 256
thread_count = 16
batch = 10000
def load_caption(x):
    """Write a single caption to ``<text_folder>/<bucket>/<padded id>.txt``.

    ``x`` is an ``(id, caption, text_folder)`` tuple; ids are grouped into
    buckets of 10000 so that no single directory grows too large.
    """
    name, caption, text_folder = x
    bucket = str(int(int(name) / 10000))
    subdir = bucket.zfill(5)
    os.makedirs(Path(text_folder + "/" + subdir), exist_ok=True)
    stem = str(name).zfill(9)
    fp = text_folder + '/' + subdir + "/" + stem + '.txt'
    with open(fp, 'w') as handle:
        handle.write(caption)
def download_file(url):
    """Stream *url* into ``<root_folder>/cc12m.tsv`` with a tqdm progress bar.

    Prints an error message if the number of bytes received does not match
    the Content-Length header.
    """
    response = requests.get(url, stream=True)
    expected = int(response.headers.get('content-length', 0))
    block_size = 1024
    bar = tqdm(total=expected, unit='iB', unit_scale=True)
    with open(Path(root_folder + '/cc12m.tsv'), 'wb') as out:
        for piece in response.iter_content(block_size):
            bar.update(len(piece))
            out.write(piece)
    bar.close()
    if expected != 0 and bar.n != expected:
        print("Error, something went wrong...")
def load_image(x):
    """Download one image, shrink it so its shorter side fits the global
    maxwidth/maxheight bounds, and save it as a JPEG.

    ``x`` is an ``(id, url, image_folder, skip_folder)`` tuple. On ANY
    failure (network error, decode error, ...) an empty marker file is
    created in *skip_folder* so subsequent runs skip this id.
    """
    name, url, image_folder, skip_folder = x
    fid = str(int(int(name) / 10000))
    subdir = "0" * (5 - len(fid)) + fid
    os.makedirs(Path(image_folder + "/" + subdir), exist_ok=True)
    # Renamed from ``id`` to avoid shadowing the builtin.
    image_id = subdir + "/" + "0" * (9 - len(str(name))) + str(name)
    try:
        with Image.open(requests.get(
                url,
                headers={'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:72.0) Gecko/20100101 Firefox/72.0'},
                stream=True, timeout=3).raw) as foo:
            scale = max(maxwidth / foo.size[0], maxheight / foo.size[1])
            foo = foo.resize((int(foo.size[0] * scale), int(foo.size[1] * scale)),
                             Image.ANTIALIAS)
            with open(Path(image_folder + "/" + image_id + '.jpg'), 'wb') as file:
                foo.save(file, optimize=True, quality=85)
    except Exception:
        # Record the failure as an empty marker file.
        os.makedirs(Path(skip_folder + "/" + subdir), exist_ok=True)
        # BUG FIX: the original wrote ``open(...).close`` (attribute access,
        # not a call), so the marker file handle was never explicitly closed.
        open(Path(skip_folder + '/' + image_id), 'a').close()
if __name__ == '__main__':
    # Fetch the url/caption TSV once; subsequent runs reuse the local copy.
    if not os.path.isfile(Path(root_folder + '/cc12m.tsv')):
        print('Missing cc12m url-caption-dataset. Downloading...')
        download_file(cc_url)
    else:
        print('cc12m.tsv already downloaded. Proceeding with downloading images!')
    dfc = pd.read_csv(root_folder + "cc12m.tsv", sep='\t', names=["url", "caption"])
    image_folder = root_folder + '/images'
    text_folder = root_folder + '/texts'
    skip_folder = root_folder + '/skip'
    paths = [image_folder, text_folder, skip_folder]
    for path in paths:
        os.makedirs(path, exist_ok=True)

    def list_ids(path):
        # Ids of files already present under *path*.
        # NOTE(review): recursive=True is not passed, so "**" matches a single
        # directory level — sufficient for the one-level bucket layout.
        return [int(os.path.splitext(os.path.basename(a))[0]) for a in glob.glob(path+"/**/*")]

    # --- Phase 1: write caption .txt files for rows not done yet ---
    skiplist = list_ids(text_folder)
    remaining = total - len(skiplist)
    percent_remaining = 100 * (total - remaining) / total
    df = dfc.loc[~dfc.index.isin(skiplist)]
    print('Remaining {} captions to be written - {} ({:.5f} %) already written.'.format(remaining, len(skiplist), percent_remaining))
    if len(df) > 0:
        captions = zip(df.index, df["caption"], [text_folder]*len(df))
        pool = Pool(thread_count)
        for _ in tqdm(pool.imap_unordered(load_caption, captions), total=len(df)):
            pass
        pool.close()
    print('Done with captions!')
    # --- Phase 2: download images not yet fetched and not marked skipped ---
    skiplist = list_ids(skip_folder) + list_ids(image_folder)
    remaining = total - len(skiplist)
    percent_remaining = 100 * (total - remaining) / total
    df = dfc.loc[~dfc.index.isin(skiplist)]
    print('Remaining {} images to be downloaded - {} ({:.5f} %) already downloaded.'.format(remaining, len(skiplist), percent_remaining))
    images = list(zip(df.index, df["url"], [image_folder]*len(df), [skip_folder]*len(df)))
    # A fresh Pool per batch; terminate + gc to keep memory in check.
    for i in tqdm(range(0, len(df), batch)):
        pool = Pool(thread_count)
        for _ in tqdm(pool.imap_unordered(load_image, images[i:i+batch]), total=batch):
            pass
        pool.terminate()
        pool.join()
        del pool
        gc.collect()
    print('Finished downloading available images from conceptual images!')
| 4,284 | 1,566 |
from unittest import TestCase
from jose import jwt
class TestJSONWebTokenLoginHandler(TestCase):
    """Smoke test: a hard-coded HS512 JWT decodes to a non-empty payload
    when signature verification is disabled."""

    def test_parse_jwt(self):
        json_web_token = 'eyJhbGciOiJIUzUxMiJ9.eyJzdWIiOiJ5dWFuemhlbmNhaSIsImF1dGgiOiJST0xFX1VTRVIiLCJleHAiOjE1OTAyMjk5NTJ9.PcI6wGxXew-AASYqCKneUyW4ZUVHosgfE0qkWh0Y5pB4nNr1kneSC8yt8liJ31TSjhzt2VVAgyoYnci-_R-Wfw'
        secret = 'b939fce9c8879b8d41886695da17c363a0004bf7'
        decode_options = {'verify_signature': False}
        data = jwt.decode(json_web_token, secret, 'HS512', options=decode_options)
        self.assertIsNotNone(data)
| 518 | 286 |
import numpy as np
from PySide2.QtCore import Qt, QUrl, QSize, QEventLoop
from PySide2.QtGui import QPixmap, QDropEvent, QDragEnterEvent, QMouseEvent, QResizeEvent, QHideEvent
from PySide2.QtWidgets import QApplication, QWidget, QHBoxLayout, QFileDialog, QWidgetItem
from itint.octree import Octree
from itint.ui_widget import Ui_MainWidget
from itint.widget_color_display import qimage_to_pil, ColorDisplayWidget
from itint.widget_screen_color_picker import ScreenColorPicker
from itint.widget_screenshot import WidgetScreenShot
class MainWidget(QWidget):
    """Main window: loads an image (file click / drag-and-drop / clipboard /
    screenshot), shows a scaled preview, and extracts a color palette from it
    via an octree; single colors can also be picked directly from the screen.
    """

    def __init__(self, parent=None):
        super(MainWidget, self).__init__(parent)
        self.setAcceptDrops(True)
        # UI built from the Qt Designer generated class.
        self.internal_loader = Ui_MainWidget()
        self.internal_loader.setupUi(self)
        self.screen = WidgetScreenShot()
        self.picker = ScreenColorPicker()
        # Horizontal strip holding one ColorDisplayWidget per extracted color.
        self.layout = QHBoxLayout(self.internal_loader.colorDisplayContent)
        self.layout.setAlignment(Qt.AlignLeft)
        self.internal_loader.btnFromScan.clicked.connect(self.btn_from_screen)
        # Remember the placeholder text so it can be restored when no image.
        self.default_text = self.internal_loader.labelImagePreview.text()
        # Clicking the preview label opens the file dialog.
        self.internal_loader.labelImagePreview.mousePressEvent = self.btn_from_file
        self.internal_loader.btnColorPickup.clicked.connect(self.btn_from_screen_color_picker)
        self.internal_loader.btnFromClipboard.clicked.connect(self.btn_from_clipboard)
        self.pixmap = QPixmap()
        # Optional one-shot callback fired from hideEvent().
        self.hide_callback = None

    def dropEvent(self, event: QDropEvent) -> None:
        """Load the first dropped local file as the current image."""
        url: QUrl = event.mimeData().urls()[0]
        self.pixmap.load(url.toLocalFile())
        self.update_image()
        if not self.pixmap.isNull():
            self.update_color_display(self.pixmap)

    def dragEnterEvent(self, event: QDragEnterEvent) -> None:
        # Accept only drags carrying a local file URL.
        if event.mimeData().hasUrls() and event.mimeData().urls()[0].isLocalFile():
            event.acceptProposedAction()

    def check_and_clear_color_display(self):
        """Delete all palette widgets if the "auto clear" box is checked."""
        if self.internal_loader.cBtnAutoClear.isChecked():
            for i in range(self.layout.count()):
                color_display_widget: QWidgetItem = self.layout.itemAt(i)
                color_display_widget.widget().deleteLater()

    def update_color_display(self, image: QPixmap):
        """Quantize *image* with an octree and show the resulting palette."""
        if image.isNull():
            return
        # Flatten to an (N, 3) array of RGB pixels.
        data = np.asarray(qimage_to_pil(image)).reshape((-1, 3))
        tree = Octree()
        tree.build(data, 8)
        colors = tree.get_color(tree.root)
        self.check_and_clear_color_display()
        for r, g, b in colors:
            color_display_widget = ColorDisplayWidget(r, g, b, self)
            self.layout.addWidget(color_display_widget)

    def resizeEvent(self, event: QResizeEvent):
        # Re-fit the preview to the new label size.
        self.update_image()

    def hideEvent(self, event: QHideEvent):
        # Fire (and clear) the one-shot hide callback, if any was installed.
        if self.hide_callback is not None:
            self.hide_callback()
            self.hide_callback = None

    def update_image(self):
        """Render self.pixmap into the preview label, scaled to fit while
        preserving aspect ratio; restore the placeholder text when empty."""
        if self.pixmap.isNull():
            self.internal_loader.labelImagePreview.setText(self.default_text)
        else:
            # Work in device pixels so the preview stays crisp on HiDPI.
            pixel_ratio = QApplication.primaryScreen().devicePixelRatio()
            pixmap_aspect = self.pixmap.width() / self.pixmap.height()
            label_width = self.internal_loader.labelImagePreview.width() * pixel_ratio
            label_height = self.internal_loader.labelImagePreview.height() * pixel_ratio
            label_aspect = label_width / label_height
            if pixmap_aspect > label_aspect:
                # Image is wider than the label: constrain by width.
                pixmap = self.pixmap.scaled(
                    QSize(label_width,
                          label_width / pixmap_aspect),
                    Qt.KeepAspectRatio,
                    Qt.SmoothTransformation,
                )
            else:
                # Image is taller than the label: constrain by height.
                pixmap = self.pixmap.scaled(
                    QSize(label_height * pixmap_aspect,
                          label_height),
                    Qt.KeepAspectRatio,
                    Qt.SmoothTransformation,
                )
            self.internal_loader.labelImagePreview.setPixmap(pixmap)

    def btn_from_screen_color_picker(self):
        """Pick a single color from the screen, optionally hiding this window
        while picking (it is restored inside the callback)."""
        def callback_screen_color_picker(rgb):
            r, g, b = rgb
            color_display_widget = ColorDisplayWidget(r, g, b, self)
            self.layout.addWidget(color_display_widget)
            # Restore the window after picking.
            if self.internal_loader.cBtnAutoHide.isChecked():
                self.setVisible(True)
                self.setWindowOpacity(1.0)
        if self.internal_loader.cBtnAutoHide.isChecked():
            self.setVisible(False)
            self.setWindowOpacity(0.0)
        # Let the hide take effect before the picker grabs the screen.
        QApplication.processEvents(QEventLoop.AllEvents)
        # time.sleep(0.20)  # window animation
        self.picker.pick_color(callback=callback_screen_color_picker)

    def btn_from_screen(self):
        """Capture a screenshot region and use it as the current image."""
        def callback_captured_image(pixmap: QPixmap):
            self.pixmap = pixmap
            self.update_image()
            # Restore the window after capturing.
            if self.internal_loader.cBtnAutoHide.isChecked():
                self.setVisible(True)
            self.update_color_display(pixmap)
        if self.internal_loader.cBtnAutoHide.isChecked():
            self.setVisible(False)
        self.screen.capture_screen(callback=callback_captured_image)

    def btn_from_file(self, event: QMouseEvent):
        """Open a file dialog and load the chosen image."""
        filepath, _ = QFileDialog.getOpenFileName(self, "选择文件", "", "图片 (*.png;*.jpg;*.gif;*.bmp);;所有类型 (*)")
        self.pixmap.load(filepath)
        self.update_image()
        if not self.pixmap.isNull():
            self.update_color_display(self.pixmap)

    def btn_from_clipboard(self):
        """Use the clipboard's pixmap (if any) as the current image."""
        clipboard = QApplication.clipboard()
        self.pixmap = clipboard.pixmap()
        self.update_image()
        self.update_color_display(self.pixmap)
| 5,707 | 1,757 |
class ObservabilityError(Exception):
    """Base class for the observability error hierarchy below."""
    pass
class ConnectionNotConfigured(ObservabilityError):
    """Raised when a required connection has not been configured."""
    pass
class TruncationError(ObservabilityError):
    """Raised on a truncation failure (presumably when data exceeds a
    size limit — confirm against the raising call sites)."""
    pass
| 162 | 46 |
import requests
from datetime import datetime, timedelta
import os
from pathlib import Path
from web3 import Web3, HTTPProvider
import time
import json
# put real private key and address here
wallet_private_key = "0x00000000000000000000000000000000"
wallet_address = "0x000000000000000000000000000000"
# put personal infura address here
# this only works for oracleContract
provider_url = "https://mainnet.infura.io/v3/00000000000000000000000000000000"
# change to your oracle address
contract_address = "0x000000000000000000000000000000"
start_time = time.time()
# Fetch the current "fast" gas price from ethgasstation; fall back to a
# fixed 15 (gwei) when the API is unreachable.
# NOTE(review): ethgasstation reports prices in gwei*10, so dividing by 5
# doubles the "fast" price — confirm that is the intent.
gas_url = "https://ethgasstation.info/json/ethgasAPI.json"
gas_p = 0
req = requests.get(gas_url)
if (req.status_code == 200):
    t = json.loads(req.content)
    gas_p = t['fast'] / 5
    print('gas price', gas_p)
else:
    gas_p = 15
# BUG FIX: ``contract_abi`` was used here before assignment (it was only
# defined later inside the __main__ block), so importing this module raised
# NameError. Define the ABI path before opening it.
contract_abi = Path('/home/lorenzo/oracle/OracleMain.json')
w3 = Web3(HTTPProvider(provider_url))
with open(contract_abi) as f:
    contract_abi = json.load(f)
# 1 for mainnet, 3 for ropsten
contract_address = contract_abi['address']
contract_address = w3.toChecksumAddress(contract_address)
contract = w3.eth.contract(address=contract_address, abi=contract_abi['abi'])
def update_PriceBatch(ethprice, spxprice, btcprice, final_day):
    """Send an intraweek ``updatePrices`` transaction to the oracle contract.

    Prices are integers (price * 100, see __main__); *final_day* flags that
    the next update will be a settlement. Returns a small status dict.
    """
    txn_dict = contract.functions.updatePrices(
        ethprice, spxprice, btcprice, final_day
    ).buildTransaction({
        'gas': 500000,
        'gasPrice': w3.toWei(gas_p, 'gwei'),
        'nonce': w3.eth.getTransactionCount(wallet_address),
    })
    signed = w3.eth.account.signTransaction(txn_dict, private_key=wallet_private_key)
    tx_hash = w3.eth.sendRawTransaction(signed.rawTransaction)
    receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    if receipt is None:
        return {'status': 'failed', 'error': 'timeout'}
    return {'status': 'added'}
def update_Settle(ethprice, spxprice, btcprice):
    """Send a settlement ``settlePrice`` transaction to the oracle contract.

    Prices are integers (price * 100, see __main__). Returns a small
    status dict; uses a larger gas limit than the intraweek update.
    """
    txn_dict = contract.functions.settlePrice(
        ethprice, spxprice, btcprice
    ).buildTransaction({
        'gas': 900000,
        'gasPrice': w3.toWei(gas_p, 'gwei'),
        'nonce': w3.eth.getTransactionCount(wallet_address),
    })
    signed = w3.eth.account.signTransaction(txn_dict, private_key=wallet_private_key)
    tx_hash = w3.eth.sendRawTransaction(signed.rawTransaction)
    receipt = w3.eth.waitForTransactionReceipt(tx_hash)
    if receipt is None:
        return {'status': 'failed', 'error': 'timeout'}
    return {'status': 'added'}
def is_final_day():
    """Return True when today is Thursday (``weekday() == 3``).

    Thursday indicates that tomorrow's price is a settlement price; the
    original caller notes this must be adjusted manually around Thursday
    or Friday holidays.
    """
    # Idiom fix: return the comparison directly instead of
    # ``if cond: return True / else: return False``.
    return datetime.today().weekday() == 3
if __name__ == "__main__":
    curr_date = datetime.now().date()
    current_date = curr_date.strftime("%y%m%d")
    ''' if a thursday, will report true, indicating tomorrow is a settlemnet price.
    If Thurs or Friday is a holiday, this needs to be adjusted so you will settle correctly
    '''
    is_final = is_final_day()
    # Per-day price files, e.g. eth210301.txt, written by a separate process.
    eth_app = 'eth' + current_date + '.txt'
    btc_app = 'btc' + current_date + '.txt'
    spx_app = 'spx' + current_date + '.txt'
    eth_new = Path('/home/lorenzo/oracle/data/') / eth_app
    btc_new = Path('/home/lorenzo/oracle/data/') / btc_app
    spx_new = Path('/home/lorenzo/oracle/data/') / spx_app
    contract_abi = Path('/home/lorenzo/oracle/OracleMain.json')
    # Read the first line of each file as a float price.
    spx_final_f = open(spx_new, 'r')
    spx_final = spx_final_f.readline()
    spx_final_f.close()
    spx_final = float(spx_final)
    eth_final_f = open(eth_new, 'r')
    eth_final = eth_final_f.readline()
    eth_final_f.close()
    eth_final = float(eth_final)
    btc_final_f = open(btc_new, 'r')
    btc_final = btc_final_f.readline()
    btc_final_f.close()
    btc_final = float(btc_final)
    # Re-create the web3 connection and contract object (this duplicates the
    # module-level setup above).
    w3 = Web3(HTTPProvider(provider_url))
    with open(contract_abi) as f:
        contract_abi = json.load(f)
    contract = w3.eth.contract(address=contract_address, abi=contract_abi['abi'])
    # Contract state: whether the next update is a settlement, and the
    # timestamp of the last update.
    isSettle = contract.functions.nextUpdateSettle().call()
    updateTime = contract.functions.lastUpdateTime().call()
    OracleContractDay = datetime.utcfromtimestamp(updateTime).strftime("%y%m%d")
    ''' checks to see if date of last price is today. If so, it does not send
    '''
    if current_date != OracleContractDay:
        ''' if not the settlement day, uses the intraweek price update function
        '''
        if not isSettle:
            # Prices are submitted as integers scaled by 100 (two decimals).
            eth_tx = update_PriceBatch(int(eth_final * 1e2), int(spx_final * 1e2), int(btc_final * 1e2), is_final)
        else:
            eth_tx = update_Settle(int(eth_final * 1e2), int(spx_final * 1e2), int(btc_final * 1e2))
| 4,480 | 1,696 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from hwt.hdl.constants import Time
from hwt.simulator.simTestCase import SingleUnitSimTestCase
from hwtLib.examples.statements.constDriver import ConstDriverUnit
class ConstDriverTC(SingleUnitSimTestCase):
    """Simulation test: ConstDriverUnit must drive the constants 0 and 1
    on its two outputs."""

    @classmethod
    def getUnit(cls):
        cls.u = ConstDriverUnit()
        return cls.u

    def test_simple(self):
        self.runSim(20 * Time.ns)
        expected = {self.u.out0: [0, 0], self.u.out1: [1, 1]}
        for intf, ref in expected.items():
            self.assertValSequenceEqual(intf._ag.data, ref)
if __name__ == "__main__":
    import unittest
    suite = unittest.TestSuite()
    # suite.addTest(TwoCntrsTC('test_nothingEnable'))
    # FIX: unittest.makeSuite() is deprecated and removed in Python 3.13;
    # use the TestLoader API instead.
    suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ConstDriverTC))
    runner = unittest.TextTestRunner(verbosity=3)
    runner.run(suite)
| 816 | 288 |
# Calculate your Body-Mass-Index with Python.
# FIX: the last line carried fused extraction residue ("| 356 | 121 |"),
# which made the script a syntax error; removed.
print("BMI - Calculator!")
weight_str = input("Please insert your weight (in kg): ")
height_str = input("Please insert your bodys height(in m): ")
# Accept a decimal comma as well as a decimal point.
weight = float(weight_str.replace(",", "."))
height = float(height_str.replace(",", "."))
# BMI = mass / height^2
bmi = weight / height ** 2
print("Your BMI is: " + str(round(bmi, 1)))
import requests
from bs4 import BeautifulSoup
# Example of parsing a local file instead of the live page:
# with open('index.html', 'rb') as hf:
#     soup = BeautifulSoup(hf, 'html.parser')
# print(soup.prettify())
# print(soup.head.title.text)
# print(soup.li.a.h2.text)
# print(soup.li.a.p.text)
# Fetch the page and parse it with the lxml backend.
source_code = requests.get('https://mariusciurea.github.io/links/')
soup = BeautifulSoup(source_code.content, 'lxml')
# All anchors with this exact (Romanian) title attribute.
apps = soup.find_all('a', {'title':'Ajuta un elev sa aleaga informat facultatea'})
for app in apps:
    print(app)
| 491 | 199 |
# input -> reads a line from the user and returns it as a string
# int   -> converts a number or a numeric string to an integer
# FIX: the last line carried fused extraction residue ("| 333 | 153 |"),
# which made the script a syntax error; removed. Comments translated from
# Indonesian; prompts left untouched.
a = int(input("masukan nilai A: "))
b = int(input("masukan nilai B: "))
a += 3
print(a)
b -= 10
print(b)
a *= 4
print(a)
b **= 2
print(b)
import time
import math
import random
from seal import *
from seal_helper import *
def rand_int():
    """Return a pseudo-random integer in ``[0, 10**10)``."""
    scaled = random.random() * (10 ** 10)
    return int(scaled)
def bfv_performance_test(context):
    """Benchmark the core BFV operations for *context*.

    Generates keys, then runs ``count`` iterations timing batch/unbatch,
    encrypt/decrypt, add, multiply, multiply_plain, square, and — when key
    switching is available — relinearize and the rotation operations.
    Prints the average time per operation in microseconds.
    """
    print_parameters(context)
    parms = context.first_context_data().parms()
    plain_modulus = parms.plain_modulus()
    poly_modulus_degree = parms.poly_modulus_degree()
    print("Generating secret/public keys: ", end="")
    keygen = KeyGenerator(context)
    print("Done")
    secret_key = keygen.secret_key()
    public_key = keygen.public_key()
    relin_keys = RelinKeys()
    gal_keys = GaloisKeys()
    if context.using_keyswitching():
        # Generate relinearization keys.
        print("Generating relinearization keys: ", end="")
        time_start = time.time()
        relin_keys = keygen.relin_keys()
        time_end = time.time()
        print("Done [" + "%.0f" %
              ((time_end-time_start)*1000000) + " microseconds]")
        # Galois keys are only meaningful when batching is supported.
        if not context.key_context_data().qualifiers().using_batching:
            print("Given encryption parameters do not support batching.")
            return 0
        print("Generating Galois keys: ", end="")
        time_start = time.time()
        gal_keys = keygen.galois_keys()
        time_end = time.time()
        print("Done [" + "%.0f" %
              ((time_end-time_start)*1000000) + " microseconds]")
    encryptor = Encryptor(context, public_key)
    decryptor = Decryptor(context, secret_key)
    evaluator = Evaluator(context)
    batch_encoder = BatchEncoder(context)
    encoder = IntegerEncoder(context)
    # These will hold the total times used by each operation.
    time_batch_sum = 0
    time_unbatch_sum = 0
    time_encrypt_sum = 0
    time_decrypt_sum = 0
    time_add_sum = 0
    time_multiply_sum = 0
    time_multiply_plain_sum = 0
    time_square_sum = 0
    time_relinearize_sum = 0
    time_rotate_rows_one_step_sum = 0
    time_rotate_rows_random_sum = 0
    time_rotate_columns_sum = 0
    # How many times to run the test?
    count = 10
    # Populate a vector of values to batch.
    slot_count = batch_encoder.slot_count()
    pod_vector = uIntVector()
    for i in range(slot_count):
        pod_vector.push_back(rand_int() % plain_modulus.value())
    print("Running tests ", end="")
    for i in range(count):
        '''
        [Batching]
        There is nothing unusual here. We batch our random plaintext matrix
        into the polynomial. Note how the plaintext we create is of the exactly
        right size so unnecessary reallocations are avoided.
        '''
        plain = Plaintext(parms.poly_modulus_degree(), 0)
        time_start = time.time()
        batch_encoder.encode(pod_vector, plain)
        time_end = time.time()
        time_batch_sum += (time_end-time_start)*1000000
        '''
        [Unbatching]
        We unbatch what we just batched.
        '''
        pod_vector2 = uIntVector()
        time_start = time.time()
        batch_encoder.decode(plain, pod_vector2)
        time_end = time.time()
        time_unbatch_sum += (time_end-time_start)*1000000
        # Sanity check: the round trip must be lossless.
        for j in range(slot_count):
            if pod_vector[j] != pod_vector2[j]:
                raise Exception("Batch/unbatch failed. Something is wrong.")
        '''
        [Encryption]
        We make sure our ciphertext is already allocated and large enough
        to hold the encryption with these encryption parameters. We encrypt
        our random batched matrix here.
        '''
        encrypted = Ciphertext()
        time_start = time.time()
        encryptor.encrypt(plain, encrypted)
        time_end = time.time()
        time_encrypt_sum += (time_end-time_start)*1000000
        '''
        [Decryption]
        We decrypt what we just encrypted.
        '''
        plain2 = Plaintext(poly_modulus_degree, 0)
        time_start = time.time()
        decryptor.decrypt(encrypted, plain2)
        time_end = time.time()
        time_decrypt_sum += (time_end-time_start)*1000000
        if plain.to_string() != plain2.to_string():
            raise Exception("Encrypt/decrypt failed. Something is wrong.")
        '''
        [Add]
        We create two ciphertexts and perform a few additions with them.
        '''
        encrypted1 = Ciphertext()
        encryptor.encrypt(encoder.encode(i), encrypted1)
        encrypted2 = Ciphertext(context)
        encryptor.encrypt(encoder.encode(i + 1), encrypted2)
        time_start = time.time()
        evaluator.add_inplace(encrypted1, encrypted1)
        evaluator.add_inplace(encrypted2, encrypted2)
        evaluator.add_inplace(encrypted1, encrypted2)
        time_end = time.time()
        time_add_sum += (time_end-time_start)*1000000
        '''
        [Multiply]
        We multiply two ciphertexts. Since the size of the result will be 3,
        and will overwrite the first argument, we reserve first enough memory
        to avoid reallocating during multiplication.
        '''
        encrypted1.reserve(3)
        time_start = time.time()
        evaluator.multiply_inplace(encrypted1, encrypted2)
        time_end = time.time()
        time_multiply_sum += (time_end-time_start)*1000000
        '''
        [Multiply Plain]
        We multiply a ciphertext with a random plaintext. Recall that
        multiply_plain does not change the size of the ciphertext so we use
        encrypted2 here.
        '''
        time_start = time.time()
        evaluator.multiply_plain_inplace(encrypted2, plain)
        time_end = time.time()
        time_multiply_plain_sum += (time_end-time_start)*1000000
        '''
        [Square]
        We continue to use encrypted2. Now we square it; this should be
        faster than generic homomorphic multiplication.
        '''
        time_start = time.time()
        evaluator.square_inplace(encrypted2)
        time_end = time.time()
        time_square_sum += (time_end-time_start)*1000000
        if context.using_keyswitching():
            '''
            [Relinearize]
            Time to get back to encrypted1. We now relinearize it back
            to size 2. Since the allocation is currently big enough to
            contain a ciphertext of size 3, no costly reallocations are
            needed in the process.
            '''
            time_start = time.time()
            evaluator.relinearize_inplace(encrypted1, relin_keys)
            time_end = time.time()
            time_relinearize_sum += (time_end-time_start)*1000000
            '''
            [Rotate Rows One Step]
            We rotate matrix rows by one step left and measure the time.
            '''
            time_start = time.time()
            evaluator.rotate_rows_inplace(encrypted, 1, gal_keys)
            evaluator.rotate_rows_inplace(encrypted, -1, gal_keys)
            time_end = time.time()
            time_rotate_rows_one_step_sum += (time_end-time_start)*1000000
            '''
            [Rotate Rows Random]
            We rotate matrix rows by a random number of steps. This is much more
            expensive than rotating by just one step.
            '''
            row_size = batch_encoder.slot_count() / 2
            random_rotation = int(rand_int() % row_size)
            time_start = time.time()
            evaluator.rotate_rows_inplace(
                encrypted, random_rotation, gal_keys)
            time_end = time.time()
            time_rotate_rows_random_sum += (time_end-time_start)*1000000
            '''
            [Rotate Columns]
            Nothing surprising here.
            '''
            time_start = time.time()
            evaluator.rotate_columns_inplace(encrypted, gal_keys)
            time_end = time.time()
            time_rotate_columns_sum += (time_end-time_start)*1000000
        # Print a dot to indicate progress.
        print(".", end="", flush=True)
    print(" Done", flush=True)
    # Report the per-operation averages over all iterations. Add and the
    # one-step rotation timed multiple calls per iteration, hence the
    # extra divisors.
    avg_batch = time_batch_sum / count
    avg_unbatch = time_unbatch_sum / count
    avg_encrypt = time_encrypt_sum / count
    avg_decrypt = time_decrypt_sum / count
    avg_add = time_add_sum / (3 * count)
    avg_multiply = time_multiply_sum / count
    avg_multiply_plain = time_multiply_plain_sum / count
    avg_square = time_square_sum / count
    avg_relinearize = time_relinearize_sum / count
    avg_rotate_rows_one_step = time_rotate_rows_one_step_sum / (2 * count)
    avg_rotate_rows_random = time_rotate_rows_random_sum / count
    avg_rotate_columns = time_rotate_columns_sum / count
    print("Average batch: " + "%.0f" % avg_batch + " microseconds", flush=True)
    print("Average unbatch: " + "%.0f" %
          avg_unbatch + " microseconds", flush=True)
    print("Average encrypt: " + "%.0f" %
          avg_encrypt + " microseconds", flush=True)
    print("Average decrypt: " + "%.0f" %
          avg_decrypt + " microseconds", flush=True)
    print("Average add: " + "%.0f" % avg_add + " microseconds", flush=True)
    print("Average multiply: " + "%.0f" %
          avg_multiply + " microseconds", flush=True)
    print("Average multiply plain: " + "%.0f" %
          avg_multiply_plain + " microseconds", flush=True)
    print("Average square: " + "%.0f" %
          avg_square + " microseconds", flush=True)
    if context.using_keyswitching():
        print("Average relinearize: " + "%.0f" %
              avg_relinearize + " microseconds", flush=True)
        print("Average rotate rows one step: " + "%.0f" %
              avg_rotate_rows_one_step + " microseconds", flush=True)
        print("Average rotate rows random: " + "%.0f" %
              avg_rotate_rows_random + " microseconds", flush=True)
        print("Average rotate columns: " + "%.0f" %
              avg_rotate_columns + " microseconds", flush=True)
def ckks_performance_test(context):
    """Benchmark the core CKKS operations for *context*.

    Mirrors ``bfv_performance_test`` but with the CKKS encoder and the
    CKKS-specific operations (rescale, vector rotation, complex conjugate).
    Prints the average time per operation in microseconds.
    """
    print_parameters(context)
    parms = context.first_context_data().parms()
    plain_modulus = parms.plain_modulus()
    poly_modulus_degree = parms.poly_modulus_degree()
    print("Generating secret/public keys: ", end="")
    keygen = KeyGenerator(context)
    print("Done")
    secret_key = keygen.secret_key()
    public_key = keygen.public_key()
    relin_keys = RelinKeys()
    gal_keys = GaloisKeys()
    if context.using_keyswitching():
        print("Generating relinearization keys: ", end="")
        time_start = time.time()
        relin_keys = keygen.relin_keys()
        time_end = time.time()
        print("Done [" + "%.0f" %
              ((time_end-time_start)*1000000) + " microseconds]")
        # Galois keys are only meaningful when batching is supported.
        if not context.key_context_data().qualifiers().using_batching:
            print("Given encryption parameters do not support batching.")
            return 0
        print("Generating Galois keys: ", end="")
        time_start = time.time()
        gal_keys = keygen.galois_keys()
        time_end = time.time()
        print("Done [" + "%.0f" %
              ((time_end-time_start)*1000000) + " microseconds]")
    encryptor = Encryptor(context, public_key)
    decryptor = Decryptor(context, secret_key)
    evaluator = Evaluator(context)
    ckks_encoder = CKKSEncoder(context)
    # Totals accumulated over all iterations.
    time_encode_sum = 0
    time_decode_sum = 0
    time_encrypt_sum = 0
    time_decrypt_sum = 0
    time_add_sum = 0
    time_multiply_sum = 0
    time_multiply_plain_sum = 0
    time_square_sum = 0
    time_relinearize_sum = 0
    time_rescale_sum = 0
    time_rotate_one_step_sum = 0
    time_rotate_random_sum = 0
    time_conjugate_sum = 0
    # How many times to run the test?
    count = 10
    # Populate a vector of floating-point values to batch.
    pod_vector = DoubleVector()
    slot_count = ckks_encoder.slot_count()
    for i in range(slot_count):
        pod_vector.push_back(1.001 * float(i))
    print("Running tests ", end="")
    for i in range(count):
        '''
        [Encoding]
        For scale we use the square root of the last coeff_modulus prime
        from parms.
        '''
        plain = Plaintext(parms.poly_modulus_degree() *
                          len(parms.coeff_modulus()), 0)
        # [Encoding]
        scale = math.sqrt(parms.coeff_modulus()[-1].value())
        time_start = time.time()
        ckks_encoder.encode(pod_vector, scale, plain)
        time_end = time.time()
        time_encode_sum += (time_end-time_start)*1000000
        # [Decoding]
        pod_vector2 = DoubleVector()
        time_start = time.time()
        ckks_encoder.decode(plain, pod_vector2)
        time_end = time.time()
        time_decode_sum += (time_end-time_start)*1000000
        # [Encryption]
        encrypted = Ciphertext(context)
        time_start = time.time()
        encryptor.encrypt(plain, encrypted)
        time_end = time.time()
        time_encrypt_sum += (time_end-time_start)*1000000
        # [Decryption]
        plain2 = Plaintext(poly_modulus_degree, 0)
        time_start = time.time()
        decryptor.decrypt(encrypted, plain2)
        time_end = time.time()
        time_decrypt_sum += (time_end-time_start)*1000000
        # [Add] -- three additions timed per iteration.
        encrypted1 = Ciphertext(context)
        ckks_encoder.encode(i + 1, plain)
        encryptor.encrypt(plain, encrypted1)
        encrypted2 = Ciphertext(context)
        ckks_encoder.encode(i + 1, plain2)
        encryptor.encrypt(plain2, encrypted2)
        time_start = time.time()
        evaluator.add_inplace(encrypted1, encrypted1)
        evaluator.add_inplace(encrypted2, encrypted2)
        evaluator.add_inplace(encrypted1, encrypted2)
        time_end = time.time()
        time_add_sum += (time_end-time_start)*1000000
        # [Multiply] -- reserve size 3 to avoid reallocation.
        encrypted1.reserve(3)
        time_start = time.time()
        evaluator.multiply_inplace(encrypted1, encrypted2)
        time_end = time.time()
        time_multiply_sum += (time_end-time_start)*1000000
        # [Multiply Plain]
        time_start = time.time()
        evaluator.multiply_plain_inplace(encrypted2, plain)
        time_end = time.time()
        time_multiply_plain_sum += (time_end-time_start)*1000000
        # [Square]
        time_start = time.time()
        evaluator.square_inplace(encrypted2)
        time_end = time.time()
        time_square_sum += (time_end-time_start)*1000000
        if context.using_keyswitching():
            # [Relinearize]
            time_start = time.time()
            evaluator.relinearize_inplace(encrypted1, relin_keys)
            time_end = time.time()
            time_relinearize_sum += (time_end-time_start)*1000000
            # [Rescale]
            time_start = time.time()
            evaluator.rescale_to_next_inplace(encrypted1)
            time_end = time.time()
            time_rescale_sum += (time_end-time_start)*1000000
            # [Rotate Vector] -- two one-step rotations timed together.
            time_start = time.time()
            evaluator.rotate_vector_inplace(encrypted, 1, gal_keys)
            evaluator.rotate_vector_inplace(encrypted, -1, gal_keys)
            time_end = time.time()
            time_rotate_one_step_sum += (time_end-time_start)*1000000
            # [Rotate Vector Random]
            random_rotation = int(rand_int() % ckks_encoder.slot_count())
            time_start = time.time()
            evaluator.rotate_vector_inplace(
                encrypted, random_rotation, gal_keys)
            time_end = time.time()
            time_rotate_random_sum += (time_end-time_start)*1000000
            # [Complex Conjugate]
            time_start = time.time()
            evaluator.complex_conjugate_inplace(encrypted, gal_keys)
            time_end = time.time()
            time_conjugate_sum += (time_end-time_start)*1000000
        # Progress dot for each iteration.
        print(".", end="", flush=True)
    print(" Done\n", flush=True)
    # Per-operation averages (add and one-step rotation timed multiple
    # calls per iteration, hence the extra divisors).
    avg_encode = time_encode_sum / count
    avg_decode = time_decode_sum / count
    avg_encrypt = time_encrypt_sum / count
    avg_decrypt = time_decrypt_sum / count
    avg_add = time_add_sum / (3 * count)
    avg_multiply = time_multiply_sum / count
    avg_multiply_plain = time_multiply_plain_sum / count
    avg_square = time_square_sum / count
    avg_relinearize = time_relinearize_sum / count
    avg_rescale = time_rescale_sum / count
    avg_rotate_one_step = time_rotate_one_step_sum / (2 * count)
    avg_rotate_random = time_rotate_random_sum / count
    avg_conjugate = time_conjugate_sum / count
    print("Average encode: " + "%.0f" %
          avg_encode + " microseconds", flush=True)
    print("Average decode: " + "%.0f" %
          avg_decode + " microseconds", flush=True)
    print("Average encrypt: " + "%.0f" %
          avg_encrypt + " microseconds", flush=True)
    print("Average decrypt: " + "%.0f" %
          avg_decrypt + " microseconds", flush=True)
    print("Average add: " + "%.0f" % avg_add + " microseconds", flush=True)
    print("Average multiply: " + "%.0f" %
          avg_multiply + " microseconds", flush=True)
    print("Average multiply plain: " + "%.0f" %
          avg_multiply_plain + " microseconds", flush=True)
    print("Average square: " + "%.0f" %
          avg_square + " microseconds", flush=True)
    if context.using_keyswitching():
        print("Average relinearize: " + "%.0f" %
              avg_relinearize + " microseconds", flush=True)
        print("Average rescale: " + "%.0f" %
              avg_rescale + " microseconds", flush=True)
        print("Average rotate vector one step: " + "%.0f" %
              avg_rotate_one_step + " microseconds", flush=True)
        print("Average rotate vector random: " + "%.0f" %
              avg_rotate_random + " microseconds", flush=True)
        print("Average complex conjugate: " + "%.0f" %
              avg_conjugate + " microseconds", flush=True)
def example_bfv_performance_default():
    """Run the BFV benchmark at the three default degrees.

    DRY: the original repeated the identical parameter setup three times;
    the loop below produces byte-identical output (a blank line between
    consecutive runs, none after the last).
    """
    print_example_banner(
        "BFV Performance Test with Degrees: 4096, 8192, and 16384")
    parms = EncryptionParameters(scheme_type.BFV)
    for index, poly_modulus_degree in enumerate((4096, 8192, 16384)):
        if index:
            print()
        parms.set_poly_modulus_degree(poly_modulus_degree)
        parms.set_coeff_modulus(CoeffModulus.BFVDefault(poly_modulus_degree))
        parms.set_plain_modulus(786433)
        bfv_performance_test(SEALContext.Create(parms))
    # Comment out the following to run the biggest example.
    # poly_modulus_degree = 32768
def example_bfv_performance_custom():
    """Prompt for a poly_modulus_degree and run the BFV benchmark once.

    Accepts only powers of two in [1024, 32768]; prints "Invalid option."
    and returns 0 otherwise.
    """
    print("\nSet poly_modulus_degree (1024, 2048, 4096, 8192, 16384, or 32768): ")
    raw = input("Input the poly_modulus_degree: ").strip()
    if len(raw) < 4 or not raw.isdigit():
        print("Invalid option.")
        return 0
    degree = int(raw)
    is_power_of_two = degree & (degree - 1) == 0
    if not (1024 <= degree <= 32768) or not is_power_of_two:
        print("Invalid option.")
        return 0
    print("BFV Performance Test with Degree: " + str(degree))
    parms = EncryptionParameters(scheme_type.BFV)
    parms.set_poly_modulus_degree(degree)
    parms.set_coeff_modulus(CoeffModulus.BFVDefault(degree))
    # 1024 needs a smaller plain modulus for valid parameters.
    if degree == 1024:
        parms.set_plain_modulus(12289)
    else:
        parms.set_plain_modulus(786433)
    bfv_performance_test(SEALContext.Create(parms))
def example_ckks_performance_default():
    """Run the CKKS benchmark at the three default degrees.

    CONSISTENCY FIX: the original omitted the separating blank line between
    the 8192 and 16384 runs, unlike example_bfv_performance_default; the
    loop restores it and removes the triplicated setup.
    """
    print_example_banner(
        "CKKS Performance Test with Degrees: 4096, 8192, and 16384")
    parms = EncryptionParameters(scheme_type.CKKS)
    for index, poly_modulus_degree in enumerate((4096, 8192, 16384)):
        if index:
            print()
        parms.set_poly_modulus_degree(poly_modulus_degree)
        parms.set_coeff_modulus(CoeffModulus.BFVDefault(poly_modulus_degree))
        ckks_performance_test(SEALContext.Create(parms))
    # Comment out the following to run the biggest example.
    # poly_modulus_degree = 32768
def example_ckks_performance_custom():
    """Prompt for a poly_modulus_degree and run the CKKS performance test.

    Only powers of two in [1024, 32768] are accepted; any other input
    prints "Invalid option." and returns 0 without running the test.
    """
    print("\nSet poly_modulus_degree (1024, 2048, 4096, 8192, 16384, or 32768): ")
    raw = input("Input the poly_modulus_degree: ").strip()
    # Reject anything that is not a number of at least four digits before
    # attempting the integer conversion.
    if not raw.isdigit() or len(raw) < 4:
        print("Invalid option.")
        return 0
    degree = int(raw)
    # The degree must be a power of two inside the supported range.
    is_power_of_two = degree & (degree - 1) == 0
    if not (1024 <= degree <= 32768) or not is_power_of_two:
        print("Invalid option.")
        return 0
    print("CKKS Performance Test with Degree: " + str(degree))
    parms = EncryptionParameters(scheme_type.CKKS)
    parms.set_poly_modulus_degree(degree)
    parms.set_coeff_modulus(CoeffModulus.BFVDefault(degree))
    ckks_performance_test(SEALContext.Create(parms))
if __name__ == '__main__':
    # Run all four performance suites in order: the fixed-degree BFV and
    # CKKS tests, each followed by its interactive variant that prompts
    # the user for a degree.
    print_example_banner("Example: Performance Test")
    example_bfv_performance_default()
    example_bfv_performance_custom()
    example_ckks_performance_default()
    example_ckks_performance_custom()
| 21,636 | 7,356 |
# -*- coding: utf-8 -*-
from django.db import models
from django.db.models import DateTimeField, GenericIPAddressField
class Client(models.Model):
    """A client record identified by their full name.

    The admin-facing labels are Russian and are user-visible runtime
    strings, so they are deliberately left untranslated.
    """

    class Meta:
        app_label = "subd"
        verbose_name = "Клиент"
        verbose_name_plural = "Клиенты"

    # Full name of the client ("ФИО": surname, first name, patronymic).
    fio = models.CharField("ФИО", max_length=200)

    def __str__(self):
        # NOTE(review): `unicode` exists only on Python 2; on Python 3 this
        # raises NameError. Returning encoded bytes from __str__ is the
        # Python 2 Django idiom — confirm this project still targets Py2.
        return unicode(self).encode('utf-8')

    def __unicode__(self):
        # Display a client by full name.
        return u"{0.fio}".format(self)
| 452 | 159 |
import os
import sys
import requests
import logging
import json
import google.auth.transport.grpc
import google.auth.transport.requests
import google.oauth2.credentials
from google.assistant.embedded.v1alpha2 import (
embedded_assistant_pb2,
embedded_assistant_pb2_grpc
)
from config import Config
# Ref: https://github.com/googlesamples/assistant-sdk-python/blob/master/google-assistant-sdk/googlesamples/assistant/grpc/textinput.py
# Hostname of the Google Assistant gRPC service.
ASSISTANT_API_ENDPOINT = 'embeddedassistant.googleapis.com'
# Per-call gRPC deadline in seconds (three minutes plus a small margin).
DEFAULT_GRPC_DEADLINE = 60 * 3 + 5
def gassist(text_query, lang_code='en-US'):
    """Send one text query to the Google Assistant and return its reply.

    :param text_query: plain-text question or command for the Assistant
    :param lang_code: BCP-47 language code for the dialog (default 'en-US')
    :returns: the Assistant's supplemental display text, or None when the
        response carried none
    """
    logging.info(text_query)
    # Load OAuth 2.0 credentials.
    try:
        with open(Config.CREDENTIALS, 'r') as f:
            credentials = google.oauth2.credentials.Credentials(token=None, **json.load(f))
        session = requests.Session()
        http_request = google.auth.transport.requests.Request(session)
        # Obtain a fresh access token before opening the channel.
        credentials.refresh(http_request)
    except Exception as e:
        # NOTE(review): terminating the whole process from a library-style
        # function is drastic — confirm callers expect this on bad creds.
        logging.error('Error loading credentials', exc_info=True)
        sys.exit(-1)
    # Create an authorized gRPC channel.
    grpc_channel = google.auth.transport.grpc.secure_authorized_channel(
        credentials, http_request, ASSISTANT_API_ENDPOINT)
    # Create an assistant.
    assistant = embedded_assistant_pb2_grpc.EmbeddedAssistantStub(grpc_channel)

    def assist(text_query):
        # One-shot Assist call: build the single-request stream and collect
        # the text/HTML pieces from the streamed response.
        def iter_assist_requests():
            config = embedded_assistant_pb2.AssistConfig(
                audio_out_config=embedded_assistant_pb2.AudioOutConfig(
                    encoding='LINEAR16',
                    sample_rate_hertz=16000,
                    volume_percentage=0,
                ),
                dialog_state_in=embedded_assistant_pb2.DialogStateIn(
                    language_code=lang_code,
                    conversation_state=None,
                    # Each call starts a brand-new conversation.
                    is_new_conversation=True,
                ),
                device_config=embedded_assistant_pb2.DeviceConfig(
                    device_id=Config.DEVICE_ID,
                    device_model_id=Config.DEVICE_MODEL_ID,
                ),
                text_query=text_query,
            )
            req = embedded_assistant_pb2.AssistRequest(config=config)
            yield req
        text_response = None
        html_response = None
        # The Assist stream may deliver several responses; keep the last
        # screen data / display text seen.
        for resp in assistant.Assist(iter_assist_requests(), DEFAULT_GRPC_DEADLINE):
            if resp.screen_out.data:
                html_response = resp.screen_out.data
            if resp.dialog_state_out.supplemental_display_text:
                text_response = resp.dialog_state_out.supplemental_display_text
        return text_response, html_response
    text, html = assist(text_query)
    logging.info(text)
    # Tear down network resources before returning.
    grpc_channel.close()
    session.close()
    return text
if __name__ == '__main__':
    # Manual smoke test: send one query and print the Assistant's reply.
    print(gassist('hello'))
| 2,866 | 853 |
import importlib

# Public submodules of this package, imported eagerly so that importing
# the package makes e.g. `package.datafile` immediately available.
__all__ = ["datafile", "flags", "graphs"]

for module in __all__:
    # importlib.import_module is the documented replacement for calling
    # __import__ directly.
    importlib.import_module(__name__ + "." + module)
import torch
from .box_impl import (
iou2d as iou2d_cc, iou2d_cuda,
nms2d as nms2d_cc, nms2d_cuda,
rbox_2d_crop as rbox_2d_crop_cc,
IouType, SupressionType)
def box2d_iou(boxes1, boxes2, method="box"):
    '''
    Compute IoU between two sets of 2D boxes.

    :param boxes1: Nx5 tensor of boxes, fields (x, y, w, h, r)
    :param boxes2: Mx5 tensor of boxes, fields (x, y, w, h, r)
    :param method: 'box' - normal box, 'rbox' - rotated box
    :return: tensor of IoU values (presumably NxM pairwise — confirm in box_impl)
    :raises ValueError: if either input is not a 2D tensor with 5 columns
    '''
    if len(boxes1.shape) != 2 or len(boxes2.shape) != 2:
        # Fix: the old message named the wrong function ("rbox_2d_iou") and
        # the wrong shape ("Nx2"); the expected inputs are Nx5 tensors.
        raise ValueError("Input of box2d_iou should be Nx5 tensors!")
    if boxes1.shape[1] != 5 or boxes2.shape[1] != 5:
        raise ValueError("Input boxes should have 5 fields: x, y, w, h, r")
    iou_type = getattr(IouType, method.upper())
    # Dispatch to the CUDA kernel only when both inputs live on the GPU.
    if boxes1.is_cuda and boxes2.is_cuda:
        impl = iou2d_cuda
    else:
        impl = iou2d_cc
    return impl(boxes1, boxes2, iou_type)
# TODO: implement IoU loss, GIoU, DIoU, CIoU: https://zhuanlan.zhihu.com/p/104236411
def box2d_nms(boxes, scores, iou_method="box", supression_method="hard",
    iou_threshold=0, score_threshold=0, supression_param=0):
    '''
    Perform non-maximum suppression on 2D boxes.

    :param boxes: Nx5 tensor of boxes, fields (x, y, w, h, r)
    :param scores: N tensor of confidence scores, or NxC per-class scores
        (reduced to each box's maximum before suppression)
    :param iou_method: 'box' - normal box, 'rbox' - rotated box
    :param supression_method: suppression strategy name resolved against
        SupressionType (e.g. 'hard').
        Soft-NMS: Bodla, Navaneeth, et al. "Soft-NMS--improving object
        detection with one line of code." Proceedings of the IEEE
        international conference on computer vision. 2017.
    :return: boolean keep-mask over the input boxes
    :raises ValueError: if boxes and scores disagree in length
    '''
    if len(boxes) != len(scores):
        raise ValueError("Numbers of boxes and scores are inconsistent!")
    if len(scores.shape) == 2:
        # Per-class scores: suppress using each box's best class score.
        # (`dim=` is the documented torch keyword; `axis=` is only an alias.)
        scores = scores.max(dim=1).values
    if boxes.numel() == 0:
        # Nothing to suppress: empty keep-mask.
        return torch.tensor([], dtype=torch.bool)
    iou_type = getattr(IouType, iou_method.upper())
    supression_type = getattr(SupressionType, supression_method.upper())
    # Dispatch to the CUDA kernel only when both tensors live on the GPU.
    if boxes.is_cuda and scores.is_cuda:
        impl = nms2d_cuda
    else:
        impl = nms2d_cc
    suppressed = impl(boxes, scores,
        iou_type, supression_type,
        iou_threshold, score_threshold, supression_param
    )
    # The backends flag suppressed boxes; invert to a keep mask.
    return ~suppressed
def box2d_crop(cloud, boxes):
    '''
    Crop point cloud points out given rotated boxes.

    Returns a list of index tensors, one per box, where each tensor holds
    the indices of the cloud points lying inside that box.
    '''
    # Thin wrapper over the C++ backend (no CUDA variant is imported for crop).
    return rbox_2d_crop_cc(cloud, boxes)
| 2,145 | 806 |
import pygame,sys
import random
import math
from pygame.locals import *
from pygame.sprite import Group
import gF
import Bullet
import DADcharacter
import Slave
import global_var
import Effect
import Item
import gameRule
class titleStar(pygame.sprite.Sprite):
    """A decorative shooting star on the title screen.

    The star drifts along a slowly curving path, spins, fades in and out
    over its lifetime, and periodically drops `starShadow` after-images.
    """

    def __init__(self):
        super(titleStar, self).__init__()
        self.tx = 0.0   # position in float pixels (x)
        self.ty = 0.0   # position in float pixels (y)
        self.speedx = 0
        self.speedy = 0
        self.image = pygame.Surface((64, 64)).convert_alpha()
        self.image.fill((0, 0, 0, 0))
        self.image.blit(global_var.get_value('titleStar'), (0, 0), (0, 0, 64, 64))
        self.lastFrame = 0                    # frames lived so far
        self.rAngle = random.random() * 360   # current sprite rotation (degrees)
        # Random spin direction: map {0, 1} to {-1, +1}.
        self.rDirection = random.randint(0, 1)
        if self.rDirection == 0:
            self.rDirection = -1
        self.rotation = (random.random() * 1.5 + 1.2) * self.rDirection
        self.maxFrame = 270 + random.randint(0, 80)   # lifetime in frames
        self.shadowInt = 4       # drop an after-image every 4 frames
        self.voidifyFrame = 30   # fade-in / fade-out duration in frames
        self.speed = 0
        # Per-frame change of the travel angle: makes the path arc.
        self.dDeg = -0.07 * random.random() - 0.07

    def initial(self, posx, posy):
        # Place the star at its spawn position.
        self.tx = posx
        self.ty = posy

    def movement(self):
        # Advance the position; DELTA_T is the frame time in ms, so the
        # speeds are expressed in pixels per 1/60 s tick.
        tick = global_var.get_value('DELTA_T')
        self.tx += self.speedx * 60 / 1000 * tick
        self.ty += self.speedy * 60 / 1000 * tick

    def speedAlter(self, speedx, speedy):
        # Set the velocity components directly.
        self.speedx = speedx
        self.speedy = speedy

    def countAngle(self):
        # Derive the travel direction (degrees in [0, 360)) from velocity.
        if self.speedx != 0:
            t = self.speedy / self.speedx
            deg = math.atan(t) * 180 / math.pi
        elif self.speedy > 0:
            deg = 90
        elif self.speedy < 0:
            deg = 270
        else:
            # Fix: with both components zero, `deg` used to be unbound and
            # the comparison below raised UnboundLocalError. Treat a
            # motionless star as pointing along +x.
            deg = 0
        if deg < 0:
            deg += 360
        # atan only resolves the direction up to 180 degrees; use the sign
        # of speedy to select the correct half-plane.
        if self.speedy > 0 and deg >= 180:
            deg = deg - 180
        if self.speedy < 0 and deg <= 180:
            deg = deg + 180
        if self.speedy == 0 and self.speedx < 0:
            deg = 180
        self.angle = deg

    def setSpeed(self, angle, speed):
        # Decompose a (degrees, magnitude) velocity into components.
        s = math.sin(math.radians(angle))
        c = math.cos(math.radians(angle))
        self.speedy = s * speed
        self.speedx = c * speed
        self.speed = speed

    def arc(self):
        # Keep bending the path while the star still heads steeper than 95°.
        if self.angle > 95:
            angle = self.angle + self.dDeg
            self.setSpeed(angle, self.speed)

    def checkValid(self):
        # Remove the sprite once its lifetime expires.
        if self.lastFrame > self.maxFrame:
            self.kill()

    def update(self, screen, titleDec):
        """Advance one frame: move, curve, draw, and drop after-images."""
        self.lastFrame += 1
        self.rAngle += self.rotation
        self.movement()
        self.countAngle()
        self.arc()
        self.draw(screen)
        if self.lastFrame % self.shadowInt == 0:
            self.newShadow(titleDec)
        self.checkValid()

    def newShadow(self, titleDec):
        # Drop an 80-frame after-image at the current position/rotation.
        new_shadow = starShadow((self.tx, self.ty), 80, self.rAngle)
        titleDec.add(new_shadow)

    def draw(self, screen):
        pos = (round(self.tx) - 32, round(self.ty) - 32)
        if self.lastFrame <= self.voidifyFrame:
            # Fade in: alpha ramps 56 -> 256 over the first voidifyFrame frames.
            tempImg = self.image
            alpha = round((256 - 56) * self.lastFrame / self.voidifyFrame + 56)
            # NOTE(review): tempImg aliases self.image, so set_alpha mutates
            # the sprite's own surface rather than a copy — confirm intended.
            tempImg.set_alpha(alpha)
            gF.drawRotation(tempImg, pos, self.rAngle, screen)
        elif (self.maxFrame - self.lastFrame) <= self.voidifyFrame:
            # Fade out over the final voidifyFrame frames of the lifetime.
            tempImg = self.image
            alpha = round((256 - 56) * (self.maxFrame - self.lastFrame) / self.voidifyFrame + 56)
            tempImg.set_alpha(alpha)
            gF.drawRotation(tempImg, pos, self.rAngle, screen)
        else:
            # Fully visible for the middle of the lifetime.
            gF.drawRotation(self.image, pos, self.rAngle, screen)
class starShadow(pygame.sprite.Sprite):
    """A fading, shrinking after-image left behind by a titleStar."""

    def __init__(self, pos, length=20, angle=0):
        super(starShadow, self).__init__()
        self.maxFrame = length   # lifetime in frames
        self.angle = angle       # fixed rotation inherited from the star
        self.pos = pos           # centre of the shadow, in pixels
        self.image = pygame.Surface((64, 64)).convert_alpha()
        self.image.fill((0, 0, 0, 0))
        self.image.blit(global_var.get_value('titleStar'), (0, 0), (0, 0, 64, 64))
        self.lastFrame = 0

    def checkValid(self):
        # Remove the sprite once it has lived out its allotted frames.
        if self.lastFrame >= self.maxFrame:
            self.kill()

    def update(self, screen, *arg):
        """Advance one frame: age, draw, and self-destruct when expired."""
        self.lastFrame += 1
        self.draw(screen)
        self.checkValid()

    def draw(self, screen):
        # Shrink and fade linearly with age.
        self.percentage = self.lastFrame / self.maxFrame
        remaining = 1 - self.percentage
        self.alpha = round((120 - 0) * remaining + 0)
        self.size = round(33 * remaining) + 1
        # Work on a scratch copy so the cached star image stays pristine.
        scratch = pygame.Surface((64, 64)).convert_alpha()
        scratch.fill((0, 0, 0, 0))
        scratch.blit(self.image, (0, 0), (0, 0, 64, 64))
        scratch = pygame.transform.smoothscale(scratch, (self.size, self.size))
        scratch.set_alpha(self.alpha)
        cx, cy = self.pos
        corner = (round(cx - self.size / 2), round(cy - self.size / 2))
        gF.drawRotation(scratch, corner, self.angle, screen)
class Menu():
    def __init__(self):
        """Load title-screen artwork and fonts and reset all menu state."""
        super(Menu,self).__init__()
        self.image=pygame.image.load('resource/title/menu.png').convert()
        # Cached shared resources, loaded elsewhere and published via global_var.
        self.sign=global_var.get_value('menuSign')      # bright (selected) entry images
        self.shadow=global_var.get_value('menuShadow')  # dimmed (unselected) entry images
        self.playerTitleImg=global_var.get_value('playerTitleImg')
        self.kanjiLogo=global_var.get_value('kanjiLogo')
        self.engLogo=global_var.get_value('engLogo')
        self.lightLogo=global_var.get_value('lightLogo')
        self.tachie=global_var.get_value('reimuLogo')
        self.selectImg=global_var.get_value('menuSelectImg')
        self.levelImg=global_var.get_value('levelImg')
        self.font=pygame.font.SysFont('arial', 20)
        self.selectNum=[0,0,0,0]  # current selection index per menu level
        self.stairMax=[7,0,1,1]   # highest selectable index per menu level
        self.menuStair=0 #0:main menu, 1 stage selection, 2 player selection, 3 practice menu
        self.playerReset=False    # set when a game actually starts (see doSelection)
        self.lightStrength=0.0    # pulsing brightness of the glowing logo
        self.logoPosAdj=[0,0]     # sinusoidal bobbing offset of the kanji logo
        self.lastFrame=0
        self.testSpellNum=1       # 1-based spell/non-spell index for practice mode
        self.ifSpell=False        # True: practice a spell card; False: a non-spell
        self.substract=False      # left key consumed this frame (practice menu)
        self.plus=False           # right key consumed this frame (practice menu)
        self.starInt=180          # frames between title-star spawns
    def update(self,screen,pressed_keys,pressed_keys_last,player,titleDec):
        """Advance the menu one frame: spawn stars, draw, and handle input."""
        self.lastFrame+=1
        self.addTitleStar(titleDec)
        # Keep the frame counter bounded; it only drives periodic effects.
        if self.lastFrame>360:
            self.lastFrame=self.lastFrame%360
        screen.blit(self.image,(0,0))
        self.alterSelect(pressed_keys,pressed_keys_last)
        self.drawSign(screen,titleDec)
        self.doSelection(pressed_keys,pressed_keys_last,player)
def addTitleStar(self,titleDec):
if self.lastFrame%self.starInt==0:
new_star=titleStar()
i_x=300+random.random()*660
i_y=random.random()*5+10
new_star.initial(i_x,i_y)
new_star.setSpeed(135+random.random()*10,1.8+0.6*random.random())
titleDec.add(new_star)
    def alterSelect(self,pressed_keys,pressed_keys_last):
        """Move the selection cursor based on keys newly pressed this frame.

        A key registers only on the frame it goes down (pressed now but not
        on the previous frame), so holding a key does not auto-repeat.
        """
        if self.menuStair!=2 and self.menuStair!=3:
            # Vertical list menus (main menu / stage selection): up and down.
            if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
                if pressed_keys[K_UP]:
                    self.selectNum[self.menuStair]-=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
                if pressed_keys[K_DOWN]:
                    self.selectNum[self.menuStair]+=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
        elif self.menuStair==2:
            # Player selection is laid out horizontally: left and right.
            if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
                if pressed_keys[K_LEFT]:
                    self.selectNum[self.menuStair]-=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
                if pressed_keys[K_RIGHT]:
                    self.selectNum[self.menuStair]+=1
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
        elif self.menuStair==3:
            # Practice menu: left/right pick the spell number, up/down toggle
            # between spell-card practice and non-spell practice.
            if not (pressed_keys[K_LEFT] and pressed_keys_last[K_LEFT]):
                if pressed_keys[K_LEFT]:
                    self.testSpellNum-=1
                    self.substract=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_RIGHT] and pressed_keys_last[K_RIGHT]):
                if pressed_keys[K_RIGHT]:
                    self.testSpellNum+=1
                    self.plus=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            # Spell numbers wrap around within 1..10.
            if self.testSpellNum>10:
                self.testSpellNum=1
            elif self.testSpellNum<1:
                self.testSpellNum=10
            if not (pressed_keys[K_DOWN] and pressed_keys_last[K_DOWN]):
                if pressed_keys[K_DOWN]:
                    self.ifSpell=False
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            if not (pressed_keys[K_UP] and pressed_keys_last[K_UP]):
                if pressed_keys[K_UP]:
                    self.ifSpell=True
                    global_var.get_value('select_sound').stop()
                    global_var.get_value('select_sound').play()
            # Only spell practice has a No.10 entry: if the cursor lands on
            # non-spell No.10, skip past it in the travel direction, or flip
            # to spell practice when it was reached by toggling.
            if not self.ifSpell and self.testSpellNum==10:
                if self.substract:
                    self.testSpellNum=9
                elif self.plus:
                    self.testSpellNum=1
                else:
                    self.ifSpell=True
            self.substract=False
            self.plus=False
        # Escape (or X) backs out one level; on the main menu it first jumps
        # to the Quit entry (index 7) and, if already there, exits the game.
        if (pressed_keys[K_ESCAPE]!=pressed_keys_last[K_ESCAPE] and pressed_keys[K_ESCAPE]) or (pressed_keys[K_x]!=pressed_keys_last[K_x] and pressed_keys[K_x]):
            if self.menuStair>0:
                self.menuStair-=1
                global_var.get_value('cancel_sound').play()
            else:
                if self.selectNum[0]!=7:
                    self.selectNum[0]=7
                    global_var.get_value('cancel_sound').play()
                else:
                    global_var.get_value('cancel_sound').play()
                    sys.exit()
        # Wrap the cursor for the current menu level.
        if self.selectNum[self.menuStair]>self.stairMax[self.menuStair]:
            self.selectNum[self.menuStair]=0
        elif self.selectNum[self.menuStair]<0:
            self.selectNum[self.menuStair]=self.stairMax[self.menuStair]
    def drawSign(self,screen,titleDec):
        """Draw the stars, logos, and widgets of the current menu level."""
        # Shooting stars render first on every level except the main menu,
        # where they are drawn after the character art instead.
        if self.menuStair!=0:
            for entity in titleDec:
                entity.update(screen,titleDec)
        if self.menuStair==0:
            # Main menu: character art, bobbing kanji logo, pulsing glow.
            screen.blit(self.tachie,(600,90))
            for entity in titleDec:
                entity.update(screen,titleDec)
            # Sinusoidal bob of the kanji logo driven by the frame counter.
            self.logoPosAdj=[math.sin(self.lastFrame*math.pi/180)*20,math.sin(self.lastFrame*0.5*math.pi/180)*5]
            screen.blit(self.kanjiLogo,(100+self.logoPosAdj[0],30+self.logoPosAdj[1]))
            # Glow alpha pulses between 0 and 1 with a 180-frame period.
            self.lightStrength=0.5*math.sin(self.lastFrame*2*math.pi/180)+0.5
            alpha=round(self.lightStrength*256)
            self.lightLogo.set_alpha(alpha)
            screen.blit(self.lightLogo,(100-5,164))
            screen.blit(self.engLogo,(100,164))
            # Menu entries: bright image for the selected row, shadow otherwise.
            for i in range(0,8):
                if i!=self.selectNum[self.menuStair]:
                    screen.blit(self.shadow[i],(100,250+i*48))
                else:
                    screen.blit(self.sign[i],(100,250+i*48))
        elif self.menuStair==1:
            # Stage selection: header plus the difficulty image.
            screen.blit(self.selectImg[0],(40,10))
            screen.blit(self.levelImg[0],(288,264))
        elif self.menuStair==2:
            # Player selection (reached from Start or Practice): dim the
            # portrait that is not currently selected.
            if self.selectNum[0]==0 or self.selectNum[0]==2:
                screen.blit(self.selectImg[1],(40,10))
                for i in range(0,2):
                    self.playerTitleImg[i].set_alpha(256)
                if self.selectNum[2]==0:
                    self.playerTitleImg[1].set_alpha(100)
                elif self.selectNum[2]==1:
                    self.playerTitleImg[0].set_alpha(100)
                for i in range(0,2):
                    screen.blit(self.playerTitleImg[i],(450*i,120))
        elif self.menuStair==3:
            # Practice menu: show which spell/non-spell the test starts from.
            if self.selectNum[0]==2:
                if self.ifSpell:
                    pracText=self.font.render('Test: Start From Spell No.'+str(self.testSpellNum),True,(255,255,255))
                else:
                    pracText=self.font.render('Test: Start From non-Spell No.'+str(self.testSpellNum),True,(255,255,255))
                screen.blit(pracText,(200,300))
def doSelection(self,pressed_keys,pressed_keys_last,player):
if pressed_keys[K_z]!=pressed_keys_last[K_z] and pressed_keys[K_z]:
if self.menuStair==0:
if self.selectNum[self.menuStair]==0:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.selectNum[self.menuStair]==2:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.selectNum[self.menuStair]==7:
global_var.get_value('ok_sound').play()
pygame.quit()
sys.exit()
else:
global_var.get_value('invalid_sound').stop()
global_var.get_value('invalid_sound').play()
elif self.menuStair==1:
if self.selectNum[0]==0 or self.selectNum[0]==2:
if self.selectNum[self.menuStair]==0:
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.menuStair==2:
if self.selectNum[0]==0:
if self.selectNum[self.menuStair]==0:
global_var.set_value('playerNum',0)
elif self.selectNum[self.menuStair]==1:
global_var.set_value('playerNum',1)
global_var.get_value('ok_sound').play()
global_var.get_value('ok_sound').play()
global_var.set_value('ifTest',False)
pygame.mixer.music.stop()
pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3') # 载入背景音乐文件
#pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
pygame.mixer.music.set_volume(0.6) # 设定背景音乐音量
pygame.mixer.music.play(loops=-1)
self.menuStair=0
global_var.set_value('menu',False)
self.playerReset=True
if self.selectNum[0]==2:
if self.selectNum[self.menuStair]==0:
global_var.set_value('playerNum',0)
elif self.selectNum[self.menuStair]==1:
global_var.set_value('playerNum',1)
global_var.get_value('ok_sound').play()
self.menuStair+=1
elif self.menuStair==3:
if self.selectNum[0]==2:
global_var.get_value('ok_sound').play()
global_var.set_value('ifTest',True)
global_var.set_value('ifSpellTest',self.ifSpell)
global_var.set_value('spellNum',self.testSpellNum)
pygame.mixer.music.stop()
pygame.mixer.music.load('resource/bgm/lightnessOnTheWay.mp3') # 载入背景音乐文件
#pygame.mixer.music.load('resource/bgm/上海アリス幻樂団 - 死体旅行~ Be of good cheer!.mp3')
pygame.mixer.music.set_volume(0.6) # 设定背景音乐音量
pygame.mixer.music.play(loops=-1)
self.menuStair=0
global_var.set_value('menu',False)
self.playerReset=True | 15,809 | 5,287 |