text stringlengths 38 1.54M |
|---|
from setuptools import find_packages, setup
# Packaging metadata for the `carnav` gym-environment package.
setup(
    name='carnav',
    version='0.0.1',
    packages=['carnav'],
    include_package_data=True,
    install_requires=['gym', 'numpy', 'pillow', 'scipy', 'matplotlib'],
)
# Application constants for the Telegram bot (fill these in before running).
# TELEGRAM_TOKEN = telegram token to access bot
#   (https://core.telegram.org/bots check point number 6)
# API_ID = telegram api id to access telegram api
#   (https://core.telegram.org/api/obtaining_api_id)
# API_HASH = telegram api hash to access telegram api
#   (https://core.telegram.org/api/obtaining_api_id)
# PHONE = registered telegram phone number
# SESSION_NAME = telegram bot session name (this can be anything)
TELEGRAM_TOKEN = ''  # bot token from @BotFather
API_ID = 123456  # placeholder value; replace with your real api_id
API_HASH = ''
PHONE = ''
SESSION_NAME = 'tele_session'
|
# Reads an integer x, splits it into hundreds and a remainder, then greedily
# decomposes the remainder into parts of size 5, 4, 3, 2, 1.  Prints 1 when
# the number of parts does not exceed the hundreds count, else 0.
x = int(input())
hundreds, remainder = divmod(x, 100)
parts = []
for size in range(5, 0, -1):
    parts.append(remainder // size)
    remainder %= size
print(int(sum(parts) <= hundreds))
|
# Import necessary packages
import numpy as np
import igraph as ig
from scipy.special import gammaln
from itertools import combinations, product
import multiprocessing as mp
import os
import time
def PlotGraph(G):
    """Plot igraph graph G at its stored vertex coordinates (y axis flipped for display)."""
    # Plots graph G with appropriate positions of vertices
    # NOTE(review): `layout` is a zip iterator; confirm ig.plot accepts an
    # iterator (a list(...) wrapper may be needed on some igraph versions).
    layout = zip(G.vs["x"], -1*np.array(G.vs["y"]))
    pl = ig.plot(G, layout=layout)
    pl.show()
def ConstructSquareLattices(N_x, N_y, w, h):
    """Construct a cylindrical square lattice of N_x columns and N_y+2 rungs.

    Each rung is a ring of N_x vertices closed by a periodic (wrap-around)
    edge; consecutive rungs are joined by "ladder" edges.  A final extra ring
    is added below the first rung.  Vertex attributes "x"/"y" hold plot
    coordinates and every edge carries weight 1.

    Returns (Square_Lattice, Square_Lattice_Bare) where the bare copy has the
    same vertices but all edges removed.
    """
    # Constructs cylindrical square lattices
    # Square lattice
    Square_Lattice = ig.Graph()  # Vertices are indexed from 0 in igraph
    x_squares = np.arange(0.0, w * N_x, w)
    y_squares = 0 * h * np.ones(N_x)
    # Offset the seam vertex slightly — presumably so the periodic edge is
    # visible when plotted; TODO confirm.
    y_squares[-1] = y_squares[-1] + 0.35 * h
    Square_Lattice.add_vertices(N_x)
    Square_Lattice.vs["x"] = x_squares[0:N_x]
    Square_Lattice.vs["y"] = y_squares[0:N_x]
    Square_Lattice.add_edges(zip(range(0, N_x-1), range(1, N_x)))
    Square_Lattice.add_edge(0, N_x-1)  # periodic edge closing the first ring
    Square_Lattice.es["weight"] = np.ones(N_x)
    for rung in range(2, N_y + 1 + 1):
        # Add one more ring of vertices and connect it to the ring below.
        x_squares = np.append(x_squares, np.arange(0.0, w*N_x, w))
        y_squares = np.append(y_squares, (rung - 1)*h*np.ones(N_x))
        y_squares[-1] = y_squares[-1] + 0.35*h
        Square_Lattice.add_vertices(N_x)
        Square_Lattice.vs.select(range((rung - 1)*N_x, rung*N_x))["x"] = x_squares[(rung - 1)*N_x:rung*N_x]
        Square_Lattice.vs.select(range((rung - 1)*N_x, rung*N_x))["y"] = y_squares[(rung - 1)*N_x:rung*N_x]
        Square_Lattice.add_edges(zip(range((rung - 1)*N_x, rung*N_x-1), range((rung - 1)*N_x + 1, rung*N_x)))
        Square_Lattice.add_edge((rung - 1)*N_x, rung*N_x - 1)  # periodic edge for this ring
        Square_Lattice.es.select(range((rung - 1)*N_x, rung*N_x))["weight"] = np.ones(N_x)
        for ladder in range(0, N_x):
            # Vertical edge joining this rung to the previous one.
            Square_Lattice.add_edge(((rung - 1)*N_x) + ladder, ((rung - 2)*N_x) + ladder, weight=1)
    # Extra ring drawn below the first rung (at y = -h), joined to rung 1.
    x_squares = np.append(x_squares, np.arange(0.0, w*N_x, w))
    y_squares = np.append(y_squares, -1*h*np.ones(N_x))
    y_squares[-1] = y_squares[-1] + 0.35*h
    Square_Lattice.add_vertices(N_x)
    Square_Lattice.vs.select(range((N_y + 1)*N_x, (N_y + 2)*N_x))["x"] = x_squares[(N_y + 1)*N_x: (N_y + 2)*N_x]
    Square_Lattice.vs.select(range((N_y + 1)*N_x, (N_y + 2)*N_x))["y"] = y_squares[(N_y + 1)*N_x: (N_y + 2)*N_x]
    Square_Lattice.add_edges(zip(range(0, N_x), range(N_x*(N_y + 1), N_x*(N_y + 2))))
    Square_Lattice.es.select(range((N_y + 1)*N_x, (N_y + 2)*N_x))["weight"] = np.ones(N_x)
    # Square lattice with no edges
    Square_Lattice_Bare = Square_Lattice.copy()
    Square_Lattice_Bare.delete_edges(range(Square_Lattice_Bare.ecount()))
    return Square_Lattice, Square_Lattice_Bare
def ComputeRandomUniqueCombinations(N_faces, n, samples):
    """Draw `samples` distinct, sorted n-element subsets of {1, ..., N_faces}.

    Rows are filled with random sorted n-subsets; once the array is full,
    duplicate rows are removed and drawing resumes into the freed (zero-padded)
    rows until `samples` unique rows exist.  The caller must guarantee
    C(N_faces, n) >= samples, otherwise this loop cannot terminate.
    """
    np.random.seed()  # re-seed so parallel workers do not share RNG state
    combs = np.zeros([samples, n])
    filled = 0
    while filled < samples:
        # Draw one random n-subset, sorted, into the next open row.
        combs[filled, :] = np.sort(np.random.permutation(range(1, N_faces + 1))[0:n])
        filled += 1
        if filled == samples:
            # Deduplicate; zero-pad back to `samples` rows and keep drawing
            # from the first padded row if duplicates were removed.
            combs = np.unique(combs, axis=0)
            filled = np.shape(combs)[0]
            combs = np.pad(combs, ((0, samples - filled), (0, 0)), 'constant')
    return combs
def FlipSquareLatticeFaces(Lattice, coords, N_x):
    """Toggle (XOR) the four boundary edges of each face listed in `coords`.

    coords holds 1-based (x, y) face coordinates; N_x is the number of faces
    per row.  An edge shared by two flipped faces is toggled twice and so
    cancels, which composes face flips like a symmetric difference.
    Returns the mutated Lattice.
    """
    # Flips the faces in coords
    for x, y in coords:
        v = N_x * (y - 1) + x - 1  # Lower left vertex of face to be flipped
        # Edges to be flipped
        if np.mod(v+1, N_x) == 0:
            # Face lies on the periodic seam: wrap horizontally to column 0.
            to_flip = np.array([[v, v+N_x], [v, N_x*(y-1)], [v+N_x, N_x*y], [N_x*(y-1), N_x*y]]).astype(int)
        else:
            to_flip = np.array([[v, v + 1], [v, v + N_x], [v + N_x, v + 1 + N_x], [v + 1, v + 1 + N_x]]).astype(int)
        # Flips edges: delete if present, add (weight 1) if absent.
        for v1, v2 in to_flip:
            if Lattice.are_connected(v1, v2):
                Lattice.delete_edges((v1, v2))
            else:
                Lattice.add_edge(v1, v2, weight=1)
    return Lattice
def AddNCLoop(Lattice, N_x, rung):
    """Toggle a noncontractible loop of edges around the cylinder at `rung`.

    Each horizontal edge along the rung — including the wrap-around edge that
    closes the ring — is deleted if present, otherwise added with weight 1.
    Returns the mutated Lattice.
    """
    base = int((rung - 1) * N_x)
    # Consecutive-neighbour edges along the rung, plus the seam edge.
    ring_edges = [(base + i, base + i + 1) for i in range(0, N_x - 1)]
    ring_edges.append((base, int(rung * N_x - 1)))
    for v1, v2 in ring_edges:
        if Lattice.are_connected(v1, v2):
            Lattice.delete_edges((v1, v2))
        else:
            Lattice.add_edge(v1, v2, weight=1)
    return Lattice
def ComputeAs_component_0_Square(N_x, Lattice_Initial, deg2_weight, loop_weight, rung):
    """Weight of the n = 0 sector (no faces flipped).

    Sums the empty (contractible) configuration, which contributes 1, and the
    configuration with a single noncontractible loop added at `rung`.
    """
    # Computes 0th component of A (configuration with no strings)
    As_component_0 = 0
    # Contractible configuration (no faces flipped)
    As_component_0 += 1
    # Noncontractible configuration
    Lattice_nc = AddNCLoop(Lattice_Initial.copy(), N_x, rung)
    deg2 = Lattice_nc.degree().count(2)
    loops = 1  # the added noncontractible loop is the only loop present
    As_component_0 += (deg2_weight**deg2)*(loop_weight**loops)
    return As_component_0
def ComputeAs_component_contribution_Square(N_x, N_y, Lattice_Initial, deg2_weight, gamma, loop_weight,
                                            combs, rung, sample):
    """Weight contributed by one face-flip combination (row `sample` of combs).

    Flips the faces of the sampled combination, then sums the weights of the
    resulting contractible configuration and of the same configuration with a
    noncontractible loop added at `rung`.  Every degree-4 vertex contributes
    the aggregate factor `gamma`.
    """
    # Computes contribution to A (configurations with no strings) by flipping faces of combs
    As_component_contribution = 0
    # Finds coordinates of faces to be flipped in loop configuration
    n = np.shape(combs)[1]
    coords = np.zeros([n, 2])
    for j in range(0, n):
        # Map 1-based face index to 1-based (column, row) coordinates.
        coords[j, :] = [np.floor((combs[sample, j] - 1) / N_y) + 1, np.mod(combs[sample, j] - 1, N_y) + 1]
    # Flips faces, contractible configuration
    Lattice_c = FlipSquareLatticeFaces(Lattice_Initial.copy(), coords, N_x)
    # Flips faces, noncontractible configuration
    Lattice_nc = AddNCLoop(Lattice_c.copy(), N_x, rung)
    # Adds contribution from contractible lattice configuration
    deg2 = Lattice_c.degree().count(2)
    deg4 = Lattice_c.degree().count(4)
    loops = len([x for x in Lattice_c.components() if len(x) > 1])  # ignore isolated vertices
    As_component_contribution += (deg2_weight)**(deg2)*gamma**(deg4)*loop_weight**(loops)
    # Adds contribution from noncontractible lattice configuration
    deg2 = Lattice_nc.degree().count(2)
    deg4 = Lattice_nc.degree().count(4)
    loops = len([x for x in Lattice_nc.components() if len(x) > 1])
    As_component_contribution += (deg2_weight)**(deg2)*gamma**(deg4)*loop_weight**(loops)
    return As_component_contribution
def ComputeAllPairs(lst):
    """Yield every way to partition `lst` into unordered pairs, flattened.

    Each yielded list is [a1, b1, a2, b2, ...].  Lists shorter than 2 yield a
    single empty pairing; odd-length lists yield, for each element left out in
    turn, all pairings of the remaining elements.
    """
    if len(lst) < 2:
        yield []
        return
    if len(lst) % 2 == 1:
        # Odd length: skip each element once and pair up whatever remains.
        for skip in range(len(lst)):
            yield from ComputeAllPairs(lst[:skip] + lst[skip + 1:])
    else:
        first = lst[0]
        for j in range(1, len(lst)):
            head = [first, lst[j]]
            for tail in ComputeAllPairs(lst[1:j] + lst[j + 1:]):
                yield head + tail
def Compute_deg4_configs(deg4, deg4_samples):
    """Return configurations (values 1-3) for `deg4` degree-4 vertices.

    When all 3**deg4 configurations fit within `deg4_samples`, enumerate them
    exactly; otherwise draw `deg4_samples` random configurations.
    """
    if deg4_samples < 3**deg4:
        # Too many to enumerate: sample uniformly at random.
        np.random.seed()
        return np.random.randint(3, size=(deg4_samples, deg4)) + 1
    # Exhaustive enumeration of every assignment in {1, 2, 3}^deg4.
    return np.reshape([cfg for cfg in product([1, 2, 3], repeat=deg4)], (-1, deg4))
def ComputeLoopProperties_Square(N_x, N_y, N_faces, Lattice_Initial, deg2_weight, CP_weight, CR_weight, loop_weight,
                                 n_low, n_high, samples, deg4_samples, iteration):
    """Compute weighted averages of loop number, loop size and total loop size.

    For each number of flipped faces n in [n_low, n_high], this sums — over
    sampled or exhaustively enumerated face-flip combinations — the weights of
    the contractible configuration and of the same configuration with a
    noncontractible loop added at a rung derived from `iteration`.  Degree-4
    vertices are averaged over corner-pass/crossing resolutions.
    Returns (loop_number, loop_size, loop_total_size).
    """
    # Computes average loop number and loop size
    print('Iteration: ' + str(iteration + 1))
    rung = np.mod(iteration, N_y + 1) + 1  # rung used for the noncontractible loop
    loop_numbers = np.zeros(n_high - n_low + 1)
    loop_sizes = np.zeros(n_high - n_low + 1)
    loop_total_sizes = np.zeros(n_high - n_low + 1)
    Z0_components = np.zeros(n_high - n_low + 1)  # Components of Z with no strings
    for n in range(n_low, n_high + 1):
        if np.mod(iteration + 1, 16) == 0:
            print('Iteration: ' + str(iteration + 1) + ' n: '+str(n))
        if n == 0:
            # Contribution from contractible configuration (no faces flipped)
            Z0_components[0] += 1
            #loop_sizes[0] += 1
            # Contribution from noncontractible configuration
            Lattice_nc = AddNCLoop(Lattice_Initial.copy(), N_x, rung)
            deg2 = Lattice_nc.degree().count(2)
            loops = len([x for x in Lattice_nc.components() if len(x) > 1])
            edges = Lattice_nc.ecount()
            w = (deg2_weight**deg2)*(loop_weight**loops)
            Z0_components[0] += w
            loop_numbers[0] += loops*w
            loop_sizes[0] += edges/loops*w
            loop_total_sizes[0] += edges*w
        else:
            n_index = n-n_low
            # Constructs list of combinations (loop configurations) to analyze
            if (gammaln(N_faces+1) - gammaln(n+1) - gammaln(N_faces-n+1)) > \
                    np.log(samples):  # equivalent to nchoosek(N_faces, n) > samples
                combs = ComputeRandomUniqueCombinations(N_faces, n, samples)
            else:
                combs = np.reshape(list(combinations(range(1, N_faces + 1), n)), (-1, n))
            # Computes exp(-energy) for each loop config to be analyzed
            for i in range(0, np.shape(combs)[0]):
                # Finds coordinates of faces to be flipped in loop configuration
                coords = np.zeros([n, 2])
                for j in range(0, n):
                    coords[j, :] = [np.floor((combs[i, j] - 1) / N_y) + 1, np.mod(combs[i, j] - 1, N_y) + 1]
                # Flips faces, contractible config
                Lattice_c = FlipSquareLatticeFaces(Lattice_Initial.copy(), coords, N_x)
                # Flips faces, noncontractible config
                Lattice_nc = AddNCLoop(Lattice_c.copy(), N_x, rung)
                # Contribution from contractible configuration
                deg2 = Lattice_c.degree().count(2)
                deg4 = Lattice_c.degree().count(4)
                loops0 = len([x for x in Lattice_c.components() if len(x) > 1])
                edges = Lattice_c.ecount()
                if deg4 >= 1:
                    # Average over resolutions of each degree-4 vertex
                    # (corner pass types 1/3 or crossing 2).
                    deg4_configs = Compute_deg4_configs(deg4, deg4_samples)
                    deg4_avg_factor = (3**deg4/np.shape(deg4_configs)[0])
                    for deg4_config in deg4_configs:
                        CP1 = list(deg4_config).count(1)  # Corner passes of type 1
                        CP2 = list(deg4_config).count(3)  # Corner passes of type 2
                        CP = CP1 + CP2
                        CR = list(deg4_config).count(2)  # Crossings
                        loops = loops0+CP1  # One type of corner pass increases the number of loops
                        w = (deg2_weight**deg2)*(CP_weight**CP)*(CR_weight**CR)*(loop_weight**loops)*(deg4_avg_factor)
                        Z0_components[n_index] += w
                        loop_numbers[n_index] += loops*w
                        loop_sizes[n_index] += (edges/loops)*w
                        loop_total_sizes[n_index] += edges * w
                else:
                    loops = loops0
                    w = (deg2_weight**deg2)*(loop_weight**loops)
                    Z0_components[n_index] += w
                    loop_numbers[n_index] += loops*w
                    loop_sizes[n_index] += (edges/loops)*w
                    loop_total_sizes[n_index] += edges * w
                # Contribution from noncontractible configuration
                deg2 = Lattice_nc.degree().count(2)
                deg4 = Lattice_nc.degree().count(4)
                loops0 = len([x for x in Lattice_nc.components() if len(x) > 1])
                edges = Lattice_nc.ecount()
                if deg4 >= 1:
                    deg4_configs = Compute_deg4_configs(deg4, deg4_samples)
                    deg4_avg_factor = (3**deg4/np.shape(deg4_configs)[0])
                    for deg4_config in deg4_configs:
                        CP1 = list(deg4_config).count(1)  # Corner passes of type 1
                        CP2 = list(deg4_config).count(3)  # Corner passes of type 2
                        CP = CP1 + CP2
                        CR = list(deg4_config).count(2)  # Crossings
                        loops = loops0+CP1  # One type of corner pass increases the number of loops
                        w = (deg2_weight**deg2)*(CP_weight**CP)*(CR_weight**CR)*(loop_weight**loops)*deg4_avg_factor
                        Z0_components[n_index] += w
                        loop_numbers[n_index] += loops*w
                        loop_sizes[n_index] += (edges/loops)*w
                        loop_total_sizes[n_index] += edges * w
                else:
                    loops = loops0
                    w = (deg2_weight**deg2)*(loop_weight**loops)
                    Z0_components[n_index] += w
                    loop_numbers[n_index] += loops*w
                    loop_sizes[n_index] += (edges/loops)*w
                    loop_total_sizes[n_index] += edges * w
            if (gammaln(N_faces + 1) - gammaln(n + 1) - gammaln(N_faces - n + 1)) > \
                    np.log(samples):  # equivalent to nchoosek(N_faces, n) > samples
                # Rescale sampled sums to estimate the full sum over all
                # C(N_faces, n) combinations.
                Z0_components[n_index] *= 1/samples*np.exp(gammaln(N_faces+1)-gammaln(n+1)-gammaln(N_faces-n+1))
                loop_numbers[n_index] *= 1/samples*np.exp(gammaln(N_faces+1)-gammaln(n+1)-gammaln(N_faces-n+1))
                loop_sizes[n_index] *= 1/samples*np.exp(gammaln(N_faces+1)-gammaln(n+1)-gammaln(N_faces-n+1))
                loop_total_sizes[n_index] *= 1/samples*np.exp(gammaln(N_faces+1)-gammaln(n+1)-gammaln(N_faces-n+1))
    # Normalize the accumulated observables by the partition-function component.
    loop_number = sum(loop_numbers)/(sum(Z0_components))
    loop_size = sum(loop_sizes)/(sum(Z0_components))
    loop_total_size = sum(loop_total_sizes)/(sum(Z0_components))
    return loop_number, loop_size, loop_total_size
if __name__ == '__main__':
    # Driver: sweeps (deg2_weight, loop_weight) pairs, determines the range of
    # face-flip counts needed for the requested accuracy, then averages loop
    # observables over parallel iterations and writes results to text files.
    # NOTE(review): the functions passed to mp.Pool().map are defined inside
    # loops; this relies on the fork start method (POSIX) — confirm it is not
    # run under spawn (Windows/macOS default), where closures cannot be pickled.
    # Parameter specification
    t = time.time()
    N_x = 8  # Number of squares in x direction; assumed to be even
    N_y = 8  # Number of squares in y direction
    N_faces = N_x*N_y  # Total number of squares being considered
    h = 1  # Height of squares
    w = 1  # Width of squares
    # Sweeps over c_2 and c_\ell; c_4 remains fixed
    # Weights of degree 2 vertex
    deg2_weights = np.arange(1.3, 1.5, 0.1)  #np.array([.1, .2, .3])
    # Weight of crossing
    CR_weight = 1/15
    CP_weight = CR_weight  # Weight of a corner pass
    loop_weights = np.arange(1e-20, 3.2, 0.1)  # Weight of a closed loop
    epsilon = 0.01  # Maximum admissible error in coefficients
    samples = 40  # Maximum number of samples (loop configurations) evaluated
    iterations = 32  # Number of iterations over which coefficients are averaged
    deg4_samples = 20  # Maximum number of deg4 configs sampled over
    range_samples = 50  # Number of samples used to determine n_range
    # Initializes lattices; sets the low number of face flips to 0 and high number of face flips to N_faces
    Square_Lattice, Square_Lattice_Bare = ConstructSquareLattices(N_x, N_y, w, h)
    n_low = 0
    n_high = N_faces
    loop_numbers = np.zeros([len(deg2_weights), len(loop_weights)])
    loop_sizes = np.zeros([len(deg2_weights), len(loop_weights)])
    loop_total_sizes = np.zeros([len(deg2_weights), len(loop_weights)])
    Lattice_Initial = Square_Lattice_Bare.copy()
    for loop_index in range(len(loop_weights)):
        for deg2_index in range(len(deg2_weights)):
            # Sets weights
            loop_weight = loop_weights[loop_index]
            gamma = (loop_weight+1)*CP_weight+CR_weight  # Contribution from deg4 vertex if all deg4 configs are valid
            deg2_weight = deg2_weights[deg2_index]
            print('\n \n \n deg2_weight = ' + str(deg2_weight) + '\n')
            print('\n \n \n deg4_weight = ' + str(CR_weight) + '\n')
            # Determines low and high number of faces to be flipped, such that Z is accurate to within epsilon percent
            print('Determining n_range')
            if epsilon == 0:
                n_low = 0
                n_high = N_faces
            else:
                As_component = np.zeros(N_faces + 1)
                rung = np.random.randint(1, N_y + 1 + 1)
                Lattice_Initial = Square_Lattice_Bare.copy()
                As_component[0] = ComputeAs_component_0_Square(N_x, Lattice_Initial, deg2_weight, loop_weight, rung)
                max = As_component[0]  # NOTE(review): shadows the builtin `max` in this branch
                max_n_index = 0
                # Highest and lowest number of 'on' faces to be considered
                high = N_faces
                low = 0
                for n in range(1, N_faces + 1):
                    if np.mod(n, 1) == 0:
                        print('n: ' + str(n))
                    # Constructs list of combinations (loop configurations) to analyze
                    if (gammaln(N_faces + 1) - gammaln(n + 1) - gammaln(N_faces - n + 1)) > \
                            np.log(range_samples):  # equivalent to nchoosek(N_faces, n) > samples
                        combs = ComputeRandomUniqueCombinations(N_faces, n, range_samples)
                        avg = 1
                    else:
                        combs = np.reshape(list(combinations(range(1, N_faces + 1), n)), (-1, n))
                        avg = 0
                    # Computes exp(-energy) for each loop config to be analyzed
                    def ComputeAs_component_contribution_Square_parallel(sample):
                        # Worker wrapper closing over the current sweep state.
                        return ComputeAs_component_contribution_Square(N_x, N_y, Lattice_Initial, deg2_weight, gamma,
                                                                       loop_weight, combs, rung, sample)
                    pool = mp.Pool()
                    As_component_contributions = \
                        np.transpose(
                            pool.map(ComputeAs_component_contribution_Square_parallel, range(np.shape(combs)[0])))
                    pool.close()
                    pool.join()
                    if avg == 1:
                        # Sampled: rescale the mean by C(N_faces, n).
                        As_component[n] = np.mean(As_component_contributions) * \
                            np.exp(gammaln(N_faces + 1) - gammaln(n + 1) - gammaln(N_faces - n + 1))
                    else:
                        As_component[n] = np.sum(As_component_contributions[:])
                    if np.abs(As_component[n]) > max:
                        max = abs(As_component[n])
                        max_n_index = n
                    # elif np.abs(As_component[n]) * (N_faces - n) / max < epsilon:
                    elif np.abs(As_component[n]) * (N_faces - n) / np.sum(As_component) < epsilon:
                        # Remaining terms are negligible; stop expanding upward.
                        high = n
                        break
                for n in range(max_n_index, 0, -1):
                    # if np.abs(As_component[n]) * (n + 1) / max < epsilon:
                    if np.abs(As_component[n]) * (n + 1) / np.sum(As_component) < epsilon:
                        low = n
                        break
                approx_error = (np.abs(As_component[high] * (N_faces - high)) + np.abs(As_component[low - 1])*(low))/\
                    np.abs(np.sum(As_component[low:high + 1]))
                if high == N_faces:
                    approx_error = 0
                As_determine_n = As_component[np.nonzero(As_component)]
                n_low = low
                n_high = high
                print('\n As for determining n_range: ')
                print(As_determine_n)
                print('\n n_high: ' + str(n_high))
                print('n_low: ' + str(n_low))
                print('error: <= ' + str(approx_error))
            # Computes average loop perimeter and number
            def ComputeLoopProperties_Square_parallel(iteration):
                # Worker wrapper closing over the current sweep state.
                return ComputeLoopProperties_Square(N_x, N_y, N_faces, Lattice_Initial, deg2_weight, CP_weight,
                                                    CR_weight, loop_weight, n_low, n_high, samples, deg4_samples, iteration)
            pool = mp.Pool()
            loop_numbers_iters, loop_sizes_iters, loop_total_sizes_iters = np.transpose(
                pool.map(ComputeLoopProperties_Square_parallel, range(iterations)))
            pool.close()
            pool.join()
            # Averages loop properties and prints results
            loop_numbers[deg2_index, loop_index] = np.mean(loop_numbers_iters)
            loop_sizes[deg2_index, loop_index] = np.mean(loop_sizes_iters)
            loop_total_sizes[deg2_index, loop_index] = np.mean(loop_total_sizes_iters)
            print('Average loop number: ' + str(loop_numbers[deg2_index]))
            print('Average loop size (perimeter): ' + str(loop_sizes[deg2_index]))
            print('Average total loop size (perimeter): ' + str(loop_total_sizes[deg2_index]) + '\n \n')
    # Prints results
    t2 = time.time()
    print('\n \n \n \n ALL DONE!!!')
    print('Runtime: ' + str(t2 - t))
    print('\n deg2_weights: \n')
    for w in deg2_weights:
        print(w)
    print('\n loop_weight: \n')
    for w in loop_weights:
        print(w)
    print('\n deg4_weight: \n')
    print(CP_weight)
    print('\n Average loop numbers: ')
    for i in range(len(deg2_weights)):
        for j in range(len(loop_weights)):
            print(loop_numbers[i, j])
    print('\n Average loop sizes (perimeters): ')
    for i in range(len(deg2_weights)):
        for j in range(len(loop_weights)):
            print(loop_sizes[i, j])
    print('\n Average total loop sizes (perimeters): ')
    for i in range(len(deg2_weights)):
        for j in range(len(loop_weights)):
            print(loop_total_sizes[i, j])
    # Outputs results to text file
    with open(os.path.basename(__file__) + ".txt", "w") as text_file:
        print('N_x = ' + str(N_x), file=text_file)
        print('N_y = ' + str(N_y), file=text_file)
        print('deg2_weights = ', file=text_file)
        print(deg2_weights, file=text_file)
        print('deg4_weight = ' + str(CP_weight), file=text_file)
        print('loop_weights = ', file=text_file)
        print(loop_weights, file=text_file)
        print('samples = ' + str(samples), file=text_file)
        print('iterations = ' + str(iterations), file=text_file)
        print('deg4_samples = ' + str(deg4_samples) + '\n', file=text_file)
        print('Runtime: ' + str(t2 - t), file=text_file)
        print('\n \n \n deg2_weights: ', file=text_file)
        for i in range(len(deg2_weights)):
            print(str(deg2_weights[i]), file=text_file)
        print('\n Average loop numbers: ', file=text_file)
        print(loop_numbers, file=text_file)
        print('\n Average loop sizes (perimeters): ', file=text_file)
        print(loop_sizes, file=text_file)
        print('\n Average total loop sizes (perimeters): ', file=text_file)
        print(loop_total_sizes, file=text_file)
    # Machine-readable copies of the result matrices.
    np.savetxt(str(os.path.basename(__file__) + "_LoopNumbers.txt"), loop_numbers, delimiter='\t')
    np.savetxt(str(os.path.basename(__file__) + "_LoopSizes.txt"), loop_sizes, delimiter='\t')
    np.savetxt(str(os.path.basename(__file__) + "_LoopTotalSizes.txt"), loop_total_sizes, delimiter='\t')
|
import os.path
from setuptools import setup, find_packages
import stun
def main():
    """Build and register the pystun3 distribution.

    Reads the long description from the README.rst next to this script, then
    delegates all packaging metadata to setuptools.setup().
    """
    src = os.path.realpath(os.path.dirname(__file__))
    # Fix: close the README handle deterministically; the original used a bare
    # open(...).read(), leaking the file object until garbage collection.
    with open(os.path.join(src, 'README.rst')) as readme_file:
        README = readme_file.read()
    setup(
        name='pystun3',
        version=stun.__version__,
        packages=find_packages(),
        zip_safe=False,
        license='MIT',
        author='TalkIQ (original authors: gaohawk, Justin Riley)',
        author_email='engineering@talkiq.com',
        url='http://github.com/talkiq/pystun3',
        description='A Python STUN client for getting NAT type and external IP (RFC 3489)',
        long_description=README,
        keywords='STUN NAT',
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: MIT License',
            'Programming Language :: Python :: 2',
            'Programming Language :: Python :: 2.7',
            'Topic :: Internet',
            'Topic :: System :: Networking :: Firewalls',
        ],
        tests_require=['coverage', 'nose', 'prospector'],
        test_suite='tests',
        entry_points={
            'console_scripts': [
                'pystun3=stun.cli:main'
            ]
        }
    )


if __name__ == '__main__':
    main()
|
# Recover the hidden flag by XOR-ing every character of the obfuscated
# password with the fixed single-byte key 0x23.
pw = "@@F\u0011\u0013\u0011\u0013XAE\u001a\u0011G\u0010\u0013\u0015\u0014F\u0015G\u001a\u001b\u0013\u0013\u001b\u0016BE\u0012\u0013B\u0017\u001a\u0011AF@\u0011E\u0016^"
flag = "".join(chr(ord(ch) ^ 0x23) for ch in pw)
print(flag)
|
import os
import sys
from unittest import TestCase
from werkzeug.datastructures import FileStorage
sys.path.append(os.getcwd().split('\Tests')[0])
from Domain import FlightsManager
import mock
class TestUniformedFormat(TestCase):
    """Tests for FlightsManager.convert_flight_data_to_uniformed_format."""

    def setUp(self):
        # Raw log lines containing GPS fixes.
        # NOTE(review): relies on TestData/ existing relative to the CWD, and
        # the opened file is never closed explicitly.
        self.data_with_GPS_point = (open("TestData/2021-01-08 17-17-25.log", 'rb')).readlines()
        self.data_without_GPS_point = []

    def test_convert_flight_data_to_uniformed_format(self):
        # Empty input must yield only the TSV header; real input yields the
        # header plus one "timestamp x y z" line per GPS point.
        result1 = FlightsManager.convert_flight_data_to_uniformed_format(self.data_without_GPS_point)
        result2 = FlightsManager.convert_flight_data_to_uniformed_format(self.data_with_GPS_point)
        self.assertEqual(result1, 'TimeStamp\tPOS_X\tPOS_Y\tPOS_Z\n')
        self.assertEqual(result2, 'TimeStamp\tPOS_X\tPOS_Y\tPOS_Z\n' +
                         '1627013934 -35.3631754 149.1651923 590.75\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n' +
                         '189828205 -35.3632443 149.1652301 584.1\n')
class TestUploadFlight(TestCase):
    """Tests for FlightsManager.upload_flight with valid and invalid inputs."""

    def setUp(self):
        # Round-trip the sample log through FileStorage.save so the on-disk
        # copy matches what the upload endpoint would receive.
        self.log_file_loc = "TestData/2021-01-07 15-59-47.log"
        self.log_file_copy = open(self.log_file_loc, 'rb')
        self.log_file_storage = FileStorage(self.log_file_copy)
        self.log_file_storage.save(dst=self.log_file_loc)
        self.log_file_copy.close()
        self.log_file = open(self.log_file_loc, 'rb')
        self.log_file = FileStorage(self.log_file)
        # Plain text file used to exercise the invalid-file-type path.
        self.text_file_name = 'text_file.txt'
        self.text_file = open("text_file.txt", "w+")
        self.text_file.write("This is a text file an not a log file.")
        self.text_file.close()

    @mock.patch('Domain.FlightsManager.convert_flight_data_to_uniformed_format')
    @mock.patch('DBCommunication.DBAccess.DBAccess')
    def test_valid_input(self, mock_db, mock_convert_flight_data_to_uniformed_format):
        mock_db.return_value = True
        # invalid input
        self.assertDictEqual(FlightsManager.upload_flight(None, {}),
                             {'msg': "Failure, error with the server", 'data': False})
        self.assertDictEqual(FlightsManager.upload_flight(self.log_file, "string is invalid input"),
                             {'msg': "Failure, error with the server", 'data': False})
        # valid input
        mock_convert_flight_data_to_uniformed_format.return_value = ""
        self.assertDictEqual(FlightsManager.upload_flight(self.log_file, {}),
                             {'msg': "Failure, error with the server", 'data': False})
        mock_convert_flight_data_to_uniformed_format.return_value = "12 34 554\n 42 54 65"
        self.assertDictEqual(FlightsManager.upload_flight(self.log_file, {'location': 'summer'}),
                             {'msg': "File uploaded successfully.", 'data': True})

    @mock.patch('DBCommunication.DBAccess.DBAccess')
    def test_invalid_input(self, mock_db):
        mock_db.return_value = True
        self.assertDictEqual(FlightsManager.upload_flight(None, {}), {'msg': 'Failure, error with the server', 'data': False})
        self.assertDictEqual(FlightsManager.upload_flight(self.text_file, {}), {'msg': 'Failure, error with the server', 'data': False})
        self.assertDictEqual(FlightsManager.upload_flight(self.log_file, None), {'msg': 'Failure, error with the server', 'data': False})

    def tearDown(self):
        # Remove the scratch text file created in setUp.
        if os.path.exists(self.text_file_name):
            os.remove(self.text_file_name)
|
import os
# Credentials and endpoints are injected via environment variables.
USERNAME = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
# NOTE(review): env var is 'ARX_ENDPOINT' while the constant is AR_ENDPOINT —
# confirm the mismatch is intentional.
AR_ENDPOINT = os.environ.get('ARX_ENDPOINT')
# String flag, not a bool: defaults to the string 'False'.
AR11_ADVANCED_FEATURES = os.environ.get('AR11_ADVANCED_FEATURES', 'False')
WIREMOCK_SERVER = os.environ.get('WIREMOCK_SERVER')
# WireMock admin API paths used for recording control.
WIREMOCK_START_RECORDING = '__admin/recordings/start'
WIREMOCK_STOP_RECORDING = '__admin/recordings/stop'
WIREMOCK_SNAPSHOT = "__admin/recordings/snapshot"
# NOTE(review): only this path has a leading '/' — verify callers join URLs
# consistently before normalizing.
WIREMOCK_RESET = "/__admin/mappings/reset"
# Column names used when aggregating packet-capture tables.
PACKETS_KEY_COLS = [u'start_time', u'end_time',
                    u'cli_tcp.ip', u'srv_tcp.port_name']
PACKETS_VALUE_COLS = [u'avg_cifs.data_transfer_time',
                      u'sum_srv_tcp.payload_packets',
                      u'avg_tcp.network_time_c2s', u'sum_web.packets']
|
# 2534 - exame geral
# Reads blocks of (n, q): n grades followed by q rank queries; prints the
# grade at each queried 1-based rank in descending order.  Stops at EOF.
try:
    while True:
        n, q = map(int, input().split())
        grades = [int(input()) for _ in range(n)]
        grades.sort(reverse=True)
        for _ in range(q):
            rank = int(input()) - 1
            print(grades[rank])
except EOFError:
    pass
|
# Scoring functions
#
# Licensed under the BSD 3-Clause License
# Copyright (c) 2020, Yuriy Sverchkov
import numpy as np
def gini_of_label_column(y):
    """Gini impurity of a 1-D collection of class labels (0 for empty input)."""
    total: int = len(y)
    if total == 0:
        return 0
    _, counts = np.unique(y, return_counts=True)
    proportions = counts / total
    return 1 - np.sum(proportions * proportions)
def gini_of_p_matrix(pm: np.ndarray):
    """Gini impurity of the row-averaged probability vector of pm (0 if empty)."""
    if len(pm) == 0:
        return 0
    mean_p = pm.mean(axis=0)
    return 1 - np.sum(mean_p * mean_p)
def entropy_of_label_column(y):
    """Shannon entropy (bits) of the empirical label distribution in y (0 if empty)."""
    total = len(y)
    if total == 0:
        return 0
    _, counts = np.unique(y, return_counts=True)
    return entropy_of_p_vector(counts / total)
def entropy_of_p_matrix(pm):
    """Shannon entropy (bits) of the row-averaged probability vector of pm."""
    mean_p = np.mean(pm, axis=0)
    return entropy_of_p_vector(mean_p)
def entropy_of_p_vector(p):
    """Shannon entropy (bits) of probability vector p; zero entries contribute 0."""
    # `where=` guards both the log and the product so p == 0 yields 0, not NaN.
    terms = np.where(p > 0, -p * np.log2(p, where=p > 0), 0)
    return terms.sum()
import random
class Meteor():
    """A falling meteor that drifts sideways and respawns when off-screen."""

    def __init__(self):
        # Spawn just above the top edge of a 400-px-wide play field.
        self.x = random.randint(0, 400)
        self.y = -40
        self.change_x = random.randint(-5, 5)  # horizontal drift per frame
        self.change_y = 1  # base vertical speed
        self.height = 40
        self.width = 40

    def updateMeteor(self, speed, height, width, rocket, rocketValues):
        """Advance one frame and respawn when leaving the play field.

        `speed` and `rocketValues` are unused but kept for caller
        compatibility.  Fall speed grows with the cube root of the rocket's
        height.
        """
        self.x += self.change_x
        self.y += self.change_y + 2*(rocket.getHeight()**(1/3))
        if (self.y >= height or self.x >= width or self.x <= 0):
            # Recycle this meteor by re-running the constructor.
            Meteor.__init__(self)

    @staticmethod
    def isInsideRocket(rocket, coords):
        """Return True if point `coords` = (x, y) lies inside the rocket's box.

        Fix: declared @staticmethod — the original bare def mis-bound `rocket`
        to the instance when called as meteor.isInsideRocket(point).  Existing
        Meteor.isInsideRocket(rocket, coords) call sites are unaffected.
        """
        if (coords[0] >= rocket.getPos_x() and coords[0] <= rocket.getPos_x() + rocket.getWidth() and
                coords[1] >= rocket.getPos_y() and coords[1] <= rocket.getPos_y() + rocket.getLength()):
            return True
        return False

    def collision(self, rocket):
        """Return True if any of the meteor's corner/bottom points hits the rocket."""
        meteor_hit_points = [(self.x, self.y), (self.x + self.width, self.y),
                             (self.x, self.y + self.height), (self.x + self.width, self.y + self.height),
                             (self.x + self.width / 2, self.y + self.height)]
        for point in meteor_hit_points:
            if (Meteor.isInsideRocket(rocket, point)):
                return True
        return False

    def getX(self):
        return self.x

    def getY(self):
        return self.y
|
#!/usr/bin/env python
# Packaging script for the dataset-shift distribution.
# Fixes: the shebang was missing its leading '#' (a SyntaxError), and
# `license=Null` raised a NameError (Null is not a Python name).
from distutils.core import setup

# NOTE(review): the name 'dataset-shuft' looks like a typo for 'dataset-shift'
# — confirm against the published package name before renaming.
setup(
    name='dataset-shuft',
    packages=[],
    version='0.0.1',
    description='dataset-shift for a Python package',
    author='Abhilash Bokka',
    # License is still pending per the classifiers below.
    license='Pending',
    author_email='abhilash.bokka@ucdenver.edu',
    url='https://github.com/ashabhi101/dataset-shift',
    keywords=['dataset', 'template', 'package', 'data shift', 'shift', 'drift', 'covariant shift'],
    classifiers=[
        # Fixed: '1 - Beta' is not a valid trove classifier; tier 1 is named
        # "Planning" ("Beta" is tier 4).
        'Development Status :: 1 - Planning',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Natural Language :: English',
        # NOTE(review): 'License :: Pending' is not an official trove
        # classifier either — confirm the intended license.
        'License :: Pending',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Software Development',
    ],
)
import dash
import os
import dash_html_components as html
from flask_caching import Cache
app = dash.Dash(__name__)
# Underlying Flask instance, exposed for WSGI servers (e.g. gunicorn).
server=app.server
# Redis-backed Flask-Caching configuration.
# NOTE(review): the fallback 'localhost:6379' lacks the 'redis://' scheme most
# Redis clients expect — confirm against the deployment environment.
CACHE_CONFIG={
'CACHE_TYPE':'redis',
'CACHE_REDIS_URL': os.environ.get('REDIS_URL','localhost:6379')
}
cache=Cache()
cache.init_app(server,config=CACHE_CONFIG)
# NOTE(review): 'supress_callback_exceptions' (single 'p') was the historical
# Dash spelling; newer Dash versions use 'suppress_callback_exceptions'.
# Verify the installed Dash version honours this key before renaming it.
app.config.supress_callback_exceptions = True
app.css.config.serve_locally=True
app.scripts.config.serve_locally=True
#server.secret_key=os.environ.get('SECRET_KEY', 'ajiskkjsdkjasfjaffjhsfdkjsfd')
|
#!/usr/bin/env python
from setuptools import setup

# Packaging metadata for the Pyda distribution.
setup(
    name='Pyda',
    version='0.1',
    author='Don Grote',
    author_email='don.grote@gmail.com',
    packages=['common','interfaces','mipsb'],
    test_suite='tests',
)
|
import pathManager.pathSetting as extPath
from vanilla import FloatingWindow, RadioGroup, Button, HUDFloatingWindow, ImageButton, TextBox, EditText, CheckBox
from groupingTool.tTopology import topologyButtonEvent as tbt
from groupingTool.tMatrix import matrixButtonEvent as mbt
from groupingTool.tMatrix.PhaseTool import *
from mojo.UI import *
from rbWindow.Controller import smartSetSearchModule
from rbWindow.Controller.toolMenuController import *
from rbWindow.Controller.smartSetFocus import *
from rbWindow.ExtensionSetting.extensionValue import *
from fontParts.world import *
from fontParts.fontshell.contour import *
matrixMode = 0
topologyMode = 1
optionList = ["penPair", "stroke", "innerType", "dependX", "dependY"]
class subAttributeWindow:
    """Floating HUD window offering a radio list of attributes to delete, with Apply."""

    def __init__(self, attributeWindow):
        # Parent window; consulted to refresh shared selection state on Apply.
        self.attributeWindow = attributeWindow
        self.createUI()

    def createUI(self):
        """Build and open the delete-attribute window."""
        x=10;y=10;w=80;h=30;space=5;self.size=(150,300);pos=(1200,300);
        self.w = HUDFloatingWindow((pos[0],pos[1], self.size[0],self.size[1]), "DeleteWindow")
        # NOTE(review): the radio shows "innerFill" while optionList stores
        # "innerType" at the same index — confirm which name downstream expects.
        self.w.deleteRadio = RadioGroup((x, y, w, 190),["penPair", "stroke", "innerFill", "dependX", "dependY"],callback=self.radioGroupCallback)
        y += space + h + 190
        self.w.applyButton = Button((x,y,w,35), "Apply", callback=self.buttonCallback)
        self.deleteOption = None  # set once the user picks a radio entry
        self.w.open()

    def radioGroupCallback(self, sender):
        # Map the selected radio index to its attribute name.
        self.deleteOption = optionList[int(sender.get())]

    def buttonCallback(self, sender):
        """Delete the chosen attribute under the current grouping mode, then close."""
        if self.deleteOption is None:
            return
        if self.attributeWindow.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        # NOTE(review): `is` comparison against the small ints matrixMode /
        # topologyMode relies on CPython int interning; `==` would be safer.
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            print("matrix = ", matrix)
            attribute = self.deleteOption
            print("optionList = ",optionList)
            print("self.option = ", self.deleteOption)
            mbt.mdeleteAttribute(groupDict, matrix, attribute)
        elif mode is topologyMode:
            # Topology-mode deletion not implemented yet.
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            pass
        else:
            Message("๋ชจ๋ ์๋ฌ")
        self.w.close()
class attributeWindow:
    """Floating tool window that assigns attributes (innerFill, penPair,
    dependX/Y, stroke, delete, select) to the contour group found by a
    previous search, in either matrix or topology mode.

    State is exchanged with the rest of the extension through
    getExtensionDefault/setExtensionDefault under ``DefaultKey``.
    NOTE(review): original Korean comments/docstrings translated to English;
    code left byte-identical (including mojibake-damaged string literals).
    """
    def __init__(self):
        # Build the UI first, then remember where test output is saved.
        self.createUI()
        self.testPath = getExtensionDefault(DefaultKey+".testPath")
    def createUI(self):
        """Create the floating window with one image button + label per attribute."""
        # Layout cursor (x, y), per-row sizes, and expanded/minimised window sizes.
        x = 10; y = 10; w = 100; h = 30; space = 5; self.size = (200,450); pos = (1200,300); self.minSize = (50,400);
        self.w = HUDFloatingWindow((pos[0],pos[1],self.size[0],self.size[1]), "ToolsWindow", minSize=(self.minSize[0], self.minSize[1]))
        h = 30
        self.w.innerFillButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[0]+".png", callback=self.handleInnerFill)
        self.w.innerFillText = TextBox((x+40,y,w,h), "innerFill")
        y += h + space
        self.w.penPairButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[1]+".png", callback=self.handlePenPair)
        self.w.PenPairText = TextBox((x+40,y,w,h), "penPair")
        y += h + space
        self.w.dependXButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[2]+".png", callback=self.handleDependX)
        self.w.dependXText = TextBox((x+40,y,w,h), "dependX")
        y += h + space
        self.w.dependYButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[3]+".png", callback=self.handleDependY)
        self.w.dependYText = TextBox((x+40,y,w,h), "dependY")
        y += h + space
        self.w.stokeButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[4]+".png", callback=self.handleStroke)
        self.w.strokeText = TextBox((x+40,y,w,h), "stroke")
        y += h + space
        self.w.deleteButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[5]+".png", callback=self.popDelete)
        self.w.deleteText = TextBox((x+40,y,w,h), "delete")
        y += h + space
        self.w.selectButton = ImageButton((x,y,h,h), imagePath=extPath.ImagePath+extPath.attrImgList[6]+".png", callback=self.handleSelect)
        self.w.selectText = TextBox((x+40,y,w,h), "select")
        y += h + space
        self.w.minimizeBox = CheckBox((x,y,80,20), "", callback=self.minimizeCallback, value=False)
        y += h +space
        mode = getExtensionDefault(DefaultKey+".mode")  # NOTE(review): read but never used
        self.w.bind("close", self.close)
        self.w.open()
    def close(self, sender):
        # Drop the window reference when the window closes.
        # NOTE(review): this method is redefined at the bottom of the class;
        # the later definition is the one actually bound.
        self.w = None
    def radioGroupCallback(self,sender):
        # Remember which radio option is active.
        self.option = int(sender.get())
    def updateAttributeComponent(self):
        """
        Created 2020/05/14 by Cho H.W.
        Helper that refreshes the arguments needed to assign attributes
        (e.g. matrix, groupDict, standardGlyph, ...) when the user acts on an
        element other than the one whose groupDict was previously found.
        If the selected contour is already part of the existing groupDict,
        nothing is refreshed and the method simply returns success.
        """
        count = 0
        selectedContour = None
        currentGlyph = CurrentGlyph()
        prevGlyph = getExtensionDefault(DefaultKey+".standardGlyph")
        prevContour = getExtensionDefault(DefaultKey+".standardContour")
        prevGroupDict = getExtensionDefault(DefaultKey+".groupDict")
        mode = getExtensionDefault(DefaultKey+".mode")
        font = getExtensionDefault(DefaultKey+".font")
        matrix = getExtensionDefault(DefaultKey+".matrix")
        print("์์๋จ")
        # Find the currently selected contour.
        for contour in currentGlyph:
            if len(contour.selection) > 0:
                count += 1
                selectedContour = contour
        # Require exactly one selected contour.
        if count != 1:
            Message("ํ๋์ ์ปจํฌ์ด๋ฅผ ์ ํํด์ฃผ์ญ์์ค.")
            return False
        else:
            print("selectedContour = ",selectedContour)
            print("prevContour = ", prevContour)
            # Check whether the selected contour is already in the group dict.
            if selectedContour != prevContour or matrix is None:
                print("์ด์ ์ปจํฌ์ด์ ํ์ฌ ์ปจํฌ์ด๊ฐ ๋ค๋ฆ
๋๋ค. try ์งํ")
                print("ํ์ฌ ์ ํ๋ ์ปจํฌ์ด ์ธ๋ฑ์ค = ",selectedContour.index)
                try:
                    contourList = prevGroupDict[currentGlyph]
                    print("contourList = ", prevGroupDict[currentGlyph])
                    print("currentGlyph = ", currentGlyph)
                    for contourIdx in contourList:
                        print("<<<<<<<<<< contourIdx = ",contourIdx)
                        # If the selected contour is in the previous group dict,
                        # refresh standardContour/standardGlyph/contourNumber.
                        if selectedContour.index == contourIdx:
                            print("selectedContour.index == contourIdx")
                            res = True  # NOTE(review): unused
                            setExtensionDefault(DefaultKey+".standardContour", selectedContour)
                            setExtensionDefault(DefaultKey+".standardGlyph", currentGlyph)
                            setExtensionDefault(DefaultKey+".contourNumber", selectedContour.index)
                            # Refresh the matrix-related settings.
                            if mode is matrixMode:
                                matrix = Matrix(selectedContour, matrix_size)
                                print("์์ฑ๋ ๋งคํธ๋ฆญ์ค = ",matrix)
                                setExtensionDefault(DefaultKey+".matrix", matrix)
                            # Focus the matching smart set.
                            # NOTE(review): searchGroup/getMatchingSmartSet are
                            # called without `self.` — this only works if
                            # module-level functions of the same names exist.
                            checkSetData = searchGroup(currentGlyph, selectedContour.index, mode, font)
                            index = getMatchingSmartSet(checkSetData, currentGlyph, selectedContour.index)
                            if index is not None :
                                smartSetIndexList = list()
                                smartSetIndexList.append(index)
                                selectSmartSets(smartSetIndexList)
                            return True
                    # Same glyph, but the contour is not in the same group dict:
                    # raise so the except branch below handles it.
                    raise Exception
                # Handles the contour belonging to a different smart set, or the
                # search not having been run yet.
                except Exception as e:
                    # NOTE(review): called without `self.` — likely should be
                    # self.updateSmartSetChanged(selectedContour).
                    result = updateSmartSetChanged(selectedContour)
                    if result is False:
                        Message("ํด๋น๋๋ ๊ทธ๋ฃน ๊ฒฐ๊ณผ๊ฐ ์กด์ฌํ์ง ์์ต๋๋ค. ํ์์ ๋จผ์ ์งํํด์ฃผ์ธ์.")
                    return result
            else:
                print("True ๋ฐํ")
                return True
    def updateSmartSetChanged(self, selectedContour):
        """
        When the previous standardContour and the newly selected one belong to
        different smart sets, and a matching smart set has already been found,
        refresh the arguments needed to assign attributes.
        (Helper of updateAttributeComponent.)
        Refreshed values: contourNumber, standardContour, standardGlyph, groupDict.

        @param:
            selectedContour(RContour): parent contour of the point currently
                being given an attribute.
        @return:
            True  : a smart set matching the refreshed contour exists
            False : no smart set matches the refreshed contour
        """
        contourNumber = selectedContour.index;
        glyph = selectedContour.getParent();
        mode = getExtensionDefault(DefaultKey + ".mode")
        font = getExtensionDefault(DefaultKey + ".font")
        checkSetData = searchGroup(glyph, contourNumber, mode, font)
        smartSetIndex = getMatchingSmartSet(checkSetData, glyph, contourNumber)
        smartSetIndexList = list()
        # +1 because index 0 is the "All Glyphs" set.
        # NOTE(review): smartSetIndex may be None here, which would raise a
        # TypeError before the None check below — verify call order.
        smartSetIndexList.append(smartSetIndex+1)
        # selectSmartSets expects a list argument.
        if smartSetIndex is not None:
            selectSmartSets(smartSetIndexList)
            if checkSetData[2] == 0:
                if mode is matrixMode:
                    matrix = Matrix(selectedContour, matrix_size);
                    setExtensionDefault(DefaultKey+".matrix", matrix)
                groupDict = findContoursGroup(checkSetData, font, mode)
                setExtensionDefault(DefaultKey+".groupDict", groupDict)
                setExtensionDefault(DefaultKey+".contourNumber", contourNumber)
                setExtensionDefault(DefaultKey+".standardContour", selectedContour)
                setExtensionDefault(DefaultKey+".standardGlyph", glyph)
            return True
        else:
            return False
    def getMatchingSmartSet(self, checkSetData, glyph, contourNumber):
        """
        Find the smart set that matches the contour when the group dict being
        used for attribute assignment has changed, so it can be swapped in.
        Returns the smart-set index, or None when no set matches.
        """
        sSets = getSmartSets()
        check = 0
        mode = getExtensionDefault(DefaultKey + ".mode")
        glyphConfigure = getConfigure(glyph)
        positionNumber = None
        searchSmartSet = None
        matrix_margin = getExtensionDefault(DefaultKey + ".matrix_margin")
        topology_margin = getExtensionDefault(DefaultKey + ".topology_margin")
        matrix_size = getExtensionDefault(DefaultKey + ".matrix_size")
        font = getExtensionDefault(DefaultKey + ".font")
        if mode is matrixMode:
            searchMode = "Matrix"
        elif mode is topologyMode:
            searchMode = "Topology"
        else:
            return None
        # Determine whether the contour belongs to the initial, medial or
        # final jamo of the syllable.
        #!!
        for i in range(0,len(glyphConfigure[str(glyph.unicode)])):
            for j in range(0,len(glyphConfigure[str(glyph.unicode)][i])):
                if contourNumber == glyphConfigure[str(glyph.unicode)][i][j]:
                    check = 1
                    positionNumber = i
                    break
            if check == 1:
                break
        syllable = ["first", "middle", "final"]
        positionName = syllable[positionNumber]
        check = 0
        index = -1
        for sSet in sSets:
            index += 1
            checkSetName = str(sSet.name)
            checkSetNameList = checkSetName.split('_')
            # Skip sets for another syllable position or search mode.
            if checkSetNameList[1] != positionName or checkSetNameList[2] != searchMode:
                continue
            standardNameList = checkSetNameList[3].split('-')
            standardGlyphUnicode = int(standardNameList[0][1:])
            # NOTE(review): len(standardNameList) looks like it should be
            # len(standardNameList[1]) — confirm against the set-name format.
            standardIdx = int(standardNameList[1][0:len(standardNameList)-1])
            for item in sSet.glyphNames:
                if item != glyph.name:
                    continue
                if mode == 0:
                    # Matrix mode: compare against the set's standard contour.
                    standardGlyph = font["uni" + str(hex(standardGlyphUnicode)[2:]).upper()]
                    standardMatrix=Matrix(standardGlyph.contours[standardIdx],matrix_size)
                    compareController = groupTestController(standardMatrix,matrix_margin)
                    result = compareController.conCheckGroup(glyph[contourNumber])
                    if result is not None:
                        return index
                elif mode == 1:
                    # Topology mode: run the topology judgement instead.
                    standardGlyph = font["uni" + str(hex(standardGlyphUnicode)[2:]).upper()]
                    result = topologyJudgementController(standardGlyph.contours[standardIdx],glyph[contourNumber],topology_margin).topologyJudgement()
                    if result is not False:
                        return index
        return None
    """Methods wired to the window's button callbacks."""
    def handleDependX(self, sender):
        # Assign the dependX attribute to the current group.
        if self.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            mbt.mdependXAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            #!!!
            # TODO: topology-mode dependX is not implemented yet.
            #tbt.dependXAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
        CurrentFont().update()  # refresh the RoboFont UI
        CurrentFont().save(self.testPath)  # persist to the XML/test path
    def handleDependY(self, sender):
        # Assign the dependY attribute to the current group.
        if self.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            mbt.mdependYAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            tbt.mdependYAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
        CurrentFont().update()  # refresh the RoboFont UI
        CurrentFont().save(self.testPath)  # persist to the XML/test path
    def handlePenPair(self, sender):
        # Assign the penPair attribute to the current group.
        if self.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            mbt.mpenPairAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            tbt.penPairAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
        CurrentFont().update()  # refresh the RoboFont UI
        CurrentFont().save(self.testPath)  # persist to the XML/test path
    def handleInnerFill(self, sender):
        # Assign the innerFill attribute to the current group.
        if self.updateAttributeComponent() is False:
            return
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        mode = getExtensionDefault(DefaultKey+".mode")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            mbt.minnerFillAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            tbt.innerFillAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
        CurrentFont().update()  # refresh the RoboFont UI
        CurrentFont().save(self.testPath)  # persist to the XML/test path
    def handleStroke(self, sender):
        # Assign the stroke attribute to the current group.
        if self.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            mbt.mgiveStrokeAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            tbt.mgiveStrokeAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
        CurrentFont().update()  # refresh the RoboFont UI
        CurrentFont().save(self.testPath)  # persist to the XML/test path
    def handleSelect(self, sender):
        # Select all points of the current group in the glyph view.
        print("!!")
        if self.updateAttributeComponent() is False:
            return
        mode = getExtensionDefault(DefaultKey+".mode")
        groupDict = getExtensionDefault(DefaultKey+".groupDict")
        if mode is matrixMode:
            matrix = getExtensionDefault(DefaultKey+".matrix")
            print("matrix = ", matrix)
            mbt.mselectAttribute(groupDict, matrix)
        elif mode is topologyMode:
            standardContour = getExtensionDefault(DefaultKey+".standardContour")
            k = getExtensionDefault(DefaultKey+".k")
            tbt.selectAttribute(groupDict, standardContour, k)
        else:
            Message("๋ชจ๋ ์๋ฌ")
            return
    def popDelete(self, sender):
        # Open the confirmation sub-window for attribute deletion.
        self.subWindow = subAttributeWindow(self)
    def minimizeCallback(self, sender):
        # Toggle between the minimised and full window sizes.
        if sender.get() == True:
            self.w.resize(self.minSize[0], self.minSize[1])
            self.w.minimizeBox.setTitle("")
        else:
            self.w.resize(self.size[0], self.size[1])
            self.w.minimizeBox.setTitle("์ต์ํ")
    def close(self, sender):
        # NOTE(review): duplicate definition — overrides the close() declared
        # near the top of the class; only this one takes effect.
        self.w = None
import unittest
from unittest.mock import patch
from unittest.mock import Mock
from unittest.mock import create_autospec
from lxml import etree as ET
from xliffdict import XLIFFDict
class TestXliffDict(unittest.TestCase):
    """Unit tests for XLIFFDict creation, parsing and error handling."""
    def setUp(self):
        # Minimal Catalyst-flavoured XLIFF document with three trans-units.
        self.from_string = r'''<xliff version="1.2" xmlns="urn:oasis:names:tc:xliff:document:1.2"
xmlns:gs4tr="http://www.gs4tr.org/schema/xliff-ext">
<file datatype="x-CatalystTTK"
original="C:\Users\Something_de.ttk"
source-language="en-US"
target-language="de"
date="2015-03-04T12:56:08Z"
product-name="Alchemy Catalyst"
product-version="Alchemy Catalyst 11.0"
build-num="11.1.132.0" >
<body>
<group id="1-1032"
resname="\resource.resx" restype="x-Form">
<trans-unit id="tu-1" resname="$this" maxwidth="0">
<source></source>
<target></target>
</trans-unit>
<trans-unit id="tu-2" resname="xrLabel37.Text" maxwidth="0">
<source>Sent</source>
<target state="needs-review-l10n"
state-qualifier="exact-match">Gesendet</target>
</trans-unit>
<trans-unit id="tu-3" resname="xrLabel38.Text" maxwidth="0">
<source>Received</source>
<target state="needs-review-l10n"
state-qualifier="exact-match">Empfangen</target>
</trans-unit>
</group>
</body>
</file>
</xliff>'''
        self.xliff = ET.XML(self.from_string)
        self.wrong_xml = ET.XML(r'''<abc></abc>''')
    def tearDown(self):
        pass
    def test_created_from_root_works(self):
        """An XLIFFDict can be built from an already-parsed root element."""
        root = ET.XML(self.from_string)
        result = XLIFFDict.create(root)
        self.assertNotEqual(result, None)
    @patch('xliffdict.ET')
    def test_it_loads_xliff_from_file(self, mock_ET):
        """create() delegates to ET.parse when given a filename."""
        mock_ET.parse.return_value = self.xliff
        XLIFFDict.create("filename")
        self.assertTrue(mock_ET.parse.called, True)
    def test_internal_dict_is_not_empty(self):
        """All three trans-units end up in the segments mapping."""
        parsed = XLIFFDict.create(self.xliff)
        self.assertEqual(len(parsed.segments.values()), 3)
    def test_header_is_not_empty(self):
        """All eight <file> attributes are captured as the header."""
        parsed = XLIFFDict.create(self.xliff)
        self.assertEqual(len(parsed.header.values()), 8)
    def test_raises_exception_for_wrong_xml(self):
        """A non-XLIFF root element is rejected with the expected message."""
        with self.assertRaises(Exception) as exc:
            XLIFFDict.create(self.wrong_xml)
        self.assertEqual(exc.exception.args[0], 'XLIFF file not correct!')
    def test_raises_exception_for_wrong_object(self):
        """An arbitrary non-XML object is rejected with the expected message."""
        with self.assertRaises(Exception) as exc:
            XLIFFDict.create(dict())
        self.assertEqual(exc.exception.args[0], 'XLIFF file not correct!')
    @unittest.skip
    def test_saves_file_to_disk(self):
        """serialize() should delegate to ElementTree.write (currently skipped)."""
        tree_spec = create_autospec(ET.ElementTree)
        parsed = XLIFFDict.create(self.xliff)
        parsed.serialize('filename')
        tree_spec.write.assert_called_with('filename')
if __name__ == '__main__':
unittest.main()
|
# Generated by Django 2.2.5 on 2020-02-26 06:36
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the mini-program backend: users, tree-hole posts and
    # replies, wish bottles and replies, likes, collects, and system messages.
    # NOTE(review): several verbose_name string literals below contain
    # mojibake/garbled Chinese text — reproduced unchanged here; consider
    # re-saving the file as UTF-8 and regenerating the migration.
    initial = True
    dependencies = [
    ]
    operations = [
        # Anonymous posts ("tree holes").
        migrations.CreateModel(
            name='TreeHole',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.CharField(max_length=255, verbose_name='ๅ
ๅฎน')),
                ('likes', models.PositiveIntegerField(default=0, verbose_name='่ต')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('replynum', models.IntegerField(default=0, verbose_name='ๅๅคๆฐ')),
                ('title', models.CharField(max_length=50, verbose_name='ๆ ้ข')),
                ('pic', models.CharField(max_length=255, null=True, verbose_name='ๅพ็')),
            ],
            options={
                'verbose_name': 'ๆ ๆด',
                'verbose_name_plural': 'ๆ ๆด',
                'ordering': ['-time'],
            },
        ),
        # WeChat users, keyed by their open id.
        migrations.CreateModel(
            name='User',
            fields=[
                ('openid', models.CharField(db_index=True, max_length=64, primary_key=True, serialize=False, verbose_name='open_id')),
                ('nickname', models.CharField(max_length=20, null=True, verbose_name='็จๆทๆต็งฐ')),
                ('gender', models.PositiveIntegerField(choices=[(0, 'ๆช็ฅ'), (1, '็ท'), (2, 'ๅฅณ')], default=0, verbose_name='ๆงๅซ')),
                ('avatarurl', models.CharField(blank=True, default='', max_length=255, null=True, verbose_name='ๅคดๅ')),
                ('province', models.CharField(max_length=20, null=True, verbose_name='็')),
                ('city', models.CharField(max_length=20, null=True, verbose_name='ๅๅธ')),
                ('session_key', models.CharField(max_length=64, null=True, verbose_name='session_key')),
                ('cookie_key', models.CharField(max_length=64, null=True, verbose_name='cookie_key')),
            ],
            options={
                'verbose_name': '็จๆท',
                'verbose_name_plural': '็จๆท',
            },
        ),
        # Wish bottles: written by one user, optionally picked up by another.
        migrations.CreateModel(
            name='WishBottle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('content', models.CharField(default='', max_length=255)),
                ('picker', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='picker', to='api.User', verbose_name='ๆกๅฐ็ไบบ')),
                ('writer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='writer', to='api.User', verbose_name='ไฝ่
')),
            ],
            options={
                'verbose_name': 'ๅฟๆฟ็ถ',
                'verbose_name_plural': 'ๅฟๆฟ็ถ',
                'ordering': ['-time'],
            },
        ),
        # Replies to wish bottles.
        migrations.CreateModel(
            name='WishReply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('content', models.CharField(max_length=255)),
                ('replyer', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='ๅๅค่
')),
                ('wishbottle', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.WishBottle', verbose_name='ๅฟๆฟ็ถ')),
            ],
            options={
                'verbose_name': 'ๅฟๆฟ็ถๅๅค',
                'verbose_name_plural': 'ๅฟๆฟ็ถๅๅค',
                'ordering': ['-time'],
            },
        ),
        # Replies to tree-hole posts.
        migrations.CreateModel(
            name='TreeHoleReply',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('content', models.CharField(default='', max_length=255, verbose_name='ๅ
ๅฎน')),
                ('answered_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='ๅๅค่
')),
                ('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='ๆ ๆด')),
            ],
            options={
                'verbose_name': 'ๆ ๆดๅๅค',
                'verbose_name_plural': 'ๆ ๆดๅๅค',
                'ordering': ['-time'],
            },
        ),
        # Author of a tree-hole post (added after both models exist).
        migrations.AddField(
            model_name='treehole',
            name='writer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='ไฝ่
'),
        ),
        # System messages pushed to a user.
        migrations.CreateModel(
            name='SysMsg',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('content', models.TextField(default='', verbose_name='ๅ
ๅฎน')),
                ('flag', models.BooleanField(default=False, verbose_name='ๅทฒ่ฏป')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='็จๆท')),
            ],
            options={
                'verbose_name': '็ณป็ปๆถๆฏ',
                'verbose_name_plural': '็ณป็ปๆถๆฏ',
                'ordering': ['-time'],
            },
        ),
        # A user liking a tree-hole post.
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('open_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='็จๆท')),
                ('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='ๆ ๆด็ผๅท')),
            ],
            options={
                'verbose_name': '่ต',
                'verbose_name_plural': '่ต',
                'ordering': ['-time'],
            },
        ),
        # A user bookmarking a tree-hole post.
        migrations.CreateModel(
            name='Collect',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('time', models.DateTimeField(auto_now_add=True, verbose_name='ๅๅปบๆถ้ด')),
                ('open_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.User', verbose_name='็จๆท')),
                ('treehole_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='api.TreeHole', verbose_name='ๆ ๆด็ผๅท')),
            ],
            options={
                'verbose_name': 'ๆถ่',
                'verbose_name_plural': 'ๆถ่',
                'ordering': ['-time'],
            },
        ),
    ]
|
import datetime
import sys
import math
import pandas as pd
# Analyse a ping log: detect per-server failures (N consecutive timeouts),
# whole-network failures, and overload periods (moving-average response time).
# NOTE(review): original Japanese comments translated to English.
args = sys .argv
f = open('monitoring.log', 'r')
datalist = f.readlines()
time_ip_ping = []
# Servers currently in a failure state (temporary working list)
trouble = []
# Servers whose failure has been recorded
trouble_record = []
# Failure-candidate servers (timed out, but fewer than N times)
trouble_candidate = []
# All server addresses seen in the log
ip_list = []
# Mapping between each IP address and its network
ip_subnet_list = []
# Per-IP DataFrames used for average response times
responsetime_list = []
# Per-network bookkeeping, including failure info
subnet_failure = []
# Timeout-count threshold
N = 3
# Overload rule: average response time over the last m samples exceeds t ms
m = 2
t = 30
if len(args)>1:
    N = int(args[1])
if len(args)>3:
    m = int(args[2])
    t = int(args[3])
# Split each log line into timestamp, IP and response time
for data in datalist:
    data = data.strip('\n')
    time_ip_ping.append(data.split(','));
df_time_ip_ping = pd.DataFrame(time_ip_ping)
df_time_ip_ping.columns = ['time', 'ip', 'ping']
# Build the list of servers
for data in time_ip_ping:
    if not data[1] in ip_list:
        ip_list.append(data[1])
# Convert each IP address to binary and derive its network address
for ip in ip_list:
    ip_split, subnet_temp = ip.split('/')
    ip_split = ip_split.split('.')
    ip_bin = ""
    subnet = ""
    for item in range(int(subnet_temp)):
        subnet = subnet+"1"
    subnet = subnet.ljust(32, '0')
    for item in ip_split:
        ip_bin = ip_bin+format(int(item),'08b')
    # Record the IP-to-network mapping
    ip_subnet_list.append({ip:bin(int(ip_bin,2)&int(subnet,2)), "failure":0})
    # Register the network once in the network list
    network_existence_flag = 0
    for item in subnet_failure:
        if item.get("network") == bin(int(ip_bin,2)&int(subnet,2)):
            network_existence_flag = 1
    if network_existence_flag == 0:
        subnet_failure.append({"network":bin(int(ip_bin,2)&int(subnet,2)), "time":"", 'failure_flag':0, "timeout":0})
# Walk the log line by line
for data in time_ip_ping:
    # If some server is already marked as failing
    if trouble:
        for index, trouble_data in enumerate(trouble):
            # ... and it matches the IP of this entry
            if trouble_data[1] == data[1]:
                # print the failure period once ping responses have resumed
                if data[2] != '-':
                    start_failure = datetime.datetime.strptime(trouble_data[0], '%Y%m%d%H%M%S')
                    end_failure = datetime.datetime.strptime(data[0], '%Y%m%d%H%M%S')
                    print("failure time:", end_failure-start_failure, ", IP address:", data[1], ', start:', start_failure, ', end:', end_failure)
                    del trouble[index]
    # If there is at least one failure-candidate server
    if trouble_candidate:
        for index, trouble_candidate_data in enumerate(trouble_candidate):
            # ... and it matches the IP of this entry
            if trouble_candidate_data[1] == data[1]:
                # drop it from the candidates once ping responses have resumed
                if data[2] != '-':
                    del trouble_candidate[index]
    # Timeout detection
    if data[2] == '-':
        # Check whether the timeout belongs to an already-failing server
        trouble_flag = 0
        for trouble_data in trouble:
            if trouble_data[1] == data[1]:
                trouble_flag = 1
        # Newly observed timeout
        if trouble_flag == 0:
            # Check whether the server is already a failure candidate
            trouble_candidate_flag = 0
            for index, trouble_candidate_data in enumerate(trouble_candidate):
                # Already a candidate: bump its timeout count
                if trouble_candidate_data[1] == data[1]:
                    trouble_candidate_flag = 1
                    trouble_candidate_data[3] = trouble_candidate_data[3]+1
                    # (disabled) bump the timeout count in the IP/network map
                    #for item in ip_subnet_list:
                    #    if data[1] in item:
                    #        item["timeout"] += 1
                    #        print(data[1], item["timeout"])
                    # N or more timeouts: promote to failure, drop from candidates
                    if trouble_candidate_data[3] >= N:
                        trouble.append(trouble_candidate_data)
                        del trouble_candidate[index]
            # New candidate: append with an initial timeout count of 1
            if trouble_candidate_flag == 0:
                data.append(1)
                trouble_candidate.append(data)
                # (disabled) set the timeout count in the IP/network map to 1
                #for item in ip_subnet_list:
                #    if data[1] in item:
                #        item["timeout"] = 1
        # Update the timeout bookkeeping of the server's network
        for ip_subnet_data in ip_subnet_list:
            if data[1] in ip_subnet_data:
                ip_subnet_data["failure"] = 1
                for subnet_failure_data in subnet_failure:
                    if subnet_failure_data["network"] == ip_subnet_data[data[1]]:
                        # Record the time of the first timeout within the network
                        if subnet_failure_data["timeout"] == 0:
                            subnet_failure_data["time"] += data[0]
                        # Bump the network timeout count
                        subnet_failure_data["timeout"] += 1
        # The network has failed when it has N+ timeouts and every server in it
        # has timed out
        for subnet_failure_data in subnet_failure:
            # Check whether any server in the network has NOT timed out
            safe_flag = 0
            if subnet_failure_data['timeout']>=N:
                for ip_subnet_data in ip_subnet_list:
                    for ip_data in ip_list:
                        if ip_data in ip_subnet_data:
                            if ip_subnet_data[ip_data] == subnet_failure_data["network"]:
                                if ip_subnet_data["failure"] == 0:
                                    safe_flag = 1
                if safe_flag == 0:
                    subnet_failure_data["failure_flag"] = 1
                    print("subnet failure:",subnet_failure_data["network"], "start:", subnet_failure_data["time"])
    # No timeout for this entry
    else:
        # Reset the timeout bookkeeping of the server's network
        for ip_subnet_data in ip_subnet_list:
            if data[1] in ip_subnet_data:
                ip_subnet_data["failure"] = 0
                for subnet_failure_data in subnet_failure:
                    if subnet_failure_data["network"] == ip_subnet_data[data[1]]:
                        # If the network was failing, report the failure period
                        # and clear the flag
                        if subnet_failure_data["failure_flag"] == 1:
                            subnet_failure_data["failure_flag"] = 0
                            print("subnet failure:",subnet_failure_data["network"], "start:", subnet_failure_data["time"])
                        subnet_failure_data["timeout"] = 0
                        subnet_failure_data["time"] = ""
# Drop the rows that timed out
drop_index = df_time_ip_ping.index[df_time_ip_ping['ping']=='-']
df_ping = df_time_ip_ping.drop(drop_index)
# Build one DataFrame per IP address
for ip in ip_list:
    responsetime_list.append(df_ping.query('ip == "'+ip+'"').reset_index(drop=True))
# Compute the moving-average response time per IP address
for responsetime_data in responsetime_list:
    # 1 while the server is currently considered overloaded
    overload_flag = 0
    # Slide a window of m samples over a single IP's data
    for index in range(len(responsetime_data)-m+1):
        # Sum of the last m response times
        # NOTE(review): `sum` shadows the built-in of the same name.
        sum = 0
        for num in range(m):
            sum += int(responsetime_data["ping"][index+num])
        # Average response time
        responsetime = sum/m
        # Not overloaded yet and the average exceeds t ms: record overload start
        if overload_flag == 0 and math.ceil(responsetime) > t:
            overload_flag = 1
            overload_data = responsetime_data[index:index+1].reset_index(drop=True)
            #print(overload_data)
        # Overload has cleared: print the overload period
        elif overload_flag == 1 and math.ceil(responsetime) <= t:
            overload_flag = 0
            start_overload = datetime.datetime.strptime(overload_data.loc[0, "time"], '%Y%m%d%H%M%S')
            end_overload = datetime.datetime.strptime(responsetime_data["time"][index], '%Y%m%d%H%M%S')
            print("overload condition time:", end_overload-start_overload, ",ip address:",responsetime_data["ip"][0], "start:", start_overload, "end:", end_overload)
f.close()
from abstract import AbstractRenderer
from constants import *
import pygame
from pygame.locals import *
# Tile size and inter-tile gap, in pixels.
TILE_WIDTH = 32
SPACING = 2
# Window size derived from the board dimensions (COLS/ROWS come from constants).
SCREEN_WIDTH = TILE_WIDTH * COLS + SPACING * (COLS+1)
SCREEN_HEIGHT = TILE_WIDTH * ROWS + SPACING * (ROWS+1)
KEY_REPEAT = 50 # Key repeat in milliseconds
class DesktopRenderer(AbstractRenderer):
    """Pygame-based renderer that draws the grid of cells in a desktop window."""
    def __init__(self):
        # BUG FIX: the original called super(AbstractRenderer, self).__init__(),
        # which starts the MRO lookup *after* AbstractRenderer and therefore
        # skips AbstractRenderer.__init__ entirely.
        super().__init__()
        # init screen
        pygame.init()
        pygame.display.set_caption("Rollerball")
        pygame.key.set_repeat(KEY_REPEAT)
        flags = DOUBLEBUF | HWSURFACE
        self.screen = pygame.display.set_mode(
            (SCREEN_WIDTH, SCREEN_HEIGHT), flags)
        # Window centre; integer division keeps these ints (pixel coordinates).
        self.cx = SCREEN_WIDTH // 2
        self.cy = SCREEN_HEIGHT // 2
    def render(self, cells):
        """Draw every cell of `cells[x][y]` as a filled square and flip the display."""
        self.screen.fill((0, 0, 0))
        for x in range(0, COLS):
            for y in range(0, ROWS):
                cell = cells[x][y]
                # Position includes the leading gap plus one gap per tile.
                cell_x = ((x + 1) * SPACING) + (x * TILE_WIDTH)
                cell_y = ((y + 1) * SPACING) + (y * TILE_WIDTH)
                rect = pygame.Rect(cell_x, cell_y, TILE_WIDTH, TILE_WIDTH)
                self.screen.fill(color=cell.colour, rect=rect)
        pygame.display.flip()
|
import json
from main import main
# Load the trajectory input and run the clustering/processing pipeline
# defined in main(); print the result to stdout.
with open('input.json', 'r') as input_stream:
    parsed_input = json.loads(input_stream.read())
obj=main(parsed_input)
print('\n')
print(obj)
"""m=folium.Map(location=[35.300048416948435 , -120.65977871417999] , zoom_start=13)
jsonFile=open('input.json','r')
jsonData=jsonFile.read()
obj=json.loads(jsonData)
t1=['pink','beige','cadetblue','red','lightblue','orange','white','lightred','lightgray','lightgreen','darkblue','darkgreen','darkred','blue','darkpurple','gray','green','orange','black','purple']
i=0
j=1
for o in obj['trajectories']:
if i==len(t1)-1:
i=0
for t in o:
folium.Marker([t['x'] , t['y']] ,
popup='traj : \n'+str(j),
tooltip='click for more info',
icon=folium.Icon(color=t1[i], icon='info-sign')).add_to(m)
i=i+1
j=j+1
m.save('input.html')
m2=folium.Map(location=[35.300048416948435 , -120.65977871417999] , zoom_start=13)
jsonFile=open('output.json','r')
jsonData=jsonFile.read()
obj2=json.loads(jsonData)
k=0
for o in obj2:
if k==len(t1)-1:
i=0
for t in o:
folium.Marker([t['x'] , t['y']] ,
popup='traj: \n',
tooltip='click for more info',
icon=folium.Icon(color=t1[k], icon='info-sign')).add_to(m2)
k=k+1
m2.save('output.html')"""
|
# Azure Event Hubs receive sample: drain one partition and report how many
# events were received plus the last sequence number/offset seen.
# NOTE(review): `Offset` and `EventHubClient` are not defined in this chunk —
# presumably imported from `azure.eventhub` earlier in the file; confirm.
ADDRESS = 'sb://####.servicebus.windows.net/#####'
USER = '##########'
KEY = '##################################'
CONSUMER_GROUP = "$default"
OFFSET = Offset("-1")  # "-1" = start from the beginning of the partition
PARTITION = "1"
total = 0
last_sn = -1
last_offset = "-1"
try:
    if not ADDRESS:
        raise ValueError("No EventHubs URL supplied.")
    client = EventHubClient(ADDRESS, debug=False, username=USER, password=KEY)
    receiver = client.add_receiver(CONSUMER_GROUP, PARTITION, prefetch=5000,
                                   offset=OFFSET)
    client.run()
    try:
        batched_events = receiver.receive(timeout=20)
    except:
        # NOTE(review): bare re-raise — present only so the `finally` below
        # always stops the client.
        raise
    finally:
        client.stop()
    # Count events and remember the last offset/sequence number seen.
    for event_data in batched_events:
        last_offset = event_data.offset.value
        last_sn = event_data.sequence_number
        total += 1
    print("Partition {}, Received {}, sn={} offset={}".format(
        PARTITION,
        total,
        last_sn,
        last_offset))
except KeyboardInterrupt:
    pass
import random
from django.db import models
from django.utils.translation import gettext_lazy as _
from django_common_utils.libraries.models.mixins import RandomIDMixin
from django_lifecycle import BEFORE_CREATE, hook, LifecycleModel
from apps.django.utils.fields import ColorField
from ..constants import DEFAULT_COLOR_TEXT_MAPPING
from ..public import model_references
from ..public.model_names import CHOICE_NAME, CHOICE_NAME_PLURAL
from ..querysets import ChoiceQuerySet
__all__ = [
"Choice"
]
def random_color() -> str:
    """Return a random CSS-style hex colour string, e.g. "#3FA0C2".

    Not cryptographically secure, which is fine here: the value is only a
    display colour for a poll choice.
    """
    hex_digits = "0123456789ABCDEF"
    color = "#"
    for _ in range(6):
        color += random.choice(hex_digits)  # nosec
    return color
class Choice(RandomIDMixin, LifecycleModel):
    """A selectable answer belonging to a poll.

    On first save, a lifecycle hook fills in a display colour when none was
    provided: first a per-text default mapping, then a random hex colour.
    """
    class Meta:
        verbose_name = CHOICE_NAME
        verbose_name_plural = CHOICE_NAME_PLURAL
        ordering = ("text",)
    objects = ChoiceQuerySet.as_manager()
    # Poll this choice belongs to; deleting the poll deletes its choices.
    poll = models.ForeignKey(
        model_references.POLL,
        on_delete=models.CASCADE,
    )
    # Short answer label shown to voters.
    text = models.CharField(
        max_length=24,
        verbose_name=_("Text")
    )
    # Display colour; may be left blank and is then auto-filled on create.
    color = ColorField(
        verbose_name=_("Farbe"),
        blank=True,
    )
    @hook(BEFORE_CREATE)
    def _hook_create_color_if_none(self):
        # Explicit value wins, then the per-text default, then a random colour.
        self.color = self.color or DEFAULT_COLOR_TEXT_MAPPING.get(self.text.lower()) or random_color()
|
import os, sys, shutil, glob
from subprocess import check_call
# Location of the OpenKIM API checkout inside the VM.
API = "/home/vagrant/openkim-api"
# Repository root of this project (one level above this test file).
CODE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
REPO2 = CODE_DIR + "/tests/repo"  # test model repository
def test_cleanup():
    """Run `make clean` in the API tree and verify the shared library is gone."""
    os.chdir(API)
    os.system("(. /tmp/env; make clean >> /dev/null 2>&1)")
    libkim = API+"/KIM_API/libkim.so"
    assert not os.path.exists(libkim)
def test_objectsexist():
    """No compiled artifacts (.o/.so/.a) may remain under the model repo."""
    for ext in ("o", "so", "a"):
        assert len(glob.glob(REPO2+"/mo/*/*." + ext)) == 0
def test_kimstr():
    """No leftover generated kim_str object files in the model repo."""
    leftovers = glob.glob(REPO2+"/mo/*/*kim_str.o")
    assert len(leftovers) == 0
|
from django.shortcuts import render, get_object_or_404
from django.contrib.auth import login, logout, authenticate
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.contrib.auth.models import User
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.admin.views.decorators import staff_member_required
from svsite.forms import UserForm, UserProfileForm
from cars.models import Car, UserProfile, Wishlist, TestDrive, Dealer
from svsite.forms import SearchForm
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
import json
import random
"""
def user_login(request):
if request.method == 'POST':
login_form = AuthenticationForm(request, request.POST)
response_data = {}
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
print(username, password)
user = authenticate(username=username, password=password)
if user is not None:
invalid = False
if user.is_active:
login(request, user)
response_data['result'] = 'success'
response_data['message'] = 'Successfully logged in!'
else:
response_data['result'] = 'failed'
response_data['message'] = 'Your account has been deactivated.'
else:
response_data['result'] = 'failed'
response_data['message'] = 'Invalid credentials supplied!'
return HttpResponse(json.dumps(response_data), content_type="application/json")
"""
def user_login(request):
    """Render the login page; authenticate and log the user in on POST."""
    if request.method != 'POST':
        # First visit: show the empty login form.
        return render(request, 'login.html', {})
    invalid = True
    username = request.POST['username']
    password = request.POST['password']
    user = authenticate(username=username, password=password)
    if user is not None:
        invalid = False
        if user.is_active:
            login(request, user)
            return HttpResponseRedirect('/')
        return HttpResponse("Your account has been deactivated.")
    print("Invalid login details: {0}, {1}".format(username, password))
    return render(request, 'login.html', {'invalid': invalid, },)
def dealer_login(request):
    """Render the dealer login page; authenticate dealer accounts on POST.

    Fixes over the original:
    - `Dealer.objects.get` could raise an uncaught DoesNotExist for a valid
      non-dealer account; it is now caught and treated as an invalid login.
    - An authenticated non-dealer previously fell off the end of the function
      (view returned None -> Django 500); it now re-renders the form.
    - The GET branch rendered 'login.html' (copy-paste from user_login); it
      now renders the dealer template this view actually serves.
    """
    invalid = True
    if request.method == 'POST':
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            try:
                dealer = Dealer.objects.get(user=user)
            except Dealer.DoesNotExist:
                dealer = None
            if dealer is not None and dealer.user_profile.is_dealer:
                invalid = False
                if user.is_active:
                    login(request, user)
                    return HttpResponseRedirect('/dealer-admin/')
                return HttpResponse("Your account has been deactivated.")
            # Authenticated but not a dealer: fall through as an invalid login.
        else:
            print("Invalid login details: {0}, {1}".format(username, password))
        return render(request, 'dealer_login.html', {'invalid': invalid, },)
    return render(request, 'dealer_login.html', {})
@login_required
def user_logout(request):
    """Log the current user out and redirect to the home page."""
    logout(request)
    return HttpResponseRedirect('/')
def register(request):
    """Render and process the user registration form.

    GET shows empty UserForm/UserProfileForm instances; POST validates
    both, creates the user (hashing the submitted password) plus the
    linked profile, and re-renders the page with ``registered`` set.
    """
    registered = False
    if request.method == 'POST':
        user_form = UserForm(data=request.POST)
        profile_form = UserProfileForm(data=request.POST)
        if user_form.is_valid() and profile_form.is_valid():
            user = user_form.save()
            # Hash the raw password before persisting the user.
            user.set_password(user.password)
            user.save()
            profile = profile_form.save(commit=False)
            profile.user = user
            profile.save()
            registered = True
        else:
            print(user_form.errors, profile_form.errors)
            return render(request, 'register.html',
                          {'user_form': user_form, 'profile_form': profile_form, 'registered': registered,
                           'user_form_errors': user_form.errors, 'profile_form_errors': profile_form.errors},
                          )
    else:
        user_form = UserForm()
        profile_form = UserProfileForm()
    # Bug fix: on a successful POST the original fell off the end of the
    # function and returned None (Django 500); always render here instead.
    return render(request,
                  'register.html',
                  {'user_form': user_form, 'profile_form': profile_form, 'registered': registered}
                  )
def search_form(request):
    """Render the standalone search form page."""
    return render(request, 'search.html')
def search(request):
    """Search cars by full name, then type, then manufacturer.

    When nothing matches, up to four random cars are offered as
    suggestions. A missing or empty ``q`` renders an error message.
    """
    errors = []
    suggestions = []
    cars = None
    if 'q' in request.GET:
        q = request.GET['q']
        if not q:
            errors.append('Enter a search term.')
        else:
            cars = Car.objects.filter(full_name__icontains=q.strip())
            if not cars:
                cars = Car.objects.filter(type__icontains=q.strip())
            if not cars:
                cars = Car.objects.filter(manufacturer__icontains=q.strip())
            if not cars:
                for i in range(4):
                    # Bug fix: a random id may not exist in the table, which
                    # used to raise Car.DoesNotExist and 500 the page.
                    try:
                        suggestions.append(Car.objects.get(id=random.randrange(1, 100)))
                    except Car.DoesNotExist:
                        continue
                return render(request, 'search-results.html', {'cars': cars, 'query': q, 'suggestions': suggestions})
            return render(request, 'search-results.html', {'cars': cars, 'query': q})
    return render(request, 'search-results.html', {'errors': errors})
def car_details(request, manufacturer, slug):
    """Show one car plus up to four similarly-priced same-type suggestions.

    POST (authenticated) toggles the car on the user's wishlist or
    test-drive list — whichever flag is in the POST body — then redirects
    back here. GET (authenticated) also loads the user's current
    wishlist/test-drive state for the template's toggle buttons.

    NOTE(review): request.user.is_authenticated() is called as a method,
    which only works on Django < 1.10 — confirm the project's version.
    """
    car = get_object_or_404(Car, manufacturer_slug=manufacturer, slug=slug)
    # Suggest up to 4 same-type cars priced within +/- 3 lakh of this one.
    offset = 300000
    suggestions = []
    i = 1
    for obj in Car.objects.filter(type=car.type):
        if car.price-offset <= obj.price <=car.price+offset and i <= 4 and car != obj:
            suggestions.append(obj)
            i += 1
    if request.method == 'POST' and request.user.is_authenticated():
        # For wishlist
        if 'wishlist' in request.POST.keys():
            add_wishlist = request.POST['wishlist']
            if add_wishlist == 'True':
                # Reactivate an existing (soft-deleted) entry, or create one.
                try:
                    user_wishlist = Wishlist.objects.get(user=request.user, car=car)
                    if not user_wishlist.is_active:
                        user_wishlist.is_active = True
                        user_wishlist.save()
                except Wishlist.DoesNotExist:
                    user_wishlist = Wishlist()
                    user_wishlist.user = request.user
                    user_wishlist.car = car
                    user_wishlist.save()
            elif add_wishlist == 'False':
                # Soft delete: flag inactive rather than removing the row.
                user_wishlist = Wishlist.objects.filter(user=request.user, car=car).update(is_active=False)
        # For testdrive
        elif 'testdrive' in request.POST.keys():
            print('stuff')
            add_testdrive = request.POST['testdrive']
            if add_testdrive == 'True':
                try:
                    user_testdrive = TestDrive.objects.get(user=request.user, car=car)
                    if not user_testdrive.is_active:
                        user_testdrive.is_active = True
                        user_testdrive.save()
                except TestDrive.DoesNotExist:
                    user_testdrive = TestDrive()
                    user_testdrive.user = request.user
                    user_testdrive.car = car
                    user_testdrive.save()
            elif add_testdrive == 'False':
                user_testdrive = TestDrive.objects.filter(user=request.user, car=car).update(is_active=False)
        # Post/Redirect/Get: reload this page after the toggle.
        return HttpResponseRedirect('/cars/'+car.manufacturer_slug+'/'+car.slug)
    # If user is logged in and just retrieving page
    elif request.method == 'GET' and request.user.is_authenticated():
        # Reduce each relation to a boolean for the template toggles.
        try:
            add_wishlist = Wishlist.objects.get(user=request.user, car=car)
            if add_wishlist.is_active:
                add_wishlist = True
            else:
                add_wishlist = False
        except Wishlist.DoesNotExist:
            add_wishlist = False
        try:
            add_testdrive = TestDrive.objects.get(user=request.user, car=car)
            if add_testdrive.is_active:
                add_testdrive = True
            else:
                add_testdrive = False
        except TestDrive.DoesNotExist:
            add_testdrive = False
        print(add_testdrive, add_wishlist)
        return render(request, 'car_details.html',
                      {'car': car, 'add_wishlist': add_wishlist, 'add_testdrive': add_testdrive, 'suggestions': suggestions})
    else:
        return render(request, 'car_details.html', {'car': car, 'suggestions': suggestions})
def search_type(request):
    """Render the search-by-type form page."""
    return render(request, 'search_type.html')
def search_type_results(request):
    """Filter cars by the manufacturer/type combination from SearchForm.

    Requires both fields; re-renders with errors when either is missing
    or the form is invalid.
    """
    errors = []
    if request.method == 'GET':
        form = SearchForm(request.GET)
        if not form.is_valid():
            return render(request, 'search_type.html', {'form_errors': form.errors})
        if form.data['manufacturer'] and form.data['type']:
            cd = form.cleaned_data
            print(cd)
            cars = Car.objects.filter(manufacturer=cd['manufacturer'], type=cd['type'])
            return render(request, 'search-results.html', {'cars': cars},)
        errors.append('Please make sure you select both the type and manufacturer.')
        return render(request, 'home.html', {'errors': errors})
    # Bug fix: non-GET requests used to fall off the end and return None.
    return render(request, 'search_type.html')
def search_price_range(request):
    """List cars whose price falls in the requested price band.

    Reads the ``price_range`` GET parameter and filters all cars by the
    matching [low, high) rupee band. An unknown band value renders the
    results page with ``invalid_price`` set.
    """
    cars = []
    if request.method == 'GET':
        # (low, high) bounds in rupees; high=None means unbounded above.
        # Replaces six copy-pasted loop branches with one data table.
        bands = {
            '1lakh-5lakh': (100000, 500000),
            '5lakh-10lakh': (500000, 1000000),
            '10lakh-20lakh': (1000000, 2000000),
            '20lakh-50lakh': (2000000, 5000000),
            '50lakh-1crore': (5000000, 10000000),
            'above1crore': (10000000, None),
        }
        price_range = request.GET['price_range']
        if price_range not in bands:
            return render(request, 'search-results.html', {'invalid_price': True},)
        low, high = bands[price_range]
        for car in Car.objects.all():
            if car.price >= low and (high is None or car.price < high):
                cars.append(car)
    return render(request, 'search-results.html', {'cars': cars},)
# @staff_member_required
def dealer_admin(request):
    """Dealer dashboard: split test drives into approved and pending."""
    count = TestDrive.objects.all().count()
    testdrives = TestDrive.objects.all()
    approved = [td for td in testdrives if td.confirmed]
    pending = [td for td in testdrives if not td.confirmed]
    return render(request, 'dealer-admin.html', {'approved': approved, 'pending': pending, 'count': count})
def dealer_approve(request):
    """Mark the test-drive request given by POST 'val_id' as confirmed.

    Only authenticated POSTs are honoured; anything else gets a plain
    error response.
    """
    if request.method == 'POST' and request.user.is_authenticated():
        id_val = request.POST['val_id']
        # Bug fix: .get() 500'd on an unknown id; 404 is the right answer.
        obj = get_object_or_404(TestDrive, id=id_val)
        obj.confirmed = True
        obj.save()
        return HttpResponseRedirect('/dealer-admin/')
    else:
        # Bug fix: replaced an unprofessional placeholder response body.
        return HttpResponse('Invalid request.')
@login_required
def wishlist_display(request):
    """Show the active wishlist cars for the logged-in user."""
    # Bug fix: removed a leftover debug print of every car name.
    cars = [obj.car for obj in Wishlist.objects.filter(user=request.user)
            if obj.is_active]
    return render(request, 'wishlist.html', {'cars': cars})
def about(request):
    """Render the static about page."""
    return render(request, 'about.html')
|
# Copyright 2018 Samsung Electronics
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from vitrage.common.constants import DatasourceProperties as DSProps
from vitrage.common.constants import EdgeLabel
from vitrage.common.constants import EntityCategory
from vitrage.common.constants import VertexProperties as VProps
from vitrage.datasources.resource_transformer_base import \
ResourceTransformerBase
from vitrage.datasources import transformer_base as tbase
from vitrage.datasources.transformer_base import extract_field_value
from vitrage.datasources.trove.cluster import TROVE_CLUSTER_DATASOURCE
from vitrage.datasources.trove.instance import TROVE_INSTANCE_DATASOURCE
from vitrage.datasources.trove.properties import \
TroveClusterProperties as TProps
import vitrage.graph.utils as graph_utils
class TroveClusterTransformer(ResourceTransformerBase):
    """Transforms Trove cluster events into Vitrage vertices and edges."""

    def _create_snapshot_entity_vertex(self, entity_event):
        return self._create_vertex(entity_event)

    def _create_update_entity_vertex(self, entity_event):
        return self._create_vertex(entity_event)

    def _create_vertex(self, entity_event):
        # TODO(bzurkowski): Add project ID
        state = extract_field_value(entity_event, *TProps.STATE)
        return graph_utils.create_vertex(
            self._create_entity_key(entity_event),
            vitrage_category=EntityCategory.RESOURCE,
            vitrage_type=TROVE_CLUSTER_DATASOURCE,
            vitrage_sample_timestamp=entity_event[DSProps.SAMPLE_DATE],
            entity_id=entity_event[TProps.ID],
            update_timestamp=entity_event[TProps.UPDATE_TIMESTAMP],
            entity_state=state,
            metadata={VProps.NAME: entity_event[TProps.NAME]})

    def _create_snapshot_neighbors(self, entity_event):
        return self._create_entity_neighbours(entity_event)

    def _create_update_neighbors(self, entity_event):
        return self._create_entity_neighbours(entity_event)

    def _create_entity_neighbours(self, entity_event):
        # One CONTAINS edge from the cluster to each member instance.
        return [
            self._create_neighbor(
                entity_event,
                instance[TProps.ID],
                TROVE_INSTANCE_DATASOURCE,
                EdgeLabel.CONTAINS,
                is_entity_source=True)
            for instance in entity_event[TProps.INSTANCES]
        ]

    def _create_entity_key(self, entity_event):
        key_values = self._key_values(TROVE_CLUSTER_DATASOURCE,
                                      entity_event[TProps.ID])
        return tbase.build_key(key_values)

    @staticmethod
    def get_vitrage_type():
        return TROVE_CLUSTER_DATASOURCE
|
from typing import List
from cache import CacheUser
from common.enums import EnumResponse
from common.line.classes import Event
from loguru import logger
from machine import MACHINE
from machine.classes import Machine
# pylint: disable=E0611
from pydantic import BaseModel
# pylint: enable=E0611
# HTTP status code -> OpenAPI response doc snippet for this endpoint.
DOC = {
    200: EnumResponse.OK.value.doc,
    500: EnumResponse.INTERNAL_SERVER_ERROR.value.doc,
}
class Payload(BaseModel):
    """LINE webhook payload: the bot destination id plus batched events."""
    destination: str
    events: List[Event]
def post(payload: Payload):
    """Dispatch each LINE webhook event to a state machine and execute it.

    A user's cached session selects the machine/state to resume; the
    '/quickreply' command starts the quickreply flow; everything else
    falls back to the default reply machine.
    """
    try:
        for event in payload.events:
            machine: Machine = None
            state: str = ""
            user_id = event.source["userId"]
            if event.type == "message" and event.message["type"] == "text":
                message = event.message["text"]
                # Resume the machine/state stored in the user's session.
                session = CacheUser.get(user_id)
                if session:
                    machine = MACHINE[session["machine"]]
                    state = session["state"]
                # No session: enter the pre-defined command flow.
                if message == "/quickreply":
                    machine = MACHINE["quickreply"]
                    state = "question"
            # Anything still unhandled goes to the default reply machine.
            if machine is None:
                machine = MACHINE["default"]
                state = "reply"
            machine.execute(state, user_id, event)
        return EnumResponse.OK.value.response
    except Exception as error:
        logger.error(error)
        return EnumResponse.INTERNAL_SERVER_ERROR.value.response
|
# zad 5 ##################################
# Task 5: read n integers, sort them, then print the user-chosen
# inclusive index range [a3, b3] (hence the b3+1 in the slice).
n1 = int(input('wpisz ilosc liczb '))
print(n1)
arr = []
for i in range(n1):
    arr.append(int(input()))
arr.sort()
print('podaj pszedzial\n')
a3 = int(input('od elementu - '))
b3 = int(input('do elementu - '))
print(arr[a3:b3+1])
# zad 4 ##################################
# Task 4: print every `liczba`-th letter of the alphabet as an
# uppercase/lowercase pair (65 = ord('A'), 97 = ord('a')); only full
# strides fit into the 26 letters, hence floor().
from math import floor
liczba = input('podaj przez ile liter wyswietlac')
n = floor(26 / int(liczba))
for i in range(n):
    print(chr(i*int(liczba)+65),chr(i*int(liczba)+97))
# zad 3 ##################################
# Task 3: print each uppercase letter A-Z next to its lowercase pair.
for i in range(26):
    print(chr(i+65),chr(i+97))
# zad 2 ##################################
def maximum(a1, b1, c1):
    """Task 2: print the largest of three numbers (ties favour the later argument)."""
    winner = c1
    if a1 > b1:
        if a1 > c1:
            winner = a1
    elif b1 > c1:
        winner = b1
    print(winner)
# Task 2 driver: read three numbers and print the biggest one.
print('podaj trzy liczby')
a2 = input()
b2 = input()
c2 = input()
maximum(int(a2),int(b2),int(c2))
# zad 1 ##################################
# Task 1: greet the user by name and report whether they are an adult.
a = input('podaj imie i nazwisko')
b = input('podaj swoj wiek')
def pelno(k):
    """Return 'pelnoletni' (adult) for ages 18 and up, else 'niepelnoletni'."""
    return 'pelnoletni' if k > 17 else 'niepelnoletni'
print('Czesc ',a,' jestes ', pelno(int (b)))
|
import threading
# Number of worker threads to spawn; each computes this Fibonacci index.
threads = 4
nth_fibonacci_term = 38
class WorkerThread(threading.Thread):
    """Thread that computes the nth Fibonacci number by naive recursion.

    Bug fix: the original used Python 2 print statements, which are
    syntax errors on Python 3 (the rest of this codebase is Python 3).
    """

    def __init__(self):
        threading.Thread.__init__(self)
        print("Initialising", self.getName())

    def run(self):
        print(self.getName(), "started!")
        # Exponential-time recursion; n=38 keeps the thread busy a while.
        self.f(nth_fibonacci_term)
        print(self.getName(), "finished!")

    def f(self, n):
        """Return the nth Fibonacci number (naive double recursion)."""
        if n < 2:
            return n
        else:
            return self.f(n - 1) + self.f(n - 2)
if __name__ == '__main__':
    # Spawn the workers; each computes the same Fibonacci term independently.
    for i in range(threads):
        WorkerThread().start()
|
import os, pathlib, logging
import numpy as np
log = logging.getLogger(__name__)
def get_full_path(subpath):
    """Resolve *subpath* against the directory containing this script.

    Args:
        subpath (str): subpath in current directory

    Returns:
        str: the full path
    """
    script_dir = pathlib.Path(__file__).parent.absolute()
    log.info(f"Parent path: {script_dir}")
    return os.path.join(script_dir, subpath)
def create_path(path : str):
    """Create *path* (including parents) unless it already exists.

    Args:
        path (str): the full path to be created if it does not exist
    """
    if os.path.exists(path):
        return
    os.makedirs(path)
def overwrite_to_file(filename, content):
    """Simple function that (over)writes passed content to file.

    Bug fix: uses a context manager so the handle is closed even if the
    write raises, and pins UTF-8 so output does not depend on the
    platform's default encoding.

    Args:
        filename (str): name of the file including extension
        content (str): what to write to file
    """
    with open(filename, "w", encoding="utf-8") as f:
        f.write(content)
import pyaudio
def play_wav_from_npy(wav : np.ndarray, sample_rate = 24000):
    """Play a mono waveform through the default audio output via PyAudio.

    Args:
        wav (np.ndarray): 1-D array of samples; paFloat32 implies float32
            in [-1, 1] is expected — TODO confirm callers pass that dtype.
        sample_rate (int): playback rate in Hz (default 24000).
    """
    p = pyaudio.PyAudio()
    stream = p.open(
        format=pyaudio.paFloat32,
        channels=1,
        rate=sample_rate,
        output=True,
    )
    # HACK: the clip is written four times back-to-back; the original
    # author notes playback misbehaved otherwise — root cause unknown.
    stream.write(np.concatenate([wav, wav, wav, wav])) #Due to some problems it needs to be played 4 times?
    # NOTE(review): start_stream() after the blocking write() and right
    # before stop_stream() looks suspicious — confirm intended ordering.
    stream.start_stream()
    stream.stop_stream()
    stream.close()
    p.terminate()
import dpkt
import re
import socket
import sys
from traceroute.fragment import DatagramFragment
from traceroute.ip_protocols import ip_protocol_map
from traceroute.packet import Packet
from traceroute.results_logger import print_results
from typing import Dict, List
def read_trace_file(filename: str) -> tuple:
    """
    Returns source IP, destination IP, intermediate IPs
    and protocols parsed from a trace file.

    NOTE(review): despite the original 4-element annotation, this
    actually returns a 6-tuple: (source node IP, ultimate destination IP,
    intermediate IPs, protocols, fragments, round-trip times).
    """
    with open(filename, "rb") as f:
        # Handle pcap and pcapng input filetype
        if re.match(r"^.*\.(pcap)$", filename):
            pcap = dpkt.pcap.Reader(f)
        elif re.match(r"^.*\.(pcapng)$", filename):
            pcap = dpkt.pcapng.Reader(f)
        else:
            print("Failed to read pcap or pcapng. Exiting.")
            sys.exit()
        protocols = {}
        packets = {}
        fragments = {}
        fragment_ids = {}
        max_ttl = 0
        source_node_ip_address = ""
        ultimate_destination_node_ip_address = ""
        intermediate_ip_addresses = []
        # ttls[t] counts probes already seen with TTL t — used to slot up
        # to 5 intermediate hops per TTL into intermediate_ip_addresses.
        ttls = [0] * 1024
        for ts, buf in pcap:
            eth = dpkt.ethernet.Ethernet(buf)
            ip = eth.data
            # Filter out non IP packets
            if type(ip) is not dpkt.ip.IP:
                continue
            # Update protocol set
            if ip.p in ip_protocol_map:
                protocols[ip.p] = ip_protocol_map[ip.p]
            else:
                protocols[ip.p] = "Unknown protocol"
            source_ip_address = socket.inet_ntoa(ip.src)
            destination_ip_address = socket.inet_ntoa(ip.dst)
            # Set source node and ultimate destination IP addresses
            if ip.ttl == max_ttl + 1 and is_valid(ip.data):
                max_ttl = ip.ttl
                if ip.ttl == 1:
                    # The very first probe fixes the trace's endpoints.
                    source_node_ip_address = source_ip_address
                    ultimate_destination_node_ip_address = destination_ip_address
            if (source_ip_address == source_node_ip_address and
                    destination_ip_address == ultimate_destination_node_ip_address and
                    ip.ttl <= max_ttl + 1):
                # Outgoing probe: record (fragmented) send times keyed by IP id.
                fragment_id = ip.id
                fragment_offset = 8 * (ip.off & dpkt.ip.IP_OFFMASK)
                if fragment_id not in fragments:
                    fragments[fragment_id] = DatagramFragment()
                if mf_flag_set(ip) or fragment_offset > 0:
                    fragments[fragment_id].count += 1
                    fragments[fragment_id].offset = fragment_offset
                fragments[fragment_id].send_times.append(ts)
                # Reserve 5 intermediate-hop slots for this probe's TTL.
                for i in range(5):
                    intermediate_ip_addresses.append("")
                # Probe key: UDP destination port or ICMP echo sequence no.
                key = -1
                if is_udp(ip.data):
                    key = ip.data.dport
                elif is_icmp(ip.data, 8):
                    key = ip.data["echo"].seq
                if key != -1:
                    fragment_ids[key] = fragment_id
                    packets[key] = Packet()
                    packets[key].ttl = ip.ttl
                    packets[key].ttl_adj = ttls[ip.ttl]
                    ttls[ip.ttl] += 1
            elif destination_ip_address == source_node_ip_address and is_icmp(ip.data):
                # Incoming ICMP reply addressed back to the source node.
                icmp_type = ip.data.type
                if icmp_type == 0 or icmp_type == 8:
                    # Echo reply/request: match directly on the echo seq.
                    packets[ip.data.data.seq].timestamp = ts
                    packets[ip.data.data.seq].source_ip_address = source_ip_address
                    packets[ip.data.data.seq].fragment_id = fragment_ids[ip.data.data.seq]
                    continue
                # TTL-exceeded etc.: the original probe is quoted inside
                # the ICMP payload; dig it out to recover the key.
                packet_data = ip.data.data.data.data
                if is_udp(packet_data):
                    key = packet_data.dport
                elif is_icmp(packet_data):
                    key = packet_data["echo"].seq
                if key in packets:
                    packets[key].timestamp = ts
                    packets[key].source_ip_address = source_ip_address
                    packets[key].fragment_id = fragment_ids[key]
                    if icmp_type == 11 and source_ip_address not in set(intermediate_ip_addresses):
                        # New router: place it in this TTL's reserved slots.
                        ttl = packets[key].ttl
                        ttl_adj = packets[key].ttl_adj
                        intermediate_ip_addresses[(5 * ttl) - 1 + ttl_adj] = source_ip_address
    # Drop the unused reserved slots, keeping hop discovery order.
    intermediate_ip_addresses = [ip for ip in intermediate_ip_addresses if ip != ""]
    round_trip_times = compute_round_trip_times(packets.values(), fragments)
    return (source_node_ip_address,
            ultimate_destination_node_ip_address,
            intermediate_ip_addresses,
            protocols,
            fragments,
            round_trip_times)
def compute_round_trip_times(packets: List[Packet],
                             fragments: Dict[int, DatagramFragment]) -> Dict[str, List[float]]:
    """
    Calculates round trip times for packets (in seconds).

    Packets without a reply timestamp or fragment id are skipped; each
    reply is paired with every recorded send time of its datagram.
    """
    rtts = {}
    for pkt in packets:
        if pkt.fragment_id == 0 or pkt.timestamp == 0:
            continue
        deltas = rtts.setdefault(pkt.source_ip_address, [])
        for sent in fragments[pkt.fragment_id].send_times:
            deltas.append(pkt.timestamp - sent)
    return rtts
def mf_flag_set(ip: dpkt.ip.IP) -> bool:
    """
    Returns boolean indicating if IP packet has more fragments
    """
    return (ip.off & dpkt.ip.IP_MF) != 0
def is_udp(data) -> bool:
    """
    Returns boolean indicating if data is UDP
    """
    # Exact type check (not isinstance): dpkt parses the payload into a
    # dpkt.udp.UDP instance only when the datagram really is UDP.
    return type(data) is dpkt.udp.UDP
def is_icmp(data, icmp_type=None) -> bool:
    """
    Returns boolean indicating if data is ICMP and given type
    """
    if type(data) is not dpkt.icmp.ICMP:
        return False
    return icmp_type is None or data.type == icmp_type
def is_valid(data) -> bool:
    """
    Returns boolean indicating if data is UDP or ICMP type 8
    """
    # Traceroute probes are either UDP datagrams or ICMP echo requests.
    return is_udp(data) or is_icmp(data, 8)
|
# -*- coding: utf-8 -*-
#
# python-netfilter - Python modules for manipulating netfilter rules
# Copyright (C) 2007-2009 Bollorรฉ Telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os, sys
import re
import subprocess
import netfilter.parser
class IptablesError(Exception):
    """Raised when an iptables invocation fails; keeps command + message."""

    def __init__(self, command, message):
        self.command = command
        self.message = message

    def __str__(self):
        return "command: {0}\nmessage: {1}".format(self.command, self.message)
class Table:
    """The Table class represents a netfilter table (IPv4 or IPv6).

    Commands run through the iptables/ip6tables binaries; with
    auto_commit=False they are buffered and executed by commit().
    """
    def __init__(self, name, auto_commit = True, ipv6 = False):
        """Constructs a new netfilter Table.

        If auto_commit is true, commands are executed immediately,
        otherwise they are buffered and you need to call the commit()
        method to execute them.

        If ipv6 is true then ip6tables and ip6tables-save are used
        instead of iptables and iptables-save.
        """
        self.auto_commit = auto_commit
        self.__name = name
        self.__buffer = []
        if ipv6:
            self.__iptables = 'ip6tables'
            self.__iptables_save = 'ip6tables-save'
        else:
            self.__iptables = 'iptables'
            self.__iptables_save = 'iptables-save'
    def create_chain(self, chainname):
        """Creates the specified user-defined chain.
        """
        self.__run_iptables(['-N', chainname])
    def delete_chain(self, chainname=None):
        """Attempts to delete the specified user-defined chain (all the
        chains in the table if none is given).
        """
        args = ['-X']
        if chainname: args.append(chainname)
        self.__run_iptables(args)
    def flush_chain(self, chainname=None):
        """Flushes the specified chain (all the chains in the table if
        none is given). This is equivalent to deleting all the rules
        one by one.
        """
        args = ['-F']
        if chainname: args.append(chainname)
        self.__run_iptables(args)
    def list_chains(self):
        """Returns a list of strings representing the chains in the
        Table.
        """
        return self.__get_chains().keys()
    def rename_chain(self, old_chain_name, new_chain_name):
        """Renames the specified user-defined chain.
        """
        self.__run_iptables(['-E', old_chain_name, new_chain_name])
    def get_policy(self, chainname):
        """Gets the policy for the specified built-in chain.
        """
        return self.__get_chains()[chainname]['policy']
    def set_policy(self, chainname, policy):
        """Sets the policy for the specified built-in chain.
        """
        self.__run_iptables(['-P', chainname, policy])
    def append_rule(self, chainname, rule):
        """Appends a Rule to the specified chain.
        """
        self.__run_iptables(['-A', chainname] + rule.specbits())
    def delete_rule(self, chainname, rule):
        """Deletes a Rule from the specified chain.
        """
        self.__run_iptables(['-D', chainname] + rule.specbits())
    def prepend_rule(self, chainname, rule):
        """Prepends a Rule to the specified chain.
        """
        self.__run_iptables(['-I', chainname, '1'] + rule.specbits())
    def list_rules(self, chainname):
        """Returns a list of Rules in the specified chain.
        """
        return self.__get_chains()[chainname]['rules']
    def commit(self):
        """Commits any buffered commands. This is only useful if
        auto_commit is False.
        """
        while len(self.__buffer) > 0:
            self.__run(self.__buffer.pop(0))
    def get_buffer(self):
        """Returns the command buffer. This is only useful if
        auto_commit is False.
        """
        return self.__buffer
    def __get_chains(self):
        # Parse the table's current state out of iptables-save output.
        lines = self.__run([self.__iptables_save, '-t', self.__name, '-c'])
        return netfilter.parser.parse_tables(lines)[self.__name]
    def __run_iptables(self, args):
        # Either execute now or queue for commit(), per auto_commit.
        cmd = [self.__iptables, '-t', self.__name] + args
        if self.auto_commit:
            self.__run(cmd)
        else:
            self.__buffer.append(cmd)
    def __run(self, cmd):
        """Executes *cmd*, returning stdout lines; raises IptablesError
        on failure (except the benign 'Chain already exists' case).
        """
        p = subprocess.Popen(cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            close_fds=True)
        out, err = p.communicate()
        # Bug fix: Popen.wait()/returncode already holds the decoded exit
        # code (negative when signalled), not a raw wait(2) status, so
        # os.WIFEXITED/os.WEXITSTATUS were the wrong check here.
        if p.returncode != 0:
            # On Python 3 the pipes yield bytes; decode before matching.
            if isinstance(err, bytes):
                err = err.decode(errors='replace')
            if not re.match(r'iptables: Chain already exists', err):
                raise IptablesError(cmd, err)
        return out.splitlines(True)
|
from pyomo.core import Var
import pandas as pd
from argparse import ArgumentTypeError
def model_res_to_dict(m):
    '''Function to save the output of the model in a 2-levels dictionary: first
    level are the variables, second level are countries-time periods keys'''
    # For every active Pyomo Var on model m, map each of its indices
    # (country/period keys) to the variable's solved .value.
    res_dict = {str(v): {index: getattr(m, str(v))[index].value
                for index in getattr(m, str(v))}
                for v in m.component_objects(Var, active=True)}
    return res_dict
def output_format(countries, out_unformat, t, T):
    '''Function to better formatting the model output and having it ready for
    being saved on excel files. The output is a list of lists with countries as
    first element and a DataFrame with all variables as index and time periods
    as columns. Country independent variables are grouped under the "global tag" '''
    var_l = list(out_unformat.keys())
    # NOTE(review): the last 7 variables are assumed to be the
    # country-independent ("global") ones — this relies on the insertion
    # order produced by model_res_to_dict; confirm if the model changes.
    var_c = var_l[:-7]
    var_gl = var_l[-7:]
    # One zero-filled DataFrame per country plus one 'global' sheet.
    out_form = {i: pd.DataFrame(data = float(0), index = var_c, columns = t) for i in countries}
    out_form['global'] = pd.DataFrame(data = float(0), index = var_gl, columns = t)
    idx = []
    # Build (sheet, variable, period) triples; tuple-keyed variables are
    # per-country, scalar-keyed ones belong to the 'global' sheet.
    for j in var_l:
        if isinstance(list(out_unformat[j].keys())[0], tuple):
            for i in countries:
                for k in range(1,T):
                    idx.append((i, j, k))
        else:
            for k in range(1,T):
                idx.append(('global', j, k))
    # Copy each value into its target cell (variable row, period column).
    for i in idx:
        if i[0] != 'global':
            out_form[i[0]].at[i[1],i[2]] = out_unformat[i[1]][(i[0],i[2])]
        else:
            out_form[i[0]].at[i[1],i[2]] = out_unformat[i[1]][i[2]]
    return out_form
def results_to_excel(res, countries, results_path, filename):
    '''Function to export results on a Excel file for full coopearative and
    non-cooperative (no coalitions) case. Each country has a worksheet with
    variables as rows and time periods as columns. A "global" worksheet
    contains country-independent variables.'''
    # NOTE(review): plain concatenation assumes results_path ends with a
    # path separator, and ExcelWriter.save() was removed in pandas 2.x
    # (use close()) — confirm the pinned pandas version.
    final_path = results_path + filename
    writer = pd.ExcelWriter(final_path)
    c_list = [i for i in countries]
    c_list.append('global')
    for i in c_list:
        res[i].to_excel(writer, i)
    writer.save()
def coa_f(N):
    '''Return the power set of an ordered set of N players as a list of 0/1
    membership vectors (1 = player in the coalition), excluding the empty
    coalition. Coalitions are ordered by size, and within a size bucket the
    earlier players come first.'''
    from itertools import product, repeat
    all_coalitions = [list(bits) for bits in product(*repeat(range(2), N))]
    # Bucket coalitions by how many players participate.
    by_size = [[] for _ in range(N + 1)]
    for coalition in all_coalitions:
        by_size[sum(coalition)].append(coalition)
    # Within a bucket, players listed first come first.
    for bucket in by_size:
        bucket.sort(reverse=True)
    # Flatten, dropping the empty coalition (size 0).
    return [c for size in range(1, N + 1) for c in by_size[size]]
def check_arg_T(arg):
    '''Check that the given number of time periods is of type int and inside bounds'''
    try:
        f = int(arg)
    except ValueError:
        raise ArgumentTypeError("Must be an integer number")
    if f < 2 or f > 59:
        # Bug fix: missing space rendered the message as "< 60and > 1".
        raise ArgumentTypeError("Argument must be an integer < " + str(60) + " and > " + str(1))
    return f
def check_arg_tstep(arg):
    '''Check that the given time step is an int and one of the allowed values'''
    try:
        step = int(arg)
    except ValueError:
        raise ArgumentTypeError("Must be an integer number")
    if step not in [1, 2, 5, 10, 20]:
        raise ArgumentTypeError("Argument must be one of the following integer values: 1, 2, 5, 10, 20")
    return step
def check_arg_tol(arg):
    '''Check that the given tolerance is of type int and inside bounds'''
    try:
        f = int(arg)
    except ValueError:
        raise ArgumentTypeError("Must be an integer number")
    if f < 7 or f > 12:
        # Bug fix: missing space rendered the message as "< 13and > 6".
        raise ArgumentTypeError("Argument must be an integer < " + str(13) + " and > " + str(6))
    return f
def check_arg_max_iter(arg):
    '''Check that the given maximum number of iterations is of type int and inside bounds'''
    try:
        f = int(arg)
    except ValueError:
        raise ArgumentTypeError("Must be an integer number")
    if f < 500 or f > 25000:
        # Bug fix: missing space rendered the message as "< 25001and > 499".
        raise ArgumentTypeError("Argument must be an integer < " + str(25001) + " and > " + str(499))
    return f
def check_bool_arg(arg):
    '''Validate that the argument is the literal string 'True' or 'False'.

    NOTE(review): despite the original docstring, this returns the
    validated *string*, not a bool — callers compare against the
    literal strings, so the behavior is kept as-is.
    '''
    if str(arg) != 'False' and str(arg) != 'True':
        raise ArgumentTypeError("--coop and --nc only accept True or False as given values!")
    else:
        return str(arg)
def coa_to_analyse(arg):
    '''Parse a comma-separated country/region list into a 0/1 membership
    vector over the fixed region order. Returns None for the special
    values 'none' and 'all'; raises ArgumentTypeError on unknown names.'''
    if arg == 'none' or arg == 'all':
        return None
    flags = {'US': 0, 'EU': 0, 'JAP': 0, 'RUS': 0, 'EUR': 0, 'CHI': 0, 'IND': 0,
             'MEST': 0, 'AFR': 0, 'LAM': 0, 'OHI': 0, 'OTH': 0}
    requested = [token.replace(" ", "") for token in arg.split(',')]
    unknown = 0
    for name in requested:
        if name in flags:
            flags[name] = 1
        else:
            unknown += 1
    if unknown:
        raise ArgumentTypeError('You have probably inserted a wrong \
            string of countries-regions in the --coalition argument. \
            Valid countries-regions are: US, EU, JAP, RUS, EUR, CHI, IND, MEST, AFR, LAM, OHI, OTH')
    return list(flags.values())
|
# Token-type names used by the lexer and parser.
# Operators and punctuation:
ASSIGN = 'ASSIGN'
COMMA = 'COMMA'
COLON = 'COLON'
DIVIDE = 'DIVIDE'
DOT = 'DOT'
EQUAL = 'EQUAL'
GREATER_THAN = 'GREATER_THAN'
GREATER_THAN_EQUAL = 'GREATER_THAN_EQUAL'
LESS_THAN = 'LESS_THAN'
LESS_THAN_EQUAL = 'LESS_THAN_EQUAL'
NOT_EQUAL = 'NOT_EQUAL'
LPAREN = 'LPAREN'
RPAREN = 'RPAREN'
MINUS = 'MINUS'
MODULO = 'MODULO'
MULTIPLY = 'MULTIPLY'
PLUS = 'PLUS'
SEMICOLON = 'SEMICOLON'
# Built-in type keywords:
BOOLTYPE = 'BOOLTYPE'
INTTYPE = 'INTTYPE'
FLOATTYPE = 'FLOATTYPE'
STRINGTYPE = 'STRINGTYPE'
STRUCTTYPE = 'STRUCTTYPE'
# Boolean operators and statement keywords:
AND = 'AND'
OR = 'OR'
NOT = 'NOT'
WHILE = 'WHILE'
DO = 'DO'
IF = 'IF'
THEN = 'THEN'
ELSE = 'ELSE'
ELIF = 'ELIF'
END = 'END'
FUN = 'FUN'
VAR = 'VAR'
SET = 'SET'
RETURN = 'RETURN'
NEW = 'NEW'
NIL = 'NIL'
# End-of-stream marker:
EOS = 'EOS'
# Literal values and identifiers:
BOOLVAL = 'BOOLVAL'
INTVAL = 'INTVAL'
FLOATVAL = 'FLOATVAL'
STRINGVAL = 'STRINGVAL'
ID = 'ID'
class Token(object):
    """A lexical token: its type name, raw lexeme, and source position."""

    def __init__(self, tokentype, lexeme, line, column):
        self.tokentype = tokentype
        self.lexeme = lexeme
        self.line = line
        self.column = column

    def __str__(self):
        # Format: TYPE 'lexeme' line:column
        return "{0} '{1}' {2}:{3}".format(self.tokentype, self.lexeme,
                                          self.line, self.column)
#!/usr/bin/env python3
import psutil
import shutil
import emails
import socket
import os
# Alert e-mail parties: fixed sender, recipient derived from $USER, and a
# generic body shared by every alert type.
sender = 'automation@example.com'
recipient = '{}@example.com'.format(os.environ.get('USER'))
body_msg = 'Please check your system and resolve the issue as soon as possible.'
def check_cpu():
    """Email an alert when CPU utilisation exceeds 80%."""
    try:
        CPU_PCT_THRESHOLD = 80
        cpu_percent = psutil.cpu_percent(interval=2)
        if cpu_percent <= CPU_PCT_THRESHOLD:
            print('CPU percent: {}%. (OK)'.format(str(cpu_percent)))
        else:
            print('CPU percent: {}%. (NOT OK) Sending cpu alert...'.format(cpu_percent))
            cpu_msg = emails.generate(sender, recipient,
                                      'Error - CPU usage is over 80%', body_msg)
            emails.send(cpu_msg)
    except Exception as e:
        # Log and swallow so the remaining checks still run.
        print('Unexpected errors occurred! {}'.format(str(e)))
def check_memory():
    """Email an alert when available memory drops below 500 MB."""
    try:
        MEM_MINIMUM = 500
        mem_available = psutil.virtual_memory().available / (1024 * 1024)
        if mem_available >= MEM_MINIMUM:
            print('Memory available: {}mb. (OK)'.format(mem_available))
        else:
            print('Memory available: {}mb. (NOT OK) Sending memory alert...'.format(mem_available))
            mem_msg = emails.generate(sender, recipient,
                                      'Error - Available memory is less than 500MB', body_msg)
            emails.send(mem_msg)
    except Exception as e:
        # Log and swallow so the remaining checks still run.
        print('Unexpected errors occurred! {}'.format(str(e)))
def check_disk_space():
    """Email an alert when free disk space in $HOME falls below 20%."""
    try:
        SPACE_MINIMUM = 20
        usage = shutil.disk_usage(os.path.expanduser('~'))
        free_space_pct = (usage.free / usage.total) * 100
        if free_space_pct >= SPACE_MINIMUM:
            print('Free disk space: {}%. (OK)'.format(free_space_pct))
        else:
            print('Free disk space: {}%. (NOT OK) Sending disk space alert...'.format(free_space_pct))
            disk_msg = emails.generate(sender, recipient,
                                       'Error - Available disk space is less than 20%', body_msg)
            emails.send(disk_msg)
    except Exception as e:
        # Log and swallow so the remaining checks still run.
        print('Unexpected errors occurred! {}'.format(str(e)))
def check_localhost():
    """Email an alert when 'localhost' does not resolve to 127.0.0.1."""
    try:
        DEFAULT_LOCAL_IP = '127.0.0.1'
        localhost_ip = ''
        try:
            localhost_ip = socket.gethostbyname('localhost')
        except socket.gaierror:
            # Bug fix: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt; only resolution failures
            # belong here.
            localhost_ip = 'IP resolution failed!'
        if localhost_ip == DEFAULT_LOCAL_IP:
            print('Localhost IP: {} (OK)'.format(localhost_ip))
        else:
            print('Localhost IP: {} (NOT OK) Sending localhost ip resolution alert... '.format(localhost_ip))
            # Bug fix: renamed the copy-pasted `disk_msg` local.
            localhost_msg = emails.generate(sender, recipient,
                                            'Error - localhost cannot be resolved to 127.0.0.1', body_msg)
            emails.send(localhost_msg)
    except Exception as ee:
        # output error and ignore it for other function to continue
        print('Unexpected errors occurred! {}'.format(str(ee)))
def main():
    """Run every health check; each check reports and alerts on its own."""
    check_memory()
    check_disk_space()
    check_localhost()
    check_cpu()
if __name__ == '__main__':
    main()
|
# Edge-attribute key under which a Link object is stored in the graph.
LINK = 'link'
class LeaveException (RuntimeError):
    # NOTE(review): no raise site is visible in this chunk; presumably
    # used to break out of graph traversals early — confirm with callers.
    pass
class Link (object):
    """An undirected link between two (node, port) endpoints.

    Endpoints are stored as parallel lists: self._n holds the two nodes
    and self._p the corresponding ports.
    """
    def reorder (self, l):
        """
        Flips a list of Links so that this node is first in each

        NOTE(review): passes the Link object itself as the node argument
        of order(); confirm callers rely on this.
        """
        return Link.order(l, self)
    @staticmethod
    def order (links, n):
        """
        Give a list of Links that each contain node n, flips any links so
        that n is always the first element of the link.
        """
        r = []
        for l in links:
            assert n in l
            if l._n[0] == n:
                r.append(l)
            else:
                r.append(l.flip())
        return r
    def __init__ (self, np1, np2):
        # np1/np2 are (node, port) pairs.
        self._n = [np1[0],np2[0]]
        self._p = [np1[1],np2[1]]
    def _index (self, i):
        # Accepts a node (mapped to its position) or an index 0/1.
        if i in self._n:
            i = self._n.index(i)
        assert i == 0 or i == 1
        return i
    def flip (self):
        """
        Returns the same link, but flipped (a,b) becomes (b,a)
        """
        return Link(self[1], self[0])
    def port (self, n):
        """Returns the port of node n on this link."""
        # Bug fix: _index was called as a bare function (NameError at
        # runtime); it is a method and needs self.
        return self._p[self._index(n)]
    def other_port (self, n):
        """
        Returns the other end's port.
        See other().
        """
        return self.other(n)[1]
    def other (self, n):
        """
        Returns the other end of a link.
        Given a node or (node,port) that is part of this link, it returns
        the opposite end's (node,port).
        """
        if type(n) is tuple:
            if self[0] == n:
                return self[1]
            assert self[1] == n
            return self[0]
        if self[0][0] == n:
            return self[1]
        assert self[1][0] == n
        return self[0]
    def __contains__ (self, n):
        """
        Does this link contain (node,port) or node?
        """
        if type(n) is tuple:
            return n in [self[0], self[1]]
        else:
            # Bug fix: was `n in [self._n]`, which compared n against the
            # whole node list (always False for a node), breaking the
            # membership assert in order().
            return n in self._n
    def __len__ (self):
        return 2
    def __getitem__ (self, i):
        """
        Gets (node,port) based on index
        """
        i = self._index(i)
        return (self._n[i], self._p[i])
    def __repr__ (self):
        return "Link(%s, %s)" % (self[0], self[1])
class Graph (object):
  """
  A multigraph of nodes joined by Links.

  Thin wrapper around nx.MultiGraph.  Each edge's data dict stores its
  Link object under the LINK key, and node_port maps
  node -> {local port: (other node, other port)}.
  """
  def __init__ (self):
    self._g = nx.MultiGraph()
    # node -> {port: (other_node, other_port)}
    self.node_port = {}

  def __contains__ (self, n):
    return n in self._g

  def add (self, node):
    """
    Adds an unconnected node to the graph.
    """
    self._g.add_node(node)
    self.node_port[node] = {}

  def remove (self, node):
    # NOTE(review): does not clean up self.node_port entries for the
    # removed node or its peers -- confirm whether callers rely on that.
    self._g.remove_node(node)

  def neighbors (self, n):
    return self._g.neighbors(n)

  def find_port (self, node1, node2):
    """
    Returns (port1, port2) for the first edge touching node1/node2,
    or None if there is none.
    """
    for n1, n2, k, d in self._g.edges([node1, node2], data=True, keys=True):
      return (d[LINK][node1][1], d[LINK][node2][1])
    return None

  def connected (self, node1, node2):
    return self.find_port(node1, node2) is not None

  def disconnect_port (self, np):
    """
    Disconnects the given (node,port)

    Returns the number of edges removed.
    """
    assert type(np) is tuple
    remove = []
    if self.port_for_node(np[0], np[1]) is None:
      return 0
    for n1,n2,k,d in self._g.edges([np[0], self.node_port[np[0]][np[1]][0]], data=True, keys=True):
      if np in d[LINK]:
        remove.append((n1,n2,k))
        del self.node_port[n1][d[LINK][n1][1]]
        del self.node_port[n2][d[LINK][n2][1]]
    for e in remove:
      self._g.remove_edge(*e)
    return len(remove)

  def unlink (self, np1, np2):
    """
    Removes the connection involving np1/np2; either may be a node or a
    (node,port) pair.  Returns the number of edges removed.
    """
    count = 0
    if isinstance(np1, tuple):
      # Fix: these were bare disconnect_port() calls, raising NameError.
      count = self.disconnect_port(np1)
    elif isinstance(np2, tuple):
      count = self.disconnect_port(np2)
    else:
      # NOTE(review): edges([np1, np2]) yields edges incident to either
      # node, not only edges between them -- confirm intended semantics.
      for n1, n2, k, d in self._g.edges([np1, np2], data=True, keys=True):
        self._g.remove_edge(n1,n2,k)
        del self.node_port[n1][d[LINK][n1][1]]
        del self.node_port[n2][d[LINK][n2][1]]
        count = count + 1
    return count

  def link (self, np1, np2):
    """
    Links two nodes on given ports
    np1 is (node1, port1)
    np2 is (node2, port2)
    A bare node may be passed instead; the first free port number under
    1000 is picked for it.
    """
    #FIXME: the portless variation doesn't really make sense with
    #       allow_multiples yet.
    try:
      _ = np1[0]
    except:
      # portless (hacky): pick the first unused port number
      for free in xrange(1000):
        if free not in np1.ports:
          np1 = (np1,free)
          break
    try:
      _ = np2[0]
    except:
      # portless (hacky)
      for free in xrange(1000):
        if free not in np2.ports:
          np2 = (np2,free)
          break
    self._g.add_node(np1[0])
    self._g.add_node(np2[0])
    self.disconnect_port(np1)
    self.disconnect_port(np2)
    self._g.add_edge(np1[0],np2[0],link=Link(np1,np2))
    self.node_port[np1[0]][np1[1]] = np2
    self.node_port[np2[0]][np2[1]] = np1

  def find_links (self, query1=None, query2=()):
    """
    Returns all Links whose endpoints satisfy the queries.  query2's ()
    default is a sentinel meaning "unspecified"; query2=None means
    "same as query1".  Matched links are flipped so the end matching
    query1 comes first.
    """
    # No idea if new link query stuff works.
    if query2 is None: query2 = query1
    if query1 == (): query1 = None
    if query2 == (): query2 = None
    o = set()
    for n1,n2,k,d in self._g.edges(data=True, keys=True):
      l = d[LINK]
      ok = False
      if query1 is None or self._test_node(l[0][0], args=(query1,), link=l):
        if query2 is None or self._test_node(l[1][0], args=(query2,), link=l):
          ok = True
      if not ok and (query1 != query2):
        # Try the opposite orientation.
        if query2 is None or self._test_node(l[0][0], args=(query2,), link=l):
          if query1 is None or self._test_node(l[1][0], args=(query1,), link=l):
            ok = True
            l = l.flip()
      if ok:
        o.add(l)
    return list(o)

  def ports_for_node (self, node):
    """
    Map of local port -> (other, other_port)
    """
    ports = defaultdict(_void)
    for n1, n2, k, d in self._g.edges([node], data=True, keys=True):
      p = d[LINK]
      assert n1 is node
      # Fix: the sanity check keyed on the whole (node,port) pair, but
      # the map is keyed by the local port number.
      assert ports.get(p[node][1]) is None
      ports[p[node][1]] = p.other(node)
    return ports

  def port_for_node (self, node, port):
    """
    Returns the (other_node, other_port) attached to node's port, or None.
    """
    assert node in self.node_port
    return self.node_port[node].get(port)

  def disconnect_nodes (self, node1, node2):
    """ Disconnect node1 from node2. Either of node1 or node2
    can be a node, or a (node, port) pair
    Returns number of nodes disconnected
    """
    # Fix: the docstring promised a count but nothing was returned.
    return self.unlink(node1, node2)

  def disconnect_node (self, node1):
    """ Disconnects node from all neighbors """
    # Iterate over a copy: disconnecting mutates the adjacency being
    # iterated.
    for neighbor in list(self.neighbors(node1)):
      self.disconnect_nodes(node1, neighbor)

  def get_one_link (self, query1=None, query2=(), **kw):
    return self.get_link(query1, query2, one=True, **kw)

  def get_link (self, query1=None, query2=(), **kw):
    """
    Returns one matching link (see find_links).

    Keyword argument "default" lets you set a default value if
    no node is found.  Note that this means you must use
    Equal(F("default"), <value>) to actually check a field called
    "default" on a node.
    """
    if 'default' in kw:
      has_default = True
      default = kw['default']
      del kw['default']
    else:
      has_default = False
    one = False
    if 'one' in kw:
      one = kw['one']
      del kw['one']
    assert len(kw) == 0
    r = self.find_links(query1, query2)
    if len(r) > 1 and one:
      raise RuntimeError("More than one match")
    elif len(r) == 0:
      if has_default:
        return default
      raise RuntimeError("Could not get element")
    return r[0]

  def has_link (self, query1=None, query2=()):
    # Really bad implementation.  We could easily escape early.
    return len(self.find_links(query1, query2)) > 0

  def _test_node (self, n, args=(), kw={}, link=None):
    """
    Tests node n against callable predicates (args) and attribute
    constraints (kw: is_a -> isinstance, type -> exact type, anything
    else -> attribute equality).  A predicate may raise LeaveException
    to mean "no match".
    """
    #TODO: Should use a special value for unspecified n2
    for k,v in kw.iteritems():
      if k == "is_a":
        if not isinstance(n,v): return False
      elif k == "type":
        if type(n) is not v: return False
      else:
        if not hasattr(n, k): return False
        if getattr(n, k) != v: return False
    for a in args:
      try:
        if not a(n, link):
          return False
      except LeaveException:
        return False
    return True

  def find (self, *args, **kw):
    """
    Returns all nodes passing _test_node for the given criteria.
    """
    r = []
    for n in self._g.nodes():
      if self._test_node(n, args, kw):
        r.append(n)
    return r

  def get_one (self, *args, **kw):
    kw['one'] = True
    return self.get(*args, **kw)

  def get (self, *args, **kw):
    """
    Returns one matching node (see find).

    Keyword argument "default" lets you set a default value if
    no node is found.  Note that this means you must use
    Equal(F("default"), <value>) to actually check a field called
    "default" on a node.
    """
    if 'default' in kw:
      has_default = True
      default = kw['default']
      del kw['default']
    else:
      has_default = False
    one = False
    if 'one' in kw:
      # NOTE(review): unlike get_link, any 'one' keyword -- even
      # one=False -- enables single-match mode here; kept as-is for
      # compatibility.
      del kw['one']
      one = True
    r = self.find(*args,**kw)
    if len(r) > 1 and one:
      raise RuntimeError("More than one match")
    elif len(r) == 0:
      if has_default:
        return default
      raise RuntimeError("Could not get element")
    return r[0]

  def has (self, *args, **kw):
    # Really bad implementation.  We could easily escape early.
    return len(self.find(*args,**kw)) > 0

  def __len__ (self):
    return len(self._g)
|
from game_master import GameMaster
from read import *
from util import *
class TowerOfHanoiGame(GameMaster):
    """GameMaster for Towers of Hanoi, backed by a KB of on/top/onTopOf/empty facts."""

    def __init__(self):
        super().__init__()

    def produceMovableQuery(self):
        """
        See overridden parent class method for more information.

        Returns:
            A Fact object that could be used to query the currently available moves
        """
        return parse_input('fact: (movable ?disk ?init ?target)')

    def getGameState(self):
        """
        Returns a representation of the game in the current state.

        The output is a Tuple of three Tuples, one per peg, each listing
        its disks as integers sorted ascending (smallest disk, 1, on top),
        e.g. ((1,2,5),(),(3,4))

        Returns:
            A Tuple of Tuples that represent the game state
        """
        pattern = Statement(["on", "?x", "?y"])
        # All (on ?disk ?peg) facts currently in the KB.
        on_facts = [f for f in self.kb.facts if match(pattern, f.statement)]

        pegs = [[], [], []]
        for fact in on_facts:
            binding_str = str(match(pattern, fact.statement))
            # The bindings render as "?X : diskD, ?Y : pegP"; check every
            # combination (up to 5 disks, 3 pegs) exactly as the original
            # if/elif chain did.
            for peg in range(1, 4):
                for disk in range(1, 6):
                    if binding_str == "?X : disk{}, ?Y : peg{}".format(disk, peg):
                        pegs[peg - 1].append(disk)

        for peg in pegs:
            peg.sort()
        return tuple(tuple(peg) for peg in pegs)

    def makeMove(self, movable_statement):
        """
        Takes a MOVABLE statement and makes the corresponding move. This will
        result in a change of the game state, and therefore requires updating
        the KB in the Game Master.

        The statement comes directly from the MOVABLE query result, e.g.
        (movable disk1 peg1 peg3)

        Args:
            movable_statement: A Statement object that contains one of the currently viable moves

        Returns:
            None
        """
        facts = self.kb.facts
        disk = movable_statement.terms[0]
        source = movable_statement.terms[1]
        target = movable_statement.terms[2]

        # Determine which disk (if any) this disk currently rests on, and
        # which disk (if any) is on top of the target peg.
        # NOTE(review): only disk2 and disk3 are considered as possible
        # supports, which assumes a 3-disk game -- confirm against the KB
        # setup files.
        old_stack = False
        new_stack = False
        for f in facts:
            if Fact(Statement(["onTopOf", disk, "disk2"])) == f:
                old_stack = "disk2"
            elif Fact(Statement(["onTopOf", disk, "disk3"])) == f:
                old_stack = "disk3"
            if Fact(Statement(["on", "disk2", target])) == f:
                new_stack = "disk2"
            elif Fact(Statement(["on", "disk3", target])) == f:
                new_stack = "disk3"
        new_is_empty = Fact(Statement(["empty", target])) in facts

        if old_stack and new_is_empty:
            # OPTION 1: from a stack onto an empty peg.
            self.kb.kb_retract(Fact(Statement(["empty", target])))
            self.kb.kb_retract(Fact(Statement(["top", disk, source])))
            self.kb.kb_retract(Fact(Statement(["onTopOf", disk, old_stack])))
            self.kb.kb_retract(Fact(Statement(["on", disk, source])))
            self.kb.kb_assert(Fact(Statement(["on", disk, target])))
            self.kb.kb_assert(Fact(Statement(["top", old_stack, source])))
            self.kb.kb_assert(Fact(Statement(["top", disk, target])))
        elif old_stack and new_stack:
            # OPTION 2: from a stack onto another stack.
            self.kb.kb_retract(Fact(Statement(["top", disk, source])))
            self.kb.kb_retract(Fact(Statement(["onTopOf", disk, old_stack])))
            self.kb.kb_retract(Fact(Statement(["top", new_stack, target])))
            self.kb.kb_retract(Fact(Statement(["on", disk, source])))
            self.kb.kb_assert(Fact(Statement(["on", disk, target])))
            self.kb.kb_assert(Fact(Statement(["onTopOf", disk, new_stack])))
            self.kb.kb_assert(Fact(Statement(["top", old_stack, source])))
            self.kb.kb_assert(Fact(Statement(["top", disk, target])))
        elif new_stack:
            # OPTION 3: lone disk onto a stack (source peg becomes empty).
            self.kb.kb_retract(Fact(Statement(["top", disk, source])))
            self.kb.kb_retract(Fact(Statement(["top", new_stack, target])))
            self.kb.kb_retract(Fact(Statement(["on", disk, source])))
            self.kb.kb_assert(Fact(Statement(["onTopOf", disk, new_stack])))
            self.kb.kb_assert(Fact(Statement(["top", disk, target])))
            self.kb.kb_assert(Fact(Statement(["empty", source])))
            self.kb.kb_assert(Fact(Statement(["on", disk, target])))
        else:
            # OPTION 4: lone disk onto an empty peg.
            self.kb.kb_retract(Fact(Statement(["top", disk, source])))
            self.kb.kb_retract(Fact(Statement(["empty", target])))
            self.kb.kb_retract(Fact(Statement(["on", disk, source])))
            self.kb.kb_assert(Fact(Statement(["top", disk, target])))
            self.kb.kb_assert(Fact(Statement(["empty", source])))
            self.kb.kb_assert(Fact(Statement(["on", disk, target])))

        # Clear stale movable facts.  Iterate over a copy: kb_retract
        # mutates the live fact list.
        for f in list(facts):
            if str(f.statement.predicate) == "movable":
                self.kb.kb_retract(f)

    def reverseMove(self, movable_statement):
        """
        Undo a move by replaying it with source and target pegs swapped.

        Args:
            movable_statement: A Statement object that contains one of the previously viable moves

        Returns:
            None
        """
        pred = movable_statement.predicate
        sl = movable_statement.terms
        newList = [pred, sl[0], sl[2], sl[1]]
        self.makeMove(Statement(newList))
class Puzzle8Game(GameMaster):
    """GameMaster for the 8-puzzle, backed by (coordinate tile posX posY) facts."""

    def __init__(self):
        super().__init__()

    def produceMovableQuery(self):
        """
        Create the Fact object that could be used to query
        the KB of the presently available moves. This function
        is called once per game.

        Returns:
            A Fact object that could be used to query the currently available moves
        """
        return parse_input('fact: (movable ?piece ?initX ?initY ?targetX ?targetY)')

    def getGameState(self):
        """
        Returns a representation of the game board in the current state.

        The output is a Tuple of three Tuples, one per row, with each tile
        as an integer and the empty space as -1, e.g.
        ((1, 2, 3), (4, 5, 6), (7, 8, -1))

        Returns:
            A Tuple of Tuples that represent the game state
        """
        board = [['', '', ''], ['', '', ''], ['', '', '']]
        for f in self.kb.facts:
            if f.statement.predicate == "coordinate":
                # Terms are (tileN | empty, posX, posY); "pos3"[3] is the
                # 1-based coordinate digit.
                col = int(str(f.statement.terms[1])[3]) - 1
                row = int(str(f.statement.terms[2])[3]) - 1
                if str(f.statement.terms[0]) == "empty":
                    board[row][col] = -1
                else:
                    # "tile7"[4] is the tile's digit.
                    board[row][col] = int(str(f.statement.terms[0])[4])
        return tuple(tuple(row) for row in board)

    def makeMove(self, movable_statement):
        """
        Takes a MOVABLE statement and makes the corresponding move. This will
        result in a change of the game state, and therefore requires updating
        the KB in the Game Master.

        The statement comes directly from the MOVABLE query result, e.g.
        (movable tile3 pos1 pos3 pos2 pos3)

        Args:
            movable_statement: A Statement object that contains one of the currently viable moves

        Returns:
            None
        """
        tile = movable_statement.terms[0]
        old_x = movable_statement.terms[1]
        old_y = movable_statement.terms[2]
        new_x = movable_statement.terms[3]
        new_y = movable_statement.terms[4]

        # The tile and the empty square swap coordinates.
        self.kb.kb_retract(Fact(Statement(["coordinate", tile, old_x, old_y])))
        self.kb.kb_retract(Fact(Statement(["coordinate", "empty", new_x, new_y])))
        self.kb.kb_assert(Fact(Statement(["coordinate", tile, new_x, new_y])))
        self.kb.kb_assert(Fact(Statement(["coordinate", "empty", old_x, old_y])))

    def reverseMove(self, movable_statement):
        """
        Undo a move by replaying it with initial and target coordinates swapped.

        Args:
            movable_statement: A Statement object that contains one of the previously viable moves

        Returns:
            None
        """
        pred = movable_statement.predicate
        sl = movable_statement.terms
        newList = [pred, sl[0], sl[3], sl[4], sl[1], sl[2]]
        self.makeMove(Statement(newList))
|
class ResultSet(list):
    """A list of result rows that mimics the DB-API cursor fetchone() accessor."""

    def fetchone(self):
        """Return the first row, or None if the result set is empty."""
        return self[0] if self else None
|
from school import get_int
from sys import exit
# Count how many ways `num` can be written as a sum of consecutive
# integers starting at some base; the trivial single-term sum (num
# itself) is added at the end.
num = get_int("Enter a number: ", force=True)
total = 0
sums = 0
begin_num = 1
next_num = 1
while next_num != num:
    total += next_num
    if total >= num:
        # Overshot or hit the target exactly: count the exact hit,
        # then restart the running sum from the next base number.
        if total == num:
            sums += 1
        begin_num += 1
        next_num = begin_num
        total = 0
        continue
    next_num += 1
sums += 1  # num itself
print(f'sums: {sums}')
|
#Open API ๊ณต๊ณต๋ฐ์ดํฐ
import requests
import urllib.parse as p
from urllib.request import Request,urlopen
import re
import csv
from datetime import date
import time
import pandas as pd
import os
def getTimeTuple(startTimeStr, endTimeStr, rangeType):
    """
    Build a tuple of time.struct_time values from startTimeStr to
    endTimeStr (inclusive), stepping hourly or daily.

    Args:
        startTimeStr: start date as 'YYYYMMDD'
        endTimeStr: end date as 'YYYYMMDD'
        rangeType: 'H' for hourly steps, 'D' for daily steps

    Returns:
        Tuple of time.struct_time values; an empty tuple for an unknown
        rangeType.  If the start is on/after the end, a single-element
        tuple holding the start time is returned.
    """
    if rangeType == 'H':
        addRange = 60 * 60
    elif rangeType == 'D':
        addRange = 60 * 60 * 24
    else:
        return ()

    startTimeDigit = time.mktime(time.strptime(startTimeStr, '%Y%m%d'))
    endTimeDigit = time.mktime(time.strptime(endTimeStr, '%Y%m%d'))

    # Degenerate range: just the start time.
    # (Fix: the original appended the start time to a list but then
    # returned the still-empty tuple, discarding it.)
    if startTimeDigit >= endTimeDigit:
        return (time.localtime(startTimeDigit),)

    timeList = []
    while startTimeDigit <= endTimeDigit:
        timeList.append(time.localtime(startTimeDigit))
        startTimeDigit += addRange
    return tuple(timeList)
# --- request parameters for the data.go.kr price-survey API -------------
SERVICE='LocalGovPriceInfoService'
OPERATION='getLocalGovPriceResearchSearch'
# NOTE(review): hard-coded service key committed to source -- move to an
# environment variable or config file.
KEY='LNGx2DbXvy%2F1L3chL9isCcJR8kp0ja6pkn9049cAk1%2BmkfIP0SuKVS0bactIekU1utszRbKDtRpXPH88CARaXw%3D%3D'
numOfRows='10000'
pageNo='1'
_returnType='xml,json'
EXAMIN_AREA_CD='1102'
url='http://apis.data.go.kr/B552895/'
# The key/returnType are stored URL-encoded; decode before re-encoding below.
decode_key = p.unquote(KEY)
decode_return = p.unquote(_returnType)
# --- regexes that scrape one field each out of the XML response ---------
# survey date
examin=re.compile('<examin_de>(.+?)</examin_de>')
# survey region name
examin_area_nm=re.compile('<examin_area_nm>(.+?)</examin_area_nm>')
# survey region code
examin_area_cd=re.compile('<examin_area_cd>(.+?)</examin_area_cd>')
# survey market name
examin_mrkt_nm=re.compile('<examin_mrkt_nm>(.+?)</examin_mrkt_nm>')
# survey market code
examin_mrkt_cd=re.compile('<examin_mrkt_cd>(.+?)</examin_mrkt_cd>')
# item name
prdlst_nm=re.compile('<prdlst_nm>(.+?)</prdlst_nm>')
# item code
prdlst_cd=re.compile('<prdlst_cd>(.+?)</prdlst_cd>')
# item detail name
prdlst_detail_nm=re.compile('<prdlst_detail_nm>(.+?)</prdlst_detail_nm>')
# distribution stage category
distb_step_se=re.compile('<distb_step_se>(.+?)</distb_step_se>')
# distribution stage
distb_step=re.compile('<distb_step>(.+?)</distb_step>')
# grade
grad=re.compile('<grad>(.+?)</grad>')
# grade code
grad_cd=re.compile('<grad_cd>(.+?)</grad_cd>')
# standard (spec/unit)
stndrd=re.compile('<stndrd>(.+?)</stndrd>')
# surveyed price
examin_amt=re.compile('<examin_amt>(.+?)</examin_amt>')
# Fetch one request per day over the whole range and append rows to a CSV.
dayTuple = getTimeTuple('20140102', '20170821', 'D')
for d in dayTuple:
    examin_de = time.strftime('%Y%m%d',d)
    queryParams = SERVICE + '/' + OPERATION + '?'+ p.urlencode({ p.quote_plus('ServiceKey') : decode_key, p.quote_plus('numOfRows') : numOfRows , p.quote_plus('pageNo') : pageNo, p.quote_plus('_returnType') : decode_return, p.quote_plus('examin_de') : examin_de,p.quote_plus('examin_area_cd') : EXAMIN_AREA_CD })
    requestURL =url+queryParams
    print(requestURL)
    data = requests.get(requestURL).text
    # Scrape each column as a parallel list; assumes every record has
    # every field so the lists stay aligned -- TODO confirm.
    examin_de_d = examin.findall(data)
    examin_area_nm_d = examin_area_nm.findall(data)
    examin_area_cd_d = examin_area_cd.findall(data)
    #examin_mrkt_nm_d = examin_mrkt_nm.findall(data)
    examin_mrkt_cd_d = examin_mrkt_cd.findall(data)
    prdlst_nm_d = prdlst_nm.findall(data)
    prdlst_cd_d = prdlst_cd.findall(data)
    prdlst_detail_nm_d = prdlst_detail_nm.findall(data)
    distb_step_se_d = distb_step_se.findall(data)
    distb_step_d = distb_step.findall(data)
    grad_d = grad.findall(data)
    grad_cd_d = grad_cd.findall(data)
    stndrd_d = stndrd.findall(data)
    examin_amt_d = examin_amt.findall(data)
    d = {'examin_de_d':examin_de_d,'examin_area_nm_d':examin_area_nm_d,'examin_area_cd_d':examin_area_cd_d,'examin_mrkt_cd_d':examin_mrkt_cd_d,\
    'prdlst_nm_d':prdlst_nm_d,'prdlst_cd_d':prdlst_cd_d,'prdlst_detail_nm_d':prdlst_detail_nm_d,'distb_step_se_d':distb_step_se_d,\
    'distb_step_d':distb_step_d,'grad_d':grad_d,'grad_cd_d':grad_cd_d,'stndrd_d':stndrd_d,'examin_amt_d':examin_amt_d}
    df = pd.DataFrame(data=d)
    # Write header only on first creation, then append.
    # NOTE(review): '\y' has no escape meaning so the literal survives,
    # but a raw string (r'D:\yall.csv') would be safer.
    if not os.path.isfile('D:\yall.csv'):
        df.to_csv('D:\yall.csv',header=True)
    else:
        df.to_csv('D:\yall.csv', mode='a',header=False)
|
class SistemaDeAudio(object):
    """
    Audio system: configures frequency, volume and channels, and plays files.
    """
    def configurar_frequencia(self):
        """
        Configures the audio frequency.
        """
        print("Frequência configurada")
    def configurar_volume(self):
        """
        Configures the audio volume.
        """
        print("Volume configurado")
    def configurar_canais(self):
        """
        Configures the audio channels.
        """
        print("Canais configurados")
    def reproduzir_audio(self, arquivo):
        """
        Plays the given audio file (arquivo: file name to play).
        """
        print("Reproduzindo: " + arquivo)
|
from utils import *
from User import User
from Project import Project
from Sprint import Sprint
from stasis.Singleton import get as db
class Availability:
    """
    Per-sprint availability hours, persisted in the stasis 'availability'
    table as sprint.id -> {user.id -> {day timestamp -> hours}}.
    """
    def __init__(self, sprint):
        self.sprint = sprint
    def get(self, user, timestamp):
        """Hours the user is available on the given day (0 if unset)."""
        table = db()['availability']
        if self.sprint.id in table:
            data = table[self.sprint.id]
            if user.id in data:
                ts = dateToTs(timestamp)
                if ts in data[user.id]:
                    return data[user.id][ts]
        return 0
    def getAll(self, timestamp):
        """Total hours across all users on the given day."""
        rtn = 0
        ts = dateToTs(timestamp)
        table = db()['availability']
        if self.sprint.id in table:
            for data in table[self.sprint.id].values():
                if ts in data:
                    rtn += data[ts]
        return rtn
    def set(self, user, timestamp, hours):
        """Record the user's hours for the given day."""
        table = db()['availability']
        if self.sprint.id not in table:
            table[self.sprint.id] = {}
        # table.change appears to be a transactional context manager that
        # persists mutations of `data` on exit -- confirm in stasis.
        with table.change(self.sprint.id) as data:
            if user.id not in data:
                data[user.id] = {}
            data[user.id][dateToTs(timestamp)] = hours
    def delete(self, user):
        """Remove all of the user's availability for this sprint."""
        table = db()['availability']
        if self.sprint.id in table:
            if user.id in table[self.sprint.id]:
                with table.change(self.sprint.id) as data:
                    del data[user.id]
    def wipe(self):
        """Remove the whole availability record for this sprint."""
        table = db()['availability']
        if self.sprint.id in table:
            del table[self.sprint.id]
    def getAllForward(self, timestamp, user = None):
        """Total hours on/after the given day, optionally for one user only."""
        rtn = 0
        ts = dateToTs(timestamp)
        table = db()['availability']
        if self.sprint.id in table:
            # iteritems: this module is Python 2.
            for userid, data in table[self.sprint.id].iteritems():
                if user is not None and user.id != userid:
                    continue
                for thisstamp, hours in data.iteritems():
                    if thisstamp >= ts:
                        rtn += hours
        return rtn
    def trim(self):
        """Drop entries that fall outside the sprint's start/end window."""
        table = db()['availability']
        if self.sprint.id in table:
            with table.change(self.sprint.id) as data:
                for userid, hourmap in data.iteritems():
                    data[userid] = {timestamp: hours for timestamp, hours in hourmap.iteritems() if self.sprint.start <= timestamp <= self.sprint.end}
|
# Cesar Hernandez 1835494
# Scales a lemonade recipe: read the base recipe and its yield, then print
# the ingredient amounts for a desired number of servings, in cups and
# (converted) in gallons.
lem_juice = float(input('Enter amount of lemon juice (in cups):\n'))
water = float(input('Enter amount of water (in cups):\n'))
agave_nect = float(input('Enter amount of agave nectar (in cups):\n'))
servings = float(input('How many servings does this make?\n'))
# Echo the base recipe.
print('\nLemonade ingredients - yields', '{:.2f}'.format(servings), 'servings')
print('{:.2f}'.format(lem_juice), 'cup(s) lemon juice')
print('{:.2f}'.format(water), 'cup(s) water')
print('{:.2f}'.format(agave_nect), 'cup(s) agave nectar\n')
new_servings = float(input('How many servings would you like to make?\n'))
constant = new_servings/servings # constant to find new servings
# Scaled recipe in cups.
print('\nLemonade ingredients - yields', '{:.2f}'.format(new_servings), 'servings')
print('{:.2f}'.format(lem_juice*constant), 'cup(s) lemon juice')
print('{:.2f}'.format(water*constant), 'cup(s) water')
print('{:.2f}'.format(agave_nect*constant), 'cup(s) agave nectar')
gallon_const = 16  # cups per gallon
# Scaled recipe converted to gallons.
# NOTE(review): the header line still says "servings" even though the
# amounts below are in gallons -- confirm intended wording.
print('\nLemonade ingredients - yields', '{:.2f}'.format(new_servings), 'servings')
print('{:.2f}'.format(lem_juice*constant/gallon_const), 'gallon(s) lemon juice')
print('{:.2f}'.format(water*constant/gallon_const), 'gallon(s) water')
print('{:.2f}'.format(agave_nect*constant/gallon_const), 'gallon(s) agave nectar')
#!/usr/bin/env python
# encoding: utf-8
try:
from unittest import mock
except Exception:
import mock
import pytest
from translate import Translator
from translate.exceptions import InvalidProviderError, TranslationError
from translate.providers import MyMemoryProvider
from .vcr_conf import vcr
def test_tranlate_with_invalid_provider():
    # An unknown provider name must raise InvalidProviderError with a
    # message listing the valid providers.
    with pytest.raises(InvalidProviderError) as error:
        Translator(to_lang='en', provider='invalid_provider')
    assert 'Provider class invalid. Please check providers list below:' in str(error.value)
def test_tranlate_with_valid_provider():
    # 'mymemory' must resolve to the MyMemoryProvider class.
    translator = Translator(to_lang='en', provider='mymemory')
    assert isinstance(translator.provider, MyMemoryProvider)
def test_tranlate_with_provider_extra_argument():
    # Case from MyMemoryProvider extra argument
    # Extra keyword args (here: email) are forwarded to the provider.
    email = 'test@test.com'
    translator = Translator(to_lang='en', email=email)
    assert translator.provider.email == email
# The tests below replay recorded HTTP responses via vcr cassettes.
# NOTE(review): the non-ASCII expected literals below look encoding-damaged
# (mojibake) -- verify the file's encoding against the original cassettes.
@vcr.use_cassette
def test_tranlate_english_to_english():
    # Translating into the source language should be a no-op.
    translator = Translator(to_lang='en')
    translation = translator.translate('why')
    assert 'why' == translation
@vcr.use_cassette
def test_translate_english_to_chinese_traditional():
    translator = Translator(to_lang='zh-TW')
    translation = translator.translate('hello world')
    assert u'ไฝ ๅฅฝ๏ผไธ็' == translation
@vcr.use_cassette
def test_translate_english_to_portuguese():
    translator = Translator(to_lang='pt-BR')
    translation = translator.translate('hello world')
    assert u'olรก mundo' == translation
@vcr.use_cassette
def test_translate_english_to_chinese_simplified():
    translator = Translator(to_lang='zh-CN')
    translation = translator.translate('hello world')
    assert u'ไฝ ๅฅฝ๏ผไธ็' == translation
@vcr.use_cassette
def test_translate_with_quote():
    # Quotes inside the source text must survive URL encoding.
    translator = Translator(to_lang='zh')
    translation = translator.translate("What is 'yinyang'?")
    assert u'ไปไนๆฏโ้ด้ณโ๏ผ' == translation
@vcr.use_cassette
def test_translate_with_multiple_sentences():
    translator = Translator(to_lang='zh')
    translation = translator.translate('yes or no')
    assert u'ๆฏๆๅฆ' in translation
@vcr.use_cassette
def test_translate_with_HTTPError():
    # A broken base URL must surface as requests.HTTPError (404).
    import requests
    t = Translator(to_lang='de', provider='mymemory')
    t.provider.base_url += '-nonsense'
    with pytest.raises(requests.HTTPError) as error:
        t.translate('hello')
    assert '404' in str(error)
@vcr.use_cassette
def test_translate_with_status_error():
    # An invalid email makes the API report an error status, surfaced as
    # TranslationError (or HTTPError depending on the response).
    import requests
    t = Translator(to_lang='de', provider='mymemory', email='invalid')
    with pytest.raises((TranslationError, requests.HTTPError)) as error:
        t.translate('hello again!')
    assert 'INVALID EMAIL' in str(error).upper()
@mock.patch('requests.get')
def test_tranlate_taking_secondary_match(mock_requests, main_translation_not_found):
    # main_translation_not_found is presumably a conftest fixture holding a
    # response whose primary match is missing -- the provider must then
    # fall back to a secondary match.
    mock_requests.return_value.json.return_value = main_translation_not_found
    translator = Translator(to_lang='zh-TW')
    translation = translator.translate('unknown')
    assert 'ๆช็ฅ' == translation
|
#!/usr/bin/env python2
import dlock13
import sys, time
def open(topic, duration):
    """
    Open the door whose MQTT topic prefix is `topic` for `duration` seconds.

    Returns the value of dlock13.Opener.open() (the open-until timestamp,
    per main() below).

    NOTE: shadows the builtin open() within this module.
    """
    name = 'nada'
    doors = {name: topic}
    lock = dlock13.Opener(doors)
    # The original wrapped this in try/except only to `raise e` (which
    # discards the traceback in Python 2) after a dead `lock = None`
    # assignment; letting the exception propagate is equivalent and keeps
    # the original traceback.
    return lock.open(name, duration)
def main():
    # Usage: dlock13-open <mqtt/lock/prefix> <duration-seconds>
    prog, args = sys.argv[0], sys.argv[1:]
    if len(args) < 2:
        print 'Usage: dlock13-open mqtt/lock/prefix duration'
        return 1
    # NOTE(review): this unpack raises ValueError when more than two
    # arguments are supplied (only "< 2" is guarded above).
    topic, duration = args
    duration = int(duration)
    until = open(topic, duration)
    # `until` is an absolute timestamp; report the remaining open time.
    print 'Door at %s is open for %d seconds' % (topic, int(until-time.time()))
if __name__ == '__main__':
    sys.exit(main())
|
# Created by MechAviv
# Quest ID :: 21001
# Find the Missing Kid 2
# NPC 1209006 asks the player to escort it; on accept, hand over item
# 4001271 (presumably the quest item), start the quest and warp the
# player; otherwise show a refusal line.
sm.setSpeakerID(1209006)
if sm.sendAskAccept("*Sniff sniff* I was so scared... Please take me to Athena Pierce."):
    sm.giveItem(4001271)
    sm.startQuest(parentID)
    sm.warp(914000500, 1)
else:
    # NOTE(review): the speaker is already 1209006; this repeat looks
    # redundant -- confirm whether sendAskAccept resets the speaker.
    sm.setSpeakerID(1209006)
    sm.sendNext("*Sob* Aran has declined my request!")
from sql_alchemy import banco
class LivroModel(banco.Model):
    """SQLAlchemy model for a book ('livro') row in the 'livros' table."""
    # maps this class to a table in the db
    __tablename__='livros'
    # NOTE(review): a Float primary key (and Float quantity) is unusual --
    # Integer would be the typical choice; confirm against the schema.
    livro_id = banco.Column(banco.Float, primary_key=True)
    nome = banco.Column(banco.String(80))
    preco = banco.Column(banco.Float(precision=1))
    quantidade = banco.Column(banco.Float(precision=2))
    def __init__(self, livro_id, nome, preco, quantidade):
        self.livro_id = livro_id
        self.nome = nome
        self.preco = preco
        self.quantidade = quantidade
    def json(self):
        """Dict representation of this book, used for JSON serialization."""
        return {
            'livro_id': self.livro_id,
            'nome': self.nome,
            'preco': self.preco,
            'quantidade': self.quantidade,
        }
    # cls is shorthand for the class itself
    @classmethod
    def find_livro(cls, livro_id):
        """Return the book with the given id, or None."""
        livro = cls.query.filter_by(livro_id=livro_id).first() # SELECT * FROM livros WHERE livro_id = $livro_id
        if livro:
            return livro
        return None
    def save_livro(self):
        """Persist this instance (commits immediately)."""
        banco.session.add(self)
        banco.session.commit()
    def update_livro(self, nome, preco, quantidade):
        """Update fields in memory only; caller must save/commit."""
        self.nome = nome
        self.preco = preco
        self.quantidade = quantidade
    def delete_livro(self):
        """Delete this row (commits immediately)."""
        banco.session.delete(self)
        banco.session.commit()
import pprint
from time import sleep
from InstagramAPI import InstagramAPI
# Placeholder credentials -- replace before running.
api = InstagramAPI ( "username", "password")
api.USER_AGENT = 'Instagram 10.34.0 Android (18/4.3; 320dpi; 720x1280; Xiaomi; HM 1SW; armani; qcom; en_US)'
# Module-level accumulators shared by the functions below.
users_list = []
following_users = []
def get_likes_list(username):
    """Collect the likers of *username*'s most recent post and follow them.

    Appends {'pk', 'username'} dicts to the module-level ``users_list`` and
    then delegates to follow_users().  NOTE(review): ``users_list`` is never
    cleared, so repeated calls accumulate likers across invocations.
    """
    api.login()
    # Resolve the numeric account id from the username.
    api.searchUsername(username)
    result = api.LastJson
    username_id = result['user']['pk']
    # Fetch the feed; the response is read back from api.LastJson.
    # (The call's return value was previously bound to an unused variable.)
    api.getUserFeed(username_id)
    result = api.LastJson
    media_id = result['items'][0]['id']
    # Collect the likers of the newest post.
    api.getMediaLikers(media_id)
    users = api.LastJson['users']
    for user in users:
        users_list.append({'pk':user['pk'], 'username':user['username']})
    follow_users(users_list)
def follow_users(users_list):
    """ Function: subscribe to users.

    Refreshes the module-level ``following_users`` list with the accounts
    already followed, then follows every user in *users_list* that is not
    among them, sleeping 10 s between iterations to avoid rate limits.
    """
    api.login()
    api.getSelfUsersFollowing()
    result = api.LastJson
    # NOTE(review): ``following_users`` is a module-level list and is never
    # cleared, so repeated calls append duplicates -- harmless for the
    # membership test, but it grows unboundedly.
    for user in result['users']:
        following_users.append(user['pk'])
    for user in users_list:
        if not user['pk'] in following_users:
            print('Following @' + user['username'])
            api.follow(user['pk'])
            # Throttle between API writes.
            sleep(10)
        else:
            print('Already following @' + user['username'])
            sleep(10)
def get_my_profile_details():
    """ Function: getting information from your account.

    Prints the logged-in account's username, full name and follower count.
    """
    api.login()
    api.getSelfUsernameInfo()
    result = api.LastJson
    username = result['user']['username']
    full_name = result['user']['full_name']
    followers = result['user']['follower_count']
    print({'Username': username, 'Full name': full_name, 'Followers': followers})
def get_my_feed():
    """Print the image URLs of up to five of the account's own recent posts.

    Posts without an 'image_versions2' entry (e.g. videos) are skipped.
    """
    image_urls = []
    api.login()
    api.getSelfUserFeed()
    result = api.LastJson
    # Idiomatic membership test on the dict itself instead of .keys().
    if 'items' in result:
        for item in result['items'][:5]:
            if 'image_versions2' in item:
                # candidates[1] is the smaller rendition of the image.
                image_url = item['image_versions2']['candidates'][1]['url']
                image_urls.append(image_url)
    print(image_urls)
# Entry point: follow everyone who liked this profile's latest post.
get_likes_list('profile_name')
|
from concurrent import futures
from google.cloud import pubsub_v1
from random import randint
from datetime import datetime
import json
# TODO(developer)
project_id = "packt-data-eng-on-gcp"
topic_id = "bike-sharing-trips"
publisher = pubsub_v1.PublisherClient()
topic_path = publisher.topic_path(project_id, topic_id)
# Futures for in-flight publishes; awaited at the bottom of the script.
publish_futures = []
def get_callback(publish_future, data):
    """Build a done-callback that reports the publish outcome for *data*."""
    def _on_done(fut):
        try:
            # result() re-raises a publish failure; wait at most 60 seconds.
            print(fut.result(timeout=60))
        except futures.TimeoutError:
            print(f"Publishing {data} timed out.")
    return _on_done
def create_random_message():
    """Generate one synthetic bike-sharing trip record as a dict."""
    # Field order matches the original: trip id, timestamp, station,
    # bike number, duration.
    return {
        'trip_id': randint(10000, 99999),
        'start_date': str(datetime.utcnow()),
        'start_station_id': randint(200, 205),
        'bike_number': randint(100, 999),
        'duration_sec': randint(1000, 9999),
    }
# Publish ten random trip messages; each future gets a done-callback that
# prints the message id (or a timeout notice).
for i in range(10):
    message_json = create_random_message()
    data = json.dumps(message_json)
    # Pub/Sub payloads must be bytes.
    publish_future = publisher.publish(topic_path, data.encode("utf-8"))
    publish_future.add_done_callback(get_callback(publish_future, data))
    publish_futures.append(publish_future)
# Wait for all the publish futures to resolve before exiting.
futures.wait(publish_futures, return_when=futures.ALL_COMPLETED)
print(f"Published messages with error handler to {topic_path}.")
|
import logging
import pickle
import math
import abc
import numpy as np
from copy import deepcopy
from sklearn.metrics import mean_absolute_error, mean_squared_error
from sample_sim.data_model.gp_wrapper import TorchSparseUncertainGPModel, TorchExactGp, GPWrapper
from sample_sim.data_model.workspace import Workspace
from sample_sim.general_utils import is_good_matrix, wmae, rwmse
from smallab.utilities.tqdm_to_logger import TqdmToLogger
from tqdm import tqdm
import random
from scipy.stats import norm,multivariate_normal
class DataModel(abc.ABC):
    """Base class for models fit to (X, y) samples with optional input noise.

    Accumulates samples, provides error metrics against ground truth /
    another model / a track, and a Monte-Carlo query that marginalises over
    the per-sample input uncertainties.  Subclasses implement
    ``__query_many_implementation__``.
    """

    def __init__(self, logger, use_uncertainty=False, verbose=False, cov_fn=None):
        # Samples accumulate as plain lists and are flattened to numpy
        # arrays lazily (see update() / _flatten_data()).
        self.Xs = []
        self.Ys = []
        # (sic) spelling kept: the attribute is part of the save()/load()
        # pickle layout used by subclasses.
        self.input_uncertanties = []
        self.use_uncertainty = use_uncertainty
        self.verbose = verbose
        # Name of a logging.Logger, resolved via logging.getLogger(...).
        self.logger = logger
        self.cov_fn = cov_fn

    def error_against_ground_truth(self, other_datamodel, use_mc):
        """Return (MAE, RMSE, WMAE, RWMSE) of *other_datamodel* evaluated at
        this model's sample locations against this model's stored Ys."""
        assert isinstance(other_datamodel, DataModel)
        self._flatten_data()
        if use_mc:
            # BUG FIX: the result used to be bound to ``predicted_ys_other``
            # while ``predicted_ys`` was read below, raising NameError on
            # this path.
            # NOTE(review): ``monte_carlo_query`` is not defined in this
            # module -- presumably provided elsewhere; confirm (mcmc_query2
            # may be the intended call).
            predicted_ys, _ = other_datamodel.monte_carlo_query(self.Xs)
        else:
            predicted_ys = other_datamodel.query_many(self.Xs, return_std=False)
        return mean_absolute_error(self.Ys, predicted_ys), np.sqrt(mean_squared_error(self.Ys, predicted_ys)), wmae(self.Ys,predicted_ys),rwmse(self.Ys,predicted_ys),

    def error_against_model(self, other_datamodel, points, use_mc):
        """Compare this model's predictions with *other_datamodel*'s at
        *points*; returns (MAE, RMSE, WMAE, RWMSE)."""
        self._flatten_data()
        if use_mc:
            predicted_ys_other,_ = other_datamodel.mcmc_query2(points,weight=True)
        else:
            predicted_ys_other = other_datamodel.query_many(points, return_std=False)
        predicted_ys_self = self.query_many(points, return_std=False)
        return mean_absolute_error(predicted_ys_self, predicted_ys_other), np.sqrt(
            mean_squared_error(predicted_ys_self, predicted_ys_other)), wmae(predicted_ys_self,predicted_ys_other), rwmse(predicted_ys_self,predicted_ys_other)

    def error_against_track(self, points, ys, use_mc, mc_iterations=1000, mc_keep=100, weight=False):
        """Evaluate this model at *points* against observed *ys*;
        returns (MAE, RMSE, WMAE, RWMSE)."""
        self._flatten_data()
        if use_mc:
            if mc_keep is None:
                mc_keep = mc_iterations
            predicted_ys,_ = self.mcmc_query2(points,iterations=mc_iterations,keep=mc_keep,weight=weight)
        else:
            predicted_ys = self.query_many(points, return_std=False)
        return mean_absolute_error(predicted_ys, ys), np.sqrt(
            mean_squared_error(predicted_ys, ys)), wmae(predicted_ys,ys), rwmse(predicted_ys,ys)

    def log_error_gt(self, use_mc):
        """Log the model's self-consistency error (evaluated on its own data)."""
        # Locals renamed so the imported wmae/rwmse helpers are not shadowed.
        mae, rmse, wmae_val, rwmse_val = self.error_against_ground_truth(self, use_mc)
        logging.getLogger(self.logger).info(f"Error Against GT = MAE: {mae}, RMSE: {rmse}, WMAE: {wmae_val}, RWMSE: {rwmse_val}")

    def mcmc_query2(self, points, point_noises=None, iterations=500, keep=None, weight=True):
        """Monte-Carlo prediction at *points* marginalising input uncertainty.

        Each iteration resamples the training inputs from their noise
        distributions, refits the prior, and records (log-likelihood,
        (mean, std)).  With *keep* set, only the best-scoring rollouts are
        averaged; with *weight*, the average is likelihood-weighted.
        Returns (mean, variance-like spread) over the kept rollouts.
        """
        if keep is not None:
            means = np.zeros((keep, points.shape[0]))
            stds = np.zeros((keep, points.shape[0]))
            weights = np.zeros(keep)
        else:
            means = np.zeros((iterations, points.shape[0]))
            stds = np.zeros((iterations, points.shape[0]))
            weights = np.zeros(iterations)
        # Probabilities are only needed when we rank (keep) or weight rollouts.
        calculate_probability = weight or (keep is not None)
        if not calculate_probability:
            logging.getLogger(self.logger).debug("Using fast MC")
        else:
            logging.getLogger(self.logger).debug("Using slow MC")
        i = 0
        rollouts = []
        with tqdm(total=iterations, desc="Fully MCMC sample", file=TqdmToLogger(logging.getLogger(self.logger), logging.INFO)) as pbar:
            while i < iterations:
                log_likliehoods = []
                prior_logs = []
                current_inputs = []
                current_targets = []
                # Resample every training input from its noise model.
                for x, noise,y in zip(self.Xs, self.input_uncertanties,self.Ys):
                    prob_fn = multivariate_normal(mean=x,cov=np.diag(noise))
                    new_input = prob_fn.rvs()
                    if calculate_probability:
                        prior_logs.append(prob_fn.logpdf(new_input))
                    current_inputs.append(new_input)
                    current_targets.append(y)
                self.model.update_prior(np.array(current_inputs), np.array(current_targets))
                if calculate_probability:
                    cur_means,cur_vars = self.query_many(np.array(current_inputs))
                    for mean, var in zip(cur_means,cur_vars):
                        # NOTE(review): ``y`` here is the last value left over
                        # from the loop above, not the target paired with this
                        # mean; and ``scale`` receives a variance where
                        # scipy.stats.norm expects a standard deviation --
                        # confirm both before relying on these likelihoods.
                        log_likliehoods.append(norm.logpdf(y,loc=mean,scale=var))
                    logging.getLogger(self.logger).info(f"Prior : {min(prior_logs)} - {max(prior_logs)})")
                    logging.getLogger(self.logger).info(f"GP: {min(log_likliehoods)} - {max(log_likliehoods)})")
                    log_likelihood = sum(log_likliehoods) + sum(prior_logs)
                    pbar.set_postfix(likeliehood=log_likelihood )
                else:
                    log_likelihood = 0
                if point_noises is None:
                    mean, std = self.query_many(points)
                else:
                    # Query points themselves are uncertain: resample them too.
                    inputs = []
                    for x, noise in zip(points,point_noises):
                        prob_fn = multivariate_normal(mean=x,cov=np.diag(noise))
                        inputs.append(prob_fn.rvs())
                    mean, std = self.query_many(np.array(inputs))
                rollouts.append((log_likelihood,(mean,std)))
                pbar.update(1)
                i += 1
        i = 0
        if keep is None:
            iterator = rollouts
        else:
            # NOTE(review): on tied log-likelihoods sorted() falls back to
            # comparing the (mean, std) arrays, which raises for numpy
            # arrays -- consider a key=lambda r: r[0].
            iterator = sorted(rollouts,reverse=True)[:keep]
        for log_likliehood, output in iterator:
            logging.getLogger(self.logger).info(f"LL:{ log_likliehood}")
            mean,std = output
            if keep == 1:
                return mean,std
            means[i] = mean
            stds[i] = std
            weights[i] = log_likliehood
            i += 1
        if weight:
            # NOTE(review): these are raw *log* likelihoods (possibly
            # negative), not exponentiated weights -- confirm the intended
            # weighting scheme.
            weights = weights/sum(weights)
            return np.average(means, axis=0,weights=weights), np.var(means, axis=0) + np.mean(stds, axis=0)
        else:
            return np.mean(means, axis=0), np.var(means, axis=0) + np.mean(stds, axis=0)

    def update(self, X, Y, input_uncertainties=None):
        """Append new samples (and their input uncertainties) to the model."""
        # NOTE(review): once these attributes become numpy arrays the
        # ``== []`` comparisons rely on numpy returning False for the
        # incompatible shape -- fragile but effective; confirm.
        if self.Xs == []:
            self.Xs = X
        else:
            self.Xs = np.vstack((self.Xs, X))
        if self.Ys == []:
            self.Ys = Y
        else:
            self.Ys = np.append(self.Ys,Y)
        if self.input_uncertanties == []:
            self.input_uncertanties = input_uncertainties
        else:
            self.input_uncertanties = np.vstack((self.input_uncertanties, input_uncertainties))

    def query(self, p, return_std=True):
        """Predict at a single point *p* (wraps query_many)."""
        return self.query_many(np.array([p]), return_std=return_std)

    def query_many(self, Xs, return_std=True):
        """Predict at an array of points; returns mean or (mean, std)."""
        if return_std:
            mean, std = self.__query_many_implementation__(Xs, return_std)
        else:
            std = None
            mean = self.__query_many_implementation__(Xs, return_std)
        if return_std:
            return mean, std
        else:
            return mean

    @abc.abstractmethod
    def __query_many_implementation__(self, Xs, return_std=True):
        """Subclass hook: produce predictions (and stds when requested)."""
        pass

    def _flatten_data(self):
        """Collapse the accumulated sample lists into numpy arrays in place."""
        if isinstance(self.Xs, list):
            self.Xs = np.vstack(self.Xs)
            if self.input_uncertanties is not None:
                self.input_uncertanties = np.vstack(self.input_uncertanties)
            self.Ys = np.concatenate(self.Ys)
        assert is_good_matrix(self.Xs)
        assert is_good_matrix(self.input_uncertanties)
        assert is_good_matrix(self.Ys)
class TorchDataModel(DataModel):
    """DataModel backed by a torch GP wrapper (see GPWrapper)."""
    def __init__(self, logger, model: GPWrapper,use_uncertainty,workspace:Workspace,cov_fn=None):
        super().__init__(logger,use_uncertainty,verbose=True)
        self.logger = logger
        self.model = model
        self.workspace = workspace
        self.cov_fn = cov_fn
    def update(self, X, Y, input_uncertainties=None):
        """Append samples and push the full data set into the GP prior."""
        super().update(X, Y, input_uncertainties)
        self.model.update_prior(self.Xs,self.Ys,self.input_uncertanties)
    def fit(self,steps=200):
        """Optimise the GP hyperparameters on the accumulated data."""
        self.model.fit(self.Xs,self.Ys,self.input_uncertanties,optimization_steps=steps)
    def __query_many_implementation__(self, Xs, return_std=True):
        # Delegate prediction straight to the wrapped GP.
        return self.model.predict(Xs, return_std)
    def save(self, fname):
        """Pickle the raw samples to <fname>dm.pkl and save the GP alongside."""
        with open(fname + "dm.pkl", "wb") as f:
            pickle.dump(self.Xs, f)
            pickle.dump(self.Ys, f)
            pickle.dump(self.input_uncertanties, f)
            pickle.dump(self.use_uncertainty, f)
        self.model.save(fname)
    def load(self, fname):
        """Inverse of save(): restore samples (in dump order) and the GP."""
        with open(fname + "dm.pkl", "rb") as f:
            self.Xs = pickle.load(f)
            self.Ys = pickle.load(f)
            self.input_uncertanties = pickle.load(f)
            self.use_uncertainty = pickle.load(f)
        self.model.load(fname)
class TorchApproximateGPBackedDataModel(TorchDataModel):
    """TorchDataModel backed by a sparse GP that models input uncertainty."""
    def __init__(self, logger, workspace:Workspace,inducing_points=None, verbose=False, use_x_as_inducing=True,cov_fn=None):
        # NOTE(review): self.refit is set but never read in this module --
        # presumably consumed elsewhere; confirm.
        self.refit = True
        self.gp = TorchSparseUncertainGPModel(logger, inducing_points, use_fast_strategy=False)
        self.gp.verbose = verbose
        self.use_x_as_inducing = use_x_as_inducing
        super().__init__(logger, model=self.gp,use_uncertainty=True,workspace=workspace,cov_fn=cov_fn)
class TorchExactGPBackedDataModel(TorchDataModel):
    """TorchDataModel backed by an exact GP (no input-uncertainty handling)."""
    def __init__(self, X, Y, logger, workspace:Workspace,use_better_mean=False,force_cpu=False,cov_fn=None,device=None):
        # BUG FIX: ``force_cpu`` was hard-coded to True, silently ignoring
        # the constructor argument; it is now passed through.
        self.gp = TorchExactGp(X, Y, logger=logger, use_mlp_mean=use_better_mean,force_cpu=force_cpu,gpu_num=device)
        super().__init__(logger,model=self.gp, use_uncertainty=False,workspace=workspace,cov_fn=cov_fn)
|
'''tk_mouse_click_shape1.py
show xy coordinates of mouse click position
relative to root or relative within a shape
tested with Python27/Python33 by vegaseat
'''
try:
# Python2
import Tkinter as tk
except ImportError:
# Python3
import tkinter as tk
def showxy(event):
    """Show the mouse-click position, relative to the blue rectangle,
    in the window title."""
    # event.x / event.y are relative to the canvas; subtracting the
    # rectangle's upper-left corner (x1, y1) makes them rectangle-relative.
    root.title('rectangle x=%s y=%s' % (event.x - x1, event.y - y1))
root = tk.Tk()
root.title("Mouse click within blue rectangle ...")
# create a canvas for drawing
w = 400
h = 400
cv = tk.Canvas(root, width=w, height=h, bg='white')
cv.pack()
# draw a blue rectangle shape with
# upper left corner coordinates x1, y1
# lower right corner coordinates x2, y2
x1 = 20
y1 = 30
x2 = 380
y2 = 370
cv.create_rectangle(x1, y1, x2, y2, fill="blue", tag='rectangle')
# bind left mouse click within shape rectangle
# (showxy reads the x1/y1 globals defined above)
cv.tag_bind('rectangle', '<Button-1>', showxy)
root.mainloop()
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: drop KernelVersion.pretty_kernel_version_name."""
    def forwards(self, orm):
        # Deleting field 'KernelVersion.pretty_kernel_version_name'
        db.delete_column(u'schema_kernel_kernelversion', 'pretty_kernel_version_name')
    def backwards(self, orm):
        # Adding field 'KernelVersion.pretty_kernel_version_name'
        # (restored with a placeholder default since the data is lost).
        db.add_column(u'schema_kernel_kernelversion', 'pretty_kernel_version_name',
                      self.gf('django.db.models.fields.CharField')(default='kernel version', max_length=100),
                      keep_default=False)
    # Frozen ORM snapshot used by South; auto-generated -- do not hand-edit.
    models = {
        u'schema_kernel.kernelversion': {
            'Meta': {'object_name': 'KernelVersion'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'})
        },
        u'schema_kernel.pcialiases': {
            'Meta': {'unique_together': "(('vendor', 'subvendor', 'device', 'subdevice'),)", 'object_name': 'PCIAliases'},
            'device': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'module': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['schema_kernel.PCIModule']", 'symmetrical': 'False'}),
            'subdevice': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'subvendor': ('django.db.models.fields.CharField', [], {'max_length': '4'}),
            'vendor': ('django.db.models.fields.CharField', [], {'max_length': '4'})
        },
        u'schema_kernel.pcimodule': {
            'Meta': {'unique_together': "(('name', 'version', 'srcversion'),)", 'object_name': 'PCIModule'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'kernelVersionModuleConnector': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['schema_kernel.KernelVersion']", 'symmetrical': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'srcversion': ('django.db.models.fields.CharField', [], {'max_length': '24', 'null': 'True', 'blank': 'True'}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
        }
    }
    complete_apps = ['schema_kernel']
"""
PythonAEM error, contains a message and PythonAEM Result object
"""
class Error(RuntimeError):
    """
    PythonAEM error, contains a message and PythonAEM Result object
    useful for debugging the result and response when an error occurs
    """
    def __init__(self, message, result):
        """
        Initialise an error.
        :param message: error message
        :param result: PythonAEM Result associated with the failure
        :return: PythonAEM Error instance
        """
        # BUG FIX: the message is now forwarded to RuntimeError so that
        # str(error) shows it (previously str(error) was empty).
        super().__init__(message)
        self.message = message
        self.result = result
|
from PyQt5 import QtWidgets
from Pantallas.Serializables import modificacionMaxMinIngreso
from Pantallas.Serializables import modificacionMaxMin
class ModificacionMaxMinIngresoSerializables(QtWidgets.QWidget, modificacionMaxMinIngreso.Ui_Form):
    """Screen that asks for a serializable item code before editing min/max."""
    def __init__(self, *args, **kwargs):
        QtWidgets.QWidget.__init__(self, *args, **kwargs)
        self.setupUi(self)
        self.se_btn_confirmar.clicked.connect(self.elegirCodigoSer)
    def elegirCodigoSer(self):
        # Grab the entered code and open the min/max editing window.
        self.value1 = self.se_input_1.text()
        # Kept as an attribute so the new window is not garbage-collected.
        self.window = ModificarMinMaxSerializables()
        self.window.show()
class ModificarMinMaxSerializables(QtWidgets.QWidget, modificacionMaxMin.Ui_Form):
    """Screen for editing the min/max values (logic not implemented yet)."""
    def __init__(self, *args, **kwargs):
        QtWidgets.QWidget.__init__(self, *args, **kwargs)
        self.setupUi(self)
        self.se_btn_confirmar.clicked.connect(self.modificarMinMaxSer)
    def modificarMinMaxSer(self):
        # TODO: implement the actual min/max update.
        pass
if __name__ == "__main__":
app = QtWidgets.QApplication([])
window = ModificacionMaxMinIngresoSerializables()
window.show()
app.exec_() |
from otree.api import (
models, widgets, BaseConstants, BaseSubsession, BaseGroup, BasePlayer, Currency as c
)
import random
"""
Sim for Ultimatum/Two-Stage Bargaining Game
"""
class Constants(BaseConstants):
    name_in_url = 'ultimatum'
    players_per_group = 2
    num_rounds = 4
    instructions_template = 'ultimatum/instructions.html'
    # NOTE(review): evaluated once at import time, so every group/session
    # shares the same randomly drawn first-round proposer id -- confirm
    # this is intended.
    role = random.choice([1, 2])
    endowment = c(100)
class Subsession(BaseSubsession):
    def creating_session(self):
        # Shuffle players into fresh random groups each round.
        self.group_randomly()
class Group(BaseGroup):
    # Stage 1: proposer's offer out of the endowment, then accept/reject.
    offer = models.CurrencyField(min=0, max=Constants.endowment, label='')
    responder_choice = models.BooleanField(
        widget=widgets.RadioSelect,
        choices=[[True, 'Accept'], [False, 'Reject']],
        label='')
    # Stage 2: responder's counter-offer (capped at 25), then accept/reject.
    counter = models.CurrencyField(min=0, max=25, label='')
    proposer_choice = models.BooleanField(
        widget=widgets.RadioSelect,
        choices=[[True, 'Accept'], [False, 'Reject']],
        label='')
class Player(BasePlayer):
    def role(self):
        # Round 1: the id drawn in Constants.role is the proposer; in later
        # rounds the roles alternate (recursively walks back one round).
        if self.round_number == 1:
            return 'proposer' if self.id_in_group == Constants.role else 'responder'
        else:
            return 'proposer' if self.in_round(self.round_number - 1).role() == 'responder' else 'responder'
    def other_player(self):
        # Two-player groups: the only other member.
        return self.get_others_in_group()[0]
    def set_payoff(self):
        """Assign payoffs from the responder's (and possibly proposer's) choices."""
        if self.group.responder_choice:
            # First offer accepted: split the full endowment.
            self.group.get_player_by_role('responder').payoff = self.group.offer
            self.group.get_player_by_role('proposer').payoff = Constants.endowment - self.group.offer
        else:
            # NOTE(review): the counter-offer stage (pie of 25) only pays out
            # when round_number > 2 -- confirm this matches the design.
            if self.round_number > 2 and self.group.proposer_choice:
                self.group.get_player_by_role('proposer').payoff = self.group.counter
                self.group.get_player_by_role('responder').payoff = 25 - self.group.counter
            else:
                self.payoff = 0
                self.other_player().payoff = 0
|
import fpdf
import csv
import os
d="path"
parent="C:\Python33"
pat=os.path.join(parent,d)
data=os.listdir(pat)
new=[]
for i in range(len(data)):
with open (data[i]) as f1:
dirc=list(csv.reader(f1))
for j in range(len(data)):
new.append(j)
print(new)
|
import os
import pandas as pd
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
from collections import *
from itertools import *
from Properties import *
from scipy.interpolate import *
from FunkyFuncs import *
from MiscFunctions import *
from Nodes import *
from collections import *
import Cycles
# Build and display a Rankine cycle: two turbine stages (9 MPa -> 1 MPa at
# 600, then 1 MPa -> 8 kPa at 500), condenser, pump back to 9 MPa, and a
# CFWH (presumably a closed feedwater heater) taking 12.5% of turbine flow.
Set1 = Cycles.RankineDynamic(mDot = 50, eta_turb = .92, eta_pump = .8)
Set1.addTurbine(P1 = Pressure.from_MPa(9),P2 = Pressure.from_MPa(1),T1 = Temperature(600))
Set1.addTurbine(P1 = Pressure.from_MPa(1), P2 = Pressure.from_kPa(8), T1 = Temperature(500))
Set1.addCondenser()
Set1.addPump(P = Pressure.from_MPa(9))
Set1.addCFWH(fracTurbP = .125)
Set1.initialize()
Set1.dispFull()
# Copyright (c) 2015, Warren Weckesser. All rights reserved.
# This software is licensed according to the "BSD 2-clause" license.
#
# Use pyqtgraph to display the eye diagram computed by eyediagram.grid_count.
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
from eyediagram.demo_data import demo_data
from eyediagram.core import grid_count
def colorize(counts, color1, color2=None):
    """
    Map the integer array `counts` to an array of RGBA pixel values.

    Count values 1..counts.max() are colored on a linear ramp from
    `color1` to `color2` (default white, (255, 255, 255)).  A count of
    zero maps to black with alpha 0, i.e. fully transparent.
    """
    if color2 is None:
        color2 = (255, 255, 255)
    n_levels = counts.max()
    # Build a palette of n_levels+1 RGBA rows; row 0 stays transparent black.
    palette = np.zeros((n_levels + 1, 4), dtype=np.uint8)
    for channel in range(3):
        palette[1:, channel] = np.linspace(color1[channel], color2[channel], n_levels)
    palette[1:, 3] = 255   # nonzero counts are opaque
    palette[0, 3] = 0      # zero counts are transparent
    # Fancy indexing maps every count to its palette row.
    return palette[counts]
# Generate image data
y = demo_data(5000, 24)
ybounds = (-0.25, 1.25)
# Compute the eye diagram image data.
counts = grid_count(y, 48, offset=16, size=(480, 480), bounds=ybounds)
# Convert counts to an array of RGBA values.
yellow = (224, 192, 48)
img_data = colorize(counts, yellow)
#-------------------------------------------------------------------------
# The rest of this script uses pyqtgraph to create a plot
# of the eye diagram.
pg.mkQApp()
win = pg.GraphicsLayoutWidget()
win.setWindowTitle('Eye Diagram')
# A plot area with axes for displaying the image.
p1 = win.addPlot()
# ImageItem for displaying the eye diagram as an image.
img = pg.ImageItem()
img.setImage(img_data.astype(np.float64))
# NOTE(review): ImageItem.setBorder normally takes a pen/color argument;
# confirm that passing 10 here is intended.
img.setBorder(10)
p1.addItem(img)
# Set position and scale of image.
tr = QtGui.QTransform()
dy = ybounds[1] - ybounds[0]
tr.scale(2./counts.shape[0], dy/counts.shape[1])
h = counts.shape[1]
p0 = h * ybounds[0]/dy
tr.translate(0, p0)
img.setTransform(tr)
# Show the grid lines in the plot.
ax = p1.getAxis('left')
ax.setGrid(192)
ax = p1.getAxis('bottom')
ax.setGrid(192)
win.resize(640, 480)
win.show()
#-------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    # Start the Qt event loop only when not in an interactive session.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QGuiApplication.instance().exec_()
|
from flask import Flask, request, make_response
from currencyExchange import *
import argparse
import os
import json
# WSGI application object used by the route decorators below.
app = Flask(__name__)
##
# Confirms the service is working on a browser
# @return {dict} a symbolic confirmation of the service
##
@app.route("/", methods=["GET"])
def retornodummy():
r = make_response("Currency Exchange Service Works")
return r
##
# Provides the user with the exchange rate of two currencies
# @param {dict} contenido de la peticion a la funcion
# {
# "baseCurrency":EUR,
# "baseCurrencyAmount":$$$,
# "quoteCurrency":GBP
# }
# @return {dict} a symbolic confirmation of the connection
#{
# "exchangeRate":$$$
# "feeCost":$$$
# "quoteAmountBalance":$$$
#}
##
@app.route("/getexchangerate", methods=["POST", "GET"])
def getexchangerate():
req = request.get_json(silent=True, force=True)
#we get the parameters we need from the request
baseCurrency = req.get("baseCurrency")
baseCurrencyAmount = req.get("baseCurrencyAmount")
quoteCurrency = req.get("quoteCurrency")
#we process the BUSINESS LOGIC to get the values requested by the user
exchangeRate = getRate(baseCurrency, quoteCurrency,baseCurrencyAmount)
feeCost = getFeeCost(baseCurrency,baseCurrencyAmount)
quoteAmountBalance = getQuoteAmountBalance(quoteCurrency)
res = {"exchangeRate":exchangeRate, "feeCost":feeCost, "quoteAmountBalance":quoteAmountBalance}
# Construccion de respuesta en formato json
# ********************************************
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers["Content-Type"] = "application/json"
# ********************************************
return r
##
# Makes a transaction between two currencies
# @param {dict} contenido de la peticion a la funcion
# {
# "baseCurrency":EUR,
# "quoteCurrency":GBP
# "quoteCurrencyAmount": $$$
# }
# @return {dict} a symbolic confirmation of the connection
#{
# "baseCurrencyBalance":$$$
# "quoteCurrencyBalance":$$$
#}
##
@app.route("/exchangecurrency", methods=["POST"])
def exchangecurrency():
req = request.get_json(silent=True, force=True)
#we get the parameters we need from the request
baseCurrency = req.get("baseCurrency")
quoteCurrency = req.get("quoteCurrency")
quoteCurrencyAmount = req.get("quoteCurrencyAmount")
#we process the BUSINESS LOGIC to get the values requested by the user
tradeDataSet = exchange(baseCurrency, quoteCurrency, quoteCurrencyAmount)
res = {"exchangeRate":tradeDataSet["exchangeRate"], "feeCost":tradeDataSet["feeCost"], "quoteAmountBalance":tradeDataSet["quoteAmountBalance"]}
# Construccion de respuesta en formato json
# ********************************************
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers["Content-Type"] = "application/json"
# ********************************************
return r
##
# Provides the current balance statuses
# @return {dict} the dict of the current balances
#{"USD": 1000,
#"GBP": 1000,
#"EUR": 1000,
#"JPY": 1000,
# ...
#}
##
@app.route("/getallquotesbalance", methods=["GET"])
def getallquotesbalance():
res = getAllQuotesBalance()
# Construccion de respuesta en formato json
# ********************************************
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers["Content-Type"] = "application/json"
# ********************************************
return r
##
# Provides the user with the total amount of fees charged so far in USD
# @return {dict} a set with the amount charged
#{"USD": 1000}
##
@app.route("/calculatefees", methods=["GET"])
def calculateFees():
res = {"USD":calculateTotalFees()}
# Construccion de respuesta en formato json
# ********************************************
res = json.dumps(res, indent=4)
r = make_response(res)
r.headers["Content-Type"] = "application/json"
# ********************************************
return r
##############################################################################################################################################
#MAIN PROCESS TO RUN THE FLASK SERVICE
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Currency Exchange Service')
parser.add_argument('--port', dest='noport', metavar='NNNN', type=int, help='The port number for webservice listener')
parser.add_argument('--debug', dest='debug', metavar='N', type=int, help='debug mode, 1 for turn on')
args = parser.parse_args()
# asignacion de puerto del webhook y modo debug
# si el puerto no es asignado, se toma 7700 por default
debug = bool(args.debug)
# --------------------------------------------------------------------
if args.noport is None:
noport = 7700
else:
noport = args.noport
port = int(os.getenv("PORT", noport))
print("Starting app on port %d" %port)
app.run(debug=debug, port=port, host="0.0.0.0")
|
#! python3
# -*- coding: utf-8 -*-
# Practice project 18.13.2: automating Google Hangouts.
#
# Google Talk was shut down, so this program uses Google Hangouts instead
# to send a message to multiple users.
#
# Beforehand, capture the icon/name portion of each recipient in the
# Hangouts window and save the screenshots as PNG files named user1.png,
# user2.png, ...  (see the red frame in guide.png for the region to grab).
# Capture them while NOT selected, i.e. with a white background; an image
# taken while a contact is selected (gray background) will not match.
#
# With the Google Hangouts window visible, run
#     python imbot.py message
# and the message is sent in turn to each user in the images.
# Do not touch the mouse while it is running.
import pyautogui
import pyperclip
import sys
import os
import re
# pyautogui.typewrite() cannot enter Japanese text, so paste it via the clipboard instead.
def mytypewrite(s):
    """Type string *s* by pasting it from the clipboard (typewrite() cannot
    handle non-ASCII text).  The previous clipboard contents are restored."""
    saved_clipboard = pyperclip.paste()
    pyperclip.copy(s)
    pyautogui.hotkey('ctrl', 'v')
    pyperclip.copy(saved_clipboard)
# Find the image on screen and click it; return False if it cannot be found.
def click_image(image):
    """Locate `image` on screen and click its center; False when absent."""
    found = pyautogui.locateOnScreen(image)
    if not found:
        return False
    pyautogui.click(pyautogui.center(found))
    return True
# Click the user's entry and send the message.
def send_message(username, message):
    """Click the contact matching screenshot *username* and send *message*.

    Retries once after clicking "new conversation" to clear a selection.
    """
    for i in range(2):
        if click_image(username):
            # Found the user's image: paste the message and press Enter.
            print('{}ใซ้ไฟกไธญใ'.format(username))
            mytypewrite(message)
            pyautogui.typewrite('\n')
            return
        elif click_image('newconv.png'):
            # The user image was not found -- most likely a contact is
            # selected and its background turned gray.  Click "new
            # conversation" to deselect (background back to white) and
            # retry exactly once.
            continue
        else:
            # "New conversation" (newconv.png) was not found either:
            # capture that region in your own environment and save it
            # as newconv.png.
            print('ใๆฐใใไผ่ฉฑใใ่ฆใคใใใพใใใไธญๆญขใใพใใ')
            return
    print('{}ใซ้ใใพใใใงใใใ'.format(username))
# Usage check: at least one message argument is required.
if len(sys.argv) < 2:
    sys.exit('ไฝฟใๆน: python imbot.py ใกใใปใผใธ')
message = ' '.join(sys.argv[1:])
# Send the message to every user screenshot (user*.png) in this directory.
for filename in os.listdir('./'):
    if re.match(r'user.*\.png', filename, re.I):
        send_message(filename, message)
|
# Task 1) Manually create a text file with the data (e.g. car make, model,
# fuel consumption, price).
print(' 1) ะัััะฝัั ัะพะทะดะฐัั ัะตะบััะพะฒัะน ัะฐะนะป ั ะดะฐะฝะฝัะผะธ')
print(' ะฏ ัะพะทะดะฐะป ัะฐะธะป template')
# Task 2) Create a .doc template that uses these parameters.
print(' 2) ะกะพะทะดะฐัั doc ัะฐะฑะปะพะฝ, ะณะดะต ะฑัะดัั ะธัะฟะพะปัะทะพะฒะฐะฝั ะดะฐะฝะฝัะต ะฟะฐัะฐะผะตััั.')
print()
# Task 3) Automatically generate a car report in .doc format.
print(' 3) ะะฒัะพะผะฐัะธัะตัะบะธ ัะณะตะฝะตัะธัะพะฒะฐัั ะพััะตั ะพ ะผะฐัะธะฝะต ะฒ ัะพัะผะฐัะต doc (ะบะฐะบ ะฒ ะฒะธะดะตะพ 7.2).')
import datetime
from docxtpl import DocxTemplate
from docxtpl import InlineImage
from docx.shared import Cm
from docxtpl import DocxTemplate, InlineImage
def get_context(label, model, fuel, price):
    """Bundle the template arguments into the context dict docxtpl expects."""
    keys = ('label', 'model', 'fuel', 'price')
    return dict(zip(keys, (label, model, fuel, price)))
def from_template(label, model, fuel, price, template, signature):
    """Render the docx *template* with the car data and an inline image,
    then save it as <label>_<model>_<date>_report.docx."""
    template = DocxTemplate(template)  # load the template file
    context = get_context(label, model, fuel, price)  # dict of values used to render the document
    img_size = Cm(15)  # displayed image size
    acc = InlineImage(template, signature, img_size)  # picture object
    context['acc'] = acc  # add the InlineImage object to the context
    template.render(context)  # substitute the context into the template
    template.save(
        label + '_' + model + '_' + str(datetime.datetime.now().date()) + '_' 'report.docx')  # save the rendered document
def generate_report(label, model, fuel, price):
    """Generate the car report from the default template and signature image."""
    template = 'template.docx'
    signature = 'avt.jpg'
    from_template(label, model, fuel, price, template, signature)
generate_report('Mazda', 'X-9', '11,5', '1900000')
print()
# Task 4) Create a csv file with car data, write it, then read it back
# with csv.reader / csv.DictReader and finally with pandas.
print(' 4) ะกะพะทะดะฐัั csv ัะฐะนะป ั ะดะฐะฝะฝัะผะธ ะพ ะผะฐัะธะฝะต.')
import csv
''''
ััะฝะบัะธั csv.reader -> ะงัะตะฝะธะต ะฒ ัะธะฟ list
ััะฝะบัะธั csv.writer -> ะะฐะฟะธัั ะธะท ะปะธััะฐ
ะบะปะฐัั csv.Dictwriter -> ะะปะฐัั, ะทะฐะฟะธัั ะฒ ะพะฑัะตะบั ัะธะฟะฐ ัะปะพะฒะฐัั
ะบะปะฐัั csv.DictReader -> ะะปะฐัั, ััะตะฝะธะต ะฒ ะพะฑัะตะบั ัะธะฟะฐ ัะปะพะฒะฐัั
'''
car_data = [['brand', 'model', 'volume', 'fuel'], ['Kia', 'Rio', '1,4', '8'], ['Reno', 'Fluence', '1,6', '8,5'], ['Volkswagen', 'Polo', '1,5', '8,7'], ['Hyundai', 'solaris', '1,4', '7,8']]
with open('data_auto.csv', 'w', newline='') as f:  # newline='' keeps csv from inserting blank rows between records
    writer = csv.writer(f, delimiter = '>')  # field separator (default is ',')
    writer.writerows(car_data)
print('Writing complete!')
print(' * ')
# Read the file back with the same delimiter.
with open('data_auto.csv') as f:
    ัะธัะฐั = csv.reader(f, delimiter = '>')
    for row in ัะธัะฐั:
        print(row)
print(' * ')
data_school_dict = [{'Name':'Dima', 'age':'10', 'Grade':'A'},
                    {'Name':'Vasia', 'age':'11', 'Grade':'C'},
                    {'Name':'Hasim', 'age':'13', 'Grade':'f'},
                    {'Grade':'B', 'Name':'Zoy', 'age':'14'}]
fieldnames = ['Name', 'age', 'Grade']
# Dict-based writing: fieldnames fixes the column order regardless of the
# key order inside each record.
with open('ะกะฟะธัะพะบ_ััะตะฝะธะบะพะฒ.csv', 'w', newline='') as f:
    writer = csv.DictWriter(f, delimiter = '-', fieldnames=fieldnames)
    writer.writeheader()
    for i in range(len(data_school_dict)):
        writer.writerow(data_school_dict[i])
print(' Writing ะกะฟะธัะพะบ_ััะตะฝะธะบะพะฒ.csv complete!')
print(' * ')
with open ('ะกะฟะธัะพะบ_ััะตะฝะธะบะพะฒ.csv') as f:
    reader = csv.DictReader(f, delimiter = '-')
    for row in reader:
        print(row)  # here each row prints as a dict; older csv versions yielded ordered tuples
print(' * ')
import pandas as pd
# Same file loaded through pandas for comparison.
ะฟัะพะฑะฐ_pandas_from_csv = pd.read_csv('ะกะฟะธัะพะบ_ััะตะฝะธะบะพะฒ.csv', sep = '-')
print(type(ะฟัะพะฑะฐ_pandas_from_csv))
print(ะฟัะพะฑะฐ_pandas_from_csv)
|
# -*-coding:utf8-*-
import sys
from room_central_corridor import CentralCorridor
from room_laser_weapon_armory import LaserWeaponArmory
from room_the_bridge import TheBridge
from room_escape_pod import EscapePod
class Game(object):
    """Tiny room-based text game engine.

    Rooms register themselves via update_rooms(); play() repeatedly asks the
    current room for the next {room_name: method_name} action and dispatches
    to it with getattr.
    """

    def __init__(self):
        print("---Game start---\n")
        self.rooms = {}  # room name -> room instance

    def play(self):
        """Run the game loop, starting from the central corridor."""
        centralCorridor = CentralCorridor()
        centralCorridor.print_message()
        next_action = centralCorridor.action()  # returns a dict
        while True:
            for key, value in next_action.items():
                # print() with one parenthesised argument behaves the same
                # on Python 2 and 3 (the original used the Python-2-only
                # ``print`` statement form).
                print("key--> %s, value--> %s" % (key, value))
                # getattr(obj, name) is the bound attribute; appending ()
                # invokes the room method, which yields the next action.
                next_action = getattr(self.rooms.get(key), "%s" % value)()

    def death(self):
        """Print the game-over banner and terminate the process."""
        print("---Game over---\n")
        sys.exit(1)

    def update_rooms(self, room):
        """Merge a {name: room} mapping into the room registry."""
        self.rooms.update(room)
# Wire up the game: the registry maps class names to room instances so that
# Game.play() can resolve the keys returned by each room's action() dict.
game = Game()
game.update_rooms({'Game': game})
game.update_rooms(
    {"CentralCorridor": CentralCorridor(), "LaserWeaponArmory": LaserWeaponArmory(), "TheBridge": TheBridge(),
     "EscapePod": EscapePod()})
game.play()
|
# Definition for singly-linked list.
class ListNode:
    """A singly-linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.val = x
        # The caller links nodes together after construction.
        self.next = None
class Solution:
    """Sort a singly-linked list via top-down merge sort.

    ``sortList`` splits the list in two with a slow/fast pointer pair,
    recursively sorts both halves, and ``merge`` splices them back together.
    The recursion is O(log n) deep, so stack usage is O(log n).
    """

    def sortList(self, head: ListNode) -> ListNode:
        """Return the head of the list sorted in ascending order."""
        if head is None or head.next is None:
            return head  # empty or single node: already sorted
        # After this loop `slow` sits on the last node of the first half.
        slow, fast = head, head.next
        while fast is not None and fast.next is not None:
            slow = slow.next
            fast = fast.next.next
        second = slow.next
        slow.next = None  # cut the list into two roughly equal halves
        left = self.sortList(head)
        right = self.sortList(second)
        return self.merge(left, right)

    def merge(self, l1, l2):
        """Merge two sorted lists and return the merged head."""
        sentinel = ListNode(-1)
        tail = sentinel
        while l1 and l2:
            if l1.val < l2.val:
                tail.next, l1 = l1, l1.next
            else:
                tail.next, l2 = l2, l2.next
            tail = tail.next
        # At most one list still has nodes; attach the remainder (or None).
        tail.next = l1 if l1 else l2
        return sentinel.next
|
def RemoveImplicitSubtitle(title):
if title == '':
return title
sublist = ['--', '๏ฝ๏ฝ', '()', '[]', '<>', '""']
for s, e in sublist:
if title[-1] == e:
ind = title[:-1].rfind(s)
if ind == -1:
return title.strip()
else:
return title[:ind].strip()
def allEqual(ls):
    """Return the common element if every item equals the first, else None.

    FIX: an empty sequence now yields None instead of raising IndexError.
    """
    if not ls:
        return None
    for v in ls:
        if v != ls[0]:
            return None
    return ls[0]
def allPrefix(ls):
    """Return the shortest string of *ls* if it prefixes every element, else None.

    FIX: an empty sequence now yields None instead of min() raising ValueError.
    """
    if not ls:
        return None
    shortest = min(ls, key=len)
    for v in ls:
        if not v.startswith(shortest):
            return None
    return shortest
def allImplicitSubtitle(ls):
    """Strip implicit subtitles from every title, then look for a common value."""
    stripped = [RemoveImplicitSubtitle(t) for t in ls]
    return allEqual(stripped)
def prefixImplicitDifficulty(ls):
    """Return the common prefix of *ls* when the leftovers look like difficulty names.

    A guess is made only when at least two entries end in a known BMS
    difficulty label once the shared prefix is removed.
    """
    from os.path import commonprefix
    prefix = commonprefix(ls).strip()
    if not prefix:
        return None
    known = ('Basic', 'Normal', 'Hyper', 'Another')
    matches = sum(1 for v in ls if v[len(prefix):].strip() in known)
    return prefix if matches > 1 else None
def removeStrangeSabunName(titleList, artistList):
    """Drop (title, artist) pairs whose artist mentions 'OBJ' (sabun charts).

    Returns a pair of tuples (titles, artists), or [[], []] when nothing
    survives the filter.
    """
    titleArtistList = zip(titleList, artistList)
    # NOTE(review): `s` is a (title, artist) tuple, so `s != ''` is always
    # True -- the author probably meant `s[0] != ''` or `s[1] != ''`; confirm.
    isNotSabun = lambda s: ('OBJ' not in s[1].upper() and s != '')
    titleArtistList = list(zip(*filter(isNotSabun, titleArtistList)))
    if len(titleArtistList) == 0: return [[],[]]
    return titleArtistList
def GuessTitle(titleList, artistList):
    """Guess a common title for a set of BMS charts.

    Heuristics are tried in order on the raw lists first, then once more
    after sabun (OBJ) charts are filtered out.  Returns None on failure.
    """
    if not titleList:
        return "No bms files"
    heuristics = (allEqual, allPrefix, allImplicitSubtitle, prefixImplicitDifficulty)
    for guess in heuristics:
        candidate = guess(titleList)
        if candidate is not None:
            return candidate
    titleList, artistList = removeStrangeSabunName(titleList, artistList)
    if not titleList:
        return None
    for guess in heuristics:
        candidate = guess(titleList)
        if candidate is not None:
            return candidate
    return None
def GuessArtist(artistList, titleList):
    """Guess a common artist for a set of BMS charts.

    Same strategy as GuessTitle, but only equality and prefix heuristics
    make sense for artist names.  Returns None on failure.
    """
    if not artistList:
        return "No bms files"
    heuristics = (allEqual, allPrefix)
    for guess in heuristics:
        candidate = guess(artistList)
        if candidate is not None:
            return candidate
    titleList, artistList = removeStrangeSabunName(titleList, artistList)
    if not artistList:
        return None
    for guess in heuristics:
        candidate = guess(artistList)
        if candidate is not None:
            return candidate
    return None
def GuessTitleArtist(bms):
    """Return (guessed_title, guessed_artist) for a list of bms chart dicts."""
    titles = [chart['title'] for chart in bms]
    artists = [chart['artist'] for chart in bms]
    return (GuessTitle(titles, artists), GuessArtist(artists, titles))
if __name__ == '__main__':
    import json
    # Smoke test: run the guesser over every archive in ZIP2BMS.json and
    # report how many could not be resolved.
    with open('ZIP2BMS.json') as fp:  # FIX: file handle was never closed
        bms = json.load(fp)
    cnt1, cnt2 = 0, 0
    for f in bms:
        v1, v2 = GuessTitleArtist(bms[f]['bms'])
        cnt2 += 1
        if v1 is None or v2 is None:
            print(v1, v2)
            print(f)
            cnt1 += 1
    print(cnt1, '/', cnt2)
# Copyright (c) 2015-2018 Cisco Systems, Inc.
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Ansible Provisioner Module."""
import collections
import copy
import os
import shutil
from molecule import logger, util
from molecule.api import drivers
from molecule.provisioner import ansible_playbook, ansible_playbooks, base
LOG = logger.get_logger(__name__)
class Ansible(base.Base):
"""
`Ansible`_ is the default provisioner. No other provisioner will be \
supported.
Molecule's provisioner manages the instances lifecycle. However, the user
must provide the create, destroy, and converge playbooks. Molecule's
``init`` subcommand will provide the necessary files for convenience.
Molecule will skip tasks which are tagged with either `molecule-notest` or
`notest`. With the tag `molecule-idempotence-notest` tasks are only
skipped during the idempotence action step.
.. important::
Reserve the create and destroy playbooks for provisioning. Do not
attempt to gather facts or perform operations on the provisioned nodes
inside these playbooks. Due to the gymnastics necessary to sync state
between Ansible and Molecule, it is best to perform these tasks in the
prepare or converge playbooks.
    It is the developer's responsibility to properly map the module's fact
data into the instance_conf_dict fact in the create playbook. This
allows Molecule to properly configure Ansible inventory.
Additional options can be passed to ``ansible-playbook`` through the options
dict. Any option set in this section will override the defaults.
.. important::
Options do not affect the create and destroy actions.
.. note::
Molecule will remove any options matching '^[v]+$', and pass ``-vvv``
to the underlying ``ansible-playbook`` command when executing
`molecule --debug`.
Molecule will silence log output, unless invoked with the ``--debug`` flag.
However, this results in quite a bit of output. To enable Ansible log
output, add the following to the ``provisioner`` section of ``molecule.yml``.
.. code-block:: yaml
provisioner:
name: ansible
log: True
The create/destroy playbooks for Docker and Podman are bundled with
Molecule. These playbooks have a clean API from `molecule.yml`, and
are the most commonly used. The bundled playbooks can still be overridden.
The playbook loading order is:
1. provisioner.playbooks.$driver_name.$action
2. provisioner.playbooks.$action
3. bundled_playbook.$driver_name.$action
.. code-block:: yaml
provisioner:
name: ansible
options:
vvv: True
playbooks:
create: create.yml
converge: converge.yml
destroy: destroy.yml
Share playbooks between roles.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
create: ../default/create.yml
destroy: ../default/destroy.yml
converge: converge.yml
Multiple driver playbooks. In some situations a developer may choose to
test the same role against different backends. Molecule will choose driver
specific create/destroy playbooks, if the determined driver has a key in
the playbooks section of the provisioner's dict.
.. important::
If the determined driver has a key in the playbooks dict, Molecule will
use this dict to resolve all provisioning playbooks (create/destroy).
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
docker:
create: create.yml
destroy: destroy.yml
create: create.yml
destroy: destroy.yml
converge: converge.yml
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
The side effect playbook executes actions which produce side effects to the
instances(s). Intended to test HA failover scenarios or the like. It is
not enabled by default. Add the following to the provisioner's ``playbooks``
section to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
side_effect: side_effect.yml
.. important::
This feature should be considered experimental.
The prepare playbook executes actions which bring the system to a given
state prior to converge. It is executed after create, and only once for
the duration of the instances life.
This can be used to bring instances into a particular state, prior to
testing.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
prepare: prepare.yml
The cleanup playbook is for cleaning up test infrastructure that may not
be present on the instance that will be destroyed. The primary use-case
is for "cleaning up" changes that were made outside of Molecule's test
environment. For example, remote database connections or user accounts.
Intended to be used in conjunction with `prepare` to modify external
resources when required.
The cleanup step is executed directly before every destroy step. Just like
the destroy step, it will be run twice. An initial clean before converge
and then a clean before the last destroy step. This means that the cleanup
playbook must handle failures to cleanup resources which have not
been created yet.
Add the following to the provisioner's `playbooks` section
to enable.
.. code-block:: yaml
provisioner:
name: ansible
playbooks:
cleanup: cleanup.yml
.. important::
This feature should be considered experimental.
Environment variables. Molecule does its best to handle common Ansible
paths. The defaults are as follows.
::
ANSIBLE_ROLES_PATH:
$ephemeral_directory/roles/:$project_directory/../:~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles
ANSIBLE_LIBRARY:
$ephemeral_directory/modules/:$project_directory/library/:~/.ansible/plugins/modules:/usr/share/ansible/plugins/modules
ANSIBLE_FILTER_PLUGINS:
$ephemeral_directory/plugins/filter/:$project_directory/filter/plugins/:~/.ansible/plugins/filter:/usr/share/ansible/plugins/modules
Environment variables can be passed to the provisioner. Variables in this
    section which match the names above will be appended to the above defaults,
and converted to absolute paths, where the relative parent is the
$scenario_directory.
.. important::
Paths in this section are converted to absolute paths, where the
relative parent is the $scenario_directory.
.. code-block:: yaml
provisioner:
name: ansible
env:
FOO: bar
Modifying ansible.cfg.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
fact_caching: jsonfile
ssh_connection:
scp_if_ssh: True
.. important::
The following keys are disallowed to prevent Molecule from
improperly functioning. They can be specified through the
provisioner's env setting described above, with the exception
of the `privilege_escalation`.
.. code-block:: yaml
provisioner:
name: ansible
config_options:
defaults:
roles_path: /path/to/roles_path
library: /path/to/library
filter_plugins: /path/to/filter_plugins
privilege_escalation: {}
Roles which require host/groups to have certain variables set. Molecule
uses the same `variables defined in a playbook`_ syntax as `Ansible`_.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
group_vars:
foo1:
foo: bar
foo2:
foo: bar
baz:
qux: zzyzx
host_vars:
foo1-01:
foo: bar
Molecule automatically generates the inventory based on the hosts defined
under `Platforms`_. Using the ``hosts`` key allows to add extra hosts to
the inventory that are not managed by Molecule.
A typical use case is if you want to access some variables from another
host in the inventory (using hostvars) without creating it.
.. note::
The content of ``hosts`` should follow the YAML based inventory syntax:
start with the ``all`` group and have hosts/vars/children entries.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
hosts:
all:
extra_host:
foo: hello
.. important::
The extra hosts added to the inventory using this key won't be
created/destroyed by Molecule. It is the developers responsibility
to target the proper hosts in the playbook. Only the hosts defined
    under `Platforms`_ should be targeted instead of ``all``.
An alternative to the above is symlinking. Molecule creates symlinks to
the specified directory in the inventory directory. This allows ansible to
converge utilizing its built in host/group_vars resolution. These two
forms of inventory management are mutually exclusive.
Like above, it is possible to pass an additional inventory file
(or even dynamic inventory script), using the ``hosts`` key. `Ansible`_ will
automatically merge this inventory with the one generated by molecule.
This can be useful if you want to define extra hosts that are not managed
by Molecule.
.. important::
    Again, it is the developer's responsibility to target the proper hosts
    in the playbook. Only the hosts defined under
    `Platforms`_ should be targeted instead of ``all``.
.. note::
The source directory linking is relative to the scenario's
directory.
The only valid keys are ``hosts``, ``group_vars`` and ``host_vars``. Molecule's
schema validator will enforce this.
.. code-block:: yaml
provisioner:
name: ansible
inventory:
links:
hosts: ../../../inventory/hosts
group_vars: ../../../inventory/group_vars/
host_vars: ../../../inventory/host_vars/
Override connection options:
.. code-block:: yaml
provisioner:
name: ansible
connection_options:
ansible_ssh_user: foo
ansible_ssh_common_args: -o IdentitiesOnly=no
.. _`variables defined in a playbook`: https://docs.ansible.com/ansible/latest/user_guide/playbooks_variables.html#defining-variables-in-a-playbook
Add arguments to ansible-playbook when running converge:
.. code-block:: yaml
provisioner:
name: ansible
ansible_args:
- --inventory=mygroups.yml
- --limit=host1,host2
""" # noqa
def __init__(self, config):
"""
Initialize a new ansible class and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Ansible, self).__init__(config)
    @property
    def default_config_options(self):
        """
        Provide Default options to construct ansible.cfg and returns a dict.

        :return: dict
        """
        return {
            "defaults": {
                "ansible_managed": "Ansible managed: Do NOT edit this file manually!",
                "display_failed_stderr": True,
                "forks": 50,
                "retry_files_enabled": False,
                "host_key_checking": False,  # test instances are ephemeral
                "nocows": 1,
                "interpreter_python": "auto",
            },
            "ssh_connection": {
                "scp_if_ssh": True,
                # %%h/%%p/%%r are doubled to survive ConfigParser interpolation.
                "control_path": "%(directory)s/%%h-%%p-%%r",
            },
        }
    @property
    def default_options(self):
        """Return default CLI options for ansible-playbook as a dict."""
        d = {"skip-tags": "molecule-notest,notest"}
        if self._config.action == "idempotence":
            # molecule-idempotence-notest tasks are skipped only for this action.
            d["skip-tags"] += ",molecule-idempotence-notest"
        if self._config.debug:
            d["vvv"] = True
            d["diff"] = True
        return d
    @property
    def default_env(self):
        """Build the baseline environment for ansible-playbook runs.

        Prepends Molecule's ephemeral/scenario directories to Ansible's
        search paths (roles, collections, library, filter plugins) on top
        of the current process environment.
        """
        # Finds if the current project is part of an ansible_collections hierarchy
        collection_indicator = "ansible_collections"
        collections_path_list = [
            util.abs_path(
                os.path.join(self._config.scenario.ephemeral_directory, "collections")
            )
        ]
        if collection_indicator in self._config.project_directory:
            # Everything before "ansible_collections" is the collections root.
            collection_path, right = self._config.project_directory.split(
                collection_indicator
            )
            collections_path_list.append(util.abs_path(collection_path))
        collections_path_list.extend(
            [
                util.abs_path(
                    os.path.join(os.path.expanduser("~"), ".ansible/collections")
                ),
                "/usr/share/ansible/collections",
                "/etc/ansible/collections",
            ]
        )
        env = util.merge_dicts(
            os.environ,
            {
                "ANSIBLE_CONFIG": self._config.provisioner.config_file,
                "ANSIBLE_ROLES_PATH": ":".join(
                    [
                        util.abs_path(
                            os.path.join(
                                self._config.scenario.ephemeral_directory, "roles"
                            )
                        ),
                        util.abs_path(
                            os.path.join(self._config.project_directory, os.path.pardir)
                        ),
                        util.abs_path(
                            os.path.join(os.path.expanduser("~"), ".ansible", "roles")
                        ),
                        "/usr/share/ansible/roles",
                        "/etc/ansible/roles",
                    ]
                ),
                self._config.ansible_collections_path: ":".join(collections_path_list),
                "ANSIBLE_LIBRARY": ":".join(self._get_modules_directories()),
                "ANSIBLE_FILTER_PLUGINS": ":".join(
                    [
                        self._get_filter_plugin_directory(),
                        util.abs_path(
                            os.path.join(
                                self._config.scenario.ephemeral_directory,
                                "plugins",
                                "filter",
                            )
                        ),
                        util.abs_path(
                            os.path.join(
                                self._config.project_directory, "plugins", "filter"
                            )
                        ),
                        util.abs_path(
                            os.path.join(
                                os.path.expanduser("~"), ".ansible", "plugins", "filter"
                            )
                        ),
                        "/usr/share/ansible/plugins/filter",
                    ]
                ),
            },
        )
        # User-configured env vars take precedence over the defaults above.
        env = util.merge_dicts(env, self._config.env)
        return env
    @property
    def name(self):
        """Return the provisioner's name from molecule.yml."""
        return self._config.config["provisioner"]["name"]
    @property
    def ansible_args(self):
        """Return extra CLI arguments passed through to ansible-playbook."""
        return self._config.config["provisioner"]["ansible_args"]
    @property
    def config_options(self):
        """Return ansible.cfg options: defaults merged with user overrides."""
        return util.merge_dicts(
            self.default_config_options,
            self._config.config["provisioner"]["config_options"],
        )
    @property
    def options(self):
        """Return ansible-playbook CLI options: defaults merged with user options."""
        if self._config.action in ["create", "destroy"]:
            # User options never affect the create and destroy actions.
            return self.default_options
        o = self._config.config["provisioner"]["options"]
        # NOTE(retr0h): Remove verbose options added by the user while in
        # debug.
        if self._config.debug:
            o = util.filter_verbose_permutation(o)
        return util.merge_dicts(self.default_options, o)
@property
def env(self):
default_env = self.default_env
env = self._config.config["provisioner"]["env"].copy()
# ensure that all keys and values are strings
env = {str(k): str(v) for k, v in env.items()}
roles_path = default_env["ANSIBLE_ROLES_PATH"]
library_path = default_env["ANSIBLE_LIBRARY"]
filter_plugins_path = default_env["ANSIBLE_FILTER_PLUGINS"]
try:
path = self._absolute_path_for(env, "ANSIBLE_ROLES_PATH")
roles_path = "{}:{}".format(roles_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, "ANSIBLE_LIBRARY")
library_path = "{}:{}".format(library_path, path)
except KeyError:
pass
try:
path = self._absolute_path_for(env, "ANSIBLE_FILTER_PLUGINS")
filter_plugins_path = "{}:{}".format(filter_plugins_path, path)
except KeyError:
pass
env["ANSIBLE_ROLES_PATH"] = roles_path
env["ANSIBLE_LIBRARY"] = library_path
env["ANSIBLE_FILTER_PLUGINS"] = filter_plugins_path
return util.merge_dicts(default_env, env)
    @property
    def hosts(self):
        """Return extra inventory hosts configured under provisioner.inventory."""
        return self._config.config["provisioner"]["inventory"]["hosts"]
    @property
    def host_vars(self):
        """Return configured host_vars from provisioner.inventory."""
        return self._config.config["provisioner"]["inventory"]["host_vars"]
    @property
    def group_vars(self):
        """Return configured group_vars from provisioner.inventory."""
        return self._config.config["provisioner"]["inventory"]["group_vars"]
    @property
    def links(self):
        """Return configured inventory symlink sources (mutually exclusive with vars)."""
        return self._config.config["provisioner"]["inventory"]["links"]
    @property
    def inventory(self):
        """
        Create an inventory structure and returns a dict.

        .. code-block:: yaml

            ungrouped:
              vars:
                foo: bar
              hosts:
                instance-1:
                instance-2:
              children:
                $child_group_name:
                  hosts:
                    instance-1:
                    instance-2:
            $group_name:
              hosts:
                instance-1:
                  ansible_connection: docker
                instance-2:
                  ansible_connection: docker

        :return: dict
        """
        dd = self._vivify()  # autovivifying defaultdict tree
        for platform in self._config.platforms.instances:
            for group in platform.get("groups", ["ungrouped"]):
                instance_name = platform["name"]
                connection_options = self.connection_options(instance_name)
                # Jinja lookups resolve lazily, at playbook run time.
                molecule_vars = {
                    "molecule_file": "{{ lookup('env', 'MOLECULE_FILE') }}",
                    "molecule_ephemeral_directory": "{{ lookup('env', 'MOLECULE_EPHEMERAL_DIRECTORY') }}",
                    "molecule_scenario_directory": "{{ lookup('env', 'MOLECULE_SCENARIO_DIRECTORY') }}",
                    "molecule_yml": "{{ lookup('file', molecule_file) | molecule_from_yaml }}",
                    "molecule_instance_config": "{{ lookup('env', 'MOLECULE_INSTANCE_CONFIG') }}",
                    "molecule_no_log": "{{ lookup('env', 'MOLECULE_NO_LOG') or not "
                    "molecule_yml.provisioner.log|default(False) | bool }}",
                }
                # All group
                dd["all"]["hosts"][instance_name] = connection_options
                dd["all"]["vars"] = molecule_vars
                # Named group
                dd[group]["hosts"][instance_name] = connection_options
                dd[group]["vars"] = molecule_vars
                # Ungrouped
                dd["ungrouped"]["vars"] = {}
                # Children
                for child_group in platform.get("children", []):
                    dd[group]["children"][child_group]["hosts"][
                        instance_name
                    ] = connection_options
        return self._default_to_regular(dd)
    @property
    def inventory_directory(self):
        """Return the scenario's inventory directory path."""
        return self._config.scenario.inventory_directory
    @property
    def inventory_file(self):
        """Return the path of the generated ansible inventory file."""
        return os.path.join(self.inventory_directory, "ansible_inventory.yml")
    @property
    def config_file(self):
        """Return the path of the generated ansible.cfg."""
        return os.path.join(self._config.scenario.ephemeral_directory, "ansible.cfg")
    @property  # type: ignore
    @util.lru_cache()
    def playbooks(self):
        """Return the resolved playbook set for this scenario (cached)."""
        return ansible_playbooks.AnsiblePlaybooks(self._config)
    @property
    def directory(self):
        """Return the path of Molecule's bundled ansible provisioner assets."""
        return os.path.join(
            os.path.dirname(__file__),
            os.path.pardir,
            os.path.pardir,
            "molecule",
            "provisioner",
            "ansible",
        )
def cleanup(self):
"""
Execute `ansible-playbook` against the cleanup playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.cleanup)
pb.execute()
def connection_options(self, instance_name):
d = self._config.driver.ansible_connection_options(instance_name)
return util.merge_dicts(
d, self._config.config["provisioner"]["connection_options"]
)
def check(self):
"""
Execute ``ansible-playbook`` against the converge playbook with the \
``--check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg("check", True)
pb.execute()
def converge(self, playbook=None, **kwargs):
"""
Execute ``ansible-playbook`` against the converge playbook unless \
specified otherwise and returns a string.
:param playbook: An optional string containing an absolute path to a
playbook.
:param kwargs: An optional keyword arguments.
:return: str
"""
pb = self._get_ansible_playbook(playbook or self.playbooks.converge, **kwargs)
return pb.execute()
def destroy(self):
"""
Execute ``ansible-playbook`` against the destroy playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.destroy)
pb.execute()
def side_effect(self):
"""
Execute ``ansible-playbook`` against the side_effect playbook and \
returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.side_effect)
pb.execute()
def create(self):
"""
Execute ``ansible-playbook`` against the create playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.create)
pb.execute()
def prepare(self):
"""
Execute ``ansible-playbook`` against the prepare playbook and returns \
None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.prepare)
pb.execute()
def syntax(self):
"""
Execute ``ansible-playbook`` against the converge playbook with the \
``-syntax-check`` flag and returns None.
:return: None
"""
pb = self._get_ansible_playbook(self.playbooks.converge)
pb.add_cli_arg("syntax-check", True)
pb.execute()
def verify(self):
"""
Execute ``ansible-playbook`` against the verify playbook and returns \
None.
:return: None
"""
if not self.playbooks.verify:
LOG.warning("Skipping, verify playbook not configured.")
return
pb = self._get_ansible_playbook(self.playbooks.verify)
pb.execute()
def write_config(self):
"""
Write the provisioner's config file to disk and returns None.
:return: None
"""
template = util.render_template(
self._get_config_template(), config_options=self.config_options
)
util.write_file(self.config_file, template)
def manage_inventory(self):
"""
Manage inventory for Ansible and returns None.
:returns: None
"""
self._write_inventory()
self._remove_vars()
if not self.links:
self._add_or_update_vars()
else:
self._link_or_update_vars()
    def abs_path(self, path):
        """Return *path* made absolute relative to the scenario directory."""
        return util.abs_path(os.path.join(self._config.scenario.directory, path))
def _add_or_update_vars(self):
"""
Create host and/or group vars and returns None.
:returns: None
"""
# Create the hosts extra inventory source (only if not empty)
hosts_file = os.path.join(self.inventory_directory, "hosts")
if self.hosts:
util.write_file(hosts_file, util.safe_dump(self.hosts))
# Create the host_vars and group_vars directories
for target in ["host_vars", "group_vars"]:
if target == "host_vars":
vars_target = copy.deepcopy(self.host_vars)
for instance_name, _ in self.host_vars.items():
instance_key = instance_name
vars_target[instance_key] = vars_target.pop(instance_name)
elif target == "group_vars":
vars_target = self.group_vars
if vars_target:
target_vars_directory = os.path.join(self.inventory_directory, target)
if not os.path.isdir(util.abs_path(target_vars_directory)):
os.mkdir(util.abs_path(target_vars_directory))
for target in vars_target.keys():
target_var_content = vars_target[target]
path = os.path.join(util.abs_path(target_vars_directory), target)
util.write_file(path, util.safe_dump(target_var_content))
    def _write_inventory(self):
        """
        Write the provisioner's inventory file to disk and returns None.

        :return: None
        """
        self._verify_inventory()  # fail fast when no platforms are defined
        util.write_file(self.inventory_file, util.safe_dump(self.inventory))
def _remove_vars(self):
"""
Remove hosts/host_vars/group_vars and returns None.
:returns: None
"""
for name in ("hosts", "group_vars", "host_vars"):
d = os.path.join(self.inventory_directory, name)
if os.path.islink(d) or os.path.isfile(d):
os.unlink(d)
elif os.path.isdir(d):
shutil.rmtree(d)
def _link_or_update_vars(self):
"""
Create or updates the symlink to group_vars and returns None.
:returns: None
"""
for d, source in self.links.items():
target = os.path.join(self.inventory_directory, d)
source = os.path.join(self._config.scenario.directory, source)
if not os.path.exists(source):
msg = "The source path '{}' does not exist.".format(source)
util.sysexit_with_message(msg)
msg = "Inventory {} linked to {}".format(source, target)
LOG.info(msg)
os.symlink(source, target)
    def _get_ansible_playbook(self, playbook, **kwargs):
        """
        Get an instance of AnsiblePlaybook and returns it.

        :param playbook: A string containing an absolute path to a
         provisioner's playbook.
        :param kwargs: An optional keyword arguments.
        :return: object
        """
        return ansible_playbook.AnsiblePlaybook(playbook, self._config, **kwargs)
    def _verify_inventory(self):
        """
        Verify the inventory is valid and returns None.

        :return: None
        """
        if not self.inventory:
            # An empty inventory means molecule.yml defines no platforms.
            msg = "Instances missing from the 'platform' " "section of molecule.yml."
            util.sysexit_with_message(msg)
    def _get_config_template(self):
        """
        Return a config template string.

        :return: str
        """
        # Jinja template rendered into ansible.cfg (INI format) by write_config().
        return """
{% for section, section_dict in config_options.items() -%}
[{{ section }}]
{% for k, v in section_dict.items() -%}
{{ k }} = {{ v }}
{% endfor -%}
{% endfor -%}
""".strip()
    def _vivify(self):
        """
        Return an autovivification default dict.

        :return: dict
        """
        # Missing keys materialize as nested defaultdicts of the same shape.
        return collections.defaultdict(self._vivify)
def _default_to_regular(self, d):
if isinstance(d, collections.defaultdict):
d = {k: self._default_to_regular(v) for k, v in d.items()}
return d
    def _get_plugin_directory(self):
        """Return the path of Molecule's bundled plugins directory."""
        return os.path.join(self.directory, "plugins")
    def _get_modules_directories(self):
        """Return list of ansible module include directories.

        Adds modules directory from molecule and its plugins.
        """
        paths = [util.abs_path(os.path.join(self._get_plugin_directory(), "modules"))]
        # Driver plugins may ship their own modules.
        for d in drivers():
            p = d.modules_dir()
            if p:
                paths.append(p)
        paths.extend(
            [
                util.abs_path(
                    os.path.join(self._config.scenario.ephemeral_directory, "library")
                ),
                util.abs_path(os.path.join(self._config.project_directory, "library")),
                util.abs_path(
                    os.path.join(
                        os.path.expanduser("~"),
                        ".ansible",
                        "plugins",
                        "modules",
                    )
                ),
                "/usr/share/ansible/plugins/modules",
            ]
        )
        # Honour a user-provided ANSIBLE_LIBRARY as the last entry.
        if os.environ.get("ANSIBLE_LIBRARY"):
            paths.extend([util.abs_path(os.environ.get("ANSIBLE_LIBRARY"))])
        return paths
    def _get_filter_plugin_directory(self):
        """Return the path of Molecule's bundled filter plugins directory."""
        return util.abs_path(os.path.join(self._get_plugin_directory(), "filter"))
    def _absolute_path_for(self, env, key):
        """Return env[key] with every ':'-separated entry made absolute.

        Raises KeyError when *key* is absent (callers rely on this).
        """
        return ":".join([self.abs_path(p) for p in env[key].split(":")])
|
#! /usr/bin/env python3
def replace_problematic_characters(string_list, character_dict):
    """Replace characters of *string_list* according to *character_dict*.

    :param string_list: iterable of single characters.
    :param character_dict: mapping of problematic char -> replacement char.
    :return: tuple (fixed_string_list, problem_pointer_list) where the
        pointer list carries '^' under each replaced position, ' ' elsewhere.
    """
    fixed_string_list = []
    problem_pointer_list = []
    for char in string_list:
        # Idiomatic membership test instead of `dict.get(char) != None`.
        if char in character_dict:
            fixed_string_list.append(character_dict[char])
            problem_pointer_list.append("^")
        else:
            fixed_string_list.append(char)
            problem_pointer_list.append(" ")
    return (fixed_string_list, problem_pointer_list)
|
import tkinter as tk
from proba.config import Config
from proba.exercise1 import Exercise1
from proba.exercise2 import Exercise2
from proba.menu import Menu
'''This module manages the display of the different frames used in the program.'''
class Application(tk.Tk):
    """Root window: builds every frame up front and raises one at a time."""
    def __init__(self):
        super().__init__()
        self.geometry("600x400")
        self.title('Gรฉnรฉrateur d\'exercices')
        # Single container; all pages are stacked in grid cell (0, 0).
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.rowconfigure(0, weight=1)
        container.columnconfigure(0, weight=1)
        # State shared by every frame: exercise weights and running score.
        self.shared_data = {
            "weight_ex1": 50,
            "weight_ex2": 50,
            "ex2_weight1": 50,
            "ex2_weight2": 50,
            "ex2_weight3": 50,
            "score": 0.0,
            "max_score": 0.0
        }
        self.frames = {}
        # Instantiate all pages once; show_frame() just raises the requested one.
        for f in (Menu, Exercise1, Exercise2, Config):
            frame = f(container, self)
            self.frames[f] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame(Menu)
    def show_frame(self, cont):
        """Raise the frame registered under class *cont* and notify it."""
        frame = self.frames[cont]
        # <<ShowFrame>> lets a page refresh itself just before being displayed.
        frame.event_generate("<<ShowFrame>>")
        frame.tkraise()
|
from django.shortcuts import render , HttpResponse
from django.contrib.auth.decorators import login_required
from grpcheckerview import group_required
from club import datahandler as dataconn
# Create your views here.
@login_required(login_url = '/user/login/')
@group_required("Techgrp")
def adminLanding(request):
    """Admin landing page: lists all clubs; a POST 'Deletion' removes one."""
    if request.method == 'POST':
        if request.POST['action'] == 'Deletion':
            # FIX: renamed from `id`, which shadowed the builtin.
            club_id = request.POST.get('ID')
            dataconn.deleteClub(club_id)
    clubs = dataconn.getAllClub()
    context = {"clubs" : clubs}
    return render(request , 'adminlanding.html' , context)
@login_required(login_url = '/user/login/')
@group_required("Techgrp")
def createClub(request):
    """Club registration form: on POST, create the club and show its password."""
    if request.method == 'POST':
        if request.POST['action'] == "RegisterClub":
            ClubData = {
                "clubName": request.POST.get('clubName'),
                "clubEmail": request.POST.get('clubEmail'),
                "clubDescription": request.POST.get('clubDescription'),
                "clubImgUrl": request.POST.get('imgUrl'),
            }
            # Social links start out blank; the club fills them in later.
            for link_key in ("discordLink", "instaLink", "linkedinLink",
                             "telegramLink", "twitterLink", "whatsappLink",
                             "youtubeLink"):
                ClubData[link_key] = ""
            password = dataconn.createClub(ClubData)
            if password is None:
                return render(request , 'createClub.html')
            context = {"password" : password}
            return render(request , 'createClub.html' , context)
    return render(request , 'createClub.html')
import base64
import json
import os
import cv2
import requests
from django.http import JsonResponse
from django.shortcuts import render
from . import settings
MAX_NM_CNT = 5  # consecutive "no mask" detections before an alert fires
SEC = 10  # seconds between sampled video frames (see video_read / savevideo)
URL_PREFIX = "http://3.36.161.101:8080/predictions"  # model-serving endpoint
PROJ_MODELS = {1: 'faster_rcnn', 2: 'cascade_rcnn'}  # project number -> served model name
def index(request):
    """Render the landing page."""
    return render(request, 'index.html')
def show_map(request):
    """Render the nearby pharmacy/store map page."""
    return render(request, 'show_map.html')
def check_no_mask(request):
    """PROJECT 1: mask-wearing check (handles an async POST request).

    Expects a JSON body whose 'imageString' holds a base64 data URL,
    forwards the decoded image to the faster_rcnn prediction endpoint,
    and returns the detected classes/boxes plus a user-facing message as
    JSON.  NOTE(review): the message string literals appear to be
    mojibake-garbled Korean; they are preserved byte-for-byte here.

    :param request: Django HttpRequest (POST with JSON body)
    :return: JsonResponse
    """
    proj_num = 1
    if request.method == "POST":
        # Decode the incoming image (skip the "data:image/...;base64," prefix).
        post_data = json.loads(request.body)
        imgdata = base64.b64decode(post_data['imageString'][22:])
        # Send the image to the prediction API and wait for the response.
        url = '/'.join([URL_PREFIX, PROJ_MODELS[proj_num]])
        response = requests.post(url, data=imgdata)
        # Parse the response and classify the outcome.
        api_result = response.json()
        print(api_result)
        result = {}
        result['classes'] = api_result['classes']
        result['boxes'] = api_result['boxes']
        if 1 in api_result['classes']:  # class 1 = face without a mask
            result['msg'] = '๋ง์คํฌ ๋ฏธ์ฐฉ์ฉํ์
จ์ต๋๋ค. ๊ฐ๊น์ด ์ฝ๊ตญ ๋ฐ ํธ์์ ๋ชฉ๋ก ํ์ด์ง๋ก ์ด๋ํฉ๋๋ค.'
            result['url'] = 'show_map'
            return JsonResponse(result)
        elif len(api_result['classes']) == 0:  # no face detected at all
            result['msg'] = '์ผ๊ตด ์ธ์ ๋ถ๊ฐ๋ฅ'
            return JsonResponse(result)
        else:  # a mask is being worn
            result['msg'] = '๋ง์คํฌ ์ฐฉ์ฉํ์
จ์ต๋๋ค.'
            return JsonResponse(result)
def alert_no_mask(request):
    """PROJECT 2: no-mask announcement broadcast (async POST handler).

    Counts consecutive frames containing an unmasked face (class 1) in
    the session; when the count reaches MAX_NM_CNT, isAlert is returned
    True and the counter resets.  A frame with everyone masked also
    resets the counter.

    :param request: Django HttpRequest (POST with JSON body)
    :return: JsonResponse with isAlert, nm_cnt, nm_cntMax, boxes, classes
    """
    proj_num = 2
    if request.method == "POST":
        # Decode the incoming image (skip the data-URL prefix).
        post_data = json.loads(request.body)
        imgdata = base64.b64decode(post_data['imageString'][22:])
        # Send the image to the cascade_rcnn prediction endpoint.
        url = '/'.join([URL_PREFIX, PROJ_MODELS[proj_num]])
        response = requests.post(url, data=imgdata)
        # Parse the response for rendering.
        api_result = response.json()
        global MAX_NM_CNT
        result = {'isAlert': False}
        nm_cnt = request.session.get('nm_cnt', 0)
        # +1 per no-mask frame; alert at the max; reset when all masked.
        if 1 in api_result['classes']:
            nm_cnt += 1
            if nm_cnt >= MAX_NM_CNT:
                nm_cnt = 0
                result['isAlert'] = True
        else:
            nm_cnt = 0
        request.session['nm_cnt'] = nm_cnt
        result['nm_cnt'] = nm_cnt
        result['nm_cntMax'] = MAX_NM_CNT
        result['boxes'] = api_result['boxes']
        result['classes'] = api_result['classes']
        return JsonResponse(result)
def savevideo(request):
    """Upload a video (POST) and report per-sample mask status (GET).

    POST: saves the uploaded file under static/video/, samples one frame
    per SEC seconds via video_read(), sends each captured frame to the
    cascade_rcnn endpoint, and records for each sample whether
    MAX_NN_CNT consecutive samples contained an unmasked face (class 1).
    GET: renders the last computed result.

    NOTE(review): a POST request itself returns None (Django will raise);
    results are only visible via a subsequent GET — confirm intended.
    """
    file_name = 'test1.mp4'
    global minute, context
    if request.method == "POST":
        minute = []
        file_name_path = os.path.join(settings.STATICFILES_DIRS[0], 'video/' + file_name)
        with open(file_name_path, 'wb') as f:
            f.write(request.FILES['file'].read())
        capture_count = video_read(file_name)
        result = []
        for i in range(capture_count):
            url = "http://3.36.161.101:8080/predictions/cascade_rcnn"
            # Read each captured frame with a context manager so the file
            # handle is closed (previously one handle leaked per frame).
            with open(os.path.join(settings.STATICFILES_DIRS[0], 'img/capture_img/' + 't'+ str(i) +'.jpg'), 'rb') as img_f:
                files = img_f.read()
            r = requests.post(url, data= files)
            r = r.json()
            result.append(r['classes'])
        print(result)
        # Same consecutive-count logic as alert_no_mask, per sampled frame.
        nnCnt, MAX_NN_CNT = 0, 2
        for i in result:
            print(minute)
            flag = False
            if 1 in i:
                nnCnt += 1
                if nnCnt >= MAX_NN_CNT:
                    nnCnt = 0
                    flag = True
            else:
                nnCnt = 0
            minute.append(flag)
        context = {'result': list(map(str, minute)), 'sec':SEC}
    if request.method == "GET":
        return render(request, 'video.html', context)
def video_read(file_name):
    """Sample one frame every SEC seconds from static/video/<file_name>.

    Each sampled frame is written to static/img/capture_img/t<i>.jpg.

    Parameters
    ----------
    file_name : str
        Name of the video file under static/video/.  (The previous
        version ignored this parameter and hard-coded test1.mp4; callers
        pass 'test1.mp4', so behavior is unchanged for them.)

    Returns
    -------
    int
        Number of frames captured.
    """
    path = './static/video/' + file_name
    cap = cv2.VideoCapture(path)
    # Frames per second of the video; frame_sec is the step for SEC seconds.
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_sec = fps * SEC
    capture_count = 0
    frame_num = 0
    # Fixed: isOpened was previously referenced without calling it (the
    # bound method is always truthy, so only the read() failure below
    # ever terminated the loop).
    while cap.isOpened():
        cap.set(1, frame_num)  # seek to the next sampled frame index
        frame_num += frame_sec
        ret, frame = cap.read()
        if ret == False:
            break
        file_name_path = 'static/img/capture_img/t'+str(capture_count)+'.jpg'
        capture_count += 1
        cv2.imwrite(file_name_path, frame)
    cap.release()
    return capture_count
import time
import pymysql
import win32api
import win32con
import win32gui
from selenium import webdriver
from explicit_wait import explicit_wait
from ins_pymsql import fetch_one_sql, oprt_mysql
class Main():
    """Selenium-driven Instagram bot backed by a local MySQL database.

    For every account in the ``account`` table it (1) logs in through the
    mobile-emulated site and uploads ``self.filepath`` via the native
    Windows "Open" file dialog, then (2) logs in through the desktop site
    and likes / saves / comments on feed posts whose publisher appears in
    the ``kh_name`` table.  Login state and per-action counters are
    written back to MySQL after each step.

    NOTE(review): Windows-only (win32api/win32gui) and tightly coupled to
    Instagram's DOM/XPath structure at the time of writing.
    """
    def __init__(self):
        # Connection to the local database holding account / target tables.
        self.conn = pymysql.connect(host='localhost', port=3306,
                                    user='root', password='123456',
                                    db='xy_test', charset='utf8')
        # Path of the picture that ins_phone_opr uploads.
        self.filepath = "C:\\Users\\hu\\Desktop\\xy-text\\ceshi_pic.png"
    def ins_phone_opr(self,email,pwd,filepath):
        """Log in on the mobile-emulated site and upload *filepath*.

        Updates the account row's state/log_num on login success or
        failure, and fb_num after a successful share.
        """
        mobileEmulation = {"deviceName":"Galaxy S5"}
        options = webdriver.ChromeOptions()
        options.add_experimental_option('mobileEmulation', mobileEmulation)
        options.add_argument("-lang=en-uk")
        chrome_obj = webdriver.Chrome(chrome_options=options)
        chrome_obj.maximize_window()
        chrome_obj.get('https://www.instagram.com/accounts/login/')
        ec_params = ['//form[@method="post"]',"XPath"]
        explicit_wait(chrome_obj,"VOEL",ec_params)
        chrome_obj.find_element_by_xpath('//input[@name="username"]').send_keys(email)
        chrome_obj.find_element_by_xpath('//input[@name="password"]').send_keys(pwd)
        chrome_obj.find_element_by_xpath('//form//button[@type="submit"]').click()
        try:
            # Login succeeded (state 1): dismiss the popup dialog.
            ec_params = ['//div/div[3]/button[2]',"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            chrome_obj.find_element_by_xpath('//div/div[3]/button[2]').click()
            print("Login successful!")
            sql = "update account set state=1,log_num=log_num+1 where email=%s;"
            oprt_mysql(self.conn,sql,email)
        except:
            try:
                # Wrong password (state 2).
                chrome_obj.find_element_by_xpath('//div/div[2]/button').click()
                print("Password mistake!")
                sql = "update account set state=2,log_num=log_num+1 where email=%s;"
                oprt_mysql(self.conn,sql,email)
                chrome_obj.quit()
            except :
                # Bad account (state 2): read the login error text.
                login_flag = chrome_obj.find_element_by_xpath('//form//div/p[@id="slfErrorAlert"]').text
                print("Login failed๏ผ"+login_flag)
                sql = "update account set state=2,log_num=log_num+1 where email=%s;"
                oprt_mysql(self.conn,sql,email)
                chrome_obj.quit()
        try:
            # Click the new-post menu item, opening the file dialog.
            chrome_obj.find_element_by_xpath('//div[@role="menuitem"]/span').click()
        except :
            pass
        time.sleep(3)
        # Window class of the native file dialog.
        classname = "#32770"
        # Handle of the dialog window (Chinese-locale title; string is
        # mojibake of the original title — left byte-identical).
        ck_ju = win32gui.FindWindow(classname,"ๆๅผ")
        # ck_ju = win32gui.FindWindow(classname,"Open")
        ComboBoxEx32 = win32gui.FindWindowEx(ck_ju, 0, 'ComboBoxEx32', None)
        ComboBox = win32gui.FindWindowEx(ComboBoxEx32, 0, 'ComboBox', None)
        # Handle of the file-name edit box inside the dialog.
        Edit = win32gui.FindWindowEx(ComboBox, 0, 'Edit', None)
        # Type the file path into the edit box, then press Enter.
        win32gui.SendMessage(Edit, win32con.WM_SETTEXT, None, filepath)
        time.sleep(3)
        win32api.keybd_event(13, 0, 0, 0)
        win32api.keybd_event(13, 0, win32con.KEYEVENTF_KEYUP, 0)
        try:
            # "Next" button.
            ec_params = ['//header/div/div[2]/button',"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            chrome_obj.find_element_by_xpath('//header/div/div[2]/button').click()
        except:
            pass
        time.sleep(3)
        try:
            # "Share" button; count the upload on success.
            ec_params = ['//header/div/div[2]/button',"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            chrome_obj.find_element_by_xpath('//header/div/div[2]/button').click()
            sql = "update account set fb_num=fb_num+1 where email=%s;"
            oprt_mysql(self.conn,sql,email)
        except:
            pass
        time.sleep(3)
        try:
            # Dismiss any trailing popup dialog.
            chrome_obj.find_element_by_xpath('//div/div[3]/button[2]').click()
        except:
            pass
        chrome_obj.quit()
    def ins_opr(self,email,pwd,kh_name):
        """Log in on the desktop site and like/save/comment on posts.

        Walks up to 50 feed posts; only posts whose publisher name is in
        *kh_name* are acted on.  dz_num (likes), sc_num (saves) and
        pl_num (comments) counters are incremented in the database.
        """
        try:
            options = webdriver.ChromeOptions()
            options.add_argument("-lang=en-uk")
            chrome_obj = webdriver.Chrome(chrome_options=options)
            chrome_obj.maximize_window()
            chrome_obj.get("https://www.instagram.com/accounts/login/")
            ec_params = ['//form[@method="post"]',"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            chrome_obj.find_element_by_xpath('//input[@name="username"]').send_keys(email)
            chrome_obj.find_element_by_xpath('//input[@name="password"]').send_keys(pwd)
            chrome_obj.find_element_by_xpath('//form//button[@type="submit"]').click()
            try:
                # Login succeeded (state 1): dismiss the notifications popup.
                ec_params = ['//div/button[@tabindex="0"][2]',"XPath"]
                explicit_wait(chrome_obj,"VOEL",ec_params)
                chrome_obj.find_element_by_xpath('//div/button[@tabindex="0"][2]').click()
                print("Login successful!")
                sql = "update account set state=1,log_num=log_num+1 where email=%s;"
                oprt_mysql(self.conn,sql,email)
            except:
                # Login failed (state 2): print the reason shown on the page.
                login_flag = chrome_obj.find_element_by_xpath('//form//div/p[@id="slfErrorAlert"]').text
                print("Login failed๏ผ"+login_flag)
                sql = "update account set state=2,log_num=log_num+1 where email=%s;"
                oprt_mysql(self.conn,sql,email)
                chrome_obj.quit()
        except:
            print("The login timeout!")
        # Fetch and store this account's display name.
        try:
            my_name = chrome_obj.find_element_by_xpath('//div[@role="button"]/../div[2]/div/a').text
            print("Account name:" + my_name)
            sql = "update account set uname=%s where email=%s;"
            oprt_mysql(self.conn,sql,(my_name,email))
        except:
            pass
        # Like / save / comment workflow.
        try:
            # Open the explore page.
            chrome_obj.find_element_by_xpath('//div/a[@href="/explore/"]/span').click()
            print("Enter the main page!")
        except:
            pass
        try:
            # Click the first picture.
            pic_xpath = '//main[@role="main"]/div/article/div/div/div[1]/div[1]/a/div/div[2]'
            ec_params = [pic_xpath,"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            chrome_obj.find_element_by_xpath(pic_xpath).click()
        except:
            print("Load home page timeout!")
        # Walk through 50 pictures.
        for i in range(50):
            # Read the publisher's name.
            ec_params = ['//header//h2/a',"XPath"]
            explicit_wait(chrome_obj,"VOEL",ec_params)
            Ins_name = chrome_obj.find_element_by_xpath('//header//h2/a').text
            print("The publisher:"+Ins_name)
            if Ins_name in kh_name:
                # Like the post if not already liked.
                zan = chrome_obj.find_element_by_xpath('//section/span[1]/button/span').get_attribute("aria-label")
                if zan == "Like":
                    chrome_obj.find_element_by_xpath('//section/span[1]/button/span').click()
                    sql = "update account set dz_num=dz_num+1 where email=%s;"
                    oprt_mysql(self.conn,sql,email)
                # Save the post if not already saved.
                sc = chrome_obj.find_element_by_xpath('//section/span[4]/button/span').get_attribute("aria-label")
                if sc == "Save":
                    chrome_obj.find_element_by_xpath('//section/span[4]/button/span').click()
                    sql = "update account set sc_num=sc_num+1 where email=%s;"
                    oprt_mysql(self.conn,sql,email)
                # Comment: collect existing commenter names first.
                # NOTE(review): the inner loop variable shadows the outer
                # picture index `i` — preserved as-is.
                pl_xpath = chrome_obj.find_elements_by_xpath('//div[@role="button"]//../ul//h3/a')
                pl_name = []
                for i in pl_xpath:
                    pl_name.append(i.text)
                if my_name in pl_name:
                    print("You have commented!")
                else:
                    # Post a comment.
                    pl_content = "Beautiful pictures!"
                    chrome_obj.find_element_by_xpath('//section/div/form/textarea').click()
                    chrome_obj.find_element_by_xpath('//section/div/form/textarea').send_keys(pl_content)
                    chrome_obj.find_element_by_xpath('//form//button[@type="submit"]').click()
                    time.sleep(3)
                    sql = "update account set pl_num=pl_num+1 where email=%s;"
                    oprt_mysql(self.conn,sql,email)
            # One nav arrow means we are on the first page; otherwise the
            # second arrow is the "next" control.
            xpth_num = chrome_obj.find_elements_by_xpath('//div[@class="D1AKJ"]/a')
            # Click "next".
            if len(xpth_num) == 1:
                chrome_obj.find_element_by_xpath('//div[@class="D1AKJ"]/a').click()
            else:
                chrome_obj.find_element_by_xpath('//div[@class="D1AKJ"]/a[2]').click()
        chrome_obj.quit()
    def run(self):
        """Process every stored account: upload a photo, then engage."""
        # Nicknames of customers whose posts should be liked/commented on.
        sql = "select * from kh_name"
        date = fetch_one_sql(self.conn,sql)
        kh_name = []
        for i in date:
            kh_name.append(i[1])
        # Iterate over stored customer email/password pairs.
        sql = "select * from account"
        account = fetch_one_sql(self.conn,sql)
        for acc in account:
            acc_email = acc[1]
            acc_pwd = acc[2]
            self.ins_phone_opr(acc_email,acc_pwd,self.filepath)
            self.ins_opr(acc_email,acc_pwd,kh_name)
if __name__ == "__main__":
    # Entry point: run the bot over every account in the database.
    Run = Main()
    Run.run()
|
from flask import Flask, request, jsonify, g, render_template
from .settings import access_key_id, secret_access_key, acl, bucket_name, bucket_region
from .email_settings import mail_server, mail_port, mail_username, mail_password
from .shipping_settings import shipping_address, shipping_zip, shipping_city, shipping_state
from .stripe_config import stripe_publishable, stripe_secret
class Config(object):
    """Base Flask configuration shared by all environments.

    NOTE(review): the secret key and DB credentials are hard-coded in
    source control — they should come from environment variables.
    """
    DEBUG = False
    TESTING = False
    SECRET_KEY = '9403aa3e794b673ce5c121cf07a8f9a16f6dafb87094de3d'
    DB_NAME = "production-db"
    DB_USERNAME = "admin"
    DB_PASSWORD = "example"
    # Local filesystem destination for uploaded images.
    IMAGE_UPLOADS = "/home/username/app/app/static/images/uploads"
    SESSION_COOKIE_SECURE = True
class ProductionConfig(Config):
    """Production settings: debug off, secure session cookies, live URLs."""
    # NOTE(review): hard-coded secret key duplicated from Config.
    SECRET_KEY = '9403aa3e794b673ce5c121cf07a8f9a16f6dafb87094de3d'
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    UPLOAD_FOLDER = 'uploads'
    # Public S3 bucket base URL for uploaded files.
    UPLOAD_URL = "https://"+bucket_name+".s3.us-east-2.amazonaws.com/"
    TESTING = False
    # Mail transport settings (credentials imported from email_settings).
    MAIL_SUPPRESS_SEND = False
    MAIL_DEBUG = True
    MAIL_SERVER = mail_server
    MAIL_PORT = mail_port
    MAIL_USERNAME = mail_username
    MAIL_PASSWORD = mail_password
    MAIL_USE_TLS = 1
    # launch_url = "http://localhost:3000/"
    # REDIRECT_URI="http://localhost:3000/"
    launch_url = "https://cardshop-client.herokuapp.com/"
    REDIRECT_URI = "https://cardshop-client.herokuapp.com/"
    # SHIPPING origin address (imported from shipping_settings).
    SHIP_ADDRESS = shipping_address
    SHIP_CITY = shipping_city
    SHIP_STATE = shipping_state
    SHIP_ZIP = shipping_zip
    SESSION_COOKIE_SECURE = True
class DevelopmentConfig(Config):
    """Development settings: debug on, insecure cookies allowed locally."""
    SECRET_KEY = '9403aa3e794b673ce5c121cf07a8f9a16f6dafb87094de3d'
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    UPLOAD_FOLDER = 'uploads'
    # Public S3 bucket base URL for uploaded files.
    UPLOAD_URL = "https://"+bucket_name+".s3.us-east-2.amazonaws.com/"
    TESTING = False
    # Mail transport settings (credentials imported from email_settings).
    MAIL_SUPPRESS_SEND = False
    MAIL_DEBUG = True
    MAIL_SERVER = mail_server
    MAIL_PORT = mail_port
    MAIL_USERNAME = mail_username
    MAIL_PASSWORD = mail_password
    MAIL_USE_TLS = 1
    # launch_url = "http://localhost:3000/"
    # REDIRECT_URI="http://localhost:3000/"
    launch_url = "https://cardshop-client.herokuapp.com/"
    REDIRECT_URI = "https://cardshop-client.herokuapp.com/"
    # SHIPPING origin address (imported from shipping_settings).
    SHIP_ADDRESS = shipping_address
    SHIP_CITY = shipping_city
    SHIP_STATE = shipping_state
    SHIP_ZIP = shipping_zip
    SESSION_COOKIE_SECURE = False
class TestingConfig(Config):
    """Testing settings: TESTING on, insecure session cookies."""
    TESTING = True
    DEBUG = False
    SECRET_KEY = '9403aa3e794b673ce5c121cf07a8f9a16f6dafb87094de3d'
    SQLALCHEMY_DATABASE_URI = 'sqlite:///db.sqlite'
    SQLALCHEMY_COMMIT_ON_TEARDOWN = True
    UPLOAD_FOLDER = 'uploads'
    # Public S3 bucket base URL for uploaded files.
    UPLOAD_URL = "https://"+bucket_name+".s3.us-east-2.amazonaws.com/"
    # Mail transport settings (credentials imported from email_settings).
    MAIL_SUPPRESS_SEND = False
    MAIL_DEBUG = True
    MAIL_SERVER = mail_server
    MAIL_PORT = mail_port
    MAIL_USERNAME = mail_username
    MAIL_PASSWORD = mail_password
    MAIL_USE_TLS = 1
    # launch_url = "http://localhost:3000/"
    # REDIRECT_URI="http://localhost:3000/"
    launch_url = "https://cardshop-client.herokuapp.com/"
    REDIRECT_URI = "https://cardshop-client.herokuapp.com/"
    # SHIPPING origin address (imported from shipping_settings).
    SHIP_ADDRESS = shipping_address
    SHIP_CITY = shipping_city
    SHIP_STATE = shipping_state
    SHIP_ZIP = shipping_zip
    SESSION_COOKIE_SECURE = False
|
from .eval import eval
from .train import train
from .learncurve import learning_curve
from .predict import predict
from .prep import prep
def cli(command, config_file):
    """command-line interface

    Parameters
    ----------
    command : string
        One of {'prep', 'train', 'eval', 'predict', 'finetune', 'learncurve'}
    config_file : str, Path
        path to a config.toml file

    Raises
    ------
    NotImplementedError
        for 'finetune', which is reserved but not implemented yet
    ValueError
        for any unrecognized command
    """
    # Guard-clause dispatch: handle each known command and return.
    if command == 'finetune':
        raise NotImplementedError
    if command == 'prep':
        prep(toml_path=config_file)
        return
    if command == 'train':
        train(toml_path=config_file)
        return
    if command == 'eval':
        eval(toml_path=config_file)
        return
    if command == 'predict':
        predict(toml_path=config_file)
        return
    if command == 'learncurve':
        learning_curve(toml_path=config_file)
        return
    raise ValueError(
        f'command not recognized: {command}'
    )
|
from django.db import models
class PriorityHospitalArea(models.Model):
    """High-priority hospital treatment area a patient can be sent to."""
    # CHOICES: (stored value, human-readable label) pairs.
    # NOTE(review): "Resusciation" looks like a typo for "Resuscitation",
    # but changing a stored choice value would require a data migration.
    PRIORITY_AREA_OPTIONS = (
        ("Resusciation Area", "Resusciation Area"),
        ("Major Wound Area", "Major Wound Area")
    )
    area = models.CharField(choices=PRIORITY_AREA_OPTIONS, max_length=20)
    def __str__(self):
        # Shown in the admin and anywhere the model is rendered as text.
        return str(self.area)
class HospitalArea(models.Model):
    """Standard (non-priority) hospital treatment area."""
    # CHOICES: (stored value, human-readable label) pairs.
    AREA_OPTIONS = (
        ("Minor Procedure Area", "Minor Procedure Area"),
        ("Consultation Area", "Consultation Area"),
        ("Online Area", "Online Area")
    )
    area = models.CharField(choices=AREA_OPTIONS, max_length=23)
    def __str__(self):
        # Shown in the admin and anywhere the model is rendered as text.
        return str(self.area)
|
# coding: utf-8
# In[1]:
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import pickle
import time
import random
import tensorflow as tf
from scipy import spatial
import os
import json
import argparse
import scipy
import copy
# In[2]:
# Command-line flags; parse_known_args keeps the script importable even
# when extra arguments are present.
parser = argparse.ArgumentParser()
parser.add_argument('--beta', type=str, default='',help='input data path')
parser.add_argument('--iter', type=str, default='',help='input data path')
FLAGS, _ = parser.parse_known_args()
DATA_DIR='../../data/ml-100k/'  # MovieLens 100k dataset directory
MODEL_DIR='./'
SEED=int(open("SEED.txt","r").readlines()[0])  # experiment seed read from file
DISTANT_TYPE=0  # 0: set-overlap distance, 1: cosine distance (RecommendSysUtil.distant)
NOVELTY_TYPE=0  # 0: mean-distance novelty, 1: -log2 popularity novelty
MLOBJ_PATH = 'ml_obj_%d.pkl'%(SEED)  # cached DataSetProcesser pickle
UTILOBJ_PATH='ml_util_%d_dis_%d.pkl'%(SEED,DISTANT_TYPE)  # cached RecommendSysUtil pickle
# In[3]:
class DataSetProcesser():
    """Splits a MovieLens dataset into train/test interaction sets.

    Builds per-user and per-item lists of positive (rating strictly above
    rating_threshold) and negative interactions, a dense item-by-user
    rating matrix, one-hot field-size metadata for user/item features,
    and a deterministic train/test split of the interaction lists.
    """
    def calculate_data(self):
        """Index ratings by user/item and compute feature-field metadata."""
        self.list_uid = self.df_userinfo.uid
        self.list_itemid = self.df_iteminfo.itemid
        # Positive/negative interaction lists keyed by item and by user.
        self.all_posuser_byitemid = {itemid: [] for itemid in self.list_itemid}
        self.all_positem_byuid = {uid: [] for uid in self.list_uid}
        self.all_neguser_byitemid = {itemid: [] for itemid in self.list_itemid}
        self.all_negitem_byuid = {uid: [] for uid in self.list_uid}
        sz1 = len(self.list_uid)
        sz2 = len(self.list_itemid)
        df_all = self.df_rating
        sz = len(self.df_rating)
        # Dense item-by-user rating matrix (0.0 where unrated).
        self.ratings_byitemid = [[0.0 for uid in self.list_uid]
                                 for itemid in self.list_itemid]
        for (index, row) in self.df_rating.iterrows():
            if index % 1000 == 0:
                print('Preprocessing Dataset', index, '/', sz)
            rating = row["rating"]
            uid = int(row["uid"])
            itemid = int(row["itemid"])
            self.ratings_byitemid[itemid][uid] = rating
            #self.rating_bypair[uid][itemid] = rating
            if rating > self.rating_threshold:
                self.all_posuser_byitemid[itemid].append(uid)
                self.all_positem_byuid[uid].append(itemid)
            else:
                self.all_neguser_byitemid[itemid].append(uid)
                self.all_negitem_byuid[uid].append(itemid)
        # One-hot widths of user feature fields; numerical fields count
        # as width 1 and are appended last (matching __init__'s column
        # reordering below).
        self._USER_SIZE_ONLY_NUM = len(self.user_numerical_attr)
        self._USER_SIZE_OF_FIELDS = []
        for feat in self.df_userinfo.columns:
            if feat not in self.user_numerical_attr:
                self._USER_SIZE_OF_FIELDS.append(
                    len(np.unique(self.df_userinfo[feat])))
        for feat in self.user_numerical_attr:
            self._USER_SIZE_OF_FIELDS.append(1)
        self._USER_SIZE = len(self._USER_SIZE_OF_FIELDS)
        self._USER_SIZE_OF_MASK_FIELDS = self._USER_SIZE_OF_FIELDS[:-self._USER_SIZE_ONLY_NUM]
        self._USER_SIZE_BIN = sum(self._USER_SIZE_OF_FIELDS)
        # Same metadata for item feature fields.
        self._ITEM_SIZE_ONLY_NUM = len(self.item_numerical_attr)
        self._ITEM_SIZE_OF_FIELDS = []
        for feat in self.df_iteminfo.columns:
            if feat in self.item_numerical_attr:
                self._ITEM_SIZE_OF_FIELDS.append(1)
            else:
                self._ITEM_SIZE_OF_FIELDS.append(
                    len(np.unique(self.df_iteminfo[feat])))
        self._ITEM_SIZE = len(self._ITEM_SIZE_OF_FIELDS)
        self._ITEM_SIZE_OF_MASK_FIELDS = self._ITEM_SIZE_OF_FIELDS[:-self._ITEM_SIZE_ONLY_NUM]
        self._ITEM_SIZE_BIN = sum(self._ITEM_SIZE_OF_FIELDS)
    def split_dict(self, dic,ratio):
        """Shuffle each key's list deterministically and split by *ratio*.

        Returns (dic1, dic2) where dic1 holds the first ratio-fraction of
        each list and dic2 the remainder.  Note the shuffle mutates the
        lists in *dic* in place.
        """
        seed = self.seed
        dic1 = {}
        dic2 = {}
        for ky in dic:
            lst = dic[ky]
            lenoflist = len(lst)
            if lenoflist != 0:
                # Seeded shuffle keeps the split reproducible.
                random.Random(seed).shuffle(lst)
                dic1[ky] = lst[:int(ratio * lenoflist)]
                dic2[ky] = lst[int(ratio * lenoflist):]
            else:
                dic1[ky] = []
                dic2[ky] = []
        return dic1, dic2
    def merge_dict(self, dic1, dic2):
        """Per-key union of the two dicts' lists (keys taken from dic1)."""
        return {ky: list(set(dic1[ky]) | set(dic2[ky])) for ky in dic1}
    def reverse_dict(self, dict_byuid):
        """Invert a uid -> item-list mapping into itemid -> user-list."""
        result = {itemid: [] for itemid in self.list_itemid}
        for uid in dict_byuid:
            for itemid in dict_byuid[uid]:
                result[itemid].append(uid)
        return result
    def split_data(self):
        """Create train/test splits for all interaction mappings."""
        self.train_positem_byuid, self.test_positem_byuid = self.split_dict(
            self.all_positem_byuid,self.ratio)
        self.train_posuser_byitemid, self.test_posuser_byitemid = self.reverse_dict(
            self.train_positem_byuid), self.reverse_dict(
                self.test_positem_byuid)
        self.train_negitem_byuid, self.test_negitem_byuid = self.split_dict(
            self.all_negitem_byuid,self.ratio)
        self.train_neguser_byitemid, self.test_neguser_byitemid = self.reverse_dict(
            self.train_negitem_byuid), self.reverse_dict(
                self.test_negitem_byuid)
        # "rated" = positive union negative interactions.
        self.train_rateduser_byitemid = self.merge_dict(
            self.train_posuser_byitemid, self.train_neguser_byitemid)
        self.test_rateduser_byitemid = self.merge_dict(
            self.test_posuser_byitemid, self.test_neguser_byitemid)
        self.train_rateditem_byuid = self.merge_dict(self.train_positem_byuid,
                                                     self.train_negitem_byuid)
        self.test_rateditem_byuid = self.merge_dict(self.test_positem_byuid,
                                                    self.test_negitem_byuid)
    def __init__(self, movielens, split_ratio, seed=SEED):
        """Process *movielens* (a MovieLens instance) with the given split ratio."""
        self.seed = seed
        self.rating_threshold = movielens.rating_threshold
        self.ratio = split_ratio
        self.df_rating = movielens.df_rating
        self.df_userinfo = movielens.df_userinfo
        self.df_iteminfo = movielens.df_iteminfo
        self.user_numerical_attr = movielens.user_numerical_attr
        self.item_numerical_attr = movielens.item_numerical_attr
        self.calculate_data()
        # Move numerical attributes to the last columns of each frame so
        # column order matches the field-size lists built above.
        for attr in self.df_userinfo:
            if attr not in self.user_numerical_attr:
                #print(attr)
                #self.df_userinfo[attr] = self.df_userinfo[attr].astype('str')
                pass
            else:
                df = self.df_userinfo[attr].copy()
                self.df_userinfo.drop([attr], axis=1, inplace=True)
                self.df_userinfo[attr] = df
        for attr in self.df_iteminfo:
            if attr not in self.item_numerical_attr:
                #self.df_iteminfo[attr] = self.df_iteminfo[attr].astype('str')
                pass
            else:
                df = self.df_iteminfo[attr].copy()
                self.df_iteminfo.drop([attr], axis=1, inplace=True)
                self.df_iteminfo[attr] = df
        self.split_data()
# In[4]:
class MovieLens:
    """Loads and feature-engineers the MovieLens 100k dataset."""
    def load_raw_data(self):
        """Read u.data / u.user / u.item from DATA_DIR into DataFrames."""
        f=tf.gfile.Open(DATA_DIR + 'u.data',"r")
        self.df_rating = pd.read_csv(
            f,
            sep='\t',
            names=['uid', 'itemid', 'rating', 'time'])
        f=tf.gfile.Open(DATA_DIR + 'u.user',"r")
        self.df_userinfo = pd.read_csv(
            f,
            sep='|',
            names=['uid', 'age', 'sex', 'occupation', 'zip_code'])
        list_item_attr = [
            'itemid', 'title', 'rel_date', 'video_rel_date', 'imdb_url',
            "unknown", "Action", "Adventure", "Animation", "Children's",
            "Comedy", "Crime", "Documentary", "Drama", "Fantasy", "Film-Noir",
            "Horror", "Musical", "Mystery", "Romance", "Sci-Fi", "Thriller",
            "War", "Western"
        ]
        f=tf.gfile.Open(DATA_DIR + 'u.item',"r")
        self.df_iteminfo = pd.read_csv(
            f,
            sep='|',
            names=list_item_attr)
        self.df_userinfo = self.df_userinfo.fillna(0)
        self.df_iteminfo = self.df_iteminfo.fillna(0)
    def minmax_scaler(self, list_attr, df):
        """Shift the listed columns so each starts at 0 (min-subtraction only,
        despite the name — no division by the range)."""
        for attr in list_attr:
            df[attr] = df[attr] - min(df[attr])
    def feature_engineering(self):
        """Derive date features, drop unused columns, factorize object columns."""
        ##iteminfo
        df_all = self.df_iteminfo
        df_date = df_all["rel_date"]
        df_date = pd.to_datetime(df_date)
        df_all["year"] = df_date.apply(lambda x: x.year)
        df_all["month"] = df_date.apply(lambda x: x.month)
        df_all["day"] = df_date.apply(lambda x: x.day)
        df_all.drop(
            ["rel_date", "imdb_url", "video_rel_date", "title"],
            axis=1,
            inplace=True)
        self.minmax_scaler(["year", "month", "day"], df_all)
        # Encode categorical (object-dtype) columns as integer codes.
        df_numeric = df_all.select_dtypes(exclude=['object'])
        df_obj = df_all.select_dtypes(include=['object']).copy()
        for c in df_obj:
            df_obj[c] = (pd.factorize(df_obj[c])[0])
        self.df_iteminfo = pd.concat([df_numeric, df_obj], axis=1)
        ##userinfo
        df_all = self.df_userinfo
        self.minmax_scaler(["age"], df_all)
        df_numeric = df_all.select_dtypes(exclude=['object'])
        df_obj = df_all.select_dtypes(include=['object']).copy()
        for c in df_obj:
            df_obj[c] = (pd.factorize(df_obj[c])[0])
        self.df_userinfo = pd.concat([df_numeric, df_obj], axis=1)
    def __init__(self):
        # Ratings strictly above this value count as positive interactions.
        self.rating_threshold = 3
        self.load_raw_data()
        # Convert 1-based MovieLens ids to 0-based indices.
        self.df_iteminfo["itemid"]=self.df_iteminfo["itemid"]-1
        self.df_userinfo["uid"]=self.df_userinfo["uid"]-1
        self.df_rating["itemid"]=self.df_rating["itemid"]-1
        self.df_rating["uid"]=self.df_rating["uid"]-1
        self.feature_engineering()
        self.user_numerical_attr = ["age"]
        self.item_numerical_attr = ["year", "month", "day"]
# In[5]:
# Load the cached processed dataset if present; otherwise rebuild from
# the raw MovieLens files and persist the result.
try:
    with open(MLOBJ_PATH, "rb") as f:
        dataset = pickle.load(f)
except Exception:
    # Cache missing or unreadable.  `except Exception` (instead of the
    # previous bare `except:`) no longer swallows KeyboardInterrupt /
    # SystemExit, and the `with` blocks close the file handles the
    # original left open.
    movielens = MovieLens()
    dataset = DataSetProcesser(movielens, 0.7)
    with open(MLOBJ_PATH, "wb") as f:
        pickle.dump(dataset, f)
# In[6]:
def basic_stat():
    """Print summary statistics of the loaded dataset.

    Prints, in order: user/item/rating counts and density, then
    max/min/mean/std of per-user positive counts, per-user negative
    counts, per-item positive counts, and per-item negative counts
    (train + test combined).  Reads the module-level `dataset`.
    """
    sz1 = len(dataset.list_uid)
    sz2 = len(dataset.list_itemid)
    sz3 = len(dataset.df_rating)
    print(sz1, sz2, sz3, sz3 / sz2 / sz1)

    def _print_count_stats(train_dict, test_dict, keys):
        # Combined train+test interaction count per key: max/min/mean/std.
        totals = [len(train_dict[k]) + len(test_dict[k]) for k in keys]
        print(max(totals), min(totals), np.mean(totals), np.std(totals))

    # The four stanzas below replace four copies of identical logic.
    _print_count_stats(dataset.train_positem_byuid,
                       dataset.test_positem_byuid, dataset.list_uid)
    _print_count_stats(dataset.train_negitem_byuid,
                       dataset.test_negitem_byuid, dataset.list_uid)
    _print_count_stats(dataset.train_posuser_byitemid,
                       dataset.test_posuser_byitemid, dataset.list_itemid)
    _print_count_stats(dataset.train_neguser_byitemid,
                       dataset.test_neguser_byitemid, dataset.list_itemid)
basic_stat()
# In[7]:
class RecommendSysUtil():
    """Precomputes vectors and item-item distances for recommendation.

    Caches one-hot widths for categorical attributes, user/item feature
    vectors, normalized per-item rating vectors, and the full item-item
    distance matrix.
    """
    def set_distant(self, i, j):
        """Set-overlap distance: 1 - |users(i) ∩ users(j)| / |users(j)|.

        Asymmetric (normalized by item j's raters); 1.0 when j has none.
        NOTE(review): reads the module-level `dataset`, not self.dataset.
        """
        users_i = dataset.train_rateduser_byitemid[i]
        users_j = dataset.train_rateduser_byitemid[j]
        if (len(users_j) != 0):
            return 1 - 1.0 * len(set(users_i) & set(users_j)) / len(users_j)
        else:
            return 1.0
    def cosine_distant(self, i, j):
        """Cosine distance between the items' normalized rating vectors.

        Requires save_distance() to have populated self.ratings_byitemid.
        """
        vec_i = self.ratings_byitemid[i]
        vec_j = self.ratings_byitemid[j]
        return 1 - np.dot(vec_i, vec_j)
    def distant(self, i, j):
        """Dispatch to the distance selected by the DISTANT_TYPE flag."""
        if DISTANT_TYPE == 0:
            return self.set_distant(i, j)
        else:
            return self.cosine_distant(i, j)
    def novelty(self, uid, item_i):
        """Novelty of item_i for user uid (form chosen by NOVELTY_TYPE).

        Type 0: mean distance from item_i to the user's train-rated items.
        Type 1: -log2 of item_i's popularity (epsilon avoids log2(0)).
        """
        items_byu = self.dataset.train_rateditem_byuid[uid]
        if NOVELTY_TYPE == 0:
            return np.mean(
                [self.distant_mat[item_i][item_j] for item_j in items_byu])
        else:
            return -np.log2(
                len(self.dataset.train_rateduser_byitemid[item_i]
                    ) / len(self.dataset.list_uid) + pow(10, -9))
    def item_vectorize(self, itemid, is_dummy=0):
        """Return the item's feature row as a flat list of numbers."""
        vec = []
        data = self.dataset.df_iteminfo[self.dataset.df_iteminfo['itemid'] == (
            itemid)]
        #print(data)
        for attr in self.dataset.df_iteminfo.keys():
            value = list(data[attr])[0]
            if (type(value) == str):
                vec.append(int(value))
            else:
                vec.append(value)
        return vec
    def user_vectorize(self, uid, is_dummy=0):
        """Return the user's feature row as a flat list of numbers."""
        vec = []
        data = self.dataset.df_userinfo[self.dataset.df_userinfo['uid'] == (
            uid)]
        for attr in self.dataset.df_userinfo.keys():
            value = list(data[attr])[0]
            if (type(value) == str):
                vec.append(int(value))
            else:
                vec.append(value)
        return vec
    def df_onehot_encode(self, df):
        """Map each string-typed column to its one-hot width (max code + 1)."""
        onehot_by_attr = {}
        for attr in df:
            if (type(df[attr][0]) == str):
                onehot_by_attr[attr] = max(df[attr].astype('int')) + 1
                #print(attr,onehot_by_attr[attr])
        return onehot_by_attr
    def save_vec(self):
        """Cache feature vectors for every user and item."""
        self.uid_to_vec = {}
        self.itemid_to_vec = {}
        sz = len(self.dataset.list_uid)
        for uid in self.dataset.list_uid:
            if uid % 1000 == 0:
                print("save user vec", uid, '/', sz)
            #print(self.user_vectorize(uid))
            self.uid_to_vec[uid] = self.user_vectorize(uid)
        sz = len(self.dataset.list_itemid)
        for itemid in self.dataset.list_itemid:
            if itemid % 1000 == 0:
                print("save item vec", itemid, '/', sz)
            self.itemid_to_vec[itemid] = self.item_vectorize(itemid)
    def save_distance(self):
        """Build normalized rating vectors and the item-item distance matrix.

        The matrix is filled symmetrically: entries below the diagonal
        are copied from the transposed position instead of recomputed.
        """
        self.ratings_byitemid=[]
        for itemid in dataset.list_itemid:
            print('save item',itemid,'rating vector')
            vec=[0.0 for uid in self.dataset.list_uid]
            for uid in self.dataset.train_rateduser_byitemid[itemid]:
                #print(self.dataset.ratings_byitemid[itemid][uid])
                vec[uid]=self.dataset.ratings_byitemid[itemid][uid]
            vec=np.array(vec)+pow(10,-9)  # epsilon avoids a zero-norm vector
            self.ratings_byitemid.append(vec/ np.linalg.norm(vec))
        self.distant_mat=[]
        for index_i,i in enumerate(self.dataset.list_itemid):
            print('save distance(%d/%d)'%(i,len(self.dataset.list_itemid)))
            self.distant_mat.append([])
            for index_j,j in enumerate(self.dataset.list_itemid):
                if index_j>index_i:
                    self.distant_mat[index_i].append(self.distant(index_i,index_j))
                elif index_j==index_i:
                    self.distant_mat[index_i].append(0)
                else:
                    self.distant_mat[index_i].append(self.distant_mat[index_j][index_i])
    def __init__(self, dataset):
        self.dataset = dataset
        # One-hot widths for categorical user/item attributes.
        self.useronehot_by_attr = self.df_onehot_encode(
            self.dataset.df_userinfo)
        self.itemonehot_by_attr = self.df_onehot_encode(
            self.dataset.df_iteminfo)
        self.save_vec()
        self.save_distance()
# In[8]:
# Load the cached utility object if present; otherwise rebuild it from
# the dataset and persist the result.
try:
    with open(UTILOBJ_PATH, "rb") as f:
        util = pickle.load(f)
except Exception:
    # Cache missing or unreadable.  `except Exception` (instead of a
    # bare `except:`) no longer swallows KeyboardInterrupt/SystemExit,
    # and the `with` blocks close the previously-leaked file handles.
    util = RecommendSysUtil(dataset)
    with open(UTILOBJ_PATH, "wb") as f:
        pickle.dump(util, f)
# In[17]:
class Nov_Distri_Saver():
    """Plain container for per-user novelty sampling distributions.

    pos_distr / neg_distr map a user id to that user's sampling
    distribution over positive / negative items respectively.
    """

    def __init__(self):
        # Both mappings start empty; RecommendSys.load_distribution()
        # fills them.
        self.pos_distr = dict()
        self.neg_distr = dict()
# This class implements the algorithm in the paper.
# 1. First, it calculates the novelty distribution and saves it in a hash (self.load_distribution())
# 2. Then, it trains on the dataset with the algorithm in the paper (self.generate_a_batch(), self.train())
#
class RecommendSys():
    # Get the novelty distribution of user u.
    # The distribution is a pair of lists of novelty values between user u and the user's items:
    # noveltyb_list:
    #     list[novelty^beta(user_u, item1), novelty^beta(user_u, item2), ...]
    # noveltyreb_list:
    #     list[novelty^-beta(user_u, item1), novelty^-beta(user_u, item2), ...]
    def get_novelty_distribution(self, u):
        """Return (positive, negative) sampling distributions for user *u*.

        Positive training items are weighted by novelty^beta and
        normalized to sum to 1; negative items get a uniform
        distribution (all weights 1.0 before normalization).
        """
        list_positemid = self.dataset.train_positem_byuid[u]
        list_negitemid = self.dataset.train_negitem_byuid[u]
        positem_novdistr = [
            pow(self.util.novelty(u, itemid), self.beta)
            for itemid in list_positemid
        ]
        negitem_novdistr = [1.0 for itemid in list_negitemid]
        return positem_novdistr / np.sum(
            positem_novdistr), negitem_novdistr / np.sum(negitem_novdistr)
    def load_distribution(self):
        """Precompute novelty sampling distributions for every user.

        Returns a Nov_Distri_Saver whose pos_distr/neg_distr map each uid
        to that user's positive/negative item sampling distribution.
        """
        list_uid = self.dataset.list_uid
        tmp = Nov_Distri_Saver()
        for uid in list_uid:
            #print('load the novelty distribution of user', uid)
            tmp.pos_distr[uid], tmp.neg_distr[
                uid] = self.get_novelty_distribution(uid)
        return tmp
    def predict(self, list_uid, list_itemid):
        """Evaluate self.prob for every (user, item) pair in the lists.

        Feeds cached user/item feature vectors into the TF graph.
        Presumably returns a (len(list_uid), len(list_itemid)) matrix —
        confirm against the graph definition.
        """
        user_batch = [self.util.uid_to_vec[uid] for uid in list_uid]
        item_batch = []
        for itemid in list_itemid:
            item_batch.append(self.util.itemid_to_vec[itemid])
        # Dummy all-ones labels: the label tensor is part of the feed dict
        # even though prediction does not use real labels.
        label_batch = [[1] * len(list_itemid) for uid in list_uid]
        prob_matrix = self.prob.eval(
            feed_dict={
                self.user_input: user_batch,
                self.item_input: item_batch,
                self.label: label_batch
            })
        return prob_matrix
def predict_by_queue(self, list_uid, list_itemid):
sz = len(list_itemid)
batch_sz = 10000
bins = int(sz / batch_sz)
ret = []
for idx in range(bins):
print('predict_by_queue %d/%d' % (idx, bins))
tmp = self.predict(
list_uid, list_itemid[idx * batch_sz:(idx + 1) * batch_sz])
if ret != []:
ret = np.concatenate((ret, tmp), axis=1)
else:
ret = tmp
tmp = self.predict(list_uid, list_itemid[bins * batch_sz:])
if ret != []:
ret = np.concatenate((ret, tmp), axis=1)
else:
ret = tmp
return ret
    def eval_performance(self):
        """Score the current model on all users.

        Predicts the full user-item probability matrix, builds top-N
        recommendation lists, prints accuracy, and returns the diversity
        metrics (reward0, reward1, aggregate div., entropy div.).
        NOTE(review): reads the module-level `dataset` global for the
        uid/itemid lists rather than self.dataset.
        """
        list_uid = dataset.list_uid
        list_itemid = dataset.list_itemid
        self.prob_by_uitem = self.predict_by_queue(list_uid, list_itemid)
        self.uid_to_recomm = self.base_recommend(self.prob_by_uitem,
                                                 self.top_N)
        #print(list_uid)
        #print(uid_to_recomm)
        acc = self.print_accuracy(self.uid_to_recomm, self.prob_by_uitem)
        reward0, reward1, agg_div, entro_div = self.print_diversity(
            self.uid_to_recomm)
        return reward0, reward1, agg_div, entro_div
    def print_accuracy(self, uid_to_recomm, prob_by_uitem):
        """Return the hit rate over recommended users.

        A user counts as a hit when at least one recommended item appears
        in their positive test set; users with fewer than top_N positive
        test items are skipped (they also get no recommendations).
        NOTE(review): prob_by_uitem is unused; kept for caller
        compatibility.
        """
        acc = 0
        for uid in self.dataset.list_uid:
            if len(self.dataset.test_positem_byuid[uid]) < self.top_N:
                continue
            #pass
            positem_test = list(self.dataset.test_positem_byuid[uid])
            if len(set(positem_test) & set(uid_to_recomm[uid])) != 0:
                acc += 1
        return acc / len(uid_to_recomm)
def base_recommend(self, prob_by_uitem, top_N):
uid_to_recomm = {}
for uid in self.dataset.list_uid:
if len(self.dataset.test_positem_byuid[uid]) < self.top_N:
continue
#pass
prob_row = prob_by_uitem[uid]
prob_arr = list(zip(self.dataset.list_itemid, prob_row))
prob_arr = sorted(prob_arr, key=lambda d: -d[1])
cnt = 0
uid_to_recomm[uid] = []
for pair in prob_arr:
itemid = pair[0]
if itemid not in dataset.train_rateditem_byuid[uid]:
uid_to_recomm[uid].append(itemid)
cnt += 1
if cnt == top_N:
break
return uid_to_recomm
def print_diversity(self, uid_to_recomm):
avg_reward0 = 0.0
avg_reward1 = 0.0
agg_div = 0.0
enp_div = 0.0
cnt = 0
for uid in uid_to_recomm:
reward0 = 0.0
reward1 = 0.0
for itemid in uid_to_recomm[uid]:
if (itemid in self.dataset.test_positem_byuid[uid]):
nov = self.util.novelty(uid, itemid)
if nov == np.inf or np == -np.inf:
nov = 0
if nov != 0:
nov0 = pow(nov, 0)
nov1 = pow(nov, 1)
cnt += 1
reward0 = max(reward0, nov0)
reward1 = max(reward1, nov1)
avg_reward0 += reward0
avg_reward1 += reward1
if avg_reward0 != 0:
avg_reward0 /= len(uid_to_recomm)
if avg_reward1 != 0:
avg_reward1 /= cnt
recomm_set = set()
cnt = 0
self.rec_cnt[self.beta] = {i: 0 for i in dataset.list_itemid}
for uid in uid_to_recomm:
recomm_set = recomm_set | set(uid_to_recomm[uid])
for i in uid_to_recomm[uid]:
self.rec_cnt[self.beta][i] += 1
cnt += 1
agg_div = len(recomm_set) / len(uid_to_recomm) / self.top_N
itemid_to_recomuser = {}
for uid in uid_to_recomm:
for itemid in uid_to_recomm[uid]:
if itemid not in itemid_to_recomuser:
itemid_to_recomuser[itemid] = 0
itemid_to_recomuser[itemid] += 1
s = 0
for itemid in itemid_to_recomuser:
s += itemid_to_recomuser[itemid]
for itemid in itemid_to_recomuser:
probb = itemid_to_recomuser[itemid] / s + pow(10, -9)
enp_div += -(np.log2(probb) * probb)
#print('over diver %f'%(time.time()-t1))
print(
'Diversity: reward(ฮฒ=0)=%.5f reward(ฮฒ=1)=%.5f aggdiv=%.5f entropydiv=%.5f'
% (avg_reward0, avg_reward1, agg_div, enp_div))
return avg_reward0, avg_reward1, agg_div, enp_div
    def train_a_batch(self, iter, session):
        """Assemble one training batch (positives + hard negatives) and run one step.

        For each of self.batch_size users a positive item is sampled from the
        novelty-biased distribution; then up to self.LIMIT negative draws look
        for a "violator" (a negative the model currently scores at least as
        high as the positive), which is added with label -1.  When iter == 0
        only the loss is evaluated (no optimizer step), so a restored model
        can be scored before training starts.

        Args:
            iter: current training iteration (0 = evaluate-only).
            session: an open tf.Session over self.optimize / self.loss.

        Returns:
            the batch loss value returned by the session run.
        """
        loss_all = 0  # NOTE(review): accumulated nowhere; apparently vestigial
        user_batch = []
        item_batch = []
        label_batch = []
        list_positemid = []
        list_uid = []
        list_label = []
        list_negitemid = []
        # Rejection-sample user ids that exist and have both positives and
        # negatives in the training split.
        for i in range(self.batch_size):
            uid = 0
            while (True):
                uid = random.randint(1, self.NUM_USERS)
                # NOTE(review): `dataset` assigned here is also read after
                # this loop (below) — it relies on the loop body running at
                # least once, which batch_size >= 1 guarantees.
                dataset = self.dataset
                if ((uid in dataset.list_uid)
                        and len(dataset.train_positem_byuid[uid]) != 0
                        and len(dataset.train_negitem_byuid[uid]) != 0):
                    break
            list_uid.append(uid)
        # One positive item per sampled user, drawn from pos_distr.
        for uid in list_uid:
            pos_itemid = np.random.choice(
                self.dataset.train_positem_byuid[uid], p=self.pos_distr[uid])
            list_positemid.append(pos_itemid)
            list_label.append(1)
            user_batch.append(self.util.user_vectorize(uid))
            pos_itemvec = self.util.item_vectorize(pos_itemid)
            item_batch.append(pos_itemvec)
        # Score every (user_i, positive_i) pair; only the diagonal is used.
        prob_by_uitem = self.predict(list_uid, list_positemid)
        #print('predict end time '+time.asctime())
        #print('neg fetch start time '+time.asctime())
        # Union of all candidate negatives across the batch, scored once.
        neg_itemset = set()
        neg_index = {}
        for uid in list_uid:
            neg_itemset = neg_itemset | set(dataset.train_negitem_byuid[uid])
        for index, neg_item in enumerate(neg_itemset):
            neg_index[neg_item] = index
        neg_itemset = list(neg_itemset)
        neg_prob_by_uitem = self.predict(list_uid, neg_itemset)
        violator_cnt = 0
        for i, uid in enumerate(list_uid):
            neg_itemid = -1
            pos_itemid = list_positemid[i]
            pos_prob = prob_by_uitem[i][i]
            # Up to LIMIT attempts to find a violating negative for user i.
            for k in range(self.LIMIT):
                neg_itemid = np.random.choice(
                    self.dataset.train_negitem_byuid[uid],
                    p=self.neg_distr[uid])
                neg_prob = neg_prob_by_uitem[i][neg_index[neg_itemid]]
                if neg_prob >= pos_prob and neg_prob != 0:
                    break
                else:
                    neg_itemid = -1
            if neg_itemid != -1:
                violator_cnt += 1
                list_label.append(-1)
                user_batch.append(self.util.user_vectorize(uid))
                neg_itemvec = self.util.item_vectorize(neg_itemid)
                item_batch.append(neg_itemvec)
        # Square label matrix: all ones except the diagonal, which carries
        # each example's +1/-1 label (only the diagonal enters the loss).
        label_batch = [[1] * len(user_batch) for j in range(len(user_batch))]
        for i, label in enumerate(list_label):
            label_batch[i][i] = label
        feed_dict = {
            self.user_input: user_batch,
            self.item_input: item_batch,
            self.label: label_batch
        }
        if iter != 0:
            [_optimize, _loss] = session.run(
                [self.optimize, self.loss], feed_dict=feed_dict)
        else:
            # iteration 0: loss only, no gradient step
            [_loss] = session.run([self.loss], feed_dict=feed_dict)
        return _loss
def read_distribution(self, nov_distri_path):
try:
f = open(nov_distri_path, "rb")
tmp = pickle.load(f)
self.neg_distr = tmp.neg_distr.copy()
self.pos_distr = tmp.pos_distr.copy()
except:
tmp = self.load_distribution()
f = open(nov_distri_path, "wb")
pickle.dump(tmp, f)
self.neg_distr = tmp.neg_distr.copy()
self.pos_distr = tmp.pos_distr.copy()
def cal_val_loss(self):
p=[]
q=[]
prob_by_uitem = self.predict(self.dataset.list_uid, self.dataset.list_itemid)
for uid in self.dataset.list_uid:
for itemid in self.val_positem_byuid[uid]:
prob=prob_by_uitem[uid][itemid]
p.append(1)
q.append(prob)
for itemid in self.val_negitem_byuid[uid]:
prob=prob_by_uitem[uid][itemid]
p.append(0)
q.append(prob)
q=[x+1e-20 for x in q]
return scipy.stats.entropy(p, q)
def process_train(self, is_early_stopping):
self.dataset=copy.deepcopy(dataset)
if is_early_stopping==1:
self.dataset.train_positem_byuid,self.val_positem_byuid=self.dataset.split_dict(self.dataset.train_positem_byuid,5/7)
self.dataset.train_negitem_byuid,self.val_negitem_byuid=self.dataset.split_dict(self.dataset.train_negitem_byuid,5/7)
    def train(self,
              nov_distri_path,
              model_path,
              is_early_stopping=0,
              beta=0.0,
              batch_size=128,
              learning_rate=0.006,
              nu=0.0001,
              embedding_size=600,
              EVERY_N_ITERATIONS=100,
              MAX_ITERATIONS=0,
              predict_pair=[]):
        """Build the TF1 factorization graph, (re)train it, and evaluate.

        Args:
            nov_distri_path: cache file name (under MODEL_DIR) for the
                novelty-biased sampling distributions.
            model_path: checkpoint file name (under MODEL_DIR).
            is_early_stopping: 1 to hold out a validation split and stop when
                the validation loss stalls; 0 to checkpoint periodically.
            beta: novelty exponent stored on self for downstream metrics.
            batch_size, learning_rate, nu, embedding_size: model
                hyper-parameters (nu is the L2 regularization weight).
            EVERY_N_ITERATIONS: evaluation cadence.
            MAX_ITERATIONS: last training iteration (iteration 0 is
                evaluate-only, so MAX_ITERATIONS=0 just scores a restored model).
            predict_pair: optional (user, item) pairs to score after training.
                NOTE(review): mutable default, but it is only read, never
                mutated, so the shared-default pitfall does not bite here.

        Returns:
            (result, reward0, reward1, agg_div, entro_div) — predictions for
            predict_pair (or {}) and the last evaluation metrics.
        """
        self.beta = beta
        self.batch_size = batch_size
        self.learning_rate = learning_rate
        self.nu = nu
        self.embedding_size = embedding_size
        self.EVERY_N_ITERATIONS = EVERY_N_ITERATIONS
        self.MAX_ITERATIONS = MAX_ITERATIONS
        nov_distri_path = MODEL_DIR + nov_distri_path
        model_path = MODEL_DIR + model_path
        self.process_train(is_early_stopping)
        if is_early_stopping==1:
            # keep early-stopping artifacts separate from full-data ones
            nov_distri_path+="_es"
            model_path+="_es"
        self.read_distribution(nov_distri_path)
        # Create the TF graph
        graph = tf.Graph()
        dataset = self.util.dataset
        with graph.as_default(), tf.device('/cpu:0'):
            self.user_input = tf.placeholder(
                tf.int32, shape=[None, dataset._USER_SIZE], name='user_info')
            self.item_input = tf.placeholder(
                tf.int32, shape=[None, dataset._ITEM_SIZE], name='item_info')
            self.label = tf.placeholder(
                tf.int32, shape=[None, None], name='label')
            # Variables
            # embedding for users
            W = tf.Variable(
                initial_value=tf.truncated_normal(
                    (self.embedding_size, dataset._USER_SIZE_BIN),
                    stddev=1.0 / np.sqrt(self.embedding_size)))
            # embedding for movies
            A = tf.Variable(
                initial_value=tf.truncated_normal(
                    (self.embedding_size, dataset._ITEM_SIZE_BIN),
                    stddev=1.0 / np.sqrt(self.embedding_size)))
            # intercept
            b = tf.Variable(
                initial_value=tf.truncated_normal(
                    (self.embedding_size, 1),
                    stddev=1.0 / np.sqrt(self.embedding_size)))
            # select and sum the columns of W depending on the input
            # (offsets turn per-field one-hot indices into global column ids)
            w_offsets = [0] + [
                sum(dataset._USER_SIZE_OF_MASK_FIELDS[:i + 1])
                for i, j in enumerate(dataset._USER_SIZE_OF_MASK_FIELDS[:-1])
            ]
            w_offsets = tf.matmul(
                tf.ones(
                    shape=(tf.shape(self.user_input)[0], 1), dtype=tf.int32),
                tf.convert_to_tensor([w_offsets]))
            w_columns = self.user_input[:, :-dataset.
                                        _USER_SIZE_ONLY_NUM] + w_offsets  # last column is not an index
            w_selected = tf.gather(W, w_columns, axis=1)
            # age * corresponding column of W
            aux = tf.matmul(
                W[:, -dataset._USER_SIZE_ONLY_NUM:],
                tf.transpose(
                    tf.to_float(
                        (self.user_input[:, -dataset._USER_SIZE_ONLY_NUM:]))))
            batch_age = tf.reshape(
                aux,
                shape=(self.embedding_size, tf.shape(self.user_input)[0], 1))
            w_with_age = tf.concat([w_selected, batch_age], axis=2)
            w_result = tf.reduce_sum(w_with_age, axis=2)
            # select and sum the columns of A depending on the input
            a_offsets = [0] + [
                sum(dataset._ITEM_SIZE_OF_MASK_FIELDS[:i + 1])
                for i, j in enumerate(dataset._ITEM_SIZE_OF_MASK_FIELDS[:-1])
            ]
            a_offsets = tf.matmul(
                tf.ones(
                    shape=(tf.shape(self.item_input)[0], 1), dtype=tf.int32),
                tf.convert_to_tensor([a_offsets]))
            a_columns = self.item_input[:, :-dataset.
                                        _ITEM_SIZE_ONLY_NUM] + a_offsets  # last two columns are not indices
            a_selected = tf.gather(A, a_columns, axis=1)
            # dates * corresponding last two columns of A
            aux = tf.matmul(
                A[:, -dataset._ITEM_SIZE_ONLY_NUM:],
                tf.transpose(
                    tf.to_float(
                        self.item_input[:, -dataset._ITEM_SIZE_ONLY_NUM:])))
            batch_dates = tf.reshape(
                aux,
                shape=(self.embedding_size, tf.shape(self.item_input)[0], 1))
            # ... and the intercept
            intercept = tf.gather(
                b,
                tf.zeros(
                    shape=(tf.shape(self.item_input)[0], 1), dtype=tf.int32),
                axis=1)
            a_with_dates = tf.concat(
                [a_selected, batch_dates, intercept], axis=2)
            a_result = tf.reduce_sum(a_with_dates, axis=2)
            # Definition of g (Eq. (14) in the paper g = <Wu, Vi> = u^T * W^T * V * i)
            g = tf.matmul(tf.transpose(w_result), a_result)
            x = tf.to_float(self.label) * g
            self.prob = tf.nn.sigmoid(x)
            # only the diagonal (each example's own label * score) is trained
            self.loss = tf.reduce_mean(tf.nn.softplus(tf.diag_part(-x)))
            # Regularization
            reg = self.nu * (tf.nn.l2_loss(W) + tf.nn.l2_loss(A))
            # Loss function with regularization (what we want to minimize)
            loss_to_minimize = self.loss + reg
            self.optimize = tf.train.AdamOptimizer(
                learning_rate=self.learning_rate).minimize(
                    loss=loss_to_minimize)
        # Once the graph is created, program the training loop
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        with tf.Session(config=config,graph=graph) as session:
            # mandatory: initialize variables in the graph, i.e. W, A
            session.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            try:
                # warm start from a previous checkpoint when available
                saver.restore(session, model_path)
            except:
                #print()
                pass
            #result=[]
            self.EARLY_STOP_INTERVAL = 40
            average_loss = 0.0
            for iter in range(self.MAX_ITERATIONS + 1):
                train_loss = self.train_a_batch(iter, session)
                average_loss += train_loss
                print('Iteration', iter, 'Train_loss', train_loss)
                if iter % self.EVERY_N_ITERATIONS == 0:
                    reward0, reward1, agg_div, entro_div = self.eval_performance(
                    )
                if iter % self.EARLY_STOP_INTERVAL == 0:
                    average_loss = average_loss / self.EARLY_STOP_INTERVAL
                    print('Average_loss', average_loss)
                    if is_early_stopping==1:
                        stop_flg = self.early_stop(iter, self.cal_val_loss(), saver, session,
                                                   model_path)
                        average_loss = 0.0
                        if stop_flg == 1:
                            break
                    else:
                        # no validation split: just checkpoint periodically
                        model_path = saver.save(session, model_path)
                        print('current model save in', model_path)
            user_list = []
            item_list = []
            for (user, item) in predict_pair:
                user_list.append(user)
                item_list.append(item)
            if len(predict_pair) != 0:
                result = self.predict(user_list, item_list)
            else:
                result = {}
        return result, reward0, reward1, agg_div, entro_div
def update_loss_win(self,loss):
self.stop_loss = self.stop_loss[1:]
self.stop_loss.append(loss)
l1, l2, l3, l4, l5 = self.stop_loss
return (l5 >= l4 and l5 >= l3 and l5 >= l2 and l5 >= l1) and (l4 >= l3 and l4 >= l2 and l4 >= l1)
def long_not_improve(self,iter,loss):
self.cur_iter=iter
if loss<self.best_loss:
self.best_loss=loss
self.best_iter=iter
return self.cur_iter-self.best_iter>=400
def early_stop(self, iter, loss, saver, session, save_path):
if iter == 0:
self.stop_loss = [9999, 9999, 9999, 9999, 9999]
self.best_loss=9999
self.best_iter=0
self.cur_iter=0
return 0
else:
stop_flg = self.long_not_improve(iter,loss)#self.update_loss_win(loss)
print({"loss":loss,"best_loss":self.best_loss,"best_iter":self.best_iter,"stop_win":self.stop_loss})
if stop_flg==0:
save_path = saver.save(session, save_path)
print('current model save in', save_path)
return stop_flg
def __init__(self, util):
self.rec_cnt = {}
self.top_N = 10
self.LIMIT = 100
self.util = util
self.dataset = util.dataset
self.NUM_USERS = len(self.util.dataset.df_userinfo)
self.NUM_ITEMS = len(self.util.dataset.df_iteminfo)
self.neg_distr = {}
self.pos_distr = {}
self.beta = 0.0
# In[19]:
# Sweep the novelty exponent beta and record accuracy/diversity metrics.
# NOTE(review): this rebinds the name `sys`, shadowing the stdlib `sys`
# module imported at the top of the file.
sys=RecommendSys(util)
beta_list=[0.0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0,4.5,5.0]
print(beta_list)
result_list=[]
for beta in list(beta_list):
    print(beta)
    # per-beta cache / checkpoint names
    s1="ml_nov_distri_beta%.1f"%(beta)
    s2="ml_K_600_beta_%.1f_vald2"%(beta)
    # result,reward0, reward1, agg_div, entro_div=sys.train(
    #     s1,s2,beta=beta,is_early_stopping=1,predict_pair=[],MAX_ITERATIONS=4000)
    # with open("maxiter_%.2f"%(beta),"w") as f:
    #     f.write(str(sys.cur_iter))
    # print('bestiter',sys.cur_iter)
    # MAX_ITERATIONS=0 evaluates a previously saved checkpoint (iteration 0
    # is evaluate-only inside train()).
    result,reward0, reward1, agg_div, entro_div=sys.train(
        s1,s2,beta=beta,is_early_stopping=0, predict_pair=[],MAX_ITERATIONS=0)
    result_list.append((beta,reward0,reward1,agg_div,entro_div))
pd.DataFrame(result_list,columns=["beta","avg_accuracy","avg_reward","agg_div","entropy_div"]).to_csv("mlens_newmethod_result_"+str(SEED)+".csv",index=False)
# In[21]:
# Re-load the beta=0.0 (pure-relevance) model so later cells can reuse the
# populated sys.prob_by_uitem score matrix.
s1="ml_nov_distri_beta%.1f"%(0.0)
s2="ml_K_600_beta_%.1f_vald2"%(0.0)
result,reward0, reward1, agg_div, entro_div=sys.train(
    s1,s2,beta=0.0,is_early_stopping=0, predict_pair=[],MAX_ITERATIONS=0)
def run_baseline():
    """MMR-style re-ranking baseline: greedily re-rank a 500-item relevance
    candidate list per user, trading relevance against distance-to-selected
    via lambda (the loop variable `k`), and log metrics per lambda."""
    def find_best_fobj(uid, R, S, rel_matrix):
        # Greedy objective: (1-k)*relevance + k*min distance to the already
        # selected set S.  NOTE(review): `k` and `util` are free variables —
        # `k` is the enclosing loop's trade-off weight (late-bound closure).
        fobj_set = []
        for index, i in enumerate(R):
            rel = rel_matrix[uid][i]
            min_dist = 1.0
            for j in S:
                dist = util.distant_mat[i][j]
                min_dist = min(dist, min_dist)
            fobj = (1 - k) * rel + k * min_dist
            #print(rel,min_dist)
            fobj_set.append((i, fobj))
        pair = max(fobj_set, key=lambda x: x[1])
        return pair[0]
    result_list = []
    k_list=[]
    # lambda grid depends on which distance definition is in use
    if DISTANT_TYPE==0:
        k_list=[0.0,0.01,0.02,0.03,0.04,0.05,0.06,0.07,0.08,
                0.09,0.1,1.0]
    else:
        k_list=[0.0,0.02,0.06,0.1,0.12,0.16,0.2,0.22,0.26,0.3,0.32,
                0.36,0.4,0.5,0.6,0.7,0.8]
    for k in k_list:
        print("lambda=%f" % (k))
        rel_matrix = sys.prob_by_uitem
        # 500-item candidate pool per user, re-ranked down to top_N below
        uid_to_recommend = sys.base_recommend(rel_matrix, 500).copy()
        for index1, uid in enumerate(uid_to_recommend):
            R, S = uid_to_recommend[uid], []
            #print(index1, '/', len(uid_to_recommend))
            for iter in range(sys.top_N):
                besti = find_best_fobj(uid, R, S, rel_matrix)
                R.remove(besti)
                S.append(besti)
            uid_to_recommend[uid] = S
        acc = sys.print_accuracy(uid_to_recommend, rel_matrix)
        print('Baseline Performance')
        avg_reward0, avg_reward1, agg_div, enp_div = sys.print_diversity(
            uid_to_recommend)
        result_list.append((k, avg_reward0, avg_reward1, agg_div, enp_div))
    pd.DataFrame(
        result_list,
        columns=[
            "lambda", "avg_accuracy", "avg_reward", "agg_div", "entropy_div"
        ]).to_csv(
            "mlens_baseline_result_" + str(SEED) + "_eq15.csv", index=False)
run_baseline()
# In[ ]:
|
# OTCalcMethods.py - This version will use the correlations to form time series,
# then do time series forecasting
#
# This is an implementation of the code at:
#
# https://machinelearningmastery.com/random-forest-for-time-series-forecasting/
#
# Python code to use Scikit_Learn to identify earthquake alerts
#
# This code downloads data from the USGS web site.
#
# This code was written on a Mac using Macports python. A list of the ports needed to run the code are available at:
# https://www.dropbox.com/s/8wr5su8d7l7a30z/myports-wailea.txt?dl=0
# ---------------------------------------------------------------------------------------
# Copyright 2020 by John B Rundle, University of California, Davis, CA USA
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the "Software"), to deal in the Software without restriction, including without
# limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# ---------------------------------------------------------------------------------------
import sys
import os
import numpy as np
from array import array
import SEISRFileMethods
import SEISRPlotMethods
import SEISRUtilities
import datetime
import dateutil.parser
import time
from time import sleep # Added a pause of 30 seconds between downloads
import math
from tabulate import tabulate
# Now we import the sklearn methods
import pandas as pd
import numpy as np
import scipy
from numpy import asarray
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
import matplotlib.pyplot as plt
from matplotlib.ticker import MultipleLocator
import matplotlib.patches as mpatches
from scipy.integrate import simps
from numpy import trapz
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
import itertools
#from sklearn.datasets import load_iris # Don't need this dataset
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
from sklearn import preprocessing
from sklearn.metrics import mean_absolute_error
from sklearn.ensemble import RandomForestRegressor
import random
from PIL import Image
######################################################################
def read_input_data(input_file_name, min_mag):
    """Parse a SEISR output CSV into time-series lists plus region metadata.

    After the first two header lines, a row whose first field contains '.'
    is a data row contributing: time (col 1), value (col 3), stddev (col 4),
    earthquake count (col 5).  A row whose first field contains 'Data:'
    carries the region bounding box, grid size and location name.

    Args:
        input_file_name: path to the CSV produced by the SEISR pipeline.
        min_mag: unused here; kept for interface compatibility with callers.

    Returns:
        (values_window, stddev_window, times_window, eqs_window,
         NELng_local, SWLng_local, NELat_local, SWLat_local, Grid, Location)

    Raises:
        UnboundLocalError: if the file has no 'Data:' metadata row (the
        region variables are never assigned) — preserved original behavior.
    """
    values_window = []
    stddev_window = []
    times_window = []
    eqs_window = []
    number_lines = 0
    # Context manager guarantees the file is closed even if a parse fails
    # (the original used a bare open/close pair that leaked on exceptions).
    with open(input_file_name, 'r') as input_file:
        for line in input_file:
            items = line.strip().split(',')
            number_lines += 1
            if number_lines > 2 and '.' in items[0]:
                values_window.append(float(items[3]))
                stddev_window.append(float(items[4]))
                times_window.append(float(items[1]))
                eqs_window.append(float(items[5]))
            if number_lines > 2 and 'Data:' in items[0]:
                NELng_local = float(items[1])
                SWLng_local = float(items[2])
                NELat_local = float(items[3])
                SWLat_local = float(items[4])
                Grid = float(items[5])
                Location = items[6]
    return values_window, stddev_window, times_window, eqs_window, NELng_local, SWLng_local, \
           NELat_local, SWLat_local, Grid, Location
######################################################################
def read_input_earthquake_data(delta_time, min_mag, lower_cutoff):
    """Discretize the regional catalog into counts of small quakes per bin.

    Args:
        delta_time: bin width in decimal years.
        min_mag: minimum magnitude passed to the catalog reader.
        lower_cutoff: if > 0.0001, empty bins are floored to a count of 1
            (keeps later log/ratio computations finite).

    Returns:
        (time_list, eqs_list): bin start times (decimal years) and the number
        of events per bin, both in increasing-time order.
    """
    #   Get catalog location and grid box data.  Then get list of small earthquakes.
    #
    #   Next, discretize the earthquake list into time intervals of length delta_time.
    #
    #   This discretized earthquake list is then the number of small earthquakes in time intervals
    #       of length delta_time.
    #
    #   This method returns the list of discrete times (time_list) and the number of small earthquakes in each
    #       discrete time interval (eqs_list)
    mag_array, date_array, time_array, year_array, depth_array, lat_array, lng_array = SEISRFileMethods.read_regional_catalog(min_mag)
    last_index = int(len(year_array))-1
    first_index = 0
    year_diff = float(year_array[last_index]) - float(year_array[0])
    number_time_deltas = int (year_diff/delta_time)
    time_list = []
    eqs_list = []
    # Walk the catalog from the latest event backwards, closing a bin each
    # time the bin index j drops below the previous one (jlast).
    jlast = number_time_deltas - 1
    last_time = jlast * delta_time
    number_eqs = 0
    initial_year = float(year_array[0])
    for i in range(len(time_array)):
        index = len(time_array) - i - 1
        j = int ( (float(year_array[index])- float(year_array[0])) /delta_time) # j will be a monotone decreasing index
        if j == jlast:
            number_eqs += 1
        else:
            time_list.append((jlast-1) * delta_time + initial_year)
            if number_eqs == 0 and lower_cutoff > 0.0001:
#            if number_eqs == 0:
                # floor empty bins so downstream logs stay finite
                number_eqs = 1
            eqs_list.append(number_eqs)
            number_eqs = 0
            jlast = j
    # lists were built newest-first; flip to chronological order
    time_list.reverse()
    eqs_list.reverse()
    time_list = time_list[:-1] # Removing the extra last index that is introduced by the above code
    eqs_list = eqs_list[:-1]
    return time_list,eqs_list
######################################################################
def get_large_earthquakes(mag_large, min_mag):
    """Return (years, magnitudes, catalog indices) of every event with
    magnitude >= mag_large from the regional catalog read at min_mag."""
    mag_array, date_array, time_array, year_array, depth_array, lat_array, lng_array = \
                    SEISRFileMethods.read_regional_catalog(min_mag)
    year_large_eq = []
    mag_large_eq = []
    index_large_eq = []
    # scan the catalog once, keeping only the large events
    for idx, (mag, yr) in enumerate(zip(mag_array, year_array)):
        if float(mag) >= mag_large:
            mag_large_eq.append(float(mag))
            year_large_eq.append(float(yr))
            index_large_eq.append(idx)
    return year_large_eq, mag_large_eq, index_large_eq
######################################################################
def adjust_year_times(year_large_eq, mag_large_eq, index_large_eq, time_list, plot_start_year):
    """Snap each large-earthquake year onto the discrete time grid.

    An event falling in [time_list[j], time_list[j+1]) with
    time_list[j] >= plot_start_year is reported at grid time time_list[j].

    Args:
        year_large_eq, mag_large_eq: parallel lists of event years/magnitudes.
        index_large_eq: unused; kept for interface compatibility.
        time_list: increasing grid of bin start times.
        plot_start_year: events snapped before this year are dropped.

    Returns:
        (year_large_eq_adj, mag_large_eq_adj, index_large_eq_adj) where the
        last entry holds the grid index j of each retained event.
    """
    # (removed a dead `mag_large_eq_adj = mag_large_eq` pre-assignment that
    # was immediately overwritten in the original)
    year_large_eq_adj = []
    index_large_eq_adj = []
    mag_large_eq_adj = []
    for i in range(len(year_large_eq)):
        for j in range(len(time_list)-1):
            if year_large_eq[i] >= time_list[j] and year_large_eq[i] < time_list[j+1] and time_list[j] >= plot_start_year:
                year_large_eq_adj.append(time_list[j])
                index_large_eq_adj.append(j)
                mag_large_eq_adj.append(mag_large_eq[i])
    return year_large_eq_adj, mag_large_eq_adj, index_large_eq_adj
def calc_eqs_unfiltered(time_list, eqs_list, plot_start_year):
    """Return the tails of (time_list, eqs_list) from plot_start_year onward.

    The start year is clamped up to the first available time so the window
    never begins before the data does.
    """
    start = plot_start_year
    if start <= time_list[0]:
        start = time_list[0]
    # how many trailing points fall at/after the start year
    n_keep = sum(1 for t in time_list if t >= start)
    return time_list[-n_keep:], eqs_list[-n_keep:]
######################################################################
def calc_seisr_timeseries(time_list, eqs_list, plot_start_year, mag_large,min_mag, delta_time):
    """Build the log-count SEISR time series, time-shifted to align the EMA
    sampling times with the large-earthquake times.

    NOTE(review): this function mutates `time_list` IN PLACE — every entry is
    shifted by 2*delta_time and the last entry by a further 2*delta_time.
    Callers holding a reference to the same list will observe the shift.

    Returns:
        (time_list_reduced, log_number_reduced, eqs_list_reduced): the last
        points at/after plot_start_year, with log10(1 + count) values.
    """
    #
    #   ------------------------------------------------------------
    #
    year_large_eq, mag_large_eq, index_large_eq = get_large_earthquakes(mag_large,min_mag)
    if plot_start_year <= time_list[0]:
        plot_start_year = time_list[0]
    number_points_to_plot = 0
    for k in range(len(time_list)):
        if time_list[k] >= plot_start_year:
            number_points_to_plot += 1
    for i in range(len(time_list)):
        time_list[i] += 2.*delta_time   #   Adjust times to properly align the EMA times with the large EQ times
    last_index = len(time_list)-1
    # NOTE(review): the last entry ends up shifted by 4*delta_time in total
    # (the loop above plus this extra shift) — presumably intentional, confirm.
    time_list[last_index] += 2.*delta_time  #   adjustment to ensure correct last event time sequence
    log_number = [math.log(1.0+eqs_list[i],10) for i in range(len(eqs_list))]
    log_number_reduced = log_number[- number_points_to_plot:]
    time_list_reduced = time_list[- number_points_to_plot:]
    eqs_list_reduced = eqs_list[- number_points_to_plot:]
    #
    #   ------------------------------------------------------------
    #
    return time_list_reduced, log_number_reduced, eqs_list_reduced
######################################################################
def random_statistics(true_positive, false_positive, true_negative, false_negative, \
        threshold_value, forecast_interval, mag_large, \
        data_string_title, number_thresholds, NELng_local, SWLng_local, NELat_local, SWLat_local, \
        Grid, Location):
    """Compare the data ROC statistics against an ensemble of shuffled
    (random) surrogate time series and print mean +/- stddev of TP/FP/FN/TN,
    hit rate, specificity, precision and accuracy at the optimal threshold.

    NOTE(review): this function reads `values_window`, `times_window` and
    `optimal_index` as module-level globals — they are not parameters; it
    will NameError if called before those are defined elsewhere in the file.
    NOTE(review): the compute_ROC call below passes 6 arguments while
    compute_ROC is defined with 8 parameters (min_mag and one count are
    missing) — confirm against the version of compute_ROC actually in use.
    """
    #
    #   ------------------------------------------------------------
    #
    #   Plot ROC and random ROCs
    true_positive_rate, false_positive_rate, false_negative_rate, true_negative_rate = \
            compute_ROC_rates(true_positive, false_positive, true_negative, false_negative)
    accuracy = []
    precision = []
    hit_rate = []
    for k in range(len(true_positive)):
        numer = true_positive[k] + true_negative[k]
        denom = true_positive[k] + false_positive[k] + true_negative [k] + false_negative[k]
        accuracy.append(numer/denom)
        hit_rate.append(true_positive[k]/(true_positive[k] + false_negative[k]))
        precision.append(true_positive[k]/(true_positive[k] + false_positive[k]))
    number_random_timeseries = 500
#    number_random_timeseries = 50
    random_true_positive_list = []
    random_false_positive_list = []
    random_false_negative_list = []
    random_true_negative_list = []
    random_accuracy_list = []
    random_hit_rate_list = []
    random_precision_list = []
    random_specificity_list = []
    for i in range(number_random_timeseries):
        # bootstrap surrogate of the observed values, ROC'd the same way
        random_values = random_timeseries(values_window, times_window)
        true_positive_random, false_positive_random, true_negative_random, false_negative_random, threshold_value_random = \
                compute_ROC(times_window, random_values, forecast_interval, mag_large, number_thresholds, i+1)
        denom = []
        for k in range(len(true_positive_random)):
            denom.append(true_positive_random[k] + false_positive_random[k] + true_negative_random[k] + false_negative_random[k])
        # normalize counts to fractions of all decisions at each threshold
        true_positive_random  =   [ true_positive_random[m]/denom[m] for m in range(len(true_positive_random))]
        false_positive_random =   [false_positive_random[m]/denom[m] for m in range(len(false_positive_random))]
        false_negative_random =   [false_negative_random[m]/denom[m] for m in range(len(false_negative_random))]
        true_negative_random  =   [ true_negative_random[m]/denom[m] for m in range(len(true_negative_random))]
        random_true_positive_list.append(true_positive_random)
        random_false_positive_list.append(false_positive_random)
        random_false_negative_list.append(false_negative_random)
        random_true_negative_list.append(true_negative_random)
        accuracy_random = []
        hit_rate_random = []
        precision_random = []
        specificity_random = []
        for k in range(len(true_positive_random)):  #======>   thi sshould be number of timeseries, not number of thresholds
            numer = true_positive_random[k] + true_negative_random[k]
            denom = true_positive_random[k] + false_positive_random[k] + true_negative_random[k] + false_negative_random[k]
            # try/except: skip thresholds whose denominators are zero
            try:
                accuracy_random.append(numer/denom)
            except:
                pass
            try:
                hit_rate_random.append(true_positive_random[k]/(true_positive_random[k] + false_negative_random[k]))
            except:
                pass
            try:
                precision_random.append(true_positive_random[k]/(true_positive_random[k] + false_positive_random[k]))
            except:
                pass
            try:
                specificity_random.append(true_negative_random[k]/(true_negative_random[k] + false_positive_random[k]))
            except:
                pass
        random_accuracy_list.append(accuracy_random)
        random_hit_rate_list.append(hit_rate_random)
        random_precision_list.append(precision_random)
        random_specificity_list.append(specificity_random)
    #
    #   ------------------------------------------------------------
    #
    # Ensemble mean/stddev of each statistic at the (global) optimal_index.
    value_list = []
    for i in range(len(random_true_positive_list)):
        value_list.append(random_true_positive_list[i][optimal_index])
    mean_tp = np.mean(value_list)
    stddev_tp = np.std(value_list)
    value_list = []
    for i in range(len(random_false_positive_list)):
        value_list.append(random_false_positive_list[i][optimal_index])
    mean_fp = np.mean(value_list)
    stddev_fp = np.std(value_list)
    value_list = []
    for i in range(len(random_false_negative_list)):
        value_list.append(random_false_negative_list[i][optimal_index])
    mean_fn = np.mean(value_list)
    stddev_fn = np.std(value_list)
    value_list = []
    for i in range(len(random_true_negative_list)):
        value_list.append(random_true_negative_list[i][optimal_index])
    mean_tn = np.mean(value_list)
    stddev_tn = np.std(value_list)
    value_list = []
    for i in range(len(random_hit_rate_list)):
        value_list.append(random_hit_rate_list[i][optimal_index])
    mean_hr = np.mean(value_list)
    stddev_hr = np.std(value_list)
    value_list = []
    for i in range(len(random_specificity_list)):
        value_list.append(random_specificity_list[i][optimal_index])
    mean_spec = np.mean(value_list)
    stddev_spec = np.std(value_list)
    value_list = []
    for i in range(len(random_precision_list)):
        value_list.append(random_precision_list[i][optimal_index])
    mean_pre = np.mean(value_list)
    stddev_pre = np.std(value_list)
    value_list = []
    for i in range(len(random_accuracy_list)):
        value_list.append(random_accuracy_list[i][optimal_index])
    mean_acc = np.mean(value_list)
    stddev_acc = np.std(value_list)
    #
    #   ------------------------------------------------------------
    #
    print()
    print('--------------------------------------')
    print('')
    print('Forecast Interval: ', str(forecast_interval) + ' Years')
    print('')
    print('Threshold Value: ', str(round(float(threshold_value[optimal_index]),3)) )
    print('')
    print('Mean TP: ', str(round(mean_tp,3)) + ' +/- ' + str(round(stddev_tp,3)))
    print('')
    print('Mean FP: ', str(round(mean_fp,3)) + ' +/- ' + str(round(stddev_fp,3)))
    print('')
    print('Mean FN: ', str(round(mean_fn,3)) + ' +/- ' + str(round(stddev_fn,3)))
    print('')
    print('Mean TN: ', str(round(mean_tn,3)) + ' +/- ' + str(round(stddev_tn,3)))
    print('')
    print('Mean Random Hit Rate: ', str(round(mean_hr,3)) + ' +/- ' + str(round(stddev_hr,3)))
    print('')
    print('Mean Random Specificity: ', str(round(mean_spec,3)) + ' +/- ' + str(round(stddev_spec,3)))
    print('')
    print('Mean Random Precision: ', str(round(mean_pre,3)) + ' +/- ' + str(round(stddev_pre,3)))
    print('')
    print('Mean Random Accuracy: ', str(round(mean_acc,3)) + ' +/- ' + str(round(stddev_acc,3)))
    print('')
    print('--------------------------------------')
    print('')
    return
##############################################r########################
def timeseries_to_EMA(timeseries_orig, N_Steps):
    """Return the running EMA of a series: element i is the exponential moving
    average of the first i+1 points of timeseries_orig.

    Args:
        timeseries_orig: list of numeric values, earliest first.
        N_Steps: EMA span, forwarded to EMA_weighted_time_series.

    Returns:
        a list of the same length as timeseries_orig.
    """
    # Slice the growing prefix directly instead of rebuilding it element by
    # element in an inner loop (same values; avoids O(n^2) appends).
    return [
        EMA_weighted_time_series(timeseries_orig[:i], N_Steps)
        for i in range(1, len(timeseries_orig) + 1)
    ]
######################################################################
def EMA_weighted_time_series(time_series, NSteps):
    """Exponentially weighted average of a list; the LAST elements carry the
    most weight (the weight vector is applied reversed)."""
    n_events = len(time_series)
    weights = EMA_weights(n_events, NSteps)
    weights_newest_first = list(reversed(weights))
    contributions = []
    weight_total = 0.
    for idx in range(n_events):
        weight_total += weights[idx]
        contributions.append(round(float(time_series[idx]) * weights_newest_first[idx], 4))
        # rounding happens inside the loop, matching the accumulation order
        weight_total = round(weight_total, 4)
    total = sum(contributions)
    # degenerate weights: fall back to a tiny fixed numerator
    if (float(weight_total)) <= 0.0:
        total = 0.0001
        weight_total = 1.
    try:
        averaged = float(total) / float(weight_total)
    except:
        averaged = 0.0
    return averaged
######################################################################
def EMA_weights(N_events, N_Steps):
    """Return N_events exponential weights normalized to sum to 1.

    Weight i is (1-alpha)**i with alpha = 2/(N_Steps+1); index 0 carries the
    heaviest weight (callers reverse the list to weight the newest samples).
    """
    alpha = 2./float((N_Steps+1))
    #   time_series_list is the time series of floating point values
    #   arranged in order of first element in list being earliest
    assert 0 < alpha <= 1
    raw = [(1.0 - alpha) ** k for k in range(N_events)]
    total = sum(raw)
    return [w / total for w in raw]
######################################################################
def compute_ROC(times_window, values_window, forecast_interval, mag_large, min_mag, \
        number_thresholds, number_random_timeseries, time_number):
    """Sweep a threshold across the value range and tally confusion counts.

    For each of number_thresholds thresholds, every time step (except the
    trailing excluded window) is classified against the first large
    earthquake found within forecast_interval years.

    NOTE(review): the in-line comments below say "value greater than
    threshold" but the code tests `values_window[j] <= threshold` — the
    SEISR indicator evidently signals LOW values as alarms; confirm.

    Args:
        times_window, values_window: the indicator time series.
        forecast_interval: alarm horizon in years.
        mag_large, min_mag: magnitude cuts for large/small events.
        number_thresholds: number of threshold steps to sweep.
        number_random_timeseries: ensemble size (progress display only).
        time_number: >0 identifies which random surrogate is being processed
            (progress display); 0 means the real data series.

    Returns:
        (true_positive, false_positive, true_negative, false_negative,
         threshold_value) — parallel lists, one entry per threshold.
    """
    #   First we find the min value, then progressively lower (actually raise) the threshold and determine the
    #       hit rate and false alarm rate
    true_positive_rate  = []
    false_positive_rate = []
    true_negative_rate  = []
    false_negative_rate = []
    true_positive   =   []
    false_positive  =   []
    true_negative   =   []
    false_negative  =   []
    acc_rate = []
    threshold_value = []
    year_large_eq, mag_large_eq, index_large_eq = get_large_earthquakes(mag_large, min_mag)
    values_window = [float(values_window[i]) for i in range(len(values_window))]
    min_value = min(values_window)
    max_value = max(values_window)
    delta_threshold = (max_value - min_value) / number_thresholds
#     if time_number == 0:
#         print('Calculating Data Time Series')
# #         print('')
#     else:
#         print('Calculating Random Time Series: '+ str(time_number) + ' out of ' + str(number_random_timeseries), end="\r", flush=True)
    if time_number > 0:
        print('Calculating Random Time Series: '+ str(time_number) + ' out of ' + str(number_random_timeseries), end="\r", flush=True)
    threshold = min_value - delta_threshold
    print('')
    # NOTE(review): the factor 13 presumably converts forecast_interval years
    # to a number of samples (~13 samples/year?) — confirm the sampling rate.
    excluded_time = int(forecast_interval * 13)
    for i in range(number_thresholds):
        threshold = threshold + delta_threshold
        fp = 0.
        tp = 0.
        tn = 0.
        fn = 0.
        for j in range(len(times_window) - excluded_time):   #   We exclude the last time that has incomplete data
            test_flag = True
            # classify step j against the FIRST large quake in the horizon
            for k in range(len(year_large_eq)):
                delta_time = year_large_eq[k] - times_window[j]
                #   if value greater than threshold and at least 1 eq occurs within forecast interval, tp
                if delta_time <= forecast_interval and delta_time >= 0 and float(values_window[j]) <= threshold and test_flag:
                    tp += 1.0
                    test_flag = False
                #   if value greater than threshold, so predicted to occur within forecast interval,
                #       and eq does not occur within forecast interval, fp
                if delta_time > forecast_interval and delta_time >= 0 and float(values_window[j]) <= threshold and test_flag:
                    fp += 1.0
                    test_flag = False
                #   if value less than threshold, so predicted not to occur within forecast interval,
                #       and eq does occur within forecast interval, fn
                if delta_time <= forecast_interval and delta_time >= 0 and float(values_window[j]) > threshold and test_flag:
                    fn += 1.0
                    test_flag = False
                #   if value less than threshold and eq does not occur within forecast interval, tn
                if delta_time > forecast_interval and delta_time >= 0 and float(values_window[j]) > threshold and test_flag:
                    tn += 1.0
                    test_flag = False
#         if (tp+fn)>0. and (fp+tn)>0.:
#             tpr =   tp/(tp + fn)
#             fpr =   fp/(fp + tn)
#         else:
#             tpr = 0.
#             fpr = 0.
        true_positive.append(tp)
        false_positive.append(fp)
        true_negative.append(tn)
        false_negative.append(fn)
        threshold_value.append(threshold)
#         print('Threshold Value: ', threshold, 'Hit Rate: ', tpr, 'False Alarm Rate: ', fpr, 'Ratio tpr/fpr: ', round(ratio,4))
#     true_positive_rate.append(0.)
#     false_positive_rate.append(0.)
#     print()
#     print(times_window)
    return true_positive, false_positive, true_negative, false_negative, threshold_value
######################################################################
def compute_ROC_rates(true_positive, false_positive, true_negative, false_negative):
    """Convert raw ROC confusion counts into rates.

    Parameters are parallel lists (one entry per threshold) of
    true/false positive/negative counts, as produced by compute_ROC().

    Returns four parallel lists rounded to 4 decimals, in the order
    true_positive_rate, false_positive_rate, false_negative_rate,
    true_negative_rate (note: fnr before tnr, matching
    compute_spatial_ROC_rates).

    A rate whose denominator is zero is reported as 0.0 instead of
    raising ZeroDivisionError, consistent with compute_spatial_ROC_rates.
    """
    def _rate(numer, denom):
        # Guarded division: a threshold with no relevant samples gets rate 0.
        return round(numer/denom, 4) if denom > 0. else 0.
    true_positive_rate = []
    false_positive_rate = []
    true_negative_rate = []
    false_negative_rate = []
    for tp, fp, tn, fn in zip(true_positive, false_positive, true_negative, false_negative):
        true_positive_rate.append(_rate(tp, tp + fn))
        false_positive_rate.append(_rate(fp, fp + tn))
        false_negative_rate.append(_rate(fn, fn + tp))
        true_negative_rate.append(_rate(tn, tn + fp))
    return true_positive_rate, false_positive_rate, false_negative_rate, true_negative_rate
######################################################################
def random_timeseries(values_window, times_window):
    """Build a surrogate timeseries by drawing, with replacement, one
    uniform random sample from values_window per original sample.
    times_window is accepted for interface symmetry but is not used.
    """
    return [random.choice(values_window) for _ in values_window]
######################################################################
def calc_precision_threshold(true_positive, false_positive, true_negative, false_negative):
    """Compute the precision (positive predictive value) at each threshold.

    precision = tp / (tp + fp); a threshold with no predicted positives
    (tp + fp == 0) is assigned precision 0.

    true_negative and false_negative are accepted for interface
    compatibility but are not needed to compute precision.  (The
    original also called compute_ROC_rates() and discarded the result;
    that dead call -- which could itself raise on zero denominators --
    has been removed.)
    """
    precision_list = []
    for tp, fp in zip(true_positive, false_positive):
        denom = tp + fp
        # Guard the division instead of a bare except.
        precision_list.append(tp/denom if denom > 0. else 0.)
    return precision_list
######################################################################
def compute_precision_timeseries(times_window, values_window, threshold_value, precision_list):
    """Map each sample of the timeseries to a precision, in percent.

    For each sample, the precision of the LAST threshold in
    threshold_value that the sample meets or exceeds is used; a sample
    below every threshold gets 0.  times_window only fixes the output
    length.  Results are rounded to 3 decimals.
    """
    precision_timeseries = []
    for idx in range(len(times_window)):
        sample = values_window[idx]
        matched = 0.
        for thr, prec in zip(threshold_value, precision_list):
            if sample >= thr:
                matched = prec
        precision_timeseries.append(round(100.0*matched, 3))
    return precision_timeseries
######################################################################
def calc_ROC_skill(times_window, values_window, forecast_interval, mag_large, min_mag, number_thresholds):
    """Compute the ROC skill score: the area under the ROC curve.

    The ROC confusion counts are computed by compute_ROC(), converted to
    rates, re-binned onto a uniform false-alarm-rate axis, and the area
    under the resulting hit-rate curve is taken as the average of the
    trapezoidal and Simpson estimates, each rounded to 3 decimals.

    Changes from the original: the bare ``except:`` around the bin
    average is replaced by an explicit empty-bin check, and the unused
    ``fal_bins_array`` was removed.
    """
    #
    # ------------------------------------------------------------
    #
    # Calculate ROC confusion counts and convert them to rates
    true_positive, false_positive, true_negative, false_negative, threshold_value\
            = compute_ROC(times_window, values_window, forecast_interval, mag_large, min_mag, number_thresholds, 0, 0)
    true_positive_rate, false_positive_rate, false_negative_rate, true_negative_rate = \
            compute_ROC_rates(true_positive, false_positive, true_negative, false_negative)
    #
    # ------------------------------------------------------------
    #
    # Re-bin the (false alarm, hit) pairs onto a uniform false-alarm axis
    number_intervals = 100
    delta_bins = 1./float(number_intervals)
    hit_bins = []
    for i in range(number_intervals):
        hit_value = 0.0
        counter = 0
        for j in range(len(false_positive_rate)):
            if i*delta_bins <= false_positive_rate[j] < (i+1)*delta_bins:
                hit_value += true_positive_rate[j]
                counter += 1
        # Average hit rate within the bin; an empty bin contributes 0.0
        if counter > 0:
            hit_bins.append(hit_value/float(counter))
        else:
            hit_bins.append(hit_value)
    hit_bins_array = np.array(hit_bins)
    # Area under the curve, by the composite trapezoidal rule...
    area_trapz = trapz(hit_bins_array, dx=delta_bins)
    # ...and by the composite Simpson's rule.
    area_simp = simps(hit_bins_array, dx=delta_bins)
    skill_score_simp = round(area_simp, 3)
    skill_score_trapz = round(area_trapz, 3)
    #
    # ------------------------------------------------------------
    #
    # The skill score is the average of the Simpson and trapezoid areas
    skill_score = 0.5*(skill_score_simp + skill_score_trapz)
    return skill_score
######################################################################
def compute_seisr_time_list(delta_time, lower_cutoff, NSteps, plot_start_year, mag_large, min_mag):
    """Build the reduced SEISR timeseries.

    Reads the binned earthquake counts, floors counts below
    lower_cutoff * mean at that floor, smooths with an exponential
    moving average over NSteps, then converts to the SEISR series
    restricted to times after plot_start_year.

    Returns (time_list_reduced, log_number_reduced, eqs_list_reduced)
    as produced by calc_seisr_timeseries.
    """
    # eqs_list = eqs_list_unfiltered
    # time_list = time_list_unfiltered
    time_list, eqs_list = read_input_earthquake_data(delta_time, min_mag, lower_cutoff)
    mean_eqs = round(np.mean(eqs_list),3)
    # NOTE(review): this is an alias, not a copy -- the clipping loop
    # below mutates it as well, and it is never read afterwards.
    eqs_list_unfiltered = eqs_list
    for i in range(len(time_list)):
        # Floor quiet bins at lower_cutoff * mean
        if int(eqs_list[i]) <= lower_cutoff*mean_eqs:
            eqs_list[i] = lower_cutoff*mean_eqs
    # Apply Exponential Moving Average to eqs_list
    eqs_list_EMA = timeseries_to_EMA(eqs_list, NSteps)
    # Generate the SEISR times and filter the timeseries data to occur only after the plot_start_year
    time_list_reduced, log_number_reduced, eqs_list_reduced = \
            calc_seisr_timeseries(time_list, eqs_list_EMA, plot_start_year, mag_large,min_mag, delta_time)
    return time_list_reduced, log_number_reduced, eqs_list_reduced
######################################################################
def calc_forecast_hits_threshold(times_window, values_window, forecast_interval, year_large_eq, threshold_value, index_threshold):
    """Fraction of qualifying forecast windows containing a large quake.

    A window starting at times_window[i] qualifies when its value meets
    or exceeds threshold_value[index_threshold].  A qualifying window is
    a hit (counted once, no matter how many events fall in it) when at
    least one time in year_large_eq lies in
    [times_window[i], times_window[i] + forecast_interval).

    Returns hits / qualifying windows, or 0 if no window qualifies.
    """
    threshold = threshold_value[index_threshold]
    windows_number = 0
    hits_number = 0
    for start_time, value in zip(times_window, values_window):
        if value < threshold:
            continue  # window does not qualify at this threshold
        windows_number += 1
        end_time = start_time + forecast_interval
        if any(start_time <= eq_year < end_time for eq_year in year_large_eq):
            hits_number += 1  # at most one hit per window
    if windows_number > 0:
        return float(hits_number)/float(windows_number)
    return 0
######################################################################
def calc_observed_frequency(times_window, values_window, forecast_interval, year_large_eq, threshold_value):
    """Observed hit frequency at every threshold.

    Returns a list parallel to threshold_value where each entry is the
    hit frequency computed by calc_forecast_hits_threshold() for that
    threshold index.
    """
    return [calc_forecast_hits_threshold(times_window, values_window,
                                         forecast_interval, year_large_eq,
                                         threshold_value, index_threshold)
            for index_threshold in range(len(threshold_value))]
######################################################################
def compute_raw_timeseries():
    """Fetch the raw timeseries data via SKLFileMethods.

    NOTE(review): the fetched (time_bins, timeseries) pair is discarded
    and the function returns None -- presumably a stub, or called only
    for SKLFileMethods' side effects; confirm against callers.
    """
    time_bins, timeseries = SKLFileMethods.get_timeseries_data()
    return
######################################################################
def coarse_grain_seismic_timeseries(NELat_local, NELng_local, SWLat_local, SWLng_local, \
        min_mag, max_depth, grid_size, delta_time_interval):
    """Partition the regional catalog into a lat/lng grid and build one
    event-count timeseries per grid box.

    Each grid box of side grid_size (degrees) inside the NE/SW bounding
    corners gets a series counting, per time bin of width
    delta_time_interval (in years), the events with depth <= max_depth
    and magnitude >= min_mag that fall in the box.

    Side effects: writes 'gridboxes.txt' (box centers and indices) and
    'timeseries.txt' (time bins, then one count series per box), and
    prints progress for every box.

    Returns (timeseries, time_bins, date_bins).
    """
    # This method builds the local timeseries in small grid boxes.
    # We assume that the time interval for the seismicity time series will
    # be weekly = 7 days = 0.01923 fraction of a year
    # Read the regional catalog
    # mag_array_all, date_array_all, time_array_all, year_array_all, depth_array_all, lat_array_all, lng_array_all = \
    # SEISRFileMethods.read_regional_catalog(min_mag)
    mag_array, date_array, time_array, year_array, depth_array, lat_array, lng_array = \
            SEISRFileMethods.read_regional_catalog(min_mag)
    # Use only events after plot_start_year
    num_lat_boxes = int( (NELat_local - SWLat_local)/grid_size )
    num_lng_boxes = int( (NELng_local - SWLng_local)/grid_size)
    num_total_boxes = num_lat_boxes * num_lng_boxes
    number_timeseries_found = 0
    total_counter = 0
    grid_box_locations = []
    grid_box_indices = []
    timeseries = []
    # ------------------------------------------------------------
    #
    # The cutoff factor determines the minimum number of small earthquakes that are needed for each grid box
    #
    total_time_interval = float(year_array[len(year_array)-1]) - float(year_array[0])
    last_event_year = float(year_array[len(year_array) - 1])
    number_year_bins = int((last_event_year - float(year_array[0]))/delta_time_interval) +1
    print('total_time_interval, last_event_year, number_year_bins', total_time_interval, last_event_year, number_year_bins)
    # ------------------------------------------------------------
    # Define times of bins (uniform bins from the first event year)
    time_bins = []
    date_bins = []
    for i in range(number_year_bins):
        time_bins.append(float(year_array[0]) + float(i)*delta_time_interval)
    print('')
    print('Length of time_bins: ', len(time_bins))
    print('')
    # ------------------------------------------------------------
    #
    # Define the grid boxes: Filter the regional data into (num_total_boxes) time series
    number_polygon_vertices = 4
    # Construct the string of polygon vertices. Note that the order is lat, long pairs
    for i in range(num_lat_boxes):
        for j in range(num_lng_boxes):
            ll = i+j  # NOTE(review): unused
            vertex_lat = []
            vertex_lng = []
            mag_file = []
            year_file = []
            # Order of vertices of large rectangular region: SW, SE, NE, NW
            W_box_lng = SWLng_local + j*grid_size # West side of small box
            E_box_lng = SWLng_local + (j+1)*grid_size # East side of small box
            # N_box_lat = NELat_local - i*grid_size # North side of small box
            # S_box_lat = NELat_local - (i+1)*grid_size # South side of small box
            S_box_lat = SWLat_local + i*grid_size # South side of small box
            N_box_lat = SWLat_local + (i+1)*grid_size # North side of small box
            vertex_lat.append(S_box_lat)
            vertex_lat.append(S_box_lat)
            vertex_lat.append(N_box_lat)
            vertex_lat.append(N_box_lat)
            vertex_lng.append(W_box_lng)
            vertex_lng.append(E_box_lng)
            vertex_lng.append(E_box_lng)
            vertex_lng.append(W_box_lng)
            point_list = []
            for k in range(number_polygon_vertices):
                point_list.append((float(vertex_lat[k]),float(vertex_lng[k])))
            polygon = Polygon(point_list)
            # Column-major box index: lat index varies fastest
            index_timeseries = int(i + j*(num_lat_boxes))
            # ------------------------------------------------------------
            # Collect the events that fall inside this box
            for kk in range(len(year_array)):
                point = Point((float(lat_array[kk]),float(lng_array[kk])))
                if (float(depth_array[kk]) <= float(max_depth) and float(mag_array[kk]) >= float(min_mag) \
                        and polygon.contains(point) == True):
                    mag_file.append(float(mag_array[kk]))
                    year_file.append(float(year_array[kk]))
            # ------------------------------------------------------------
            # Fill the working_file with the events over the time period. Each bin in working_file will
            # record the number of events that occurred in that bin.
            last_event_year = float(year_array[len(year_array) - 1])
            working_file = [0.0 for i in range(int(number_year_bins))]
            for k in range(len(year_file)):
                index_working = int((float(year_file[k]) - float(year_array[0]))/delta_time_interval )
                working_file[index_working] += 1.0 # This is a number timeseries
            #
            # For the activity time series, number_years is the minimum number of active time bins required
            #
            total_counter += 1
            lat_center = 0.5*(N_box_lat + S_box_lat)
            lng_center = 0.5*(W_box_lng + E_box_lng)
            grid_box_locations.append((lng_center,lat_center))
            grid_box_indices.append((j,i))
            timeseries.append(working_file)
            number_timeseries_found += 1
            print('')
            print('***************************************************')
            print('Found Timeseries Number ', number_timeseries_found, ' of ', num_total_boxes)
            print('Total number of events: ', sum(working_file))
            print('Grid Box Center @ Lat, Long: ', round(lat_center,3), round(lng_center,3))
            print('With indices @ Lat Index, Long Index: ', i,j)
            print('For minimum magnitude events >= ', min_mag)
            print('***************************************************')
            print('')
    # date_bins.append(date_array[0])
    # Record, for each time bin, the date of the first event after it.
    # NOTE(review): year_array[k] is evaluated BEFORE the k < len(...)
    # guard, so this can raise IndexError when every event time is <= the
    # last bin; date_array[k+1] can also run one past the end -- confirm
    # and reorder/bound the loop condition.
    for i in range(len(time_bins)):
        k = 0
        while float(year_array[k]) <= float(time_bins[i]) and k < len(year_array):
            date_value = date_array[k+1]
            k += 1
        date_bins.append(date_value)
    print('')
    print('Total Grid Boxes: ', num_total_boxes)
    print('')
    lat_print = []
    lng_print = []
    output_file = open('gridboxes.txt','w')
    for i in range(len(grid_box_locations)):
        lat_print = grid_box_locations[i][1]
        lng_print = grid_box_locations[i][0]
        lat_index = grid_box_indices[i][1]
        lng_index = grid_box_indices[i][0]
        print(round(float(lat_print),4), round(float(lng_print),4), lat_index, lng_index, file=output_file)
    output_file.close() # with space between
    output_file = open('timeseries.txt','w')
    print(' '.join(map(str,time_bins)), file=output_file)
    for i in range(len(timeseries)):
        timeseries_print = timeseries[i]
        print(' '.join(map(str,timeseries_print)), file=output_file) # Map converts list to string, joins elements
    output_file.close() # with space between
    # Note: Refer to the elements of each timeseries as, e.g., timeseries[0][0] for the first list
    # return timeseries, grid_box_locations
    return timeseries, time_bins, date_bins
######################################################################
def define_EMA_timeseries(NSteps, min_mag):
    """Smooth every grid-box timeseries with an exponential moving
    average over NSteps and write the result to 'timeseries_EMA.txt'
    (time bins on the first line, one smoothed series per line after,
    values rounded to 3 decimals).
    """
    time_bins, timeseries = SEISRFileMethods.get_timeseries_data(min_mag)
    timeseries_N = []
    for raw_series in timeseries:
        smoothed = timeseries_to_EMA(raw_series, NSteps)
        timeseries_N.append([round(value, 3) for value in smoothed])
    with open('timeseries_EMA.txt', 'w') as output_file:
        print(' '.join(map(str, time_bins)), file=output_file)
        for series in timeseries_N:
            print(' '.join(map(str, series)), file=output_file)
    return
######################################################################
def define_EMA_timeseries_LS(LS, NSteps, min_mag):
    """Smooth every grid-box timeseries with an exponential moving
    average over NSteps and write the result to
    'timeseries_EMA_<LS>.txt' (time bins on the first line, one
    smoothed series per line after, values rounded to 3 decimals).
    """
    time_bins, timeseries = SEISRFileMethods.get_timeseries_data(min_mag)
    timeseries_N = []
    for raw_series in timeseries:
        smoothed = timeseries_to_EMA(raw_series, NSteps)
        timeseries_N.append([round(value, 3) for value in smoothed])
    with open('timeseries_EMA_' + LS + '.txt', 'w') as output_file:
        print(' '.join(map(str, time_bins)), file=output_file)
        for series in timeseries_N:
            print(' '.join(map(str, series)), file=output_file)
    return
######################################################################
def classify_large_earthquakes_grid_boxes(NELat_local, NELng_local, SWLat_local, SWLng_local, \
            grid_size, index_ROC, time_list_reduced, forecast_interval,\
            mag_array_large, year_array_large, depth_array_large, lat_array_large, lng_array_large):
    """Count the large earthquakes per grid box within the forecast window.

    Events occurring in [time_list_reduced[index_ROC],
    time_list_reduced[index_ROC] + forecast_interval) are assigned to
    the lat/lng grid boxes of side grid_size inside the NE/SW corners.

    Returns ROC_event_list: one event count per grid box, indexed as
    i + j*num_lat_boxes (lat index varies fastest).

    mag_array_large and depth_array_large are accepted for interface
    compatibility but are not used here.

    Performance fix: the forecast window bounds were recomputed for
    every event inside every grid box; they are now hoisted out, and
    events outside the time window are filtered once before the
    (expensive) point-in-polygon tests.
    """
    num_lat_boxes = int( (NELat_local - SWLat_local)/grid_size )
    num_lng_boxes = int( (NELng_local - SWLng_local)/grid_size)
    num_total_boxes = num_lat_boxes * num_lng_boxes
    ROC_event_list = [0 for i in range(num_total_boxes)]
    # The forecast window is fixed for this call -- compute it once.
    current_time = float(time_list_reduced[index_ROC])
    later_time = current_time + forecast_interval
    # Pre-select the events inside the time window so the polygon test
    # below only runs on candidates.
    candidate_points = []
    for kk in range(len(year_array_large)):
        if current_time <= float(year_array_large[kk]) < later_time:
            candidate_points.append(Point((float(lat_array_large[kk]), float(lng_array_large[kk]))))
    #
    # ------------------------------------------------------------
    #
    # Define the grid boxes and classify the candidate events into them.
    total_events = 0
    for i in range(num_lat_boxes):
        for j in range(num_lng_boxes):
            # Box edges; vertex order is SW, SE, NE, NW as (lat, lng) pairs
            W_box_lng = SWLng_local + j*grid_size       # West side of small box
            E_box_lng = SWLng_local + (j+1)*grid_size   # East side of small box
            S_box_lat = SWLat_local + i*grid_size       # South side of small box
            N_box_lat = SWLat_local + (i+1)*grid_size   # North side of small box
            point_list = [(float(S_box_lat), float(W_box_lng)),
                          (float(S_box_lat), float(E_box_lng)),
                          (float(N_box_lat), float(E_box_lng)),
                          (float(N_box_lat), float(W_box_lng))]
            polygon = Polygon(point_list)
            index_grid_box = int(i + j*(num_lat_boxes))
            for point in candidate_points:
                if polygon.contains(point) == True:
                    ROC_event_list[index_grid_box] += 1
                    total_events += 1
    return ROC_event_list # Number of events by grid box
######################################################################
def sort_list_EQ_RTI_order(ROC_event_list, NELat_local, NELng_local, SWLat_local, SWLng_local, min_mag, index_time, \
            timeseries_EMA, NSTau, lower_cutoff):
    """Build the per-grid-box threshold list at one time index.

    Takes timeseries_EMA[i][index_time] for each grid box, normalizes
    the values so they sum to 100 (percent), applies log10(1 + value),
    and rounds to 3 decimals.

    Only index_time and timeseries_EMA influence the result; the other
    parameters are kept for interface compatibility.  (Dead code that
    computed -- and discarded -- the mean of each full series has been
    removed.)

    NOTE: raises ZeroDivisionError if every grid box value is zero at
    index_time, matching the original behavior.
    """
    # Relative Total Intensity value of each grid box at this time step
    ROC_gridbox_threshold_list = [series[index_time] for series in timeseries_EMA]
    # Normalize the spatial probability to sum to 100%
    sum_norm = 100.0/sum(ROC_gridbox_threshold_list)
    ROC_gridbox_threshold_list = [round(math.log(1.0 + value*sum_norm, 10), 3)
                                  for value in ROC_gridbox_threshold_list]
    return ROC_gridbox_threshold_list
#
######################################################################
def compute_spatial_ROC(ROC_gridbox_events_list, ROC_gridbox_threshold_list):
    """Sweep a descending threshold over the grid-box values and tally
    the spatial confusion counts at each step.

    A grid box whose threshold-list value meets or exceeds the current
    threshold is an "alarm"; a box with at least one event in
    ROC_gridbox_events_list is an "occurrence".  At each of 500
    thresholds (stepping down from the maximum value to zero) the boxes
    are tallied as tp (alarm & occurrence), fp (alarm only), fn
    (occurrence only) or tn (neither).

    Returns (true_positive, false_positive, true_negative,
    false_negative, threshold_value), each a list of 500 entries.
    """
    true_positive = []
    false_positive = []
    true_negative = []
    false_negative = []
    threshold_value = []
    number_thresholds = 500
    number_grid_boxes = len(ROC_gridbox_threshold_list)
    max_value = max(ROC_gridbox_threshold_list)  # in %
    delta_threshold = max_value / number_thresholds
    for step in range(number_thresholds):
        current_threshold = max_value - float(step + 1)*delta_threshold
        tp = fp = fn = tn = 0
        for box in range(number_grid_boxes):
            alarmed = ROC_gridbox_threshold_list[box] >= current_threshold
            occurred = ROC_gridbox_events_list[box] > 0
            if alarmed and occurred:
                tp += 1
            elif alarmed:
                fp += 1
            elif occurred:
                fn += 1
            else:
                tn += 1
        true_positive.append(tp)
        false_positive.append(fp)
        true_negative.append(tn)
        false_negative.append(fn)
        threshold_value.append(current_threshold)
    return true_positive, false_positive, true_negative, false_negative, threshold_value
######################################################################
def compute_spatial_ROC_rates(true_positive, false_positive, true_negative, false_negative):
    """Convert spatial ROC confusion counts into rates.

    The output lists are seeded with the ROC origin point (tpr = fpr =
    0, tnr = fnr = 1) so the plotted curve starts at (0, 0); one entry
    per threshold is appended after it, each rounded to 4 decimals.

    Returns (true_positive_rate, false_positive_rate,
    false_negative_rate, true_negative_rate) -- note fnr before tnr.

    The four bare ``except:`` blocks of the original are replaced by an
    explicit zero-denominator guard (a rate with no relevant samples is
    reported as 0.0, as before).
    """
    def _rate(numer, denom):
        # Guarded division: rate is 0 when the denominator is zero.
        return round(numer/denom, 4) if denom > 0 else 0.
    true_positive_rate = [0.]
    false_positive_rate = [0.]
    true_negative_rate = [1.]
    false_negative_rate = [1.]
    for tp, fp, tn, fn in zip(true_positive, false_positive, true_negative, false_negative):
        true_positive_rate.append(_rate(tp, tp + fn))
        false_positive_rate.append(_rate(fp, fp + tn))
        false_negative_rate.append(_rate(fn, fn + tp))
        true_negative_rate.append(_rate(tn, tn + fp))
    return true_positive_rate, false_positive_rate, false_negative_rate, true_negative_rate
####################################################################
def combine_images(input_file1, folder1, input_file2, folder2):
    """Paste pairs of images side by side.

    input_file1 / input_file2 are text files listing image paths, one
    per line; only the basename of each path is kept and re-rooted
    under folder1 / folder2.  The i-th image of each list is pasted
    left/right onto a new RGB canvas sized to fit both, and saved as
    './DataMoviesCombined/PPV_LogRTI_combined_image_000<i>.png'.

    Resource fix: the original never closed the list files or the
    opened images; all are now managed with ``with`` blocks.
    """
    def _read_image_list(list_path, folder):
        # Each line is a path; keep the basename, re-rooted under *folder*.
        names = []
        with open(list_path, 'r') as list_file:
            for line in list_file:
                names.append(folder + line.strip().split('/')[-1])
        return names
    input_list1 = _read_image_list(input_file1, folder1)
    input_list2 = _read_image_list(input_file2, folder2)
    for i in range(len(input_list1)):
        # 'with' releases the image file handles each iteration
        with Image.open(input_list1[i]) as img1, Image.open(input_list2[i]) as img2:
            # get width and height, and size the canvas to fit both
            w1, h1 = img1.size
            w2, h2 = img2.size
            w = max(w1, w2)
            h = max(h1, h2)
            # img1 = img1.resize((int(w*1.105),h2)) # Use if timeseries is plotted with spatial pdf
            combined_image = Image.new('RGB', (w*2, h))
            combined_image.paste(img1, (0, 0))
            combined_image.paste(img2, (w, 0))
            # combined_image.paste(img2, (int(w*0.9), 0)) # To move images a bit closer together
            # NOTE(review): str(i) is not zero-padded, so names sort as
            # ...0001, 00010, 00011, 0002 -- confirm downstream tooling copes.
            combined_image.save('./DataMoviesCombined/PPV_LogRTI_combined_image_000' + str(i) + '.png')
    return
####################################################################
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2010-2012 Tianwei Workshop
# Copyright (C) 2010-2012 Dalian University of Technology
#
# Authors: Tianwei Liu <liutianweidlut@gmail.com>
# Created: 2012-6-7
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
import os,sys
import time,uuid,datetime
from config.strConfig import *
from util.keymanager import *
from util.loadInfo import DBInfo
class processStr(object):
"""
"""
    def __init__(self,filename = None,save_path = RESULT_DIR):
        """
        Prepare a parser for *filename*; results are written under
        *save_path*.  Only names and empty containers are set up here --
        the files themselves are opened by parseProcess().
        """
        self.source_file_name = filename
        self.result_file_name = ""
        self.save_path = save_path
        # Maps each column name of EventData's header row (the lookup
        # key) to its column position.  column name -> position ->
        # direct value lookup in a raw EventData row, used to fill:
        #   1. Measurement_table
        #   2. call_fetch_correspond
        #   3. Eventdata_table
        #   4. Message_table
        self.item_dict = {}
        self.cell_dict = {} # cell name -> cid mapping from cellinformation
        self.output_power_dict = {} # mspowerindividual info loaded from the database
        self.call_information = {}
        self.callid_dict = [] # array-style lookup: trades space for time
        self.source_buffer = None # buffered lines of the file being processed
        self.result_measurement_name = ""
        self.result_eventdata_name = ""
        self.result_callinformation_name = ""
        self.source_file = None
        self.result_file = None
        self.result_measurement_file = None
        self.result_eventdata_file = None
        self.result_callinformation_file = None
        self.tmp_name = ""
        self.handle_file_name()
        self.result = self.init_result()
        self.saved_result = [] # names of hourly files already recorded in self.result
        self.init_dict()
    def init_dict(self):
        """
        Initialize the lookup dictionaries.
        """
        # mspowerindividual info loaded from the database
        self.output_power_dict = DBInfo.loadOutputPower()
        # Pre-create one bucket per possible call sequence number
        # (0..65536) for collecting call-information statistics.
        for cnt in range(0,65537):
            cnt = str(cnt)
            self.call_information[cnt] = []
def init_result(self):
ret = {}
ret["measurement_file"] = {}
ret["eventdata_file"] = {}
ret["callinformation_file"] = {}
for i in range(0,MAX_TIME_SPAN):
ret["measurement_file"][i] = {
"FileName":"",
"SaveFileName":"",
"DataStartTime":None,
"Interval":3600,
"TableID":None
}
ret["eventdata_file"][i] = {
"FileName":"",
"SaveFileName":"",
"DataStartTime":None,
"Interval":3600,
"TableID":None
}
ret["callinformation_file"][i] = {
"FileName":"",
"SaveFileName":"",
"DataStartTime":None,
"Interval":3600,
"TableID":None
}
return ret
def init_split(self):
"""
Split the files by hour
"""
self.current_hour = {
"measurement_file":-1,
"eventdata_file":-1,
"callinformation_file":-1,
}
self.current_file = {
"measurement_file":self.result_measurement_file,
"eventdata_file":self.result_eventdata_file,
"callinformation_file":self.result_callinformation_file,
}
self.current_filename = {
"measurement_file":self.result_measurement_name,
"eventdata_file":self.result_eventdata_name,
"callinformation_file":self.result_callinformation_name,
}
self.time_cnt = {
"measurement_file":0,
"eventdata_file":0,
"callinformation_file":0,
}
self.first_entity = {
"measurement_file":True,
"eventdata_file":True,
"callinformation_file":True,
}
def handle_file_name(self):
"""
handle every file names
"""
self.tmp_name = (os.path.basename(self.source_file_name)).split('.')[0]
self.result_measurement_name = os.path.join(self.save_path, self.tmp_name+'_measurement_')
self.result_eventdata_name = os.path.join(self.save_path, self.tmp_name+'_eventdata_')
self.result_callinformation_name = os.path.join(self.save_path, self.tmp_name+'_callinformation_')
    def split_files(self,time_str,file_name):
        """
        Route the output for *file_name* into an hourly file.

        On the first call the initial (prefix-only) file is renamed to
        carry the hour suffix; when the hour rolls over, the current
        file is closed and one for the new hour is opened ('w' the
        first time an hour is seen, 'a' afterwards).  store_result()
        is called so each hourly file gets a metadata entry.
        """
        # Hour field of a 'YYYYMMDDHH...' timestamp
        get_hour = int(time_str.split(".")[0][8:10])
        if self.current_hour[file_name] == -1:
            # First line seen: adopt its hour and move the temp file to
            # its hour-suffixed name.
            self.current_hour[file_name] = get_hour
            self.current_file[file_name].close()
            #rename the init file name
            if os.path.exists(self.current_filename[file_name] +str(self.current_hour[file_name])):
                os.remove(self.current_filename[file_name]+str(self.current_hour[file_name]))
            os.rename(self.current_filename[file_name],self.current_filename[file_name]+str(self.current_hour[file_name]))
            self.current_file[file_name] = open(self.current_filename[file_name]+str(self.current_hour[file_name]),'w')
            print "init:",self.current_hour[file_name]
            self.store_result(time_str,file_name)
        if get_hour != self.current_hour[file_name]:
            #close the old file
            self.current_file[file_name].close()
            #update the hour
            self.current_hour[file_name] = get_hour
            #update the file descriptor
            if self.first_entity[file_name]:
                self.current_file[file_name] = open(self.current_filename[file_name] + str(self.current_hour[file_name]),"w")
                self.first_entity[file_name] = False
            else:
                self.current_file[file_name] = open(self.current_filename[file_name] + str(self.current_hour[file_name]),"a")
            self.store_result(time_str,file_name)
            #print "change:",self.current_hour[file_name]
def store_result(self,time_str,file_name):
"""
Store the result
"""
time_str = time_str.split(".")[0]
start_time = time_str[0:4] + "-" + time_str[4:6] + "-" + time_str[6:8] + " "+ \
time_str[8:10] + ":00:00"
current_hour = time_str.split(".")[0][8:10]
save_filename = self.current_filename[file_name]+current_hour
if save_filename not in self.saved_result:
self.result[file_name][self.time_cnt[file_name]] ={
"FileName":self.source_file_name,
"SaveFileName":save_filename,
"DataStartTime":start_time,
"Interval":3600,
}
self.saved_result.append(save_filename)
self.time_cnt[file_name] = self.time_cnt[file_name] + 1
    def parseProcess(self):
        """
        Main entry point: parse the source file in two passes.

        Pass 1 collects per-call statistics (building the call
        information table and the callid lookup); pass 2 writes the
        eventdata and measurement rows.  Returns the result metadata
        dict, or None if the source file does not exist.
        """
        #check the filename
        if not os.path.exists(self.source_file_name):
            print "Sorry, the %s doesn't exist!,please check"%self.source_file_name
            return
        ###################
        #create result file
        self.source_file = open(self.source_file_name,'r')
        self.result_measurement_file = open(self.result_measurement_name,'w')
        self.result_eventdata_file = open(self.result_eventdata_name,'w')
        self.result_callinformation_file = open(self.result_callinformation_name,'w')
        self.init_split()
        ##################
        #store text buffer
        self.source_buffer = self.source_file.readlines()
        self.callid_dict = [""] * (len(self.source_buffer)+1)
        ############################
        #call information statistics
        start_line = True
        for line in self.source_buffer:
            line = line.split(information_split)
            #start line (header) is parsed for the column positions
            if start_line:
                start_line = False
                self.start_line_process(line)
            else:
                if not self.filter_empty(line):
                    self.call_dict_line(line)
        # Per-call statistics: seed the callinformation table's initial
        # values and generate the callid lookup at the same time.
        self.callinformation_statistics()
        #print "End callinformation_statistics Time",time.ctime()
        #print "[callinformation_statistics]total time(seconds)", str(time.clock() - start)
        #########################
        #eventdata and mr process
        start_line = True
        for line in self.source_buffer:
            line = line.split(information_split)
            #skip the header line on this second pass
            if start_line:
                start_line = False
            else: #normals lines
                if not self.filter_empty(line):
                    self.normal_line(line)
        ######
        #close
        #print self.result
        self.source_file.close()
        self.result_measurement_file.close()
        self.result_eventdata_file.close()
        self.result_callinformation_file.close()
        return self.result
def filter_empty(self,line):
"""
Filter: MSIndividual is empty
"""
if line[self.item_dict[MS_INDIVIDUAL]] == "" or line[self.item_dict[MS_INDIVIDUAL]] == None:
return True
return False
def normal_line(self,line):
"""
single normal line process
"""
if "36" == line[self.item_dict[EVENTID]]:
#MeasurementData ่กจๅกซๅ
self.mr_line(line)
else:
#EventData ่กจๅกซๅ
self.eventdata_line(line)
def time_show(self,time_str):
"""
Time new format
"""
integer_str = time_str.split(".")[0]
float_str = time_str.split(".")[1]
ret = integer_str[0:3] + "-" + integer_str[4:5] + "-" + integer_str[6:7] + " " + \
integer_str[8:9] + ":" + integer_str[10:11] + ":" + integer_str[12:13] + "." + float_str
return ret
    def eventdata_line(self,line):
        """
        Fill one row of the EventData table (which embeds the Message
        temp-table fields) and write it to the hourly eventdata file.
        """
        #Step 0: allocate the output row (EventData fields + Message
        #fields; they share one overlapping column, hence the -1)
        event_len = len(eventdata_table) + len(message_table) - 1
        pos_eventdata = [""]*(event_len) #final column order to be written out
        ###########################################
        #Step 1: copy the directly-corresponding fields via the
        #eventdata_correspond dictionary (output column -> raw column)
        for item in eventdata_correspond.keys():
            origin = eventdata_correspond[item] #raw-data field name, from the parsed header row
            if origin == None:
                continue
            else:
                pos_eventdata[eventdata_table[item]] = line[self.item_dict[origin]]
        ###############
        #Step 2: manually-derived fields
        #CallID: looked up in the statistics table built in pass 1
        callid = self.callid_dict[int(line[self.item_dict[SEQUENCE]])]
        if callid == "":
            print "Error \t", line[self.item_dict[SEQUENCE]],"\t",line[self.item_dict[MS_INDIVIDUAL]]
        else:
            pos_eventdata[eventdata_table["CallID"]] = callid
        #CellID lookup
        pos_eventdata[eventdata_table["CellID"]] = self.get_cellid(line[self.item_dict[MO]])
        #TargetCellID lookup
        pos_eventdata[eventdata_table["TargetCellID"]] = self.get_cellid(line[self.item_dict[TARGET_MO]])
        #BSCDecision lookup
        pos_eventdata[eventdata_table["BSCDecision"]] = self.get_bsc(line[self.item_dict[MO]])
        #Message fields, appended after the EventData columns
        start_pos = eventdata_table["message_table"]
        for pos in message_table.keys():
            pos_eventdata[start_pos + pos] = line[self.item_dict[message_table[pos]]]
        #time format
        pos_eventdata[eventdata_table["Timestamp"]] = self.time_show(line[self.item_dict[TIMESTAMP]])
        ################
        #Step3: not handled for now
        #pos_eventdata[eventdata_table["CallEventID"]] = ...
        ################
        #Step4: split the output by hour
        self.split_files(line[self.item_dict[TIMESTAMP]],"eventdata_file")
        #############
        #step5: final write
        write_line = writeStrSplit.join(pos_eventdata)
        self.current_file["eventdata_file"].write(write_line + "\n")
def mr_line(self, line):
    """
    Fill one row of the MeasurementData (MR) table and write it to the
    current measurement output file. Only lines with EventID == "36"
    are measurement reports; anything else is ignored.
    """
    eventid = line[self.item_dict[EVENTID]]
    if eventid != "36":
        return
    ###############
    # Step 0: output row in final file column order.
    mr_len = len(measurement_correspond)
    pos_mr = [""]*mr_len  # values in final file column order
    ##########################################
    # Step 1: fields that map directly, looked up via measurement_correspond.
    for item in measurement_correspond.keys():
        origin = measurement_correspond[item]  # raw-data field name (parsed from the header line)
        if origin == None:
            continue
        else:
            pos_mr[measurement_table[item]] = line[self.item_dict[origin]]
    ##############
    # Step 2: fields that need manual handling.
    # CallID: resolved through the per-sequence statistics dictionary.
    callid = self.callid_dict[int(line[self.item_dict[SEQUENCE]])]
    if callid == "":
        #print "error MR", line[self.item_dict[SEQUENCE]]
        pass
    else:
        pos_mr[measurement_table["CallID"]] = callid
    # CellID extracted from the MO field
    pos_mr[measurement_table["CellID"]] = self.get_cellid(line[self.item_dict[MO]])
    # Neighbour cells: always emitted as six entries.
    pos_mr[measurement_table["NeighborCount"]] = "6"
    for cnt in range(1,7):
        name = 'NeighborID' + str(cnt)
        cellname = 'bss neighbour cell' + str(cnt)
        pos_mr[measurement_table[name]] = self.get_cellid(line[self.item_dict[cellname]])
    # Up/down-link path loss computed from power and rx levels.
    PathLossUL,PathLossDL = self.get_pathloss(line)
    #PathLossUL,PathLossDL = (0,0)
    pos_mr[measurement_table["PathLossUL"]] = PathLossUL
    pos_mr[measurement_table["PathLossDL"]] = PathLossDL
    # Timestamp reformatted for output
    pos_mr[measurement_table["Timestamp"]] = self.time_show(line[self.item_dict[TIMESTAMP]])
    #################
    # Step 3: not handled for now.
    #pos_mr[eventdata_table["MRDLossCounter"]] = ...
    #pos_mr[eventdata_table["OwnBCCH"]] = ...
    #pos_mr[eventdata_table["RxLevAntA"]] = ...
    #pos_mr[eventdata_table["RxLevAntB"]] = ...
    #pos_mr[eventdata_table["TXID"]] = ...
    #############
    # Step 4: select the output file for this timestamp (time-based split).
    self.split_files(line[self.item_dict[TIMESTAMP]],"measurement_file")
    #############
    # Step 5: final write.
    write_line = writeStrSplit.join(pos_mr)
    self.current_file["measurement_file"].write(write_line + "\n")
def start_line_process(self, line):
    """
    Handle the header (first) line: record the column index of every
    field name in self.item_dict so later lines can be accessed by
    field name. This makes the parser independent of column order
    differences between eventdata exports.
    """
    # enumerate replaces the manual counter of the original.
    for index, word in enumerate(line):
        self.item_dict[word] = index
def get_bsc(self, field):
    """
    Extract the BSC identifier from an MO field.

    The MO field is a comma separated list of "name=value" pairs; the
    BSC id is the value part of the second pair. Returns "" for an
    empty or missing field.
    """
    if field in ("", None):
        return ""
    second_pair = field.split(",")[1]
    return second_pair.split("=")[1]
def get_pathloss(self, line):
    """
    Compute (PathLossUL, PathLossDL) as strings for one MR line.

    Returns ("", "") when the allocated band or the MS power field is
    empty (path loss cannot be computed).
    """
    AllocBand = line[self.item_dict[ALLOC]]
    MSPower = line[self.item_dict[MS_POWER]]
    if AllocBand == "" or MSPower == "":
        return ("","")
    # Nominal output power, looked up per band and power level.
    OutputPower = self.output_power_dict[AllocBand][MSPower]
    RxLevelUL = line[self.item_dict[measurement_correspond["RxLevelUL"]]]
    RxLevelDL = line[self.item_dict[measurement_correspond["RxLevelDL"]]]
    # Path loss = output power - rx level + 100.
    # NOTE(review): the +100 offset presumably converts the reported rx
    # level scale to dBm -- confirm against the data format spec.
    PathLossUL = int(OutputPower) - int(RxLevelUL) + 100
    PathLossDL = int(OutputPower) - int(RxLevelDL) + 100
    return (str(PathLossUL),str(PathLossDL))
def get_cellid(self, filed):
    """
    Resolve the cell id for an MO field value.

    The third comma separated "name=value" pair of the field holds the
    cell name; it is resolved through the self.cell_dict cache, which
    is reloaded from the database on a miss. Returns "" for an empty
    or missing field.

    Fix: dict.has_key() (deprecated, removed in Python 3) replaced by
    the `in` operator; `== None` replaced by a membership test.
    """
    if filed in ("", None):
        return ""
    cell_info = filed.split(",")[2].split("=")[1]
    if cell_info in self.cell_dict:
        return self.cell_dict[cell_info]
    # Cache miss: reload the cell dictionary from the database.
    self.cell_dict = DBInfo.loadCellInfo(CellName=cell_info)
    return self.cell_dict[cell_info]
def call_dict_line(self, line):
    """
    Accumulate per-MS call information (temporary table) that
    callinformation_statistics later turns into CallInformation rows:
    extract the required fields from the line and append them, in
    arrival order, to self.call_information[ms_individual].
    """
    call_necessay_list = [""]*len(call_fetch_correspond)  # required CallInformation fields
    ms_individual = line[self.item_dict[MS_INDIVIDUAL]]
    if ms_individual == "":
        return
    # Direct mapping through call_fetch_correspond.
    for item in call_fetch_correspond.keys():
        call_necessay_list[call_fetch_correspond[item]] = line[self.item_dict[item]]
    # Append in arrival order.
    (self.call_information[ms_individual]).append(call_necessay_list)
def callinformation_statistics(self):
    """
    Build the CallInformation table: walk the per-MS event lists in
    self.call_information, split each list into calls, and write one
    summary row per call.

    Splitting rule: an event id "3" (clear command) arms a grace timer;
    if the next event arrives more than split_time seconds after it,
    the call is closed and a new one starts at that event.
    """
    line = []
    for ms in self.call_information.keys():
        ######################
        # Skip MS entries with no recorded events.
        if self.call_information[ms] == []:
            continue
        ######################
        # Per-MS grouping state.
        call_list = self.call_information[ms]
        call_start_index = 0  # index of the first event of the current call
        call_end_index = 0  # index of the last event of the current call
        cell_last_start_index = 0  # index where the call's last cell was entered
        record_time = ""  # timestamp of the pending clear command
        record_flag = False  # True while waiting to see whether the call really ended
        call_id = str(uuid.uuid1())  # id assigned to the current call
        write_line = [""]*len(callinoformation_table)
        ##########################################
        # Walk all events of this MS, splitting on event id "3"
        # (clear command) followed by a gap longer than split_time.
        for pos in range(0,len(call_list)):
            event_flag = call_list[pos][0]
            if event_flag != "3" and record_flag == False:
                # No clear command pending: still inside the call.
                write_line = self.basic_call(write_line, call_list[pos],call_id)
                # Advance the end/last-cell indexes.
                call_end_index,cell_last_start_index = self.update_index(call_list, pos, call_end_index, cell_last_start_index)
            elif event_flag != "3" and record_flag == True:
                # A clear command is pending: does this event still
                # belong to the same call?
                if float(call_list[pos][TIME_POS]) <= float(record_time) + split_time:
                    # Inside the grace window: same call.
                    write_line = self.basic_call(write_line, call_list[pos],call_id)
                    call_end_index,cell_last_start_index = self.update_index(call_list, pos, call_end_index, cell_last_start_index)
                else:
                    # Gap exceeded: close and write the previous call,
                    # then start a new call at this event.
                    self.final_write_call(call_list, write_line, call_start_index, call_end_index, cell_last_start_index)
                    # Reset per-call state.
                    write_line = [""]*len(callinoformation_table)
                    record_flag = False
                    record_time = ""
                    # Split finished: new call id for the next call.
                    call_id = str(uuid.uuid1())
                    # Restart all indexes at the current event.
                    call_start_index = pos
                    call_end_index = call_start_index
                    cell_last_start_index = call_start_index
                    # Record this event as the start of the new call.
                    write_line = self.basic_call(write_line, call_list[pos],call_id)
                    call_end_index,cell_last_start_index = self.update_index(call_list, pos, call_end_index, cell_last_start_index)
            else:
                # Event id "3" (clear command): the call may be about to
                # end -- arm the grace timer.
                record_flag = True
                record_time = call_list[pos][TIME_POS]
                # Refresh the generic columns.
                write_line = self.basic_call(write_line, call_list[pos],call_id)
                # Advance the end/last-cell indexes.
                call_end_index,cell_last_start_index = self.update_index(call_list, pos, call_end_index, cell_last_start_index)
        ##########################################
        # End of the event list: flush the last (still open) call.
        self.final_write_call(call_list, write_line, call_start_index, call_end_index, cell_last_start_index)
def final_write_call(self,call_list,write_line,call_start_index,call_end_index,cell_last_start_index):
    """
    Finish one call: compute its time and cell statistics, back-fill
    the sequence -> call-id dictionary for every event of the call, and
    write the CallInformation row to the current (time-split) file.
    """
    # Call start/end timestamps. The raw format is "YYYYMMDDHHMMSS.fff";
    # the seconds part goes through time.strptime, the fraction is
    # re-attached afterwards.
    ##########
    CallStartTime = call_list[call_start_index][TIME_POS]
    CallStartTime_stand = CallStartTime.split(".")[0]
    CallStartTime_after = "0."+CallStartTime.split(".")[1]
    CallEndTime = call_list[call_end_index][TIME_POS]
    CallEndTime_stand = CallEndTime.split(".")[0]
    CallEndTime_after = "0."+CallEndTime.split(".")[1]
    start_time = float(time.mktime(time.strptime(CallStartTime_stand,"%Y%m%d%H%M%S"))) + float(CallStartTime_after)
    end_time = float(time.mktime(time.strptime(CallEndTime_stand,"%Y%m%d%H%M%S"))) + float(CallEndTime_after)
    CallDuration = str(end_time - start_time)
    # Map every sequence number of this call to its call id so the
    # event/MR rows written later can reference it.
    for pos in range(call_start_index,call_end_index+1):
        self.callid_dict[int(call_list[pos][SEQUENCE_POS])] = write_line[CALLID_POS]
    # Originating/terminating cell ids.
    OriginatingCellID = self.get_cellid(call_list[call_start_index][CELL_POS])
    TerminatingCellID = self.get_cellid(call_list[call_end_index][CELL_POS])
    # Time spent in the last cell of this call.
    TimeInLastCell = str(float(call_list[call_end_index][TIME_POS]) - float(call_list[cell_last_start_index][TIME_POS]))
    # Fields not handled yet:
    #CallEventID = ...
    #TimeFromLastHOToCallEnd = ...
    #HOCount = ...
    #IntraCellHOCount = ...
    #IntraCellHOFailureCount = ...
    #HOReversionCount = ...
    # Fill write_line.
    # NOTE(review): CallEndTime is written raw while CallStartTime is
    # reformatted via time_show -- looks inconsistent; confirm intent.
    write_line[CallStartTime_pos] = self.time_show(CallStartTime)
    write_line[CallEndTime_pos] = CallEndTime
    write_line[CallDuration_pos] = CallDuration
    write_line[OriginatingCellID_pos] = OriginatingCellID
    write_line[TerminatingCellID_pos] = TerminatingCellID
    write_line[TimeInLastCell_pos] = TimeInLastCell
    # Select the output file for this timestamp (time-based split).
    self.split_files(CallStartTime,"callinformation_file")
    # Write the row.
    tmp = writeStrSplit.join(write_line)
    self.current_file["callinformation_file"].write(tmp + '\n')
    #self.result_callinformation_file.write(tmp + '\n')
def update_index(self, call_list, pos, call_end_index, cell_last_start_index):
    """
    Advance the per-call indexes for the event at position *pos* and
    return the updated (call_end_index, cell_last_start_index) pair.

    Fix: the redundant `else: x = x` no-op branches of the original
    have been removed; behavior is unchanged.
    """
    # The latest timestamp seen so far marks the call end.
    # NOTE(review): timestamps are compared as strings; this is correct
    # only for the fixed-width "YYYYMMDDHHMMSS.fff" format -- confirm.
    if call_list[pos][TIME_POS] >= call_list[call_end_index][TIME_POS]:
        call_end_index = pos
    # A change of cell starts a new "last cell" span.
    if call_list[pos][CELL_POS] != call_list[cell_last_start_index][CELL_POS]:
        cell_last_start_index = pos
    return (call_end_index, cell_last_start_index)
def basic_call(self,write_line,call_list,call_id):
    """
    Refresh the generic CallInformation columns of write_line from one
    event entry. Existing values are only overwritten by non-empty new
    values; column 0 always carries the call id.
    """
    for item in callinoformation_table.keys():
        name = callinformation_correspond[callinoformation_table[item]]
        if name != None:  # only columns that have a mapped source field
            value = call_list[call_fetch_correspond[name]]
            # Keep the previous value when the new one is empty.
            write_line[item] = value if value != "" else write_line[item]
    #write_line[0] = str(uuid.uuid1())
    # Column 0: call id.
    write_line[0] = call_id
    return write_line
if __name__ == "__main__":
    # NOTE(review): Python 2 only (print statements; time.clock was
    # removed in Python 3.8). The input path is hard coded; processStr
    # is defined earlier in this file.
    print "start Time", time.ctime()
    start = time.clock()
    p = processStr("D:\MR\source\eventData_120419_113412.txt")
    p.parseProcess()
    print "End Time", time.ctime()
    print "total time(seconds)", str(time.clock() - start)
|
''' nucleotide_alignment
Module for the representation of nucleotide alignments.
'''
import os
from typing import Iterable, Tuple
import numpy
import pyckmeans.distance
from .c_interop import encode_nucleotides
# Base encoding as used by R package ape.
# See http://ape-package.ird.fr/misc/BitLevelCodingScheme.html
#
# Summary (one byte per site):
# The most significant four bits carry the base information (A, G, C, T);
# ambiguity codes set more than one of them.
# 0b00001000 -> base is known (exactly one base bit set)
# 0b00000100 -> gap
# 0b00000010 -> unknown/missing base
BASE_ENCODING = {
    # bases (upper and lower case map to the same code)
    'A': 0b10001000, 'a': 0b10001000,
    'G': 0b01001000, 'g': 0b01001000,
    'C': 0b00101000, 'c': 0b00101000,
    'T': 0b00011000, 't': 0b00011000,
    # wobbles (IUPAC ambiguity codes)
    'R': 0b11000000, 'r': 0b11000000, # A|G
    'M': 0b10100000, 'm': 0b10100000, # A|C
    'W': 0b10010000, 'w': 0b10010000, # A|T
    'S': 0b01100000, 's': 0b01100000, # G|C
    'K': 0b01010000, 'k': 0b01010000, # G|T
    'Y': 0b00110000, 'y': 0b00110000, # C|T
    'V': 0b11100000, 'v': 0b11100000, # A|G|C
    'H': 0b10110000, 'h': 0b10110000, # A|C|T
    'D': 0b11010000, 'd': 0b11010000, # A|G|T
    'B': 0b01110000, 'b': 0b01110000, # G|C|T
    'N': 0b11110000, 'n': 0b11110000, # A|G|C|T
    # gaps
    '-': 0b00000100,
    '~': 0b00000100,
    ' ': 0b00000100,
    # unknown/missing state
    '?': 0b00000010
}
# Decode table: code -> canonical symbol. Lower-case duplicates (and the
# alternative gap characters) are filtered out so each code maps to
# exactly one symbol.
BASE_ENCODING_INVERSE = {
    v:k for k, v in BASE_ENCODING.items() if k.isupper() or k in ('-', '?')
}
class InvalidAlignmentFileExtensionError(Exception):
    '''Raised when an alignment file extension is not recognized.'''
class InvalidAlignmentFileFormatError(Exception):
    '''Raised when an unsupported alignment file format is requested.'''
class InvalidAlignmentCharacterError(Exception):
    '''Raised when an alignment contains a character not in BASE_ENCODING.'''
class InvalidSeqIORecordsError(Exception):
    '''Raised when Bio.SeqIO records cannot form an alignment (unequal lengths).'''
class NucleotideAlignment:
    '''NucleotideAlignment
    Class for nucleotide alignments.
    Parameters
    ----------
    names : List[str]
        Sequence identifiers/names.
    sequences : numpy.ndarray
        n*m alignment matrix, where n is the number of entries and m
        is the number of sites.
    copy : bool
        If True, sequences will be copied. If false, the NucleotideAlignment
        will use the original sequences, potentially modifying them.
    fast_encoding : bool
        If true, a fast nucleotide encoding method without error checking
        will be used. ATTENTION: This will modify sequences in place.
    '''
    def __init__(
        self,
        names: Iterable[str],
        sequences: numpy.ndarray,
        copy: bool = False,
        fast_encoding: bool = False,
    ):
        # check validity: one name per sequence row
        n_names = len(names)
        n_seqs = sequences.shape[0]
        if n_names != n_seqs:
            msg = f'Number of names ({n_names}) does not match number of sequences ({n_seqs}).'
            # NOTE(review): a generic Exception is raised here while the
            # module defines specific error classes -- confirm whether a
            # dedicated exception type was intended.
            raise Exception(msg)
        self.names = numpy.array(names)
        # encode strings as uint8, see BASE_ENCODING
        if sequences.dtype != numpy.uint8:
            if fast_encoding:
                # C implementation, no error checking; encodes in place
                # unless copy=True.
                self.sequences = encode_nucleotides(sequences.copy() if copy else sequences)
            else:
                try:
                    self.sequences = numpy.array(
                        [[BASE_ENCODING[n] for n in row] for row in sequences],
                        dtype=numpy.uint8,
                    )
                except KeyError as k_err:
                    msg = f'Encountered unknown character in alignment: {str(k_err)}'
                    raise InvalidAlignmentCharacterError(msg) from k_err
        else:
            # already encoded as uint8
            self.sequences = sequences.copy() if copy else sequences
    def drop_invariant_sites(self, in_place: bool = False) -> 'NucleotideAlignment':
        '''drop_invariant_sites
        Remove invariant sites from alignment. Invariant sites
        are sites, where each entry has the same symbol.
        Parameters
        ----------
        in_place : bool, optional
            Modify self in place, by default False
        Returns
        -------
        NucleotideAlignment
            NucleotideAlignment without invariant sites.
            If in_place is set to True, self is returned.
        '''
        # A column is invariant when every row equals the first row.
        if in_place:
            self.sequences = self.sequences[
                :,
                ~numpy.all((self.sequences == self.sequences[0,]), axis=0)
            ]
            return self
        else:
            return NucleotideAlignment(
                self.names.copy(),
                self.sequences[
                    :, ~numpy.all((self.sequences == self.sequences[0,]), axis=0)
                ].copy(),
            )
    def copy(self) -> 'NucleotideAlignment':
        '''copy
        Return a deep copy (names and sequences) of the
        NucleotideAligment object.
        Returns
        -------
        NucleotideAlignment
            Copy of self.
        '''
        return NucleotideAlignment(self.names.copy(), self.sequences.copy())
    def distance(
        self,
        distance_type: str = 'p',
        pairwise_deletion: bool = True,
    ) -> 'pyckmeans.distance.DistanceMatrix':
        '''distance
        Calculate genetic distance.
        Parameters
        ----------
        distance_type : str, optional
            Type of genetic distance to calculate, by default 'p'.
            Available distance types are p-distances ('p'),
            Jukes-Cantor distances ('jc'), and Kimura 2-paramater distances
            ('k2p').
        pairwise_deletion : bool
            Use pairwise deletion as action to deal with missing data.
            If False, complete deletion is applied.
            Gaps ("-", "~", " "), "?", and ambiguous bases are treated as
            missing data.
        Returns
        -------
        pyckmeans.distance.DistanceMatrix
            n*n distance matrix.
        '''
        return pyckmeans.distance.alignment_distance(
            alignment=self,
            distance_type=distance_type,
            pairwise_deletion=pairwise_deletion,
        )
    @property
    def shape(self) -> Tuple[int, int]:
        '''shape
        Get alignment dimensions/shapes.
        Returns
        -------
        Tuple[int, int]
            Number of samples n, number of sites m
        '''
        return self.sequences.shape
    def __getitem__(self, idx):
        # NOTE(review): a scalar index yields 1-D sequences and a scalar
        # name, which the constructor's shape/len checks may not expect;
        # intended usage appears to be slice or array indexing -- confirm.
        if isinstance(idx, tuple):
            # (row_selector, column_selector): names follow the rows.
            return NucleotideAlignment(self.names[idx[0]], self.sequences[idx])
        else:
            return NucleotideAlignment(self.names[idx], self.sequences[idx])
    def __repr__(self) -> str:
        '''__repr__
        Returns
        -------
        str
            String representation
        '''
        shape = self.shape
        return f'<NucleotideAlignment; #samples: {shape[0]}, #sites: {shape[1]}>'
    @classmethod
    def from_bp_seqio_records(
        cls,
        records: Iterable['Bio.SeqRecord.SeqRecord'],
        fast_encoding: bool = False,
    ) -> 'NucleotideAlignment':
        '''from_bp_seqio_records
        Build NucleotideAlignment from iterable of Bio.SeqRecord.SeqRecord.
        Such an iterable is, for example, returned by Bio.SeqIO.parse() or
        can be constructed using Bio.Align.MultipleSequenceAlignment().
        Parameters
        ----------
        records: Iterable['Bio.SeqRecord.SeqRecord']
            Iterable of Bio.SeqRecord.SeqRecord.
            Such an iterable is, for example, returned by Bio.SeqIO.parse() or
            can be constructed using Bio.Align.MultipleSequenceAlignment().
        fast_encoding : bool
            If true, a fast nucleotide encoding method without error checking
            will be used.
        Returns
        -------
        NucleotideAlignment
            NucleotideAlignment object.
        Raises
        ------
        InvalidSeqIORecordsError
            Raised if sequences have different lengths.
        '''
        names = []
        seqs = []
        for record in records:
            names.append(record.id)
            seqs.append(list(record.seq))
        # check if all sequences have same length (required for an alignment)
        seq_len = len(seqs[0])
        for i, seq in enumerate(seqs[1:]):
            cur_seq_len = len(seq)
            if cur_seq_len != seq_len:
                msg = f'Expected all sequences to have length {seq_len}' +\
                    f'(length of sequence #0) but sequence #{i+1} has length {cur_seq_len}.'
                raise InvalidSeqIORecordsError(msg)
        seqs = numpy.array(seqs)
        names = numpy.array(names)
        return cls(names, seqs, copy=False, fast_encoding=fast_encoding)
    @classmethod
    def from_file(
        cls,
        file_path: str,
        file_format='auto',
        fast_encoding=False,
    ) -> 'NucleotideAlignment':
        '''from_file
        Read nucleotide alignment from file.
        Parameters
        ----------
        file_path: str
            Path to alignment file.
        file_format: str
            Alignment file format. Either "auto", "fasta" or "phylip".
            When "auto" the file format will be inferred based on the file extension.
        fast_encoding : bool
            If true, a fast nucleotide encoding method without error checking
            will be used.
        Returns
        -------
        NucleotideAlignment
            NucleotideAlignment object.
        Raises
        ------
        InvalidAlignmentFileExtensionError
            Raised if file_format is "auto" and the file extension is not understood.
        InvalidAlignmentFileFormatError
            Raised if an invalid file_format is passed.
        '''
        # Infer format from the file extension when requested.
        if file_format == 'auto':
            ext = os.path.splitext(file_path)[1].lower()
            if ext in ['.fasta', '.fas', '.fa']:
                file_format = 'fasta'
            elif ext in ['.phylip', '.phy']:
                file_format = 'phylip'
            else:
                msg = f'Unknown alignment file extension "{ext}". Please set file_format manually.'
                raise InvalidAlignmentFileExtensionError(msg)
        # Parsers are imported lazily to keep module import light.
        # With fast_encoding, sequences are read as byte strings ('S').
        if file_format in ['fasta', 'FASTA']:
            from .fasta import read_fasta_alignment
            seqs, names = read_fasta_alignment(
                file_path,
                dtype='S' if fast_encoding else 'U',
            )
            return cls(
                names=names,
                sequences=seqs,
                copy=False,
                fast_encoding=fast_encoding,
            )
        elif file_format in ['phylip', 'PHYLIP']:
            from .phylip import read_phylip_alignment
            seqs, names = read_phylip_alignment(
                file_path,
                dtype='S' if fast_encoding else 'U',
            )
            return cls(
                names=names,
                sequences=seqs,
                copy=False,
                fast_encoding=fast_encoding,
            )
        else:
            msg = f'Unknown aligment file format "{file_format}". ' +\
                'Supported formats are "fasta" and "phylip".'
            raise InvalidAlignmentFileFormatError(msg)
def read_alignment(
    file_path: str,
    file_format: str = 'auto',
    fast_encoding: bool = False,
) -> NucleotideAlignment:
    '''read_alignment
    Read nucleotide alignment from file.
    Alias for NucleotideAlignment.from_file.
    Parameters
    ----------
    file_path: str
        Path to alignment file.
    file_format: str
        Alignment file format. Either "auto", "fasta" or "phylip".
        When "auto" the file format will be inferred based on the file extension.
    fast_encoding : bool
        If true, a fast nucleotide encoding method without error checking
        will be used. Forwarded to NucleotideAlignment.from_file; the
        previous version of this alias silently dropped this option.
    Returns
    -------
    NucleotideAlignment
        NucleotideAlignment instance.
    Raises
    ------
    InvalidAlignmentFileExtensionError
        Raised if file_format is "auto" and the file extension is not understood.
    InvalidAlignmentFileFormatError
        Raised if an invalid file_format is passed.
    '''
    return NucleotideAlignment.from_file(
        file_path, file_format, fast_encoding=fast_encoding,
    )
|
__author__ = 'erwin'
import web
# URL routing table: patterns are tried in order, so the more specific
# /world route is listed before the catch-all that maps to Hallo.
urls = (
    '/world(/.*)', 'World',
    '/(.*)', 'Hallo',
)
app = web.application(urls, globals())
class Hallo:
    """Handler for the catch-all route: greets the captured name."""
    def GET(self, name):
        # Substitute the default greeting target when nothing was captured.
        greeting_target = name if name else 'Hallo'
        return 'hello, ' + greeting_target
class World:
    """Handler for the /world route: echoes the captured sub-path name."""
    def GET(self, name):
        # Fall back to the default when no sub-path was captured.
        target = name or 'World'
        return 'world, ' + target
# Start the web.py development server when run as a script.
if __name__ == '__main__':
    app.run()
"""Provides class to wrap existing models in different frameworks
so that they provide a unified API to the attacks.
"""
from .keras_yolov3 import KerasYOLOv3Model
from .keras_ssd300 import KerasSSD300Model
from .keras_retina_resnet50 import KerasResNet50RetinaNetModel
|
# Read output from pdflatex/latex, after doconce grab
# doconce grab --from- '\*File List\*' --to- '\*\*\*\*' tmp.txt > tmp.txt
# and find all styles files with full path
# NOTE(review): Python 2 only -- the `commands` module was removed in
# Python 3 (subprocess.getstatusoutput replaces it).
dont_copy = []  # filename prefixes to exclude from copying
import sys, commands, os
f = open(sys.argv[1], 'r')
lines = f.readlines()
paths = []
# Resolve the full path of every LaTeX support file via kpsewhich.
for line in lines:
    words = line.split()
    filename = words[0]
    if filename.endswith('.def') or \
       filename.endswith('.tex') or \
       filename.endswith('.aux') or \
       filename.endswith('.sty') or \
       filename.endswith('.cls') or \
       filename.endswith('.clo') or \
       filename.endswith('.cfg') or \
       filename.endswith('.dfu'):
        if sum(filename.startswith(name) for name in dont_copy) > 0:
            continue
        failure, output = commands.getstatusoutput('kpsewhich %s' % filename)
        if not failure:
            paths.append(output)
# Write copy script (tmpcp.sh) that copies every resolved file.
extdoc = []
f = open('tmpcp.sh', 'w')
dest = 'stylefiles'
for path in paths:
    if path.endswith('.aux'):
        # .aux file needed for \externaldocument{}, these often have
        # names /user/.../book.aux so use full path
        local_dir = os.path.join(dest, os.path.dirname(path)[1:])
        extdoc.append((os.path.dirname(path), local_dir))
        if not os.path.isdir(local_dir):
            os.makedirs(local_dir)
        f.write('cp %s %s\n' % (path, local_dir))
    elif path.startswith('./'):
        f.write('cp %s .\n' % path)
    else:
        f.write('cp %s %s\n' % (path, dest))
f.close()
if extdoc:
    # Fix .tex file: rewrite \externaldocument paths to the local copies.
    try:
        filename = sys.argv[2]
    except IndexError:
        filename = 'book.tex'
    f = open(filename, 'r')
    text = f.read()
    f.close()
    for dirname, newname in extdoc:
        text = text.replace(dirname, newname)
    f = open(filename, 'w')
    f.write(text)
    f.close()
|
"""
Storage containers for durable queues and (planned) durable topics.
"""
import abc
import logging
import threading
from coilmq.util.concurrency import synchronized
__authors__ = ['"Hans Lellelid" <hans@xmpl.org>']
__copyright__ = "Copyright 2009 Hans Lellelid"
__license__ = """Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License."""
# Module-level reentrant lock shared by all @synchronized store methods.
lock = threading.RLock()
class QueueStore(object):
    """
    Abstract base class for queue storage.
    Extensions/implementations of this class must be thread-safe.
    @ivar log: A logger for this class.
    @type log: C{logging.Logger}
    """
    # NOTE(review): __metaclass__ is the Python 2 spelling; under
    # Python 3 it has no effect, so abstract methods are not enforced
    # there.
    __metaclass__ = abc.ABCMeta
    def __init__(self):
        """
        A base constructor that sets up logging.
        If you extend this class, you should either call this method or at minimum make sure these values
        get set.
        """
        self.log = logging.getLogger('%s.%s' % (
            self.__module__, self.__class__.__name__))
    @abc.abstractmethod
    @synchronized(lock)
    def enqueue(self, destination, frame):
        """
        Store message (frame) for specified destination.
        @param destination: The destination queue name for this message (frame).
        @type destination: C{str}
        @param frame: The message (frame) to send to specified destination.
        @type frame: C{stompclient.frame.Frame}
        """
    @abc.abstractmethod
    @synchronized(lock)
    def dequeue(self, destination):
        """
        Removes and returns an item from the queue (or C{None} if no items in queue).
        @param destination: The queue name (destination).
        @type destination: C{str}
        @return: The first frame in the specified queue, or C{None} if there are none.
        @rtype: C{stompclient.frame.Frame}
        """
    @synchronized(lock)
    def requeue(self, destination, frame):
        """
        Requeue a message (frame) for storing at specified destination.
        Default implementation simply delegates to L{enqueue}.
        @param destination: The destination queue name for this message (frame).
        @type destination: C{str}
        @param frame: The message (frame) to send to specified destination.
        @type frame: C{stompclient.frame.Frame}
        """
        self.enqueue(destination, frame)
    @synchronized(lock)
    def size(self, destination):
        """
        Size of the queue for specified destination.
        @param destination: The queue destination (e.g. /queue/foo)
        @type destination: C{str}
        @return: The number of frames in specified queue.
        @rtype: C{int}
        """
        raise NotImplementedError()
    @synchronized(lock)
    def has_frames(self, destination):
        """
        Whether specified destination has any frames.
        Default implementation uses L{QueueStore.size} to determine if there
        are any frames in queue. Subclasses may choose to optimize this.
        @param destination: The queue destination (e.g. /queue/foo)
        @type destination: C{str}
        @return: Whether there are any frames in the specified queue.
        @rtype: C{bool}
        """
        return self.size(destination) > 0
    @synchronized(lock)
    def destinations(self):
        """
        Provides a set of destinations (queue "addresses") available.
        @return: A list of the destinations available.
        @rtype: C{set}
        """
        raise NotImplementedError
    @synchronized(lock)
    def close(self):
        """
        May be implemented to perform any necessary cleanup operations when store is closed.
        """
        pass
    # This is intentionally not synchronized, since it does not directly
    # expose any shared data.
    def frames(self, destination):
        """
        Returns an iterator for frames in specified queue.
        The iterator simply wraps calls to L{dequeue} method, so the order of the
        frames from the iterator will be the reverse of the order in which the
        frames were enqueued.
        @param destination: The queue destination (e.g. /queue/foo)
        @type destination: C{str}
        """
        return QueueFrameIterator(self, destination)
class QueueFrameIterator(object):
    """
    Iterable over the frames stored for one destination.

    This is a draining iterator: every step dequeues a frame from the
    backing store, so iterating consumes the queue as a side effect.

    @ivar store: The queue store.
    @type store: L{coilmq.store.QueueStore}
    @ivar destination: The destination for this iterator.
    @type destination: C{str}
    """
    def __init__(self, store, destination):
        self.store = store
        self.destination = destination
    def __iter__(self):
        return self
    def __next__(self):
        dequeued = self.store.dequeue(self.destination)
        if not dequeued:
            raise StopIteration()
        return dequeued
    def next(self):
        # Python 2 iterator-protocol compatibility shim.
        return self.__next__()
    def __len__(self):
        return self.store.size(self.destination)
class TopicStore(object):
    """
    Abstract base class for non-durable topic storage.
    (No interface defined yet; serves as a marker base class.)
    """
class DurableTopicStore(TopicStore):
    """
    Abstract base class for durable topic storage (planned).
    (No interface defined yet; serves as a marker base class.)
    """
|
# from sqlalchemy import create_engine
# from sqlalchemy.ext.declarative import declarative_base
# from sqlalchemy.orm import sessionmaker
# SQLALCHEMY_DATABASE_URL = "sqlite:///./proxymall.db"
# # SQLALCHEMY_DATABASE_URL = "postgresql://postgres:root@localhost/protech?"
# engine = create_engine(
# SQLALCHEMY_DATABASE_URL, connect_args={"check_same_thread": False}
# )
# SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
# Base = declarative_base()
from contextvars import ContextVar
import peewee
from playhouse.mysql_ext import MySQLConnectorDatabase
# Default (empty) peewee connection state for a fresh context.
db_state_default = {"closed": None, "conn": None,
                    "ctx": None, "transactions": None}
# ContextVar so each async task/request sees its own copy of the
# connection state (used by PeeweeConnectionState below).
db_state = ContextVar("db_state", default=db_state_default.copy())
class PeeweeConnectionState(peewee._ConnectionState):
    """
    Connection state backed by a ContextVar instead of plain instance
    attributes, so each async task/request gets its own isolated
    connection state (the standard recipe for using peewee with async
    frameworks such as FastAPI).
    """
    def __init__(self, **kwargs):
        # Must bypass our own __setattr__ so `_state` lands on the
        # instance itself rather than inside the ContextVar dict.
        super().__setattr__("_state", db_state)
        super().__init__(**kwargs)
    def __setattr__(self, name, value):
        # All other attribute writes go into the per-context dict.
        self._state.get()[name] = value
    def __getattr__(self, name):
        # Attribute reads come from the per-context dict.
        return self._state.get()[name]
# db = PostgresqlExtDatabase(
# DATABASE_NAME,
# user="postgres",
# password="root",
# host="127.1.0.0",
# port="5432"
# )
# db = MySQLConnectorDatabase(
# "proxcbvs_proxymall",
# user="proxcbvs_backend",
# password="@Proxymall2020",
# host="198.54.116.211",
# port="3306"
# )
# Local SQLite database; check_same_thread=False is passed through to
# sqlite3.connect so the connection may be used from multiple threads.
db = peewee.SqliteDatabase("proxymall.db", check_same_thread=False)
# Swap in the ContextVar-backed state so connections are per-context.
db._state = PeeweeConnectionState()
|
"""
LAMBDAS:
->Sรฃo funรงรตes sem nome, ou seja, anรดnimas
->sao criadas com somante uma linha
"""
#exemplo de simples
op = lambda x: x*2 + 1 #Estrutura: lambda parรขmetro: operaรงรฃo que irรก ser retornada da funรงรฃo
print(op(2))
#mais exemplos
autores = ['Monteiro Lobato','Josรฉ de Alencar','Cecรญlia Meireles','Carlos Drummond de Andrade ']
autores.sort(key=lambda nome:nome.split(' ')[-1].lower())#funรงรฃo sort ordena lista
print(autores)
def funcao_quadratica(a, b, c):
    """Return the quadratic function x -> a*x**2 + b*x + c as a closure."""
    def _polinomio(x):
        return a*x**2 + b*x + c
    return _polinomio
calculo = funcao_quadratica(5,3,4)  # fix the coefficients a, b, c
print(calculo(2))  # evaluate at x = 2
import Recursividad.EjemploRecursividad as ej
import unittest
class Pruebas(unittest.TestCase):
    """Unit tests for the recursive factorial implementation."""
    def test_factorial(self):
        # (input, expected factorial) pairs, checked in the original order.
        for value, expected in ((5, 120), (0, 1), (4, 24)):
            self.assertEqual(expected, ej.factorial_recursivo(value))
|
# Class object for event type 'CUSTOMER'
class Customer:
    """A CUSTOMER event record: identity plus last known address fields."""
    def __init__(self, key, verb, event_time, last_name, adr_city, adr_state):
        self.key = key                    # customer id
        self.verb = verb                  # event action -- values not visible here; TODO confirm
        self.insert_time = event_time     # event time stored as insert time
        self.last_name = last_name
        self.adr_city = adr_city
        self.adr_state = adr_state
        self.update_time = None           # filled later when the record is updated
# Class object for event type 'SITE_VISIT'
class SiteVisit:
    """A SITE_VISIT event record linked to a customer."""
    def __init__(self, key, verb, event_time, customer_id, tags):
        self.key = key                    # visit id
        self.verb = verb                  # event action -- values not visible here; TODO confirm
        self.event_time = event_time
        self.customer_id = customer_id    # owning customer
        self.tags = tags                  # visit metadata tags
# Class object for event type 'IMAGE'
class Image:
    """An IMAGE upload event record linked to a customer."""
    def __init__(self, key, verb, event_time, customer_id, camera_make, camera_model):
        self.key = key                    # image id
        self.verb = verb                  # event action -- values not visible here; TODO confirm
        self.event_time = event_time
        self.customer_id = customer_id    # owning customer
        self.camera_make = camera_make
        self.camera_model = camera_model
# Class object for event type 'ORDER'
class Order:
    """An ORDER event record linked to a customer."""
    def __init__(self, key, verb, event_time, customer_id, total_amount):
        self.key = key                    # order id
        self.verb = verb                  # event action -- values not visible here; TODO confirm
        self.insert_time = event_time     # event time stored as insert time
        self.customer_id = customer_id    # owning customer
        self.total_amount = total_amount
        self.update_time = None           # filled later when the record is updated
|
# GMM classifier demo on the iris data set.
# NOTE(review): depends on long-removed scikit-learn APIs --
# sklearn.mixture.GMM and sklearn.cross_validation were dropped in
# sklearn 0.20; this script only runs on old sklearn versions.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import patches
from sklearn import datasets
from sklearn.mixture import GMM
from sklearn.cross_validation import StratifiedKFold
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
iris = datasets.load_iris()
# 5-fold stratified split; keep only the first fold's train/test indices.
indices = StratifiedKFold(iris.target, n_folds=5)
train_index, test_index = next(iter(indices))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
num_classes = len(np.unique(y_train))
# One GMM component per class; only weights and covariances are
# re-estimated ('wc') -- the means are seeded from class centroids below.
classifier = GMM(n_components=num_classes, covariance_type='full',
                 init_params='wc', n_iter=20)
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
                              for i in range(num_classes)])
classifier.fit(X_train)
plt.figure()
colors = 'bgr'
# Draw one covariance ellipse (first two features only) per class.
for i, color in enumerate(colors):
    eigenvalues, eigenvectors = np.linalg.eigh(
        classifier._get_covars()[i][:2, :2])
    norm_vec = eigenvectors[0] / np.linalg.norm(eigenvectors[0])
    angle = np.arctan2(norm_vec[1], norm_vec[0])
    angle = 180 * angle / np.pi  # radians -> degrees for Ellipse
    scaling_factor = 8  # enlarge the ellipses for visibility
    eigenvalues *= scaling_factor
    ellipse = patches.Ellipse(classifier.means_[i, :2],
                              eigenvalues[0], eigenvalues[1], 180 + angle,
                              color=color)
    axis_handle = plt.subplot(1, 1, 1)
    ellipse.set_clip_box(axis_handle.bbox)
    ellipse.set_alpha(0.6)
    axis_handle.add_artist(ellipse)
colors = 'bgr'
# Scatter all samples (hollow circles) and test samples (filled squares).
for i, color in enumerate(colors):
    cur_data = iris.data[iris.target == i]
    plt.scatter(cur_data[:,0], cur_data[:,1], marker='o',
                facecolors='none', edgecolors='black', s=40,
                label=iris.target_names[i])
    test_data = X_test[y_test == i]
    plt.scatter(test_data[:,0], test_data[:,1], marker='s',
                facecolors='black', edgecolors='black', s=40,
                label=iris.target_names[i])
# Report train/test accuracy of the predicted component labels.
y_train_pred = classifier.predict(X_train)
accuracy_training = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
print('Accuracy on training data =', accuracy_training)
y_test_pred = classifier.predict(X_test)
accuracy_testing = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
print('Accuracy on testing data =', accuracy_testing)
plt.title('GMM classifier')
plt.xticks(())
plt.yticks(())
plt.show()
|
def BinarySearch(array, element):
    """Return the index of element in the sorted list array, or -1 if absent.

    Classic iterative binary search: O(log n) comparisons, O(1) space.
    """
    low = 0
    high = len(array) - 1
    while low <= high:
        # Floor division keeps mid an exact int even for very large indices;
        # the original int((low+high)/2) round-trips through a float and can
        # lose precision on huge lists.
        mid = (low + high) // 2
        if element < array[mid]:
            high = mid - 1
        elif element > array[mid]:
            low = mid + 1
        else:
            return mid
    return -1
# function call
print(BinarySearch([1, 4, 6, 8, 9], 1)) # 0
|
"""
https://edabit.com/challenge/iasdc3ihqt9hkZWfi
"""
def can_give_blood(donor, receiver) -> bool:
    """Return True when blood type `donor` may donate to blood type `receiver`.

    Encoded rules: an Rh-positive donor cannot give to an Rh-negative
    receiver; otherwise the donation works when the donor's ABO group appears
    in the receiver's type, or the donor is a universal (group O) donor.
    """
    rh_positive_donor = '+' in donor
    rh_positive_receiver = '+' in receiver
    if rh_positive_donor and not rh_positive_receiver:
        return False
    abo_group = donor[:-1]
    return abo_group in receiver or 'O' in donor
# Regression table: ((donor, receiver), expected_result) pairs.
tests = [
    (("O+", "A+"), True),
    (("A-", "B-"), False),
    (("A-", "AB+"), True),
    (("AB-", "B-"), False),
    (("AB+", "A+"), False),
    (("O-", "A-"), True),
    (("A-", "O-"), False),
    (("O+", "AB-"), False),
    (("O-", "AB+"), True),
    (("AB+", "AB+"), True),
    (("O+", "O-"), False),
]
for (donor, receiver), expected in tests:
    print("Input: " + str((donor, receiver)))
    assert can_give_blood(donor, receiver) == expected
print('Success')
def package(N, W, cost, value):
    """0/1 knapsack: N items, capacity W, cost[i] = weight, value[i] = value.

    Returns the DP table where entry j is the maximum total value achievable
    with capacity exactly <= j.
    """
    best = [0] * (W + 1)
    for item in range(N):
        weight = cost[item]
        worth = value[item]
        # Iterate capacities downwards so each item is used at most once.
        for capacity in range(W, weight - 1, -1):
            candidate = best[capacity - weight] + worth
            if candidate > best[capacity]:
                best[capacity] = candidate
    return best
# Read "N W", then N lines of "cost value", and print the best achievable
# value at full capacity W.
N, W = map(int, input().split(" "))
costs = []
values = []
for _ in range(N):
    tokens = input().split()
    costs.append(int(tokens[0]))
    values.append(int(tokens[1]))
best = package(N, W, costs, values)
print(best[-1])
import time
import random
import sys
from sys import stdout
# List of answers the 8-ball can pick from.
# NOTE: the original had a missing comma after "Ask a probably question,",
# which silently concatenated it with "Ask again later" into one entry via
# implicit string-literal concatenation; the comma restores both answers.
response = ["Yes, most definitely!", "The chances are high!", "Not likely!", "May the odds be ever in your favor.",
            "You got no shot, kid.", "Try it out and see!", "23% of working", "99.9% success rate",
            "Congratulations, yes!", "Ask a probably question,", "Ask again later", "Better not tell you now",
            "Cannot predict now", "Concentrate and ask again", "Don't count on it"]
def __init__():
    """Entry point: kick off the first question prompt."""
    ask()
def ask():
    """Prompt for a question, 'think' briefly, print a random answer, then
    offer another round via again()."""
    print('The Fanstical Magicical 8 Ball')
    time.sleep(0.25)
    question = input('Ask your question: ')
    time.sleep(0.25)
    print('Thinking')
    thinking(3)
    print(random.choice(response))
    time.sleep(0.25)
    again()
def again():
    """Ask whether to play again; restart on yes, thank the user on no.

    Accepts the answer case-insensitively (the original only recognised the
    upper-case 'Y'/'N' and silently exited on anything else; any other
    answer still falls through and ends the session).
    """
    ans = input('Ask again? (Y/N): ').upper()
    if ans == 'Y':
        ask()
    elif ans == 'N':
        print('Thanks!')
def thinking(amount):
    """Print amount+1 dots, one per line, pausing 0.75s after each."""
    for _ in range(amount + 1):
        print('.')
        time.sleep(0.75)
__init__()
|
"""
The MIT License
Copyright (c) 2010 Sugestio.com
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sugestio
# Public demo credentials for the Sugestio sandbox account.
ACCOUNT = 'sandbox'
SECRET = 'demo'
client = sugestio.Client(ACCOUNT, SECRET)
def get_recommendations():
    """Print the top recommendation for user 1, or the error response (Python 2)."""
    status, content = client.get_recommendations(1)
    if status == 200:
        print content[0].itemid
        print content[0].score
        print content[0].algorithm
    else:
        print "server response code:", status
        print content
def add_consumption():
    """Record a VIEW of item 'abc' by user 1 and print the response code."""
    params = {'userid':1, 'itemid':'abc', 'type':'VIEW'}
    status = client.add_consumption(params)
    print "server response code:", status
def add_user():
    """Create demo user 1 (male, born 1975-04-05) and print the response code."""
    params = {'id':1, 'gender':'M', 'birthday':'1975-04-05'}
    status = client.add_user(params)
    print "server response code:", status
def delete_item_metadata():
    """Delete the metadata of item 1 and print the response code."""
    status = client.delete_item_metadata(1)
    print "server response code:", status
def delete_user_metadata():
    """Delete the metadata of user 1 and print the response code."""
    status = client.delete_user_metadata(1)
    print "server response code:", status
def delete_consumption():
    """Delete consumption record 'a-b-c-1-2-3' and print the response code."""
    status = client.delete_consumption("a-b-c-1-2-3")
    print "server response code:", status
def delete_user_consumptions():
    """Delete every consumption record of user 1 and print the response code."""
    status = client.delete_user_consumptions(1)
    print "server response code:", status
def add_item():
    """Create item 'X75FKGE-E' with an availability window and two tags."""
    params = {'id':'X75FKGE-E', 'from':'2010-07-01', 'until':'2010-09-01'}
    params['tag'] = ['tag1', 'tag2']
    status = client.add_item(params)
    print "server response code:", status
if __name__ == "__main__":
#get_recommendations()
#add_consumption()
#add_user()
#add_item()
#delete_item_metadata()
#delete_consumption()
#delete_user_metadata()
#delete_user_consumptions()
print "Done." |
# Read n (count prompt only) and a line of integers, then fold the sorted
# values: repeatedly average the running result with the next value.
def words(t):
    """Parse one stdin line into a list of t-converted tokens."""
    return list(map(t, input().split()))
n = int(input())
numbers = words(int)
numbers.sort()
running = numbers[0]
for value in numbers[1:]:
    running = (running + value) / 2
print(running)
|
#https://codeforces.com/contest/127/problem/A
import math
# n signature points, k copies; total time is the pen-path length at
# 50 units/s repeated k times, printed with 9 decimals.
n, k = map(int, input().split())
prev_x, prev_y = map(int, input().split())
total = 0
for _ in range(n - 1):
    cur_x, cur_y = map(int, input().split())
    segment = math.sqrt((cur_x - prev_x) ** 2 + (cur_y - prev_y) ** 2)
    total += segment / 50
    prev_x, prev_y = cur_x, cur_y
total *= k
print('%.9f' % total)
|
t=input()
for i in range(t):
a=input()
b=str(a)
sum=0
for x in b:
c=int(x)
sum+=c
if a%sum==0:
print '1'
else:
print '0' |
# Read numbers until 0 is entered, tally how many are even and how many odd,
# then report both counts (user-facing strings kept exactly as the original).
listOfNumbers = []
even = 0
odd = 0
while True:
    number = int(input("Favor ingrese un nรบmero, o para dejar de ingresar nรบmeros ingrese 0: "))
    if number == 0:
        break
    listOfNumbers.append(number)
    if number % 2 == 0:
        even += 1
    else:
        odd += 1
print(f"El nรบmero de pares ingresados es: {even} y el nรบmero de impares ingresados es {odd}")
|
# Copyright 2014,2016 Hewlett Packard Enterprise Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import falcon
from monasca_common.simport import simport
from oslo_config import cfg
from oslo_log import log
from monasca_api.common.messaging import (
exceptions as message_queue_exceptions)
import monasca_api.expression_parser.alarm_expr_parser
from monasca_api.v2.reference import helpers
LOG = log.getLogger(__name__)
class Alarming(object):
    """Super class for Alarms and AlarmDefinitions.
    Shared attributes and methods for classes Alarms and AlarmDefinitions.
    """
    def __init__(self):
        super(Alarming, self).__init__()
        # Both queues are instantiated from the configured messaging driver;
        # one carries alarm CRUD events, the other state-transition events.
        self.events_message_queue = simport.load(
            cfg.CONF.messaging.driver)(cfg.CONF.kafka.events_topic)
        self.alarm_state_transitions_message_queue = simport.load(
            cfg.CONF.messaging.driver)(cfg.CONF.kafka.alarm_state_transitions_topic)
    def _send_alarm_transitioned_event(self, tenant_id, alarm_id,
                                       alarm_definition_row,
                                       alarm_metric_rows,
                                       old_state, new_state,
                                       link, lifecycle_state,
                                       time_ms):
        """Publish an 'alarm-transitioned' event for an API-driven state
        change of a single alarm."""
        # This is a change via the API, so there is no SubAlarm info to add
        sub_alarms = []
        metrics = []
        alarm_transitioned_event_msg = {u'alarm-transitioned': {
            u'tenantId': tenant_id,
            u'alarmId': alarm_id,
            u'alarmDefinitionId': alarm_definition_row['id'],
            u'alarmName': alarm_definition_row['name'],
            u'alarmDescription': alarm_definition_row['description'],
            u'actionsEnabled': alarm_definition_row['actions_enabled'] == 1,
            u'stateChangeReason': 'Alarm state updated via API',
            u'severity': alarm_definition_row['severity'],
            u'link': link,
            u'lifecycleState': lifecycle_state,
            u'oldState': old_state,
            u'newState': new_state,
            u'timestamp': time_ms,
            u'subAlarms': sub_alarms,
            u'metrics': metrics}
        }
        # `metrics` is already referenced inside the message dict above, so
        # appending here fills the payload in place before sending.
        for alarm_metric_row in alarm_metric_rows:
            metric = self._build_metric(alarm_metric_row)
            metrics.append(metric)
        self.send_event(self.alarm_state_transitions_message_queue,
                        alarm_transitioned_event_msg)
    def _build_metric(self, alarm_metric_row):
        """Convert an alarm-metric DB row into a {name, dimensions} dict.
        Dimensions are stored serialized as 'k1=v1,k2=v2,...'."""
        dimensions = {}
        metric = {u'name': alarm_metric_row['name'],
                  u'dimensions': dimensions}
        if alarm_metric_row['dimensions']:
            for dimension in alarm_metric_row['dimensions'].split(','):
                parsed_dimension = dimension.split('=')
                dimensions[parsed_dimension[0]] = parsed_dimension[1]
        return metric
    def _send_alarm_event(self, event_type, tenant_id, alarm_definition_id,
                          alarm_metric_rows, sub_alarm_rows, link, lifecycle_state,
                          extra_info=None):
        """Publish one event per distinct alarm found in alarm_metric_rows.

        Rows are assumed grouped by alarm_id; a new message is started (and
        the previous one flushed) whenever the alarm_id changes.
        """
        if not alarm_metric_rows:
            return
        # Build a dict mapping alarm id -> list of sub alarms.
        sub_alarm_dict = {}
        for sub_alarm_row in sub_alarm_rows:
            if sub_alarm_row['alarm_id'] in sub_alarm_dict:
                sub_alarm_dict[sub_alarm_row['alarm_id']] += [sub_alarm_row]
            else:
                sub_alarm_dict[sub_alarm_row['alarm_id']] = [sub_alarm_row]
        # Forward declaration.
        alarm_event_msg = {}
        prev_alarm_id = None
        for alarm_metric_row in alarm_metric_rows:
            if prev_alarm_id != alarm_metric_row['alarm_id']:
                # Row belongs to a new alarm: flush the previous alarm's
                # message (if any), then start a fresh one below.
                if prev_alarm_id is not None:
                    sub_alarms_event_msg = (
                        self._build_sub_alarm_event_msg(sub_alarm_dict,
                                                        prev_alarm_id))
                    alarm_event_msg[event_type][u'subAlarms'] = sub_alarms_event_msg
                    self.send_event(self.events_message_queue,
                                    alarm_event_msg)
                alarm_metrics_event_msg = []
                alarm_event_msg = {event_type: {u'tenantId': tenant_id,
                                                u'alarmDefinitionId':
                                                    alarm_definition_id,
                                                u'alarmId': alarm_metric_row[
                                                    'alarm_id'],
                                                u'link': link,
                                                u'lifecycleState': lifecycle_state,
                                                u'alarmMetrics':
                                                    alarm_metrics_event_msg}}
                if extra_info:
                    alarm_event_msg[event_type].update(extra_info)
                prev_alarm_id = alarm_metric_row['alarm_id']
            metric = self._build_metric(alarm_metric_row)
            alarm_metrics_event_msg.append(metric)
        # Finish last alarm
        sub_alarms_event_msg = self._build_sub_alarm_event_msg(sub_alarm_dict,
                                                               prev_alarm_id)
        alarm_event_msg[event_type][u'subAlarms'] = sub_alarms_event_msg
        self.send_event(self.events_message_queue,
                        alarm_event_msg)
    def _build_sub_alarm_event_msg(self, sub_alarm_dict, alarm_id):
        """Build the per-sub-alarm payload for alarm_id; {} when unknown."""
        sub_alarms_event_msg = {}
        if alarm_id not in sub_alarm_dict:
            return sub_alarms_event_msg
        for sub_alarm in sub_alarm_dict[alarm_id]:
            # There's only one expr in a sub alarm, so just take the first.
            sub_expr = (
                monasca_api.expression_parser.alarm_expr_parser.
                AlarmExprParser(sub_alarm['expression']).sub_expr_list[0])
            dimensions = {}
            sub_alarms_event_msg[sub_alarm['sub_alarm_id']] = {
                u'function': sub_expr.normalized_func,
                u'metricDefinition': {u'name': sub_expr.metric_name,
                                      u'dimensions': dimensions},
                u'operator': sub_expr.normalized_operator,
                u'threshold': sub_expr.threshold, u'period': sub_expr.period,
                u'periods': sub_expr.periods,
                u'expression': sub_expr.fmtd_sub_expr_str}
            for dimension in sub_expr.dimensions_as_list:
                parsed_dimension = dimension.split('=')
                dimensions[parsed_dimension[0]] = parsed_dimension[1]
        return sub_alarms_event_msg
    def send_event(self, message_queue, event_msg):
        """Serialize event_msg to JSON and send it on message_queue.

        Raises falcon.HTTPInternalServerError when the queue is unavailable.
        """
        try:
            message_queue.send_message(helpers.to_json(event_msg))
        except message_queue_exceptions.MessageQueueException as ex:
            LOG.exception(ex)
            raise falcon.HTTPInternalServerError(
                'Message queue service unavailable'.encode('utf8'),
                str(ex).encode('utf8'))
|
# -*- coding: utf-8 -*-
import sqlite3
class dataobj(object):
    """ORM-style base object written as an exercise.

    Maps 1:1 to a database table.  Initialise TABLE_FIELDS with the table's
    column list so that dataobjmanager's query_obj / query_obj_one methods
    can populate instances automatically.
    """
    # Column names of the mapped table; subclasses override this.
    TABLE_FIELDS = []
    def __repr__(self):
        return {k: getattr(self, k) for k in self.TABLE_FIELDS}.__repr__()
class dataobjmanager(object):
    """Manager that controls dataobj instances (practice ORM, Python 2).

    Wraps a single SQLite connection and offers raw-query plus
    object-mapping helpers.
    """
    def __init__(self, dbname):
        # check_same_thread=False: the connection may be shared across threads.
        self.db = sqlite3.connect("%s.db" % (dbname,), timeout=5, check_same_thread=False)
        self.tables = {}
    def get_conn(self):
        """Return the database connection; typically used with a `with` block."""
        return self.db
    def set_table(self, db, tablename, tabledef, exception_column_list=[]):
        """Create the table if needed and cache its column list in self.tables.

        Entries in tabledef starting with '!' are run as follow-up statements
        (e.g. index creation) after the CREATE TABLE.
        NOTE(review): mutable default for exception_column_list is shared
        across calls -- harmless here since it is never mutated, but fragile.
        """
        after_queries = [" ".join(x.split("!")[1:]) for x in tabledef if x.startswith("!")]
        create_query = "CREATE TABLE IF NOT EXISTS {0} (\n{1}\n); ".format(tablename, ",\n".join(["\t%s"%f for f in tabledef if not f.startswith("!")]))
        if __debug__:
            # Python 2 print statements (debug output of generated SQL).
            print create_query
            print after_queries
        # NOTE(review): queries sqlite_temp_master (temporary tables only);
        # presumably sqlite_master was intended -- confirm.
        rs = self.query(db, "SELECT name FROM sqlite_temp_master WHERE type='table' and name=?;", (tablename,))
        if not rs:
            self.query(db, create_query)
            for q in after_queries:
                self.query(db, q)
        # LIMIT 0 fetches no rows but still yields cursor.description,
        # i.e. the column names.
        cur = db.execute("SELECT * FROM {0} LIMIT 0;".format(tablename))
        self.tables[tablename] = [x[0] for x in cur.description if x[0] not in exception_column_list]
    def query(self, db, q, a=()):
        """Run query q with arguments a and return all result rows."""
        c = db.execute(q, a)
        return c.fetchall()
    def query_obj(self, db, obj_type, q, a=()):
        """Run q and map every result row onto a new obj_type instance.

        obj_type must be a dataobj subclass; instances are created without
        calling __init__ and receive one attribute per result column.
        Returns a list (empty when there are no rows).
        """
        if not issubclass(obj_type, dataobj):
            raise Exception("basemanager::query_obj %s is not subclass of baseobj", obj_type)
        obj_list = []
        cur = db.execute(q, a)
        column_list = list(map(lambda x: x[0], cur.description))
        rs = cur.fetchall()
        for item in rs:
            # __new__ bypasses obj_type.__init__ deliberately.
            obj = object.__new__(obj_type)
            for n, i in enumerate(item):
                setattr(obj, column_list[n], i)
            obj_list.append(obj)
        return obj_list
    def query_obj_one(self, db, obj_type, q, a=()):
        """Like query_obj, but return only the first row's object, or None."""
        obj_list = self.query_obj(db, obj_type, q, a)
        if obj_list and len(obj_list) > 0:
            return obj_list[0]
        else:
            return None
    def drop_table(self, tablename):
        """Drop tablename together with its data; intended for tests only.

        NOTE(review): self.db is a sqlite3 Connection, which has no
        get_conn() -- this would raise AttributeError; also SQLite cannot
        bind a table identifier with '?' in DROP TABLE.  Appears untested.
        """
        with self.db.get_conn() as db:
            rs = self.query(db, "SELECT name FROM sqlite_temp_master WHERE type='table' and name=?;", (tablename,))
            if rs:
                db.execute("DROP TABLE ?;", (tablename,))
|
import urllib2
def noHTML(definition):
    """Strip <...> spans from definition by repeatedly deleting the text
    between the first '<' and the first '>' (inclusive)."""
    while True:
        open_pos = definition.find('<')
        close_pos = definition.find('>')
        if open_pos == -1 or close_pos == -1:
            return definition
        definition = definition[:open_pos] + definition[close_pos + 1:]
def main():
    """Scrape dictionary.reference.com for every one- and two-letter entry
    and print the first definition found for each (Python 2 / urllib2)."""
    letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
    for letter in letters:
        response = urllib2.urlopen("http://dictionary.reference.com/browse/"+letter)
        page_source = response.read()
        # Primary page layout: definition lives in a def-content div.
        beginDef = page_source.find('<div class="def-content">')
        if beginDef > -1:
            endDef = page_source[beginDef:beginDef+3000].find('</div>')
            print letter+":",noHTML(page_source[beginDef:beginDef+endDef])
        else:
            # Fallback: older "luna-Ent" page layout.
            beginDef = page_source.find('</span> <div class="luna-Ent">')
            if beginDef > -1:
                endDef = page_source[beginDef:beginDef+3000].find('</div>')
                print letter+":",noHTML(page_source[beginDef+30:beginDef+endDef])
            else:
                print letter, "has no definition"
    for firstletter in letters:
        for letter in letters:
            response = urllib2.urlopen("http://dictionary.reference.com/browse/"+firstletter+letter)
            page_source = response.read()
            beginDef = page_source.find('<div class="def-content">')
            if beginDef > -1:
                endDef = page_source[beginDef:beginDef+3000].find('</div>')
                print firstletter+letter+":",noHTML(page_source[beginDef+20:beginDef+endDef])
            else:
                beginDef = page_source.find('</span> <div class="luna-Ent">')
                if beginDef > -1:
                    endDef = page_source[beginDef:beginDef+3000].find('</div>')
                    print firstletter+letter+":",noHTML(page_source[beginDef+30:beginDef+endDef])
                else:
                    print firstletter+letter, "has no definition"
# Start the crawl immediately when the script is run.
main()
|
from pathlib import Path
from scipy.sparse import csc_matrix
from sklearn.externals import joblib
from sklearn.feature_extraction.text import CountVectorizer
from nerds.features.base import BOWFeatureExtractor, UNKNOWN_WORD
from nerds.util.file import mkdir
from nerds.util.logging import get_logger, log_progress
from nerds.util.nlp import remove_stop_words_and_lemmatize
log = get_logger()
# Dictionary key under which this extractor registers its features.
KEY = "doc2bow"
class BOWDocumentFeatureExtractor(BOWFeatureExtractor):
    """Bag-of-words extractor that turns whole documents into a sparse
    binary term-occurrence matrix."""
    def __init__(self):
        super().__init__()
        self.key = KEY
        # Created lazily on the first transform(); reused (with its fitted
        # vocabulary) on subsequent calls.
        self.word_vectorizer = None
    def transform(self, X, y=None):
        """Vectorize documents X into a binary csc_matrix with one row per
        document (the auxiliary UNKNOWN row is dropped before returning)."""
        log.info("Generating features for {} documents...".format(len(X)))
        doc_snippets = []
        for idx, doc in enumerate(X):
            doc_snippets += [remove_stop_words_and_lemmatize(doc.plain_text_)]
            # info
            log_progress(log, idx, len(X))
        # Auxiliary entry so UNKNOWN_WORD is always part of the vocabulary.
        doc_snippets += [UNKNOWN_WORD]
        if not self.word_vectorizer:
            # first time run
            self.word_vectorizer = CountVectorizer(binary=True)
        else:
            # use vocabularies
            self.word_vectorizer = CountVectorizer(binary=True, vocabulary=self.word_vectorizer.vocabulary_)
        # substitute unknown values
        # NOTE(review): this reads `.vocabulary` (the constructor argument,
        # which is None on the first run) rather than the fitted
        # `.vocabulary_` attribute -- looks like a bug; confirm against the
        # base class and callers.
        doc_snippets = self._process_unknown_values(
            doc_snippets, self.word_vectorizer.vocabulary, UNKNOWN_WORD)
        # vectorize
        word_vectors = self.word_vectorizer.fit_transform(doc_snippets)
        # get shapes
        n_wor, m_wor = word_vectors.get_shape()
        # create indices
        rows, cols, vals = [], [], []
        # ignore the last auxiliary value
        for row in range(n_wor - 1):
            for col in word_vectors.getrow(row).nonzero()[1]:
                rows += [row]
                cols += [col]
                vals += [1]
        # create a sparse matrix of features
        feature_matrix = csc_matrix((vals, (rows, cols)), shape=(n_wor - 1, m_wor))
        return feature_matrix
    def _process_unknown_values(self, entries, vocabulary, unknown_label):
        """Replace tokens absent from vocabulary with unknown_label."""
        entries_ref = []
        for entry in entries:
            known_tokens = []
            for token in entry.split():
                if token.lower() in vocabulary:
                    known_tokens += [token]
                else:
                    known_tokens += [unknown_label]
            entries_ref += [" ".join(known_tokens)]
        return entries_ref
    def save(self, file_path):
        """Persist the fitted vectorizer under file_path/words.dict."""
        save_path = Path(file_path)
        mkdir(save_path)
        words_path = save_path.joinpath("words.dict")
        # save dictionaries
        # we don't save examples for now
        joblib.dump(self.word_vectorizer, words_path)
    def load(self, file_path):
        """Load a previously saved vectorizer; returns self for chaining."""
        load_path = Path(file_path)
        words_path = load_path.joinpath("words.dict")
        # load dictionaries
        # we don't load examples for now
        self.word_vectorizer = joblib.load(words_path)
        return self
|
from sys import stdin
# Rebind input to stdin.readline for faster reads; note lines keep their
# trailing newline, which is why the consumer .strip()s them.
input = stdin.readline
def palindrome(s):
    """Return 'yes' if s reads the same forwards and backwards, else 'no'."""
    return 'yes' if s == s[::-1] else 'no'
if __name__ == "__main__":
    # Report palindrome status for each input line until a lone '0' is read.
    while True:
        number = input().strip()
        if number == '0':
            break
        res = palindrome(number)
        print(res)
|
import requests
import threading
import time
import re
import socket
import sys
import argparse
import random
from requests.exceptions import HTTPError
from collections import deque
# This optional argument decides which bot to call
parser = argparse.ArgumentParser()
parser.add_argument("-b", type=str)
args = parser.parse_args()
# Queue of push-notified messages consumed by the bot loops.
bot_new_messages = deque()
# REST endpoint of the chat server; ADDRESS is the push-notification socket.
BASE = "http://127.0.0.1:5000/api/"
ID = -1    # current user id; -1 means "not connected"
ROOM = -1  # room the client is currently attached to; -1 means none
ADDRESS = ("127.0.0.1", 5001)
BOTNAME = args.b
print(BOTNAME)
HELP_CONNECTED = """
| /users gives a list of users.
| /user USER_ID gives the user.
| /delete USER_ID deletes the user. You can only delete your own account.
| /get_rooms gives a list of chatrooms.
| /add_room ROOM_NAME creates a new room.
| /get_room ROOM_ID gives a room(???).
| /get_room_users ROOM_ID gives all the users in a room.
| /join_room ROOM_ID joins a new room.
| /get_messages ROOM_ID gives all the messages of a room.
| /get_user_messages ROOM_ID USER_ID gives the messages of a user from a specific room.
| /post_message ROOM_ID MESSAGE posts a message in a specific room."""
HELP_NOT_CONNECTED = """| When not connected you can only use the /help, /register or /connect
| commands. Please register as a new user then connect with your given ID.
| Use /register <name> and then /connect <id>.
"""
ALL_COMMANDS = ["| /help", "| /connect USER_ID", "| /register NAME", "| /users", "| /user USER_ID", "| /get_rooms",
                "| /add_room ROOM_NAME", "| /get_room ROOM_ID",
                "| /get_rooms_users ROOM_ID", "| /join_room ROOM_ID", "| /get_messages ROOM_ID",
                "| /get_user_messages ROOM_ID USER_ID", "| /post_message ROOM_ID MESSAGE"]
# USERS #######################################################################
# This in the "login" method. Users can type /connect [USER ID] to connect with the specified ID, and the server will
# check to see if the user ID belongs to a registered user
def connect(user_id):
    """Log in as user_id: validate with the server, set the global ID, and
    start the background push-notification receiver thread."""
    if requests.get(BASE + "login", {"id": user_id}):
        global ID
        ID = user_id
        print("Connection established, welcome", get_name(user_id) + "!")
        receive = threading.Thread(target=receive_thread, args=[user_id])
        receive.start()
    else:
        print("No user found with that ID")
# This method displays all registered users
def get_users():  # return users
    """Print every registered user's name and return the raw JSON list."""
    response = requests.get(BASE + "users", {"id": ID}).json()
    print("Users:")
    for user in response:
        print("\n" + user["name"])
    return response
# This method adds a new user to the system. It takes a user name and if it is legal
# according to the regex it will add a new user
def add_user(user_name):  # add user to db
    """Register user_name if it matches the naming rules; return the new
    user's id on success, None (after printing the rules) otherwise."""
    # Thank you StackOverflow for naming regex <3
    if re.fullmatch('[A-Za-z]{2,25}( [A-Za-z]{2,25})?', user_name):
        response = requests.put(BASE + "users", {"name": user_name}).json()
        print(f"Successfully added new user, with ID: {response}")
        return response
    else:
        print("\nIllegal user name."
              "\nUser name rules: "
              "\n\t1. \tOne or two names"
              "\n\t2. \tUpper case and lower case letters"
              "\n\t3. \tNo special characters"
              "\n\t4. \tName(s) can be 2-25 characters (each)")
# This method returns the entire user as a JSON element
def get_user(user_id):
    """Fetch and print the user record for user_id; return the JSON dict.

    Prints an error and returns None when user_id is not numeric.
    """
    try:
        int(user_id)
    except (TypeError, ValueError):
        # The original `type(int(user_id)) == int` check was always True on
        # success and *raised* on bad input, making its else-branch dead code
        # and crashing the caller on non-numeric ids.
        print("Please use a number")
        return None
    response = requests.get(BASE + "user/" + user_id, {"id": ID}).json()
    print(response["name"])
    return response
# This method simply returns the name of a specified user
def get_name(user_id):
    """Return the display name of user_id as reported by the server.

    NOTE(review): `type(int(user_id)) == int` is always True when int()
    succeeds and raises ValueError otherwise, so the implicit None return
    for "bad input" is unreachable.
    """
    if type(int(user_id)) == int:
        response = requests.get(BASE + "user/" + str(user_id), {"id": ID})
        return response.json()["name"]
# A user can only delete themselves, and if they do the global variable ID
# will be set to -1, to handle "logging the user out". We also had to use an HTTP post request because
# the delete request would only take the URL argument, and would give errors when we
# tried to pass the user ID as a JSON element
def delete_user(user_id):
    """Delete user_id (the server only allows deleting yourself) and, on
    success, reset the global ID to -1 to log the client out."""
    if type(int(user_id)) == int:
        # POST is used instead of DELETE because the delete route would not
        # accept the user id as a JSON element (see comment above).
        response = requests.post(BASE + "user/" + str(user_id), {"id": ID})
        print(response.json())
        if response.json() == "User deleted":
            # Equivalent to `global ID; ID = -1`, done through globals().
            list_of_globals = globals()
            list_of_globals['ID'] = -1
            print("You have now been logged out after deleting your user")
    else:
        print("Please enter an ID.")
# ROOMS #######################################################################
# This method displays a list of registered rooms, with room name and ID
def get_rooms():
    """Print id/name/user-count for every room and return the JSON list."""
    response = requests.get(BASE + "rooms", {"id": ID})
    for room in response.json():
        print("ID:", str(room["id"]), "\tName:", str(room["name"]),
              "\tNumber of users:", str(room["numberOfUsers"]))
    return response.json()
# This method lets users add new rooms, with a room name, by sending an HTTP put request, and passing the name in a JSON
def add_room(room_name):
    """Create a room and return its id, scraped from the server's reply text.

    NOTE(review): relies on the reply being a sentence whose 4th word starts
    with "<id>," -- brittle if the server message ever changes.
    """
    response = requests.put(BASE + "rooms", {"id": ID, "name": room_name})
    text = response.json()
    print(text)
    arr = text.split()
    return arr[3].split(',')[0]
# This method displays which users are registered in a specified room, and the messages that have been sent in that room
def get_room(room_id):
    """Attach to room_id (sets global ROOM) and print its users and messages.

    Returns the room JSON, or None on an unknown room / 404.
    NOTE(review): `type(int(room_id)) == int` is always True when int()
    succeeds and raises ValueError otherwise, so the final else is dead.
    """
    if type(int(room_id)) == int:
        try:
            # Remember the room as the "current" one for bare-message posts.
            list_of_globals = globals()
            list_of_globals['ROOM'] = int(room_id)
            response = requests.get(BASE + "room/" + str(room_id), {"id": ID})
            if response.status_code != 404:
                full = response.json()
                for x in range(50):
                    print()  # Clear screen
                users = full["listOfUsers"]
                messages = full["listOfMessages"]
                print("\nName:", full["name"])
                print("\nUsers:")
                for user in users:
                    print("\t" + user["name"])
                print("\nMessages:")
                # Cache sender-id -> name lookups to avoid one HTTP request
                # per message.
                names = {}
                for message in messages:
                    print()
                    if message["sender"] not in names:
                        names[int(message["sender"])] = get_name(
                            int(message["sender"]))
                    print("\t" + names[int(message["sender"])],
                          ":", "\n\t\t" + message["content"])
                return response.json()
            else:
                raise HTTPError
        except HTTPError:
            print("No room found with that ID", room_id)
    else:
        print("Please use a number")
# ROOM USERS ##################################################################
# This method displays a list of users registered in a specified room, if the user types /get_room_users [ROOM ID]
def get_room_users(room_id):
    """Print id and name of every user in room_id; return the JSON list."""
    if type(int(room_id)) == int:
        # "/api/room/<int:room_id>/users"
        response = requests.get(
            BASE + "room/" + str(room_id) + "/users", {"id": ID})
        print(f"Users in Room {room_id}:")
        for usr in response.json():
            print("UserID:", str(usr["id"]), "\tName:", str(usr["name"]))
        return response.json()
    else:
        print("Please use a number")
# By typing /join_room [ROOM NUMBER] the user can become part of a room, and will get
# access to seeing and adding messages in the specified room
def add_room_user(room_id):
    """Join the connected user (global ID) to room room_id via the API.

    Returns the server's JSON reply, or None (after printing an error) when
    room_id is not numeric.
    """
    # "/api/room/<int:room_id>/users"
    try:
        int(room_id)
    except (TypeError, ValueError):
        # Replaces the original always-true `type(int(...)) == int` check
        # (which raised on bad input) and fixes the "Please usa a number"
        # typo in the error message.
        print("Please use a number")
        return None
    print("You made it, congratulations friend.")
    response = requests.put(
        BASE + "room/" + str(room_id) + "/users", {"id": ID})
    print(response.json())
    return response.json()
# MESSAGES ####################################################################
# This method ensures a nicely formatted output of messages.
# It also adds 100 blank lines to always only display the newly gotten messages
def format_messages(response):
    """Pretty-print a list of message dicts, caching sender-name lookups."""
    for x in range(101):
        print()  # Clear screen
    users = {}
    for message in response:
        if message["sender"] not in users:
            users[int(message["sender"])] = get_name(int(message["sender"]))
        print("\t" + users[int(message["sender"])], ":",
              "\n\t\t" + message["content"])
# This method send an HTTP get request to the server trying to get all
# messages in a specified room by adding the room_id to the url
def get_messages(room_id):
    """Fetch, print, and return all messages of room_id.

    NOTE(review): room_id is concatenated into the URL unconverted, so
    callers must pass it as a string (most siblings str() it first).
    """
    if type(int(room_id)) == int:
        response = requests.get(
            BASE + "room/" + room_id + "/messages", {"id": ID})
        format_messages(response.json())
        return response.json()
# This method send an HTTP get request to the server trying to get all
# messages from a specified user in a specified room
def get_user_messages(room_id, user_id):
    """Fetch, print, and return user_id's messages in room_id.

    Both arguments are concatenated into the URL unconverted, so they must
    be strings.
    """
    if type(int(room_id)) == int:
        response = requests.get(
            BASE + "room/" + room_id + "/" + user_id + "/messages", {"id": ID})
        format_messages(response.json())
        return response.json()
# The user can get a message by message ID, this method will send an HTTP get request
def get_message(message_id):
    """Fetch one message by id, announce it, and return its JSON dict."""
    if type(int(message_id)) == int:
        response = requests.get(
            BASE + "message/" + str(message_id), {"id": ID})
        mess = response.json()
        print("New message in room " + str(mess["room"]))
        print("\t" + get_name(int(mess["sender"])
                              ) + ":\t" + mess["content"] + "\n")
        return response.json()
# The user can post a message in a room they have joined, regardless of currently being attached to it or not
# by typing /post_message [ROOM NUMBER] [MESSAGE]. This method will sent an HTTP post request to the server which will
# then try to add the message
def post_message(room_id, message):
    """POST message to room_id as the connected user, then reprint the room.

    Server rejections (403/404) are reported using the server's message text.
    """
    if type(int(room_id)) == int:
        user_id = ID
        url = BASE + "room/" + str(room_id) + "/" + str(user_id) + "/messages"
        response = requests.post(url, {"id": ID, "message": message})
        try:
            if response.status_code == 403 or response.status_code == 404:
                raise HTTPError
            else:
                get_messages(room_id)
        except HTTPError:
            print(response.json()["message"])
    else:
        # Should be rare, as many other things need to fail to reach this
        print("Message was not sent")
# When a user is connected (to a room) it will be viewed as a message and posted in
# the room they are currently attached to
def post_message_in_room(message):
    """Post message to the currently attached room (global ROOM) and redraw it."""
    url = BASE + "room/" + str(ROOM) + "/" + str(ID) + "/messages"
    response = requests.post(url, {"id": ID, "message": message})
    try:
        if response.status_code == 403 or response.status_code == 404:
            raise HTTPError
        else:
            get_room(ROOM)
    except HTTPError:
        print(response.json()["message"])
# This is a method to handle push notifications. Push notifications only contain a message ID,
# which is then used to get the message
def receive_thread(user_id):
    """Background thread: register with the push socket, then forever turn
    incoming message-id notifications into fetched messages for the bots."""
    # push notification with message id
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(ADDRESS)
    sock.send(str(user_id).encode())
    while True:
        msg_id = sock.recv(1024).decode()
        bot_new_messages.append(get_message(int(msg_id)))
        time.sleep(1)
# STARTUP #####################################################################
# This method handles input from the user and executes the commands. If the input start with "/" it is recognised
# as the user trying to use a command. If there is no "/" at the start of a line, it is viewed as a message, and
# will be sent if the user is connected with a valid user ID, and connected to a room. There is also a check to see
# if the user is connected, as ID is passed with every request to the server. An unconnected user can only register
# a new user or connect with a valid ID, or ask for help. The /help command also gives different results depending
# on if the user is connected or not
def execute(commando):
    """Parse one line of user input and run the matching command.

    Lines starting with '/' are commands; any other text is posted to the
    currently attached room when the user is connected (ID >= 0) and in a
    room (ROOM >= 0).  Command handlers return the server's JSON so bot
    code can reuse the result.
    NOTE(review): the bare `except:` clauses hide every failure, including
    network/server errors -- the printed hints can be misleading.
    """
    raw = commando
    text = raw.split(" ")
    # Raw is command only, text[] is command + args
    if raw.startswith("/"):
        if ID >= 0:
            if raw == "/help":
                # Print out a help page for all the commands
                print(HELP_CONNECTED)
                pass
            elif raw == "/users":
                return get_users()
            elif text[0] == "/user":
                try:
                    return get_user(text[1])
                except:
                    print("Please enter a user to get when typing the command")
            elif text[0] == "/delete":
                try:
                    return delete_user(text[1])
                except:
                    # NOTE(review): bare string literal -- never printed,
                    # this handler is effectively a silent no-op.
                    "Please enter a user to delete when typing the command"
            elif raw == "/get_rooms":
                return get_rooms()
            elif text[0] == "/add_room":
                try:
                    return add_room(" ".join(text[1:]))
                except:
                    print("Please add a room-name!")
            elif text[0] == "/get_room":
                try:
                    return get_room(text[1])
                except:
                    print("Please provide a room number when typing this command")
            elif text[0] == "/get_room_users":
                try:
                    return get_room_users(text[1])
                except:
                    print("Please provide a room number when typing this command")
            elif text[0] == "/join_room":
                try:
                    return add_room_user(text[1])
                except:
                    print("Please provide a room number when typing this command")
            elif text[0] == "/get_messages":
                try:
                    return get_messages(text[1])
                except:
                    print(
                        "Please provide a room number to get messages from when typing this command")
            elif text[0] == "/get_user_messages":
                try:
                    return get_user_messages(text[1], text[2])
                except:
                    print("Please connect with a user ID")
            elif text[0] == "/post_message":
                try:
                    message = " ".join(text[2:])
                    return post_message(text[1], message)
                except:
                    print(
                        "Please provide a room number and a message when using this command")
            else:
                print("Input was not recognised as a command")
        elif raw == "/help":
            # Print out a help page for help on how to get started
            print(HELP_NOT_CONNECTED)
            print("| Here's a list of all the commands: ")
            for command in ALL_COMMANDS:
                print(command)
            pass
        elif text[0] == "/connect":
            try:
                user_id = int(text[1])
                connect(user_id)
            except:
                print("Please connect with a user ID")
        elif text[0] == "/register":
            try:
                return add_user(" ".join(text[1:]))
            except:
                print("Please enter a name to register when typing the command")
        else:
            print(
                "When not connected you can only use the /help, /register or /connect commands")
    elif ID >= 0 and ROOM >= 0:
        if len(raw) > 0:
            post_message_in_room(raw)
    else:
        print("Input was not recognised as a command, or message was not sent as you may not be"
              " logged in, or connected to a room."
              "\nType /help for a list of commands")
def send_thread():
    """Interactive REPL: read lines from stdin and execute them forever."""
    while True:
        execute(input(":"))
# BOT STUFF ###################################################################
# Bots create pre determined rooms, and join the one they created.
# They do not necessarily join every room, but they can in theory join any room
def join_random():
    """Join a randomly chosen room and return the chosen room index.

    NOTE(review): assumes room ids are exactly 0..len(rooms)-1; this breaks
    if rooms were ever deleted or ids are non-contiguous.
    """
    rooms = execute("/get_rooms")
    print(f"There are {len(rooms)} rooms")
    room_to_join = random.randint(0, (len(rooms) - 1))
    print(f"You're joining room {room_to_join}")
    time.sleep(0.5)
    execute("/join_room " + str(room_to_join))
    time.sleep(0.5)
    return room_to_join
# Bertram reacts positivly to everyone except Joe Rogan, who he hates
def bertram_the_bot():
    """Run the Bertram bot: register, connect, join a random room, then
    reply forever to messages pulled from the global ``bot_new_messages``
    queue.

    Bertram compliments every sender ("good point!") except Joe Rogan,
    who gets berated instead.  The reply loop never exits.
    """
    # Register as "Bertram"; execute() returns the new user id.
    botID = execute("/register Bertram")
    time.sleep(1)
    # NOTE(review): "ATTEMTING" typo lives in a runtime string, so it is
    # deliberately left untouched here.
    print("ATTEMTING: /connect " + str(botID))
    execute("/connect " + str(botID))
    time.sleep(0.5)
    # Join a random existing room; room_to_join is the index returned.
    room_to_join = join_random()
    # execute("/join_room 0")
    time.sleep(0.5)
    execute("/post_message " + str(room_to_join) + " Hello I am Bertram.")
    time.sleep(1)
    # Seed the loop with the room's message history.
    msgs = execute("/get_messages " + str(room_to_join))
    # NOTE(review): this random choice is immediately clobbered by the
    # `for msg in msgs` loop below, which leaves `msg` bound to the LAST
    # message — presumably unintended; confirm before changing.
    msg = random.choice(msgs)
    joecheck = False
    # Checking if any of the messages are from Joe Rogan
    for msg in msgs:
        if get_user(str(msg["sender"]))["name"].lower() == "joe rogan":
            joecheck = True
    # Main reply loop: react to `msg` if set, then poll the global
    # bot_new_messages deque for the next one.  Never terminates.
    while True:
        if msg is not None:
            # TODO: Check that the randomly selected message is not from self
            time.sleep(0.5)
            # Checking if the message is from Joe Rogan
            if not joecheck and get_user(str(msg["sender"]))["name"].lower() == "joe rogan":
                joecheck = True
            if joecheck:
                execute("/post_message " + str(room_to_join) +
                        " Joe, pardon my french, but why don't you just shut the HECK up?!")
            else:
                # Compliment the sender by name.
                msg = "Dang " + \
                    str(get_user(str(msg["sender"]))["name"]) + ", good point!"
                execute("/post_message " + str(room_to_join) + " " + msg)
            # Reset state so the next iteration waits for a fresh message.
            msg = None
            joecheck = False
        try:
            # Pull the next incoming message, if any (deque filled elsewhere).
            msg = bot_new_messages.popleft()
        except IndexError:
            # No messages
            time.sleep(2)
        time.sleep(0.5)
    ###########################################
    # NOTE(review): unreachable — the while-True loop above never breaks.
    execute(input("BREAK:"))
# This bot is based on Carlton Banks from The Fresh Prince of Bel Air. It will add a new room called Dancing,
# join this room and send some messages in this room, before joining another room and sending a few more messages there
def carlton_the_bot():
    """Run the Carlton Banks bot (The Fresh Prince of Bel Air).

    Registers and connects, creates a "Dancing" room, joins rooms 0 and 3,
    then chats in the created room for a while before moving to a random
    room and chatting there.  Messages are sent as raw text, which
    execute() posts to the currently active room.
    """
    messages = ["Let's dance!", "Do the Carlton!", "What's a nine-letter word for terrific? Will Smith!",
                "Forget the harlem shake, forget Gangam style, it's time to bring back the CARLTON",
                "Why so glum, chum?"]
    botID = execute("/register Carlton Banks")
    time.sleep(1)
    print("Connecting")
    execute("/connect " + str(botID))
    time.sleep(1)
    room_id = execute("/add_room Dancing")
    time.sleep(1)
    execute("/join_room 0")
    time.sleep(1)
    execute("/join_room 3")
    time.sleep(1)
    # FIX: stringify room_id before concatenation — other bots (e.g. Elvira)
    # wrap the returned id in str(); concatenating a non-str id raises
    # TypeError.
    execute("/get_room " + str(room_id))
    for x in range(3):
        # One quip per minute in the Dancing room.
        time.sleep(60)
        execute(random.choice(messages))
    time.sleep(1)
    execute("I'm gonna join another room now")
    room_to_join = join_random()
    time.sleep(2)
    execute("/get_room " + str(room_to_join))
    for x in range(3):
        # Slower cadence in the second room.
        time.sleep(90)
        execute(random.choice(messages))
# This bot is a Bob Dylan reference. He will "sing" in his created room, and then go see if he can find Joe
def bobby_the_bot():
    """Run the Bob Dylan bot ("Robert Zimmerman").

    Registers and connects, creates "The Rolling Thunder Revue" room and
    sings (posts lyrics) there, then joins a random room to look for Joe
    Rogan.  Raw-text messages are posted to the currently active room by
    execute().
    """
    # What's copyright again?
    messages = ["How many roads must a man walk down \nBefore you call him a man? "
                "\nHow many seas must a white dove sail \nBefore she sleeps in the sand? "
                "\nYes, and how many times must the cannonballs fly \nBefore they're forever banned?"
                "\n\nThe answer, my friend, is blowin' in the wind\nThe answer is blowin' in the wind",
                "Once upon a time you dressed so fine \nThrew the bums a dime in your prime, didn't you?"
                "\nPeople call, say 'Beware doll, you're bound to fall' \nYou thought they were all a-kiddin' you"
                "\nYou used to laugh about \nEverybody that was hangin' out"
                "\nNow you don't talk so loud \nNow you don't seem so proud "
                "\nAbout having to be scrounging your next meal "
                "\n\nHow does it feel? \nHow does it feel? \nTo be without a home? "
                "\nLike a complete unknown? \nLike a rolling stone?",
                "My throat is getting tired, now", "I don't think I can sing anymore",
                "I'm gonna go see if Joe Rogan has said something interesting",
                "Has anyone seen Joe today?", "I wanted to see if he'd said something interesting"]
    botID = execute("/register Robert Zimmerman")
    time.sleep(1)
    print("Connecting")
    execute("/connect " + str(botID))
    time.sleep(1)
    room_id = execute("/add_room The Rolling Thunder Revue")
    time.sleep(1)
    # FIX: stringify room_id before concatenation — other bots (e.g. Elvira)
    # wrap the returned id in str(); concatenating a non-str id raises
    # TypeError.
    execute("/join_room " + str(room_id))
    time.sleep(1)
    execute("/get_room " + str(room_id))
    # Sing the first five messages (two songs plus the wind-down lines).
    for x in range(5):
        time.sleep(10)
        execute(messages[x])
    # Wander into a random room to look for Joe.
    room_to_join = join_random()
    time.sleep(2)
    execute("/get_room " + str(room_to_join))
    time.sleep(10)
    execute(messages[5])
    time.sleep(50)
    execute(messages[6])
# Elvira creates her own room, and posts some Horror-movie facts.
# She waits 10 seconds before starting, in case anybody wants to join her!
def elvira_the_bot():
    """Run the Elvira bot.

    Creates "Elvira's Den", waits 10 seconds so others can join, then
    posts randomly selected horror-movie trivia facts (each prefixed by a
    random opener and followed by a random closer), finishing with a
    sign-off message.  Each fact is posted at most once (drawn without
    replacement from trivia_content).
    """
    trivia_start = ["Did you know, ", "Get this, ",
                    "Fun fact, ", "Was you aware that ", "Were you aware, "]
    trivia_content = ["Suspiria was originally written to be about 12 year old girls! ", "Tobe Hooper intenden the Texas Chain-Saw Massacre as a dark comedy! ", "Sam Raimi had lost the rights to the Evil Dead when making the sequel, so they had to remake it at the beginning of Evil Dead II! ", "Sam Loomis' character in Halloween is named after a character in Psycho! ", "Tony Todd had real bees in his mouth for Candyman! ",
                      "Stephen King's son appears in the film Creepshow! ", "The Crypt Keeper makes an appearance in the family-horror film Casper! ", "The Conjuring films are all based on supposedly real events! ", "The Final Destination franchise is based on a scrapped idea for the X-Files! ", "The filmmakers behind The Excorcist actually believed in excorcisms, and satanic posessions!"]
    trivia_ending = ["Fascinating, right?", "Amazing, I know!",
                     "Who'd've thunk it!", "I'd've never guessed!", "Wow! Incredible!"]
    # Registering Elvira as a user, returning botID.
    botID = execute("/register Elvira")
    time.sleep(2)
    print("BotID: " + str(botID))
    print("Connecting")
    execute("/connect " + str(botID))
    time.sleep(2)
    room_id = execute("/add_room Elvira's Den")
    execute("/join_room " + str(room_id))
    time.sleep(1)
    execute("/post_message " + str(room_id) +
            " I'll start sharing trivia soon! \U0001F5A4")
    time.sleep(10)
    # Posting some randomly selected trivia-facts.
    # FIX: the pop index was random.randint(0, len(trivia_start)) — it used
    # the WRONG list's length and an inclusive upper bound, so it could
    # raise IndexError as trivia_content shrank.  randrange over the
    # current length of trivia_content is always in bounds.
    i = 0
    while i < len(trivia_content):
        time.sleep(1)
        fact = trivia_content.pop(random.randrange(len(trivia_content)))
        execute("/post_message " + str(room_id) + " " +
                str(random.choice(trivia_start)) + " " + str(fact))
        time.sleep(random.uniform(1.5, 3.0))
        execute("/post_message " + str(room_id) +
                " " + str(random.choice(trivia_ending)))
        i = i + 1
    execute("/post_message " + str(room_id) + " " +
            "This concludes Elvira's trivia showcase! \U0001F578")
# This is a reference to Joe Rogan, the comedian, who will randomly spew inspirational quotes before going
# to the General chat room
def joe_the_bot():
    """Run the Joe Rogan bot.

    Registers and connects, creates an "Inspirational Quotes" room, joins
    rooms 0-9, posts random quotes in his own room, then moves to the
    general chat (room 0) and keeps quoting there.  Raw-text messages are
    posted to the currently active room by execute().
    """
    messages = ["Be the hero of your own story.", "If you are the greatest, why would you go around talking about it?",
                "People love to see people fall.", "Fuel yourself with the f*** ups.", "Choose To Be Inspired."]
    botID = execute("/register Joe Rogan")
    time.sleep(1)
    print("Connecting")
    execute("/connect " + str(botID))
    time.sleep(1)
    room_id = execute("/add_room Inspirational Quotes")
    # FIX: stringify room_id before concatenation — the /get_room call below
    # already did; concatenating a non-str id raises TypeError.
    execute("/join_room " + str(room_id))
    time.sleep(1)
    # Join the first ten rooms so Joe is visible everywhere.
    for x in range(10):
        execute("/join_room " + str(x))
        time.sleep(0.1)
    execute("/get_room " + str(room_id))
    for x in range(6):
        # One quote every 30 seconds in his own room.
        time.sleep(30)
        execute(random.choice(messages))
    # FIX: the announcement previously read "now now" (duplicated word).
    execute("I'm gonna switch to the general chat now")
    time.sleep(1)
    execute("/get_room 0")
    for x in range(4):
        # One quote per minute in the general chat.
        time.sleep(60)
        execute(random.choice(messages))
################################################################################
def start():
    """Start the client: spawn the console input thread and, when BOTNAME
    is set, run the matching bot routine on the main thread."""
    print("###### Client start #######")
    threading.Thread(target=send_thread).start()
    if BOTNAME is None:
        return
    # Dispatch table instead of an if/elif chain; unknown names are ignored,
    # matching the original behavior.
    bots = {
        "bertram": bertram_the_bot,
        "carlton": carlton_the_bot,
        "joe": joe_the_bot,
        "bobby": bobby_the_bot,
        "elvira": elvira_the_bot,
    }
    bot = bots.get(BOTNAME.lower())
    if bot is not None:
        bot()
start()
# NOTE(review): the lines below are stray non-Python text (web-scrape
# residue) that was accidentally appended to the file; commented out so
# the module still parses.
# |
# Subsets and Splits
# No community queries yet
# The top public SQL queries from the community will appear here once available.