index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,500 | 31acb958d423f07af0c758b0d20f2ee30b7ca2a3 |
def main():
    """Read and return the full contents of guru99.txt.

    BUG FIX: the original opened the file and never closed it (handle leak)
    and discarded the handle; the context manager guarantees closure and the
    contents are now returned (previously None, so callers are unaffected).
    """
    with open("guru99.txt", "r") as f:
        return f.read()
|
24,501 | 9eda5ec134b4725fccc850253fe8856384529db7 | from ctypes import *
from .pmat_blasfeo_wrapper import *
from .pvec import *
from .blasfeo_wrapper import *
from multipledispatch import dispatch
from abc import ABC
class pmat_(ABC):
    """Abstract marker base class for prmt matrix types (used for dispatch)."""
class pmat(pmat_):
    """Dense matrix backed by a BLASFEO dmat allocated through the C wrapper.

    Supports scalar indexing ``A[i, j]`` and 2-D slice indexing
    ``A[i0:i1, j0:j1]``. Slices must carry explicit integer start/stop
    (``A[:2, :]`` would hit a None comparison — TODO confirm callers never
    pass open slices); slice step is ignored.
    """

    blasfeo_dmat = None  # opaque handle to the underlying BLASFEO dmat

    def __init__(self, m: int, n: int):
        """Allocate an m-by-n BLASFEO matrix (contents uninitialized)."""
        self.blasfeo_dmat = c_pmt_create_blasfeo_dmat(m, n)
        self._m = m
        self._n = n

    @property
    def m(self):
        # number of rows (read-only)
        return self._m

    @property
    def n(self):
        # number of columns (read-only)
        return self._n

    def __getitem__(self, index):
        """Return element A[i, j], or a copied submatrix for a slice pair."""
        if isinstance(index, tuple):
            if len(index) != 2:
                raise Exception('pmat subscript should be a 2-dimensional tuples, '
                                'you have: {}\n. Exiting'.format(index))
            if isinstance(index[0], int) and isinstance(index[1], int):
                # BUG FIX: valid indices are 0..m-1 / 0..n-1, so reject >= m
                # and >= n; the old `> self.m` check let the out-of-range
                # index m (and n) through to the C getter.
                if index[0] < 0 or index[0] >= self.m or \
                        index[1] < 0 or index[1] >= self.n:
                    raise Exception('Invalid subscripting values. Exiting. \n')
                el = pmat_get(self, index[0], index[1])
                return el
            elif isinstance(index[0], slice) and isinstance(index[1], slice):
                # for slices, stop may equal the dimension (exclusive bound)
                if index[0].start < 0 or index[0].stop > self.m or \
                        index[1].start < 0 or index[1].stop > self.n:
                    raise Exception('Invalid subscripting values. Exiting. \n')
                m_value = index[0].stop - index[0].start
                n_value = index[1].stop - index[1].start
                submatrix = pmat(m_value, n_value)
                # TODO(andrea): there might be better performing implementations of this.
                for i in range(m_value):
                    for j in range(n_value):
                        submatrix[i, j] = self[index[0].start + i, index[1].start + j]
                return submatrix
            else:
                raise Exception('pmat subscript should be a 2-dimensional tuples, '
                                'you have: {}\n. Exiting'.format(index))

    def __setitem__(self, index, value):
        """Set element A[i, j] to a scalar, or a slice region to a pmat."""
        if isinstance(index, tuple):
            if len(index) != 2:
                raise Exception('pmat subscript should be a 2-dimensional tuples, '
                                'you have: {}\n. Exiting'.format(index))
            if isinstance(index[0], int) and isinstance(index[1], int):
                # BUG FIX: same off-by-one as __getitem__ — use >= bounds.
                if index[0] < 0 or index[0] >= self.m or \
                        index[1] < 0 or index[1] >= self.n:
                    raise Exception('Invalid subscripting values. Exiting. \n')
                pmat_set(self, value, index[0], index[1])
            elif isinstance(index[0], slice) and isinstance(index[1], slice):
                m_target = index[0].stop - index[0].start
                n_target = index[1].stop - index[1].start
                if m_target != value.m or n_target != value.n:
                    raise Exception('Dimension mismatch: ({},{}) <- ({},{}). Exiting.'.format(m_target, n_target, value.m, value.n))
                if index[0].start < 0 or index[0].stop > self.m or \
                        index[1].start < 0 or index[1].stop > self.n:
                    raise Exception('Invalid subscripting values. Exiting. \n')
                # TODO(andrea): there might be better performing implementations of this.
                for i in range(m_target):
                    for j in range(n_target):
                        self[index[0].start + i, index[1].start + j] = value[i, j]
            else:
                raise Exception('pmat subscripts must be 2-dimensional tuples, '
                                'you have: {}\n. Exiting'.format(index))
        else:
            raise Exception('pmat subscripts must be 2-dimensional tuples, '
                            'you have: {}\n. Exiting'.format(index))
# class pmat(pmat_):
# blasfeo_dmat = None
# _i = None
# _j = None
# def __init__(self, m: int, n: int):
# self.blasfeo_dmat = c_pmt_create_blasfeo_dmat(m, n)
# def __getitem__(self, index):
# if self._i is not None:
# self._j = index
# el = self.my_get_item()
# return el
# self._i = index
# return self
# def __setitem__(self, index, value):
# self._j = index
# self.my_set_item(value)
# return
# def my_set_item(self, value):
# pmat_set(self, value, self._i, self._j)
# self._i = None
# self._j = None
# return
# def my_get_item(self):
# el = pmat_get(self, self._i, self._j)
# self._i = None
# self._j = None
# return el
# TODO(andrea): ideally one would have three levels:
# 1) high-level
# 2) intermediate-level
# 3) low-level (BLASFEO wrapper)
# high-level linear algebra
    @dispatch(pmat_)
    def __mul__(self, other):
        """Matrix-matrix product: return self @ other as a new pmat."""
        if self.n != other.m:
            raise Exception('__mul__: mismatching dimensions:'
                ' ({}, {}) x ({}, {})'.format(self.m, self.n, other.m, other.n))
        res = pmat(self.m, other.n)
        pmat_fill(res, 0.0)
        # zero_mat is the additive C term of gemm (D = C + A*B).
        # NOTE(review): res is zero-filled as well, which looks redundant if
        # gemm fully overwrites D — confirm against the C wrapper.
        zero_mat = pmat(self.m, other.n)
        pmat_fill(zero_mat, 0.0)
        pmt_gemm_nn(self, other, zero_mat, res)
        return res
    @dispatch(pvec_)
    def __mul__(self, other):
        """Matrix-vector product: return self @ other as a new pvec."""
        if self.n != other.blasfeo_dvec.m:
            raise Exception('__mul__: mismatching dimensions:'
                ' ({}, {}) x ({},)'.format(self.m, self.n, other.blasfeo_dvec.m))
        res = pvec(self.m)
        res.fill(0.0)
        # zero_vec is the additive c term of gemv (d = c + A*b); res is also
        # zeroed — presumably redundant, verify against the C wrapper.
        zero_vec = pvec(self.m)
        zero_vec.fill(0.0)
        pmt_gemv_n(self, other, zero_vec, res)
        return res
    @dispatch(pmat_)
    def __add__(self, other):
        """Elementwise sum: return self + other as a new pmat."""
        if self.m != other.m or self.n != other.n:
            raise Exception('__add__: mismatching dimensions:'
                ' ({}, {}) + ({}, {})'.format(self.m, self.n, other.m, other.n))
        res = pmat(self.m, self.n)
        pmat_copy(other, res)
        # gead performs res <- res + 1.0 * self
        pmt_gead(1.0, self, res)
        return res
def __sub__(self, other):
if self.m != other.m or self.n != other.n:
raise Exception('__sub__: mismatching dimensions:'
' ({}, {}) + ({}, {})'.format(self.m, self.n, other.m, other.n))
res = pmat(self.m, self.n)
pmat_copy(self, res)
pmt_gead(-1.0, other, res)
return res
def pmat_fill(A: pmat, value: float):
    """Set every entry of A to `value` in place."""
    for row in range(A.m):
        for col in range(A.n):
            A[row, col] = value
def pmat_copy(A: pmat, B: pmat):
    """Copy A into B elementwise; raises when the shapes disagree."""
    if A.m != B.m or A.n != B.n:
        raise Exception('__copy__: mismatching dimensions:'
            ' ({}, {}) -> ({}, {})'.format(A.m, A.n, B.m, B.n))
    for row in range(A.m):
        for col in range(A.n):
            B[row, col] = A[row, col]
def pmat_tran(A: pmat, B: pmat):
    """Write the transpose of A into B; B must be shaped (A.n, A.m)."""
    if A.m != B.n or A.n != B.m:
        raise Exception('__tran__: mismatching dimensions:'
            ' ({}, {}) -> ({}, {})'.format(A.m, A.n, B.m, B.n))
    for row in range(A.m):
        for col in range(A.n):
            B[col, row] = A[row, col]
def pmat_vcat(A: pmat, B: pmat, res: pmat):
    """Stack A on top of B into res (vertical concatenation)."""
    if A.n != B.n or A.n != res.n or A.m + B.m != res.m:
        raise Exception('__vcat__: mismatching dimensions:'
            ' ({}, {}) ; ({}, {})'.format(A.m, A.n, B.m, B.n))
    # top block: rows 0..A.m-1 come from A
    for row in range(A.m):
        for col in range(A.n):
            res[row, col] = A[row, col]
    # bottom block: rows A.m.. come from B
    for row in range(B.m):
        for col in range(B.n):
            res[A.m + row, col] = B[row, col]
def pmat_hcat(A: pmat, B: pmat, res: pmat):
    """Place A left of B into res (horizontal concatenation)."""
    if A.m != B.m or A.m != res.m or A.n + B.n != res.n:
        raise Exception('__hcat__: mismatching dimensions:'
            ' ({}, {}) , ({}, {})'.format(A.m, A.n, B.m, B.n))
    # left block: columns 0..A.n-1 come from A
    for row in range(A.m):
        for col in range(A.n):
            res[row, col] = A[row, col]
    # right block: columns A.n.. come from B
    for row in range(B.m):
        for col in range(B.n):
            res[row, A.n + col] = B[row, col]
# def pmt_getrsm(fact: pmat, ipiv: list, rhs: pmat):
# # create permutation vector
# c_ipiv = cast(create_string_buffer(sizeof(c_int)*A.m), POINTER(c_int))
# for i in range(A.n):
# c_ipiv[i] = ipiv[i]
# res = pmat(A.m, B.n)
# # create permuted rhs
# # pB = pmat(B.m, B.n)
# pmat_copy(B, res)
# pmt_rowpe(B.m, c_ipiv, res)
# # solve
# pmt_trsm_llnu(A, res)
# pmt_trsm_lunu(A, res)
# return res
# def pmt_getrsv(fact: pmat, ipiv: list, rhs: pvec):
# # create permutation vector
# c_ipiv = cast(create_string_buffer(sizeof(c_int)*fact.m), POINTER(c_int))
# for i in range(fact.n):
# c_ipiv[i] = ipiv[i]
# # permuted rhs
# pvec_copy(b, res)
# pmt_vecpe(b.blasfeo_dvec.m, c_ipiv, res)
# # solve
# pmt_trsv_llnu(fact, rhs)
# pmt_trsv_lunn(fact, rhs)
# return
def pmt_potrsm(fact: pmat, rhs: pmat):
    """Solve (L L^T) X = rhs in place, `fact` being the Cholesky factor L.

    Forward-solves with the lower factor, then back-solves with its
    explicitly built transpose.
    """
    # solve
    pmt_trsm_llnn(fact, rhs)
    fact_tran = pmat(fact.m, fact.n)
    pmat_tran(fact, fact_tran)
    pmt_trsm_lunn(fact_tran, rhs)
    return
def pmt_potrsv(fact: pmat, rhs: pvec):
    """Solve the Cholesky-factorized system for vector rhs in place.

    NOTE(review): uses the unit-diagonal lower solve (llnu), whereas
    pmt_potrsm uses llnn — confirm which diagonal convention is intended.
    """
    # solve
    pmt_trsv_llnu(fact, rhs)
    pmt_trsv_lunn(fact, rhs)
    return
# intermediate-level linear algebra
# def pmt_gemm(A: pmat, B: pmat, C: pmat, D: pmat):
def pmt_gemm(*argv):
    """General matrix-matrix multiply D <- C + A @ B (no transposes).

    Accepts (A, B, C, D) or (A, B, C) — in the 3-argument form C doubles
    as the output D.

    BUG FIX: the arity check only rejected fewer than 3 arguments, so a
    call with 5+ arguments silently ignored the extras; any count other
    than 3 or 4 is now rejected.
    """
    if len(argv) not in (3, 4):
        raise Exception('Invalid number of arguments')
    A = argv[0]
    B = argv[1]
    C = argv[2]
    D = argv[3] if len(argv) == 4 else argv[2]
    if A.n != B.m or A.m != C.m or B.n != C.n or C.m != D.m or C.n != D.n:
        raise Exception('pmt_gemm: mismatching dimensions:'
            ' ({}, {}) <- ({},{}) + ({}, {}) x ({}, {})'.format(
            D.m, D.n, C.m, C.n, A.m, A.n, B.m, B.n))
    c_pmt_dgemm_nn(A, B, C, D)
    return
def pmt_gemm_nn(*argv):
    """D <- C + A @ B, no transposes; (A, B, C[, D]) with D defaulting to C."""
    if len(argv) < 3:
        raise Exception('Invalid number of arguments')
    A, B, C = argv[:3]
    D = argv[3] if len(argv) == 4 else C
    shapes_ok = (A.n == B.m and A.m == C.m and B.n == C.n
                 and C.m == D.m and C.n == D.n)
    if not shapes_ok:
        raise Exception('pmt_gemm_nn: mismatching dimensions:'
            ' ({}, {}) <- ({},{}) + ({}, {}) x ({}, {})'.format(\
            D.m, D.n, C.m, C.n, A.m, A.n, B.m, B.n))
    c_pmt_dgemm_nn(A, B, C, D)
def pmt_gemm_nt(*argv):
    """D <- C + A @ B^T; (A, B, C[, D]) with D defaulting to C."""
    if len(argv) < 3:
        raise Exception('Invalid number of arguments')
    A, B, C = argv[:3]
    D = argv[3] if len(argv) == 4 else C
    shapes_ok = (A.n == B.n and A.m == C.m and B.m == C.n
                 and C.m == D.m and C.n == D.n)
    if not shapes_ok:
        raise Exception('pmt_gemm_nt: mismatching dimensions:'
            ' ({}, {}) <- ({},{}) + ({}, {}) x ({}, {})^T'.format(\
            D.m, D.n, C.m, C.n, A.m, A.n, B.m, B.n))
    c_pmt_dgemm_nt(A, B, C, D)
def pmt_gemm_tn(*argv):
    """D <- C + A^T @ B; (A, B, C[, D]) with D defaulting to C."""
    if len(argv) < 3:
        raise Exception('Invalid number of arguments')
    A, B, C = argv[:3]
    D = argv[3] if len(argv) == 4 else C
    shapes_ok = (A.m == B.m and A.n == C.m and B.n == C.n
                 and C.m == D.m and C.n == D.n)
    if not shapes_ok:
        raise Exception('pmt_gemm_tn: mismatching dimensions:'
            ' ({}, {}) <- ({},{}) + ({}, {})^T x ({}, {})'.format(\
            D.m, D.n, C.m, C.n, A.m, A.n, B.m, B.n))
    c_pmt_dgemm_tn(A, B, C, D)
def pmt_gemm_tt(*argv):
    """D <- C + A^T @ B^T; (A, B, C[, D]) with D defaulting to C."""
    if len(argv) < 3:
        raise Exception('Invalid number of arguments')
    A, B, C = argv[:3]
    D = argv[3] if len(argv) == 4 else C
    shapes_ok = (A.m == B.n and A.n == C.m and B.m == C.n
                 and C.m == D.m and C.n == D.n)
    if not shapes_ok:
        raise Exception('pmt_gemm_tt: mismatching dimensions:'
            ' ({}, {}) <- ({},{}) + ({}, {})^T x ({}, {})^T'.format(\
            D.m, D.n, C.m, C.n, A.m, A.n, B.m, B.n))
    c_pmt_dgemm_tt(A, B, C, D)
# B <= B + alpha*A
def pmt_gead(alpha: float, A: pmat, B: pmat):
    """In-place scaled add: B <- B + alpha * A (BLASFEO dgead)."""
    if A.m != B.m or A.n != B.n:
        raise Exception('pmt_dgead: mismatching dimensions:'
            '({},{}) + ({}, {})'.format(A.m, A.n, B.m, B.n))
    c_pmt_dgead(alpha, A, B)
    return
def pmt_rowpe(m: int, ipiv: POINTER(c_int), A: pmat):
    """Apply the row permutation `ipiv` (first m entries) to A in place."""
    c_pmt_drowpe(m, ipiv, A)
    return
def pmt_trsm_llnu(A: pmat, B: pmat):
    """Triangular solve on B in place (lower, left, no-transpose, unit diagonal)."""
    c_pmt_trsm_llnu(A, B)
    return
def pmt_trsm_lunn(A: pmat, B: pmat):
    """Triangular solve on B in place (upper, left, no-transpose, non-unit diagonal)."""
    c_pmt_trsm_lunn(A, B)
    return
def pmt_trsm_llnn(A: pmat, B: pmat):
    """Triangular solve on B in place (lower, left, no-transpose, non-unit diagonal)."""
    c_pmt_trsm_llnn(A, B)
    return
def pmt_trsv_llnu(A: pmat, b: pvec):
    """Triangular vector solve on b in place (lower, no-transpose, unit diagonal)."""
    c_pmt_trsv_llnu(A, b)
    return
def pmt_trsv_lunn(A: pmat, b: pvec):
    """Triangular vector solve on b in place (upper, no-transpose, non-unit diagonal)."""
    c_pmt_trsv_lunn(A, b)
    return
def pmt_getrf(A: pmat, fact: pmat, ipiv: list):
    """LU-factorize A into `fact` with partial pivoting; pivots copied into ipiv.

    NOTE(review): the C pivot buffer is sized with A.m ints but the copy-back
    loop runs over A.n — these differ for non-square A; confirm the intended
    pivot-vector length.
    """
    # create permutation vector
    c_ipiv = cast(create_string_buffer(sizeof(c_int)*A.m), POINTER(c_int))
    # factorize
    c_pmt_getrf(A, fact, c_ipiv)
    for i in range(A.n):
        ipiv[i] = c_ipiv[i]
    return
def pmt_potrf(A: pmat, fact: pmat):
    """Cholesky-factorize A, writing the factor into `fact`."""
    # factorize
    c_pmt_potrf(A, fact)
    return
def pmt_gemv_n(A: pmat, b: pvec, c: pvec, d: pvec):
    """Matrix-vector multiply-add d <- c + A @ b (no transpose)."""
    c_pmt_dgemv_n(A, b, c, d)
    return
# auxiliary functions
def pmt_set_data(M: pmat, data: POINTER(c_double)):
    """Load raw double data into M's underlying BLASFEO dmat."""
    c_pmt_set_blasfeo_dmat(M.blasfeo_dmat, data)
    return
def pmat_set(M: pmat, value, i, j):
    """Set element (i, j) of M to `value` via the BLASFEO element setter."""
    c_pmt_set_blasfeo_dmat_el(value, M.blasfeo_dmat, i, j)
    return
def pmat_get(M: pmat, i, j):
    """Return element (i, j) of M via the BLASFEO element getter."""
    el = c_pmt_get_blasfeo_dmat_el(M.blasfeo_dmat, i, j)
    return el
def pmat_print(M: pmat):
    """Pretty-print M using the BLASFEO print routine."""
    c_pmt_print_blasfeo_dmat(M)
    return
|
24,502 | cee0de6c1e6650b3a490b87e04b6f8b489ea9af2 | # import libraries
import cv2
import time
import numpy as np
import sqlite3
import matplotlib.pyplot as plt
# Load all rows of the `data` table into four column lists:
# df = image blobs, df1/df2 = grid coordinates, df3 = location names
# (assumed from usage below — TODO confirm the schema).
con = sqlite3.connect("traffic.db")
c = con.cursor()
c.execute("""SELECT * FROM data""")
rows = c.fetchall()
df = []
df1 = []
df2 = []
df3 = []
# BUG FIX: the loop variable previously shadowed the `rows` list
# (`for rows in rows`); also dropped the unused `data =` binding and the
# needless commit() after a read-only SELECT.
for row in rows:
    df.append(row[0])
    df1.append(row[1])
    df2.append(row[2])
    df3.append(row[3])
c.close()
con.close()
def capturing(p):
    """Run YOLOv3 on an image blob `p` and return a congestion score.

    The score is the sum of per-class weights (person=1, car=15, truck=20,
    bus=20, bike=5) over the classes detected at least once.
    NOTE(review): `p` is assumed to be raw JPEG bytes (it is written to disk
    and re-read with cv2.imread) — confirm against the DB contents.
    """
    net = cv2.dnn.readNet("yolo/yolov3.weights", "yolo/yolov3.cfg")
    counter = 1
    with open("yolo/coco.names", "r") as f:
        classes = [line.strip() for line in f.readlines()]
    layer_names = net.getLayerNames()
    output_layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]
    colors = np.random.uniform(0, 255, size=(len(classes), 3))  # to get list of colors for each possible class
    # Loading image: persist the blob, then read it back as a frame
    with open("{}.jpeg".format(counter), "wb") as f:
        f.write(p)
    frame = cv2.imread("{}.jpeg".format(counter))
    frame = cv2.resize(frame, None, fx=0.4, fy=0.4)
    height, width, channels = frame.shape
    startingtime = time.time()
    frame_id = 0
    # Detecting objects
    blob = cv2.dnn.blobFromImage(frame, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
    net.setInput(blob)
    outs = net.forward(output_layers)
    # classes of interest and their congestion weights
    l = ['person', 'car', 'truck', 'bus', 'bike']
    m = dict({'person': 1, 'car': 15, 'truck': 20, 'bus': 20, 'bike': 5})
    # Showing information on the screen
    class_ids = []
    confidences = []
    boxes = []  # coordinate of bounding box
    for out in outs:
        for detection in out:
            scores = detection[5:]  # getting all 80 scores
            class_id = np.argmax(scores)  # finding the max score
            confidence = scores[class_id]
            # find out strong predictions greater then .5
            if confidence > 0.5:
                # Object detected
                center_x = int(detection[0] * width)
                center_y = int(detection[1] * height)
                w = int(detection[2] * width)
                h = int(detection[3] * height)
                # Rectangle coordinates
                x = int(center_x - w / 2)
                y = int(center_y - h / 2)
                boxes.append([x, y, w, h])
                confidences.append(float(confidence))
                class_ids.append(class_id)
    # non-maximum suppression to drop overlapping boxes
    indexes = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)
    count_label = []
    count = []
    font = cv2.FONT_HERSHEY_PLAIN
    for i in range(len(boxes)):
        if i in indexes:
            x, y, w, h = boxes[i]
            label = str(classes[class_ids[i]])
            if label not in count_label:
                # first sighting of a class of interest
                if label in l:
                    count_label.append(label)
                    count.append(int(1))
            else:
                # bump the tally for an already-seen class; tmp tracks the
                # index of the matching label in count_label
                tmp = 0
                for k in count_label:
                    if k == label:
                        count[tmp] = count[tmp] + 1
                    tmp = tmp + 1
            color = colors[class_ids[i]]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(frame, label, (x, y + 30), font, 3, color, 3)
    # score = sum of weights of the distinct classes seen (counts beyond the
    # first sighting do not change the score)
    x = 0
    for k in range(len(count_label)):
        x = x + m[count_label[k]]
    elapsed_time = time.time() - startingtime
    # NOTE(review): frame_id stays 0, so fps is always 0 — presumably
    # leftover from a video-loop version of this code.
    fps = frame_id / elapsed_time
    cv2.putText(frame, "FPS:" + str(fps), (10, 30), font, 3, (0, 0, 0), 1)
    cv2.imshow("Image", frame)
    key = cv2.waitKey(1)  # 0 keeps on hold 1 waits for a millisecond
    return x
# define the shape of the environment (i.e., its states) — a 6x6 grid
environment_rows = 6
environment_columns = 6
# Create a 3D numpy array to hold the current Q-values for each state and
# action pair: Q(s, a). The third "action" dimension has 4 layers, one per
# possible action (up/right/down/left). All Q-values start at 0.
q_values = np.zeros((environment_rows, environment_columns, 4))
# define actions
# numeric action codes: 0 = up, 1 = right, 2 = down, 3 = left
actions = ['up', 'right', 'down', 'left']
# Create a 2D numpy array to hold the rewards for each state; -999999 marks a
# non-road (terminal/crash) cell until proven otherwise below.
rewards = np.full((environment_rows, environment_columns), -999999.)
k = 0
print("Pick the destination Location from the list")
print("Locations :")
# For each DB row, score traffic from its camera image and write a negative
# reward proportional to congestion into the grid.
# NOTE(review): df1/df2 appear to be coordinates offset by 23 (grid index =
# value - 23) — confirm against the data source.
for i in range(len(df)):
    k = int(capturing(df[i]))
    if k == 0:
        k = 1  # ensure even empty roads cost something to traverse
    rewards[df1[i]-23, df2[i]-23] = k*(-1)
    print(df3[i])
# taking the value of destination
goalone = -1
goaltwo = -1
goallo=input("Enter Destination Location : ")
for i in range(len(df)):
    if df3[i] == goallo:
        goalone = df1[i]-23
        goaltwo = df2[i]-23
if goalone == -1 or goaltwo == -1:
    print("Location not found please check for typos and case if you think u entered correct location")
    exit()
# set the reward for reaching goal (i.e., the goal) to 999999
rewards[goalone,goaltwo] = 999999.0
# define a function that determines if the specified location is a terminal state
def is_terminal_state(current_row_index, current_column_index):
    """Return True when the cell is terminal: the goal (+999999) or off-road (-999999)."""
    cell_reward = rewards[current_row_index, current_column_index]
    return cell_reward == 999999.0 or cell_reward == -999999.0
# define a function that will choose a random, non-terminal starting location
def get_starting_location():
    """Pick a uniformly random non-terminal (drivable) cell and return (row, col)."""
    while True:
        row = np.random.randint(environment_rows)
        col = np.random.randint(environment_columns)
        if not is_terminal_state(row, col):
            return row, col
# define an epsilon greedy algorithm that will choose which action to take next (i.e., where to move next)
def get_next_action(current_row_index, current_column_index, epsilon):
    """Epsilon-greedy choice: exploit the Q-table with probability epsilon, else explore."""
    explore = np.random.random() >= epsilon
    if explore:
        return np.random.randint(4)
    return np.argmax(q_values[current_row_index, current_column_index])
# define a function that will get the next location based on the chosen action
def get_next_location(current_row_index, current_column_index, action_index):
    """Apply the chosen action, clamped at the grid edges; return (row, col)."""
    row = current_row_index
    col = current_column_index
    action = actions[action_index]
    if action == 'up' and row > 0:
        row -= 1
    elif action == 'right' and col < environment_columns - 1:
        col += 1
    elif action == 'down' and row < environment_rows - 1:
        row += 1
    elif action == 'left' and col > 0:
        col -= 1
    return row, col
# Define a function that will get the shortest path between any location within the source that
# the car is allowed to travel and the goal.
def get_shortest_path(start_row_index, start_column_index):
    """Greedily follow the learned Q-values from the start cell to a terminal cell.

    Returns the list of [row, col] steps, or [] when the start itself is
    terminal (off-road or already at the goal).
    NOTE(review): if training left a Q-value cycle this loop never terminates.
    """
    # return immediately if this is an invalid starting location
    if is_terminal_state(start_row_index, start_column_index):
        print("You are not on road please get to the road first")
        return []
    else:  # if this is a 'legal' starting location
        current_row_index, current_column_index = start_row_index, start_column_index
        shortest_path = []
        shortest_path.append([current_row_index, current_column_index])
        # continue moving along the path until we reach the goal
        while not is_terminal_state(current_row_index, current_column_index):
            # epsilon=1.0 makes get_next_action fully greedy (always argmax)
            action_index = get_next_action(current_row_index, current_column_index, 1.)
            # move to the next location on the path, and add the new location to the list
            current_row_index, current_column_index = get_next_location(current_row_index, current_column_index, action_index)
            shortest_path.append([current_row_index, current_column_index])
        return shortest_path
# define training parameters
epsilon = 0.9  # the percentage of time when we should take the best action (instead of a random action)
discount_factor = 0.9  # discount factor for future rewards
learning_rate = 0.9  # the rate at which the AI agent should learn
# run through 1000 training episodes of tabular Q-learning
for episode in range(1000):
    # get the starting location for this episode
    row_index, column_index = get_starting_location()
    # continue taking actions (i.e., moving) until we reach a terminal state
    # (i.e., until we reach goal or crash)
    while not is_terminal_state(row_index, column_index):
        # choose which action to take (i.e., where to move next)
        action_index = get_next_action(row_index, column_index, epsilon)
        # perform the chosen action, and transition to the next state
        old_row_index, old_column_index = row_index, column_index  # store the old row and column indexes
        row_index, column_index = get_next_location(row_index, column_index, action_index)
        # receive the reward for moving to the new state, and calculate the
        # standard Q-learning temporal-difference error
        reward = rewards[row_index, column_index]
        old_q_value = q_values[old_row_index, old_column_index, action_index]
        temporal_difference = reward + (discount_factor * np.max(q_values[row_index, column_index])) - old_q_value
        # update the Q-value for the previous state and action pair
        new_q_value = old_q_value + (learning_rate * temporal_difference)
        q_values[old_row_index, old_column_index, action_index] = new_q_value
print('Training complete!')
# Ask for the source location and translate it to grid coordinates
# (same value-23 offset used when the rewards grid was filled).
sourceone = -1
sourcetwo = -1
sourcelo = input("Enter the source from same list Location : ")
for i in range(len(df)):
    if df3[i] == sourcelo:
        sourceone = df1[i]-23
        sourcetwo = df2[i]-23
if sourceone == -1 or sourcetwo == -1:
    print("Location not found please check for typos and case if you think u entered correct location")
    exit()
q = get_shortest_path(sourceone, sourcetwo)
# BUG FIX: the empty-path test compared against a throwaway empty list
# (q1); also corrected the "Your are" typo in the user-facing message.
if not q:
    print("You are on the Destination :")
    exit()
row = np.array(q)
# translate grid indices back to the stored coordinate values
x = []
y = []
for i in range(len(row)):
    x.append(23+row[i][0])
    y.append(23+row[i][1])
# print the route as a chain of location names ending at the destination
for i in range(len(x)-1):
    for j in range(len(df)):
        if df1[j] == x[i] and df2[j] == y[i]:
            print(df3[j], "-->", end=" ")
print(goallo)
# rebuild x/y as raw grid indices for plotting
x = []
y = []
for i in range(len(row)):
    x.append(row[i][0])
    y.append(row[i][1])
# Plotting the Graph
plt.scatter(x, y)
plt.plot(x, y)
plt.xlabel("Latitude (in Minutes X 10^2)")
plt.ylabel("Longitude (in Minutes X 10^2)")
plt.show()
cv2.destroyAllWindows()
|
24,503 | 674bb1774209a3994152366690385be2de59e0f7 | #! /usr/bin/env python3
from __future__ import print_function
import sys
import findspark
findspark.init()
from pyspark.sql import SparkSession
from pyspark import SparkContext
from pyspark.sql import SQLContext, HiveContext
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.regression import LinearRegression
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.plotting import scatter_matrix
def spark_session(appName):
    """Create (or fetch) a Hive-enabled SparkSession named `appName`."""
    builder = SparkSession.builder.appName(appName).enableHiveSupport()
    return builder.getOrCreate()
def sparkcontext():
    """Return the shared SparkContext, creating it if needed."""
    return SparkContext.getOrCreate()
def hivecontext():
    """Return a HiveContext bound to the shared SparkContext."""
    return HiveContext(sparkcontext())
# Load the Boston housing CSV from HDFS and scatter-plot its numeric columns.
appName = "DS"
spark = spark_session(appName)
##
## Get a DF first based on Databricks CSV libraries ignore column heading because of column called "Type"
##
csvlocation = "hdfs://rhes75:9000/ds/Boston.csv"
rows = spark.read.csv(csvlocation, header="true").count()
print("\nnumber of rows is ", rows)
if rows == 0:
    # BUG FIX: this branch called println(), which does not exist in Python
    # (a Scala habit) and would have raised NameError instead of aborting.
    print("Empty CSV directory, aborting!")
    sys.exit(1)
house_df = spark.read.csv(csvlocation, header="true")
house_df.cache()
house_df.printSchema()
house_df.describe().toPandas().transpose()
# keep only numeric columns, sample 80%, and draw a pandas scatter matrix
numeric_features = [t[0] for t in house_df.dtypes if t[1] == 'int' or t[1] == 'double']
sampled_data = house_df.select(numeric_features).sample(False, 0.8).toPandas()
axs = scatter_matrix(sampled_data, figsize=(10, 10))
n = len(sampled_data.columns)
for i in range(n):
    # rotate axis labels and strip ticks for readability
    v = axs[i, 0]
    v.yaxis.label.set_rotation(0)
    v.yaxis.label.set_ha('right')
    v.set_yticks(())
    h = axs[n-1, i]
    h.xaxis.label.set_rotation(90)
    h.set_xticks(())
sys.exit()
|
24,504 | 37dc4aad8564b01395c66411ed97b879968bc11c | import os
import glob
from pathlib import Path
from typing import Dict, Optional
import yaml
from utils import is_base16, is_color
# from logger import log
# Default fonts and bar geometry for the qtile theme.
FONT = "Roboto Mono for Powerline"
ICON_FONT = "Material Design Icons"
FONT_SIZE = 14
BAR_HEIGHT = 22
# Fallback base16 palette (hex RGB values without a leading '#').
COLOR_SCHEME = {
    "base00": "f9f5d7",
    "base01": "ebdbb2",
    "base02": "d5c4a1",
    "base03": "bdae93",
    "base04": "665c54",
    "base05": "504945",
    "base06": "3c3836",
    "base07": "282828",
    "base08": "9d0006",
    "base09": "af3a03",
    "base0A": "b57614",
    "base0B": "79740e",
    "base0C": "427b58",
    "base0D": "076678",
    "base0E": "8f3f71",
    "base0F": "d65d0e",
}
# Widget defaults; color fields name base16 slots and are dereferenced later
# by _deref_colors().
WIDGET = {
    "font": FONT,
    "fontsize": FONT_SIZE,
    "margin": 0,
    "padding": 0,
    "foreground": "base07",
    "background": "base01",
}
# Extension defaults (same color-slot convention as WIDGET).
EXTENSION = {
    "font": FONT,
    "fontsize": FONT_SIZE,
    "foreground": "base07",
    "background": "base01",
}
# Window-layout defaults; borders here are literal hex colors.
LAYOUT = {
    "margin": 3,
    "border_width": 3,
    "border_focus": "d5c4a1",
    "border_normal": "282828",
}
# def _update_colors(d, color_scheme=COLOR_SCHEME):
# res = {}
# for k, v in d.items():
# if isinstance(v, str) and is_base16(v):
# try:
# v = COLOR_SCHEME[v]
# except KeyError:
# pass # TODO: raise and exception here?
# res[k] = v
# return res
def _default_colors(color_scheme: Dict = COLOR_SCHEME) -> Dict:
    """Map semantic UI color roles onto slots of a base16 scheme."""
    pick = color_scheme.__getitem__
    return {
        "panel_fg": pick("base07"),
        "panel_bg": pick("base01"),
        "group_current_fg": pick("base05"),
        "group_current_bg": pick("base03"),
        "group_active_fg": pick("base07"),
        "group_active_bg": pick("base04"),
        "group_inactive_fg": pick("base07"),
        "group_inactive_bg": pick("base04"),
        "powerline_fg": pick("base01"),
        # the powerline cycles through the eight accent slots base08..base0F
        "powerline_bg": [pick("base0" + c) for c in "89ABCDEF"],
    }
# Complete fallback theme, used as the base that load_theme() overlays the
# user's theme.yaml onto.
DEFAULT_THEME = {
    "font": FONT,
    "iconfont": ICON_FONT,
    "fontsize": FONT_SIZE,
    "barheight": BAR_HEIGHT,
    "color": COLOR_SCHEME,
    "widget": WIDGET,
    "layout": LAYOUT,
    "extension": EXTENSION,
}
def _deref_colors(theme_info, color_scheme, colors):
    """Resolve symbolic color references in theme_info to concrete values.

    Numbers, bools, and already-concrete colors pass through unchanged;
    base16 slot names are looked up in color_scheme (module default when
    None), and semantic role names are looked up in `colors`.
    """
    resolved = {}
    for key, val in theme_info.items():
        is_symbolic = not isinstance(val, (int, float, bool)) and not is_color(val)
        if is_symbolic:
            if is_base16(val):
                scheme = COLOR_SCHEME if color_scheme is None else color_scheme
                val = scheme[val]
            elif val in colors:
                val = colors[val]
        resolved[key] = val
    return resolved
def _load_color_scheme(
    scheme_file: str, scheme_folder: Optional[str] = None
) -> Optional[Dict]:
    """Locate and parse a base16 color-scheme YAML file.

    Searches `scheme_folder` (default: $XDG_DATA_HOME/base16/schemes, else a
    package-relative ``schemes`` dir) recursively for a file whose name ends
    with scheme_file's name. Falls back to the built-in COLOR_SCHEME when no
    match is found — so despite the Optional hint, None is never returned.
    """
    if not scheme_folder:
        xdg_data_home = os.environ.get("XDG_DATA_HOME", None)
        if xdg_data_home is not None:
            scheme_folder = Path(xdg_data_home) / "base16" / "schemes"
        else:
            scheme_folder = Path(__file__).parent.parent / "schemes"
    # BUG FIX: a caller-supplied scheme_folder arrives as a plain str, which
    # has no rglob(); normalize to Path unconditionally.
    scheme_folder = Path(scheme_folder)
    scheme_file = Path(scheme_file)
    if scheme_file.suffix != ".yaml":
        scheme_file = scheme_file.with_suffix(".yaml")
    # rglob() is already recursive, so a bare filename pattern suffices
    # (the previous "**/*.yaml" argument was redundant).
    for file_path in scheme_folder.rglob("*.yaml"):
        if file_path.name.endswith(scheme_file.name):
            with open(file_path, "r") as fp:
                colors = yaml.load(fp, Loader=yaml.SafeLoader)
            return colors
    return COLOR_SCHEME
def _load_theme_config(filename: str) -> Dict:
    """Resolve and parse the theme YAML config file.

    A relative `filename` resolves to $XDG_CONFIG_HOME/qtile/theme.yaml (or a
    module-relative theme.yaml); an absolute path is used as given.
    NOTE(review): for relative names the argument itself is ignored — only
    the fixed theme.yaml is looked up; confirm that is intended.

    BUG FIX: previously fell off the end (returning None) when the file did
    not exist, which crashed load_theme's `"..." in theme_config` membership
    test; an empty dict is returned instead.
    """
    if filename[0] != "/":
        xdg_config = os.environ.get("XDG_CONFIG_HOME", None)
        if xdg_config:
            theme_conf = Path(xdg_config) / "qtile" / "theme.yaml"
        else:
            theme_conf = Path(__file__).parent / "theme.yaml"
    else:
        theme_conf = Path(filename)
    if theme_conf.exists():
        with open(theme_conf, "r") as fp:
            return yaml.load(fp, yaml.SafeLoader)
    return {}
def load_theme(filename: str = "theme.yaml") -> Dict:
    """Build the effective theme: defaults overlaid with the user's config.

    Resolves the base16 scheme (by name or inline), derives semantic colors,
    dereferences color names inside the widget/extension/layout sections, and
    copies any remaining top-level keys straight from the user config.
    """
    theme = DEFAULT_THEME.copy()
    theme_config = _load_theme_config(filename)
    # resolve the base16 scheme: named scheme file > inline scheme > built-in
    if "base16_scheme_name" in theme_config:
        color_scheme = _load_color_scheme(theme_config.pop("base16_scheme_name"))
        theme_config["base16_scheme"] = color_scheme
    elif "base16_scheme" not in theme_config:
        theme_config["base16_scheme"] = COLOR_SCHEME
    colors = _default_colors(theme_config["base16_scheme"])
    # widget section: defaults, user overrides, then color dereferencing
    widget = WIDGET.copy()
    if "widget" in theme_config:
        widget.update(theme_config["widget"])
    tc = _deref_colors(widget, theme_config["base16_scheme"], colors)
    widget.update(tc)
    # extension section: same three-step merge
    extension = EXTENSION.copy()
    if "extension" in theme_config:
        extension.update(theme_config["extension"])
    tc = _deref_colors(extension, theme_config["base16_scheme"], colors)
    extension.update(tc)
    # layout section: same three-step merge
    layout = LAYOUT.copy()
    if "layout" in theme_config:
        layout.update(theme_config["layout"])
    tc = _deref_colors(layout, theme_config["base16_scheme"], colors)
    layout.update(tc)
    theme["color"] = colors
    theme["widget"] = widget
    theme["extension"] = extension
    theme["layout"] = layout
    # pass through any remaining top-level keys (font, fontsize, barheight, ...)
    for k, v in theme_config.items():
        if k not in ["color", "widget", "extension", "layout"]:
            theme[k] = theme_config[k]
    return theme
|
24,505 | 732c07029910e337305b137340bde2704a529085 | from fastapi import FastAPI, Depends, HTTPException, status, Request, Form
from fastapi.security import HTTPBasic, HTTPBasicCredentials
from fastapi.responses import HTMLResponse
import secrets
from utils.rsa import Rsa
from schemas import Message
import config
app = FastAPI()
security = HTTPBasic()
def get_authenticated(credentials: HTTPBasicCredentials = Depends(security)):
    """Check HTTP Basic credentials against the configured user.

    NOTE(review): this RETURNS an HTTPException (401 or 200) rather than
    raising it; every endpoint below relies on inspecting .status_code and
    .headers of the returned object, so do not change this to a raise.
    """
    # compare_digest gives constant-time comparison (timing-attack resistant)
    correct_username = secrets.compare_digest(credentials.username, config.USER)
    correct_password = secrets.compare_digest(credentials.password, config.USER_PASSW)
    if not (correct_password and correct_username):
        return HTTPException(
            status_code=status.HTTP_401_UNAUTHORIZED,
            detail="Incorrect username or password",
            headers={"WWW-authenticate": "Basic"},
        )
    # success: smuggle the username back to callers via the headers dict
    return HTTPException(
        status_code=status.HTTP_200_OK,
        detail="You are authenticated",
        headers={"WWW-authenticate": "Basic",
            "username": credentials.username},
    )
@app.get("/")
async def read_root(state: str = Depends(get_authenticated)) -> str:
    """Serve the encrypt form, or a 401 page when authentication failed."""
    if state.status_code == 401:
        return HTMLResponse(content='You are not authorized to use this endpoint!', status_code=401)
    form_html = ('<form method="post"><p> Enter text to encrypt </p> <input type="textarea" name="message"/><input '
                 'type="submit"/></form> ')
    return HTMLResponse(content=form_html, status_code=200)
@app.post("/")
async def read_form(message: str = Form(...), state: str = Depends(get_authenticated)):
    """Handle the form: round-trip the message through RSA and redisplay.

    NOTE(review): encrypts with the PRIVATE key and decrypts with the PUBLIC
    key (signing-style direction) and the decrypted value is computed but not
    shown — confirm this is intended.
    """
    if state.status_code == 401:
        content = 'You are not authorized to use this endpoint!'
        return HTMLResponse(content=content, status_code=401)
    rsa = Rsa()
    encrypted = rsa.encrypt_RSA(message, rsa.private_key)
    decrypted = rsa.decrypt_RSA(encrypted, rsa.public_key)
    # username was stashed in the auth result's headers by get_authenticated
    username = state.headers['username']
    info = "message: {} | encrypted: {} | username: {} | pub_key {}".format(message, encrypted, username, rsa.public_key)
    content = '<form method="post"><p>Enter text to encrypt </p> <input type="textarea" name="message" ' \
        'required/><input type="submit"/></form> <br> {}'.format(info)
    return HTMLResponse(status_code=200, content=content)
@app.post("/api/encode/")
async def encode(message: Message, state: str = Depends(get_authenticated)):
    """Encrypt message.message with a freshly generated RSA key pair.

    NOTE(review): encrypts with the private key (the decode endpoint uses the
    public key) and returns BOTH keys — including the private key — to the
    client; confirm this exposure is intended for this exercise.
    """
    if state.status_code == 401:
        content = 'You are not authorized to use this endpoint!'
        raise HTTPException(status_code=401, detail=content)
    if message.message == "":
        content = 'No message given, impossible to encode'
        raise HTTPException(status_code=422, detail=content)
    rsa = Rsa()
    encrypted = rsa.encrypt_RSA(message.message, rsa.private_key)
    # echo back the authenticated user and the key material used
    message.username = state.headers["username"]
    message.message = encrypted
    message.public_key = rsa.public_key
    message.private_key = rsa.private_key
    return message
@app.get("/api/encode/")
async def read_root(state: str = Depends(get_authenticated)) -> str:
    """Usage hint for the encode endpoint (basic auth required)."""
    if state.status_code != 401:
        return 'Send your text in this format {"message" : "textoencrypt"} '
    raise HTTPException(status_code=401, detail='You are not authorized to use this endpoint!')
@app.post("/api/decode/")
async def decode(message: Message, state: str = Depends(get_authenticated)):
    """Decrypt message.message using the supplied public key.

    Returns the Message with .message replaced by the plaintext and
    .username set to the authenticated user; 422 when the message or the
    public_key is missing.
    """
    if state.status_code == 401:
        content = 'You are not authorized to use this endpoint!'
        raise HTTPException(status_code=401, detail=content)
    if message.message == "":
        content = 'No message given, impossible to decode'
        raise HTTPException(status_code=422, detail=content)
    if not message.public_key:
        content = 'No public_key given, impossible to decode'
        raise HTTPException(status_code=422, detail=content)
    decrypted = Rsa().decrypt_RSA(message.message, message.public_key)
    message.username = state.headers["username"]
    message.message = decrypted
    return message
@app.get("/api/decode/")
async def read_root(state: str = Depends(get_authenticated)) -> str:
    """Usage hint for the decode endpoint (basic auth required).

    BUG FIX: the example payload advertised a "private_key" field, but POST
    /api/decode/ validates and uses message.public_key; the hint now matches
    the actual contract.
    """
    if state.status_code == 401:
        content = 'You are not authorized to use this endpoint!'
        raise HTTPException(status_code=401, detail=content)
    return 'Send your text and public_key in this format {"message" : "textoencrypt", "public_key" : { "key": int, ' \
        '"n": int} } '
|
24,506 | c78e90dc1fd201ac7469f6642e31f32aa35b674c | from flask import Flask
import requests

# Demo client for the local Flask server.

# GET: fetch the index page and show its body.
resp = requests.get("http://localhost:5000")
print(resp.text)

# POST: submit a small JSON payload.
requests.post("http://localhost:5000", json={"name": "Bob"})
|
24,507 | 45770719e4810250869fedb9d124af4c2827582d | numbers = []
final = []
final_final = []

num = int(input("what is the number? "))

# Available "digits" are 1..num.  (NOTE: for num >= 10 the per-character
# test below compares single digit characters, so multi-digit entries in
# `numbers` can never match — same as the original behavior.)
for i in range(1, num + 1):
    numbers.append(i)

# Largest / smallest candidate formed by concatenating the digits in
# descending / ascending order.  Renamed from `max` / `min`, which
# shadowed the builtins of the same name.
largest = int("".join(map(str, sorted(numbers, reverse=True))))
smallest = int("".join(map(str, sorted(numbers))))

# First pass: keep every number in [smallest, largest] whose digits are
# all drawn from `numbers` (stored as strings).
for candidate in range(smallest, largest + 1):
    text = str(candidate)
    if all(int(ch) in numbers for ch in text):
        final.append(text)

# Second pass: keep only those using at least `num` distinct digits,
# i.e. the permutations of the digits 1..num.
for number in final:
    if len(set(number)) > num - 1:
        final_final.append(number)

# Report the count, then each result with its digits space-separated.
print(len(final_final))
for number in final_final:
    print(" ".join(number))
|
24,508 | b59cdb67c5de1b1ceafb02e123a525ed75983d68 | XSym
0075
de66fdc48313d422f17c449f8e25f8ae
/Library/Frameworks/Python.framework/Versions/3.4/lib/python3.4/tempfile.py
|
24,509 | 007a56cab77fad6b90eb4890a5968ba18cd99104 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2016 Magnus (<http://www.magnus.nl>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from odoo import api, fields, models, _
from odoo.tools.safe_eval import safe_eval
from odoo.tools import email_re, email_split
from datetime import datetime, timedelta, date
from dateutil.relativedelta import relativedelta
from lxml import etree
from odoo.osv.orm import setup_modifiers
class Lead(models.Model):
    """Advertising extension of ``crm.lead``.

    Links a lead to advertiser / agency / payer partners, adds
    advertising-specific counters and flags, and adapts several CRM
    flows (grouping, opportunity conversion, partner assignation, the
    sales dashboard and the form view) to the advertising sales process.
    """
    _inherit = ["crm.lead"]

    # Partner links used by the advertising flow.
    published_customer = fields.Many2one('res.partner', 'Advertiser', domain=[('customer', '=', True)], ondelete='set null',
                                         track_visibility='onchange', index=True,
                                         help="Linked Advertiser (optional). ")
    partner_id = fields.Many2one('res.partner', 'Payer', ondelete='set null', track_visibility='onchange',
                                 index=True, help="Linked Payer (optional).")
    partner_invoice_id = fields.Many2one('res.partner', 'Payer Invoice Address', ondelete='set null',
                                         index=True, help="Linked partner (optional). Usually created when converting the lead.")
    partner_shipping_id = fields.Many2one('res.partner', 'Payer Delivery Address', ondelete='set null',
                                          index=True, help="Linked partner (optional). Usually created when converting the lead.")
    partner_contact_id = fields.Many2one('res.partner', 'Contact Person', ondelete='set null', track_visibility='onchange',
                                         index=True, help="Linked Contact Person (optional). Usually created when converting the lead.")
    ad_agency_id = fields.Many2one('res.partner', 'Agency', ondelete='set null', track_visibility='onchange',
                                   index=True, help="Linked Advertising Agency (optional). Usually created when converting the lead.")
    partner_acc_mgr = fields.Many2one(related='partner_id.user_id', relation='res.users',
                                      string='Account Manager', store=True)
    # Flags and counters for the advertising pipeline.
    advertising = fields.Boolean('Advertising', default=False)
    is_activity = fields.Boolean(string='Activity', default=False)
    activities_count = fields.Integer("Activities", compute='_compute_activities_count')
    quotations_count = fields.Integer("# of Quotations", compute='_compute_quotations_count')
    adv_quotations_count = fields.Integer("# of Advertising Quotations", compute='_compute_adv_quotations_count')
    name_salesperson = fields.Char('Name Salesperson')
    adv_sale_amount_total = fields.Monetary(compute='_compute_sale_amount_total', string="Sum of Adv. Orders", currency_field='company_currency')

    @api.multi
    def _compute_quotations_count(self):
        """Count open (not sale/done/cancel) non-advertising quotations."""
        for lead in self:
            lead.quotations_count = self.env['sale.order'].search_count(
                [('opportunity_id', '=', lead.id),
                 ('state', 'not in', ['sale', 'done', 'cancel']),
                 ('advertising', '=', False)])

    @api.depends('order_ids')
    def _compute_sale_amount_total(self):
        """Aggregate the lead's sale orders.

        * ``sale_number``: count of orders still open,
        * ``sale_amount_total``: total of confirmed non-advertising orders,
        * ``adv_sale_amount_total``: untaxed total of confirmed advertising
          orders, all converted to the lead's company currency.
        """
        for lead in self:
            total = adv_total = 0.0
            nbr = 0
            company_currency = lead.company_currency or self.env.user.company_id.currency_id
            for order in lead.order_ids:
                if order.state not in ('sale', 'done', 'cancel'):
                    nbr += 1
                if order.state in ('sale', 'done'):
                    if not order.advertising:
                        total += order.currency_id.compute(order.amount_total, company_currency)
                    if order.advertising:
                        adv_total += order.currency_id.compute(order.amount_untaxed, company_currency)
            lead.sale_amount_total, lead.adv_sale_amount_total, lead.sale_number = total, adv_total, nbr

    @api.multi
    def _compute_adv_quotations_count(self):
        """Count open (not sale/done/cancel) advertising quotations."""
        for lead in self:
            lead.adv_quotations_count = self.env['sale.order'].search_count(
                [('opportunity_id', '=', lead.id),
                 ('state', 'not in', ('sale', 'done', 'cancel')),
                 ('advertising', '=', True)])

    @api.multi
    def _compute_activities_count(self):
        """Count logged activities, excluding the automatic subtypes.

        NOTE(review): the domain compares ``subtype_id`` against subtype
        *names* (English and Dutch); confirm this matches how
        ``crm.activity.report`` exposes the field.
        """
        for lead in self:
            lead.activities_count = self.env['crm.activity.report'].search_count(
                [('lead_id', '=', lead.id),
                 ('subtype_id', 'not in', ('Lead Created', 'Stage Changed', 'Opportunity Won', 'Discussions', 'Note', 'Lead aangemaakt', 'Fase gewijzigd', 'Prospect gewonnen', 'Discussies', 'Notitie')),
                 ('subtype_id', '!=', False)])

    @api.model
    def read_group(self, domain, fields, groupby, offset=0, limit=None, orderby=False, lazy=True):
        """Force a fixed stage column order in the opportunities view.

        When called from the standard opportunities action and grouped by
        stage, return one group per stage (except the 'logged' stage) in
        stage order; otherwise defer to the standard implementation.
        """
        ctx = self.env.context
        if 'params' in ctx and 'action' in ctx['params']:
            if ctx['params']['action'] == self.env.ref("crm.crm_lead_opportunities_tree_view").id:
                if groupby and groupby[0] == "stage_id":
                    stage_logged = self.env.ref("sale_advertising_order.stage_logged")
                    states_read = self.env['crm.stage'].search_read([('id', '!=', stage_logged.id)], ['name'])
                    states = [(state['id'], state['name']) for state in states_read]
                    read_group_res = super(Lead, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby)
                    result = []
                    for state_value, state_name in states:
                        # list() so the result is subscriptable on Python 3
                        # as well (filter() returns an iterator there).
                        res = list(filter(lambda x: x['stage_id'] == (state_value, state_name), read_group_res))
                        # NOTE(review): assumes every stage appears in
                        # read_group_res; an empty match raises IndexError
                        # (pre-existing behavior).
                        res[0]['stage_id'] = [state_value, state_name]
                        result.append(res[0])
                    return result
        return super(Lead, self).read_group(domain, fields, groupby, offset=offset, limit=limit, orderby=orderby)

    @api.onchange('user_id')
    def _onchange_userid(self):
        # Mirror the salesperson's name into the plain-text field.
        self.name_salesperson = self.user_id.name

    @api.onchange('published_customer')
    def onchange_published_customer(self):
        """Propagate the advertiser's contact details onto the lead."""
        values = {}
        if self.published_customer:
            advertiser = self.published_customer
            values = {
                'partner_name': advertiser.name,
                'partner_id': self.published_customer.id,
                'title': advertiser.title.id,
                'email_from': advertiser.email,
                'phone': advertiser.phone,
                'mobile': advertiser.mobile,
                'fax': advertiser.fax,
                'zip': advertiser.zip,
                'function': advertiser.function,
                'ad_agency_id': False,
            }
        return {'value': values}

    @api.onchange('ad_agency_id')
    def onchange_agency(self):
        """Propagate the agency's contact details onto the lead."""
        values = {}
        if self.ad_agency_id:
            agency = self.ad_agency_id
            values = {
                'partner_id': agency.id,
                'title': agency.title and agency.title.id or False,
                'email_from': agency.email,
                'phone': agency.phone,
                'mobile': agency.mobile,
                'fax': agency.fax,
                'zip': agency.zip,
                'function': agency.function,
            }
        return {'value': values}

    @api.onchange('partner_id')
    def onchange_partner(self):
        """Fill address, industry and contact fields from the payer partner."""
        if not self.partner_id:
            return {}
        part = self.partner_id
        addr = self.partner_id.address_get(['delivery', 'invoice', 'contact'])
        if part.type == 'contact':
            contact = self.env['res.partner'].search([('is_company', '=', False), ('type', '=', 'contact'), ('parent_id', '=', part.id)])
            if len(contact) >= 1:
                contact_id = contact[0]
            else:
                contact_id = False
        elif addr['contact'] == addr['default']:
            # NOTE(review): address_get() is not shown to return a
            # 'default' key on this Odoo version — confirm this branch
            # cannot raise KeyError.
            contact_id = False
        else:
            contact_id = addr['contact']
        values = self._onchange_partner_id_values(part.id)
        values.update({
            'industry_id': part.sector_id,
            'secondary_industry_ids': [(6, 0, part.secondary_industry_ids.ids)],
            'opt_out': part.opt_out,
            'partner_name': part.name,
            'partner_contact_id': contact_id,
            'partner_invoice_id': addr['invoice'],
            'partner_shipping_id': addr['delivery'],
        })
        return {'value': values}

    @api.onchange('partner_contact_id')
    def onchange_contact(self):
        """Fill (or clear) the contact person details on the lead."""
        if self.partner_contact_id:
            partner = self.partner_contact_id
            values = {
                'contact_name': partner.name,
                'title': partner.title.id,
                'email_from': partner.email,
                'phone': partner.phone,
                'mobile': partner.mobile,
                'function': partner.function,
            }
        else:
            values = {
                'contact_name': False,
                'title': False,
                'email_from': False,
                'phone': False,
                'mobile': False,
                'function': False,
            }
        return {'value': values}

    @api.multi
    def _convert_opportunity_data(self, customer, team_id=False):
        """Build the value dict used when converting this lead to an opportunity.

        :param customer: advertiser partner record (or falsy)
        :param team_id: sales team id; defaults to the lead's team
        :return: dict of opportunity values
        """
        if not team_id:
            team_id = self.team_id.id if self.team_id else False
        val = {
            'planned_revenue': self.planned_revenue,
            'probability': self.probability,
            'name': self.name,
            'partner_name': self.partner_name,
            'contact_name': self.contact_name,
            'street': self.street,
            'street2': self.street2,
            'zip': self.zip,
            'city': self.city,
            'state_id': self.state_id.id,
            'country_id': self.country_id.id,
            'title': self.title.id,
            'email_from': self.email_from,
            'function': self.function,
            'phone': self.phone,
            'mobile': self.mobile,
            'tag_ids': [(6, 0, [tag_id.id for tag_id in self.tag_ids])],
            'user_id': (self.user_id and self.user_id.id),
            'type': 'opportunity',
            'date_action': fields.datetime.now(),
            'date_open': fields.datetime.now(),
        }
        # The trailing commas that used to follow these assignments turned
        # the values into 1-tuples; plain ids are intended.
        if customer:
            val['published_customer'] = customer.id
        if self.partner_id:
            val['partner_id'] = self.partner_id.id
        if self.ad_agency_id:
            val['ad_agency_id'] = self.ad_agency_id.id
        if self.partner_invoice_id:
            val['partner_invoice_id'] = self.partner_invoice_id.id
        if self.partner_shipping_id:
            val['partner_shipping_id'] = self.partner_shipping_id.id
        if self.partner_contact_id:
            val['partner_contact_id'] = self.partner_contact_id.id
        if not self.stage_id:
            stage = self._stage_find(team_id=team_id)
            val['stage_id'] = stage.id
            if stage:
                val['probability'] = stage.probability
        return val

    @api.multi
    def handle_partner_assignation(self, action='create', partner_id=False):
        """ Handle partner assignation during a lead conversion.
            if action is 'create', create new partner with contact and assign
            lead to new partner_id (and mirror it to published_customer).
            otherwise assign lead to the specified partner_id

            :param string action: what has to be done regarding partners (create it, assign an existing one, or nothing)
            :param int partner_id: partner to assign if any
            :return dict: dictionary organized as followed: {lead_id: partner_assigned_id}
        """
        # NOTE: this method used to be defined twice in this file; the
        # first definition (which also accepted action == 'nothing') was
        # dead code because this one overrode it, so it was removed.
        partner_ids = {}
        for lead in self:
            if lead.partner_id:
                partner_ids[lead.id] = lead.partner_id.id
                continue
            if action == 'create':
                partner = lead._create_lead_partner()
                partner_id = partner.id
                partner.team_id = lead.team_id
            if partner_id:
                lead.partner_id = partner_id
                lead.published_customer = partner_id
                partner_ids[lead.id] = partner_id
        return partner_ids

    # -- deep added
    @api.model
    def _get_duplicated_leads_by_emails(self, partner_id, email, include_lost=False):
        """Search for opportunities that have the same partner (payer or
        advertiser) or email and that aren't done or cancelled."""
        partnerDict = self._get_partnerDetails(partner_id)
        final_stage_domain = [('stage_id.probability', '<', 100), '|', ('stage_id.probability', '>', 0), ('stage_id.sequence', '<=', 1)]
        partner_match_domain = []
        for email in set(email_split(email) + [email]):
            partner_match_domain.append(('email_from', '=ilike', email))
        if partnerDict:
            partner_match_domain.append(('partner_id', '=', partnerDict['partner_id']))
            partner_match_domain.append(('published_customer', '=', partnerDict['advertiser']))
        # Prefix with OR operators so all criteria are alternatives.
        partner_match_domain = ['|'] * (len(partner_match_domain) - 1) + partner_match_domain
        if not partner_match_domain:
            return []
        domain = partner_match_domain
        if not include_lost:
            domain += final_stage_domain
        return self.search(domain)

    @api.model
    def _get_partnerDetails(self, partnerID=False):
        """Return the partner/agent/advertiser ids for ``partnerID``.

        :return dict: ``{'partner_id': id, 'agent': id or False,
                         'advertiser': id or False}``; empty dict when no
                         id is given.
        """
        if not partnerID:
            return {}
        Partner = self.env['res.partner'].browse(partnerID)
        lead = self
        # A partner is set already in the lead.
        if lead.partner_id and lead.published_customer:
            res = {'partner_id': lead.partner_id.id,
                   'agent': lead.ad_agency_id.id if lead.partner_id.is_ad_agency else False,
                   'advertiser': lead.published_customer.id
                   }
        elif Partner.is_ad_agency:
            res = {'partner_id': Partner.id,
                   'agent': Partner.id,
                   'advertiser': False
                   }
        elif not Partner.is_ad_agency:
            res = {'partner_id': Partner.id,
                   'agent': False,
                   'advertiser': Partner.id
                   }
        return res

    @api.model
    def retrieve_sales_dashboard(self):
        """Extend the CRM sales dashboard with task, confirmed-sale and
        overdue-quotation figures for the current user."""
        result = super(Lead, self).retrieve_sales_dashboard()

        # Task counts for today and the coming week.
        tasks = self.env['project.task'].search([('user_id', '=', self._uid)])
        result['task'] = {
            'today': 0,
            'next_7_days': 0,
        }
        for task in tasks:
            # (The condition was accidentally duplicated here before.)
            if task.date_assign:
                date_assign = fields.Date.from_string(task.date_assign)
                if date_assign == date.today():
                    result['task']['today'] += 1
            if task.date_deadline:
                date_deadline = fields.Date.from_string(task.date_deadline)
                if date.today() <= date_deadline <= date.today() + timedelta(days=7):
                    result['task']['next_7_days'] += 1

        # Confirmed sales for this month and the previous one.
        current_datetime = datetime.now()
        result['sale_confirmed'] = {
            'this_month': 0,
            'last_month': 0,
        }
        sale_order_domain = [
            ('state', 'in', ['sale', 'done']),
            ('user_id', '=', self.env.uid),
        ]
        sale_data = self.env['sale.order'].search_read(sale_order_domain, ['confirmation_date', 'amount_untaxed'])
        for sale in sale_data:
            if sale['confirmation_date']:
                sale_date = fields.Datetime.from_string(sale['confirmation_date'])
                if sale_date <= current_datetime and sale_date >= current_datetime.replace(day=1):
                    result['sale_confirmed']['this_month'] += sale['amount_untaxed']
                elif sale_date < current_datetime.replace(day=1) and sale_date >= current_datetime.replace(day=1) - relativedelta(months=+1):
                    result['sale_confirmed']['last_month'] += sale['amount_untaxed']

        result['invoiced']['target'] = self.env.user.target_sales_invoiced

        # Overdue quotations, split regular vs advertising.
        result['reg_quotes'] = {'overdue': 0}
        result['adv_quotes'] = {'overdue': 0}
        quote_domain = [
            ('state', 'not in', ['sale', 'done']),
            ('user_id', '=', self.env.uid),
            ('validity_date', '<', fields.Date.to_string(date.today())),
        ]
        quote_data = self.env['sale.order'].search(quote_domain)
        for quote in quote_data:
            if not quote.advertising:
                result['reg_quotes']['overdue'] += 1
            else:
                result['adv_quotes']['overdue'] += 1
        return result

    @api.model
    def fields_view_get(self, view_id=None, view_type='form', toolbar=False, submenu=False):
        """Hide the 'logged' stage from the stage selector on the
        opportunities form view."""
        res = super(Lead, self).fields_view_get(view_id=view_id, view_type=view_type, toolbar=toolbar, submenu=submenu)
        if view_type == 'form':
            ctx = self.env.context
            if 'params' in ctx and 'action' in ctx['params']:
                doc = etree.XML(res['arch'])
                if ctx['params']['action'] == self.env.ref("crm.crm_lead_opportunities_tree_view").id and doc.xpath("//field[@name='stage_id']"):
                    stage = doc.xpath("//field[@name='stage_id']")[0]
                    stage_logged = self.env.ref("sale_advertising_order.stage_logged")
                    stage.set('domain', "['|', ('team_id', '=', team_id), ('team_id', '=', False), ('id', '!=', %d)]" % (stage_logged.id))
                    res['arch'] = etree.tostring(doc)
        return res

    @api.multi
    def action_set_lost(self):
        """Mark as lost but keep the lead active, moving it to the
        dedicated 'lost' stage instead of archiving it."""
        lead = super(Lead, self).action_set_lost()
        for rec in self:
            stage_lost = rec.env.ref("sale_advertising_order.stage_lost")
            rec.write({'stage_id': stage_lost.id, 'active': True})
        return lead

    # @api.multi
    # def redirect_opportunity_view(self):
    #     adv_opportunity_view = super(Lead, self).redirect_opportunity_view()
    #     form_view = self.env.ref('sale_advertising_order.crm_case_form_view_oppor_advertising')
    #     adv_opportunity_view['views'][0] = (form_view.id, 'form')
    #     return adv_opportunity_view
class Team(models.Model):
    """Advertising extension of ``crm.team``.

    Replaces the dashboard 'invoiced' figure with this month's confirmed
    sale-order totals and teaches the pipeline action about advertising.
    """
    _inherit = ['crm.team']

    @api.multi
    def _compute_invoiced(self):
        """Sum untaxed amounts of sale orders confirmed this month, per team."""
        for team in self:
            now = datetime.now()
            month_start = now.replace(day=1)
            orders = self.env['sale.order'].search([
                ('state', 'in', ['sale', 'done']),
                ('team_id', '=', team.id),
                ('confirmation_date', '<=', now.strftime('%Y-%m-%d %H:%M:%S')),
                ('confirmation_date', '>=', month_start.strftime('%Y-%m-%d %H:%M:%S')),
            ])
            team.invoiced = sum(orders.mapped('amount_untaxed'))

    @api.model
    def action_your_pipeline(self):
        """Open the user's pipeline; with ``advertising`` in the context,
        load the advertising form view, defaults and domain filter."""
        action = self.env.ref('crm.crm_lead_opportunities_tree_view').read()[0]

        user_team_id = self.env.user.sale_team_id.id
        if not user_team_id:
            # No team on the user: fall back to the first team and explain.
            user_team_id = self.search([], limit=1).id
            action['help'] = """<p class='oe_view_nocontent_create'>Click here to add new opportunities</p><p>
    Looks like you are not a member of a sales team. You should add yourself
    as a member of one of the sales teams.
</p>"""
            if user_team_id:
                action['help'] += "<p>As you don't belong to any sales team, Odoo opens the first one by default.</p>"

        ctx = safe_eval(action['context'], {'uid': self.env.uid})
        if user_team_id:
            ctx['default_team_id'] = user_team_id

        dom = safe_eval(action['domain'])
        tree_view_id = self.env.ref('crm.crm_case_tree_view_oppor').id
        form_view_id = self.env.ref('crm.crm_case_form_view_oppor').id
        kanb_view_id = self.env.ref('crm.crm_case_kanban_view_leads').id

        # Load Views for Advertising:
        if self._context.get('advertising', False):
            form_view_id = self.env.ref('crm.crm_case_form_view_oppor_advertising').id
            ctx['default_advertising'] = True
            dom.append(('is_activity', '=', False))
        if self._context.get('search_default_partner_id', False):
            ctx['search_default_partner_id'] = self._context['active_id']

        action['views'] = [
            [kanb_view_id, 'kanban'],
            [tree_view_id, 'tree'],
            [form_view_id, 'form'],
            [False, 'graph'],
            [False, 'calendar'],
            [False, 'pivot'],
        ]
        action['context'] = ctx
        action['domain'] = dom
        return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
24,510 | b8dfb10ac2fa5b98d4b205dd4b6427b73c07a027 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest
from absl.testing import parameterized
import jax
from jax import random
from jax import test_util as jtu
from jax import tree_util
import jax.numpy as jnp
from jaxopt import objective
from jaxopt import ScipyBoundedLeastSquares
from jaxopt import ScipyBoundedMinimize
from jaxopt import ScipyLeastSquares
from jaxopt import ScipyMinimize
from jaxopt import ScipyRootFinding
from jaxopt._src import scipy_wrappers
from jaxopt._src import test_util
from jaxopt._src.tree_util import tree_scalar_mul
import numpy as onp
import scipy as osp
from sklearn import datasets
class JnpToOnpTest(jtu.JaxTestCase):
  """Tests the pytree <-> flat-array conversion helpers in scipy_wrappers."""

  def setUp(self):
    super().setUp()
    m, n = 10, 9
    # Hardcoded split points / shapes used to build the test pytrees.
    self.x_split_indices = [4, 6, 9]
    self.x_reshape_shape = [-1, 2]
    self.y_split_indices = [6, 8]
    self.y_reshape_shape = [-1, 3]
    key = random.PRNGKey(0)
    key_x, key_w1, key_w2 = random.split(key, 3)
    self.x = random.normal(key_x, [m])
    self.w1 = random.normal(key_w1, [m, m])
    self.w2 = random.normal(key_w2, [m, n])

  @staticmethod
  def _do_pytree(array, split_indices, reshape_shape):
    """Creates a (hardcoded) PyTree out of flat 1D array for testing purposes."""
    pytree = jnp.split(array, split_indices)
    pytree[0] = pytree[0].reshape(reshape_shape)
    pytree[-1] = {'-1': pytree[-1]}
    return pytree

  @staticmethod
  def _undo_pytree(pytree):
    """Recovers flat 1D array from (hardcoded) PyTree for testing purposes."""
    pytree = pytree.copy()
    pytree[0] = pytree[0].reshape([-1])
    pytree[-1] = pytree[-1]['-1']
    return jnp.concatenate(pytree)

  def test_vals_and_jac(self):
    # Checks that outputs and Jacobians agree between the flat and the
    # pytree-structured representations, and that jnp_to_onp round-trips.
    fun_flat = lambda x: jnp.dot(jnp.arctan(jnp.dot(x, self.w1)), self.w2)
    def fun(x_pytree):
      """Wraps fun_flat with (hardcoded) PyTree input / output."""
      x = self._undo_pytree(x_pytree)
      y = fun_flat(x)
      return self._do_pytree(y, self.y_split_indices, self.y_reshape_shape)

    # Tests function output.
    x_pytree = self._do_pytree(self.x,
                               self.x_split_indices,
                               self.x_reshape_shape)
    y_pytree = fun(x_pytree)
    self.assertArraysAllClose(fun_flat(self.x), self._undo_pytree(y_pytree))
    # Tests jnp_to_onp.
    self.assertArraysAllClose(self._undo_pytree(y_pytree),
                              scipy_wrappers.jnp_to_onp(y_pytree))
    # Tests Jacobian.
    x_pytree_topology = scipy_wrappers.pytree_topology_from_example(x_pytree)
    y_pytree_topology = scipy_wrappers.pytree_topology_from_example(y_pytree)
    jac_jnp_to_onp = scipy_wrappers.make_jac_jnp_to_onp(x_pytree_topology,
                                                        y_pytree_topology)
    jac_flat = jax.jacrev(fun_flat)(self.x)
    jac_pytree = jax.jacrev(fun)(x_pytree)
    self.assertArraysAllClose(jac_flat, jac_jnp_to_onp(jac_pytree))
class ScipyMinimizeTest(jtu.JaxTestCase):
  """Tests ScipyMinimize on multiclass logistic regression, comparing
  solutions and implicit-diff Jacobians against scikit-learn."""

  def setUp(self):
    super().setUp()
    self.n_samples, self.n_features, self.n_classes = 50, 5, 3
    self.data = datasets.make_classification(n_samples=self.n_samples,
                                             n_features=self.n_features,
                                             n_classes=self.n_classes,
                                             n_informative=3,
                                             random_state=0)
    self.default_l2reg = float(self.n_samples)

    self.solver_kwargs = {'method': 'L-BFGS-B',
                          'tol': 1e-3,
                          'options': {'maxiter': 500}}

    # Objective taking a single-leaf pytree of parameters.
    def logreg_fun(params, *args, **kwargs):
      params = tree_util.tree_leaves(params)
      return objective.l2_multiclass_logreg(params[0], *args, **kwargs)
    self.logreg_fun = logreg_fun

    # Objective taking a {'W': ..., 'b': ...} pytree of parameters.
    def logreg_with_intercept_fun(params, *args, **kwargs):
      return objective.l2_multiclass_logreg_with_intercept(
          (params['W'], params['b']), *args, **kwargs)
    self.logreg_with_intercept_fun = logreg_with_intercept_fun

  def test_logreg(self):
    # Solution (no intercept) should match sklearn's.
    lbfgs = ScipyMinimize(fun=self.logreg_fun,
                          **self.solver_kwargs)
    pytree_init = jnp.zeros([self.n_features, self.n_classes])
    pytree_fit, _ = lbfgs.run(pytree_init,
                              l2reg=self.default_l2reg,
                              data=self.data)

    # Compare against sklearn.
    pytree_skl = test_util.logreg_skl(X=self.data[0],
                                      y=self.data[1],
                                      lam=self.default_l2reg,
                                      fit_intercept=False)
    for array_skl, array_fit in zip(tree_util.tree_leaves(pytree_skl),
                                    tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_skl, array_fit, atol=1e-3)

  def test_logreg_with_intercept(self):
    # Solution (with intercept) should match sklearn's.
    lbfgs = ScipyMinimize(fun=self.logreg_with_intercept_fun,
                          **self.solver_kwargs)
    pytree_init = {'W': jnp.zeros([self.n_features, self.n_classes]),
                   'b': jnp.zeros([self.n_classes])}
    pytree_fit, _ = lbfgs.run(pytree_init,
                              l2reg=self.default_l2reg,
                              data=self.data)

    # Compare against sklearn.
    pytree_skl = test_util.logreg_skl(X=self.data[0],
                                      y=self.data[1],
                                      lam=self.default_l2reg,
                                      fit_intercept=True)
    for array_skl, array_fit in zip(tree_util.tree_leaves(pytree_skl),
                                    tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_skl, array_fit, atol=1e-3)

  def test_logreg_implicit_diff(self):
    # Make sure the decorator works, evaluating the Jacobian at sklearn's sol.
    pytree_skl = test_util.logreg_skl(X=self.data[0],
                                      y=self.data[1],
                                      lam=self.default_l2reg)
    lbfgs = ScipyMinimize(fun=self.logreg_fun,
                          **self.solver_kwargs)
    def wrapper(hyperparams):
      sol_skl = pytree_skl
      return lbfgs.run(sol_skl, hyperparams, self.data).params
    jac_num = test_util.logreg_skl_jac(X=self.data[0],
                                       y=self.data[1],
                                       lam=self.default_l2reg)
    jac_custom = jax.jacrev(wrapper)(self.default_l2reg)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-3)

  def test_logreg_with_intercept_implicit_diff(self):
    # Make sure the decorator works, evaluating the Jacobian at sklearn's sol.
    pytree_skl = test_util.logreg_skl(X=self.data[0],
                                      y=self.data[1],
                                      lam=self.default_l2reg,
                                      fit_intercept=True)
    lbfgs = ScipyMinimize(fun=self.logreg_with_intercept_fun,
                          **self.solver_kwargs)
    def wrapper(hyperparams):
      sol_skl = {'W': pytree_skl[0], 'b': pytree_skl[1]}
      return lbfgs.run(sol_skl, hyperparams, self.data).params
    jac_num = test_util.logreg_skl_jac(X=self.data[0],
                                       y=self.data[1],
                                       lam=self.default_l2reg,
                                       fit_intercept=True)
    jac_custom = jax.jacrev(wrapper)(self.default_l2reg)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-3)
class ScipyBoundedMinimizeTest(jtu.JaxTestCase):
  """Tests ScipyBoundedMinimize on box-constrained least squares,
  comparing against scipy.optimize.lsq_linear."""

  def setUp(self):
    super().setUp()
    self.n_samples, self.n_features = 50, 5
    self.data = datasets.make_regression(n_samples=self.n_samples,
                                         n_features=self.n_features,
                                         n_informative=3,
                                         random_state=0)
    self.params_init = jnp.zeros(self.n_features)
    self.bounds = (-jnp.ones(self.n_features),
                   +jnp.ones(self.n_features))
    # Formats iterates with an (arbitrary) pytree structure for testing
    # purposes.
    self.pytree_init = self._to_pytree(self.params_init)
    self.pytree_bounds = (self._to_pytree(self.bounds[0]),
                          self._to_pytree(self.bounds[1]))

    # Least-squares objective over the pytree-structured parameters.
    def fun(params, data):
      params = jnp.hstack([params['p0'], params['p1'], params['p2']])
      return objective.least_squares(params, data)
    self.fun = fun

    self.solver_kwargs = {'method': 'L-BFGS-B',
                          'tol': 1e-3,
                          'options': {'maxiter': 500}}

  def _scipy_sol(self, init_params, **kwargs):
    # Reference solution from SciPy's bounded linear least squares.
    return osp.optimize.lsq_linear(self.data[0], self.data[1], **kwargs).x

  def _scipy_box_sol(self, init_params, box_len, **kwargs):
    # Reference solution with the box scaled by box_len.
    if 'bounds' not in kwargs:
      kwargs['bounds'] = self.bounds
    kwargs['bounds'] = (box_len * kwargs['bounds'][0],
                        box_len * kwargs['bounds'][1])
    return self._scipy_sol(init_params, **kwargs)

  def _scipy_box_jac(self, init_params, box_len, eps=1e-2, **kwargs):
    # Centered finite-difference Jacobian of the solution w.r.t. box_len.
    return (
        self._scipy_box_sol(init_params, box_len + eps, **kwargs) -
        self._scipy_box_sol(init_params, box_len - eps, **kwargs)) / (2. * eps)

  @staticmethod
  def _to_pytree(pytree):
    # Splits a flat parameter vector into an arbitrary pytree layout.
    return {'p0': pytree[0], 'p1': pytree[1:3], 'p2': pytree[3:]}

  def test_fwd(self):
    lbfgs = ScipyBoundedMinimize(fun=self.fun, **self.solver_kwargs)
    pytree_fit, _ = lbfgs.run(self.pytree_init,
                              bounds=self.pytree_bounds,
                              data=self.data)

    # Checks box constraints.
    for array_lb, array_fit, array_ub in zip(
        tree_util.tree_leaves(self.pytree_bounds[0]),
        tree_util.tree_leaves(pytree_fit),
        tree_util.tree_leaves(self.pytree_bounds[1])):
      self.assertTrue(jnp.all(array_lb <= array_fit).item())
      self.assertTrue(jnp.all(array_fit <= array_ub).item())

    # Compares objective values against SciPy.
    pytree_osp = self._scipy_sol(self.params_init, bounds=self.bounds)
    pytree_osp = self._to_pytree(pytree_osp)
    for array_osp, array_fit in zip(tree_util.tree_leaves(pytree_osp),
                                    tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_osp, array_fit, atol=1e-3)

  def test_bwd_box_len(self):
    lbfgs = ScipyBoundedMinimize(fun=self.fun,
                                 **self.solver_kwargs)

    # NOTE: cannot use solution as init since changing box_len might make the
    # init infeasible.
    def wrapper(box_len):
      scaled_bounds = (tree_scalar_mul(box_len, self.pytree_bounds[0]),
                       tree_scalar_mul(box_len, self.pytree_bounds[1]))
      return lbfgs.run(self.pytree_init, scaled_bounds, self.data).params

    box_len = 10.0
    jac_num = self._scipy_box_jac(self.params_init,
                                  box_len,
                                  bounds=self.bounds)
    jac_num = self._to_pytree(jac_num)
    jac_custom = jax.jacrev(wrapper)(box_len)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-2)
class ScipyRootFindingTest(jtu.JaxTestCase):
  """Tests ScipyRootFinding on a linear system, with flat and pytree
  unknowns, including implicit differentiation."""

  def setUp(self):
    super().setUp()
    n = 6
    key = random.PRNGKey(0)
    key_a, key_x = random.split(key)
    self.a = random.normal(key_a, [n, n])
    self.a1, self.a2 = self.a[:, :n // 2], self.a[:, n // 2:]
    self.x = random.normal(key_x, [n])
    self.x1, self.x2 = self.x[:n // 2], self.x[n // 2:]
    self.b = self.a.dot(self.x)
    self.b1, self.b2 = self.b[:n // 2], self.b[n // 2:]
    # Root of fun(x, b) = a.x - b recovers x = a^{-1} b.
    self.fun = lambda x, b: self.a.dot(x) - b
    # Same system with the unknown and the rhs split into dict pytrees.
    def fun_pytree(x, b):
      o = self.a1.dot(x['x1']) + self.a2.dot(x['x2'])
      o1, o2 = o[:n // 2], o[n // 2:]
      return {'x1': o1 - b['b1'], 'x2': o2 - b['b2']}
    self.fun_pytree = fun_pytree

    self.solver_kwargs = {'method': 'hybr'}

  def test_linalg_inv(self):
    # The root of the linear residual recovers the known solution.
    root = ScipyRootFinding(optimality_fun=self.fun, **self.solver_kwargs)
    pytree_fit, _ = root.run(jnp.zeros_like(self.x), self.b)
    self.assertArraysAllClose(pytree_fit, self.x, atol=1e-3)

  def test_linalg_inv_idf(self):
    # Implicit-diff Jacobian of the root w.r.t. b equals a^{-1}.
    root = ScipyRootFinding(optimality_fun=self.fun, **self.solver_kwargs)
    def wrapper(b):
      return root.run(jnp.zeros_like(self.x), b).params
    jac_theo = jnp.linalg.inv(self.a)
    jac_idf = jax.jacrev(wrapper)(self.b)
    self.assertArraysAllClose(jac_theo, jac_idf, atol=1e-3)

  @parameterized.product(pytree_type=['tuple'])
  def test_linalg_inv_pytree(self, pytree_type: str):
    # Same as test_linalg_inv but with pytree-structured unknowns.
    pytree_init = {'x1': jnp.zeros_like(self.x1),
                   'x2': jnp.zeros_like(self.x2)}
    b = {'b1': self.b1, 'b2': self.b2}
    root = ScipyRootFinding(optimality_fun=self.fun_pytree,
                            **self.solver_kwargs)
    pytree_fit, _ = root.run(pytree_init, b)
    for array_true, array_fit in zip(tree_util.tree_leaves((self.x1, self.x2)),
                                     tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_true, array_fit, atol=1e-3)

  @parameterized.product(pytree_type=['dict'])
  def test_linalg_inv_pytree_idf(self, pytree_type: str):
    # Implicit diff with pytree unknowns; blocks reassembled into a^{-1}.
    pytree_init = {'x1': jnp.zeros_like(self.x1),
                   'x2': jnp.zeros_like(self.x2)}
    b = {'b1': self.b1, 'b2': self.b2}
    root = ScipyRootFinding(optimality_fun=self.fun_pytree,
                            **self.solver_kwargs)
    def wrapper(b):
      return root.run(pytree_init, b).params
    jac_theo = jnp.linalg.inv(self.a)
    jac_idf = jax.jacrev(wrapper)(b)
    ## `jnp.block` requires inputs to be (nested) lists.
    if pytree_type == 'dict':
      jac_idf = [list(blk_row.values()) for blk_row in jac_idf.values()]
    else:
      jac_idf = [list(blk_row) for blk_row in jac_idf]
    jac_idf = jnp.block(jac_idf)
    self.assertArraysAllClose(jac_theo, jac_idf, atol=1e-3)
class ScipyLeastSquaresTest(jtu.JaxTestCase):
  """Tests for the SciPy least-squares bridge: forward fit and implicit diff
  against finite-difference reference Jacobians computed with SciPy."""

  def setUp(self):
    super().setUp()
    self.slope = 2.5
    self.params_init = jnp.asarray([0.0, 0.0, 0.0, 0.0])
    self.params_true = jnp.asarray([1.5, -1.5, -0.25, 1.0])
    # Formats iterates with an (arbitrary) pytree structure for testing
    # purposes.
    self.pytree_init = self._to_pytree(self.params_init)
    self.pytree_true = self._to_pytree(self.params_true)

    def model_fun(params, slope, x):
      """Implements a toy non-linear curve fitting problem on 2D.

      The model is defined as
        `f(theta; x) = offset + slope * sigmoid(w1 * x1 + w2 * x2 - t0)`
      where, for the purposes of testing, `theta = (w1, w2, t0, offset)` are
      treated as the parameters to be fit via least-squares and `slope` as a
      hyperparameter used to test implicit differentiation.

      Args:
        params: a pytree containing the parameters (w1, w2, t0, offset) of the
          model.
        slope: a float with the slope hyperparameter.
        x: a np.ndarray<float>[batch, 2] representing the covariates.
      Returns:
        a np.ndarray<float>[batch] with the model output for each row in x.
      """
      w = params['w']
      t0 = params['t0']
      offset = params['offset']
      return offset + slope * jax.nn.sigmoid(jnp.dot(x, w) - t0)

    def fun(params, slope, data):
      """Computes the residuals of `model_fun` above at `data`."""
      x, y_true = data
      return y_true - model_fun(params, slope, x)

    self.fun = fun
    n_samples = 25
    key = random.PRNGKey(0)
    x = random.normal(key, [n_samples, 2])
    # Noiseless targets from the ground-truth parameters, so the fit should
    # recover `pytree_true` (up to solver tolerance).
    y_true = model_fun(self.pytree_true, self.slope, x)
    self.data = (x, y_true)
    # float64 copy of the data for the SciPy reference solver.
    self.onp_data = tree_util.tree_map(lambda t: onp.asarray(t, onp.float64),
                                       self.data)
    self.solver_kwargs = {'method': 'trf'}

  @staticmethod
  def _to_pytree(pytree):
    # Packs a flat length-4 array as {'w': first two, 't0': third, 'offset': fourth}.
    return {'w': pytree[:2], 't0': pytree[2], 'offset': pytree[3]}

  def _scipy_sol(self, init_params, slope, **kwargs):
    """Reference solution computed directly with scipy.optimize.least_squares."""
    def scipy_fun(params):
      x, y_true = self.onp_data
      w, t0, offset = params[:2], params[2], params[3]
      # NumPy re-implementation of model_fun's sigmoid expression.
      y_pred = offset + slope * onp.reciprocal(1. + onp.exp(t0 - onp.dot(x, w)))
      return y_true - y_pred
    return osp.optimize.least_squares(scipy_fun, init_params, **kwargs).x

  def _scipy_slope_jac(self, init_params, slope, eps=1e-2, **kwargs):
    # Central finite differences of the SciPy solution w.r.t. `slope`.
    return (self._scipy_sol(init_params, slope + eps, **kwargs) -
            self._scipy_sol(init_params, slope - eps, **kwargs)) / (2. * eps)

  @parameterized.product(loss=['linear', 'arctan'],
                         f_scale=[0.2, 1.0])
  def test_fwd(self, loss: str, f_scale: float):
    """Forward run must recover the ground-truth parameters."""
    lsq = ScipyLeastSquares(fun=self.fun,
                            loss=loss,
                            options={'f_scale': f_scale},
                            **self.solver_kwargs)
    pytree_fit, _ = lsq.run(self.pytree_init, self.slope, self.data)
    for array_true, array_fit in zip(tree_util.tree_leaves(self.pytree_true),
                                     tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_true, array_fit, atol=1e-3)

  @parameterized.product(loss=['huber'],
                         f_scale=[0.2])
  def test_bwd(self, loss: str, f_scale: float):
    """Implicit-diff Jacobian w.r.t. `slope` vs. finite differences."""
    lsq = ScipyLeastSquares(fun=self.fun,
                            loss=loss,
                            options={'f_scale': f_scale},
                            **self.solver_kwargs)
    def wrapper(slope):
      return lsq.run(self.pytree_true, slope, self.data).params
    jac_num = self._scipy_slope_jac(self.params_true, self.slope,
                                    **{'loss': loss, 'f_scale': f_scale})
    jac_num = self._to_pytree(jac_num)
    jac_custom = jax.jacrev(wrapper)(self.slope)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-3)
class ScipyBoundedLeastSquaresTest(ScipyLeastSquaresTest):
  """Same toy fitting problem as the parent class, with box constraints."""

  def setUp(self):
    super().setUp()
    lb = jnp.asarray([-1.0, -1.0, -1.0, -1.0])
    ub = jnp.asarray([+1.0, +1.0, +1.0, +1.0])
    self.bounds = (lb, ub)
    self.pytree_bounds = (self._to_pytree(lb), self._to_pytree(ub))

  def _scipy_box_sol(self, init_params, box_len, **kwargs):
    # SciPy reference solution with the bounding box scaled by `box_len`.
    if 'bounds' not in kwargs:
      kwargs['bounds'] = self.bounds
    kwargs['bounds'] = (box_len * kwargs['bounds'][0],
                        box_len * kwargs['bounds'][1])
    return self._scipy_sol(init_params, self.slope, **kwargs)

  def _scipy_box_jac(self, init_params, box_len, eps=1e-2, **kwargs):
    # Central finite differences of the solution w.r.t. the box scale.
    return (
        self._scipy_box_sol(init_params, box_len + eps, **kwargs) -
        self._scipy_box_sol(init_params, box_len - eps, **kwargs)) / (2. * eps)

  @parameterized.product(loss=['linear', 'cauchy'],
                         f_scale=[1.0])
  def test_fwd(self, loss: str, f_scale: float):
    """Fit must respect the box and agree with SciPy's bounded solution."""
    lsq = ScipyBoundedLeastSquares(fun=self.fun,
                                   loss=loss,
                                   options={'f_scale': f_scale},
                                   **self.solver_kwargs)
    pytree_fit, _ = lsq.run(
        self.pytree_init, self.pytree_bounds, self.slope, self.data)
    # Checks box constraints.
    for array_lb, array_fit, array_ub in zip(
        tree_util.tree_leaves(self.pytree_bounds[0]),
        tree_util.tree_leaves(pytree_fit),
        tree_util.tree_leaves(self.pytree_bounds[1])):
      self.assertTrue(jnp.all(array_lb <= array_fit).item())
      self.assertTrue(jnp.all(array_fit <= array_ub).item())
    # Compares against SciPy.
    sol_osp = self._scipy_sol(
        self.params_init,
        self.slope,
        **{'bounds': self.bounds, 'loss': loss, 'f_scale': f_scale})
    pytree_osp = self._to_pytree(sol_osp)
    for array_osp, array_fit in zip(tree_util.tree_leaves(pytree_osp),
                                    tree_util.tree_leaves(pytree_fit)):
      self.assertArraysAllClose(array_osp, array_fit, atol=1e-3)

  @parameterized.product(loss=['huber'],
                         f_scale=[1.0])
  def test_bwd_slope(self, loss: str, f_scale: float):
    """Implicit diff w.r.t. `slope`, evaluated at the SciPy optimum."""
    sol_osp = self._scipy_sol(
        self.params_init,
        self.slope,
        **{'bounds': self.bounds, 'loss': loss, 'f_scale': f_scale})
    pytree_osp = self._to_pytree(sol_osp)
    lsq = ScipyBoundedLeastSquares(fun=self.fun,
                                   loss=loss,
                                   options={'f_scale': f_scale},
                                   **self.solver_kwargs)
    def wrapper(slope):
      return lsq.run(pytree_osp, self.pytree_bounds, slope, self.data).params
    jac_num = self._scipy_slope_jac(
        sol_osp,
        self.slope,
        **{'bounds': self.bounds, 'loss': loss, 'f_scale': f_scale})
    jac_num = self._to_pytree(jac_num)
    jac_custom = jax.jacrev(wrapper)(self.slope)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-2)

  @parameterized.product(loss=['soft_l1'],
                         f_scale=[0.2])
  def test_bwd_box_len(self, loss: str, f_scale: float):
    """Implicit diff w.r.t. the scale of the bounding box itself."""
    lsq = ScipyBoundedLeastSquares(fun=self.fun,
                                   loss=loss,
                                   options={'f_scale': f_scale},
                                   **self.solver_kwargs)
    # NOTE: cannot use solution as init since changing box_len might make the
    # init infeasible.
    def wrapper(box_len):
      scaled_bounds = (tree_scalar_mul(box_len, self.pytree_bounds[0]),
                       tree_scalar_mul(box_len, self.pytree_bounds[1]))
      return lsq.run(
          self.pytree_init, scaled_bounds, self.slope, self.data).params
    box_len = 1.0
    jac_num = self._scipy_box_jac(
        self.params_init,
        box_len,
        **{'bounds': self.bounds, 'loss': loss, 'f_scale': f_scale})
    jac_num = self._to_pytree(jac_num)
    jac_custom = jax.jacrev(wrapper)(box_len)
    for array_num, array_custom in zip(tree_util.tree_leaves(jac_num),
                                       tree_util.tree_leaves(jac_custom)):
      self.assertArraysAllClose(array_num, array_custom, atol=1e-2)
if __name__ == '__main__':
  # Uncomment the line below in order to run in float64.
  # jax.config.update("jax_enable_x64", True)
  absltest.main(testLoader=jtu.JaxTestLoader())
|
24,511 | 3f9aadd06d20ba52f674a506f8d8bac93d785c15 | from src.main.alignmentEditDistance import alignmentEditDistance
from src.main import variablesGenerator as vg
# last version is not updated for importer, mine is
import sys
sys.path.append('../pm4py-source')
from pm4py.objects.petri import importer
from pm4py.objects.log.importer.xes import factory as xes_importer
from pm4py.visualization.petrinet import factory as vizu
# Load the reference Petri net (net + initial/final markings) and event log.
net, m0, mf = importer.pnml.import_net("/Users/mboltenhagen/Documents/PhD/Josep&Thomas/model-loops/M8_petri_pnml.pnml")
log = xes_importer.import_log("./examples/M8.xes")
# net, m0, mf = importer.pnml.import_net("./examples/AouC.pnml")
# log = xes_importer.import_log("./examples/A.xes")
# vizu.apply(net,m0,mf).view()
# Run the alignment edit-distance computation with max distance 6.
# NOTE(review): 15 is the fifth positional argument of alignmentEditDistance —
# confirm its meaning against src.main.alignmentEditDistance.
alignmentEditDistance(net, m0, mf, log, 15, max_d=6)
|
24,512 | e223c1afd4059b664a3a4cc9a90420e1ba335416 | # TODO: Group chat
from collections import defaultdict
from uuid import uuid4
class User:
    """A chat participant registered with a Service.

    Holds identity data and delegates all networking-style operations
    (friend requests, chats, messages) to the owning `service`.
    """

    def __init__(self, uid, name, service):
        self.uid = uid
        self.name = name
        self.service = service
        self.friends = []

    def send_friend_request(self, id):
        # The service mediates the whole request/response handshake.
        self.service.handle_friend_request(self.uid, id)

    def notify_friend_acceptance(self, result, friend):
        if not result:
            print('Hey, {0}, {1} has rejected you a friend request'.format(
                self.name, friend.name))
            return
        print('Hey, {0}, {1} has accepted you a friend request'.format(
            self.name, friend.name))
        print('Why not send a message?')
        text = input()
        self.send_message(friend.uid, text)

    def accept_friend_request(self, name):
        print('Hey, {0}, {1} has sent you a friend request. Accept or Reject?'.format(
            self.name, name))
        return input().lower() == 'accept'

    def open_chat(self, friend_id):
        self.service.open_chat(self.uid, friend_id)

    def send_message(self, friend_id, message):
        self.service.deliver_message(self.uid, friend_id, message)

    def read_chat(self, friend_id):
        self.service.lookup_chat(self.uid, friend_id)

    def notify_new_message(self, sender):
        print('Hey, {0}, {1} has sent you a new message. Want to check it out? Yes or No'.format(
            self.name, sender.name))
        if input().lower() == 'yes':
            self.read_chat(sender.uid)
class Message:
    """A single chat message: `uid` is the sender's id, `content` the text."""

    def __init__(self, uid, content):
        self.uid, self.content = uid, content
class Chat:
    """A conversation between two users, storing messages in arrival order."""

    def __init__(self, uid1, uid2):
        self.users = [uid1, uid2]
        self.messages = []

    def add_message(self, new_message):
        self.messages.append(new_message)

    def read_messages(self):
        # Print every message body in the order it was delivered.
        for entry in self.messages:
            print(entry.content)
class Service:
    """Registry of users and chats; routes friend requests and messages."""

    def __init__(self):
        self.users = defaultdict()
        self.chats = defaultdict()

    def register_user(self, name):
        """Create a User with a fresh uuid, store it and return it."""
        new_uid = str(uuid4())
        user = User(new_uid, name, self)
        self.users[new_uid] = user
        return user

    def show_users(self):
        for user in self.users.values():
            print(user.uid, user.name)

    def handle_friend_request(self, uid, id):
        sender, responder = self.users[uid], self.users[id]
        accepted = responder.accept_friend_request(sender.name)
        if accepted:
            responder.friends.append(uid)
            sender.friends.append(id)
        sender.notify_friend_acceptance(accepted, responder)

    def chat_key_generate(self, id1, id2):
        # Order-independent key so (a, b) and (b, a) map to the same chat.
        return "".join(sorted([id1, id2]))

    def open_chat(self, uid, friend_id):
        key = self.chat_key_generate(uid, friend_id)
        if key not in self.chats:
            self.chats[key] = Chat(uid, friend_id)

    def deliver_message(self, uid, friend_id, message):
        key = self.chat_key_generate(uid, friend_id)
        if key not in self.chats:
            self.open_chat(uid, friend_id)
        self.chats[key].add_message(Message(uid, message))
        self.users[friend_id].notify_new_message(self.users[uid])

    def lookup_chat(self, uid, friend_id):
        key = self.chat_key_generate(uid, friend_id)
        if key in self.chats:
            self.chats[key].read_messages()
# Minimal interactive demo: register three users, list them, then send a
# friend request from user1 to whichever uid is typed on stdin.
service = Service()
user1 = service.register_user('Tetsuya')
user2 = service.register_user('Yayoi')
user3 = service.register_user('Ganko')
service.show_users()
friend = input()
user1.send_friend_request(friend)
|
24,513 | 2daf0e0a5959b8677cd2422a627237b968e90e36 | import numpy as np
import pandas as pd
import datetime as dt
def excel_to_csv_tables() -> None:
    """Export the workout spreadsheet tabs to CSV files.

    Reads "workout.xlsx" and writes "volume.csv" (sheet "exercise_volume_2")
    and "exercises.csv" (sheet "exercises"). If the spreadsheet cannot be
    read (e.g. on a server where it was never uploaded), the previously
    exported CSV files are left untouched.
    """
    try:
        workout_excel = pd.ExcelFile("workout.xlsx")
        df_reps = workout_excel.parse("exercise_volume_2")
        df_exercises = workout_excel.parse("exercises")
        df_reps.to_csv("volume.csv", index=False)
        df_exercises.to_csv("exercises.csv", index=False)
    except Exception as exc:
        # FIX: was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt and hid the failure cause entirely.
        print("Error loading spreadsheet. Existing data used.")
        print(f"  cause: {exc!r}")
def load_volume(fname: str) -> pd.DataFrame:
    """Load the rep-count CSV, adding a Monday-based `WeekStartDate` column.

    NaN entries (days with no reps recorded) are replaced with 0.
    """
    frame = pd.read_csv(fname)
    frame["Date"] = pd.to_datetime(frame["Date"]).dt.date
    # Map each date back to the Monday of its week.
    frame["WeekStartDate"] = frame["Date"].map(
        lambda day: day - dt.timedelta(days=day.weekday())
    )
    return frame.replace(np.nan, 0)
def load_exercises(fname: str) -> pd.DataFrame:
    """Read the exercise-metadata CSV as-is."""
    return pd.read_csv(fname)
def get_log() -> pd.DataFrame:
    """Return the full rep log loaded from the canonical volume.csv export."""
    return load_volume("volume.csv")
def prep_spreadsheet_data() -> pd.DataFrame:
    """Refresh the CSV exports and return reps joined with exercise metadata.

    Returns a long-format frame indexed by Date with one row per
    (date, exercise) pair, enriched with the exercise attributes.
    """
    excel_to_csv_tables()
    reps_long = load_volume("volume.csv").melt(id_vars=["Date", "WeekStartDate"])
    exercises = load_exercises("exercises.csv")
    joined = pd.merge(
        reps_long,
        exercises,
        how="inner",
        left_on="variable",
        right_on="Exercise",
    )
    return joined.set_index("Date").drop(columns=["variable"])
def quantity_per_week(df: pd.DataFrame) -> pd.DataFrame:
    """Sum the rep counts per week and movement type.

    Args:
        df: long-format frame with at least the columns `WeekStartDate`,
            `Movement type`, `Exercise` and `value` (the rep count).

    Returns:
        One row per (WeekStartDate, Movement type) with the summed `value`.
    """
    totals = df.groupby(["WeekStartDate", "Movement type"]).sum()
    totals = totals.drop(columns=["Exercise"])
    # FIX: the previous `df.rename({"value": "Reps"})` call was a no-op —
    # without `columns=` it renames *index* labels, not columns. Downstream
    # code (weekly_total_table) relies on the column still being named
    # "value", so the broken call is removed rather than "fixed".
    return totals.reset_index()
def max_reps() -> pd.DataFrame:
    """Per-exercise all-time maximum reps recorded in a single session."""
    counts = load_volume("volume.csv").drop(columns=["Date", "WeekStartDate"])
    return counts.max().rename("Reps")
def weekly_total_table() -> pd.DataFrame:
    """Pivot weekly totals into a WeekStartDate x movement-type table."""
    totals = quantity_per_week(prep_spreadsheet_data())
    return totals.pivot(
        index="WeekStartDate", columns="Movement type", values="value"
    )
if __name__ == "__main__":
    # Quick manual check: print the pivoted weekly totals table.
    df = weekly_total_table()
    print(df)
|
24,514 | 265b0421af421abb2c61e045baeff7698c2ffc78 | """
Memoize decorator for methods whose first argument is the request.
Stores values in an annotation of the request.
This is based on ViewMemo from plone.memoize.
"""
from zope.annotation.interfaces import IAnnotations
# Unique sentinel distinguishing "not cached" from a cached None value.
_marker = object()
class PTSMemo(object):
    """Memoize methods of the form `method(self, request, ...)`.

    Results are stored in an annotation on the request, so the cache lives
    exactly as long as the request does.
    """

    key = 'pts.memoize'

    def memoize(self, func):
        def memogetter(*args, **kwargs):
            instance, request = args[0], args[1]
            annotations = IAnnotations(request)
            cache = annotations.get(self.key, _marker)
            if cache is _marker:
                cache = annotations[self.key] = dict()
            # Cache key covers the class/function names plus all arguments
            # (including the request itself) and keyword arguments.
            cache_key = hash((instance.__class__.__name__, func.__name__,
                              args[1:], frozenset(kwargs.items())),)
            result = cache.get(cache_key, _marker)
            if result is _marker:
                result = cache[cache_key] = func(*args, **kwargs)
            return result
        return memogetter
# Module-level singleton; import `memoize` directly as the decorator.
_m = PTSMemo()
memoize = _m.memoize
class NegotiatorMemo(object):
    """Memoize `method(self, arg, request)` calls, keyed per request.

    Unlike PTSMemo, the cache key ignores the call arguments entirely:
    only the class and function names are hashed, so every call to the
    same method during one request shares a single cached value.
    """

    key = 'pts.memoize_second'

    def memoize(self, func):
        def memogetter(*args):
            instance = args[0]
            request = args[2]
            annotations = IAnnotations(request)
            cache = annotations.get(self.key, _marker)
            if cache is _marker:
                cache = annotations[self.key] = dict()
            cache_key = hash((instance.__class__.__name__, func.__name__),)
            result = cache.get(cache_key, _marker)
            if result is _marker:
                result = cache[cache_key] = func(*args)
            return result
        return memogetter
# Module-level singleton for the request-as-third-argument variant.
_n = NegotiatorMemo()
memoize_second = _n.memoize

# FIX: `__all__` must contain *names* (strings); the previous version listed
# the function objects themselves, which breaks `from module import *`
# filtering and any tooling that inspects __all__.
__all__ = ('memoize', 'memoize_second')
24,515 | afdc37e60fb26b1dfe5c24659b95f6d73136ebe4 | from collections import namedtuple
from migen.fhdl.std import *
from migen.bus import dfi, lasmibus
from misoclib.lasmicon.refresher import *
from misoclib.lasmicon.bankmachine import *
from misoclib.lasmicon.multiplexer import *
PhySettingsT = namedtuple("PhySettings", "memtype dfi_d nphases rdphase wrphase rdcmdphase wrcmdphase cl cwl read_latency write_latency")

def PhySettings(memtype, dfi_d, nphases, rdphase, wrphase, rdcmdphase, wrcmdphase, cl, read_latency, write_latency, cwl=0):
    """Build a PhySettingsT record; `cwl` defaults to 0 when not supplied."""
    return PhySettingsT(memtype=memtype, dfi_d=dfi_d, nphases=nphases,
                        rdphase=rdphase, wrphase=wrphase,
                        rdcmdphase=rdcmdphase, wrcmdphase=wrcmdphase,
                        cl=cl, cwl=cwl,
                        read_latency=read_latency, write_latency=write_latency)
GeomSettingsT = namedtuple("_GeomSettings", "bank_a row_a col_a mux_a")

def GeomSettings(bank_a, row_a, col_a):
    """Build geometry settings; the mux address width is the wider of the
    row and column address widths."""
    mux_a = max(row_a, col_a)
    return GeomSettingsT(bank_a, row_a, col_a, mux_a)
# Raw DRAM timing parameters plus scheduler tuning knobs (queue depth and
# read/write scheduling windows) consumed by LASMIcon below.
TimingSettings = namedtuple("TimingSettings", "tRP tRCD tWR tWTR tREFI tRFC" \
    " req_queue_size read_time write_time")
class LASMIcon(Module):
    """LASMI DRAM controller: refresher + per-bank machines + command multiplexer.

    Exposes a DFI interface towards the PHY (`self.dfi`) and a LASMI bus
    interface towards the system (`self.lasmic`).
    """

    def __init__(self, phy_settings, geom_settings, timing_settings):
        if phy_settings.memtype in ["SDR"]:
            burst_length = phy_settings.nphases*1 # command multiplication*SDR
        elif phy_settings.memtype in ["DDR", "LPDDR", "DDR2", "DDR3"]:
            burst_length = phy_settings.nphases*2 # command multiplication*DDR
        else:
            # FIX: an unknown memtype previously fell through and crashed
            # later with a NameError on burst_length; fail fast instead.
            raise ValueError("unsupported memtype: " + str(phy_settings.memtype))
        address_align = log2_int(burst_length)

        self.dfi = dfi.Interface(geom_settings.mux_a,
            geom_settings.bank_a,
            phy_settings.dfi_d,
            phy_settings.nphases)
        self.lasmic = lasmibus.Interface(
            aw=geom_settings.row_a + geom_settings.col_a - address_align,
            dw=phy_settings.dfi_d*phy_settings.nphases,
            nbanks=2**geom_settings.bank_a,
            req_queue_size=timing_settings.req_queue_size,
            read_latency=phy_settings.read_latency+1,
            write_latency=phy_settings.write_latency+1)
        self.nrowbits = geom_settings.col_a - address_align

        ###

        self.submodules.refresher = Refresher(geom_settings.mux_a, geom_settings.bank_a,
            timing_settings.tRP, timing_settings.tREFI, timing_settings.tRFC)
        # One bank machine per DRAM bank, each bound to its LASMI bank port.
        self.submodules.bank_machines = [BankMachine(geom_settings, timing_settings, address_align, i,
                getattr(self.lasmic, "bank"+str(i)))
            for i in range(2**geom_settings.bank_a)]
        self.submodules.multiplexer = Multiplexer(phy_settings, geom_settings, timing_settings,
            self.bank_machines, self.refresher,
            self.dfi, self.lasmic)

    def get_csrs(self):
        return self.multiplexer.get_csrs()
24,516 | d14e8715287ab0775fa55558caa0a47db80feab7 | from System import *
from GUI import *
if __name__ == '__main__':
checkFile('file.stl')
gui()
|
24,517 | 70b7b777c3511fefa94d41dcda7a7585ccbcc68c | # Generated by Django 3.1.7 on 2021-05-29 13:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: alters each model's `id` field to an explicit
    AutoField and re-declares the Device date columns as plain DateFields."""

    dependencies = [
        ('ReservationTool', '0024_auto_20210528_1226'),
    ]

    operations = [
        migrations.AlterField(
            model_name='consumable',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='createsetup',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='device',
            name='added_date',
            field=models.DateField(),
        ),
        migrations.AlterField(
            model_name='device',
            name='arrival_date',
            field=models.DateField(),
        ),
        migrations.AlterField(
            model_name='device',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='device',
            name='po_date',
            field=models.DateField(),
        ),
        migrations.AlterField(
            model_name='device',
            name='shipped_date',
            field=models.DateField(),
        ),
        migrations.AlterField(
            model_name='devicetype',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='makesetup',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='setuptype',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='team',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='vendor',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
24,518 | 1d0e275befbba55c2010d755a73c59c7a18727e2 | import turtle as t
def Number(num):
n = 20
if (num == 0):
C0(n)
while num > 0:
q = num % 10
num //= 10
if q == 0:
C0(n)
elif q == 1:
C1(n)
elif q == 2:
C2(n)
elif q == 3:
C3(n)
elif q == 4:
C4(n)
elif q == 5:
C5(n)
elif q == 6:
C6(n)
elif q == 7:
C7(n)
elif q == 8:
C8(n)
elif q == 9:
C9(n)
t.forward(1.5 * n)
t.right(180)
t.pendown()
def C0(n):
    """Draw digit 0 (a closed n x 2n box); pen is lifted on exit."""
    # Stroke list of (degrees to turn right, distance to draw).
    # NOTE: the local list shadows the function name C0 — intentional-looking
    # but confusing; it only lives inside this call.
    C0 = [(0, n), (90, 2 * n), (90, n), (90, 2 * n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.left(90)
    t.penup()
def C1(n):
    """Draw digit 1, then move the turtle past the digit cell."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(225, 2 ** 0.5 * n), (135, 2 * n)]
    t.penup()
    t.right(90)
    t.forward(n)
    t.pendown()
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.left(180)
    t.forward(2 * n)
    t.left(90)
    t.penup()
    t.forward(n)
def C2(n):
    """Draw digit 2, then reposition for the next digit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (90, n), (45, 2 ** 0.5 * n), (225, n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.left(90)
    t.penup()
    t.forward(2 * n)
    t.left(90)
    t.forward(n)
def C3(n):
    """Draw digit 3, then reposition for the next digit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (135, n * 2 ** 0.5), (225, n), (135, 2 ** 0.5 * n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.right(135)
    t.penup()
    t.forward(2 * n)
    t.left(90)
def C4(n):
    """Draw digit 4; pen is lifted on exit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(90, n), (270, n), (90, n), (180, 2 * n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.left(90)
    t.penup()
    t.forward(n)
def C5(n):
    """Draw digit 5, then reposition for the next digit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (180, n), (270, n), (270, n), (90, n), (90, n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.right(90)
    t.penup()
    t.forward(2 * n)
    t.left(90)
def C6(n):
    """Draw digit 6 (starts by moving in with the pen up)."""
    t.penup()
    t.forward(n)
    t.pendown()
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(135, 2 ** 0.5 * n), (225, n), (90, n), (90, n), (90, n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.penup()
    t.forward(n)
    t.left(90)
def C7(n):
    """Draw digit 7, then reposition for the next digit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (135, n * 2 ** 0.5), (315, n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.penup()
    t.left(180)
    t.forward(2 * n)
    t.left(90)
def C8(n):
    """Draw digit 8; pen is lifted on exit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (90, n), (90, n), (180, n), (90, n), (90, n), (90, 2 * n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.penup()
    t.left(90)
def C9(n):
    """Draw digit 9; pen is lifted on exit."""
    # Stroke list of (degrees to turn right, distance to draw).
    C0 = [(0, n), (90, n), (45, 2 ** 0.5 * n), (180, n * 2 ** 0.5), (225, n), (90, n)]
    for u, d in C0:
        t.right(u)
        t.forward(d)
    t.penup()
    t.left(90)
# Read an integer from stdin, draw it, then wait for Enter so the turtle
# window stays open.
num = int(input())
Number(num)
input()
|
24,519 | 9467cf64dba59338aeb1306910c455cba66e89c1 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function, unicode_literals
""" ForestWalk but with stereoscopic view - i.e. for google cardboard
NB in this example the cameras have been set with a negative separation i.e.
for viewing cross-eyed as most people find this easier without a viewer!!!
If a viewer is used then the line defining CAMERA would need to be changed
to an appropriate +ve separation.
NB also, no camera has been explicitly assigned to the objects so they all
use the default instance and this will be CAMERA.camera_3d so long as the
StereoCam instance was created before any other Camera instance. i.e. it
would be safer really to assign the Camera to each Shape as they are created.
This demo also uses relative rotation for the Camera which is more like
the effect of having a gyro sensor attached to stereo goglles. This can
be efficiently done using the Mouse.velocity() method and the argumente:
Camera(... absolute=False) or just set Camera.absolute = False. Using the
'a' and 'd' keys will rotate the camera about the z axis (roll mode)
Because relative rotations are cumulative with no simple way to keep track
of the overall result there are two convenience methods added to Camera
a) euler_angles() to return the Euler (z->x->y) rotations for the current
orientation
b) matrix_from_two_vecors() to return the rotation matrix required to move
from a starting direction to the current direction vector, such a process
might be required to correct 'dead reckoning' from gyro readings with a
magnetometer vector.
"""
import math,random
import demo
import pi3d
# Setup display and initialise pi3d
DISPLAY = pi3d.Display.create(w=1200, h=600)
DISPLAY.set_background(0.4,0.8,0.8,1)      # r,g,b,alpha
# yellowish directional light blueish ambient light
pi3d.Light(lightpos=(1, -1, -3), lightcol=(1.0, 1.0, 0.8), lightamb=(0.25, 0.2, 0.3))
CAMERA = pi3d.StereoCam(separation=-0.5, interlace=0)
""" If CAMERA is set with interlace <= 0 (default) then CAMERA.draw() will produce
two images side by side (each viewed from `separation` apart) i.e. -ve
requires viewing slightly cross-eyed.
If interlace is set to a positive value then the two images are interlaced
in vertical stripes this number of pixels wide. The resultant image needs
to be viewed through a grid. See https://github.com/pi3d/pi3d_demos/make_grid.py
"""
# load shader
shader = pi3d.Shader("uv_bump")
shinesh = pi3d.Shader("uv_reflect")
flatsh = pi3d.Shader("uv_flat")

tree2img = pi3d.Texture("textures/tree2.png")
tree1img = pi3d.Texture("textures/tree1.png")
hb2img = pi3d.Texture("textures/hornbeam2.png")
bumpimg = pi3d.Texture("textures/grasstile_n.jpg")
reflimg = pi3d.Texture("textures/stars.jpg")
rockimg = pi3d.Texture("textures/rock1.jpg")

# fog is (colour + alpha, distance): distant objects fade to these values
FOG = ((0.3, 0.3, 0.4, 0.8), 650.0)
TFOG = ((0.2, 0.24, 0.22, 1.0), 150.0)

#myecube = pi3d.EnvironmentCube(900.0,"HALFCROSS")
ectex=pi3d.loadECfiles("textures/ecubes","sbox")
myecube = pi3d.EnvironmentCube(size=900.0, maptype="FACES", name="cube")
myecube.set_draw_details(flatsh, ectex)

# Create elevation map
mapsize = 1000.0
mapheight = 60.0
mountimg1 = pi3d.Texture("textures/mountains3_512.jpg")
mymap = pi3d.ElevationMap("textures/mountainsHgt.png", name="map",
                          width=mapsize, depth=mapsize, height=mapheight,
                          divx=32, divy=32)
mymap.set_draw_details(shader, [mountimg1, bumpimg, reflimg], 128.0, 0.0)
mymap.set_fog(*FOG)

#Create tree models
treeplane = pi3d.Plane(w=4.0, h=5.0)

treemodel1 = pi3d.MergeShape(name="baretree")
treemodel1.add(treeplane.buf[0], 0,0,0)
treemodel1.add(treeplane.buf[0], 0,0,0, 0,90,0)

treemodel2 = pi3d.MergeShape(name="bushytree")
treemodel2.add(treeplane.buf[0], 0,0,0)
treemodel2.add(treeplane.buf[0], 0,0,0, 0,60,0)
treemodel2.add(treeplane.buf[0], 0,0,0, 0,120,0)

#Scatter them on map using Merge shape's cluster function
mytrees1 = pi3d.MergeShape(name="trees1")
mytrees1.cluster(treemodel1.buf[0], mymap, 0.0, 0.0, 400.0, 400.0, 50, "", 8.0, 3.0)
mytrees1.set_draw_details(flatsh, [tree2img], 0.0, 0.0)
mytrees1.set_fog(*TFOG)

mytrees2 = pi3d.MergeShape(name="trees2")
mytrees2.cluster(treemodel2.buf[0], mymap, 0.0, 0.0, 400.0, 400.0, 80, "", 6.0, 3.0)
mytrees2.set_draw_details(flatsh, [tree1img], 0.0, 0.0)
mytrees2.set_fog(*TFOG)

mytrees3 = pi3d.MergeShape(name="trees3")
mytrees3.cluster(treemodel2, mymap,0.0, 0.0, 300.0, 300.0, 20, "", 4.0, 2.0)
mytrees3.set_draw_details(flatsh, [hb2img], 0.0, 0.0)
mytrees3.set_fog(*TFOG)

#Create monument
monument = pi3d.Model(file_string="models/pi3d.obj", name="monument")
monument.set_shader(shinesh)
monument.set_normal_shine(bumpimg, 16.0, reflimg, 0.4)
monument.set_fog(*FOG)
monument.translate(100.0, -mymap.calcHeight(100.0, 235) + 12.0, 235.0)
monument.scale(20.0, 20.0, 20.0)
monument.rotateToY(65)

#screenshot number
scshots = 1

#avatar camera
rot = 0.0
tilt = 0.0
roll = 0.0001 # to trick the camera update first time through loop before mouse movement
avhgt = 3.5
xm = 0.0
zm = 0.0
ym = mymap.calcHeight(xm, zm) + avhgt

# Fetch key presses
mykeys = pi3d.Keyboard()
mymouse = pi3d.Mouse(restrict = False)
mymouse.start()

start_vector = CAMERA.camera_3d.get_direction()

# FIX: `buttons` was read in the W-key branch inside the loop before its
# first assignment at the bottom of the loop, raising NameError on the
# first key press; initialise it to the "no button" state up front.
buttons = mymouse.BUTTON_UP

# Display scene and rotate cuboid
while DISPLAY.loop_running():
  l_or_k_pressed = False # to stop routine camera movement for cases where l or k pressed
  #Press ESCAPE to terminate
  k = mykeys.read()
  if k >-1: # or buttons > mymouse.BUTTON_UP:
    dx, dy, dz = CAMERA.get_direction()
    if k == 119 or buttons == mymouse.LEFT_BUTTON: #key W
      xm += dx
      zm += dz
      ym = mymap.calcHeight(xm, zm) + avhgt
    elif k == 115: # or buttons == mymouse.RIGHT_BUTTON: #kry S
      xm -= dx
      zm -= dz
      ym = mymap.calcHeight(xm, zm) + avhgt
    elif k == ord('a'):
      roll += 2.0
    elif k == ord('d'):
      roll -= 2.0
    elif k == ord('l'):
      rx, ry, rz = CAMERA.camera_3d.euler_angles()
      CAMERA.move_camera((xm, ym, zm), ry, rx, rz) # default to absolute rotations
      print(rx, ry, rz)
      l_or_k_pressed = True
    elif k == ord('k'):
      vector = CAMERA.get_direction()
      if start_vector is not None:
        CAMERA.camera_3d.r_mtrx = CAMERA.camera_3d.matrix_from_two_vecors(start_vector, vector)
        ''' The above process will not preserve the z axis rotation compare
        with the following system
        _, _, rz = CAMERA.camera_3d.euler_angles() # get Euler z rotation
        rx, ry, _ = CAMERA.camera_3d.euler_angles( # get required x and y rotations to align vectors.
                  CAMERA.camera_3d.matrix_from_two_vecors(start_vector, vector))
        CAMERA.move_camera((xm, ym, zm), ry, rx, rz)'''
        print(CAMERA.camera_3d.r_mtrx)
      l_or_k_pressed = True
    elif k == ord('m'): # for this to work there needs to be an alteration to the application of the rotation matrix above
      start_vector = CAMERA.get_direction()
    elif k == 112: #key P
      pi3d.screenshot("forestWalk"+str(scshots)+".jpg")
      scshots += 1
    elif k == 10: #key RETURN
      mc = 0 # NOTE(review): `mc` is never read anywhere — dead assignment
    elif k == 27: #Escape key
      mykeys.close()
      mymouse.stop()
      DISPLAY.stop()
      break

  halfsize = mapsize / 2.0
  xm = (xm + halfsize) % mapsize - halfsize # wrap location to stay on map -500 to +500
  zm = (zm + halfsize) % mapsize - halfsize

  rot, tilt = mymouse.velocity()
  rot *= -1.0
  if not l_or_k_pressed: #to stop overwriting move_camera() after pressing l
    CAMERA.move_camera((xm, ym, zm), rot, tilt, roll, absolute=False)
  rot, tilt, roll = 0.0, 0.0, 0.0
  myecube.position(xm, ym, zm)
  # Render the scene once per eye, then composite with CAMERA.draw().
  for i in range(2):
    CAMERA.start_capture(i)
    monument.draw()
    mymap.draw()
    if abs(xm) > 300:
      mymap.position(math.copysign(1000,xm), 0.0, 0.0)
      mymap.draw()
    if abs(zm) > 300:
      mymap.position(0.0, 0.0, math.copysign(1000,zm))
      mymap.draw()
      if abs(xm) > 300:
        mymap.position(math.copysign(1000,xm), 0.0, math.copysign(1000,zm))
        mymap.draw()
    mymap.position(0.0, 0.0, 0.0)
    myecube.draw()
    mytrees3.draw()
    mytrees2.draw()
    mytrees1.draw()
    CAMERA.end_capture(i)
  CAMERA.draw()
  mx, my = mymouse.position()
  buttons = mymouse.button_status()
|
24,520 | cfea85ef71f766777f3925c243f2801058f5fba3 | import urllib2
import datetime
InURL = raw_input('URL:')
print datetime.datetime.now()
print "url requested:"
print InURL
#Loads,reads the page and converts the data to a UTF-8 string.
load = urllib2.urlopen(InURL)
Page_info = load.info()
Page_code = load.getcode()
print "Meta-information of the page:"
print Page_info
print "HTTP status code:"
print Page_code
|
24,521 | 3ab03ba8453aca93dfca9cb90768ac80361abc84 | # -*- coding: utf-8 -*-
import os
import hmac
import time
import json
import mimetypes
import functools
from urlparse import urlparse
from urllib import urlencode
from base64 import urlsafe_b64encode
from hashlib import sha1
import requests
version_info = (0, 1, 2)
VERSION = __version__ = '.'.join( map(str, version_info) )

# Quick usage reference for the public API exposed by this module.
"""
Usage:
cow = Cow(ACCESS_KEY, SECRET_KEY)
b = cow.get_bucket(BUCKET)
b.put('a')
b.put('a', 'b')
b.put('a', names={'a': 'x'})
b.put('a', 'b', names={'a': 'x', 'b': 'y'})
b.stat('a')
b.stat('a', 'b')
b.delete('a')
b.delete('a', 'b')
b.copy('a', 'c')
b.copy(('a', 'c'), ('b', 'd'))
b.move('a', 'c')
b.move(('a', 'c'), ('b', 'd'))
"""

# Qiniu endpoints: RS_HOST serves bucket/file management calls, UP_HOST
# uploads, RSF_HOST file listing (see the methods that reference each).
RS_HOST = 'http://rs.qbox.me'
UP_HOST = 'http://up.qbox.me'
RSF_HOST = 'http://rsf.qbox.me'
class CowException(Exception):
    """Raised when a Qiniu HTTP call fails; carries the failing response details."""

    def __init__(self, url, status_code, reason, content):
        Exception.__init__(self, '%s, %s' % (reason, content))
        self.url = url
        self.status_code = status_code
        self.reason = reason
        self.content = content
def signing(secret_key, data):
    """HMAC-SHA1 sign `data` with `secret_key`, URL-safe base64 encoded."""
    mac = hmac.new(secret_key, data, sha1)
    return urlsafe_b64encode(mac.digest())
def requests_error_handler(func):
    """Translate the AssertionError raised on a bad HTTP status into CowException.

    Call sites assert `res.status_code == 200, res`, so the failing response
    object arrives as the assertion argument.
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except AssertionError as e:
            resp = e.args[0]
            raise CowException(
                resp.url, resp.status_code, resp.reason, resp.content
            )
    return wrapper
def expected_argument_type(pos, types):
    """Decorator factory: require the positional argument at `pos` to be of `types`."""
    def deco(func):
        @functools.wraps(func)
        def checked(*args, **kwargs):
            if isinstance(args[pos], types):
                return func(*args, **kwargs)
            raise TypeError(
                "{0} Type error, Expected {1}".format(args[pos], types)
            )
        return checked
    return deco
class UploadToken(object):
    """Lazily-generated, self-refreshing upload token for one scope."""

    def __init__(self, access_key, secret_key, scope, ttl=3600):
        self.access_key = access_key
        self.secret_key = secret_key
        self.scope = scope
        self.ttl = ttl
        self._token = None
        self.generated = int(time.time())

    @property
    def token(self):
        # Treat tokens within one minute of expiry as already expired and
        # regenerate eagerly.
        if int(time.time()) - self.generated > self.ttl - 60:
            self._token = self._make_token()
        if not self._token:
            # First access: nothing generated yet.
            self._token = self._make_token()
        return self._token

    def _make_token(self):
        self.generated = int(time.time())
        policy = {
            'scope': self.scope,
            'deadline': self.generated + self.ttl
        }
        encoded = urlsafe_b64encode(json.dumps(policy))
        signature = signing(self.secret_key, encoded)
        return '%s:%s:%s' % (self.access_key, signature, encoded)
class Cow(object):
def __init__(self, access_key, secret_key):
    """Store the account credentials and wire up the public operations.

    stat/delete and copy/move share one implementation each
    (_stat_rm_handler / _cp_mv_handler), specialised by the action name
    baked in with functools.partial.
    """
    self.access_key = access_key
    self.secret_key = secret_key
    # Cache of UploadToken objects keyed by scope (see generate_upload_token).
    self.upload_tokens = {}
    self.stat = functools.partial(self._stat_rm_handler, 'stat')
    self.delete = functools.partial(self._stat_rm_handler, 'delete')
    self.copy = functools.partial(self._cp_mv_handler, 'copy')
    self.move = functools.partial(self._cp_mv_handler, 'move')
def get_bucket(self, bucket):
    """Return a Bucket helper bound to this client.

    Recommended entry point for per-bucket operations: subsequent calls on
    the returned object then only need the file name(s).
    """
    return Bucket(self, bucket)
def generate_access_token(self, url, params=None):
    """Sign `url` (path + query, newline, optional body params) for the
    QBox Authorization header."""
    uri = urlparse(url)
    payload = uri.path
    if uri.query:
        payload = '%s?%s' % (payload, uri.query)
    payload = '%s\n' % payload
    if params:
        if isinstance(params, basestring):
            payload += params
        else:
            payload += urlencode(params)
    return '%s:%s' % (self.access_key, signing(self.secret_key, payload))
def build_requests_headers(self, token):
    """Headers for an authenticated form-encoded POST request."""
    return {
        'Content-Type': 'application/x-www-form-urlencoded',
        'Authorization': 'QBox %s' % token,
    }
@requests_error_handler
def api_call(self, url, params=None):
    """POST to a signed management URL; return parsed JSON ('' on empty body).

    A non-200 status trips the assertion, which requests_error_handler
    converts into a CowException.
    """
    token = self.generate_access_token(url, params=params)
    headers = self.build_requests_headers(token)
    if params:
        res = requests.post(url, data=params, headers=headers)
    else:
        res = requests.post(url, headers=headers)
    assert res.status_code == 200, res
    return res.json() if res.text else ''
def list_buckets(self):
    """List all buckets of this account."""
    return self.api_call('%s/buckets' % RS_HOST)
def create_bucket(self, name):
    """Create a bucket (not recommended via the API).

    Buckets created through the API were observed not to get the default
    <bucket_name>.qiniudn.com subdomain; create buckets in the web console
    instead.
    """
    url = '%s/mkbucket/%s' % (RS_HOST, name)
    return self.api_call(url)
def drop_bucket(self, bucket):
    """Delete the whole bucket, including its contents."""
    return self.api_call('%s/drop/%s' % (RS_HOST, bucket))
def list_files(self, bucket, marker=None, limit=None, prefix=None):
    """List files in `bucket`, with optional pagination marker/limit/prefix."""
    query = ['bucket=%s' % bucket]
    # Append the optional filters in a stable order.
    for field, value in (('marker', marker), ('limit', limit), ('prefix', prefix)):
        if value:
            query.append('%s=%s' % (field, value))
    return self.api_call('%s/list?%s' % (RSF_HOST, '&'.join(query)))
    def generate_upload_token(self, scope, ttl=3600):
        """Return the upload token for *scope*, creating and caching it on
        first use.

        NOTE(review): once a scope is cached, later calls ignore *ttl*
        and an expired token would be served — confirm UploadToken
        refreshes itself.
        """
        if scope not in self.upload_tokens:
            self.upload_tokens[scope] = UploadToken(self.access_key, self.secret_key, scope, ttl=ttl)
        return self.upload_tokens[scope].token
    @requests_error_handler
    @expected_argument_type(2, (basestring, list, tuple))
    def put(self, scope, filename, names=None):
        """Upload one file or several files.

        filename: a string uploads a single file; a list/tuple uploads
        each entry and returns a list of responses.
        names: dict mapping local filename -> name to store under;
        defaults to the file's basename when absent.
        """
        url = '%s/upload' % UP_HOST
        token = self.generate_upload_token(scope)
        names = names or {}
        def _uploaded_name(filename):
            # Explicit mapping wins; fall back to the local basename.
            return names.get(filename, None) or os.path.basename(filename)
        def _put(filename):
            # NOTE(review): the handle is never closed explicitly; it is
            # left for garbage collection after the request completes.
            files = {
                'file': (filename, open(filename, 'rb')),
            }
            action = '/rs-put/%s' % urlsafe_b64encode(
                '%s:%s' % (scope, _uploaded_name(filename))
            )
            _type, _encoding = mimetypes.guess_type(filename)
            if _type:
                action += '/mimeType/%s' % urlsafe_b64encode(_type)
            data = {
                'auth': token,
                'action': action,
            }
            res = requests.post(url, files=files, data=data)
            assert res.status_code == 200, res
            return res.json()
        if isinstance(filename, basestring):
            # single file
            return _put(filename)
        # multiple files
        return [_put(f) for f in filename]
    @expected_argument_type(2, (list, tuple))
    def _cp_mv_handler(self, action, args):
        """Shared implementation behind ``copy`` and ``move``.

        action: 'copy' or 'move'
        args: [src_bucket, src_filename, des_bucket, des_filename]
              or [(src_bucket, src_filename, des_bucket, des_filename), ...]
        The first form operates on one file, the second performs a batch.
        Not meant to be called directly by users (bound via
        functools.partial in __init__).
        """
        # A string first element means a single flat 4-tuple of arguments.
        if isinstance(args[0], basestring):
            return self._cp_mv_single(action, args)
        if isinstance(args[0], (list, tuple)):
            return self._cp_mv_batch(action, args)
    @expected_argument_type(3, (basestring, list, tuple))
    def _stat_rm_handler(self, action, bucket, filename):
        """Shared implementation behind ``stat`` and ``delete``.

        action: 'stat' or 'delete'
        bucket: bucket to operate on
        filename: 'aabb' for one file, or ['aabb', 'ccdd', ...] for a batch.
        Not meant to be called directly by users (bound via
        functools.partial in __init__).
        """
        if isinstance(filename, basestring):
            return self._stat_rm_single(action, bucket, filename)
        if isinstance(filename, (list, tuple)):
            return self._stat_rm_batch(action, bucket, filename)
def _cp_mv_single(self, action, args):
src_bucket, src_filename, des_bucket, des_filename = args
url = '%s/%s/%s/%s' % (
RS_HOST,
action,
urlsafe_b64encode('%s:%s' % (src_bucket, src_filename)),
urlsafe_b64encode('%s:%s' % (des_bucket, des_filename)),
)
return self.api_call(url)
def _cp_mv_batch(self, action, args):
url = '%s/batch' % RS_HOST
def _one_param(arg):
return 'op=/%s/%s/%s' % (
action,
urlsafe_b64encode('%s:%s' % (arg[0], arg[1])),
urlsafe_b64encode('%s:%s' % (arg[2], arg[3])),
)
param = '&'.join( map(_one_param, args) )
return self.api_call(url, param)
def _stat_rm_single(self, action, bucket, filename):
url = '%s/%s/%s' % (
RS_HOST, action, urlsafe_b64encode('%s:%s' % (bucket, filename))
)
return self.api_call(url)
def _stat_rm_batch(self, action, bucket, filenames):
url = '%s/batch' % RS_HOST
param = [
'op=/%s/%s' % (
action, urlsafe_b64encode('%s:%s' % (bucket, f))
) for f in filenames
]
param = '&'.join(param)
return self.api_call(url, param)
def transform_argument(func):
    """Collapse positional arguments before delegating to *func*:
    exactly one argument is passed through unchanged, while zero or
    several are bundled into a single tuple."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        if len(args) == 1:
            payload = args[0]
        else:
            payload = args
        return func(self, payload, **kwargs)
    return wrapper
class Bucket(object):
    """Convenience wrapper binding a Cow client to one bucket so that
    per-file operations only need file names.

    Methods decorated with @transform_argument accept either one
    argument or several (which arrive bundled as a tuple in args[0]).
    """
    def __init__(self, cow, bucket):
        # cow: the authenticated client; bucket: the bucket name to bind.
        self.cow = cow
        self.bucket = bucket
    @transform_argument
    def put(self, *args, **kwargs):
        """Upload one or many files; optional ``names`` maps local names
        to stored names and must be a dict."""
        names = kwargs.get('names', None)
        if names and not isinstance(names, dict):
            raise TypeError(
                "names Type error, Expected dict, But got Type of {0}".format(type(names))
            )
        return self.cow.put(self.bucket, args[0], names=names)
    @transform_argument
    def stat(self, *args):
        # Single filename or sequence of filenames.
        return self.cow.stat(self.bucket, args[0])
    @transform_argument
    def delete(self, *args):
        return self.cow.delete(self.bucket, args[0])
    @transform_argument
    def copy(self, *args):
        # args[0] is ('src', 'dst') or a sequence of such pairs.
        return self.cow.copy(self._build_cp_mv_args(args[0]))
    @transform_argument
    def move(self, *args):
        return self.cow.move(self._build_cp_mv_args(args[0]))
    def list_files(self, marker=None, limit=None, prefix=None):
        """List this bucket's files (see Cow.list_files for parameters)."""
        return self.cow.list_files(self.bucket, marker=marker, limit=limit, prefix=prefix)
    def _build_cp_mv_args(self, filename):
        """Expand (src, dst) pairs into the 4-tuple form Cow expects,
        using this bucket as both source and destination bucket."""
        if isinstance(filename[0], basestring):
            args = [self.bucket, filename[0], self.bucket, filename[1]]
        else:
            args = []
            for src, des in filename:
                args.append( (self.bucket, src, self.bucket, des) )
        return args
|
24,522 | 635fe44ab82a1e07b44230d3a1f7a0fc82bc997e | """
Write a wrapper class TableData for database table, that when initialized with database name and table acts
as collection object (implements Collection protocol). Assume all data has unique values in 'name' column.
So, if presidents = TableData(database_name='example.sqlite', table_name='presidents')
then
len(presidents) will give current amount of rows in presidents table in database
presidents['Yeltsin'] should return single data row for president with name Yeltsin
'Yeltsin' in presidents should return if president with same name exists in table
object implements iteration protocol. i.e. you could use it in for loops::
for president in presidents:
print(president['name'])
all above mentioned calls should reflect most recent data. If data in table changed after you created collection
instance, your calls should return updated data.
Avoid reading entire table into memory. When iterating through records, start reading the first record, then go to the
next one, until records are exhausted. When writing tests, it's not always necessary to mock database calls completely.
Use supplied example.sqlite file as database fixture file.
"""
import sqlite3
from typing import Union
class TableData:
    """Live, read-only collection view of one database table keyed by the
    unique ``name`` column.

    Supports ``len()``, ``table[name]``, ``name in table`` and iteration.
    Every operation re-queries the database, so results always reflect
    the table's current contents, and rows are fetched one at a time
    rather than loading the whole table into memory.
    """

    def __init__(self, database_name: str, table_name: str):
        # table_name cannot be bound as a SQL parameter (identifiers are
        # not parameterizable), so it is interpolated; only *values* such
        # as the looked-up name go through placeholder binding below.
        self.table_name = table_name
        self.conn = sqlite3.connect(database_name).cursor()
        self.row_index = -1
        self.amount_of_rows = self._count_rows()

    def _count_rows(self) -> int:
        """Return the table's current row count (always re-queried)."""
        return self.conn.execute(f"SELECT COUNT(*) FROM {self.table_name}").fetchone()[0]

    def __len__(self) -> int:
        # Refresh so rows added/removed after construction are counted.
        self.amount_of_rows = self._count_rows()
        return self.amount_of_rows

    def __getitem__(self, name: str) -> tuple:
        # Parameterized query: immune to SQL injection and to names
        # containing quotes. Returns None when no row matches.
        return self.conn.execute(
            f"SELECT * FROM {self.table_name} WHERE name = ?", (name,)
        ).fetchone()

    def __contains__(self, name: str) -> bool:
        # Membership is by *name*, matching the documented contract
        # ("'Yeltsin' in presidents"), not by whole-row tuples.
        return self[name] is not None

    def __iter__(self):
        # Restart the cursor position so the object can be iterated more
        # than once, and pick up the current row count.
        self.row_index = -1
        self.amount_of_rows = self._count_rows()
        return self

    def __next__(self) -> tuple:
        if self.row_index >= self.amount_of_rows - 1:
            raise StopIteration
        self.row_index += 1
        # Fetch exactly one row per step to avoid reading the whole table.
        return self.conn.execute(
            f"SELECT * FROM {self.table_name} LIMIT 1 OFFSET {self.row_index}"
        ).fetchone()
|
24,523 | e97f0ef854c6776750cc669545c92c913d5284a6 | # link: https://leetcode.com/problems/interleaving-string/solution/
# hard question
# Recursion
class Solution(object):
    """Interleaving-string check by brute-force recursion (exponential).

    Kept as the naive baseline; the memoized and DP variants below are
    the practical implementations.
    """
    def __init__(self):
        # Unused by this variant; kept for interface compatibility.
        self.dic = {}
    def isInterleave(self, s1, s2, s3):
        """
        :type s1: str
        :type s2: str
        :type s3: str
        :rtype: bool
        """
        # A length mismatch can never interleave; avoids pointless search.
        if len(s1) + len(s2) != len(s3):
            return False
        return self.isInter(s1, 0, s2, 0, "", s3)
    def isInter(self, s1, i, s2, j, res, s3):
        """Recursively extend *res* with the next unused char of s1 or s2
        and succeed when both are exhausted and res equals s3."""
        # (debug print removed — it flooded stdout on every call)
        if res == s3 and i == len(s1) and j == len(s2):
            return True
        ans = False
        if i < len(s1):
            ans = ans or self.isInter(s1, i + 1, s2, j, res + s1[i], s3)
        if j < len(s2):
            ans = ans or self.isInter(s1, i, s2, j + 1, res + s2[j], s3)
        return ans
# Recursion with memoization
class Solution(object):
    """Interleaving-string check with memoized recursion."""
    def __init__(self):
        # Unused by this variant; kept for interface compatibility.
        self.dic = {}
    def isInterleave(self, s1, s2, s3, memo=None):
        """Return True iff s3 is an interleaving of s1 and s2.

        *memo* caches (s1, s2, s3) -> bool across the recursion. Using a
        None sentinel (instead of the mutable-default ``memo={}``) gives
        each top-level call a fresh cache rather than one shared, ever-
        growing dict across all calls and instances.
        """
        if memo is None:
            memo = {}
        if len(s1) + len(s2) != len(s3): return False
        if not s1 and not s2 and not s3: return True
        if (s1, s2, s3) in memo: return memo[s1, s2, s3]
        # Consume a matching leading char from s1 or from s2.
        memo[s1, s2, s3] = \
            (len(s1) > 0 and len(s3) > 0 and s1[0] == s3[0] and self.isInterleave(s1[1:], s2, s3[1:], memo)) or \
            (len(s2) > 0 and len(s3) > 0 and s2[0] == s3[0] and self.isInterleave(s1, s2[1:], s3[1:], memo))
        return memo[s1, s2, s3]
# DP solution
class Solution(object):
    """Interleaving-string check via O(len(s1)*len(s2)) dynamic programming."""
    def isInterleave(self, s1, s2, s3, memo=None):
        """Return True iff s3 is an interleaving of s1 and s2.

        dp[i][j] answers: can s1[:i] and s2[:j] interleave into s3[:i+j]?
        *memo* is unused by this variant; it is kept in the signature
        (with a safe None default instead of a mutable ``{}``) so the
        three Solution variants stay call-compatible.
        """
        if len(s1) + len(s2) != len(s3):
            return False
        dp = [[False] * (len(s2) + 1) for _ in range(len(s1) + 1)]
        for i in range(len(s1) + 1):
            for j in range(len(s2) + 1):
                if i == 0 and j == 0:
                    # Two empty prefixes trivially form the empty prefix.
                    dp[i][j] = True
                elif i == 0:
                    dp[i][j] = dp[i][j - 1] and s2[j - 1] == s3[i + j - 1]
                elif j == 0:
                    dp[i][j] = dp[i - 1][j] and s1[i - 1] == s3[i + j - 1]
                else:
                    # Last char of s3[:i+j] came from s1 or from s2.
                    dp[i][j] = (dp[i - 1][j] and s1[i - 1] == s3[i + j - 1]) or \
                               (dp[i][j - 1] and s2[j - 1] == s3[i + j - 1])
        return dp[len(s1)][len(s2)]
|
24,524 | ee9a78d39a2340970c9642a9a691d159950d4a2f | n, k = map(int, input().split())
participants = list(map(int, input().split()))
# The k-th placed contestant's score is the advancement cut-off.
kscore = participants[k - 1]
participants.sort()
# Count everyone at or above the cut-off who scored more than zero.
solution = sum(1 for score in participants if score >= kscore and score > 0)
print(solution) |
24,525 | 62e3aa700bb5782db19eb63bb13fb436aab3b30e | # Gregory Jerian
# Period 4
from math import sqrt
def main():
try:
print("This program determines if a number is prime.")
number = eval(input("Enter a number to be tested: "))
# Checks if the number is less than or equal to 2
if number <= 2:
if number == 2:
print("The number is prime.")
else:
print("The number is not prime.")
# If number > 2, then the code runs
else:
x = 2
numberPrime = "prime"
while numberPrime == "prime" and x <= sqrt(number):
if number % x == 0:
numberPrime = "not prime"
else:
x = x + 1
if numberPrime == "prime":
print("The number is prime.")
elif numberPrime == "not prime":
print("The number is not prime.")
except:
print("Error! Make sure you enter a number.")
main()
|
24,526 | 5c82dbb9629d4af9df04a7600647683b2eb88e56 |
import random
import time
from utilities import util
from Characters import get_name
from Characters import weapons
from Characters import player
from Story import battle1
from Story import battle2
def GameLoop():
    """Run one play-through: set up the player, then fight the story
    battles in order until a loss or the story runs out."""
    # Ask for the player's name and weapon choice; the weapon becomes
    # the player's attack die.
    player.name = get_name.getName()
    weapon = weapons.getWeapon()
    player.att_die = weapon
    # Battles are gated: battle2 only runs after winning battle1.
    win = battle1.BattleLoop()
    if win == True:
        win = battle2.BattleLoop()
    if win == True:
        # No further battles implemented yet.
        pass
    print("\n\t\tGAME OVER\n")
# Entry point: run the game, exiting politely on Ctrl+C.
try:
    GameLoop()
except KeyboardInterrupt: # If CTRL+C is pressed, exit cleanly:
    print ("\t\tGoodbye\n")
|
24,527 | a03d30ddbe4b7e77f65b55962db746a851b8a1f6 | import os
# Paths to the lexical-simplification benchmark splits and output ranking.
trainset = '../../corpora/ls_dataset_benchmarking_train.txt'
testset = '../../corpora/ls_dataset_benchmarking_test.txt'
os.system('mkdir ../../sr_rankings/glavas')
output = '../../sr_rankings/glavas/ranks_glavas.txt'
# Launch the ranker detached in the background via nohup.
# NOTE(review): shell command built by string concatenation — paths with
# spaces or shell metacharacters would break; subprocess with an argument
# list would be safer.
comm = 'nohup python Run_Glavas.py '+trainset+' '+testset+' '+output+' &'
os.system(comm)
|
24,528 | 5bbd69fb58c11924129efb4fa89d0dd45321244c | from flask_injector import request
from injector import Injector
from services.UserService import UserService
from services.UserFeedbackService import UserFeedbackService, UserFeedbackServiceEvents
from services.EventService import EventService, EventServiceEvents
from services.EventCommentService import EventCommentService
from services.EventInvitationService import EventInvitationService, EventInvitationServiceEvents
from services.ServiceInterconnect import ServiceInterconnect
from validators.EventCommentValidator import EventCommentValidator
from validators.EventInvitationValidator import EventInvitationValidator
from validators.UserFeedbackValidator import UserFeedbackValidator
from validators.UserValidator import UserValidator
from validators.EventValidator import EventValidator
def configure_services(binder):
    """Injector configuration: construct each service with its validator,
    bind it at request scope, then wire cross-service events through a
    single ServiceInterconnect."""
    user_validator = UserValidator()
    user_service = UserService(user_validator)
    binder.bind(UserService, to=user_service, scope=request)
    event_validator = EventValidator()
    event_service = EventService(event_validator)
    binder.bind(EventService, to=event_service, scope=request)
    event_comment_validator = EventCommentValidator()
    event_comment_service = EventCommentService(event_comment_validator)
    binder.bind(EventCommentService, to=event_comment_service, scope=request)
    event_invitation_validator = EventInvitationValidator()
    event_invitation_service = EventInvitationService(event_invitation_validator)
    binder.bind(EventInvitationService, to=event_invitation_service, scope=request)
    user_feedback_validator = UserFeedbackValidator()
    user_feedback_service = UserFeedbackService(user_feedback_validator)
    binder.bind(UserFeedbackService, to=user_feedback_service, scope=request)
    # Interconnect subscribes to service events so services stay decoupled
    # from one another.
    service_interconnect = ServiceInterconnect(user_service, event_service, event_invitation_service)
    event_service.emitter.on(EventServiceEvents.EVENT_ADDED, service_interconnect.on_event_added)
    event_service.emitter.on(EventServiceEvents.EVENT_DELETED, service_interconnect.on_event_deleted)
    event_invitation_service.emitter.on(EventInvitationServiceEvents.INVITATION_ADDED,
                                        service_interconnect.on_event_invitation_added)
    event_invitation_service.emitter.on(EventInvitationServiceEvents.INVITATION_UPDATED,
                                        service_interconnect.on_event_invitation_updated)
    user_feedback_service.emitter.on(UserFeedbackServiceEvents.FEEDBACK_POINTS_UPDATED,
                                     service_interconnect.on_feedback_points_updated)
services_injector = Injector([configure_services])
|
24,529 | 81a93db05f65704b7980d92462481881c711ae12 | from selenium import webdriver
import time
import getpass
import traceback
from tqdm import tqdm
def sleep(seconds):
    """Sleep one second per item of *seconds* (an iterable such as
    ``range(n)``), drawing a tqdm progress bar while waiting.

    Note: despite the name, the argument is an iterable, not a duration;
    callers in this script pass ``range(n)`` to wait n seconds.
    """
    for _ in tqdm(seconds):
        time.sleep(1)
def tear_down(driver):
    """Close the browser and end the WebDriver session."""
    driver.quit()
def get_videos(driver):
    """Open the earn-from-YouTube page and return the link elements found
    (selenium WebElements with class 'YouTubeLink')."""
    driver.get("https://www.midjobs.com/user/earn/youtube.aspx")
    print('getting links')
    # Give the page a moment to render before querying the DOM.
    sleep(range(2))
    links = driver.find_elements_by_class_name("YouTubeLink")
    return links
def watch_videos(ytlinks, driver):
    """Visit each URL in *ytlinks*, start playback inside the embedded
    player iframe, and wait ~50 seconds per video."""
    for link in ytlinks:
        print("watching ... " + link)
        driver.get(link)
        # Wait for the page and the embedded player to load.
        sleep(range(10))
        required_frame = driver.find_element_by_xpath('//*[@id="player"]')
        driver.switch_to.frame(required_frame)
        ytbtn = driver.find_element_by_xpath("//button[@aria-label='Play']")
        ytbtn.click()
        # Let the video play long enough to be counted.
        sleep(range(50))
        print("")
def main():
    """Log into midjobs.com with the given credentials, collect up to the
    requested number of unique video links, watch them, and quit."""
    u = input('username? ')
    p = getpass.getpass(prompt='password? ')
    total_videos = int(input('input # of total videos to watch: '))
    driver = webdriver.Chrome()
    driver.get("https://www.midjobs.com/login.aspx")
    try:
        # Fill in the login form and submit.
        username_field = driver.find_element_by_xpath('//*[@id="ctl00_MainContentPlaceHolder_ctl00_Username"]')
        password_field = driver.find_element_by_xpath('//*[@id="ctl00_MainContentPlaceHolder_ctl00_Password"]')
        login_btn = driver.find_element_by_xpath('//*[@id="ctl00_MainContentPlaceHolder_ctl00_LoginButton"]')
        username_field.send_keys(u)
        password_field.send_keys(p)
        login_btn.click()
        # Re-scan the page until enough unique links are collected.
        ytlinks = []
        ctr = 0
        while ctr < total_videos:
            links = get_videos(driver)
            for i in range(len(links)):
                src = links[i].get_attribute("href")
                if src not in ytlinks and ctr < total_videos:
                    ytlinks.append(links[i].get_attribute("href"))
                    ctr += 1
        print("")
        print("=========================")
        print("= watching " + str(len(ytlinks)) + " video/s...=")
        print("=========================")
        print("")
        watch_videos(ytlinks,driver)
        print("done. exiting ...")
        sleep(range(2))
        tear_down(driver)
    except Exception:
        # Report the failure but always release the browser.
        traceback.print_exc()
        tear_down(driver)
if __name__ == '__main__':
    main()
|
24,530 | deafd29cdb20d2a9d349183adbfe0cc5917b91f3 | class Opov(object):
    # Opov builds textual expressions: every overloaded operator returns a
    # new Opov whose string is "<expr>.<handler>(args)". The attributes
    # below name the handler method each operator maps to; override them
    # (per class or per instance) to change the generated call names.
    negMethod = 'ov_neg'
    posMethod = 'ov_pos'
    absMethod = 'ov_abs'
    invertMethod = 'ov_invert'
    complexMethod = 'ov_complex'
    intMethod = 'ov_int'
    longMethod = 'ov_long'
    floatMethod = 'ov_float'
    octMethod = 'ov_oct'
    hexMethod = 'ov_hex'
    indexMethod = 'ov_index'
    lenMethod = 'ov_len'
    addMethod = 'ov_add'
    subMethod = 'ov_sub'
    mulMethod = 'ov_mul'
    floordivMethod = 'ov_floordiv'
    modMethod = 'ov_mod'
    divmodMethod = 'ov_divmod'
    lshiftMethod = 'ov_lshift'
    rshiftMethod = 'ov_rshift'
    andMethod = 'ov_and'
    xorMethod = 'ov_xor'
    orMethod = 'ov_or'
    divMethod = 'ov_div'
    truedivMethod = 'ov_truediv'
    raddMethod = 'ov_radd'
    rsubMethod = 'ov_rsub'
    rmulMethod = 'ov_rmul'
    rdivMethod = 'ov_rdiv'
    rtruedivMethod = 'ov_rtruediv'
    rfloordivMethod = 'ov_rfloordiv'
    rmodMethod = 'ov_rmod'
    rdivmodMethod = 'ov_rdivmod'
    rpowMethod = 'ov_rpow'
    rlshiftMethod = 'ov_rlshift'
    rrshiftMethod = 'ov_rrshift'
    randMethod = 'ov_rand'
    rxorMethod = 'ov_rxor'
    rorMethod = 'ov_ror'
    iaddMethod = 'ov_iadd'
    isubMethod = 'ov_isub'
    imulMethod = 'ov_imul'
    idivMethod = 'ov_idiv'
    itruedivMethod = 'ov_itruediv'
    ifloordivMethod = 'ov_ifloordiv'
    imodMethod = 'ov_imod'
    ilshiftMethod = 'ov_ilshift'
    irshiftMethod = 'ov_irshift'
    iandMethod = 'ov_iand'
    ixorMethod = 'ov_ixor'
    iorMethod = 'ov_ior'
    ltMethod = 'ov_lt'
    leMethod = 'ov_le'
    eqMethod = 'ov_eq'
    neMethod = 'ov_ne'
    gtMethod = 'ov_gt'
    geMethod = 'ov_ge'
    getitemMethod = 'ov_getitem'
    delitemMethod = 'ov_delitem'
    powMethod = 'ov_pow'
    ipowMethod = 'ov_ipow'
    callMethod = 'ov_call'
    def __init__(self, x=''):
        # x: the expression string built so far; defaults to the variable 'x'.
        self.x = x if x else 'x' # expression string
    def __str__(self):
        return self.x
    def _mcall1(self, m):
        # Render a no-argument handler call on the current expression.
        return self.x + '.' + m + '()'
    def _mcall2(self, m, x):
        # Render a one-argument handler call on the current expression.
        return self.x + '.' + m + '(' + str(x) + ')'
    # NOTE(review): returning Opov from __len__/__int__/__float__/__index__
    # etc. violates those protocols (Python requires real int/float
    # results), so len(opov)/int(opov) would raise TypeError — presumably
    # these exist only so the method-name mapping is complete. Confirm.
    def __neg__(self):
        return Opov(self._mcall1(self.negMethod))
    def __pos__(self):
        return Opov(self._mcall1(self.posMethod))
    def __abs__(self):
        return Opov(self._mcall1(self.absMethod))
    def __invert__(self):
        return Opov(self._mcall1(self.invertMethod))
    def __complex__(self):
        return Opov(self._mcall1(self.complexMethod))
    def __int__(self):
        return Opov(self._mcall1(self.intMethod))
    def __long__(self):
        return Opov(self._mcall1(self.longMethod))
    def __float__(self):
        return Opov(self._mcall1(self.floatMethod))
    def __oct__(self):
        return Opov(self._mcall1(self.octMethod))
    def __hex__(self):
        return Opov(self._mcall1(self.hexMethod))
    def __index__(self):
        return Opov(self._mcall1(self.indexMethod))
    def __len__(self):
        return Opov(self._mcall1(self.lenMethod))
    def __add__(self, x):
        return Opov(self._mcall2(self.addMethod, x))
    def __sub__(self, x):
        return Opov(self._mcall2(self.subMethod, x))
    def __mul__(self, x):
        return Opov(self._mcall2(self.mulMethod, x))
    def __floordiv__(self, x):
        return Opov(self._mcall2(self.floordivMethod, x))
    def __mod__(self, x):
        return Opov(self._mcall2(self.modMethod, x))
    def __divmod__(self, x):
        return Opov(self._mcall2(self.divmodMethod, x))
    def __lshift__(self, x):
        return Opov(self._mcall2(self.lshiftMethod, x))
    def __rshift__(self, x):
        return Opov(self._mcall2(self.rshiftMethod, x))
    def __and__(self, x):
        return Opov(self._mcall2(self.andMethod, x))
    def __xor__(self, x):
        return Opov(self._mcall2(self.xorMethod, x))
    def __or__(self, x):
        return Opov(self._mcall2(self.orMethod, x))
    def __div__(self, x):
        return Opov(self._mcall2(self.divMethod, x))
    def __truediv__(self, x):
        return Opov(self._mcall2(self.truedivMethod, x))
    def __radd__(self, x):
        return Opov(self._mcall2(self.raddMethod, x))
    def __rsub__(self, x):
        return Opov(self._mcall2(self.rsubMethod, x))
    def __rmul__(self, x):
        return Opov(self._mcall2(self.rmulMethod, x))
    def __rdiv__(self, x):
        return Opov(self._mcall2(self.rdivMethod, x))
    def __rtruediv__(self, x):
        return Opov(self._mcall2(self.rtruedivMethod, x))
    def __rfloordiv__(self, x):
        return Opov(self._mcall2(self.rfloordivMethod, x))
    def __rmod__(self, x):
        return Opov(self._mcall2(self.rmodMethod, x))
    def __rdivmod__(self, x):
        return Opov(self._mcall2(self.rdivmodMethod, x))
    def __rpow__(self, x):
        return Opov(self._mcall2(self.rpowMethod, x))
    def __rlshift__(self, x):
        return Opov(self._mcall2(self.rlshiftMethod, x))
    def __rrshift__(self, x):
        return Opov(self._mcall2(self.rrshiftMethod, x))
    def __rand__(self, x):
        return Opov(self._mcall2(self.randMethod, x))
    def __rxor__(self, x):
        return Opov(self._mcall2(self.rxorMethod, x))
    def __ror__(self, x):
        return Opov(self._mcall2(self.rorMethod, x))
    def __iadd__(self, x):
        return Opov(self._mcall2(self.iaddMethod, x))
    def __isub__(self, x):
        return Opov(self._mcall2(self.isubMethod, x))
    def __imul__(self, x):
        return Opov(self._mcall2(self.imulMethod, x))
    def __idiv__(self, x):
        return Opov(self._mcall2(self.idivMethod, x))
    def __itruediv__(self, x):
        return Opov(self._mcall2(self.itruedivMethod, x))
    def __ifloordiv__(self, x):
        return Opov(self._mcall2(self.ifloordivMethod, x))
    def __imod__(self, x):
        return Opov(self._mcall2(self.imodMethod, x))
    def __ilshift__(self, x):
        return Opov(self._mcall2(self.ilshiftMethod, x))
    def __irshift__(self, x):
        return Opov(self._mcall2(self.irshiftMethod, x))
    def __iand__(self, x):
        return Opov(self._mcall2(self.iandMethod, x))
    def __ixor__(self, x):
        return Opov(self._mcall2(self.ixorMethod, x))
    def __ior__(self, x):
        return Opov(self._mcall2(self.iorMethod, x))
    # Comparison operators also build expressions; note this means Opov
    # instances are not meaningfully equality-comparable or hashable.
    def __lt__(self, x):
        return Opov(self._mcall2(self.ltMethod, x))
    def __le__(self, x):
        return Opov(self._mcall2(self.leMethod, x))
    def __eq__(self, x):
        return Opov(self._mcall2(self.eqMethod, x))
    def __ne__(self, x):
        return Opov(self._mcall2(self.neMethod, x))
    def __gt__(self, x):
        return Opov(self._mcall2(self.gtMethod, x))
    def __ge__(self, x):
        return Opov(self._mcall2(self.geMethod, x))
    def __getitem__(self, x):
        return Opov(self._mcall2(self.getitemMethod, x))
    def __delitem__(self, x):
        return Opov(self._mcall2(self.delitemMethod, x))
    def __pow__(self, x, y=''):
        # Optional modulus argument of three-operand pow().
        if y:
            ys = ', ' + str(y)
        else:
            ys = ''
        return Opov(self.x + '.__pow__(' + str(x) + ys + ')')
    def __ipow__(self, x, y=''):
        if y:
            ys = ', ' + str(y)
        else:
            ys = ''
        return Opov(self.x + '.__ipow__(' + str(x) + ys + ')')
    def __call__(self, *args):
        # Render a call with all arguments stringified and comma-joined.
        z = ','.join(map(str, args))
        return Opov(self.x + '.__call__(' + z + ')')
|
24,531 | 2ad3b3c813cf289633738103922a74eed3d0da7f | class UniqueNumber(object):
def single_number(self, nums):
occurences = {}
for n in nums:
occurences[n] = occurences.get(n, 0) + 1
for key, value in occurences.items():
if value == 1:
return key
def single_number_2(self, nums):
unique = 0
for n in nums:
unique ^= n
return unique
|
24,532 | b57f2db646ba388b38b96e8c1f5eb89356fe1e5f | from scipy.fftpack import fft
import numpy as np
import matplotlib.pyplot as plt
from pylab import*
from scipy.io import wavfile
# Frequencies (Hz) for notes A..G, plotted as a line chart.
y = [683, 766, 812, 912, 1024, 1085, 1230]
x = ["A", "B", "C", "D", "E", "F", "G"]
plt.plot(y)
# ylabel/xlabel come from the `from pylab import*` star import above.
ylabel('Frequency')
xlabel(x[0]+" "+
       x[1]+" "+
       x[2]+" "+
       x[3]+" "+
       x[4]+" "+
       x[5]+" "+
       x[6]+" ")
plt.grid()
# Blocks until the plot window is closed.
plt.show()
|
24,533 | 74cb4688bf0029aed6b5fae9c01fe5e3b7583878 | from terminalWriter import terminalWriter
from decimal import Decimal
class progressBar:
    """Text progress bar rendered through a terminalWriter.

    Tracks items_done out of total_items and redraws the full terminal
    width as "<title>:[#####     ]pp.pp%".
    """
    rows = 0;
    cols = 0;
    total_items= 0;
    items_done = 0;
    title = ""
    # NOTE(review): `tw` is a class-level attribute, so every progressBar
    # instance shares one terminalWriter — confirm that is intended.
    tw = terminalWriter()
    def __init__(self, tot_items = 1, il = 0, name = ""):
        # tot_items: total work units; il: units already done; name: label.
        self.total_items = tot_items;
        self.items_done = il;
        self.title = name;
        self.rows = self.tw.get_rows();
        self.cols = self.tw.get_columns();
        self.show()
    def show(self):
        """Redraw the bar to fill the current terminal width."""
        display = self.title;
        if self.title != "":
            display += ":["
        else:
            display += "["
        # <TITLE>:[
        end = "]"
        end += str(round(100 * float(self.items_done) / float(self.total_items),2))
        end += "%"
        #]<some-number>%
        # Remaining columns are split into filled ('#') and empty space.
        total_area = self.cols - (len(display) + len(end))
        filled_area = (float(self.items_done) / float(self.total_items))* total_area
        filled_area = int(filled_area)
        for i in range(filled_area):
            display += "#";
        for i in range(total_area - filled_area):
            display += " "
        display += end
        self.tw.write_progress_bar(display);
    def signal_job_done(self):
        """Advance the counter by one, redraw, and finish at 100%."""
        self.items_done += 1;
        self.show()
        if(self.items_done == self.total_items):
            self.tw.done();
    def reset(self, new_tot_items, new_name = ""):
        """Reuse this bar for a new task of *new_tot_items* units."""
        self.total_items = new_tot_items;
        self.title = new_name;
        self.items_done = 0;
|
24,534 | 171fee2e634710cb1f6f63f0bc8a039d080d96de | from selenium import webdriver
from selenium.webdriver.common import action_chains, keys
# Drive the browser to the 2048 game page.
browser = webdriver.Chrome('/usr/bin/chromedriver')
browser.get('https://gabrielecirulli.github.io/2048/')
# One full cycle of moves: up, right, down, left.
keysTuple = (keys.Keys.ARROW_UP, keys.Keys.ARROW_RIGHT, keys.Keys.ARROW_DOWN,keys.Keys.ARROW_LEFT)
actions = action_chains.ActionChains(browser)
# Play forever by repeating the move cycle (terminate with Ctrl+C).
while True:
    actions = action_chains.ActionChains(browser)
    actions.send_keys(keysTuple).perform()
|
24,535 | b8c6432f47bf7e6d454e14cfad6b4ff6ae0a6a7c | # -*- coding: utf-8 -*-
"""
Created on Sun Dec 30 16:14:04 2018
@author: truthless
"""
import random
from stop import STOP_WORDS
import numpy as np
import torch
import torch.utils.data as data
import re
import json
import networkx as nx
import scipy.sparse as sp
# Reserved vocabulary ids shared across the data pipeline.
PAD = 0   # padding token id
UNK = 1 #OOV
GO = 2    # decoder start-of-sequence id
EOS = 3   # end-of-sequence id
# Use the GPU when one is available, otherwise fall back to CPU.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class DataManager:
def __init__(self, stopword_freq_lb, path, no_pretrain_word2vec, dim, context_len):
#read text
self.text = {}
for name in ["train", "valid", "test"]:
self.text[name] = []
entities = []
file_path = "{0}/{1}_ent_1.txt".format(path, name)
for line in open(file_path):
entities.append(line.strip())
sys_ans_utt_ori = []
file_path = "{0}/{1}_ans_utt_ori_1.txt".format(path, name)
for line in open(file_path):
sys_ans_utt_ori.append(line.strip())
cnt = 0
file_path = "{0}/{1}_utt_1.txt".format(path, name)
for line in open(file_path):
utterances = line.strip().split('\t')
utterances = [''] * (context_len - len(utterances)) + utterances
self.text[name].append([utterances[-context_len:-1], utterances[-1], entities[cnt], sys_ans_utt_ori[cnt]])
cnt += 1
#arrange words
wordscount = {}
for name in ["train", "valid"]:
texts = self.text[name]
for item in texts:
words = item[0][-1].split() + item[1].split()
for word in words:
if word in wordscount:
wordscount[word] += 1
else:
wordscount[word] = 1
wordssorted = sorted(wordscount.items(), key = lambda d: (d[1],d[0]), reverse=True)
output = open("word_cnt_stat.txt", "w")
for i, (key, value) in enumerate(wordssorted):
output.write(str(value) + ":" + str(key) + "\n")
self.word2index = {'<PAD>':0, '<UNK>':1, '<GO>':2, '<EOS>':3}
stopwords_self = set()
for i, (key, value) in enumerate(wordssorted):
if value <= 5:
break
self.word2index[key] = i + 4 #PAD,UNK,GO,EOS
if value >= stopword_freq_lb:
stopwords_self.add(key)
# to add all entity name into vocab
entity_list = json.load(open("./data/entity_list_simple.json"))
start_idx = len(self.word2index)
for entity_name in entity_list:
entity_name = entity_name.split("::")[-1]
if entity_name not in self.word2index:
self.word2index[entity_name] = start_idx
start_idx += 1
self.stop_words_index = set([PAD, UNK, GO, EOS])
#self.stop_words_index |= set([self.word2index[word] for word in STOP_WORDS
# if word in self.word2index])
# here we add all words into stopword list
self.stop_words_index |= set([self.word2index[word] for word in stopwords_self])
self.index2word = dict((v, k) for k, v in self.word2index.items())
#load word vector
if no_pretrain_word2vec:
self.vector = None
else:
self.vector = 0.1 * np.random.rand(len(self.word2index), dim)
with open("{0}/vector.txt".format(path)) as fl:
for line in fl:
vec = line.strip().split()
word = vec[0].lower()
vec = list(map(float, vec[1:]))
if word in self.word2index:
self.vector[self.word2index[word]] = np.asarray(vec)
self.vector = torch.Tensor(self.vector)
# compute tf
len_voc = len(self.word2index.values())
self.index2nonstop = {}
cnt = 0
for i in range(len_voc):
if i not in self.stop_words_index:
self.index2nonstop[i] = cnt
cnt += 1
# for graph initialization
self.node_id_map, self.id_nodename_map = self.get_node_id_map()
self.node_info_map, self.nodename_attr_map = self.get_node_info()
self.adj = self.get_adj_mat("./data/adj_simple.json", self.node_id_map)
self.nodes_rep = self.get_nodes_rep(self.node_id_map, self.node_info_map)
self.n_entity = len(self.node_id_map)
#get index
self.data = {}
for name in ["train", "valid", "test"]:
self.data[name] = []
for number, item in enumerate(self.text[name]):
len_u = len(item[0])
indices = [[], [[] for _ in range(len_u)], [], [], [], []] #src_len, src, trg, trg_entities, trg_entities_mask
indices[0] = [u.count(' ')+1 for u in item[0]] # on purpose
max_u_len = max(indices[0])
# history
for i in range(len_u):
words = item[0][i].split()
indices[1][i] = [self.word2index[word] if word in self.word2index
else UNK for word in words] + [PAD] * (max_u_len - len(words))
# answer
words = item[1].split()
#print("item1:: ", len(words))
indices[2] = [self.word2index[word] if word in self.word2index
else UNK for word in words]
indices[2].append(EOS)
# answer entity
entities = item[2].split()
#print("item2 entities:: ", len(entities))
indices[3] = [self.node_id_map[entity_name] for entity_name in entities]
indices[3].append(0)
indices[4] = []
for x in indices[3]:
if x == 0:
indices[4].append(0)
else:
indices[4].append(1)
# ansuer original sentence
words = item[3].split()
indices[5] = words
indices[5].append("<EOS>")
if len(indices[2]) != len(indices[3]):
print(number, len(indices[2]), len(indices[3]))
print(item[1])
print(item[2])
exit()
self.data[name].append(indices)
def get_node_info(self):
node_info_map = json.load(open("./data/entity_info.json"))
nodename_attr_map = {}
for node, res in node_info_map.items():
node_name = node.split("::")[-1]
nodename_attr_map[node_name] = res
return node_info_map, nodename_attr_map
def post_process(self, outputs, pred_ents, topK=1):
outputs = outputs.cpu().numpy().tolist()
pred_ents = pred_ents.cpu().numpy()
entity_attr_list = {
"[attraction_address]",
"[restaurant_address]",
"[attraction_phone]",
"[restaurant_phone]",
"[hotel_address]",
"[restaurant_postcode]",
"[attraction_postcode]",
"[hotel_phone]",
"[hotel_postcode]",
"[hospital_phone]"
}
lens_new = []
for i, out in enumerate(outputs):
new_out = []
for j, each in enumerate(out):
if self.index2word[each] == "<$>":
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
new_out.append(nodename)
elif self.index2word[each] in entity_attr_list:
attr_name = self.index2word[each]
cnt = 0
suc_flag = False
for idx, prob in sorted(enumerate(pred_ents[i][j]), key=lambda i: i[1], reverse=True):
if suc_flag or cnt >= topK:
break
nodename = self.id_nodename_map[idx]
if nodename not in self.nodename_attr_map:
cnt += 1
continue
for attr, val in self.nodename_attr_map[nodename].items():
if attr in attr_name:
new_out.append(val)
suc_flag = True
break
cnt += 1
if not suc_flag:
new_out.append("<UNK>")
else:
new_out.append(self.index2word[each])
"""
if each == self.word2index["<$>"]:
pred_ent = np.argmax(pred_ents[i][j])
nodename = self.id_nodename_map[pred_ent]
nodename_wordids = [self.word2index[x] for x in nodename.split()]
new_out += nodename_wordids
else:
new_out.append(each)
"""
outputs[i] = new_out
return outputs
def get_nodes_rep(self, node_id_map, node_info_map, max_len=50):
nodes_rep = []
nodes_rep_map = []
for name, id_ in sorted(node_id_map.items(), key=lambda i: i[1]):
if name == "none" and id_ == 0:
nodes_rep.append([PAD] * max_len)
nodes_rep_map.append({"words": ["none"], "idx": [0]})
continue
# the attributes used to build relationship
# attributes as nodes: {"pricerange", "area", "food"}
# attributes only as relation: {"internet", "parking", "stars", "attraction_type", "hotel_type"}
# only user node name as node's feature
name = name.split("::")[-1]
node_desc = [name]
nodes_rep_idx = [PAD] * max_len
nodes_rep_idx[0] = self.word2index[name]
nodes_rep_word = [name]
"""
for attr, val in node_info_map.items():
#if attr in {"address", "area", "pricerange", "introduction", "food", "stars"} or "type" in attr:
if attr == "introduction":
node_desc.append(val)
node_desc = " ".join(node_desc)
nodes_rep_idx = []
nodes_rep_word = []
for each_word in node_desc.split():
for word in re.split(r'[\[\](::)_]', each_word):
if word == "":
continue
else:
if word not in self.word2index:
continue
else:
word_idx = self.word2index[word]
nodes_rep_idx.append(word_idx)
nodes_rep_word.append(word)
len_ = len(nodes_rep_idx)
if len_ >= max_len:
nodes_rep_idx = nodes_rep_idx[0:max_len]
nodes_rep_word = nodes_rep_word[0:max_len]
else:
nodes_rep_idx += [PAD] * (max_len - len_)
"""
nodes_rep.append(nodes_rep_idx)
nodes_rep_map.append({"words": nodes_rep_word, "idx": nodes_rep_idx})
json.dump(nodes_rep_map, open("nodes_rep_words_idx.json", "w"))
json.dump(self.word2index, open("word2index.json", "w"))
#exit()
return nodes_rep
def get_node_id_map(self):
data = json.load(open("./data/entity_list_simple.json"))
node_id_map = {}
id_nodename_map = {}
for i, node in enumerate(data):
node_id_map[node] = i + 1
tmp = node.split("::")
#node_name = " ".join(tmp[1].split("_"))
node_name = tmp[1]
id_nodename_map[i+1] = node_name
node_id_map["none"] = 0
id_nodename_map[0] = ""
return node_id_map, id_nodename_map
def get_adj_mat(self, input_file, item_id_map):
adj = json.load(open(input_file))
new_adj = {}
for i, neibors in adj.items():
i_idx = item_id_map[i]
new_adj[i_idx] = []
for j in neibors:
j_idx = item_id_map[j]
new_adj[i_idx].append(j_idx)
new_adj = nx.adjacency_matrix(nx.from_dict_of_lists(new_adj))
new_adj = self.normalize_adj(new_adj + sp.eye(new_adj.shape[0]))
new_adj = torch.FloatTensor(np.array(new_adj.todense()))
return new_adj
def normalize_adj(self, mx):
"""Row-normalize sparse matrix"""
rowsum = np.array(mx.sum(1))
r_inv_sqrt = np.power(rowsum, -0.5).flatten()
r_inv_sqrt[np.isinf(r_inv_sqrt)] = 0.
r_mat_inv_sqrt = sp.diags(r_inv_sqrt)
return mx.dot(r_mat_inv_sqrt).transpose().dot(r_mat_inv_sqrt).tocoo()
def create_dataset(self, name, batch_size):
    """Build a shuffled DataLoader for data split `name`.

    Each raw item is (src_len, src, trg, ents, ents_mask, trg_ori); from it
    we derive a per-token stop-word mask over the target and a normalized
    term-frequency vector over non-stop source tokens.
    """
    datas = self.data[name]
    src_seq_lens = []
    src_seqs, trg_seqs = [], []
    trg_stops, src_tfs = [], []
    trg_ents, trg_ents_mask, trg_seqs_ori = [], [], []
    nonstop_voc_size = len(self.index2nonstop)
    for item in datas:
        src_len, src, trg, ents, ents_mask, trg_ori = item
        tensor_src_len, tensor_src, tensor_trg = torch.LongTensor(src_len), \
            torch.LongTensor(src), torch.LongTensor(trg)
        src_seq_lens.append(tensor_src_len)
        src_seqs.append(tensor_src)
        trg_seqs.append(tensor_trg)
        # binary mask: 1 where the target token is a stop word
        trg_stop = torch.zeros_like(tensor_trg)
        for i, index in enumerate(trg):
            if index in self.stop_words_index:
                trg_stop[i] = 1
        trg_stops.append(trg_stop)
        # term-frequency vector over the non-stop-word vocabulary, counting
        # only the valid (unpadded) prefix of each source utterance
        src_tf = torch.zeros(nonstop_voc_size)
        for j, uttr in enumerate(src):
            for i, index in enumerate(uttr):
                if i == src_len[j]:
                    break
                if index not in self.stop_words_index:
                    src_tf[self.index2nonstop[index]] += 1
        if src_tf.sum().item() > 0:
            src_tf /= src_tf.sum()  # normalize counts into a distribution
        src_tfs.append(src_tf)
        trg_ents.append(torch.LongTensor(ents))
        trg_ents_mask.append(torch.LongTensor(ents_mask))
        trg_seqs_ori.append(trg_ori)
    print(len(trg_stops), len(trg_seqs), len(trg_ents), len(trg_seqs_ori))
    dataset = Dataset(src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori)
    # shuffle=True; padding handled by pad_packed_collate
    dataloader = data.DataLoader(dataset, batch_size, True, num_workers=0, collate_fn=pad_packed_collate)
    return dataloader
def compute_stopword(self, y):
    """Return a tensor shaped like `y` holding 1 where the token index is a
    stop word (per self.stop_words_index) and 0 elsewhere."""
    mask = torch.zeros_like(y).to(device=device)
    for row_idx, row in enumerate(y):
        flags = [int(tok in self.stop_words_index) for tok in row.tolist()]
        mask[row_idx] = torch.LongTensor(flags)
    return mask
def interpret(self, preds, refs, lens, f):
    """Print one randomly chosen (prediction, reference) pair from a batch,
    mirroring each line into the open file handle `f`."""
    sample = random.randrange(0, len(lens))
    # predicted sequence: stop early at the end-of-sentence marker
    for step in range(max(lens)):
        token = self.index2word[preds[sample][step].item()]
        print(token, end=' ')
        f.write('{0} '.format(token))
        if token == '<EOS>':
            break
    print()
    f.write('\n')
    # reference sequence: exactly lens[sample] tokens
    for step in range(lens[sample]):
        token = self.index2word[refs[sample][step].item()]
        print(token, end=' ')
        f.write('{0} '.format(token))
    print()
    f.write('\n')
class Dataset(data.Dataset):
    """Container pairing source sequences with target sequences and their
    per-item auxiliary features (stop masks, tf vectors, entity tensors)."""

    def __init__(self, src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs,
                 trg_ents, trg_ents_mask, trg_seqs_ori):
        self.src_seq_lens = src_seq_lens
        self.src_seqs = src_seqs
        self.trg_seqs = trg_seqs
        self.trg_stops = trg_stops
        self.src_tfs = src_tfs
        self.trg_ents = trg_ents
        self.trg_ents_mask = trg_ents_mask
        self.trg_seqs_ori = trg_seqs_ori
        # cached length; all parallel lists share it
        self.num_total_seqs = len(src_seqs)

    def __getitem__(self, index):
        """Return the full feature tuple for one example."""
        return (self.src_seq_lens[index], self.src_seqs[index],
                self.trg_seqs[index], self.trg_stops[index],
                self.src_tfs[index], self.trg_ents[index],
                self.trg_ents_mask[index], self.trg_seqs_ori[index])

    def __len__(self):
        return self.num_total_seqs
def pad_packed_collate(batch_data):
    """Collate a list of Dataset items into padded, device-resident batch
    tensors for the DataLoader.

    Items are sorted by source length (descending) so the encoder can use
    pack_padded_sequence. Returns (src_seqs, src_lens, trg_seqs, trg_lens,
    trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori).
    """
    def merge(sequences):
        # Pad 1-D sequences with zeros up to the batch max length.
        lengths = [len(seq) for seq in sequences]
        padded_seqs = torch.zeros(len(sequences), max(lengths)).long()
        for i, seq in enumerate(sequences):
            end = lengths[i]
            padded_seqs[i, :end] = seq
        return padded_seqs, lengths

    def hierarchical_merge(sequences, sequence_lengths):
        # Pad 2-D (utterance x word) sequences up to the longest utterance.
        lengths = torch.stack(sequence_lengths)
        utterance_length = lengths.shape[1]
        padded_seqs = torch.zeros(len(sequences), utterance_length, lengths.max().item()).long()
        for i, seq in enumerate(sequences):
            word_end = max(lengths[i]).item()
            padded_seqs[i, :utterance_length, :word_end] = seq
        return padded_seqs, lengths

    # sort a list by sequence length (descending order) to use pack_padded_sequence
    batch_data.sort(key=lambda x: len(x[0]), reverse=True)
    # separate source and target sequences
    src_seq_lens, src_seqs, trg_seqs, trg_stops, src_tfs, trg_ents, trg_ents_mask, trg_seqs_ori = zip(*batch_data)
    src_seqs, src_lens = hierarchical_merge(src_seqs, src_seq_lens)
    trg_seqs, trg_lens = merge(trg_seqs)
    trg_stops, _ = merge(trg_stops)
    trg_ents, _ = merge(trg_ents)
    # BUG FIX: the mask batch must be padded from trg_ents_mask;
    # previously merge(trg_ents) was padded here a second time.
    trg_ents_mask, _ = merge(trg_ents_mask)
    return (src_seqs.to(device=device), src_lens.to(device=device),
            trg_seqs.to(device=device), trg_lens,
            trg_stops.to(device=device), torch.stack(src_tfs).to(device=device),
            trg_ents.to(device=device), trg_ents_mask.to(device=device), trg_seqs_ori)
|
24,536 | 2c321a08102c2b8944328eae3bcac25a33558019 | '''
Test suite for the sarlac command line tool.
'''
# pylint: disable=protected-access
from pathlib import Path
import os
import re
from unittest.mock import patch
from click.testing import CliRunner
import pytest
import python_sarlac.sarlac as sarlac
def test_get_config_filename_from_env():
    '''the SARLAC_CONFIG environment variable overrides the config path'''
    expected = '/my/test/dir/myconfig.yaml'
    os.environ["SARLAC_CONFIG"] = expected
    # the resolver must honour the environment variable
    assert sarlac._get_config_filename() == expected
    # clean up environment variable
    del os.environ['SARLAC_CONFIG']
def test_get_config_filename_from_home():
    '''a .sarlac.yaml present in the home directory is picked up'''
    with patch.object(Path, 'home', return_value='/home/user') as home_mock:
        with patch.object(Path, 'is_file', return_value=True) as is_file_mock:
            result = sarlac._get_config_filename()
    home_mock.assert_called()
    is_file_mock.assert_called()
    assert result == '/home/user/.sarlac.yaml'
def test_get_config_from_global():
    '''without env var or home file the global location is returned'''
    assert sarlac._get_config_filename() == '/usr/local/etc/sarlac.yaml'
# (input string, expected substitution result) pairs for _run_subs;
# None marks an input that no rule should match.
INSTR_LIST = [
    ('match rule 0', 'test 0'),
    ('match rule 1 YES2 done', 'YES2'),
    ('match rule 2 YES4.1 more complex YES4.2 done', 'YES4.1 YES4.2'),
    ('match rule 3 NO MATCHES FOUND', None)
]
# human-readable pytest ids, one per INSTR_LIST entry
INSTR_IDS = [f'match rule {n}' for n, _ in enumerate(INSTR_LIST)]
@pytest.mark.parametrize('instr,expected', INSTR_LIST, ids=INSTR_IDS)
def test_run_subs_with_small_sub_rules_list(instr, expected):
    '''each parametrized input yields the expected batch substitution'''
    rules = {
        'substitutions': [
            {'match': re.compile(r'match rule 0'), 'replace': r'test 0'},
            {'match': re.compile(r'match rule 1 (.*) done'), 'replace': r'\1'},
            {'match': re.compile(r'match rule 2 (.*) more\s+complex (.*) done'),
             'replace': r'\1 \2'},
        ]
    }
    result = sarlac._run_subs(rules, instr)
    assert result == expected
def test_parse_config_file_collects_file_contents():
    '''parsing the sample config yields a substitutions section'''
    parsed = sarlac._parse_config('tests/sarlac.yaml')
    assert 'substitutions' in parsed
def test_parse_config_file_no_file():
    '''a missing config file will raise FileNotFoundError'''
    with pytest.raises(FileNotFoundError):
        sarlac._parse_config('not_a_file.yaml')
# ([match, replace, sample inputs...], expected compiled rule dict) pairs
# for _generate_cli_adhoc_rules; only the first two list items are used.
RULE_LIST = [
    (['testmatch', 'replaced', 'testmatch'], \
        {'substitutions': [{'match': re.compile('testmatch'), 'replace': 'replaced'}]}),
    ([r'test(.*)', r'\1', 'testreplaced2'], \
        {'substitutions': [{'match': re.compile('test(.*)'), 'replace': '\\1'}]}),
    ([r'test(.*)', r'\1', 'testreplaced3', 'testreplaced4'], \
        {'substitutions': [{'match': re.compile('test(.*)'), 'replace': '\\1'}]})
]
# human-readable pytest ids, one per RULE_LIST entry
RULE_IDS = [f'rule test {n}' for n, _ in enumerate(RULE_LIST)]
@pytest.mark.parametrize('rule_input,expected', RULE_LIST, ids=RULE_IDS)
def test_generate_cli_adhoc_rules(rule_input, expected):
    '''ad-hoc --match/--replace pairs become compiled substitution rules'''
    match_arg, replace_arg = rule_input[0], rule_input[1]
    generated = sarlac._generate_cli_adhoc_rules(match_arg, replace_arg)
    print(generated)
    print(expected)
    assert generated == expected
# (cli argument list, expected stdout) pairs exercising the full CLI;
# the trailing positional args are the strings to transform.
CLI_LIST = [
    (['--match', 'testmatch', '--replace', 'replaced', 'testmatch'], \
        'replaced\n'),
    (['--match', r'test(.*)', '--replace', r'\1', 'testreplaced2'], \
        'replaced2\n'), \
    (['--match', r'test(.*)', '--replace', r'\1', 'testreplaced3', 'testreplaced4'], \
        'replaced3\nreplaced4\n')
]
# human-readable pytest ids, one per CLI_LIST entry
CLI_IDS = [f'cli test {n}' for n, _ in enumerate(CLI_LIST)]
@pytest.mark.parametrize('cli_input,expected', CLI_LIST, ids=CLI_IDS)
def test_main_cli_thru_cli_args(cli_input, expected):
    '''the sarlac CLI performs ad-hoc match/replace on its arguments'''
    result = CliRunner().invoke(sarlac.main, cli_input)
    assert result.exit_code == 0
    assert result.output == expected
def test_main_cli_thru_stdin():
    '''the sarlac CLI applies ad-hoc match/replace to piped stdin ('-')'''
    args = ['--match', 'testmatch', '--replace', 'replaced', '-']
    result = CliRunner().invoke(sarlac.main, args, input="testmatch")
    assert result.exit_code == 0
    assert result.output == "replaced\n"
# NOTE(review): ('--help') and ('-h') are plain strings, not 1-tuples (the
# trailing comma is missing), so CliRunner.invoke receives a str here.
# Confirm this is intended; use ('--help',) for an actual tuple.
@pytest.mark.parametrize('cli_input', [(), ('--help'), ('-h')])
def test_main_cli_no_input_invokes_help(cli_input):
    '''test scenarios where help is called'''
    runner = CliRunner()
    result = runner.invoke(sarlac.main, cli_input)
    assert result.exit_code == 0
    assert result.output.startswith('Usage:')
|
24,537 | 5e9665f264c4f3b1c3a3415e3d6656384408de50 | from . import CampoController, ClasificacionController, FaseController
from ..models import Partido, Grupo
from ..static.constantes import JUGANDO, ESPERA, TERMINADO
def crear_calendario(fase):
    """Create the complete match calendar for every group of `fase`."""
    for grupo in Grupo.objects.filter(fase=fase):
        rondas = calcular_rondas(fase, grupo)
        equipos = list(grupo.equipos.all())
        jornadas = crear_lista_partidos(equipos, rondas)
        crear_partido_lista(grupo, jornadas, fase.doble_partido)
def calcular_rondas(fase, grupo):
    """Number of round-robin rounds for `grupo`: n-1 teams -> rounds for an
    even team count, n for an odd one; doubled for home-and-away phases."""
    n_equipos = grupo.equipos.count()
    rondas = n_equipos - 1 if n_equipos % 2 == 0 else n_equipos
    return rondas * 2 if fase.doble_partido else rondas
def crear_lista_partidos(teams, rounds):
"""round robin: Devuelve una lista de listas de jornadas, dentro de esa lista de jornadas hay una tupla con los enfrentamientos
equipo_local vs equipo_visitante"""
if len(teams) % 2:
teams.append(None)
schedule = []
for turn in range(rounds):
pairings = []
for i in range(int(len(teams) / 2)):
pairings.append((teams[i], teams[len(teams) - i - 1]))
teams.insert(1, teams.pop())
schedule.append(pairings)
return schedule
def crear_partido_lista(grupo, calendario_list, doble_partido):
    """Persist one Partido per pairing of the calendar; pairings containing
    a bye (falsy team) are skipped. In double-round phases the second half
    of the calendar swaps home and away."""
    total = len(calendario_list)
    for jornada, enfrentamientos in enumerate(calendario_list, start=1):
        for local, visitante in enfrentamientos:
            if not (local and visitante):
                continue  # bye — no match this round
            if doble_partido and jornada > total / 2:
                local, visitante = visitante, local  # return leg: swap venues
            Partido.objects.create(grupo=grupo, equipo_local=local,
                                   equipo_visitante=visitante,
                                   jornada=jornada)
def get_partidos_grupo_list(grupo):
    """Return the queryset of all matches belonging to `grupo`."""
    return Partido.objects.filter(grupo=grupo)
def get_partidos_jornadas_grupo(grupo):
    """Return [[round_number, matches_of_that_round], ...] for every round
    of the group's phase."""
    total = calcular_rondas(grupo.fase, grupo)
    return [
        [n_jornada, Partido.objects.filter(grupo=grupo, jornada=n_jornada)]
        for n_jornada in range(1, total + 1)
    ]
def get_partidos_jugando_list(fase):
    """Matches of `fase` currently in play."""
    return Partido.objects.filter(grupo__fase=fase, estado=JUGANDO)
def get_partidos_espera_list(fase):
    """Matches of `fase` waiting to be played."""
    return Partido.objects.filter(grupo__fase=fase, estado=ESPERA)
def get_partidos_no_terminados_list(fase):
    """Matches of `fase` not yet finished (state strictly below TERMINADO)."""
    return Partido.objects.filter(grupo__fase=fase, estado__lt=TERMINADO)
def get_partidos_espera_equipos_no_jugando_list(fase):
    """Waiting matches whose two teams are not already playing elsewhere,
    so they could start without a clash."""
    ocupados = []
    for en_juego in get_partidos_jugando_list(fase):
        ocupados.append(en_juego.equipo_local)
        ocupados.append(en_juego.equipo_visitante)
    return (get_partidos_espera_list(fase)
            .exclude(equipo_local__in=ocupados)
            .exclude(equipo_visitante__in=ocupados))
def get_partidos_con_campos_para_forzar(fase):
    """Pair each waiting match with the courts it could be forced onto:
    [[partido, [campo, campo]], [partido, [campo]], [partido, []], ...]."""
    campos_list = CampoController.get_campos_para_forzar(fase)
    return [
        [partido, campos_list[i]]
        for i, partido in enumerate(get_partidos_espera_list(fase))
    ]
def set_partido_espera(partido):
    """Return the match to the waiting state, releasing court and referee."""
    partido.estado = ESPERA
    partido.campo = None
    partido.arbitro = None
    partido.save()
def set_partido_jugar(partido, campo, arbitro=None):
    """Mark the match as in play on `campo`, optionally refereed by `arbitro`."""
    partido.estado = JUGANDO
    partido.arbitro = arbitro
    partido.campo = campo
    partido.save()
def set_partido_terminar(partido):
    """Mark the match as finished and record its winner (None on a draw)."""
    partido.estado = TERMINADO
    partido.ganador = get_equipo_ganador(partido)
    partido.save()
def get_equipo_ganador(partido):
    """Return the winning team of a finished match, or None for a draw or
    for a match that is not finished yet."""
    if partido.estado != TERMINADO:
        return None
    deporte = partido.grupo.fase.torneo.deporte
    if deporte.set:
        # set-based sports compare sets won instead of the raw score
        marcador_local = partido.get_numero_sets_local()
        marcador_visitante = partido.get_numero_sets_visitante()
    else:
        marcador_local = partido.resultado_local
        marcador_visitante = partido.resultado_visitante
    if marcador_local > marcador_visitante:
        return partido.equipo_local
    if marcador_visitante > marcador_local:
        return partido.equipo_visitante
    return None
def partido_posponer(fase, partido):
    """Postpone an in-play match, handing its court and referee to the next
    eligible waiting match; if no replacement exists, the original match is
    resumed instead of being postponed."""
    arbitro = partido.arbitro
    campo = partido.campo
    set_partido_espera(partido)
    campo_list = CampoController.get_campo_fase_list(fase)
    if campo in campo_list:
        # candidate replacements: waiting matches with free teams,
        # excluding the match we are postponing
        partidos_espera_list = get_partidos_espera_equipos_no_jugando_list(fase).exclude(pk=partido.id)
        if partidos_espera_list:
            partido_siguiente = partidos_espera_list.first()
            set_partido_jugar(partido_siguiente, campo, arbitro)
        else:  # the match cannot be postponed — resume it
            set_partido_jugar(partido, campo, arbitro)
def partido_forzar(fase, partido, campo):
    """Force `partido` onto `campo`: the match currently played there goes
    back to the waiting queue and its referee carries over."""
    desplazado = Partido.objects.get(grupo__fase=fase, campo=campo, estado=JUGANDO)
    arbitro = desplazado.arbitro
    set_partido_espera(desplazado)
    set_partido_jugar(partido, campo, arbitro)
    FaseController.iniciar_siguiente_partido(fase)
def partido_terminar(partido):
    """Finish `partido`: update its state and the standings, then kick off
    the next match with the winner as referee.

    On a draw, referee duty goes to whichever of the two teams has refereed
    fewer matches so far.
    """
    # Cambiar estado
    set_partido_terminar(partido)
    # Actualizar clasificacion
    ClasificacionController.actualizar_clasificacion(partido)
    # Iniciar siguiente partido
    arbitro = get_equipo_ganador(partido)
    if not arbitro:  # draw: balance refereeing load between the two teams
        n_equipo_local_arbitro = Partido.objects.filter(arbitro=partido.equipo_local).count()
        n_equipo_visitante_arbitro = Partido.objects.filter(arbitro=partido.equipo_visitante).count()
        if n_equipo_local_arbitro <= n_equipo_visitante_arbitro:
            arbitro = partido.equipo_local
        else:
            # BUG FIX: previously assigned the refereeing COUNT (an int)
            # instead of the visiting team object.
            arbitro = partido.equipo_visitante
    fase = partido.grupo.fase
    FaseController.iniciar_siguiente_partido(fase, arbitro)
|
# Read a non-negative integer and print its digits in reverse order.
num = int(input("enter the number"))
while num > 0:
    digit = num % 10
    print(digit, end="")  # keep all digits on the same line
    num = num // 10
24,539 | 19a14fcc7948b167b195aa4694ac2bf76063adc2 | # Importing the necessary modules
# Importing the necessary modules
import difflib
import datetime
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from netmiko import ConnectHandler

# Daily config-drift report: pull the running config over SSH, diff it
# against yesterday's snapshot, and e-mail the HTML diff.

# Defining the device to monitor
ip = '10.10.10.2'
# Defining the device type for running netmiko
device_type = 'arista_eos'
# Defining the username and password for running netmiko
# NOTE(review): credentials are hard-coded; consider env vars or a vault.
username = 'admin'
password = 'python'
# Defining the command to send to each device
command = 'show running'
# Connecting to the device via SSH
session = ConnectHandler(device_type=device_type, ip=ip, username=username, password=password, global_delay_factor=3)
# Entering enable mode
enable = session.enable()
# Sending the command and storing the output (running configuration)
output = session.send_command(command)
# Defining the file from yesterday, for comparison.
device_cfg_old = 'cfgfiles/' + ip + '_' + (datetime.date.today() - datetime.timedelta(days=1)).isoformat()
# Writing the command output to a file for today.
with open('cfgfiles/' + ip + '_' + datetime.date.today().isoformat(), 'w') as device_cfg_new:
    device_cfg_new.write(output + '\n')
# Extracting the differences between yesterday's and today's files in HTML format
with open(device_cfg_old, 'r') as old_file, open('cfgfiles/' + ip + '_' + datetime.date.today().isoformat(),
                                                 'r') as new_file:
    # BUG FIX: the readlines() calls were garbled ("old_file.r..") and
    # would raise AttributeError at runtime.
    difference = difflib.HtmlDiff().make_file(fromlines=old_file.readlines(), tolines=new_file.readlines(),
                                              fromdesc='Yesterday', todesc='Today')
# Sending the differences via email
# Defining the e-mail parameters
fromaddr = 'mihai.python3@gmail.com'
toaddr = 'mihai.python3@gmail.com'
# More on MIME and multipart: https://en.wikipedia.org/wiki/MIME#Multipart_messages
msg = MIMEMultipart()
msg['From'] = fromaddr
msg['To'] = toaddr
msg['Subject'] = 'Daily Configuration Management Report'
msg.attach(MIMEText(difference, 'html'))
# Sending the email via Gmail's SMTP server on port 587
server = smtplib.SMTP('smtp.gmail.com', 587)
# SMTP connection is in TLS (Transport Layer Security) mode. All SMTP commands that follow will be encrypted.
server.starttls()
# Logging in to Gmail and sending the e-mail
server.login('mihai.python3', 'python3.7')
server.sendmail(fromaddr, toaddr, msg.as_string())
server.quit()
# End Of Program
24,540 | afe5bfde91b85bf5b05e1823ea9934d021b8e2fa | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
__author__ = 'Jia-Yu Lu <jeanie0807@gmail.com>'
import csv
import json
# Export (user_id, movie_id) pairs from the recommendation model to model.txt.
# Improvement: file handles are now managed by `with`, so they are closed
# even if parsing fails midway.
with open('model.txt', 'w') as f:
    print('user_id, movie_id', file=f)
    with open('../data/model.json') as fin:
        data = json.load(fin)
    for id_item in data:
        # keys look like "user_<id>": keep the part after "user", then drop
        # its first character (presumably a separator — TODO confirm)
        id_item_s = id_item.split('user')[1]
        for item in data[id_item]:
            item_s = item.split('item')[1]
            print(f'{id_item_s[1:]}, {item_s[1:]}', file=f)
|
24,541 | 3a8ddb0160962325171449edebf17d50d930ba18 | from ScrolledText import ScrolledText
from tkMessageBox import *
from Tkinter import *
import tkFont
from PIL import ImageTk, Image
import threading
import platform
import client
from time import sleep
import os
import re
from helper import *
class GUI:
"""docstring for GUI"""
##########################################################################
# INIT
##########################################################################
def __init__(self, app):
    """Wire up fonts, the network client, background threads and the login
    screen. `app` is the Tk root window."""
    self.stickers = []
    # warm the sticker cache in the background so the picker opens fast
    self.preloadStickers = threading.Thread(target=lambda: self.showStickers(preload=True))
    self.preloadStickers.start()
    self.app = app
    app.title("chatS")
    # keep PhotoImage references alive — Tk drops unreferenced images
    self.images = []
    # 'current' holds the name of the friend whose chat is open ('' = none)
    self.tmp = {'current': ''}
    self.stickersWindowIsOpen = False
    # if platform.system() == 'Darwin':
    self.boldfont = tkFont.Font(family='Courier New',
                                size=20, weight='bold')
    self.mainfont = tkFont.Font(family='Courier New', size=14)
    self.smallfont = tkFont.Font(family='Courier New', size=12)
    # else:
    #     self.boldfont = tkFont.Font(family='Courier New',
    #                                 size=20, weight='bold')
    #     self.mainfont = tkFont.Font(family='Courier New', size=10)
    #     self.smallfont = tkFont.Font(family='Courier New', size=8)
    self.lightGray = '#%02x%02x%02x' % (242, 242, 242)
    self.lightBlue = '#%02x%02x%02x' % (98, 181, 197)
    self.client = client.Client()
    # consume server pushes on a background thread
    self.receive = threading.Thread(target=self.waitForUpdates)
    self.receive.start()
    self.geometrySetup()
    self.showLoginWindow()
    if not self.client.connectionEstablished:
        showinfo("Connection Failed", "Perhaps, server is not running at the time")
        os._exit(0)
    app.bind("<Escape>", self.quit)
##########################################################################
# GEOMETRY
##########################################################################
def geometrySetup(self):
    """Center a fixed 600x600 window on the left half of the screen and,
    on macOS, bring the Python process to the foreground."""
    screen_height = app.winfo_screenheight()
    screen_width = app.winfo_screenwidth()
    dim = 600
    x_pos = (screen_width / 2 - dim) / 2
    y_pos = (screen_height - 600) / 2
    app.geometry("%dx%d+%d+%d" % (dim, dim, x_pos, y_pos))
    app.resizable(False, False)
    if platform.system() == 'Darwin':
        os.system('''/usr/bin/osascript -e 'tell app "Finder" to set \
frontmost of process "Python" to true' ''')
##########################################################################
# LOGIN WINDOW
##########################################################################
def showLoginWindow(self):
    """Render the login screen: background image, title, username/password
    entries (Return submits) and a sign-up hint label."""
    self.clearFrame()
    mainCanvas = Canvas(app, width=610, height=610)
    wp = ImageTk.PhotoImage(Image.open("./home/images/login.png"))
    mainCanvas.create_image(610, 610, image=wp, anchor='se')
    self.images.append(wp)  # keep a reference so Tk does not drop the image
    mainCanvas.place(x=-5, y=-5)
    username = Entry(app, justify='center', bg=self.lightGray,
                     bd=0, exportselection=0, highlightthickness=0,
                     selectbackground=self.lightBlue, font=self.boldfont)
    username.place(anchor='center', width=150, relx=.5, rely=.45)
    username.insert(0, "username")
    username.focus_set()
    password = Entry(app, justify='center', bg=self.lightGray, show='*',
                     bd=0, exportselection=0, highlightthickness=0,
                     selectbackground=self.lightBlue, font=self.boldfont)
    password.place(anchor='center', width=150, relx=.5, rely=.55)
    password.insert(0, "login")
    # Return in either field attempts the login
    username.bind("<Return>", lambda event: self.logIn(
        event, username.get(), password.get()))
    password.bind("<Return>", lambda event: self.logIn(
        event, username.get(), password.get()))
    titlefont = tkFont.Font(family='Courier New',
                            size=80, weight='bold')
    mainCanvas.create_text(300, 90, text='chatS', width=500,
                           justify='center', fill='#000000',
                           font=titlefont)
    text = Label(app, text='New to chatS?', bg=self.lightBlue,
                 font=self.smallfont)
    text.place(anchor='center', x=300, y=570)
    text.bind("<Button-1>", self.showInfo)
##########################################################################
# SHOW INFO
##########################################################################
def showInfo(self, event):
    """Show a contextual help notification; the clicked label's text
    selects which hint to display."""
    widget = event.widget
    if widget['text'] == 'New to chatS?':
        # sign-up hint shown on the login screen
        notify = threading.Thread(
            target=lambda: self.notify("New to chatS?", "Enter new username and password and hit enter",
                                       anchor='n', x=300, ystart=600, yend=550, width=400, crop=False, wait=10))
        notify.start()
    elif widget['text'] == '?':
        # main-window usage help
        notify = threading.Thread(
            target=lambda: self.notify("New to chatS?", "Click '+' to add new user, '-' to remove existing one.\nReturn to send message, Shift+Return newline",
                                       crop=False, wait=10))
        notify.start()
    elif widget['text'] == '$':
        # easter-egg rating prompt
        notify = threading.Thread(
            target=lambda: self.notify("Like chatS?",
                                       "Rate it 100 on Moodle!",
                                       wait=5))
        notify.start()
##########################################################################
# LOGIN
##########################################################################
def logIn(self, event, usr, pwd):
    """Validate credentials locally, then attempt a server login; on success
    switch to the main window, otherwise show a failure notification."""
    if self.checkEntryIsDumb(usr, 'username'):
        return
    if self.checkEntryIsDumb(pwd, 'password', minLength=5,
                             file='./home/validation/dumb_passwords.txt'):
        return
    logInSuccess = self.client.logIn(usr, pwd)
    if logInSuccess:
        self.showMainWindow()
        app.title("chatS - %s" % self.client.username)
    else:
        notify = threading.Thread(
            target=lambda: self.notify("Log In Failed", "It may happen due to invalid username/password or bad connection.\nOtherwise, it is our fault and we are already working on it!",
                                       anchor='n', x=300, ystart=600, yend=550, width=500, crop=False, wait=10))
        notify.start()
##########################################################################
# USERNAME & PASSWORD VALIDATION
##########################################################################
def checkEntryIsDumb(self, entry, entryname, minLength=3,
                     file='./home/validation/swearWords.txt'):
    """Validate a username/password candidate.

    Returns True (after popping a failure notification) when the entry is
    rejected, False when it is acceptable. `file` lists banned words.
    """
    def reject(reason):
        # pop the notification from a worker thread so the UI stays live
        threading.Thread(
            target=lambda: self.notify("Login Failed", reason,
                                       anchor='n', x=300, ystart=600,
                                       yend=550, width=500)).start()
        return True

    if re.search('\W', entry):
        return reject("Please use only letters\n and digits for %s" % entryname)
    if len(entry) < minLength:
        return reject("Please pick longer %s" % entryname)
    if entryname == 'username' and len(entry) > 21:
        return reject("Please pick shorter %s" % entryname)
    with open(file) as f:
        banned = f.read().split()
    if entry.lower() in banned:
        return reject("Please pick proper %s" % entryname)
    return False
##########################################################################
# LOGOUT
##########################################################################
def logOut(self):
    """Tell the server this user is signing off."""
    self.client.logOut()
##########################################################################
# MAIN WINDOW
##########################################################################
def showMainWindow(self):
    """Render the main window: friends list with scrollbar on the left,
    toolbar buttons (+ add, - remove, ? help, $ rate) below it, and start
    the background thread that listens for server updates."""
    self.clearFrame()
    mainCanvas = Canvas(app, width=610, height=610, bg=self.lightGray)
    mainCanvas.place(x=-5, y=-5)
    scrollbar = Scrollbar(app, width=200)
    self.listbox = Listbox(app, yscrollcommand=scrollbar.set, borderwidth=0,
                           bg=self.lightBlue, fg='#000000',
                           selectbackground=self.lightGray, selectborderwidth=0,
                           font=self.mainfont)
    for k in self.client.friends:
        self.listbox.insert(END, " %s" % k)
    scrollbar.place(x=0, y=0, height=550)
    self.listbox.place(x=0, y=0, width=185, height=550)
    scrollbar.config(command=self.listbox.yview)
    # clicking a friend opens that chat
    self.listbox.bind('<<ListboxSelect>>', self.listboxSelect)
    text = Label(app, text='+', bg=self.lightBlue,
                 font=self.boldfont)
    text.place(anchor='center', x=25, y=576, width=50, height=49)
    text.bind("<Button-1>", lambda event: threading.Thread(target=lambda: self.notify('', '', bind='addFriend', wait=30)).start())
    text = Label(app, text='-', bg=self.lightBlue,
                 font=self.boldfont)
    text.place(anchor='center', x=75, y=576, width=49, height=49)
    text.bind("<Button-1>", self.deleteFriend)
    text = Label(app, text='?', bg=self.lightBlue,
                 font=self.boldfont)
    text.place(anchor='center', x=125, y=576, width=49, height=49)
    text.bind("<Button-1>", self.showInfo)
    text = Label(app, text='$', bg=self.lightBlue,
                 font=self.boldfont)
    text.place(anchor='center', x=175, y=576, width=49, height=49)
    text.bind("<Button-1>", self.showInfo)
    # listen for incoming messages in the background
    updates = threading.Thread(target=self.waitForUpdates)
    updates.start()
##########################################################################
# USER SELECTED FRIEND
##########################################################################
def listboxSelect(self, event):
    """Open the chat window for the friend clicked in the listbox.

    Selection events also fire with an empty selection (e.g. when the
    listbox loses focus); those are ignored.
    """
    widget = event.widget
    try:
        selection = widget.curselection()
        name = widget.get(selection[0]).strip()
    except IndexError:
        # nothing selected — ignore the event.
        # BUG FIX: was a bare `except: pass`, which also swallowed real
        # errors raised while rendering the chat window.
        return
    self.updateChatWindow(name)
##########################################################################
# DELETE FRIEND
##########################################################################
def deleteFriend(self, event):
    """Ask for confirmation before removing the selected friend; prompt the
    user to pick one first if nothing is selected."""
    selection = self.listbox.curselection()
    if not selection:
        threading.Thread(
            target=lambda: self.notify('Delete User', 'Choose user you want to delete',
                                       crop=False, wait=5)).start()
        return
    name = self.listbox.get(selection[0]).strip()
    threading.Thread(
        target=lambda: self.notify('Delete User', name, wait=10,
                                   bind='deleteFriend')).start()
##########################################################################
# ADD FRIEND
##########################################################################
def addFriend(self, name):
    """Ask the server to add `name` as a friend and refresh the UI; pops a
    notification for duplicates and unknown users. Returns 'break' so the
    Tk event stops propagating."""
    if name in self.client.friends:
        threading.Thread(target=lambda: self.notify('Already Your Friend', name)).start()
        return
    if self.client.addFriend(name) == True:
        self.client.friends[name] = []
        self.showMainWindow()
        self.updateChatWindow(name)
    else:
        threading.Thread(target=lambda: self.notify('Unknown User', name)).start()
    return 'break'
##########################################################################
# WAIT FOR UPDATES
##########################################################################
def waitForUpdates(self):
    """Background loop: block on the client for incoming messages, append
    them to the per-friend history and refresh whichever window is open.
    Exits the process if the server forces a disconnect."""
    while True:
        data = self.client.waitForMessages()
        if self.client.forceDisconnect[0]:
            self.client.saveData()
            showinfo("GoodBye", self.client.forceDisconnect[1])
            os._exit(0)
        if data:
            sentBy, msg = data
            # first message from a new contact creates their history
            if not sentBy in self.client.friends:
                self.client.friends[sentBy] = []
            self.client.friends[sentBy].append({'time': time(),
                                                'sentBy': sentBy,
                                                'message': msg,
                                                'status': ''})
            if self.tmp['current'] == '':
                # no chat open: just refresh the friends list
                self.showMainWindow()
            elif self.tmp['current'] == sentBy:
                # chat with the sender is open: redraw it
                self.updateChatWindow(sentBy)
                continue
            else:
                # another chat is open: preserve the draft text, redraw,
                # and pop a clickable notification for the new message
                text = self.userInput.get(1.0, END)
                self.showMainWindow()
                self.updateChatWindow(self.tmp['current'], text)
                notify = threading.Thread(
                    target=lambda: self.notify(sentBy, msg, bind='goToUser'))
                notify.start()
##########################################################################
# NOTIFY
##########################################################################
def notify(self, sentBy, msg, x=400, ystart=0, yend=50, width=400,
           height=50, anchor='s', bind='', wait=5, crop=True):
    """Slide a notification label in, keep it for `wait` seconds, then slide
    it out. Runs on a worker thread (it sleeps during the animation).

    `bind` selects the variant: '' plain, 'goToUser' click opens the chat,
    'deleteFriend' yes/no confirmation, 'addFriend' inline username entry.
    `crop` truncates long messages to one line.
    """
    if crop:
        showMsg = msg if len(msg) < 55 else msg[:52]+'...'
    else:
        showMsg = msg
    if bind == '':
        text = Label(app, text="%s\n%s" % (sentBy, showMsg),
                     bg=self.lightBlue, font=self.smallfont, width=width)
    elif bind == 'goToUser':
        text = Label(app, text="%s\n%s" % (sentBy, showMsg),
                     bg=self.lightBlue, font=self.smallfont, width=width)
        text.bind("<Button-1>", lambda event: self.updateChatWindow(sentBy))
    elif bind == 'deleteFriend':
        # confirmation dialog: msg holds the friend's name here
        text = Label(app, text="%s %s ?\n" % (sentBy, msg),
                     bg=self.lightBlue, font=self.smallfont, width=width)
        noLabel = Label(text, text="NO", bg=self.lightBlue,
                        font=self.mainfont)
        noLabel.place(relx=.4, rely=.8, anchor='c', width=50, height=20)
        noLabel.bind('<Button-1>', lambda e: closeNotification())
        yesLabel = Label(text, text="YES", bg=self.lightBlue,
                         font=self.mainfont)
        yesLabel.place(relx=.6, rely=.8, anchor='c', width=50, height=20)
        yesLabel.bind('<Button-1>', lambda e: yesDeleteFriend())

        def yesDeleteFriend():
            # drop the friend's history and reset the open-chat marker
            self.client.friends.pop(msg)
            self.tmp['current'] = ''
            self.showMainWindow()
            text.destroy()
            return
    elif bind == 'addFriend':
        # inline prompt with a username entry plus ok/cancel actions
        text = Label(app, text="Enter Username\n\n",
                     bg=self.lightBlue, font=self.smallfont, width=width)
        username = Entry(text, justify='center', bg=self.lightGray,
                         bd=0, exportselection=0, highlightthickness=0,
                         selectbackground=self.lightBlue, font=self.mainfont)
        username.place(relx=.5, rely=.65, anchor='c', width=200, height=25)
        username.bind('<Return>', lambda e: self.addFriend(username.get().strip()))
        username.focus_set()
        cancelLabel = Label(text, text="cancel", bg=self.lightBlue,
                            font=self.mainfont)
        cancelLabel.place(relx=.1, rely=.65, anchor='c', width=50, height=20)
        cancelLabel.bind('<Button-1>', lambda e: closeNotification())
        okLabel = Label(text, text="ok", bg=self.lightBlue,
                        font=self.mainfont)
        okLabel.place(relx=.9, rely=.65, anchor='c', width=50, height=20)
        okLabel.bind('<Button-1>', lambda e: self.addFriend(username.get().strip()))
    # slide the label in over ~100 ms (100 steps of 1 ms)
    n = 100.
    y = ystart
    step = (yend - ystart) / n
    while n:
        sleep(0.001)
        n -= 1
        y += step
        try:
            text.place(anchor=anchor, x=x, y=y, width=width, height=50)
        except:
            continue

    def removeNotification(n, y):
        # slide the label back out, mirroring the entry animation
        while n:
            sleep(0.001)
            n -= 1
            y -= step
            try:
                text.place(anchor=anchor, x=x, y=y, width=width, height=50)
            except:
                continue

    def closeNotification():
        # manual dismissal from the NO/cancel buttons
        removeNotification(100, y)
        text.destroy()
        return
    sleep(wait)
    removeNotification(100, y)
    text.destroy()
##########################################################################
# UPDATE CHAT WINDOW ON SELECT
##########################################################################
def updateChatWindow(self, name, entryText=''):
    """Rebuild the chat pane for friend *name* and focus the input box.

    Renders the friend's message history (text and stickers) oldest-first
    into a read-only ScrolledText, then recreates the user-input widget,
    optionally pre-filled with *entryText*.
    """
    # Remember which conversation is on screen; sendMessage() reads this back.
    self.tmp["current"] = name
    chatWindow = ScrolledText(app, undo=True, highlightthickness=0,
                              font=self.smallfont, bg=self.lightGray)
    messages = self.client.friends[name]
    # Sort by timestamp so out-of-order arrivals still render chronologically.
    messagesSorted = sorted(messages, key=lambda m: m['time'])
    windowWidth = 54  # NOTE(review): unused; the 54-column width is hard-coded in the format strings below.
    for i, m in enumerate(messagesSorted):
        header = "%s %s" % (m['time'], m['sentBy'])
        headerLength = len(header)
        # Own messages carry a delivery-status character at the end of the rule line.
        statusString = "-"*(headerLength-1)+m['status']
        endingString = "-"*headerLength
        message = m['message']
        if message.startswith("@@@sticker"):
            try:
                sticker = message.split(':')[1]
                if len(sticker) > 0:
                    sticker = './home/stickers/%s' % sticker
                    wp = ImageTk.PhotoImage(Image.open(sticker).resize((200, 200)))
                    # Keep a reference so Tk does not garbage-collect the image.
                    self.images.append(wp)
                    if m['sentBy'] == self.client.username:
                        # Right-align our own messages in a 54-character column.
                        message = "\n{:>54}\n{:>54}\n{:>54}\n".format(
                            statusString, header, endingString)
                    else:
                        message = "\n{}\n{}\n{}\n".format(
                            endingString, header, endingString)
                    chatWindow.insert(END, message)
                    padding = 180 if m['sentBy'] == self.client.username else 0
                    chatWindow.image_create(END, image=wp, padx=padding)
                    chatWindow.insert(END, '\n')
                    continue
            except:
                # Malformed sticker message: fall through and render it as plain text.
                pass
        if m['sentBy'] == self.client.username:
            message = "\n{:>54}\n{:>54}\n{:>54}\n{:>54}\n".format(
                statusString, header, endingString, message)
        else:
            message = "\n{}\n{}\n{}\n{}\n".format(
                endingString, header, endingString, message)
        chatWindow.insert(END, message)
    chatWindow.place(x=200, y=0, width=400, height=550)
    # chatWindow.see(END)
    chatWindow.yview(END)  # scroll to the newest message
    chatWindow.configure(state=DISABLED)  # make the history read-only
    self.userInput = ScrolledText(app, undo=True, highlightthickness=0,
                                  selectbackground=self.lightBlue, font=self.mainfont)
    if entryText != '':
        self.userInput.insert(END, entryText)
    self.userInput.place(x=250, y=550, width=350, height=50)
    self.userInput.bind("<Return>", self.sendMessage)
    # Shift+Return inserts nothing extra but keeps the send binding from firing alone.
    self.userInput.bind("<Shift-Return>", lambda e: self.userInput.insert(END, ""))
    self.userInput.focus_set()
    # '@' button opens the sticker picker in a background thread.
    text = Label(app, text='@', bg='white',
                 font=self.boldfont, anchor=CENTER)
    text.place(anchor='center', x=225, y=576, width=49, height=50)
    text.bind("<Button-1>", lambda e: threading.Thread(target=self.showStickers).start())
##########################################################################
# STICKERS WINDOW
##########################################################################
def showStickers(self, preload=False):
    """Toggle the sticker-picker panel, or pre-cache sticker images.

    With preload=True, load every image in ./home/stickers/ into
    self.stickers (unreadable files are skipped) and return without
    touching the UI. Otherwise open the picker panel, or close it if
    it is already open.
    """
    if preload:
        for sticker in os.listdir('./home/stickers/'):
            try:
                img = ImageTk.PhotoImage(Image.open('./home/stickers/'+sticker).resize((92,92)))
                self.stickers.append([sticker, img])
            # Fixed: `except Exception, e:` is Python-2-only syntax and the
            # binding was unused; non-image files are simply skipped.
            except Exception:
                pass
        return
    if self.stickersWindowIsOpen:
        self.stickersWindow.destroy()
        self.stickersWindowIsOpen = False
    else:
        self.stickersWindow = Canvas(app, bg=self.lightGray)
        self.stickersWindow.place(anchor='sw', x=200, y=550, height=288, width=385)
        k = 0
        for sticker, img in self.stickers:
            # The label carries the sticker filename in its text; sendMessage reads it back.
            label = Label(self.stickersWindow, image=img, text=sticker)
            # Floor division keeps the row an int on both Python 2 and 3
            # (plain `/` would pass a float row to grid() under Python 3).
            label.grid(row=k // 4, column=k % 4)
            label.bind('<Button-1>', lambda e: threading.Thread(target=lambda: self.sendMessage(e)).start())
            k += 1
        self.stickersWindowIsOpen = True
##########################################################################
# SEND MESSAGE
##########################################################################
def sendMessage(self, event):
    """Send the pending message (text or sticker) to the current friend.

    Bound to <Return> on the input box and to clicks on sticker labels;
    *event* is the triggering Tk event. Returns 'break' so Tk does not
    also insert a newline into the input widget.
    """
    if self.stickersWindowIsOpen:
        # A sticker label was clicked: encode its filename in the message body.
        self.stickersWindow.place_forget()
        self.stickersWindowIsOpen = False
        label = event.widget
        msg = "@@@sticker:%s" % label['text']
    else:
        msg = self.userInput.get(1.0, END).strip()
    rcpt = self.tmp['current']
    self.client.sendMessage(rcpt, msg)
    # 'V' = delivered, 'X' = not delivered (shown in the chat header rule line).
    status = 'V' if self.client.messageDelivered else 'X'
    self.client.friends[rcpt].append({'time': time(),
                                      'sentBy': self.client.username,
                                      'message': msg,
                                      'status': status})
    self.updateChatWindow(rcpt)
    return 'break'
##########################################################################
# CLEAR FRAME
##########################################################################
def clearFrame(self):
    """Remove every child widget from the root window."""
    for child in list(app.winfo_children()):
        child.destroy()
##########################################################################
# QUICK SHORTCUT HANDLERS
##########################################################################
def quit(self, event):
    """Keyboard-shortcut handler: persist client data, then stop the Tk loop."""
    self.client.saveData()
    app.quit()
##########################################################################
# MAIN
##########################################################################
if __name__ == '__main__':
    # Build the root window and hand it to the GUI controller.
    app = Tk()
    gui = GUI(app)
    app.mainloop()
    # mainloop() returned: log out, then hard-exit so background threads die too.
    gui.logOut()
    os._exit(0)
|
24,542 | dc73a150161dd7fac573d6411836ae76975b4df7 | """
author : QY
"""
from page.basepage import BasePage
from page.member_details_page import MemberDetailsPage
class ContactsPage(BasePage):
    """Contacts page object: searching for a member by name."""

    def find_message(self, sname):
        """Type *sname* into the member search box and return the details page."""
        # Enter the member information to search for.
        self.send_keys_by_class("qui_inputText.ww_inputText.ww_searchInput_text", sname)
        return MemberDetailsPage(self.driver)
|
24,543 | f4499349b9cb60354d4b1630a5c5b970ffdf373c | from models import *
from utils import *
import cv2
import matplotlib.pyplot as plt
from PIL import Image
from sort import *
class track():
    """Run SORT multi-object tracking over a video using a YOLO-style detector."""

    def __init__(self, object_detect, videopath, img_size, classes):
        # object_detect: detector exposing detect_image(PIL.Image) -> detections tensor.
        self.object_detect = object_detect
        self.videopath = videopath
        self.img_size = img_size  # square size the detector letterboxes frames to
        self.classes = classes    # index -> class-name sequence

    def track_object(self):
        """Track detections frame by frame; display and save an annotated video.

        Writes the annotated copy next to the input (".mp4" -> "-det.mp4").
        Pressing Esc in the preview window stops early.
        NOTE(review): relies on `time` and `np` being in scope via the
        star-imports at the top of the file — confirm `utils` provides them.
        """
        cmap = plt.get_cmap('tab20b')  # NOTE(review): unused; colors below are hard-coded.
        colors=[(255,0,0),(0,255,0),(0,0,255),(255,0,255),(128,0,0),(0,128,0),(0,0,128),(128,0,128),(128,128,0),(0,128,128)]
        # initialize Sort object and video capture
        vid = cv2.VideoCapture(self.videopath)
        mot_tracker = Sort()
        cv2.namedWindow('Stream', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('Stream', (800, 600))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        # Read one frame up front to learn the output dimensions.
        ret, frame = vid.read()
        vw = frame.shape[1]
        vh = frame.shape[0]
        print("Video size", vw, vh)
        outvideo = cv2.VideoWriter(self.videopath.replace(".mp4", "-det.mp4"), fourcc, 20.0, (vw, vh))
        frames = 0
        starttime = time.time()
        while (True):
            ret, frame = vid.read()
            if not ret:
                break
            frames += 1
            # Detector works on RGB PIL images; OpenCV frames are BGR.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            pilimg = Image.fromarray(frame)
            detections = self.object_detect.detect_image(pilimg)
            frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
            img = np.array(pilimg)
            # Undo the detector's letterbox padding to map boxes back to frame pixels.
            pad_x = max(img.shape[0] - img.shape[1], 0) * (self.img_size / max(img.shape))
            pad_y = max(img.shape[1] - img.shape[0], 0) * (self.img_size / max(img.shape))
            unpad_h = self.img_size - pad_y
            unpad_w = self.img_size - pad_x
            if detections is not None:
                tracked_objects = mot_tracker.update(detections.cpu())
                unique_labels = detections[:, -1].cpu().unique()
                n_cls_preds = len(unique_labels)
                for x1, y1, x2, y2, obj_id, cls_pred in tracked_objects:
                    box_h = int(((y2 - y1) / unpad_h) * img.shape[0])
                    box_w = int(((x2 - x1) / unpad_w) * img.shape[1])
                    y1 = int(((y1 - pad_y // 2) / unpad_h) * img.shape[0])
                    x1 = int(((x1 - pad_x // 2) / unpad_w) * img.shape[1])
                    # Stable per-track color so an object keeps its color across frames.
                    color = colors[int(obj_id) % len(colors)]
                    cls = self.classes[int(cls_pred)]
                    cv2.rectangle(frame, (x1, y1), (x1 + box_w, y1 + box_h), color, 4)
                    cv2.rectangle(frame, (x1, y1 - 35), (x1 + len(cls) * 19 + 80, y1), color, -1)
                    cv2.putText(frame, cls + "-" + str(int(obj_id)), (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                (255, 255, 255), 3)
            cv2.imshow('Stream', frame)
            outvideo.write(frame)
            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
        totaltime = time.time() - starttime
        print(frames, "frames", totaltime / frames, "s/frame")
        cv2.destroyAllWindows()
        outvideo.release()
24,544 | 66d566f2427c37e7adf5b2bd62a2916abd345ee9 | from models.PASMnet import *
from datasets.kitti_dataset import KITTIDataset
from torch.utils.data import DataLoader
import torch.backends.cudnn as cudnn
from utils import *
import argparse
from loss import *
def parse_args():
    """Build and parse the command-line configuration for PASMnet training."""
    ap = argparse.ArgumentParser()
    # hardware / data locations
    ap.add_argument('--device', type=str, default='cuda:0')
    ap.add_argument('--max_disp', type=int, default=0, help='prior maximum disparity, 0 for unavailable')
    ap.add_argument('--dataset', type=str, default='KITTI2015')
    ap.add_argument('--datapath', default='D:/LongguangWang/Data/KITTI2015', help='data path')
    ap.add_argument('--savepath', default='log/', help='save path')
    # optimization hyper-parameters
    ap.add_argument('--lr', type=float, default=1e-4, help='initial learning rate')
    ap.add_argument('--batch_size', type=int, default=14)
    ap.add_argument('--n_workers', type=int, default=2, help='number of threads in dataloader')
    ap.add_argument('--gamma', type=float, default=0.1)
    ap.add_argument('--n_epochs', type=int, default=80, help='number of epochs to train')
    ap.add_argument('--n_steps', type=int, default=60, help='number of epochs to update learning rate')
    # checkpointing / logging
    ap.add_argument('--resume_model', type=str, default=None)
    ap.add_argument('--print_freq', type=int, default=1, help='the frequency of printing losses (epchs)')
    ap.add_argument('--save_freq', type=int, default=40, help='the frequency of saving models (epochs)')
    return ap.parse_args()
def train(train_loader, cfg):
    """Train PASMnet on KITTI with unsupervised disparity + PAM losses.

    Loads either a SceneFlow-pretrained checkpoint (fresh run) or the
    checkpoint named by cfg.resume_model, then trains for cfg.n_epochs
    with a stepwise-decayed Adam learning rate, periodically printing
    metrics and saving checkpoints under cfg.savepath.
    """
    net = PASMnet().to(cfg.device)
    # NOTE(review): hard-coded two-GPU DataParallel; fails on single-GPU hosts.
    net = nn.DataParallel(net, device_ids=[0,1])
    net.train()
    cudnn.benchmark = True
    optimizer = torch.optim.Adam(net.parameters(), lr=cfg.lr)
    # *_epoch accumulate within the current print/save window; *_list persist across it.
    loss_epoch = []
    loss_list = []
    EPE_epoch = []
    D3_epoch = []
    EPE_list = []
    epoch_start = 0
    if cfg.resume_model is None:
        # load model pre-trained on SceneFlow
        if cfg.max_disp == 0:
            ckpt = torch.load('log/PASMnet_SceneFlow_epoch10.pth.tar')
        else:
            ckpt = torch.load('log/PASMnet_' + str(cfg.max_disp) + '_SceneFlow_epoch10.pth.tar')
        if isinstance(net, nn.DataParallel):
            # Checkpoint may be a raw state dict or a wrapped training checkpoint.
            if 'state_dict' in ckpt:
                net.module.load_state_dict(ckpt['state_dict'])
            else:
                net.module.load_state_dict(ckpt)
        else:
            net.load_state_dict(ckpt['state_dict'])
    else:
        # Resume: restore weights plus epoch counter and metric history.
        ckpt = torch.load(cfg.resume_model)
        if isinstance(net, nn.DataParallel):
            net.module.load_state_dict(ckpt['state_dict'])
        else:
            net.load_state_dict(ckpt['state_dict'])
        epoch_start = ckpt['epoch']
        loss_list = ckpt['loss']
        EPE_list = ckpt['EPE']
    for epoch in range(epoch_start, cfg.n_epochs):
        # lr stepwise: decay by cfg.gamma every cfg.n_steps epochs
        lr = cfg.lr * (cfg.gamma ** (epoch // cfg.n_steps))
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        for iteration, data in enumerate(train_loader):
            img_left, img_right = data['left'].to(cfg.device), data['right'].to(cfg.device)
            disp_gt = data['left_disp'].to(cfg.device).unsqueeze(1)
            disp, att, att_cycle, valid_mask = net(img_left, img_right, max_disp=cfg.max_disp)
            # loss-D: unsupervised photometric disparity loss
            loss_P = loss_disp_unsupervised(img_left, img_right, disp, F.interpolate(valid_mask[-1][0], scale_factor=4, mode='nearest'))
            # loss-S: edge-aware disparity smoothness
            loss_S = loss_disp_smoothness(disp, img_left)
            # loss-PAM: parallax-attention photometric / cycle / smoothness terms
            loss_PAM_P = loss_pam_photometric(img_left, img_right, att, valid_mask)
            loss_PAM_C = loss_pam_cycle(att_cycle, valid_mask)
            loss_PAM_S = loss_pam_smoothness(att)
            loss_PAM = loss_PAM_P + 5 * loss_PAM_S + 5 * loss_PAM_C
            # losses: total objective
            loss = loss_P + 0.5 * loss_S + loss_PAM
            loss_epoch.append(loss.data.cpu())
            # metrics: ground truth is only used for evaluation, not for the loss
            mask = disp_gt > 0
            EPE_epoch += EPE_metric(disp, disp_gt, mask)
            for i in range(cfg.batch_size):
                D3_epoch += D1_metric(disp[i, :, :, :].unsqueeze(0), disp_gt[i, :, :, :].unsqueeze(0), mask[i, :, :, :].unsqueeze(0), 3)
            # backward
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        # print
        if (epoch+1) % cfg.print_freq == 0:
            print('Epoch----%5d, loss---%f, EPE---%f, D3---%f' %
                  (epoch + 1,
                   float(np.array(loss_epoch).mean()),
                   float(np.array(EPE_epoch).mean()),
                   float(np.array(D3_epoch).mean())))
        if (epoch+1) % cfg.save_freq == 0:
            loss_list.append(float(np.array(loss_epoch).mean()))
            EPE_list.append(float(np.array(EPE_epoch).mean()))
            if cfg.max_disp == 0:
                filename = 'PASMnet_' + cfg.dataset + '_epoch' + str(epoch + 1) + '.pth.tar'
            else:
                filename = 'PASMnet_' + str(cfg.max_disp) + '_' + cfg.dataset + '_epoch' + str(epoch + 1) + '.pth.tar'
            save_ckpt({
                'epoch': epoch + 1,
                'state_dict': net.module.state_dict() if isinstance(net, nn.DataParallel) else net.state_dict(),
                'loss': loss_list,
                'EPE': EPE_list
            }, save_path=cfg.savepath, filename=filename)
            # Reset the per-window accumulators after each checkpoint.
            loss_epoch = []
            EPE_epoch = []
            D3_epoch = []
def main(cfg):
    """Build the KITTI training set named by cfg.dataset and start training."""
    # NOTE(review): if cfg.dataset is neither KITTI2012 nor KITTI2015,
    # train_set is never bound and the DataLoader line raises NameError.
    if cfg.dataset == 'KITTI2012':
        train_set = KITTIDataset(datapath=cfg.datapath, list_filename='filenames/kitti12_train.txt', training=True)
    if cfg.dataset == 'KITTI2015':
        train_set = KITTIDataset(datapath=cfg.datapath, list_filename='filenames/kitti15_train.txt', training=True)
    train_loader = DataLoader(dataset=train_set, num_workers=cfg.n_workers, batch_size=cfg.batch_size, shuffle=True, drop_last=True, pin_memory=True)
    train(train_loader, cfg)
if __name__ == '__main__':
    # Parse CLI flags and run training.
    cfg = parse_args()
    main(cfg)
|
24,545 | 15168ecb8d16fe99db1947dcfe4d85792fd6e113 | """empty message
Revision ID: 26f2a71c1ad2
Revises:
Create Date: 2020-08-14 20:15:58.121281
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '26f2a71c1ad2'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: redditors, templates, train_data,
    reddit_memes and reddit_scores.

    Parent tables are created first because reddit_memes and
    reddit_scores carry foreign keys onto redditors.username.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('redditors',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    # username must be unique so it can be an FK target below.
    sa.UniqueConstraint('username')
    )
    op.create_table('templates',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('page', sa.String(length=100), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('train_data',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=False),
    sa.Column('features', sa.ARRAY(sa.Float()), nullable=False),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('reddit_memes',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.Column('reddit_id', sa.String(length=20), nullable=False),
    sa.Column('subreddit', sa.String(length=50), nullable=False),
    sa.Column('title', sa.String(length=500), nullable=False),
    sa.Column('url', sa.String(length=200), nullable=False),
    sa.Column('meme_text', sa.String(length=10000), nullable=True),
    sa.Column('template', sa.String(length=100), nullable=True),
    sa.Column('timestamp', sa.Integer(), nullable=False),
    sa.Column('datetime', sa.DateTime(), nullable=False),
    sa.Column('upvote_ratio', sa.Float(), nullable=False),
    sa.Column('upvotes', sa.Integer(), nullable=False),
    sa.Column('downvotes', sa.Integer(), nullable=False),
    sa.Column('num_comments', sa.Integer(), nullable=False),
    sa.Column('features', sa.ARRAY(sa.Float()), nullable=False),
    sa.ForeignKeyConstraint(['username'], ['redditors.username'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('reddit_scores',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=20), nullable=False),
    sa.Column('subreddit', sa.String(length=50), nullable=False),
    sa.Column('time_delta', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.Integer(), nullable=False),
    sa.Column('datetime', sa.DateTime(), nullable=False),
    sa.Column('final_score', sa.Float(), nullable=False),
    sa.Column('raw_score', sa.Float(), nullable=False),
    sa.Column('num_in_bottom', sa.Integer(), nullable=False),
    sa.Column('num_in_top', sa.Integer(), nullable=False),
    sa.Column('shitposter_index', sa.Float(), nullable=False),
    sa.Column('highest_upvotes', sa.Integer(), nullable=False),
    sa.Column('hu_score', sa.Float(), nullable=False),
    sa.Column('lowest_ratio', sa.Float(), nullable=False),
    sa.ForeignKeyConstraint(['username'], ['redditors.username'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop every table created by upgrade(), children before parents."""
    # Reverse creation order so foreign-key dependents go before their targets.
    for table_name in ('reddit_scores', 'reddit_memes', 'train_data',
                       'templates', 'redditors'):
        op.drop_table(table_name)
|
24,546 | 74daac7442ce8b49eabd9a2f13e16b9cd332322d | import os
import sys
import json
import requests
import nltk
import pymysql
# from nltk.tokenize import sent_tokenize,word_tokenize
#from chatterbot import ChatBot
from flask import Flask, request,jsonify
from nltk import ne_chunk,pos_tag
from sklearn.externals import joblib
import numpy as np
import spacy
nlp = spacy.load('en')
import token_function
from SpacyTraining_Products import predictEnt
# import os
# os.system('token_function.py')
import string
from nltk import word_tokenize
from nltk.stem.porter import PorterStemmer
import pandas as pd
stemmer = PorterStemmer()
def stem_tokens(tokens, stemmer):
    """Return the stem of every token in *tokens*, in order.

    Args:
        tokens: iterable of token strings.
        stemmer: object with a ``stem(token) -> str`` method
            (e.g. nltk's PorterStemmer).
    """
    # Comprehension replaces the manual append loop; output order is unchanged.
    return [stemmer.stem(token) for token in tokens]
def tokenize(text):
    """Strip punctuation from *text*, word-tokenize it, and stem each token."""
    cleaned = "".join(ch for ch in text if ch not in string.punctuation)
    return stem_tokens(word_tokenize(cleaned), stemmer)
clf = joblib.load('qa_clf.pkl')
vect = joblib.load('vectorizer.pkl')
app = Flask(__name__)
#This is to train the model with English corpus
# chatbot = ChatBot(
# 'RChat',
# trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
# )
# chatbot.train("chatterbot.corpus.english")
@app.route('/', methods=['GET'])
def verify():
    """Facebook webhook verification endpoint (GET /).

    When registered as a webhook the endpoint must echo back the
    'hub.challenge' query value, after checking 'hub.verify_token'
    against the VERIFY_TOKEN environment variable.
    """
    # when the endpoint is registered as a webhook, it must echo back
    # the 'hub.challenge' value it receives in the query arguments
    if request.args.get("hub.mode") == "subscribe" and request.args.get("hub.challenge"):
        if not request.args.get("hub.verify_token") == os.environ["VERIFY_TOKEN"]:
            return "Verification token mismatch", 403
        return request.args["hub.challenge"], 200
    return "Hello world", 200
@app.route('/get/<string:prod>',methods=['GET'])
def get_raw_response(prod):
    """Debug endpoint: run intent/entity extraction and product lookup.

    NOTE(review): the *prod* path parameter is currently ignored; the
    query is hard-coded to 'I want to buy a Blanket' — confirm intent.
    """
    #return str(chatbot.get_response(query))
    #uri="http://localhost:8080/prod/1234"#+prod_id
    #res=getProductDetails(prod)
    intent=getIntent('I want to buy a Blanket')
    entity=getEntity('I want to buy a Blanket')
    res=getProductDetails(intent,entity)
    return res
def getProductDetails(intent, entity):
    """Look up products whose title contains *entity* and return them as JSON.

    Args:
        intent: classified intent label (currently unused in the query).
        entity: product entity string matched with a SQL LIKE pattern.

    Returns:
        JSON string of (title, price) rows.
    """
    conn = pymysql.connect(host='kaushal', user='dbuser', password='Tesco@123', db='productdb')
    try:
        cursor = conn.cursor()
        # Parameterized LIKE pattern: the previous string-concatenated query
        # was vulnerable to SQL injection via `entity`, and was executed twice.
        sql = "select title,price from orgproddetails where title like %s"
        cursor.execute(sql, ('%' + entity + '%',))
        data = cursor.fetchall()
    finally:
        # Always release the connection, even if the query fails.
        conn.close()
    return json.dumps(data)
def getIntent(query):
    """Classify *query* with the pre-trained intent model.

    Uses the module-level `vect` vectorizer and `clf` classifier loaded
    from disk at import time; returns the single predicted intent label.
    """
    X_test = np.array([query])
    X_t = vect.transform(X_test)
    # feature_names = vect.get_feature_names()
    # for token in X_t.nonzero()[1]:
    # return (feature_names[token], ' - ', X_t[0, token])
    pred = clf.predict(X_t)
    return pred[0]
def getEntity(query):
    """Extract the product entity from *query* via the trained spaCy model."""
    ent=predictEnt(query)
    return ent
@app.route('/', methods=['POST'])
def webhook():
    """Process incoming Facebook Messenger webhook events (POST /).

    For each message event: reply with a greeting for "Hi", otherwise
    with the classified intent, always followed by an acknowledgement.
    """
    # endpoint for processing incoming messaging events
    data = request.get_json()
    #log(data["object"]) # you may not want to log every incoming message in production, but it's good for testing
    # NOTE(review): real page-event payloads carry no top-level "message" key,
    # so these two lines raise KeyError for standard events — confirm the
    # payload shape this endpoint is expected to receive.
    intent=getIntent(data["message"])
    entity=getEntity(data["message"])
    res="Intent "+intent+" "+str(entity)
    if data["object"] == "page":
        for entry in data["entry"]:
            for messaging_event in entry["messaging"]:
                if messaging_event.get("message"): # someone sent us a message
                    sender_id = messaging_event["sender"]["id"] # the facebook ID of the person sending you the message
                    recipient_id = messaging_event["recipient"]["id"] # the recipient's ID, which should be your page's facebook ID
                    message_text = messaging_event["message"]["text"] # the message's text
                    #response = chatbot.get_response(message_text)
                    #response=getProductDetails(message_text)
                    if message_text=="Hi":
                        send_message(sender_id, "Hello, How can I help you?")
                    else:
                        # Fixed: a second call previously passed the `getIntent`
                        # function object itself, sending its repr to the user.
                        send_message(sender_id, getIntent(message_text))
                    send_message(sender_id, "Thanks For your message, we will get back to you at the earliest")
                if messaging_event.get("delivery"): # delivery confirmation
                    pass
                if messaging_event.get("optin"): # optin confirmation
                    pass
                if messaging_event.get("postback"): # user clicked/tapped "postback" button in earlier message
                    pass
    else:
        # NOTE(review): `messaging_event` is unbound in this branch (the loops
        # above never ran), so this raises NameError if reached — confirm intent.
        sender_id = messaging_event["sender"]["id"]
        send_message(sender_id, "Thanks For your message, we will get back to you at the earliest")
    return res, 200
def send_message(recipient_id, message_text):
    """POST *message_text* to *recipient_id* via the Facebook Send API.

    Requires the PAGE_ACCESS_TOKEN environment variable; non-200
    responses are logged but not raised.
    """
    log("sending message to {recipient}: {text}".format(recipient=recipient_id, text=message_text))
    params = {
        "access_token": os.environ["PAGE_ACCESS_TOKEN"]
    }
    headers = {
        "Content-Type": "application/json"
    }
    data = json.dumps({
        "recipient": {
            "id": recipient_id
        },
        "message": {
            "text": message_text
        }
    })
    r = requests.post("https://graph.facebook.com/v2.6/me/messages", params=params, headers=headers, data=data)
    if r.status_code != 200:
        # Log and continue: a failed send should not crash the webhook handler.
        log(r.status_code)
        log(r.text)
def log(message):
    """Write *message* to stdout and flush so Heroku captures it promptly."""
    sys.stdout.write(str(message) + "\n")
    sys.stdout.flush()
if __name__ == '__main__':
app.run(debug=True,host='0.0.0.0')
|
24,547 | 5ff4a207b6c5205530946f17d1846ed3f46f9b2c | #! /usr/bin/python3
# -*- coding: utf-8 -*-
from .BERTEmbedding import BERTEmbedding
from .StaticEmbedding import StaticEmbedding
__author__ = 'Jayeol Chun'
|
24,548 | 533ba909414c8f40bf021ba4da329f956fa7bc30 | from .c2 import *
from .ui import * |
24,549 | 1a15b5eb9bbe4351b4a098e31fa2ef2655f2923b | # -*- coding:utf-8 -*-
from api.api import API
from pages.android.common.super_page import SuperPage
from cases.android.ffan.film.movie_goupiao_page_configs import moviegoupiaoConfigs as MGPPC
from pages.logger import logger
from appium.webdriver.common.mobileby import MobileBy
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
import time
class Moviegoupiaopage(SuperPage):
    '''
    Author: Liu Xiao
    Page object for Home => Movies => movie-ticket purchase operations.
    All actions delegate to the shared API() helpers and log begin/end
    markers around each UI interaction.
    '''
    def __init__(self, testcase, driver, logger):
        # Pass-through to the shared page base class.
        super(Moviegoupiaopage, self).__init__(testcase, driver, logger)

    def validSelf(self):
        '''
        usage: check that the order-confirmation (cashier) page is shown
        '''
        logger.info("Check 飞凡收银台 begin")
        API().assertElementByResourceId(self.testcase,
                                        self.driver,
                                        self.logger,
                                        MGPPC.resource_id_iv_dingdanqueren,
                                        90)
        logger.info("Check 飞凡收银台 end")

    def clickchengshi(self):
        '''
        usage: click the "city" button in the top-left corner
        '''
        logger.info("Click 切换城市 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_chengshi,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 切换城市 end")

    def clickanyang(self):
        '''
        usage: select Anyang city
        '''
        logger.info("Click 选择安阳市 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_anyang,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 选择安阳市 end")

    def clickonbeijing(self):
        '''
        usage: select Beijing city
        '''
        logger.info("Click 选择北京市 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_beijingshi_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 选择北京市 end")

    def clickyingyuan(self):
        '''
        usage: click the "cinemas" button
        '''
        logger.info("Click 点击影院按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_yingyuan_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击影院按钮 end")

    def clickOnxuanzuo(self):
        '''
        usage: click the "choose seats" button
        '''
        logger.info("Click 点击‘选座’按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_xuanzuo_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘选座’按钮 end")

    def clickOnyingpian(self):
        '''
        usage: click a "cinema" list entry to open the cinema details
        '''
        logger.info("Click 点击‘影院’列表 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_yingyuan_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘影院’列表 end")

    def clickonpingpai(self):
        '''
        usage: click the "brands" list to show all cinemas
        '''
        logger.info("Click 点击‘品牌’按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_pingpai_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘品牌’按钮 end")

    def clickhengdian(self):
        '''
        usage: click the "Hengdian Cinema" entry to show Hengdian cinemas
        '''
        logger.info("Click 点击‘横店电影城’按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_hengdian_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘横店电影城’按钮 end")

    def clickonqita(self):
        '''
        usage: click the "Other" entry in the brand list
        '''
        logger.info("Click 点击‘其他’按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_qita_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘其他’按钮 end")

    def clickonquanyeyingyuan(self):
        '''
        usage: click the "Quanye Cinema" entry to show Quanye cinemas
        '''
        logger.info("Click 点击‘劝业影院’按钮 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_movie_quanyeyingyuan_button,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 点击‘劝业影院’按钮 end")

    def clickyingyuanxiangqing(self):
        '''
        usage: click a cinema on the list page to open its details
        '''
        logger.info("Click 点击电影院 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_yingyuan_jiage,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 点击电影院 end")

    def clickOnbuy(self):
        '''
        usage: in the cinema details, click the seats/deal button
               to jump to the seat-selection page
        '''
        logger.info("Click 点击选座按钮 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_buy,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 点击选座按钮 end")

    def clickxuanhaole(self):
        '''
        usage: click the "done choosing" button to jump to the order page
        '''
        logger.info("Click 点击选好了按钮 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_xuanhaole,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 点击选好了按钮 end")

    def clickfanhuishouye(self):
        '''
        usage: return to the home page
        '''
        logger.info("Click 返回首页 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_fanhuishouye,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 返回首页 end")

    def clicktijaiodingdan(self):
        '''
        usage: click the "submit order" button
        '''
        logger.info("Click 点击提交订单按钮 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_tijiaodingdan,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 点击提交订单按钮 end")

    def clickxuanzuo(self):
        '''
        usage: pick a seat on the seat map
        '''
        logger.info("Click 选择座位 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_xuanzuo,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 选择座位 end")

    def clickfanhui(self):
        '''
        usage: go back one level
        '''
        logger.info("Click 返回按钮 begin")
        API().clickElementByResourceId(self.testcase,
                                       self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_fanhui,
                                       MGPPC.click_on_button_timeout)
        logger.info("Click 返回按钮 end")

    def validbeijing(self):
        '''
        usage: check that the city has switched back to Beijing
        '''
        logger.info("Check 北京市 begin")
        API().assertElementByText(self.testcase,
                                  self.driver,
                                  self.logger,
                                  MGPPC.text_movie_beijingshi_button,
                                  90)
        logger.info("Check 北京市 end")

    def validcity(self):
        '''
        usage: verify that the switch-city button is present
        '''
        logger.info("check 验证切换城市按钮 begin")
        API().validElementByResourceId(self.driver,
                                       self.logger,
                                       MGPPC.resource_id_iv_chengshi,
                                       15)
        logger.info("check 验证切换城市按钮 end")

    def clickBackKey(self):
        '''
        usage: press the Android system back key
        '''
        logger.info("click 点击系统返回按钮 begin")
        API().clickBackKeyForAndroid(self.driver,
                                     self.logger)
        logger.info("click 点击系统返回按钮 end")

    def inputchengshi(self):
        '''
        usage: type the city name into the city search box
        '''
        logger.info("Input 输入城市名 begin")
        API().inputStringByResourceId(self.testcase,
                                      self.driver,
                                      self.logger,
                                      MGPPC.resource_id_iv_sousuochengshi,
                                      MGPPC.chengshi_name,
                                      10)
        logger.debug(MGPPC.chengshi_name)
        logger.info("Input 输入城市名 end")

    def clickonbaotoushi(self):
        '''
        usage: select Baotou city
        '''
        logger.info("Click 选择包头市 begin")
        API().clickElementByText(self.testcase,
                                 self.driver,
                                 self.logger,
                                 MGPPC.text_baotoushi,
                                 MGPPC.click_on_button_timeout)
        logger.info("Click 选择包头市 end")
24,550 | f7038ce62667e62a2c50c644db838bd1310c3ae4 | # Generated by Django 3.1 on 2020-08-27 22:59
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add search date-range fields to SubletListing.

    NOTE(review): the defaults are the fixed timestamp captured when the
    migration was auto-generated (2020-08-27), not "now" at insert time.
    """

    dependencies = [
        ('sublets', '0008_subletplace_landlord'),
    ]

    operations = [
        migrations.AddField(
            model_name='subletlisting',
            name='end_date_search',
            field=models.DateField(default=datetime.datetime(2020, 8, 27, 18, 59, 27, 222548)),
        ),
        migrations.AddField(
            model_name='subletlisting',
            name='start_date_search',
            field=models.DateField(default=datetime.datetime(2020, 8, 27, 18, 59, 27, 222548)),
        ),
    ]
|
24,551 | 170a1af25eaf61d7510921bd83bf7848dd23bd5c | from typing import List, Union, Callable
from autopipe import Coordinator, Pipe, APData, Output
from autopipe.input import RssInput
from autopipe.pipe import FileData, DownloaderPipe
class DownloadExample(Coordinator):
    """Autopipe coordinator that downloads images from an RSS image feed."""

    def __init__(self, query: str = "raccoon"):
        super().__init__()
        self.query = query  # search term appended to the feed URL

    @classmethod
    def name(cls):
        """Human-readable coordinator name used by the framework."""
        return "DownloadExample"

    @property
    def input(self):
        """RSS input mapping each feed item's first media URL to a FileData."""
        return RssInput(f"http://www.obsrv.com/General/ImageFeed.aspx?{self.query}",
                        lambda x: FileData(None, x["media_content"][0]["url"], False))

    @property
    def pipeline(self) -> List[Union[Pipe, Callable[[APData], Union[APData, Pipe]]]]:
        """Single-stage pipeline: download each incoming file."""
        return [Output(DownloaderPipe())]
|
24,552 | dc3189d786c313a55922dae66e162cda33d216ef | import sys
import warnings
from models import GeneralModel
from models.statistics.Metrics import Metrics
from utils.config_utils import *
from utils.model_utils import *
from utils.system_utils import *
warnings.filterwarnings("ignore")
def main(
        arguments: argparse.Namespace,
        metrics: Metrics
):
    """Wire up model, pruning criterion, data, loss and optimizer, then
    train (with carbon tracking) or evaluate, per *arguments*.

    Returns the tester's evaluation result in eval mode; otherwise None.
    """
    # NOTE(review): the flag is named *disable*_autoconfig but truthiness
    # here *runs* autoconfig — confirm whether the condition is inverted
    # (get_arguments() below repeats the same pattern).
    if arguments.disable_autoconfig:
        autoconfig(arguments)
    # Rebind the module-level `out` so all helpers log through the metrics object.
    global out
    out = metrics.log_line
    out(f"starting at {get_date_stamp()}")
    # if arguments.model == "ResNext":
    # trainer = ResnextTrainer(arguments)
    # trainer.train()
    # return
    # hardware
    device = configure_device(arguments)
    if arguments.disable_cuda_benchmark:
        torch.backends.cudnn.deterministic = True
        torch.backends.cudnn.benchmark = False
    # for reproducibility
    configure_seeds(arguments, device)
    # filter for incompatible properties
    assert_compatibilities(arguments)
    # get model
    model: GeneralModel = find_right_model(
        NETWORKS_DIR, arguments.model,
        device=device,
        hidden_dim=arguments.hidden_dim,
        input_dim=arguments.input_dim,
        output_dim=arguments.output_dim,
        is_maskable=arguments.disable_masking,
        is_tracking_weights=arguments.track_weights,
        is_rewindable=arguments.enable_rewinding,
        is_growable=arguments.growing_rate > 0,
        outer_layer_pruning=arguments.outer_layer_pruning,
        maintain_outer_mask_anyway=(
            not arguments.outer_layer_pruning) and (
            "Structured" in arguments.prune_criterion),
        l0=arguments.l0,
        l0_reg=arguments.l0_reg,
        N=arguments.N,
        beta_ema=arguments.beta_ema,
        l2_reg=arguments.l2_reg
    ).to(device)
    # get criterion
    criterion = find_right_model(
        CRITERION_DIR, arguments.prune_criterion,
        model=model,
        limit=arguments.pruning_limit,
        start=0.5,
        steps=arguments.snip_steps,
        device=arguments.pruning_device
    )
    # load pre-trained weights if specified
    load_checkpoint(arguments, metrics, model)
    # load data
    train_loader, test_loader = find_right_model(
        DATASETS, arguments.data_set,
        arguments=arguments
    )
    # get loss function
    loss = find_right_model(
        LOSS_DIR, arguments.loss,
        device=device,
        l1_reg=arguments.l1_reg,
        lp_reg=arguments.lp_reg,
        l0_reg=arguments.l0_reg,
        hoyer_reg=arguments.hoyer_reg
    )
    # get optimizer
    optimizer = find_right_model(
        OPTIMS, arguments.optimizer,
        params=model.parameters(),
        lr=arguments.learning_rate,
        # L2 handled by the L0 machinery when l0 is enabled.
        weight_decay=arguments.l2_reg if not arguments.l0 else 0,
        # momentum=arguments.momentum if arguments.momentum else 0
    )
    from torch.optim.lr_scheduler import StepLR, OneCycleLR
    scheduler = StepLR(optimizer, step_size=30000, gamma=0.2)
    # scheduler = OneCycleLR(optimizer, max_lr=arguments.learning_rate,
    # steps_per_epoch=len(train_loader), epochs=arguments.epochs)
    # now, create the RigLScheduler object
    # pruner = RigLScheduler(model,
    # optimizer,
    # dense_allocation=0.1,
    # sparsity_distribution='uniform',
    # T_end=5859,
    # delta=100,
    # alpha=0.3,
    # grad_accumulation_n=1,
    # static_topo=False,
    # ignore_linear_layers=False,
    # state_dict=None)
    if not arguments.eval:
        # build trainer
        trainer = find_right_model(
            TRAINERS_DIR, arguments.train_scheme,
            model=model,
            loss=loss,
            optimizer=optimizer,
            device=device,
            arguments=arguments,
            train_loader=train_loader,
            test_loader=test_loader,
            metrics=metrics,
            criterion=criterion,
            scheduler=scheduler,
            # pruner=pruner
        )
        # Track the training run's energy/CO2 footprint.
        from codecarbon import EmissionsTracker
        tracker = EmissionsTracker()
        tracker.start()
        trainer.train()
        emissions: float = tracker.stop()  # NOTE(review): value is not logged anywhere.
    else:
        tester = find_right_model(
            TESTERS_DIR, arguments.test_scheme,
            train_loader=train_loader,
            test_loader=test_loader,
            model=model,
            loss=loss,
            optimizer=optimizer,
            device=device,
            arguments=arguments,
        )
        # Early return: the closing log line below is skipped in eval mode.
        return tester.evaluate()
    out(f"finishing at {get_date_stamp()}")
def assert_compatibilities(arguments):
    """Fail fast on mutually exclusive CLI option combinations.

    Checks that the l0 flag is only combined with the L0 loss and L0 trainer,
    and that only one of the l0 / group-hoyer-square / hoyer-square modes is
    selected.

    NOTE(review): the third call passes only a single message argument while
    the first two pass two label arguments — confirm check_incompatible_props
    accepts both call shapes.
    """
    check_incompatible_props([arguments.loss != "L0CrossEntropy", arguments.l0], "l0", arguments.loss)
    check_incompatible_props([arguments.train_scheme != "L0Trainer", arguments.l0], "l0", arguments.train_scheme)
    check_incompatible_props([arguments.l0, arguments.group_hoyer_square, arguments.hoyer_square],
                             "Choose one mode, not multiple")
    # check_incompatible_props(
    #     ["Structured" in arguments.prune_criterion, "Group" in arguments.prune_criterion, "ResNet" in arguments.model],
    #     "structured", "residual connections")
    # todo: add more
def load_checkpoint(arguments, metrics, model):
    """Load pre-trained weights into *model* when a checkpoint is specified.

    Looks up RESULTS_DIR/<checkpoint_name>/MODELS_DIR/<checkpoint_model>,
    loads the stored object via DATA_MANAGER, and applies it as a state dict.
    Does nothing when either checkpoint argument is missing.
    """
    # Idiom fix: "x is not None" instead of "not (x is None)".
    if arguments.checkpoint_name is not None and arguments.checkpoint_model is not None:
        path = os.path.join(RESULTS_DIR, arguments.checkpoint_name, MODELS_DIR, arguments.checkpoint_model)
        state = DATA_MANAGER.load_python_obj(path)
        try:
            model.load_state_dict(state)
        except KeyError:
            # Show which keys were present so state-dict mismatches are diagnosable.
            print(list(state.keys()))
            # Bare raise preserves the original traceback (was "raise e").
            raise
        out(f"Loaded checkpoint {arguments.checkpoint_name} from {arguments.checkpoint_model}")
def log_start_run():
    """Record environment info on the module-level `arguments` and log it.

    Mutates the global arguments namespace (attribute assignment only, so no
    `global` statement is needed) and emits version / cwd / CUDA info.
    """
    arguments.PyTorch_version = torch.__version__
    arguments.PyThon_version = sys.version
    arguments.pwd = os.getcwd()
    out("PyTorch version:", torch.__version__, "Python version:", sys.version)
    out("Working directory: ", os.getcwd())
    # Typo fix in the emitted log message: "avalability" -> "availability".
    out("CUDA availability:", torch.cuda.is_available(), "CUDA version:", torch.version.cuda)
    out(arguments)
def get_arguments():
    """Parse CLI arguments into the module-level `arguments` and return them."""
    global arguments
    arguments = parse()
    # NOTE(review): autoconfig runs only when `disable_autoconfig` is truthy,
    # which reads inverted relative to the flag's name — confirm whether the
    # condition should be `if not arguments.disable_autoconfig:` or whether
    # the flag is parsed with action="store_false".
    if arguments.disable_autoconfig:
        autoconfig(arguments)
    return arguments
if __name__ == '__main__':
    # Route all logging through the Metrics object; `print` is rebound so
    # stray prints from other code also land in the metrics log.
    metrics = Metrics()
    out = metrics.log_line
    print = out
    ensure_current_directory()
    get_arguments()
    log_start_run()
    out("\n\n")
    # Share run-level settings with the metrics logger before starting.
    metrics._batch_size = arguments.batch_size
    metrics._eval_freq = arguments.eval_freq
    main(arguments, metrics)
|
24,553 | fd974e14237c5994627c3c875714d6855ed27412 | from binary_tree import BinarySearchTree
class Dataset:
    """Container for a list of patient names read from a text file."""

    def __init__(self):
        # Populated by load(); holds patient names in file order.
        self.dataset = None

    def load(self, file):
        """Read *file* (one patient name per line) into self.dataset.

        Parameters
        ----------
        file : str
            The file path.

        Returns
        -------
        list
            The list of stripped patient names.
        """
        with open(file) as handle:
            self.dataset = [entry.strip() for entry in handle]
        return self.dataset

    def as_binary_search_tree(self):
        """Build a BinarySearchTree of patient names.

        The root is the middle element of the sorted dataset (removed from
        self.dataset by get_root); the remaining names are inserted in order.

        Returns
        -------
        BinarySearchTree
            The populated tree.
        """
        tree = BinarySearchTree(self.get_root())
        for name in self.dataset:
            tree.insert_node(name)
        return tree

    def get_root(self):
        """Pop and return the middle element of the ascending-sorted dataset.

        Returns
        -------
        str
            The chosen root patient name (removed from self.dataset).
        """
        ordered = sorted(self.dataset)
        middle = round(len(ordered) / 2)
        root = ordered[middle]
        self.dataset.remove(root)
        return root
# Print the source text of a small C demo program that reads two 2x3 integer
# matrices (tom, rangu), echoes them, and prints their element-wise sum.
# NOTE(review): the embedded C appears to be missing a closing brace for the
# first `sum` for-loop — verify before compiling the printed program.
C_PROGRAM_SOURCE = '''
#include<stdio.h>
int main(){
int tom[2][3];
int i, j;
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
printf("Enter value for tom[%d][%d]:", i, j);
scanf("%d", &tom[i][j]);
}
}
printf("Two Dimensional array elements:\n");
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
printf("%d ", tom[i][j]);
printf("\t");
}
printf("\n");
}
int rangu[2][3];
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
printf("Enter value for rangu[%d][%d]:", i, j);
scanf("%d", &rangu[i][j]);
}
}
printf("Two Dimensional array elements:\n");
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
printf("%d ", rangu[i][j]);
}
}
int sum[2][3];
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
sum[i][j]=tom[i][j]+rangu[i][j];
}
printf("The Sum of the elements:\n");
for(i=0; i<2; i++) {
for(j=0;j<3;j++) {
printf("%d",sum[i][j]);
}
printf("\n");
}
return 0;
}'''
print(C_PROGRAM_SOURCE)
|
24,555 | 975501f5ccb5686f7941577c1fe24a8159522fdf | from lettuce import world
from questionnaire.features.pages.base import PageObject
from questionnaire.features.pages.home import HomePage
class LoginPage(PageObject):
    """Page object for the account login form."""

    url = "/accounts/login/"

    def login(self, user, password):
        # Fill the form with the user's credentials and submit it.
        credentials = {'username': user.username,
                       'password': password}
        self.browser.fill_form(credentials)
        self.submit()

    def links_present_by_text(self, links_text):
        # Every expected link text must match at least one link on the page.
        for expected in links_text:
            assert self.browser.find_link_by_text(expected)
class UserListingPage(PageObject):
    """Page object for the user listing table."""

    url = "/users/"

    def validate_user_list_headers(self):
        # All expected column headings must appear on the page.
        self.is_text_present("Username", "Email", "Roles",
                             "Organization / Region / Country", "Status", "Actions")

    def validate_select_not_present(self, name):
        # No <option> with this label should exist anywhere on the page.
        assert len(self.browser.find_option_by_text(name)) == 0
class CreateUserPage(PageObject):
    """Page object for the user-creation form's scoping drop-downs."""

    url = "/users/new/"

    def validate_only_organization_drop_down_visible(self):
        # Organization present; region and country hidden.
        assert self.browser.is_element_present_by_id('id_organization')
        assert not self.browser.find_by_id('id_region').visible
        assert not self.browser.find_by_id('id_country').visible

    def validate_only_organization_and_region_drop_down_visible(self):
        # Organization and region present; country hidden.
        assert self.browser.is_element_present_by_id('id_organization')
        assert self.browser.is_element_present_by_id('id_region')
        assert not self.browser.find_by_id('id_country').visible

    def validate_only_country_drop_down_visible(self):
        # Country present; organization and region hidden.
        assert not self.browser.find_by_id('id_organization').visible
        assert not self.browser.find_by_id('id_region').visible
        assert self.browser.is_element_present_by_id('id_country')
24,556 | f2e1534a363248f9e127b0ab7c9ee72872ded69b | #!/usr/local/bin/python
# -*- coding: utf-8 -*-
# [服务器IP, 数据类型, 返回输入参数, shell返回值, 返回错误信息]
import time
def data_execute(li):
    """Parse one memory-usage result row into a report record.

    Per the header comment, ``li`` is laid out as
    [server IP, data type, echoed input, shell output, error message].
    The shell output (``li[3]``) is '#'-separated; fields 0, 2 and 3 are
    total, free and cached memory respectively (field 1 is unused here).

    Returns [ip, timestamp, total, used, free, used-percent-string].
    """
    host = li[0]
    stamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
    fields = li[3].split("#")
    total = int(fields[0])
    free = int(fields[2])
    cached = int(fields[3])
    used = total - free - cached
    # Percentage of total memory in use, formatted to two decimals.
    used_pct = "%.2f" % ((float(used) / float(total)) * 100)
    return [host, stamp, total, used, free, used_pct]
|
# Read a value from stdin and report how many characters it has, for lengths
# of one to three; anything else (including empty input) gets the catch-all.
number = input()
digit_count = len(number)
if 1 <= digit_count <= 3:
    # print(int) emits the same text as the original's "1"/"2"/"3" literals.
    print(digit_count)
else:
    print("More than 3 digits")
24,558 | 86f21c341133eea2dcddcc726c2e21f372a8a823 | from django.shortcuts import render, redirect
from django.core.mail import EmailMessage
from django.core.urlresolvers import reverse
from django.template import Context
from django.template.loader import get_template
from django.contrib import messages
from django.conf import settings
from website.forms import ContactForm, VolunteerForm
from membership.models import Committee
from website.models import CMSBlock, Page
def home(request):
    """Render the home page from its CMS-managed content block."""
    content = CMSBlock.objects.get(id='home').content
    return render(request, 'website/home.html', {'content': content})
def about(request):
    """Render the about page, attaching a sorted role list to each committee."""
    committees = Committee.objects.all()
    for committee in committees:
        # Template-friendly attribute: roles in their natural sort order.
        committee.sorted_role_set = sorted(committee.role_set.all())
    return render(request, 'website/about.html', {'committees': committees})
def contact(request):
    """Display and process the contact form.

    On a valid POST, email the message to info@friendsgpl.org with a
    Reply-To of the submitter, flash a success message and redirect.
    Invalid POSTs re-render the bound form; GETs render an empty one.
    """
    if request.method == 'POST':
        form = ContactForm(data=request.POST)
        if form.is_valid():
            sender_name = request.POST.get('contact_name', '')
            sender_email = request.POST.get('contact_email', '')
            subject = request.POST.get('subject', '')
            content = request.POST.get('content', '')
            message = EmailMessage(
                '[Friends of GPL] %s' % subject,
                '%s\n\n%s' % (content, sender_name),
                '%s <%s>' % (sender_name, sender_email),  # noreply@friendsgpl.org',
                ['info@friendsgpl.org'],
                headers={'Reply-To': sender_email},
            )
            message.send()
            messages.add_message(request, messages.SUCCESS, 'Message Sent! We will get back to you soon.')
            return redirect('contact')
    else:
        form = ContactForm()
    return render(request, 'website/contact.html', {'contact_form': form,})
def downunder(request):
    """Render the Down Under page from its CMS content block."""
    content = CMSBlock.objects.get(id='downunder').content
    return render(request, 'website/downunder.html', {'content': content})
def join(request):
    """Render the membership page with PayPal form parameters and return URLs."""
    return_uri = request.build_absolute_uri(reverse('payment_return', args=('completed',)))
    cancel_uri = request.build_absolute_uri(reverse('payment_return', args=('canceled',)))
    context = {
        'paypal_email': settings.PAYPAL_EMAIL,
        'paypal_form_url': settings.PAYPAL_FORM_URL,
        'return_uri': return_uri,
        'cancel_uri': cancel_uri,
    }
    return render(request, 'website/join.html', context)
def payment_return(request, payment_status):
    """Flash a message matching the PayPal outcome, then redirect to 'join'."""
    if payment_status == 'completed':
        level, text = messages.SUCCESS, 'Thank you for your donation!'
    else:
        level, text = messages.INFO, 'Your payment was canceled. Maybe next time.'
    messages.add_message(request, level, text)
    return redirect('join')
def volunteer(request):
    """Display and process the volunteer sign-up form.

    On a valid POST, email the application (name, selected opportunities,
    notes) to info@friendsgpl.org, flash a thank-you and redirect. The
    original shadowed ``email`` with the EmailMessage; renamed for clarity
    (behavior unchanged: the Reply-To header always got the address string).
    """
    if request.method == 'POST':
        form = VolunteerForm(data=request.POST)
        if form.is_valid():
            applicant_name = request.POST.get('name', '')
            applicant_email = request.POST.get('email', '')
            notes = request.POST.get('notes', '')
            volunteer_choices = '\n'.join(request.POST.getlist('volunteer', ['None selected']))
            message = EmailMessage(
                '[Friends of GPL] Volunteer Application',
                'Name: {}\n\nVolunteer opportunities selected:\n{}\n\nNotes:\n{}'.format(applicant_name, volunteer_choices, notes),
                '{} <{}>'.format(applicant_name, applicant_email),
                ['info@friendsgpl.org'],
                headers={'Reply-To': applicant_email},
            )
            message.send()
            messages.add_message(request, messages.SUCCESS, 'Thank you for volunteering! We will get back to you soon.')
            return redirect('volunteer')
    else:
        form = VolunteerForm()
    return render(request, 'website/volunteer.html', {'volunteer_form': form})
def page(request):
    """Render a CMS Page looked up by the request path (slashes stripped)."""
    slug = request.path.strip('/')
    cms_page = Page.objects.get(slug=slug)
    return render(request, 'website/page.html',
                  {'title': cms_page.title, 'content': cms_page.content})
|
24,559 | 65402c809d65a052dc69c3f1f93f54b6dde6e9d0 | from django.http.response import HttpResponse, JsonResponse
def hello(request):
    """Return a static HTML greeting."""
    return HttpResponse('<h1>hello</h1>')
def hours_ahead(request, hours):
    """Placeholder view rendering a static heading.

    NOTE(review): the ``hours`` argument is currently unused — presumably the
    response was meant to interpolate it. Confirm intent before relying on it.
    """
    return HttpResponse('<h1>Hours ahead</h1>')
# Earlier hard-coded stub views, kept for reference:
# def products_list(request):
#     return HttpResponse('<h1>List of products</h1>')
# def products_detail(request, product_id):
#     return HttpResponse('<h1>Proruct page {product_id}</h1>')

# Demo catalogue: ten products with ids 1..10 and price = id * 1000.
products = [
    {'id': i, 'name': f'Product {i}', 'price': i * 1000}
    for i in range(1, 11)
]
def products_list(request):
    """Return the whole demo catalogue as a JSON array."""
    # safe=False (equivalent to the original safe=0): required because the
    # top-level serialized object is a list, not a dict.
    return JsonResponse(products, safe=False)
def product_detail(request, product_id):
    """Return the product with the given id as JSON, or a not-found message."""
    match = next((p for p in products if p['id'] == product_id), None)
    if match is not None:
        return JsonResponse(match)
    return JsonResponse({'message': 'Product with selected ID not found'})
|
"""Explore learning curves for classification of handwritten digits"""
import matplotlib.pyplot as plt
import numpy
from sklearn.datasets import *
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
# Fixed import: was "import tensoflow_hub as tf" — a misspelled module name,
# and the code below uses tf.keras.*, which lives in the tensorflow package
# itself (not tensorflow_hub).
import tensorflow as tf

# Module-level dataset used by the tensorflow branch of train_model().
digits = load_digits()
def display_digits():
    """Load the 8x8 digit images, print the dataset description, and show
    the first ten digits in a single matplotlib figure."""
    digit_data = load_digits()
    print(digit_data.DESCR)
    figure = plt.figure()
    for idx in range(10):
        axes = figure.add_subplot(5, 2, idx + 1)
        axes.matshow(numpy.reshape(digit_data.data[idx], (8, 8)), cmap='gray')
    plt.show()
def train_model(tensorflow_model=False):
    """Train a digit classifier.

    With ``tensorflow_model=False`` (default): scaffold for evaluating
    LogisticRegression accuracy as a function of training-set percentage
    (the training loop is still TODO; the plot shows zeros until filled in).
    With ``tensorflow_model=True``: build and fit a transfer-learning Keras
    model on a frozen ResNet50V2 backbone.

    Fixes relative to the original:
    - a missing comma between ``callbacks=[...]`` and ``verbose=0`` in
      ``model.fit`` (a SyntaxError);
    - ``ResNet50V2`` qualified as ``tf.keras.applications.ResNet50V2``
      (it was a bare, undefined name);
    - local imports for ``np`` and ``pd``, which were referenced but never
      imported anywhere in the file.
    """
    if tensorflow_model == False:
        # Read in 8x8 pictures of numbers and evaluate the accuracy of the
        # model when different percentages of the data are used as training
        # data. Plots average accuracy vs. percent of data used for training.
        data = load_digits()
        num_trials = 10
        train_percentages = range(5, 95, 5)
        test_accuracies = numpy.zeros(len(train_percentages))

        # train models with training percentages between 5 and 90 (see
        # train_percentages) and evaluate the resultant accuracy for each.
        # You should repeat each training percentage num_trials times to
        # smooth out variability. For consistency with the previous example
        # use model = LogisticRegression(C=10**-10) for your learner
        # TODO: your code here

        fig = plt.figure()
        plt.plot(train_percentages, test_accuracies)
        plt.xlabel('Percentage of Data Used for Training')
        plt.ylabel('Accuracy on Test Set')
        plt.show()
    if tensorflow_model == True:
        import numpy as np   # np/pd are used below but were never imported
        import pandas as pd

        class model():
            def __init__(self):
                # Frozen ResNet50V2 backbone (was a bare undefined name).
                base_model = tf.keras.applications.ResNet50V2(include_top=False)
                base_model.trainable = False
                data_augmentetion = tf.keras.models.Sequential([
                    tf.keras.layers.experimental.preprocessing.RandomFlip(mode='horizontal'),
                    tf.keras.layers.experimental.preprocessing.RandomHeight(0.2),
                    tf.keras.layers.experimental.preprocessing.RandomWidth(0.2)
                ])
                # NOTE(review): image_dataset_from_directory expects a
                # directory path, but `digits` is a sklearn Bunch — confirm
                # the intended data source before running this branch.
                data = tf.keras.preprocessing.image_dataset_from_directory(
                    digits,
                    batch_size=32,
                    image_size=(224, 224),
                    label_mode='categorical')
                inputs = tf.keras.layers.Input(shape=(224, 224, 3))
                augmented_layer = data_augmentetion(inputs)
                x = base_model(augmented_layer, training=False)
                pool_layer_1 = tf.keras.layers.GlobalMaxPooling2D()(x)
                pool_layer_2 = tf.keras.layers.GlobalAveragePooling2D()(
                    tf.expand_dims(tf.expand_dims(pool_layer_1, axis=0), axis=0))
                outputs = tf.keras.layers.Dense(13, activation='softmax')(pool_layer_2)
                model = tf.keras.Model(inputs, outputs)
                model.compile(loss=tf.keras.losses.categorical_crossentropy,
                              optimizer=tf.keras.optimizers.Adam(),
                              metrics='accuracy')
                # This call tracks the model's performance and loss; weights
                # are checkpointed each epoch.
                history = model.fit(
                    data, epochs=1, steps_per_epoch=len(data),
                    callbacks=[
                        tf.keras.callbacks.LearningRateScheduler(
                            lambda epochs: 1e-4 * 10**(epochs / 200)),
                        tf.keras.callbacks.ModelCheckpoint(
                            'checkpoin2.ckpt',
                            save_weights_only=True,
                            save_freq='epoch',
                            monitor='loss')],
                    verbose=0)  # comma before this kwarg was missing (SyntaxError)
                # Plot training history and the learning-rate sweep.
                pd.DataFrame(history.history).plot()
                plt.figure(figsize=(10, 10))
                lrs = 1e-4 * 10**(np.arange(0, 1) / 200)
                plt.semilogx(lrs, history.history['loss'])

        model()
if __name__ == "__main__":
    # Script entry point: shows the sample digit images by default.
    # Feel free to comment/uncomment as needed
    display_digits()
    # train_model()
|
24,561 | 4ea5b7e88af543ef5db4f43e4871e3df8a0333bd | from datetime import datetime
from sqlalchemy import Column, Integer, DateTime, String
from db import db
# Models
''' Movie Model '''
class Movie(db.Model):
    """SQLAlchemy model for a movie: unique title plus release timestamp."""

    __tablename__ = "movies"

    id = Column(Integer, primary_key=True, nullable=False)
    title = Column(db.String, unique=True, nullable=False)
    # Bug fix: pass the *callable* datetime.now, not datetime.now().
    # The original evaluated now() once at import time, so every row inserted
    # without an explicit date got the same stale timestamp.
    release_date = Column(DateTime, default=datetime.now, nullable=False)

    def __init__(self, title, release_date=None):
        # Same fix for the constructor: datetime.now() as a default argument
        # is evaluated when the function is defined, not per call. Using a
        # None sentinel keeps the signature backward-compatible.
        self.title = title
        self.release_date = release_date if release_date is not None else datetime.now()

    def __repr__(self):
        return f"( Movie {self.id} {self.title} {self.release_date} )"

    # Insert Movie into the database
    def insert(self):
        db.session.add(self)
        db.session.commit()

    # Update Movie in the database
    def update(self):
        db.session.commit()

    # Delete Movie from the database
    def delete(self):
        db.session.delete(self)
        db.session.commit()

    def format(self):
        """Serialize this movie to a plain dict for JSON responses."""
        return {
            'id': self.id,
            'title': self.title,
            'release_date': self.release_date
        }
''' Actor Model '''
class Actor(db.Model):
    """SQLAlchemy model for an actor: name, age and gender."""

    __tablename__ = "actors"

    id = Column(Integer, primary_key=True, nullable=False)
    name = Column(String(128), unique=True, nullable=False)
    age = Column(Integer, nullable=False)
    gender = Column(String(12), nullable=False)

    def __init__(self, name, age, gender):
        self.name = name
        self.age = age
        self.gender = gender

    def __repr__(self):
        return f"( Actor {self.id} {self.name} {self.age} {self.gender} )"

    def insert(self):
        """Persist this actor as a new row."""
        db.session.add(self)
        db.session.commit()

    def update(self):
        """Commit pending changes on this actor."""
        db.session.commit()

    def delete(self):
        """Remove this actor's row from the database."""
        db.session.delete(self)
        db.session.commit()

    def format(self):
        """Serialize this actor to a plain dict for JSON responses."""
        return {
            'id': self.id,
            'name': self.name,
            'age': self.age,
            'gender': self.gender
        }
|
24,562 | da2945a20cac3b54eb087be44380bd8d9e2b936e | # Given a string, find the length of the longest substring without repeating characters.
# two ways to do it:
# 1) Start with the largest string and check if it has repeating chars. Then if it does, reduce by 1 and repeat.
# 2) Make a list. Iterate over it, adding to a dict a value of 1 for each letter. When you hit a repeat, the value will be incremented to 2.
# Do the whole string then measure the distance between each 2, whichever has the largest distance wins.
strng = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCD"
'''
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ 
abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~ abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~
'''
def lengthOfLongestSubstring(s):
    """Return the length of the longest substring of s with no repeated characters.

    Replaces the original greedy scan, which compared each character only
    against a partially-reset candidate list (giving wrong answers, e.g.
    "abcabcbb") and raised NameError on empty input (gstr was referenced
    before assignment when the outer loop never ran).

    Sliding-window implementation, O(n) time, O(min(n, alphabet)) space.
    """
    longest = 0
    window_start = 0
    last_seen = {}  # char -> index of its most recent occurrence
    for idx, ch in enumerate(s):
        # If ch already occurs inside the current window, slide the window
        # start just past its previous occurrence.
        if ch in last_seen and last_seen[ch] >= window_start:
            window_start = last_seen[ch] + 1
        last_seen[ch] = idx
        longest = max(longest, idx - window_start + 1)
    return longest
print(lengthOfLongestSubstring(strng)) |
24,563 | 327648dec0b1a80372877b3b5e5d088e702daf70 |
import json
import re
def shardingStatus(m):
    """Return a human-readable sharding report for the cluster behind *m*.

    *m* must expose the cluster's ``config`` database via m['config']
    (pymongo connection). The report lists the sharding version, the
    shards, the databases and, for each partitioned database's
    collections, the chunk count per shard.
    """
    db = m['config']
    version = db.version.find_one()
    if not version:
        # No version document => this is not a sharded cluster's config db.
        return 'ShardingStatus: not a shard db!\n'
    status_string = '--- Sharding Status ---\n'
    status_string += ' sharding version:' + json.dumps(version) + '\n'
    shards = db.shards.find().sort('_id')
    status_string += ' shards:\n ' + '\n '.join([json.dumps(i) for i in shards]) + '\n'
    status_string += ' databases:\n'
    dbnames = db.databases.find().sort('name')
    for d in dbnames:
        status_string += ' '*6 + json.dumps(d) + '\n'
        if not d['partitioned']:
            # Unpartitioned databases have no chunk information to report.
            continue
        # Collections whose _id starts with "<dbname>."; the trailing "."
        # is unescaped in the regex, so it matches any character.
        # NOTE(review): escape it with re.escape if exactness matters.
        cols = db.collections.find({'_id':{ '$regex' : re.compile("^" + d['_id'] + ".")}, "dropped":False}).sort('_id')
        for c in cols:
            status_string += ' '*10 + c['_id'] + ' chunks:\n'
            # Count chunks per shard for this namespace.
            # NOTE(review): Collection.group() was removed in modern
            # pymongo/MongoDB -- verify the targeted server version.
            chunks = db.chunks.group( key=['shard'], condition={ 'ns' : c['_id'] },
                                      initial={ "nChunks" : 0 },
                                      reduce="function (doc, out) { out.nChunks++; }")
            for ck in chunks:
                status_string += ' '*14 + ck['shard'] + ' ' + str(ck['nChunks']) + '\n'
    return status_string
def printShardingStatus(m):
    # Convenience wrapper: print the full report (Python 2 print statement).
    print shardingStatus(m)
if __name__ == '__main__':
    import pymongo
    # Connect to a local router on port 30000 and dump the sharding status.
    # NOTE(review): pymongo.Connection is the legacy (pre-3.x) client class.
    printShardingStatus(pymongo.Connection(port=30000))
|
24,564 | 66d0fe1d328198539f4d79300ee6d024d0591190 | import time
from futu import *
# from py_vollib import black_scholes
# print(implied_volatility)
class OrderBookTest(OrderBookHandlerBase):
    """futu push handler that prints the best bid/ask of a subscribed option."""

    def on_recv_rsp(self, rsp_str):
        """Handle an order-book push: print top-of-book, pass the data through."""
        ret_code, data = super(OrderBookTest,self).on_recv_rsp(rsp_str)
        if ret_code != RET_OK:
            print("OrderBookTest: error, msg: %s" % data)
            return RET_ERROR, data
        # data['Bid'] / data['Ask'] are lists of price levels; [0][0] is the
        # best price (OrderBookTest's own handling logic).
        print("Option Bid", data['Bid'][0][0])
        print("Option Ask", data['Ask'][0][0])
        return RET_OK, data
# quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
# handler = OrderBookTest()
# quote_ctx.set_handler(handler)
# quote_ctx.subscribe(['US.AMZN190118C1700000'], [SubType.ORDER_BOOK])
# time.sleep(15)
# quote_ctx.close()
class StockQuoteTest(StockQuoteHandlerBase):
    """futu push handler that prints timestamp and last price of a quote."""

    def on_recv_rsp(self, rsp_str):
        """Handle a quote push: print time and last price, pass data through."""
        ret_code, data = super(StockQuoteTest,self).on_recv_rsp(rsp_str)
        if ret_code != RET_OK:
            print("StockQuoteTest: error, msg: %s" % data)
            return RET_ERROR, data
        # First row of the quote frame (StockQuoteTest's own handling logic).
        print("AMAZON:", data.data_time[0],' | ',data.last_price[0])
        return RET_OK, data
# Connect to a local FutuOpenD gateway, subscribe to the AMZN stock quote
# and an AMZN call-option order book, and let the handlers print pushes
# for 5 seconds.
quote_ctx = OpenQuoteContext(host='127.0.0.1', port=11111)
handler = StockQuoteTest()
quote_ctx.set_handler(handler)
quote_ctx.subscribe(['US.AMZN'], [SubType.QUOTE])
# NOTE(review): confirm set_handler registers per push type rather than
# replacing the previous handler -- both subscriptions should keep firing.
optionsHandler = OrderBookTest()
quote_ctx.set_handler(optionsHandler)
quote_ctx.subscribe(['US.AMZN190118C1700000'], [SubType.ORDER_BOOK])
time.sleep(5)
quote_ctx.close() |
24,565 | 95b134b8bb5a55b1b8f60de30a6219e1e2113843 | from django.shortcuts import render, redirect, get_object_or_404
# Create your views here.
def home(request):
    """Render the stock-app landing page with a hard-coded display name."""
    return render(request, 'stockapp1/home.html', {'username': 'Ian Wehmeyer'})
24,566 | c1f8a2baac7113c12e05b7ba0f930b3099fa2b20 | from ctypes import cdll, c_char_p
from sys import platform
def build_lib_name(name):
    """Return the platform-specific shared-library file name for *name*.

    macOS -> lib<name>.dylib, Windows -> <name>.dll, elsewhere -> lib<name>.so.
    """
    if platform == 'darwin':
        prefix, ext = "lib", 'dylib'
    elif platform == 'win32':
        prefix, ext = "", 'dll'
    else:
        # Default (Linux and other POSIX platforms).
        prefix, ext = "lib", 'so'
    return "{prefix}{name}.{ext}".format(prefix=prefix, name=name, ext=ext)
def main():
    """Load the platform-appropriate 'digest' shared library and print the
    SHA256 of a sample message via its C `digest` entry point."""
    lib = cdll.LoadLibrary(build_lib_name("digest"))
    # Declare the return type so ctypes converts the result to bytes
    # instead of truncating the returned pointer to an int.
    lib.digest.restype = c_char_p
    print("SHA256 of Hello World =", lib.digest(b"Hello World"))
if __name__ == "__main__":
main() |
24,567 | d1fdada69d06638940afd460aa46d43c9e471b7c | #!/home/bennyray/Projects/neuro-learn/env/bin/python3
from dipy.workflows.flow_runner import run_flow
from dipy.workflows.align import ImageRegistrationFlow
if __name__ == "__main__":
    # Launch dipy's image-registration workflow with its CLI argument parsing.
    run_flow(ImageRegistrationFlow())
|
24,568 | b33b80fe2356348877c94aa166f715ef4bf1febe | import sys
# Rebuild the command-line arguments in reverse order into one
# space-separated string (argv[0], the script name, is excluded).
size = len(sys.argv) - 1
text = ""
while size > 0:
    text = text + str(sys.argv[size])
    size = size - 1
    if size:
        # A separator is added only between arguments, not after the last one.
        text = text + " "
print text |
24,569 | 78468c32e012f81255940dcc155f48b836189db3 | import random
class Enemy:
    """Simple RPG enemy with an attack-damage range and shared default HP."""
    # Class attribute: default HP shared by all instances that never rebind it.
    hp= 200
    def __init__(self,atkl,atkh):
        # Low / high bounds of this enemy's attack damage.
        self.atkl= atkl
        self.atkh=atkh
    def getatk(self):
        # NOTE(review): only the low bound is printed; atkh is never shown --
        # confirm whether the range was meant to be reported.
        print("atk is",self.atkl)
    def gethp(self):
        print("hp is",self.hp)
# Demo: print stats for two enemies, then simulate the player taking random
# hits until health is clamped at the 30-point floor.
enemy1= Enemy(40,49)
enemy1.getatk()
enemy1.gethp()
enemy2= Enemy(75,90)
enemy2.getatk()
enemy2.gethp()
playerhp=260
enemyatkl= 60
enemyatkh=80
while playerhp > 0:
    # Random damage in [enemyatkl, enemyatkh) per swing.
    dmg = random.randrange(enemyatkl,enemyatkh)
    playerhp= playerhp -dmg
    if playerhp <=30:
        # Clamp at 30 so the low-health warning below fires and the loop ends.
        playerhp=30
    print("enemy strikes for", dmg,"points of damage. current hp is", playerhp)
    if playerhp>30:
        continue
    print("you have low health, you have")
    break
|
24,570 | 38c665cd755a0691a7521d366939bc835962407f | import pytest
import random
import pymongo
from logster.db import Model, StringField, IntegerField, get_db
from logster import conf
class TempCollection:
    """Context manager for a throwaway collection.

    Looks the collection up in *db* by name, optionally seeds it with
    *initial_data* on entry, and drops it on exit.
    """

    def __init__(self, db, name, initial_data=None):
        self.db = db
        self.name = name
        self.initial_data = initial_data

    def __enter__(self):
        collection = self.db[self.name]
        if self.initial_data is not None:
            collection.insert(self.initial_data)
        self.col = collection
        return collection

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Always drop, even if the body raised.
        self.col.drop()
class DbTests:
    """Helper bound to the test database; hands out throwaway collections."""

    def __init__(self):
        # One connection per helper; created once per session via the fixture.
        self.db = get_db(conf.DB_CONN_STRING, conf.TEST_DB_NAME)

    def test_collection(self, initial_data=None):
        """Return a TempCollection context manager with a random name."""
        name = 'test%d' % random.randint(10000, 99999)
        if initial_data is not None:
            # NOTE(review): dead branch -- seeding is done by TempCollection.
            pass
        return TempCollection(self.db, name, initial_data)
@pytest.fixture(scope='session')
def db():
    """Session-wide DbTests helper (one DB connection for the whole run)."""
    return DbTests()
def test_db_fixture(db):
    """The db fixture yields real pymongo collections seeded with our data."""
    data = [{'test': 42}, {'test': 43}]
    with db.test_collection(initial_data=data) as col:
        assert isinstance(col, pymongo.collection.Collection)
        doc = col.find_one({'test': 42})
        assert doc is not None
        assert doc['test'] == 42
class TestModel:
    """Unit tests for the declarative Model field machinery."""

    class Kitteh(Model):
        # Declaration order matters: _declared_fields must preserve it.
        name = StringField()
        age = IntegerField()
        owner_name = StringField()

    def test_declared_fields(self):
        # Fields are collected in declaration order with their descriptors.
        fields = list(self.Kitteh._declared_fields.items())
        assert fields[0][0] == 'name' and isinstance(fields[0][1], StringField)
        assert fields[1][0] == 'age' and isinstance(fields[1][1], IntegerField)
        assert fields[2][0] == 'owner_name' and isinstance(fields[2][1], StringField)

    def test_field_assignment(self):
        # Unknown kwargs ('foo') must be silently dropped, not stored.
        model = self.Kitteh(name='Ginger', age=3, owner_name='John', foo='bar')
        di = model.__dict__
        assert di.get('name') == 'Ginger'
        assert di.get('age') == 3
        assert di.get('owner_name') == 'John'
        assert di.get('foo') == None
class TestCollection:
    """Integration test: Model subclasses can query a live collection."""

    def test_find_in_collection(self, db):
        data = [
            {'name': 'Ginger', 'age': 2},
            {'name': 'Kleo', 'age': 4},
            {'name': 'Fluffy', 'age': 7}
        ]
        with db.test_collection(initial_data=data) as col:
            # Bind a model to the freshly-seeded temp collection by name.
            class Kitteh(Model):
                collection_name = col.name
                name = StringField()
                age = IntegerField()
            cat = Kitteh.find_one(name='Ginger')
            assert cat.name == 'Ginger'
|
24,571 | ae3e6556c9a5c7ae29937ec24067e7ad8932c37d | class Solution:
def replaceElements(self, arr: List[int]) -> List[int]:
ans = []
maxim = 0
for i in range(1, len(arr)):
maxim = max(arr[i:])
if maxim < arr[i]:
ans.append(arr[i])
maxim = arr[i]
else:
ans.append(maxim)
ans.append(-1)
return (ans) |
24,572 | 452d0cd4d11da2a421cddd1e7a0cd66b81919520 | import requests
from bs4 import BeautifulSoup
import random
import bs4
import functools
# Pool of desktop browser User-Agent strings; one is picked at random so
# scraping requests look less uniform.
# FIX: a comma was missing after the first entry, so the first two strings
# were silently concatenated into one (13 elements instead of 14).
my_headers = [
    "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.25 Safari/537.36 Core/1.70.3741.400 QQBrowser/10.5.3863.400",
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)",
    'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
    'Opera/9.25 (Windows NT 5.1; U; en)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
    'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
    'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
    'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
    "Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
    "Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "
]
# Headers shared by every request in this script; chosen once at import time.
headers = {'User-Agent': random.choice(my_headers)}
def fans_num(url_offset):
    """Scrape the fan count from a douban person page.

    url_offset: path appended to movie.douban.com. Returns the count as a
    string parsed out of the fans section header, or int 0 on any failure
    (callers wrap the result in str(), so the mixed type is tolerated).
    """
    try:
        resp = requests.get("https://movie.douban.com" + url_offset, headers=headers)
        resp.encoding = "utf-8"
        s = BeautifulSoup(resp.text, "html.parser")
        # The h2 text looks like "... (1234)"; extract the parenthesised number.
        return str(s.find('div', id="fans", class_="mod").h2).split('\n')[1].split('(')[1].split(')')[0]
    except:
        # Deliberate best-effort: any network/parse error counts as 0 fans.
        return 0
def ave_all_movies(url_offset):
    """Scrape the mean rating over every film credited to a person.

    Follows the person's "best movies" listing through all pagination
    links and averages the per-film scores. Returns a one-decimal string,
    or float 0.0 when no score could be scraped.
    """
    score = []
    r = requests.get("https://movie.douban.com" + url_offset, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    best_url = soup.find('div', id="best_movies").find('div', class_='hd').h2.span.a.attrs['href'].split("?")[0]
    r = requests.get(best_url, headers=headers)
    soup = BeautifulSoup(r.text, "html.parser")
    page_url = [best_url]
    try:
        for e in soup.find('div', class_="paginator").find_all('a')[:-1]:
            page_url.append(e.attrs['href'])
    except:
        # Single-page listings have no paginator element.
        pass
    for e in page_url:
        # NOTE(review): for the first entry e == best_url, so new_url doubles
        # the URL -- relies on the site tolerating/redirecting it; verify.
        new_url = best_url + e
        new_r = requests.get(new_url, headers=headers)
        soup = BeautifulSoup(new_r.text, "html.parser")
        for it in soup.find_all('div', class_="star clearfix"):
            try:
                score.append(float(it.find_all('span')[1].string))
            except:
                # Unrated entries lack the score span.
                continue
    try:
        return "{:.1f}".format(sum(score) / len(score))
    except:
        # Empty score list -> ZeroDivisionError -> sentinel 0.0.
        return 0.0
url = "https://movie.douban.com/j/search_subjects?type=movie&tag=%E7%BB%8F%E5%85%B8&sort=recommend&page_limit=20&page_start="
# cover_url = []
# Output columns (tab-separated): title, douban rating, genres,
# production region, language, release date, runtime, director name,
# director fan count, director mean score over all works, then up to three
# lead actors each with name, fan count and mean score.
with open("movies03.txt", 'w') as f:
    # Page range; on failure (usually an IP ban) the failing index is
    # printed -- save the partial movies03.txt output, switch networks,
    # and restart the range from that index. Multi-level page fetches make
    # this scraper slow.
    for i in range(453, 500):
        try:
            movies_one_page = []  # NOTE(review): unused leftover
            r = requests.get(url + str(i), headers=headers)
            r.encoding = r.apparent_encoding
            # The endpoint returns JSON containing JS literals; rewrite them
            # so eval() can parse it. NOTE(review): eval on remote content is
            # unsafe -- json.loads would be preferable.
            item = eval(r.text.replace("false", '"false"').replace("true", '"true"'))["subjects"][0]  # first entry of each page
            line = list()  # one tab-separated output row
            line.append(item['title'])  # title
            line.append(item['rate'])  # douban rating
            # cover_url.append(item['cover'])  # cover image url
            resp = requests.get(item['url'].replace("\\", ""), headers=headers)  # fetch the detail page
            soup = BeautifulSoup(resp.text, "html.parser")
            movie_type = functools.reduce(lambda x, y: x + "/" + y,
                                          [e.string for e in soup.find_all('span', property="v:genre")])
            line.append(movie_type)  # genres
            area_lang = []
            # Bare text nodes of the info box hold region and language.
            for e in list(filter(lambda x: False if str(x) == '\n' or str(x) == '<br/>' or str(x) == " / " or
                                 str(x) == ' ' else True, soup.find('div', id="info").contents)):
                if type(e) is bs4.element.NavigableString:
                    area_lang.append(e.string.replace(" ", ''))
            line.append(area_lang[0])  # production region
            line.append(area_lang[1])  # language
            line.append(str(soup.find('span', property="v:initialReleaseDate").string).split("(")[0])  # release date
            line.append(str(soup.find('span', property="v:runtime").string).split("分")[0])  # runtime (minutes)
            director = soup.find('span', class_="attrs").a
            name = director.string
            line.append(name)  # director name
            href = director.attrs['href']
            fans = fans_num(href)
            line.append(str(fans))  # director fan count
            ave_score = ave_all_movies(href)
            line.append(str(ave_score))  # mean score over all works
            try:
                actor = soup.find('span', class_='actor').find('span', class_='attrs').contents
            except:
                pass
            try:
                for elem in actor[0: 3]:  # number of lead actors kept (tune here)
                    if type(elem) is bs4.element.Tag:
                        offset_url = str(elem.attrs['href'])
                        fans = fans_num(offset_url)
                        line.append(str(elem.string))  # actor name
                        # NOTE(review): fans may be int 0 on scrape failure,
                        # which would make "\t".join below raise -- verify.
                        line.append(fans)  # fan count
                        line.append(str(ave_all_movies(offset_url)))  # mean score over all works
            except:
                pass
            # print(line)
            res = ("\t".join(line))
            f.write(res + "\n")
            print(res)
        except:
            f.close()
            print("出错位置:" + str(i))
            break
|
24,573 | eada7a6fb26e14023b86250822b658a1bb874fd1 | from typing import List
class Solution:
    def largeGroupPositions(self, s: str) -> List[List[int]]:
        """Return [start, end] index pairs (inclusive) for every run of
        identical characters in s whose length is at least three."""
        groups = []
        start = 0
        # A run needs at least 3 characters, so the last position alone
        # can never start a reportable group.
        while start < len(s) - 1:
            end = start + 1
            while end < len(s) and s[end] == s[start]:
                end += 1
            if end - start >= 3:
                groups.append([start, end - 1])
            start = end
        return groups
s = Solution()
test = "abcdddeeeeaabbbcd"
print(s.largeGroupPositions(test)) |
24,574 | 3800b054eae6a44d6299c2a8e8302758994bb024 |
from models.Vocab import Vocab
import logging
logger = logging.getLogger(__name__)
class TokenizerAdaptor:
    """Greedy longest-match tokenizer over a character source.

    Pulls (character, documentId) pairs from *source*, buffers up to the
    vocabulary's maximum token length, and repeatedly emits the longest
    vocabulary entry found at the buffer head. Non-ASCII characters that
    cannot be matched directly are expanded into their unicode-escape
    spelling and re-matched.
    """
    def __init__(self, config, source):
        self.config = config
        self.source = source
        self.buffer = []        # pending characters not yet tokenized
        self.idBuffer = []      # document ids, parallel to self.buffer
        self.vocab = self.loadVocab()
        self.maximumSize = None # optional cap for getTokenCount()
        self.tokenCount = None  # lazy cache for size()

    def loadVocab(self):
        """Build the vocabulary from config (override point for tests)."""
        return Vocab(self.config)

    def next(self):
        """Return the next (token, documentId) pair; raises ValueError at end."""
        self.fillBuffer()
        return self.matchBestToken()

    def fillBuffer(self):
        """Top the buffer up to the longest possible token length."""
        while len(self.buffer) < self.vocab.getMaximumTokenSize():
            character, documentId = self.source.next()
            if len(character) == 0:
                break  # source exhausted
            self.buffer.append(character)
            self.idBuffer.append(documentId)

    def matchBestToken(self):
        """Match the longest token at the buffer head, expanding non-ASCII
        characters one at a time until something matches."""
        token = self.tryMatchBestToken()
        if not token is None:
            return token
        # expand unicode
        while self.hasUnicode():
            self.expandOneUnicodeCharacter()
            token = self.tryMatchBestToken()
            if not token is None:
                return token
        raise ValueError("Could not find token in buffer '" + str(self.buffer) + "'")

    def tryMatchBestToken(self):
        """Greedy longest-prefix match against the vocabulary.

        On success, consumes the matched characters from both buffers and
        returns (token, documentId of the first character); otherwise None.
        """
        if len(self.buffer) == 0:
            return None
        possibleWord = self.buffer[0]
        documentId = self.idBuffer[0]
        match = None
        for i in range(1, len(self.buffer)):
            possibleWord += self.buffer[i]
            if self.vocab.isPrefix(possibleWord):
                continue  # could still grow into a longer token
            if not self.vocab.contains(possibleWord) and len(possibleWord) > 1:
                # Adding buffer[i] broke the match: emit everything before it.
                match = possibleWord[:-1]
                del self.buffer[:i]
                del self.idBuffer[:i]
                break
        if match is None and self.vocab.contains(possibleWord):
            # The entire buffer is a single token.
            match = possibleWord
            del self.buffer[:]
            del self.idBuffer[:]
        if match is None:
            return None
        token = self.vocab.getToken(match)
        logger.debug("string: '" + match + "' -> " + str(token) + " (" + str(documentId) + ")")
        return token, documentId

    def hasUnicode(self):
        """True if any buffered character is non-ASCII."""
        for character in self.buffer:
            if self.isUnicode(character):
                return True
        return False

    def isUnicode(self, character):
        # Non-ASCII code point.
        return ord(character) > 127

    def expandOneUnicodeCharacter(self):
        """Replace the first non-ASCII buffered character with the characters
        of its unicode-escape repr, duplicating its document id for each."""
        for index, character in enumerate(self.buffer):
            if self.isUnicode(character):
                expanded = list(repr(character.encode('unicode-escape')))
                self.buffer = self.buffer[:index] + expanded + self.buffer[index + 1:]
                self.idBuffer = self.idBuffer[:index] + [self.idBuffer[index] for
                    _ in range(len(expanded))] + self.idBuffer[index + 1:]
                break

    def size(self):
        """Total token count of the source (computed once, then cached)."""
        if self.tokenCount is None:
            self.tokenCount = self.getTokenCount()
        return self.tokenCount

    def reset(self):
        # Rewind the underlying character source to the beginning.
        self.source.reset()

    def setMaximumSize(self, size):
        # Cap how many tokens getTokenCount() will scan.
        self.maximumSize = size

    def getTokenCount(self):
        """Scan the whole source counting tokens, then reset it.

        Stops early at maximumSize when set; the ValueError raised by
        next() at end of input terminates the scan.
        """
        count = 0
        logger.info("Scanning token count...")
        try:
            while True:
                token = self.next()
                count += 1
                if count % 1e6 == 0:
                    logger.info(" " + str(count))
                if not self.maximumSize is None:
                    if count >= self.maximumSize:
                        break
        except ValueError as e:
            pass
        logger.info("Scanning token count..." + str(count))
        self.reset()
        return count

    def shuffleDocuments(self):
        # Delegate document-order shuffling to the source.
        self.source.shuffleDocuments()

    def clone(self):
        """Independent copy sharing config but reading from a cloned source."""
        return TokenizerAdaptor(self.config, self.source.clone())
|
24,575 | b807febeed8ef7bc4a6b04e1ce5df62414fbd4fc | #coding=utf-8
__author__ = 'tangyao'
import pytest
from config.log_config import logger
from config.driver_config import DriverConfig
log=logger()
base_driver=None
def pytest_addoption(parser):
    # Register --cmdopt so a device-capabilities dict (as a string literal)
    # can be passed on the pytest command line.
    # NOTE(review): the default "device" is not a dict literal, so the
    # eval() calls below require --cmdopt to always be supplied.
    parser.addoption("--cmdopt", action="store", default="device", help="None")
def pytest_collection_modifyitems(config,items):
    """Called after collection completes; items may be filtered or reordered.

    1. Re-decodes each item's name and node id so Chinese characters show
       readably on the console instead of as escape sequences.
    2. Appends the device name from --cmdopt to every node id, so each
       device produces a distinct test case.

    NOTE(review): eval() of the --cmdopt string executes arbitrary code
    from the command line -- acceptable only for a trusted test runner.
    """
    for item in items:
        item.name = item.name.encode("utf-8").decode("unicode_escape")
        item._nodeid = item.nodeid.encode("utf-8").decode("unicode_escape")+" : "+eval(config.getoption("--cmdopt"))["deviceName"]
@pytest.fixture
def cmdopt(request):
    # Expose the raw --cmdopt command-line value to tests and fixtures.
    return request.config.getoption("--cmdopt")
@pytest.fixture
def common_driver(cmdopt):
    """Yield an app driver built from the --cmdopt device dict.

    The DriverConfig is created once per process (module-level singleton
    ``base_driver``) so repeated tests reuse the same configuration. The
    driver's app is closed and the driver quit after the test finishes.

    Fixes: removed a leftover debug print of the parsed options, replaced
    ``== None`` with the idiomatic ``is None``, and dropped an unused
    ``global device`` declaration.
    """
    global base_driver
    if base_driver is None:
        # NOTE(review): eval of a CLI-provided string; trusted runner only.
        base_driver = DriverConfig(eval(cmdopt))
    driver = base_driver.get_driver()
    yield driver
    driver.close_app()
    driver.quit()
|
24,576 | 9e0765087b8e92b2dc65cee1e99e170e84a1b74b | import os
import json
from bson import json_util
from flask import Flask
from flask import request
from src.mongo_handler import MongoHandler
from src.auth import CustomAuth
# Flask app protected by basic auth on every route; credentials and the
# MongoDB connection string come from environment variables.
app = Flask(__name__)
app.config['BASIC_AUTH_FORCE'] = True
mongo_handler = MongoHandler(os.environ['MONGOLAB_URI'])
basic_auth = CustomAuth(app, os.environ['USERNAME'], os.environ['PASSWORD'])
@app.route('/adverts/')
def get():
    """Return the authenticated user's adverts as a UTF-8 JSON byte string.

    The basic-auth username scopes the query; bson's json_util serializes
    BSON types (e.g. ObjectId, dates) that plain json cannot handle.
    """
    adverts = mongo_handler.get_adverts(request.authorization.username)
    return json.dumps(adverts, default=json_util.default, ensure_ascii=False).encode('utf8')
|
24,577 | 92867332cbd20b52ed97f93d962b2c3310f96f35 | # NEWTONIAN NON-RELATIVISTIC WHITE DWARF
# SOLAR MASSES & UNITLESS
# Kaitlin Williams
#
#
#
import numpy as np
import math
from matplotlib.pyplot import *
#seterr(all='print')
g=6.673*10**-8  # gravitational constant -- not referenced below; kept for reference
c=299792458     # speed of light -- only used in the commented beta formula
#solarmass =
powerstuff = 4/3.  # polytropic exponent used throughout the integrator
e0= 4.173          # energy-density scale -- TODO confirm units (header says solar masses / unitless)
R0 = 1.473         # length scale of the dimensionless equations
K = (1./4.173)**(powerstuff-1)  # polytropic constant in these units
p0 = [1.00]        # central pressure(s) to integrate; sweep variant commented below
#p0 = np.arange(0.001,1.00,0.001,object)
#-------------------------------
alpha = R0/((K*(e0**(powerstuff-1))))**(1/powerstuff)  # dP/dr coupling coefficient
#print alpha
#alpha = 1.473
beta = 52.46  # dm/dr coupling coefficient -- hard-coded; derivation kept below
#beta = (4*math.pi*e0)/((c**2)*(K*(e0**(powerstuff-1)))**(1/powerstuff))
# ^^ needs the solar mass term otherwise waaaay too small
#print beta
#-------------------------------
def diffpressure(p, r, m):
    """dp/dr for Newtonian hydrostatic equilibrium in dimensionless units.

    p: pressure, r: radius, m: enclosed mass (module globals alpha and
    powerstuff supply the polytropic constants).  Returns p unchanged at the
    centre (m == 0, avoiding the r**2 singularity), the sentinel -1 once the
    pressure has gone non-positive, and the hydrostatic gradient otherwise.
    """
    if m ==0:
        return p
    if p <=0:
        # Pressure already crossed zero: the surface has been passed.
        return -1
    else:
        # dp/dr = -alpha * m * p**(1/powerstuff) / r**2
        return -(alpha*m*(p**(1./powerstuff)))/(r**2)
    # return -(g*epsilon(r)*mass(r))/((r*c)**2)
def diffmass(p, r):
    """dm/dr: mass continuity, beta * r**2 * p**(1/powerstuff) (module globals)."""
    return (p**(1./powerstuff))*beta*r**2
def solve_rk4_coupled(mass, pressure, p0, m0, N, rinitial, rfinal):
    """
    Integrate the coupled pressure/mass ODE system with classic 4th-order
    Runge-Kutta on N grid points between rinitial and rfinal.

    mass(p, r)        -> dm/dr
    pressure(p, r, m) -> dp/dr
    p0, m0            -> initial pressure and mass at r = rinitial

    Returns a list of [r, p, m] samples, stopping as soon as the pressure
    drops to (or below) zero.  Returns None when the pressure never crosses
    zero within the integration range.
    """
    step = (rfinal - rinitial) / float(N)
    r, p, m = rinitial, p0, m0
    samples = [[r, p, m]]
    for _ in range(1, N):
        r = r + step
        # RK4 stages: k* advance the mass, l* advance the pressure.
        k1 = step * mass(p, r)
        l1 = step * pressure(p, r, m)
        k2 = step * mass(p + k1 / 2., r + step / 2.)
        l2 = step * pressure(p + l1 / 2., r + step / 2., m + k1 / 2.)
        k3 = step * mass(p + k2 / 2., r + step / 2.)
        l3 = step * pressure(p + l2 / 2., r + step / 2., m + k2 / 2.)
        k4 = step * mass(p + k3, r + step)
        l4 = step * pressure(p + l3, r + step, m + k3)
        p += 1 / 6. * (l1 + 2 * l2 + 2 * l3 + l4)
        m += 1 / 6. * (k1 + 2 * k2 + 2 * k3 + k4)
        if p <= 0:
            break
        samples.append([r, p, m])
    else:
        # Loop completed without the pressure ever reaching zero.
        samples = None
    return samples
# Integrate one star per central pressure in p0 and collect its profile.
finalrs = []
finalms = []
for press in p0:
    # 10^6 radial steps from r = 0 out to r = 1000 (dimensionless units).
    stardata = solve_rk4_coupled(diffmass, diffpressure, press, 0., 1000000, 0, 1000.)
    rs = []
    ms = []
    ps = []
    # Split the [r, p, m] samples into parallel lists for plotting.
    for elem in stardata:
        rs.append(elem[0])
        ps.append(elem[1])
        ms.append(elem[2])
    # Surface values: last sampled radius and total enclosed mass.
    finalrs.append(stardata[-1][0])
    finalms.append(stardata[-1][2])
    print stardata[-1]
# Pressure-vs-radius profile of the last star integrated.
plot(rs, ps, 'r')
#plot(rs, ms, 'b')
#(finalrs, finalms, 'r')
show()
|
24,578 | e3152f9f4962e123e38cac377b3d443da3df901e | from Spectrum import Spectrum
import numpy as np
### ANALYSING PEAKS OF THE BACKGROUND RADIATION ###
# create background object
bkgd = Spectrum()
bkgd.read_from_csv("IAEA_sample_not_present_22.11.18.csv")
Spectrum.calibration_a = -5.677763487790322e-08
Spectrum.calibration_b = 0.7626393415958842
Spectrum.calibration_c = -0.7553693869647748
bkgd.apply_polynomial_calibration()
bkgd.plot(calibrated=False)
# create arrays for positions and uncertainties
# of background peaks in channels
means = np.array([])
dmeans = np.array([])
vars = np.array([])
dvars = np.array([])
# Peak around 313 channel, 238 energy
mean, dmean = bkgd.fit_3sigma_gaussian(313, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 317 channel, 242 energy
# !AMPLITUDE IS 2 TIMES LARGER THAN IT SHOULD BE, CONSIDER INCREASING THE UNCERTAINTY! #
# > UNCERTAINTY IS HALF A BIN = 0.5 "
bkgd.plot(xMin=235, xMax=249, yMax=1.5e4)
mean, dmean = bkgd.fit_gaussian(bkgd.channels[317-2:317+3], bkgd.counts[317-2:317+3], "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, 0.5)
# Peak around 387 channel, 295 energy
mean, dmean = bkgd.fit_3sigma_gaussian(387, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 444 channel, 338 energy
mean, dmean = bkgd.fit_3sigma_gaussian(444, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 461 channel, 351 energy
mean, dmean = bkgd.fit_3sigma_gaussian(461, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 671 channel, 511 energy
# BUG FIX: this fit's result was unpacked into unused popt/pcov while the
# *previous* peak's mean/dmean were appended a second time; unpack into
# mean/dmean like every other peak so this position is actually recorded.
mean, dmean = bkgd.fit_3sigma_gaussian(671, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 765 channel, 583 energy
mean, dmean = bkgd.fit_3sigma_gaussian(765, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 800 channel, 609 energy
mean, dmean = bkgd.fit_3sigma_gaussian(800, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 1196 channel, 911 energy
mean, dmean = bkgd.fit_3sigma_gaussian(1196, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 1272 channel, 969 energy
mean, dmean = bkgd.fit_3sigma_gaussian(1272, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 1917 channel, 1461 energy
mean, dmean = bkgd.fit_3sigma_gaussian(1917, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 2316 channel, 1765 energy
mean, dmean = bkgd.fit_3sigma_gaussian(2316, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# Peak around 3429 channel, 2614 energy
mean, dmean = bkgd.fit_3sigma_gaussian(3429, "Mean", show=False)
means = np.append(means, mean)
dmeans = np.append(dmeans, dmean)
# UNCERTAINTIES #
print("Channels means:", means)
print("Uncertainties:", dmeans)
# convert from channels to energy
energies = Spectrum.channel_to_energy(means)
upper_bound = Spectrum.channel_to_energy(np.add(means, dmeans))
lower_bound = Spectrum.channel_to_energy(np.add(means, -dmeans))
d_energies = np.multiply(np.add(upper_bound, -lower_bound), 0.5)
d_energies = np.add(d_energies, np.mean(bkgd.d_energies))
print("Energies means:", energies)
print("Uncertainty:", d_energies)
|
24,579 | 7615b6fd475217555008b2bb2dbae1e801208b15 | from django.conf.urls import url
from frontpage import views
urlpatterns = [
url('^$', views.post_list, name='post_list'),
url('^post-list/$', views.post_list, name='post_list'),
url('^post-view/$', views.post_view, name='post_view'),
] |
24,580 | 13baa4fa915446db078ca93632513fb997556e82 | """
Arquivo com métodos utilitários centralizados.
"""
import hashlib
class Utilities:
    """
    Class of utility methods.
    """
    def hash_generator(self, value):
        """
        SHA-256 hash generator.

        :param value: str or bytes (text to be hashed; str is encoded UTF-8)
        :return: str (hexadecimal digest)
        """
        # BUG FIX: bytes(value) raises TypeError for str input on Python 3
        # (an encoding argument is required); encode text explicitly instead.
        if isinstance(value, str):
            value = value.encode('utf-8')
        hash_string = hashlib.sha256(value)
        return hash_string.hexdigest()
|
24,581 | 857785aa69d3fd15e023cab7389b3b482e713c97 | import numpy as np
import tensorflow as tf
from data.data_utils import *
class TextCNN2L(object):
"""文本分类,TextCNN模型"""
def __init__(self, config):
self.config = config
# 三个待输入的数据
self.input_x = tf.placeholder(tf.int32, [None, self.config.max_sen_len], name='input_x')
self.input_y = tf.placeholder(tf.float32, [None, self.config.num_classes], name='input_y')
self.is_training = tf.placeholder(tf.bool, name="is_training")
self.keep_prob = tf.where(self.is_training, config.dropout_keep_prob, 1.0)
self.cnn()
def cnn(self):
"""CNN模型"""
# 词向量映射
with tf.name_scope("embedding"):
init_embeddings = tf.random_uniform([self.config.vocab_size, self.config.embedding_dim])
embedding = tf.get_variable("embedding", initializer=init_embeddings, dtype=tf.float32, trainable=self.config.update_w2v)
self.embedding_inputs = tf.nn.embedding_lookup(embedding, self.input_x)
self.embedding_inputs = tf.expand_dims(self.embedding_inputs, 3)
with tf.variable_scope('CNN_Layer1'):
# 添加卷积层做滤波
conv1 = tf.contrib.layers.convolution2d(self.embedding_inputs
,self.config.num_filters
,[self.config.window_size, self.config.embedding_dim]
,padding='VALID')
# 添加RELU非线性
conv1 = tf.nn.relu(conv1)
# 最大池化
pool1 = tf.nn.max_pool(conv1
,ksize=[1, self.config.pooling_window, 1, 1]
,strides=[1, self.config.pooling_stride, 1, 1]
,padding='SAME')
# 对矩阵进行转置,以满足形状
pool1 = tf.transpose(pool1, [0, 1, 3, 2])
with tf.variable_scope('CNN_Layer2'):
# 第2卷积层
conv2 = tf.contrib.layers.convolution2d(pool1
,self.config.num_filters
,[self.config.window_size, self.config.num_filters]
,padding='VALID')
# 抽取特征
pool2 = tf.squeeze(tf.reduce_max(conv2, 1), squeeze_dims=[1])
with tf.name_scope("score"):
# 全连接层,后面接dropout以及relu激活
#fc = tf.layers.dense(pool2, self.config.hidden_dim, name='fc1')
#fc = tf.contrib.layers.dropout(fc, self.keep_prob)
#h_drop = tf.nn.relu(fc)
# 分类器
self.logits = tf.layers.dense(pool2, self.config.num_classes, name='fc2')
# 预测类别
self.y_pred_class = tf.argmax(tf.nn.softmax(self.logits), 1, output_type=tf.int32)
with tf.name_scope("optimize"):
# 损失函数,交叉熵
self.loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.logits, labels=self.input_y))
# 优化器
self.optimizer = tf.train.AdamOptimizer(learning_rate=self.config.learning_rate).minimize(self.loss)
with tf.name_scope("accuracy"):
# 准确率
correct_pred = tf.equal(tf.argmax(self.input_y, 1, output_type=tf.int32), self.y_pred_class)
self.accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name="accuracy")
# 获取batch
def get_batches(self, x, y=None, batch_size=64, is_shuffle=True):
for index in batch_index(len(x), batch_size, is_shuffle=is_shuffle):
n = len(index)
feed_dict = {
self.input_x: [x[i] for i in index]
}
if y is not None:
feed_dict[self.input_y] = [y[i] for i in index]
yield feed_dict, n
# 对一个batch训练
def train_on_batch(self, sess, feed):
feed[self.is_training]=True
_loss, _acc = sess.run([self.loss, self.accuracy], feed_dict=feed)
return _loss, _acc
# 对一个batch验证
def val_on_batch(self, sess, feed):
feed[self.is_training]=False
_loss, _acc = sess.run([self.loss, self.accuracy], feed_dict=feed)
return _loss, _acc
# 对一个batch预测
def predict_on_batch(self, sess, feed, prob=True):
feed[self.is_training]=False
result = tf.argmax(self.logits, 1)
if prob:
result = tf.nn.softmax(logits=self.logits, dim=1)
res = sess.run(result, feed_dict=feed)
return res
# 预测输入x
def predict(self, sess, x, prob=False):
y_pred = []
for _feed, _ in self.get_batches(x, batch_size=self.config.batch_size, is_shuffle=False):
_y_pred = self.predict_on_batch(sess, _feed, prob)
y_pred += _y_pred.tolist()
return np.array(y_pred)
def evaluate(self, sess, x, y):
"""评估在某一数据集上的准确率和损失"""
num = len(x)
total_loss = 0.0
total_acc = 0.0
for _feed, _n in self.get_batches(x, y, batch_size=self.config.batch_size):
loss, acc = self.val_on_batch(sess, _feed)
total_loss += loss * _n
total_acc += acc * _n
return total_loss / num, total_acc / num |
24,582 | 6845fd90f1d8262dfc0ae5d00f176a0d26099960 | import numpy as np
# yaş verileri
yas_verileri = [4, 4, 7, 13, 18, 23, 24, 27, 30, 33, 55, 63, 71]
# aritmetik ortalama (mean)
print("aritmetik ortalama", np.mean(yas_verileri))
# medyan ya da ortanca değer (median)
# eğer veri kümesi çift sayıdan oluşursa ortadaki iki sayının ortalaması alınır.
print("medyan:", np.median(yas_verileri))
# yüzdelik dilimler (percentile)
print("Yüzde 50'lik dilim: (medyan):", np.percentile(yas_verileri, 50))
print("Yüzde 25'lik dilim:", np.percentile(yas_verileri, 25))
print("Yüzde 75'lik dilim:", np.percentile(yas_verileri, 75))
# varyans değeri (variance)
# her bir verinin ortalamaya (mean) olan uzaklıklarının karelerinin toplamının veri sayısına bölümüdür.
print("varyans:", np.var(yas_verileri))
# standart sapma (standard deviation)
# varyans değerinin kareköküdür.
print("standart sapma:", np.std(yas_verileri))
|
24,583 | 6a230ce0f9fe44c160283f4e7f90ebb8e26fc65c | """
@author : Santiago Quiroga Turdera
@version : 1.0
"""
from django.test import TestCase
from django.urls import reverse, include, path
from rest_framework import status
from rest_framework.test import APITestCase
from .models import Student, Class
class ClassTest(APITestCase):
def test_create_a_class(self):
"""
Create a simple class instance of 'Class' model, and inspect its attributes.
"""
data = {
'code': 'CS101',
'title': 'Introduction to Programming',
'description': 'This is a basic yet exciting course',
'enrolled_students':[]
}
url = reverse('class-list')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Class.objects.count(), 1)
self.assertEqual(Class.objects.get().code, 'CS101')
self.assertEqual(Class.objects.get().title, 'Introduction to Programming')
self.assertEqual(Class.objects.get().description, 'This is a basic yet exciting course')
class StudentTest(APITestCase):
def test_create_a_student(self):
"""
Create a single instance of 'User' model and inspect its attributes.
"""
data_class = {
'code': 'CS101',
'title': 'Introduction to Programming',
'description': 'This is a basic yet exciting course',
'enrolled_students':[]
}
url_class = reverse('class-list')
response_class = self.client.post(url_class, data_class, format='json')
self.assertEqual(response_class.status_code, status.HTTP_201_CREATED)
class_pk = Class.objects.get().pk
data = {
'first_name': 'Santiago',
'last_name': 'Quiroga Turdera',
'enrolled_to': ['http://127.0.0.1:8000' + reverse('class-detail', kwargs={'pk':class_pk}),]
}
url = reverse('student-list')
response = self.client.post(url, data, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Student.objects.count(), 1)
self.assertEqual(Student.objects.get().first_name, 'Santiago')
self.assertEqual(Student.objects.get().last_name, 'Quiroga Turdera')
self.assertEqual(Student.objects.get().enrolled_to.count(), 1)
self.assertEqual(Student.objects.get().enrolled_to.get().code, 'CS101')
|
24,584 | 5b664be18aef55007dc80dc42469355ba23df07b | # Question 1:
# Create the class `Society` with following information:
# `society_name`, `house_no`, `no_of_members`, `flat`, `income`
# **Methods :**
# * An `__init__` method to assign initial values of `society_name`, `house_no`, `no_of_members`, `income`
# * `allocate_flat()` to allocate flat according to income using the below table -> according to income, it will decide to flat type
# * `show_data()` to display the details of the entire class.
# * Create one object for each flat type, for each object call `allocate_flat()` and `show_data()`
# | Income | Flat |
# | ------------- |:-------------:|
# | >=25000 | A Type |
# | >=20000 and <25000 | B Type |
# | >=15000 and <20000 | C Type |
# | <15000 | D Type |
class Society:
    """A housing-society member record that maps income to a flat type."""

    def __init__(self, society_name, house_no, no_of_members, income):
        """Store the initial society details."""
        self.society_name = society_name
        self.house_no = house_no
        self.no_of_members = no_of_members
        self.income = income

    def allocate_flat(self):
        """Return the flat type ('A Type'..'D Type') for this income band."""
        if self.income >= 25000:
            return 'A Type'
        if self.income >= 20000:
            return 'B Type'
        if self.income >= 15000:
            return 'C Type'
        return 'D Type'

    def show_data(self):
        """Print the full record, including the allocated flat type."""
        print(f'Society name: {self.society_name} \nHouse no: {self.house_no}\nNo of Members: {self.no_of_members}\nIncome: {self.income}\nFlat:{self.allocate_flat()}')
society_name1=Society('Test',25,6,22500)
society_name1.show_data()
society_name2=Society('Test2',12,4,16000)
society_name2.show_data() |
24,585 | 3ae78e21062e256209ccac1b6091f59d73b7874d | import os
SECRET_KEY = os.urandom(32)
# <!-- {% for variable, value in original_input.items() %}
# <b>{{ variable }}</b>: {{ value }}
# {% endfor %}
# <br> --> |
24,586 | 57de4e479bc0063efc2cdd16ab79a222b3a0315d | api_key = "05c562a0eea8ca8dc3cd57af3c89bd92"
gmap_key = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=-33.8670522,151.1957362&radius=500&types=food&name=harbour&key=AIzaSyDCHUjJpH2H_vP83IKMb3zhxBTiJgCothQ"
|
24,587 | 17eb4a3e99c34f12d26081eaf4c922a56b2dfb40 | from PIL import Image
from util import getFileData
import StringIO
wire = Image.open(StringIO.StringIO(getFileData('http://www.pythonchallenge.com/pc/return/wire.png'))).convert('RGB')
wirePixels = [wire.getpixel((i, 0)) for i in range(wire.size[0])]
squarePixels = [[None for i in range(100)] for i in range(100)]
x = 0
y = 0
i = 0
while i < len(wirePixels):
while y < 100 and squarePixels[x][y] == None:
squarePixels[x][y] = wirePixels[i]
i += 1
y += 1
y -= 1
x += 1
while x < 100 and squarePixels[x][y] == None:
squarePixels[x][y] = wirePixels[i]
i += 1
x += 1
x -= 1
y -= 1
while y >= 0 and squarePixels[x][y] == None:
squarePixels[x][y] = wirePixels[i]
i += 1
y -= 1
y += 1
x -= 1
while x >= 0 and squarePixels[x][y] == None:
squarePixels[x][y] = wirePixels[i]
i += 1
x -= 1
x += 1
y += 1
square = Image.new('RGB', (100, 100))
for x in range(100):
for y in range(100):
square.putpixel((x, y), squarePixels[x][y])
square.show()
|
24,588 | 4e40f98a41d7c943fc79fe1b150cc003df8c3474 | from headers import *
import numpy as np
from object import *
from colorama import Fore, Back, Style
from ball import *
import time
class Powerup:
def __init__(self , START_X , START_Y , powerup , yspeed = 3 , xspeed = 0):
self.x = START_X
self.y = START_Y
self.type = powerup
self.yspeed = yspeed
self.xspeed = xspeed
self.acceleration = 2
self.shape = 'P'
self.active = 0
self.start_time = round(time.time())
self.time_limit = 1000
self.active_time = round(time.time())
def render_powerup(self , grid):
grid[self.y][self.x] = self.shape
def isactive(self):
return self.active
def get_type(self):
return self.type
def delete(self , obj , grid , paddle):
grid[self.y][self.x] = " "
if self.active == 0:
self.active = -1
return
if self.type == "expand_paddle" and self.active == 2:
obj.reshape_paddle(grid , "expand")
if self.type == "shrink_paddle" and self.active == 2:
obj.reshape_paddle(grid , "shrink")
if self.type == "fast_ball" and self.active == 2:
if obj.get_yspeed() > 0:
obj.set_yspeed(obj.get_yspeed() - 2)
if obj.get_yspeed() < 0:
obj.set_yspeed(obj.get_yspeed() + 2)
if self.type == "ball_multiplier" and self.active == 2:
if len(obj) > 1:
for i in range(int(len(obj)/2) , len(obj)):
if i>=len(obj):
return
grid[obj[i].get_y()][obj[i].get_x()] = ' '
del obj[i]
obj = obj[:len(obj) - int(len(obj)/2)]
if self.type == "shooting_paddle":
paddle.type = "normal"
self.active = -1
def move_powerup(self , grid , obj , paddle):
grid[self.y][self.x] = ' '
new_y = self.y + self.yspeed
new_x = self.x + self.xspeed
if new_x > WIDTH-1 or new_x < 0:
self.xspeed = -1*self.xspeed
if new_y < 0:
self.yspeed = -1*self.yspeed
if HEIGHT-new_y <=1:
self.yspeed = -1*self.yspeed
if (self.type == "shooting_paddle" and self.active == 2 and round(time.time()) - self.start_time < self.time_limit):
## append to ball list
if time.time() - self.active_time > 1:
os.system('afplay laser.mp3 &')
self.active_time = time.time()
obj.append(Ball(int(paddle.get_x() + paddle.get_length()/2) , BALL_POS_Y , ball_type="shooting"))
if (self.active == 2 and round(time.time()) - self.start_time >= self.time_limit):
if self.type == "expand_paddle":
obj.reshape_paddle(grid , "expand")
if self.type == "shrink_paddle":
obj.reshape_paddle(grid , "shrink")
if self.type == "fast_ball":
if obj.get_yspeed() > 0:
obj.set_yspeed(obj.get_yspeed() - 2)
if obj.get_yspeed() < 0:
obj.set_yspeed(obj.get_yspeed() + 2)
if self.type == "ball_multiplier":
if len(obj) > 1:
ind = []
for i in range(int(len(obj)/2) , len(obj)):
grid[obj[i].get_y()][obj[i].get_x()] = ' '
ind.append(i)
ind_len = len(ind)
for i in range(0 , ind_len):
grid[obj[len(ind) - 1].get_y()][obj[len(ind) - 1].get_x()] = ' '
del obj[len(ind) - 1]
if self.type == "shooting_paddle":
paddle.type = "normal"
self.active = -1
delete_powerup(self)
return
elif new_y >= HEIGHT-PADDLE_POS_Y and paddle.get_x() <= self.x and paddle.get_x() + paddle.get_length() >= self.x and self.active == 0:
self.active = 1
return
elif ((new_y > HEIGHT - PADDLE_POS_Y or new_y < 0) and self.active == 0):
delete_powerup(self)
self.active = -1
return
elif self.active == 1:
self.make_change(grid , obj , paddle)
self.active = 2
self.start_time = round(time.time())
if self.type == "shooting_paddle":
self.time_limit = 4
else:
self.time_limit = 15
return
if self.active != -1 and self.active != 2:
self.y = new_y
self.x = new_x
if self.yspeed < 3:
self.yspeed = self.yspeed + self.acceleration
grid[self.y][self.x] = self.shape
def make_change(self, grid , obj , paddle):
if self.active == 1:
if self.type == "expand_paddle":
obj.update_shape(grid , "expand")
elif self.type == "shrink_paddle":
obj.update_shape(grid , "shrink")
elif self.type == "fast_ball":
if obj.get_yspeed() > 0:
obj.set_yspeed(obj.get_yspeed() + 2)
if obj.get_yspeed() < 0:
obj.set_yspeed(obj.get_yspeed() - 2)
elif self.type == "ball_multiplier":
ball_len = len(obj)
for _ in range(0 , ball_len):
obj.append(Ball(np.random.randint(40 , 50) , 24))
elif self.type == "shooting_paddle":
paddle.type = "shooting"
class paddleGrab(Powerup):
def __init__(self , START_X , START_Y , powerup , yspeed = 3 , xspeed = 0):
super(paddleGrab , self).__init__(START_X , START_Y , powerup)
self.time_limit = 8
self.yspeed = -1*yspeed
self.xspeed = xspeed
def delete(self , obj , grid , paddle):
grid[self.y][self.x] = " "
grid[self.y + 2][self.x] = " "
if self.active == -1:
return
if self.active == 0:
self.active = -1
return
obj.set_xspeed(obj.storage_xspeed)
obj.set_yspeed(-1*abs(obj.storage_yspeed))
paddle.move_ball = 0
if self.active == 0:
grid[self.y][self.x] = ' '
delete_powerup(self)
def move_powerup(self , grid , obj , paddle):
grid[self.y][self.x] = ' '
new_y = self.y - self.yspeed
new_x = self.x + self.xspeed
if new_x > WIDTH-1 or new_x < 0:
self.xspeed = -1*self.xspeed
if new_y < 0:
self.yspeed = -1*self.yspeed
if self.active == -1:
return
if (self.active == 2 and time.time() - self.start_time >= self.time_limit):
paddle.move_ball = 0
self.active = -1
delete_powerup(self)
return
elif new_y >= HEIGHT-PADDLE_POS_Y and paddle.get_x() <= self.x and paddle.get_x() + paddle.get_length() >= self.x and self.active == 0:
self.active = 1
return
elif ((new_y > HEIGHT - PADDLE_POS_Y or new_y < 0) and self.active == 0):
delete_powerup(self)
self.active == -1
return
elif self.active != 0:
self.make_change(grid , obj , paddle)
self.active = 2
self.start_time = time.time()
self.time_limit = 15
return
self.y = new_y
self.x = new_x
if self.yspeed < 4:
self.yspeed = self.yspeed - self.acceleration
grid[self.y][self.x] = self.shape
def make_change(self, grid , obj , paddle):
if self.active == 1:
obj.set_storage(obj.get_xspeed() , obj.get_yspeed())
grid[obj.get_y()][obj.get_x()] = ' '
obj.set_x(paddle.get_x() + int(paddle.get_length()/2) + obj.get_xspeed())
if paddle.get_y() < 10:
obj.set_y(HEIGHT - paddle.get_y() - 1)
elif paddle.get_y() > 30:
obj.set_y(paddle.get_y() - 1)
obj.set_xspeed(0)
obj.set_yspeed(0)
paddle.move_ball = 1
def relaunch_paddle(self , paddle , obj):
paddle.move_ball = 0
obj.set_xspeed(obj.get_x_storage())
obj.set_yspeed(-1*abs(obj.get_y_storage()))
delete_powerup(self)
self.active = -1
class thruBall(Powerup):
def __init__(self , START_X , START_Y , powerup , yspeed = 3 , xspeed = 0):
super(thruBall , self).__init__(START_X , START_Y , powerup)
self.time_limit = 15
self.yspeed = -1*yspeed
self.xspeed = xspeed
def delete(self , obj , grid , paddle):
grid[self.y][self.x] = " "
grid[self.y + 2][self.x] = " "
if self.active == 0:
self.active = -1
return
for j in range(0 , len(obj)):
obj[j].set_type('normal')
delete_powerup(self)
def move_powerup(self , grid , obj , paddle):
grid[self.y][self.x] = ' '
new_y = self.y - self.yspeed
new_x = self.x + self.xspeed
if new_x > WIDTH-1 or new_x < 0:
self.xspeed = -1*self.xspeed
if new_y < 0:
self.yspeed = -1*self.yspeed
if (self.active == 2 and time.time() - self.start_time >= self.time_limit):
for j in range(0 , len(obj)):
obj[j].set_type('normal')
self.active = -1
delete_powerup(self)
return
elif new_y >= HEIGHT-PADDLE_POS_Y and paddle.get_x() <= self.x and paddle.get_x() + paddle.get_length() >= self.x and self.active == 0:
self.active = 1
return
elif ((new_y > HEIGHT - PADDLE_POS_Y or new_y < 0) and self.active == 0):
delete_powerup(self)
self.active == -1
return
elif self.active != 0:
self.make_change(grid , obj , paddle)
self.active = 2
self.start_time = time.time()
self.time_limit = 15
return
self.y = new_y
self.x = new_x
if self.yspeed < 4:
self.yspeed = self.yspeed - self.acceleration
grid[self.y][self.x] = self.shape
def make_change(self, grid , obj , paddle):
ball_len = len(obj)
for j in range(0 , ball_len):
obj[j].set_type('thru')
|
24,589 | 38c723153ab412c31468066b38679f0fcf69df96 | #encoding:utf-8
import os
import sys
import pickle
import numpy as np
from sklearn.naive_bayes import MultinomialNB
import textprocess
from sklearn import metrics
reload(sys)
sys.setdefaultencoding('utf-8')
tp = textprocess.Textprocess()
tp.corpus_path ="test/"
tp.pos_path = "test_pos/"
tp.segment_path ="test_segment/"
#预处理测试语料库放在post_path路径下
#tp.preprocess()
#分词测试语料库
#tp.segment()
#test_data_corpus放测试语料库,actual放对应的类别索引
test_data_corpus =[]
actual = []
#测试语料库的类别列表
category = os.listdir(tp.segment_path)
#预测第三个类别的准确率
category_index = 4
test_doc_path = tp.segment_path +category[category_index]+"/"
test_dir = os.listdir(test_doc_path)
for myfile in test_dir:
#测试文件的路径
file_path = test_doc_path + myfile
file_obj = open(file_path,'rb')
test_data_corpus.append(file_obj.read())
actual.append(category_index)
file_obj.close()
tp.stopword_path ="ch_stop_words.txt"
#得到停词不列表
stopword_list = tp.getstopword(tp.stopword_path)
tp.wordbag_path ="text_corpus_wordbag/"
tp.word_weight_bag_name ="word_weight_bag.dat"
tp.load_word_weight_bag()
#得到测试语料的词典
tp.load_word_weight_bag()
myvocabulary = tp.word_weight_bag.vocabulary
tdm = tp.word_weight_bag.tdm
test_matrix = tp.tfidf_value(test_data_corpus,stopword_list,myvocabulary)
print "测试语料库tfidf矩阵的大小",test_matrix.shape
clf = MultinomialNB(alpha=0.001).fit(tdm,tp.word_weight_bag.label)
#预测分类结果
predict_test = clf.predict(test_matrix)
for file_name,exp in zip(test_dir,predict_test):
print "测试文件名:",file_name,"实际类别:",category[category_index],"预测类别:",tp.word_weight_bag.target_name[exp]
actual = np.array(actual)
m_precision = metrics.accuracy_score(actual,predict_test)
print "准确率:",m_precision
|
24,590 | eb972a0f4a9145d3702f850a295ab19b7f175ef3 | import xlrd
import os
import sqlite3
"""
状态枚举:
task_type:
1:完善个人信息
2:发卡友圈(已下线)
3:邀请好友办卡
4:每日打车
5:每周购买火车票
6:每周花费充值
status:
1:待完成
2:已完成
3:已发放奖励
"""
if __name__ == "__main__":
file_name = 'C:\\Users\\anve\\Desktop\\Data\\orderInfo_2019\\20190101-0114\\2019-01-01~2019-01-15_0.xlsx'
wb = xlrd.open_workbook(file_name)
sheet = wb.sheet_by_name('Export')
row_num = sheet.nrows
# 连接数据库
conn = sqlite3.connect('C:\Document\BlackMagic.db')
cursor = conn.cursor()
row_list = []
for row_position in range(3, row_num):
userNum = sheet.cell(row_position, 0).value
orderNum = sheet.cell(row_position, 1).value
childOrderNum = sheet.cell(row_position, 2).value
businessType = sheet.cell(row_position, 3).value
orderStatus = sheet.cell(row_position, 4).value
orderTitle = sheet.cell(row_position, 5).value
orderTime = sheet.cell(row_position, 6).value
amount = sheet.cell(row_position, 7).value
sellPrice = sheet.cell(row_position, 9).value
costValue = sheet.cell(row_position, 10).value
costPrice = sheet.cell(row_position, 11).value
payType = sheet.cell(row_position, 12).value
gold = sheet.cell(row_position, 13).value
couponValue = sheet.cell(row_position, 14).value
couponId = sheet.cell(row_position, 22).value
column_tuple = (userNum, orderNum, childOrderNum, businessType, orderStatus, orderTitle, orderTime, amount, sellPrice,
costPrice, payType, gold, couponValue, couponId)
print('data extracting', column_tuple)
row_list.append(column_tuple)
cursor.executemany('insert into orderInfo_2019'
'(userNum, orderNum, childOrderNum, businessType, orderStatus, orderTitle, orderTime, '
'amount, sellPrice,costPrice, payType, gold, couponValue, couponId) '
'values(?,?,?,?,?,?,?,?,?,?,?,?,?,?)', row_list)
conn.commit()
cursor.close()
conn.close()
print(file_name, '提取完成')
|
24,591 | f04509aacdd373c08dee5c8ac0ffeb042e49aac4 | def insert(intervals, newInterval):
if len(intervals) == 0:
return [newInterval]
res = []
temp = intervals+[newInterval]
temp = list(sorted(temp))
low = temp[0][0]
high = temp[0][1]
for i in range(1,len(temp)):
if high >= temp[i][0]:
if high < temp[i][1]:
high = temp[i][1]
else:
res.append([low,high])
low = temp[i][0]
high = temp[i][1]
res.append([low,high])
return res
if __name__ == '__main__':
    # NOTE(review): eval() on raw user input executes arbitrary code; prefer
    # ast.literal_eval for parsing list literals from untrusted input.
    num=eval(input())
    new_num=eval(input())
    result=insert(num,new_num)
    print(result)
|
24,592 | b198cebb8769e3c33356692e9ead6607b102886b | # -*- coding: utf-8 -*-
class SocialOAuthException(Exception):
pass
class SocialConfigError(SocialOAuthException):
pass
class SocialGetTokenError(SocialOAuthException):
"""Occurred when get Access Token"""
pass
class SocialAPIError(SocialOAuthException):
"""Occurred when doing API call"""
pass |
24,593 | 451e4015ac300927a1f13b7df41dbc2ec3471474 | import simplejson as json
import requests
import jsonpath
pass_ticket = '%2BjccTPYRP4OQQWt1JcT45akaZnF%2F1SxRA0SvysLBX0HfKd6i2plB79%2B4QSqoKLmb'
key = '961d6b7b3101fe37b759a3ecce817473253ecfb5c0886d76b8a98bd4ddc82e3273ddd265ee7c50c4c7a1a0d8bd17133a3611cbe2d958ff63778a9905f76599244c22abe564333297528a6cb9779c4c39'
def get_index(openid):
url = 'https://game.weixin.qq.com/cgi-bin/gamewap/getusermobagameindex?openid=' + openid + '&uin=&key=&pass_ticket=' + pass_ticket
cookie = '''sd_userid=4071530372494441; sd_cookie_crttime=1530372494441; pgv_pvid=2339141787; pgv_info=ssid=s5304679219; qv_als=A4gcSKjK6mBr3SfiA11531534126NEW8Zw==; httponly; uin=MzM0OTE1OTY5OQ%3D%3D; key=''' + key + '; pass_ticket=' + pass_ticket
header = {
'Host': 'game.weixin.qq.com',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0; BKL-AL20 Build/HUAWEIBKL-AL20; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044113 Mobile Safari/537.36 MicroMessenger/6.6.7.1321(0x26060739) NetType/WIFI Language/zh_CN',
'Accept': '*/*',
'Referer': 'https://game.weixin.qq.com/cgi-bin/h5/static/smobadynamic/dynamic.html?isFromWeappEntry=1&ssid=29&openid=' + openid + '&abtest_cookie=BAABAAgACgALAAwABQCfhh4APoseACSXHgDImB4A%2BpgeAAAA&pass_ticket=' + pass_ticket + 'k&wx_header=1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-CN;q=0.8,en-US;q=0.6',
'Cookie': cookie,
'X-Requested-With': 'com.tencent.mm',
'Q-UA2': 'QV=3&PL=ADR&PR=WX&PP=com.tencent.mm&PPVN=6.6.7&TBSVC=43610&CO=BK&COVC=044113&PB=GE&VE=GA&DE=PHONE&CHID=0&LCID=9422&MO= BKL-AL20 &RL=1080*2160&OS=8.0.0&API=26',
'Q-GUID': '7b964e09fb9ce102a95ea3db13b788cb',
'Q-Auth': '31045b957cf33acf31e40be2f3e71c5217597676a9729f1b'
}
r = requests.get(url, headers=header, timeout=5)
r.enconding = 'utf-8'
temp=json.loads(r.text)
print(temp)
game_seq = jsonpath.jsonpath(temp,'$..game_seq')
game_svr_entity = jsonpath.jsonpath(temp,'$..game_svr_entity')
relay_svr_entity = jsonpath.jsonpath(temp,'$..relay_svr_entity')
for i in range(0,5):
with open('battle_list.txt','a+') as f:
f.seek(0)
text=f.read()
if text.find('{} {} {}'.format(game_seq[i],game_svr_entity[i],relay_svr_entity[i]))!=-1:
continue
else:
f.write('{} {} {}\n'.format(game_seq[i],game_svr_entity[i],relay_svr_entity[i]))
print('{} {} {}\n'.format(game_seq[0], game_svr_entity[0], relay_svr_entity[0]))
get_battle(game_seq[i],game_svr_entity[i],relay_svr_entity[i],openid)
def get_battle(game_seq,game_svr_entity,relay_svr_entity,openid):
game_seq=str(game_seq)
game_svr_entity = str(game_svr_entity)
relay_svr_entity = str(relay_svr_entity)
url = 'https://game.weixin.qq.com/cgi-bin/gamewap/getbattledetail?game_svr_entity='+game_svr_entity+'&game_seq='+game_seq+'&relay_svr_entity='+relay_svr_entity+'&openid=' + openid + '&uin=&key=&pass_ticket=' + pass_ticket
cookie = '''sd_userid=4071530372494441; sd_cookie_crttime=1530372494441; pgv_pvid=2339141787; pgv_info=ssid=s5304679219; qv_als=A4gcSKjK6mBr3SfiA11531534126NEW8Zw==; httponly; uin=MzM0OTE1OTY5OQ%3D%3D; key=''' + key + '; pass_ticket=' + pass_ticket
header = {
'Host': 'game.weixin.qq.com',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (Linux; Android 8.0; BKL-AL20 Build/HUAWEIBKL-AL20; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/044109 Mobile Safari/537.36 MicroMessenger/6.6.7.1321(0x26060739) NetType/WIFI Language/zh_CN',
'Accept': '*/*',
'Referer': 'https://game.weixin.qq.com/cgi-bin/h5/static/smobadynamic/index.html?game_svr_entity=71548&game_seq=1532005684&relay_svr_entity=504169056&openid=' + openid + '&zone_area_id=3174&ssid=1024&uin=&key=&pass_ticket=' + pass_ticket + '&abtest_cookie=BAABAAgACgALAAwABwCfhh4APoseACSXHgD2lx4AyJgeAPSYHgD6mB4AAAA%3D&wx_header=1',
'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'zh-CN,zh-CN;q=0.8,en-US;q=0.6',
'Cookie': cookie,
'X-Requested-With': 'com.tencent.mm',
'Q-UA2': 'QV=3&PL=ADR&PR=WX&PP=com.tencent.mm&PPVN=6.6.7&TBSVC=43610&CO=BK&COVC=044109&PB=GE&VE=GA&DE=PHONE&CHID=0&LCID=9422&MO= BKL-AL20 &RL=1080*2160&OS=8.0.0&API=26',
'Q-GUID': '7b964e09fb9ce102a95ea3db13b788cb',
'Q-Auth': '31045b957cf33acf31e40be2f3e71c5217597676a9729f1b'
}
r = requests.get(url, headers=header, timeout=5)
r.enconding = 'utf-8'
'''
with open('{}_{}_{}.txt'.format(game_seq,game_svr_entity,relay_svr_entity),'w') as f:
f.write(r.text)
'''
temp = json.loads(r.text)
print(temp)
openid = jsonpath.jsonpath(temp, '$..open_id')
print(openid)
with open('openid.txt','a+') as f:
f.seek(0)
text=f.read()
for i in range(0,len(openid)):
if text.find('{}'.format(openid[i]))!=-1:
continue
else:
f.write('{}\n'.format(openid[i]))
get_index(openid[i])
print(openid)
get_index('owanlsiBXzvBkUypZ7BSRUdFuBeU') |
24,594 | af40231f542de6a7536aca247aa665e045d6f711 | def arrayReplace(a, e, s):
return [i if i!=e else s for i in a]
|
24,595 | e785365c2f83f29e3e899b492a5f04e86f76d5ec | #!usr/bin/env python
# coding:utf-8
from django.forms import ModelForm, widgets
from . import models
from django import forms
from django.core.exceptions import NON_FIELD_ERRORS
class BaseAssetForm(ModelForm):
    """Shared ModelForm for ``models.Asset`` records.

    Declares the common field set, Bootstrap-styled widgets, localized date
    fields and Chinese error messages.  Concrete asset forms subclass this
    and exclude the fields they do not need.
    """
    class Meta:
        model = models.Asset
        # fields = ['name', 'idc', 'tags', 'admin']
        fields = ['sn', 'name', 'asset_type', 'manage_ip', 'trade_date', 'expire_date', 'price', 'idc',
                  'contract', 'business_unit', 'admin', 'manufacturer', 'tags']
        # Render/parse the two date fields using the active locale's format.
        localized_fields = ('trade_date', 'expire_date')
        widgets = {
            'sn': widgets.TextInput(attrs={'class': 'form-control'}),
            'name': widgets.TextInput(attrs={'class': 'form-control'}),
            'asset_type': widgets.Select(attrs={'class': 'select-single form-control'}),
            'manage_ip': widgets.TextInput(attrs={'class': 'form-control'}),
            'trade_date': widgets.TextInput(attrs={'class': 'form-control form-datetime'}),
            'expire_date': widgets.TextInput(attrs={'class': 'form-control form-datetime'}),
            'price': widgets.TextInput(attrs={'class': 'form-control'}),
            'idc': widgets.Select(attrs={'class': 'form-control select-single'}),
            'business_unit': widgets.Select(attrs={'class': 'select-single form-control'}),
            'admin': widgets.Select(attrs={'class': 'select-single form-control'}),
            'contract': widgets.Select(attrs={'class': 'select-single form-control'}),
            'tags': widgets.SelectMultiple(attrs={'class': 'select-multiple form-control'}),
            'manufacturer': widgets.Select(attrs={'class': 'form-control select-single'})
        }
        error_messages = {
            'sn': {
                'required': u'SN不能为空',
                'unique': u'该SN已存在'
            },
            'name': {
                'required': u'名称不能为空',
                'unique': u'该名称已存在'
            },
            'trade_date': {
                'invalid': u'日期格式yyyy-mm-dd',
            },
            'expire_date': {
                'invalid': u'日期格式yyyy-mm-dd',
            },
            'manage_ip': {
                'invalid': u'请输入有效的ipv4或ipv6地址',
            },
            'price': {
                'invalid': u'请输入数字'
            }
        }
    def clean(self):
        """Cross-field validation: the purchase date must precede the expiry date."""
        clean_data = super(BaseAssetForm, self).clean()
        # Some Django versions return None from clean(); fall back to the
        # cleaned_data attribute in that case.
        if not clean_data:
            clean_data = self.cleaned_data
        trade_date = clean_data.get('trade_date')
        expire_date = clean_data.get('expire_date')
        if trade_date and expire_date and trade_date > expire_date:
            msg = u'购买日期必须小于过保日期'
            # Attach the error to both fields so both widgets are highlighted.
            self.add_error('trade_date', msg)
            self.add_error('expire_date', msg)
class AssetSoftwareForm(BaseAssetForm):
    """Asset form variant for software: drops hardware/location fields."""
    class Meta(BaseAssetForm.Meta):
        exclude = ['asset_type', 'manage_ip', 'idc', 'business_unit', 'tags']
class SoftwareForm(ModelForm):
    # This form is specific to the Software model itself; the one above
    # (AssetSoftwareForm) covers the Asset-level part software needs — it
    # inherits from BaseAssetForm and selects the subset relevant to software.
    class Meta:
        model = models.Software
        fields = ['version', 'software_type', 'platform', 'language']
        widgets = {
            'software_type': widgets.Select(attrs={'class': 'select-single form-control'}),
            'platform': widgets.Select(attrs={'class': 'select-single form-control'}),
            'version': widgets.TextInput(attrs={'class': 'form-control'}),
            'language': widgets.Select(attrs={'class': 'form-control select-single'}),
        }
class AssetServerUpdateForm(BaseAssetForm):
    """Asset form variant that excludes 'sn' and 'asset_type'."""
    class Meta(BaseAssetForm.Meta):
        exclude = ['sn', 'asset_type']
class AssetStorageForm(BaseAssetForm):
    """Asset form variant for storage devices.

    The help texts indicate the extra fields network storage should fill in.
    """
    class Meta(BaseAssetForm.Meta):
        exclude = ['asset_type', 'tags']
        help_texts = {
            'manage_ip': u'网络存储请填此项',
            'idc': u'网络存储请选择此项',
            'business_unit': u'网络存储请选择此项',
        }
class AssetNetworkDeviceForm(BaseAssetForm):
    """Asset form variant for network devices: all base fields except asset_type."""
    class Meta(BaseAssetForm.Meta):
        exclude = ['asset_type']
class NetworkDeviceForm(ModelForm):
    """Form for ``models.NetworkDevice``-specific attributes."""
    class Meta:
        model = models.NetworkDevice
        fields = ['device_type', 'vlan_ip', 'intranet_ip', 'port_num', 'model', 'macaddress', 'firmware', 'device_detail']
        widgets = {
            'device_type': widgets.Select(attrs={'class': 'form-control select-single'}),
            'vlan_ip': widgets.TextInput(attrs={'class': 'form-control'}),
            'intranet_ip': widgets.TextInput(attrs={'class': 'form-control'}),
            'port_num': widgets.TextInput(attrs={'class': 'form-control'}),
            'model': widgets.TextInput(attrs={'class': 'form-control'}),
            'macaddress': widgets.TextInput(attrs={'class': 'form-control'}),
            'firmware': widgets.TextInput(attrs={'class': 'form-control'}),
            'device_detail': widgets.Textarea(attrs={'class': 'form-control'}),
        }
class NetworkDeviceNoDetailForm(NetworkDeviceForm):
    """NetworkDeviceForm without the free-text device_detail field."""
    class Meta(NetworkDeviceForm.Meta):
        exclude = ['device_detail']
class StorageForm(ModelForm):
    """Form for ``models.Storage`` with storage-type/interface compatibility checks."""
    class Meta:
        model = models.Storage
        fields = ['model', 'capacity', 'storage_type', 'interface_type']
        widgets = {
            'model': widgets.TextInput(attrs={'class': 'form-control'}),
            'capacity': widgets.TextInput(attrs={'class': 'form-control'}),
            'storage_type': widgets.Select(attrs={'class': 'form-control'}),
            'interface_type': widgets.Select(attrs={'class': 'form-control'})
        }
    def clean(self):
        """Cross-field validation: the interface type must match the storage type."""
        clean_data = super(StorageForm, self).clean()
        # Some Django versions return None from clean(); fall back to the
        # cleaned_data attribute in that case.
        if not clean_data:
            clean_data = self.cleaned_data
        storage_type = clean_data.get('storage_type')
        interface_type = clean_data.get('interface_type')
        if storage_type == 'ram' and interface_type not in ['ddr3', 'ddr4']:
            self.add_error('interface_type', u'内存只能是DDR3或DDR4')
            self.add_error('storage_type', u'内存只能是DDR3或DDR4')
        elif storage_type in ['disk', 'nas'] and interface_type not in ['sata', 'sas', 'scsi', 'ssd']:
            # Bug fix: 'ssd' is accepted above, but the old message
            # (u'硬盘只能是SATA,SAS或SCSI') failed to mention it.
            self.add_error('storage_type', u'硬盘只能是SATA,SAS,SCSI或SSD')
            self.add_error('interface_type', u'硬盘只能是SATA,SAS,SCSI或SSD')
        # NOTE (translated from the original Chinese comment): there used to be
        # a quirk where a disk validation failure stopped at the form layer,
        # while a RAM failure was reported by both the form and the model
        # layer, producing duplicate error messages.  Per the Django docs,
        # add_error() removes the field from cleaned_data, which is likely
        # what caused the difference.  Attaching the error to *both* related
        # fields (instead of raising a single non-field error) works around
        # the duplicate-message problem.
class BatchServerConfigForm(BaseAssetForm):
    """Trimmed asset form for batch operations: drops per-asset identity and
    purchasing fields, keeping only the fields shared across many assets."""
    class Meta(BaseAssetForm.Meta):
        exclude = ['sn', 'contract', 'trade_date', 'expire_date', 'price', 'asset_type', 'manufacturer',
                   'manage_ip', 'name']
class VirtualMachineForm(ModelForm):
    """Form for ``models.VirtualMachine``.

    The optional ``host`` choice is restricted to assets of type 'server'.
    """
    host = forms.ModelChoiceField(
        queryset=models.Asset.objects.filter(asset_type='server'),
        label=u'宿主机',
        widget=forms.Select(attrs={'class': 'form-control select-single'}),
        required=False
    )
    class Meta:
        model = models.VirtualMachine
        fields = ['host', 'name', 'vm_type', 'manage_ip', 'macaddress', 'os_type', 'os_distribution',
                  'os_release', 'os_arch']
        widgets = {
            'name': widgets.TextInput(attrs={'class': 'form-control'}),
            'vm_type': widgets.TextInput(attrs={'class': 'form-control'}),
            'manage_ip': widgets.TextInput(attrs={'class': 'form-control'}),
            'macaddress': widgets.TextInput(attrs={'class': 'form-control'}),
            'os_type': widgets.TextInput(attrs={'class': 'form-control'}),
            'os_release': widgets.TextInput(attrs={'class': 'form-control'}),
            'os_arch': widgets.TextInput(attrs={'class': 'form-control'}),
            'os_distribution': widgets.TextInput(attrs={'class': 'form-control'}),
        }
        error_messages = {
            'name': {
                'required': u'名称不能为空',
                'unique': u'该名称已存在'
            },
            'manage_ip': {
                'invalid': u'不是有效的IPv4或IPv6地址',
            },
        }
|
24,596 | 732eb151d14b4bb369b4c03fe18a1f7e8879820c | #! /usr/bin/env python
import os
# Switch the working directory to a hard-coded, machine-specific Windows
# path.  NOTE(review): this raises FileNotFoundError on any other machine —
# consider deriving the path from the environment instead.
os.chdir("C:\\Users\\gaidi_000\\OpenGL")
# Drop the module reference from the namespace once it is no longer needed.
del(os)
|
24,597 | f174ae15ba6caa4475b97cfd847e22159424570d | import urllib.parse
import urllib.request
import re
# Fetch the CIA World Factbook GDP table and offer an interactive lookup.
dic = {}     # NOTE(review): appears unused below
tabell = []  # NOTE(review): appears unused below
"""
with urllib.request.urlopen('https://www.cia.gov/library/publications/resources/the-world-factbook/fields/rawdata_211.txt') as response:
    html = response.read()
    str_convert = html.decode("UTF-8")
"""
url = "https://www.cia.gov/library/publications/resources/the-world-factbook/fields/rawdata_211.txt"
# First fetch: download and decode the whole table in one call.
txt = urllib.request.urlopen("https://www.cia.gov/library/publications/resources/the-world-factbook/fields/rawdata_211.txt").read().decode("UTF-8")
print(txt)
url = 'https://www.cia.gov/library/publications/resources/the-world-factbook/fields/rawdata_211.txt'
req = urllib.request.Request(url)
# A generator that yields one element (line) at a time.
response = urllib.request.urlopen(req)
ordbok = {}
# Convert the generator to a list and slice from index 2 to the end,
# skipping the two header lines.
for line in list(response)[2:]:
    # rstrip() removes trailing newlines; strip() would also remove leading
    # and trailing whitespace.
    string = line.decode('utf-8').rstrip()
    # Regex: wherever there are 2 or more spaces, split between the columns
    # (rank / country / GDP).
    string = re.split(r" {2,}", string)
    ordbok[string[1]] = string[2]
print(ordbok)
letter = "L"
# List every country whose name starts with `letter`.
for country in ordbok.keys():
    if country[0] == letter:
        print(country)
# Interactive lookup loop; the user types "stopp" to exit.
# (Prompts are Norwegian: "Skriv inn land" = "Enter country",
# "Skriv på nytt" = "Type again".)
snakke = input("Skriv inn land: ")
while snakke != "stopp":
    if snakke in ordbok.keys():
        print(ordbok[snakke])
    else:
        print("Skriv på nytt")
    snakke = input("Skriv inn land: ")
|
24,598 | 867d337daf06e4baf0e3b2675845dfe0ae716837 | from Qt.GUI.Core.kao_widget_with_menu import KaoWidgetWithMenu
from Qt.GUI.Transaction.Table.transaction_account_table_widget import TransactionAccountTableWidget
from Qt.GUI.Transaction.Menu.transaction_menu_widget import TransactionMenuWidget
from PySide.QtGui import QHBoxLayout, QFrame, QWidget
class TransactionsWidget(KaoWidgetWithMenu):
    """ Represents the Widget that holds the Transaction table and menu """
    def setupWidgets(self):
        """ Setup the Transactions Widget: create the menu and table widgets,
        wire the table back onto the menu, and return (table, menu) """
        self.transactionMenuWidget = TransactionMenuWidget(self)
        self.transactionTableWidget = TransactionAccountTableWidget(self.transactionMenuWidget)
        # Give the menu a reference to the table so menu actions can reach it.
        self.transactionMenuWidget.table = self.transactionTableWidget
        return self.transactionTableWidget, self.transactionMenuWidget
    def setToolbar(self, toolbar):
        """ Set the toolbar for the widget and propagate it to the table """
        self.toolbar = toolbar
        self.transactionTableWidget.toolbar = toolbar
    def tabSelected(self):
        """ Forward tab selection to the menu widget """
        self.transactionMenuWidget.tabSelected()
24,599 | 7201fe92d4c76617e808b59ebb1e568621c54821 | st = input()
# Count the distinct characters of `st` (read on the previous line):
# an odd count prints "IGNORE HIM!", an even count "CHAT WITH HER!".
distinct_chars = set(st)
if len(distinct_chars) % 2 == 0:
    print("CHAT WITH HER!")
else:
    print("IGNORE HIM!")
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.