import numpy as np
import cv2 as cv
import math
import sys
def matrix_translation(tx, ty):
return np.array([[1, 0, tx], [0, 1, ty]], dtype=np.float32)
def matrix_scale_pixel_replication(sx, sy):
    return np.array([[sx, 0, 0], [0, sy, 0]], dtype=np.float32)
# The angle must be in radians; tx and ty are the coordinates of the image center
def matrix_rotate(angle, tx, ty, scale=1.0):
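    # Mirrors the layout of cv2.getRotationMatrix2D: alpha = scale*cos(angle),
    # beta = scale*sin(angle); the last column shifts the pivot to (tx, ty).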
math_cos = math.cos(angle)
math_sin = math.sin(angle)
alpha = scale * math_cos
beta = scale * math_sin
calculate_1 = (1 - alpha) * tx - beta * ty
calculate_2 = beta*tx+(1-alpha)*ty
return np.array([[alpha, beta, calculate_1], [-beta, alpha, calculate_2]], dtype=np.float32)
def matrix_shear(shx, shy):
    return np.array([[1, shx, 0], [shy, 1, 0]], dtype=np.float32)
def matrix_get_affine(pts1, pts2):
answer = np.zeros((2, 3), dtype=np.float64)
matrix_a = np.zeros((3, 3), dtype=np.float64)
matrix_b = np.zeros((3, 1), dtype=np.float64)
for i in range(len(pts1)):
matrix_a[i, :] = np.array([pts1[i][0], pts1[i][1], 1], dtype=np.float64)
matrix_b[i, 0] = pts2[i][0]
value = cv.solve(matrix_a, matrix_b)[1]
answer[0, :] = np.transpose(value)
for i in range(len(pts1)):
matrix_b[i, 0] = pts2[i][1]
value = cv.solve(matrix_a, matrix_b)[1]
answer[1, :] = np.transpose(value)
return answer
def scale_by_interpolate_pixels(image, dim_out, scale=1.0):
rows1, columns1 = image.shape[0:2]
n_row = math.ceil(float(rows1) * scale)
n_col = math.ceil(float(columns1) * scale)
# image_blank = np.zeros([dim_out[1], dim_out[0], 3], dtype=np.uint32)
image_blank = np.zeros([n_row, n_col, 3], dtype=np.float32)
if scale < 1.0:
steps = int(scale ** (-1))
for i in range(n_row):
for j in range(n_col):
sal_x = int(i*steps)
sal_y = int(j * steps)
for canal in range(3):
image_blank[i, j, canal] = np.mean(image[sal_x:sal_x+steps, sal_y:sal_y+steps, canal])
else:
steps = int(scale)
for i in range(0, n_row, steps):
for j in range(0, n_col, steps):
image_blank[i, j] = image[int(i/steps), int(j/steps)]
if i - steps >= 0:
for canal in range(3):
difference = image_blank[i, j, canal] - image_blank[i-steps, j, canal]
constant_val = float(difference) / float(steps)
val_start = image_blank[i-steps, j, canal]
for val in range(i-steps+1, i):
val_start = val_start + constant_val
image_blank[val, j, canal] = int(val_start)
if j - steps >= 0:
for canal in range(3):
difference = image_blank[i, j, canal] - image_blank[i, j-steps, canal]
constant_val = float(difference) / float(steps)
val_start = image_blank[i, j-steps, canal]
for val in range(j-steps+1, j):
val_start = val_start + constant_val
image_blank[i, val, canal] = int(val_start)
# if i - steps >= 0 and j - steps >= 0:
return np.uint8(image_blank)
def affine_copy(image, matrix, dim_out):
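    # Inverse mapping: for each destination pixel (u, v), solve
    # matrix_a @ [x, y]^T = [u, v]^T - matrix_b to recover the source pixel.
    # Assumes the 2x2 block is invertible and the mapped coordinates fall
    # inside the source image.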
image_blank = np.zeros([dim_out[1], dim_out[0], 3], dtype=np.uint32)
rows1, columns1 = (dim_out[0], dim_out[1])
matrix_a = matrix[:2, :2]
matrix_b = matrix[:, 2:]
for u in range(rows1):
for v in range(columns1):
value_y = np.array([[u], [v]], dtype=np.float32) - matrix_b
answer = cv.solve(matrix_a, value_y)[1]
valor_x = int(answer[0, 0])
valor_y = int(answer[1, 0])
image_blank[v, u] = image[valor_y, valor_x]
return np.uint8(image_blank)
original = cv.imread("perro.jpeg")
if original is None:
sys.exit("No se puede leer la imagen")
rows, columns = original.shape[0:2]
# Scale tests
# half-size reduction
"""
final = cv.warpAffine(original, matrix_scale_pixel_replication(0.5, 0.5), (int(columns), int(rows)))
final1 = affine_copy(original, matrix_scale_pixel_replication(0.5, 0.5), (int(columns), int(rows)))
"""
# enlargement
val_to_add = 2.0
new_tam = (int(columns*val_to_add), int(rows*val_to_add))
final = cv.warpAffine(original, matrix_scale_pixel_replication(val_to_add, val_to_add), new_tam)
final1 = affine_copy(original, matrix_scale_pixel_replication(val_to_add, val_to_add), new_tam)
pts_1 = np.float32([[50, 50], [200, 50], [50, 200]])
pts_2 = np.float32([[10, 100], [200, 50], [100, 250]])
M = cv.getAffineTransform(pts_1, pts_2)
M_copy = matrix_get_affine(pts_1, pts_2)
print(M)
print(M_copy)
print(final[146, 148])
print(final1[146, 148])
# cv.imshow("Original", original)
cv.imshow("Imagen con warp propio", final1)
cv.imshow("Imagen con warp opencv", final)
k = cv.waitKey(0)
if k == ord("s"):
cv.imwrite("Shear_opencv.png", final)
import pandas as pd
import numpy as np
from random import gauss, uniform
def get_makespan(curr_plan, num_resources, workflow_inaccur, positive=False, dynamic_res=False):
    '''
    Calculate makespans for a plan. Each workflow's operation count is
    perturbed by a uniform inaccuracy in [-workflow_inaccur, workflow_inaccur]
    ([0, workflow_inaccur] when positive=True); with dynamic_res=True each
    resource's performance is drawn from a Gaussian around its nominal value.
    Returns (makespan floored by expected finish times, reactive makespan,
    expected makespan).
    '''
under = False
reactive_resource_usage = [0] * num_resources
resource_usage = [0] * num_resources
expected = [0] * num_resources
tmp_idx = [0] * num_resources
for placement in curr_plan:
workflow = placement[0]
resource = placement[1]
resource_id = resource['id']
expected_finish = placement[3]
if dynamic_res:
perf = gauss(resource['performance'], resource['performance'] * 0.0644)
else:
perf = resource['performance']
if positive:
inaccur = uniform(0, workflow_inaccur)
else:
inaccur = uniform(-workflow_inaccur, workflow_inaccur)
exec_time = (workflow['num_oper'] * (1 + inaccur)) / perf
reactive_resource_usage[resource_id - 1] += exec_time
resource_usage[resource_id - 1] = max(resource_usage[resource_id - 1] + exec_time, expected_finish)
expected[resource_id - 1] = expected_finish
tmp_idx[resource_id - 1] += 1
return max(resource_usage), max(reactive_resource_usage), max(expected)
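# A minimal sketch (hedged, not executed here) of how the repeated
# per-percentage blocks below could be collapsed into one loop; the file-name
# pattern and the 4-resource dynamic/static pairing are taken from the blocks
# themselves.
def run_all_levels(levels=(0.05, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9)):
    columns = ['size', 'planner', 'plan', 'makespan', 'reactive', 'expected',
               'mpn_snt', 'rect_snt', 'time']
    base = '../Data/ga/perc_050/StHeteroCampaigns_4{}HeteroResourcesGA50_inaccur_{}perc.csv'
    for level in levels:
        perc = int(round(level * 100))
        test_case = pd.read_csv(base.format('Dyn', perc))
        for dynamic, tag in ((True, 'Dyn'), (False, 'St')):
            results = pd.DataFrame(columns=columns)
            for _, row in test_case.iterrows():
                plan = eval(row['plan'])
                makespan, reactive, expected = get_makespan(plan, 4, level, dynamic_res=dynamic)
                results.loc[len(results)] = [row['size'], row['planner'], plan,
                                             makespan, reactive, expected,
                                             makespan - expected, reactive - expected,
                                             row['time']]
            results.to_csv(base.format(tag, perc), index=False)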
# ------------------------------------------------------------------------------
# 5%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_5perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.05, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
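# NOTE: this writes the augmented results back over the input file read above;
# the same overwrite pattern repeats for each inaccuracy level below.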
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_5perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.05, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_5perc.csv', index=False)
# ------------------------------------------------------------------------------
# 10%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_10perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.1, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_10perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.1, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_10perc.csv', index=False)
# ------------------------------------------------------------------------------
# 20%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_20perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.2, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_20perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.2, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_20perc.csv', index=False)
# ------------------------------------------------------------------------------
# 30%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_30perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.3, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_30perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.3, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_30perc.csv', index=False)
# ------------------------------------------------------------------------------
# 40%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_40perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.4, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_40perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.4, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_40perc.csv', index=False)
# ------------------------------------------------------------------------------
# 50%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_50perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.5, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_50perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.5, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_50perc.csv', index=False)
# ------------------------------------------------------------------------------
# 60%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_60perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.6, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_60perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.6, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_60perc.csv', index=False)
# ------------------------------------------------------------------------------
# 70%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_70perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.7, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_70perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.7, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_70perc.csv', index=False)
# ------------------------------------------------------------------------------
# 80%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_80perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.8, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_80perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.8, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_80perc.csv', index=False)
# ------------------------------------------------------------------------------
# 90%
test_case = pd.read_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_90perc.csv')
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.9, dynamic_res=True)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4DynHeteroResourcesGA50_inaccur_90perc.csv', index=False)
results = pd.DataFrame(columns=['size','planner','plan','makespan', 'reactive', 'expected','mpn_snt', 'rect_snt', 'time'])
for idx, row in test_case.iterrows():
size = row['size']
planner = row['planner']
plan = eval(row['plan'])
makespan, reactive, expected = get_makespan(plan, 4, 0.9, dynamic_res=False)
time = row['time']
results.loc[len(results)] = [size, planner, plan, makespan, reactive, expected, makespan - expected, reactive - expected, time]
results.to_csv('../Data/ga/perc_050/StHeteroCampaigns_4StHeteroResourcesGA50_inaccur_90perc.csv', index=False)
#!/usr/bin/env python3.6
from controller import Robot, Motor, PositionSensor, InertialUnit, Lidar, Display
import numpy as np
#PLOTTING FUNCTIONS
class Grapher:
def __init__(self, display):
'''
display: The display you wish to use with this.
'''
self.display = display
self.width = display.getWidth()
self.height = display.getHeight()
self.defaultPointSize = int((self.height + self.width)/200)
def drawPointCorner(self, x, y, size=None, color=0x00FFFF):
        '''
        Draws square points w.r.t. the top-left corner of the display. The size is in pixels.
        '''
if(size is None):
size = self.defaultPointSize
self.display.setColor(color)
for row in range(max(0, y - size//2), min(self.height, y + size//2)):
for column in range(max(0, x - size//2), min(self.width, x + size//2)):
self.display.drawPixel(column, row)
def drawPointCenter(self, x, y, size=None, color=0x00FFFF):
'''
Draws square points taking the center (width//2, height//2) to be 0, 0.
'''
self.drawPointCorner(x + self.width//2, self.height//2-y, size, color)
def drawPointsListCenter(self, x_s, y_s, size = None, color=0x00FFFF):
'''
Draws points from two lists. If len_x != len_y, the trailing of x or y are truncated.
'''
length = min(len(x_s), len(y_s))
for i in range(length):
self.drawPointCenter(x_s[i], y_s[i], size, color)
def clear(self, color=0x000000):
self.display.setColor(color)
self.display.fillRectangle(0, 0, self.width, self.height)
def lidar_plot(self, x_data, y_data, DISPLAY_SCALING_FACTOR, color=0x00FFFF):
self.clear()
self.lidar_plot_without_clearing(x_data, y_data, DISPLAY_SCALING_FACTOR, color)
def lidar_plot_without_clearing(self, x_data, y_data, DISPLAY_SCALING_FACTOR, color=0x00FFFF):
x_s = []
y_s = []
for i in range(len(x_data)):
x_s.append(int(x_data[i]*DISPLAY_SCALING_FACTOR))
y_s.append(int(y_data[i]*DISPLAY_SCALING_FACTOR))
# self.drawPointCenter(0, 0, color=0xFF0000)
self.drawPointsListCenter(x_s, y_s, size=5,color=color)
def lidar_filter(imageArray, SIZES,RANGES,EPSILON):
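    '''
    Collapse a (layers x angles) lidar range image into one reading per angle,
    keeping ranges below EPSILON*RANGES[layer] (likely real obstacles rather
    than max-range returns). Later layers overwrite earlier ones; angles with
    no hit stay 0.
    '''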
theta_data = np.zeros((SIZES[1],)).tolist()
for layer in range(SIZES[0]):
for theta in range(SIZES[1]):
point_range = imageArray[layer][theta]
if(point_range < RANGES[layer]*(EPSILON)):
theta_data[theta] = point_range
return theta_data
#INITIALIZATION CODE HERE.
VELOCITY = 30
WHEEL_RADIUS = 0.0075
#ROBOT INITIALIZATION
robot = Robot()
timestep = int(robot.getBasicTimeStep())
#LIDAR AND PLOTTING INITIALIZATION
SIZES = (16, 512)
ranges_str = "1.13114178 0.85820043 0.57785118 0.43461093 0.38639969 0.31585345 0.2667459 0.23062678 0.21593061 0.19141567 0.17178488 0.15571462 0.14872716 0.13643947 0.12597121 0.11696267"
RANGES = [float(i) for i in ranges_str.split(' ')]
EPSILON = 0.6
DISPLAY_SIZE = (1024, 1024)
DISPLAY_SCALING_FACTOR = 0.9*1024/5
PLOT_UPDATE_RATE = 1
lidar = robot.getLidar("lidar")
lidar.enable(timestep)
i = 0
display = robot.getDisplay("extra_display")
grapher = Grapher(display)
#MOTOR AND ENCODER INITIALIZATION
left_motor = robot.getMotor("left motor")
right_motor = robot.getMotor("right motor")
left_motor_sensor = left_motor.getPositionSensor()
right_motor_sensor = right_motor.getPositionSensor()
left_motor_sensor.enable(timestep)
right_motor_sensor.enable(timestep)
left_motor.setPosition(float('inf'))
right_motor.setPosition(float('inf'))
#KEYBOARD MESSAGE
print("Keyboard control has been disabled.")
#INERTIAL UNIT INITIALIZATION
inertial_unit = robot.getInertialUnit("inertial unit")
inertial_unit.enable(timestep)
#ODOMETRY INITIALIZATION
_, _, alpha = inertial_unit.getRollPitchYaw()
x = 0
z = 0
DTHETA_L = 0
DTHETA_R = 0
THETA_L_PREV = left_motor_sensor.getValue()
THETA_R_PREV = right_motor_sensor.getValue()
distance = 0
#WALL-FOLLOWING BEHAVIOUR INITIALIZATION
#ENSURE THAT LEFT + SPREAD < 512
CENTER_DELTA = 0.4
LEFT_DELTA = 0.7
FRONT = 0
RIGHT = 128
BACK = 256
LEFT = 384
LEFT_SPREAD_LOW = -30
LEFT_SPREAD_HIGH = 100
CENTER_SPREAD = 50
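# FRONT/RIGHT/BACK/LEFT index the 512 lidar beams (one quarter turn apart).
# The SPREAD constants pick beam windows around LEFT and FRONT for the wall
# and obstacle checks below; negative indices wrap around the scan.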
turnLeft = True
goForward = False
LAG = 50
j = LAG
while robot.step(timestep) != -1:
#HANDLING INERTIAL UNIT BEHAVIOUR -- INCOMPLETE!
_, _, alpha = inertial_unit.getRollPitchYaw()
DTHETA_L = left_motor_sensor.getValue() - THETA_L_PREV
DTHETA_R = right_motor_sensor.getValue() - THETA_R_PREV
THETA_L_PREV += DTHETA_L
THETA_R_PREV += DTHETA_R
if(goForward):
DTHETA = (DTHETA_L + DTHETA_R)/2
x += WHEEL_RADIUS*DTHETA*np.sin(alpha)
z += WHEEL_RADIUS*DTHETA*np.cos(alpha)
distance += abs(WHEEL_RADIUS*DTHETA)
#LIDAR SENSOR HANDLING PART 1 for WALL-FOLLOWING BEHAVIOUR
imageArray = np.array(lidar.getRangeImageArray()).T
theta_data = lidar_filter(imageArray, SIZES,RANGES,EPSILON)
theta_data_aligned = []
for theta in range(len(theta_data)):
new_theta =(-np.pi/2 - 2*np.pi*theta/512 + alpha)
if(theta_data[theta] != 0):
theta_data_aligned.append((new_theta, theta_data[theta]))
x_data = []
y_data = []
for i in range(len(theta_data_aligned)):
x_data.append(theta_data_aligned[i][1]*np.cos(theta_data_aligned[i][0]))
y_data.append(theta_data_aligned[i][1]*np.sin(theta_data_aligned[i][0]))
for i in range(len(x_data)):
x_data[i] += x
y_data[i] -= z
grapher.lidar_plot_without_clearing([x],[-z],DISPLAY_SCALING_FACTOR,color=0xFF0000)
if(goForward and not turnLeft and j > LAG):
grapher.lidar_plot_without_clearing(x_data, y_data, DISPLAY_SCALING_FACTOR)
elif(goForward and not turnLeft):
j += 1
#WALL-FOLLOWING BEHAVIOUR
turnLeft = True
goForward = False
for i in range(LEFT-LEFT_SPREAD_LOW,LEFT+LEFT_SPREAD_HIGH+1):
if(theta_data[i] != 0 and theta_data[i] < LEFT_DELTA):
turnLeft = False
if(turnLeft):
j = 0
left_motor.setVelocity(-VELOCITY)
right_motor.setVelocity(VELOCITY)
else:
goForward = True
for i in range(FRONT - CENTER_SPREAD , FRONT + CENTER_SPREAD + 1):
if(theta_data[i] != 0 and theta_data[i] < CENTER_DELTA):
goForward = False
if(goForward):
left_motor.setVelocity(VELOCITY)
right_motor.setVelocity(VELOCITY)
else:
j = 0
left_motor.setVelocity(VELOCITY)
right_motor.setVelocity(-VELOCITY)
def build_request(method, parameters):
params = {}
data = {}
url = method.method_path
headers = {}
files = {}
form_data = {}
for parameter_pair in parameters:
parameter, val = parameter_pair
body = parameter.raw_body
parameter_type = body["in"]
if parameter_type == "header":
headers[body["name"]] = val
elif parameter_type == "query":
params[body["name"]] = val
elif parameter_type == "path":
url = str(url)
url = url.replace('{' + str(body["name"]) + '}', str(val))
elif parameter_type == "formData":
            if body.get('type') == 'file':
files[body['name']] = val
continue
form_data[body["name"]] = val
elif parameter_type == 'body':
data = val
else:
raise Exception("Unrecognized type", parameter.raw_body)
    return url, params, data, headers, files, form_data
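# A minimal usage sketch (hypothetical stand-ins: `method.method_path` and
# `parameter.raw_body` are assumed to follow the Swagger/OpenAPI parameter
# shape this function consumes):
#
#     from types import SimpleNamespace
#     method = SimpleNamespace(method_path='/pets/{petId}')
#     param = SimpleNamespace(raw_body={'in': 'path', 'name': 'petId'})
#     url, params, data, headers, files, form_data = build_request(method, [(param, 42)])
#     # url == '/pets/42'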
x=5
print('Before 5')
if x==5:
print('this is 5')
print('still 5')
print('After 5')
print('Before 6')
if x==6:
print('this is 6')
print('After 6')
import os
import numpy as np
import torch
from torch.utils.data.dataset import Subset
from torchvision import datasets, transforms
from chexpert_dataset import CheXpertDataset
from utils.utils import set_random_seed
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # This is your Project Root
DATA_PATH = os.path.join(ROOT_DIR, 'data')
IMAGENET_PATH = os.path.join(ROOT_DIR, 'data/ImageNet')
CIFAR10_SUPERCLASS = list(range(10)) # one class
IMAGENET_SUPERCLASS = list(range(30)) # one class
CHEXPERT_SUPERCLASS = list(range(2)) # one class
CIFAR100_SUPERCLASS = [
[4, 31, 55, 72, 95],
[1, 33, 67, 73, 91],
[54, 62, 70, 82, 92],
[9, 10, 16, 29, 61],
[0, 51, 53, 57, 83],
[22, 25, 40, 86, 87],
[5, 20, 26, 84, 94],
[6, 7, 14, 18, 24],
[3, 42, 43, 88, 97],
[12, 17, 38, 68, 76],
[23, 34, 49, 60, 71],
[15, 19, 21, 32, 39],
[35, 63, 64, 66, 75],
[27, 45, 77, 79, 99],
[2, 11, 36, 46, 98],
[28, 30, 44, 78, 93],
[37, 50, 65, 74, 80],
[47, 52, 56, 59, 96],
[8, 13, 48, 58, 90],
[41, 69, 81, 85, 89],
]
class MultiDataTransform(object):
def __init__(self, transform):
self.transform1 = transform
self.transform2 = transform
def __call__(self, sample):
x1 = self.transform1(sample)
x2 = self.transform2(sample)
return x1, x2
class MultiDataTransformList(object):
    def __init__(self, transform, clean_transform, sample_num):
        self.transform = transform
        self.clean_transform = clean_transform
self.sample_num = sample_num
def __call__(self, sample):
set_random_seed(0)
sample_list = []
for i in range(self.sample_num):
sample_list.append(self.transform(sample))
return sample_list, self.clean_transform(sample)
def get_transform(image_size=None):
# Note: data augmentation is implemented in the layers
# Hence, we only define the identity transformation here
if image_size: # use pre-specified image size
train_transform = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.Resize((image_size, image_size)),
transforms.ToTensor(),
])
else: # use default image size
train_transform = transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
])
return train_transform, test_transform
def get_subset_with_len(dataset, length, shuffle=False):
set_random_seed(0)
dataset_size = len(dataset)
index = np.arange(dataset_size)
if shuffle:
np.random.shuffle(index)
index = torch.from_numpy(index[0:length])
subset = Subset(dataset, index)
assert len(subset) == length
return subset
def get_transform_imagenet():
train_transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
train_transform = MultiDataTransform(train_transform)
return train_transform, test_transform
def get_transform_chexpert(size):
train_transform = transforms.Compose([
transforms.RandomEqualize(p=1.0),
transforms.Resize((size, size)),
transforms.ToTensor(),
])
test_transform = transforms.Compose([
transforms.RandomEqualize(p=1.0),
transforms.Resize((size, size)),
transforms.ToTensor(),
])
return train_transform, test_transform
def get_dataset(P, dataset, test_only=False, image_size=None, download=False, eval=False):
if dataset in ['imagenet', 'cub', 'stanford_dogs', 'flowers102',
'places365', 'food_101', 'caltech_256', 'dtd', 'pets']:
if eval:
train_transform, test_transform = get_simclr_eval_transform_imagenet(P.ood_samples,
P.resize_factor, P.resize_fix)
else:
train_transform, test_transform = get_transform_imagenet()
elif dataset == 'chexpert':
print(f'getting chexpert with image size={image_size}')
train_transform, test_transform = get_transform_chexpert(size=image_size)
else:
train_transform, test_transform = get_transform(image_size=image_size)
if dataset == 'cifar10':
image_size = (32, 32, 1)
n_classes = 10
train_set = datasets.CIFAR10(DATA_PATH, train=True, download=download, transform=train_transform)
test_set = datasets.CIFAR10(DATA_PATH, train=False, download=download, transform=test_transform)
elif dataset == 'cifar100':
image_size = (32, 32, 3)
n_classes = 100
train_set = datasets.CIFAR100(DATA_PATH, train=True, download=download, transform=train_transform)
test_set = datasets.CIFAR100(DATA_PATH, train=False, download=download, transform=test_transform)
elif dataset == 'svhn':
assert test_only and image_size is not None
test_set = datasets.SVHN(DATA_PATH, split='test', download=download, transform=test_transform)
elif dataset == 'lsun_resize':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'LSUN_resize')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
elif dataset == 'lsun_fix':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'LSUN_fix')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
elif dataset == 'imagenet_resize':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'Imagenet_resize')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
elif dataset == 'imagenet_fix':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'Imagenet_fix')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
elif dataset == 'imagenet':
image_size = (224, 224, 3)
n_classes = 30
train_dir = os.path.join(IMAGENET_PATH, 'one_class_train')
test_dir = os.path.join(IMAGENET_PATH, 'one_class_test')
train_set = datasets.ImageFolder(train_dir, transform=train_transform)
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
elif dataset == 'stanford_dogs':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'stanford_dogs')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'cub':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'cub200')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'flowers102':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'flowers102')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'places365':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'places365')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'food_101':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'food-101', 'images')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'caltech_256':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'caltech-256')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'dtd':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'dtd', 'images')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'pets':
assert test_only and image_size is not None
test_dir = os.path.join(DATA_PATH, 'pets')
test_set = datasets.ImageFolder(test_dir, transform=test_transform)
test_set = get_subset_with_len(test_set, length=3000, shuffle=True)
elif dataset == 'chexpert':
# image_size = (64, 64, 1) # TODO - check size is legal
image_size = (image_size, image_size, 1)
n_classes = 2 # TODO - right now - classes are only no_finding=0/1
train_set_frontal = CheXpertDataset(
path_to_images=os.path.join(DATA_PATH, 'CheXpert-v1.0-small'),
fold='train_frontal',
include_uncertainty=False,
transform=train_transform,
sample=P.sample)
train_set_lateral = CheXpertDataset(
path_to_images=os.path.join(DATA_PATH, 'CheXpert-v1.0-small'),
fold='train_lateral',
include_uncertainty=False,
transform=train_transform,
sample=P.sample)
# image, label = train_set.__getitem__(1)
test_set = CheXpertDataset(
path_to_images=os.path.join(DATA_PATH, 'CheXpert-v1.0-small'),
fold=f'valid_{P.ood_type}',
include_uncertainty=False,
transform=test_transform)
else:
raise NotImplementedError()
if test_only:
return test_set
else:
return train_set_frontal, train_set_lateral, test_set, image_size, n_classes
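# A minimal usage sketch (hedged): `P` is assumed to be an argparse-style
# config object carrying the attributes get_dataset reads (e.g. P.sample,
# P.ood_type). Note that the non-test_only return path above currently
# assumes the chexpert branch (only it defines train_set_frontal/lateral):
#
#     train_frontal, train_lateral, test_set, image_size, n_classes = \
#         get_dataset(P, 'chexpert', image_size=64)
#     ood_set = get_dataset(P, 'lsun_resize', test_only=True, image_size=64)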
def get_superclass_list(dataset):
if dataset == 'cifar10':
return CIFAR10_SUPERCLASS
elif dataset == 'cifar100':
return CIFAR100_SUPERCLASS
elif dataset == 'imagenet':
return IMAGENET_SUPERCLASS
elif dataset == 'chexpert':
return CHEXPERT_SUPERCLASS
else:
raise NotImplementedError()
def get_subclass_dataset(dataset, classes):
if not isinstance(classes, list):
classes = [classes]
indices = []
for idx, tgt in enumerate(dataset.targets):
if tgt in classes:
indices.append(idx)
dataset = Subset(dataset, indices)
return dataset
def get_simclr_eval_transform_imagenet(sample_num, resize_factor, resize_fix):
resize_scale = (resize_factor, 1.0) # resize scaling factor
if resize_fix: # if resize_fix is True, use same scale
resize_scale = (resize_factor, resize_factor)
transform = transforms.Compose([
transforms.Resize(256),
transforms.RandomResizedCrop(224, scale=resize_scale),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
])
    clean_transform = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
    transform = MultiDataTransformList(transform, clean_transform, sample_num)
return transform, transform
from django.db import models
from datetime import datetime
# Create your models here.
class Photos(models.Model):
title = models.CharField(max_length=32)
photo_name = models.CharField(max_length=32)
addtime = models.DateTimeField(default=datetime.now)
def __str__(self):
return "%d,%s:%s"%(self.id,self.title,self.photo_name)
# -*- coding: utf-8 -*-
# flask
DEBUG = True
SESSION_COOKIE_NAME = 'niku_cms'
SECRET_KEY = '\xdbY/DNiT\xfe\x91\xf8*\xf2o*\x96\xbe\xfc\xbd\x083=5\xd7\x17'
# flask-wtf
CSRF_ENABLED = True
CSRF_SESSION_KEY = '_csrf_token'
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 10 14:04:37 2015
@author: Raghav Saboo and Sharrin Manor
"""
import csv
import time
import urllib
import sys
## arguments to be provided when running the script from the prompt <python wuArchiveTemp.py dd mm yyyy numDays>
dd = str(sys.argv[1])
mm = str(sys.argv[2])
yyyy = str(sys.argv[3])
numDays = int(sys.argv[4])
#wuArchive(dd, mm, yyyy, numDays):
## dd/mm/yyyy format
dateStr = str(yyyy+'/'+mm+'/'+dd)
yearInt = int(dateStr[0:4])
monthInt = int(dateStr[5:7])
# if the month or day is less than ten, the zero before it gets cut off
dayInt = int(dateStr[8:10])
# open a file for writing.
csv_out = open("wuTempArchive.csv",'wb')
# create the csv writer object.
mywriter = csv.writer(csv_out)
###---------------------------------------------------------------------------------------------------------
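# Walk backwards one day at a time: fetch the CSV-formatted daily history
# page, append its rows, then decrement the date by hand.
# NOTE: this script targets Python 2 (print statement, urllib.urlopen), and
# leap years are not handled (February is always treated as 28 days).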
while numDays !=0:
url = 'http://www.wunderground.com/history/airport/KRDU/' +dateStr+ '/DailyHistory.html?format=1'
print url
data = urllib.urlopen(url)
#datareader = data.splitlines(data)
reader = csv.reader(data)
tempD = [row for row in reader]
mywriter.writerows(tempD[2:])
numDays = numDays-1
if dayInt == 1:
monthInt = monthInt-1
#0 month means 12 this is fixed later below
if (monthInt == 1) or (monthInt == 3) or (monthInt == 5) or (monthInt == 7) or (monthInt == 8) or (monthInt==10) or (monthInt==0):
dayInt = 31
elif (monthInt == 4) or (monthInt == 6) or (monthInt ==9) or (monthInt==11):
dayInt = 30
else:
dayInt = 28
else:
dayInt = dayInt-1
if monthInt == 0:
monthInt = 12
yearInt = yearInt - 1
## add zeros for the string numbers
if dayInt < 10:
dayStr = '0' + str(dayInt)
else:
dayStr = str(dayInt)
if monthInt < 10:
monthStr = '0' + str(monthInt)
else:
monthStr = str(monthInt)
dateStr = str(yearInt)+'/'+monthStr+'/'+dayStr
csv_out.close()
#!/usr/bin/python3
"""
File collector walks through a given directory tree finding given file
formats and stores it in a zip file.
Copyright (C) 2017 rafael valera
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import argparse
import os
import sys
import warnings
import zipfile
docs = (".txt", ".doc", ".xls", ".xlsx", ".docx", ".pdf", ".odt")
videos = ('.m1v', '.mpeg', '.mov', '.qt', '.mpa', '.mpg', '.mpe', '.avi', '.movie', '.mp4')
audios = ('.ra', '.aif', '.aiff', '.aifc', '.wav', '.au', '.snd', '.mp3', '.mp2')
images = ('.ras', '.xwd', '.bmp', '.jpe', '.jpg', '.jpeg', '.xpm', '.ief', '.pbm',
'.tif', '.gif', '.ppm', '.xbm', '.tiff', '.rgb', '.pgm', '.png', '.pnm')
class CollectedFile:
def __init__(self, file_path):
self.file_path = file_path
self.filename = os.path.basename(file_path)
        self.extension = os.path.splitext(file_path)[1]
def get_absolute_file_path(self):
return self.file_path
def get_file_extension(self):
return self.extension
def get_filename(self):
return self.filename
def __str__(self):
return "<MediaFile: {}>".format(self.filename)
def append_to_zipfile(container, file, verbose=False):
"""
    Appends file to zipfile. If verbose, prints the filename
    to the standard output stream.
:param container: zip file full path
:param file: file to be added
:param verbose: if true, prints '.../{filename}' to stdout
"""
append = "a"
with warnings.catch_warnings():
try:
warnings.simplefilter("ignore")
with zipfile.ZipFile(container, append) as temp_zipfile:
temp_zipfile.write(file.get_absolute_file_path(), file.get_filename())
except FileNotFoundError as not_found_exception:
print(not_found_exception, file=sys.stderr)
sys.exit(1)
else:
if verbose:
print("..." + file, file=sys.stdout)
def collect_files(src, file_extensions):
"""
Walks through source file tree and yields a CollectedFile object
that meets the file extensions criteria. If source directory does
not exists, a FileNotFoundError exception is raised and if the
argument file_extensions is not a tuple a TypeError exception will
be raised
Params:
:param src: a source directory to collect files from
:param file_extensions: a tuple of file extensions (.txt, .xls, .jpeg)
yields: CollectedFile object
"""
if not os.path.exists(src):
raise FileNotFoundError()
if not isinstance(file_extensions, tuple):
raise TypeError()
else:
for path, dirs, files in os.walk(src):
for file in files:
if file.lower().endswith(file_extensions):
yield CollectedFile(os.path.join(path, file))
def main():
flags_parser = argparse.ArgumentParser(description="Collects files by extension and stores it in a zip file")
flags_parser.add_argument("source", help="source directory to collect files from", type=str)
flags_parser.add_argument("--verbose", "-v", help="prints to stdout the files being appended to the zipfile",
action="store_true", default=False)
flags_parser.add_argument("--media", "-m", help="includes most commom media file extensions to the search criteria",
action="store_true", default=False)
flags_parser.add_argument("zipfile", help="/path/to/my_file.zip ", type=str)
flags_parser.add_argument("extensions", help="file extensions to be added to the search criteria ex: txt pdf jpeg"
"png wav", type=tuple, nargs="*")
# Arguments
arguments = flags_parser.parse_args()
source = arguments.source
is_verbose = arguments.verbose
zip_file_path_container = arguments.zipfile
media = arguments.media
    program_extensions = tuple(audios + images + docs + videos)
    # Normalize user-supplied extensions to the ".ext" form expected by str.endswith()
    user_extensions = tuple("." + ext.lstrip(".") for ext in arguments.extensions)
    if media:
        all_extensions = program_extensions + user_extensions
    else:
        all_extensions = user_extensions
for collected_file in collect_files(source, all_extensions):
        append_to_zipfile(zip_file_path_container, collected_file, is_verbose)
if __name__ == "__main__":
main()
import sys
from PyQt5.QtWidgets import (QWidget, QMessageBox, QApplication, QDesktopWidget, QMainWindow,
                             QAction, qApp, QMenu, QTextEdit, QLineEdit, QGridLayout, QLabel)
from PyQt5.QtGui import QIcon
class Example(QMainWindow):
def __init__(self):
super().__init__()
self.initUI()
def initUI(self):
exitAct = QAction(QIcon('spyder.png'), '&Exit', self)
exitAct.setShortcut("Ctrl+Q")
exitAct.setStatusTip('Exit application')
exitAct.triggered.connect(qApp.quit)
openAct = QAction("Open", self)
openAct.setShortcut("Ctrl+O")
        openAct.setStatusTip('Open file')
####QMenu
impMenu = QMenu("Import",self)
impMenu.addAction(exitAct)
## check
viewStatAct = QAction("View Statusbar", self,checkable=True)
viewStatAct.setStatusTip("View Statusbar")
viewStatAct.setChecked(True)
viewStatAct.triggered.connect(self.toggleMenu)
menubar = self.menuBar()
## define actions
fileMenu = menubar.addMenu('&File')
fileMenu.addAction(exitAct)
fileMenu.addAction(openAct)
fileMenu.addMenu(impMenu)
fileMenu.addAction(viewStatAct)
editMenu = menubar.addMenu('&Edit')
#self.resize(250,150)
self.center()
#self.textEditor()
self.setWindowTitle('defViewer')
self.statusBar().showMessage("ready")
self.show()
def center(self):
qr = self.frameGeometry()
cp = QDesktopWidget().availableGeometry().center()
#print(type(cp), type(qr))
qr.moveCenter(cp)
self.move(qr.topLeft())
textEdit = QTextEdit()
self.setCentralWidget(textEdit)
def textEditor(self):
        title = QLabel('Title')
author = QLabel('Author')
review = QLabel('Review')
        titleEdit = QLineEdit()
authorEdit = QLineEdit()
reviewEdit = QTextEdit()
grid = QGridLayout()
grid.setSpacing(10)
grid.addWidget(title,1,0)
        grid.addWidget(titleEdit, 1, 1)
grid.addWidget(author,2,0)
grid.addWidget(authorEdit,2,1)
grid.addWidget(review,3,0)
grid.addWidget(reviewEdit,3,1,5,1)
self.setLayout(grid)
self.setGeometry(300,300,350,300)
self.setWindowTitle("Review")
    def toggleMenu(self, state):
        if state:
            self.statusBar().show()
        else:
            self.statusBar().hide()
def closeEvent(self, event):
reply = QMessageBox.question(self, 'Message',
"Are you sure to quit?", QMessageBox.Yes |
QMessageBox.No, QMessageBox.No)
if reply == QMessageBox.Yes:
event.accept()
else:
event.ignore()
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Example()
    sys.exit(app.exec_())
'''
Created on Apr 24, 2015
@author: baumanst
'''
import unittest
import time
import py_brew.cook as cook
import py_brew.datalogger as datalogger
dlt = None
class Test(unittest.TestCase):
def setUp(self):
# For the test increase update speed
self.dlt = datalogger.DataLoggerThread(cook.status, cook.dlt_state_cb, 0.01)
def tearDown(self):
self.dlt.exit()
def testCookStatus(self):
self.assertEqual(cook.status['dlt_state'], 'Initialized')
self.dlt.start()
time.sleep(0.1)
self.assertEqual(cook.status['dlt_state'], 'Idle')
self.dlt.start_logging()
time.sleep(0.1)
self.assertEqual(cook.status['dlt_state'], 'Logging')
self.dlt.stop_logging()
time.sleep(0.1)
self.assertEqual(cook.status['dlt_state'], 'Idle')
self.dlt.exit()
time.sleep(0.1)
self.assertEqual(cook.status['dlt_state'], 'Not running')
def testGetData(self):
EMPTY_DATA = {'list': []}
self.assertEqual(self.dlt.get_data(), EMPTY_DATA)
self.dlt.start()
self.assertEqual(self.dlt.get_data(), EMPTY_DATA)
self.dlt.start_logging()
time.sleep(0.1)
self.assertNotEqual(self.dlt.get_data(), EMPTY_DATA)
# Check dedicated entries in dataset
dl = self.dlt.get_data()['list']
self.assertGreater(len(dl), 0)
entry = dl[0]
self.assertEqual(entry['time'], 0)
self.assertEqual(entry['tempk1'], cook.status['tempk1'])
self.assertEqual(entry['tempk2'], cook.status['tempk2'])
self.assertEqual(entry['settempk1'], cook.status['settempk1'])
self.assertEqual(entry['settempk2'], cook.status['settempk2'])
self.assertEqual(entry['pump1'], cook.status['pump1'])
self.assertEqual(entry['pump2'], cook.status['pump2'])
self.assertEqual(entry['heater'], cook.status['heater'])
self.dlt.stop_logging()
time.sleep(0.2)
self.assertEqual(self.dlt.get_data(), EMPTY_DATA)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
from time import time
t000 = time()
import os
import pickle
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import torch
from torch.optim import lr_scheduler
from torch.nn.init import xavier_normal,xavier_normal_
from torch import nn
import torch.utils.data.sampler as sampler
import dgl
from config import DefaultConfig
from models.egret_attention_visual import egret_ppi
import data_generator_attention_visual as data_generator
from data_generator_attention_visual import graph_collate
from feature_generator import ProtBERT_feature_generator, distance_and_angle_generator
configs = DefaultConfig()
THRESHOLD = 0.2
def test(model, loader, root_dir):
global configs
model.eval()
result = []
attention_scores = []
edges = []
protein_infos_ = []
for batch_idx, (protbert_data, graph_batch, protein_info) in enumerate(loader):
with torch.no_grad():
if torch.cuda.is_available():
protbert_var = torch.autograd.Variable(protbert_data.cuda().float())
graph_batch.edata['ex'] = torch.autograd.Variable(graph_batch.edata['ex'].cuda().float())
else:
protbert_var = torch.autograd.Variable(protbert_data.float())
graph_batch.edata['ex'] = torch.autograd.Variable(graph_batch.edata['ex'].float())
# compute output
# t0 = time.time()
output, head_attn_scores = model(protbert_var, graph_batch)
# print(output.__len__(), output.shape)
shapes = output.data.shape
output = output.view(shapes[0], configs.max_sequence_length)
output = output.data.cpu().numpy()
            head_attn_scores = head_attn_scores[0].view(shapes[0], configs.max_sequence_length, 20).cpu().numpy()
graph_list = dgl.unbatch(graph_batch)
for i, graph in enumerate(graph_list):
__len_limit__ = min(configs.max_sequence_length, protein_info[i]['seq_length'])
protein_infos_.append(protein_info[i])
result.append(output[i][:__len_limit__])
attention_scores.append(head_attn_scores[i, :__len_limit__, :])
edges.append(graph.edges()[0].view(__len_limit__, 20).numpy())
predict_result = {}
predict_result["pred"] = result
predict_result["protein_info"] = protein_infos_
predict_result["edges"] = edges
predict_result["attention_scores"] = attention_scores
result_file = "{}/outputs/prediction_and_attention_scores.pkl".format(root_dir)
with open(result_file,"wb") as fp:
pickle.dump(predict_result,fp)
def predict(model_file, root_dir):
# test_protBERT_file = [root_dir+'/inputs/ProtBert_features.pkl.gz']
protein_list_file = root_dir+'/inputs/protein_list.txt'
test_dataSet = data_generator.dataSet(root_dir, protein_list_file)
# print(protein_list.__len__(), protein_list)
test_loader = torch.utils.data.DataLoader(test_dataSet,
batch_size=configs.batch_size,
shuffle=False,
pin_memory=(torch.cuda.is_available()),
num_workers=configs.num_workers, drop_last=False, collate_fn=graph_collate)
model = egret_ppi()
pretrained_dict = torch.load(model_file)
model_dict = model.state_dict()
# 1. filter out unnecessary keys
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
# 2. overwrite entries in the existing state dict
model_dict.update(pretrained_dict)
# 3. load the new state dict
model.load_state_dict(model_dict)
if torch.cuda.is_available():
model = model.cuda()
else:
model = model.cpu()
test(model, test_loader, root_dir)
if __name__ == '__main__':
root_dir = '.'
# generate_distance_and_angle_matrix_and_fasta
distance_and_angle_generator.generate_distance_and_angle_matrix(root_dir)
# generate_protbert_features
ProtBERT_feature_generator.generate_protbert_features(root_dir)
t111 = time()
model_dir = "{}/models/egret_model_weight.dat".format(root_dir)
predict(model_dir, root_dir)
print('Prediction completed. Results saved at:', "{}/outputs/prediction_and_attention_scores.pkl".format(root_dir))
t222 = time()
print('\nOnly Feature Generation Time:', t111-t000)
print('Only Inference Time:', t222-t111)
print('Total Time:', t222-t000)
from multiprocessing.connection import Listener, Client, AuthenticationError
from multiprocessing import Process, Lock, Manager
import random
import time
def pedirPalabras(conn):
try:
palabra = conn.recv()
print(palabra)
except EOFError:
print('algo no ha funcionado')
#return palabra
def notificar_inicio_juego(pareja):
print('INICIO JUEGO')
lonPalabra = random.randint(4,8)
for (jugador, [jugador_info, _]) in pareja.items():
print ("Mandando longitud de palabra a ", jugador)
conn = Client(address=jugador_info[0], authkey=jugador_info[1])
conn.send("Elige una palabra de longitud "+str(lonPalabra))
# Process(target=pedirPalabras, args=(conn,)).start()
conn.close()
def serve_client(jugador, ipPuerto, jugadores, cerrojo):
j = len(jugadores)
apodo = jugadores[ipPuerto][1]
if j == 1:
jugador.send('Hola '+apodo+' tu papel es de Jugador 1. \n Esperando al segundo jugador...')
if j == 2:
jugador.send('Hola '+apodo+' tu papel es de Jugador 2. \n Ya tenemos dos jugadores, empieza la partida.')
cerrojo.acquire()
pareja = jugadores.copy()
Process(target=notificar_inicio_juego, args=(pareja,)).start()
jugadores.clear()
cerrojo.release()
jugador.close()
#print ('Conexion', ipPuerto, 'cerrada')
if __name__ == '__main__':
servidor = Listener(address=('127.0.0.1', 6000), authkey=b'secret password SERVER')
print ('Iniciando servidor del ahorcado...')
manager = Manager()
jugadores = manager.dict()
cerrojo = Lock()
while True:
print ('Aceptando jugadores...')
try:
jugador = servidor.accept()
ipPuerto = servidor.last_accepted
print ('Jugador aceptado desde la IP y puerto siguientes: ', ipPuerto)
infoListenerApodoJugador = jugador.recv()
jugadores[ipPuerto] = infoListenerApodoJugador
p = Process(target=serve_client, args=(jugador, ipPuerto, jugadores, cerrojo))
p.start()
except AuthenticationError:
            print ('Conexión rechazada, contraseña incorrecta')
servidor.close()
print ('FIN')
"""
We have a collection of stones, each stone has a positive integer weight.
Each turn, we choose the two heaviest stones and smash them together. Suppose the stones have weights x and y with x <= y. The result of this smash is:
If x == y, both stones are totally destroyed;
If x != y, the stone of weight x is totally destroyed, and the stone of weight y has new weight y-x.
At the end, there is at most 1 stone left. Return the weight of this stone (or 0 if there are no stones left.)
"""
from typing import List
def last_stone_weight(stones: List[int]) -> int:
n = len(stones)
for i in range(n):
stones = sorted(stones)
if n == 1:
return stones[0]
if n == 2:
return stones[1] - stones[0]
if stones[n-1] == stones[n-2]:
stones.pop()
stones.pop()
n -= 2
else:
stones[n-2] = stones[n-1] - stones[n-2]
stones.pop()
            n -= 1
    return 0  # no stones left (e.g. empty input)
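# A minimal heap-based alternative (a standard approach, sketched here for
# comparison; not the author's original code). heapq is a min-heap, so weights
# are stored negated to pop the heaviest stone first. O(n log n) overall.
import heapq

def last_stone_weight_heap(stones: List[int]) -> int:
    heap = [-s for s in stones]
    heapq.heapify(heap)
    while len(heap) > 1:
        y = -heapq.heappop(heap)  # heaviest
        x = -heapq.heappop(heap)  # second heaviest
        if x != y:
            heapq.heappush(heap, -(y - x))
    return -heap[0] if heap else 0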
if __name__ == '__main__':
print(last_stone_weight([2, 7, 4, 1, 8, 1]))
print(last_stone_weight([2, 2]))
print(last_stone_weight([1]))
import unittest
from app.modules.scrapers.googlescholar import parser
class GoogleScholarTests(unittest.TestCase):
def verify_papers(self, expected, actual):
actual_dict = {x['title']: x for x in actual}
for pe in expected:
self.assertTrue(pe.pop('citations') > 0)
self.assertTrue(pe.pop('data_url').startswith('/citations'))
self.assertDictContainsSubset(pe, actual_dict[pe['title']])
def test_google_scholar_parser(self):
actual = parser.crawl('https://scholar.google.com.hk/citations?hl=en&user=5qvdHjQAAAAJ')
expected = {'overall_citations_list': [7569, 1799, 32, 14, 195, 37],
'citations_by_year': {1984: 48, 1985: 34, 1986: 82, 1987: 62, 1988: 52, 1989: 62, 1990: 52,
1991: 55, 1992: 95, 1993: 57, 1994: 78, 1995: 59, 1996: 82, 1997: 63,
1998: 92, 1999: 84, 2000: 88, 2001: 72, 2002: 114, 2003: 145, 2004: 219,
2005: 175, 2006: 278, 2007: 335, 2008: 392, 2009: 183, 2010: 283, 2011: 257,
2012: 321, 2013: 324, 2014: 362, 2015: 270, 2016: 280, 1978: 85,
1979: 39, 1980: 35, 1981: 47, 1982: 36, 1983: 60},
'full_name': 'Theodore Dru Alison Cockerell (1866–1948)', 'labels': {
'particular bees and scale insects': '/citations?view_op=search_authors&hl=en&mauthors=label:particular_bees_and_scale_insects',
'Natural history': '/citations?view_op=search_authors&hl=en&mauthors=label:natural_history'},
'email_suffix': 'melipona.org', 'papers': [{
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:M0j1y4EgrScC',
'citations': 224,
'title': 'LIII.—Descriptions and records of bees.—XIX',
'year': 1908}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:QxtoOqDH1aQC',
'citations': 213,
'title': 'Descriptions and records of bees, no. 74',
'year': 1917}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:xIusEVNJREcC',
'citations': 207,
'title': 'LIII.—Descriptions and records of bees.—XIX',
'year': 1908}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:BulkYocH2doC',
'citations': 167,
'title': 'Descriptions and records of bees. VI',
'year': 1905}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:Ri6SYOTghG4C',
'citations': 150,
'title': 'LXXX.—Descriptions and records of bees.—XCIV',
'year': 1922}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:bEWYMUwI8FkC',
'citations': 121,
'title': 'LXXVIII.—Descriptions and records of bees.—C',
'year': 1924}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:UebtZRa9Y70C',
'citations': 96,
'title': 'Bees in the collection of the United States national museum. 1-4',
'year': 1911}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:DrR-2ekChdkC',
'citations': 88,
'title': 'XVII.—Descriptions and records of bees.—LXXIX',
'year': 1918}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:u-x6o8ySG0sC',
'citations': 87,
'title': 'Arthropods in Burmese amber',
'year': 1917}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:9yKSN-GCB0IC',
'citations': 86, 'title': 'Insects in Burmese amber',
'year': 1916}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:-f6ydRqryjwC',
'citations': 78,
'title': 'XXV.—Fossil Arthropods in the British Museum.—IV',
'year': 1920}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:ULOm3_A8WrAC',
'citations': 75,
'title': 'XLIII.—Descriptions and records of bees.—XXIII',
'year': 1909}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:08ZZubdj9fEC',
'citations': 69,
'title': 'Fossil insects from Florissant, Colorado',
'year': 1910}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:d1gkVwhDpl0C',
'citations': 69,
'title': 'The Coleoptera of New Mexico',
'year': 1907}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:uCYQzKCmtZwC',
'citations': 57,
'title': 'XXVIII.—Descriptions and records of bees.—VIII',
'year': 1906}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:vYYylRVofzEC',
'citations': 54,
'title': 'Monograph of the Bombycine moths of North America.',
'year': 1914}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:2osOgNQ5qMEC',
'citations': 54,
'title': 'Observations on fish scales',
'year': 1913}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:LkGwnXOMwfcC',
'citations': 50,
'title': 'Eocene insects from the Rocky Mountains',
'year': 1921}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:UeHWp8X0CEIC',
'citations': 49,
'title': 'Some American Cretaceous fish scales, with notes on the classification and distribution of Cretaceous fishes',
'year': 1919}, {
'data_url': '/citations?view_op=view_citation&hl=en&user=5qvdHjQAAAAJ&citation_for_view=5qvdHjQAAAAJ:R6aXIXmdpM0C',
'citations': 48,
'title': 'The insects of the dipterous family Phoridae in the United States National Museum',
'year': 1912}],
'occupation': 'Professor, University of Colorado'}
# self.assertEqual(expected, actual)
self.assertEqual(expected['full_name'], actual['full_name'])
self.assertEqual(expected['email_suffix'], actual['email_suffix'])
self.assertEqual(expected['occupation'], actual['occupation'])
self.assertEqual(expected['labels'], actual['labels'])
self.assertTrue(actual['overall_citations_list'][0] >= expected['overall_citations_list'][0])
self.assertTrue(actual['overall_citations_list'][2] >= expected['overall_citations_list'][2])
self.assertTrue(actual['overall_citations_list'][4] >= expected['overall_citations_list'][4])
self.assertDictContainsSubset(expected['citations_by_year'], actual['citations_by_year'])
self.verify_papers(expected['papers'], actual['papers'])
if __name__ == '__main__':
unittest.main()
|
import numpy as np
import pandas as pds
import matplotlib.pyplot as plt
class Node():
def __init__(self):
self.feature = 0
self.threshold = 0
self.left_child = None
self.right_child = None
self.is_leaf = False
self.leaf_prediction = 0
def cross_entropy(x):
x_dict = {}
x_size = len(x)
for x_i in x:
x_i = int(x_i)
if x_i in x_dict:
x_dict[x_i] += 1
else:
x_dict[x_i] = 1
entropy = 0
for x_j in x_dict:
x_j = int(x_j)
p = x_dict[x_j]/x_size
entropy += p * np.log(p+0.0000000001)
return -entropy
def gini_index(x):
x_dict = {}
x_size = len(x)
for x_i in x:
x_i = int(x_i)
if x_i in x_dict:
x_dict[x_i] += 1
else:
x_dict[x_i] = 1
gini = 0
for x_j in x_dict:
x_j = int(x_j)
p = x_dict[x_j] / x_size
gini += p * (1 - p)
return gini
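# Quick sanity check (a sketch, not part of the original script): for the
# perfectly mixed labels [0, 0, 1, 1] both impurity measures are maximal:
# cross_entropy([0, 0, 1, 1]) ~= 0.693 (ln 2) and gini_index([0, 0, 1, 1]) == 0.5.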
def maximum_element(x):
x_dict = {}
for x_i in x:
x_i = int(x_i)
if x_i in x_dict:
x_dict[x_i] += 1
else:
            x_dict[x_i] = 1
max_index = -1
max_value = -1
for x_i in x_dict:
if x_dict[x_i] > max_value:
max_value = x_dict[x_i]
max_index = x_i
return max_index
def create_node(x, y, root, depth, max_depth, min_sample_size, method = 'NCE'):
    '''
    Recursively grows the tree from `root`, exhaustively scanning every
    (sample, feature) pair for the split that minimises the chosen impurity.
    :param x: training samples, shape (n_samples, n_features)
    :param y: training targets, one per sample
    :param root: the Node to split
    :param depth: current depth of `root` in the tree
    :param max_depth: maximum allowed tree depth
    :param min_sample_size: nodes with at most this many samples become leaves
    :param method: can be 'NCE' and 'GINI' for classification or 'MSE' for regression
    :return: None, the tree is built in place
    '''
example_size = x.shape[0]
example_dimension = x.shape[1]
if example_size <= min_sample_size or depth == max_depth:
root.is_leaf = True
if method == 'NCE' or method == 'GINI':
root.leaf_prediction = maximum_element(y)
elif method == 'MSE':
root.leaf_prediction = np.sum(y) / np.size(y)
root.left_child = None
root.right_child = None
return
measure_matrix = np.zeros(x.shape)
for i in range(example_dimension):
for j in range(example_size):
feature = x[j][i]
left_y = []
right_y = []
for k in range(len(x)):
if x[k][i] < feature:
# left.append(x[k])
left_y.append(y[k])
else:
# right.append(x[k])
right_y.append(y[k])
left_y = np.array(left_y)
right_y = np.array(right_y)
left_measure = 0
right_measure = 0
if np.size(left_y) != 0:
if method == 'NCE':
left_measure = cross_entropy(left_y)
elif method == 'GINI':
left_measure = gini_index(left_y)
elif method == 'MSE':
left_measure = np.sum((left_y - np.sum(left_y) / np.size(left_y)) ** 2)
if np.size(right_y) != 0:
if method == 'NCE':
right_measure = cross_entropy(right_y)
elif method == 'GINI':
right_measure = gini_index(right_y)
elif method == 'MSE':
right_measure = np.sum((right_y - np.sum(right_y) / np.size(right_y)) ** 2)
measure_matrix[j][i] = left_measure + right_measure
# find the minimum position
min_measure = np.min(measure_matrix)
min_position = np.where(measure_matrix == min_measure)
threshold = x[min_position[0][0]][min_position[1][0]]
feature = min_position[1][0]
root.threshold = threshold
root.feature = feature
# split data into two sets
left_x = []
left_y = []
right_x = []
right_y = []
for j in range(example_size):
if x[j][feature] < threshold:
left_x.append(x[j])
left_y.append(y[j])
else:
right_x.append(x[j])
right_y.append(y[j])
left_x = np.array(left_x)
left_y = np.array(left_y)
right_x = np.array(right_x)
right_y = np.array(right_y)
    if len(left_x) == 0 or len(right_x) == 0 or min_measure == 0:
root.is_leaf = True
if method == 'NCE' or method == 'GINI':
root.leaf_prediction = maximum_element(y)
elif method == 'MSE':
root.leaf_prediction = np.sum(y)/np.size(y)
root.left_child = None
root.right_child = None
return
else:
root.left_child = Node()
create_node(left_x, left_y, root.left_child, depth + 1, max_depth, min_sample_size, method)
root.right_child = Node()
create_node(right_x, right_y, root.right_child, depth + 1, max_depth, min_sample_size, method)
class CART():
def __init__(self, max_depth, method='MSE', min_sample_size_in_leaf=2):
self.max_depth = max_depth
self.method = method
self.root = Node()
self.min_sample_size_in_leaf = min_sample_size_in_leaf
def training(self, x, y):
create_node(x, y, self.root, 0, self.max_depth, self.min_sample_size_in_leaf, self.method)
def test(self, x, node):
root = node
while not root.is_leaf:
if x[root.feature] < root.threshold:
root = root.left_child
else:
root = root.right_child
return root.leaf_prediction
def predicting(self, x):
return self.test(x, self.root)
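# Usage sketch (assumed toy data, mirroring the __main__ block below): fit a
# depth-1 classification stump and query it.
#   x = np.array([[0.], [1.], [2.], [3.]])
#   y = np.array([0, 0, 1, 1])
#   stump = CART(max_depth=1, method='NCE')
#   stump.training(x, y)
#   stump.predicting(np.array([2.5]))  # -> 1 (falls right of the threshold 2.0)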
if __name__ == '__main__':
data_file = pds.read_csv('./data/4.csv')
x = np.array([data_file['x_1'] / 320. - 1,data_file['x_2'] / 240. - 1]).transpose()
y = np.array(data_file['Label']).transpose()
cart = CART(max_depth=8, method='NCE')
cart.training(x, y)
tree = cart.root
y_hat = np.zeros(y.shape)
for i in range(y.shape[0]):
y_hat[i] = cart.predicting(x[i])
# -----------------------------------visualization---------------------------------------
plt.figure(1)
y_dict = {}
y_hat_dict = {}
for y_i in y:
y_i = int(y_i)
if y_i not in y_dict:
y_dict[y_i] = np.random.rand(1,3)
for i in range(0, len(y)):
plt.scatter(x[i][0], x[i][1], color=y_dict[int(y[i])], marker='o', s=60, alpha=0.5)
plt.figure(2)
for i in range(0, len(y)):
plt.scatter(x[i][0], x[i][1], color=y_dict[int(y_hat[i])], marker='x', s=30, alpha=0.5)
plt.show() |
import json
import os
from PIL import Image
import torch
from torch.utils.data import Dataset, SubsetRandomSampler
from torchvision import models
from torchvision import transforms
import torch.nn as nn
import torch.optim as optim
import numpy as np
from sklearn import metrics
import matplotlib.pyplot as plt
import time
import albumentations as A
weather_dict_file = open('weather_dict.json','r')
weather_dict = json.load(weather_dict_file)
w_by_i = {int(i):key for i,key in weather_dict[0].items()}
i_by_w = {key:int(i) for key,i in weather_dict[1].items()}
train_folder = 'drivedata_v2/train'
test_folder = 'drivedata_v2/test_weather/'
if torch.cuda.is_available():
    device = torch.device('cuda:0')
    print(torch.cuda.get_device_name(0))
else:
    device = torch.device('cpu')
def init_aug():
augmentation_pipeline = A.Compose(
[
A.ShiftScaleRotate(rotate_limit=10),
A.HorizontalFlip(p = 0.5),
A.OneOf(
[
A.RandomContrast(),
A.RandomGamma(),
A.RandomBrightness(),
],
p = 0.5
),
],
p = 1
)
return augmentation_pipeline
class WeatherDataset(Dataset):
def __init__(self, folder, transform=None, augmentation=None):
self.transform = transform
self.augmentation=augmentation
self.folder = folder
filenames = os.listdir(self.folder)
self.filenames = [name for name in filenames if name.split('_')[2]!='undefined.jpg']
splited = [x.replace('.jpg','').split('_') for x in self.filenames]
self.splited = [[i_by_w[x[2]],x[0]] for x in splited]
def __len__(self):
return len(self.filenames)
def __getitem__(self, index):
img = Image.open(os.path.join(self.folder, self.filenames[index]))
y = self.splited[index][0]
img_id = self.splited[index][1]
if self.augmentation:
img = self.augmentation(image = np.array(img))['image']
if self.transform:
img = self.transform(img)
return img, y, img_id
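# Filename layout assumed by the parsing above: "<img_id>_<...>_<weather>.jpg"
# (e.g. "0001_day_rainy.jpg"); files whose third underscore-separated part is
# "undefined" are skipped, and the weather token is mapped to a class index via i_by_w.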
def train_model(model, train_loader, val_loader, loss, optimizer, num_epochs):
loss_history = []
train_history = []
val_history = []
for epoch in range(num_epochs):
model.train() # Enter train mode
loss_accum = 0
correct_samples = 0
total_samples = 0
start = time.time()
for i_step, (x, y,_) in enumerate(train_loader):
start = time.time()
x_gpu = x.to(device)
y_gpu = y.to(device)
prediction = model(x_gpu)
loss_value = loss(prediction, y_gpu)
optimizer.zero_grad()
loss_value.backward()
optimizer.step()
_, indices = torch.max(prediction, 1)
correct_samples += torch.sum(indices == y_gpu)
total_samples += y.shape[0]
            loss_accum += loss_value.item()
            print(i_step, time.time() - start)
        ave_loss = loss_accum / (i_step + 1)
train_accuracy = float(correct_samples) / total_samples
val_accuracy = compute_accuracy(model, val_loader)
name = 'scene_'+time.strftime('%Y-%m-%d_%H-%M_')+str(epoch)+'_'+str(val_accuracy)+'.pt'
PATH = os.path.join('weather_models_checkpoint/',name)
model.cpu()
torch.save(model, PATH)
model.to(device)
loss_history.append(float(ave_loss))
train_history.append(train_accuracy)
val_history.append(val_accuracy)
print("Average loss: %f, Train accuracy: %f, Val balanced accuracy: %f" % (ave_loss, train_accuracy, val_accuracy))
return loss_history, train_history, val_history
def compute_accuracy(model, loader):
"""
Computes accuracy on the dataset wrapped in a loader
Returns: accuracy as a float value between 0 and 1
"""
model.eval() # Evaluation mode
pred = list()
gr_tr = list()
    with torch.no_grad():  # gradients are not needed for evaluation
        for (x, y, _) in loader:
            x = x.to(device=device)
            y = y.to(device=device)
            predict = model(x)
            _, indices = torch.max(predict, 1)
            gr_tr.extend(y.cpu().tolist())
            pred.extend(indices.cpu().tolist())
val_accuracy = metrics.balanced_accuracy_score(gr_tr, pred)
with open('val_result_weather.json', 'w') as outfile:
json.dump([pred,gr_tr], outfile)
return val_accuracy
def main():
train_dataset = WeatherDataset(train_folder,
transform=transforms.Compose([
transforms.ToPILImage(),
transforms.Resize((224, 224)),
transforms.ToTensor(),
# Use mean and std for pretrained models
# https://pytorch.org/docs/stable/torchvision/models.html
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
]),
augmentation=init_aug()
)
    test_dataset = WeatherDataset(test_folder,
transform=transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
# Use mean and std for pretrained models
# https://pytorch.org/docs/stable/torchvision/models.html
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
)
batch_size = 16
data_size = len(train_dataset)
validation_fraction = .1
val_split = int(np.floor((validation_fraction) * data_size))
indices = list(range(data_size))
np.random.seed(42)
np.random.shuffle(indices)
val_indices, train_indices = indices[:val_split], indices[val_split:]
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
train_loader = torch.utils.data.DataLoader(train_dataset, pin_memory=True,batch_size=batch_size,
sampler=train_sampler,num_workers=16)
val_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size,
sampler=val_sampler,pin_memory=True,num_workers=16)
#test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size,pin_memory=True,num_workers=16)
model = torch.load('weather_models_checkpoint/scene_2020-02-24_04-55_4_0.8679472128533139.pt')
model.eval()
#model = models.resnext50_32x4d(pretrained=True)
#num_ftrs = model.fc
#num_ftrs = model.fc.in_features
#model.fc = nn.Linear(num_ftrs, 5)
model.to(device)
loss = nn.CrossEntropyLoss()
optimizer = optim.SGD([{'params': model.fc.parameters(), 'lr': 1e-4},
{'params': list(model.parameters())[:-2]}],
lr=0.00001, momentum=0.9)
loss_history, train_history, val_history = train_model(model, train_loader, val_loader, loss, optimizer, 5)
if __name__ == '__main__':
main() |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re
import datetime
from website.identifiers.clients.base import AbstractIdentifierClient
from website import settings
from datacite import DataCiteMDSClient, schema40
from django.core.exceptions import ImproperlyConfigured
from osf.metadata.utils import datacite_format_subjects, datacite_format_contributors, datacite_format_creators
logger = logging.getLogger(__name__)
class DataCiteClient(AbstractIdentifierClient):
def __init__(self, node):
try:
assert settings.DATACITE_URL and (getattr(node.provider, 'doi_prefix', None) or settings.DATACITE_PREFIX)
except AssertionError:
            raise ImproperlyConfigured("OSF DataCite client's settings are not configured")
self._client = DataCiteMDSClient(
url=settings.DATACITE_URL,
username=settings.DATACITE_USERNAME,
password=settings.DATACITE_PASSWORD,
prefix=getattr(node.provider, 'doi_prefix', None) or settings.DATACITE_PREFIX
)
def build_metadata(self, node):
"""Return the formatted datacite metadata XML as a string.
"""
data = {
'identifier': {
'identifier': self.build_doi(node),
'identifierType': 'DOI',
},
'creators': datacite_format_creators([node.creator]),
'contributors': datacite_format_contributors(node.visible_contributors),
'titles': [
{'title': node.title}
],
'publisher': 'Open Science Framework',
'publicationYear': str(datetime.datetime.now().year),
'resourceType': {
'resourceType': 'Pre-registration' if node.type == 'osf.registration' else 'Project',
'resourceTypeGeneral': 'Text'
},
'dates': [
{
'date': node.created.isoformat(),
'dateType': 'Created'
},
{
'date': node.modified.isoformat(),
'dateType': 'Updated'
},
]
}
article_doi = node.article_doi
if article_doi:
data['relatedIdentifiers'] = [
{
'relatedIdentifier': article_doi,
'relatedIdentifierType': 'DOI',
'relationType': 'IsSupplementTo'
}
]
if node.description:
data['descriptions'] = [{
'descriptionType': 'Abstract',
'description': node.description
}]
if node.node_license:
data['rightsList'] = [{
'rights': node.node_license.name,
'rightsURI': node.node_license.url
}]
data['subjects'] = datacite_format_subjects(node)
# Validate dictionary
assert schema40.validate(data)
# Generate DataCite XML from dictionary.
return schema40.tostring(data)
    def build_doi(self, obj):
        return settings.DOI_FORMAT.format(
            prefix=getattr(obj.provider, 'doi_prefix', None) or settings.DATACITE_PREFIX,
            guid=obj._id
        )
    def get_identifier(self, identifier):
        return self._client.doi_get(identifier)
def create_identifier(self, node, category):
if category == 'doi':
if settings.DATACITE_ENABLED:
metadata = self.build_metadata(node)
resp = self._client.metadata_post(metadata)
# Typical response: 'OK (10.70102/FK2osf.io/cq695)' to doi 10.70102/FK2osf.io/cq695
doi = re.match(r'OK \((?P<doi>[a-zA-Z0-9 .\/]{0,})\)', resp).groupdict()['doi']
self._client.doi_post(doi, node.absolute_url)
return {'doi': doi}
logger.info('TEST ENV: DOI built but not minted')
return {'doi': self.build_doi(node)}
else:
raise NotImplementedError('Creating an identifier with category {} is not supported'.format(category))
def update_identifier(self, node, category):
        if settings.DATACITE_ENABLED and (not node.is_public or node.is_deleted):
if category == 'doi':
doi = self.build_doi(node)
self._client.metadata_delete(doi)
return {'doi': doi}
else:
raise NotImplementedError('Updating metadata not supported for {}'.format(category))
else:
return self.create_identifier(node, category)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: ai ts=4 sts=4 et sw=4 nu
from __future__ import (unicode_literals, absolute_import,
division, print_function)
from django.conf.urls import patterns, url
from snisi_web.url_regexp import (RGXP_ENTITY, RGXP_PERIOD,
RGXP_PERIODS, RGXP_RECEIPT)
urlpatterns = patterns(
'',
url(r'^/mission/{receipt}?$'
.format(receipt=RGXP_RECEIPT),
'snisi_cataract.views.cataract_mission_viewer',
name='cataract_mission'),
url(r'^/view/{entity}/{period}/?$'
.format(entity=RGXP_ENTITY, period=RGXP_PERIOD),
'snisi_cataract.views.cataract_mission_browser',
name='cataract_missions'),
url(r'^/view/{entity}/?$'
.format(entity=RGXP_ENTITY),
'snisi_cataract.views.cataract_mission_browser',
name='cataract_missions'),
url(r'^/view/?$',
'snisi_cataract.views.cataract_mission_browser',
name='cataract_missions'),
url(r'^/dashboard/{entity}/{periods}/?$'
.format(entity=RGXP_ENTITY, periods=RGXP_PERIODS),
'snisi_cataract.views.cataract_dashboard', name='cataract_dashboard'),
url(r'^/dashboard/{entity}/?$'
.format(entity=RGXP_ENTITY),
'snisi_cataract.views.cataract_dashboard', name='cataract_dashboard'),
url(r'^/dashboard/?$',
'snisi_cataract.views.cataract_dashboard', name='cataract_dashboard'),
)
|
"""ncc4roma URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.i18n import i18n_patterns
from django.views.generic import RedirectView
from website.views import HtmlTemplate, TemplateView
urlpatterns = [
url(r'^(?P<lang>\w{2})/*$', RedirectView.as_view(url='/%(lang)s/index.html', permanent=True)),
url(r'^$', RedirectView.as_view(url='/it/index.html', permanent=True), name='index'),
    url(r'^robots\.txt$', TemplateView.as_view(template_name='tpl/robots.txt'), name='robots'),
] + i18n_patterns('',
url(r'^(?P<page_name>[^/.]+\.html)', HtmlTemplate.as_view())
)
|
"""
Bitbucket tools.
"""
import os
import requests
BUILD_STATUS_STARTED = 'INPROGRESS'
BUILD_STATUS_STOPPED = 'SUCCESSFUL'
BUILD_STATUS_FAILURE = 'FAILED'
URL='https://api.bitbucket.org/2.0/repositories/%s/commit/%s/statuses/build'
def get_url():
    "Build the URL"
    return URL % (os.environ['CI_REPO_NAME'], os.environ['CI_COMMIT_ID'])
def get_build_data(state):
    "Get the build data"
    return {'state': state,
            'key': os.environ['CI_NAME'],
            'name': os.environ['CI_BUILD_NUMBER'],
            'url': os.environ['CI_BUILD_URL']}
def _build_info(state):
"Upload build status."
url = get_url()
data = get_build_data(state)
auth = (os.environ['BB_USERNAME'], os.environ['BB_PASSWORD'])
post = requests.post(url, json=data, auth=auth)
    # An unsuccessful status post should not stop a build
    # (the API responds 200 when updating a status and 201 when creating one)
    if post.status_code not in (200, 201):
        print('# Bitbucket POST error:', post.status_code, post.reason)
def build_started():
"Build has started"
_build_info(BUILD_STATUS_STARTED)
def build_stopped():
"Build has started"
_build_info(BUILD_STATUS_STOPPED)
def build_failure():
"Build has started"
_build_info(BUILD_STATUS_FAILURE)
raise SystemExit(1)
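# Minimal usage sketch (assumes the CI_*/BB_* environment variables are set by
# the CI runner; shown as comments so importing this module stays side-effect free):
#   build_started()
#   try:
#       run_build_steps()          # hypothetical build driver
#       build_stopped()
#   except Exception:
#       build_failure()            # posts FAILED, then exits with status 1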
|
class Node(object):
def __init__(self,val:int):
self.left,self.right = None,None
self.val = val
def traverse(root:Node, res:list,nd:Node,cur:list):
if root is None or len(res) == 1:
return
elif root is nd:
res.append(cur[:])
else:
cur.append(root)
traverse(root.left,res,nd,cur[:])
traverse(root.right,res,nd,cur[:])
def find_path(nd:Node,root:Node) -> list:
res = []
traverse(root,res,nd,[])
return res[0]
def find_paths(nds:set,root:Node) ->list:
    res = []
    traverses(root,res,nds,[])
    return res
def traverses(root:Node, res:list,nds:set,cur:list):
if root is None or len(res) == len(nds):
return
elif root in nds:
res.append(cur[:])
else:
cur.append(root)
traverses(root.left,res,nds,cur[:])
traverses(root.right,res,nds,cur[:])
'''
Searching for both parents at the same time (the commented-out `par` below)
is only meaningful when nd1 and nd2 are on the same level, so it was dropped.
'''
# def par(nd1:Node,nd2:Node,root:Node):
# s = []
# res = []
# while len(s) or root is not None:
# while root is not None:
# s.append(root)
# root = root.left
#
# root = s.pop()
# if nd1 in (root.left,root.right):
# res.append(root)
# print(nd1.val)
# if len(res) == 2:
# return tuple(res)
# if nd2 in (root.left,root.right):
# res.append(root)
# print(nd2.val)
# if len(res) == 2:
# return tuple(res)
# root = root.right
def lca(nd1:Node,nd2:Node,root:Node) ->int:
if nd2 in (nd1.left,nd1.right):
return nd1.val
elif nd1 in (nd2.left,nd2.right):
return nd2.val
elif nd1 is root:
return nd1.val
elif nd2 is root:
return nd2.val
else:
path1 = find_path(nd1,root)
path2 = find_path(nd2,root)
path1,path2 = (path1,path2) if len(path1) <= len(path2) else (path2,path1)
while len(path1):
node = path1.pop()
if node in path2:
return node.val
rt = Node(1)
left = Node(2)
right = Node(3)
rt.left = left
rt.right = right
print(lca(rt.left,rt.right,rt))
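# prints 1: the root is the lowest common ancestor of its two children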
|
import os
def parseFile(filename, outfile, header, stopline="\n"):
f=open(filename, 'r')
g=open(outfile, 'w')
line=f.readline()
start=False
while line!="":
if header in line:
g.write("\t".join(line.strip("\n").split()))
start=True
elif line==stopline:
start=False
elif start:
g.write("\n"+"\t".join(line.strip("\n").split()))
line=f.readline()
f.close()
g.close()
dir_name="/Users/cmelton/Documents/Lab/SnyderLab/CancerPipeline/RecalDataAnalysis/"
for file_name in os.listdir(dir_name):
if ".grp" in file_name:
parseFile(os.path.join(dir_name, file_name),
os.path.join(dir_name, file_name+".RecalTable2"),
"#:GATKTable:RecalTable2:") |
import pytest
import requests
class TestEventsUserScope(object):
env_id = "5"
account_id = "1"
# test create
def test_events_create(self, api):
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
description="test event",
id="create"
))
return create_resp.box().data
def test_role_events_create_id_duplicate(self, api):
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
id="duplicate"
))
name_duplicate = create_resp.json()['data']['id']
exc_message = f"'Event.id' ({name_duplicate}) already exists in the scope (Environment)."
with pytest.raises(requests.exceptions.HTTPError) as err:
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
id=name_duplicate
))
assert err.value.response.status_code == 409
assert exc_message in err.value.response.text
def test_role_events_create_id_invalid(self, api):
exc_message = "(create events) is invalid. Identifier has to match the pattern "
with pytest.raises(requests.exceptions.HTTPError) as err:
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
id="create events"
))
assert err.value.response.status_code == 400
assert exc_message in err.value.response.text
# test list
def test_events_list(self, api):
resp = api.list(
"/api/v1beta0/user/envId/events/",
params=dict(
envId=self.env_id))
assert resp.json()['data'][0]['id']
def test_events_list_invalid_envId(self, api):
exc_message = "Invalid environment."
with pytest.raises(requests.exceptions.HTTPError) as err:
resp = api.list(
"/api/v1beta0/user/envId/events/",
params=dict(
envId=4))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
# test get
def test_events_get(self, api):
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
description="test event",
id="events"
))
id_events = create_resp.json()['data']['id']
get_resp = api.get(
"/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=id_events))
assert get_resp.json()['data']['id'] == id_events
def test_events_get_invalid_eventId(self, api):
exc_message = "(999999999) either was not found or isn't in the current scope (Environment)."
with pytest.raises(requests.exceptions.HTTPError) as err:
resp = api.list(
"/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=999999999))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
# test delete
def test_events_delete(self, api):
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
id="delete"
))
id_events = create_resp.json()['data']['id']
delete_resp = api.delete("/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=id_events))
with pytest.raises(requests.exceptions.HTTPError) as err:
get_resp = api.get("/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=id_events))
assert err.value.response.status_code == 404
errors = err.value.response.json()['errors']
assert errors[0]['code'] == "ObjectNotFound"
def test_events_delete_invalid_eventid(self, api):
exc_message = "(999999999999) either was not found or isn't in the current scope (Environment)."
with pytest.raises(requests.exceptions.HTTPError) as err:
delete_resp = api.delete("/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=999999999999))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
errors = err.value.response.json()['errors']
assert errors[0]['code'] == "ObjectNotFound"
# test edit
def test_events_edit(self, api):
create_resp = api.create("/api/v1beta0/user/envId/events/",
params=dict(envId=self.env_id),
body=dict(
description="exensts",
id="edit"
))
id_events = create_resp.json()['data']['id']
edit_resp = api.edit("/api/v1beta0/user/envId/events/eventId/",
params=dict(
envId=self.env_id,
eventId=id_events),
body=dict(
id=id_events,
description="test"
))
assert edit_resp.json()['data']['description'] == "test"
class TestEventsAccountScope(object):
account_id = "1"
# test create
def test_events_create(self, api):
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
description="test event",
id="creatac"
))
return create_resp.box().data
def test_role_events_create_id_duplicate(self, api):
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
id="duplicateid1"
))
name_duplicate = create_resp.json()['data']['id']
exc_message = f"'Event.id' ({name_duplicate}) already exists in the scope (Account)."
with pytest.raises(requests.exceptions.HTTPError) as err:
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
id=name_duplicate
))
assert err.value.response.status_code == 409
assert exc_message in err.value.response.text
def test_role_events_create_id_invalid(self, api):
exc_message = "(create events) is invalid. Identifier has to match the pattern "
with pytest.raises(requests.exceptions.HTTPError) as err:
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
id="create events"
))
assert err.value.response.status_code == 400
assert exc_message in err.value.response.text
# test list
def test_events_list(self, api):
resp = api.list(
"/api/v1beta0/account/accountId/events/",
params=dict(
accountId=self.account_id))
assert resp.json()['data'][0]['id']
def test_events_list_invalid_accountId(self, api):
exc_message = "Invalid account."
with pytest.raises(requests.exceptions.HTTPError) as err:
resp = api.list(
"/api/v1beta0/account/accountId/events/",
params=dict(
accountId=9999999999999))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
# test get
def test_events_get(self, api):
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
description="test eventac",
id="eventac"
))
id_events = create_resp.json()['data']['id']
get_resp = api.get(
"/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=id_events))
assert get_resp.json()['data']['id'] == id_events
def test_events_get_invalid_accountId(self, api):
exc_message = "(999999999) either was not found or isn't in the current scope (Account)."
with pytest.raises(requests.exceptions.HTTPError) as err:
resp = api.get(
"/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=999999999))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
# test delete
def test_events_delete(self, api):
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
id="delete"
))
id_events = create_resp.json()['data']['id']
delete_resp = api.delete("/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=id_events))
with pytest.raises(requests.exceptions.HTTPError) as err:
get_resp = api.get("/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=id_events))
assert err.value.response.status_code == 404
errors = err.value.response.json()['errors']
assert errors[0]['code'] == "ObjectNotFound"
def test_events_delete_invalid_eventid(self, api):
exc_message = "(999999999999) either was not found or isn't in the current scope (Account)."
with pytest.raises(requests.exceptions.HTTPError) as err:
delete_resp = api.delete("/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=999999999999))
assert err.value.response.status_code == 404
assert exc_message in err.value.response.text
errors = err.value.response.json()['errors']
assert errors[0]['code'] == "ObjectNotFound"
# test edit
def test_events_edit(self, api):
create_resp = api.create("/api/v1beta0/account/accountId/events/",
params=dict(accountId=self.account_id),
body=dict(
description="exensts",
id="editac"
))
id_events = create_resp.json()['data']['id']
edit_resp = api.edit("/api/v1beta0/account/accountId/events/eventId/",
params=dict(
accountId=self.account_id,
eventId=id_events),
body=dict(
id=id_events,
description="test"
))
assert edit_resp.json()['data']['description'] == "test"
|
# Generated by Django 2.2 on 2020-04-02 06:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('hello', '0007_auto_20200401_1643'),
]
operations = [
migrations.AlterField(
model_name='user',
name='id',
field=models.AutoField(auto_created=True, primary_key=True, serialize=False),
),
]
|
from django.db import models
import re
# Create your models here.
class UserManager(models.Manager):
def validator_field(self, postData):
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
JUST_LETTERS = re.compile(r'^[a-zA-Z.]+$')
PASSWORD_REGEX = re.compile(r'^(?=\w*\d)(?=\w*[A-Z])(?=\w*[a-z])\S{8,16}$')
errors = {}
        if User.objects.filter(email=postData['email']).exists():
            errors['email_exists'] = "Email already registered"
        else:
            if len(postData['nombre'].strip()) < 2 or len(postData['nombre'].strip()) > 30:
                errors['nombre_len'] = "Name must be between 2 and 30 characters"
            if len(postData['alias'].strip()) < 2 or len(postData['alias'].strip()) > 15:
                errors['alias_len'] = "Alias must be between 2 and 15 characters"
            #if not JUST_LETTERS.match(postData['nombre']) or not JUST_LETTERS.match(postData['alias']):
            #    errors['just_letters'] = "Only letters are allowed in the name and alias"
            if not EMAIL_REGEX.match(postData['email']):
                errors['email'] = "Invalid email format"
            if not PASSWORD_REGEX.match(postData['password']):
                errors['password_format'] = "Invalid password format"
            if postData['password'] != postData['password_confirm']:
                errors['password_confirm'] = "Passwords do not match"
return errors
class User(models.Model):
nombre = models.CharField(max_length=100)
alias = models.CharField(max_length=15, unique=True)
email = models.EmailField(max_length=100, unique=True)
password = models.CharField(max_length=250)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
class Meta:
verbose_name = "usuario"
verbose_name_plural = "usuarios"
ordering = ["alias",]
def __str__(self):
return self.nombre
def __repr__(self):
return self.alias + " " + self.nombre
|
# -*- coding: utf-8 -*-
# Application: Vimba - CMS
# Module: www
# Copyright (c) 2010 Vimba inc. All rights reserved.
# Created by Francois Lebel on 30-05-2010.
from django.db import models
from vcms.www.fields import StatusField
class ContainerWidgetsManager(models.Manager):
def get_all(self, page):
return self.order_by('relative_position').filter(page=page)
def get_widgets(self, page, container):
return self.get_all(page=page).filter(container=container)
def get_published_widget(self, page, container):
return self.get_all(page=page).filter(container=container).filter(status=StatusField.PUBLISHED)
def get_page_for_widget(self, widget):
containers = self.filter(widget_id=widget.id)
if containers:
return containers[0].page
return None
class DashboardElementManager(models.Manager):
def get_PublishedAll(self):
return self.filter(published=True)
def get_Published(self, current_page):
return self.filter(published=True).filter(page=current_page)
|
#!/usr/bin/env python3
# Python 3.6
# Import the Halite SDK, which will let you interact with the game.
import hlt
import pickle
# This library contains constant values.
from hlt import constants
# This library contains direction metadata to better interface with the game.
from hlt.positionals import Direction, Position
from hlt.game_map import MapCell
# heap
from heapq import heappush, heappop, merge
# This library allows you to generate random numbers.
import random
from collections import deque
# Logging allows you to save messages for yourself. This is required because the regular STDOUT
# (print statements) are reserved for the engine-bot communication.
import logging
import time
import sys
import os
from pyclustering.cluster.kmedians import kmedians
from pyclustering.cluster.center_initializer import kmeans_plusplus_initializer
from pyclustering.utils.metric import distance_metric, type_metric
import numpy as np
from math import ceil
from copy import deepcopy
# stderr = sys.stderr
# sys.stderr = open(os.devnull, 'w')
dropoff_clf = pickle.load(open('mlp.sav', 'rb'))
# This game object contains the initial game state.
game = hlt.Game()
# At this point "game" variable is populated with initial map data.
# This is a good place to do computationally expensive start-up pre-processing.
import bot.GlobalConstants as GC
#constantFile = f"{len(game.players.keys())}P{game.game_map.width}.json"
constantFile = "4P64.json" # just default for now.
GC.load_global_constants(constantFile)
logging.info(f"Constants {constantFile} loaded!")
from bot.ClusterProcessor import ClusterProcessor
from bot.DestinationProcessor import DestinationProcessor
from bot.GlobalFunctions import GlobalFunctions
from bot.GlobalVariablesSingleton import GlobalVariablesSingleton
from bot.MoveProcessor import MoveProcessor
from bot.StateMachine import StateMachine
game.ready("v71")
class main():
def __init__(self, game):
self.game = game
self.game_map = game.game_map
self.me = game.me
GV = GlobalVariablesSingleton(game)
self.GF = GlobalFunctions(self.game)
self.ENABLE_BACKUP = GV.ENABLE_BACKUP
self.ENABLE_COMBAT = GV.ENABLE_COMBAT
self.ENABLE_INSPIRE = GV.ENABLE_INSPIRE
self.ship_state = GV.ship_state
self.ship_path = GV.ship_path
self.ship_dest = GV.ship_dest
self.previous_position = GV.previous_position
self.previous_state = GV.previous_state
self.fleet_leader = GV.fleet_leader
self.ship_obj = GV.ship_obj
self.NR_OF_PLAYERS = GV.NR_OF_PLAYERS
self.crashed_positions = []
self.cluster_centers = []
self.clusters_determined = False
self.crashed_ship_positions = []
self.dropoff_last_built = 0
def mainloop(self):
while True:
self.game.update_frame(self.ENABLE_INSPIRE)
self.game_map = self.game.game_map
self.me = self.game.me
GlobalVariablesSingleton.getInstance().turn_start = time.time()
self.clear_dictionaries() # of crashed or transformed ships
command_queue = []
if self.game.turn_number == 1:
self.game_map.HALITE_STOP = GC.INITIAL_HALITE_STOP
self.game_map.c = [GC.A, GC.B, GC.C, GC.D,
GC.E, GC.F] # set the heuristic constants
for s in self.me.get_ships():
if s.id not in self.ship_state:
self.ship_state[s.id] = "exploring"
GlobalVariablesSingleton.getInstance().ENABLE_COMBAT = not self.have_less_ships(
0.8) and self.NR_OF_PLAYERS == 2
self.ENABLE_COMBAT = GlobalVariablesSingleton.getInstance().ENABLE_COMBAT
GlobalVariablesSingleton.getInstance().ENABLE_BACKUP = self.ENABLE_COMBAT
self.ENABLE_BACKUP = GlobalVariablesSingleton.getInstance().ENABLE_BACKUP
GlobalVariablesSingleton.getInstance().ENABLE_INSPIRE = not self.have_less_ships(0.9)
self.ENABLE_INSPIRE = GlobalVariablesSingleton.getInstance().ENABLE_INSPIRE
# initialize shipyard halite, inspiring stuff and other
self.game_map.init_map(self.me, list(
self.game.players.values()), self.ENABLE_BACKUP)
logging.info(f"Map initialized.{self.GF.time_left()} left")
if self.game.turn_number == 1:
TOTAL_MAP_HALITE = self.game_map.total_halite
self.game_map.prcntg_halite_left = self.game_map.total_halite / TOTAL_MAP_HALITE
# if clusters_determined and not cluster_centers:
if self.game.turn_number >= GC.SPAWN_TURN:
self.game_map.HALITE_STOP = self.game_map.prcntg_halite_left * GC.INITIAL_HALITE_STOP
if self.crashed_ship_positions and self.game.turn_number < GC.CRASH_TURN and self.ENABLE_BACKUP:
self.process_backup_sending()
# Dijkstra the graph based on all dropoffs
self.game_map.create_graph(self.GF.get_dropoff_positions())
if self.game.turn_number == GC.DETERMINE_CLUSTER_TURN:
self.clusters_determined = True
self.cluster_centers = ClusterProcessor(
game).clusters_with_classifier()
if self.game.turn_number == GC.CRASH_SELECTION_TURN:
GC.CRASH_TURN = self.select_crash_turn()
self.return_percentage = GC.BIG_PERCENTAGE if self.game.turn_number < GC.PERCENTAGE_SWITCH else GC.SMALL_PERCENTAGE
if self.game_map.prcntg_halite_left < 0.15 and self.game_map.percentage_occupied >= GC.BUSY_PERCENTAGE:
self.return_percentage = GC.BUSY_RETURN_AMOUNT / constants.MAX_HALITE
if self.should_build():
self.process_building()
# has_moved ID->True/False, moved or not
# ships priority queue of (importance, ship)
ships, self.has_moved = self.ship_priority_q()
# True if a ship moves into the shipyard this turn
move_into_shipyard = False
            # whether a dropoff has been built this turn, so that we
            # don't spend too much halite
dropoff_built = False
SM = StateMachine(
self.game, self.return_percentage)
MP = MoveProcessor(self.game, self.has_moved, command_queue, SM)
while ships: # go through all ships
ship = heappop(ships)[1]
if self.has_moved[ship.id]:
continue
if self.GF.time_left() < 0.15:
logging.info("STANDING STILL TOO SLOW")
command_queue.append(ship.stay_still())
self.ship_state[ship.id] = "collecting"
continue
                if ship.id not in self.previous_position:  # if new ship, initialize its previous position
self.previous_position[ship.id] = self.me.shipyard.position
# setup state
# if ship hasnt received a destination yet
                if ship.id not in self.ship_dest or ship.id not in self.ship_state:
DestinationProcessor(self.game).find_new_destination(
self.game_map.halite_priority, ship)
self.previous_state[ship.id] = "exploring"
self.ship_state[ship.id] = "exploring" # explore
# transition
SM.state_transition(ship)
# logging.info("SHIP {}, STATE {}, DESTINATION {}".format(
# ship.id, self.ship_state[ship.id], self.ship_dest[ship.id]))
# if ship is dropoff builder
if self.is_builder(ship):
                    # if enough halite and haven't built a dropoff this turn
if (ship.halite_amount + self.me.halite_amount) >= constants.DROPOFF_COST and not dropoff_built:
command_queue.append(ship.make_dropoff())
self.dropoff_last_built = game.turn_number
GC.SPAWN_TURN += 10
dropoff_built = True
                    else:  # can't build
# wait in the position
self.ship_state[ship.id] = "waiting"
self.game_map[ship.position].mark_unsafe(ship)
command_queue.append(ship.move(Direction.Still))
else: # not associated with building a dropoff, so move regularly
move = MP.produce_move(ship)
if move is not None:
MP.move_ship(ship, move)
self.clear_dictionaries() # of crashed or transformed ships
# This ship has made a move
self.has_moved[ship.id] = True
surrounded_shipyard = self.game_map.is_surrounded(
self.me.shipyard.position)
total_ships = max(1, sum([self.get_ship_amount(pID) for pID in self.game.players.keys()]))
halite_per_ship = self.game_map.total_halite / total_ships
if self.NR_OF_PLAYERS == 4:
if self.game_map.width <= 40:
VALUE_PER_SHIP = 1
else:
VALUE_PER_SHIP = 1.5
else:
VALUE_PER_SHIP = 0.8
if not dropoff_built and self.me.halite_amount >= constants.SHIP_COST and self.max_enemy_ships() + 6 > len(
self.me.get_ships()) and self.game.turn_number <= GC.SPAWN_TURN\
and halite_per_ship >= VALUE_PER_SHIP * constants.SHIP_COST and self.game_map.prcntg_halite_left > (1 - 0.65) and \
not (self.game_map[
self.me.shipyard].is_occupied or surrounded_shipyard or "waiting" in self.ship_state.values()):
if not ("build" in self.ship_state.values() and self.me.halite_amount <= (
constants.SHIP_COST + constants.DROPOFF_COST)):
command_queue.append(self.me.shipyard.spawn())
logging.info(self.GF.time_left())
self.game.end_turn(command_queue)
def max_enemy_ships(self):
if self.NR_OF_PLAYERS not in [2, 4]: # for testing solo games
return 100000
ships = 0
for player_id in self.game.players.keys():
if not player_id == self.me.id:
ships = max(ships, self.get_ship_amount(player_id))
return ships
def get_ship_amount(self, playerID):
return len(self.game.players[playerID].get_ships())
def process_building(self):
dropoff_val, dropoff_pos = self.cluster_centers.pop(0) # remove from list
# sends ships to position where closest will build dropoff
dropoff_pos = self.game_map.normalize(dropoff_pos)
fleet = self.get_fleet(dropoff_pos, 1)
if fleet:
closest_ship = fleet.pop(0) # remove and get closest ship
# will build dropoff
self.GF.state_switch(closest_ship.id, "build")
# if dropoffs position already has a structure (e.g. other dropoff) or
# somebody is going there already
if self.game_map[dropoff_pos].has_structure or dropoff_pos in self.ship_dest.values():
# bfs for closer valid unoccupied position
dropoff_pos = self.GF.bfs_unoccupied(dropoff_pos)
self.ship_dest[closest_ship.id] = dropoff_pos # go to the dropoff
if self.game_map.width >= GC.EXTRA_FLEET_MAP_SIZE:
self.send_ships(dropoff_pos, int(
GC.FLEET_SIZE / 2), "fleet", leader=closest_ship)
else: # if builder not available
self.cluster_centers.insert(0, (dropoff_val, dropoff_pos))
def send_ships(self, pos, ship_amount, new_state, condition=None, leader=None):
'''sends a fleet of size ship_amount to explore around pos
new_state : state to switch the fleet members
condition : boolean function that qualifies a ship to send'''
fleet = self.get_fleet(
self.game_map.normalize(pos), ship_amount, condition)
# for rest of the fleet to explore
h = self.GF.halite_priority_q(pos, GC.SHIP_SCAN_AREA)
for fleet_ship in fleet: # for other fleet members
if len(h) == 0:
break
if fleet_ship.id not in self.previous_state.keys(): # if new ship
self.previous_state[fleet_ship.id] = "exploring"
self.has_moved[fleet_ship.id] = False
# explore in area of the new dropoff
if leader is not None:
self.fleet_leader[fleet_ship.id] = leader
self.GF.state_switch(fleet_ship.id, new_state)
DestinationProcessor(self.game).find_new_destination(h, fleet_ship)
def get_fleet(self, position, fleet_size, condition=None):
''' returns list of fleet_size amount
of ships closest to the position'''
if condition is None:
condition = self.is_fleet
distances = []
for s in self.me.get_ships():
if condition(s) and not s.position == position:
distances.append(
(self.game_map.calculate_distance(position, s.position), s))
distances.sort(key=lambda x: x[0])
fleet_size = min(len(distances), fleet_size)
return [t[1] for t in distances[:fleet_size]]
def have_less_ships(self, ratio):
for player in self.game.players.values():
if len(self.me.get_ships()) < ratio * len(player.get_ships()):
return True
return False
def should_build(self):
if self.game_map.prcntg_halite_left <= 0.25\
or self.game.turn_number >= GC.SPAWN_TURN\
or len(self.me.get_dropoffs()) > GC.MAX_CLUSTERS\
or self.any_builders()\
or self.game.turn_number <= self.dropoff_last_built + 10\
or not self.clusters_determined:
return False
# calculate amount of ships going to each dropoff
dropoff_count = {} # dropoff_pos -> ships going there
for d in self.GF.get_dropoff_positions():
dropoff_count[d] = 0
for s in self.me.get_ships():
if self.ship_state[s.id] == "returning" and self.ship_dest[s.id] in self.GF.get_dropoff_positions():
dropoff_count[self.ship_dest[s.id]] += 1
elif self.ship_state[s.id] == "collecting" and self.game_map[s.position].dijkstra_dest in self.GF.get_dropoff_positions():
dropoff_count[self.game_map[s.position].dijkstra_dest] += 1
# if there is a dropoff with many ships assigned to it this is true
is_crowded_dropoff = max(dropoff_count.values()) >= 25
        # build another dropoff when one is crowded and nobody is already
        # building (in order to not build too many at once)
        if is_crowded_dropoff and not self.any_builders():
            # a single dropoff is already serving 25+ ships
if self.cluster_centers: # there is already a dropoff position
return True
# there is no good dropoff position yet, make one
self.game_map.set_close_friendly_ships(self.me)
pos = self.game_map.get_most_dense_dropoff_position(
self.GF.get_dropoff_positions(), int(0.2 * self.game_map.width))
if pos is not None:
# fake 10000 halite for new needed cluster
self.cluster_centers.append((10000, pos))
return True
else:
return False
return self.clusters_determined and self.game.turn_number >= self.dropoff_last_built + 15 and self.cluster_centers \
and len(self.me.get_ships()) > (len(self.GF.get_dropoff_positions()) + 1) * GC.FLEET_SIZE \
and self.fleet_availability() >= 1.5 * GC.FLEET_SIZE and not self.any_builders()
def any_builders(self):
return "waiting" in self.ship_state.values() or "build" in self.ship_state.values()
def fleet_availability(self):
''' returns how many ships are available atm'''
amount = 0
for s in self.me.get_ships():
if self.is_fleet(s):
amount += 1
if amount >= len(list(self.me.get_ships())) * 0.9:
amount /= 2
return int(amount)
def is_fleet(self, ship):
''' returns if a ship is good for adding it to a fleet '''
return self.me.has_ship(ship.id) and (
ship.id not in self.ship_state or not (
self.ship_state[ship.id] in ["fleet", "waiting", "returning", "build"]))
def is_builder(self, ship):
''' checks if this ship is a builder '''
return self.ship_state[ship.id] == "waiting" or (
self.ship_state[ship.id] == "build" and self.ship_dest[ship.id] == ship.position)
def select_crash_turn(self):
'''selects turn when to crash'''
distance = 0
for ship in self.me.get_ships():
shipyard = self.GF.get_shipyard(
ship.position) # its shipyard position
d = self.game_map.calculate_distance(shipyard, ship.position)
if d > distance: # get maximum distance away of shipyard
distance = d
crash_turn = constants.MAX_TURNS - distance - 5
# set the crash turn to be turn s.t. all ships make it
crash_turn = max(crash_turn, GC.CRASH_SELECTION_TURN)
return crash_turn
def ship_priority_q(self):
ships = [] # ship priority queue
has_moved = {}
for s in self.me.get_ships():
self.ship_obj[s.id] = s
has_moved[s.id] = False
if s.id in self.ship_state:
# get ships shipyard
shipyard = self.GF.get_shipyard(s.position)
                # importance: the lower the number, the bigger the importance
if s.position == shipyard:
importance = -10000
elif self.ship_state[s.id] in ["returning", "harikiri"]:
importance = round((self.game_map.width * -2) / self.game_map[
s.position].dijkstra_distance, 2)
elif self.ship_state[s.id] in ["exploring", "build", "backup", "fleet"]:
if s.id in self.ship_dest:
destination = self.ship_dest[s.id]
else:
destination = shipyard
importance = self.game_map.calculate_distance(
s.position, destination) * self.game_map.width + 1
else: # other
importance = self.game_map.calculate_distance(
s.position, shipyard) * self.game_map.width ** 2
else:
importance = -1000 # newly spawned ships max importance
heappush(ships, (importance, s))
return ships, has_moved
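    # (heappop returns the smallest importance first: ships sitting on a
    # shipyard (-10000), then newly spawned ships (-1000), then returning
    # ships nearest a dropoff, and finally explorers/builders by distance)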
def clear_dictionaries(self):
        # clear dictionaries of crashed ships
for ship_id in list(self.ship_dest.keys()):
if not self.me.has_ship(ship_id):
self.crashed_ship_positions.append(
self.ship_obj[ship_id].position)
del self.ship_dest[ship_id]
del self.ship_state[ship_id]
del self.previous_state[ship_id]
del self.previous_position[ship_id]
if ship_id in self.ship_path:
del self.ship_path[ship_id]
def add_crashed_position(self, pos):
""" adds a carshed position ot the crashed positions list '"""
neighbours = self.game_map.get_neighbours(self.game_map[pos])
h_amount = -1
distance_to_enemy_dropoff = self.GF.dist_to_enemy_doff(pos)
for n in neighbours:
h_amount = max(h_amount, n.halite_amount)
if h_amount > 800:
heappush(self.crashed_positions, (-1 * h_amount, pos))
def process_backup_sending(self):
""" Processes sending backup ships to a position
where a ship crashed previously """
to_remove = []
for pos in self.crashed_ship_positions:
if self.GF.dist_to_enemy_doff(pos) > 4:
self.add_crashed_position(pos)
to_remove.append(pos)
for s in to_remove: # remove the crashed positions
if s in self.crashed_ship_positions:
self.crashed_ship_positions.remove(s)
if self.crashed_positions: # if there are any crashed positions to process
hal, crashed_pos = heappop(self.crashed_positions) # get pos info
# if there are little enemies in that area
if self.game_map[crashed_pos].enemy_amount <= GC.UNSAFE_AREA and self.game_map[
crashed_pos].halite_amount >= constants.MAX_HALITE:
# send a backup fleet there
self.send_ships(crashed_pos, 2, "backup", StateMachine(
self.game, self.return_percentage).is_savior)
def is_savior(self, ship):
return self.me.has_ship(ship.id) and ship.halite_amount <= self.return_percentage * 0.5 * constants.MAX_HALITE \
and (ship.id not in self.ship_state or not (
self.ship_state[ship.id] in ["waiting", "returning", "build"]))
backuped_dropoffs = []
maingameloop = main(game)
maingameloop.mainloop()
|
from typing import Dict
from .constants import SUFFIX_TYPE
from wai.common.adams.imaging.locateobjects import LocatedObjects
def fix_labels(objects: LocatedObjects, mappings: Dict[str, str]):
"""
Fixes the labels in the parsed objects, using the specified mappings (old: new).
:param objects: The parsed objects.
:param mappings: The label mappings (old: new).
"""
# Process each object
for obj in objects:
# If the object doesn't have a label, skip it
if SUFFIX_TYPE not in obj.metadata:
continue
# Get the object's current label
label: str = obj.metadata[SUFFIX_TYPE]
# If there is a mapping for this label, change it
if label in mappings:
obj.metadata[SUFFIX_TYPE] = mappings[label]
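# Usage sketch (hypothetical labels): remap every "car" annotation to "vehicle",
# mutating the objects in place:
#   fix_labels(located_objects, {"car": "vehicle"})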
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-13 03:23
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Veiculos',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tipo_veiculo', models.CharField(blank=True, choices=[('O', 'Ônibus'), ('M', 'Microônibus'), ('C', 'Carro')], max_length=2)),
('placa', models.CharField(max_length=8, verbose_name='Placa do veículo')),
                ('estado', models.CharField(blank=True, choices=[('AC', 'Acre'), ('AL', 'Alagoas'), ('AP', 'Amapá'), ('BA', 'Bahia'), ('CE', 'Ceará'), ('DF', 'Distrito Federal'), ('ES', 'Espírito Santo'), ('GO', 'Goiás'), ('MA', 'Maranhão'), ('MG', 'Minas Gerais'), ('MS', 'Mato Grosso do Sul'), ('MT', 'Mato Grosso'), ('PA', 'Pará'), ('PB', 'Paraíba'), ('PE', 'Pernambuco'), ('PI', 'Piauí'), ('PR', 'Paraná'), ('RJ', 'Rio de Janeiro'), ('RN', 'Rio Grande do Norte'), ('RO', 'Rondônia'), ('RR', 'Roraima'), ('RS', 'Rio Grande do Sul'), ('SC', 'Santa Catarina'), ('SE', 'Sergipe'), ('SP', 'São Paulo'), ('TO', 'Tocantins')], max_length=2)),
('cidade', models.CharField(max_length=20, verbose_name='Cidade')),
('chassi', models.CharField(max_length=18, verbose_name='Chassi')),
('cor', models.CharField(max_length=11, verbose_name='Cor')),
('marca_modelo', models.CharField(max_length=20, verbose_name='Marca/Modelo')),
('situacao', models.CharField(blank=True, choices=[('D', 'Disponível'), ('R', 'Reservado'), ('E', 'Em manutenção'), ('A', 'Alugado')], max_length=2)),
],
),
]
|
# HANDLES ALL INTERACTION WITH SAVE FILE
class SaveSettings:
# READ INITIAL SAVE FILES
def __init__(self):
# CREATE BLANK LISTS
self.records_format = []
self.records_raw = []
self.records_names = []
# ACQUIRE ALL LINES IN TEXT FILE
with open("Sudoku Settings.txt", "r") as file:
self.txt_file = file.read().split("\n")
# CLEAR FILE HISTORY
def reset_settings(self):
# CREATE DEFAULT FILE LINES
blank_file = [
"HIGHLIGHT:#ffffc7",
"DIFFICULTY:EASY",
"ERRORS ON:1",
"RECORDS: EASY",
"RECORDS: MEDIUM",
"RECORDS: HARD",
"RECORDS: EXPERT",
"RECORDS: END OF LIST"
]
# SAVE DEFAULTS TO FILE
SaveSettings.list_to_file(blank_file)
# ACQUIRE ALL HIGH SCORES
def get_all_records(self):
# STARTING POSITIONS FOR EACH DIFFICULTY
index_0 = self.txt_file.index('RECORDS: EASY')
index_1 = self.txt_file.index('RECORDS: MEDIUM')
index_2 = self.txt_file.index('RECORDS: HARD')
index_3 = self.txt_file.index('RECORDS: EXPERT')
index_4 = self.txt_file.index('RECORDS: END OF LIST')
# FIND QUANTITIES OF SAVES FOR EACH DIFFICULTY
easy_count = (index_1 - index_0 - 1) // 2
medium_count = (index_2 - index_1 - 1) // 2
hard_count = (index_3 - index_2 - 1) // 2
expert_count = (index_4 - index_3 - 1) // 2
# CREATE 2D LIST OF HIGH SCORES - EASY, MEDIUM, HARD, EXPERT
results = [[], [], [], []]
for i in range(easy_count):
name = self.txt_file[index_0 + 1 + easy_count + i]
score = SaveSettings.format_time(self.txt_file[index_0 + 1 + i])
results[0].append(f" {i + 1}. {name} - {score}")
for i in range(medium_count):
name = self.txt_file[index_1 + 1 + medium_count + i]
score = SaveSettings.format_time(self.txt_file[index_1 + 1 + i])
results[1].append(f" {i + 1}. {name} - {score}")
for i in range(hard_count):
name = self.txt_file[index_2 + 1 + hard_count + i]
score = SaveSettings.format_time(self.txt_file[index_2 + 1 + i])
results[2].append(f" {i + 1}. {name} - {score}")
for i in range(expert_count):
name = self.txt_file[index_3 + 1 + expert_count + i]
score = SaveSettings.format_time(self.txt_file[index_3 + 1 + i])
results[3].append(f" {i + 1}. {name} - {score}")
# ADD MORE BLANK ROWS UNTIL TEN ROWS TAKEN
for diff in results:
if len(diff) < 10:
for i in range(10 - len(diff)):
diff.append("")
return results
# OVERWRITE A FILE WITH VARIABLE LIST
@staticmethod
def list_to_file(lines):
# WRITE TO TEXT FILE LINE BY LINE
with open("Sudoku Settings.txt", "w") as file:
for i, element in enumerate(lines):
file.write(element)
if i < len(lines) - 1:
file.write("\n")
# CHANGE HIGHLIGHT VALUE IN SAVE FILE
def update_highlight(self, color):
# FIND "HIGHLIGHT" IN TEXT FILE - OVERWRITE NEW COLOR
new_file = []
for line in self.txt_file:
index = line.find("HIGHLIGHT:")
if index != -1:
new_file.append("HIGHLIGHT:" + color)
else:
new_file.append(line)
# SAVE NEW TEXT LINES TO FILE
self.txt_file = new_file
self.list_to_file(self.txt_file)
# ACQUIRE HIGHLIGHT PREFERENCES
def get_highlight(self):
# FIND "HIGHLIGHT" LINE - RETURN END OF LINE
for line in self.txt_file:
index = line.find("HIGHLIGHT:")
if index != -1:
return line[10:]
# CHANGE ERROR RECOGNITION IN SAVE FILE
def update_error_recog(self, toggle):
# CONVERT TRUE / FALSE TO 1 / 0
new_file = []
show = 1 if toggle else 0
# FIND "ERRORS ON" IN TEXT FILE - OVERWRITE PREFERENCE
for line in self.txt_file:
index = line.find("ERRORS ON:")
if index != -1:
new_file.append("ERRORS ON:" + str(show))
else:
new_file.append(line)
# SAVE NEW TEXT LINES TO FILE
self.txt_file = new_file
self.list_to_file(self.txt_file)
# CHANGE DIFFICULTY VALUE IN SAVE FILE
def update_difficulty(self, difficulty):
# FORCE CUSTOM DIFFICULTY TO "EASY"
if difficulty == "CUSTOM":
difficulty = "EASY"
# FIND "DIFFICULTY" IN TEXT FILE - OVERWRITE NEW DIFFICULTY
new_file = []
for line in self.txt_file:
index = line.find("DIFFICULTY:")
if index != -1:
new_file.append("DIFFICULTY:" + difficulty)
else:
new_file.append(line)
# SAVE NEW TEXT LINES TO FILE
self.txt_file = new_file
self.list_to_file(self.txt_file)
# ACQUIRE DIFFICULTY PREFERENCE
def get_difficulty(self):
# FIND "DIFFICULTY" LINE - RETURN END OF LINE
for line in self.txt_file:
index = line.find("DIFFICULTY:")
if index != -1:
return line[11:]
# ACQUIRE ERROR-RECOGNITION PREFERENCE
    def get_error_recog(self):
        # FIND "ERRORS ON" LINE - RETURN TRUE IF SET TO 1
        for line in self.txt_file:
            if line.find("ERRORS ON:") != -1:
                return line[10:] == "1"
        return False
# EVALUATE LAST GAME FOR NEW HIGH SCORE
def check_record(self, diff, time):
# DISALLOW RECORD SAVING IF PLAYING CUSTOM GAME
if diff == "CUSTOM":
return False
# SET MAXIMUM RANKINGS - DEFAULT NO RECORD
max_rankings = 10
new_record = False
# LIST OF TOP RANKINGS AND NAMES
st_index = self.txt_file.index("RECORDS: " + diff) + 1
for i, line in enumerate(self.txt_file[st_index:], start=st_index):
if line.find("RECORDS: ") != -1:
end_index = i
break
# SET UP ARRAYS OF INTEREST
section_rankings = self.txt_file[st_index:end_index]
section_times = section_rankings[:len(section_rankings) // 2]
section_names = section_rankings[len(section_rankings) // 2:]
# STARTING FROM BLANK
if len(section_times) == 0:
self.records_format = [SaveSettings.format_time(time)]
self.records_raw = [str(time)]
self.records_names = ["*YOUR NAME*"]
self.pending_file = self.txt_file[:st_index] + self.records_raw + \
self.records_names + self.txt_file[end_index:]
# NEW HIGH SCORE
return True
# INSERT PLAYER NAME IF APPROVED
for i, entry in enumerate(section_times):
if time < int(entry):
new_record = True
section_times.insert(i, str(time))
section_names.insert(i, "*YOUR NAME*")
break
# APPEND PLAYER NAME TO END OF LIST
else:
if len(section_times) < max_rankings:
new_record = True
section_times.append(str(time))
section_names.append("*YOUR NAME*")
# TRIM RECORD LIST IF NEED BE
if len(section_times) > max_rankings:
section_times = section_times[:max_rankings]
section_names = section_names[:max_rankings]
# CREATE FORMATTED RECORD LIST
section_formatted = []
for entry in section_times:
section_formatted.append(SaveSettings.format_time(int(entry)))
self.records_format = section_formatted
self.records_raw = section_times
self.records_names = section_names
# SAVE NEW FILE PENDING CONFIRMATION
self.pending_file = self.txt_file[:st_index] + section_times + \
section_names + self.txt_file[end_index:]
# RETURN IF NEW RECORD BOOLEAN
return new_record
# CONFIRM HIGH SCORE WITH PLAYER-SUBMITTED NAME
def update_record(self, my_name, diff):
# UPDATE WITH PLAYER NAME
for i, line in enumerate(self.pending_file):
if line.find("*YOUR NAME*") != -1:
self.pending_file[i] = my_name
# CONFIRM SAVE PENDING FILE
self.txt_file = []
for line in self.pending_file:
self.txt_file.append(line)
# WRITE FILE LINES TO TEXT FILE
SaveSettings.list_to_file(self.txt_file)
# FORMAT TOTAL SECONDS AS "MM:SS"
    @staticmethod
    def format_time(seconds):
        # CALCULATE MINUTES AND REMAINING SECONDS
        seconds = int(seconds)
        sec = seconds % 60
        mins = seconds // 60
        # PAD EACH FIELD TO TWO DIGITS
        return f"{mins:02d}:{sec:02d}"
|
import numpy
import warnings
def quad3d(xx, yy, zz, hh):
x = xx.flatten()
y = yy.flatten()
z = zz.flatten()
h = hh.flatten()
# Form design matrix.
A = numpy.empty([ len(z), 10 ])
A[:,0] = 1.0
A[:,1] = x
A[:,2] = x * x
A[:,3] = y
A[:,4] = y * y
A[:,5] = z
A[:,6] = z * z
A[:,7] = x * y
A[:,8] = x * z
A[:,9] = y * z
c, chisq, rank, s = numpy.linalg.lstsq(A, h, rcond=-1)
# Location of extremum by solving matrix equation. This is a bit
# lazy, probably should do the analytic solution, but I doubt it
# would make much difference.
M = numpy.array([[2*c[2], c[7], c[8]],
[ c[7], 2*c[4], c[9]],
[ c[8], c[9], 2*c[6]]])
b = numpy.array([-c[1], -c[3], -c[5]])
v = numpy.linalg.solve(M, b)
xbest, ybest, zbest = v
if xbest < numpy.min(x) or xbest > numpy.max(x):
warnings.warn("x out of range during interpolation")
if ybest < numpy.min(y) or ybest > numpy.max(y):
warnings.warn("y out of range during interpolation")
if zbest < numpy.min(z) or zbest > numpy.max(z):
warnings.warn("z out of range during interpolation")
# plt.imshow(zz,
# interpolation='none',
# extent=[x[0], x[-1], y[-1], y[0]],
# cmap='Greys')
# plt.axvline(xbest)
# plt.axhline(ybest)
# plt.show()
#
# zmod = c[0] + (c[2]*xx + c[1]) * xx + (c[5]*xx + c[4]*yy + c[3]) * yy
# plt.imshow(zmod,
# interpolation='none',
# extent=[x[0], x[-1], y[-1], y[0]],
# cmap='Greys')
# plt.axvline(xbest)
# plt.axhline(ybest)
# plt.show()
hbest = c[0] + (c[2]*xbest + c[1]) * xbest + (c[7]*xbest + c[4]*ybest + c[3]) * ybest + (c[8]*xbest + c[9]*ybest + c[6]*zbest + c[5]) * zbest
return xbest, ybest, zbest, hbest
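
# Quick self-test sketch (illustrative addition, not from the original source):
# sample a quadratic with a known maximum at (1, 2, 3) and check that quad3d
# recovers it.
if __name__ == "__main__":
    gx = numpy.linspace(0, 2, 5)
    gy = numpy.linspace(1, 3, 5)
    gz = numpy.linspace(2, 4, 5)
    xx, yy, zz = numpy.meshgrid(gx, gy, gz, indexing="ij")
    hh = 10.0 - (xx - 1)**2 - (yy - 2)**2 - (zz - 3)**2
    print(quad3d(xx, yy, zz, hh))  # expect approximately (1.0, 2.0, 3.0, 10.0)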
|
def int_to_en(num):
    # Supports integers 0 through 10 only; any other value raises KeyError.
    d = {0: 'Zero', 1: 'One', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five',
         6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine', 10: 'Ten'}
    return d[num]
num = int(input())
print(int_to_en(num))
|
import os
import sys
from robot.result import ExecutionResult
def parse_output_xml(output_xml):
result = ExecutionResult(output_xml)
all_stat = result.statistics.total.all
return all_stat.passed, all_stat.failed
if __name__ == '__main__':
if len(sys.argv) < 2:
output_xml = 'output.xml'
else:
output_xml = sys.argv[1]
if not os.path.exists(output_xml):
raise RuntimeError('%s does not exist!' % output_xml)
    pass_cnt, failed_cnt = parse_output_xml(output_xml)
    total = pass_cnt + failed_cnt
    # Guard against an empty run to avoid ZeroDivisionError
    pass_rate = pass_cnt * 100 // total if total else 0
    print('%d,%d,%d' % (pass_cnt, failed_cnt, pass_rate))
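# Example (hypothetical counts and script name): with 10 passed and 2 failed
# tests, `python parse_output_xml.py output.xml` prints "10,2,83"
# (83 = 10 * 100 // 12).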
|
#!/usr/bin/env python
# coding: utf-8
# ## (a) **Import/Install Required Libraries**
# In[199]:
get_ipython().system('pip3 install fuzzywuzzy')
get_ipython().system('pip3 install pygal')
get_ipython().system('pip3 install keplergl')
# In[225]:
# Import required libraries
import pandas as pd
import numpy as np
import datetime
import unicodedata
import warnings
import os
from IPython.display import Image
pd.options.display.max_columns = None
pd.set_option('display.max_rows', 50000)
warnings.filterwarnings('ignore')
# Useful libraries to check inconsistency data entries
import fuzzywuzzy
from fuzzywuzzy import process
import chardet
# In[226]:
import cufflinks as cf
from plotly.offline import plot,iplot
import plotly.graph_objects as go
pd.options.plotting.backend = "plotly"
import re
import plotly.express as px
cf.go_offline()
# In[227]:
import sys
sys.path.append('.')  # make the helper module in the working directory importable
from Examen1_libreria import *
# In[228]:
# Read dataset => 'dataset_examen_1.csv'
path = '/home/roman/Documents/Diplomado en Ciencia de Datos - UNAM FES Acatlán/Módulo 1/Examen/Examen 1/dataset_examen_1.csv'
df = pd.read_csv(path)
print(df.shape)
df.head(5)
# # (b) **Part 1:**
# **PART I**
# * Variable Tagging
#
# * Data Quality: Duplicate Records, Completeness, Consistency
#
# * Text Cleaning
# ### **Variable Tagging**
# * id : record identifier
# * ao hechos : year in which the event occurred
# * mes hechos : month in which the event occurred
# * fecha hechos : moment at which the event occurred
# * delito : description of the event (crime)
# * categoria delito : category of the event
# * fiscalia : prosecutor's office where the crime was reported
# * agencia : agency
# * unidad investigacion : unit in charge of investigating the crime
# * colonia hechos : neighborhood where the crime occurred
# * alcaldia hechos : borough of Mexico City (CDMX) where the crime occurred
# * fecha inicio : moment at which the event was reported
# * mes inicio : month in which the event was reported
# * ao inicio : year in which the event was reported
# * calle hechos : name of the street where the crime occurred
# * calle hechos2 : secondary street reference for where the crime occurred
# * longitud : longitude where the crime occurred
# In[6]:
# Check columns names to identify them
df.columns
# In[7]:
# In the step above, 'Unnamed Columns' appeared => inspect and delete them
df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
print(df.shape)
df.head(5)
# In[8]:
# Validate 'Unnamed' columns were deleted
df.columns
# In[9]:
# Prefixes for variable types
# 'c_' --> Numeric Variables: Discrete & Continuous
# 'v_' --> Categorical Variables
# 'd_' --> Date Type Variables
# 't_' --> Text Type Variables
c_feats = ['longitud','latitud','geopoint']
v_feats = ['ao_hechos','mes_hechos','agencia','unidad_investigacion','mes_inicio','ao_inicio','id']
t_feats = ['delito','categoria_delito','fiscalia','colonia_hechos','alcaldia_hechos','calle_hechos',
'calle_hechos2']
d_feats = ['fecha_hechos','fecha_inicio']
# In[10]:
# Rename variables in each list according to their type
c_feats_new = ['c_' + x for x in c_feats]
v_feats_new = ['v_' + x for x in v_feats]
d_feats_new = ['d_' + x for x in d_feats]
t_feats_new = ['t_' + x for x in t_feats]
# In[11]:
# Verify the new variable headers were renamed correctly
print(list(v_feats))
print(list(v_feats_new))
# In[12]:
# Rename columns according to their type of variable
df.rename(columns=dict(zip(d_feats,d_feats_new)),inplace=True)
df.rename(columns=dict(zip(v_feats,v_feats_new)),inplace=True)
df.rename(columns=dict(zip(t_feats,t_feats_new)),inplace=True)
df.rename(columns=dict(zip(c_feats,c_feats_new)),inplace=True)
print(df.shape)
df.head(3)
# ### **Check for Duplicate Records**
# In[13]:
df.duplicated()
# In[14]:
# Get a subset with all duplicate records using ".duplicated"
df[df.duplicated()]
# In[15]:
df.drop_duplicates(inplace = True)
# In[16]:
df.duplicated().sum()
# In[17]:
# Create a new index and delete the previous one
df.reset_index(drop = True, inplace = True)
# In[18]:
# Verify dataframe shape after removing duplicates
print(df.shape)
df.head(5)
# In[19]:
# Function used to get completeness values
# The input/argument is --> df
def completeness(dataframe):
comp = pd.DataFrame(dataframe.isnull().sum())
comp.reset_index(inplace = True)
comp = comp.rename(columns = {'index':'column', 0:'total'})
comp['completeness'] = (1 - comp['total']/dataframe.shape[0])*100
comp = comp.sort_values(by = 'completeness', ascending = True)
comp.reset_index(drop = True, inplace = True)
return comp
# In[20]:
# Check for "completeness"
completeness(df)
# In[21]:
# **(d)** **Delete Variables With >= 20% of Missing Values**
df.drop(columns = ['t_calle_hechos2'], inplace = True)
df.reset_index(drop = True, inplace = True)
print(df.shape)
df.head(5)
# ### **Check for Consistency**
# In[22]:
df.columns
# In[23]:
# Check for consistency => 'v_ao_hechos'
## "v_ao_hechos" cannot be: negatives, floats, month names
df['v_ao_hechos'].unique()
# In[24]:
df[df['v_ao_hechos'] == 'Junio']
# In[25]:
# Check for consistency => 'v_mes_hechos'
## "v_mes_hechos" must be categorical values belonging to 'calendar months'
df['v_mes_hechos'].unique()
# In[26]:
# Verify whether the incorrect 'v_mes_hechos' matches the month in 'd_fecha_hechos' => 2 cases are checked
df[df['v_mes_hechos'] == 'August']
# In[27]:
# Verify whether the incorrect 'v_mes_hechos' matches the month in 'd_fecha_hechos' => 2 cases are checked
df[df['v_mes_hechos'] == 'ENRO']
#
# * **Month names that must be adjusted:**
#
# * diciiembre | december => diciembre
# * maro | march => marzo
# * novembre | novimbre | november => noviembre
# * septiemb | september => septiembre
# * agossto | agosto-2017 | agosto3 | august => agosto
# * abrill | april | aabril => abril
# * mayo-2018 | may => mayo
# * june => junio
# * october => octubre
# * february | febrero-2015 =>febrero
# * july => julio
# * january | enro => enero
#
# In[28]:
# This function is used to correct invalid month names
def correct_month(month):
if month == 'Diciiembre' or month == 'December' or month == 'Diciembre':
return 'Diciembre'
elif month == 'Maro' or month == 'March' or month == 'Marzo':
return 'Marzo'
elif month == 'Novembre' or month == 'Novimbre' or month == 'November' or month == 'Noviembre':
return 'Noviembre'
elif month == 'Septiemb' or month == 'September' or month == 'Septiembre':
return 'Septiembre'
elif month == 'Agossto' or month == 'Agosto-2017' or month == 'agosto3' or month == 'August' or month == 'Agosto':
return 'Agosto'
elif month == 'abrill' or month == 'April' or month == 'Aabril' or month == 'Abril':
return 'Abril'
elif month == 'Mayo-2018' or month == 'May' or month == 'Mayo':
return 'Mayo'
elif month == 'June' or month == 'Junio':
return 'Junio'
elif month == 'October' or month == 'Octubre':
return 'Octubre'
elif month == 'February' or month == 'Febrero-2015' or month == 'Febrero':
return 'Febrero'
elif month == 'July' or month == 'Julio' or month == 'Julio .':
return 'Julio'
elif month == 'January' or month == 'ENRO' or month == 'Enero':
return 'Enero'
else:
return 'Check'
# In[29]:
# Apply function 'correct_month' to adjust invalid month names
df['v_mes_hechos'] = df['v_mes_hechos'].astype(str)
df['v_mes_hechos'] = df['v_mes_hechos'].apply(lambda row: correct_month(row))
df.head(5)
# In[30]:
# Validate the results in 'v_mes_hechos' are correct
df['v_mes_hechos'].value_counts()
# In[31]:
# This function is used to correct invalid year values => 'v_ao_hechos'
def correct_ao_hechos(year):
if year == '-2017' or year == '-215' or year == '1800' or year == '321':
return '2017'
elif year == '-2016' or year == '1258' or year == '2058' or year == '235' or year == '19000' or year == 'Junio':
return '2016'
elif year == '2023':
return '2015'
elif year == 'Febrero':
return '2018'
else:
return year
# In[32]:
# Check for consistency => 'v_ao_hechos'
## "v_ao_hechos" must contain valid calendar years
df['v_ao_hechos'].unique()
# In[33]:
# Apply function 'correct_ao_hechos' to adjust invalid 'v_ao_hechos'
df['v_ao_hechos'] = df['v_ao_hechos'].astype(str)
df['v_ao_hechos'] = df['v_ao_hechos'].apply(lambda row: correct_ao_hechos(row))
df.head(5)
# In[34]:
# Validate the results in 'v_ao_hechos' are correct
df['v_ao_hechos'].value_counts()
# In[35]:
# Check for consistency => 'd_fecha_hechos'
## "d_fecha_hechos" must be date format, any other value is incorrect and must be adjusted
aux_fecha_hechos = df[df['d_fecha_hechos'].str.contains("[a-zA-Z]")]
print(aux_fecha_hechos.shape)
aux_fecha_hechos.head(5)
# In[36]:
# This function is used to clean invalid data entries in 'd_fecha_hechos' column
def clean_fecha_hechos(text):
try:
text = text.replace('lunes','').replace('martes','').replace('miércoles','').replace('jueves','').replace('viernes','')
text = text.replace('sábado','').replace('sabado','').replace('domingo','').replace('|','').replace('()','').replace('de','')
text = text.replace('enero','-1-').replace('febrero','-2-').replace('marzo','-3-').replace('abril','-4-')
text = text.replace('mayo','-5-').replace('junio','-6-').replace('julio','-7-').replace('agosto','-8-')
text = text.replace('septiembre','-9-').replace('octubre','-10-').replace('noviembre','-11-').replace('diciembre','-12-')
return text
except:
return 'Check'
# In[37]:
aux_fecha_hechos['d_fecha_hechos'] = aux_fecha_hechos['d_fecha_hechos'].apply(lambda row: clean_fecha_hechos(row))
aux_fecha_hechos.head(5)
# In[38]:
aux_fecha_hechos['d_fecha_hechos'] = pd.to_datetime(aux_fecha_hechos['d_fecha_hechos']).dt.strftime('%Y-%m-%d %H:%M:%S')
aux_fecha_hechos.head(5)
# In[39]:
# Apply 'clean_fecha_hechos' to df
df['d_fecha_hechos'] = df['d_fecha_hechos'].apply(lambda row: clean_fecha_hechos(row))
df.head(5)
# In[40]:
# Convert to date format 'd_fecha_hechos' column
df['d_fecha_hechos'] = pd.to_datetime(df['d_fecha_hechos']).dt.strftime('%Y-%m-%d %H:%M:%S')
df.head(5)
# In[41]:
# Check for consistency => 'd_fecha_inicio'
## "d_fecha_hechos" must be date format, any other value is incorrect and must be adjusted
aux_fecha_inicio = df[df['d_fecha_inicio'].str.contains("[a-zA-Z]]")]
print(aux_fecha_inicio.shape)
aux_fecha_inicio.head(5)
# In[42]:
# Convert to date format 'd_fecha_inicio' column
df['d_fecha_inicio'] = pd.to_datetime(df['d_fecha_inicio'], errors = 'coerce').dt.strftime('%Y-%m-%d %H:%M:%S')
df.head(5)
# In[43]:
df.head(3)
# Pending features:
# 'v_mes_inicio' => DONE
# 'v_ao_inicio' => DONE
# 'c_longitud' => DONE
# 'c_latitud' => DONE
# In[44]:
# Verify 'v_mes_inicio' values are correct and consistent => OK
df['v_mes_inicio'].unique()
# In[45]:
# Verify 'v_ao_inicio' values are correct and consistent => OK
df['v_ao_inicio'].unique()
# In[46]:
# This function is used to validate 'lng' values
## Longitude must be a number between -180 and 180
def lng_val(value):
    try:
        if -180 <= value <= 180:
            return value
        else:
            return 'NaN'
    except TypeError:
        return value
# In[47]:
# Check for null values in 'c_longitud' variable
df['c_longitud'].isnull().sum()
# In[48]:
df['c_longitud'] = df['c_longitud'].fillna(0)
df.head(3)
# In[49]:
# Check for null values in 'c_longitud' variable
df['c_longitud'].isnull().sum()
# In[50]:
# Use '\D+' to replace non-numeric values with '0':
df['c_longitud'].replace('\D+','0',regex=True,inplace=True)
df.head(3)
# In[51]:
# Verify 'c_longitud' values are correct and consistent
df['c_longitud'] = df['c_longitud'].astype(float)
df['c_longitud'] = df['c_longitud'].apply(lambda row: lng_val(row))
df.head(5)
# In[52]:
# This function is used to validate 'lat' values
## Latitude must be a number between -90 and 90
def lat_val(value):
    try:
        if -90 <= value <= 90:
            return value
        else:
            return 'NaN'
    except TypeError:
        return value
# In[53]:
# Verify 'c_latitud' values are correct and consistent
df['c_latitud'] = df['c_latitud'].astype(float)
df['c_latitud'] = df['c_latitud'].apply(lambda row: lat_val(row))
df.head(5)
# In[54]:
# Check for consistency => 't_delito'
df['t_delito'].value_counts()
# In[55]:
# This function is used to replace invalid letters in text columns => 't_delito' column
def replace_words(text):
try:
text = text.replace('DAÃO','DAÑO').replace('TRÃNSITO','TRANSITO').replace('USURPACIÃN','USURPACION')
text = text.replace('PÃBLICO','PUBLICO').replace('POSESIÃN','POSESION').replace('EXTORSIÃN','EXTORSION')
text = text.replace('VEHÃCULO','VEHICULO').replace('COMUNICACIÃN','COMUNICACION').replace('AUTOBÃS','AUTOBUS')
text = text.replace('FORÃNEO','FORANEO').replace('PÃBLICA','PUBLICA').replace('EXPLOTACIÃN','EXPLOTACION')
text = text.replace('REGULACIÃN','REGULACION').replace('GESTIÃN','GESTION').replace('CONTAMINACIÃN','CONTAMINACION')
text = text.replace('INFORMACIÃN','INFORMACION').replace('2015-09-15 00:00:00','NaN').replace('2017-10-09 08:00:00','NaN')
text = text.replace('2016-02-27 22:30:00','NaN').replace('EXTORSIÃN','EXTORSION').replace('2016-06-14 16:30:00','NaN')
return text
except:
return 'Check'
# In[56]:
# Apply 'replace_words' function to correct invalid data entries
df['t_delito'] = df['t_delito'].astype(str)
df['t_delito'] = df['t_delito'].apply(lambda row: replace_words(row))
df.head(5)
# In[57]:
# Check invalid data entries in 't_delito' were corrected
df['t_delito'].value_counts()
# In[58]:
# This function is used to replace invalid letters in text columns => 't_categoria_delito' column
def replace_words_catdelito(text):
try:
text = text.replace('VEHÃCULO','VEHICULO').replace('VÃA','VIA').replace('PÃBLICA','PUBLICA')
text = text.replace('VIOLACIÃN','VIOLACION').replace('HABITACIÃN','HABITACION')
return text
except:
return 'Check'
# In[59]:
# Apply 'replace_words_catdelito' to correct invalid data entries in 't_categoria_delito'
df['t_categoria_delito'] = df['t_categoria_delito'].astype(str)
df['t_categoria_delito'] = df['t_categoria_delito'].apply(lambda row: replace_words_catdelito(row))
df.head(5)
# In[60]:
# Check invalid data entries in 't_categoria_delito' were corrected => (text cleaning)
df['t_categoria_delito'].value_counts()
# In[61]:
# Check for consistency => 't_fiscalia'
df['t_fiscalia'].value_counts()
# In[62]:
# This function is used to replace invalid letters in text columns => 't_fiscalia' column
def replace_words_fiscalia(text):
try:
text = text.replace('INVESTIGACIÃN','INVESTIGACION').replace('JUÃREZ','JUAREZ').replace('COYOACÃN','COYOACAN')
text = text.replace('ÃLVARO','ALVARO').replace('OBREGÃN','OBREGON').replace('ATENCIÃN','ATENCION').replace('NIÃOS','NIÑOS')
text = text.replace('NIÃAS','NIÑAS').replace('ATENCIÃN','ATENCION').replace('PROTECCIÃN','PROTECCION').replace('VÃCTIMAS','VICTIMAS')
text = text.replace('BÃSQUEDA','BUSQUEDA').replace('LOCALIZACIÃN','LOCALIZACION').replace('DIRECCIÃN','DIRECCION')
return text
except:
return 'Check'
# In[63]:
# Apply function 'replace_words_fiscalia' to correct invalid data entries in 't_fiscalia' column
df['t_fiscalia'] = df['t_fiscalia'].astype(str)
df['t_fiscalia'] = df['t_fiscalia'].apply(lambda row: replace_words_fiscalia(row))
df.head(5)
# In[64]:
# Check invalid data entries in 't_fiscalia' were corrected => (text cleaning)
df['t_fiscalia'].value_counts()
# In[594]:
# Check for consistency => 't_colonia_hechos'
for c in df['t_colonia_hechos']:
print(c)
# In[65]:
# This function is used to correct invalid data entries in 't_colonia_hechos' => accented characters
# It serves the same purpose as the replace functions above (e.g. AMPLIACIÃN => AMPLIACION)
def accents_colonia_hechos(text):
try:
text = unicode(text, 'utf-8')
except NameError:
pass
    text = unicodedata.normalize('NFD', text).encode('ascii', 'ignore').decode("utf-8")
return str(text)
# In[66]:
# Apply function 'accents_colonia_hechos' to correct accented characters
df['t_colonia_hechos'] = df['t_colonia_hechos'].astype(str)
df['t_colonia_hechos'] = df['t_colonia_hechos'].apply(lambda row: accents_colonia_hechos(row))
df.head(5)
# In[597]:
# Check invalid data entries in 't_colonia_hechos' were corrected => (text cleaning)
for c in df['t_colonia_hechos'].unique():
print(c)
# In[67]:
# This function is used to correct invalid data entries in 't_colonia_hechos' column
def replace_words_colonia_hechos(text):
try:
text = text.replace('SECCIAN','SECCION').replace('HIPADROMO','HIPODROMO').replace('ARSULA','URSULA')
text = text.replace('AMPLIACIAN','AMPLIACION').replace('MAXICO','MEXICO').replace('CONSTITUCIAN','CONSTITUCION')
text = text.replace('PEAON','PEÑON').replace('SIMAN','SIMON').replace('MARTAN','MARTIN').replace('ESCANDAN','ESCANDON')
text = text.replace('HAROES','HEROES').replace('EDUCACIAN','EDUCACION').replace('ANDRAS','ANDRES').replace('PEAAS','PEÑA')
text = text.replace('MARAA','MARIA').replace('RAO','RIO').replace('JERANIMO','JERONIMO').replace('GAMEZ','GOMEZ')
text = text.replace('MARTAN','MARTIN').replace('AMPLIACIAN0','AMPLIACION').replace('HAROES','HEROES')
text = text.replace('RENOVACIAN','RENOVACION').replace('DOMANGUEZ','DOMINGUEZ').replace('SIMAN','SIMON')
text = text.replace('BOLAVAR','BOLIVAR').replace('IRRIGACIAN','IRRIGACION').replace('PARAASO','PARAISO')
text = text.replace('LIBERACIAN','LIBERACION').replace('POLATICA','POLITICA').replace('CLAVERAA','CLAVERA')
text = text.replace('BAAOS','BAÑOS').replace('PEAAN','PEÑA').replace('MARTANEZ','MARTINEZ').replace('JERANIMO','JERONIMO')
text = text.replace('CONCEPCIAN','CONCEPCION').replace('BARTOLOMA','BARTOLOME').replace('NIAOS','NIÑOS').replace('GAMEZ','GOMEZ')
text = text.replace('SIMAN','SIMON').replace('AMARICA','AMERICA').replace('AVIACIAN','AVIACION').replace('DOMANGUEZ','DOMINGUEZ')
text = text.replace('JESAS','JESUS').replace('AAO','AÑO').replace('CONSTITUCIAN','CONSTITUCION').replace('SECCIAN','SECCION')
text = text.replace('PEAA','PEÑA').replace('REFINERAA','REFINERIA').replace('AMPLIACIAN','AMPLIACION').replace('BOLAVAR','BOLIVAR')
text = text.replace('MONTAAA','MONTAÑA').replace('LIBERACIAN','LIBERACION').replace('HAROES','HEROES').replace('MAXICO','MEXICO')
text = text.replace('JESAS','JESUS').replace('SIMAN','SIMON').replace('ELAAS','ELIAS').replace('NIAO','NIÑO').replace('ARAAA','ARENA')
text = text.replace('M?XICO','MEXICO').replace('RINCAN','RINCON').replace('ESCANDAN','ESCANDON').replace('TAXQUEAA','TAXQUEÑA')
text = text.replace('MARAA','MARIA').replace('JOSA','JOSE').replace('PEAON','PEATON').replace('AGRACOLA','AGRICOLA').replace('HAROES','HEROES')
text = text.replace('AMPLIACIAN','AMPLIACION').replace('ESCANDAN','ESCANDON').replace('JARDAN','JARDIN').replace('TAXQUEAA','TAXQUEÑA')
text = text.replace('AGRACOLA','AGRICOLA').replace('COMPAAAA','CAÑADA').replace('LOTERAA','LOTERIA').replace('RINCAN','RINCON')
text = text.replace('PEAA','PEÑA').replace('PERIFARICO','PERIFERICO').replace('PEQUE?A','PEQUEÑA').replace('ANDRAS','ANDRES')
text = text.replace('FORTAN','FORTIN').replace('JAZMAN','JAZMIN').replace('ESPAAA','ESPAÑA').replace('CAAADA','CASADA')
text = text.replace('MARAA','MARIA').replace('ECHEVERRAA','ECHEVERRIA').replace('ERMITAAO','ERMITAÑO').replace('REFINERAA','REFINERIA')
return text
except:
return 'Check'
# In[68]:
# Convert to uppercase 't_colonia_hechos'
df['t_colonia_hechos'] = df['t_colonia_hechos'].str.upper()
df.head(5)
# In[69]:
# Apply 'replace_words_colonia_hechos' to correct invalid data entries in 't_colonia_hechos' and spelling errors
df['t_colonia_hechos'] = df['t_colonia_hechos'].apply(lambda row: replace_words_colonia_hechos(row))
df.head(5)
# In[70]:
# Check for errors in 't_alcaldia_hechos' =>(text cleaning)
## In this case data entries are correct: no invalid records or spelling errors were identified
df['t_alcaldia_hechos'].value_counts()
# In[71]:
# Check for errors and apply correct format to 'd_fecha_inicio'
def check_fecha_inicio(texto):
    # Flag values that still contain letters as invalid dates
    # (note: this helper is defined but not used below)
    if re.search("[a-zA-Z]", texto):
        return 'NaN'
    else:
        return texto
# In[73]:
# Mark values containing alphabetic characters as 'NaT'
df['d_fecha_inicio'] = df['d_fecha_inicio'].str.replace(r'[a-zA-Z]+','NaT')
print(df.shape)
df.head(5)
# In[74]:
# Convert 'd_fecha_inicio' to date format => (consistency validation)
## After this operation an error was identified and handled below: there is no blank space between date and hour in '2016-01-05018:35:37'
pd.to_datetime(df['d_fecha_inicio'], format='%Y-%m-%d %H:%M:%S',errors='coerce')
print(df.shape)
df.head(5)
# In[75]:
# Replace 'NaT' by 'empty space'
df['d_fecha_inicio'] = df['d_fecha_inicio'].str.replace('NaT',' ')
print(df.shape)
df.head(5)
# In[76]:
# Check for errors in 'v_mes_inicio'
## Data entries are correct => OK
df['v_mes_inicio'].unique()
# In[77]:
# Check for errors in 'v_ao_inicio'
## Data entries are correct => OK
df['v_ao_inicio'].unique()
# # (c) **Part 2:**
# # **Exploratory Data Analysis (EDA)**
# * For each item, answer the following question: **What can you conclude from this?**
# ## **Years and months with the most crimes**
# In[79]:
df.head(3)
# In[80]:
delitos = df.groupby(['v_ao_hechos','v_mes_hechos', 't_delito']).size().sort_values(ascending=False)
delitos.head(10)
# In[82]:
delitos = pd.DataFrame(delitos)
delitos = delitos.rename(columns={0:'Sum'})
delitos.head(15).style.background_gradient()
# * **What can you conclude from this?**
# **Conclusions:**
#
# * 1.- The crimes with the highest incidence during the years and months with the most crimes were (Top 5):
#
# * **Violencia Familiar** (domestic violence)
#
# * **Robo de Objetos** (theft of objects)
#
# * **Denuncia de Hechos** (report of events)
#
# * **Robo a Negocio sin Violencia** (non-violent robbery of a business)
#
# * **Robo a Transeunte en Vía Pública con Violencia** (violent street robbery of a pedestrian)
#
# * 2.- In the 2016 - 2019 period the most frequent crime was:
#
# * **Violencia Familiar**
#
#
# ## **Boroughs with the most criminal activity**
# In[83]:
delegacion_delito = df.groupby(['t_alcaldia_hechos','t_delito']).size().sort_values(ascending=False)
delegacion_delito.head(15)
# In[84]:
delegacion_delito = pd.DataFrame(delegacion_delito)
delegacion_delito = delegacion_delito.rename(columns={0:'Sum'})
delegacion_delito.head(20).style.background_gradient()
# In[85]:
delegacion = df.groupby(['t_alcaldia_hechos']).size().sort_values(ascending=False)
delegacion
# In[86]:
# Convert 'delegacion' to DataFrame (just for better visualization)
delegacion = pd.DataFrame(delegacion)
delegacion = delegacion.rename(columns={0:'Sum'})
delegacion.style.background_gradient()
# In[87]:
# Select Top 10 't_alcaldia_hechos' to highlight more criminal activity
delegacion.iloc[0:10]
# In[88]:
# Using pygal to plot 'Top 10 Delegaciones con Mayor Actividad Delictiva'
import pygal
from IPython.display import SVG, display
from pygal import Config
config = Config()
config.show_legend = True
config.human_readable = True
line_chart = pygal.HorizontalBar(print_labels=True, print_values=True)
line_chart.title = 'Top 10 Delegaciones con Mayor Actividad Delictiva'
line_chart.add('CUAUHTEMOC', 126168)
line_chart.add('IZTAPALAPA', 114682)
line_chart.add('GUSTAVO A. MADERO', 76935)
line_chart.add('BENITO JUAREZ', 69342)
line_chart.add('COYOACAN', 52767)
line_chart.add('MIGUEL HIDALGO', 52502)
line_chart.add('ALVARO OBREGON', 51135)
line_chart.add('TLALPAN', 44888)
line_chart.add('VENUSTIANO CARRANZA', 44474)
line_chart.add('AZCAPOTZALCO', 38876)
display(SVG(line_chart.render(disable_xml_declaration=True,show_legend=True, human_readable=True, fill=True
)))
# * **What can you conclude from this?**
# * 1.- According to external sources (media outlets and official city government pages):
#
# * **The boroughs with the most criminal activity in 2020 were:**
# * Álvaro Obregón, Cuauhtémoc, Coyoacán, Gustavo A. Madero, Iztapalapa, Miguel Hidalgo, Tlalpan, Venustiano Carranza and Xochimilco [**Infobae - Argentina**](https://www.infobae.com/america/mexico/2020/09/13/cuales-son-las-8-alcaldias-que-concentran-la-mayor-parte-de-los-delitos-en-cdmx/). From this publication we can see that 70% of the boroughs cited appear in the Top 10 of our analysis.
#
# * **Iztapalapa ranks 1st in criminal activity in the category:** Violencia Familiar
# ## **What are the most frequent crimes?**
# In[89]:
df.head(3)
# In[90]:
delito_frq = df.groupby(['t_delito']).size().sort_values(ascending=False)
delito_frq.head(15)
# In[91]:
# Convert 'delito_frq' to DataFrame (just for better visualization)
delito_frq = pd.DataFrame(delito_frq)
delito_frq = delito_frq.rename(columns={0:'Sum'})
delito_frq.head(15).style.background_gradient()
# In[216]:
# Show Top 10 'delito_frq' to highlight 'most frequent crimes'
config = Config()
config.show_legend = True
config.human_readable = True
line_chart = pygal.HorizontalBar(print_labels=True, print_values=True)
line_chart.title = 'Delitos Más Frecuentes'
line_chart.add('VIOLENCIA FAMILIAR', 68004)
line_chart.add('ROBO DE OBJETOS', 51510)
line_chart.add('ROBO A NEGOCIO SIN VIOLENCIA', 51172)
line_chart.add('FRAUDE', 44198)
line_chart.add('DENUNCIA DE HECHOS', 40377)
line_chart.add('AMENAZAS', 36939)
line_chart.add('ROBO A TRANSEUNTE EN VIA PUBLICA CON VIOLENCIA', 28887)
line_chart.add('ROBO A TRANSEUNTE DE CELULAR CON VIOLENCIA', 25779)
line_chart.add('ROBO DE ACCESORIOS DE AUTO', 25332)
line_chart.add('ROBO DE OBJETOS DEL INTERIOR DE UN VEHICULO', 23652)
display(SVG(line_chart.render(disable_xml_declaration=True,show_legend=True, human_readable=True, fill=True
)))
# In[92]:
# Select Top 10 'delito_frq' to highlight 'most frequent crimes'
delito_frq.iloc[0:10]
# * **What can you conclude from this?**
# * **Violencia Familiar** remains the crime with the highest incidence; it is most heavily concentrated in boroughs such as Iztapalapa and also appears in Cuauhtémoc, with 68,004 and 5,554 cases respectively.
# ## **Which crimes are most frequent in the borough with the highest criminal incidence?**
# In[95]:
# Filter DataFrame by 't_alcaldia_hechos' == 'CUAUHTEMOC' to identify most frequent crimes
delegacion_mayor = df[df['t_alcaldia_hechos'] == 'CUAUHTEMOC']
print(delegacion_mayor.shape)
delegacion_mayor.head(5)
# In[97]:
cua_frq = delegacion_mayor.groupby(['t_delito']).size().sort_values(ascending=False)
cua_frq.head(20)
# In[98]:
# Convert 'cua_frq' to DataFrame (just for better visualization)
cua_frq = pd.DataFrame(cua_frq)
cua_frq = cua_frq.rename(columns={0:'Sum'})
cua_frq.head(15).style.background_gradient()
# In[99]:
# Select Top 10 crimes in 'Cuauhtemoc'
cua_frq.iloc[0:10]
# In[217]:
# Show Top 10 'cua_frq' to highlight the most frequent crimes in Cuauhtémoc
config = Config()
config.show_legend = True
config.human_readable = True
line_chart = pygal.HorizontalBar(print_labels=True, print_values=True)
line_chart.title = 'Delitos Más Frecuentes en la Delegación Cuauhtémoc'
line_chart.add('FRAUDE', 11610)
line_chart.add('ROBO DE OBJETOS', 10235)
line_chart.add('DENUNCIA DE HECHOS', 8207)
line_chart.add('ROBO A NEGOCIO SIN VIOLENCIA', 8074)
line_chart.add('ROBO A TRANSEUNTE DE CELULAR SIN VIOLENCIA', 5995)
line_chart.add('VIOLENCIA FAMILIAR', 5554)
line_chart.add('ROBO A TRANSEUNTE EN VIA PUBLICA CON VIOLENCIA', 5424)
line_chart.add('AMENAZAS', 5069)
line_chart.add('ROBO A TRANSEUNTE DE CELULAR CON VIOLENCIA', 4169)
line_chart.add('ROBO DE ACCESORIOS DE AUTO', 3975)
display(SVG(line_chart.render(disable_xml_declaration=True,show_legend=True, human_readable=True, fill=True
)))
# * **What can you conclude from this?**
# * This borough behaves much like the overall Top 10 of the most frequent crimes; one of its main differentiators is the number of cases.
# ## **In which borough do the most serious crimes (categoria_delito) occur?**
# In[100]:
df.head(3)
# In[101]:
cat_delito = df.groupby(['t_alcaldia_hechos','t_categoria_delito']).size().sort_values(ascending=False)
cat_delito.head(15)
# In[102]:
# Convert 'cat_delito' to DataFrame (just for better visualization)
cat_delito = pd.DataFrame(cat_delito)
cat_delito = cat_delito.rename(columns={0:'Sum'})
cat_delito.head(15).style.background_gradient()
# In[103]:
df['t_categoria_delito'].value_counts()
# ## **In the 'juzgados familiares' (family courts) prosecutor's office, which are the most frequent crimes?**
# In[104]:
df['t_fiscalia'].unique()
# In[105]:
# Filter the selected office in the 't_fiscalia' column
juzgados = df[df['t_fiscalia'] == 'INVESTIGACION PARA LA ATENCION DE NIÑOS, NIÑAS Y ADOLESCENTES']
juzgados.reset_index(drop = True, inplace = True)
juzgados.head(15)
# In[106]:
juzgados_fam = juzgados.groupby(['t_delito']).size().sort_values(ascending=False)
juzgados_fam.head(15)
# In[107]:
# Convert 'juzgados_fam' to DataFrame (just for better visualization)
juzgados_fam = pd.DataFrame(juzgados_fam)
juzgados_fam = juzgados_fam.rename(columns={0:'Sum'})
juzgados_fam.head(15).style.background_gradient()
# In[108]:
juzgados_fam.columns
# In[111]:
# Select Top 15 crimes in fiscalía 'juzgados familiares'
juzgados_fam.iloc[0:15]
# In[112]:
# Using pygal to plot: 'Top 10 Delitos más Frecuentes en Juzgados Familiares'
import pygal
from IPython.display import SVG, display
from pygal import Config
config = Config()
config.show_legend = True
config.human_readable = True
line_chart = pygal.HorizontalBar(print_labels=True, print_values=True)
line_chart.title = 'Top 10 Delitos más Frecuentes en Juzgados Familiares'
line_chart.add('VIOLENCIA FAMILIAR', 7427)
line_chart.add('SUSTRACCION DE MENORES', 2232)
line_chart.add('ROBO A NEGOCIO SIN VIOLENCIA', 2034)
line_chart.add('ABANDONO DE PERSONA', 1773)
line_chart.add('INSOLVENCIA ALIMENTARIA', 1531)
line_chart.add('AMENAZAS', 1243)
line_chart.add('NARCOMENUDEO POSESION SIMPLE', 994)
line_chart.add('DENUNCIA DE HECHOS', 887)
line_chart.add('ROBO A TRANSEUNTE EN VIA PUBLICA CON VIOLENCIA', 721)
line_chart.add('ABUSO SEXUAL', 502)
display(SVG(line_chart.render(disable_xml_declaration=True,show_legend=True, human_readable=True, fill=True
)))
# In[468]:
from IPython.display import HTML
import pygal
from pygal import Config
html_pygal = u"""
<!DOCTYPE html>
<html>
<head>
<script type="text/javascript" src="http://kozea.github.com/pygal.js/javascripts/svg.jquery.js"></script>
<script type="text/javascript" src="http://kozea.github.com/pygal.js/javascripts/pygal-tooltips.js"></script>
</head>
<body><figure>{pygal_render}</figure></body>
</html>
"""
line_chart = pygal.HorizontalBar()
line_chart.title = 'Top 10 Delitos más Frecuentes en Juzgados Familiares'
line_chart.add('VIOLENCIA FAMILIAR', 7427)
line_chart.add('SUSTRACCION DE MENORES', 2232)
line_chart.add('ROBO A NEGOCIO SIN VIOLENCIA', 2034)
line_chart.add('ABANDONO DE PERSONA', 1773)
line_chart.add('INSOLVENCIA ALIMENTARIA', 1531)
line_chart.add('AMENAZAS', 1243)
line_chart.add('NARCOMENUDEO POSESION SIMPLE', 994)
line_chart.add('DENUNCIA DE HECHOS', 887)
line_chart.add('ROBO A TRANSEUNTE EN VIA PUBLICA CON VIOLENCIA', 721)
line_chart.add('ABUSO SEXUAL', 502)
line_chart.render_in_browser()
# * **What can you conclude from this?**
# * Violencia Familiar, as in Iztapalapa and Cuauhtémoc, remains dominant with an upward trend in incidence volume, followed by Sustracción de Menores (child abduction) and Robo a Negocio Sin Violencia.
# ## **How does the time between when a crime occurs and when it is reported behave?**
# In[113]:
df.head(3)
# In[114]:
df['d_fecha_hechos'] = pd.to_datetime(df['d_fecha_hechos']).dt.strftime('%d-%m-%Y')
df['d_fecha_inicio'] = pd.to_datetime(df['d_fecha_inicio']).dt.strftime('%d-%m-%Y')
df.head(5)
# In[115]:
df['d_fecha_hechos'] = pd.to_datetime(df['d_fecha_hechos'], format='%d-%m-%Y')
df['d_fecha_inicio'] = pd.to_datetime(df['d_fecha_inicio'], format='%d-%m-%Y')
# In[116]:
df.info()
# In[117]:
df['difference_in_datetime'] = abs(df['d_fecha_hechos'] - df['d_fecha_inicio'])
df.head(10)
# In[118]:
df['v_mes_hechos'].unique()
# In[119]:
df['v_mes_inicio'].unique()
# In[121]:
mes_hecho_inicio = df.groupby(['v_mes_hechos','v_mes_inicio','t_delito']).size().sort_values(ascending = False)
mes_hecho_inicio.head(15)
# In[122]:
# Convert 'mes_hecho_inicio' to DataFrame (just for better visualization)
mes_hecho_inicio = pd.DataFrame(mes_hecho_inicio)
mes_hecho_inicio = mes_hecho_inicio.rename(columns={0:'Sum'})
mes_hecho_inicio.head(20).style.background_gradient()
# In[123]:
fecha_hecho_inicio = df.groupby(['d_fecha_hechos','d_fecha_inicio','difference_in_datetime']).size().sort_values(ascending=False)
fecha_hecho_inicio.head(20)
# In[124]:
# Convert 'fecha_hecho_inicio' to DataFrame (just for better visualization)
fecha_hecho_inicio = pd.DataFrame(fecha_hecho_inicio)
fecha_hecho_inicio = fecha_hecho_inicio.rename(columns={0:'Sum'})
fecha_hecho_inicio.head(20).style.background_gradient()
# * **What can you conclude from this?**
# * 1.- One of the first things we can observe from this analysis is:
#
# * **The month in which the crime occurs and the month in which it is reported to the authorities are the same.**
#
# * **This tells us there is no lag in months between occurrence and reporting**
#
# * **Among the main reported crimes are:**
#
# * Violencia Familiar, Robo a Negocio Sin Violencia, Robo de Objetos, Amenazas and Robo a Transeunte en Vía Pública con Violencia.
#
#
# * **However, to complement our analysis we need to know the range of days within which the crime is reported inside the same month, as observed above**
#
# * **There are 4 main cases:**
# * The crime is reported on the same date it occurs (only a few hours elapse).
# * The crime is reported 1 day after it occurs.
# * The crime is reported 2 days after it occurs.
# * The crime is reported 3 days after it occurs.
#
# * **A high percentage of crimes are reported within the same period in which they occur**
# ## **Add at least three more analyses you consider relevant to show**
# # (d) **Part 3:**
# # **Feature Engineering / Normalization**
# In[125]:
df.head(2)
# ### **Reduce the table so that only crimes that occurred after 2013 are considered.**
# In[126]:
# Filter 'v_ao_hechos' > 2013
df['v_ao_hechos'] = df['v_ao_hechos'].astype(int)
df_norm = df[df['v_ao_hechos'] > 2013]
print(df_norm.shape)
df.head(3)
# In[127]:
# Verify data entries in 'v_ao_hechos' are correct
df_norm['v_ao_hechos'].unique()
# ### **Normalize the 'categoria delito' variable, reducing the number of possibilities; for example, secuestro, violacion and homicidio doloso could form a category called "delito de alto impacto". The normalization approach is open-ended; you only need to justify in the PDF why it was done that way.**
# In[128]:
# Using value_counts() to see total of records per category => 't_categoria_delito'
df_norm['t_categoria_delito'].value_counts()
# In[130]:
# "LabelEncoder" is used to encode categorical values, in this case 't_categoria_delito'
## and normalize it to: "Delito de Alto Impacto", "Delito de Medio Impacto", "Delito de Bajo Impacto"
df_norm_sklearn = df_norm.copy()
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
df_norm_sklearn['t_categoria_delito_label'] = lb_make.fit_transform(df_norm['t_categoria_delito'])
df_norm_sklearn.head()
# In[131]:
aux_norm = df_norm_sklearn[['t_delito','t_categoria_delito','t_categoria_delito_label']]
aux_norm.head(10)
# In[132]:
aux_norm['t_categoria_delito_label'].unique()
# In[133]:
catorce = aux_norm[aux_norm['t_categoria_delito_label'] == 14]
catorce.reset_index(drop=True, inplace=True)
print(catorce.shape)
catorce.head(5)
# In[134]:
trece = aux_norm[aux_norm['t_categoria_delito_label'] == 13]
trece.reset_index(drop=True, inplace=True)
print(trece.shape)
trece.head(5)
# In[135]:
quince = aux_norm[aux_norm['t_categoria_delito_label'] == 15]
quince.reset_index(drop=True, inplace=True)
print(quince.shape)
quince.head(5)
# In[136]:
cero = aux_norm[aux_norm['t_categoria_delito_label'] == 0]
cero.reset_index(drop=True, inplace=True)
print(cero.shape)
cero.head(5)
# ### **After normalization crime category is defined as follows:**
# * **15, 14, 13, 12 and 11 => "Delito de Alto Impacto"**
#
#
# * **10, 9, 8, 7 and 6 => "Delito de Medio Impacto"**
#
#
# * **5, 4, 3, 2, 1 and 0 => "Delito de Bajo Impacto"**
# In[137]:
df_norm = df_norm_sklearn
print(df_norm.shape)
df_norm.head(5)
# In[138]:
# This function is used to assign a new column: "categoria_delito_norm",
# which is 't_categoria_delito' normalized into impact levels
def cat_delito_norm(value):
if 11<=value<=15:
return 'Delito de Alto Impacto'
elif 6<=value<=10:
return 'Delito de Medio Impacto'
elif 0<=value<=5:
return 'Delito de Bajo Impacto'
else:
return 'Check'
# In[139]:
df_norm['t_categoria_delito_label'] = df_norm['t_categoria_delito_label'].astype(int)
df_norm['categoria_delito_norm'] = df_norm['t_categoria_delito_label'].apply(lambda row: cat_delito_norm(row))
print(df_norm.shape)
df_norm.head(5)
# In[140]:
# Validate operation and changes above were applied
test = df_norm[df_norm['t_categoria_delito_label'] == 14]
test.reset_index(drop = True, inplace = True)
print(test.shape)
test.head(5)
# In[141]:
df_norm['categoria_delito_norm'].value_counts()
# ### **Normalize the 'delegacion' variable as follows:**
# #### **Zona Centro Poniente** ####
#
# * **Cuauhtémoc**
# * **Miguel Hidalgo**
# * **Álvaro Obregón**
# * **Cuajimalpa**
# * **Azcapotzalco**
#
# #### **Zona Sur** ####
#
# * **Benito Juárez**
# * **Coyoacán**
# * **Tlalpan**
# * **Magdalena Contreras**
#
# #### **Zona Norte** ####
#
# * **Gustavo A. Madero**
# * **V. Carranza**
# * **Iztacalco**
#
# #### **Zona Oriente** ####
#
# * **Iztapalapa**
# * **Tláhuac**
# * **Xochimilco**
# * **Milpa Alta**
# In[142]:
df_norm.head(2)
# In[150]:
df_norm['t_alcaldia_hechos'].unique()
# In[151]:
# This function is used to normalize "t_alcaldia_hechos"
def norm_delegacion(delegacion):
if delegacion == 'CUAUHTEMOC' or delegacion == 'MIGUEL HIDALGO' or delegacion == 'ALVARO OBREGON' or delegacion == 'CUAJIMALPA DE MORELOS' or delegacion == 'AZCAPOTZALCO':
return 'Zona Centro Poniente'
elif delegacion == 'BENITO JUAREZ' or delegacion == 'COYOACAN' or delegacion == 'TLALPAN' or delegacion == 'LA MAGDALENA CONTRERAS':
return 'Zona Sur'
elif delegacion == 'GUSTAVO A MADERO' or delegacion == 'VENUSTIANO CARRANZA' or delegacion == 'IZTACALCO':
return 'Zona Norte'
elif delegacion == 'IZTAPALAPA' or delegacion == 'TLAHUAC' or delegacion == 'XOCHIMILCO' or delegacion == 'MILPA ALTA':
return 'Zona Oriente'
else:
return 'NaN'
# In[152]:
# Apply "norm_delegacion" function to normalize 'delegacion_hechos'
df_norm['t_alcaldia_hechos'] = df_norm['t_alcaldia_hechos'].astype(str)
df_norm['delegacion_norm'] = df_norm['t_alcaldia_hechos'].apply(lambda row: norm_delegacion(row))
print(df_norm.shape)
df_norm.head(5)
# In[153]:
# Validate results
df_norm['delegacion_norm'].value_counts()
# ### **Normalize the 'colonia hechos' variable**
# In[155]:
df_norm['t_colonia_hechos'].value_counts()
# ### **Normalize the 'delito' variable**
# In[156]:
df_norm['t_delito'].value_counts()
# In[157]:
# Normalize "t_delito" variable
df_norm_sklearn = df_norm.copy()
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
df_norm_sklearn['t_delito_label'] = lb_make.fit_transform(df_norm['t_delito'])
df_norm_sklearn.head(15)
# In[160]:
test1 = df_norm_sklearn[df_norm_sklearn['t_delito_label'] == 235]
test1.head(20)
# ### **Normalize the 'unidad investigacion' variable**
# In[218]:
# Normalize "t_delito" variable
df_norm_sklearn = df_norm.copy()
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
df_norm_sklearn['v_unidad_investigacion_label'] = lb_make.fit_transform(df_norm['v_unidad_investigacion'])
df_norm_sklearn.head(15)
# In[221]:
df_norm = df_norm_sklearn
print(df_norm.shape)
df_norm.head(10)
# ### **Normalize the 'fiscalia' variable**
# In[219]:
# Normalize "t_delito" variable
df_norm_sklearn = df_norm.copy()
from sklearn.preprocessing import LabelEncoder
lb_make = LabelEncoder()
df_norm_sklearn['t_fiscalia_label'] = lb_make.fit_transform(df_norm['t_fiscalia'])
df_norm_sklearn.head(15)
# In[222]:
df_norm = df_norm_sklearn
print(df_norm.shape)
df_norm.head(10)
# ### **Generate the following variables from fecha_hechos: quarter, day of the week, whether or not it is a weekend, and day of the month**
# In[170]:
df_norm['d_fecha_hechos'] = pd.to_datetime(df_norm['d_fecha_hechos'])
print(df_norm.shape)
df_norm.head(5)
# In[172]:
# Get weekday from 'd_fecha_hechos'
df_norm['weekday'] = df_norm['d_fecha_hechos'].dt.day_name()
df_norm.head(5)
# In[173]:
# Get calendar month from 'd_fecha_hechos'
df_norm['Full_Month'] = df_norm['d_fecha_hechos'].dt.strftime('%B')
df_norm.head(5)
# In[174]:
def quarter(month):
if month == 'Enero' or month == 'Febrero' or month == 'Marzo':
return 'Q1'
elif month == 'Abril' or month == 'Mayo' or month == 'Junio':
return 'Q2'
elif month == 'Julio' or month == 'Agosto' or month == 'Septiembre':
return 'Q3'
elif month == 'Octubre' or month == 'Noviembre' or month == 'Diciembre':
return 'Q4'
else:
return 'Check'
# In[175]:
# Apply "quarter" function to 'v_mes_hechos' to get the correct Q.
df_norm['Quarter'] = df_norm['v_mes_hechos'].apply(lambda row: quarter(row))
print(df_norm.shape)
df_norm.head(5)
# In[177]:
# This function flags whether a day counts as weekend: 1 => YES, 0 => NO (Friday is included here)
def weekend(day):
if day == 'Friday' or day == 'Saturday' or day == 'Sunday':
return '1'
else:
return '0'
# In[178]:
df_norm['IsWeekend'] = df_norm['weekday'].apply(lambda row: weekend(row))
print(df_norm.shape)
df_norm.head(10)
# In[180]:
# Get the day of the month from 'd_fecha_hechos'
df_norm['MonthDay'] = df_norm['d_fecha_hechos'].dt.strftime('%d')
df_norm.head(10)
# ### **Results Validation**
# In[181]:
# Show results for: weekend flag ('IsWeekend')
df_norm['IsWeekend'].value_counts()
# In[191]:
# Results for Weekends:
import plotly.graph_objects as go
days = ['Weekends']
fig = go.Figure(data=[
go.Bar(name = '0', x = days, y = [453234]),
go.Bar(name = '1', x = days, y = [315419])
])
# Change the bar mode
fig.update_layout(barmode='stack', title = 'Fecha Hechos Fin de Semana')
fig.show()
# In[185]:
# Show results for: day of the week ('weekday')
df_norm['weekday'].value_counts()
# In[190]:
# Show results for: day of the week ('weekday')
import plotly.graph_objects as go
days = ['Weekdays']
fig = go.Figure(data=[
go.Bar(name = 'Lunes', x = days, y = [114093]),
go.Bar(name = 'Martes', x = days, y = [112977]),
go.Bar(name = 'Miércoles', x = days, y = [111799]),
go.Bar(name = 'Jueves', x = days, y = [111799]),
go.Bar(name = 'Viernes', x = days, y = [116068]),
go.Bar(name = 'Sábado', x = days, y = [102994]),
go.Bar(name = 'Domingo', x = days, y = [96357])
])
# Change the bar mode
fig.update_layout(barmode='stack', title = 'Fecha Hechos Día de la Semana')
fig.show()
# In[187]:
# Show results for: quarter ('Quarter')
df_norm['Quarter'].value_counts()
# In[189]:
# Show results for: quarter ('Quarter')
import plotly.graph_objects as go
days = ['Quarters']
fig = go.Figure(data=[
go.Bar(name = 'Q4', x = days, y = [173401]),
go.Bar(name = 'Q3', x = days, y = [167281]),
go.Bar(name = 'Q2', x = days, y = [215870]),
go.Bar(name = 'Q1', x = days, y = [212101])
])
# Change the bar mode
fig.update_layout(barmode='stack', title = 'Fecha Hechos Cuatrimestre')
fig.show()
# In[212]:
# Show results for: day of the month ('MonthDay')
df_norm['MonthDay'].value_counts().sort_values(ascending = False)
# In[ ]:
# Show results for: day of the month ('MonthDay')
import plotly.graph_objects as go
days = ['MonthDays']
fig = go.Figure(data=[
go.Bar(name = '01', x = days, y = []),
go.Bar(name = '02', x = days, y = []),
go.Bar(name = '03', x = days, y = []),
go.Bar(name = '04', x = days, y = []),
])
# Change the bar mode
fig.update_layout(barmode='stack', title = 'Fecha Hechos Día del Mes')
fig.show()
# # (e) **Part 4:**
# # **Additional**
# ## **Generate a map in Kepler or another tool showing the crimes**
# In[192]:
df_norm.head(3)
# In[196]:
df_norm['c_longitud'] = df_norm['c_longitud'].astype(float)
df_norm['c_latitud'] = df_norm['c_latitud'].astype(float)
df_norm = df_norm.rename(columns = {'c_latitud':'Latitude','c_longitud':'Longitude'})
df_norm.dropna(inplace=True)
df_norm['Latitude'] = df_norm['Latitude'].map(lambda x:round(x,2))
df_norm['Longitude'] = df_norm['Longitude'].map(lambda x:round(x,2))
# In[197]:
df_norm.head(5)
# In[202]:
from keplergl import KeplerGl
map_1 = KeplerGl()
# In[205]:
df_norm_map = df_norm[['t_delito','Longitude','Latitude']]
print(df_norm_map.shape)
df_norm_map.head(5)
# In[206]:
map_1.add_data(data = df_norm_map, name='data_1')
# In[207]:
df_norm.shape
# In[208]:
map_1.save_to_html()
# # (f) **Part 5:**
# ## **Questionnaire**
# ### **What is data science?**
# * Data science is a multidisciplinary combination of data inference, algorithm development and technology used to solve analytically complex problems with the help of statistical tools.
#
# * It also helps reveal trends and generates information that companies can use to make business decisions intelligently, based on data and results.
# ### **What skills must a data scientist master?**
# * A data scientist must master the following skills:
#
# * Knowledge of mathematics and statistics.
#
# * Solid, robust skills in programming and database handling.
#
# * The ability to communicate findings effectively, in simple terms that are easy for the business to understand and interpret.
#
# * Establishing effective communication channels with business subject-matter experts and leadership.
#
# * Producing charts that are attractive, explainable and easy for the business to interpret.
#
# * On the soft-skills side, a data scientist should be strategic, proactive and cooperative, as well as innovative and passionate about handling information and communicating it to stakeholders.
# ### **What is an analytical table?**
# * An analytical table is the result of processing data from its raw state, applying the statistical, mathematical and programming tools needed so the information can be used for decision-making in a business environment, as well as serving as input data for machine learning models.
# ### **What is feature engineering?**
# * It is the process of transforming data into features that better represent the problem and provide a better understanding of it. Through this process the data are manipulated to correct errors, adjust variables and create new ones (if required), yielding better performance when the data are used in machine learning models.
# ### **Describe the feature engineering possible for each type of variable** (a short sketch follows this list)
# * **Ordinal-Level Encoding**
#
# * **Nominal-Level Encoding** => Categorical Variables
#
# * One-Hot Encoding
#
# * Count Encoding
#
# * Target Encoding
#
# * **Continuous Variables**
#
# * Min-Max Scaler
#
# * Standard Scaler
#
# * **Text**
#
# * Count Vectorizer
#
# * TF-IDF Vectorizer
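# A minimal sketch (illustrative only, with made-up data) showing one encoder
# per variable type listed above; sklearn and pandas are already used elsewhere
# in this notebook.
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler, StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer
demo = pd.DataFrame({'zona': ['Norte', 'Sur', 'Norte'],
                     'monto': [10.0, 250.0, 40.0],
                     'texto': ['robo de objetos', 'fraude', 'robo a negocio']})
onehot = OneHotEncoder().fit_transform(demo[['zona']]).toarray()  # nominal => one-hot columns
minmax = MinMaxScaler().fit_transform(demo[['monto']])            # continuous => scaled to [0, 1]
zscore = StandardScaler().fit_transform(demo[['monto']])          # continuous => z-scores
tfidf = TfidfVectorizer().fit_transform(demo['texto'])            # text => sparse TF-IDF matrix
print(onehot.shape, minmax.ravel().round(2), tfidf.shape)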
# In[ ]:
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
'''
Convert a temperature entered in Celsius to Fahrenheit. F = 9*C/5 + 32
'''
celsius = float(input("Enter the temperature in Celsius: "))
fahrenheit = (9 * celsius / 5) + 32
print("The temperature in Fahrenheit is %.2f °F" % fahrenheit)
|
# Generated by Django 2.1.2 on 2019-04-01 23:16
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('study', '0016_auto_20190317_1657'),
]
operations = [
migrations.AlterModelOptions(
name='videoinfolectureclassfy',
options={'ordering': ['register_date'], 'verbose_name': '视频区一级分类表', 'verbose_name_plural': '视频区一级分类表'},
),
migrations.AddField(
model_name='videoinfolectureclassfy',
name='sequeue',
field=models.IntegerField(default=9999, verbose_name='排序'),
),
migrations.AlterField(
model_name='videoinfolecture',
name='lecture_type_first',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='vlist', to='study.VideoInfoLectureClassfy', verbose_name='视频一级分类'),
),
]
|
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
# machine learning is for analyzing data and predicting the outcome!
# dataset - any collection of data
DATA_SET = [1, 2, 3, 4, 14, 45, 16, 87, 2, 35]
print("Dataset:\t\t {}".format(DATA_SET))
# there are 3 types of data: numerical (discrete or continuous), categorical, and ordinal
# 3 relevant values: MEAN (average value), MEDIAN (midpoint value), MODE (most common value)
# to find the mean, add all values in the relevant dataset and divide by the length of the dataset
print("Mean:\t\t\t {}".format(np.mean(DATA_SET)))
# to find the median, sort the values of the relevant dataset and locate the middle one
# if the dataset is of an even length, return the average of the two middle numbers
print("Median:\t\t\t {}".format(np.median(DATA_SET)))
# for mode, return the number that occurs most often in the dataset
print("Mode:\t\t\t {}".format(stats.mode(DATA_SET)[0][0]))
# the VARIANCE and STANDARD DEVIATION measure the spread of the values in a dataset
# to get the variance, take the mean of the squared difference from the mean of each value in the dataset
print("Variance:\t\t {}".format(np.var(DATA_SET)))
# the standard deviation is the square root of the variance
print("Standard Deviation:\t {}".format(np.std(DATA_SET)))
# a PERCENTILE returns the value that a given percent of the values in the dataset are less than
# so, if the 75th percentile is 23, 75% of the values in the dataset are less than 23
print("25th Percentile:\t {}".format(np.percentile(DATA_SET, 25)))
print("50th Percentile:\t {}".format(np.percentile(DATA_SET, 50)))
print("75th Percentile:\t {}".format(np.percentile(DATA_SET, 75)))
# machine learning often deals with large amounts of data, so it is useful to be able to generate large datasets for testing
RAND_DATA_SET_20 = np.random.uniform(0,10,20)
print("Randomly Generated Dataset of 50 Values: \n{}".format(RAND_DATA_SET_20))
# set up multiple charts
fig, axs = plt.subplots(2)
# sometimes it is useful to have a dataset based around a certain mean value with a certain standard deviation
# this is a NORMAL DISTRIBUTION. see the histograms below to compare how 250 values from a normal distribution differ from 250 uniformly random ones
# to visualize the values in a dataset, we can use a HISTOGRAM
# the histogram is a bar chart that shows how many values in the dataset fall into each interval
# below, we make a histogram with 15 intervals dividing the range from 0 to 10
RAND_DATA_SET_250 = np.random.uniform(0,10,250)
NORM_DATA_SET_250 = np.random.normal(5,1,250)
axs[0].hist(RAND_DATA_SET_250, 15, alpha=0.5)
axs[0].hist(NORM_DATA_SET_250, 15, alpha=0.5)
# another useful visualization, a SCATTER PLOT
axs[0].scatter(RAND_DATA_SET_250, NORM_DATA_SET_250)
# the term REGRESSION is used when we try to find the relationship between 2 variables
# this relationship can be used to predict future trends
# LINEAR REGRESSION, for example, is when we find a linear relationship between the x and y variables
# below we get the slope and y-intercept of the linear relationship to chart the line
# we also get the R-VALUE, from -1 to 1, where 0 indicates no relationship
# in such a case, there may be no relationship or linear regression may not be the best regression model
slope, intercept, r_value, p_value, std_error = stats.linregress(RAND_DATA_SET_250, NORM_DATA_SET_250)
print("Relationship between random values on the x-axis and normal values on the y axis:\n\t {}\
[close to zero]".format(r_value))
# if there is a strong relationship, we can roughly predict future y-values given an x-value with the function below:
def getPoint(x):
return slope * x + intercept
print("We predict that when x is 3, y is {}.\t[close to five]".format(getPoint(3)))
# now we will create a list of points that fall on the linear relationship within the range of our scatterplot
LINE = list(map(getPoint, RAND_DATA_SET_250))
axs[0].plot(RAND_DATA_SET_250, LINE)
# POLYNOMIAL REGRESSION can be used for nonlinear relationships, as with the example below:
time = list(range(0,16))
speed = [90,87,82,89,76,65,40,34,46,49,52,40,56, 67, 89, 92]
axs[1].scatter(time, speed)
POLYNOMIAL_MODEL = np.poly1d(np.polyfit(time, speed, 3))
POLYNOMIAL_LINE = np.linspace(time[0], len(time), 100)
axs[1].plot(POLYNOMIAL_LINE, POLYNOMIAL_MODEL(POLYNOMIAL_LINE))
print("\n\nScatterplot Values: ")
for x in range(0,16):
print("[{}, {}] ".format(x, speed[x]), end="")
print("\n\nPolynomial regression line equation:\n{}\n".format(POLYNOMIAL_MODEL))
# the strength of a relationship in polynomial regression is measured using the R^2 VALUE [0 to 1, where 0 = no relationship and 1 = a perfect fit]
print("R^2 Value:\t\t {}".format(r2_score(speed, POLYNOMIAL_MODEL(time))))
# if the relationship is strong enough we can accurately predict future values, like so:
print("At time 16, the speed should be around {}".format(POLYNOMIAL_MODEL(16)))
plt.show()
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__title__ = ''
__author__ = 'zhangjingjun'
__mtime__ = '2018/3/19'
# ----------Dragon be here!----------
┏━┓ ┏━┓
┏━┛ ┻━━━━━━┛ ┻━━┓
┃ ━ ┃
┃ ━┳━┛ ┗━┳━ ┃
┃ ┻ ┃
┗━━━┓ ┏━━━━┛
┃ ┃Divine beast, bless us:
┃ ┃no bugs, ever!
┃ ┗━━━━━━━━━┓
┃ ┣━┓
┃ ┏━┛
┗━━┓ ┓ ┏━━━┳━┓ ┏━┛
┃ ┫ ┫ ┃ ┫ ┫
┗━┻━┛ ┗━┻━┛
"""
from django.urls import re_path
from app02 import views
from django.conf import settings
from django.conf.urls.static import static
app_name = 'app02'
urlpatterns = [
re_path('^index$', views.index, {'name':'root'}),
re_path('^testurl$', views.testurl),
re_path(r'^(?P<pk>\d+)/', views.detail,name='detail'),
] |
import pandas as pd
import numpy as np
prices = pd.read_csv('data/stock_prices.csv')
print(prices.head())
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ipruleUI.ui'
#
# Created by: PyQt5 UI code generator 5.15.2
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(613, 523)
self.centralwidget = QtWidgets.QWidget(MainWindow)
self.centralwidget.setObjectName("centralwidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName("horizontalLayout")
self.tabWidget = QtWidgets.QTabWidget(self.centralwidget)
self.tabWidget.setObjectName("tabWidget")
self.tab = QtWidgets.QWidget()
self.tab.setObjectName("tab")
self.pushButton_4 = QtWidgets.QPushButton(self.tab)
self.pushButton_4.setGeometry(QtCore.QRect(90, 20, 131, 41))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
self.pushButton_4.setFont(font)
self.pushButton_4.setObjectName("pushButton_4")
self.pushButton_5 = QtWidgets.QPushButton(self.tab)
self.pushButton_5.setGeometry(QtCore.QRect(340, 20, 131, 41))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
self.pushButton_5.setFont(font)
self.pushButton_5.setObjectName("pushButton_5")
self.pushButton_2 = QtWidgets.QPushButton(self.tab)
self.pushButton_2.setGeometry(QtCore.QRect(90, 80, 131, 41))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
self.pushButton_2.setFont(font)
self.pushButton_2.setObjectName("pushButton_2")
self.pushButton_3 = QtWidgets.QPushButton(self.tab)
self.pushButton_3.setGeometry(QtCore.QRect(340, 80, 131, 41))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
self.pushButton_3.setFont(font)
self.pushButton_3.setObjectName("pushButton_3")
self.tableWidget = QtWidgets.QTableWidget(self.tab)
self.tableWidget.setGeometry(QtCore.QRect(9, 129, 571, 301))
self.tableWidget.setObjectName("tableWidget")
self.tableWidget.setColumnCount(11)
self.tableWidget.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(2, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(3, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(4, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(5, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(6, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(7, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(8, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(9, item)
item = QtWidgets.QTableWidgetItem()
self.tableWidget.setHorizontalHeaderItem(10, item)
self.tabWidget.addTab(self.tab, "")
self.tab_2 = QtWidgets.QWidget()
self.tab_2.setObjectName("tab_2")
self.label_5 = QtWidgets.QLabel(self.tab_2)
self.label_5.setGeometry(QtCore.QRect(280, 140, 61, 21))
self.label_5.setObjectName("label_5")
self.lineEdit_5 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_5.setGeometry(QtCore.QRect(360, 190, 113, 20))
self.lineEdit_5.setObjectName("lineEdit_5")
self.label_2 = QtWidgets.QLabel(self.tab_2)
self.label_2.setGeometry(QtCore.QRect(30, 190, 71, 21))
self.label_2.setObjectName("label_2")
self.label_8 = QtWidgets.QLabel(self.tab_2)
self.label_8.setGeometry(QtCore.QRect(280, 250, 61, 21))
self.label_8.setObjectName("label_8")
self.pushButton = QtWidgets.QPushButton(self.tab_2)
self.pushButton.setGeometry(QtCore.QRect(220, 340, 141, 41))
font = QtGui.QFont()
font.setFamily("黑体")
font.setPointSize(12)
self.pushButton.setFont(font)
self.pushButton.setObjectName("pushButton")
self.lineEdit_2 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_2.setGeometry(QtCore.QRect(110, 190, 113, 20))
self.lineEdit_2.setObjectName("lineEdit_2")
self.label_3 = QtWidgets.QLabel(self.tab_2)
self.label_3.setGeometry(QtCore.QRect(30, 250, 61, 21))
self.label_3.setObjectName("label_3")
self.lineEdit = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit.setGeometry(QtCore.QRect(110, 140, 113, 20))
self.lineEdit.setObjectName("lineEdit")
self.label = QtWidgets.QLabel(self.tab_2)
self.label.setGeometry(QtCore.QRect(30, 140, 61, 21))
self.label.setObjectName("label")
self.lineEdit_3 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_3.setGeometry(QtCore.QRect(110, 250, 113, 20))
self.lineEdit_3.setObjectName("lineEdit_3")
self.label_7 = QtWidgets.QLabel(self.tab_2)
self.label_7.setGeometry(QtCore.QRect(280, 190, 71, 21))
self.label_7.setObjectName("label_7")
self.lineEdit_4 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_4.setGeometry(QtCore.QRect(360, 140, 113, 20))
self.lineEdit_4.setObjectName("lineEdit_4")
self.lineEdit_6 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_6.setGeometry(QtCore.QRect(360, 250, 113, 20))
self.lineEdit_6.setObjectName("lineEdit_6")
self.label_4 = QtWidgets.QLabel(self.tab_2)
self.label_4.setGeometry(QtCore.QRect(30, 90, 61, 21))
self.label_4.setObjectName("label_4")
self.lineEdit_7 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_7.setGeometry(QtCore.QRect(110, 90, 113, 20))
self.lineEdit_7.setObjectName("lineEdit_7")
self.label_6 = QtWidgets.QLabel(self.tab_2)
self.label_6.setGeometry(QtCore.QRect(280, 90, 61, 21))
self.label_6.setObjectName("label_6")
self.lineEdit_8 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_8.setGeometry(QtCore.QRect(360, 90, 113, 20))
self.lineEdit_8.setObjectName("lineEdit_8")
self.label_18 = QtWidgets.QLabel(self.tab_2)
self.label_18.setGeometry(QtCore.QRect(270, 290, 81, 21))
self.label_18.setObjectName("label_18")
self.lineEdit_9 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_9.setGeometry(QtCore.QRect(110, 290, 113, 21))
self.lineEdit_9.setObjectName("lineEdit_9")
self.label_19 = QtWidgets.QLabel(self.tab_2)
self.label_19.setGeometry(QtCore.QRect(20, 290, 81, 21))
self.label_19.setObjectName("label_19")
self.lineEdit_10 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_10.setGeometry(QtCore.QRect(360, 290, 113, 20))
self.lineEdit_10.setObjectName("lineEdit_10")
self.lineEdit_11 = QtWidgets.QLineEdit(self.tab_2)
self.lineEdit_11.setGeometry(QtCore.QRect(110, 40, 113, 20))
self.lineEdit_11.setText("")
self.lineEdit_11.setObjectName("lineEdit_11")
self.label_20 = QtWidgets.QLabel(self.tab_2)
self.label_20.setGeometry(QtCore.QRect(30, 40, 61, 21))
self.label_20.setObjectName("label_20")
self.tabWidget.addTab(self.tab_2, "")
self.horizontalLayout.addWidget(self.tabWidget)
MainWindow.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 613, 23))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(MainWindow)
self.statusbar.setObjectName("statusbar")
MainWindow.setStatusBar(self.statusbar)
self.retranslateUi(MainWindow)
self.tabWidget.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "IP拦截"))
self.pushButton_4.setText(_translate("MainWindow", "开启ip拦截"))
self.pushButton_5.setText(_translate("MainWindow", "关闭ip拦截"))
self.pushButton_2.setText(_translate("MainWindow", "查询规则"))
self.pushButton_3.setText(_translate("MainWindow", "删除规则"))
item = self.tableWidget.horizontalHeaderItem(0)
item.setText(_translate("MainWindow", "id"))
item = self.tableWidget.horizontalHeaderItem(1)
item.setText(_translate("MainWindow", "protocol"))
item = self.tableWidget.horizontalHeaderItem(2)
item.setText(_translate("MainWindow", "direction"))
item = self.tableWidget.horizontalHeaderItem(3)
item.setText(_translate("MainWindow", "srcip"))
item = self.tableWidget.horizontalHeaderItem(4)
item.setText(_translate("MainWindow", "srcipmask"))
item = self.tableWidget.horizontalHeaderItem(5)
item.setText(_translate("MainWindow", "srcport"))
item = self.tableWidget.horizontalHeaderItem(6)
item.setText(_translate("MainWindow", "srcportmask"))
item = self.tableWidget.horizontalHeaderItem(7)
item.setText(_translate("MainWindow", "dstip"))
item = self.tableWidget.horizontalHeaderItem(8)
item.setText(_translate("MainWindow", "dstipmask"))
item = self.tableWidget.horizontalHeaderItem(9)
item.setText(_translate("MainWindow", "dstport"))
item = self.tableWidget.horizontalHeaderItem(10)
item.setText(_translate("MainWindow", "dstportmask"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("MainWindow", "查询删除规则"))
self.label_5.setText(_translate("MainWindow", "dst ip"))
self.lineEdit_5.setText(_translate("MainWindow", "255.255.255.0"))
self.label_2.setText(_translate("MainWindow", "src ip mask"))
self.label_8.setText(_translate("MainWindow", "dst port"))
self.pushButton.setText(_translate("MainWindow", "提交"))
self.lineEdit_2.setText(_translate("MainWindow", "0.0.0.0"))
self.label_3.setText(_translate("MainWindow", "src port"))
self.lineEdit.setText(_translate("MainWindow", "0.0.0.0"))
self.label.setText(_translate("MainWindow", "src ip"))
self.lineEdit_3.setText(_translate("MainWindow", "0"))
self.label_7.setText(_translate("MainWindow", "dst ip mask"))
self.lineEdit_4.setText(_translate("MainWindow", "202.115.54.0"))
self.lineEdit_6.setText(_translate("MainWindow", "80"))
self.label_4.setText(_translate("MainWindow", "protocol"))
self.lineEdit_7.setText(_translate("MainWindow", "6"))
self.label_6.setText(_translate("MainWindow", "direction"))
self.lineEdit_8.setText(_translate("MainWindow", "2"))
self.label_18.setText(_translate("MainWindow", "dst port mask"))
self.lineEdit_9.setText(_translate("MainWindow", "0"))
self.label_19.setText(_translate("MainWindow", "src port mask"))
self.lineEdit_10.setText(_translate("MainWindow", "65535"))
self.label_20.setText(_translate("MainWindow", "id"))
self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("MainWindow", "增加规则"))
|
#!/usr/local/bin/python3
# coding: UTF-8
# Author: David
# Email: youchen.du@gmail.com
# Created: 2017-02-17 08:24
# Last modified: 2017-02-17 14:26
# Filename: items.py
# Description:
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ShoeCommentItem(scrapy.Item):
# define the fields for your item here like:
# name = scrapy.Field()
_id = scrapy.Field()
iid = scrapy.Field()
uid = scrapy.Field()
creation_time = scrapy.Field()
score = scrapy.Field()
user_province = scrapy.Field()
user_level = scrapy.Field()
color = scrapy.Field()
size = scrapy.Field()
class ShoeDetailItem(scrapy.Item):
iid = scrapy.Field()
name = scrapy.Field()
shop = scrapy.Field()
scores = scrapy.Field()
|
import time
import pandas as pd
import numpy as np
from scipy import stats
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
months = ['january', 'february', 'march', 'april', 'may', 'june', 'all']
days = ['monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday', 'sunday', 'all']
def get_filters():
"""
Asks user to specify a city, month, and day to analyze.
Returns:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
"""
print('Hello! Let\'s explore some US bikeshare data!')
try:
city = input('Would you like to see data for Chicago, New York City, or Washington?: ').lower()
while city not in CITY_DATA:
print('Sorry! You must enter Chicago, New York City, or Washington. Try again!')
city = input('Would you like to see data for Chicago, New York City, or Washington?: ').lower()
month = input('Which month would you like to look at? (January-June, or all?): ').lower()
while month not in months:
print('Sorry! You must enter a valid month between January-June, or all. Try again!')
month = input('Which month would you like to look at? (January-June, or all?): ').lower()
day = input('Which day would you like to look at? (Monday-Sunday, or all?): ').lower()
while day not in days:
print('Sorry! You must enter a valid day (Monday-Sunday, or all): ')
day = input('Which day would you like to look at? (Monday-Sunday, or all?): ').lower()
return city, month, day
except Exception as e:
print('Cannot compute from given inputs. Error: {}'.format(e))
print('-'*40)
<<<<<<< HEAD
#take input from user to obtain city, month, day
||||||| 8578635
=======
>>>>>>> documentation
def load_data(city, month, day):
"""
Loads data for the specified city and filters by month and day if applicable.
Args:
(str) city - name of the city to analyze
(str) month - name of the month to filter by, or "all" to apply no month filter
(str) day - name of the day of week to filter by, or "all" to apply no day filter
Returns:
df - Pandas DataFrame containing city data filtered by month and day
"""
try:
#convert to datetime in order to be able to sort and filter through the DataFrame
df = pd.read_csv(CITY_DATA[city])
df['Start Time'] = pd.to_datetime(df['Start Time'])
df['End Time'] = pd.to_datetime(df['End Time'])
df['month'] = df['Start Time'].dt.month
df['day'] = df['Start Time'].dt.weekday_name
df['hour'] = df['Start Time'].dt.hour
if month != 'all': #if user chooses a month, filter DataFrame with given month
months_list = ['january', 'february', 'march', 'april', 'may', 'june']
month = months_list.index(month) + 1
df = df[df['month'] == month]
if day != 'all': #if user chooses a day, filter DataFrame with given day
df = df[df['day'] == day.title()]
return df
except Exception as e:
print('Cannot load. Error: {}'.format(e))
def time_stats(df, city):
#provide user info on times of travel in given city
print('\nCalculating The Most Frequent Times of Travel...\n')
start_time = time.time()
#find the month number and name that appears the most in the Start Time column in the DataFrame
try:
most_common_month_num = df['Start Time'].dt.month.mode()[0]
most_common_month_name = months[most_common_month_num-1].title()
print('Most common month to rent bikes in ', city, ': ', most_common_month_name)
except Exception as e:
print('Cannot find most common month. Error: {}'.format(e))
#find the day that appears the most in the Start Time column in the DataFrame
try:
most_common_day = df['day'].mode()[0]
print('Most common day to rent bikes in ', city, ' is: ', most_common_day)
except Exception as e:
print('Cannot find most common day. Error: {}'.format(e))
#find the hour that appears the most in the Start Time column in the DataFrame
try:
most_common_hour = df['hour'].mode()[0]
print('Most common hour to rent bikes in ', city, ' is: ', most_common_hour)
except Exception as e:
print('Cannot find most common hour. Error: {}'.format(e))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def station_stats(df, city):
#provide user info on stations/routes for given city
print('\nCalculating The Most Popular Stations and Trip...\n')
start_time = time.time()
#find the most common start station that bikers use by sorting through the Start Station column in the df
try:
most_common_start_station = df['Start Station'].mode()[0]
most_common_start_station_count = df['Start Station'].value_counts()[0]
print('Most common start station in ', city, ' is: ', most_common_start_station, '\n', most_common_start_station, 'was used: ', most_common_start_station_count, ' times')
except Exception as e:
print('Cannot find most common start station. Error: {}'.format(e))
#find the most common end station that bikers use by sorting through the End Station column in the df
try:
most_common_end_station = df['End Station'].mode()[0]
most_common_end_station_count = df['End Station'].value_counts()[0]
print('Most common end station in ', city, ' is: ', most_common_end_station, '\n', most_common_end_station, 'was used: ', most_common_end_station_count, ' times')
except Exception as e:
print('Cannot find most common end station. Error: {}'.format(e))
#find the most common route bikers follow by looking at Start to End Stations and finding the most common routes in the df
try:
most_common_route = df.loc[:, 'Start Station':'End Station'].mode()[0:]
most_common_route_count = df.groupby(["Start Station", "End Station"]).size().max()
print('Most common route in ', city, ' is: ',most_common_route, '\n', most_common_route, 'was used: ', most_common_route_count, ' times')
except Exception as e:
print('Cannot find most common route. Error: {}'.format(e))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def trip_duration_stats(df, city):
"""Displays statistics on the total and average trip duration."""
print('\nCalculating Trip Duration...\n')
start_time = time.time()
#find the duration by creating a new column of the difference of End Time and Start Time from the df
try:
df['Duration'] = df['End Time'] - df['Start Time']
total_duration = df['Duration'].sum()
print('Total duration: ', total_duration)
except Exception as e:
print('Cannot find total duration. Error: {}'.format(e))
#find the average duration by simply taking the mean of the new Duration column in the df
try:
average_duration = df['Duration'].mean()
print('Mean duration: ', average_duration)
except Exception as e:
print('Cannot find mean duration. Error: {}'.format(e))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def user_stats(df, city):
"""Displays statistics on bikeshare users."""
print('\nCalculating User Stats...\n')
start_time = time.time()
#find the count of user types from the User Type column in the df
try:
print('Count and types of users in ', city, ': ', df['User Type'].value_counts())
except Exception as e:
print('Cannot find count and types of users. Error: {}'.format(e))
#find the count of each gender from the Gender columns in the df
try:
print('Count and genders of users in ', city, ': ', df['Gender'].value_counts())
except Exception as e:
print('Cannot find count and genders of users. Error: {}'.format(e))
#find the earliest, most recent, and most common birth year from of the Birth Year column in the df
try:
earliest_yob = df['Birth Year'].min()
most_recent_yob = df['Birth Year'].max()
most_common_yob = df['Birth Year'].mode()
print('Oldest user in ', city, 'born in: ', int(earliest_yob), '\n Youngest user in ', city, 'born in: ', int(most_recent_yob),'\n Most common birth year: ', int(most_common_yob))
except Exception as e:
print('Cannot find earliest birth year, most recent birth year, or most common birth year. Error: {}'.format(e))
print("\nThis took %s seconds." % (time.time() - start_time))
print('-'*40)
def raw_data(df, city):
count = 0
answer = input('Would you like to see 5 lines of raw data? Enter yes or no: ').lower()
while True:
if answer == 'yes':
print(df[count: count + 5])
count += 5
else:
break
answer = input("Would you like to see five more lines of raw data? Enter yes or no").lower()
def main():
while True:
city, month, day = get_filters()
df = load_data(city, month, day)
time_stats(df, city)
station_stats(df, city)
trip_duration_stats(df, city)
user_stats(df, city)
raw_data(df, city)
restart = input('\nWould you like to restart? Enter yes or no.\n')
if restart.lower() != 'yes':
break
if __name__ == "__main__":
main()
|
import types
from actionnetwork_activist_sync.sync import sync
def lambda_handler(event, context):
sync()
return {
'statusCode': 200,
'body': 'Sync Complete'
}
if __name__ == '__main__':
lambda_handler({}, types.SimpleNamespace())
|
import numpy as np
import mdtraj as md
import pickle
def load_trajectory():
xyz_file = 'dft_traj.xyz'
pdb_file = '../300K_water128_1.pdb'
print(f'Loading trajectory from {xyz_file}.....')
traj = md.load_xyz(xyz_file, pdb_file)
print(f"Number of frames {traj.n_frames}")
print(f'Number of atoms {traj.n_atoms}')
return traj
def load_charges():
nframes = 100000
natoms = 384
charges = np.zeros((natoms, nframes), dtype=np.float64)
file_header = '/scratch/users/scguo/dftb-dft-dipoles/data/data-'
file_name = '/detailed.out'
for i in range(nframes):
full_name = "{file_header}{i}{file_name}"
with open(full_name, mode='r') as f:
for j, line in enumerate(f):
if 15 <= j <= 398:
# line has format "Atom Charge"
charges[j - 15, i] = float(line.split()[1])
return charges
def main():
traj = load_trajectory()
charges = load_charges()
dipoles = md.dipole_moments(traj, charges)
with open('dipoles.pickle', 'wb') as f:
    pickle.dump(dipoles, f)
if __name__ == "__main__":
main()
|
## pinCyper.py
## Author: nmessa
## This is a Julius Caesar cipher for PIN numbers
#This function encrypts your PIN
def PINCypher(pin, shift):
    cypherText = ""
    #Shift each digit by the key, wrapping around with mod 10
    for digit in pin:
        cypherText += str((int(digit) + shift) % 10)
    return cypherText
#This function will calculate your encryption key based on your last name
def calcShift(name):
    #One simple choice: sum the character codes of the name, mod 10
    return sum(ord(ch) for ch in name) % 10
#This function will decode an encrypted PIN
def PINDecode(ePin, shift):
    #Reverse the shift applied by PINCypher
    plainText = ""
    for digit in ePin:
        plainText += str((int(digit) - shift) % 10)
    return plainText
#Test code
pin = input("Enter your 5 digit PIN: ")
name = input("Enter your last name: ")
shift = calcShift(name)
ePin = PINCypher(pin, shift)
print("Your encrypted PIN is", ePin)
oPin = PINDecode(ePin, shift)
print("Your original PIN was", oPin)
|
import numpy as np
import matplotlib.pyplot as plt
import scipy.sparse as sp
import scipy.sparse.linalg as la
from functools import partial
import time
# Create the matrix as described in the report
def make_L(Nx, Ny):
Dx = sp.diags((Nx-1)*[1])
Dx += sp.diags((Nx-2)*[-1],-1)
rowx = sp.csr_matrix((1,Nx-1))
rowx[0,-1] = -1
Dx = sp.vstack((Dx, rowx))
Lx = Dx.transpose().dot(Dx)
Dy = sp.diags((Ny-1)*[1])
Dy += sp.diags((Ny-2)*[-1],-1)
rowy = sp.csr_matrix((1,Ny-1))
rowy[0,-1] = -1
Dy = sp.vstack((Dy, rowy))
Ly = Dy.transpose().dot(Dy)
#kronsum assembles the 2-D operator from the two 1-D ones; cleaner than building the Kronecker products with identity matrices by hand
return sp.kronsum(Lx,Ly)
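# A quick illustration (a hedged sanity check, not used by the solver): for Nx = Ny = 3
# there are (Nx-1)*(Ny-1) = 4 interior unknowns, and make_L(3, 3).toarray() is the
# classic 5-point Laplacian stencil with Dirichlet boundaries:
#   [[ 4., -1., -1.,  0.],
#    [-1.,  4.,  0., -1.],
#    [-1.,  0.,  4., -1.],
#    [ 0., -1., -1.,  4.]]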
def discretize(x_d, y_d, h):
nx = int(x_d/h)
ny = int(y_d/h)
return make_L(nx,ny)/h/h
def get_grid(x_d, y_d, h):
grid = np.mgrid[h:y_d:h, h:x_d:h]
return (grid[1,:,:], grid[0,:,:])
def source(xx,yy):
a = -10
b = 5
return np.exp(a*(np.square(xx-b)+np.square(yy-b)))
def sourcevec(xx,yy):
return np.reshape(source(xx,yy), (xx.shape[0]*xx.shape[1]))
imshow = partial(plt.imshow)
#Domain is (0,0)x(x,y)
x = 10
y = 10
#choose grid spacing
h = 0.1
gamma = [-40, 0, 40]
solutions = []
sources = []
residues = []
for i in range(3):
# Creating grid, L
grid = get_grid(x,y,h)
L = discretize(x,y,h)
L = L - gamma[i] * sp.eye(L.shape[0])
#Creation of the source vector
sv = sourcevec(*grid)
#Solving the system
residuals = []
def cb(rk):
print("\rIn iteration number %4d, rk is %1.5e"%(len(residuals)+1,rk), end="")
residuals.append(rk)
start = time.time()
solution, succ = la.gmres(L, sv, maxiter=5000, restart=5000, tol=1e-12, callback=cb)
# check if GMRES converged
if succ == 0:
print("\nGMRES converged")
elif succ > 0:
print("\nGMRES did not reach the given tolerance within the maximum number of iterations")
else:
print("\nGMRES failed: illegal input or breakdown")
print("GMRES took %3.2fs"%(time.time()-start))
residues.append(residuals)
solutions.append(solution)
sources.append(sv)
print("This should be small:", np.linalg.norm(sv-L@solution)/np.linalg.norm(sv)-residuals[-1])
start = time.time()
solution = la.spsolve(L, sv)
print("spsolve took %3.2fs"%(time.time()-start))
#Showing source then Solution
reshaper = lambda u: np.reshape(u, [grid[0].shape[0], grid[0].shape[1]])[::-1,:]
for i in range(3):
plt.semilogy(residues[i], label=r"$\gamma=$%d"%(gamma[i]))
plt.legend()
plt.ylabel("Residual")
plt.xlabel("Iteration")
plt.show()
fig = plt.figure()
plt.subplot(2,2,1)
plt.title("Source function")
plt.imshow(reshaper(sources[0]))
plt.colorbar()
for i in range(3):
plt.subplot(2,2,i+2)
plt.title("$\gamma=$%d"%(gamma[i]))
plt.imshow(reshaper(solutions[i]))
plt.colorbar()
plt.show() |
from flask import Flask, request
from flask_restful import Resource, Api
from sqlalchemy import create_engine
from flask_jsonpify import jsonify
import hashlib as hasher
import time
db_connect = create_engine('sqlite:///chinook.db')
app = Flask(__name__)
api = Api(app)
class AddData(Resource):
def post(self):
"""Adding data to SQLite. If number of rows has reached five create block"""
conn = db_connect.connect()
print(request.json)
data = request.json['data']
conn.execute("insert into data values(null,'{0}')".format(data))
number_of_data = conn.execute("SELECT COUNT (*) AS number FROM data")
for row in number_of_data:
if row["number"] % 5 == 0:
create_block()
return {'status': 'success'}
class ReturnBlocks(Resource):
"""Return list of "N" last blocks. Use http://127.0.0.1:5002/last_blocks/N"""
def get(self, number_of_blocks):
conn = db_connect.connect()
query = conn.execute("SELECT * FROM blocks ORDER BY timestamp DESC limit %d" % int(number_of_blocks))
result = {'data': [dict(zip(tuple(query.keys()), i)) for i in query.cursor]}
return jsonify(result)
def create_block():
"""Create any NOT genesis block"""
data = ""
conn = db_connect.connect()
previous_conn = conn.execute("SELECT block_hash FROM blocks ORDER BY timestamp DESC limit 1")
for row in previous_conn:
previous = row["block_hash"]
data_conn = conn.execute("SELECT Data FROM (SELECT * FROM data ORDER BY DataId DESC limit 5) ORDER BY DataId ASC")
for row in data_conn:
data += row["Data"] + "; "
timestamp = int(time.time())
hash = hash_block(timestamp, data, previous)
conn.execute("insert into blocks values('{0}','{1}','{2}','{3}')"
.format(previous, data, timestamp, hash))
def hash_block(timestamp, data, previous):
"""Create hash (sha256) for given block"""
sha = hasher.sha256()
sha.update((str(timestamp) + str(data) + str(previous)).encode('utf-8'))
return sha.hexdigest()
def create_genesis_block():
"""Create initial "genesis" block"""
conn = db_connect.connect()
hash = hash_block(int(time.time()), "Genesis Block", "0")
conn.execute("insert into blocks values('{0}','{1}','{2}','{3}')"
.format("0", "Genesis Block", int(time.time()), hash))
api.add_resource(ReturnBlocks, '/last_blocks/<number_of_blocks>') # Route_1
api.add_resource(AddData, '/add_data') # Route_2
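# Example usage once the server is running (hypothetical data value):
#   curl -X POST -H "Content-Type: application/json" -d '{"data": "hello"}' http://127.0.0.1:5002/add_data
#   curl http://127.0.0.1:5002/last_blocks/3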
# check whether any block exists and create the genesis block if not
conn = db_connect.connect()
number_of_blocks = conn.execute("SELECT COUNT (*) AS number FROM blocks")
for row in number_of_blocks:
if row["number"] == 0:
create_genesis_block()
if __name__ == '__main__':
app.run(port=5002) |
import time
import os
import pytest
from niveristand.legacy import NIVeriStand
from niveristand.legacy.NIVeriStand import NIVeriStandException
from tests.testutilities import configutilities
def sleep():
time.sleep(1)
def test_workspace_api():
TEST_ID = 124123
wks = NIVeriStand.Workspace()
print("")
SYSDEFINITION = os.path.join(configutilities.get_autotest_projects_path(),
"TestWorkspaceAPI",
"TestWorkspaceAPI.nivssdf")
print("Deploying %s" % SYSDEFINITION)
wks.RunWorkspaceFile(SYSDEFINITION, 0, 1, 80000, "", "")
try:
# Verify the TEST_ID var on test file.
test_ID = wks.GetSingleChannelValue("TEST_ID")
assert test_ID == TEST_ID, "Deployed wrong test file"
result = wks.GetEngineState()
assert result['systemdefinition_file'] == SYSDEFINITION, "Workspace file is not the same as deployed"
wks.LockWorkspaceFile("", 'LOCK_PASSWORD')
with pytest.raises(NIVeriStandException):
wks.StopWorkspaceFile("")
with pytest.raises(NIVeriStandException):
wks.UnlockWorkspaceFile("")
wks.UnlockWorkspaceFile('LOCK_PASSWORD')
print("Get System Node Childern")
result = wks.GetSystemNodeChildren(r"Controller/Simulation Models/Models/sinewave/Execution")
assert len(result) == 4, "Model Execution should return 4 nodes"
print("Get System Node Channel List")
result = wks.GetSystemNodeChannelList('')
assert len(result) >= 100, "At the very least we always have 100 channels"
print(result[2])
print("Get Alias List")
result = wks.GetAliasList()
assert len(result) == 3, "Expected 3 aliases but got %d" % len(result)
assert result['TEST_ID'] == r"Targets Section/Controller/User Channel/TEST_ID", "Alias for TEST_ID is incorrect"
nodes = ('Controller/User Channel', 'Controller/User Channel/TEST_ID')
result = wks.GetMultipleSystemNodesData(nodes)
assert len(result) == 2, "Asked for info on 2 nodes but got a different count"
print("Validating channels")
section = result[0]
print(section)
assert section['isChannel'] is False, "Expecting a section to be returned"
testNode = result[1]
print(testNode)
assert testNode['isChannel'] is True, "Expecting a node to be returned"
print("Test PASSED")
print("")
# Report your result here
assert True
finally:
wks.StopWorkspaceFile("")
|
#!/usr/bin/env python
# Authors: Huy Pham, Emile Shehada, Shane Stahlheber
# Date: July 11, 2017
# Bacterial Growth Simulation Project
from __future__ import print_function
import argparse
import os
import sys
import time
from multiprocessing import Lock, cpu_count
# To prevent crashing during a keyboard interrupt (must be before numpy/scipy)
os.environ['FOR_DISABLE_CONSOLE_CTRL_HANDLER'] = '1'
import cv2
import numpy as np
from scipy import misc
from constants import Config, Globals
from findconsistentpaths import create_consistent
from helperMethods import (collision_matrix, deepcopy_list, find_k_best_moves,
generate_image_edge_cv2, generate_universes,
get_frames, improve, init_space, process_init,
write_state)
from mphelper import InterruptablePool, kwargs
__version__ = "2.2"
def main():
default_processes = max(cpu_count() // 2, 1)
parser = argparse.ArgumentParser(description="Cell-Universe Cell Tracker.")
parser.add_argument("-f", "--frames",
metavar="DIR",
type=str,
default='frames',
help="directory of the frames (default: 'frames')")
parser.add_argument("-v", "--version",
action="version",
version="%(prog)s {}".format(__version__))
parser.add_argument("-s", "--start",
metavar="FRAME",
type=int,
default=0,
help="start from specific frame (default: 0)")
parser.add_argument("-p", "--processes",
metavar="COUNT",
type=int,
default=default_processes,
help="number of concurrent processes to run "
"(default: {})".format(default_processes))
parser.add_argument("-o", "--output",
metavar="OUTDIR",
type=str,
default="Output",
help="directory of output states and images "
"(default: 'Output')")
parser.add_argument("initial",
type=str,
help="initial properties file ('example.init.txt')")
cli_args = parser.parse_args()
# get starting frame
t = cli_args.start
frames_dir = cli_args.frames
output_dir = cli_args.output
frames = get_frames(frames_dir, t)
# Initialize the Space S
with open(cli_args.initial, 'r') as file:
S = init_space(t, file)
# Creating directories
image_dirs = []
state_dirs = []
for i in range(Config.K):
image_dirs.append(os.path.join(output_dir, Config.images_dir, str(i)))
if not os.path.exists(image_dirs[i]):
os.makedirs(image_dirs[i])
state_dirs.append(os.path.join(output_dir, Config.states_dir, str(i)))
if not os.path.exists(state_dirs[i]):
os.makedirs(state_dirs[i])
# Creating a pool of processes
lock = Lock()
pool = InterruptablePool(cli_args.processes,
initializer=process_init,
initargs=(lock,
Globals.image_width,
Globals.image_height,
cli_args.initial))
# Processing
for frame in frames:
print("Processing frame {}...".format(t))
sys.stdout.flush()
start = time.time()
frame_array = (cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY) > 0)*np.int16(1)
# generate the list of arguments for find_k_best_moves_mapped
args = []
for index, U in enumerate(S):
# calculate U's matrix for collision detection
M = collision_matrix(U)
for bacterium_index in range(len(U)):
args.append(kwargs(
U=deepcopy_list(U), # deep copy of universes
frame_array=frame_array, # current image
index=index, # index of universe
i=bacterium_index, # index of bacterium
count=len(S), # number of universes
M=M, # the collision matrix
start=start)) # start time for current frame
# Find best moves for each bacterium in each universe
moves_list = pool.map(find_k_best_moves, args)
# initialize the best move lists
best_moves = [[None for _ in universe] for universe in S]
# organize the best move dictionary into a list
for moves in moves_list:
best_moves[moves[0]][moves[1]] = moves[2]
# generate the list of arguments for generate_universes
args = []
for index, U in enumerate(S):
args.append(kwargs(
U=deepcopy_list(U), # deepcopy of universes
frame_array=frame_array, # current image
index=index, # index of universe
count=len(S), # number of universes
best_moves=best_moves[index], # list of best moves
start=start)) # start time for current frame
# Generate future universes from S
new_S = pool.map(generate_universes, args)
# flatten new_S into a list of universes (S)
S = [universe for universe_list in new_S for universe in universe_list]
# Pulling stage
S.sort(key=lambda x: x[0])
# improve the top 3K universes
k3 = min(3*Config.K, len(S))
args = []
for index in range(k3):
args.append(kwargs(
Si=S[index],
frame_array=frame_array,
index=index,
count=k3,
start=start))
S = pool.map(improve, args)
# pick the K best universes
S.sort(key=lambda x: x[0])
S = S[:Config.K]
# Combine all best-match bacteria into 1 new universe
# best_U = best_bacteria(S, frame_array)
# S.append(best_U)
# Output to files
# runtime = str(int(time.time() - start))
for i, (c, U, index, _) in enumerate(S):
new_frame = np.array(frame)
# TODO: change the name of the output images and place this info
# somewhere else.
# file_name = "{}{} - {} - {} - {}.png".format(image_dirs[i],
# str(t),
# str(c),
# str(index),
# runtime)
image_filename = "{}.png".format(t)
image_path = os.path.join(image_dirs[i], image_filename)
generate_image_edge_cv2(U, new_frame)
misc.imsave(image_path, new_frame)
state_filename = "{}.txt".format(t)
state_path = os.path.join(state_dirs[i], state_filename)
write_state(state_path, index, U)
S = [U for _, U, _, _ in S[:Config.K]]
# next frame
t += 1
# make consistent
print("Making consistent universes...")
create_consistent(cli_args.start, t-1, output_dir)
# finished
print("Finished!")
parser.exit(0)
if __name__ == '__main__':
main()
|
import unittest
from Libreria1 import *
class TestStringMethods(unittest.TestCase):
def test_sumaVect(self):
a = [(1,3),(2,5),(4,2),(3,3)]
b = [(2,2),(1,12),(5,1),(22,0)]
self.assertEqual(sumaVect (a, b), [(3, 5), (3, 17), (9, 3), (25, 3)])
def test_inversaVect(self):
a = [(4,3),(2,0),(9,2),(6,3)]
self.assertEqual(inversaVect (a), [(-4, -3), (-2, 0), (-9, -2), (-6, -3)])
def test_productoEscalarV(self):
a = [(3,4),(14,2),(25,10)]
self.assertEqual(productoEscalarV(a, (1,0)), [(3,4),(14,2),(25,10)])
def test_sumaMatrix(self):
a = [[(6,10),(2,0),(2,2)],[(0,1),(10,10),(4,5)],[(8,16),(11,10),(21,0)]]
b = [[(4,-10),(8,0),(2,4)],[(1,1),(1,1),(-4,-5)],[(0,-8),(0,-10),(0,21)]]
self.assertEqual(sumaMatrix(a, b), [[(10,0),(10,0),(4,6)],[(1,2),(11,11),(0,0)],[(8,8),(11,0),(21,21)]])
def test_inversaMatrix(self):
a = [[(6,10),(2,0),(2,2)],[(0,1),(10,10),(4,5)],[(8,16),(11,10),(21,0)]]
self.assertEqual(inversaMatrix(a), [[(-6,-10),(-2,0),(-2,-2)],[(0,-1),(-10,-10),(-4,-5)],[(-8,-16),(-11,-10),(-21,0)]])
def test_multiescalarMatrix(self):
a = [[(6,10),(2,0),(2,2)],[(0,1),(10,10),(4,5)],[(8,16),(11,10),(21,0)]]
self.assertEqual(multiEscalarMatrix(a, (-1,0)), [[(-6,-10),(-2,0),(-2,-2)],[(0,-1),(-10,-10),(-4,-5)],[(-8,-16),(-11,-10),(-21,0)]])
def test_matrixTrans(self):
a = [[(1,22),(35,55)],[(7,10),(1,1)]]
self.assertEqual(matrixTrans(a), [[(1,22),(7,10)],[(35,55),(1,1)]])
def test_matrixConj(self):
a = [[(1,22),(35,55)],[(7,10),(1,1)]]
self.assertEqual(matrixConj(a), [[(1,-22),(35,-55)],[(7,-10),(1,-1)]])
def test_matrixAdj(self):
a = [[(1,22),(35,55)],[(7,10),(1,1)]]
self.assertEqual(matrixAdj(a), [[(1,-22),(7,-10)],[(35,-55),(1,-1)]])
def test_multiMatrix(self):
a = [[(1,0)],[(2,2)]]
b = [[(3,2),(1,0)]]
self.assertEqual(multiMatrix(b, a), [[(5, 4), (0, 0)]])
def testAccion(self):
a = [[(1,0),(3,1)],[(2,2),(1,1)]]
b = [(1,0),(0,1)]
self.assertEqual(Accion(a, b), [(0,3),(1,3)])
def test_ProductIntVec(self):
a = [(2,1),(3,2),(1,1)]
b = [(5,6),(4,8),(16,1)]
self.assertEqual(ProductIntVec(a, b), (61,8))
def testNorma(self):
a = [(2,1),(3,2),(1,1)]
self.assertEqual(norma(a), 4.47)
def testDistancia(self):
a = [(2,1),(3,2),(1,1)]
b = [(5,6),(4,8),(16,1)]
self.assertEqual(distancia(a,b), 17.2)
def testUni(self):
a = [[(2,1),(3,2)],[(1,1),(4,1)]]
self.assertEqual(Uni(a), False)
def testHerm(self):
a = [[(1,0),(3,20)],[(3,-20),(5,0)]]
self.assertEqual(Herm(a), True)
def testTensor(self):
a = [[(0,0),(1,0)],[(1,0),(0,0)]]
b = [[(4,0),(2,3),(1,1)],[(1,0),(0,1),(1,0)],[(2,2),(3,3),(4,4)]]
self.assertEqual(Tensor(a,b), [[(0, 0), (0, 0), (0, 0), (4, 0), (2, 3), (1, 1)], [(0, 0), (0, 0), (0, 0), (4, 0), (2, 3), (1, 1)]])
if __name__ == '__main__':
unittest.main()
|
import pygame, sys
from random import randint
from pygame.locals import *
from screen import *
from classes import *
from functions import *
from time import sleep
pygame.init()
global screen
clock = pygame.time.Clock()
tick = 15
#images
body = pygame.image.load("body.png")
headleft = pygame.image.load("head.png")
headright = pygame.transform.rotate(headleft, 180)
headup = pygame.transform.rotate(headleft, -90)
headdown = pygame.transform.rotate(headleft, 90)
headlist = [headup, headdown, headleft, headright]
appleicon = pygame.image.load("apple.jpg")
background = pygame.image.load("bg.jpg")
losefont = pygame.font.Font(None,100)
losetext = losefont.render("YOU LOSE!", 2, (0,0,0))
textbox = losetext.get_rect()
X = width/2 - textbox[2]/2
Y = height/2 - textbox[3]/2
scorefont = pygame.font.Font(None, 20)
#variables
direction = 'R'
slow = 60
collision = False
score = 0
changed = False
randx = randint(0,col)
randy = randint(0,row)
snakelist = [Snake(screen, randx * scale, randy * scale, headright),
Snake(screen, (randx - 1) * scale, randy * scale, body),
Snake(screen, (randx - 2) * scale, randy * scale, body),
Snake(screen, (randx - 3) * scale, randy * scale, body),
Snake(screen, (randx - 4) * scale, randy * scale, body),
Snake(screen, (randx - 5) * scale, randy * scale, body)]
apple = Apple(screen, 301, 301, appleicon)
time = 0
k = 1
while k:
clock.tick(tick)
screen.blit(background, (0,0))
k += 1
for event in pygame.event.get():
if event.type == pygame.QUIT:
k = 0
elif event.type == KEYDOWN:
if event.key == K_ESCAPE:
k = 0
elif event.key == K_w and direction !='D':
direction = 'U'
elif event.key == K_s and direction !='U':
direction = 'D'
elif event.key == K_a and direction !='R':
direction = 'L'
elif event.key == K_d and direction !='L':
direction = 'R'
draw_background(screen)
scoretext = scorefont.render("SCORE: "+str(score), 2, (0,0,0))
screen.blit(scoretext, (5,5))
if collision == False:
moveSnake(snakelist,direction, headlist, body)
bound(snakelist)
if eat(snakelist, apple):
score += 10
#speed increase
if score == 300 and changed == False:
tick += 3
changed = True
if score == 600 and changed == True:
tick += 3
changed = False
for i in snakelist:
i.draw()
apple.draw()
if snake_die(snakelist):
collision = True
if collision:
screen.blit(losetext, (X,Y))
pygame.display.flip() |
from __future__ import unicode_literals
from django.apps import AppConfig
class BreachConfig(AppConfig):
name = 'breach'
|
import os
import json
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from torchvision import models
def uncertain_logits_to_probs(logits):
"""Convert explicit uncertainty modeling logits to probabilities P(is_abnormal).
Args:
logits: Input of shape (batch_size, num_tasks * 3).
Returns:
probs: Output of shape (batch_size, num_tasks).
Position (i, j) interpreted as P(example i has pathology j).
"""
b, n_times_d = logits.size()
d = 3
if n_times_d % d:
raise ValueError('Expected logits dimension to be divisible by {}, got size {}.'.format(d, n_times_d))
n = n_times_d // d
logits = logits.view(b, n, d)
probs = F.softmax(logits[:, :, 1:], dim=-1)
probs = probs[:, :, 1]
return probs
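# e.g. (hypothetical numbers): with num_tasks = 1 and logits [[0., 2., 0.]], the slice keeps
# [2., 0.], softmax gives [0.88, 0.12], and the returned P(is_abnormal) is 0.12 -- the mass
# assigned to the third (positive) class relative to the second.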
class Model(nn.Module):
"""Models from TorchVision's GitHub page of pretrained neural networks:
https://github.com/pytorch/vision/tree/master/torchvision/models
"""
def __init__(self, model_fn, task_sequence, model_uncertainty, use_gpu):
super(Model, self).__init__()
self.task_sequence = task_sequence
self.model_uncertainty = model_uncertainty
self.use_gpu = use_gpu
# Set pretrained to False to avoid loading weights which will be overwritten
self.model = model_fn(pretrained=False)
self.pool = nn.AdaptiveAvgPool2d(1)
num_ftrs = self.model.classifier.in_features
if self.model_uncertainty:
num_outputs = 3 * len(task_sequence)
else:
num_outputs = len(task_sequence)
self.model.classifier = nn.Linear(num_ftrs, num_outputs)
self.fmaps = OrderedDict()
for name, module in self.model.named_modules():
# Only put hooks on the target layer
if name == "features":
self.target_module_id = id(module)
module.register_forward_hook(self.save_fmap)
def set_weights_attribute(self, weights):
self.weights = weights
def forward(self, x):
x = self.model.features(x)
x = F.relu(x, inplace=True)
x = self.pool(x).view(x.size(0), -1)
x = self.model.classifier(x)
return x
def save_fmap(self, m, _, output):
if self.use_gpu:
self.fmaps[id(m)] = output
else:
self.fmaps[id(m)] = output.to('cpu')
def infer(self, x, tasks):
with torch.set_grad_enabled(True):
self.model.zero_grad()
preds = self(x)
get_probs = uncertain_logits_to_probs if self.model_uncertainty else torch.sigmoid
probs = get_probs(preds)[0]
fmaps = self.fmaps[self.target_module_id]
task2prob_cam = {}
for task in tasks:
idx = self.task_sequence[task]
# Sum up along the filter dimension
cam = (fmaps * self.weights[idx:idx+1]).sum(dim=1)
cam = torch.clamp(cam, min=0, max=float('inf'))
cam -= cam.min()
cam /= (cam.max() + 1e-7)
task_prob = probs.detach().cpu().numpy()[idx]
task_cam = cam.detach().cpu().numpy()[0]
task2prob_cam[task] = (task_prob, task_cam)
return task2prob_cam
class DenseNet121(Model):
def __init__(self, task_sequence, model_uncertainty, use_gpu):
super(DenseNet121, self).__init__(models.densenet121, task_sequence, model_uncertainty, use_gpu)
def load_individual(ckpt_path, model_uncertainty, use_gpu=False):
device = 'cuda:0' if use_gpu else 'cpu'
ckpt_path = os.path.join(os.path.dirname(__file__), ckpt_path)
ckpt_dict = torch.load(ckpt_path, map_location=device)
# Build model, load parameters
task_sequence = ckpt_dict['task_sequence']
model = DenseNet121(task_sequence, model_uncertainty, use_gpu)
model = nn.DataParallel(model)
model.load_state_dict(ckpt_dict['model_state'])
params = list(model.module.parameters())
weights = np.squeeze(params[-2].data.numpy())
if model_uncertainty:
weights = weights[2::3]
model.module.set_weights_attribute(torch.tensor(weights).to(device).unsqueeze(2).unsqueeze(3))
return model.eval().to(device), ckpt_dict['ckpt_info']
class Tasks2Models(object):
"""
Main attribute is a (task tuple) -> {iterator, list} dictionary,
which loads models iteratively depending on the
specified task.
"""
def __init__(self, config_path, num_models=1, dynamic=True, use_gpu=False):
super(Tasks2Models, self).__init__()
self.get_config(config_path)
self.dynamic = dynamic
self.use_gpu = use_gpu
if dynamic:
model_loader = self.model_iterator
else:
model_loader = self.model_list
model_dicts2tasks = {}
for task, model_dicts in self.task2model_dicts.items():
hashable_model_dict = self.get_hashable(model_dicts)
if hashable_model_dict in model_dicts2tasks:
model_dicts2tasks[hashable_model_dict].append(task)
else:
model_dicts2tasks[hashable_model_dict] = [task]
# Initialize the iterators
self.tasks2models = {}
for task, model_dicts in self.task2model_dicts.items():
hashable_model_dict = self.get_hashable(model_dicts)
tasks = tuple(model_dicts2tasks[hashable_model_dict])
if tasks not in self.tasks2models:
self.tasks2models[tasks] = model_loader(model_dicts, num_models=num_models)
self.tasks = list(self.task2model_dicts.keys())
def get_hashable(self, model_dicts):
return tuple([tuple(model_dict.items()) for model_dict in model_dicts])
@property
def module(self):
return self
def get_config(self, config_path):
"""Read configuration from a JSON file.
Args:
config_path: Path to configuration JSON file.
Returns:
task2models: Dictionary mapping task names to list of dicts.
Each dict has keys 'ckpt_path' and 'model_uncertainty'.
aggregation_fn: Aggregation function to combine predictions from multiple models.
"""
with open(config_path, 'r') as json_fh:
config_dict = json.load(json_fh)
self.task2model_dicts = config_dict['task2models']
agg_method = config_dict['aggregation_method']
if agg_method == 'max':
self.aggregation_fn = np.max
elif agg_method == 'mean':
self.aggregation_fn = np.mean
else:
raise ValueError('Invalid configuration: {} = {} (expected "max" or "mean")'.format('aggregation_method', agg_method))
def model_iterator(self, model_dicts, num_models):
def iterator():
for model_dict in model_dicts[:num_models]:
ckpt_path = model_dict['ckpt_path']
model_uncertainty = model_dict['is_3class']
model, ckpt_info = load_individual(ckpt_path, model_uncertainty, self.use_gpu)
yield model
return iterator
def model_list(self, model_dicts, num_models):
loaded_models = []
for model_dict in model_dicts[:num_models]:
ckpt_path = model_dict['ckpt_path']
model_uncertainty = model_dict['is_3class']
model, ckpt_info = load_individual(ckpt_path, model_uncertainty, self.use_gpu)
loaded_models.append(model)
def iterator():
return loaded_models
return iterator
def infer(self, img, tasks):
ensemble_probs = []
cams = []
model_iterable = self.tasks2models[tasks]
task2ensemble_probs_cams = {}
for model in model_iterable():
individual_task2prob_cam = model.module.infer(img, tasks)
for task in tasks:
if task not in task2ensemble_probs_cams:
task2ensemble_probs_cams[task] = [individual_task2prob_cam[task]]
else:
task2ensemble_probs_cams[task].append(individual_task2prob_cam[task])
assert all([task in task2ensemble_probs_cams for task in tasks]), "Not all tasks in task2ensemble_probs_cams"
task2prob_cam = {}
for task in tasks:
ensemble_probs, cams = zip(*task2ensemble_probs_cams[task])
task2prob_cam[task] = (self.aggregation_fn(ensemble_probs, axis=0), self.aggregation_fn(cams, axis=0))
assert all([task in task2prob_cam for task in tasks]), "Not all tasks in task2prob_cam"
return task2prob_cam
def __iter__(self):
return iter(self.tasks2models)
|
#!/usr/bin/env python3.6
# coding: utf-8
# Copyright (c) 2015 Michael Auchter <a@phire.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import os
import json
import logging
import operator
import requests
import colorsys
import datetime
import uuid
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Imports for v3 validation
from validation import validate_message
# Disable warning about Insecure Request
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
# Setup logger
logger = logging.getLogger()
logger.setLevel(logging.INFO)
LIGHT_SUPPORT_COLOR_TEMP = 2
LIGHT_SUPPORT_RGB_COLOR = 16
LIGHT_SUPPORT_XY_COLOR = 64
DISPLAY_CATEGORIES = {
'garage_door': 'SWITCH',
'group': 'SWITCH',
'input_boolean': 'SWITCH',
'input_number': 'SWITCH',
'switch': 'SWITCH',
'fan': 'SWITCH',
'cover': 'SWITCH',
'lock': 'SMARTLOCK',
'script': 'ACTIVITY_TRIGGER',
'scene': 'SCENE_TRIGGER',
'light': 'LIGHT',
'media_player': 'TV',
'climate': 'THERMOSTAT',
'alert': 'OTHER',
'automation': 'ACTIVITY_TRIGGER'
}
class HomeAssistant(object):
def __init__(self, config):
self.config = config
self.url = config.url.rstrip('/')
agent_str = 'Home Assistant Alexa Smart Home Skill - %s - %s'
agent_fmt = agent_str % (os.environ['AWS_DEFAULT_REGION'],
requests.utils.default_user_agent())
self.session = requests.Session()
self.session.headers = {'x-ha-access': config.password,
'content-type': 'application/json',
'User-Agent': agent_fmt}
self.session.verify = config.ssl_verify
def build_url(self, relurl):
return '%s/%s' % (self.url, relurl)
def get(self, relurl):
r = self.session.get(self.build_url(relurl))
r.raise_for_status()
return r.json()
def post(self, relurl, d, wait=False):
read_timeout = None if wait else 1.00 #0.01
r = None
try:
logger.debug('HA post calling %s with %s', relurl, str(d))
r = self.session.post(self.build_url(relurl),
data=json.dumps(d),
timeout=(None, read_timeout))
r.raise_for_status()
except requests.exceptions.ReadTimeout:
# Allow response timeouts after request was sent
logger.debug('HA post for %s sent without waiting for response',
relurl)
return r
class ConnectedHomeCall(object):
def __init__(self, namespace, name, ha, payload, endpoint, correlationToken):
logger.debug('Building ConnectedHomeCall %s, %s, %s', namespace,
name, payload)
self.namespace = namespace
self.name = name
if self.name == 'ReportState':
self.response_name = 'StateReport'
else:
self.response_name = 'Response'
self.namespace = 'Alexa'
self.ha = ha
self.payload = payload
self.endpoint = endpoint
self.entity = None
self.context_properties = []
self.correlationToken = correlationToken
if self.endpoint and ('endpointId' in self.endpoint):
self.entity = mk_entity(ha, self.endpoint['endpointId']
.replace(':', '.'))
class ConnectedHomeException(Exception):
def __init__(self, name="DriverInternalError", payload={}):
self.error_name = name
self.payload = payload
class ValueOutOfRangeError(ConnectedHomeException):
def __init__(self, minValue, maxValue):
self.error_name = 'ValueOutOfRangeError'
self.payload = {'minimumValue': minValue, 'maximumValue': maxValue}
def invoke(self, name):
logger.debug('invoking ConnectedHomeCall %s %s', self.namespace, name)
r = {'event': {}}
try:
r['event']['header'] = {'namespace': self.namespace,
'name': self.response_name,
'payloadVersion': '3',
'messageId': get_uuid(),
"correlationToken": self.correlationToken}
payload = operator.attrgetter(name)(self)()
if payload:
r['event']['payload'] = payload
else:
r['event']['payload'] = {}
if self.endpoint:
r['event']['endpoint'] = {
"endpointId": self.endpoint['endpointId']
}
if self.context_properties:
r['context'] = {"properties": self.context_properties }
logger.debug('response payload: %s', str(r['event']['payload']))
except ConnectedHomeCall.ConnectedHomeException as e:
logger.exception('ConnectedHomeCall failed: %s, %s', e.error_name, e.payload)
self.response_name = e.error_name
r['event']['payload'] = e.payload
except Exception:
logger.exception('ConnectedHomeCall failed unexpectedly')
self.response_name = 'DriverInternalError'
r['event']['payload'] = {}
return r
class Alexa(object):
class ReportState(ConnectedHomeCall):
def ReportState(self):
if hasattr(self.entity, 'get_current_temperature'):
state = self.ha.get('states/' + self.entity.entity_id)
scale = get_temp_scale(state['attributes']['unit_of_measurement'])
temperature = self.entity.get_current_temperature(state)
self.context_properties.append({
"namespace": "Alexa.TemperatureSensor",
"name": "temperature",
"value": {
"value": temperature,
"scale": scale
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
if hasattr(self.entity, 'get_temperature'):
state = self.ha.get('states/' + self.entity.entity_id)
scale = get_temp_scale(state['attributes']['unit_of_measurement'])
temperature, mode = self.entity.get_temperature(state)
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "targetSetpoint",
"value": {
"value": temperature,
"scale": scale
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "thermostatMode",
"value": mode.upper(),
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
if hasattr(self.entity, 'get_lock_state'):
lock_state = self.entity.get_lock_state().upper()
self.context_properties.append({
"namespace": "Alexa.LockController",
"name": "lockState",
"value": lock_state,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
if (hasattr(self.entity, 'turn_on') or hasattr(self.entity, 'turn_off')) and not hasattr(self.entity, 'get_temperature'):
state = self.ha.get('states/' + self.entity.entity_id)
device_state = state.get('state').upper()
self.context_properties.append({
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": device_state,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
if hasattr(self.entity, 'get_percentage'):
state = self.ha.get('states/' + self.entity.entity_id)
percentage = self.entity.get_percentage()
self.context_properties.append({
"namespace": "Alexa.PercentageController",
"name": "percentage",
"value": percentage,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
# Report EndpointHealth for ALL items
self.context_properties.append({
"namespace": "Alexa.EndpointHealth",
"name": "connectivity",
"value": {
"value": "OK"
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class Discovery(ConnectedHomeCall):
def Discover(self):
try:
return {'endpoints': discover_appliances(self.ha)}
except Exception:
logger.exception('v3 DiscoverAppliancesRequest failed')
class PowerController(ConnectedHomeCall):
def TurnOn(self):
self.entity.turn_on()
self.context_properties.append({
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": "ON",
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def TurnOff(self):
self.entity.turn_off()
self.context_properties.append({
"namespace": "Alexa.PowerController",
"name": "powerState",
"value": "OFF",
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class BrightnessController(ConnectedHomeCall):
def AdjustBrightness(self):
delta = self.payload['brightnessDelta']
brightness = self.entity.get_percentage()
brightness += delta
brightness = check_value(brightness, 0.0, 100.0)
self.entity.set_percentage(brightness)
self.context_properties.append({
"namespace": "Alexa.BrightnessController",
"name": "brightness",
"value": brightness,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def SetBrightness(self):
brightness = self.payload['brightness']
self.entity.set_percentage(brightness)
self.context_properties.append({
"namespace": "Alexa.BrightnessController",
"name": "brightness",
"value": brightness,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class PercentageController(ConnectedHomeCall):
def SetPercentage(self):
percentage = self.payload['percentage']
self.entity.set_percentage(percentage)
self.context_properties.append({
"namespace": "Alexa.PercentageController",
"name": "percentage",
"value": percentage,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def AdjustPercentage(self):
delta = self.payload['percentageDelta']
percentage = self.entity.get_percentage()
percentage += delta
percentage = check_value(percentage, 0.0, 100.0)
self.entity.set_percentage(percentage)
self.context_properties.append({
"namespace": "Alexa.PercentageController",
"name": "percentage",
"value": percentage,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class ColorTemperatureController(ConnectedHomeCall):
def DecreaseColorTemperature(self):
currentColorTemp = self.entity.get_color_temperature()
newColorTemp = currentColorTemp - 500
self.entity.set_color_temperature(newColorTemp)
self.context_properties.append({
"namespace": "Alexa.ColorTemperatureController",
"name": "colorTemperatureInKelvin",
"value": newColorTemp,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def IncreaseColorTemperature(self):
currentColorTemp = self.entity.get_color_temperature()
newColorTemp = currentColorTemp + 500
self.entity.set_color_temperature(newColorTemp)
self.context_properties.append({
"namespace": "Alexa.ColorTemperatureController",
"name": "colorTemperatureInKelvin",
"value": newColorTemp,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def SetColorTemperature(self):
colorTemp = self.payload['colorTemperatureInKelvin']
            self.entity.set_color_temperature(colorTemp)
self.context_properties.append({
"namespace": "Alexa.ColorTemperatureController",
"name": "colorTemperatureInKelvin",
"value": colorTemp,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class PowerLevelController(ConnectedHomeCall):
def AdjustPowerLevel(self):
delta = self.payload['powerLevelDelta']
val = self.entity.get_percentage()
val += delta
val = check_value(val, 0.0, 100.0)
self.entity.set_percentage(val)
self.context_properties.append({
"namespace": "Alexa.PowerLevelController",
"name": "powerLevel",
"value": val,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def SetPowerLevel(self):
percentage = self.payload['powerLevel']
self.entity.set_percentage(percentage)
self.context_properties.append({
"namespace": "Alexa.PowerLevelController",
"name": "powerLevel",
"value": percentage,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class ThermostatController(ConnectedHomeCall):
def SetTargetTemperature(self):
state = self.ha.get('states/' + self.entity.entity_id)
unit = state['attributes']['unit_of_measurement']
scale = get_temp_scale(state['attributes']['unit_of_measurement'])
min_temp = convert_temp(state['attributes']['min_temp'], unit)
max_temp = convert_temp(state['attributes']['max_temp'], unit)
temperature, mode = self.entity.get_temperature(state)
new_temp = float(self.payload['targetSetpoint']['value'])
if new_temp > max_temp or new_temp < min_temp:
raise ConnectedHomeCall.ValueOutOfRangeError(min_temp,max_temp)
# Only 4 allowed values for mode in this response
if mode not in ['AUTO', 'COOL', 'ECO', 'HEAT']:
current = self.entity.get_current_temperature(state)
if 'cool' in state['attributes']['operation_list']:
mode = 'COOL' if current >= new_temp else 'HEAT'
else:
mode = 'HEAT'
self.entity.set_temperature(new_temp, mode.lower(), state)
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "targetSetpoint",
"value": {
"value": new_temp,
"scale": scale
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "thermostatMode",
"value": mode.upper(),
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def AdjustTargetTemperature(self):
state = self.ha.get('states/' + self.entity.entity_id)
unit = state['attributes']['unit_of_measurement']
scale = get_temp_scale(state['attributes']['unit_of_measurement'])
min_temp = convert_temp(state['attributes']['min_temp'], unit)
max_temp = convert_temp(state['attributes']['max_temp'], unit)
temperature, mode = self.entity.get_temperature(state)
            new_temp = temperature + float(self.payload['targetSetpointDelta']['value'])
# Clamp the allowed temperature for relative adjustments
if temperature != max_temp and temperature != min_temp:
new_temp = check_value(new_temp, min_temp, max_temp)
if new_temp > max_temp or new_temp < min_temp:
raise ConnectedHomeCall.ValueOutOfRangeError(min_temp,max_temp)
# Only 4 allowed values for mode in this response
if mode not in ['AUTO', 'COOL', 'ECO', 'HEAT']:
current = self.entity.get_current_temperature(state)
if 'cool' in state['attributes']['operation_list']:
mode = 'COOL' if current >= new_temp else 'HEAT'
else:
mode = 'HEAT'
self.entity.set_temperature(new_temp, mode.lower(), state)
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "targetSetpoint",
"value": {
"value": new_temp,
"scale": scale
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "thermostatMode",
"value": mode.upper(),
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def SetThermostatMode(self):
mode = self.payload['thermostatMode']['value']
logger.debug('mode is ' + mode)
if mode in ['AUTO', 'COOL', 'ECO', 'HEAT']:
self.entity.turn_on()
else:
self.entity.turn_off()
self.context_properties.append({
"namespace": "Alexa.ThermostatController",
"name": "thermostatMode",
"value": mode,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class TemperatureSensor(ConnectedHomeCall):
def ReportState(self):
state = self.ha.get('states/' + self.entity.entity_id)
scale = get_temp_scale(state['attributes']['unit_of_measurement'])
temperature = self.entity.get_current_temperature(state)
self.context_properties.append({
"namespace": "Alexa.TemperatureSensor",
"name": "temperature",
"value": {
"value": temperature,
"scale": scale
},
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class LockController(ConnectedHomeCall):
def Lock(self):
            self.entity.set_lock_state('LOCKED')
self.context_properties.append({
"namespace": "Alexa.LockController",
"name": "lockState",
"value": "LOCKED",
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def Unlock(self):
            self.entity.set_lock_state('UNLOCKED')
self.context_properties.append({
"namespace": "Alexa.LockController",
"name": "lockState",
"value": "UNOCKED",
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class Speaker(ConnectedHomeCall):
def SetVolume(self):
volume = self.payload['volume']['value']
volume = check_value(volume, 0.0, 100.0)
self.entity.set_volume(volume)
mute_state = self.entity.get_mute()
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "volume",
"value": volume,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "muted",
"value": mute_state,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def AdjustVolume(self):
delta = self.payload['volume']['value']
volume = self.entity.get_volume()
volume += delta
volume = check_value(volume, 0.0, 100.0)
self.entity.set_volume(volume)
mute_state = self.entity.get_mute()
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "volume",
"value": volume,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "muted",
"value": mute_state,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
def SetMute(self):
mute = self.payload['mute']['value']
mute_state = self.entity.set_mute(mute)
volume = self.entity.get_volume()
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "volume",
"value": volume,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
self.context_properties.append({
"namespace": "Alexa.Speaker",
"name": "muted",
"value": mute_state,
"timeOfSample": get_utc_timestamp(),
"uncertaintyInMilliseconds": 200
})
class PlaybackController(ConnectedHomeCall):
def FastForward(self):
logger.debug('FastForward')
def Next(self):
logger.debug('Next')
def Pause(self):
logger.debug('Pause')
def Play(self):
logger.debug('Play')
def Previous(self):
logger.debug('Previous')
def Rewind(self):
logger.debug('Rewind')
def StartOver(self):
logger.debug('StartOver')
def Stop(self):
logger.debug('Stop')
class RemoteVideoPlayer(ConnectedHomeCall):
def SearchAndPlay(self):
logger.debug('SearchAndPlay')
def SearchAndDisplayResults(self):
logger.debug('SearchAndDisplayResults')
def invoke(namespace, name, ha, payload, endpoint, correlationToken):
class allowed(object):
Alexa = Alexa
if namespace == 'Alexa' and name == 'ReportState':
namespace = namespace + '.' + name
make_class = operator.attrgetter(namespace)
logger.debug('Calling invoke %s, %s, %s, %s, %s, %s', namespace, name, ha,
payload, endpoint, correlationToken)
obj = make_class(allowed)(namespace, name, ha, payload, endpoint, correlationToken)
return obj.invoke(name)
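# Dispatch sketch: a directive with namespace 'Alexa.PowerController' and name
# 'TurnOn' resolves (via attrgetter on the `allowed` container) to the nested
# Alexa.PowerController class; obj.invoke('TurnOn') then looks up and calls the
# TurnOn() method by name.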
def discover_appliances(ha):
def entity_domain(x):
return x['entity_id'].split('.', 1)[0]
def is_supported_entity(x):
return entity_domain(x) in ha.config.exposed_domains
def is_exposed_entity(x):
attr = x['attributes']
if 'haaska_hidden' in attr:
return not attr['haaska_hidden']
elif 'hidden' in attr:
return not attr['hidden']
else:
return ha.config.expose_by_default
def mk_appliance(x):
features = 0
if 'supported_features' in x['attributes']:
features = x['attributes']['supported_features']
entity = mk_entity(ha, x['entity_id'], features)
o = {}
# this needs to be unique and has limitations on allowed characters ("^[a-zA-Z0-9_\\-=#;:?@&]*$"):
o['endpointId'] = x['entity_id'].replace('.', ':')
o['manufacturerName'] = 'Unknown'
if 'haaska_name' in x['attributes']:
o['friendlyName'] = x['attributes']['haaska_name']
else:
o['friendlyName'] = x['attributes']['friendly_name']
suffix = ha.config.entity_suffixes[entity_domain(x)]
if suffix != '':
o['friendlyName'] += ' ' + suffix
if 'haaska_desc' in x['attributes']:
o['description'] = x['attributes']['haaska_desc']
else:
o['description'] = 'Home Assistant ' + \
entity_domain(x).replace('_', ' ').title()
o['displayCategories'] = [DISPLAY_CATEGORIES[entity_domain(x)]]
o['capabilities'] = entity.get_capabilities()
return o
states = ha.get('states')
return [mk_appliance(x) for x in states if is_supported_entity(x) and
is_exposed_entity(x)]
def supported_features(payload):
try:
details = 'additionalApplianceDetails'
return payload['appliance'][details]['supported_features']
except Exception:
return 0
def convert_temp(temp, from_unit=u'°C', to_unit=u'°C'):
if temp is None or from_unit == to_unit:
return temp
if from_unit == u'°C':
return temp * 1.8 + 32
else:
return (temp - 32) / 1.8
def get_temp_scale(unit):
if unit == u'°C':
return 'CELSIUS'
else:
return 'FAHRENHEIT'
def get_utc_timestamp():
return datetime.datetime.strftime(datetime.datetime.utcnow(), "%Y-%m-%dT%H:%M:%S.%f")[:-4] + "Z"
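# get_utc_timestamp() example output: '2021-05-19T07:24:03.12Z' (the [:-4] slice
# trims the six microsecond digits down to two)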
def get_uuid():
return str(uuid.uuid4())
def check_value(value, minValue=None, maxValue=None):
    if value is None or minValue == maxValue:
        return value
    if value <= minValue:
        return minValue
    elif value >= maxValue:
        return maxValue
    return value
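# check_value examples: check_value(120, 0.0, 100.0) -> 100.0;
# check_value(50, 0.0, 100.0) -> 50 (in range, returned unchanged)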
def mk_entity(ha, entity_id, supported_features=0):
entity_domain = entity_id.split('.', 1)[0]
logger.debug('Making entity w/ domain: %s', entity_domain)
return DOMAINS[entity_domain](ha, entity_id, supported_features)
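# mk_entity example: mk_entity(ha, 'light.kitchen') -> LightEntity(ha, 'light.kitchen', 0)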
class Entity(object):
def __init__(self, ha, entity_id, supported_features):
self.ha = ha
self.entity_id = entity_id.replace(':', '.')
self.supported_features = supported_features
self.entity_domain = self.entity_id.split('.', 1)[0]
    def _call_service(self, service, data=None):
        # copy the payload so a shared default dict is never mutated across calls
        data = dict(data) if data else {}
        data['entity_id'] = self.entity_id
        self.ha.post('services/' + service, data)
def get_model_name(self):
return None
def get_capabilities(self):
capabilities = []
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa",
"version": "3"
})
if hasattr(self, 'turn_on') or hasattr(self, 'turn_off'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.PowerController",
"version": "3",
"properties": {
"supported": [
{
"name": "powerState"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if hasattr(self, 'set_percentage') or hasattr(self, 'get_percentage'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.PercentageController",
"version": "3",
"properties": {
"supported": [
{
"name": "percentage"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.BrightnessController",
"version": "3",
"properties": {
"supported": [
{
"name": "brightness"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if hasattr(self, 'get_current_temperature') or hasattr(
self, 'get_temperature'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.TemperatureSensor",
"version": "3",
"properties": {
"supported": [
{
"name": "temperature"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if hasattr(self, 'set_temperature'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.ThermostatController",
"version": "3",
"properties": {
"supported": [
{
"name": "targetSetpoint"
},
{
"name": "thermostatMode"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if hasattr(self, 'get_lock_state') or hasattr(self, 'set_lock_state'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.LockController",
"version": "3",
"properties": {
"supported": [
{
"name": "lockState"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if self.entity_domain == "light":
if self.supported_features & LIGHT_SUPPORT_RGB_COLOR:
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.ColorController",
"version": "3",
"properties": {
"supported": [
{
"name": "color"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if self.supported_features & LIGHT_SUPPORT_COLOR_TEMP:
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.ColorTemperatureController",
"version": "3",
"properties": {
"supported": [
{
"name": "colorTemperatureInKelvin"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
if hasattr(self, 'set_volume') or hasattr(self, 'get_volume'):
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.Speaker",
"version": "3",
"properties": {
"supported": [
{
"name": "SetVolume"
},
{
"name": "AdjustVolume"
},
{
"name": "SetMute"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
capabilities.append(
{
"type": "AlexaInterface",
"interface": "Alexa.EndpointHealth",
"version": "3",
"properties": {
"supported": [
{
"name": "connectivity"
}
],
"proactivelyReported": False,
"retrievable": True
}
})
return capabilities
class ToggleEntity(Entity):
def turn_on(self):
self._call_service('homeassistant/turn_on')
def turn_off(self):
self._call_service('homeassistant/turn_off')
class InputNumberEntity(Entity):
def get_percentage(self):
state = self.ha.get('states/' + self.entity_id)
value = float(state['state'])
minimum = state['attributes']['min']
maximum = state['attributes']['max']
adjusted = value - minimum
return (adjusted * 100.0 / (maximum - minimum))
def set_percentage(self, val):
state = self.ha.get('states/' + self.entity_id)
minimum = state['attributes']['min']
maximum = state['attributes']['max']
step = state['attributes']['step']
scaled = val * (maximum - minimum) / 100.0
rounded = step * round(scaled / step)
adjusted = rounded + minimum
self._call_service('input_number/set_value', {'value': adjusted})
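        # set_percentage example: min=0, max=50, step=5, val=47
        # -> scaled 23.5 -> rounded to 25 -> set_value 25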
class GarageDoorEntity(ToggleEntity):
def turn_on(self):
self._call_service('garage_door/open')
def turn_off(self):
self._call_service('garage_door/close')
class CoverEntity(ToggleEntity):
def turn_on(self):
self._call_service('cover/open_cover')
def turn_off(self):
self._call_service('cover/close_cover')
class LockEntity(Entity):
def set_lock_state(self, state):
if state == "LOCKED":
self._call_service('lock/lock')
elif state == "UNLOCKED":
self._call_service('lock/unlock')
def get_lock_state(self):
state = self.ha.get('states/' + self.entity_id)
return state['state']
class ScriptEntity(ToggleEntity):
def turn_off(self):
self.turn_on()
class SceneEntity(ToggleEntity):
def turn_off(self):
self.turn_on()
class LightEntity(ToggleEntity):
def get_percentage(self):
state = self.ha.get('states/' + self.entity_id)
current_brightness = state['attributes']['brightness']
return (current_brightness / 255.0) * 100.0
def set_percentage(self, val):
brightness = (val / 100.0) * 255.0
self._call_service('light/turn_on', {'brightness': brightness})
def get_color_temperature(self):
state = self.ha.get('states/' + self.entity_id)
current_temperature = state['attributes']['color_temp']
return (1000000 / current_temperature)
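    # color_temp note: Home Assistant stores color temperature in mireds, where
    # mired = 1e6 / kelvin, so the same 1000000 / x expression converts in both
    # directions (e.g. 2700 K is about 370 mireds).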
def set_color(self, hue, saturation, brightness):
rgb = [int(round(i * 255)) for i in colorsys.hsv_to_rgb(hue / 360.0,
saturation,
brightness)]
self._call_service('light/turn_on', {'rgb_color': rgb})
def set_color_temperature(self, val):
self._call_service('light/turn_on',
{'color_temp': (1000000 / val)})
class MediaPlayerEntity(ToggleEntity):
def get_percentage(self):
state = self.ha.get('states/' + self.entity_id)
vol = state['attributes']['volume_level']
return vol * 100.0
def set_percentage(self, val):
vol = val / 100.0
self._call_service('media_player/volume_set', {'volume_level': vol})
def get_volume(self):
state = self.ha.get('states/' + self.entity_id)
vol = state['attributes']['volume_level']
return vol * 100.0
def set_volume(self, val):
vol = val / 100.0
self._call_service('media_player/volume_set', {'volume_level': vol})
class ClimateEntity(Entity):
def turn_on(self):
state = self.ha.get('states/' + self.entity_id)
current = self.get_current_temperature(state)
temperature, mode = self.get_temperature(state)
if temperature is None:
mode = 'auto'
else:
if 'cool' in state['attributes']['operation_list']:
mode = 'cool' if current >= temperature else 'heat'
else:
mode = 'heat'
self._call_service('climate/set_operation_mode',
{'operation_mode': mode})
def turn_off(self):
self._call_service('climate/set_operation_mode',
{'operation_mode': 'off'})
def aux_heat_on(self):
self._call_service('climate/set_aux_heat',
{'aux_heat': 'on'})
def aux_heat_off(self):
self._call_service('climate/set_aux_heat',
{'aux_heat': 'off'})
def get_current_temperature(self, state=None):
if not state:
state = self.ha.get('states/' + self.entity_id)
return convert_temp(
state['attributes']['current_temperature'],
state['attributes']['unit_of_measurement'])
def get_temperature(self, state=None):
if not state:
state = self.ha.get('states/' + self.entity_id)
temperature = convert_temp(
state['attributes']['temperature'],
state['attributes']['unit_of_measurement'])
mode = state['state'].replace('idle', 'off').upper()
return (temperature, mode)
def set_temperature(self, val, mode=None, state=None):
if not state:
state = self.ha.get('states/' + self.entity_id)
temperature = convert_temp(
val,
to_unit=state['attributes']['unit_of_measurement'])
data = {'temperature': temperature}
if mode:
data['operation_mode'] = mode
self._call_service('climate/set_temperature', data)
class FanEntity(ToggleEntity):
def get_percentage(self):
state = self.ha.get('states/' + self.entity_id)
speed = state['attributes']['speed']
if speed == "off":
return 0
elif speed == "low":
return 33
elif speed == "medium":
return 66
elif speed == "high":
return 100
def set_percentage(self, val):
speed = "off"
if val <= 33:
speed = "low"
elif val <= 66:
speed = "medium"
elif val <= 100:
speed = "high"
self._call_service('fan/set_speed', {'speed': speed})
DOMAINS = {
'garage_door': GarageDoorEntity,
'group': ToggleEntity,
'input_boolean': ToggleEntity,
'input_number': InputNumberEntity,
'switch': ToggleEntity,
'fan': FanEntity,
'cover': CoverEntity,
'lock': LockEntity,
'script': ScriptEntity,
'scene': SceneEntity,
'light': LightEntity,
'media_player': MediaPlayerEntity,
'climate': ClimateEntity,
'alert': ToggleEntity,
'automation': ToggleEntity
}
class Configuration(object):
def __init__(self, filename=None, optsDict=None):
self._json = {}
if filename is not None:
with open(filename) as f:
self._json = json.load(f)
if optsDict is not None:
self._json = optsDict
opts = {}
opts['url'] = self.get(['url', 'ha_url'],
default='http://localhost:8123/api')
opts['ssl_verify'] = self.get(['ssl_verify', 'ha_cert'], default=True)
opts['password'] = self.get(['password', 'ha_passwd'], default='')
opts['exposed_domains'] = \
sorted(self.get(['exposed_domains', 'ha_allowed_entities'],
default=DOMAINS.keys()))
default_entity_suffixes = {'group': 'Group', 'scene': 'Scene'}
opts['entity_suffixes'] = {domain: '' for domain in DOMAINS.keys()}
opts['entity_suffixes'].update(self.get(['entity_suffixes'],
default=default_entity_suffixes))
opts['expose_by_default'] = self.get(['expose_by_default'],
default=True)
opts['debug'] = self.get(['debug'], default=False)
self.opts = opts
def __getattr__(self, name):
return self.opts[name]
def get(self, keys, default):
for key in keys:
if key in self._json:
return self._json[key]
return default
def dump(self):
return json.dumps(self.opts, indent=2, separators=(',', ': '))
def event_handler(request, context):
    # Main Lambda handler.
    # Only expects v3 requests (as we are the only user), so no need to handle v2 requests.
try:
config = Configuration('config.json')
if config.debug:
logger.setLevel(logging.DEBUG)
ha = HomeAssistant(config)
logger.debug('Directive:')
logger.debug(json.dumps(request, indent=4, sort_keys=True))
directive = request['directive']
header = directive['header']
namespace = header.get('namespace')
name = header.get('name')
correlationToken = header.get('correlationToken')
payload = directive.get('payload')
endpoint = directive.get('endpoint')
logger.debug('calling request_handler for %s, payload: %s', name,
str({k: v for k, v in payload.items()
if k != u'accessToken'}))
response = invoke(namespace, name, ha, payload, endpoint, correlationToken)
logger.debug("Response:")
logger.debug(json.dumps(response, indent=4, sort_keys=True))
logger.debug("Validate response")
#validate_message(request, response)
return response
except ValueError as error:
logger.error(error)
raise
|
#-*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.request
import re
import BookDBHandler
import sys
#if sys.argv[1] != None:
# print("Need book _id")
#
#if sys.argv[2] not in ['-s','-w']:
# print("""
# Need second argument
# -s : show score
# -w : write score
# """)
#try:
# BookDBHandler.get_book_data(para)
#except UnboundLocalError as err:
# print("the query is not available:",err)
class score_handler:
    ''' Collect the Sales Point score from http://www.aladin.co.kr or http://www.yes24.com. Kyobobook (site = 'kyobo') is scored 0, since it does not provide any score.
'''
def __init__(self, book_id):
self.url = BookDBHandler.get_url(book_id)
self._id = book_id
self.keys = tuple(self.url.keys())
self.web_data = dict()
self.score = dict()
for key in self.keys:
self.web_data[key] = {
'url': self.url[key]
}
for key in self.keys:
self.web_data[key]['doc'] = urllib.request.urlopen(self.web_data[key]['url']).read()
self.web_data[key]['soup'] = BeautifulSoup(self.web_data[key]['doc'], "html.parser")
self.score[key] = self.get_score(key, self.web_data[key]['soup'])
#self.soup = BeautifulSoup(self.doc, "html.parser")
def get_score(self, site, soup):
s = None
if site == 'kyobo':
score = 0
return score
if site == 'aladin':
t = soup.find("div",class_ = "ss_book_list")
start_point = t.get_text().index(':') + 2
s = t.get_text()[start_point:]
if site == 'yes24':
t = soup.find("p",class_ = "goods_rating")
m = re.search('[\d,]+',t.get_text())
s = m.group()
score = self.remove_comma(s)
return score
def get_yes24_score(self, soup):
t = soup.find("p",class_ = "goods_rating")
m = re.search('[\d,]+',t.get_text())
score = int(m.group())
return score
def remove_comma(self,st):
return int(st.replace(',','').strip())
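    # remove_comma example: remove_comma(' 12,345 ') -> 12345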
def show_score(self):
print ("Kyobo : Not Count\nYes24:%d\nAladin:%d" % (self.score['yes24'], self.score['aladin']))
def input_score(self):
BookDBHandler.write_score(self._id, self.score)
s = score_handler(sys.argv[1])
s.show_score()
s.input_score()
|
import sys
sys.path.append(".")
from BankAccount import *
class User:
    def __init__(self, name, email_address):
        self.name = name
        self.email = email_address
        self.account = BankAccount(int_rate=0.02, balance=0)
    def make_deposit(self, amount):
        self.account.deposit(amount)
        return self
    def make_withdrawal(self, amount):
        # assumes BankAccount exposes a withdraw() counterpart to deposit()
        self.account.withdraw(amount)
        return self
    def display_user_balance(self):
        print(f"User: {self.name}, Balance: {self.account.balance}")
        return self
    def transfer_money(self, other_user, amount):
        # withdraw from this user's account and deposit into the other user's
        self.account.withdraw(amount)
        other_user.account.deposit(amount)
        return self
guido = User("Guido van Rossum", "guido@python.com")
monty = User("Monty Python", "monty@python.com")
rocky = User("Rocky Balboa", "rocky@python.com")
guido.account.deposit(100)
guido.make_deposit(100).make_deposit(100).make_deposit(100).make_withdrawal(50).display_user_balance().transfer_money(rocky,50)
monty.make_deposit(200).make_deposit(300).make_withdrawal(50).make_withdrawal(50).display_user_balance()
rocky.make_deposit(500).make_withdrawal(50).make_withdrawal(50).make_withdrawal(50).display_user_balance()
guido.display_user_balance()
|
from mysql.connector import connect, Error
locadora = {
"host": "localhost", "user": "root",
"password": "root", "database": "locadora"}
def execute(sql, params=None):  # Runs a statement against MySQL and saves the changes. Used for:
    # insert, update, delete, create, alter, drop
    with connect(**locadora) as conn:  # connect to the database
        with conn.cursor() as cursor:  # open a cursor for execution
            cursor.execute(sql, params)  # run the SQL that was passed in
            conn.commit()  # commit to the database
            return cursor.lastrowid
def query(sql, params=None):
with connect(**locadora) as trabalho:
with trabalho.cursor() as cursor:
cursor.execute(sql, params)
return cursor.fetchall()
def insert(tabela, colunas, valores):
return execute(f"INSERT INTO {tabela} ({','.join(colunas)}) VALUES ({','.join(['%s' for valor in valores])})", valores)
def delete(tabela, coluna, valor):
execute(f"DELETE FROM {tabela} WHERE {coluna} = %s", (valor,))
def update(tabela, chave, valor_chave, colunas, valores):
sets = [f"{coluna} = %s" for coluna in colunas]
execute(f"""UPDATE {tabela} SET {",".join(sets)} WHERE {chave} = %s""", valores + [valor_chave])
def select_like(tabela, chave, valor_chave):
    return query(f"""SELECT * FROM {tabela} WHERE {chave} LIKE %s""", (valor_chave,))
def select(tabela, chave, valor_chave=1, limit=100, offset=0):
return query(f"""SELECT * FROM {tabela} WHERE {chave} LIKE %s LIMIT {limit} offset {offset}""", (valor_chave,))
# use this file to clear up questions |
#!../../software/pyhail.sh
import hail
from hail.representation import Interval
from hail.expr import TString, TBoolean, TFloat, TInt
hc = hail.HailContext(log = 'log/99_dreamlab2.log', tmp_dir = 'tmp/hail')
vds = hc.read('../MGRB.phase2.tier12.match.vqsr.minrep.vds')
# Chr22 only, rough variant quality filters
vds = (vds
.filter_intervals(Interval.parse('22'), keep=True)
.filter_variants_expr('va.filters.isEmpty()', keep=True)
.split_multi()
.variant_qc()
.filter_variants_expr('''
v.altAllele.isSNP &&
va.qc.callRate >= 0.99 &&
va.qc.dpMean >= 20 && va.qc.dpMean <= 60 &&
va.qc.dpStDev < 8 &&
va.filters.isEmpty() &&
va.qc.AF >= 0.05 && va.qc.AF <= 0.95''')
)
# Drop samples with poor metrics on these filtered variants.
vds = (vds
.sample_qc()
.filter_samples_expr('sa.qc.callRate >= 0.985')
)
# Drop samples with no phenotype for SBPMean, HtMtrs, WtKgs, AMD, or GlcmmolL
vds = (vds
.filter_samples_expr('''
isnan(sa.pheno.SBPMean) ||
isMissing(sa.pheno.SBPMean) ||
isnan(sa.pheno.HtMtrs) ||
isMissing(sa.pheno.HtMtrs) ||
isnan(sa.pheno.WtKgs) ||
isMissing(sa.pheno.WtKgs) ||
isMissing(sa.pheno.AMD) ||
isnan(sa.pheno.GlcmmolL) ||
isMissing(sa.pheno.GlcmmolL)''', keep=False)
.annotate_samples_expr('sa.pheno = select(sa.pheno, SBPMean, HtMtrs, WtKgs, AMD, GlcmmolL)')
)
vds = vds.repartition(280)
# PCs for covariates
vds = vds.pca('sa.scores', k=5)
# Export the allele count table
vds.make_table('v = v', ['aac = if (g.isHet()) 1 else if (g.isHomVar()) 2 else 0']).export('../MGRB.phase2.tier12.match.vqsr.minrep.dreamlab2chr22.geno.tsv')
# Export the covariates
vds.export_samples('../MGRB.phase2.tier12.match.vqsr.minrep.dreamlab2chr22.covar.tsv', 'sample = s, sa.scores.*')
# Export the phenotypes
vds.export_samples('../MGRB.phase2.tier12.match.vqsr.minrep.dreamlab2chr22.pheno.tsv', 'sample = s, sa.pheno.*')
|
import argparse
import sys
import wave
import numpy as np
def read_data(data):
    # np.fromstring on raw bytes is deprecated; frombuffer is the modern equivalent
    data = np.frombuffer(data, dtype='int16')
    data = data.astype('double')
    data /= (2**15 - 1)
    # de-interleave the stereo frames into shape (channel, sample)
    return data.reshape((2, -1), order='F')
def extract_sinusoid(t, frequency, data):
total_t = t[-1] - t[0]
dt = t[1]-t[0]
n = total_t * frequency
s = np.dot(np.sin(np.pi*2*frequency*t)*dt, data)
c = np.dot(np.cos(np.pi*2*frequency*t)*dt, data)
A = (2*np.pi*frequency* np.sqrt(s**2 + c**2))/(n*np.pi)
cphi = s*(2*np.pi*frequency)/(n*A*np.pi)
sphi = c*(2*np.pi*frequency)/(n*A*np.pi)
return A, np.arctan2(sphi, cphi)
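    # Why this works: for x(t) = A*sin(2*pi*f*t + phi) integrated over n = T*f
    # whole cycles, s ~ A*cos(phi)*n/(2f) and c ~ A*sin(phi)*n/(2f); inverting
    # gives A = 2f*sqrt(s^2 + c^2)/n and phi = atan2(sphi, cphi), as computed above.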
def read_wave(filename):
wf = wave.open(filename, "r")
nframes = wf.getnframes()
frames = wf.readframes(nframes)
frame_rate = wf.getframerate()
t = np.arange(0, nframes, dtype='double')/(frame_rate)
return frames, t
def main(args):
parser = argparse.ArgumentParser()
parser.add_argument("filename")
parser.add_argument("--frequencies", nargs="*",
type=float, default=[660])
options = parser.parse_args(args)
frames, t = read_wave(options.filename)
channels = read_data(frames)
for frequency in options.frequencies:
        print(frequency, extract_sinusoid(t, frequency, channels[0]))
if __name__ == "__main__":
main(sys.argv[1:])
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as mcolors
import sys
import os
import math
import h5py
beta_low=float(sys.argv[1])
beta_high=float(sys.argv[2])
nbeta=int(sys.argv[3])
BASEDIR=sys.argv[4]
L=sys.argv[5]
nu=float(sys.argv[6])
e=float(sys.argv[7])
beta=np.zeros((nbeta))
## these are the tags used when writing the h5 file
#Observables=["E", "m", "ds"]
if(e>0):
Observables=np.array(["E", "m", "ds"])
else:
if(nu>0):
Observables=np.array(["E", "m", "D2H_Dd2i", "DH_Ddi", "D2H_Dd2ij"])
else:
Observables=np.array(["E", "m", "D2H_Dd2i", "DH_Ddi"])
transient_max=np.zeros((len(Observables)))
for name in range(len(Observables)):
A_name=Observables[name]
transient_list=np.zeros((nbeta))
for b in range(nbeta):
beta[b]=beta_low +b*(beta_high -beta_low)/(nbeta-1)
base=2
exp=2
box_length=base**exp
file=h5py.File('%s/beta_%d/Output.h5' %(BASEDIR, b), 'r')
A=np.asarray(file['Measurements']['%s' %(Observables[name])])
if((Observables[name]=="D2H_Dd2i") or (Observables[name]=="DH_Ddi") or (Observables[name]=="D2H_Dd2ij")):
A=A[:,0]
tot_length=box_length
start=0
A_mean=[]
A_std=[]
bins=[]
while (tot_length< len(A)):
A_mean.append(np.mean(A[start:tot_length]))
A_std.append(np.sqrt(np.var(A[start:tot_length])/(len(A[start:tot_length]) -1)))
bins.append(tot_length)
start=tot_length
exp+=1
box_length=base**exp
tot_length+=box_length
A_mean=np.array(A_mean)
A_std=np.array(A_std)
bins=np.array(bins)
#plt.errorbar(bins, A_mean, yerr=A_std, fmt='o-', label= "%s L=%s" %(Observables[name], L))
#plt.legend(loc="best")
#plt.show()
        ##### Find where the plateau starts by taking the minimum absolute value of the derivative of the binned function ####
A_diff=np.diff(A_mean)
index=np.argmin(np.abs(A_diff))
#### The information to be extracted is the time up to the end of the bin where the plateau is observed: bin[index] ####
transient_list[b]=bins[index]
file_Aout=("%s/beta_%d/Thermalization_%s.txt" %(BASEDIR, b, A_name))
np.savetxt(file_Aout, np.transpose( [bins, A_mean, A_std]), fmt='%19.12e', delimiter=' ', newline=os.linesep)
transient_max[name]=np.amax(transient_list)
data=np.vstack((Observables, transient_max))
np.savetxt("%s/transient_time.txt" %BASEDIR, data, fmt="%s")
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from .opencv import OpenCVSegmentationHelper as SegmentationHelper
except ImportError:
try:
from .scikit import ScikitSegmentationHelper as SegmentationHelper
except ImportError:
raise ImportError('Could not load OpenCV or Scikit-Image.')
|
# Generated by Django 3.2.3 on 2021-05-19 07:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Category',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='Warehouse',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('warehouse_name', models.CharField(blank=True, max_length=50, null=True)),
('warehouse_address', models.CharField(blank=True, max_length=50, null=True)),
],
),
migrations.CreateModel(
name='Products',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('product_name', models.CharField(blank=True, max_length=50, null=True)),
('product_brand', models.CharField(blank=True, max_length=50, null=True)),
('product_category', models.ForeignKey(blank=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.category')),
],
),
]
|
a = int(input())
b = int(input())
c = int(input())
if (a>=b and a>=c):
print(a)
elif (b>=a and b>=c):
print(b)
else:
print(c) |
#!/usr/bin/env python
import os
import sys
import multiprocessing
import subprocess
if __name__ == '__main__':
    print('Please do not run me! Use run_pconsc.py')
    print('\n\tYours sincerely,\n\n\t' + sys.argv[0])
sys.exit(0)
# Directory where PconsC in the distributable package is located
#root = os.path.dirname(os.path.abspath(sys.argv[0])) + '/'
root = '/scratch/arne/PconsC2//'
print(root)
########################################################
### Please adjust the following paths to your system ###
########################################################
### Jackhmmer executable ###
#jackhmmer = root + 'dependencies/hmmer-3.0/src/jackhmmer'
jackhmmer = '/scratch/arne/PconsC2-extra/hmmer-3.1b1-linux-intel-x86_64/binaries/jackhmmer'
uniref = '/scratch/data/uniref90.fasta'
### HHblits executable ###
#hhblits = root + 'dependencies/hhsuite-2.0.16/bin/hhblits'
hhblits = '/usr/local/bin/hhblits'
hhdatabase = '/scratch/data/hhsuite/hhsuite_dbs/nr20_12Aug11'
### PSICOV executable ###
#psicov = root + 'dependencies/psicov-1.11/psicov'
psicov = '/usr/local/bin/psicov'
### NetSurfP executable ###
#netsurf = root + 'dependencies/netsurfp-1.0/netsurfp'
netsurf = '/usr/local/bin/netsurfp'
### PSIPRED executable ###
#psipred = root + 'dependencies/psipred/runpsipred'
psipred = '/scratch/arne/PconsC2-extra/psipred/runpsipred'
### MATLAB executable ###
# Please set this variable to None if you don't have access to matlab.
# PconsFold will then try to use the compiled version.
#matlab = '/sw/apps/matlab/x86_64/8.1/bin/matlab'
#matlab = None
matlab = '/pdc/vol/matlab/r2012a/bin/matlab'
### Path to MATLAB compiler ###
# Only needed if matlab is not available.
matlabdir = ''
# Directory to PconsC3 scripts (i.e. this one)
PconsC3 = '/scratch/arne/PconsC3/'
########################################################
### Please do not change anything below this line ###
########################################################
# Paths to included scripts
trim2jones = root + 'scripts/a3mToJones.py'
trim2trimmed = root + 'scripts/a3mToTrimmed.py'
#trim = root + 'scripts/trim.py'
#trim2 = root + 'scripts/trimToFasta.py'
# Reformat script scavenged from HHsuite. Please cite the HHblits paper!
reformat = root + 'scripts/reformat.pl'
# Maximum amount of cores to use per default
n_cores = multiprocessing.cpu_count()
# Enable work-around for PSICOV not handling low complexity alignments
psicovfail = True
# Adjust plmdca path to either standalone or compiled,
# depending on presence of matlab.
if matlab:
plmdca = None # matlab licence present: do not use compiled version
# plmdcapath = root + 'dependencies/plmDCA_symmetric_v2'
plmdcapath = '/scratch/arne/PconsC2-extra/plmDCA_asymmetric_v2'
else:
plmdca = root + 'dependencies/plmdca_standalone/2012/build01/bin/plmdca'
plmdcapath = None
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'clients/clients.ui'
#
# Created by: PyQt5 UI code generator 5.9.2
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_ClientsViewer(object):
def setupUi(self, ClientsViewer):
ClientsViewer.setObjectName("ClientsViewer")
ClientsViewer.resize(892, 393)
self.centralwidget = QtWidgets.QWidget(ClientsViewer)
self.centralwidget.setObjectName("centralwidget")
self.clients_groupBox = QtWidgets.QGroupBox(self.centralwidget)
self.clients_groupBox.setGeometry(QtCore.QRect(10, 290, 861, 61))
self.clients_groupBox.setObjectName("clients_groupBox")
self.horizontalLayoutWidget = QtWidgets.QWidget(self.clients_groupBox)
self.horizontalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 611, 41))
self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
self.horizontalLayout.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout.setObjectName("horizontalLayout")
self.add_client = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.add_client.setMinimumSize(QtCore.QSize(0, 30))
self.add_client.setObjectName("add_client")
self.horizontalLayout.addWidget(self.add_client)
self.save_clients = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.save_clients.setMinimumSize(QtCore.QSize(0, 30))
self.save_clients.setObjectName("save_clients")
self.horizontalLayout.addWidget(self.save_clients)
self.remove_client = QtWidgets.QPushButton(self.horizontalLayoutWidget)
self.remove_client.setMinimumSize(QtCore.QSize(0, 30))
self.remove_client.setObjectName("remove_client")
self.horizontalLayout.addWidget(self.remove_client)
self.clients_table = QtWidgets.QTableWidget(self.centralwidget)
self.clients_table.setGeometry(QtCore.QRect(10, 10, 861, 271))
self.clients_table.setSizeAdjustPolicy(QtWidgets.QAbstractScrollArea.AdjustToContents)
self.clients_table.setColumnCount(3)
self.clients_table.setObjectName("clients_table")
self.clients_table.setRowCount(0)
item = QtWidgets.QTableWidgetItem()
self.clients_table.setHorizontalHeaderItem(0, item)
item = QtWidgets.QTableWidgetItem()
self.clients_table.setHorizontalHeaderItem(1, item)
item = QtWidgets.QTableWidgetItem()
item.setTextAlignment(QtCore.Qt.AlignCenter)
self.clients_table.setHorizontalHeaderItem(2, item)
self.clients_table.horizontalHeader().setCascadingSectionResizes(False)
self.clients_table.horizontalHeader().setStretchLastSection(True)
self.clients_table.verticalHeader().setCascadingSectionResizes(False)
self.clients_table.verticalHeader().setStretchLastSection(False)
ClientsViewer.setCentralWidget(self.centralwidget)
self.menubar = QtWidgets.QMenuBar(ClientsViewer)
self.menubar.setGeometry(QtCore.QRect(0, 0, 892, 22))
self.menubar.setObjectName("menubar")
ClientsViewer.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(ClientsViewer)
self.statusbar.setObjectName("statusbar")
ClientsViewer.setStatusBar(self.statusbar)
self.retranslateUi(ClientsViewer)
QtCore.QMetaObject.connectSlotsByName(ClientsViewer)
def retranslateUi(self, ClientsViewer):
_translate = QtCore.QCoreApplication.translate
ClientsViewer.setWindowTitle(_translate("ClientsViewer", "Base de dados de clientes"))
self.clients_groupBox.setTitle(_translate("ClientsViewer", "Clientes"))
self.add_client.setText(_translate("ClientsViewer", "Adicionar Cliente"))
self.save_clients.setText(_translate("ClientsViewer", "Guardar Cambios"))
self.remove_client.setText(_translate("ClientsViewer", "Remover cliente"))
item = self.clients_table.horizontalHeaderItem(0)
item.setText(_translate("ClientsViewer", "Cliente"))
item = self.clients_table.horizontalHeaderItem(1)
item.setText(_translate("ClientsViewer", "Dirección"))
item = self.clients_table.horizontalHeaderItem(2)
item.setText(_translate("ClientsViewer", "RUC"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
ClientsViewer = QtWidgets.QMainWindow()
ui = Ui_ClientsViewer()
ui.setupUi(ClientsViewer)
ClientsViewer.show()
sys.exit(app.exec_())
|
# ######################################################################################################################
# Initialize: Libraries, functions, parameters
# ######################################################################################################################
# General libraries, parameters and functions
'''
import os, sys
os.chdir("../Ashrae")
sys.path.append(os.getcwd() + "\\code") #not needed if code is marked as "source" in pycharm
'''
from initialize import *
# Specific libraries
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor # , GradientBoostingClassifier
from sklearn.linear_model import SGDClassifier, SGDRegressor, LogisticRegression, ElasticNet
from keras.models import Sequential
from keras.layers import Dense, BatchNormalization, Dropout
from keras.regularizers import l2
from keras import optimizers
from keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
# from sklearn.tree import DecisionTreeRegressor, plot_tree , export_graphviz
# Main parameter
TARGET_TYPE = "REGR"
target = "target_zscore"
plt.ion(); matplotlib.use('TkAgg')
# Specific parameters
n_jobs = 14
metric = "spear"
# Load results from exploration
df = metr_standard = cate_standard = metr_binned = cate_binned = metr_encoded = cate_encoded = target_labels = None
with open(TARGET_TYPE + "_1_explore.pkl", "rb") as file:
d_pick = pickle.load(file)
for key, val in d_pick.items():
exec(key + "= val")
# ######################################################################################################################
# # Test an algorithm (and determine parameter grid)
# ######################################################################################################################
# --- Sample data ----------------------------------------------------------------------------------------------------
df_tune = df.sample(n = min(df.shape[0], int(1e6))).reset_index(drop = True)
# Scale "metr_enocded" features for DL
df_tune[metr_encoded + "_normed"] = ((df_tune[metr_encoded] - df_tune[metr_encoded].min()) /
(df_tune[metr_encoded].max() - df_tune[metr_encoded].min()))
# --- Define some splits -------------------------------------------------------------------------------------------
#split_index = PredefinedSplit(df_tune["fold"].map({"train": -1, "test": 0}).values)
split_my1fold_cv = TrainTestSep(1)
#split_5fold = KFold(5, shuffle=False, random_state=42)
split_my5fold_cv = TrainTestSep(5)
split_my5fold_boot = TrainTestSep(5, "bootstrap")
'''
df_tune["fold"].value_counts()
mysplit = split_my1fold_cv.split(df_tune)
i_train, i_test = next(mysplit)
df_tune["fold"].iloc[i_train].describe()
df_tune["fold"].iloc[i_test].describe()
i_test.sort()
i_test
'''
# --- Fits -----------------------------------------------------------------------------------------------------------
# Lasso / Elastic Net
#fit = (GridSearchCV(SGDRegressor(penalty = "ElasticNet", warm_start = True), # , tol=1e-2
fit = (GridSearchCV(ElasticNet(normalize=False, warm_start=True), # , tol=1e-2
{"alpha": [2 ** x for x in range(-8, -24, -2)],
"l1_ratio": [1]},
cv = split_my1fold_cv.split(df_tune),
refit = False,
scoring = d_scoring[TARGET_TYPE],
return_train_score = True,
n_jobs = n_jobs)
.fit(CreateSparseMatrix(metr = metr_binned, cate = cate_binned, df_ref = df_tune).fit_transform(df_tune),
df_tune[target]))
plot_cvresult(fit.cv_results_, metric = metric, x_var = "alpha", color_var = "l1_ratio")
pd.DataFrame(fit.cv_results_)
# -> keep l1_ratio=1 to have a full Lasso
# XGBoost
start = time.time()
fit = (GridSearchCV_xlgb(xgb.XGBRegressor(verbosity = 0, n_jobs = n_jobs),
{"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.02],
"max_depth": [12,15,17], "min_child_weight": [10],
"colsample_bytree": [0.7], "subsample": [0.7]},
cv = split_my1fold_cv.split(df_tune),
refit = False,
scoring = d_scoring[TARGET_TYPE],
return_train_score = True,
n_jobs = n_jobs)
.fit(CreateSparseMatrix(metr = metr_standard, cate = cate_standard, df_ref = df_tune).fit_transform(df_tune),
df_tune[target]))
print(time.time()-start)
pd.DataFrame(fit.cv_results_)
plot_cvresult(fit.cv_results_, metric = metric,
x_var = "n_estimators", color_var = "max_depth", column_var = "min_child_weight")
# -> keep around the recommended values: max_depth = 6, learning_rate = 0.01, min_child_weight = 10
# LightGBM
# metr_encoded = setdiff(metr_encoded,'building_id_ENCODED')
start = time.time()
fit = (GridSearchCV_xlgb(lgbm.LGBMRegressor(n_jobs = n_jobs),
{"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.02],
"num_leaves": [64,128,512], "min_child_samples": [10],
"colsample_bytree": [0.7], "subsample": [0.7]},
cv = split_my1fold_cv.split(df_tune),
refit = False,
scoring = d_scoring[TARGET_TYPE],
return_train_score = True,
n_jobs = n_jobs)
.fit(df_tune[metr_encoded], df_tune[target],
categorical_feature = [x for x in metr_encoded.tolist() if "_ENCODED" in x]))
print(time.time()-start)
plot_cvresult(fit.cv_results_, metric = metric,
x_var = "n_estimators", color_var = "num_leaves",
column_var = "colsample_bytree", row_var = "subsample",
style_var = "min_child_samples")
# DeepL
# Keras wrapper for Scikit
def keras_model(input_dim, output_dim, target_type,
size = "10",
lambdah = None, dropout = None,
lr = 1e-5,
batch_normalization = False,
activation = "relu"):
model = Sequential()
# Add dense layers
for units in size.split("-"):
model.add(Dense(units = int(units), activation = activation, input_dim = input_dim,
kernel_regularizer = l2(lambdah) if lambdah is not None else None,
kernel_initializer = "glorot_uniform"))
# Add additional layer
    if batch_normalization:
model.add(BatchNormalization())
if dropout is not None:
model.add(Dropout(dropout))
# Output
if target_type == "CLASS":
model.add(Dense(1, activation = 'sigmoid',
kernel_regularizer = l2(lambdah) if lambdah is not None else None))
model.compile(loss = "binary_crossentropy", optimizer = optimizers.RMSprop(lr = lr), metrics = ["accuracy"])
elif target_type == "MULTICLASS":
model.add(Dense(output_dim, activation = 'softmax',
kernel_regularizer = l2(lambdah) if lambdah is not None else None))
model.compile(loss = "categorical_crossentropy", optimizer = optimizers.RMSprop(lr = lr),
metrics = ["accuracy"])
else:
model.add(Dense(1, activation = 'linear',
kernel_regularizer = l2(lambdah) if lambdah is not None else None))
model.compile(loss = "mean_squared_error", optimizer = optimizers.RMSprop(lr = lr),
metrics = ["mean_squared_error"])
return model
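# Direct-use sketch of the builder above (the sizes and learning rate are
# illustrative placeholders, not tuned values):
# model = keras_model(input_dim=100, output_dim=1, target_type="REGR",
#                     size="100-50-20", lr=1e-3, batch_normalization=True)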
# Fit
fit = (GridSearchCV(KerasRegressor(build_fn = keras_model,
input_dim = metr_encoded.size,
output_dim = 1,
target_type = TARGET_TYPE,
verbose = 0),
{"size": ["50","100-50-20"],
"lambdah": [None], "dropout": [None],
"batch_size": [100], "lr": [1e-3],
"batch_normalization": [True],
"activation": ["relu"],
"epochs": [20]},
cv = split_my1fold_cv.split(df_tune),
refit = False,
scoring = d_scoring[TARGET_TYPE],
return_train_score = False,
n_jobs = n_jobs)
.fit(CreateSparseMatrix(metr = metr_encoded + "_normed", df_ref = df_tune).fit_transform(df_tune),
df_tune[target]))
plot_cvresult(fit.cv_results_, metric = metric, x_var = "epochs", color_var = "lr",
column_var = "size", row_var = "batch_size")
# ######################################################################################################################
# Evaluate generalization gap
# ######################################################################################################################
# Sample data (usually undersample training data)
df_gengap = df_tune.copy()
# Tune grid to loop over
param_grid = {"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.01],
"max_depth": [3, 6, 9], "min_child_weight": [10],
"colsample_bytree": [0.7], "subsample": [0.7],
"gamma": [10]}
# Calc generalization gap
fit = (GridSearchCV_xlgb(xgb.XGBRegressor(verbosity = 0),
param_grid,
cv = split_my1fold_cv.split(df_gengap),
refit = False,
scoring = d_scoring[TARGET_TYPE],
return_train_score = True,
n_jobs = n_jobs)
.fit(CreateSparseMatrix(metr = metr_standard, cate = cate_standard, df_ref = df_gengap).fit_transform(df_gengap),
df_gengap["target"]))
plot_gengap(fit.cv_results_, metric = metric,
x_var = "n_estimators", color_var = "max_depth", column_var = "min_child_weight", row_var = "gamma",
pdf = plotloc + TARGET_TYPE + "_xgboost_gengap.pdf")
# ######################################################################################################################
# Simulation: compare algorithms
# ######################################################################################################################
# Basic data sampling
df_modelcomp = df_tune.copy()
# --- Run methods ------------------------------------------------------------------------------------------------------
df_modelcomp_result = pd.DataFrame()  # initialize
# Lightgbm
cvresults = cross_validate(
estimator = GridSearchCV_xlgb(
lgbm.LGBMRegressor(),
{"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.01],
"num_leaves": [64], "min_child_weight": [10],
"colsample_bytree": [0.7], "subsample": [0.7]},
cv = ShuffleSplit(1, 0.2, random_state = 999), # just 1-fold for tuning
refit = metric,
scoring = d_scoring[TARGET_TYPE],
return_train_score = False,
n_jobs = n_jobs),
X = df_tune[metr_encoded],
y = df_tune[target],
fit_params = {"categorical_feature": [x for x in metr_encoded.tolist() if "_ENCODED" in x]},
cv = split_my5fold_cv.split(df_modelcomp),
return_train_score = False,
n_jobs = n_jobs)
df_modelcomp_result = df_modelcomp_result.append(pd.DataFrame.from_dict(cvresults).reset_index()
.assign(model = "Lightgbm"),
ignore_index = True)
# Xgboost
cvresults = cross_validate(
estimator = GridSearchCV_xlgb(
xgb.XGBRegressor(verbosity = 0),
{"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.01],
"max_depth": [6], "min_child_weight": [10],
"colsample_bytree": [0.7], "subsample": [0.7]},
cv = ShuffleSplit(1, 0.2, random_state = 999), # just 1-fold for tuning
refit = metric,
scoring = d_scoring[TARGET_TYPE],
return_train_score = False,
n_jobs = n_jobs),
X = CreateSparseMatrix(metr = metr_standard, cate = cate_standard, df_ref = df_modelcomp).fit_transform(
df_modelcomp),
y = df_modelcomp[target],
cv = split_my5fold_cv.split(df_modelcomp),
return_train_score = False,
n_jobs = n_jobs)
df_modelcomp_result = df_modelcomp_result.append(pd.DataFrame.from_dict(cvresults).reset_index()
.assign(model = "XGBoost"),
ignore_index = True)
# --- Plot model comparison ------------------------------------------------------------------------------
plot_modelcomp(df_modelcomp_result.rename(columns = {"index": "run", "test_score": metric}), scorevar = metric,
pdf = plotloc + TARGET_TYPE + "_model_comparison.pdf")
# ######################################################################################################################
# Learning curve for winner algorithm
# ######################################################################################################################
# Basic data sampling
df_lc = df_tune.copy()
# Calc learning curve
n_train, score_train, score_test = learning_curve(
estimator = GridSearchCV_xlgb(
xgb.XGBRegressor(verbosity = 0) if TARGET_TYPE == "REGR" else xgb.XGBClassifier(verbosity = 0),
{"n_estimators": [x for x in range(100, 3100, 500)], "learning_rate": [0.01],
"max_depth": [6], "min_child_weight": [10],
"colsample_bytree": [0.7], "subsample": [1]},
cv = ShuffleSplit(1, 0.2, random_state = 999), # just 1-fold for tuning
refit = metric,
scoring = d_scoring[TARGET_TYPE],
return_train_score = False,
n_jobs = 4),
X = CreateSparseMatrix(metr = metr_standard, cate = cate_standard, df_ref = df_lc).fit_transform(df_lc),
y = df_lc[target],
train_sizes = np.append(np.linspace(0.05, 0.1, 5), np.linspace(0.2, 1, 5)),
cv = split_my1fold_cv.split(df_lc),
n_jobs = 4)
# Plot it
plot_learning_curve(n_train, score_train, score_test,
pdf = plotloc + TARGET_TYPE + "_learningCurve.pdf")
plt.close("all")
|
import torch
import pandas as pd
from sklearn import model_selection
from torch.utils.data import Dataset, DataLoader
from parameter_weekly import week_input, timelagging, average_num, feature_num
def dataset_generate():
""" read data in and clean! """
filename = "zipcode_weekly_new.csv"
zipcode_daily = pd.read_csv(filename, encoding="ISO-8859-1", dtype={'ZIP': str, 'week': str})
    zip_col = zipcode_daily['ZIP']  # keep the 'ZIP' column (avoid shadowing the zip() builtin)
    del zipcode_daily['ZIP']  # delete the non-numeric columns
    del zipcode_daily['week']
    zipcode_daily = pd.DataFrame(zipcode_daily, dtype=float)  # change the type from 'int' to 'float'
    zipcode_daily['ZIP'] = zip_col  # add the 'ZIP' column again
# key: 'zip code', value: feature that belong to the 'zip code'
data_dict = {}
for i, zipcode in enumerate(zipcode_daily[:]['ZIP']):
if zipcode not in data_dict:
data_dict[zipcode] = []
feature = []
for f in zipcode_daily.iloc[i]:
feature.append(f)
data_dict[zipcode].append(feature)
train_x = []
train_y = []
test_x = []
test_y = []
ratio = 0.6 # training set ratio
# week sequential split
for key, values in data_dict.items():
l = len(values)
input_num = l - timelagging - average_num
feature = []
for i in range(input_num):
index = int(input_num * ratio)
            if i <= index:  # one input point contains two weeks' data (week1, week2)
first = True
for j in range(week_input):
if first: # one input point contains all the feature of week1
for k in values[i][:-2]:
feature.append(k)
first = False
else: # for week2, one input point only contains confirmed_cases & new_confirmed_cases
feature.append(values[i + j][0])
feature.append(values[i + j][1])
train_y.append(values[i][23]) # output: average cases, that is the third week
tmp = []
tmp.append(feature)
train_x.append(tmp) # size: [1, feature_num]
feature = []
else:
first = True
for j in range(week_input):
if first: # one input point contains all the feature of week1
for k in values[i][:-2]:
feature.append(k)
first = False
else: # for week2, one input point only contains confirmed_cases & new_confirmed_cases
feature.append(values[i + j][0])
feature.append(values[i + j][1])
test_y.append(values[i][23]) # output: average cases, that is the third week
tmp = []
tmp.append(feature)
test_x.append(tmp) # size: [1, feature_num]
feature = []
print(len(train_x))
train_x_ls = [] # Change the format for later processing
for j in train_x:
for i in j:
train_x_ls.append(i)
train_x_df = pd.DataFrame(train_x_ls)
train_y_df = pd.DataFrame(train_y)
train_x_mean = train_x_df.mean() # train_x dataset mean
train_x_std = train_x_df.std() # train_x dataset std
train_y_mean = train_y_df.mean() # train_y dataset mean
train_y_std = train_y_df.std() # train_y dataset std
for i in range(len(train_x)): # using train_x mean and train_x std to normalize
for j in range(len(train_x[i])):
for k in range(len(train_x[i][j])):
train_x[i][j][k] = (train_x[i][j][k] - train_x_mean[k]) / train_x_std[k]
for i in range(len(train_y)): # using train_y mean and train_y std to normalize
train_y[i] = (train_y[i] - train_y_mean) / train_y_std
for i in range(len(test_x)): # using train_x mean and train_x std to normalize
for j in range(len(test_x[i])):
for k in range(len(test_x[i][j])):
test_x[i][j][k] = (test_x[i][j][k] - train_x_mean[k]) / train_x_std[k]
for i in range(len(test_y)): # using train_y mean and train_y std to normalize
test_y[i] = (test_y[i] - train_y_mean) / train_y_std
# split test to validation and test
validation_x, test_x, validation_y, test_y = model_selection.train_test_split(test_x, test_y, test_size=0.5,
random_state=1)
train_x = torch.tensor(train_x)
train_y = torch.tensor(train_y).reshape(-1, 1)
validation_x = torch.tensor(validation_x)
validation_y = torch.tensor(validation_y).reshape(-1, 1)
test_x = torch.tensor(test_x)
test_y = torch.tensor(test_y).reshape(-1, 1)
# define dataset
class Mydataset(Dataset):
def __init__(self, input, output):
super(Mydataset, self).__init__()
self.input = input
self.label = output
def __getitem__(self, index):
return self.input[index], self.label[index]
def __len__(self):
return len(self.input)
train_data = Mydataset(train_x, train_y)
trainloader = DataLoader(train_data, batch_size=256, shuffle=True)
return trainloader, train_x, train_y, validation_x, validation_y, \
test_x, test_y, train_x_mean, train_x_std, train_y_mean, train_y_std |
from odoo import http
from odoo.http import request
from odoo.addons.website_sale.controllers.main import WebsiteSale
import logging
_logger = logging.getLogger(__name__)
class WebsiteSaleExtend(WebsiteSale):
def _get_mandatory_billing_fields(self):
res = super(WebsiteSaleExtend,self)._get_mandatory_billing_fields()
res.append("vat")
res.append("l10n_latam_identification_type_id")
res.remove("street")
res.remove("city")
res.remove("country_id")
return res
def _get_mandatory_shipping_fields(self):
res = super(WebsiteSaleExtend,self)._get_mandatory_shipping_fields()
res.remove("street")
res.remove("city")
res.remove("country_id")
return res
def checkout_form_validate(self, mode, all_form_values, data):
error, error_message = super(WebsiteSaleExtend,self).checkout_form_validate(mode, all_form_values, data)
# _logger.info(all_form_values)
# _logger.info(data)
# Validate the identity document type against the receipt type
return error, error_message
@http.route("/change_invoice_type_code",type="json",method="POST",csrf=True,auth="public", website=True)
def change_invoice_type_code(self,**kargs):
# _logger.info(kargs)
order = request.website.sale_get_order()
order.sudo().write({"invoice_type_code":kargs.get("invoice_type_code")})
return True
def checkout_redirection(self,order):
res = super(WebsiteSaleExtend,self).checkout_redirection(order)
# _logger.info(order)
# _logger.info(order.read())
return res |
# basic operations with mne prepared labels (masking, ROI correction, etc.)
import os
import numpy as np
import mne
def get_lbl_active_indices(lbl_filename):
lbl = mne.read_label(lbl_filename)
max_lh_vertex_ind = 10242
max_rh_vertex_ind = 20484
if lbl_filename.split("-")[-1] == "rh.label":
return np.array(lbl.vertices[(lbl.vertices >= max_lh_vertex_ind) & (lbl.vertices < max_rh_vertex_ind)])
elif lbl_filename.split("-")[-1] == "lh.label":
#print(lbl_filename.split("-")[-2], 'active indices: ')
#print(np.array(lbl.vertices[lbl.vertices < max_lh_vertex_ind]))
return np.array(lbl.vertices[lbl.vertices < max_lh_vertex_ind])
else:
print("Error: expected a filename ending in '-lh.label' or '-rh.label', got %s" % lbl_filename)
return 0
def get_ttestROI_indices(template_lbl, ttest_stc, th_h):
tempROIinds = get_lbl_active_indices(template_lbl)
#print('temp lbl :', tempROIinds)
print('tmp lbl:', len(tempROIinds))
stc_data = mne.read_source_estimate(ttest_stc).data
#print(stc_data.shape)
lbl_data = np.zeros(stc_data.shape)
lbl_data[tempROIinds, :] = stc_data[tempROIinds, :]
return np.array(np.where(abs(lbl_data) >= th_h)[0]) # <--- will work only with one frame stc !!
def get_max_singnif_vox_ind(template_lbl, ttest_stc):
tempROIinds = get_lbl_active_indices(template_lbl)
#print('temp lbl :', tempROIinds)
#print('tmp lbl:', len(tempROIinds))
stc_data = mne.read_source_estimate(ttest_stc).data
#print(stc_data.shape)
#print(np.unique(stc_data))
lbl_data = np.zeros(stc_data.shape)
lbl_data[tempROIinds, :] = abs(stc_data[tempROIinds, :])
#print('max_sgnf:')
#print(max(abs(lbl_data)))
return np.array(np.where(lbl_data == lbl_data.max())[0]) # <--- will work only with one frame stc !!
def merge_lbls_indices(lbls_path):
lbls_list = [lbl for lbl in os.listdir(lbls_path) if '.label' in lbl]
print('\n labels in folder: ', len(lbls_list))
print('\n labels : \n', lbls_list)
merged_lbls_inds = []
for l, lbl in enumerate(lbls_list):
cur_inds = get_lbl_active_indices(os.path.join(lbls_path, lbl))
merged_lbls_inds = np.concatenate((merged_lbls_inds, cur_inds)).astype(int)
return np.unique(merged_lbls_inds)
def subtract_2lbls_indices(lbl, sub_lbl):
tempROIinds = get_lbl_active_indices(lbl)
subROIinds = get_lbl_active_indices(sub_lbl)
return np.delete(tempROIinds, np.where(np.in1d(tempROIinds, subROIinds)))
|
'''
Your task is to calculate the logical value of a boolean array. Test arrays are one-dimensional and their size is in the range 1-50.
The logical operations are AND, OR and XOR.
You should begin with the first value, and repeatedly apply the logical operation to the running result and each
remaining element of the array in turn.
First Example:
Input: true, true, false, operator: AND
Steps: true AND true -> true, true AND false -> false
Output: false
Second Example:
Input: true, true, false, operator: OR
Steps: true OR true -> true, true OR false -> true
Output: true
Third Example:
Input: true, true, false, operator: XOR
Steps: true XOR true -> false, false XOR false -> false
Output: false
Input:
boolean array, string with the operator's name: 'AND', 'OR', 'XOR'.
Output:
calculated boolean
'''
def logical_calc(array, op):
if op=='AND':
if False in array:
result = False
else:
result = True
elif op=="OR":
if True in array:
result = True
else:
result = False
else:
result = array[0]
for i in range(1,len(array)):
result = result ^ array[i]
#boolean
return result
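# A quick sanity check against the three worked examples above (hypothetical
# driver code, not part of the kata solution):
if __name__ == '__main__':
    assert logical_calc([True, True, False], 'AND') is False
    assert logical_calc([True, True, False], 'OR') is True
    assert logical_calc([True, True, False], 'XOR') is False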
|
from itertools import product
from typing import Callable
import numpy as np
import pandas as pd
from starfish.core.imagestack.imagestack import ImageStack
from starfish.core.types import (
Axes,
Features,
Number,
PerImageSliceSpotResults,
SpotAttributes,
SpotFindingResults
)
def measure_intensities_at_spot_locations_in_image(
image: np.ndarray,
spots: SpotAttributes,
measurement_function: Callable[[np.ndarray], Number],
radius_is_gyration: bool = False,
) -> pd.Series:
"""measure the intensity of each spot in spots in the corresponding image
Parameters
----------
image : np.ndarray,
3-d volume in which to measure intensities
spots : pd.DataFrame
SpotAttributes table containing coordinates and radii of spots
measurement_function : Callable[[np.ndarray], Number])
Function to apply over the spot volumes to identify the intensity (e.g. max, mean, ...)
radius_is_gyration : bool
if True, indicates that the radius corresponds to radius of gyration, which is a
function of spot intensity, but typically is a smaller unit than the sigma generated by
blob_log. In this case, the spot's bounding box is rounded up instead of down when
measuring intensity. (default False)
Returns
-------
pd.Series :
Intensities for each spot in SpotAttributes
"""
def fn(row: pd.Series) -> Number:
d = image[row['z_min']:row['z_max'], row['y_min']:row['y_max'], row['x_min']:row['x_max']]
return measurement_function(d)
if radius_is_gyration:
radius = np.ceil(spots.data[Features.SPOT_RADIUS]).astype(int) + 1 # round up
else:
radius = spots.data[Features.SPOT_RADIUS].astype(int) # truncate down to nearest int
for v, max_size in zip(['z', 'y', 'x'], image.shape):
# numpy does exclusive max indexing, so need to subtract 1 from min to get centered box
spots.data[f'{v}_min'] = np.clip(spots.data[v] - (radius - 1), 0, None)
spots.data[f'{v}_max'] = np.clip(spots.data[v] + radius, None, max_size)
return spots.data[['z_min', 'z_max', 'y_min', 'y_max', 'x_min', 'x_max']].astype(int).apply(
fn,
axis=1
)
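# A minimal sketch of the bounding-box lookup described in the docstring above,
# using plain numpy/pandas rather than a real SpotAttributes table (the column
# names and values here are illustrative only):
#
#   import numpy as np, pandas as pd
#   img = np.arange(27, dtype=float).reshape(3, 3, 3)
#   spot = pd.Series({'z_min': 0, 'z_max': 2, 'y_min': 0, 'y_max': 2,
#                     'x_min': 0, 'x_max': 2})
#   region = img[spot['z_min']:spot['z_max'],
#                spot['y_min']:spot['y_max'],
#                spot['x_min']:spot['x_max']]
#   print(region.max())  # 13.0, the max over the 2x2x2 corner block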
def measure_intensities_at_spot_locations_across_imagestack(
data_image: ImageStack,
reference_spots: PerImageSliceSpotResults,
measurement_function: Callable[[np.ndarray], Number],
radius_is_gyration: bool = False) -> SpotFindingResults:
"""given spots found from a reference image, find those spots across a data_image
Parameters
----------
data_image : ImageStack
ImageStack containing multiple volumes for which spots' intensities must be calculated
reference_spots : PerImageSliceSpotResults
Spots found in a reference image
measurement_function : Callable[[np.ndarray], Number])
Function to apply over the spot volumes to identify the intensity (e.g. max, mean, ...)
radius_is_gyration : bool
if True, indicates that the radius corresponds to radius of gyration, which is
a function of spot intensity, but typically is a smaller unit than the sigma generated
by blob_log. In this case, the spot's bounding box is rounded up instead of down when
measuring intensity. (default False)
Returns
-------
SpotFindingResults :
A Dict of tile indices and their corresponding measured SpotAttributes
"""
ch_labels = data_image.axis_labels(Axes.CH)
round_labels = data_image.axis_labels(Axes.ROUND)
spot_results = SpotFindingResults(imagestack_coords=data_image.xarray.coords,
log=data_image.log)
# measure spots in each tile
indices = product(ch_labels, round_labels)
for c, r in indices:
tile_indices = {Axes.ROUND: r, Axes.CH: c}
if reference_spots.spot_attrs.data.empty:
# if no spots found don't measure
spot_results[tile_indices] = reference_spots
else:
image, _ = data_image.get_slice({Axes.CH: c, Axes.ROUND: r})
blob_intensities: pd.Series = measure_intensities_at_spot_locations_in_image(
image,
reference_spots.spot_attrs,
measurement_function,
radius_is_gyration=radius_is_gyration
)
# copy reference spot positions and attributes
tile_spots = SpotAttributes(reference_spots.spot_attrs.data.copy())
# fill in intensities
tile_spots.data[Features.INTENSITY] = blob_intensities
spot_results[tile_indices] = PerImageSliceSpotResults(
spot_attrs=tile_spots, extras=None)
return spot_results
|
import numpy as np
from random import shuffle
def softmax_loss_naive(W, X, y, reg):
"""
Softmax loss function, naive implementation (with loops)
Inputs have dimension D, there are C classes, and we operate on minibatches
of N examples.
Inputs:
- W: A numpy array of shape (D, C) containing weights.
- X: A numpy array of shape (N, D) containing a minibatch of data.
- y: A numpy array of shape (N,) containing training labels; y[i] = c means
that X[i] has label c, where 0 <= c < C.
- reg: (float) regularization strength
Returns a tuple of:
- loss as single float
- gradient with respect to weights W; an array of same shape as W
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
D = W.shape[0]
C = W.shape[1]
N = X.shape[0]
for i in range(N):
#We consider samples column-wise, sample is 1xD
sample = X[i,:]
scores = sample.dot(W)
max_score = np.max(scores)
#To avoid numerical instability, shift by the max score:
#values now lie in (-inf, 0]
scores -= max_score
exp_sum = np.sum(np.exp(scores))
#Loss is updated for each sample and normalized
#see course notes for the numerical stability of this formula
loss += -scores[y[i]] + np.log(exp_sum)
#Gradient update for each class j: subtract 1 if j is the correct
#class, then add the softmax probability exp(score_j) / exp_sum.
#The expression between parentheses is a scalar, so the 1xD sample
#scales directly into dW[:,j] (Dx1).
for j in range(C):
dW[:,j] += sample * (-1 * (j==y[i]) + np.exp(scores[j])/exp_sum )
#Loss and dW are normalized over the number of training examples
loss /= N
dW /= N
#regularization, for loss is l2
loss += reg*np.sum(W**2) / 2
dW += reg*W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
def softmax_loss_vectorized(W, X, y, reg):
"""
Softmax loss function, vectorized version.
Inputs and outputs are the same as softmax_loss_naive.
"""
# Initialize the loss and gradient to zero.
loss = 0.0
dW = np.zeros_like(W)
#############################################################################
# TODO: Compute the softmax loss and its gradient using no explicit loops. #
# Store the loss in loss and the gradient in dW. If you are not careful #
# here, it is easy to run into numeric instability. Don't forget the #
# regularization! #
#############################################################################
D = W.shape[0]
C = W.shape[1]
N = X.shape[0]
#The procedure goes like for the naive impl
scores = X.dot(W)
scores_max = np.max(scores, axis=1).reshape(-1,1)
#To avoid numerical instability, shift each row by its own max (see above)
scores -= scores_max
exp_scores = np.exp(scores)
#For each example we sum the scores
exp_scores_sum = np.sum(exp_scores, axis=1)
correct_scores = scores[np.arange(N), y]
loss = np.sum(-correct_scores + np.log(exp_scores_sum))
norm_exp_scores = exp_scores/(exp_scores_sum.reshape(-1,1))
binary_matrix = np.zeros_like(scores)
binary_matrix[np.arange(N), y] = -1
norm_exp_scores += binary_matrix
dW = (X.T).dot(norm_exp_scores)
#loss is normalized and added l2 regularization
loss /= N
loss += reg*np.sum(W**2)/2
#dW is likewise normalized and regularized
dW /= N
dW += reg*W
#############################################################################
# END OF YOUR CODE #
#############################################################################
return loss, dW
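# A quick consistency-check sketch with synthetic shapes (not part of the
# assignment): compare the two implementations, then spot-check one gradient
# entry against a centered finite difference.
if __name__ == '__main__':
    np.random.seed(0)
    W = 0.01 * np.random.randn(10, 4)
    X = np.random.randn(20, 10)
    y = np.random.randint(4, size=20)
    l_naive, g_naive = softmax_loss_naive(W, X, y, reg=0.1)
    l_vec, g_vec = softmax_loss_vectorized(W, X, y, reg=0.1)
    assert abs(l_naive - l_vec) < 1e-8 and np.allclose(g_naive, g_vec)
    h = 1e-5
    Wp, Wm = W.copy(), W.copy()
    Wp[0, 0] += h
    Wm[0, 0] -= h
    num_grad = (softmax_loss_vectorized(Wp, X, y, 0.1)[0]
                - softmax_loss_vectorized(Wm, X, y, 0.1)[0]) / (2 * h)
    print('analytic: %f  numeric: %f' % (g_vec[0, 0], num_grad))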
|
"""
Largest palindrome product
Problem 4
A palindromic number reads the same both ways. The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
"""
pals = []
for x in reversed(range(100, 1000)):
for y in reversed(range(100, x + 1)):
xy = str(x * y)
if xy == xy[::-1]:
pals.append(x * y)
print(max(pals))
def is_palindrome(number):
"""
Tests if a number is a palindrome
:type number: int
:param number: the number to test
:return: True if the decimal representation reads the same both ways
"""
str_input = str(number)
return str_input == str_input[::-1]
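# The search can also be written with the helper above; a small equivalent
# sketch, for illustration only:
def largest_palindrome_product(lo=100, hi=999):
    return max(x * y
               for x in range(hi, lo - 1, -1)
               for y in range(x, lo - 1, -1)
               if is_palindrome(x * y))
print(largest_palindrome_product())  # 906609
|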
#!/usr/bin/env python
#-*- coding: utf-8 -*-
# Author: (hexue@bytedance.com)
#########################################################################
# Created Time: 2016-12-21 20:23:09
# File Name: protobufhelper.py
# Description: ProtocolBuffer helper
#########################################################################
import sys
import os
import glob2
from google.protobuf.json_format import MessageToJson, Parse
import re
import logging
import importlib
logger = logging.getLogger('main')
CWD_PATH = os.getcwd()  # current working directory
PROTOC_DIR = CWD_PATH + '/protoc'  # proto source files, stored per IDL repo
PB_GEN_DIR = CWD_PATH + '/protoc/pb_gen'  # generated .py files from protos, stored per project
# path to the protoc command
# if sys.platform == 'darwin':
# PROTOC_EXEC = 'bin/protoc_mac'
# else:
# PROTOC_EXEC = 'bin/protoc'
PROTOC_EXEC = "/usr/local/bin/protoc"
USERNAME = 'ci_testing'  # username used by testing to pull code
ACCESS_TOKEN = '-kqWMyVXSuFJ5Cd2UwaQ'  # access token used by testing to pull code
regex = r"(.*)\/(.*)\.git"
# manage proto files: clone and update the pb repositories
def manage_proto_repo(module):
print("hello")
# temp = str(module['pb_repo']).split('https://code.byted.org/')[1]
# match = re.search(regex, temp)
# if match:
# repo_name = '_'.join(match.groups())
# else:
# repo_name = temp.split('.git')[0].replace('/', '_')
#
# pb_file = str(module['pb_file'])
# pb_repo_path = os.path.join(PROTOC_DIR, repo_name) # pb repo path
# pb_import_path = module['pb_import_path']
#
# # if the repo already exists, pull updates; otherwise clone it
# try:
# if not os.path.exists(pb_repo_path):
# git_path = 'https://ci_testing:{access_token}@{git_addr}'.format(access_token=ACCESS_TOKEN, git_addr=module['pb_repo'].split('//')[-1])
# git.Repo.clone_from(url=git_path, to_path=pb_repo_path)
# else:
# repo = git.Repo(pb_repo_path)
# repo.git.pull()
#
# except git.exc.InvalidGitRepositoryError as ex:
# ex_info = 'Repository is invalid, ex: %s' % ex
# logging.error(ex_info)
# raise Exception(ex_info)
# except git.GitCommandError as ex:
# logger.error('manage_proto_repo error, ex: %s' % ex)
# ex_info = "git command error, please make sure 'ci_testing' is the member of repository"
# raise Exception(ex_info)
# except Exception as ex:
# raise Exception(ex)
#
# return generate_py_file(module, pb_repo_path, pb_file, repo_name, pb_import_path)
# generate the corresponding .py files from the proto files
def generate_py_file(pb_repo_path, pb_file, repo_name, pb_import_path):
pb_dir = os.path.join(PB_GEN_DIR, repo_name)  # directory for the generated pb files
if pb_import_path:
pb_dir = os.path.join(pb_dir, pb_import_path)
if not os.path.exists(pb_dir):
os.makedirs(pb_dir, 0o755)
# find the matching proto files
if pb_import_path:
include_path = os.path.join(pb_repo_path, pb_import_path)
else:
include_path = pb_repo_path
all_files = glob2.glob(os.path.join(include_path, '**', '*.proto'))
for file_path in all_files:
proto_gen_command = '{proto_cmd} --proto_path={pb_include_path} --python_out={pb_gen_dir} {pb_file}'.format(
proto_cmd = PROTOC_EXEC,
pb_include_path = include_path,
pb_gen_dir = pb_dir,
pb_file = file_path)
try:
os.system(proto_gen_command)
except Exception as ex:
logger.error('protoc cmd error, %s' % ex)
continue
pb_relative_file = file_path[len(include_path):].strip('/')
pb_relative_modules = pb_relative_file.split('/')[:-1]
cwd = pb_dir
for rm in pb_relative_modules:
cwd = os.path.join(cwd, rm)
os.system("touch " + cwd + "/__init__.py")
os.system("touch " + pb_dir + "/__init__.py")
pb_check_file = pb_file.replace('.proto', '_pb2.py')  # generated python file that needs checking
return pb_check_file, pb_dir
# pull the proto files based on the given repo info and deserialize the response
def protobuf_response(module, response):
'''
Fetch the module description info
'''
try:
# pb_gen_file, pb_gen_path = manage_proto_repo(module)
repo_name = "person"
pb_repo_path = os.path.join(PROTOC_DIR, repo_name)
pb_file = "protoc/person/person_student.proto"
pb_import_path = "/Users/withheart/Documents/studys/tools/protoc/person"
pb_gen_file, pb_gen_path = generate_py_file(pb_repo_path, pb_file, repo_name, pb_import_path)
except Exception as ex:
return False, ex
sys.path.append(pb_gen_path)  # make the generated package importable
MODULE_NAME = pb_gen_file.replace('.py', '').replace('/', '.')
MODULE_RESPONSE = module['pb_resp_name']
name = importlib.import_module(MODULE_NAME)
try:
target = getattr(name, MODULE_RESPONSE)()
target.ParseFromString(response)
json_resp = MessageToJson(target, including_default_value_fields=True)
except Exception as ex:
return False, 'failed to deserialize proto, %s' % ex
return True, json_resp
# convert the JSON data into a message, then serialize it to binary and return it
def json_to_pb_message(module, str_response):
'''
Fetch the module description info
'''
try:
pb_gen_file, pb_gen_path = manage_proto_repo(module)
except Exception as ex:
return False, 'failed to pull the code repository, %s' % ex
sys.path.append(pb_gen_path)
MODULE_NAME = pb_gen_file.replace('.py', '').replace('/', '.')
MODULE_RESPONSE = module['pb_resp_name']
name = importlib.import_module(MODULE_NAME)
target = getattr(name, MODULE_RESPONSE)()
parsed_pb = Parse(str_response, target, ignore_unknown_fields=False)
protobuf_str = parsed_pb.SerializeToString()
return protobuf_str
if __name__ == '__main__':
module = {
# 'pb_repo': 'https://code.byted.org/rocket/common.git',
# 'pb_repo': 'https://code.byted.org/ez/idl.git',
# 'pb_file': 'client/index.proto',
# 'pb_import_path': 'proto',
'pb_resp_name': 'Student',
}
flag, json_resp = protobuf_response(module, '')
print(json_resp)
url = 'http://api.openlanguage.com/ez/studentapp/v15/home?iid=41421991999&device_id=41024141561&ac=wifi&channel=local_test&aid=1335&app_name=open_language&version_code=203&version_name=2.0.3&device_platform=android&ssmix=a&device_type=OPPO+R11+Plus&device_brand=OPPO&language=zh&os_api=25&os_version=7.1.1&openudid=d20fc7a44e1423b&manifest_version_code=203&resolution=1080*1920&dpi=480&update_version_code=2030&_rticket=1534747599027'
import requests
resp = requests.get(url)
if resp.status_code == 200:
flag, json_resp = protobuf_response(module, resp.content)
# print json_resp
# assert 'err_no' in json_resp, 'errno not in json'
# print json_resp
f = open('text.json', 'w')
f.write(json_resp)
f.close()
binary_data = json_to_pb_message(module, json_resp)
bf = open('test_pb', 'wb')
bf.write(binary_data)
bf.close()
|
from collections import defaultdict
import string
import re
DELIMITERS = string.punctuation.replace("'", '') + string.whitespace
def word_count(phrase):
words = defaultdict(int)
split_phrase = re.split(f"'?[{re.escape(DELIMITERS)}]+'?", phrase.lower())
for word in split_phrase:
if word:
words[word] += 1
return dict(words)
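# Example usage with a hypothetical phrase (not part of the exercise tests):
if __name__ == '__main__':
    print(word_count("Don't stop, don't stop!"))  # {"don't": 2, 'stop': 2}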
|
PANEL_DASHBOARD = 'project'
PANEL_GROUP = 'virtlab'
PANEL = 'vlconfig'
ADD_PANEL = 'openstack_dashboard.local.dashboards.project_nci.vlconfig.panel.VLConfig'
|
import logging
from AItools import OCR, CreateImage, SpeechRecognition, CreateAudio, ChatBott
import constants as keys
from telegram.ext import *
from telegram import *
import os
import uuid
# img = ImageClassification('Inception')
# img.read_process_image('abc.jpg')
# print(img.predict())
# ocr = OCR()
# out = ocr.text_from_image('bcd.jpg')
# print(out)
# crimg = CreateImage()
# crimg.draw('black', 1200, 1200, out)
# sr = SpeechRecognition()
# print(sr.text_from_audio('abc.wav'))
# au = CreateAudio()
# au.text_to_audio('that contain at least half as many vowels as consonants.')
logging.basicConfig(
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO
)
logger = logging.getLogger(__name__)
_CHOOSING, _IMAGE, _AUDIO, _CHAT = range(4)
_IMAGETOTEXT, _AUDIOTOTEXT, _TEXTTOIMAGE, _TEXTTOAUDIO = range(4, 8)
keyboard1 = [
['IMAGE', 'AUDIO'],
["CHAT"]
]
keyboard2 = [
['IMAGE TO TEXT', 'TEXT TO IMAGE'],
['BACK']
]
keyboard3 = [
['AUDIO TO TEXT', 'TEXT TO AUDIO'],
['BACK']
]
keyboard4 = [
["CANCEL"]
]
markup1 = ReplyKeyboardMarkup(keyboard1, resize_keyboard=True, one_time_keyboard=True)
markup2 = ReplyKeyboardMarkup(keyboard2, resize_keyboard=True, one_time_keyboard=True)
markup3 = ReplyKeyboardMarkup(keyboard3, resize_keyboard=True, one_time_keyboard=True)
markup4 = ReplyKeyboardMarkup(keyboard4, resize_keyboard=True, one_time_keyboard=True)
ocr = OCR()
crimg = CreateImage()
sr = SpeechRecognition()
au = CreateAudio()
ch = ChatBott()
def start(update, context) -> int:
update.message.reply_text(
"Hello I am AI-Bot\nI can change Image and Audio to Text and vice versal and also you can chat with me",
reply_markup=markup1,
)
return _CHOOSING
def startagain(update, context) -> int:
update.message.reply_text(
"Hello Again please press /start to continue",
)
return _CHOOSING
def image(update, context) -> int:
update.message.reply_text(
"choose either image to text or text to image or back to return to main",
reply_markup=markup2,
)
return _IMAGE
def audio(update, context) -> int:
update.message.reply_text(
"choose either audio to text or text to audio or back to return to main",
reply_markup=markup3,
)
return _AUDIO
def chat(update, context):
global chat_session
chat_session = ch.createbot()
update.message.reply_text(
"your chat is started say hello or something to start or back to return to main",
reply_markup=markup4
)
return _CHAT
def image_to_text(update, context):
update.message.reply_text(
"now send me an image file or cancel to go back",
reply_markup=markup4,
)
return _IMAGETOTEXT
def uploadimage(update, context):
uploaded = False
uuud = str(uuid.uuid1())
os.makedirs(f'trash/{uuud}', exist_ok=True)
try:
has_photo = len(update.message.photo) if update.message.photo else False
bot = context.bot
# print(update.message.photo[0])
# fil = bot.getFile(update.message.photo[0]['file_id'])
# print(dir(fil))
file_ = [
[fil["file_size"], fil["file_id"], fil["file_unique_id"]]
for fil in update.message.photo
]
file_.sort()
file_name = f'trash/{uuud}/temp.png'
download_file = bot.getFile(file_[-1][1])
download_file.download(custom_path=file_name)
update.message.reply_text("succesfully uploaded!")
uploaded = True
text = ocr.text_from_image(file_name)
os.system(f'rm -rf trash/{uuud}')
update.message.reply_text(
"text extracted from the image:\n"+text,
reply_markup=markup2
)
return _IMAGE
except Exception as e:
print(e)
if uploaded:
os.system(f'rm -rf trash/{uuud}')
update.message.reply_text(
"there was an error please send me the photo again",
reply_markup=markup4
)
return _IMAGETOTEXT
def text_to_image(update, context):
update.message.reply_text(
"now send me an text that you want to write it on a picture or cancel to go back",
reply_markup=markup4,
)
return _TEXTTOIMAGE
def textonimage(update, context):
text = update.message.text
chat_id = update.message.chat_id
try:
# saves the image with name temp.png in the current directory
file_path = crimg.draw('black', 1200, 1200, text=text)
update.message.reply_text('here you go')
context.bot.send_photo(
chat_id=chat_id, photo=open(file_path, 'rb'),
reply_markup=markup2
)
os.system(f'rm -rf {file_path[:-9]}')
return _IMAGE
except Exception as e:
print(e)
update.message.reply_text(
'an error has occurred, please send me the text again',
)
return _TEXTTOIMAGE
def audio_to_text(update, context):
update.message.reply_text(
"now send me an audio file wav format preferred or cancel to go back",
reply_markup=markup4
)
return _AUDIOTOTEXT
def uploadaudio(update, context):
uploaded = False
uuud = str(uuid.uuid1())
os.makedirs(f'trash/{uuud}', exist_ok=True)
try:
download_file = update.message.audio.get_file()
file_name = update.message.audio.file_name
new_filename = f'trash/{uuud}/temp.'+file_name.split('.')[-1]
# print(dir(update.message.audio))
download_file.download(custom_path=new_filename)
update.message.reply_text("succesfully uploaded!")
uploaded = True
text = sr.text_from_audio(new_filename)
os.system(f'rm -rf trash/{uuud}')
update.message.reply_text(
"text extracted from the audio file\n"+text,
reply_markup=markup3
)
return _AUDIO
except Exception as e:
print(e)
if uploaded:
os.system(f'rm -rf trash/{uuud}')
update.message.reply_text(
"there was an error please send me the audio file again",
reply_markup=markup4
)
return _AUDIOTOTEXT
def text_to_audio(update, context):
update.message.reply_text(
"now send me an text that you want to convert it to audio or cancel to go back",
reply_markup=markup4)
return _TEXTTOAUDIO
def textonaudio(update, context):
text = update.message.text
chat_id = update.message.chat_id
try:
# saves the audio file named 'temp.ext' in current directory
file_path = au.text_to_audio(text)
update.message.reply_text("here you go", reply_markup=markup3)
context.bot.send_audio(
chat_id=chat_id, audio=open(file_path, 'rb'),
reply_markup=markup3)
os.system(f'rm -rf {file_path[:-9]}')
return _AUDIO
except Exception as e:
print(e)
update.message.reply_text(
"an error has occured please send me the text again",
reply_markup=markup3)
return _TEXTTOAUDIO
def chatting(update, context):
global chat_session
text = update.message.text
res = chat_session.get_response(text)
update.message.reply_text(
str(res), reply_markup=markup4
)
return _CHAT
def main() -> None:
"""Run the bot."""
# Create the Updater and pass it your bot's token.
updater = Updater(keys.API_KEY)
# Get the dispatcher to register handlers
dispatcher = updater.dispatcher
conv_handler = ConversationHandler(
entry_points=[MessageHandler(Filters.text, start)],
states={
_CHOOSING : [
MessageHandler(
Filters.regex(
"^(IMAGE)$",
),
image,
),
MessageHandler(
Filters.regex(
"^(AUDIO)$",
),
audio,
),
MessageHandler(
Filters.regex(
"^(CHAT)$",
),
chat,
),
],
_IMAGE : [
MessageHandler(
Filters.regex(
"^(IMAGE TO TEXT)$",
),
image_to_text,
),
MessageHandler(
Filters.regex(
"^(TEXT TO IMAGE)$",
),
text_to_image,
),
MessageHandler(
Filters.regex(
"^(BACK)$",
),
start,
),
],
_IMAGETOTEXT : [
MessageHandler(
Filters.photo | Filters.document.category("image"), uploadimage
),
MessageHandler(
Filters.regex(
"^(CANCEL)$",
),
image,
),
],
_TEXTTOIMAGE : [
MessageHandler(
Filters.regex(
"^(CANCEL)$",
),
image,
),
MessageHandler(
Filters.text,
textonimage,
),
],
_AUDIO: [
MessageHandler(
Filters.regex(
"^(AUDIO TO TEXT)$",
),
audio_to_text,
),
MessageHandler(
Filters.regex(
"^(TEXT TO AUDIO)$",
),
text_to_audio,
),
MessageHandler(
Filters.regex(
"^(BACK)$",
),
start,
),
],
_AUDIOTOTEXT : [
MessageHandler(
Filters.audio | Filters.document.category("audio"), uploadaudio
),
MessageHandler(
Filters.regex(
"^(CANCEL)$",
),
audio,
),
],
_TEXTTOAUDIO : [
MessageHandler(
Filters.regex(
"^(CANCEL)$",
),
audio,
),
MessageHandler(
Filters.text,
textonaudio,
),
],
_CHAT : [
MessageHandler(
Filters.regex(
"^(CANCEL)$",
),
start,
),
MessageHandler(
Filters.text,
chatting,
),
],
},
fallbacks=[
MessageHandler(Filters.text, start),
],
)
# dispatcher.add_handler(CommandHandler('start', start))
dispatcher.add_handler(conv_handler)
# dispatcher.add_handler(MessageHandler(Filters.text, startagain))
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
|
# Route configuration
# Routing: web applications generally use semantic URLs
# @app.route() binds a path (URL) to a handler function
from flask import Flask
app = Flask(__name__)
# 127.0.0.1:5000/
@app.route('/')
def hello():
return 'hello world'
# 127.0.0.1:5000/user
@app.route('/user')
def user():
return '用户信息'
# Route: string parameter
@app.route('/username/<username>')
def username(username):
# process business logic based on username and return the result
return username
# Route: integer parameter
@app.route('/userid/<int:id>')
def userid(id):
# the return value must be a string, dict, tuple, etc.
return F"用户id是{id}"
# Route: path parameter (a path string may contain /)
@app.route('/userinfo/<path:path>')
def userinfo(path):
return path
if __name__ == '__main__':
app.run(host = '127.0.0.1', port = 5001, debug = True)
|
from aiohttp import web
from api.settings import config
from api.routes import setup_routes
from api.db import init_pg, close_pg, init_redis, close_redis
from api.middlewares import process_req_param
import logging
def init_app():
app = web.Application(middlewares=[process_req_param])
logging.basicConfig(level=logging.DEBUG)
setup_routes(app)
app['config'] = config
app.on_startup.extend([init_pg, init_redis])
app.on_cleanup.extend([close_pg, close_redis])
return app
if __name__ == '__main__':
app = init_app()
web.run_app(app, port=8001)
|
from __future__ import print_function
from __future__ import absolute_import
# Need to import path to test/fixtures and test/scripts/
# Ex : export PYTHONPATH='$PATH:/root/test/fixtures/:/root/test/scripts/'
#
# To run tests, you can do 'python -m testtools.run tests'. To run specific tests,
# You can do 'python -m testtools.run -l tests'
# Set the env variable PARAMS_FILE to point to your ini file. Else it will try to pick params.ini in PWD
#
import os
from common.openstack_libs import nova_client as mynovaclient
from common.openstack_libs import nova_exception as novaException
import fixtures
import testtools
import unittest
from common.contrail_test_init import ContrailTestInit
from vn_test import *
from quantum_test import *
from vnc_api_test import *
from nova_test import *
from vm_test import *
from common.connections import ContrailConnections
from floating_ip import *
from policy_test import *
from multiple_vn_vm_test import *
from contrail_fixtures import *
from tcutils.wrappers import preposttest_wrapper
from testresources import ResourcedTestCase
from .vgw_test_resource import SolnSetupResource
import traffic_tests
from vgw.verify import VerifyVgwCases
class TestVgwCases(testtools.TestCase, ResourcedTestCase, fixtures.TestWithFixtures, VerifyVgwCases):
resources = [('base_setup', SolnSetupResource)]
def __init__(self, *args, **kwargs):
testtools.TestCase.__init__(self, *args, **kwargs)
self.res = SolnSetupResource.getResource()
self.inputs = self.res.inputs
self.connections = self.res.connections
self.logger = self.res.logger
self.nova_h = self.res.nova_h
self.agent_inspect = self.connections.agent_inspect
self.cn_inspect = self.connections.cn_inspect
self.analytics_obj = self.connections.analytics_obj
self.vnc_lib = self.connections.vnc_lib
def __del__(self):
print("Deleting test_with_setup now")
SolnSetupResource.finishedWith(self.res)
def setUp(self):
super(TestVgwCases, self).setUp()
if 'TEST_CONFIG_FILE' in os.environ:
self.input_file = os.environ.get('TEST_CONFIG_FILE')
else:
self.input_file = 'params.ini'
def tearDown(self):
print("Tearing down test")
super(TestVgwCases, self).tearDown()
SolnSetupResource.finishedWith(self.res)
def runTest(self):
pass
# end runTest
@preposttest_wrapper
def test_vgw_with_fip_on_same_node(self):
'''Test VM is launched on the same compute node where VGW is configured and VM got FIP from VGW network
'''
return self.verify_vgw_with_fip(compute_type='same')
@preposttest_wrapper
def test_vgw_with_fip_on_different_node(self):
'''Test VM is launched on the different compute node where VGW is configured and VM got FIP from VGW network
'''
return self.verify_vgw_with_fip(compute_type='different')
@preposttest_wrapper
def test_vgw_with_native_vm_on_same_node(self):
'''Test VM is launched on the same compute node where VGW is configured and VM is launched on VGW network
'''
return self.verify_vgw_with_native_vm(compute_type='same')
@preposttest_wrapper
def test_vgw_with_native_vm_on_different_node(self):
'''Test VM is launched on a different compute node from where VGW is configured and VM is launched on VGW network
'''
return self.verify_vgw_with_native_vm(compute_type='different')
@preposttest_wrapper
def test_vgw_with_multiple_subnet_for_single_vgw(self):
'''Test VGW having multiple subnet is working properly
'''
return self.verify_vgw_with_multiple_subnet()
@preposttest_wrapper
def test_vgw_with_restart_of_vgw_node(self):
'''Test VGW with restarting the VGW node
'''
return self.vgw_restart_of_vgw_node()
|
import scrapy
import import_data as import_data
from retrieval.items import MetaItem
class MetaSpider(scrapy.Spider):
name = "meta retrieval"
print "META RETRIEVAL"
print 80*"="
start_urls = import_data.generateSearchURLs()[10000:]
def parse(self, response):
item = MetaItem()
url = response.xpath('//a[@title="Show document details"]/@href').extract()[0]
request = scrapy.Request(url, callback=self.parse_next)
request.meta['item'] = item
return request
def parse_next(self, response):
item = response.meta['item']
url = response.url
start_index = url.index("eid=") + 4
end_index = url.index("&", start_index)
eid = url[start_index:end_index]
item['url'] = import_data.generateMetaURL(eid)
yield item |
from django.db import models
from django.contrib.auth import get_user_model
from alfheimproject.settings import SECRETS
User = get_user_model()
class DonationsLog(models.Model):
PAYMENT_SYSTEMS = (
(1, 'Paypal'),
(2, 'Yandex.Money'),
(3, 'Unitpay')
)
user = models.ForeignKey(User, on_delete=models.CASCADE)
payment_id = models.CharField(max_length=255)
payer_id = models.CharField(max_length=255, default='')
payment_system = models.IntegerField(choices=PAYMENT_SYSTEMS)
amount = models.IntegerField(default=0)
date = models.DateTimeField(auto_now_add=True)  # set once at creation
update_date = models.DateTimeField(auto_now=True)
execute_url = models.CharField(max_length=255, default='')
approval_url = models.CharField(max_length=255, default='')
executed = models.BooleanField(default=False)
class Meta:
db_table = '{prefix}donations_log'.format(prefix=SECRETS['table_prefix'])
verbose_name = 'Donation Log'
verbose_name_plural = 'Donations Log'
|
import random
import nltk
import os.path, os
from nltk.tokenize import sent_tokenize
import json
from preprocess import hltag
from shutil import copyfile
def read_race(fns):
article = []
for f1 in fns:
for f2 in ["high", "middle"]:
fl = os.listdir("./data/RACE/" + f1 + "/" + f2)
for fn in fl:
with open("./data/RACE/" + f1 + "/" + f2 + "/" + fn, "r") as f:
article += [json.load(f)["article"]]
return article
def problem_gen(article, id):
delimiter = '_[[#@]]_'
def get_cloze(sentence, words):
cloze = []
ans = []
dis = []
sentences = sentence.split(delimiter)
tokens = []
for i in range(len(sentences)):
tokens += [x for x in nltk.word_tokenize(sentences[i])]
if i != len(sentences) - 1:
tokens += ['_[[#@]]_']
if len(tokens) > 50:
return None
used = set()
n_cloze = min((len(tokens)-2) // 6, 4)
if n_cloze <= 0:
if len(tokens) >= 6:
n_cloze = 1
if n_cloze <= 0:
return None
n_cloze = random.randint(1, n_cloze)
for i in range(n_cloze):
while True:
cloze_len = random.randint(1, 4)
left = random.randint(0, len(tokens)-cloze_len)
ok = True
for j in range(left, left+cloze_len):
if j in used:
ok = False
break
if not ok:
continue
for j in range(left, left+cloze_len):
used.add(j)
for j in range(left, left+cloze_len):
if not tokens[j].isalpha():
ok = False
if ok:
cloze += [[left, left+cloze_len]]
ans += [' '.join(tokens[left:left+cloze_len])]
break
if len(ans) == 0:
return None
for i in range(len(ans)):
dislen = max(1, random.randint(len(ans[i].split()) - 1, len(ans[i].split()) + 1))
dislis = []
for j in range(3):
while True:
start = random.randint(0, len(words)-dislen)
d = ' '.join(words[start:start+dislen])
if d != ans[i] and d not in dislis:
dislis += [d]
break
dis += [dislis]
for j in range(cloze[i][0], cloze[i][1]):
tokens[j] = ''
tokens[cloze[i][0]] = '_'
for i in range(len(cloze)):
for j in range(i+1, len(cloze)):
if cloze[i][0] > cloze[j][0]:
cloze[i], cloze[j] = cloze[j], cloze[i]
ans[i], ans[j] = ans[j], ans[i]
dis[i], dis[j] = dis[j], dis[i]
usedmask = set()
for i in range(3):
masklen = random.randint(0, max(0, len(cloze)-1))
if masklen == 0:
continue
mask = [j for j in range(len(cloze)) if j not in usedmask]
random.shuffle(mask)
mask = mask[:masklen]
mask.sort()
for j in mask:
dis[j][i] = ans[j]
usedmask.add(j)
ret = [' '.join((' '.join(tokens).split())), ', '.join(ans)]
for i in range(3):
ret += [', '.join([x[i] for x in dis])]
return ret
d = [[], [], id]
article = article.replace(delimiter, '')  # strings are immutable; keep the result
sentences_raw = [x for x in sent_tokenize(article)]
sentences = [[sentences_raw[i], i] for i in range(len(sentences_raw))]
words = [x for x in nltk.word_tokenize(article) if x.isalpha()]
n_problem = min(10, len(words) // 30)
for i in range(n_problem):
random.shuffle(sentences)
selected = sentences[0:random.randint(1, 3)]
selected = [x[0] for x in selected]
question = get_cloze(delimiter.join(selected), words)
if question is not None:
q = {}
q["question"] = ' '.join(question[0].replace(delimiter, '').split())
ok = True
for j in range(len(d[1])):
if d[1][j]["question"] == q["question"]:
ok = False
break
if not ok:
continue
q["choice"] = question[1:]
if len(set(q["choice"])) != 4:
continue
random.shuffle(q["choice"])
q["answer"] = question[1]
d[1] += [q]
sentences.sort(key = lambda x : x[1])
sentences = [x[0] for x in sentences]
d[0] += [' '.join(sentences)]
return d
if __name__ == '__main__':
for fn in ["train", "dev"]:
output = []
cnt = 0
data = read_race([fn])
for i in range(len(data)):
output += [problem_gen(data[i], str(i))]
cnt += len(output[i][1])
print(fn, len(output), cnt)
output = hltag(output)
print(fn, len(output))
os.makedirs("cloze", exist_ok=True)
with open("./cloze/race_" + fn + ".json", "w", encoding='utf8') as f:
json.dump(output, f, indent=2)
copyfile("./cloze/race_dev.json", "./cloze/race_test.json")
|
#!/usr/bin/python
'''
Streaming Utility for FOSSASIA PSLab - version 1.0.0.
Evaluates user-defined Python statements and plots
the return values as a function of time.
Useful for monitoring the time evolution of parameters
measured by the PSLab.
'''
from __future__ import print_function
import os
os.environ['QT_API'] = 'pyqt'
import sip
sip.setapi("QString", 2)
sip.setapi("QVariant", 2)
from PyQt4 import QtCore, QtGui
import time,sys
from .templates import arbitStream
from PSL_Apps.utilitiesClass import utilitiesClass
import pyqtgraph as pg
import numpy as np
err_count=0
trial = 0
start_time = time.time()
fps = None
dacval=0
params = {
'image' : 'stream.png',
'name':'Data\nStreaming',
'hint':'A continuous data acquisition utility to visualize time dependent behaviour of any of the measurement functions contained in the SEELablet python library.\nThese include get_freq,get_capacitance, and get_average_voltage'
}
class AppWindow(QtGui.QMainWindow, arbitStream.Ui_MainWindow,utilitiesClass):
def __init__(self, parent=None,**kwargs):
super(AppWindow, self).__init__(parent)
self.setupUi(self)
self.I=kwargs.get('I',None)
self.setWindowTitle(self.I.H.version_string+' : '+params.get('name','').replace('\n',' ') )
self.plot=self.add2DPlot(self.plot_area)
labelStyle = {'color': 'rgb(255,255,255)', 'font-size': '11pt'}
self.plot.setLabel('left','Value -->', units='',**labelStyle)
self.totalpoints=2000
self.X=np.arange(self.totalpoints)
self.Y=np.zeros(self.totalpoints)
self.plot.setXRange(0,self.totalpoints)
self.plot.setYRange(-16,16)
self.curve = self.addCurve(self.plot,name='Data'); self.curve.setPen(color=[255,255,255], width=1)
self.streamfunc="I."+self.cmdlist.currentText()
self.start_time=time.time()
self.num=0
self.arrow=pg.ArrowItem(angle=90)
self.plot.addItem(self.arrow)
self.plot_area.addWidget(self.plot)
self.looptimer = QtCore.QTimer()
self.looptimer.timeout.connect(self.acquire)
self.looptimer.start(1)
self.nm=0
self.start_time=time.time()
self.averagingSamples = 1
def stream(self):
self.looptimer.stop()
self.streamfunc="I."+self.cmdlist.currentText()
self.X=np.arange(self.totalpoints)
self.Y=np.zeros(self.totalpoints)
self.num=0
self.looptimer = QtCore.QTimer()
self.looptimer.timeout.connect(self.acquire)
self.looptimer.start(1)
def setAveraging(self):
self.averagingSamples = self.averageCount.value()
def acquire(self):
#if(self.nm<4095):
# self.ad.set_voltage(self.nm)
# self.nm+=1
#self.Y=np.roll(self.Y,-1)
if self.pause.isChecked():return
val=np.average([eval(self.streamfunc,{'I':self.I}) for a in range(self.averagingSamples)])
self.Y[self.num]=val #self.mag.read()[1]
self.msg.setText('%.4f'%(val))
try:
self.arrow.setPos(self.num,self.Y[self.num])
except Exception:
print (self.num)
self.num+=1
if self.num>=self.totalpoints:
self.num=0
T=time.time()
if T-self.start_time>0.5:
self.curve.setData(self.X,self.Y)
self.start_time = T
def saveData(self):
self.pause.setChecked(True)
self.saveDataWindow([self.curve],self.plot)
def parseFunc(self,fn):
fn_name=fn.split('(')[0]
args=str(fn.split('(')[1]).split(',')
int_args=[]
try:
args[-1]=args[-1][:-1]
int_args=[int(t) for t in args]
except Exception:
int_args=[]  # in case the function has zero arguments, args[-1] will fail
method = getattr(self.I,fn_name)
if method == None :
print ('no such command :',fn_name)
return None
else:
print (method,int_args)
return method,int_args
def __del__(self):
self.looptimer.stop()
def closeEvent(self, event):
self.looptimer.stop()
self.finished=True
if __name__ == "__main__":
from PSL import sciencelab
app = QtGui.QApplication(sys.argv)
myapp = AppWindow(I=sciencelab.connect())
myapp.show()
sys.exit(app.exec_())
|
__author__ = 'Netšajev'
from nose.tools import *
from ex49 import parser
def test_peek():
assert_equal(parser.peek([('verb', 'attack')]), 'verb')
assert_equal(parser.peek([('direction', 'south')]), 'direction')
assert_equal(parser.peek([('noun', 'bear')]), 'noun')
assert_equal(parser.peek([('error', 'Eduard')]), 'error')
assert_equal(parser.peek([('noun', 'door'), ('direction', 'west')]), 'noun')
assert_equal(parser.peek([('direction', 'west'), ('noun', 'door')]), 'direction')
assert_equal(parser.peek([('verb', 'go'), ('direction', 'north')]), 'verb')
def test_match():
words_list = [('verb', 'go')]
assert_equal(parser.match(words_list, 'verb'), ('verb', 'go'))
assert_equal(words_list, [])
words_list = [('direction', 'north')]
assert_equal(parser.match(words_list, 'direction'), ('direction', 'north'))
assert_equal(words_list, [])
words_list = [('noun', 'princess')]
assert_equal(parser.match(words_list, 'noun'), ('noun', 'princess'))
assert_equal(words_list, [])
words_list = [('number', 8)]
assert_equal(parser.match(words_list, 'number'), ('number', 8))
assert_equal(words_list, [])
words_list = [('noun', 'door'), ('direction', 'west')]
assert_equal(parser.match(words_list, 'noun'), ('noun', 'door'))
assert_equal(words_list, [('direction', 'west')])
words_list = [('noun', 'door'), ('direction', 'west')]
assert_equal(parser.match(words_list, 'verb'), None)
assert_equal(words_list, [('direction', 'west')])
words_list = []
assert_equal(parser.match(words_list, 'error'), None)
def test_skip():
words_list = [('verb', 'go'), ('verb', 'attack')]
parser.skip(words_list, 'verb')
assert_equal(words_list, [])
words_list = [('verb', 'go'), ('direction', 'north'), ('verb', 'attack')]
parser.skip(words_list, 'verb')
assert_equal(words_list, [('direction', 'north'), ('verb', 'attack')])
words_list = [('verb', 'go'), ('direction', 'north'), ('verb', 'attack')]
parser.skip(words_list, 'direction')
assert_equal(words_list, [('verb', 'go'), ('direction', 'north'), ('verb', 'attack')])
words_list = [('noun', 'door'), ('direction', 'west')]
parser.skip(words_list, 'noun')
assert_equal(words_list, [('direction', 'west')])
words_list = [('noun', 'door'), ('direction', 'west')]
parser.skip(words_list, 'verb')
assert_equal(words_list, [('noun', 'door'), ('direction', 'west')])
words_list = []
parser.skip(words_list, 'error')
assert_equal(words_list, [])
def test_parse_verb():
word_list = [('verb', 'go'), ('direction', 'north')]
assert_equal(parser.parse_verb(word_list), ('verb', 'go'))
assert_equal(word_list, [('direction', 'north')])
word_list = [('verb', 'kill'), ('stop', 'the'), ('noun', 'princess')]
assert_equal(parser.parse_verb(word_list), ('verb', 'kill'))
assert_equal(word_list, [('stop', 'the'), ('noun', 'princess')])
word_list = [('verb', 'eat'), ('stop', 'the'), ('noun', 'bear')]
assert_equal(parser.parse_verb(word_list), ('verb', 'eat'))
assert_equal(word_list, [('stop', 'the'), ('noun', 'bear')])
word_lst = [('error', 'open'), ('stop', 'the'), ('noun', 'door'), ('error', 'and'),
('error', 'smack'), ('stop', 'the'), ('noun', 'bear'), ('stop', 'in'),
('stop', 'the'), ('error', 'nose')]
old_word_list = word_lst
assert_raises(parser.ParserError, parser.parse_verb, word_lst)
assert_equal(word_lst, old_word_list)
word_list = [('stop', 'from'), ('stop', 'to'), ('verb', 'attack'), ('stop', 'the'), ('noun', 'bear')]
assert_equal(parser.parse_verb(word_list), ('verb', 'attack'))
assert_equal(word_list, [('stop', 'the'), ('noun', 'bear')])
def test_parse_object():
word_list = [('noun', 'player'), ('stop', 'to'), ('verb', 'attack'), ('stop', 'the'), ('noun', 'bear')]
assert_equal(parser.parse_object(word_list), ('noun', 'player'))
assert_equal(word_list, [('stop', 'to'), ('verb', 'attack'), ('stop', 'the'), ('noun', 'bear')])
word_list = [('noun', 'door'), ('direction', 'west')]
assert_equal(parser.parse_object(word_list), ('noun', 'door'))
assert_equal(parser.parse_object(word_list), ('direction', 'west'))
assert_equal(word_list, [])
word_list = [('verb', 'go'), ('direction', 'north')]
assert_raises(parser.ParserError, parser.parse_object, word_list)
def test_parse_subject():
word_list = [('stop', 'the'), ('stop', 'to'), ('verb', 'attack'), ('stop', 'the'), ('noun', 'bear')]
assert_equal(parser.parse_subject(word_list, ('noun', 'player')).phrase,
parser.Sentence(('noun', 'player'), ('verb', 'attack'), ('noun', 'bear')).phrase)
word_list = [('stop', 'to'), ('verb', 'go'),
('stop', 'to'), ('stop', 'the'), ('direction', 'east')]
assert_equal(parser.parse_subject(word_list, ('noun', 'princess')).phrase,
parser.Sentence(('noun', 'princess'), ('verb', 'go'), ('direction', 'east')).phrase)
word_list = [('stop', 'to'), ('stop', 'the'), ('direction', 'east')]
assert_raises(parser.ParserError, parser.parse_subject, word_list, ('noun', 'princess'))
def test_parse_sentence():
word_list = [('stop', 'to'), ('verb', 'attack'), ('stop', 'the'), ('noun', 'bear')]
assert_equal(parser.parse_sentence(word_list).phrase,
parser.Sentence(('noun', 'player'), ('verb', 'attack'), ('noun', 'bear')).phrase)
word_list = [('noun', 'princess'), ('stop', 'to'), ('verb', 'go'),
('stop', 'to'), ('stop', 'the'), ('direction', 'east')]
assert_equal(parser.parse_sentence(word_list).phrase,
parser.Sentence(('noun', 'princess'), ('verb', 'go'), ('direction', 'east')).phrase)
word_list = [('stop', 'to'), ('stop', 'the'), ('direction', 'east')]
assert_raises(parser.ParserError, parser.parse_sentence, word_list) |
from django.shortcuts import render, HttpResponse
from servicios.models import servicio
from blog.models import Post, Categoria
# Crear las vistas
def home(request):
return render(request, "ProyectowebApp/home.html")
|
import cgi
import datetime
import wsgiref.handlers
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from google.appengine.api import images
from google.appengine.api import users
|
time = []
jogador = {}
partidas = []
while True:
jogador.clear()
jogador['nome'] = str(input('Nome do Jogador: '))
total = int(input(f'Quantas partidas {jogador["nome"]} jogou? '))
partidas.clear()
for n in range(0, total):
partidas.append(int(input(f'Quantos gols na partida {n+1}? ')))
jogador['gols'] = partidas[:]
jogador['totalGols'] = sum(partidas)
time.append(jogador.copy())
while True:
resposta = str(input('Quer continuar? [S/N] ')).strip().upper()[0]
if resposta in 'SN':
break
print('Erro! Responda apenas com S ou N')
if resposta == 'N':
break
print('-'*30)
print('cod ', end='')
for i in jogador.keys():
print(f'{i:<15}', end='')
print()
for k, v in enumerate(time):
print(f'{k:>2} ', end='')
for d in v.values():
print(f'{str(d):<15}', end='')
print()
print('-'*30)
while True:
busca = int(input('Mostrar dados de qual jogador? (999 para finalizar) '))
if busca == 999:
break
if busca >= len(time):
print(f'Erro! Não existe jogador com código {busca}')
else:
print(f' == LEVANTAMENTO DO JOGADOR {time[busca]["nome"]} ==')
for i, v in enumerate(time[busca]['gols']):
print(f' => Na partida {i+1}, fez {v} gols')
print('-'*30)
print(' << VOLTE SEMPRE >>')
|
#coding:utf-8
import wx
import wx.aui
#import platform
import About
from additions import Settings
from checkconnect import CheckConnect
from ethsetting import EthSetting
from firezones import FireZone1,FireZone2
from gsmsetting import GSMSetting
from outputs import OutPuts
from searchusb import ListUsb
from tools import SaveConfig, GetDev, SetDev
from userskeys import Keys,MasterKey
from zones import Zones
#----------------------------------------------------------------------
ID_Exit = wx.NewId()
ID_Close = wx.NewId()
ID_About = wx.NewId()
ID_ScanUSB = wx.NewId()
ID_Connect = wx.NewId()
ID_GSM = wx.NewId()
ID_Eth = wx.NewId()
ID_SaveConf = wx.NewId()
ID_Keys = wx.NewId()
ID_MasterKey = wx.NewId()
ID_FireZone1 = wx.NewId()
ID_FireZone2 = wx.NewId()
ID_Additions = wx.NewId()
ID_OutPuts = wx.NewId()
ID_Man = wx.NewId()
ID_Zones = wx.NewId()
#### --- Top-level window ---
class MyParentFrame(wx.aui.AuiMDIParentFrame):
def __init__(self):
wx.aui.AuiMDIParentFrame.__init__(self, None, 1, u"Программирование ППКОП \"Офицер\" 04", size=(1200,800),style=wx.FRAME_NO_WINDOW_MENU | wx.MINIMIZE_BOX | wx.MAXIMIZE_BOX | wx.RESIZE_BORDER | wx.CAPTION)
self.MId=wx.NewId()
self.winCount = 0
self.CreateStatusBar(1)
menu = wx.Menu()
menu.Append(ID_ScanUSB, u"USB-RS485")
menu.Append(ID_Man, u"Указать COM порт вручную")
menu.Append(ID_Connect, u"Проверка связи")
menu.AppendSeparator()
menu.Append(ID_SaveConf, u"Запись в ПЗУ")
menu.AppendSeparator()
menu.Append(ID_Close, u"Закрыть все")
menu.AppendSeparator()
menu.Append(ID_Exit, u"Выход")
menubar = wx.MenuBar()
menubar.Append(menu, u"Настройка панели")
menu2 = wx.Menu()
menu2.Append(ID_GSM, u"GSM")
menu2.Append(ID_Eth, u"Ethernet")
menubar.Append(menu2, u"Сетевые настройки")
menu3 = wx.Menu()
menu3.Append(ID_FireZone1, u"Пожарная зона 1")
menu3.Append(ID_FireZone2, u"Пожарная зона 2")
menubar.Append(menu3, u"Пожарные зоны")
menu4 = wx.Menu()
menu4.Append(ID_Keys, u"Ключи")
menu4.Append(ID_MasterKey, u"Мастер ключ")
menubar.Append(menu4, u"Пользователи")
menu5 = wx.Menu()
menu5.Append(ID_OutPuts, u"Выходы")
menu5.Append(ID_Zones, u"Зоны")
menubar.Append(menu5, u"Основные")
menu6 = wx.Menu()
menu6.Append(ID_Additions, u"Дополнительне настройки")
menu6.AppendSeparator()
menu6.Append(ID_About, u"О программе")
menubar.Append(menu6, u"Дополнительно")
self.SetMenuBar(menubar)
self.Bind(wx.EVT_MENU, self.OnExit, id=ID_Exit)
self.Bind(wx.EVT_MENU, self.OnClose, id=ID_Close)
self.Bind(wx.EVT_MENU, self.Ab, id=ID_About)
self.Bind(wx.EVT_MENU, self.SetUsb, id=ID_ScanUSB)
self.Bind(wx.EVT_MENU, self.GSM, id=ID_GSM)
self.Bind(wx.EVT_MENU, self.Eth, id=ID_Eth)
self.Bind(wx.EVT_MENU, self.CheckConn, id=ID_Connect)
self.Bind(wx.EVT_MENU, self.SaveConf, id=ID_SaveConf)
self.Bind(wx.EVT_MENU, self.UserKeys, id=ID_Keys)
self.Bind(wx.EVT_MENU, self.Masterkey, id=ID_MasterKey)
self.Bind(wx.EVT_MENU, self.FireZ1, id=ID_FireZone1)
self.Bind(wx.EVT_MENU, self.FireZ2, id=ID_FireZone2)
self.Bind(wx.EVT_MENU, self.Outputs, id=ID_OutPuts)
self.Bind(wx.EVT_MENU, self.Setting, id=ID_Additions)
self.Bind(wx.EVT_MENU, self.Man, id=ID_Man)
self.Bind(wx.EVT_MENU, self.Mainzones, id=ID_Zones)
def ShowVidPid(self):
self.SetStatusText(u'dev: %s' % (GetDev()), 0)
#### --- USB ---
def SetUsb(self, evt):
dlg = ListUsb(self,-1,u"Список устройств USB", size=(450,300), style = wx.DEFAULT_DIALOG_STYLE)
dlg.ShowModal()
self.SetStatusText(u'dev: %s' % (GetDev()), 0)
#### --- Application shutdown ---
def OnExit(self, evt):
for m in self.GetChildren():
if isinstance(m, wx.aui.AuiMDIClientWindow):
for k in m.GetChildren():
k.Close()
evt.Skip()
self.Close(True)
#### --- Closing the child frames in the main window ---
def OnClose(self, evt):
for m in self.GetChildren():
if isinstance(m, wx.aui.AuiMDIClientWindow):
for k in m.GetChildren():
k.Close()
evt.Skip()
def CheckConn(self,evt):
CheckConnect(self)
def SaveConf(self,evt):
SaveConfig(self)
#### --- Showing the "About" information ---
def Ab(self,evt):
About.Info(self)
#### --- GSM ---
def GSM(self, evt):
child = GSMSetting(self)
child.Activate()
#### --- Ethernet ---
def Eth(self, evt):
child = EthSetting(self)
child.Activate()
#### Keys
def UserKeys(self, evt):
child = Keys(self)
"""
if platform.system() == 'Linux':
child = Keys(self)
else:
child = KeysNT(self)
child.Activate()
"""
#### Master key
def Masterkey(self, evt):
child = MasterKey(self)
child.Activate()
#### Fire zone 1
def FireZ1(self, evt):
child = FireZone1(self)
child.Activate()
#### Fire zone 2
def FireZ2(self, evt):
child = FireZone2(self)
child.Activate()
#### Outputs
def Outputs(self, evt):
child = OutPuts(self)
child.Activate()
#### Zones
def Mainzones(self, evt):
child = Zones(self)
child.Activate()
#### Additional settings
def Setting(self, evt):
child = Settings(self)
child.Activate()
    #### Manually specify the COM port
    def Man(self, evt):
        dlg = wx.TextEntryDialog(self, u'Enter the COM port name ', u'COM1, COM2 or /dev/ttyUSB0?', '')
        if dlg.ShowModal() == wx.ID_OK:
            SetDev(dlg.GetValue())
        dlg.Destroy()
        # Report the configured device (the old code printed an always-empty local).
        self.SetStatusText(u'dev: %s' % (GetDev()), 0)
#----------------------------------------------------------------------
if __name__ == '__main__':
class MyApp(wx.App):
def OnInit(self):
wx.InitAllImageHandlers()
MainFrame = MyParentFrame()
MainFrame.Show(True)
self.SetTopWindow(MainFrame)
return True
app = MyApp(False)
app.MainLoop()
|
# coding: UTF-8
# Print the first n lines of hightemp.txt (n is read from standard input).
n = int(raw_input())
with open('hightemp.txt') as f:
    for _ in xrange(n):
        print f.readline().rstrip('\n')
|
import numpy as np
import sys
import netCDF4 as nc
import os
# Globals
tile_attrs = ['tile_number', 'sNx', 'sNy', 'nSx', 'nSy', 'nPx', 'nPy']
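# NOTE: these attribute names appear to follow the MITgcm tiled-NetCDF
# ("mnc") convention: each tile file records its tile_number plus the
# per-tile (sNx, sNy), per-process (nSx, nSy) and process-grid (nPx, nPy)
# sizes, so the collated grid is Nx = sNx*nSx*nPx by Ny = sNy*nSy*nPy.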
def collate(tile_fnames, output_fname, partition=None, deflatelvl=5, shuffle=True):
# Use a sample tile to initialise the output fields
fname = tile_fnames[0]
tile = nc.Dataset(fname, 'r')
# Force netCDF4 output format
output_format = 'NETCDF4_CLASSIC'
# Create output file using tile's format
output_nc = nc.Dataset(output_fname, 'w', format=output_format)
# Determine collated grid dimensions
tiled_dimsize = {'X': tile.Nx,
'Y': tile.Ny,
'Xp1': 1 + tile.Nx,
'Yp1': 1 + tile.Ny}
# Copy global attributes
    output_attrs = [attr for attr in tile.ncattrs()
                    if attr not in tile_attrs]
for attr in output_attrs:
output_nc.setncattr(attr, tile.getncattr(attr))
# Copy collated dimensions
for d in tile.dimensions:
dim = tile.dimensions[d]
if dim.isunlimited():
output_nc.createDimension(d, None)
elif d in tiled_dimsize:
output_nc.createDimension(d, tiled_dimsize[d])
else:
output_nc.createDimension(d, len(dim))
# Create a variable manifest
untiled_vars = {}
tiled_vars = {}
buffered_vars = {}
for v in tile.variables:
var = tile.variables[v]
if deflatelvl > 0:
# TODO: implement decent chunking scheme, use (bad) library defaults meanwhile
v_out = output_nc.createVariable(v, var.dtype, var.dimensions, zlib=True, complevel=deflatelvl, shuffle=shuffle)
else:
v_out = output_nc.createVariable(v, var.dtype, var.dimensions)
# Copy attributes
for attr in var.ncattrs():
v_out.setncattr(attr, var.getncattr(attr))
# Sort tiled variables and copy untiled variables
if any([d in var.dimensions for d in tiled_dimsize]):
if 'T' in var.dimensions:
buffered_vars[v] = v_out
else:
tiled_vars[v] = v_out
else:
untiled_vars[v] = v_out
# Before closing the tile, transfer any untiled variables
for v in untiled_vars:
output_nc.variables[v][:] = tile.variables[v][:]
tile.close()
#---
# Copy unbuffered tiled variables
transfer_tiles(output_nc, tile_fnames, tiled_vars)
#---
# Process buffered variables along time axis
# Index partitions
# NOTE: Assumes 'T' (i.e. time) is the top axis to be partitioned
if 'T' in output_nc.dimensions and buffered_vars:
t_len = len(output_nc.dimensions['T'])
# Estimate number of partitions based on available memory
# NOTE: np.array.nbytes requires allocation, do not use
        # NOTE: this memory-based partition estimate is rough; treat it as a heuristic
if not partition:
partition = t_len
elif partition == 'auto':
v_itemsize = max([output_nc.variables[v].dtype.itemsize
for v in buffered_vars])
v_size = max([output_nc.variables[v].size for v in buffered_vars])
pbs_vmem = int(os.environ['PBS_VMEM'])
# Memory model: 80MB + array allocation
model_vmem = (80 * 2**20) + (v_itemsize * v_size)
# Pad memory
partition = 1 + int(1.25 * model_vmem) // pbs_vmem
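            # Worked example (hypothetical numbers): an 8-byte variable with
            # 5e8 elements gives model_vmem = 80*2**20 + 4e9 = 4083886080
            # bytes (~4.08 GB); with PBS_VMEM = 2**31 (2 GiB) this yields
            # partition = 1 + int(1.25 * 4083886080) // 2**31 = 3 passes
            # over the time axis.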
# Determine index bounds for partitions
t_bounds = [(i * t_len // partition, (i+1) * t_len // partition)
for i in range(partition)]
else:
t_bounds = []
# Begin buffered tile transfer
for ts, te in t_bounds:
transfer_tiles(output_nc, tile_fnames, buffered_vars, ts, te)
output_nc.close()
def transfer_tiles(output_nc, tile_fnames, tiled_vars, ts=0, te=-1):
for v in tiled_vars:
v_out = output_nc.variables[v]
dims = v_out.dimensions
# Allocate variable field
is_buffered = False
v_shape = list(v_out.shape)
for i, d in enumerate(v_out.dimensions):
if output_nc.dimensions[d].isunlimited():
v_shape[i] = te - ts
is_buffered = True
field = np.empty(v_shape, dtype=v_out.dtype)
# Copy each tile to field
for fname in tile_fnames:
tile = nc.Dataset(fname, 'r')
var = tile.variables[v]
# Determine bounds: xs <= x < xe, ys <= y < ye
# TODO: Precalculate and store the index ranges
nt = tile.tile_number - 1
xt, yt = nt % tile.nPx, nt // tile.nPx
xs = tile.sNx * xt
xe = xs + tile.sNx
ys = tile.sNy * yt
ye = ys + tile.sNy
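            # e.g. (hypothetical layout) sNx = sNy = 30, nPx = 4 and
            # tile_number = 6 give nt = 5, (xt, yt) = (1, 1), so this tile
            # fills field[..., 30:60, 30:60].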
# Extend range for boundary grids
if 'Xp1' in dims:
xe += 1
if 'Yp1' in dims:
ye += 1
# If necessary, pull out a time-buffered sample
if is_buffered:
# NOTE: Assumes that 'T' is the first axis
assert var.dimensions[0] == 'T'
var = var[ts:te, ...]
# Transfer tile to the collated field
if ('X' in dims or 'Xp1' in dims) and ('Y' in dims or 'Yp1' in dims):
field[..., ys:ye, xs:xe] = var[:]
elif ('X' in dims or 'Xp1' in dims):
field[..., xs:xe] = var[:]
elif ('Y' in dims or 'Yp1' in dims):
field[..., ys:ye] = var[:]
else:
# TODO: Use an exception?
sys.exit('Error: untiled variable')
tile.close()
# Save field to output
if is_buffered:
output_nc.variables[v][ts:te, ...] = field[:]
else:
output_nc.variables[v][:] = field[:]
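# A minimal usage sketch (file names are hypothetical): collate every
# per-tile output of a run into one NetCDF file, sizing the time-axis
# partitions from the PBS_VMEM environment variable.
#
#   import glob
#   tile_fnames = sorted(glob.glob('state.*.nc'))
#   collate(tile_fnames, 'state.nc', partition='auto')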
|
from django.shortcuts import render, redirect, get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
# from django.views import View
# from django.views.generic import TemplateView, ListView, DetailView
# from django.http import HttpResponse
from .models import Reference
from .serializer import ReferenceSerializer
# Create your views here.
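# The serializer module is not shown here; a minimal sketch of what
# ReferenceSerializer presumably looks like (field names are inferred from
# the views below, so treat them as assumptions):
#
#   from rest_framework import serializers
#
#   class ReferenceSerializer(serializers.ModelSerializer):
#       class Meta:
#           model = Reference
#           fields = ['id', 'quote', 'author', 'source', 'tags', 'pub_date']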
def home(request):
references = Reference.objects.order_by('-pub_date')
tags = [list(map(str.strip, reference.tags.split(','))) for reference in references]
context = {'info': zip(references, tags)}
return render(request, 'references/home.html', context)
def tag(request, tag_name):
    # Match the tag exactly against the comma-separated list (a bare
    # substring test would let 'art' match 'cartoon').
    references = [reference for reference in Reference.objects.order_by('-pub_date')
                  if tag_name in [t.strip() for t in reference.tags.split(',')]]
tags = [list(map(str.strip, reference.tags.split(','))) for reference in references]
context = {
'type': 'tag',
'info': zip(references, tags),
'input': tag_name
}
return render(request, 'references/tagsearch.html', context)
def search(request):
    # The inner helper is renamed so it no longer shadows the view itself.
    def find_matches(search_params):
        params = list(map(str.lower, search_params.split()))
        references = []
        for reference in Reference.objects.order_by('-pub_date'):
            # One concatenated haystack over all searchable fields; note this
            # is a plain substring test, so a term can also match across
            # field boundaries.
            mish = (reference.quote + reference.author
                    + reference.source + reference.tags).lower()
            for param in params:
                if (param in mish) and (reference not in references):
                    references.append(reference)
        return references
    if request.method == 'GET':
        search_param = request.GET.get('params', '')
        references = find_matches(search_param)
tags = [list(map(str.strip, reference.tags.split(','))) for reference in references]
context = {
'type': 'search',
'info': zip(references, tags),
'input': search_param
}
return render(request, 'references/tagsearch.html', context)
def unique(request, id):
    references = [get_object_or_404(Reference, pk=id)]  # 404 instead of a bare DoesNotExist
tags = [list(map(str.strip, reference.tags.split(','))) for reference in references]
context = {'info': zip(references, tags)}
return render(request, 'references/home.html', context)
# Lists all stocks and get new one
class ReferenceList(APIView):
def get(self, request):
references = Reference.objects.all()
serializer = ReferenceSerializer(references, many=True)
return Response(serializer.data)
def post(self, request):
serializer = ReferenceSerializer(data=request.data)
print(request.data)
if serializer.is_valid():
serializer.save()
return Response(serializer.data, status=status.HTTP_201_CREATED)
return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) |
from django.db import models
class Sensor(models.Model):
# class Meta:
# managed=False
# db_table='TBL1'
SensorNo = models.IntegerField(primary_key=True)
SensorStatus = models.IntegerField()
class Supplier(models.Model):
supplier_id = models.AutoField(primary_key=True)
supplier_name = models.CharField(max_length=200)
def __str__(self):
return self.supplier_name
class Article(models.Model):
article_id = models.AutoField(primary_key=True)
article_label = models.CharField(max_length=200)
article_sensor_no = models.IntegerField()
    article_sensor_status = models.BooleanField(default=True)
article_supplier = models.ManyToManyField(Supplier)#, through='Detail')
def __str__(self):
return self.article_label
class Detail(models.Model):
article = models.ForeignKey(Article, on_delete=models.CASCADE)
supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE)
shipment_cost = models.IntegerField()
order_min = models.IntegerField()
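# To route the Article-Supplier relation through Detail (so each link can
# carry shipment_cost and order_min), declare the field above as suggested
# by the commented-out hint:
#   article_supplier = models.ManyToManyField(Supplier, through='Detail')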
class Order(models.Model):
order_id = models.AutoField(primary_key=True)
order_supplier = models.ForeignKey(Supplier, on_delete=models.CASCADE)
order_article = models.ForeignKey(Article, on_delete=models.CASCADE)
order_number = models.CharField(max_length=200)
order_date = models.DateField()
|
from django.contrib.auth.models import AbstractUser
from django.db import models
class User(AbstractUser):
pass
class Post(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="posts")
text = models.TextField(blank=True)
timestamp = models.DateTimeField(auto_now_add=True)
class Meta:
verbose_name = "post"
verbose_name_plural = "posts"
def serialize(self):
return {
"id": self.id,
"user": self.user_id,
"text": self.text,
"timestamp": self.timestamp.strftime("%b %d %Y, %I:%M %p")
}
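        # Example output (illustrative values):
        #   {"id": 1, "user": 2, "text": "hello",
        #    "timestamp": "Jan 01 2024, 12:00 PM"}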
class Like(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE, related_name="likes")
post = models.ForeignKey(Post, on_delete=models.CASCADE, related_name="liked")
like_date = models.DateTimeField('date liked', auto_now_add=True)
class Following(models.Model):
follower = models.ForeignKey(User, on_delete=models.CASCADE, related_name="following")
friend = models.ForeignKey(User, on_delete=models.CASCADE, related_name="friend")
follow_date = models.DateTimeField('date added', auto_now_add=True)
class Meta:
verbose_name = "friend"
verbose_name_plural = "friends"
|
from context import megasecret # type: ignore
def test_hello():
    # The generated greeting should append a sender name after "From ",
    # so it must not equal the bare template prefix.
    assert megasecret.generate_hello() != "Hello, World! From "
|
from flask import Flask, request
app = Flask(__name__)
@app.route('/')
def welcome_user():
    name = request.args.get('name')
    phone = request.args.get('phone')
    if name is None and phone is None:
        return "Please enter the details"
    return "Your details are updated"
if __name__=="__main__":
app.run(debug=True) |
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 13 19:56:19 2013
@author: Nicholas Léonard
"""
import sys
from pylearn2.datasets.preprocessing import Standardize
from contest_dataset import ContestDataset
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.datasets.dense_design_matrix import DefaultViewConverter
from pylearn2.monitor import Monitor
from hps2 import HPS, HPSData
from pylearn2.training_algorithms.sgd import SGD
import numpy as np
import theano.tensor as T
from theano import config
from theano import function
class MyHPS(HPS):
def get_classification_accuracy(self, model, minibatch, target):
patches = []
patches.append(minibatch[:,:42,:42])
patches.append(minibatch[:,6:,:42])
patches.append(minibatch[:,6:,6:])
patches.append(minibatch[:,:42,6:])
patches.append(minibatch[:,3:45,3:45])
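        # Five 42x42 crops of the (assumed) 48x48 input: the four corners
        # plus the centre crop; the predictions are averaged over them below.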
"""for i in xrange(5):
mirror_patch = []
for j in xrange(42):
mirror_patch.append(patches[i][:,:,42-(j+1):42-j])
patches.append(T.concatenate(mirror_patch,axis=2))"""
"""for patch in patches:
Y_list.append(model.fprop(patch, apply_dropout=False))
Y = T.mean(T.stack(Y_list), axis=(1,2))"""
Y = model.fprop(patches[-1], apply_dropout=False)
i = 1
for patch in patches[:-1]:
Y = Y + model.fprop(patch, apply_dropout=False)
            i += 1
Y = Y/float(i)
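        # Accuracy: fraction of examples whose argmax over the crop-averaged
        # distribution matches the argmax of the one-hot target.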
return T.mean(T.cast(T.eq(T.argmax(Y, axis=1),
T.argmax(target, axis=1)), dtype='int32'),
dtype=config.floatX)
def get_trainingAlgorithm(self, config_id, config_class, cost):
if 'sgd' in config_class:
(learning_rate,batch_size,init_momentum,train_iteration_mode) \
= self.select_train_sgd(config_id)
num_train_batch = (self.ntrain/batch_size)/8
print "num training batches:", num_train_batch
termination_criterion \
= self.get_termination(config_id, config_class)
return SGD( learning_rate=learning_rate,
cost=cost,
batch_size=batch_size,
batches_per_iter=num_train_batch,
monitoring_dataset=self.monitoring_dataset,
termination_criterion=termination_criterion,
init_momentum=init_momentum,
train_iteration_mode=train_iteration_mode)
else:
raise HPSData("training class not supported:"+str(config_class))
def get_valid_ddm(path='../data'):
return ContestDataset(which_set='train',
base_path = path,
start = 3584,
stop = 4096,
preprocessor = Standardize(),
fit_preprocessor = True)
def validate(model_path):
from pylearn2.utils import serial
    try:
        model = serial.load(model_path)
    except Exception, e:
        print model_path + " doesn't seem to be a valid model path, I got this error when trying to load it: "
        print e
        sys.exit(1)
dataset = get_valid_ddm()
# use smallish batches to avoid running out of memory
batch_size = 32
model.set_batch_size(batch_size)
# dataset must be multiple of batch size of some batches will have
# different sizes. theano convolution requires a hard-coded batch size
""" m = dataset.X.shape[0]
extra = batch_size - m % batch_size
assert (m + extra) % batch_size == 0
import numpy as np
if extra > 0:
dataset.X = np.concatenate((dataset.X, np.zeros((extra, dataset.X.shape[1]),
dtype=dataset.X.dtype)), axis=0)
assert dataset.X.shape[0] % batch_size == 0"""
X = model.get_input_space().make_batch_theano()
Ta = model.get_output_space().make_batch_theano()
patches = []
patches.append(X[:,:42,:42,:])
patches.append(X[:,6:,:42,:])
patches.append(X[:,6:,6:,:])
patches.append(X[:,:42,6:,:])
patches.append(X[:,3:45,3:45,:])
for i in xrange(5):
mirror_patch = []
for j in xrange(42):
mirror_patch.append(patches[i][:,:,42-(j+1):42-j,:])
patches.append(T.concatenate(mirror_patch,axis=2))
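    # Each mirror patch rebuilds a crop with its columns in reverse order,
    # i.e. a horizontal flip, giving 10 test-time views in total.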
"""for patch in patches:
Y_list.append(model.fprop(patch, apply_dropout=False))
Y = T.mean(T.stack(Y_list), axis=(1,2))"""
Y = model.fprop(patches[-1], apply_dropout=False)
i = 1
for patch in patches[:-1]:
Y = Y + model.fprop(patch, apply_dropout=False)
        i += 1
Y = Y/float(i)
A = T.cast(T.eq(T.argmax(Y, axis=1),
T.argmax(Ta, axis=1)), dtype='int32')
from theano import function
f = function([X, Ta], A)
Acc = []
for i in xrange(dataset.X.shape[0] / batch_size):
x_arg = dataset.X[i*batch_size:(i+1)*batch_size,:]
y_arg = dataset.y[i*batch_size:(i+1)*batch_size,:]
if X.ndim > 2:
x_arg = dataset.get_topological_view(x_arg)
Acc.append(f(x_arg.astype(X.dtype), y_arg.astype(Ta.dtype)))
#print Acc[0].shape
Acc = np.concatenate(Acc)
#print Acc, Acc.shape, dataset.X.shape, dataset.y.shape
assert Acc.ndim == 1
assert Acc.shape[0] == dataset.X.shape[0]
# discard any zero-padding that was used to give the batches uniform size
#Acc = Acc[:m]
print Acc.mean()
if __name__ == '__main__':
if sys.argv[1] == 'train':
"""train_ddm = ContestDataset(which_set='train',
base_path = "../data/",
start = 0,
stop = 3584,
preprocessor = Standardize())"""
train_ddm = DenseDesignMatrix(
X=np.load("../data/train_X.npy"),
y=np.load("../data/train_y.npy"),
view_converter = DefaultViewConverter(shape=[42,42,1]))
#preprocessor = Standardize()
#preprocessor.apply(train_ddm)
valid_ddm = get_valid_ddm()
task_id = int(sys.argv[2])
start_config_id = None
if len(sys.argv) > 3:
start_config_id = int(sys.argv[3])
log_channel_names = ['train_output_misclass',
'Validation Classification Accuracy']
mbsb_channel_name = 'Validation Missclassification'
hps = MyHPS(dataset_name = "Emotion Recognition Augmented",
task_id = task_id,
train_ddm = train_ddm,
valid_ddm = valid_ddm,
log_channel_names = log_channel_names,
mbsb_channel_name = mbsb_channel_name)
hps.run(start_config_id)
elif sys.argv[1] == 'validate':
validate(sys.argv[2])
else:
print """Usage: python main.py train "experiment_id" ["config_id"]
or
python main.py validate "path/to/model.pkl"
"""
|
from datetime import datetime
from pytz import timezone
import requests
def singleton(cls):
instances = dict()
def wrap(*args, **kwargs):
if cls not in instances:
instances[cls] = cls(*args, **kwargs)
return instances[cls]
return wrap
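# Usage note: with @singleton below, constructing ElasticSearch twice returns
# the same cached instance (and __init__ is not re-run), e.g.:
#   a = ElasticSearch(host='http://localhost:9200', index='idx')
#   b = ElasticSearch()
#   assert a is b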
@singleton
class ElasticSearch():
"""
Parameters
----------
host: str -- host of elasticsearch
index: str -- index of elasticsearch
"""
FORMAT = '%Y-%m-%dT%H:%M:%S-05:00'
TIME_ZONE = timezone('America/Bogota')
HEADERS = {'Content-Type': 'application/json'}
    def __init__(self, **kwargs):
        """Build the endpoint URL from the `host` and `index` keyword arguments."""
        self.url = f'{kwargs.get("host")}/{kwargs.get("index")}/request'
def send_message_to_elastic(self, message: dict):
"""
Send message to ElasticSearch
Parameters
----------
message: dict
"""
document = {
"timestamp": datetime.now(self.TIME_ZONE).strftime(self.FORMAT),
"message": message
}
response_elastic = requests.post(self.url, json=document, headers=self.HEADERS)
print('response', response_elastic)
data = {'name':'Santiago', 'age': 23}
obj_elasticsearch = ElasticSearch(host='http://localhost:9200', index='INDEX_NAME')
obj_elasticsearch.send_message_to_elastic(data)
|