text stringlengths 38 1.54M |
|---|
from utils.utils import create_dataset, Trainer
from layer.layer import Embedding, FeaturesEmbedding, EmbeddingsInteraction, MultiLayerPerceptron
import torch
import torch.nn as nn
import torch.optim as optim
from DIN import BaseModel
# Select the GPU when available, otherwise fall back to the CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Training on [{}].'.format(device))
# Build the amazon-books dataset (100k sampled interactions, behaviour
# sequences of length 40, tensors placed on `device`) and split it.
dataset = create_dataset('amazon-books', sample_num=100000, sequence_length=40, device=device)
field_dims, (train_X, train_y), (valid_X, valid_y), (test_X, test_y) = dataset.train_valid_test_split()
# Hyper-parameters.
EMBEDDING_DIM = 8
LEARNING_RATE = 1e-4
REGULARIZATION = 1e-6  # L2 weight decay passed to Adam
BATCH_SIZE = 4096
EPOCH = 600  # upper bound on training epochs
TRIAL = 100  # NOTE(review): presumably early-stopping patience -- confirm in Trainer
# Model, optimizer and loss; BCELoss implies the model outputs probabilities.
bm = BaseModel(field_dims, EMBEDDING_DIM).to(device)
optimizer = optim.Adam(bm.parameters(), lr=LEARNING_RATE, weight_decay=REGULARIZATION)
criterion = nn.BCELoss()
trainer = Trainer(bm, optimizer, criterion, BATCH_SIZE)
# Train with validation monitoring, then report held-out loss and AUC.
trainer.train(train_X, train_y, epoch=EPOCH, trials=TRIAL, valid_X=valid_X, valid_y=valid_y)
test_loss, test_metric = trainer.test(test_X, test_y)
print('test_loss: {:.5f} | test_auc: {:.5f}'.format(test_loss, test_metric))
|
from __future__ import division
from math import sin, cos, acos, asin, degrees, pi
import serial
from time import sleep
import numpy as np
from scipy.interpolate import splprep, splev, splrep
class Skycam:
    ''' Represents entire 3-node and camera system. Contains methods to calculate
    and initialize paths, control the camera, connect and send serial commands.

    Note: this is Python 2 code (print statements, xrange, raw_input).
    '''
    def __init__(self, a, b, c, zB, zC, cam):
        ''' Initialize new Skycam with calculated node positions, camera position,
        in path-controlled mode.
        Inputs:
            a: (float) length of side a
            b: (float) length of side b
            c: (float) length of side c
            zB: (float) height of point B
            zC: (float) height of point C
            cam: (tuple of floats) initial position of camera
        '''
        self.node0, self.node1, self.node2 = self.calc_nodes(a, b, c, zB, zC)
        self.cam = cam
        # Mode flags: direct=False -> path-controlled (not joystick control);
        # pause=True suspends path traversal.
        self.direct = False
        self.pause = False
        # Index of the last path step sent (see go_path).
        self.save_point = 0
    def calc_nodes(self, a, b, c, zB, zC):
        ''' Calculate the positions of Skycam nodes based on node distance measurements.
        A is the origin, B is along the y-axis, C is the remaining point.
        Sides are opposite their respective points:
            a is BC, b is AC, c is AB
        Inputs:
            a: (float) length of side a
            b: (float) length of side b
            c: (float) length of side c
            zB: (float) height of point B
            zC: (float) height of point C
        Returns:
            (tuple of floats) coordinates of node0, node1, node2
        '''
        # Project lengths into xy plane
        # NOTE(review): a_eff is built from side c (not a), and the heights are
        # ADDED rather than subtracted (the xy projection of a 3D length would
        # be sqrt(len**2 - dz**2)) -- confirm these formulas against the
        # rigging geometry.
        a_eff = ((zC-zB)**2 + c**2)**.5
        b_eff = (zC**2 + b**2)**.5
        c_eff = (zB**2 + c**2)**.5
        # Law of cosines
        numer = b_eff**2 + c_eff**2 - a_eff**2
        denom = 2*b_eff*c_eff
        Arad = acos(numer/denom)
        # Law of sines
        # NOTE: Brad and Crad are computed but never used below.
        Brad = asin(b*sin(Arad)/a)
        Crad = asin(c*sin(Arad)/a)
        # Angle of side AC measured from the x-axis (AB lies along the y-axis).
        theta = .5*pi-Arad
        return (0,0, 0), (0, c, zB), (b*cos(theta), b*sin(theta), zC)
    def load_path(self, points):
        ''' Create a new path based on predetermined points.
        Inputs:
            points: (list of tuples of floats) specific camera positions for
                any given time.
        Returns:
            Initializes new Path in Skycam's path attribute.
            Note: Path.new_path returns None when any point lies outside
            the boundary, so self.path may be None.
        '''
        self.path = Path.new_path(points, self.node0, self.node1, self.node2)
    def create_path(self, waypoints, steps):
        ''' Generate a new list of points based on waypoints.
        Inputs:
            waypoints: (list of tuples of floats): points the path should
                bring the camera to
            steps: (int) number of steps in which to complete the path
        Returns:
            Calls load_path method on list of generated spline points
        '''
        xpoints = [point[0] for point in waypoints]
        ypoints = [point[1] for point in waypoints]
        zpoints = [point[2] for point in waypoints]
        # spline parameters
        s = 2.0 # smoothness
        k = 1 # spline order
        nest = -1 # estimate of knots needed
        # create spline and calculate length
        # NOTE: `s` is rebound here from the smoothness value to the spline
        # (tck) object returned by splprep.
        s, us = splprep([xpoints, ypoints, zpoints], s=s, k=k, nest=nest)
        totl = self.splineLen(s)
        # Target arc length between consecutive path points.
        dl = totl/steps
        if dl > 1:
            print "dl greater than 1!"
        i = 0
        u = 0
        upath = [u]
        # Divide path into equidistant lengths
        while i < steps-1:
            u = self.binary_search(u, s, dl) # optionally pass tolerance
            upath.append(u)
            print i
            i += 1
        path = [splev(u, s) for u in upath]
        # Report the mean deviation of the realised segment lengths from dl.
        path_lens = []
        for i in xrange(len(path) - 1):
            path_lens.append(distance(path[i], path[i+1]))
        error = [ele - dl for ele in path_lens]
        print 'Error is: ', sum(error)/len(error)
        self.load_path(path)
        # self.path = Path.new_path(path, self.node0, self.node1, self.node2)
    def go_path(self, start):
        '''Send appropriate movement commands for loaded path.
        Input:
            start: (int) index of path at which to begin sending commands
        '''
        #TODO: Implement save point
        # The while/break structure runs the for-loop at most once; the while
        # condition only gates whether traversal starts at all.
        while (not self.direct and not self.pause):
            for i in xrange(len(self.path.diffs0) - start):
                self.send_command(self.path.diffs0[i + start], self.path.diffs1[i + start], self.path.diffs2[i + start])
                # raw_input('')
                # Remember the last index sent so traversal could resume here.
                self.save_point = i
            break
    # def pause_path(self):
    #     ''' Pause path traversal.'''
    #     self.pause = True
    # def switch_mode(self):
    #     ''' Switch from path control to joystick control '''
    #     self.direct = not self.direct
    # def go_input(self):
    #     ''' Translate a direct-control input into a directional vector and send appropriate commands '''
    #     pass
    def connect(self, baud=57600):
        ''' Connect to proper serial ports for Bluetooth communication.
        Inputs:
            baud: (int) baud rate at which to connect
        Returns:
            Print confirmation of connection
        '''
        # Connect to proper serial ports
        # One rfcomm (Bluetooth serial) device per node; 50 s timeout.
        self.serA = serial.Serial('/dev/rfcomm0', baud, timeout=50)
        self.serB = serial.Serial('/dev/rfcomm1', baud, timeout=50)
        self.serC = serial.Serial('/dev/rfcomm2', baud, timeout=50)
        print 'Hacking the mainframe...'
        # Give the Bluetooth links time to come up before sending commands.
        sleep(8)
        print 'Mainframe hacked'
    def send_command(self, diff0, diff1, diff2):
        '''Send proper commands to all three serial ports.
        Inputs:
            diff0: (float) node length difference for node 0
            diff1: (float) node length difference for node 1
            diff2: (float) node length difference for node 2
        '''
        print diff0, diff1, diff2
        # 'g' terminates each command; presumably the firmware parses
        # "<value>g" as a relative spool move -- confirm against firmware.
        self.serA.write(str(diff0) + 'g')
        self.serB.write(str(diff1) + 'g')
        self.serC.write(str(diff2) + 'g')
        #TODO: Always mess around with this value
        # Pacing delay between consecutive movement commands.
        sleep(.35)
        pass
    # def dldp(self, nodePos, theta, phi):
    #     ''' use a directional vector and current position to calculate change in node length '''
    #     cam = self.cam
    #     deltaX = cam[0] - nodePos[0]
    #     deltaY = cam[1] - nodePos[1]
    #     deltaZ = cam[2] - nodePos[2]
    #     numer = deltaX*cos(theta)*cos(phi) + deltaY*sin(theta)*cos(phi) + deltaZ*sin(phi)
    #     denom = (deltaX**2 + deltaY**2 + deltaZ**2)**.5
    #     return numer/denom
    def binary_search(self, ustart, s, dl, tol=.01):
        ''' Perform a binary search to find parametrized location of point.
        Inputs:
            ustart: (float) parameter value of the previous path point
            s: (spline object)
            dl: (float) target arc (chord) length to the next point
            tol: (float) relative tolerance on the chord length
        Returns:
            um: (float) midpoint of search
        '''
        point = splev(ustart, s)
        ui = ustart
        uf = 1
        um = (ui + uf)/2
        # NOTE: the six values below are computed but never used.
        xm, ym, zm = splev(um, s)
        xf, yf, zf = splev(uf, s)
        # Narrow [ui, uf] until the chord from `point` to splev(um, s) is
        # within tol of dl.  Assumes chord length grows with u; may loop
        # forever if dl cannot be reached before u = 1.
        while True:
            tpoint = splev(um, s)
            if distance(point, tpoint)>(dl*(1+tol)):
                uf, um = um, (um+ui)/2
            elif distance(point, tpoint)<(dl*(1-tol)):
                ui, um = um, (um+uf)/2
            else:
                return um
    def splineLen(self, s):
        ''' Calculate length of a spline.
        Inputs:
            s: (spline object) represents path that joins waypoints
        Returns:
            (float) length of spline
        '''
        # Sample 1000 points along u in [0, 1] and sum the chord lengths.
        ts = np.linspace(0, 1, 1000)
        xs, ys, zs = splev(ts, s)
        spline = zip(xs, ys, zs)
        ipoint = spline[0]
        totl = 0
        for point in spline:
            # First iteration adds distance(ipoint, ipoint) == 0.
            totl += distance(point, ipoint)
            ipoint = point
        return totl
    def tighten(self):
        ''' Calibrate node lengths to current position of camera.
        Enter ' ' to tighten
        Enter 's' to accept node length
        '''
        # NOTE: `input` shadows the builtin of the same name (harmless here).
        while True:
            input = raw_input('Tightening Node A')
            if input == ' ':
                # Reel in 100 units at a time until the operator accepts.
                self.serA.write('-100g')
            elif input == 's':
                break
        while True:
            input = raw_input('Tightening Node B')
            if input == ' ':
                self.serB.write('-100g')
            elif input == 's':
                break
        while True:
            input = raw_input('Tightening Node C')
            if input == ' ':
                self.serC.write('-100g')
            elif input == 's':
                return
class Path:
    ''' Path object stores a path's points, node lengths, and length differences
    to enable in path traversal.
    '''
    def __init__(self, points, node0, node1, node2):
        ''' Init method for Path class.
        Input:
            points: (list of tuples of floats)
            node0, 1, 2: (tuple of floats)
        Returns:
            Initializes Path attributes
        '''
        self.points = points
        # Wire length from each node to the camera at every path point...
        self.lens0 = [distance(node0, point) for point in points]
        self.lens1 = [distance(node1, point) for point in points]
        self.lens2 = [distance(node2, point) for point in points]
        # ...and the scaled per-step length changes actually sent to the nodes.
        self.diffs0 = self.diff_calc(self.lens0)
        self.diffs1 = self.diff_calc(self.lens1)
        self.diffs2 = self.diff_calc(self.lens2)
    @staticmethod
    def new_path(points, node0, node1, node2):
        ''' Factory function to create new path object, if it exists within boundary.
        Inputs:
            points: (list of tuples of floats) points that make up a path
            node0, 1, 2: (tuple of floats) coordinates of nodes
        Returns:
            (Path) new initialized Path object, or None if any point lies
            outside the boundary
        '''
        #Check if any point lies outside boundary
        for point in points:
            if Path.boundary(node0, node1, node2, point):
                return None
        return Path(points, node0, node1, node2)
    @staticmethod
    def boundary(node0, node1, node2, point, offset=6, hbound=120):
        ''' Check if any given point lies outside the boundaries of our system.
        Inputs:
            node0, 1, 2: (tuple of floats)
            point: (tuple of floats)
            offset: (float) offset distance from nodes to define boundary triangle
            hbound: (float) upper bound on the z coordinate (the code rejects
                point[2] > hbound; an earlier doc said "lower bound")
        Returns:
            (bool) Whether point is outside boundary, prints which
        '''
        # Find midpoint of each side
        mid_AB = tuple((node0[i] + node1[i])/2 for i in xrange(3))
        mid_BC = tuple((node1[i] + node2[i])/2 for i in xrange(3))
        mid_AC = tuple((node2[i] + node0[i])/2 for i in xrange(3))
        # Find slope of line connecting point to opposite midpoint
        # (unit direction vectors from each node toward the opposite midpoint)
        m_A = tuple((mid - node)/distance(mid_BC, node0) for (mid, node) in zip(mid_BC, node0))
        m_B = tuple((mid - node)/distance(mid_AC, node1) for (mid, node) in zip(mid_AC, node1))
        m_C = tuple((mid - node)/distance(mid_AB, node2) for (mid, node) in zip(mid_AB, node2))
        # Find offset node coordinates
        # (each node pulled `offset` units inward -> shrunken safety triangle)
        new_0 = tuple(coord + slope*offset for (coord, slope) in zip(node0, m_A))
        new_1 = tuple(coord + slope*offset for (coord, slope) in zip(node1, m_B))
        new_2 = tuple(coord + slope*offset for (coord, slope) in zip(node2, m_C))
        # NOTE(review): the AB check uses the ORIGINAL edge (x = 0) and new_0
        # is never used -- verify the intended boundary offsets.
        if point[2] < 0 or point[2] > hbound:
            print 'Height of path out of bounds', point[2]
            return True
        elif point[0] < 0:
            print "Path out of bounds of line AB", point[0]
            return True
        elif point[1] < (new_2[1]*point[0]/new_2[0]):
            print "Path out of bounds of line AC", point[1]
            return True
        elif point[1] > (((new_2[1] - new_1[1])/new_2[0])*point[0] + new_1[1]):
            print "Path out of bounds of line BC", point[1]
            return True
        else:
            return False
    def diff_calc(self, lens):
        ''' Return scaled differences between subsequent spool lengths for sending.
        Input:
            lens: (list of floats) lengths of node wires at any time
        Returns:
            (list of ints) differences between subsequent lengths, scaled by 80
            and truncated to int.
            NOTE(review): an earlier doc said "x100" but the code scales by
            80 -- confirm the intended spool calibration factor.
        '''
        return [int(80*(lens[ind+1] - lens[ind])) for ind in xrange(len(lens)-1)]
def distance(A, B):
    ''' Euclidean distance between two 3D points.
    Inputs:
        A: (tuple of floats/ints) first point
        B: (tuple of floats/ints) second point
    Returns:
        (float) distance between the points
    '''
    dx, dy, dz = (A[i] - B[i] for i in range(3))
    return (dx*dx + dy*dy + dz*dz) ** .5
import math as m
# Samples of the integrand on a uniform grid, one list per step size h;
# each coarser list uses every 2nd point of the previous one (h = 1, 2, 4, 8
# over the same interval).
f_values_1 = [0, 0.062057145, 1.0558914326, 5.3775128776, 17.0202629818, 41.5748215863, 86.2292060414, 159.7687706061, 272.5762067482, 436.6315430847, 665.5121453814, 974.3927165529, 1380.0452966628, 1900.8392629238, 2556.7413296971, 3369.3155484932, 4361.7233079714]
f_values_2 = [0, 1.0558914326, 17.0202629818, 86.2292060414, 272.5762067482, 665.5121453814, 1380.0452966628, 2556.7413296971, 4361.7233079714]
f_values_4 = [0, 17.0202629818, 272.5762067482, 1380.0452966628, 4361.7233079714]
f_values_8 = [0, 272.5762067482, 4361.7233079714]


def _simpson(values, h):
    """Composite Simpson's rule for uniformly spaced samples.

    ``values`` must hold an odd number of samples (an even number of
    intervals) taken at spacing ``h``.  Replaces the four copy-pasted
    ex1_* loops that differed only in the sample list and step size.
    """
    last = len(values) - 1
    result = 0
    for i, v in enumerate(values):
        if i == 0 or i == last:
            result += v      # endpoints: weight 1
        elif i % 2:
            result += 4 * v  # odd interior points: weight 4
        else:
            result += 2 * v  # even interior points: weight 2
    return result * h / 3


def ex1_1():
    """Simpson approximation using all samples (h = 1)."""
    return _simpson(f_values_1, 1)


def ex1_2():
    """Simpson approximation using every 2nd sample (h = 2)."""
    return _simpson(f_values_2, 2)


def ex1_4():
    """Simpson approximation using every 4th sample (h = 4)."""
    return _simpson(f_values_4, 4)


def ex1_8():
    """Simpson approximation using every 8th sample (h = 8)."""
    return _simpson(f_values_8, 8)


print("EX1:")
s = ex1_8()
s_ = ex1_4()
s__ = ex1_2()
s___ = ex1_1()
print(s)
print(s_)
print(s__)
print(s___)
# Convergence quotients: should approach 2**order (16 for Simpson) as h -> 0.
qc_1 = (s_ - s) / (s__ - s_)
qc_2 = (s__ - s_) / (s___ - s__)
# Richardson error estimates for a 4th-order method: (finer - coarser) / (2**4 - 1).
error_1 = (s__ - s_) / 15
error_2 = (s___ - s__) / 15
print(qc_1, error_1)
print(qc_2, error_2)
def gauss_seidel_4(x0, y0, z0, t0, matrix, b, error, it):
    """Gauss-Seidel iteration for a 4x4 linear system ``matrix @ v = b``.

    Starts from the guess ``(x0, y0, z0, t0)`` and sweeps until every
    component changes by at most ``error`` between iterations or ``it``
    iterations have been performed.  Prints the state before each sweep and
    once after the loop, and returns the final ``(x, y, z, t)``.
    """
    iteration = 0
    x, y, z, t = x0, y0, z0, t0
    # Offset the "previous" values so the convergence test fails on entry.
    x0 += error * 10
    y0 += error * 10
    z0 += error * 10
    t0 += error * 10
    while any(abs(new - old) > error
              for new, old in zip((x, y, z, t), (x0, y0, z0, t0))) and iteration != it:
        print(x, y, z, t, iteration)
        x0, y0, z0, t0 = x, y, z, t
        # Each component uses the freshest values available (Gauss-Seidel).
        x = (b[0] - matrix[0][1]*y0 - matrix[0][2]*z0 - matrix[0][3]*t0) / matrix[0][0]
        y = (b[1] - matrix[1][0]*x - matrix[1][2]*z0 - matrix[1][3]*t0) / matrix[1][1]
        z = (b[2] - matrix[2][0]*x - matrix[2][1]*y - matrix[2][3]*t0) / matrix[2][2]
        t = (b[3] - matrix[3][0]*x - matrix[3][1]*y - matrix[3][2]*z) / matrix[3][3]
        iteration += 1
    print(x, y, z, t, iteration)
    return x, y, z, t
print("EX3:")
# One Gauss-Seidel sweep (it=1) from a near-solution starting guess.
gauss_seidel_4(-0.81959, 1.40167, 2.15095, 0.11019, [[6, 0.5, 3, 0.25], [1.2, 3, 0.25, 0.2], [-1, 0.25, 4, 2], [2, 4, 1, 8]], [2.5, 3.8, 10, 7], 10**-4, 1)
def euler(x0, xf, y, function, h, error, it):
    """Explicit (forward) Euler integration of y' = function(x, y).

    Steps from ``x0`` toward ``xf`` with step ``h`` until ``x0`` is within
    ``error`` of ``xf`` or ``it`` steps have been taken, whichever comes
    first.  Returns the final estimate of y.

    Fix: the step counter was never incremented (the line was commented
    out), which made the ``it`` safety cap dead code and risked an infinite
    loop when the stop tolerance is never met.  The calls in this file
    request far more iterations than the integrations need, so their
    results are unchanged.
    """
    _it = 0
    while abs(xf - x0) > error and _it != it:
        y += function(x0, y) * h
        x0 += h
        _it += 1
    return y
def ex4_f(t, x):
    """Right-hand side f(t, x) = t*(t/2 + 1)*x**3 + (t + 5/2)*x**2 for EX4."""
    cubic_coeff = t * (t / 2 + 1)
    quadratic_coeff = t + 5 / 2
    return cubic_coeff * (x ** 3) + quadratic_coeff * (x ** 2)
def qc_value_euler_rk(method, h0, x0, xf, y, function, it, order, error):
    """Convergence-quotient check for a one-step ODE integrator.

    Runs ``method`` with step sizes h0, h0/2 and h0/4, prints each result,
    the convergence quotient qc = (s' - s)/(s'' - s'), and the Richardson
    error estimates for a method of the given ``order``.  Returns None.
    """
    coarse = method(x0, xf, y, function, h0, error, it)
    medium = method(x0, xf, y, function, h0 / 2, error, it)
    fine = method(x0, xf, y, function, h0 / 4, error, it)
    qc = (medium - coarse) / (fine - medium)
    print(coarse, h0)
    print(medium, h0/2)
    print(fine, h0/4)
    print(qc)
    # Richardson extrapolation: estimated absolute error of the finest run.
    est_error = (fine - medium) / (2**order - 1)
    print("qc = {} | erro absoluto estimado = {} | h = {}" .format(qc, est_error, h0))
    print("erro relativo estimado = {}" .format(est_error / fine))
print("EX4:")
print("Euler")
# Convergence check for forward Euler (order 1) on y' = ex4_f over [1, 1.8].
qc_value_euler_rk(euler, 0.08, 1, 1.8, 0.1, ex4_f, 100, 1, 10**-4)
def delta1(x, y, h, function):
    """First RK4 increment: k1 = h*f(x, y)."""
    return h * function(x, y)


def delta2(x, y, h, function):
    """Second RK4 increment: k2 = h*f(x + h/2, y + k1/2)."""
    return h * function(x + h/2, y + delta1(x, y, h, function) / 2)


def delta3(x, y, h, function):
    """Third RK4 increment: k3 = h*f(x + h/2, y + k2/2)."""
    return h * function(x + h/2, y + delta2(x, y, h, function) / 2)


def delta4(x, y, h, function):
    """Fourth RK4 increment: k4 = h*f(x + h, y + k3)."""
    return h * function(x + h, y + delta3(x, y, h, function))


def delta(x, y, h, function):
    """Weighted RK4 update: (k1 + 2*k2 + 2*k3 + k4)/6."""
    return delta1(x, y, h, function)/6 + delta2(x, y, h, function)/3 + delta3(x, y, h, function)/3 + delta4(x, y, h, function)/6


def rk4(x, xf, y, function, h, error, it):
    """Classic 4th-order Runge-Kutta integration of y' = function(x, y).

    Steps from ``x`` toward ``xf`` with step ``h`` until ``x`` is within
    ``error`` of ``xf`` or ``it`` steps have been taken.

    Fix: the step counter was never incremented (the line was commented
    out), so the ``it`` cap was dead code and the loop could run forever
    when the tolerance is never met.  The call in this file requests far
    more iterations than needed, so its output is unchanged.
    """
    _it = 0
    while abs(xf - x) > error and _it != it:
        y += delta(x, y, h, function)
        x += h
        _it += 1
    return y
def ex4_b_f(u, v):
    """Same right-hand side as ex4_f, in variables (u, v):
    u*(u/2 + 1)*v**3 + (u + 5/2)*v**2."""
    cubic_coeff = u * (u/2 + 1)
    quadratic_coeff = u + 5/2
    return cubic_coeff * (v**3) + quadratic_coeff * (v**2)
print("RK4:")
# Convergence check for RK4 (order 4) on y' = ex4_b_f over [1, 2.6].
qc_value_euler_rk(rk4, 0.16, 1, 2.6, 0.1, ex4_b_f, 1000, 4, 10**-4)
def ex5_z(t, y, z):
    """RHS for z' in the EX5 system: z' = 0.5 + t**2 + t*z (y is unused)."""
    return 0.5 + t ** 2 + t * z


def ex5_y(t, y, z):
    """RHS for y' in the EX5 system: y' = z."""
    return z
def euler_system(x, xf, y, z, functions, h, error, it):
    """Forward-Euler integration of the coupled system
    y' = functions[0](x, y, z), z' = functions[1](x, y, z).

    Prints the state before each step and once at the end; stops when x is
    within ``error`` of ``xf`` or after ``it`` steps.  Both derivatives are
    evaluated with the y value from the start of the step.  Returns (y, z).
    """
    _it = 0
    while abs(xf - x) > error and _it != it:
        print(x, y, z, _it)
        y_start = y
        y = y + functions[0](x, y_start, z) * h
        z = z + functions[1](x, y_start, z) * h
        x = x + h
        _it = _it + 1
    print(x, y, z, _it)
    return y, z
def delta1_system(x, y, z, h, function, char):
    """First RK increment for the selected equation ('y' -> function[0],
    anything else -> function[1])."""
    if char == 'y':
        return h * function[0](x, y, z)
    return h * function[1](x, y, z)


def delta2_system(x, y, z, h, function, char):
    """Second RK increment, evaluated at the half step using both first
    increments."""
    k1_y = delta1_system(x, y, z, h, function, 'y')
    k1_z = delta1_system(x, y, z, h, function, 'z')
    if char == 'y':
        return h * function[0](x + h/2, y + k1_y / 2, z + k1_z / 2)
    return h * function[1](x + h/2, y + k1_y / 2, z + k1_z / 2)


def delta3_system(x, y, z, h, function, char):
    """Third RK increment, evaluated at the half step using both second
    increments."""
    k2_y = delta2_system(x, y, z, h, function, 'y')
    k2_z = delta2_system(x, y, z, h, function, 'z')
    if char == 'y':
        return h * function[0](x + h/2, y + k2_y / 2, z + k2_z / 2)
    return h * function[1](x + h/2, y + k2_y / 2, z + k2_z / 2)


def delta4_system(x, y, z, h, function, char):
    """Fourth RK increment, evaluated at the full step using both third
    increments."""
    k3_y = delta3_system(x, y, z, h, function, 'y')
    k3_z = delta3_system(x, y, z, h, function, 'z')
    if char == 'y':
        return h * function[0](x + h, y + k3_y, z + k3_z)
    return h * function[1](x + h, y + k3_y, z + k3_z)


def delta_system(x, y, z, h, function, char):
    """Weighted RK4 combination (k1 + 2*k2 + 2*k3 + k4)/6 for one equation."""
    k1 = delta1_system(x, y, z, h, function, char)
    k2 = delta2_system(x, y, z, h, function, char)
    k3 = delta3_system(x, y, z, h, function, char)
    k4 = delta4_system(x, y, z, h, function, char)
    return k1/6 + k2/3 + k3/3 + k4/6


def rk4_system(x, xf, y, z, functions, h, error, it):
    """RK4 integration of the coupled system y' = functions[0],
    z' = functions[1].

    Prints the state before each step and once at the end; stops when x is
    within ``error`` of ``xf`` or after ``it`` steps.  Both updates use the
    y value from the start of the step so the pair advances simultaneously.
    Returns (y, z).
    """
    _it = 0
    while abs(xf - x) > error and _it != it:
        print(x, y, z, _it)
        y_start = y
        y = y + delta_system(x, y_start, z, h, functions, 'y')
        z = z + delta_system(x, y_start, z, h, functions, 'z')
        x = x + h
        _it = _it + 1
    print(x, y, z, _it)
    return y, z
print("EX5:")
print("Euler")
# Integrate the EX5 system on [0, 8] from y(0)=0, z(0)=1 with step 0.25,
# capped at 2 iterations (demonstration run, not full convergence).
euler_system(0, 8, 0, 1, [ex5_y, ex5_z], 0.25, 10**-4, 2)
print("RK4:")
rk4_system(0, 8, 0, 1, [ex5_y, ex5_z], 0.25, 10**-4, 2)
|
#!/usr/bin/python3
"""calculates the fewest number of
operations needed for the end result"""
def minOperations(n):
    """Return the minimum number of Copy All / Paste operations needed to
    end up with exactly ``n`` characters, starting from one character.

    The minimum is the sum of the prime factors of ``n`` (with
    multiplicity); 0 is returned for n <= 1.

    Fix: the previous version used ``++counter`` -- a double unary plus,
    which is a no-op in Python -- inside a ``while n % i == 0`` loop whose
    condition never changed, so the function hung on any composite ``n``
    and returned wrong values otherwise.
    """
    if n <= 1:
        return 0
    # Peel off the smallest factor i (necessarily prime), pay i operations
    # (one Copy All plus i-1 Pastes), and recurse on the remaining n // i.
    for i in range(2, n + 1):
        if n % i == 0:
            return minOperations(n // i) + i
|
"""
Generic plot functions based on matplotlib
"""
from __future__ import absolute_import, division, print_function, unicode_literals
# Python 2/3 compatibility shim: `basestring` only exists on Python 2; on
# Python 3 alias it to `str` so the isinstance checks below keep working.
try:
    ## Python 2
    basestring
except NameError:
    ## Python 3
    # Fix: catch NameError specifically instead of a bare `except:`, which
    # also swallowed SystemExit/KeyboardInterrupt.
    basestring = str
import numpy as np
import pylab
import matplotlib
from matplotlib.font_manager import FontProperties
from .common import (show_or_save_plot, common_doc)
from .frame import (plot_ax_frame, ax_frame_doc)
__all__ = ['plot_histogram']
def plot_histogram(datasets, bins, data_is_binned=False, weights=None,
                histogram_type='bar', stacked=True, cumulative=False, normed=False,
                orientation='vertical', align='mid', bar_width=0.8, baseline=0,
                colors=[], labels=[],
                line_color='k', line_width=0.5,
                xscaling='lin', yscaling='lin',
                xmin=None, xmax=None, ymin=None, ymax=None,
                xlabel='', ylabel='N', ax_label_fontsize='large',
                xticks=None, xtick_labels=None, xtick_interval=None, xtick_rotation=0,
                xtick_direction='', xtick_side='', xlabel_side='',
                yticks=None, ytick_labels=None, ytick_interval=None, ytick_rotation=0,
                ytick_direction='', ytick_side='', ylabel_side='',
                tick_label_fontsize='medium', tick_params={},
                title='', title_fontsize='large',
                xgrid=0, ygrid=0, aspect_ratio=None,
                hlines=[], hline_args={}, vlines=[], vline_args={},
                legend_location=0, legend_fontsize='medium',
                style_sheet='classic', border_width=0.2, skip_frame=False,
                fig_filespec=None, figsize=None, dpi=300, ax=None):
    """
    Plot histograms

    :param datasets:
        list of 1-D arrays, datasets containing either data to be
        binned or counts, (i.e., data that is already binned,
        see :param:`data_is_binned`)
    :param bins:
        int (number of bins) or list or array (bin edges)
    :param data_is_binned:
        bool, whether or not data in :param:`datasets` is already binned
        Note that, if this is True, :param:`bins` must correspond to
        the bin edges (including right edge)!
        (default: False)
    :param weights:
        array with same shape as :param:`datasets`, weights associated
        with each value. Only applies if :param:`data_is_binned` is False
        (default: None)
    :param histogram_type:
        str, histogram type: 'bar', 'step' or 'stepfilled'
        (default: 'bar')
    :param stacked:
        bool, whether to plot mulitple datasets on top of each other
        (True) or side by side (if :param:`histogram_type` is 'bar')
        or on top of each other (if :param:`histogram_type` is 'step')
        (default: True)
    :param cumulative:
        bool, whether or not to draw a cumulative histogram, where each
        bin gives the counts in that bin plus all bins for smaller values
        (default: False)
    :param normed:
        bool, whether or not counts should be normalized to sum to 1
        (default: False)
    :param orientation:
        str, orientation of histogram bars: 'horizontal' or 'vertical'
        (default: 'vertical')
    :param align:
        str, alignment of histogram bars: 'left', 'mid' or 'right'
        (default: 'mid')
    :param bar_width:
        float, relative width of bars as a fraction of the bin width
        (default: 0.8)
    :param baseline:
        float or array, location of the bottom baseline of each bin
        if array, its length must match the number of bins
        (default: 0)
    :param colors:
        list of matplotlib color specifications, one for each dataset
        or one for each bin if there is only 1 dataset
        May also be a matplotlib colormap or a string (colormap name)
        (default: [], will use default color(s))
    :param labels:
        list of strings, legend labels for each dataset
        (default: [], will not plot any labels)
    :param line_color:
        matplotlib color specification, color(s) of bar edges
        (default: 'k')
    :param line_width:
        float, width of bar edges
        (default: 0.5)

    The remaining frame/output parameters (scalings, limits, ticks, grids,
    title, legend, figure output, ...) are documented in the frame and
    common documentation appended to this docstring at import time.
    """
    ## Collect every keyword understood by plot_ax_frame: all current locals
    ## (at this point, exactly the call parameters) minus the ones consumed
    ## by the histogram itself.  NOTE: this relies on locals() being taken
    ## before any other local variable is created -- do not move this block.
    frame_args = {key: val for (key, val) in locals().items()
                if not key in ['datasets', 'bins', 'data_is_binned',
                    'weights', 'histogram_type',
                    'cumulative', 'stacked', 'normed', 'orientation',
                    'align', 'bar_width', 'baseline', 'colors', 'labels',
                    'line_color', 'line_width',
                    'legend_location', 'legend_fontsize', 'style_sheet',
                    'border_width', 'skip_frame', 'fig_filespec',
                    'figsize', 'dpi', 'ax']}
    from itertools import cycle
    pylab.style.use(style_sheet)
    ## Reuse the given axes when provided, otherwise create a fresh figure.
    if ax is None:
        fig, ax = pylab.subplots(figsize=figsize, facecolor='white')
    else:
        fig = ax.get_figure()
    ## markers, colors, linewidths, linestyles, labels, etc.
    if not colors:
        #colors = 'bgrcmyk'
        ## Default to the style sheet's color cycle, one color per dataset.
        colors = pylab.rcParams['axes.prop_cycle'].by_key()['color']
        colors = colors[:len(datasets)]
    ## A string is interpreted as a colormap name.
    if isinstance(colors, basestring):
        colors = matplotlib.cm.get_cmap(colors)
        ## NOTE(review): matplotlib.cm.get_cmap was deprecated in 3.7 and
        ## removed in 3.9 -- confirm the pinned matplotlib version.
    if isinstance(colors, matplotlib.colors.Colormap):
        ## Sample the colormap: one color per dataset, or per bin when there
        ## is a single dataset.
        if len(datasets) > 1:
            num_colors = len(datasets)
        else:
            if np.isscalar(bins):
                num_colors = bins
            else:
                num_colors = len(bins) - 1
        ## Sample at the center of num_colors equal colormap segments.
        dc = 1. / num_colors / 2.
        colors = colors(np.linspace(dc, 1-dc, num_colors))
    if not labels:
        #labels = ['%d' % i for i in range(len(datasets))]
        labels = [''] * len(datasets)
    ## Empty/None labels are hidden from the legend.
    for i, label in enumerate(labels):
        if label in (None, '', 'None'):
            labels[i] = '_nolegend_'
    unique_labels = set(labels)
    ## Recycle colors per dataset, except when a single dataset is given
    ## per-bin colors (handled by the bar branch below).
    if not (len(datasets) == 1 and len(colors) > 1):
        colors = cycle(colors)
        colors = [next(colors) for i in range(len(datasets))]
    labels = cycle(labels)
    labels = [next(labels) for i in range(len(datasets))]
    ## Histogram
    ## Use a log-scaled count axis when the corresponding scaling asks for it.
    if orientation == 'vertical' and 'log' in yscaling:
        log = True
    elif orientation == 'horizontal' and 'log' in xscaling:
        log = True
    else:
        log = False
    if len(datasets) == 1 and len(colors) > 1 and histogram_type[:3] == 'bar':
        ## Single dataset with one color per bin: ax.hist does not support
        ## this, so bin manually and draw with ax.bar instead.
        if not data_is_binned:
            bar_heights, bin_edges = pylab.histogram(datasets[0], bins=bins,
                                            density=normed, weights=weights)
        else:
            bin_edges = bins
            bar_heights = datasets[0]
            ## For pre-binned data, apply cumulation/normalization manually.
            ## NOTE(review): the in-place /= assumes bar_heights is a float
            ## array (a plain list or int array would fail) -- confirm the
            ## expected input type.
            if cumulative:
                bar_heights = np.cumsum(bar_heights)
                if normed:
                    bar_heights /= bar_heights[-1]
            elif normed:
                bar_heights /= np.sum(bar_heights)
        if bar_width is None:
            bar_width = 0.8
        ## Convert to absolute bar width, assuming uniform bin intervals
        bar_width *= np.abs(bin_edges[1] - bin_edges[0])
        ## Translate ax.hist-style alignment to ax.bar semantics; 'right'
        ## alignment is achieved with 'edge' plus a negative width.
        if align == 'mid':
            align = 'center'
        elif align == 'left':
            align = 'edge'
        elif align == 'right':
            align = 'edge'
            bar_width = -bar_width
        ax.bar(bin_edges[:-1], bar_heights, width=bar_width, bottom=baseline,
            color=colors, edgecolor=line_color, linewidth=line_width,
            align=align, orientation=orientation, label=labels[0], log=log)
    else:
        if data_is_binned:
            #The weights are the y-values of the input binned data
            weights = datasets
            #The dataset values are the bin centres
            bins = np.asarray(bins)
            datasets = [((bins[1:] + bins[:-1]) / 2.) for i in range(len(datasets))]
            ## Each "dataset" becomes one sample per bin centre, weighted by
            ## the given counts, which reproduces the binned histogram.
        if align == 'center':
            align = 'mid'
        ## Work around bug
        if np.isscalar(baseline):
            baseline = [baseline]
        ## NOTE(review): the `normed` kwarg of Axes.hist was removed in
        ## matplotlib 3.1 (replaced by `density`) -- this call requires an
        ## older matplotlib; confirm the pinned version.
        ax.hist(datasets, bins, normed=normed, cumulative=cumulative,
            histtype=histogram_type, align=align, orientation=orientation,
            rwidth=bar_width, color=colors, label=labels, stacked=stacked,
            edgecolor=line_color, linewidth=line_width, bottom=baseline,
            log=log, weights=weights)
    ## Frame
    if not skip_frame:
        plot_ax_frame(ax, **frame_args)
    ## Legend
    legend_fontsize = legend_fontsize or tick_label_fontsize
    legend_font = FontProperties(size=legend_fontsize)
    ## Avoid warning if there are no labeled curves
    if len(unique_labels.difference(set(['_nolegend_']))):
        ax.legend(loc=legend_location, prop=legend_font)
    ## Output
    return show_or_save_plot(ax, fig_filespec=fig_filespec, dpi=dpi,
                        border_width=border_width)
plot_histogram.__doc__ += (ax_frame_doc + common_doc)
|
# -*- coding:utf-8 -*-
# author: hpf
# create time: 2020/11/11 17:19
# file: 92_反转链表II.py
# IDE: PyCharm
# 题目描述:
# 反转从位置 m 到 n 的链表。请使用一趟扫描完成反转。
#
# 说明:
# 1 ≤ m ≤ n ≤ 链表长度。
#
# 示例:
#
# 输入: 1->2->3->4->5->NULL, m = 2, n = 4
# 输出: 1->4->3->2->5->NULL
# 解法一: 递归
# Definition for singly-linked list.
class ListNode:
    """Singly-linked list node holding a value and a pointer to the next node."""

    def __init__(self, x):
        # New nodes start detached (no successor).
        self.val = x
        self.next = None
class Solution1:
    """Recursive solution: reverse the sublist between positions m and n (1-indexed)."""
    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        def reverseListNode(head, n):
            """
            Reverse the first n nodes of the list.
            :param head: head of the (sub)list
            :param n: number of leading nodes to reverse
            :return: new head of the reversed prefix
            """
            if n == 1:
                return head
            last = reverseListNode(head.next, n - 1)  # new head of reversed prefix
            succeed = head.next.next   # first node after the reversed range
            head.next.next = head      # hook the current node behind its successor
            head.next = succeed        # attach the untouched tail
            return last
        if m == 1:
            # When m == 1 the task reduces to reversing the first n nodes.
            return reverseListNode(head, n)
        # Otherwise advance one node and shrink the window by one on each side.
        head.next = self.reverseBetween(head.next, m - 1, n - 1)
        return head
# 解法二: 双指针,虚假头结点,迭代
class Solution2:
    """Iterative two-pointer solution using a dummy head node."""
    def reverseBetween(self, head: ListNode, m: int, n: int) -> ListNode:
        """Reverse the nodes from position m to n (1-indexed) in one pass.

        A dummy head removes the m == 1 special case; the inner segment is
        reversed with the standard three-pointer loop and spliced back.
        """
        if m == n:
            return head
        dummy = ListNode(-1)
        dummy.next = head
        # a -> node before position m, d -> node at position n
        a, d = dummy, dummy
        for _ in range(m - 1):
            a = a.next
        for _ in range(n):
            d = d.next
        # b -> node at position m, c -> node after position n
        b, c = a.next, d.next
        # Reverse b..d in place.  (Fix: the loop variable was named `next`,
        # shadowing the `next` builtin; renamed to `nxt`.)
        pre = b
        cur = pre.next
        while cur != c:
            nxt = cur.next
            cur.next = pre
            pre = cur
            cur = nxt
        # Splice the reversed segment back: a -> d ... b -> c
        a.next = d
        b.next = c
        return dummy.next
"""
Plot barycentric
"""
import numpy as np
from mpl_toolkits.mplot3d import Axes3D, art3d
from scipy.spatial import ConvexHull
from itertools import combinations
import seaborn as sns
import matplotlib.pyplot as plt
from dreye.utilities.barycentric import (
barycentric_to_cartesian,
barycentric_dim_reduction,
barycentric_to_cartesian_transformer
)
def plot_simplex(
    n=4,
    points=None,
    hull=None,
    lines=True,
    ax=None,
    line_color='black',
    hull_color='gray',
    labels=None,
    label_size=16,
    point_colors='blue',
    hull_kws={},
    point_scatter_kws={},
    fig_kws={},
    remove_axes=True
):
    """
    Plot simplex of points and/or convex hull

    :param n: number of barycentric components; must be 3 (2D plot) or
        4 (3D plot).
    :param points: array of points with either n columns (reduced via
        barycentric_dim_reduction) or n-1 columns (plotted as-is).
    :param hull: ConvexHull instance, or an array of points (n or n-1
        columns) from which a hull is built.
    :param lines: bool, whether to draw the simplex edges.
    :param ax: existing axes to draw into; created when None.
    :param labels: optional iterable of corner labels (one per component).
    :param remove_axes: bool, hide axes/spines of the finished plot.
    :return: the matplotlib axes that was drawn into.
    """
    assert n in {3, 4}
    if ax is None:
        if n == 4:
            # 3D axes for a tetrahedral (4-component) simplex.
            fig = plt.figure(**fig_kws)
            ax = Axes3D(fig)
            # NOTE(review): Axes3D(fig) relies on the auto-add behaviour
            # changed in matplotlib 3.4+ -- confirm the pinned version.
        else:
            fig = plt.figure(**fig_kws)
            ax = plt.subplot(111)
    if hull is not None:
        if not isinstance(hull, ConvexHull):
            # Reduce n-component barycentric points to n-1 cartesian dims
            # before building the hull.
            if hull.shape[1] == n:
                hull = barycentric_dim_reduction(hull)
            assert hull.shape[1] == (n-1)
            hull = ConvexHull(hull)
        pts = hull.points
        if n == 3:
            # 2D hull: draw each simplex (edge) as a line segment.
            for simplex in hull.simplices:
                ax.plot(
                    pts[simplex, 0], pts[simplex, 1],
                    color=hull_color, **hull_kws
                )
        else:
            # 3D hull: merge coplanar triangles into faces and draw them
            # as a single polygon collection.
            org_triangles = [pts[s] for s in hull.simplices]
            f = Faces(org_triangles)
            g = f.simplify()
            hull_kws_default = {
                'facecolors': hull_color,
                'edgecolor': 'lightgray',
                'alpha': 0.8
            }
            hull_kws = {**hull_kws_default, **hull_kws}
            pc = art3d.Poly3DCollection(g, **hull_kws)
            ax.add_collection3d(pc)
    if points is not None:
        # Accept n-component barycentric points (reduced first) or points
        # already in the n-1 dimensional plotting space.
        if points.shape[1] == n:
            X = barycentric_dim_reduction(points)
        elif points.shape[1] == (n-1):
            X = points
        else:
            raise ValueError(
                "`points` argument is the wronge dimension, "
                f"must be `{n}` or `{n-1}`, but is `{points.shape[1]}`."
            )
        ax.scatter(
            *X.T, c=point_colors, **point_scatter_kws
        )
    if lines:
        # Draw an edge between every pair of simplex corners.
        A = barycentric_to_cartesian_transformer(n)
        lines = combinations(A, 2)
        for line in lines:
            line = np.transpose(np.array(line))
            if n == 4:
                ax.plot3D(*line, c=line_color)
            else:
                ax.plot(*line, c=line_color)
    if labels is not None:
        # Place one text label at each simplex corner (the cartesian image
        # of each unit vector), with alignment chosen per corner.
        eye = np.eye(n)
        eye_cart = barycentric_to_cartesian(eye)
        for idx, (point, label) in enumerate(zip(eye_cart, labels)):
            text_kws = {}
            if idx == 0:
                text_kws['ha'] = 'right'
                text_kws['va'] = 'center'
            elif (idx+1) == n:
                text_kws['ha'] = 'center'
                text_kws['va'] = 'bottom'
            else:
                text_kws['ha'] = 'left'
                text_kws['va'] = 'center'
            ax.text(*point, label, size=label_size, **text_kws)
    if remove_axes:
        if n == 4:
            # NOTE(review): _axis3don is a private matplotlib attribute --
            # confirm it still exists in the pinned version.
            ax._axis3don = False
        else:
            ax.set_xticks([])
            ax.set_yticks([])
            sns.despine(left=True, bottom=True, ax=ax)
    return ax
class Faces:
    """
    Merge coplanar, edge-sharing triangles of a 3D convex hull into faces.
    From: https://stackoverflow.com/questions/49098466/plot-3d-convex-closed-regions-in-matplot-lib/49115448
    """
    def __init__(self, tri, sig_dig=12, method="convexhull"):
        # Round coordinates so coplanarity comparisons are robust to noise.
        self.method = method
        self.tri = np.around(np.array(tri), sig_dig)
        self.grpinx = list(range(len(tri)))
        # Label each triangle by its absolute unit normal: equal labels
        # mean parallel planes.
        normals = np.around([self.norm(s) for s in self.tri], sig_dig)
        _, self.inv = np.unique(normals, return_inverse=True, axis=0)
    def norm(self, sq):
        """Absolute unit normal of the plane through the first three points."""
        perp = np.cross(sq[2] - sq[0], sq[1] - sq[0])
        return np.abs(perp / np.linalg.norm(perp))
    def isneighbor(self, tr1, tr2):
        """True when the two triangles share exactly two vertices (an edge)."""
        stacked = np.concatenate((tr1, tr2), axis=0)
        return len(np.unique(stacked, axis=0)) + 2 == len(stacked)
    def order(self, v):
        """Return the vertices of a planar face ordered along its boundary."""
        if len(v) <= 3:
            return v
        v = np.unique(v, axis=0)
        plane_normal = self.norm(v[:3])
        basis_y = np.cross(plane_normal, v[1] - v[0])
        basis_y = basis_y / np.linalg.norm(basis_y)
        # 2D coordinates of the vertices within the face's plane.
        coords = np.dot(v, np.c_[v[1] - v[0], basis_y])
        if self.method == "convexhull":
            return v[ConvexHull(coords).vertices]
        # Fallback: sort vertices by angle around the centroid.
        offsets = coords - np.mean(coords, axis=0)
        angles = np.arctan2(offsets[:, 0], offsets[:, 1])
        return v[np.argsort(angles)]
    def simplify(self):
        """Group edge-sharing coplanar triangles and return the merged faces."""
        for i, tri1 in enumerate(self.tri):
            for j, tri2 in enumerate(self.tri):
                if j > i and self.isneighbor(tri1, tri2) and self.inv[i] == self.inv[j]:
                    self.grpinx[j] = self.grpinx[i]
        faces = []
        for label in np.unique(self.grpinx):
            # `label` is a NumPy scalar, so `self.grpinx == label` broadcasts
            # to a boolean mask over the plain-list group indices.
            members = self.tri[self.grpinx == label]
            faces.append(self.order(np.concatenate(list(members))))
        return faces
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
# Register your models here.
from .models import Customer, Merchant, MerchantType, TransactionType, Currency, AccountCategory, Account
from .models import AccountSnapshot, Transaction
class AccountSnapshotAdmin(admin.ModelAdmin):
    # Columns shown on the AccountSnapshot changelist page.
    list_display = ['account', 'date', 'balance']
class AccountAdmin(admin.ModelAdmin):
    # Columns shown on the Account changelist page.
    # NOTE(review): 'acount_number' looks like a typo for 'account_number',
    # but it must match the actual field/attribute on the Account model --
    # confirm against models.py before renaming either side.
    list_display = ['acount_number', 'name', 'balance']
# Register all banking models with the admin site; Account and
# AccountSnapshot use their customized ModelAdmin classes.
admin.site.register(Customer)
admin.site.register(MerchantType)
admin.site.register(Merchant)
admin.site.register(TransactionType)
admin.site.register(Currency)
admin.site.register(AccountCategory)
admin.site.register(Account, AccountAdmin)
admin.site.register(AccountSnapshot, AccountSnapshotAdmin)
admin.site.register(Transaction)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
try:
    from setuptools import setup
    from setuptools.extension import Extension
    # Force `setup_requires` Cython to be installed before proceeding
    from setuptools.dist import Distribution
    # Fix: convert_path is used unconditionally below to locate the version
    # file, but it was only imported in the distutils fallback branch --
    # with setuptools available the script crashed with a NameError.
    from distutils.util import convert_path
except ImportError:
    print("Couldn't import setuptools. Falling back to distutils.")
    from distutils.core import setup
    from distutils.extension import Extension
    from distutils.dist import Distribution
    from distutils.util import convert_path
Distribution(dict(setup_requires='Cython'))
try:
    from Cython.Build import cythonize
except ImportError:
    print("Could not import Cython.Distutils. Install `cython` and rerun.")
    sys.exit(1)
if os.path.exists('LICENSE'):
print("""The setup.py script should be executed from the build directory.
Please see the file 'readme.rst' for further instructions.""")
sys.exit(1)
main_ns = {}
ver_path = convert_path('heat/__init__.py')
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
setup(
name='heat',
version=main_ns['__version__'],
author='Francois Roy',
author_email='francois@froy.ca',
description=("Solve the heat equation with constant coefficients, heat "
"source, and usual boundary conditions using Green's "
"function on a line (1D), a square (2D), "
"or a cube (3D)."),
license='BSD-2',
keywords="Heat transfer Green's functions",
url='https://github.com/frRoy/Heat',
packages=['heat', 'tests'],
package_dir={'heat':
'heat'},
include_package_data=True,
entry_points={
'console_scripts': [
'heat = heat.command_line:main',
]
},
setup_requires=['pytest-runner', ],
install_requires=[
'Cython',
'click',
],
test_suite='tests.test_class',
tests_require=['pytest', ],
zip_safe=False,
ext_modules=cythonize('heat/*.pyx', gdb_debug=True),
classifiers=[
'Intended Audience :: Developers',
'Natural Language :: English',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
)
|
from codecs import open
from os import path

# Always prefer setuptools over distutils
from setuptools import find_packages, setup

_HERE = path.abspath(path.dirname(__file__))

# Load __version__ from the package's version module without importing it.
_version_ns = {}
with open(path.join(_HERE, "bk_monitor_report", "__version__.py"), "r", encoding="utf-8") as fh:
    exec(fh.read(), _version_ns)

setup(
    name="bk-monitor-report",
    version=_version_ns["__version__"],
    description="bk-monitor-report",  # noqa
    long_description="custom reporter python sdk for bk-monitor",
    url="https://github.com/TencentBlueKing/bk-monitor-report",
    author="TencentBlueKing",
    author_email="contactus_bk@tencent.com",
    include_package_data=True,
    packages=find_packages(),
    install_requires=[
        "requests>=2.20.0",
        "prometheus-client>=0.9.0,<1.0.0",
    ],
    zip_safe=False,
)
|
"""
Compute subpixel bias in localization data.
Subpixel bias in localization coordinates may arise depending on the
localization algorithm [1]_.
References
----------
.. [1] Gould, T. J., Verkhusha, V. V. & Hess, S. T.,
Imaging biological structures with fluorescence photoactivation
localization microscopy. Nat. Protoc. 4 (2009), 291–308.
"""
from __future__ import annotations
import logging
import sys
from typing import TYPE_CHECKING, cast
if sys.version_info >= (3, 9):
from collections.abc import Sequence # noqa: F401
else:
from typing import Sequence # noqa: F401
if sys.version_info >= (3, 11):
from typing import Self
else:
from typing_extensions import Self
if TYPE_CHECKING:
from locan.data.locdata import LocData
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from locan.analysis.analysis_base import _Analysis
__all__: list[str] = ["SubpixelBias"]
logger = logging.getLogger(__name__)
# The algorithms
def _subpixel_bias(
    locdata, pixel_size: int | float | Sequence[int | float]
) -> pd.DataFrame:
    """Compute coordinate values modulo pixel size for every localization.

    Parameters
    ----------
    locdata
        Localization data providing ``coordinate_keys`` and ``coordinates``.
    pixel_size
        Camera pixel size in coordinate units; a scalar is broadcast over
        all coordinate dimensions, otherwise one value per dimension.

    Returns
    -------
    pandas.DataFrame
        One ``<label>_modulo`` column per coordinate dimension.

    Raises
    ------
    TypeError
        If a sequence of pixel sizes does not match the number of
        coordinate dimensions.
    """
    labels = locdata.coordinate_keys
    per_dimension = locdata.coordinates.T
    if np.ndim(pixel_size) == 0:
        # Scalar pixel size: apply the same value to every dimension.
        pixel_size = cast("int | float", pixel_size)
        sizes: Sequence[int | float] = [pixel_size] * len(labels)
    else:
        pixel_size = cast("Sequence[int | float]", pixel_size)
        if len(pixel_size) != len(labels):
            raise TypeError("There must be given a pixel_size for each coordinate.")
        sizes = pixel_size
    return pd.DataFrame(
        {
            label_ + "_modulo": np.remainder(values_, size_)
            for label_, values_, size_ in zip(labels, per_dimension, sizes)
        }
    )
# The specific analysis classes
class SubpixelBias(_Analysis):
    """
    Check for subpixel bias by computing the modulo of localization coordinates
    for each localization's spatial coordinate in locdata.

    Parameters
    ----------
    pixel_size : int | float | Sequence[int | float]
        Camera pixel size in coordinate units.
    meta : locan.analysis.metadata_analysis_pb2.AMetadata
        Metadata about the current analysis routine.

    Attributes
    ----------
    count : int
        A counter for counting instantiations.
    parameter : dict
        A dictionary with all settings for the current computation.
    meta : locan.analysis.metadata_analysis_pb2.AMetadata
        Metadata about the current analysis routine.
    results : pandas.DataFrame
        Localization coordinates modulo pixel_size, one ``<label>_modulo``
        column per coordinate dimension.
    """

    # Instantiation counter used by the _Analysis machinery.
    count = 0

    def __init__(self, meta=None, pixel_size=None) -> None:
        # NOTE: _get_parameters() captures locals(), so the parameter names
        # `meta` and `pixel_size` become the stored analysis settings --
        # they must not be renamed.
        parameters = self._get_parameters(locals())
        super().__init__(**parameters)
        self.results = None

    def compute(self, locdata: LocData) -> Self:
        """
        Run the computation.

        Parameters
        ----------
        locdata : LocData
            Localization data.

        Returns
        -------
        Self
        """
        # Empty input: warn and leave results as None instead of raising.
        if not len(locdata):
            logger.warning("Locdata is empty.")
            return self
        self.results = _subpixel_bias(locdata=locdata, **self.parameter)
        return self

    def hist(self, ax=None, bins="auto", log=True, **kwargs) -> plt.axes.Axes:
        """
        Provide histogram as :class:`matplotlib.axes.Axes` object showing
        hist(results). Nan entries are ignored.

        Parameters
        ----------
        ax : matplotlib.axes.Axes
            The axes on which to show the image
        bins : int | Sequence | str
            Bin specifications (passed to :func:`matplotlib.hist`).
        log : Bool
            Flag for plotting on a log scale.
        kwargs : dict
            Other parameters passed to :func:`matplotlib.pyplot.hist`.

        Returns
        -------
        matplotlib.axes.Axes
            Axes object with the plot.
        """
        if ax is None:
            ax = plt.gca()
        # NOTE(review): presumably _Analysis defines truthiness as "has
        # results"; this guards against plotting before compute() ran.
        if not self:
            return ax
        # kwargs may override density/log; histtype and labels are fixed.
        ax.hist(
            self.results.dropna(axis=0).to_numpy(),
            bins=bins,
            **dict(dict(density=True, log=log), **kwargs),
            histtype="step",
            label=self.results.columns,
        )
        ax.set(
            title="Subpixel Bias",
            xlabel="position_modulo_pixel_size",
            ylabel="PDF",
        )
        return ax
|
"""Log in to a Data Studio report, screenshot it, and email the image."""
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.chrome.options import Options
import time
#smtp import
import smtplib
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email import encoders

options = webdriver.ChromeOptions()
#chrome headless option
options.add_argument('headless')
# set the window size
options.add_argument('window-size=1200x600')
# initialize the driver
# NOTE(review): `chrome_options` is deprecated in newer Selenium in favor of
# `options=`; kept as-is to match the pinned Selenium version -- confirm.
driver = webdriver.Chrome(chrome_options=options)
try:
    #to to url
    driver.get('https://datastudio.google.com/u/0/reporting/0B_U5RNpwhcE6QXg4SXFBVGUwMjg/page/6zXD/preview')
    #enter email address
    email = driver.find_element_by_css_selector('input[type=email]')
    email.send_keys('exaple@gmail.com')
    email.send_keys(Keys.ENTER)
    time.sleep(5)
    #enter password
    password = driver.find_element_by_xpath('//*[@id="password"]/div[1]/div/div[1]/input')
    password.send_keys('password-here')
    password.send_keys(Keys.ENTER)
    # sleep up to 5 seconds for the elements to become available
    time.sleep(5)
    #take a screenshot of the page
    driver.get_screenshot_as_file('./img/full-page.png')
    time.sleep(2)
finally:
    # quit() (not close()) terminates the chromedriver process even on error;
    # the original only close()d the window at the very end, leaking the
    # driver process whenever the email step failed.
    driver.quit()

#smtp username details
email_user = 'example@gmail.com'
email_password = 'password-here'
#send email to
email_send = 'anthony_lekan@hotmail.com'
subject = 'Reading Headline'

msg = MIMEMultipart()
msg['From'] = email_user
msg['To'] = email_send
msg['Subject'] = subject
body = 'Hi there, sending this email from Python!'
msg.attach(MIMEText(body,'plain'))

#send attached file
filename='./img/full-page.png'
# `with` guarantees the screenshot file handle is closed (the original
# leaked it).
with open(filename,'rb') as attachment:
    part = MIMEBase('application','octet-stream')
    part.set_payload(attachment.read())
encoders.encode_base64(part)
part.add_header('Content-Disposition',"attachment; filename= "+filename)
msg.attach(part)

text = msg.as_string()
server = smtplib.SMTP('smtp.gmail.com',587)
try:
    server.starttls()
    server.login(email_user,email_password)
    server.sendmail(email_user,email_send,text)
finally:
    # The original never closed the SMTP session.
    server.quit()
|
# coding: utf-8
# ### Add upstream, downstream and basin PFAF_ID to database
#
# * Purpose of script: create a table with pfaf_id and upstream_pfaf_id, downstream_pfaf_id and basin_pfaf_id
# * Author: Rutger Hofste
# * Kernel used: python35
# * Date created: 20171123
#
#
# The script requires a file called .password to be stored in the current working directory with the password to the database. Basic functionality
#
# In[1]:
# In[1]:
# This file is an exported Jupyter notebook: get_ipython() calls only work
# when executed inside IPython.
get_ipython().magic('matplotlib inline')
import time, datetime, sys
# Timestamp strings used for logging the run.
dateString = time.strftime("Y%YM%mD%d")
timeString = time.strftime("UTC %H:%M")
start = datetime.datetime.now()
print(dateString,timeString)
sys.version
# In[2]:
SCRIPT_NAME = "Y2017M11D23_RH_Upstream_Downstream_Basin_To_Database_V01"
# Per-script scratch directories on the EC2 data volume.
EC2_INPUT_PATH = "/volumes/data/%s/input/" %(SCRIPT_NAME)
EC2_OUTPUT_PATH = "/volumes/data/%s/output/" %(SCRIPT_NAME)
INPUT_VERSION = 1
OUTPUT_VERSION = 2
# Database settings
DATABASE_IDENTIFIER = "aqueduct30v02"
DATABASE_NAME = "database01"
INPUT_FILENAME = "hybas_lev06_v1c_merged_fiona_upstream_downstream_V%0.2d" %(INPUT_VERSION)
S3_INPUT_PATH = "s3://wri-projects/Aqueduct30/processData/Y2017M08D23_RH_Downstream_V01/output/"
# In[3]:
# Recreate clean input/output directories (shell commands via IPython).
get_ipython().system('rm -r {EC2_INPUT_PATH}')
get_ipython().system('rm -r {EC2_OUTPUT_PATH}')
get_ipython().system('mkdir -p {EC2_INPUT_PATH}')
get_ipython().system('mkdir -p {EC2_OUTPUT_PATH}')
# In[4]:
# Download the upstream/downstream CSV input from S3.
get_ipython().system('aws s3 cp {S3_INPUT_PATH} {EC2_INPUT_PATH} --recursive')
# In[5]:
import os
import numpy as np
import pandas as pd
from ast import literal_eval
import boto3
import botocore
from sqlalchemy import *
# In[6]:
# The three relation kinds to be exploded into long-format tables.
scopes = ["upstream_pfaf_ids","downstream_pfaf_ids","basin_pfaf_ids"]
# In[7]:
df = pd.read_csv(os.path.join(EC2_INPUT_PATH,INPUT_FILENAME+".csv"))
# In[ ]:
df.columns = map(str.lower, df.columns)
# In[ ]:
df = df.set_index("pfaf_id",drop=False)
# In[ ]:
df = df.drop_duplicates(subset="pfaf_id") #one basin 353020 has two HybasIDs
def rowToDataFrame(index, row, columnName):
    """Explode one row's stringified id list into a long-format DataFrame.

    Args:
        index: the pfaf_id of the source basin (repeated on every row).
        row: a mapping whose `columnName` entry is a string like "[1, 2, 3]".
        columnName: name of the list column being exploded.

    Returns:
        pandas.DataFrame: one row per list member with columns
        `pfaf_id` and `columnName`.
    """
    expanded = pd.DataFrame()
    for position, member in enumerate(literal_eval(row[columnName])):
        expanded.at[position, "pfaf_id"] = np.int64(index)
        expanded.at[position, columnName] = np.int64(member)
    return expanded
def rdsConnect(database_identifier,database_name):
    """Open a SQLAlchemy connection to an AWS RDS PostgreSQL instance.

    Reads the database password from the first line of a local `.password`
    file, resolves the instance endpoint via the AWS RDS API, and connects
    as user `rutgerhofste` on port 5432.

    Args:
        database_identifier (str): RDS DB instance identifier.
        database_name (str): name of the database on the instance.

    Returns:
        tuple: (engine, connection) SQLAlchemy objects.
    """
    rds = boto3.client('rds')
    # `with` guarantees the password file is closed even if reading fails
    # (the original used an explicit open/close pair).
    with open(".password","r") as password_file:
        password = password_file.read().splitlines()[0]
    response = rds.describe_db_instances(DBInstanceIdentifier="%s"%(database_identifier))
    status = response["DBInstances"][0]["DBInstanceStatus"]
    print("Status:",status)
    endpoint = response["DBInstances"][0]["Endpoint"]["Address"]
    print("Endpoint:",endpoint)
    engine = create_engine('postgresql://rutgerhofste:%s@%s:5432/%s' %(password,endpoint,database_name))
    connection = engine.connect()
    return engine, connection
# In[ ]:
# Explode each scope's stringified list column into a long-format DataFrame.
resultDict = {}
for scope in scopes:
    columnName = scope
    df2 = pd.DataFrame(data=df[scope],index=df.index)
    dfOut = pd.DataFrame()
    for index, row in df2.iterrows():
        dfRow = rowToDataFrame(index,row,columnName)
        # NOTE(review): DataFrame.append is deprecated and removed in
        # pandas 2.0; pd.concat would be the modern replacement.
        dfOut = dfOut.append(dfRow)
    # The .at-based construction above yields float columns; restore int64.
    dfOut['pfaf_id'] = dfOut['pfaf_id'].astype(np.int64)
    dfOut[columnName] = dfOut[columnName].astype(np.int64)
    dfOut = dfOut.reset_index(drop=True)
    dfOut.index.names = ['id']
    resultDict[scope] = dfOut
# In[ ]:
resultDict["upstream_pfaf_ids"].head()
# Store in database
# In[ ]:
engine, connection = rdsConnect(DATABASE_IDENTIFIER,DATABASE_NAME)
# In[ ]:
# Write each exploded table to PostgreSQL, replacing any previous version.
for key, dfScope in resultDict.items():
    if key == "basin_pfaf_ids":
        tableName = "basin_pfaf6_v%0.2d" %(OUTPUT_VERSION)
    elif key == "upstream_pfaf_ids":
        tableName = "upstream_pfaf6_v%0.2d" %(OUTPUT_VERSION)
    elif key == "downstream_pfaf_ids":
        tableName = "downstream_pfaf6_v%0.2d" %(OUTPUT_VERSION)
    else:
        # NOTE(review): an unknown key still writes a table literally named
        # "error" instead of raising -- confirm this fallback is intended.
        tableName = "error"
        print("error")
    dfScope.to_sql(
        name = tableName,
        con = connection,
        if_exists="replace",
        index= True)
# In[ ]:
connection.close()
# In[ ]:
end = datetime.datetime.now()
elapsed = end - start
print(elapsed)
|
#encoding=utf-8
""" ``views`` module.
"""
## wheezy
from wheezy.http import HTTPResponse
from wheezy.http import HTTPRequest
from wheezy.web.handlers import BaseHandler
## project
#from config import cached
#from database import db_session
#from models import Greeting
#from repository import Repository
#from validation import greeting_validator
# NOTE(review): module-level HTTPResponse instance; it is shadowed by the
# local `response` variables in the handlers below and appears unused --
# confirm before removing.
response = HTTPResponse()
class ListHandler(BaseHandler):
    """Render the public list template for the default and list pages."""

    def get(self):
        # Default page: greet the world.
        return self.render_response('public/list.html', name='world')

    def list(self):
        # Explicit list view.
        return self.render_response('public/list.html', name='list.')
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
from django.http import HttpRequest, HttpResponse
from zerver.decorator import webhook_view
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.lib.validator import WildValue, check_string, to_wild_value
from zerver.lib.webhooks.common import check_send_webhook_message
from zerver.models import UserProfile
MEMBER_NAME_TEMPLATE = "{name}"
AMOUNT_TEMPLATE = "{amount}"
@webhook_view("OpenCollective")
@has_request_variables
def api_opencollective_webhook(
    request: HttpRequest,
    user_profile: UserProfile,
    payload: WildValue = REQ(argument_type="body", converter=to_wild_value),
) -> HttpResponse:
    """Announce Open Collective donation events in a Zulip topic."""
    name = get_name(payload)
    amount = get_amount(payload)
    # Incognito donors are announced without a mention.
    if name == "Incognito":
        body = f"An **Incognito** member donated **{amount}**! :tada:"
    else:
        body = f"@_**{name}** donated **{amount}**! :tada:"
    topic = "New Member"
    # Deliver the message via the shared webhook helper.
    check_send_webhook_message(request, user_profile, topic, body)
    return json_success(request)
def get_name(payload: WildValue) -> str:
    """Extract the donating member's collective name from the payload."""
    member_collective = payload["data"]["member"]["memberCollective"]
    return MEMBER_NAME_TEMPLATE.format(
        name=member_collective["name"].tame(check_string)
    )
def get_amount(payload: WildValue) -> str:
    """Extract the pre-formatted donation amount from the payload."""
    order = payload["data"]["order"]
    return AMOUNT_TEMPLATE.format(
        amount=order["formattedAmount"].tame(check_string)
    )
|
from os import getlogin
########################## GLOBAL CONST #######################################
# Current login name; used to build the per-user paths below.
USR_NAME = getlogin()
# Root of the PyAgain project manager for this user.
PYAGN_PATH = "/home/{}/Documents/PyAgain/manager".format(USR_NAME)
# NOTE(review): no "/" separator -- this resolves to ".../manager.pr_list";
# confirm that is the intended file name rather than "manager/.pr_list".
PR_LIST_PATH = PYAGN_PATH + ".pr_list"
# Main-loop flag for interactive prompts.
LOOP = True
# Tag printed before every prompt message.
PROMPT = "[PyAgain] "
# Input prompt showing the user name, e.g. "<alice>".
USR = ("<{}>".format(USR_NAME))
# Accepted affirmative / negative answers for yes-no questions.
YES = ["yes", "Yes", "YES", "y", "Y", "yep"]
NO = ["no", "No", "NO", "n", "N", "nope"]
# Supported project types.
PR_TYPE = ["DJANGO", "AI", "SYSTEM"]
DOC_PATH = "/home/" + USR_NAME + "/Documents/"
def prompt(string):
    """Print *string* prefixed with the application prompt tag."""
    print("{}{}".format(PROMPT, string))
def yesno():
    """Ask the user a yes/no question until a recognized answer is given.

    Reads from stdin using the module-level `USR` prompt and keeps asking
    while the answer matches neither `YES` nor `NO`.

    Returns:
        bool: True for an affirmative answer, False for a negative one.
    """
    while LOOP:
        answer = input(USR)
        # Membership tests replace the original manual O(n) scan loops.
        if answer in YES:
            return True
        if answer in NO:
            return False
        prompt("ERROR: Unknown answer.")
# -*- coding: utf-8 -*-
"""Main module."""
# import configparser
# import os
# cp = configparser.ConfigParser()
# txtpath = os.path.dirname(os.path.abspath(__file__))+'/config.txt'
# try:
# with open(txtpath) as f:
# cp.read_file(f)
# token = cp.get('Config','password')
# print(token)
# except:
# cfgfile = open(txtpath,'w')
# cp.add_section('Config')
# token = str(input('There are no saved Github access tokens saved.\nPlease enter your Github token: '))
# cp.set('Config','password',token)
# cp.write(cfgfile)
# cfgfile.close() |
import pandas as pd
import numpy as np
def generate_features():
    """Load per-agent yearly features (2007-2016) as a stacked 3-D array.

    Keeps only agents (directors/actors) who appear in the movie-industry
    data from 2007 onward, then stacks one feature matrix per year.

    Returns:
        numpy.ndarray: array of shape (10, n_agents, n_features).
    """
    movie_industry = pd.read_csv("../data/movie_industry.csv", encoding = "ISO-8859-1" )
    movie_industry = movie_industry[movie_industry.year >= 2007]
    recent_directors = np.unique(movie_industry.director.values)
    recent_actors = np.unique(movie_industry.star.values)
    df = pd.read_csv("../data/features.csv")
    known_agent = (df.agent.isin(recent_directors) & (df.is_director == 1)) | (
        df.agent.isin(recent_actors) & (df.is_director == 0)
    )
    df = df[known_agent]
    df = df.loc[df.year >= 2007]
    dropped = ["agent", "is_director", "id", "year", "time"]
    # One (agents x features) matrix per year, stacked along a new axis.
    yearly = [
        df[df.year == year].drop(columns=dropped).to_numpy()
        for year in range(2007, 2017)
    ]
    return np.stack(yearly)
def generate_features_small():
    """Like generate_features(), but keeps only three feature columns.

    Returns:
        numpy.ndarray: array of shape (10, n_agents, 3) with columns
        log_total_gross, mean_score, mean_votes.
    """
    keep = ['log_total_gross', 'mean_score', 'mean_votes']
    movie_industry = pd.read_csv("../data/movie_industry.csv", encoding = "ISO-8859-1" )
    movie_industry = movie_industry[movie_industry.year >= 2007]
    recent_directors = np.unique(movie_industry.director.values)
    recent_actors = np.unique(movie_industry.star.values)
    df = pd.read_csv("../data/features.csv")
    known_agent = (df.agent.isin(recent_directors) & (df.is_director == 1)) | (
        df.agent.isin(recent_actors) & (df.is_director == 0)
    )
    df = df[known_agent]
    df = df.loc[df.year >= 2007]
    dropped = ["agent", "is_director", "id", "year", "time"]
    # One (agents x 3) matrix per year, stacked along a new axis.
    yearly = [
        df[df.year == year].drop(columns=dropped)[keep].to_numpy()
        for year in range(2007, 2017)
    ]
    return np.stack(yearly)
def get_agents():
    """Return the agent names of the 2007 feature rows (same filter as above)."""
    movie_industry = pd.read_csv("../data/movie_industry.csv", encoding = "ISO-8859-1" )
    movie_industry = movie_industry[movie_industry.year >= 2007]
    recent_directors = np.unique(movie_industry.director.values)
    recent_actors = np.unique(movie_industry.star.values)
    df = pd.read_csv("../data/features.csv")
    known_agent = (df.agent.isin(recent_directors) & (df.is_director == 1)) | (
        df.agent.isin(recent_actors) & (df.is_director == 0)
    )
    df = df[known_agent]
    df = df.loc[df.year >= 2007]
    return df[df.year == 2007].agent.values
|
#Print Generators Performance
import random
import time
#import memory_profiler
names=['Mark','Steve','Charles','Ramesh','Tom']
majors=['Computer Science','Math','Biology','Chemistry','Art','Electrical']
def person_list(num):
    """Materialize *num* random person records as a list of dicts."""
    return [
        {
            'id': i,
            'name': random.choice(names),
            'major': random.choice(majors)
        }
        for i in range(num)
    ]
def person_generator(num):
    """Lazily yield *num* random person records, one dict at a time."""
    for i in range(num):
        yield {
            'id': i,
            'name': random.choice(names),
            'major': random.choice(majors)
        }
# --- Benchmark: list vs. generator construction --------------------------
# BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is the
# documented replacement for interval timing.
print ("Using list to create 1M items...")
start_time = time.perf_counter()
team = person_list(1000000)
end_time = time.perf_counter()
#print("Memory usage : {} Mb".format(memory_profiler.memory_usage()))
print("Took {} Seconds".format(end_time - start_time))

print ("Using generator to create 1M items...")
start_time = time.perf_counter()
# NOTE: this only constructs the generator object -- no items are produced
# until it is iterated, which is why this measures as near-zero time.
team = person_generator(1000000)
end_time = time.perf_counter()
#print("Memory usage : {} Mb".format(memory_profiler.memory_usage()))
print("Took {} Seconds".format(end_time - start_time))
|
import tensorflow as tf
import numpy as np
from tfModels.layers import residual, conv_lstm
from tfModels.tensor2tensor.common_layers import layer_norm
from .processor import Processor
class CONV_Processor(Processor):
    """Convolutional speech-feature front end.

    Treats the stacked input features as a 3-channel "image" and applies
    three stride-2 convolutions, shrinking both the time and the feature
    axis by a factor of 2**3 = 8 before flattening back to frame vectors.
    """

    def __init__(self, is_train, args, name='conv_processor'):
        # Hyperparameters read from the experiment configuration.
        self.num_cell_units = args.model.processor.num_cell_units
        self.num_filters = args.model.processor.num_filters
        self.num_layers = args.model.processor.num_layers
        self.size_feat = args.data.dim_input
        super().__init__(is_train, args, name)

    def process(self, inputs, len_inputs):
        """Downsample `inputs` 8x in time and return (frames, len_frames).

        # assumes inputs is (batch, time, dim_input) with dim_input divisible
        # by 3 (e.g. static + delta + delta-delta features) -- TODO confirm
        """
        size_batch = tf.shape(inputs)[0]
        size_length = tf.shape(inputs)[1]
        # Split the feature dimension into 3 channels for the 2-D convs.
        size_feat = int(self.size_feat/3)
        x = tf.reshape(inputs, [size_batch, size_length, size_feat, 3])
        # x = tf.Print(x, [tf.shape(x)], message='x0: ', summarize=1000)
        with tf.variable_scope(self.name):
            # Three stride-(2,2) convolutions: each halves time and features.
            x = self.normal_conv(
                inputs=x,
                filter_num=self.num_filters,
                kernel=(3,3),
                stride=(2,2),
                padding='SAME',
                use_relu=True,
                name="conv1",
                w_initializer=None,
                norm_type='layer')
            x = self.normal_conv(
                inputs=x,
                filter_num=self.num_filters,
                kernel=(3,3),
                stride=(2,2),
                padding='SAME',
                use_relu=True,
                name="conv2",
                w_initializer=None,
                norm_type='layer')
            x = self.normal_conv(
                inputs=x,
                filter_num=self.num_filters,
                kernel=(3,3),
                stride=(2,2),
                padding='SAME',
                use_relu=True,
                name="conv3",
                w_initializer=None,
                norm_type='layer')
            # x = conv_lstm(
            #     inputs=x,
            #     kernel_size=(3,3),
            #     filters=self.num_filters)

            # Recompute shapes/lengths after the 8x (ceil) downsampling.
            size_feat = self.num_filters * tf.cast(tf.ceil(tf.cast(size_feat,tf.float32)/8), tf.int32)
            size_length = tf.cast(tf.ceil(tf.cast(size_length,tf.float32)/8), tf.int32)
            len_frames = tf.cast(tf.ceil(tf.cast(len_inputs,tf.float32)/8), tf.int32)
            # Flatten (feat, channels) back into a single frame vector.
            frames = tf.reshape(x, [size_batch, size_length, size_feat])
        return frames, len_frames

    @staticmethod
    def normal_conv(inputs, filter_num, kernel, stride, padding, use_relu, name,
                    w_initializer=None, norm_type="batch"):
        """2-D convolution + optional batch/layer normalization + optional ReLU."""
        with tf.variable_scope(name):
            net = tf.layers.conv2d(inputs, filter_num, kernel, stride, padding,
                                   kernel_initializer=w_initializer, name="conv")
            if norm_type == "batch":
                net = tf.layers.batch_normalization(net, name="bn")
            elif norm_type == "layer":
                net = layer_norm(net)
            else:
                net = net
            output = tf.nn.relu(net) if use_relu else net
        return output

    @staticmethod
    def blstm(hidden_output, len_feas, num_cell_units, name, dropout=0.0, use_residual=False):
        """Bidirectional LSTM whose concatenated output matches num_cell_units.

        NOTE(review): `num_cell_units /= 2` yields a float in Python 3 --
        confirm CudnnCompatibleLSTMCell accepts it, or cast to int.
        """
        num_cell_units /= 2
        with tf.variable_scope(name):
            f_cell = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_cell_units)
            b_cell = tf.contrib.cudnn_rnn.CudnnCompatibleLSTMCell(num_cell_units)
            x, _ = tf.nn.bidirectional_dynamic_rnn(
                cell_fw=f_cell,
                cell_bw=b_cell,
                inputs=hidden_output,
                dtype=tf.float32,
                time_major=False,
                sequence_length=len_feas)
            # Concatenate forward/backward outputs along the feature axis.
            x = tf.concat(x, 2)
            if use_residual:
                x = residual(hidden_output, x, dropout)
        return x

    def pooling(self, x, len_sequence, type, name):
        """1x1 conv (TDNN) followed by max-pooling; 'HALF' halves the length.

        NOTE(review): use_relu receives the string 'True' (always truthy, so
        ReLU is applied either way) -- probably meant the boolean True.
        """
        x = tf.expand_dims(x, axis=2)
        x = self.normal_conv(
            x,
            self.num_cell_units,
            (1, 1),
            (1, 1),
            'SAME',
            'True',
            name="tdnn_"+str(name),
            norm_type='layer')

        if type == 'SAME':
            x = tf.layers.max_pooling2d(x, (1, 1), (1, 1), 'SAME')
        elif type == 'HALF':
            x = tf.layers.max_pooling2d(x, (2, 1), (2, 1), 'SAME')
            # Keep the valid-length bookkeeping in sync with the pooling.
            len_sequence = tf.cast(tf.ceil(tf.cast(len_sequence, tf.float32)/2), tf.int32)

        x = tf.squeeze(x, axis=2)
        return x, len_sequence
|
"""Sort dfdc audio clips into train/test x FAKE/REAL folders per metadata."""
import pandas as pd
import shutil
import os

# Metadata rows keyed by clip file name (label, split, ...).
data = pd.read_json('dfdc_train_part_49/metadata.json')
data = data.T
data['file'] = data.index

files = os.listdir('dfdc_train_part_49/audio')

# PERF FIX: build one stem -> (split, label) lookup instead of the original
# O(files x rows) nested scan over the metadata for every audio file.
# NOTE(review): assumes file-name stems are unique in the metadata.
routing = {
    data.file[x].split('.')[0]: (data.split[x], data.label[x])
    for x in range(len(data.file))
}

for i in range(len(files)):
    stem = files[i].split('.')[0]
    if stem not in routing:
        # No metadata for this clip: skip (the original left it in place too).
        continue
    split, label = routing[stem]
    if split == 'train':
        if label == 'FAKE':
            shutil.move(r'D:\Projects\fake\dfdc_train_part_49\audio\\'+files[i],r'D:\Projects\fake\dfdc_train_part_49\audio\train\FAKE')
        else:
            shutil.move(r'D:\Projects\fake\dfdc_train_part_49\audio\\'+files[i],r'D:\Projects\fake\dfdc_train_part_49\audio\train\REAL')
    else:
        if label == 'FAKE':
            shutil.move(r'D:\Projects\fake\dfdc_train_part_49\audio\\'+files[i],r'D:\Projects\fake\dfdc_train_part_49\audio\test\FAKE')
        else:
            shutil.move(r'D:\Projects\fake\dfdc_train_part_49\audio\\'+files[i],r'D:\Projects\fake\dfdc_train_part_49\audio\test\REAL')
|
from django.contrib import admin
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from classes.views import (classroom_update,classroom_delete,
classroom_create,classroom_detail,classroom_list)
from API.views import (ListView,DetailView,UpdateView,DeleteView,CreateView)
from rest_framework_simplejwt.views import (
TokenObtainPairView,
)
urlpatterns = [
    path('admin/', admin.site.urls),
    # Classic server-rendered classroom CRUD views.
    path('classrooms/', classroom_list, name='classroom-list'),
    path('classrooms/<int:classroom_id>/', classroom_detail, name='classroom-detail'),
    path('classrooms/create', classroom_create, name='classroom-create'),
    path('classrooms/<int:classroom_id>/update/',classroom_update, name='classroom-update'),
    path('classrooms/<int:classroom_id>/delete/',classroom_delete, name='classroom-delete'),
    # DRF class-based API endpoints plus the JWT token endpoint.
    path('class/list', ListView.as_view(), name='list'),
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('class/<int:classroom_id>/detail', DetailView.as_view(), name='detail'),
    path('class/<int:classroom_id>/update', UpdateView.as_view(), name='update'),
    path('class/<int:classroom_id>/delete', DeleteView.as_view(), name='delete'),
    path('class/create', CreateView.as_view(), name='create'),
]
# Serve static and media files directly only while developing.
if settings.DEBUG:
    urlpatterns+=static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns+=static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
from ftw.upgrade import UpgradeStep
class AddOGDSSyncConfiguration(UpgradeStep):
    """Add OGDS sync configuration.

    Runs the generic-setup profile shipped with this upgrade step, which
    registers the new OGDS sync configuration.
    """

    def __call__(self):
        # Delegates to ftw.upgrade's profile-import machinery.
        self.install_upgrade_profile()
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
def loadData(filename):
    """Load a headerless tab-separated data file of two features + one label.

    Args:
        filename (str): path to a TSV file whose first two columns are
            features and whose third column is the class label.

    Returns:
        tuple: (X, y) where X is an (n, 2) feature array and y an (n,)
        label array.
    """
    # `sep` passed by keyword: the positional second argument of read_table
    # was deprecated and removed in pandas 2.0.
    df = pd.read_table(filename, sep='\t', header=None)
    return np.array(df.loc[:,0:1]), np.array(df.loc[:,2])
def showData(X, y, w=None, b=None):
    """Scatter-plot 2-D samples colored by label; optionally overlay the
    decision boundary w0*x0 + w1*x1 + b = 0.

    Args:
        X (ndarray): (n, 2) feature matrix.
        y (ndarray): (n,) labels used as point colors.
        w (ndarray): optional (2, 1) weight vector.
        b (ndarray or float): optional bias term.
    """
    plt.scatter(x=X[:,0], y=X[:,1], c=y)
    if (w is None) or (b is None):
        return
    # Boundary: x1 = -(w0 * x0 + b) / w1.
    # BUG FIX: the original used slope -w[1,0]/w[0,0] (i.e. -w1/w0) instead
    # of -w0/w1, drawing a line unrelated to the learned boundary.
    slope = -w[0,0] / w[1,0]
    intercept = -b / w[1,0]
    plt.plot(X[:,0], slope * X[:,0] + intercept, c='red')
def init_w_b(shape_w, shape_b, seed):
    """Seeded random initialization of weights and bias (bias offset +0.01)."""
    np.random.seed(seed)
    weights = np.random.rand(shape_w[0], shape_w[1])
    bias = np.random.rand(shape_b[0]) + 0.01
    return weights, bias
def forward(X, w, b):
    """Sigmoid-activated linear forward pass: sigmoid(X @ w + b)."""
    logits = np.dot(X, w) + b
    return 1.0 / (1.0 + np.exp(-logits))
def cost_func(y_, y):
    """Mean squared error between predictions y_ and targets y (flattened)."""
    predictions = y_.flatten()
    targets = y.flatten()
    return np.sum(np.power(predictions - targets, 2)) / targets.shape[0]
def train(maxloop, lr, X, y, w, b):
    """Batch gradient descent; prints training accuracy after each step.

    Returns the updated (w, b) after `maxloop` iterations with rate `lr`.
    """
    m = X.shape[0]
    y = y.reshape((m, 1))
    for _ in range(maxloop):
        a = forward(X, w, b)
        grad_z = a - y
        # Average gradients over the batch, then take one descent step.
        w = w - lr * (np.dot(X.T, grad_z) / m)
        b = b - lr * (np.sum(grad_z) / m)
        print(calc_accuarcy(a, y))
    return w, b
def harden(y_, sepNum):
    """Threshold predictions in place: values above sepNum become 1, the rest 0.

    Note: mutates *and* returns the same array. The second mask re-tests
    the already-modified array, so the two statements are order-sensitive.
    """
    y_[y_ > sepNum] = 1
    y_[y_ <= sepNum] = 0
    return y_
def calc_accuarcy(y_, y):
    """Fraction of hardened predictions (threshold 0.5) matching the labels."""
    targets = y.flatten()
    predictions = harden(y_.flatten(), 0.5)
    matches = np.count_nonzero(predictions == targets)
    return float(matches) / targets.shape[0]
# ------------ main -------------- #
# Load the toy dataset and show the raw class scatter.
X, y = loadData('./data/testSet.txt')
showData(X, y)
# Seeded random init: w is (2, 1), b is (1,).
w, b = init_w_b([2,1], [1], seed=314)
# Initial (untrained) predictions and cost, for reference.
a = forward(X, w, b)
y_ = a.flatten()
cost = cost_func(y_, y)
# 100 gradient-descent steps at learning rate 0.1 (prints accuracy per step).
w, b = train(100, 0.1, X, y, w, b)
# Final hard predictions plotted together with the learned boundary.
y_ = forward(X, w, b)
y_ = harden(y_, 0.5)
showData(X, y_, w, b)
|
from django.urls import path
import spendings.room_api.views as views
urlpatterns = [
    # Room-scoped endpoints: every view receives the room's primary key.
    path("<int:room_id>/state/", views.room_state),
    path("<int:room_id>/spendings/", views.room_spendings),
    path("<int:room_id>/depts/", views.room_depts),
    path("<int:room_id>/settlements/", views.room_settlements),
    path("<int:room_id>/members/", views.room_members),
    path("<int:room_id>/me/", views.room_member),
    path("<int:room_id>/history/", views.room_history),
    path("<int:room_id>/data/", views.room_data),
]
|
'''
NumPy practice exercises 71-80.

@Author: your name
@Date: 2020-02-29 09:53:57
@LastEditTime: 2020-02-29 11:17:41
@LastEditors: Please set LastEditors
@FilePath: /pyenv/numpy/np7.py
'''
import numpy as np
# 71. Randomly place p values of 1 inside a given 5x5 array:
p = 3
Z = np.zeros((5, 5))
np.put(Z, np.random.choice(range(5*5), p, replace=False), 1)
print(Z)
# 72. Subtract the mean of each row from a random 3x3 array:
X = np.random.rand(3, 3)
print(X)
Y = X - X.mean(axis=1, keepdims=True)
print(Y)
# 73. Get the diagonal of a dot product of two 2-D arrays:
A = np.random.uniform(0, 1, (3, 3))
B = np.random.uniform(0, 1, (3, 3))
print(np.dot(A, B))
# Slower method
np.diag(np.dot(A, B))
np.sum(A * B.T, axis=1)  # Faster method
np.einsum("ij, ji->i", A, B)  # Fastest method
# 74. Find the p largest values in a random 1-D array:
Z = np.random.randint(1, 100, 100)
print(Z)
p = 5
a = Z[np.argsort(Z)[-p:]]
print(a)
# 75. Raise every element of a random 1-D array to the 4th power:
x = np.random.randint(2, 5, 5)
print(x)
a = np.power(x, 4)
print(a)
# 76. Print a random 2-D array with 2 decimal places:
Z = np.random.random((5, 5))
print(Z)
np.set_printoptions(precision=2)
print(Z)
# 77. Print a NumPy array using scientific notation:
Z = np.random.random([5, 5])
print(Z)
print(Z/1e3)
# 78. Compute percentiles (25%, 50%, 75%) with NumPy:
a = np.arange(15)
print(a)
c = np.percentile(a, q=[25, 50, 75])
print(c)
# 79. Count and locate the missing values in an array:
# Generate a 2-D array containing missing values
Z = np.random.rand(10, 10)
Z[np.random.randint(10, size=5), np.random.randint(10, size=5)] = np.nan
print(Z)
print("缺失值总数: \n", np.isnan(Z).sum())
print("缺失值索引: \n", np.where(np.isnan(Z)))
# 80. Drop the rows that contain missing values:
b = Z[np.sum(np.isnan(Z), axis=1) == 0]
print(b)
import itertools
import time
def coordinates(length, width):
    """Yield every (row, col) pair of a length x width grid, row-major."""
    return ((r, c) for r in range(length) for c in range(width))
def neighbours(xy, graph_len):
    """Return the orthogonal neighbours of *xy* inside a square grid.

    Order matches the original: up, down, left, right, skipping any cell
    that would fall outside the graph_len x graph_len grid.
    """
    x, y = xy
    edge = graph_len - 1
    result = []
    if x != 0:
        result.append((x - 1, y))
    if x != edge:
        result.append((x + 1, y))
    if y != 0:
        result.append((x, y - 1))
    if y != edge:
        result.append((x, y + 1))
    return result
# source and target are tuples of (x, y) coordinate
def dijkstra(matrix, source, target):
    """Dijkstra over a grid where entering cell (x, y) costs matrix[x][y].

    Returns the dict of best-known path costs from `source` (excluding the
    source cell's own value); returns early once `target` is relaxed.
    """
    x, y = source[0], source[1]
    m_length = len(matrix)
    # dist: best known cost per cell; cloud: tentative costs of cells not yet
    # finalized (all cells start in the cloud at infinity).
    dist = {(x, y): float("inf") for (x, y) in list(coordinates(m_length, m_length))}
    cloud = {(x, y): float("inf") for (x, y) in list(coordinates(m_length, m_length))}
    # Initialize source vertex values
    dist[(x, y)] = 0
    cloud[(x, y)] = 0
    # Update source vertex's neighbours
    for v in neighbours((x, y), m_length):
        if dist[(x, y)] + matrix[v[0]][v[1]] < dist[v]:
            dist[v] = matrix[v[0]][v[1]]
            cloud[v] = dist[v]
    while cloud:
        # u = the vertex connected to the cloud (but not in it) with the minimum value
        u = min(cloud, key=cloud.get)
        del cloud[u]
        # NOTE(review): dist values start at inf and only shrink to sums of
        # cell weights, so this -1 sentinel branch looks unreachable.
        if dist[u] == -1:
            break
        for v in neighbours(u, m_length):
            # NOTE(review): truthiness test -- a cloud entry whose tentative
            # cost is 0 (possible only with zero-weight cells) would be
            # skipped; `v in cloud` would be the robust membership test.
            if cloud.get(v):
                alt = dist[u] + matrix[v[0]][v[1]]
                if alt < dist[v]:
                    dist[v] = alt
                    cloud[v] = dist[v]
                if v == (target[0], target[1]):
                    return dist
    return dist
def main():
    """Minimal path sum through matrix.txt (Project Euler 83 style).

    Prints the cost of the cheapest path from the top-left to the
    bottom-right cell, then the elapsed wall time.
    """
    # BUG FIX: time.clock() was removed in Python 3.8; perf_counter() is
    # the documented replacement for interval timing.
    start = time.perf_counter()
    # `with` closes the file handle (the original open() call leaked it).
    with open("matrix.txt", "r") as matrix_file:
        matrix = [[int(x) for x in line.strip().split(",")] for line in matrix_file]
    length = len(matrix)
    # dijkstra's distances exclude the start cell's own cost; add it back.
    solution = matrix[0][0]
    matrix = dijkstra(matrix, (0, 0), (length-1, length-1))
    print (matrix[(length-1, length-1)] + solution)
    print (time.perf_counter() - start)

if __name__ == '__main__':
    main()
|
from django.contrib import admin
from .models import productSold
# Register your models here.
# Expose productSold records through the default ModelAdmin interface.
admin.site.register(productSold)
from queue import Queue as PythonQueue
class Queue:
    """Fixed-capacity circular queue backed by a preallocated list.

    ``head``/``tail`` are both -1 while empty; otherwise they index the
    front and back elements respectively, wrapping modulo ``max_size``.
    """

    def __init__(self, max_size) -> None:
        self.max_size: int = max_size
        self.data = [None] * max_size
        self.head: int = -1
        self.tail: int = -1

    def is_empty(self) -> bool:
        """True when no element is stored."""
        return self.head == -1

    def is_full(self) -> bool:
        """True when advancing the tail would collide with the head."""
        return self.head == (self.tail + 1) % self.max_size

    def enqueue(self, value: int) -> bool:
        """Append *value* at the back; returns False when full."""
        if self.is_full():
            return False
        if self.is_empty():
            # First element: the head moves off its -1 sentinel.
            self.head = 0
        self.tail = (self.tail + 1) % self.max_size
        self.data[self.tail] = value
        return True

    def dequeue(self) -> bool:
        """Drop the front element; returns False when empty."""
        if self.is_empty():
            return False
        if self.head == self.tail:
            # Removing the last element: reset to the canonical empty state.
            self.head = self.tail = -1
        else:
            self.head = (self.head + 1) % self.max_size
        return True

    def front(self) -> int:
        """Front element, or -1 when empty."""
        return -1 if self.is_empty() else self.data[self.head]

    def rear(self) -> int:
        """Back element, or -1 when empty."""
        return -1 if self.is_empty() else self.data[self.tail]
class MovingAverage:
    """Streaming average over the most recent *size* values."""

    def __init__(self, size: int):
        # Bounded FIFO of the current window plus its running sum.
        self.queue = PythonQueue(maxsize=size)
        self.total = 0

    def next(self, value: int) -> float:
        """Push *value* into the window and return the current average."""
        if self.queue.full():
            # Window saturated: evict the oldest value from the running sum.
            evicted = self.queue.get()
            self.total -= evicted
        self.queue.put(value)
        self.total += value
        return self.total / self.queue.qsize()
class QueueBaseStack:
    """FIFO queue built from two LIFO lists (amortized O(1) per operation).

    ``stack2`` receives pushes; ``stack1`` holds reversed elements whose
    top is the current front of the queue.
    """

    def __init__(self):
        self.stack1 = []
        self.stack2 = []

    def push(self, x):
        """Enqueue *x* at the back."""
        self.stack2.append(x)

    def pop(self):
        """Dequeue and return the front element."""
        if not self.stack1:
            # Reverse the inbox into the outbox so its bottom becomes the top.
            while self.stack2:
                self.stack1.append(self.stack2.pop())
        return self.stack1.pop()

    def peek(self):
        """Return the front element without removing it."""
        # Front is on top of stack1 if populated, else at the bottom of stack2.
        return self.stack1[-1] if self.stack1 else self.stack2[0]

    def empty(self):
        """True when both stacks are drained."""
        return not (self.stack1 or self.stack2)
|
import logging
CONSIDER_NEIGHBORING_NODES_AS_CONTEXT = 6 # number of surrounding links to consider as neighbouring context
class Decider:
'''The Decider class takes the several metrics into account and
identifies a meaning for each link that is most likely to be the correct one in the
given context
'''
'''constructor
@param relatedness_calculator a relatedness calculator to improve the decision making process
'''
    def __init__(self, relatedness_calculator):
        # Calculator used to score semantic relatedness between candidate
        # meanings while disambiguating links.
        self._relatedness_calculator = relatedness_calculator
'''decides for a meaning for each word to be disambiguated. The meanings will be sorted with the most likely one
as the first entry in the list of meanings for each link in article['links'].
additionally, a 'relatedness' field is added that determines how relevant the field is
finally, a combined value of 'commonness' and 'relatedness' is calculated which is added as an 'overallMatch' field
@param article an article as a dictionary with the following fields:
'id': the id of the article
'links': a list of links to be disambiguated as dictionaries with the following fields:
'phrase': the phrase used within the link
'meanings' a list of of dictionaries with the following fields:
'target_article_id': the id of the referenced article
'target_article_name': the name of the referenced article
'commonness': a value between 0 and 1 that determines how popular this meaning is
'articleincount': a number how many times this article is linked to
'''
def decide(self, article):
links = article['links']
# create fields
for link in links:
link['done'] = False # will be deleted later, only for temporary usage
link['numCmp'] = 0 # will be deleted later, only for temporary usage
for m in link['meanings']:
m['relatedness'] = 0.0
m['overallMatch'] = 0.0
m['cumulativeRelatedness'] = 0.0 # will be deleted later, only for temporary usage
num_finalized = 0
while (num_finalized != len(links)):
next_link_index = self._find_next_link_index(links)
logging.info('%d of %d words disambiguated | %d%% DONE' % (num_finalized, len(links), float(num_finalized) / float(len(links)) * 100.0))
if next_link_index == -1:
logging.error('No next link was found!')
num_finalized+= 1
else:
# if only meaning, take it
link = links[next_link_index]
if len(link['meanings']) == 1:
logging.info('Only meaning for %s was selected: %s' % (link['phrase'], link['meanings'][0]['target_article_name']))
# if there is no meaning, just continue
elif len(link['meanings']) == 0:
logging.error('link %s does not have any disambiguations' % link['phrase'])
# if multiple meanings are available
else:
start = next_link_index-(CONSIDER_NEIGHBORING_NODES_AS_CONTEXT/2)
if start < 0:
start = 0
end = start + CONSIDER_NEIGHBORING_NODES_AS_CONTEXT
if end >= len(links):
end = len(links) - 1
for index in range(start, end+1):
# correlation with link itself is not useful ;-)
if index != next_link_index:
logging.info('comparing %s to %s' % (link['phrase'], links[index]['phrase']))
# if there is already a meaning selected for the compared link
if links[index]['done']:
if len(links[index]['meanings']) == 0:
neighbour_meanings = []
else:
neighbour_meanings = [links[index]['meanings'][0]]
else:
neighbour_meanings = [] #links[index]['meanings']
# compare each neighboring meaning to the current one
for neighbour_meaning in neighbour_meanings:
for meaning in link['meanings']:
relatedness = self._relatedness_calculator.calculate_relatedness(meaning, neighbour_meaning)
# add to cumulative relatedness
meaning['cumulativeRelatedness'] += (relatedness / float(len(neighbour_meanings)))
# mark link as compared to one more link
link['numCmp'] += 1
# JUST FOR DEBUGGING REASONS
meanings_tmp = list(link['meanings'])
sorted_tmp = sorted(meanings_tmp, key=lambda m: -m['cumulativeRelatedness'])
logging.info('\tcumulative (%f): %s' % (sorted_tmp[0]['cumulativeRelatedness'], sorted_tmp[0]['target_article_name']))
if len(sorted_tmp) > 1:
logging.info('\tcumulative 2nd (%f): %s' % (sorted_tmp[1]['cumulativeRelatedness'], sorted_tmp[1]['target_article_name']))
# calculate relatedness and overall match
total_relatedness = 0.0
for meaning in link['meanings']:
total_relatedness += meaning['cumulativeRelatedness']
for meaning in link['meanings']:
if total_relatedness != 0.0:
meaning['relatedness'] = meaning['cumulativeRelatedness'] / total_relatedness
meaning['overallMatch'] = (meaning['relatedness'] + meaning['commonness']) / 2.0
# take the best match
link['meanings'] = sorted(link['meanings'], key=lambda m: -m['overallMatch'])
logging.info('deciding for %s, rel: %d%%, comm: %d%%, total: %d%%'
% (link['meanings'][0]['target_article_name'],
round(link['meanings'][0]['relatedness']*100.0),
round(link['meanings'][0]['commonness']*100.0),
round(link['meanings'][0]['overallMatch']*100.0)))
if len(link['meanings']) > 1:
logging.info('2nd choice would be %s, rel: %d%%, comm: %d%%, total: %d%%'
% (link['meanings'][1]['target_article_name'],
round(link['meanings'][1]['relatedness']*100.0),
round(link['meanings'][1]['commonness']*100.0),
round(link['meanings'][1]['overallMatch']*100.0)))
link['done'] = True
num_finalized += 1
# cleanup and delete unccessecary fields
for link in links:
del link['done']
del link['numCmp']
for m in link['meanings']:
if m.has_key('cumulativeRelatedness'): # TODO: investigate why this field is missing sometimes
del m['cumulativeRelatedness']
def _find_next_link_index(self, links):
# it there is one with only one meaning left, take that one first
for index in range(0, len(links)):
if len(links[index]['meanings']) == 1 and links[index]['done'] == False:
return index
# otherwise take one with the most number of neighbouring nodes that are already determined and the lowest cardinality next
lowest_cardinality = 99999
highest_neighbours_done = 0
next_link_index = -1
# find a link which is already determined
for index in range(0, len(links)):
if links[index]['done'] == False:
# find neighbours to this link
start = index-(CONSIDER_NEIGHBORING_NODES_AS_CONTEXT/2)
if start < 0:
start = 0
end = start + CONSIDER_NEIGHBORING_NODES_AS_CONTEXT
if end >= len(links):
end = len(links) - 1
neighbours_done = 0
for neighbour_index in range(start, end+1):
if links[neighbour_index]['done']:
neighbours_done+= 1
if (neighbours_done == highest_neighbours_done and len(links[index]['meanings']) < lowest_cardinality) or neighbours_done > highest_neighbours_done:
next_link_index = index
highest_neighbours_done = neighbours_done
lowest_cardinality = len(links[index]['meanings'])
return next_link_index |
# coding=utf-8
import os
import sys
import django
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) # 把manage.py所在目录添加到系统目录
os.environ['DJANGO_SETTINGS_MODULE'] = 'AJoke.settings' # 设置setting文件
django.setup()
from records.models import CSInfo, InterfaceLogs
from ENVS import DICT_TOKEN_PARAM
import asyncio
import json
import time
from datetime import datetime
import aiohttp
import requests
from apscheduler.schedulers.background import BackgroundScheduler
from django_apscheduler.jobstores import DjangoJobStore, register_events, register_job
def get_request_data(url, data):
    """POST `data` as a JSON body to `url` and return the decoded JSON reply."""
    response = requests.post(url, headers={'Content-Type': 'application/json'}, data=json.dumps(data))
    return json.loads(response.text)
def get_token(token_url, token_data):
    """Fetch an access token from `token_url`.

    WeChat Work responses carry 'provider_access_token', other providers use
    'access_token'; returns '' when neither key holds a truthy value.
    """
    payload = get_request_data(token_url, token_data)
    return payload.setdefault("provider_access_token", '') or payload.setdefault("access_token", '')
async def post(url, data):
    """Asynchronously POST `data` (JSON-encoded) to `url` and decode the JSON reply."""
    async with aiohttp.ClientSession() as session:
        async with session.post(url, data=json.dumps(data)) as resp:
            body = await resp.text()
    return json.loads(body)
async def request(url, data, obj):  # asynchronous worker
    """POST one device record, log the outcome, and flag `obj` as synced on success."""
    result = await post(url, data)
    param = {"status_code": result["errcode"], "status_msg": result["errmsg"], "platform": 0}
    succeeded = result.setdefault("errmsg", '') == "ok"
    if succeeded:
        # On success the API echoes device details; include them in the log row.
        param.update(result["device_info"])
    InterfaceLogs.objects.create(**param)
    if succeeded:
        obj.is_sync_wx = 1
        obj.save()
def request_(url, data, obj):  # synchronous version
    """Synchronous twin of request(): POST one device record and log the outcome."""
    result = get_request_data(url, data)
    param = {"status_code": result["errcode"], "status_msg": result["errmsg"], "platform": 0}
    succeeded = result.setdefault("errmsg", '') == "ok"
    if succeeded:
        param.update(result["device_info"])
    InterfaceLogs.objects.create(**param)
    if succeeded:
        obj.is_sync_wx = 1
        obj.save()
def main():
    """Loop forever, pushing un-synced firmware-download records to WeChat Work.

    Asynchronous variant of the scheduled main_() below; polls every 300s.
    """
    print(datetime.now())
    while True:
        # BUG FIX: get_token() requires the token endpoint and payload --
        # calling it with no arguments raised TypeError on the first pass.
        provider_access_token = get_token(DICT_TOKEN_PARAM["qywx"]["token_url"],
                                          DICT_TOKEN_PARAM["qywx"]["token_data"])
        if provider_access_token:
            to_sync_apps = CSInfo.objects.filter(is_sync_wx=0)  # records awaiting sync (customer has downloaded)
            post_url = "https://qyapi.weixin.qq.com/cgi-bin/service/add_device?provider_access_token={0}".format(
                provider_access_token)
            tasks = []
            for app in to_sync_apps:
                post_data = {"model_id": app.device_type.model_id, "device_sn": app.device_number}
                tasks.append(asyncio.ensure_future(request(post_url, post_data, app)))
            # asyncio.wait() raises ValueError on an empty task set, so only
            # drive the loop when there is actually something to sync.
            if tasks:
                loop = asyncio.get_event_loop()
                loop.run_until_complete(asyncio.wait(tasks))
        time.sleep(300)
# Background scheduler whose jobs are persisted via django-apscheduler's store.
scheduler = BackgroundScheduler()
scheduler.add_jobstore(DjangoJobStore(), "default")
@register_job(scheduler, "interval", minutes=1, replace_existing=True)
def main_():  # synchronous version, run by the scheduler every minute
    """Scheduled job: push firmware-download records to WeChat Work and FuFu CRM."""
    print(datetime.now())
    def sync_qywx():
        # Register pending devices in the WeChat Work backend.
        provider_access_token = get_token(DICT_TOKEN_PARAM["qywx"]["token_url"], DICT_TOKEN_PARAM["qywx"]["token_data"])
        if provider_access_token:
            to_sync_apps_wx = CSInfo.objects.filter(is_sync_wx=0, deal_tag_wx=0)  # firmware records pending WeChat Work sync (customer has downloaded)
            post_url_wx = "https://qyapi.weixin.qq.com/cgi-bin/service/add_device?provider_access_token={0}".format(
                provider_access_token)
            for app in to_sync_apps_wx:
                post_data_wx = {"model_id": app.device_type.model_id, "device_sn": app.device_number}
                result = get_request_data(post_url_wx, post_data_wx)
                param = {"status_code": result["errcode"], "status_msg": result["errmsg"], "platform": 0}
                if result.setdefault("errmsg", '') == "ok":
                    param.update(result["device_info"])
                    InterfaceLogs.objects.create(**param)
                    app.is_sync_wx = 1
                    app.deal_tag_wx = 1
                    app.save()
                elif result["errcode"] == 600021:  # sync "failed" because this device SN already exists in the WeChat Work backend
                    InterfaceLogs.objects.create(**param)
                    app.is_sync_wx = 1  # mark as synced
                    app.deal_tag_wx = 1  # mark as handled
                    app.save()
                else:
                    InterfaceLogs.objects.create(**param)
                    app.deal_tag_wx = 1  # mark as handled (but not synced)
                    app.save()
    def sync_fufu():
        # Push pending records to the FuFu CRM, refreshing the token once if expired.
        access_token = get_token(DICT_TOKEN_PARAM["fufu"]["token_url"], DICT_TOKEN_PARAM["fufu"]["token_data"])
        if access_token:
            to_sync_apps_ff = CSInfo.objects.filter(is_sync_ff=0, deal_tag_ff=0)  # firmware records pending FuFu CRM sync (customer has downloaded)
            post_url = DICT_TOKEN_PARAM["ff_update"]
            post_url_ff = post_url.format(access_token)
            for app in to_sync_apps_ff:
                post_data_ff = {"sn": app.device_number, "type": "W_" + app.device_type.device_name}
                result = get_request_data(post_url_ff, post_data_ff)
                if result.setdefault("error", "") == "invalid_token":
                    # Token expired mid-run: fetch a fresh one and retry once.
                    access_token = get_token(DICT_TOKEN_PARAM["fufu"]["token_url"],
                                             DICT_TOKEN_PARAM["fufu"]["token_data"])
                    post_url_ff = post_url.format(access_token)
                    result = get_request_data(post_url_ff, post_data_ff)
                try:
                    # NOTE(review): status_code comes from the 'encrypted' field here,
                    # which looks odd -- confirm against the FuFu API response schema.
                    param = {"status_code": result["encrypted"], "status_msg": result["data"], "platform": 1}
                except:
                    # Response did not match the expected shape: log it raw, mark handled.
                    param = {"status_code": 999, "status_msg": str(result), "platform": 1}
                    InterfaceLogs.objects.create(**param)
                    app.deal_tag_ff = 1
                    app.save()
                    continue
                if result["data"]:
                    # Non-empty 'data' is treated as failure: handled but not synced.
                    InterfaceLogs.objects.create(**param)
                    app.deal_tag_ff = 1
                    app.save()
                else:
                    InterfaceLogs.objects.create(**param)
                    app.is_sync_ff = 1
                    app.deal_tag_ff = 1
                    app.save()
    sync_qywx()
    sync_fufu()
# main_()
# Wire django-apscheduler's event listeners and start the background scheduler.
register_events(scheduler)
scheduler.start()
print("Scheduler started!")
|
import math
def isPrime(n):
    """Trial-division primality test.

    Returns 1 when n is prime and 0 otherwise. Fixes two defects:
    * n < 2 (0, 1, negatives) used to fall through the empty loop and be
      reported as prime; it is now correctly rejected.
    * primes now return an explicit 1 instead of an implicit None
      (backward compatible with callers that compare the result to 0).
    """
    if n < 2:
        return 0
    for i in range(2, int(math.sqrt(n)) + 1):
        if n % i == 0:
            print('i= ', i)  # show the smallest factor found (kept from original)
            return 0
    return 1
# Prompt the user for a number and report whether it is prime.
num=int(input('Enter a integer number (2~32767): '))
result=isPrime(num)
# isPrime returns 0 for composites; any other result is treated as prime.
if result==0:
    print('{} is not prime' .format(num))
else:
    print('{} is prime' .format(num))
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 5 14:58:52 2017
@author: lracuna
"""
from vision.camera import Camera
from vision.rt_matrix import *
import numpy as np
from matplotlib import pyplot as plt
#%%
# load points
# Load the 3-D house model (transposed to 3 x N) and append a row of ones so
# the points are in homogeneous coordinates (4 x N).
points = np.loadtxt('house.p3d').T
points = np.vstack((points,np.ones(points.shape[1])))
#%%
# setup camera
#P = hstack((eye(3),array([[0],[0],[-10]])))
cam = Camera()
## Test matrix functions
# Camera intrinsics -- presumably (fx, fy, cx, cy); confirm against vision.camera.Camera.
cam.set_K(1460,1460,608,480)
cam.set_width_heigth(1280,960) #TODO Yue
# cam.set_R(0.0, 0.0, 1.0, 0.0)
# Extrinsics: move the camera 8 units back along Z and rotate 140 degrees
# about the X axis (axis-angle form), then compose the projection matrix P.
cam.set_t(0.0, 0.0, -8.0)
cam.set_R_axisAngle(1.0, 0.0, 0.0, np.deg2rad(140.0)) #TODO Yue
cam.set_P()
print(cam.factor())
#%%
# Project the homogeneous model points into image coordinates.
x = np.array(cam.project(points))
#%%
# plot projection
plt.figure()
plt.plot(x[0],x[1],'k.')
plt.xlim(0,1280)
plt.ylim(0,960)
plt.show()
#%%
# create transformation
# The random rotation axis is immediately overwritten by the fixed Z axis,
# and the rotation angle below is 0.0, so only the Z translation acts.
r = 0.03*np.random.rand(3)
r = np.array([ 0.0, 0.0, 1.0])
t = np.array([ 0.0, 0.0, 0.1])
rot = rotation_matrix(r,0.000)
tras = translation_matrix(t)
#%%
# rotate camera and project
# Repeatedly compose the transforms into P and re-project to visualise motion.
plt.figure()
for i in range(20):
    cam.P = np.dot(cam.P,rot)
    cam.P = np.dot(cam.P,tras)
    x = np.array(cam.project(points))
    plt.plot(x[0],x[1],'.')
plt.xlim(0,1280)
plt.ylim(0,960)
plt.show()
#Experimental results
#External camera calibration using Physical Chessboard
K_ext = np.array([[492.856172, 0.000000, 338.263513], [0.000000, 526.006429, 257.626108], [0.000000, 0.000000, 1.000000]])
#External camera calibration using Screen Chessboard
K_ext_dyn = np.array([[353.7511506068541, 0, 343.6333596289586], [0, 377.989420116449, 259.6826322930511], [0, 0, 1]])
# Element-wise ratio of the two calibration matrices (not a matrix product).
print (K_ext/K_ext_dyn)
def get_countries_data():
    """Return static reference data about national Eurovision selection shows.

    Maps each country name to a dict with:
      'eventName':     name of the national selection event ('-' when none/unknown)
      'watchLink':     URL of a live stream for the event ('-' when unknown)
      'stages':        stage-name template; an entry ending in '...' is a
                       repeated stage whose count is derived from the number
                       of scheduled events (see generate_event_stages below)
      'altEventNames': alternative/abbreviated event names (["-"] when none)
    """
    return {
        'Albania': {
            'eventName': 'Festivali i Këngës',
            'watchLink': 'https://www.rtsh.al/rtsh-live/RTSH1-HD.html',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["FiK"]
        },
        'Andorra': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Armenia': {
            'eventName': 'Depi Evratesil',
            'watchLink': 'https://www.1tv.am/en/',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Australia': {
            'eventName': 'Australia Decides',
            'watchLink': '-',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Austria': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Azerbaijan': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Belarus': {
            'eventName': 'Eurofest',
            'watchLink': 'https://www.tvr.by/televidenie/belarus-1/',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Belgium': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Bosnia and Herzegovina': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Bulgaria': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Croatia': {
            'eventName': 'Dora',
            'watchLink': 'https://hrti.hrt.hr/live/tv?channel=40013 or HRTi OTT smartphone app - account required',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Cyprus': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Czech Republic': {
            'eventName': 'Eurovision Song CZ',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["ESCZ"]
        },
        'Denmark': {
            'eventName': 'Dansk Melodi Grand Prix',
            'watchLink': 'https://www.dr.dk/tv/live/dr1',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["DMGP"]
        },
        'Estonia': {
            'eventName': 'Eesti Laul',
            'watchLink': 'otse.err.ee/k/etv',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Finland': {
            'eventName': 'Uuden Musiikin Kilpailu',
            'watchLink': 'https://areena.yle.fi/tv/ohjelmat/yle-tv1',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["UMK"]
        },
        'France': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Georgia': {
            'eventName': 'Georgian Idol',
            'watchLink': '-',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Germany': {
            'eventName': 'Unser Lied für ',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Greece': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Hungary': {
            'eventName': 'A Dal',
            'watchLink': '-',
            'stages': ['Heat...', 'Semi-final 1', 'Semi-final 2', 'Final'],
            "altEventNames": ["-"]
        },
        'Iceland': {
            'eventName': 'Söngvakeppnin',
            'watchLink': 'https://www.ruv.is/sjonvarp/beint?channel=ruv',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Ireland': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Israel': {
            'eventName': '-',
            'watchLink': 'https://www.kan.org.il/live/tv.aspx?stationid=2',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Italy': {
            'eventName': 'Festival di Sanremo',
            'watchLink': 'https://www.raiplay.it/dirette/rai1',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["Sanremo"]
        },
        'Latvia': {
            'eventName': 'Supernova',
            'watchLink': 'https://ltv.lsm.lv/lv/tieshraide/ltv1/live.360/',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Lithuania': {
            'eventName': 'Pabandom iš naujo',
            'watchLink': 'lrt.lt/mediateka/tiesiogiai/lrt-televizija',
            'stages': ['Heat...', 'Semi-final 1', 'Semi-final 2', 'Final'],
            "altEventNames": ["PiN"]
        },
        'Luxembourg': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Malta': {
            'eventName': 'X Factor Malta',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Moldova': {
            'eventName': 'O melodie pentru Europa',
            'watchLink': 'http://www.trm.md/ro/moldova-1',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["OMPE"]
        },
        'Monaco': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Montenegro': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Morocco': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Netherlands': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'North Macedonia': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Norway': {
            'eventName': 'Melodi Grand Prix',
            'watchLink': 'nrk.no/mgp',
            'stages': ['Heat...', 'Final'],
            "altEventNames": ["MGP"]
        },
        'Poland': {
            'eventName': 'Szansa na Sukces',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Portugal': {
            'eventName': 'Festival da Canção',
            'watchLink': 'https://www.rtp.pt/play/direto/rtp1',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["FdC"]
        },
        'Romania': {
            'eventName': 'Selecția Națională',
            'watchLink': 'https://www.tvrplus.ro/',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Russia': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'San Marino': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Serbia': {
            'eventName': 'Beovizija',
            'watchLink': 'RTS Svet (https://rtsplaneta.rs/linear/16889 or RTS Planeta app - account required)',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["-"]
        },
        'Slovakia': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Slovenia': {
            'eventName': 'EMA',
            'watchLink': 'https://4d.rtvslo.si/zivo/tvs1',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Spain': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Sweden': {
            'eventName': 'Melodifestivalen',
            'watchLink': 'https://www.svtplay.se/melodifestivalen',
            'stages': ['Heat...', 'Andra Chansen', 'Final'],
            "altEventNames": ["Mello", "Melfest"]
        },
        'Switzerland': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Turkey': {
            'eventName': '-',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["-"]
        },
        'Ukraine': {
            'eventName': 'Vidbir (Natsionalnyi Vidbir na Yevrobachennia)',
            'watchLink': 'https://www.youtube.com/channel/UCPY6gj8G7dqwPxg9KwHrj5Q',
            'stages': ['Semi-final...', 'Final'],
            "altEventNames": ["Vidbir"]
        },
        'United Kingdom': {
            'eventName': 'Eurovision: You Decide',
            'watchLink': '-',
            'stages': ['Night...', 'Final'],
            "altEventNames": ["You Decide"]
        }
    }
def generate_event_stages(events_len, stages, country):
    """Expand a stage template into one stage name per scheduled event.

    A template entry ending in '...' (e.g. 'Heat...') is a repetition stage:
    it is expanded into as many numbered entries ('Heat 1', 'Heat 2', ...) as
    needed so the total matches `events_len` (no number is appended when it
    repeats only once). At most one repetition entry is supported; if none is
    present, the template must cover at least `events_len` stages.

    :param events_len: number of scheduled events
    :param stages: stage template from the referential data
    :param country: country name, only used in warning messages
    :return: list of stage names; when more names are generated than there
             are events, only the LAST `events_len` names are kept
    """
    repetition_idx = -1
    repetition_expression_count = len(list(filter(lambda s: s.endswith('...'), stages)))
    # Fall back to a sane default template when the referential data is unusable.
    if repetition_expression_count == 0 and events_len > len(stages):
        print("Less stages defined than events, check referential data for {}. Default used: [Night..., Final]".format(country))
        stages = ['Night...', 'Final']
    elif repetition_expression_count > 1:
        print("More than one repetition stage found, check referential data for {}. Default used: [Night..., Final]".format(country))
        stages = ['Night...', 'Final']
    try:
        repetition_idx = stages.index(next(s for s in stages if s.endswith('...')))
    except StopIteration:
        pass  # no repetition stage in the template
    # How many concrete stages the repetition entry must expand into.
    repetition_count = events_len - len(list(filter(lambda s: not s.endswith('...'), stages)))
    generated_stages = []
    for i in range(0, repetition_idx):
        generated_stages.append(stages[i])
    for i in range(0, repetition_count):
        generated_stages.append(stages[repetition_idx].replace('...', ' ' + str(i+1) if repetition_count > 1 else ''))
    for i in range(repetition_idx+1, len(stages)):
        generated_stages.append(stages[i])
    if events_len < len(generated_stages):
        if events_len <= 0:
            return []
        # BUG FIX: keep the LAST events_len stages. The old trimming loop
        # copied generated_stages[1..events_len], skipping the first entry
        # but also dropping the Final whenever events_len < len - 1.
        return generated_stages[-events_len:]
    return generated_stages
# -*- coding: utf-8 -*-
import scrapy
import hashlib
class ParliamentSpider(scrapy.Spider):
    """Scrape House of Commons FOI disclosure summaries.

    Each start URL is a category index page; parse() follows every link in
    the page's main content area and parse_item() extracts the disclosure
    question, keying each record by an MD5 hash of the page URL.
    """
    name = 'parliament'
    source_title = 'House of Commons'
    source_link = 'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/'
    allowed_domains = ['www.parliament.uk']
    start_urls = [
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/events/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/estates-information/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/catering-services-retail/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/the-speaker/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/information-technology/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/human-resources/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/official-expenditure-/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/members-of-the-house-of-commons-and-members-staff/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/other-house-matters/',
        'https://www.parliament.uk/site-information/foi/foi-and-eir/commons-foi-disclosures/environmental/',
    ]
    def parse(self, response):
        """Follow every disclosure link found in the category page's body."""
        for item in response.css('#ctl00_ctl00_FormContent_SiteSpecificPlaceholder_PageContent_ctlMainBody_wrapperDiv a'):
            # NOTE(review): assumes every href is root-relative;
            # response.urljoin(...) would be more robust -- confirm.
            link = 'https://www.parliament.uk' + item.css('a::attr(href)').get()
            yield scrapy.Request(
                url=link,
                callback=self.parse_item
            )
    def parse_item(self, response):
        """Extract the FOI question text from one disclosure page."""
        question_bits = response.css('.main-introduction *::text').extract()
        # [8:] presumably strips a fixed-length leading label from the
        # introduction text -- TODO confirm against an actual page.
        question = ' '.join(question_bits).strip()[8:].strip()
        # (rebinding shadows the `response` argument; kept as-is)
        response = {
            'question': question,
            'link': response.request.url,
            'id': hashlib.md5(response.request.url.encode('utf-8')).hexdigest()
        }
        return response
|
import torch
from deeprobust.graph.targeted_attack import BaseAttack
from torch.nn.parameter import Parameter
from copy import deepcopy
from deeprobust.graph import utils
import torch.nn.functional as F
import numpy as np
from copy import deepcopy
import scipy.sparse as sp
class RND(BaseAttack):
    """As is described in Adversarial Attacks on Neural Networks for Graph Data (KDD'19),
    'Rnd is an attack in which we modify the structure of the graph. Given our target node v,
    in each step we randomly sample nodes u whose label is different from v and
    add the edge u,v to the graph structure'
    Parameters
    ----------
    model :
        model to attack
    nnodes : int
        number of nodes in the input graph
    attack_structure : bool
        whether to attack graph structure
    attack_features : bool
        whether to attack node features
    device: str
        'cpu' or 'cuda'
    Examples
    --------
    >>> from deeprobust.graph.data import Dataset
    >>> from deeprobust.graph.targeted_attack import RND
    >>> data = Dataset(root='/tmp/', name='cora')
    >>> adj, features, labels = data.adj, data.features, data.labels
    >>> idx_train, idx_val, idx_test = data.idx_train, data.idx_val, data.idx_test
    >>> # Setup Attack Model
    >>> target_node = 0
    >>> model = RND()
    >>> # Attack
    >>> model.attack(adj, labels, idx_train, target_node, n_perturbations=5)
    >>> modified_adj = model.modified_adj
    >>> # # You can also inject nodes
    >>> # model.add_nodes(features, adj, labels, idx_train, target_node, n_added=10, n_perturbations=100)
    >>> # modified_adj = model.modified_adj
    """
    def __init__(self, model=None, nnodes=None, attack_structure=True, attack_features=False, device='cpu'):
        super(RND, self).__init__(model, nnodes, attack_structure=attack_structure, attack_features=attack_features, device=device)
        # Feature attacks are only supported through add_nodes(), not attack().
        assert not self.attack_features, 'RND does NOT support attacking features except adding nodes'
    def attack(self, ori_adj, labels, idx_train, target_node, n_perturbations, **kwargs):
        """
        Randomly sample nodes u whose label is different from v and
        add the edge u,v to the graph structure. This baseline only
        has access to true class labels in training set
        Parameters
        ----------
        ori_adj : scipy.sparse.csr_matrix
            Original (unperturbed) adjacency matrix
        labels :
            node labels
        idx_train :
            node training indices
        target_node : int
            target node index to be attacked
        n_perturbations : int
            Number of perturbations on the input graph. Perturbations could
            be edge removals/additions or feature removals/additions.
        """
        # ori_adj: sp.csr_matrix
        print('number of pertubations: %s' % n_perturbations)
        # lil_matrix allows efficient incremental edits to the sparsity structure.
        modified_adj = ori_adj.tolil()
        # Dense 1-D view of the target node's adjacency row; row[x] == 0 means
        # there is no edge between x and the target yet.
        row = ori_adj[target_node].todense().A1
        diff_label_nodes = [x for x in idx_train if labels[x] != labels[target_node] \
                and row[x] == 0]
        diff_label_nodes = np.random.permutation(diff_label_nodes)
        if len(diff_label_nodes) >= n_perturbations:
            changed_nodes = diff_label_nodes[: n_perturbations]
            # Add undirected edges (both directions) to keep the matrix symmetric.
            modified_adj[target_node, changed_nodes] = 1
            modified_adj[changed_nodes, target_node] = 1
        else:
            # Not enough differently-labelled training nodes: top up with
            # random unlabeled nodes that are not yet connected to the target.
            changed_nodes = diff_label_nodes
            unlabeled_nodes = [x for x in range(ori_adj.shape[0]) if x not in idx_train and row[x] == 0]
            unlabeled_nodes = np.random.permutation(unlabeled_nodes)
            changed_nodes = np.concatenate([changed_nodes,
                                unlabeled_nodes[: n_perturbations-len(diff_label_nodes)]])
            modified_adj[target_node, changed_nodes] = 1
            modified_adj[changed_nodes, target_node] = 1
        self.check_adj(modified_adj)
        self.modified_adj = modified_adj
        # self.modified_features = modified_features
    def add_nodes(self, features, ori_adj, labels, idx_train, target_node, n_added=1, n_perturbations=10, **kwargs):
        """
        For each added node, first connect the target node with added fake nodes.
        Then randomly connect the fake nodes with other nodes whose label is
        different from target node. As for the node feature, simply copy an arbitrary node's
        """
        # ori_adj: sp.csr_matrix
        print('number of pertubations: %s' % n_perturbations)
        N = ori_adj.shape[0]   # original node count
        D = features.shape[1]  # feature dimensionality
        # Grow both matrices to make room for the injected fake nodes.
        modified_adj = self.reshape_mx(ori_adj, shape=(N+n_added, N+n_added))
        modified_features = self.reshape_mx(features, shape=(N+n_added, D))
        # Pick one class different from the target's; fake nodes connect to
        # training nodes of that class.
        diff_labels = [l for l in range(labels.max()+1) if l != labels[target_node]]
        diff_labels = np.random.permutation(diff_labels)
        possible_nodes = [x for x in idx_train if labels[x] == diff_labels[0]]
        for fake_node in range(N, N+n_added):
            sampled_nodes = np.random.permutation(possible_nodes)[: n_perturbations]
            # connect the fake node with target node
            modified_adj[fake_node, target_node] = 1
            modified_adj[target_node, fake_node] = 1
            # connect the fake node with other nodes
            for node in sampled_nodes:
                modified_adj[fake_node, node] = 1
                modified_adj[node, fake_node] = 1
            # Reuse the last sampled node's features for the fake node.
            modified_features[fake_node] = features[node]
        self.check_adj(modified_adj)
        self.modified_adj = modified_adj
        self.modified_features = modified_features
        # return modified_adj, modified_features
    def reshape_mx(self, mx, shape):
        """Rebuild sparse matrix `mx` with a larger `shape`, entries preserved."""
        indices = mx.nonzero()
        return sp.csr_matrix((mx.data, (indices[0], indices[1])), shape=shape).tolil()
|
import random
import string
def generate_random_string(chars=None, length=4):
    """
    Used by various services to create easy to remember room codes.

    :param chars: Characters to draw from, if provided; otherwise upper- and
                  lower-case ASCII letters minus the ambiguous 'I' and 'l'
    :param length: Integer, the length the generated string should be
    :return: String, the randomly generated string
    """
    if chars:
        # Use the specific character pool if provided (exceptions not applied).
        letters = chars
    else:
        # Allow upper and lower case characters.
        letters = string.ascii_lowercase + string.ascii_uppercase
        # BUG FIX: str.replace returns a new string; the old list
        # comprehension discarded the results, so 'I' and 'l' were never
        # actually removed from the pool.
        for ambiguous in ('I', 'l'):
            letters = letters.replace(ambiguous, "")
    return ''.join(random.choice(letters) for _ in range(length))
import sys
import re
from porterStemmer import PorterStemmer
from collections import defaultdict
import copy
porter=PorterStemmer()
class QueryIndex:
    """Answers queries against a previously built inverted index."""

    def __init__(self):
        self.index = {}       # term -> postings list
        self.titleIndex = {}  # document id -> title
        # term frequencies
        self.tf = {}
        # inverse document frequencies
        self.idf = {}

    def intersectLists(self, lists):
        """Intersect several postings lists; returns [] when none are given.

        Note: sorts `lists` in place (smallest first) before intersecting.
        """
        # PY3 FIX: reduce is no longer a builtin in Python 3.
        from functools import reduce
        if len(lists) == 0:
            return []
        # start intersecting from the smaller list
        lists.sort(key=len)
        return list(reduce(lambda x, y: set(x) & set(y), lists))

    def getStopwords(self):
        """Load stopwords from self.stopwordsFile into the self.sw lookup dict."""
        # 'with' guarantees the file is closed even if reading fails.
        with open(self.stopwordsFile, 'r') as f:
            stopwords = [line.rstrip() for line in f]
        self.sw = dict.fromkeys(stopwords)

    def getTerms(self, line):
        """Lowercase, tokenize, stop-word-filter and stem one line of text."""
        line = line.lower()
        # put spaces instead of non-alphanumeric characters
        line = re.sub(r'[^a-z0-9 ]', ' ', line)
        line = line.split()
        line = [x for x in line if x not in self.sw]
        line = [porter.stem(word, 0, len(word) - 1) for word in line]
        return line

    def getPostings(self, terms):
        """Return the postings list of each term.

        All terms in the list are guaranteed to be in the index.
        """
        return [self.index[term] for term in terms]
|
from flask import Blueprint, jsonify
from app.models.ability import Ability
from app import db
blueprint = Blueprint('ability_api', __name__, url_prefix='/api/ability')
@blueprint.route('/')
def list():
    """Return every Ability as JSON (endpoint 'ability_api.list').

    The function name shadows the builtin but is kept: Flask derives the
    endpoint name from it.
    """
    abilities = db.session.query(Ability).all()
    rows = [[ability.name, ability.description] for ability in abilities]
    return jsonify(columns=['name', 'description'], data=rows)
|
import argparse
from besttags import Manager
def main():
    """CLI entry point: parse arguments and print or save the best hashtags."""
    parser = argparse.ArgumentParser(
        description="Get the best hashtags for your post")
    parser.add_argument('tags', nargs='+',
                        help="The tags you are interested in")
    parser.add_argument('--fix', nargs='+',
                        help="Some fix tags")
    # BUG FIX: the help text was a copy-paste of --fix's ("Some fix tags").
    parser.add_argument('--limit', type=int, default=30,
                        help="Maximum number of tags to return")
    parser.add_argument('--kind', type=str, default='simple',
                        choices=['simple', 'all', 'test'],
                        help="Different types of how the tags are determined")
    parser.add_argument('--file', type=str,
                        help="Save the result in a file")
    parser.add_argument('--list', action='store_true',
                        help="Display every tag in a single line")
    args = parser.parse_args()
    values = {
        'count': args.limit,  # already an int: argparse coerced it (type=int)
        'fix': args.fix if args.fix else [],
        'kind': args.kind,
    }
    best = Manager(**values)
    result = best(args.tags)
    if args.list:
        for tag in result:
            print(tag)
    elif args.file:
        result.save(args.file)
    else:
        print(result)

if __name__ == '__main__':
    main()
|
def solution(S, K):
    """Reformat a license-key-like string.

    Drops existing dashes, groups the remaining characters K at a time from
    the LEFT, joins the groups with '-', and upper-cases the result.
    NOTE(review): a trailing '-' can appear when a group boundary falls on a
    run of dashes at the end of S -- confirm against the intended spec.
    """
    pieces = []
    filled = 0  # characters emitted in the current group
    for ch in S:
        if filled == K:
            pieces.append('-')
            filled = 0
        if ch != '-':
            pieces.append(ch)
            filled += 1
    return ''.join(pieces).upper()
|
import json
import random
# Paths to the cached API payload files served by the article app.
config_file = 'article_app/content_api.json'
stock_file = 'article_app/quotes_api.json'
# Create Controller additions here
def process_config_file(config = config_file):
    """Load the JSON file at `config` and return the parsed data.

    :param config: path to a JSON file (defaults to the content API cache)
    :return: the deserialized JSON payload
    """
    # Removed the stray debug leftovers (`import os; print(os.getcwd())`)
    # that printed the working directory to stdout on every call.
    with open(config) as json_file:
        return json.load(json_file)
def get_first_article(config=config_file):
    """Return the first article tagged with the '10-promise' slug, or None."""
    articles = process_config_file(config)['results']
    for article in articles:
        for tag in article.get('tags', []):
            if tag['slug'] == '10-promise':
                return article
def get_random_three_articles(config=config_file):
    """Pick three distinct random articles, excluding the '10-promise' lead article."""
    excluded = get_first_article(config)
    pool = process_config_file(config)['results']
    picks = []
    while len(picks) < 3:
        candidate = random.choice(pool)
        if candidate != excluded and candidate not in picks:
            picks.append(candidate)
    return picks
def get_three_stocks(config=stock_file):
    """Pick three distinct random stock quotes from the quotes data file."""
    quotes = process_config_file(config)
    picks = []
    while len(picks) < 3:
        candidate = random.choice(quotes)
        if candidate not in picks:
            picks.append(candidate)
    return picks
def retrience_article_by_uuid(uuid):
    """Return the article whose 'uuid' field matches, or None.

    (The misspelled name is kept: it is the public interface.)
    """
    articles = process_config_file(config_file)['results']
    return next((article for article in articles if article['uuid'] == uuid), None)
|
import os
from globus_sdk.exc import GlobusSDKUsageError
def _on_windows():
    """
    Return True when running on Windows (``os.name == "nt"``).

    Per python docs, this is a safe, reliable way of checking the platform.
    sys.platform offers more detail -- more than we want, in this case.
    """
    return os.name == "nt"
class LocalGlobusConnectPersonal(object):
    r"""
    Represents the SDK's view of a locally installed Globus Connect
    Personal endpoint: methods for inspecting and controlling the
    running installation.

    These objects do *not* inherit from BaseClient and do not provide
    methods for interacting with any Globus Service APIs.
    """

    def __init__(self):
        # Lazily populated cache backing the ``endpoint_id`` property.
        self._endpoint_id = None

    @property
    def endpoint_id(self):
        """
        :type: string

        The endpoint ID of the local Globus Connect Personal endpoint
        installation, read from the installation's ``client-id.txt``
        file on first access and cached afterwards.

        Usage:

        >>> from globus_sdk import TransferClient, LocalGlobusConnectPersonal
        >>> local_ep = LocalGlobusConnectPersonal()
        >>> ep_id = local_ep.endpoint_id
        >>> tc = TransferClient(...)  # needs auth details
        >>> for f in tc.operation_ls(ep_id):
        >>>     print("Local file: ", f["name"])

        You can also reset the value, causing it to load again on next access,
        with ``del local_ep.endpoint_id``
        """
        if self._endpoint_id is not None:
            return self._endpoint_id
        if _on_windows():
            appdata = os.getenv("LOCALAPPDATA")
            if appdata is None:
                raise GlobusSDKUsageError(
                    "LOCALAPPDATA not detected in Windows environment"
                )
            fname = os.path.join(appdata, "Globus Connect\\client-id.txt")
        else:
            fname = os.path.expanduser("~/.globusonline/lta/client-id.txt")
        try:
            with open(fname) as fp:
                self._endpoint_id = fp.read().strip()
        except IOError as e:
            # errno 2 (no such file or directory) means GCP is simply
            # not installed -> leave the cached value unset.
            if e.errno != 2:
                raise
        return self._endpoint_id

    @endpoint_id.deleter
    def endpoint_id(self):
        """
        Deleter for LocalGlobusConnectPersonal.endpoint_id
        """
        self._endpoint_id = None
|
import inspect
from models.tf_scikit_template import BaseTfScikitClassifier
from models.classifier.oselm import OSELM
class SciKitOSELM(BaseTfScikitClassifier):
    """Scikit-learn style wrapper around the OSELM classifier.

    Every constructor argument is stored verbatim as an instance
    attribute of the same name (including the catch-all ``kwargs``
    dict) and consumed later by ``init_clf``/``fit``.
    """

    def __init__(self,
                 input_dim=None,
                 output_dim=None,
                 hidden_num=None,
                 batch_size=None,
                 flag_preprocess=False,
                 tensorboard_path=None,
                 X_valid=None,
                 Y_valid=None,
                 save_dir_path=None,
                 **kwargs
                 ):
        # Explicit assignments replacing the original inspect-based bulk
        # setattr; the resulting attributes are identical.
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.hidden_num = hidden_num
        self.batch_size = batch_size
        self.flag_preprocess = flag_preprocess
        self.tensorboard_path = tensorboard_path
        self.X_valid = X_valid
        self.Y_valid = Y_valid
        self.save_dir_path = save_dir_path
        self.kwargs = kwargs

    def init_clf(self):
        """Instantiate the wrapped OSELM model from the stored settings."""
        self.clf = OSELM(self.input_dim,
                         self.output_dim,
                         hidden_num=self.hidden_num,
                         batch_size=self.batch_size,
                         flag_preprocess=self.flag_preprocess,
                         tensorboard_path=self.tensorboard_path)

    def fit(self, X, y=None, **kwargs):
        """Fit the wrapped model; validation data comes from the constructor."""
        super().fit(**kwargs)
        self.clf.train(X, y,
                       X_valid=self.X_valid,
                       Y_valid=self.Y_valid,
                       save_dir_path=self.save_dir_path,
                       )
        return self
|
#!/usr/bin/python
from Edge import Edge
from GreedyEdgeSelection import GreedyEdgeSelection
from GreedyTreeGrowing import GreedyTreeGrowing
def loadGraphFrom(filename):
    """Read a graph file and return (V, E).

    The first line holds the vertex count V; every following line
    describes one edge and is handed to the Edge constructor.

    Fixes: the file handle was not closed if int()/Edge() raised; the
    with-block guarantees closure on every path.
    """
    with open(filename, 'r') as f:
        V = int(f.readline())
        E = [Edge(line) for line in f.readlines()]
    return V, E
def main():
    # Command-line driver: parse arguments, load the graph, run the
    # selected partitioning algorithm and print the partition.
    # NOTE: this module uses Python 2 print statements and is Python 2 only.
    import argparse
    parser = argparse.ArgumentParser(description="Tree Partition")
    parser.add_argument('algo', metavar='algo', type=str, help='GreedyTree/GreedyEdge')
    parser.add_argument('graphFileName', metavar='graphFile', type=str, help='Graph File Name')
    parser.add_argument('maxTreeSize', metavar='maxTreeSize', type=int, help='The max size of trees. Set to -1 if you do not want to control this.')
    parser.add_argument('--outputcolor', dest='outColor', action='store_true')
    args = parser.parse_args()
    V, E = loadGraphFrom(args.graphFileName)
    #print args.algo
    if args.algo == "GreedyTree":
        partition = GreedyTreeGrowing(V, E, args.maxTreeSize)
    else:
        if args.algo == "GreedyEdge":
            partition = GreedyEdgeSelection(V, E, args.maxTreeSize)
        else:
            print "ERROR! Unknown algorithm"
            return
    # Largest tree first, for stable and readable output.
    partition = sorted(partition, key=len, reverse=True)
    if args.outColor:
        # Color output: one tree id per vertex (vertex labels are 1-based).
        treeid = 0
        color = [0] * V
        for tree in partition:
            treeid += 1
            for vertex in tree:
                color[vertex-1] = treeid
        print color
    else:
        # Matrix output: each tree's sorted vertices, zero-padded to the
        # size of the largest tree.
        maxlen = len(partition[0])
        for tree in partition:
            tree.sort()
            print tree + [0] * (maxlen - len(tree))
    #print partition
if __name__ == "__main__":
    main()
|
# Copyright (c) Ralph Meijer.
# See LICENSE for details.
"""
XMPP Component Service.
This provides an XMPP server that accepts External Components connections
and accepts and initiates server-to-server connections for the specified
domain(s).
"""
from twisted.application import service, strports
from twisted.python import usage
from twisted.words.protocols.jabber import component
from wokkel import server
class Options(usage.Options):
    # Command-line options for the XMPP router/component service.
    # NOTE: twisted renders opt_* docstrings as --help text, so the
    # opt_domain docstring below is user-visible output.
    optParameters = [
            ('component-port', None, 'tcp:5347:interface=127.0.0.1',
                'Port components connect to'),
            ('component-secret', None, 'secret',
                'Secret components use to connect'),
            ('server-port', None, 'tcp:5269',
                'Port other servers connect to'),
            ('server-secret', None, None,
                'Shared secret for dialback verification'),
    ]
    optFlags = [
            ('verbose', 'v', 'Log traffic'),
    ]
    def __init__(self):
        usage.Options.__init__(self)
        # Domains are accumulated by repeated --domain flags.
        self['domains'] = set()
    def opt_domain(self, domain):
        """
        Domain to accept server connections for. Repeat for more domains.
        """
        self['domains'].add(domain)
    def postOptions(self):
        # At least one domain is mandatory for a useful server.
        if not self['domains']:
            raise usage.UsageError('Need at least one domain')
def makeService(config):
    """Build the MultiService hosting the router, the server-to-server
    listener and the external-component listener described by *config*.
    """
    top = service.MultiService()
    router = component.Router()

    # XMPP server service handling the configured domains.
    serverService = server.ServerService(router, secret=config['server-secret'])
    serverService.domains = config['domains']
    serverService.logTraffic = config['verbose']

    # Server-to-server (federation) listener.
    s2sFactory = server.XMPPS2SServerFactory(serverService)
    s2sFactory.logTraffic = config['verbose']
    s2sService = strports.service(config['server-port'], s2sFactory)
    s2sService.setServiceParent(top)

    # External server-side component listener.
    cFactory = component.XMPPComponentServerFactory(router,
                                                    config['component-secret'])
    cFactory.logTraffic = config['verbose']
    cServer = strports.service(config['component-port'], cFactory)
    cServer.setServiceParent(top)

    return top
|
# -*- coding: utf-8 -*-
from django.test import TestCase, Client
from django.utils import timezone
from .models import Bitcoin
class TestPages(TestCase):
    """Page tests for the Bitcoin price views.

    ``setUp`` seeds prices 3, 8, ..., 598 with timestamps spaced so
    that PRICES[0] is the most recent quote.
    """

    # Single source of truth for the seeded prices (was duplicated as a
    # local ``numbers = range(3, 600, 5)`` in every test method).
    PRICES = range(3, 600, 5)

    def setUp(self):
        for i in self.PRICES:
            Bitcoin.objects.create(
                price=i,
                time=timezone.now() - timezone.timedelta(seconds=i),
            )

    def test_index_page(self):
        """The index page renders successfully."""
        response = Client().get('/')
        self.assertEqual(response.status_code, 200)

    def test_current_price(self):
        """The most recent price is displayed."""
        current_price = self.PRICES[0]
        response = Client().get('/')
        self.assertContains(response, current_price)

    def test_avg_price(self):
        """The average over all seeded prices is displayed."""
        avg_price = sum(self.PRICES) / len(self.PRICES)
        response = Client().get('/')
        self.assertContains(response, avg_price)

    def test_min_prices(self):
        """Each 12-quote bucket's minimum price is displayed."""
        response = Client().get('/')
        for i in range(10):
            min_price = min(self.PRICES[i * 12:i * 12 + 12])
            self.assertContains(response, min_price)

    def test_max_prices(self):
        """Each 12-quote bucket's maximum price is displayed.

        Fixes: the bucket maximum was previously stored in a variable
        misleadingly named ``min_price`` (copy-paste from
        test_min_prices).
        """
        response = Client().get('/')
        for i in range(10):
            max_price = max(self.PRICES[i * 12:i * 12 + 12])
            self.assertContains(response, max_price)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from opyscad import *
import config, bed, vertex, screw
# Dimensions of the bed mount, derived from the bed/vertex modules
# (units follow the rest of the opyscad model -- presumably mm, TODO confirm).
height = bed.bar2bed_h
length = 100.0
bed_mount_dy = 43.0
bed_mount_dx = bed.l - bed.hole_dx
# NOTE: this shadows the imported ``screw`` module with the m3 screw object;
# everything below uses the m3 definition.
screw = screw.m3
hole_l = 6.0
bed_hole_dx = 10.0
bar_hole_dx = 10.0
nut_depth = 2.75
def create():
    """Build the bed-mount solid and return it.

    The mount is a cuboid with screw holes and nut pockets on the bed
    side and counter-sunk screw holes on the bar side. The CSG order of
    the -=/+= operations below is significant. (``<<`` appears to
    translate and ``/`` to rotate in opyscad -- TODO confirm against the
    library docs.)
    """
    bar_mount_dx = vertex.base_center2edge() + vertex.hbar_offset_y
    width = bar_mount_dx - bed_mount_dx + bed_hole_dx + bar_hole_dx
    # Base plate, shifted so the bed-side holes land at x == 0.
    res = cube([width, length, height]) << [-bed_hole_dx, -length/2, 0]
    # Bed-side screw holes with octagonal nut pockets, flipped upside down.
    cut = screw.hole(height + 1, nut_depth + 1, octo = True) / [180, 0, 0]
    res -= cut << [0, bed_mount_dy, nut_depth]
    res -= cut << [0, -bed_mount_dy, nut_depth]
    # Thin support bridges over the nut pockets (printability aid).
    sup = screw.z_sup(nut_depth + 0.1) / [180, 0, 0]
    res += sup << [0, bed_mount_dy, nut_depth + 0.1]
    res += sup << [0, -bed_mount_dy, nut_depth + 0.1]
    # Bar-side screw holes, sunk from the top face.
    cut = screw.hole(height + 1, nut_depth + 1) << [bar_mount_dx - bed_mount_dx, 0, height - nut_depth]
    res -= cut << [0, bed_mount_dy, 0]
    res -= cut << [0, -bed_mount_dy, 0]
    return res
if __name__ == '__main__':
    create().save('scad/bed_mount.scad')
|
import pycom
import socket
import ssl
import sys
import time
from network import LTE
# RGB colour constants (0xRRGGBB) for the on-board LED.
BLACK = 0x000000
WHITE = 0xFFFFFF
RED = 0xFF0000
GREEN = 0x00FF00
BLUE = 0x0000FF
YELLOW = 0xFFFF00
# send AT command to modem and return response as list
def at(cmd):
    """Send an AT command to the cell modem and return the non-empty
    response lines as a list (echoes both command and response).
    """
    print("modem command: {}".format(cmd))
    raw = lte.send_at_cmd(cmd).split('\r\n')
    lines = [line for line in raw if line]
    print("response={}".format(lines))
    return lines
def blink(rgb, n):
    """Flash the LED *n* times in colour *rgb* (0.25 s on, 0.1 s off)."""
    for _ in range(n):
        pycom.rgbled(rgb)
        time.sleep(0.25)
        pycom.rgbled(BLACK)
        time.sleep(0.1)
#####################################################################
# Connectivity smoke test: set up the modem, (re)attach to the network,
# open one TLS connection, then tear everything down.
print("CAT M1 Test - V0.6 - 4/20/18")
# input("")
# r = input("Enter anything to abort...") # allow opportunity to stop program
# if r != "":
#     sys.exit(0)
print("disable MicroPython control of LED")
pycom.heartbeat(False)
pycom.rgbled(WHITE)
print("instantiate LTE object")
lte = LTE(carrier="verizon")
print("delay 4 secs")
time.sleep(4.0)
print("reset modem")
# The bare except clauses below are deliberate best-effort cleanup on
# MicroPython: the test should continue even if a modem call fails.
try:
    lte.reset()
except:
    print("Exception during reset")
print("delay 5 secs")
time.sleep(5.0)
if lte.isattached():
    try:
        print("LTE was already attached, disconnecting...")
        if lte.isconnected():
            print("disconnect")
            lte.disconnect()
    except:
        print("Exception during disconnect")
    try:
        if lte.isattached():
            print("detach")
            # NOTE(review): 'dettach' is the (misspelled) method name in
            # older pycom firmware -- confirm for the installed version.
            lte.dettach()
    except:
        print("Exception during dettach")
    try:
        print("resetting modem...")
        lte.reset()
    except:
        print("Exception during reset")
    print("delay 5 secs")
    time.sleep(5.0)
# enable network registration and location information, unsolicited result code
at('AT+CEREG=2')
# print("full functionality level")
at('AT+CFUN=1')
time.sleep(1.0)
# using Hologram SIM: define the PDP context manually
at('AT+CGDCONT=1,"IP","hologram"')
print("attempt to attach cell modem to base station...")
# lte.attach() # do not use attach with custom init for Hologram SIM
at("ATI")
time.sleep(2.0)
i = 0
# Poll registration status until the modem reports attached; blink the
# LED with the current status code while waiting.
while lte.isattached() == False:
    # get EPS Network Registration Status:
    # +CEREG: <stat>[,[<tac>],[<ci>],[<AcT>]]
    # <tac> values:
    # 0 - not registered
    # 1 - registered, home network
    # 2 - not registered, but searching...
    # 3 - registration denied
    # 4 - unknown (out of E-UTRAN coverage)
    # 5 - registered, roaming
    r = at('AT+CEREG?')
    try:
        r0 = r[0] # +CREG: 2,<tac>
        r0x = r0.split(',') # ['+CREG: 2',<tac>]
        tac = int(r0x[1]) # 0..5
        print("tac={}".format(tac))
    except IndexError:
        # Malformed/empty response: treat as "not registered".
        tac = 0
        print("Index Error!!!")
    # get signal strength
    # +CSQ: <rssi>,<ber>
    # <rssi>: 0..31, 99-unknown
    r = at('AT+CSQ')
    # extended error report
    # r = at('AT+CEER')
    if lte.isattached():
        print("Modem attached (isattached() function worked)!!!")
        break
    if (tac==1) or (tac==5):
        # Registered (home or roaming) even if isattached() lags behind.
        print("Modem attached!!!")
        break
    i = i + 5
    print("not attached: {} secs".format(i))
    if (tac != 0):
        blink(BLUE, tac)
    else:
        blink(RED, 5)
    time.sleep(2)
at('AT+CEREG?')
print("connect: start a data session and obtain an IP address")
lte.connect(cid=3)
i = 0
while not lte.isconnected():
    i = i + 1
    print("not connected: {}".format(i))
    blink(YELLOW, 1)
    time.sleep(1.0)
print("connected!!!")
pycom.rgbled(BLUE)
# NOTE(review): ssl.wrap_socket performs no certificate validation here;
# acceptable for a connectivity smoke test only.
s = socket.socket()
s = ssl.wrap_socket(s)
print("get www.google.com address")
addr = socket.getaddrinfo('www.google.com', 443)
print(addr)
print("connect to {}".format(addr[0][-1]))
s.connect(addr[0][-1])
print("GET 50 bytes from google")
s.send(b"GET / HTTP/1.0\r\n\r\n")
print(s.recv(50)) # receive 50 bytes
print("close socket")
s.close()
try:
    lte.disconnect()
except:
    print("Exception during disconnect")
try:
    # NOTE(review): 'dettach' matches the misspelled pycom firmware API
    # name -- confirm for the installed firmware version.
    lte.dettach()
except:
    print("Exception during dettach")
# end of test: blink green forever
print("end of test")
while True:
    blink(GREEN,5)
    time.sleep(1.0)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 1 10:51:11 2018
@author: junseon
"""
import functools
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras import backend as K
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
from sklearn.metrics import confusion_matrix
from sklearn.decomposition import PCA
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
class Neural_Network:
    """Fully connected binary classifier built on Keras.

    The first layer has ``input_dim`` units and each subsequent hidden
    layer shrinks by a factor 1/(i+2); the output is a single sigmoid
    unit trained with binary cross-entropy (Adam optimizer).
    """

    def __init__(self, input_dim, output_dim=1, learning_rate=0.0003, epochs=10000, batch_size=256, hidden_layer=10, batch_normalization=False):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.learning_rate = learning_rate
        self.epochs = epochs
        self.batch_size = batch_size
        # One of the requested layers is consumed by the input block.
        self.hidden_layer = hidden_layer-1
        self.first_hidden_node = input_dim
        self.model = Sequential()
        self.batch_normalization = batch_normalization

    def _add_block(self, units, activation, dropout, **dense_kwargs):
        # One Dense (+ optional BatchNorm) + Activation (+ optional Dropout)
        # stack; shared by the input, hidden and output layers.
        self.model.add(Dense(int(units), **dense_kwargs))
        if self.batch_normalization:
            self.model.add(BatchNormalization())
        self.model.add(Activation(activation))
        if dropout:
            self.model.add(Dropout(0.5))

    def build_model(self):
        """Assemble and compile the model.

        Refactor: the former duplicated batch-norm / plain branches are
        folded into ``_add_block``; the layer sequence is unchanged.
        """
        # Input Layer
        self._add_block(self.first_hidden_node, 'relu', True,
                        input_dim=self.input_dim)
        # Hidden Layers
        for i in range(self.hidden_layer):
            self._add_block(self.first_hidden_node / (i + 2), 'relu', True)
        # Output Layer (no dropout)
        self._add_block(self.output_dim, 'sigmoid', False)
        adam = Adam(lr=self.learning_rate)
        self.model.compile(optimizer=adam,
                           loss='binary_crossentropy',
                           metrics=['accuracy'])

    def fit(self, X_train, y_train, X_val, y_val):
        """Train with early stopping (patience 10), plot the learning
        curves and return the Keras History object.

        Fixes: the accuracy curves were plotted on the loss axis while
        the twin accuracy axis (and its legend) stayed empty.
        """
        early_stopping = EarlyStopping(patience=10)
        hist = self.model.fit(X_train, y_train,
                              epochs=self.epochs, batch_size=self.batch_size,
                              validation_data=(X_val, y_val),
                              callbacks=[early_stopping])
        fig, loss_ax = plt.subplots(figsize=(8,6))
        acc_ax = loss_ax.twinx()
        loss_ax.plot(hist.history['loss'], 'y', label='train loss')
        loss_ax.plot(hist.history['val_loss'], 'r', label='val loss')
        # Accuracy belongs on the secondary (right-hand) axis.
        acc_ax.plot(hist.history['acc'], 'b', label='train acc')
        acc_ax.plot(hist.history['val_acc'], 'g', label='val acc')
        loss_ax.set_xlabel('epoch')
        loss_ax.set_ylabel('loss')
        acc_ax.set_ylabel('accuracy')
        loss_ax.legend(loc='upper left')
        acc_ax.legend(loc='lower left')
        plt.show()
        return hist

    def evaluate(self, X_test, y_test):
        """Return [loss, accuracy] on the test set."""
        return self.model.evaluate(X_test, y_test, batch_size=64)

    def predict(self, X_test, y_test):
        """Predict classes for X_test and draw the confusion matrix."""
        y_pred = self.model.predict_classes(X_test)
        self.__draw_confusion_matrix__(y_test, y_pred)
        return y_pred

    @staticmethod
    def __as_keras_metric__(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            """ Wrapper for turning tensorflow metrics into keras metrics """
            # Fixes: ``args`` was a single positional parameter, so
            # multi-argument metrics received a tuple instead of the
            # unpacked arguments.
            value, update_op = method(self, *args, **kwargs)
            K.get_session().run(tf.local_variables_initializer())
            with tf.control_dependencies([update_op]):
                value = tf.identity(value)
            return value
        return wrapper

    def __draw_confusion_matrix__(self, y_test, y_pred):
        # Render the confusion matrix as an annotated matshow plot.
        confmat = confusion_matrix(y_true=y_test, y_pred=y_pred)
        fig, ax = plt.subplots(figsize=(5, 5))
        ax.matshow(confmat, cmap=plt.cm.Blues, alpha=0.3)
        for i in range(confmat.shape[0]):
            for j in range(confmat.shape[1]):
                ax.text(x=j, y=i, s=confmat[i, j], va='center', ha='center')
        plt.xlabel('predicted label')
        plt.ylabel('true label')
        plt.show()
if __name__ == "__main__":
    # Experiment driver: trains the network on raw and PCA-reduced
    # botnet data for several depths; the inline comments record the
    # observed (accuracy, precision, recall) of earlier runs.
    from botnet_data_loader import Botnet_Data_Loader as loader
    from botnet_preprocessor import Botnet_Processor as processor
    data = loader().botnet_data(sample_size=800000)
    botnet_processor = processor(data=data)
    X_train, X_test, y_train, y_test = botnet_processor.preprocess()
    # Second half of the training split becomes the validation set.
    X_val = X_train[400000:, :]
    X_train = X_train[:400000, :]
    y_val = y_train[400000:]
    y_train = y_train[:400000]
    pca = PCA(n_components=110)
    X_train_pca, X_val_pca, X_test_pca = botnet_processor.feature_extract_pca(pca, X_train, X_val, X_test)
    mms = MinMaxScaler()
    X_train_pca = mms.fit_transform(X_train_pca)
    X_val_pca = mms.transform(X_val_pca)
    X_test_pca = mms.transform(X_test_pca)
    # 1
    # 98.54, Inf, 0
    # 98.54, Inf, 0
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=25, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=25, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # 2
    # 98.54, Inf, 0
    # 98.54, Inf, 0
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=30, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=30, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # 3
    # 98.54, Inf, 0
    # 98.54, Inf, 0
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=35, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=35, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # ------------------------------------------------------------
    # Re-run with a balanced (50/50) class distribution.
    data = loader().botnet_data(sample_size=800000, class_rate=0.5)
    botnet_processor = processor(data=data)
    X_train, X_test, y_train, y_test = botnet_processor.preprocess()
    X_val = X_train[400000:, :]
    X_train = X_train[:400000, :]
    y_val = y_train[400000:]
    y_train = y_train[:400000]
    pca = PCA(n_components=110)
    X_train_pca, X_val_pca, X_test_pca = botnet_processor.feature_extract_pca(pca, X_train, X_val, X_test)
    mms = MinMaxScaler()
    X_train_pca = mms.fit_transform(X_train_pca)
    X_val_pca = mms.transform(X_val_pca)
    X_test_pca = mms.transform(X_test_pca)
    # 4
    # 95.94, 99.57, 88.08
    # 67.00, Inf, 0
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=25, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=25, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # 5
    # 94.59, 86.37, 99.27
    # 67.00, Inf, 0
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=30, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=30, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # 6
    nn = Neural_Network(input_dim=X_train.shape[1], hidden_layer=35, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train, y_train, X_val, y_val)
    print(nn.evaluate(X_test, y_test))
    y_pred = nn.predict(X_test, y_test)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=35, learning_rate=0.0003)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
    # ------------------------------------------------------------
    # Final run: fresh balanced sample, smaller network / lr / batch.
    data = loader().botnet_data(sample_size=800000, class_rate=0.5)
    botnet_processor = processor(data=data)
    X_train, X_test, y_train, y_test = botnet_processor.preprocess()
    X_val = X_train[400000:, :]
    X_train = X_train[:400000, :]
    y_val = y_train[400000:]
    y_train = y_train[:400000]
    pca = PCA(n_components=110)
    X_train_pca, X_val_pca, X_test_pca = botnet_processor.feature_extract_pca(pca, X_train, X_val, X_test)
    mms = MinMaxScaler()
    X_train_pca = mms.fit_transform(X_train_pca)
    X_val_pca = mms.transform(X_val_pca)
    X_test_pca = mms.transform(X_test_pca)
    nn = Neural_Network(input_dim=X_train_pca.shape[1], hidden_layer=20, learning_rate=0.0001, batch_size=128)
    nn.build_model()
    nn.fit(X_train_pca, y_train, X_val_pca, y_val)
    print(nn.evaluate(X_test_pca, y_test))
    y_pred = nn.predict(X_test_pca, y_test)
|
import logging
import json
from flask import render_template
from flask_wtf import Form
from wtforms import fields
from wtforms.validators import Required
import pandas as pd
from . import app, estimator, target_names
from violent_fe import feature_engineer
logger = logging.getLogger('app')
class PredictForm(Form):
    """Fields for Predict.

    Select fields for month, day of week, time band and patrol
    zone/beat; the submitted values feed the crime-type estimator in
    ``index``.
    """
    month_list = [(1, "January"), (2, "February"), (3, "March"), (4, "April"), (5, "May"), (6, "June"), (7, "July"),
                  (8, "August"), (9, "September"), (10, "October"), (11, "November"), (12, "December")]
    month = fields.SelectField("Months", choices=month_list, coerce = int)
    day_list=[(1,"Monday"), (2, "Tuesday"), (3, "Wednesday"), (4, "Thursday"), (5, "Friday"), (6, "Saturday"),
              (7, "Sunday")]
    day = fields.SelectField("Days", choices=day_list, coerce = int)
    # Time bands keyed by a representative hour of day.
    time_list = [(6,'Day'), (17,'Evening'), (2,'LateNight')]
    time=fields.SelectField("Times", choices=time_list, coerce = int)
    # Police zone/beat identifiers (value == label).
    loc_list = [('D2','D2'), ('R3','R3'), ('J2','J2'), ('B2','B2'), ('C1','C1'), ('N1','N1'), ('K1','K1'), ('Q3','Q3'),
                ('M3','M3'), ('U3','U3'), ('M2', 'M2'), ('N3', 'N3'), ('E1', 'E1'), ('J3','J3'), ('B3','B3'), ('D1','D1'), ('Q1','Q1'), ('C2','C2'),
                ('F1','F1'), ('L2','L2'), ('E3', 'E3'), ('G3','G3'), ('D3','D3'), ('W1', 'W1'), ('O2','O2'), ('Q2','Q2'),
                ('S2','S2'), ('F2','F2'), ('K3','K3'), ('W3','W3'), ('N2','N2'), ('F3','F3'), ('U2','U2'), ('B1','B1'), ('R1','R1'),
                ('J1','J1'), ('O1','O1'), ('L1','L1'), ('K2','K2'), ('M1','M1'),('S1','S1'), ('O3','O3'), ('E2','E2'), ('S3','S3'),
                ('G1','G1'), ('C3','C3'), ('W2','W2'), ('G2','G2'), ('L3','L3'), ('U1','U1'), ('R2','R2'), ('W','W'), ('DS','DS'),
                ('99','99'), ('E','E'), ('BS','BS'), ('S','S'), ('WP','WP'), ('US','US'), ('MS','MS'), ('FS','FS'), ('KS','KS'),
                ('WS','WS'), ('OS','OS'), ('N','N'), ('CTY','CTY'),('KCIO07','KCIO07'), ('SS','SS'), ('CS','CS'), ('DET','DET'),
                ('TRF','TRF'), ('JS','JS'), ('EP','EP'), ('LS','LS'), ('H3','H3'), ('RS','RS'),('NP','NP'), ('INV','INV'),
                ('EDD','EDD'),('COMM','COMM'), ('ES','ES'), ('GS','GS'), ('CCD','CCD'), ('SCTR1','SCTR1'), ('NS','NS'), ('QS','QS')]
    loc=fields.SelectField("Locations", choices=loc_list)
    # HR = fields.DecimalField('HR:', places=2, validators=[Required()])
    # sepal_width = fields.DecimalField('D:', places=2, validators=[Required()])
    # petal_length = fields.DecimalField('R:', places=2, validators=[Required()])
    # petal_width = fields.DecimalField('J:', places=2, validators=[Required()])
    submit = fields.SubmitField('Submit')
@app.route('/', methods=('GET', 'POST'))
def index():
    """Index page"""
    # Render the predict form; on valid POST, run the estimator on the
    # submitted month/day/time/zone and show the predicted crime type.
    # NOTE: ``print form.errors`` below is a Python 2 print statement --
    # this module is Python 2 only.
    form = PredictForm()
    predicted_crime = None
    if form.validate_on_submit():
        # store the submitted values
        submitted_data = form.data
        #import pdb;pdb.set_trace()
        # Retrieve values from form
        month = float(submitted_data['month'])
        day = float(submitted_data['day'])
        time = float(submitted_data['time'])
        loc = (submitted_data['loc'])
        # Create array from values (single-row DataFrame for the
        # feature-engineering step).
        d={'Scene_Month':[month], 'Scene_DayofWeek':[day], 'Hour':[time], 'Zone/Beat':[loc]}
        call_instance = pd.DataFrame(d)
        X_test=feature_engineer(call_instance)
        my_prediction = estimator.predict(X_test.values)
        # Return only the Predicted iris species
        predicted_crime = target_names[my_prediction]
    else:
        print form.errors
    return render_template('index.html',
                           form=form,
                           prediction=predicted_crime)
|
from reservation.reservation_handler import _ReservationHandler
from reservation.reservation_result import ReservationResult
class ReservationHandlerUseList(_ReservationHandler):
    # Permissive handler: it never conflicts with any other reservation.
    def allows_reservation(self, other_reservation_handler):
        # Unconditionally allow coexistence with the other handler.
        return ReservationResult.TRUE
|
class ListNode:
    # Singly linked list node: payload ``val`` plus ``next`` pointer.
    def __init__(self, x):
        self.val = x
        self.next = None
class Solution:
    """Sort a singly linked list in O(n log n) time via merge sort."""

    def sortList(self, head):
        """Return the list starting at *head* sorted ascending by ``val``.

        Fixes: the entry point was an unimplemented ``pass`` (always
        returning None) even though the merge-sort machinery existed.
        """
        return self.merge_sort(head)

    def merge(self, l, r):
        """Merge two sorted lists recursively, reusing the nodes."""
        if l is None:
            return r
        if r is None:
            return l
        if l.val < r.val:
            result = l
            result.next = self.merge(l.next, r)
        else:
            result = r
            result.next = self.merge(l, r.next)
        return result

    def merge_sort(self, head):
        """Split the list at its middle, sort both halves, merge them."""
        if head is None or head.next is None:
            return head
        mid = self.get_mid(head)
        right = mid.next
        mid.next = None  # sever the two halves before recursing
        return self.merge(self.merge_sort(head), self.merge_sort(right))

    def get_mid(self, head):
        """Return the last node of the first half (tortoise/hare walk).

        Fixes: the hare previously started even with the tortoise, so
        for a two-node list the "middle" was the second node and the
        split left the list unchanged, causing infinite recursion in
        merge_sort.
        """
        if head is None:
            return None
        tortoise = head
        hare = head
        while hare.next and hare.next.next:
            hare = hare.next.next
            tortoise = tortoise.next
        return tortoise
|
# Generated by Django 2.0.1 on 2018-01-17 02:03
import django.contrib.gis.db.models.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Initial migration: creates the Letters table with free-text content,
    # raw lon/lat floats and a GeoDjango point (SRID 4326 / WGS84).
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Letters',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('text', models.CharField(max_length=1000)),
                ('lon', models.FloatField()),
                ('lat', models.FloatField()),
                ('point', django.contrib.gis.db.models.fields.PointField(srid=4326)),
            ],
        ),
    ]
|
__author__ = 'Josh'
import os
import urllib
# Lowercase English alphabet, split into vowels and consonants for the
# plural-detection heuristics further down.
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
vowels = ['a', 'e', 'i', 'o', 'u']
consonants = [x for x in alphabet if x not in vowels]
def join_with_spaces(lst):
    """Join a list of strings into a single space-separated string.

    Fixes: the manual index loop raised IndexError on an empty list and
    built the result with quadratic string concatenation; ``str.join``
    returns '' for an empty list and runs in linear time.
    """
    return ' '.join(lst)
class Word(str):
    """A string subclass carrying an optional part-of-speech ``tag``.

    Indexing and len() delegate to the stored ``word`` text; ``tag`` is
    None until one of the tagger functions assigns it.
    """
    def __init__(self, word):
        self.word = word
        self.tag = None  # filled in later by set_tag
    def __getitem__(self, i):
        return self.word[i]
    def __len__(self):
        return len(self.word)
    def set_tag(self, part):
        self.tag = part
class Sentence(object):
    """A tokenized sentence of Word objects with tagging helpers.

    Construction strips the final punctuation character, records
    whether it was a question mark (``self.question``), lower-cases the
    text and splits on whitespace.
    """

    def __init__(self, text):
        self.words = [Word(token) for token in self.remove_punc(text)]

    def __repr__(self):
        return join_with_spaces(self.words)

    def __str__(self):
        return str(join_with_spaces(self.words))

    def __contains__(self, other):
        # Membership is tested against the raw token strings.
        return other in [w.word for w in self.words]

    def __len__(self):
        return len(self.words)

    def __getitem__(self, i):
        # Out-of-range (or negative) indices yield False instead of
        # raising, so the rule-based taggers can probe neighbours freely.
        if i < 0 or i > len(self.words) - 1:
            return False
        return self.words[i]

    def index(self, elem):
        return self.words.index(elem)

    def get_previous(self, i):
        return self.words[i-1]

    def get_next(self, i):
        return self.words[i+1]

    def check_tag(self, i):
        # Same out-of-range convention as __getitem__: False when the
        # index is invalid, otherwise the word's tag (possibly None).
        if i < 0 or i > len(self.words) - 1:
            return False
        return self.words[i].tag

    def set_tag(self, i, tag):
        self.words[i].set_tag(tag)

    def append_tag(self, i, app):
        self.words[i].set_tag(self.words[i].tag + app)

    def show_tags(self):
        return [(word, word.tag) for word in self.words]

    def is_last_word(self, i):
        return i == len(self.words) - 1

    def has_comma(self, i):
        # True when the i-th token ends with a comma; False when the
        # index is out of range.
        if i < 0 or i > len(self.words) - 1:
            return False
        return self.words[i][-1] == ','

    def remove_punc(self, text):
        """Record question-ness, drop the final character (assumed to be
        punctuation), lower-case and tokenize.

        Fixes: the two branches duplicated identical stripping logic;
        only the ``question`` flag differed between them.
        """
        self.question = text[-1] == '?'
        return text[:-1].lower().split()
class Word_Ref(object):
    """Part-of-speech reference word list, used for tagging and lookup.

    The open-class selections ('Verbs', 'Nouns', 'Adjectives',
    'Adverbs') download their word list from GitHub; the closed-class
    selections are defined inline. Any other selection raises
    ReferenceError.

    Fixes:
      * ``urllib.request`` is now imported explicitly -- the module only
        does ``import urllib``, which does not load the ``request``
        submodule in Python 3, so every download branch raised
        AttributeError.
      * Several inline lists were missing commas, so adjacent string
        literals silently concatenated ('her' 'his' -> 'herhis',
        'cannot' 'does' -> 'cannotdoes', 'when' 'whenever' ->
        'whenwhenever', 'whereas' 'before' -> 'whereasbefore'), losing
        both words in each pair.
    """

    # Remote word lists for the open-class selections.
    _URLS = {
        'Verbs': 'https://raw.githubusercontent.com/jweinst1/Tagineer/master/tagineer/Verbs.txt',
        'Nouns': 'https://raw.githubusercontent.com/jweinst1/Tagineer/master/tagineer/Nouns.txt',
        'Adjectives': 'https://raw.githubusercontent.com/jweinst1/Tagineer/master/tagineer/Adjectives.txt',
        'Adverbs': 'https://raw.githubusercontent.com/jweinst1/Tagineer/master/tagineer/Adverbs.txt',
    }

    def __init__(self, selection):
        if selection in self._URLS:
            import urllib.request  # local import: the file only has ``import urllib``
            txt = urllib.request.urlopen(self._URLS[selection]).read()
            self.reference = txt.decode("utf-8").split()
        elif selection == 'Pronouns':
            self.reference = ['i', 'me', 'my', 'mine', 'myself', 'you', 'your', 'yours', 'yourself', 'he', 'she', 'it', 'him', 'her',
                              'his', 'hers', 'its', 'himself', 'herself', 'itself', 'we', 'us', 'our', 'ours', 'ourselves',
                              'they', 'them', 'their', 'theirs', 'themselves', 'that', 'this']
        elif selection == 'Coord_Conjunc':
            self.reference = ['for', 'and', 'nor', 'but', 'or', 'yet', 'so']
        elif selection == 'Be_Verbs':
            self.reference = ['is', 'was', 'are', 'were', 'could', 'should', 'would', 'be', 'can', 'cant', 'cannot',
                              'does', 'do', 'did', 'am', 'been', 'go']
        elif selection == 'Subord_Conjunc':
            self.reference = ['as', 'after', 'although', 'if', 'how', 'till', 'unless', 'until', 'since', 'where', 'when',
                              'whenever', 'where', 'wherever', 'while', 'though', 'who', 'because', 'once', 'whereas',
                              'before', 'to', 'than']
        elif selection == 'Prepositions':
            self.reference = ['on', 'at', 'in', 'of', 'into', 'from']
        else:
            raise ReferenceError('Must choose a valid reference library.')

    def __contains__(self, other):
        # A trailing comma on the probe word is stripped before lookup.
        if other[-1] == ',':
            return other[:-1] in self.reference
        return other in self.reference
def tag_pronouns(statement):
    """First tagging pass: mark every pronoun token in *statement*.

    Assumes no tags have been assigned yet; returns the statement.
    """
    pronouns = Word_Ref('Pronouns')
    for i in range(len(statement)):
        if statement[i] in pronouns:
            statement.set_tag(i, 'pronoun')
    return statement
def tag_preposition(statement):
    """Mark prepositions and articles in *statement*; returns it."""
    prepositions = Word_Ref('Prepositions')
    articles = ['the', 'an', 'a']
    for i in range(len(statement)):
        if statement[i] in prepositions:
            statement.set_tag(i, 'preposition')
        elif statement[i] in articles:
            statement.set_tag(i, 'article')
    return statement
def tag_be_verbs(statement):
    """Mark auxiliary/'be' verbs as 'verb' in *statement*; returns it."""
    be_verbs = Word_Ref('Be_Verbs')
    for i in range(len(statement)):
        if statement[i] in be_verbs:
            statement.set_tag(i, 'verb')
    return statement
def tag_subord_conj(statement):
    """Mark subordinating conjunctions in *statement*; returns it."""
    subordinators = Word_Ref('Subord_Conjunc')
    for i in range(len(statement)):
        if statement[i] in subordinators:
            statement.set_tag(i, 'subord_conj')
    return statement
def tag_coord_conj(statement):
    """Mark coordinating conjunctions in *statement*; returns it."""
    coordinators = Word_Ref('Coord_Conjunc')
    for i in range(len(statement)):
        if statement[i] in coordinators:
            statement.set_tag(i, 'coord_conj')
    return statement
def tag_avna(statement):
    """Tag remaining (untagged) words as noun/verb/adverb/adjective.

    Words already carrying a tag are left untouched. The lookup order
    below (noun, verb, adverb, adjective) decides ties for words that
    appear in several reference lists, matching the original elif chain.
    """
    adverbs = Word_Ref('Adverbs')
    verbs = Word_Ref('Verbs')
    nouns = Word_Ref('Nouns')
    adjectives = Word_Ref('Adjectives')
    lookups = [(nouns, 'noun'), (verbs, 'verb'),
               (adverbs, 'adverb'), (adjectives, 'adjective')]
    for i in range(len(statement)):
        if statement.check_tag(i) != None:
            continue
        for reference, tag in lookups:
            if statement[i] in reference:
                statement.set_tag(i, tag)
                break
    return statement
def post_processing(statement):
    #corrects errors in tagging based on rule-based deduction.
    # The elif chain is order-sensitive: each rule only fires when the
    # earlier ones did not. check_tag returns False (not None) for
    # out-of-range indices, so neighbour probes at the edges fail safely.
    be_verbs = ['is', 'was', 'are', 'were']
    i = 0
    while i < len(statement):
        # pronoun + "noun" -> the second word is really the verb ("i run").
        if statement.check_tag(i) == 'noun' and statement.check_tag(i-1) == 'pronoun':
            statement.set_tag(i, 'verb')
            i += 1
        # untagged word directly before a verb is assumed to be a noun.
        elif statement.check_tag(i) == None and statement.check_tag(i+1) == 'verb':
            statement.set_tag(i, 'noun')
            i += 1
        # untagged word after a preposition is its object -> noun.
        elif statement.check_tag(i) == None and statement.check_tag(i-1) == 'preposition':
            statement.set_tag(i, 'noun')
            i += 1
        elif statement.check_tag(i) == None and statement.check_tag(i+1) == 'subord_conj':
            statement.set_tag(i, 'noun')
            i += 1
        # two adjacent nouns without a comma -> second one is the verb.
        elif statement.check_tag(i) == 'noun' and statement.check_tag(i-1) == 'noun' and statement.has_comma(i-1) == False:
            statement.set_tag(i, 'verb')
            i += 1
        # NOTE(review): check_tag(i+1) == 'noun' and is_last_word(i) can
        # never both hold (out-of-range check_tag returns False), so this
        # branch looks unreachable; it also omits the ``i += 1`` -- the
        # loop still terminates because the retagged word no longer
        # matches any rule on the next pass.
        elif statement.check_tag(i) == 'noun' and statement.check_tag(i-1) == 'adjective' and statement.check_tag(i+1) == 'noun' and statement.is_last_word(i):
            statement.set_tag(i, 'adjective')
        elif statement.check_tag(i) == 'noun' and statement.check_tag(i-1) == 'article' and statement.check_tag(i+1) == 'noun':
            statement.set_tag(i, 'adjective')
            i += 1
        # "X is Y" with a final noun -> Y is a predicate adjective.
        elif statement.check_tag(i) == 'noun' and statement[i-1] in be_verbs and statement.is_last_word(i) and statement.check_tag(i-2) == 'noun':
            statement.set_tag(i, 'adjective')
            i += 1
        elif statement.check_tag(i) == None and statement.check_tag(i-1) == 'article' and statement.check_tag(i+1) == 'noun':
            statement.set_tag(i, 'adjective')
            i += 1
        elif statement.check_tag(i) == 'noun' and statement.check_tag(i-1) == 'adverb':
            statement.set_tag(i, 'verb')
            i += 1
        else:
            i += 1
    return statement
def tag_noun_plurals(statement):
    """Append '-P' (plural) or '-S' (singular) to every noun tag.

    Heuristic: a word is plural when it ends in consonant+'s' or
    consonant+'es'; everything else is treated as singular.
    """
    for position in range(len(statement)):
        if statement.check_tag(position) != 'noun':
            continue
        word = statement[position]
        plural = word[-1] == 's' and (
            word[-2] in consonants or (word[-2] == 'e' and word[-3] in consonants))
        statement.append_tag(position, '-P' if plural else '-S')
    return statement
def tag_sentence(statement):
    """Run the complete tagging pipeline over an already-built sentence object."""
    tagged = statement
    for stage in (tag_avna, tag_pronouns, tag_preposition, tag_coord_conj,
                  tag_subord_conj, tag_be_verbs, post_processing, tag_noun_plurals):
        tagged = stage(tagged)
    return tagged
def tag_text(text):
    """Build a Sentence from raw *text* and run the complete tagging pipeline."""
    tagged = Sentence(text)
    for stage in (tag_avna, tag_pronouns, tag_preposition, tag_coord_conj,
                  tag_subord_conj, tag_be_verbs, post_processing, tag_noun_plurals):
        tagged = stage(tagged)
    return tagged
def package_sentence(statement):
    """Package a tagged sentence into a displayable string: one "word tag" line per token."""
    parts = []
    for index in range(len(statement)):
        token = statement[index]
        tag = "" if token.tag == None else token.tag
        parts.append(token.word + " " + tag + "\n")
    return "".join(parts)
#condition functions to check for nouns/subjects
#possible secondary processing arguments
def check_articles(i, statement):
    """Return True when the word before position *i* is an article."""
    return statement.get_previous(i) in ['the', 'a', 'an']
def check_simpV(i, statement):
    """Return True when the word after position *i* is a simple/auxiliary verb form.

    Fixes the original word list, where a missing comma fused 'did' and
    'dont' into the single never-matching entry 'diddont'.
    """
    simple = ['is', 'was', 'are', 'were', 'can', 'cannot', 'will', 'do',
              'does', 'did', 'dont', 'would', 'could', 'should', 'has',
              'had', 'have']
    return statement.get_next(i) in simple
def precede_verb(i, statement):
    """Return True when the next word looks like a 3rd-person verb form (consonant + 's')."""
    following = statement.get_next(i)
    return following[-1] == 's' and following[-2] in consonants
|
class ListNode:
    """Singly linked list node: a payload ``val`` plus a ``next`` pointer (None at the tail)."""
    def __init__(self, x):
        self.val = x
        self.next = None
def create_list(arr):
    """Build a singly linked list from *arr*; return its head (None when *arr* is empty)."""
    head = tail = None
    for value in arr:
        node = ListNode(value)
        if head is None:
            head = tail = node
        else:
            tail.next = node
            tail = node
    return head
def show(head):
    """Print the list's values on one line, then a trailing space-terminated newline."""
    node = head
    while node:
        print(node.val, ' ', end='')
        node = node.next
    print(" ")
class Solution:
    """Merge k sorted linked lists via recursive divide and conquer."""
    def _merge_sortlist(self, left, right):
        """Splice two sorted lists together in place; stable (ties favor *left*)."""
        if not left:
            return right
        if not right:
            return left
        # pick the smaller head as the start of the merged list
        if left.val <= right.val:
            head = tail = left
            left = left.next
        else:
            head = tail = right
            right = right.next
        while left and right:
            if left.val <= right.val:
                tail.next, left = left, left.next
            else:
                tail.next, right = right, right.next
            tail = tail.next
        # exactly one side still has nodes; attach the remainder
        tail.next = left if left else right
        return head
    def mergeKLists(self, lists):
        """Merge ``lists`` (a list of sorted list heads) into one sorted list."""
        count = len(lists)
        if count == 0:
            return None
        if count == 1:
            return lists[0]
        if count == 2:
            return self._merge_sortlist(lists[0], lists[1])
        # split (matching the original mid+1 boundary) and recurse on each half
        split = count // 2 + 1
        return self._merge_sortlist(self.mergeKLists(lists[:split]),
                                    self.mergeKLists(lists[split:]))
# Demo: merge three pre-sorted linked lists and print the result.
l1 = create_list([1,4,5])
l2 = create_list([1,3,4])
l3 = create_list([2,6])
arr = [l1,l2,l3]
sl = Solution()
res = sl.mergeKLists(arr)
# Expected output: 1 1 2 3 4 4 5 6
show(res)
|
import pandas as pd
# Load the analysis table from a hard-coded local path and index it by month,
# keeping 'month' as a regular column too (drop=False).
# NOTE(review): absolute Windows path makes this module machine-specific.
reader = pd.read_csv('C:/bank/data_set/analyze/social.csv')
reader2 = reader.set_index('month', drop=False)
# print(reader2.index.unique().values[2])
# print(reader2.loc['mar', 'cons.conf.idx'].value_counts())
# print(reader2['month'].value_counts())
def printtttt(df):
    """Print a banner plus per-column ``value_counts`` for every unique index label of *df*.

    The original walked a manual counter ``i`` forward once per column while
    comparing ``idx_q.values[i]`` against the current label; because the
    counter grows by the number of columns per label it runs past the end of
    the index (IndexError) whenever the frame has more cells than unique
    labels. The counter and the degenerate ``while ... break`` loop added
    nothing, so both are removed.
    """
    for idx in df.index.unique():
        print(f"=========================================\n"
              f"==============index:{idx}==================\n"
              f"=========================================\n"
              f"---------------------------------")
        for col in df.columns:
            print(f"{df.loc[idx, col].value_counts()}\n---------------------------------")
# Dump value_counts for every (month, column) pair of the loaded table.
printtttt(reader2)
# name = reader2['month'].unique()
# mar = reader2.loc[name == 'mar']
# print(type(name))
|
# Dependencies
from splinter import Browser
from bs4 import BeautifulSoup as bs
import pymongo
import pandas as pd
import time
import os
# trying Splinter
def init_browser():
    """Return a splinter Chrome Browser instance.

    Uses the CHROMEDRIVER_PATH environment variable when set (e.g. on
    Heroku) and falls back to a chromedriver.exe on PATH for local runs.
    The original wrapped the env lookup in str(), so a missing variable
    produced the literal (and invalid) driver path "None".
    """
    executable_path = {"executable_path": os.environ.get('CHROMEDRIVER_PATH', 'chromedriver.exe')}
    return Browser('chrome', **executable_path, headless=False)
def scrape_info():
    """Scrape Indeed's "refine results" sidebar facets for a fixed set of job titles.

    For each title, loads the search page, extracts label/count text pairs
    from the salary, job-type, location, company, and experience facets, and
    collects one dict per title (shaped for a MongoDB insert).

    Returns:
        list[dict]: one posting summary per title whose sidebar had every facet.
    """
    def _facet(panel, div_id):
        # Extract ([label texts], [count texts]) from one sidebar facet,
        # or None when that facet's div is absent from the page.
        box = panel.find('div', id=div_id)
        if box is None:
            return None
        labels = [x.text for x in box.find_all('span', class_='rbLabel')]
        counts = [x.text for x in box.find_all('span', class_='rbCount')]
        return labels, counts

    titles = ['Data Engineer', 'Business Analyst', 'Software Engineer']
    # titles = ['Data Engineer'] # for testing
    postings = []
    for title in titles:
        browser = init_browser()
        url = 'https://www.indeed.com/jobs?q={}&l='.format(title)
        browser.visit(url)
        browser.is_text_present('Indeed', wait_time=10)
        soup = bs(browser.html, 'html.parser')
        jobs = soup.find('div', id="refineresults")
        # The original called .find_all on each facet before checking it
        # existed, so any missing sidebar section raised AttributeError.
        facets = {}
        if jobs is not None:
            facets = {
                'salary': _facet(jobs, 'SALARY_rbo'),
                'jobtype': _facet(jobs, 'JOB_TYPE_rbo'),
                'location': _facet(jobs, 'LOCATION_rbo'),
                'company': _facet(jobs, 'COMPANY_rbo'),
                'experience': _facet(jobs, 'EXP_LVL_rbo'),
            }
        # Record the posting only when every facet was present.
        if facets and all(v is not None for v in facets.values()):
            salarieslist, salariescount = facets['salary']
            jobtypelist, jobtypecount = facets['jobtype']
            locationlist, locationcount = facets['location']
            companylist, companycount = facets['company']
            experiencelist, experiencecount = facets['experience']
            # Dictionary to be inserted as a MongoDB document
            post = {
                'title': title,
                'company': companylist,
                'company_count': companycount,
                'salary': salarieslist,
                'salary_count': salariescount,
                'location': locationlist,
                'location_count': locationcount,
                'jobtype': jobtypelist,
                'jobtype_count': jobtypecount,
                'experience_level': experiencelist,
                'experience_count': experiencecount
            }
            postings.append(post)
        # close browser after scraping this title
        browser.quit()
    return postings
|
# Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from botocore.session import Session
from botocore.loaders import Loader
from botocore.exceptions import DataNotFoundError
def _test_model_is_not_lost(service_name, type_name,
                            previous_version, latest_version):
    """Assert that a paginator/waiter model present in ``previous_version``
    of ``service_name`` still exists in ``latest_version``.

    The loaded models themselves are irrelevant — only whether loading
    raises DataNotFoundError — so the unused ``previous``/``latest``
    locals from the original are gone.
    """
    loader = Loader()
    try:
        loader.load_service_model(
            service_name, type_name, previous_version)
    except DataNotFoundError:
        # Nothing to check: the model never existed in the previous version.
        pass
    else:
        try:
            loader.load_service_model(
                service_name, type_name, latest_version)
        except DataNotFoundError as e:
            raise AssertionError(
                "%s must exist for %s: %s" % (type_name, service_name, e))
def test_paginators_and_waiters_are_not_lost_in_new_version():
    """Yield one check per (service, model type) ensuring paginators and
    waiters survive across API version bumps (nose-style generator test)."""
    for service_name in Session().get_available_services():
        versions = Loader().list_api_versions(service_name, 'service-2')
        if len(versions) <= 1:
            continue
        for type_name in ['paginators-1', 'waiters-2']:
            yield (_test_model_is_not_lost, service_name,
                   type_name, versions[-2], versions[-1])
|
"""
Utilities module (internal)
This module contains a number of tools that help with the interface for this
plugin. This contains things such as the multicase decorator, the matcher
class for querying and filtering lists of things, support for aliasing
functions, and a number of functional programming primitives (combinators).
"""
import six
from six.moves import builtins
import logging, types, weakref
import functools, operator, itertools
import sys, heapq, collections
import multiprocessing, Queue
import idaapi
__all__ = ['fbox','fboxed','funbox','finstance','fhasitem','fitemQ','fgetitem','fitem','fhasattr','fattributeQ','fgetattr','fattribute','fconstant','fpassthru','fdefault','fpass','fidentity','fid','first','second','third','last','fcompose','fdiscard','fcondition','fmap','flazy','fmemo','fpartial','fapply','fcurry','frpartial','freversed','fexc','fexception','fcatch','fcomplement','fnot','ilist','liter','ituple','titer','itake','iget','imap','ifilter','ichain','izip','count']
### functional programming primitives (FIXME: probably better to document these with examples)
# NOTE(review): this module targets Python 2 — several combinators rely on
# ``dict.items() + dict.items()`` list concatenation, which breaks on
# Python 3's view objects.
# box any specified arguments
fbox = fboxed = lambda *a: a
# return a closure that executes ``f`` with the arguments unboxed.
funbox = lambda f, *a, **k: lambda *ap, **kp: f(*(a + builtins.reduce(operator.add, builtins.map(builtins.tuple, ap), ())), **builtins.dict(k.items() + kp.items()))
# return a closure that will check that ``object`` is an instance of ``type``.
finstance = lambda *type: frpartial(builtins.isinstance, type)
# return a closure that will check if its argument has an item ``key``.
fhasitem = fitemQ = lambda key: fcompose(fcatch(frpartial(operator.getitem, key)), iter, next, fpartial(operator.eq, None))
# return a closure that will get a particular element from an object
fgetitem = fitem = lambda item, *default: lambda object: default[0] if default and item not in object else object[item]
# return a closure that will check if its argument has an ``attribute``.
fhasattr = fattributeQ = lambda attribute: frpartial(hasattr, attribute)
# return a closure that will get a particular attribute from an object
fgetattr = fattribute = lambda attribute, *default: lambda object: getattr(object, attribute, *default)
# return a closure that always returns ``object``.
fconstant = fconst = falways = lambda object: lambda *a, **k: object
# a closure that returns its argument always
fpassthru = fpass = fidentity = fid = lambda object: object
# a closure that returns a default value if its object is false-y
fdefault = lambda default: lambda object: object or default
# return the first, second, or third item of a box.
first, second, third, last = operator.itemgetter(0), operator.itemgetter(1), operator.itemgetter(2), operator.itemgetter(-1)
# return a closure that executes a list of functions one after another from left-to-right
fcompose = lambda *f: builtins.reduce(lambda f1, f2: lambda *a: f1(f2(*a)), builtins.reversed(f))
# return a closure that executes function ``f`` whilst discarding any extra arguments
fdiscard = lambda f: lambda *a, **k: f()
# return a closure that executes function ``crit`` and then returns/executes ``f`` or ``t`` based on whether or not it's successful.
fcondition = fcond = lambda crit: lambda t, f: \
    lambda *a, **k: (t(*a, **k) if builtins.callable(t) else t) if crit(*a, **k) else (f(*a, **k) if builtins.callable(f) else f)
# return a closure that takes a list of functions to execute with the provided arguments
fmap = lambda *fa: lambda *a, **k: (f(*a, **k) for f in fa)
#lazy = lambda f, state={}: lambda *a, **k: state[(f, a, builtins.tuple(builtins.sorted(k.items())))] if (f, a, builtins.tuple(builtins.sorted(k.items()))) in state else state.setdefault((f, a, builtins.tuple(builtins.sorted(k.items()))), f(*a, **k))
#lazy = lambda f, *a, **k: lambda *ap, **kp: f(*(a+ap), **dict(k.items() + kp.items()))
# return a memoized closure that's lazy and only executes when evaluated
def flazy(f, *a, **k):
    """Memoize ``f`` with ``a``/``k`` pre-applied; results are cached keyed on
    the full positional tuple plus sorted keyword items.
    NOTE(review): Python 2 only — relies on ``dict.items()`` returning lists.
    """
    sortedtuple, state = fcompose(builtins.sorted, builtins.tuple), {}
    def lazy(*ap, **kp):
        A, K = a+ap, sortedtuple(k.items() + kp.items())
        return state[(A, K)] if (A, K) in state else state.setdefault((A, K), f(*A, **builtins.dict(k.items()+kp.items())))
    return lazy
fmemo = flazy
# return a closure with the function's arglist partially applied
fpartial = functools.partial
# return a closure that applies the provided arguments to the function ``f``.
# NOTE(review): Python 2 only — concatenates dict.items() lists.
fapply = lambda f, *a, **k: lambda *ap, **kp: f(*(a+ap), **builtins.dict(k.items() + kp.items()))
# return a closure that will use the specified arguments to call the provided function.
fcurry = lambda *a, **k: lambda f, *ap, **kp: f(*(a+ap), **builtins.dict(k.items() + kp.items()))
# return a closure that applies the initial arglist to the end of function ``f``.
frpartial = lambda f, *a, **k: lambda *ap, **kp: f(*(ap + builtins.tuple(builtins.reversed(a))), **builtins.dict(k.items() + kp.items()))
# return a closure that applies the arglist to function ``f`` in reverse.
freversed = freverse = lambda f, *a, **k: lambda *ap, **kp: f(*builtins.reversed(a + ap), **builtins.dict(k.items() + kp.items()))
# return a closure that executes function ``f`` and includes the caught exception (or None) as the first element in the boxed result.
def fcatch(f, *a, **k):
    """Wrap ``f`` so calling the result returns ``(exception_or_None, value_or_None)``
    instead of raising.

    Fixed: the original wrote ``builtins.None``, which is not a valid
    attribute access — plain ``None`` is the correct constant.
    """
    def fcatch(*a, **k):
        try: return None, f(*a, **k)
        except: return sys.exc_info()[1], None
    return functools.partial(fcatch, *a, **k)
fexc = fexception = fcatch
# boolean inversion of the result of a function
fcomplement = fnot = frpartial(fcompose, operator.not_)
# converts a list to an iterator, or an iterator to a list
ilist, liter = fcompose(builtins.list, builtins.iter), fcompose(builtins.iter, builtins.list)
# converts a tuple to an iterator, or an iterator to a tuple
ituple, titer = fcompose(builtins.tuple, builtins.iter), fcompose(builtins.iter, builtins.tuple)
# take ``count`` number of elements from an iterator
itake = lambda count: fcompose(builtins.iter, fmap(*(builtins.next,)*count), builtins.tuple)
# get the ``nth`` element from an iterator
iget = lambda count: fcompose(builtins.iter, fmap(*(builtins.next,)*(count)), builtins.tuple, operator.itemgetter(-1))
# copy from itertools
# NOTE(review): imap/ifilter/izip exist only in Python 2's itertools.
imap, ifilter, ichain, izip = itertools.imap, itertools.ifilter, itertools.chain, itertools.izip
# count number of elements of a container
count = fcompose(builtins.iter, builtins.list, builtins.len)
# cheap pattern-like matching
class Pattern(object):
    '''Base class for fake pattern matching against a tuple.

    Subclasses implement ``__cmp__``; a result of 0 means "matches", and
    both ``==`` and calling the instance perform the match test.
    '''
    def __eq__(self, other):
        return 0 == self.__cmp__(other)
    __call__ = __eq__
    def __repr__(self):
        return 'Pattern()'
class PatternAny(Pattern):
    '''Wildcard pattern: compares equal to absolutely anything.'''
    def __cmp__(self, other):
        # zero always -> every comparison is a match
        return 0
    def __repr__(self):
        return "{:s}({:s})".format('Pattern', '*')
class PatternAnyType(Pattern):
    '''Pattern that matches any object which is an instance of the wrapped type(s).'''
    def __init__(self, other):
        self.type = other
    def __cmp__(self, other):
        return 0 if isinstance(other, self.type) else -1
    def __repr__(self):
        # render either a single type name or "a|b|c" for an iterable of types
        if hasattr(self.type, '__iter__'):
            names = '|'.join(n.__name__ for n in self.type)
        else:
            names = self.type.__name__
        return "{:s}({:s})".format('Pattern', names)
### decorators
class multicase(object):
"""
A lot of magic is in this class which allows one to define multiple cases
for a single function.
"""
CO_OPTIMIZED = 0x00001
CO_NEWLOCALS = 0x00002
CO_VARARGS = 0x00004
CO_VARKEYWORDS = 0x00008
CO_NESTED = 0x00010
CO_VARGEN = 0x00020
CO_NOFREE = 0x00040
CO_COROUTINE = 0x00080
CO_ITERABLE = 0x00100
CO_GENERATOR_ALLOWED = 0x01000
CO_FUTURE_DIVISION = 0x02000
CO_FUTURE_ABSOLUTE_IMPORT = 0x04000
CO_FUTURE_WITH_STATEMENT = 0x08000
CO_FUTURE_PRINT_FUNCTION = 0x10000
CO_FUTURE_UNICODE_LITERALS = 0x20000
CO_FUTURE_BARRY_AS_BDFL = 0x40000
CO_FUTURE_GENERATOR_STOP = 0x80000
cache_name = '__multicase_cache__'
def __new__(cls, *other, **t_args):
'''Decorate a case of a function with the specified types.'''
def result(wrapped):
# extract the FunctionType and its arg types
cons, func = cls.reconstructor(wrapped), cls.ex_function(wrapped)
args, defaults, (star, starstar) = cls.ex_args(func)
s_args = 1 if isinstance(wrapped, (classmethod, types.MethodType)) else 0
# determine if the user included the previous function
if len(other):
ok, prev = True, other[0]
# ..otherwise we just figure it out by looking in the caller's locals
elif func.func_name in sys._getframe().f_back.f_locals:
ok, prev = True, sys._getframe().f_back.f_locals[func.func_name]
# ..otherwise, first blood and we're not ok.
else:
ok = False
# so, a wrapper was found and we need to steal its cache
res = ok and cls.ex_function(prev)
if ok and hasattr(res, cls.cache_name):
cache = getattr(res, cls.cache_name)
# ..otherwise, we just create a new one.
else:
cache = []
res = cls.new_wrapper(func, cache)
res.__module__ = getattr(wrapped, '__module__', getattr(func, '__module__', '__main__'))
# calculate the priority by trying to match the most first
argtuple = s_args, args, defaults, (star, starstar)
priority = len(args) - s_args - len(t_args) + (len(args) and (next((float(i) for i,a in enumerate(args[s_args:]) if a in t_args), 0) / len(args))) + sum(0.3 for _ in filter(None, (star, starstar)))
# check to see if our func is already in the cache
current = tuple(t_args.get(_,None) for _ in args),(star,starstar)
for i, (p, (_, t, a)) in enumerate(cache):
if p != priority: continue
# verify that it actually matches the entry
if current == (tuple(t.get(_,None) for _ in a[1]), a[3]):
# yuuup, update it.
cache[i] = (priority, (func, t_args, argtuple))
res.__doc__ = cls.document(func.__name__, [n for _, n in cache])
return cons(res)
continue
# everything is ok...so should be safe to add it
heapq.heappush(cache, (priority, (func, t_args, argtuple)))
# now we can update the docs
res.__doc__ = cls.document(func.__name__, [n for _, n in cache])
# ..and then restore the wrapper to its former glory
return cons(res)
if len(other) > 1:
raise SyntaxError("{:s} : More than one callable was specified ({!r}). Not sure which callable to clone original state from.".format('.'.join((__name__, cls.__name__)), other))
return result
@classmethod
def document(cls, name, cache):
'''Generate documentation for a multicased function.'''
res = []
for func, types, _ in cache:
doc = (func.__doc__ or '').split('\n')
if len(doc) > 1:
res.append("{:s} ->".format(cls.prototype(func, types)))
res.extend("{: >{padding:d}s}".format(n, padding=len(name)+len(n)+1) for n in map(operator.methodcaller('strip'), doc))
elif len(doc) == 1:
res.append(cls.prototype(func, types) + (" -> {:s}".format(doc[0]) if len(doc[0]) else ''))
continue
return '\n'.join(res)
@classmethod
def prototype(cls, func, types={}):
'''Generate a prototype for an instance of a function.'''
args, defaults, (star, starstar) = cls.ex_args(func)
argsiter = (("{:s}={:s}".format(n, "{:s}".format('|'.join(t.__name__ for t in types[n])) if not isinstance(types[n], type) and hasattr(types[n], '__iter__') else types[n].__name__) if types.has_key(n) else n) for n in args)
res = (argsiter, ("*{:s}".format(star),) if star else (), ("**{:s}".format(starstar),) if starstar else ())
return "{:s}({:s})".format(func.func_name, ', '.join(itertools.chain(*res)))
@classmethod
def match(cls, (args, kwds), heap):
'''Given the specified ``args`` and ``kwds``, find the correct function according to its types.'''
# FIXME: yep, done in O(n) time.
for f, ts, (sa, af, defaults, (argname, kwdname)) in heap:
# populate our arguments
ac, kc = (n for n in args), dict(kwds)
# skip some args in our tuple
map(next, (ac,)*sa)
# build the argument tuple using the generator, kwds, or our defaults.
a = []
try:
for n in af[sa:]:
try: a.append(next(ac))
except StopIteration: a.append(kc.pop(n) if n in kc else defaults.pop(n))
except KeyError: pass
finally: a = tuple(a)
# now anything left in ac or kc goes in the wildcards. if there aren't any, then this iteration doesn't match.
wA, wK = list(ac), dict(kc)
if (not argname and len(wA)) or (not kwdname and wK):
continue
# if our perceived argument length doesn't match, then this iteration doesn't match either
if len(a) != len(af[sa:]):
continue
# figure out how to match the types by checking if it's a regular type or it's a callable
predicateF = lambda t: callable if t == callable else (lambda v: isinstance(v, t))
# now we can finally start checking that the types match
if any(not predicateF(ts[t])(v) for t, v in zip(af[sa:], a) if t in ts):
continue
# we should have a match
return f, (tuple(args[:sa]) + a, wA, wK)
error_arguments = (n.__class__.__name__ for n in args)
error_keywords = ("{:s}={:s}".format(n, kwds[n].__class__.__name__) for n in kwds)
raise LookupError("@multicase.call({:s}, The type {{{:s}}}) does not match any of the available prototypes. The prototypes that are available are {:s}.".format(', '.join(error_arguments) if args else '*()', ', '.join(error_keywords), ', '.join(cls.prototype(f,t) for f,t,_ in heap)))
@classmethod
def new_wrapper(cls, func, cache):
'''Create a new wrapper that will determine the correct function to call.'''
# define the wrapper...
def F(*arguments, **keywords):
heap = [res for _,res in heapq.nsmallest(len(cache), cache)]
f, (a, w, k) = cls.match((arguments[:],keywords), heap)
return f(*arguments, **keywords)
#return f(*(arguments + tuple(w)), **keywords)
# swap out the original code object with our wrapper's
f, c = F, F.func_code
cargs = c.co_argcount, c.co_nlocals, c.co_stacksize, c.co_flags, \
c.co_code, c.co_consts, c.co_names, c.co_varnames, \
c.co_filename, '.'.join((func.__module__, func.func_name)), \
c.co_firstlineno, c.co_lnotab, c.co_freevars, c.co_cellvars
newcode = types.CodeType(*cargs)
res = types.FunctionType(newcode, f.func_globals, f.func_name, f.func_defaults, f.func_closure)
res.func_name, res.func_doc = func.func_name, func.func_doc
# assign the specified cache to it
setattr(res, cls.cache_name, cache)
# ...and finally add a default docstring
setattr(res, '__doc__', '')
return res
@classmethod
def ex_function(cls, object):
'''Extract the actual function type from a callable.'''
if isinstance(object, types.FunctionType):
return object
elif isinstance(object, types.MethodType):
return object.im_func
elif isinstance(object, types.CodeType):
res, = (n for n in gc.get_referrers(c) if n.func_name == c.co_name and isinstance(n, types.FunctionType))
return res
elif isinstance(object, (staticmethod,classmethod)):
return object.__func__
raise TypeError, object
@classmethod
def reconstructor(cls, n):
'''Return a closure that returns the original callable type for a function.'''
if isinstance(n, types.FunctionType):
return lambda f: f
if isinstance(n, types.MethodType):
return lambda f: types.MethodType(f, n.im_self, n.im_class)
if isinstance(n, (staticmethod,classmethod)):
return lambda f: type(n)(f)
if isinstance(n, types.InstanceType):
return lambda f: types.InstanceType(type(n), dict(f.__dict__))
if isinstance(n, (types.TypeType,types.ClassType)):
return lambda f: type(n)(n.__name__, n.__bases__, dict(f.__dict__))
raise NotImplementedError, type(func)
@classmethod
def ex_args(cls, f):
'''Extract the arguments from a function.'''
c = f.func_code
varnames_count, varnames_iter = c.co_argcount, iter(c.co_varnames)
args = tuple(itertools.islice(varnames_iter, varnames_count))
res = { a : v for v,a in zip(reversed(f.func_defaults or []), reversed(args)) }
try: starargs = next(varnames_iter) if c.co_flags & cls.CO_VARARGS else ""
except StopIteration: starargs = ""
try: kwdargs = next(varnames_iter) if c.co_flags & cls.CO_VARKEYWORDS else ""
except StopIteration: kwdargs = ""
return args, res, (starargs, kwdargs)
@classmethod
def generatorQ(cls, func):
'''Returns true if ``func`` is a generator.'''
func = cls.ex_function(func)
return bool(func.func_code.co_flags & CO_VARGEN)
class alias(object):
    '''Factory producing a thin wrapper that forwards to another callable, documented as an alias.'''
    def __new__(cls, other, klass=None):
        reconstruct = multicase.reconstructor(other)
        target = multicase.ex_function(other)
        # build the dotted path the docstring will point at
        if isinstance(other, types.MethodType) or klass:
            path = (target.__module__, klass or other.im_self.__name__)
        else:
            path = (target.__module__,)
        document = "Alias for `{:s}`.".format('.'.join(path + (target.func_name,)))
        return reconstruct(cls.new_wrapper(target, document))
    @classmethod
    def new_wrapper(cls, func, document):
        '''Return a forwarding wrapper around ``func`` carrying ``document`` as its docstring.'''
        def fn(*arguments, **keywords):
            return func(*arguments, **keywords)
        wrapper = functools.update_wrapper(fn, func)
        wrapper.__doc__ = document
        return wrapper
### asynchronous process monitor
import sys,os,threading,weakref,subprocess,time,itertools,operator
# monitoring an external process' i/o via threads/queues
class process(object):
"""Spawns a program along with a few monitoring threads for allowing asynchronous(heh) interaction with a subprocess.
mutable properties:
program -- subprocess.Popen instance
commandline -- subprocess.Popen commandline
eventWorking -- threading.Event() instance for signalling task status to monitor threads
stdout,stderr -- callables that are used to process available work in the taskQueue
properties:
id -- subprocess pid
running -- returns true if process is running and monitor threads are workingj
working -- returns true if monitor threads are working
threads -- list of threads that are monitoring subprocess pipes
taskQueue -- Queue.Queue() instance that contains work to be processed
exceptionQueue -- Queue.Queue() instance containing exceptions generated during processing
(process.stdout, process.stderr)<Queue> -- Queues containing output from the spawned process.
"""
program = None # subprocess.Popen object
id = property(fget=lambda s: s.program and s.program.pid or -1)
running = property(fget=lambda s: False if s.program is None else s.program.poll() is None)
working = property(fget=lambda s: s.running and not s.eventWorking.is_set())
threads = property(fget=lambda s: list(s.__threads))
updater = property(fget=lambda s: s.__updater)
taskQueue = property(fget=lambda s: s.__taskQueue)
exceptionQueue = property(fget=lambda s: s.__exceptionQueue)
    def __init__(self, command, **kwds):
        """Creates a new instance that monitors subprocess.Popen(``command``), the created process starts in a paused state.
        Keyword options:
        env<dict> = os.environ -- environment to execute program with
        cwd<str> = os.getcwd() -- directory to execute program in
        shell<bool> = True -- whether to treat program as an argument to a shell, or a path to an executable
        newlines<bool> = True -- allow python to tamper with i/o to convert newlines
        show<bool> = False -- if within a windowed environment, open up a console for the process.
        paused<bool> = False -- if enabled, then don't start the process until .start() is called
        timeout<float> = -1 -- if positive, then raise a Queue.Empty exception at the specified interval.
        """
        # default properties
        self.__updater = None
        self.__threads = weakref.WeakSet()
        self.__kwds = kwds
        self.commandline = command
        import Queue
        self.eventWorking = threading.Event()
        self.__taskQueue = Queue.Queue()
        self.__exceptionQueue = Queue.Queue()
        # NOTE(review): 'stdout'/'stderr' are effectively required keywords --
        # .pop with no default raises KeyError when either is omitted.
        self.stdout = kwds.pop('stdout')
        self.stderr = kwds.pop('stderr')
        # start the process immediately unless paused=True was requested
        not kwds.get('paused',False) and self.start(command)
    def start(self, command=None, **options):
        '''Start the specified ``command`` with the requested ``options``.'''
        # refuse to spawn while a previous child or its monitors are alive
        if self.running:
            raise OSError("Process {:d} is still running.".format(self.id))
        if self.updater or len(self.threads):
            raise OSError("Process {:d} management threads are still running.".format(self.id))
        # merge construction-time keywords with per-call overrides
        kwds = dict(self.__kwds)
        kwds.update(options)
        command = command or self.commandline
        env = kwds.get('env', os.environ)
        cwd = kwds.get('cwd', os.getcwd())
        newlines = kwds.get('newlines', True)
        shell = kwds.get('shell', False)
        stdout,stderr = options.pop('stdout',self.stdout),options.pop('stderr',self.stderr)
        # join stderr into stdout when no separate stderr consumer was given
        self.program = process.subprocess(command, cwd, env, newlines, joined=(stderr is None) or stdout == stderr, shell=shell, show=kwds.get('show', False))
        self.commandline = command
        self.eventWorking.clear()
        # monitor program's i/o
        self.__start_monitoring(stdout, stderr)
        self.__start_updater(timeout=kwds.get('timeout',-1))
        # start monitoring
        self.eventWorking.set()
        return self
    def __start_updater(self, daemon=True, timeout=0):
        '''Start the updater thread. **used internally**'''
        import Queue
        def task_exec(emit, data):
            # NOTE(review): ``P`` is not defined in this scope; the
            # ``res and P.write(res)`` branch would raise NameError if a
            # coroutine ever returned a value to write back.
            if hasattr(emit,'send'):
                res = emit.send(data)
                res and P.write(res)
            else: emit(data)
        def task_get_timeout(P, timeout):
            # block on the task queue; convert a timeout into a StopIteration
            # pushed onto the exception queue
            try:
                emit,data = P.taskQueue.get(block=True, timeout=timeout)
            except Queue.Empty:
                _,_,tb = sys.exc_info()
                # NOTE(review): Queue.put's signature is (item, block, timeout);
                # passing (StopIteration, StopIteration(), tb) abuses those
                # parameters rather than enqueueing a triple.
                P.exceptionQueue.put(StopIteration,StopIteration(),tb)
                return ()
            return emit,data
        def task_get_notimeout(P, timeout):
            return P.taskQueue.get(block=True)
        task_get = task_get_timeout if timeout > 0 else task_get_notimeout
        def update(P, timeout):
            # consume queued (emit, data) tasks until eventWorking is cleared
            P.eventWorking.wait()
            while P.eventWorking.is_set():
                res = task_get(P, timeout)
                if not res: continue
                emit,data = res
                try:
                    task_exec(emit,data)
                except StopIteration:
                    P.eventWorking.clear()
                except:
                    P.exceptionQueue.put(sys.exc_info())
                finally:
                    P.taskQueue.task_done()
                continue
            return
        self.__updater = updater = threading.Thread(target=update, name="thread-%x.update"% self.id, args=(self,timeout))
        updater.daemon = daemon
        updater.start()
        return updater
    def __start_monitoring(self, stdout, stderr=None):
        '''Start monitoring threads. **used internally**'''
        program = self.program
        name = "thread-{:x}".format(program.pid)
        # create monitoring threads + coroutines
        if stderr:
            res = process.monitorPipe(self.taskQueue, (stdout,program.stdout),(stderr,program.stderr), name=name)
        else:
            res = process.monitorPipe(self.taskQueue, (stdout,program.stdout), name=name)
        # Python 2 idiom: map(None, iterable) materializes the generator into a list
        res = map(None, res)
        # attach a method for injecting data into a monitor
        for t,q in res: t.send = q.send
        threads,senders = zip(*res)
        # update threads for destruction later
        self.__threads.update(threads)
        # set things off
        for t in threads: t.start()
@staticmethod
def subprocess(program, cwd, environment, newlines, joined, shell=True, show=False):
    '''Create a subprocess using subprocess.Popen.

    program     -- command line handed to Popen
    cwd         -- working directory for the child
    environment -- environment mapping for the child
    newlines    -- enable universal-newline translation of the pipes
    joined      -- when true, merge stderr into stdout
    shell       -- run through the shell (default True)
    show        -- Windows only: show the spawned console window
    '''
    # either merge stderr into stdout or give it a pipe of its own
    stderr = subprocess.STDOUT if joined else subprocess.PIPE
    if os.name == 'nt':
        # Windows: control console window visibility via STARTUPINFO
        si = subprocess.STARTUPINFO()
        si.dwFlags = subprocess.STARTF_USESHOWWINDOW
        # NOTE(review): both branches evaluate to 0 (SW_HIDE == 0); visibility
        # appears to be governed by the creationflags below -- confirm intent.
        si.wShowWindow = 0 if show else subprocess.SW_HIDE
        cf = subprocess.CREATE_NEW_CONSOLE if show else 0
        return subprocess.Popen(program, universal_newlines=newlines, shell=shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr, close_fds=False, startupinfo=si, creationflags=cf, cwd=cwd, env=environment)
    # POSIX: close inherited file descriptors in the child
    return subprocess.Popen(program, universal_newlines=newlines, shell=shell, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=stderr, close_fds=True, cwd=cwd, env=environment)
@staticmethod
def monitorPipe(q, (id,pipe), *more, **options):
    """Attach a coroutine to a monitoring thread for stuffing queue `q` with data read from `pipe`
    Yields a list of (thread,coro) tuples given the arguments provided.
    Each thread will read from `pipe`, and stuff the value combined with `id` into `q`.

    Note: the (id,pipe) tuple parameter is Python 2-only syntax.
    """
    def stuff(q,*key):
        # coroutine: every value sent in is enqueued as key + (value,)
        while True: q.put(key+((yield),))
    for id,pipe in itertools.chain([(id,pipe)],more):
        res,name = stuff(q,id), "{:s}<{!r}>".format(options.get('name',''),id)
        # res.next() primes the coroutine so that .send() works immediately;
        # it yields None, so `or` selects res.send as the dispatch target
        yield process.monitor(res.next() or res.send, pipe, name=name),res
    return
@staticmethod
def monitor(send, pipe, blocksize=1, daemon=True, name=None):
    """Spawn a thread that reads `blocksize` bytes from `pipe` and dispatches it to `send`
    For every single byte, `send` is called. The thread is named according to
    the `name` parameter.
    Returns the monitoring threading.thread instance (not yet started --
    the caller is responsible for invoking .start()).
    """
    def shuffle(send, pipe):
        # pump the pipe until it is closed or a read yields nothing
        while not pipe.closed:
            data = pipe.read(blocksize)
            if len(data) == 0:
                # pipe.read syscall was interrupted. so since we can't really
                # determine why (cause...y'know..python), stop dancing so
                # the parent will actually be able to terminate us
                break
            # dispatch each byte individually (Python 2 map runs eagerly)
            map(send,data)
        return
    if name:
        monitorThread = threading.Thread(target=shuffle, name=name, args=(send,pipe))
    else:
        monitorThread = threading.Thread(target=shuffle, args=(send,pipe))
    monitorThread.daemon = daemon
    return monitorThread
def __format_process_state(self):
    '''Return a human-readable description of the program's current state.'''
    program = self.program
    if program is None:
        return "Process \"{:s}\" {:s}.".format(self.commandline, 'was never started')
    code = program.poll()
    if code is None:
        status = 'is still running'
    else:
        status = "has terminated with code {:d}".format(code)
    return "Process {:d} {:s}".format(self.id, status)
def write(self, data):
    '''Write `data` directly to program's stdin.'''
    # stdin must exist and still be open before anything else matters
    writable = self.running and not self.program.stdin.closed
    if not writable:
        raise IOError("Unable to write to stdin for process. Current state is {:s}.".format(self.__format_process_state()))
    updater = self.updater
    if updater is None or not updater.is_alive():
        raise IOError("Unable to write to stdin for process {:d}. Updater thread has prematurely terminated.".format(self.id))
    return self.program.stdin.write(data)
def close(self):
    '''Closes stdin of the program.'''
    # guard clause: refuse when the process is gone or stdin already closed
    if not (self.running and not self.program.stdin.closed):
        raise IOError("Unable to close stdin for process. Current state is {:s}.".format(self.__format_process_state()))
    return self.program.stdin.close()
def signal(self, signal):
    '''Raise a signal to the program.'''
    # guard clause: only a running process can receive a signal
    if not self.running:
        raise IOError("Unable to raise signal {!r} to process. Current state is {:s}.".format(signal, self.__format_process_state()))
    return self.program.send_signal(signal)
def exception(self):
    '''Grab an exception if there's any in the queue.'''
    queue = self.exceptionQueue
    if queue.empty():
        return None
    info = queue.get()
    queue.task_done()
    return info
def wait(self, timeout=0.0):
    '''Wait a given amount of time for the process to terminate.

    Returns the program's return code, or the result of a forced termination
    when the updater stopped working. Re-raises (Python 2 style) any
    exception collected from the worker threads.
    '''
    program = self.program
    if program is None:
        raise RuntimeError("Program {:s} is not running.".format(self.commandline))
    if not self.running: return program.returncode
    # make sure the updater is actually processing before blocking
    self.updater.is_alive() and self.eventWorking.wait()
    if timeout:
        t = time.time()
        while self.running and self.eventWorking.is_set() and time.time() - t < timeout:        # spin cpu until we timeout
            if not self.exceptionQueue.empty():
                res = self.exception()
                raise res[0],res[1],res[2]
            continue
        # timed out or worker stopped: terminate if the updater quit on us
        return program.returncode if self.eventWorking.is_set() else self.__terminate()
    # return program.wait() # XXX: doesn't work correctly with PIPEs due to
    #                         pythonic programmers' inability to understand os semantics
    while self.running and self.eventWorking.is_set():
        if not self.exceptionQueue.empty():
            res = self.exception()
            raise res[0],res[1],res[2]
        continue    # ugh...poll-forever/kill-cpu until program terminates...
    if not self.eventWorking.is_set():
        return self.__terminate()
    return program.returncode
def stop(self):
    '''Stop the updater loop and forcefully terminate the program.'''
    self.eventWorking.clear()
    return self.__terminate()
def __terminate(self):
    '''Sends a SIGKILL signal and then waits for program to complete.'''
    self.program.kill()
    # busy-wait until the OS reports the process dead
    while self.running: continue
    self.__stop_monitoring()
    if self.exceptionQueue.empty():
        return self.program.returncode
    # re-raise any exception collected from the worker threads (Py2 syntax)
    res = self.exception()
    raise res[0],res[1],res[2]
def __stop_monitoring(self):
    '''Cleanup monitoring threads. Must only be called once the program has exited.'''
    P = self.program
    if P.poll() is None:
        raise RuntimeError("Unable to stop monitoring while process {!r} is still running.".format(P))
    # stop the update thread
    self.eventWorking.clear()
    # forcefully close pipes that still open, this should terminate the monitor threads
    # also, this fixes a resource leak since python doesn't do this on subprocess death
    for p in (P.stdin,P.stdout,P.stderr):
        while p and not p.closed:
            try: p.close()
            except: pass
        continue
    # join all monitoring threads (Python 2 map runs eagerly)
    map(operator.methodcaller('join'), self.threads)
    # now spin until none of them are alive
    while len(self.threads) > 0:
        for th in self.threads[:]:
            if not th.is_alive(): self.__threads.discard(th)
            del(th)
        continue
    # join the updater thread, and then remove it; the None sentinel wakes
    # the updater's blocking taskQueue.get()
    self.taskQueue.put(None)
    self.updater.join()
    assert not self.updater.is_alive()
    self.__updater = None
    return
def __repr__(self):
    '''Summarize the process state, pending exceptions and live threads.'''
    ok = self.exceptionQueue.empty()
    state = "running pid:{:d}".format(self.id) if self.running else "stopped cmd:\"{:s}\"".format(self.commandline)
    # (name, count/flag) pairs describing the helper threads
    threads = [
        ('updater', 0 if self.updater is None else self.updater.is_alive()),
        ('input/output', len(self.threads))
    ]
    return "<process {:s}{:s} threads{{{:s}}}>".format(state, (' !exception!' if not ok else ''), ' '.join("{:s}:{:d}".format(n,v) for n,v in threads))
## interface for wrapping the process class
def spawn(stdout, command, **options):
    """Spawn `command` with the specified `**options`.
    If program writes anything to stdout, dispatch it to the `stdout` callable.
    If `stderr` is defined, call `stderr` with anything written to the program's stderr.
    Returns the created `process` instance.
    """
    # grab arguments that we care about
    stderr = options.pop('stderr', None)
    daemon = options.pop('daemon', True)  # consumed for API compat; monitor threads are daemonic by default
    # prime any coroutines that were passed in, remembering whatever they
    # yield up-front. FIX: the original wrote these initial yields to `P`
    # before `P` was ever defined, raising NameError.
    initial = []
    if hasattr(stdout, 'send'):
        res = stdout.next()
        res and initial.append(res)
    if hasattr(stderr, 'send'):
        res = stderr.next()
        res and initial.append(res)
    # spawn the sub-process
    P = process(command, stdout=stdout, stderr=stderr, **options)
    # now that the program exists, flush any initial data to its stdin
    for data in initial:
        P.write(data)
    return P
### scheduler
class execution(object):
__slots__ = ('queue','state','result','ev_unpaused','ev_terminating')
__slots__+= ('thread','lock')
def __init__(self):
'''Execute a function asynchronously in another thread.'''
# management of execution queue
res = multiprocessing.Lock()
self.queue = multiprocessing.Condition(res)
self.state = []
# results
self.result = Queue.Queue()
# thread management
self.ev_unpaused = multiprocessing.Event()
self.ev_terminating = multiprocessing.Event()
self.thread = threading.Thread(target=self.__run__, name="Thread-{:s}-{:x}".format(self.__class__.__name__, id(self)))
# FIXME: we can support multiple threads, but since this is
# being bound by a single lock due to my distrust for IDA
# and race-conditions...we only use one.
self.lock = multiprocessing.Lock()
return self.__start()
def release(self):
'''Release any resources required to execute a function asynchronously.'''
self.queue.acquire()
self.state = []
self.queue.release()
return self.__stop()
def __del__(self):
self.release()
def __repr__(self):
cls = self.__class__
state = 'paused'
if self.ev_unpaused.is_set():
state = 'running'
if self.ev_terminating.is_set():
state = 'terminated'
if not self.thread.is_alive():
state = 'dead'
res = tuple(self.state)
return "<class '{:s}'> {:s} Queue:{:d} Results:{:d}".format('.'.join(('internal',__name__,cls.__name__)), state, len(res), self.result.unfinished_tasks)
running = property(fget=lambda s: s.thread.is_alive() and s.ev_unpaused.is_set() and not s.ev_terminating.is_set())
dead = property(fget=lambda s: s.thread.is_alive())
def notify(self):
'''Notify the execution queue that it should process anything that is queued.'''
logging.debug("{:s}.notify : Waking up execution queue {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self))
self.queue.acquire()
self.queue.notify()
self.queue.release()
def next(self):
'''Notify the execution queue that a result is needed, then return the next one available.'''
self.queue.acquire()
while self.state:
self.queue.notify()
self.queue.release()
if self.result.empty():
raise StopIteration
return self.pop()
def __start(self):
cls = self.__class__
logging.debug("{:s}.start : Starting execution queue thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
self.ev_terminating.clear(), self.ev_unpaused.set()
self.thread.daemon = True
return self.thread.start()
def __stop(self):
cls = self.__class__
logging.debug("{:s}.stop : Terminating execution queue thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
if not self.thread.is_alive():
cls = self.__class__
logging.warn("{:s}.stop : Execution queue has already been terminated as {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self))
return
self.ev_unpaused.set(), self.ev_terminating.set()
self.queue.acquire()
self.queue.notify_all()
self.queue.release()
return self.thread.join()
def start(self):
'''Start to dispatch callables in the execution queue.'''
cls = self.__class__
if not self.thread.is_alive():
logging.fatal("{:s}.start : Unable to resume an already terminated execution queue {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self))
return False
logging.info("{:s}.start : Resuming the execution queue {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
res, _ = self.ev_unpaused.is_set(), self.ev_unpaused.set()
self.queue.acquire()
self.queue.notify_all()
self.queue.release()
return not res
def stop(self):
'''Pause the execution queue.'''
cls = self.__class__
if not self.thread.is_alive():
logging.fatal("{:s}.stop : Unable to pause the execution queue {!r} as it has already been terminated.".format('.'.join(('internal',__name__,cls.__name__)), self))
return False
logging.info("{:s}.stop : Pausing execution queue thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
res, _ = self.ev_unpaused.is_set(), self.ev_unpaused.clear()
self.queue.acquire()
self.queue.notify_all()
self.queue.release()
return res
def push(self, F, *args, **kwds):
'''Push ``F`` with the provided ``args`` and ``kwds`` onto the execution queue.'''
# package it all into a single function
res = functools.partial(F, *args, **kwds)
cls = self.__class__
logging.debug("{:s}.push : Adding the callable {!r} to the execution queue {!r}.".format('.'.join(('internal',__name__,cls.__name__)), F, self))
# shove it down a multiprocessing.Queue
self.queue.acquire()
self.state.append(res)
self.queue.notify()
self.queue.release()
return True
def pop(self):
'''Pop a result off of the result queue.'''
cls = self.__class__
if not self.thread.is_alive():
logging.fatal("{:s}.pop : Refusing to wait for a result when execution queue has already terminated. Execution queue is {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self))
raise Queue.Empty
logging.debug("{:s}.pop : Popping result off of the execution queue {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self))
try:
_, res, err = self.result.get(block=0)
if err != (None, None, None):
t, e, tb = err
raise t, e, tb
finally:
self.result.task_done()
return res
@classmethod
def __consume(cls, event, queue, state):
while True:
if event.is_set():
break
queue.wait()
if state: yield state.pop(0)
yield # prevents us from having to catch a StopIteration
@classmethod
def __dispatch(cls, lock):
res, error = None, (None, None, None)
while True:
F = (yield res, error)
lock.acquire()
try:
res = F()
except:
res, error = None, sys.exc_info()
else:
error = None, None, None
finally: lock.release()
return
def __run__(self):
cls = self.__class__
consumer = self.__consume(self.ev_terminating, self.queue, self.state)
executor = self.__dispatch(self.lock); next(executor)
logging.debug("{:s}.running : The execution queue is now running with thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
while not self.ev_terminating.is_set():
# check if we're allowed to execute
if not self.ev_unpaused.is_set():
self.ev_unpaused.wait()
# pull a callable out of the queue
logging.debug("{:s}.running : Waiting for an item on thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), self.thread))
self.queue.acquire()
item = next(consumer)
self.queue.release()
if not self.ev_unpaused.is_set():
self.ev_unpaused.wait()
# check if we're terminating
if self.ev_terminating.is_set(): break
# now we can execute it
logging.debug("{:s}.running : Executing {!r} asynchronously with thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), item, self.thread))
res, err = executor.send(item)
# and stash our result
logging.debug("{:s}.running : Received result {!r} from {!r} on thread {!r}.".format('.'.join(('internal',__name__,cls.__name__)), (res,err), item, self.thread))
self.result.put((item,res,err))
return
# FIXME: figure out how to match against a bounds in a non-hacky way
class matcher(object):
    """
    An object that allows one to match or filter a list of things in an
    sort of elegant way.

    Each registered "type" maps a user-supplied value to a predicate that is
    applied to candidate objects via `match`.
    """
    def __init__(self):
        # maps a match-type name to a factory: value -> predicate(object)
        self.__predicate__ = {}
    def __attrib__(self, *attribute):
        '''Return a fetcher extracting the given attribute(s) from an object.'''
        if not attribute:
            return lambda n: n
        res = [(operator.attrgetter(a) if isinstance(a,basestring) else a) for a in attribute]
        # multiple attributes yield a tuple; a single one yields the bare value
        return lambda o: tuple(x(o) for x in res) if len(res) > 1 else res[0](o)
    def attribute(self, type, *attribute):
        '''Match when the fetched attribute equals the provided value.'''
        attr = self.__attrib__(*attribute)
        # FIX: the extra functools.partial wrapper was redundant; compose the
        # fetch with a direct equality test, mirroring `mapping` below.
        self.__predicate__[type] = lambda v: fcompose(attr, functools.partial(operator.eq, v))
    def mapping(self, type, function, *attribute):
        '''Match when `function` applied to the fetched attribute equals the value.'''
        attr = self.__attrib__(*attribute)
        mapper = fcompose(attr, function)
        self.__predicate__[type] = lambda v: fcompose(mapper, functools.partial(operator.eq, v))
    def boolean(self, type, function, *attribute):
        '''Match when `function(value, fetched_attribute)` is truthy.'''
        attr = self.__attrib__(*attribute)
        self.__predicate__[type] = lambda v: fcompose(attr, functools.partial(function, v))
    def predicate(self, type, *attribute):
        '''Match using a caller-supplied predicate applied to the fetched attribute.'''
        attr = self.__attrib__(*attribute)
        self.__predicate__[type] = functools.partial(fcompose, attr)
    def match(self, type, value, iterable):
        '''Lazily filter `iterable` with the predicate registered for `type`.'''
        matcher = self.__predicate__[type](value)
        return itertools.ifilter(matcher, iterable)
|
from matplotlib.pyplot import *
# Build the x axis together with its linear, quadratic and cubic series.
xs = range(11)
x = [i for i in xs]
y1 = [i for i in xs]
y2 = [i ** 2 for i in xs]
y3 = [i ** 3 for i in xs]
# Draw all three series on the same axes with distinct markers.
plot(x, y1, "r--", label="x")
plot(x, y2, "go", label="y^2")
plot(x, y3, "bs", label="y^3")
axis([0, 10, 0, 100])
title("my diagram dajumm")
xlabel("x")
ylabel("y")
grid(True)
legend()
show()
|
from configs import sac_default as default_lib
def get_config():
    """Return the default SAC config tuned for periodic hard target updates."""
    config = default_lib.get_config()
    # tau=1.0 makes each target sync a full (hard) copy, applied every 50 steps
    for attr, value in (('tau', 1.0), ('target_update_period', 50)):
        setattr(config, attr, value)
    return config
|
Given an array of strings, group anagrams together.
For example, given: ["eat", "tea", "tan", "ate", "nat", "bat"],
Return:
[
["ate", "eat","tea"],
["nat","tan"],
["bat"]
]
key points:
1. Use a dict keyed by each word's sorted characters, and iterate it with `for k, v in dTlb.items():`
2. sorted("cba") returns ['a', 'b', 'c']
class Solution(object):
    def groupAnagrams(self, strs):
        """
        Group words that are anagrams of one another.

        :type strs: List[str]
        :rtype: List[List[str]]
        """
        # Anagrams share the same key: the word's characters in sorted order.
        groups = {}
        for s in strs:
            key = ''.join(sorted(s))
            # setdefault replaces the explicit membership test + two branches
            groups.setdefault(key, []).append(s)
        return list(groups.values())
# quick manual check (Python 2 print statement)
sol =Solution()
ary = ["eat", "tea", "tan", "ate", "nat", "bat"]
print sol.groupAnagrams(ary)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.views.generic import View
from django.shortcuts import render_to_response
from fuzzyapp.database import fuzzyQuery, convert_fuzzy
from fuzzyapp.models import Materia
from fuzzyapp.forms import FiltroMateriasForm, AgruparMateriasForm
class ListaMateriasView(View):
    """List course subjects ("materias"), with optional filtering and ordering."""
    def get(self, request):
        # By default list every subject and show a form
        # for filtering and ordering them.
        materias = Materia.objects.all()
        form = FiltroMateriasForm()
        return render_to_response(
            "fuzzyapp/lista_materias.html",
            {"materias": list(materias), "form": form}
        )
    def post(self, request):
        # Filter and order the subjects when the form is valid;
        # otherwise behave exactly like GET.
        form = FiltroMateriasForm(request.POST)
        materias = Materia.objects.all()
        if form.is_valid():
            if form.cleaned_data["filtrar_dptos"]:
                # NOTE(review): the results of id_unidad_in/order_by below are
                # discarded. If these come from the project's custom
                # mini-framework and mutate in place this is fine; if they
                # return new querysets (standard Django behavior) the
                # filtering/ordering has no effect -- confirm which applies.
                materias.id_unidad_in(form.cleaned_data["dptos"])
            orden1 = form.cleaned_data["orden1"]
            orden2 = form.cleaned_data["orden2"]
            orden3 = form.cleaned_data["orden3"]
            if orden1 != '':
                materias.order_by(orden1, direction=form.cleaned_data["asc1"])
            if orden2 != '':
                materias.order_by(orden2, direction=form.cleaned_data["asc2"])
            if orden3 != '':
                materias.order_by(orden3, direction=form.cleaned_data["asc3"])
        return render_to_response(
            "fuzzyapp/lista_materias.html",
            {"materias": list(materias), "form": form}
        )
# Human-readable labels for the groupable fuzzy fields (used by the views below).
c = {
    "calificacion": "Calificación",
    "preparacion": "Preparación",
    "dificultad": "Dificultad"
}
class AgruparMateriasView(View):
    """
    This view does not use our models; it talks to fuzzyQuery directly.
    The reason is that there is no easy way to bolt aggregation onto our
    subjects mini-framework, so it is simpler to do it by hand.
    """
    def get(self, request):
        # Show an empty grouping form and no results.
        resultado = []
        form = AgruparMateriasForm()
        return render_to_response(
            "fuzzyapp/agrupar_materias.html",
            {"resultado": resultado, "form": form}
        )
    def post(self, request):
        # Group subjects by the selected fuzzy field and render the result.
        form = AgruparMateriasForm(request.POST)
        resultado = []
        if form.is_valid():
            campo = form.cleaned_data["campo"]
            if campo == '':
                # no field selected: behave like GET
                return render_to_response(
                    "fuzzyapp/agrupar_materias.html",
                    {"resultado": resultado, "form": form}
                )
            # aggregate codes/names per distinct fuzzy value of the field
            query = (
                "SELECT array_agg(a.codigo) as codigos, array_agg(a.nombre) as nombres, array_agg(af.{campo}) as campos "
                "FROM opinion.asignatura as a "
                "JOIN opinion.asignatura_fuzzy as af USING(codigo) "
                "GROUP BY af.{campo} "
            )
            query = query.format(campo=campo)
            # converters for each aggregated column (Python 2 `unicode`)
            columns = {
                "codigos": {"type": "array", "subtype_converter": unicode},
                "nombres": {"type": "array", "subtype_converter": unicode},
                "campos": {"type": "array", "subtype_converter": lambda x: convert_fuzzy(x.toString(), int)}
            }
            resultado = list(fuzzyQuery(query, columns))
            # re-shape each group into (codigo, nombre, campo) triples
            resultado = list(map(lambda x: zip(x['codigos'], x['nombres'], x['campos']), resultado))
        return render_to_response(
            "fuzzyapp/agrupar_materias.html",
            {"resultado": resultado, "form": form, "campo": c[campo]}
        )
|
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E128
"""Backend of API Explorer
Serves as a proxy between client side code and API server
"""
import cgi
import json
import logging
import sys
import flask
import oauth
import werkzeug.debug
import api_explorer_oauth_client
import js_version
try:
import secrets
except ImportError:
# If the secrets aren't present, we can't run the server.
logging.critical("Can't find secrets.py.\nCopy secrets.example.py" +
" to secrets.py, enter the necessary values, and try again.")
sys.exit(1)
# Application object plus settings loaded from the explorer package.
app = flask.Flask(__name__)
app.config.from_object('explorer.settings')
if app.debug:
    # interactive in-browser debugger with code evaluation (development only)
    app.wsgi_app = werkzeug.debug.DebuggedApplication(app.wsgi_app,
        evalex=True)
# Keep around an instance of the client. It's reusable because all the
# stateful stuff is passed around as parameters.
OAuthClient = api_explorer_oauth_client.APIExplorerOAuthClient(
    secrets.server_url, secrets.consumer_key, secrets.consumer_secret)
@app.route("/")
def index():
    """Render the API explorer's landing page."""
    context = {
        "prod": not app.debug,
        "js_version": js_version.SHASUM,
        "is_logged_in": is_logged_in(),
    }
    return flask.render_template("index.html", **context)
@app.route("/group/<path:group>")
def group_url(group):
    """Serve the app shell for normal navigation; reject AJAX hits on this path."""
    if flask.request.is_xhr:
        return "Invalid request", 400
    return index()
@app.route("/api/v1/<path:method>")
def api_proxy(method):
    """Proxy an AJAX request to the API server, preserving status and headers.

    Non-AJAX requests simply receive the index page.
    """
    # Relies on X-Requested-With header
    # http://flask.pocoo.org/docs/api/#flask.Request.is_xhr
    url_template = "api/v1/{0}"
    if flask.request.is_xhr:
        # forward method, path and query parameters with the stored OAuth token
        resource = OAuthClient.access_api_resource(
            url_template.format(method), access_token(),
            query_params=flask.request.args.items(),
            method=flask.request.method)
        response_text = resource.text
        if "text/html" in resource.headers["Content-Type"]:
            # per this stackoverflow thread
            # http://stackoverflow.com/questions/1061697/whats-the-easiest-way-to-escape-html-in-python
            response_text = cgi.escape(response_text).encode("ascii",
                "xmlcharrefreplace")
        # Include the original headers with response body.
        # The client side will know what to do with these.
        # There is a limit of 498 bytes per header, which will not be changed
        # https://code.google.com/p/googleappengine/issues/detail?id=407
        response = flask.make_response(json.dumps({
            "headers": dict(resource.headers),
            "response": response_text}))
        response.headers["X-Original-Status"] = resource.status_code
        response.headers["Content-Type"] = resource.headers["Content-Type"]
        return response
    else:
        return index()
# Begin the process of getting a request token from Khan.
@app.route("/oauth_get_request_token")
def oauth_get_request_token():
    """Start the OAuth dance by redirecting the user to the request-token URL."""
    # the callback carries the original destination so we can resume there
    callback = flask.url_for(
        "oauth_callback",
        continuation=flask.request.args.get("continue"),
        _external=True)
    request_token_url = OAuthClient.url_for_request_token(callback)
    logging.debug("Redirecting to request token URL: \n{0}".format(
        request_token_url))
    return flask.redirect(request_token_url)
# The OAuth approval flow finishes here.
# Query string version would have been preferable though it causes oauth
# signature errors.
@app.route("/oauth_callback")
@app.route("/oauth_callback/<path:continuation>")
def oauth_callback(continuation=None):
    """Finish the OAuth flow: verify the request token and fetch an access token."""
    oauth_token = flask.request.args.get("oauth_token", "")
    oauth_secret = flask.request.args.get("oauth_token_secret", "")
    oauth_verifier = flask.request.args.get("oauth_verifier", "")
    # rebuild the request token from the callback parameters
    request_token = oauth.OAuthToken(oauth_token, oauth_secret)
    request_token.set_verifier(oauth_verifier)
    flask.session["request_token_string"] = request_token.to_string()
    # We do this before we redirect so that there's no "limbo" state where the
    # user has a request token but no access token.
    access_token = OAuthClient.fetch_access_token(request_token)
    flask.session["oauth_token_string"] = access_token.to_string()
    # We're done authenticating, and the credentials are now stored in the
    # session. We can redirect back home.
    if continuation:
        return flask.redirect(continuation)
    else:
        return flask.redirect(flask.url_for("index"))
def access_token():
    """Return the session's OAuth access token, or None after resetting a stale session."""
    token_string = flask.session.get("oauth_token_string")
    if token_string:
        return oauth.OAuthToken.from_string(token_string)
    # Sanity check: a missing token means the session is inconsistent; reset it.
    clear_session()
    return None
def is_logged_in():
    """True when both OAuth tokens are stored in the session."""
    required = ("request_token_string", "oauth_token_string")
    return all(key in flask.session for key in required)
def clear_session():
    """Drop both OAuth tokens from the session (no-op if already absent)."""
    for key in ("request_token_string", "oauth_token_string"):
        flask.session.pop(key, None)
|
from django.urls import path
from .views import HomePagesView, AboutPageView
# URL routes for the static pages: /about/ and the home page.
urlpatterns = [
    path('about/', AboutPageView.as_view(), name='about'),
    path('', HomePagesView.as_view(), name='home'),
]
import numpy as np
import matplotlib.pyplot as plt
from code_base.classifiers.cnn import *
from code_base.data_utils import get_CIFAR2_data
from code_base.layers import *
from code_base.solver import Solver
# Load the two-class CIFAR subset and report the array shapes.
data = get_CIFAR2_data()
for k, v in data.items():
    print('%s: ' % k, v.shape)
# Three-layer conv net trained for one epoch with Adam.
model = ThreeLayerConvNet(num_classes=2, weight_scale=0.001, hidden_dim=500, reg=0.001)
solver = Solver(model, data,
    num_epochs=1, batch_size=50,
    update_rule='adam',
    optim_config={
        'learning_rate': 1e-3,
    },
    verbose=True, print_every=20)
print('start train')
solver.train()
print('finish train')
# Top panel: per-iteration training loss.
plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')
# Bottom panel: per-epoch train/validation accuracy.
plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
plt.xlabel('epoch')
plt.ylabel('accuracy')
plt.show()
|
from django.test import TestCase
from bookmarks.models import Bookmark, Folder
from django.urls import reverse
class BookmarkListViewTest(TestCase):
    """Tests for the bookmark list view: URL, template, and filter behavior."""
    @classmethod
    def setUpTestData(cls):
        # FIX: the folder was named "Folder11" while test_filter_by_folder
        # queries ?folder=Folder1 -- the typo made that filter match nothing.
        folder1 = Folder.objects.create(name="Folder1")
        folder2 = Folder.objects.create(name="Folder2")
        alphabets = ["A", "B", "C", "D", "E"]
        # A/B bookmarks live in folder1; C/D/E in folder2
        for alphabet in alphabets[:2]:
            Bookmark.objects.create(
                name=f"{alphabet} Bookmark",
                description="Sample description",
                url="https://google.co.in",
                folder=folder1,
            )
        for alphabet in alphabets[2:]:
            Bookmark.objects.create(
                name=f"{alphabet} Bookmark",
                description="Sample description",
                url="https://google.co.in",
                folder=folder2,
            )
    def test_view_url_exists_at_desired_location(self):
        response = self.client.get("/bookmarks/")
        self.assertEqual(response.status_code, 200)
    def test_url_accessible_by_name(self):
        response = self.client.get(reverse("bookmarks"))
        self.assertEqual(response.status_code, 200)
    def test_view_uses_correct_template(self):
        response = self.client.get(reverse("bookmarks"))
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed(response, "bookmarks/bookmark_list.html")
    def test_filter_by_ascending(self):
        response = self.client.get(reverse("bookmarks") + "?sort_by=ascending")
        self.assertEqual(response.context["filter"].qs[0].name, "A Bookmark")
        self.assertEqual(response.context["filter"].qs[1].name, "B Bookmark")
    def test_filter_by_descending(self):
        response = self.client.get(reverse("bookmarks") + "?sort_by=descending")
        self.assertEqual(response.context["filter"].qs[0].name, "E Bookmark")
        self.assertEqual(response.context["filter"].qs[1].name, "D Bookmark")
    def test_filter_by_name_contains(self):
        response = self.client.get(reverse("bookmarks") + "?name__icontains=B+Bookmark")
        self.assertEqual(response.context["filter"].qs[0].name, "B Bookmark")
    def test_filter_by_folder(self):
        # only folder1's bookmarks (A, B) should survive the filter
        response = self.client.get(
            reverse("bookmarks") + "?folder=Folder1&sort_by=ascending"
        )
        self.assertEqual(response.context["filter"].qs[0].name, "A Bookmark")
        self.assertEqual(response.context["filter"].qs[1].name, "B Bookmark")
        self.assertNotEqual(response.context["filter"].qs[0].name, "C Bookmark")
|
from rest_framework.permissions import BasePermission
from .models import Article
class IsAuthenticatedAndOwner(BasePermission):
    """Allow access only to authenticated users who own the target Article."""
    def has_permission(self, request, view):
        """Return True iff the requester is authenticated and owns the article."""
        if not request.user.is_authenticated:
            return False
        # FIX: Article.objects.get() raises DoesNotExist rather than returning
        # a falsy value, so the old `if not article` guard could never fire.
        # Use .filter().first() so a missing article denies access gracefully.
        # (Also dropped the leftover debug print statements.)
        article = Article.objects.filter(pk=view.kwargs['pk']).first()
        if article is None:
            return False
        return article.user.id == request.user.id
|
"""
Methods for manipulating winched profiler data.
Tuomas Karna 2013-01-17
"""
import numpy as np
from scipy.interpolate import interp1d
from crane.data import dataContainer
from crane.data import timeArray
def generateSat01ProfilerModData(obsWProfilerDC, modProfileDC):
    """Sample model profile data at the observed winched-profiler (time, depth) points.

    Args:
    obsWProfilerDC -- (dataContainer) observed profiler track data
    modProfileDC -- (dataContainer) model vertical profile time series
    Returns:
    dc -- (dataContainer) 'profiler'-type track data built from model values
    """
    # merge time ranges
    tmin = max(obsWProfilerDC.time.getDatetime(
        0), modProfileDC.time.getDatetime(0))
    tmax = min(obsWProfilerDC.time.getDatetime(-1),
        modProfileDC.time.getDatetime(-1))
    o = obsWProfilerDC.timeWindow(tmin, tmax, includeEnd=True)
    m = modProfileDC.timeWindow(tmin, tmax, includeEnd=True)
    # get all distinct time stamps in obs (it's a track)
    to = o.time.asEpoch().array
    tArr, ixTo = np.unique(to, return_index=True)
    # ix[i] = [start, end) slice of the obs arrays belonging to time stamp i
    ix = np.zeros((len(ixTo), 2), dtype=int)
    ix[:, 0] = ixTo
    ix[:-1, 1] = ixTo[1:]
    ix[-1, 1] = len(to)
    # align times: keep only stamps the model can be interpolated to
    tix = m.time.getAlignedTimeIndices(timeArray.timeArray(tArr, 'epoch'))
    tArr = tArr[tix]
    ta = timeArray.timeArray(tArr, 'epoch')
    m = m.interpolateInTime(ta, acceptNaNs=True)
    if not np.array_equal(tArr, m.time.array):
        # diagnostic output before failing (Python 2 print statements)
        print tArr.shape, m.time.array.shape
        print tArr.min(), tArr.max()
        print m.time.array.min(), m.time.array.max()
        raise Exception(
            'the time stamps of observation and model do not agree: ' +
            str(ta.getDatetime(0)) + ', ' + str(m.time.getDatetime(0)))
    if not m.zDependsOnTime or m.z.shape[1] != len(tArr):
        raise Exception(
            'model z coordinate does not have correct time dimension: ' +
            str(m.z.shape))
    # for all time stamps, process each vertical
    znew = []
    vnew = []
    tnew = []
    for i, t in enumerate(tArr):
        # get observation z coordinates (depth)
        zobs = o.z[0, ix[i, 0]:ix[i, 1]]
        # get model z coords
        zmod = m.z[:, i]
        # convert to depth
        zmod = zmod.max() - zmod # surf_val - z_coord
        zsortix = np.argsort(zmod)
        zmod = zmod[zsortix]
        # get model values
        vmod = m.data[:, 0, i][zsortix]
        # discard zobs values that are out of range
        goodIx = np.logical_and(zobs <= zmod.max(), zobs >= zmod.min())
        zobs = zobs[goodIx]
        # do the interpolation
        vint = interp1d(zmod, vmod)(zobs)
        znew.append(zobs)
        vnew.append(vint)
        tnew.append(t * np.ones_like(zobs))
    # flatten the per-time-stamp pieces into flat track arrays
    znew = np.concatenate(tuple(znew))
    vnew = np.concatenate(tuple(vnew))
    tnew = np.concatenate(tuple(tnew))
    ta = timeArray.timeArray(tnew, 'epoch', acceptDuplicates=True)
    # create dataContainer
    z = znew[None, :]
    data = vnew[None, None, :]
    meta = m.getMetaData()
    meta['dataType'] = 'profiler'
    # meta.pop( 'msldepth', None ) # TODO
    dc = dataContainer.dataContainer(
        '',
        ta,
        m.x[0],
        m.y[0],
        z,
        data,
        o.fieldNames,
        m.coordSys,
        meta,
        acceptNaNs=True)
    return dc
def generateSat01ProfilerObsData(depDC, varDC):
"""Generate winched profiler data for Saturn01.
Observation data is binned in time and depth to produce vertical columns of
data at regular intervals.
Args:
depDC -- (dataContainer) time series of profiler depth [m below surf]
varDC -- (dataContainer) time series of measured variable
Returns:
dc -- (dataContainer) track data with correct depth
"""
tReso = 15 * 60 # target time step (15min to compare with model)
zReso = 0.25 # target vertical resolution
# interpolate pressure data on instrument time
depDC, varDC = depDC.alignTimes(varDC)
# bin data in bot time and depth
# round start/end time to nearest sharp 15min
t = depDC.time.asEpoch()
z = depDC.data.squeeze()
varArr = varDC.data.squeeze()
ts = t[0]
te = t[-1]
binDt = tReso
ts = round(ts / binDt) * binDt
te = round(te / binDt) * binDt
# divide t to tReso bins
tBins = np.arange(ts - binDt / 2, te + binDt * 3 / 2, binDt) # bin bnds
ixBin = np.digitize(t, tBins) - 1 # bin index for each time stamp
# generate list of time stamps for each bin TODO slow, optimize
binMembers = [[] for _ in tBins[1:]]
for i, ibin in enumerate(ixBin):
binMembers[ibin].append(i)
#tmp, bix = np.unique( ixBin, return_index=True )
#ix = np.zeros((len(bix),2),dtype=int)
#ix[:,0] = bix
#ix[:-1,1] = bix[1:]
#ix[-1,1] = len(ixBin)
tNew = []
zNew = []
varNew = []
for i in range(ixBin[0], ixBin[-1]):
tCenter = (tBins[i] + tBins[i + 1]) / 2
# find min/max z
iii = binMembers[i]
if len(iii) == 0:
continue
zBin = z[iii]
tBin = t[iii]
varBin = varArr[iii]
# generate z grid at bin mean time
zMin = zBin.min()
zMax = zBin.max()
zGrid = np.arange(zMin, zMax + zReso, zReso)
tGrid = np.ones_like(zGrid) * tCenter
# Catch cases where number of hist bins < zGrid points
zBins = np.arange(zMin - zReso / 2, zMax + 2 * zReso, zReso)
if zBins[-2] > zGrid[-1]:
zBins = np.arange(zMin - zReso / 2, zMax + 3 / 2 * zReso, zReso)
histW, b = np.histogram(zBin, zBins, weights=varBin)
hist, b = np.histogram(zBin, zBins)
goodIx = hist > 0
varGrid = histW[goodIx] / hist[goodIx]
tGrid = tGrid[goodIx]
zGrid = zGrid[goodIx]
# store data
tNew.append(tGrid)
zNew.append(zGrid)
varNew.append(varGrid)
if len(tNew) == 0:
print 'Could not generate obs profiler data'
return None
tNew = np.concatenate(tuple(tNew))
zNew = np.concatenate(tuple(zNew))
varNew = np.concatenate(tuple(varNew))
# create new dataContainer
tNew = timeArray.timeArray(tNew, 'epoch', acceptDuplicates=True)
z = np.reshape(zNew, (1, -1))
data = np.reshape(varNew, (1, 1, -1))
var = varDC.fieldNames[0]
meta = varDC.getMetaData()
# meta.pop( 'msldepth', None ) # TODO
meta['dataType'] = 'profiler'
dc = dataContainer.dataContainer(
'', tNew, varDC.x, varDC.y, z, data, [var],
varDC.coordSys, metaData=meta)
return dc
|
import pandas as pd
from utils_data import create_calibrated_df
from utils_mturk import get_list_id_within_doc, prepare_df_for_evaluation, perform_evaluation
# Pairwise MTurk evaluation of calibrated BERT predictions (RACE common-sense).
df_results_mturk = pd.read_csv('data/pairwise_race_cs.csv')


def _evaluate(prediction_files, output_filename):
    """Calibrate the predictions in *prediction_files*, align them with the
    MTurk pairwise judgements, and write the evaluation report to
    *output_filename*."""
    df_predictions = create_calibrated_df(prediction_files)
    df_predictions['id'] = get_list_id_within_doc(df_predictions)
    df_for_evaluation = prepare_df_for_evaluation(df_results_mturk, df_predictions)
    # Context manager guarantees the report file is closed even if the
    # evaluation raises (the original leaked the handle on error).
    with open(output_filename, "w") as output_file:
        perform_evaluation(df_for_evaluation, output_file=output_file)


# Single models, one per training seed.
for random_seed in [0, 3, 42]:
    _evaluate(['output_bert_seed%d_test.csv' % random_seed],
              'output/pairwise_race_cs_bert_%d_test.txt' % random_seed)

# Ensemble of the three seeds.
_evaluate(['output_bert_seed0_test.csv', 'output_bert_seed3_test.csv',
           'output_bert_seed42_test.csv'],
          'output/pairwise_race_cs_bert_ensemble_test.txt')
|
import os
from libavg.utils import getMediaDir, createImagePreviewNode
from . import schubser
__all__ = [ 'apps', ]
def createPreviewNode(maxSize):
    """Return an image node previewing this app's bundled preview.png."""
    previewPath = os.path.join(getMediaDir(__file__), 'preview.png')
    return createImagePreviewNode(maxSize, absHref = previewPath)


# Registry consumed by the libavg app launcher.
apps = (
    {'class': schubser.Schubser,
     'createPreviewNode': createPreviewNode},
)
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import unicode_literals
from abc import (
ABCMeta,
abstractmethod,
)
import os
import sys
from mach.mixin.logging import LoggingMixin
from ..frontend.data import SandboxDerived
from .configenvironment import ConfigEnvironment
class BuildBackend(LoggingMixin):
    """Abstract base class for build backends.

    A build backend is merely a consumer of the build configuration (the output
    of the frontend processing). It does something with said data. What exactly
    is the discretion of the specific implementation.
    """

    __metaclass__ = ABCMeta

    def __init__(self, environment):
        """Bind the backend to a ConfigEnvironment and run the _init hook."""
        assert isinstance(environment, ConfigEnvironment)
        self.populate_logger()
        self.environment = environment
        self._init()

    def _init(self):
        """Hook point for child classes to perform actions during __init__.

        This exists so child classes don't need to implement __init__.
        """
        # BUG FIX: the original signature was ``def _init():`` (no self), so
        # the ``self._init()`` call above raised TypeError on instantiation.

    def consume(self, objs):
        """Consume a stream of TreeMetadata instances.

        This is the main method of the interface. This is what takes the
        frontend output and does something with it.

        Child classes are not expected to implement this method. Instead, the
        base class consumes objects and calls methods (possibly) implemented by
        child classes.
        """
        for obj in objs:
            self.consume_object(obj)

        # Write out a file indicating when this backend was last generated.
        age_file = os.path.join(self.environment.topobjdir,
            'backend.%s.built' % self.__class__.__name__)
        with open(age_file, 'a'):
            os.utime(age_file, None)

        self.consume_finished()

    @abstractmethod
    def consume_object(self, obj):
        """Consumes an individual TreeMetadata instance.

        This is the main method used by child classes to react to build
        metadata.
        """

    def consume_finished(self):
        """Called when consume() has completed handling all objects."""
|
from __future__ import division
import numpy as np
import copy
from cwc.evaluation.metrics import average_cross_entropy
from sklearn.svm import SVC
from sklearn.preprocessing import label_binarize
from scipy.optimize import minimize
from ovo_classifier import OvoClassifier
from confident_classifier import ConfidentClassifier
class Ensemble(object):
    """Weighted voting ensemble over bootstrapped base classifiers.

    Members are trained on bootstrap resamples, assigned weights (by a
    constrained least-squares margin fit or by mean classifier confidence,
    depending on the base classifier), and greedily pruned to the
    weight-ordered prefix with the best training accuracy.

    FIX: every ``np.alen`` call was replaced by ``len`` — ``np.alen`` was
    deprecated in NumPy 1.18 and removed in 1.23, so the original crashed on
    modern NumPy.
    """

    def __init__(self, base_classifier=OvoClassifier(), n_ensemble=1,
                 bootstrap_percent=0.75, lambd=0.0):
        # NOTE(review): the default OvoClassifier() is instantiated once at
        # import time and shared between Ensembles; it is deepcopied before
        # each fit, so sharing is harmless in practice.
        self._base_classifier = base_classifier
        self._classifiers = []             # fitted ensemble members
        self._weights = []                 # per-member vote weights
        self._n_ensemble = n_ensemble      # requested size (before pruning)
        self._percent = bootstrap_percent  # bootstrap sample fraction
        self._lambda = lambd               # L2 penalty in the weight fit

    def fit(self, X, y, xs=None, ys=None):
        """Train the ensemble; returns the bootstrap samples used so a caller
        can retrain another ensemble on identical resamples."""
        init = xs is None
        if init:
            xs = []
            ys = []
        for c_index in np.arange(self._n_ensemble):
            if init:
                x_train, y_train = bootstrap(X, y, self._percent)
                xs.append(x_train)
                ys.append(y_train)
            else:
                x_train = xs[c_index]
                y_train = ys[c_index]
            c = copy.deepcopy(self._base_classifier)
            c.fit(x_train, y_train, len(np.unique(y)))
            self._classifiers.append(c)
        self.prune_ensemble(X, y)
        return xs, ys

    def prune_ensemble_old(self, X, y):
        """Legacy pruning variant (recomputes votes from scratch for every
        prefix length); superseded by prune_ensemble, kept for reference."""
        predictions, confidences = self.get_weights(X, y)
        sorted_indices = np.argsort(1.0/(self._weights + 1.0))
        n_classes = len(np.unique(y))
        n = len(X)
        accuracies = np.zeros(self._n_ensemble+1)
        for j in np.arange(self._n_ensemble+1):
            votes = np.zeros((n, n_classes))
            for c_index in np.arange(0, j):
                i = sorted_indices[c_index]
                pred = predictions[:, i]
                conf = confidences[:, i]
                votes[range(n), pred] += conf * self._weights[i]
            if not np.all(votes == 0.0):
                accuracies[j] = np.mean(votes.argmax(axis=1) == y)
        final_j = np.argmax(accuracies)
        self._classifiers = [self._classifiers[sorted_indices[i]] for i in
                             range(final_j)]
        self._weights = self._weights[sorted_indices[:final_j]]
        self._n_ensemble = final_j

    def prune_ensemble(self, X, y):
        """Keep the best prefix of members: accumulate votes in descending
        weight order and truncate where training accuracy peaks."""
        predictions, confidences = self.get_weights(X, y)
        # argsort of 1/(w+1) orders members by descending weight.
        sorted_indices = np.argsort(1.0/(self._weights + 1.0))
        n_classes = len(np.unique(y))
        n = len(X)
        accuracies = np.zeros(self._n_ensemble)
        votes = np.zeros((n, n_classes))
        for c_index in np.arange(self._n_ensemble):
            i = sorted_indices[c_index]
            pred = predictions[:, i]
            conf = confidences[:, i]
            votes[range(n), pred] += conf * self._weights[i]
            accuracies[c_index] = np.mean(votes.argmax(axis=1) == y)
        final_j = np.argmax(accuracies) + 1
        self._classifiers = [self._classifiers[sorted_indices[i]] for i in
                             range(final_j)]
        self._weights = self._weights[sorted_indices[:final_j]]
        self._n_ensemble = final_j

    def get_weights(self, X, y):
        """Dispatch to the weighting scheme matching the base classifier."""
        if type(self._base_classifier) is not ConfidentClassifier\
                and self._base_classifier.classifier_type is not ConfidentClassifier:
            return self.get_weights_li(X, y)
        else:
            return self.get_weights_bc(X, y)

    def get_weights_li(self, X, y):
        """Fit member weights by constrained least squares on signed
        confidence-scaled margins (weights in [0,1], summing to 1)."""
        n = len(y)
        predictions = np.zeros((n, self._n_ensemble))
        confidences = np.zeros((n, self._n_ensemble))
        margins = np.zeros((n, self._n_ensemble))
        for c_index, c in enumerate(self._classifiers):
            res = c.predict(X)
            predictions[:, c_index] = res[0]
            # +1 for a correct prediction, -1 otherwise.
            marg = (res[0] == y).astype(float)
            marg[marg == 0] = -1
            margins[:, c_index] = marg
            confidences[:, c_index] = res[1]
        marg_pred = margins * confidences
        f = lambda w: np.sum(np.power(1.0 - (w*marg_pred).sum(axis=1),
                             2.0)) + self._lambda*np.linalg.norm(w)
        w0 = np.ones(self._n_ensemble)/self._n_ensemble
        cons = ({'type': 'eq', 'fun': lambda w: 1.0 - np.sum(w)})
        bounds = [(0, 1) for c_index in range(self._n_ensemble)]
        res = minimize(f, w0, bounds=bounds, constraints=cons)
        self._weights = res.x
        return predictions.astype(int), confidences

    def get_weights_bc(self, X, y):
        """Weight members by their mean non-reject ('check') probability."""
        n = len(y)
        predictions = np.zeros((n, self._n_ensemble))
        confidences = np.zeros((n, self._n_ensemble))
        checks = np.zeros(self._n_ensemble)
        for c_index, c in enumerate(self._classifiers):
            res = get_predictions(c, X)
            predictions[:, c_index] = res[0]
            confidences[:, c_index] = res[1]
            checks[c_index] = np.mean(res[2])
        self._weights = checks / np.sum(checks)
        return predictions.astype(int), confidences

    def predict(self, X):
        """Predict labels by confidence- and weight-scaled majority vote."""
        n = len(X)
        for c_index, c in enumerate(self._classifiers):
            res = get_predictions(c, X)
            pred = res[0]
            conf = res[1]
            if c_index == 0:
                votes = np.zeros((n, c.n_classes))
            votes[range(n), pred] += conf * self._weights[c_index]
        return votes.argmax(axis=1)

    def predict_proba(self, X):
        """Return vote-normalised class probabilities; rows with no votes
        fall back to the uniform distribution."""
        n = len(X)
        for c_index, c in enumerate(self._classifiers):
            res = get_predictions(c, X)
            pred = res[0]
            conf = res[1]
            if c_index == 0:
                votes = np.zeros((n, c.n_classes))
            votes[range(n), pred] += conf * self._weights[c_index]
        proba = votes/votes.sum(axis=1).reshape(-1,1)
        proba[np.isnan(proba)] = 1/proba.shape[1]
        return proba

    def accuracy(self, X, y):
        """Mean accuracy of predict() on (X, y)."""
        predictions = self.predict(X)
        return np.mean(predictions == y)

    def log_loss(self, X, y):
        """Average cross-entropy of predict_proba() against one-hot y."""
        y_pred = self.predict_proba(X)
        y_actu = label_binarize(y, range(y_pred.shape[1]))
        return average_cross_entropy(y_actu,y_pred)
def get_predictions(c, X):
    """Normalise the prediction interface of the supported base classifiers
    to (predictions, confidences[, checks])."""
    if type(c) is OvoClassifier:
        return c.predict(X)
    elif type(c) is ConfidentClassifier:
        n = len(X)  # np.alen was removed from modern NumPy; len() is equivalent
        probas = c.predict_proba(X)
        # Last probability column is the reject/background class.
        predictions = np.argmax(probas[:, :-1], axis=1)
        confidences = probas[range(n), predictions]
        checks = 1.0 - probas[:, -1]
        return [predictions, confidences, checks]
    # NOTE(review): any other classifier type silently yields None, matching
    # the original behaviour.
def bootstrap(x, y, percent):
    """Draw ``round(len(x) * percent)`` paired samples from (x, y) with
    replacement and return the resampled (x, y) arrays."""
    n = len(x)  # np.alen was removed from modern NumPy; len() is equivalent
    indices = np.random.choice(n, int(np.around(n * percent)))
    return x[indices], y[indices]
|
import cv2 as cv
import numpy as np
# Load the sample image and show it (expects i.png in the working directory).
img=cv.imread('i.png')
cv.imshow('Picture',img)
## Converting to GrayScale
#gray= cv.cvtColor(img,cv.COLOR_BGR2GRAY)
#cv.imshow('Grayscale',gray)
## blur
#blur=cv.blur(img,(5,5),borderType=cv.BORDER_DEFAULT)
#cv.imshow('Blur',blur)
## Edge Cascade
#canny=cv.Canny(img,125,175)
#cv.imshow('Canny Edges',canny)
##Resize
#resized=cv.resize(img,(250,250),interpolation=cv.INTER_AREA)
#cv.imshow('Resized',resized)
##crop
# Crop via NumPy slicing: rows 50-199, columns 200-399.
cropped=img[50:200,200:400]
cv.imshow('cropped',cropped)
# Block until a key is pressed so the windows stay open.
cv.waitKey(0)
from nitrogen_db_client import NitrogenDbClient
from odd import Odd
from pro_match import ProMatch
def embed_all():
    """Denormalise odds onto matches: for every pro match, attach all of its
    odds documents to ``match.odds`` and persist the match.

    Python 2 / MongoDB-style cursors; prints are progress diagnostics.
    """
    cursor = ProMatch.get_all_matches()
    print cursor.count()
    for m in cursor:
        print "here"
        match = ProMatch.from_dict(m)
        odds_cursor = Odd.get_odds_for_match(match.id)
        for o in odds_cursor:
            match.odds.append(o)
        match.save()

# Runs immediately on import/execution (no __main__ guard in the original).
embed_all()
|
from flask import Flask, render_template,request
import streamlit as st
def praxis():
    """Render a minimal Streamlit form that echoes a student's name and
    roll number back on button press."""
    st.title("Learning Streamlit")
    name = st.text_input("student_name","Type here")
    num = st.text_input("roll_no","Type here")
    # (Removed the original's unused ``result = ""`` local.)
    if st.button("Show Result"):
        st.success(f"The Student name is {name} with number {num}")


if __name__ =="__main__":
    praxis()
|
from PyQt5 import QtCore, QtGui, QtWidgets
import pandas as pd
import datetime
from talib import SMA,STDDEV
import numpy as np
class MABolClass():
    """Simple equity backtests (buy-and-hold; lower-band re-entry with
    stop-loss / take-profit) whose result tables are rendered into the UI's
    QTableWidget.

    ``KBar`` is a dict-like of aligned arrays with at least the keys
    'date', 'open' and 'close'.
    """

    def BuyAndHold(self, ui, KBar):
        """Buy at the open of a fixed entry date, sell at the close of a
        fixed exit date, and report the single round trip."""
        self.ui = ui
        # Initial capital.
        InitCapital=1000000
        OrderPrice = None
        OrderQty = 0
        CoverPrice = None
        # Hard-coded entry date.
        OrderDate = datetime.datetime.strptime('2004/1/5' , '%Y/%m/%d')
        # Hard-coded exit date.
        CoverDate = datetime.datetime.strptime('2018/12/28' , '%Y/%m/%d')
        for i in range(len(KBar['date'])):
            Date=KBar['date'][i]
            # Buy at the open on the entry date.
            if Date == OrderDate and OrderQty == 0 :
                OrderPrice = KBar['open'][i]
                # Lot size from the previous close (1000 shares per lot).
                # NOTE(review): at i == 0 this reads KBar['close'][-1];
                # assumes the entry date is not the very first bar.
                OrderQty = int(InitCapital/(KBar['close'][i-1])/1000)
            # Sell at the close on the exit date.
            if Date == CoverDate and OrderQty != 0 :
                CoverPrice = KBar['close'][i]
                break
        print( '買進時間:', OrderDate.strftime('%Y/%m/%d'),'買進價格:',OrderPrice , '買進數量:' ,OrderQty )
        print( '售出時間:', CoverDate.strftime('%Y/%m/%d') , '售出價格:' , CoverPrice )
        print( '獲利:',(CoverPrice-OrderPrice)*OrderQty*1000 )
        data = {
            '買進時間:': [OrderDate.strftime('%Y/%m/%d')],
            '買進價格:': [OrderPrice],
            '買進數量:': [OrderQty],
            '售出時間:': [CoverDate.strftime('%Y/%m/%d')],
            '售出價格:': [CoverPrice],
            '獲利:': [(CoverPrice-OrderPrice)*OrderQty*1000],
        }
        result = pd.DataFrame(data)
        self.change_data(result)

    def StopProfitLoss(self, ui, KBar):
        """Re-entry strategy: buy when the close crosses back above the
        lower band (120-bar MA minus 0.75 std); exit on a -20% stop-loss,
        a +60% take-profit, or forcibly at the end of the data."""
        self.ui = ui
        import datetime
        from talib import SMA,STDDEV
        import numpy as np
        # Moving average, standard deviation and derived lower band.
        KBar['MA'] = SMA(KBar['close'], timeperiod= 120)
        KBar['STD'] = STDDEV(KBar['close'], timeperiod= 120)
        KBar['BD'] = KBar['MA']-0.75*KBar['STD']
        # Initial capital.
        InitCapital=1000000
        # Entry price / quantity.
        OrderPrice = None
        OrderQty = 0
        # Exit price / date.
        CoverPrice = None
        CoverDate = None
        # Stop-loss and take-profit levels.
        StopLoss = None
        TakeProfit = None
        # Per-trade profits and trade count.
        TotalProfit = []
        TotalTreadeNum = 0
        # BUG FIX: the original did ``result = result.append(data)`` with
        # ``result`` never initialised (NameError), and DataFrame.append was
        # removed in pandas 2.x. Accumulate one row per trade event instead.
        result = pd.DataFrame()
        last = len(KBar['date']) - 1
        for i in range(1,len(KBar['date'])):
            Date = KBar['date'][i]
            Close = KBar['close'][i]
            LastClose = KBar['close'][i-1]
            BD = KBar['BD'][i]
            LastBD = KBar['BD'][i-1]
            status = []
            # Entry: close crosses up through the lower band. The ``i < last``
            # guards keep the i+1 lookups in range (the original raised
            # IndexError on a signal at the final bar); a final-bar position
            # is closed by the end-of-data branch below.
            if LastClose < LastBD and Close >= BD and OrderQty == 0 and i < last :
                OrderDate = KBar['date'][i+1]
                OrderPrice = KBar['open'][i+1]
                OrderQty = int(InitCapital/(Close)/1000)
                StopLoss = OrderPrice *0.8
                TakeProfit = OrderPrice *1.6
                status = ' 低點 '
                print( '買進時間:', OrderDate.strftime('%Y/%m/%d') , '買進價格:',OrderPrice , '買進數量:' ,OrderQty )
            # Stop-loss exit.
            elif OrderQty != 0 and Close < StopLoss and i < last :
                CoverDate = KBar['date'][i+1]
                CoverPrice = KBar['open'][i+1]
                Profit = (CoverPrice-OrderPrice)*OrderQty*1000
                TotalProfit += [Profit]
                TotalTreadeNum += 1
                OrderQty = 0
                status = ' 停損 '
                print( '售出時間:', CoverDate.strftime('%Y/%m/%d') , '售出價格:' , CoverPrice ,'虧損:',Profit )
            # Take-profit exit.
            elif OrderQty != 0 and Close > TakeProfit and i < last :
                CoverDate = KBar['date'][i+1]
                CoverPrice = KBar['open'][i+1]
                Profit = (CoverPrice-OrderPrice)*OrderQty*1000
                TotalProfit += [Profit]
                TotalTreadeNum += 1
                OrderQty = 0
                status = ' 停利 '
                print( '售出時間:', CoverDate.strftime('%Y/%m/%d') , '售出價格:' , CoverPrice ,'獲利:',Profit )
            # End of backtest window: close any open position at the close.
            elif OrderQty != 0 and i == last:
                CoverDate = Date
                CoverPrice = Close
                Profit = (CoverPrice-OrderPrice)*OrderQty*1000
                TotalProfit += [Profit]
                TotalTreadeNum += 1
                OrderQty = 0
                status = ' 結束 '
                print( '售出時間:', CoverDate.strftime('%Y/%m/%d') , '售出價格:' , CoverPrice ,'盈虧:',Profit )
            # Record a row only for bars with a trade event; the original
            # built the row unconditionally, which also referenced OrderDate
            # before any entry ever happened.
            if status:
                data = {
                    '狀態': [status],
                    '買進時間': [OrderDate.strftime('%Y/%m/%d')],
                    '買進價格': [OrderPrice],
                    '買進數量': [OrderQty],
                    '售出時間': [CoverDate.strftime('%Y/%m/%d') if CoverDate is not None else ''],
                    '售出價格': [CoverPrice],
                    '獲利': [(CoverPrice-OrderPrice)*OrderQty*1000 if CoverPrice is not None else 0],
                    '總盈虧': [sum(TotalProfit)],
                    '交易次數': [TotalTreadeNum],
                }
                result = pd.concat([result, pd.DataFrame(data)], ignore_index=True)
        print( '交易次數:' , TotalTreadeNum , '總盈虧:', sum(TotalProfit) )
        self.change_data(result)
        import matplotlib.pyplot as plt  # performance chart
        ax = plt.subplot(111)            # new axes
        ax.plot( np.cumsum(TotalProfit), 'k-' )  # cumulative profit curve
        plt.show()

    def change_data(self, df):
        """Render *df* (headers, index and cells) into the UI's QTableWidget."""
        columns_num = df.shape[1]   # number of DataFrame columns
        index_num = df.shape[0]     # number of DataFrame rows
        df_columns = df.columns     # column labels
        df_index = df.index         # index labels
        self.ui.tableWidget.setColumnCount(columns_num)  # resize widget columns
        self.ui.tableWidget.setRowCount(index_num)       # resize widget rows
        _translate = QtCore.QCoreApplication.translate
        # Column headers.
        for c in range(columns_num):
            item = QtWidgets.QTableWidgetItem()
            self.ui.tableWidget.setHorizontalHeaderItem(c, item)
            item = self.ui.tableWidget.horizontalHeaderItem(c)
            item.setText(_translate("MainWindow", df_columns[c]))
        # Row headers.
        for i in range(index_num):
            item = QtWidgets.QTableWidgetItem()
            self.ui.tableWidget.setVerticalHeaderItem(i, item)
            item = self.ui.tableWidget.verticalHeaderItem(i)
            item.setText(_translate("MainWindow", str(df_index[i]) ))
        # Cell contents.
        for c in range(columns_num):
            for i in range(index_num):
                item = QtWidgets.QTableWidgetItem()
                self.ui.tableWidget.setItem(i, c, item)
                item = self.ui.tableWidget.item(i, c)
                item.setText(_translate("MainWindow", str(df.iloc[i, c])))
|
from math import sqrt
def inputvars():
    """Read input.txt: line 1 is a case count, line 2 holds two integers.

    Returns:
        tuple[int, int]: the two values from the second line.
    """
    # Context manager fixes the original's leaked file handle.
    with open('input.txt') as f:
        num = int(f.readline())  # case count; parsed for validation, unused downstream
        numlist = f.readline().split()
    return int(numlist[0]), int(numlist[1])
def outputvars(num, output):
    """Write ``num`` entries of *output* to output.txt in Code Jam format
    ('Case #1:' header, one result per line)."""
    # Context manager guarantees the file is flushed and closed.
    with open('output.txt', 'w') as f:
        f.write('Case #1:\n')
        for x in range(num):
            f.write(str(output[x]) + '\n')
def comp(num):
    """Return the smallest divisor of *num* in [2, sqrt(num)], or -1 when
    none exists (num is prime or too small to have one)."""
    return next((d for d in range(2, int(sqrt(num) + 1)) if num % d == 0), -1)
def main():
    """Generate 'jamcoins': binary strings of the given length (first and
    last bit set) whose value is composite when read in every base 2..10.
    Each output line is the coin followed by one witness divisor per base.
    """
    length, numcoins = inputvars()
    output = []
    # Smallest candidate: 1 0...0 1; iterate odd values so both end bits stay 1.
    curnum = int('1' + '0' * (length - 2) + '1', 2)
    maxnum = int('1' * length, 2)
    while len(output) < numcoins and curnum < maxnum:
        digits = bin(curnum)[2:]
        outputstr = [digits]
        for base in range(2, 11):
            factor = comp(int(digits, base))
            if factor == -1:
                break  # no small divisor in this base: not a jamcoin
            outputstr.append(str(factor))
        if len(outputstr) > 9:  # divisors found for all nine bases
            output.append(' '.join(outputstr))
        curnum += 2
    outputvars(numcoins, output)


# Guard added so importing this module no longer triggers file I/O.
if __name__ == '__main__':
    main()
# Compute the area of a circle from a user-supplied radius (UI in Portuguese).
print ('Calculando a área do círculo.')
print()
# Coarse approximation of pi kept as in the original exercise (not math.pi).
pi = 3.14
raio = float(input('Para começar o calculo da área do circulo, informe o raio em cm: '))
print(f'Já sabemos que PI é uma constante e vale {pi}.')
# Area = pi * r^2.
A = pi * (raio * raio)
print()
print(f'A área do circulo com raio = {raio}² multiplicado por PI ({pi}) é igual a {A}cm²')
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Commands.
"""
from ergo.core import Command, COMMANDS
from aochat.aoml import *
### CALLBACKS ##################################################################
def help_callback(chat, player, args):
    """Show help for one command, or the clickable list of all commands."""
    if not args or args[0] not in COMMANDS:
        return "Type 'help <command>' for command specified help: %s." % text(help_help(), "available commands")
    command = COMMANDS[args[0]]
    body = "Help on %s command:%s%s%s%s" % (
        command.name, br(2),
        command.desc, br(2),
        command.help() if command.help else "No help available."
    )
    return "Help on command: %s" % text(body, command.name)
def help_help():
    """Build the clickable list of every command except 'help' itself."""
    # BUG FIX: the original return statement ended with a stray trailing
    # comma, so the function returned a 1-tuple instead of a string.
    return "Available commands:%s%s" % (
        br(),
        br().join(map(lambda name: text(COMMANDS[name].help(), name), filter(lambda name: name != "help", sorted(COMMANDS))))
    )
def join_callback(chat, player, args):
    # Invite the requesting player to the bot's private channel.
    chat.private_channel_invite(player.id)
def join_help():
    """Describe the 'join' command."""
    message = "Bot will invite you to private channel."
    return message
def leave_callback(chat, player, args):
    # Remove the requesting player from the bot's private channel.
    chat.private_channel_kick(player.id)
def leave_help():
    """Describe the 'leave' command."""
    message = "Bot will kick you from private channel."
    return message
def ban_callback(chat, player, args):
    # TODO: ban handling is not implemented yet.
    pass
def ban_help():
    """Describe the 'ban' command (no help text yet: not implemented)."""
    return ""
### COMMANDS ###################################################################
# Command registry instances: the variable name doubles as the chat keyword.
# NOTE: ``help`` deliberately shadows the builtin at module level.
help = Command(
    name = "help",
    desc = "Usage information",
    callback = help_callback,
    help = help_help,
)

join = Command(
    name = "join",
    desc = "Join private channel",
    callback = join_callback,
    help = join_help,
)

leave = Command(
    name = "leave",
    desc = "Leave private channel",
    callback = leave_callback,
    help = leave_help,
)
|
import matplotlib
import matplotlib.pyplot as plt
import json
import os
matplotlib.rcParams['font.family'] = "Times New Roman"
matplotlib.rcParams['font.size'] = 10
nbins=8
# fig_dir = os.getcwd()
fig_dir = "/Users/crankshaw/ModelServingPaper/osdi_2016/figs"
# Load measured (cache miss rate, error) pairs.
# BUG FIX: the file handle previously leaked; the context manager closes it.
with open('../results/cache_error.json','r') as f:
    res = json.load(f)
cache_miss_rate = res['cache_miss']
error_list = res['error']
# Scatter of error vs. cache miss rate, sized for a two-column paper figure.
fig,ax = plt.subplots()
plt.locator_params(nbins=nbins)
ax.scatter(cache_miss_rate, error_list,color="black", s=10)
#ax.plot([0.0,1.0],[error, error],'--', label="non-cache")
ax.set_xlabel('Cache Miss Rate')
ax.set_ylabel('Error')
#ax.set_ylim((0,ax.get_ylim()[1]))
# ax.legend()
ax.set_xlim(-0.1, 1.1)
ax.set_ylim(0.28,0.415)
fig.set_size_inches(3*1.5,1.0*1.5)
fig.savefig(fig_dir + '/' + 'cache_miss' +'.pdf',bbox_inches='tight')
|
def find(N, K):
    """Return element K (1-based) of the Thue–Morse sequence truncated to
    2**(N-1) elements — the length the original doubling loop produced
    after its N-1 passes.

    The sequence starts [0] and each pass appends the bit-complement of the
    data so far; hence element k (0-based) equals the parity of the number
    of set bits in k. Computing that parity directly replaces the original
    implementation, which materialised all 2**(N-1) elements (gigabytes of
    RAM for N == 30).

    Raises:
        IndexError: if K lies outside [1, 2**(N-1)], mirroring the
            original's out-of-range list access.
    """
    length = 1 << max(N - 1, 0)
    if not 1 <= K <= length:
        raise IndexError('list index out of range')
    return bin(K - 1).count('1') & 1


if __name__ == '__main__':
    print(find(30, 434991989))
|
#!/usr/bin/env python3
import logging
import math
from math_helpers import pgcde
from prime import _getPrime
from multiprocessing import Process, Pipe, cpu_count, Queue
import base64
class RSA(object):
    """RSA main class: key generation, block splitting/padding and raw
    modular-exponentiation encrypt/decrypt over hex-encoded blocks.

    NOTE(review): this is a teaching implementation — textbook RSA without
    OAEP, and the private-exponent derivation below is flagged unverified
    by the author.
    """
    def __init__(self, size=4096):
        # prime number
        self.p, self.q = 2, 3
        # encryption key
        self.e = 65537
        # decryption key
        self.d = 0
        # modulus
        self.n = 0
        # block size for encryption assuming 4096 / 8 which is 512 quite correct maybe a little too big
        # as python handle byte more efficiently than bits we directly divide by 64
        self.blockSize = int(size / (8*8))
        self.size = size
        # logger to see what's going on
        self.logger = logging.getLogger(__name__)

    def getPrimes(self, numberOfKeys=2):
        """Race one _getPrime worker per CPU through a pipe; the first prime
        delivered wins and the rest are terminated. Stores (self.p, self.q)."""
        # TODO: place our call to one of the function in prime
        numbers = []
        for _ in range(numberOfKeys):
            try:
                processes = []
                (pipe_recv, pipe_send) = Pipe(duplex=False)
                nbOfJobs = int(cpu_count())
                self.logger.debug(
                    "Prime generation | number of jobs {}".format(nbOfJobs))
                processes = [Process(target=_getPrime, args=(
                    pipe_send, self.size)) for _ in range(nbOfJobs)]
                for process in processes:
                    process.daemon = True
                    process.start()
                self.logger.debug("Starting processes")
                # Blocks until the first worker delivers a prime.
                numbers.append(pipe_recv.recv())
            finally:
                pipe_recv.close()
                pipe_send.close()
                # Reap the losers of the race.
                for process in processes:
                    if process.is_alive():
                        process.terminate()
        (self.p, self.q) = numbers

    def getKeys(self):
        """Derive n, phi, e and d from the generated primes.

        NOTE(review): d is taken from the extended-gcd coefficient as
        (phi + pgcde(e, phi)[1]) % phi — this assumes pgcde returns
        (gcd, u, ...) with u*e ≡ gcd (mod phi). The author marked it
        "à verifier"; confirm against math_helpers.pgcde before relying on
        decryption.
        """
        print(self.e)
        self.n = self.p * self.q
        self.phi = (self.p - 1) * (self.q - 1)
        # Bump e until it is coprime with phi.
        while pgcde(self.e, self.phi)[0] != 1:
            self.e += 1
        print(self.e)
        """
        'd' à verifier
        """
        self.d = (self.phi + (pgcde(self.e, self.phi)[1])) % self.phi
        print(self.d)
        # Find e
        # Find d
        pass

    def encrypt(self, m):
        """handle hex stream"""
        # m is a hex string; returns the integer ciphertext m^e mod n.
        return pow(int(m, 16), self.e, self.n)

    def decrypt(self, x):
        """handle hex stream"""
        # x is a hex string; returns the integer plaintext x^d mod n.
        return pow(int(x, 16), self.d, self.n)

    def encryptBlock(self, blocks):
        """compute encryption on one block must be async"""
        return [self.encrypt(block) for block in blocks]

    def decryptBlock(self, blocks):
        """compute encryption on one block must be async"""
        return [self.decrypt(block) for block in blocks]

    def splitBuffer(self, buffer):
        """convert to ascii"""
        # Two hex digits per character, then fixed-size slices; the last
        # slice gets padded up to blockSize.
        hex_ascii = ''.join('%02x'%ord(i) for i in buffer)
        blocks = [hex_ascii[i:i+self.blockSize] for i in range(0, len(hex_ascii), self.blockSize)]
        blocks[-1] = self.addPadding(blocks[-1])
        print(blocks)
        return blocks

    def addPadding(self, block, method="PKSC7"):
        # add padding to a block, this is not a block cipher but we need maleability you know
        if method == "PKSC7":
            # Each missing byte (two hex digits) is filled with the deficit
            # value, PKCS#7-style.
            deficit = (self.blockSize - len(block)) // 2
            for _ in range(deficit):
                block += '%02x'%deficit
            return block
def main():
    """Smoke-test the RSA class: 1024-bit key pair, encrypt a sample string."""
    cipher = RSA(1024)
    cipher.getPrimes()
    cipher.getKeys()
    print("p={}\nq={}".format(cipher.p, cipher.q))
    message = "Hello world!"
    blocks = cipher.splitBuffer(message)
    encrypted = cipher.encryptBlock(blocks)
    for block in encrypted:
        print(len(str(block)))
    print("encrypted {} ".format(encrypted))
    #b = rsa.decrypt(a)
    # print("".join([chr(i) for i in b]))


if __name__ == "__main__":
    main()
|
# Calculates the number of fruits that can be collected
def fruit_baskets(fruits):
totals_array = []
for i in range(0, len(fruits) - 1):
j = i
fruit = fruits[i]
one = fruits[i]
two = fruits[i+1]
total = 0
while fruit == one or fruit == two:
total += 1
j += 1
try:
fruit = fruits[j]
except:
break
totals_array.append(total)
return max(totals_array)
print(fruit_baskets([3,3,3,1,2,1,1,2,3,3,4])) |
"""
Class for manipulating with page https://sgpano.com/create-new-virtual-tour/
"""
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from Lib.common.NonAppSpecific import send_text, check_if_elem_exist
from Lib.common.Log import Log
from Lib.common.WaitAction import wait_until
class BasicInformationTour:
    """Page object for the 'basic information' step of creating a new
    virtual tour on sgpano.com: element locators plus the fill-and-submit
    workflow."""

    def __init__(self, driver):
        # Selenium WebDriver shared with the test framework.
        self.driver = driver
        self.log = Log(driver)

    def inpTitle(self):
        """Locate the tour-title input."""
        return self.driver.find_element_by_id("title")

    def inpAddress(self):
        """Locate the tour-address input."""
        return self.driver.find_element_by_id("address")

    def inpWatermarkText(self):
        """Locate the watermark-text input."""
        return self.driver.find_element_by_id("watermark")

    def txtDescription(self):
        """Locate the description textarea."""
        return self.driver.find_element_by_id("formGroupExampleInput7")

    def rbtnPublicAccess(self, answer):
        """Locate the public-access radio button labelled *answer* ('Yes'/'No')."""
        radio_btns = self.driver.find_element_by_class_name("check-box_radio")
        return radio_btns.find_element_by_xpath("//span[contains(text(),'{}')]/preceding-sibling::input".format(answer))

    def btnSubmit(self):
        """Locate the submit button."""
        return self.driver.find_element_by_id("btnSubmit")

    def check_insert_basic_info_successfully(self):
        """True when the next step ('upload your scenes') is displayed."""
        try:
            self.driver.find_element_by_xpath("//h3[contains(text(),'upload your scenes')]")
            return True
        except:
            return False

    def set_basic_info(self, title, address, description, watermark="", publicAccess=True, mode="set"):
        """
        Insert basic info for creating new tour
        :param title: Title of tour
        :type title: str
        :param address: Tour address
        :type address: str
        :param description: Tour description
        :type description: str
        :param watermark: Watermark
        :type watermark: str
        :param publicAccess: Choose radio button for public access
        :type publicAccess: bool
        :param mode: Possible values are set or update
        :type mode: str
        """
        self.log.info("Execute method set_basic_info with parameters title={}, address={}, description={},"
                      " watermark={}, publicAccess={}, mode={}".format(title, address, description, watermark,
                                                                       publicAccess, mode))
        if title:
            # NOTE(review): the bound method itself (not an element) is passed
            # to check_if_elem_exist — presumably that helper calls it; verify.
            wait_until(lambda: check_if_elem_exist(self.inpTitle), timeout=30)
            send_text(self.inpTitle(), title, mode=mode)
        if address:
            send_text(self.inpAddress(), address, mode=mode)
        if watermark:
            send_text(self.inpWatermarkText(), watermark, mode="update")
        if publicAccess:
            self.rbtnPublicAccess("Yes").click()
        else:
            self.rbtnPublicAccess("No").click()
        if description:
            send_text(self.txtDescription(), description, mode=mode)
        self.log.screenshot("Data entered. Click on button submit")
        self.btnSubmit().click()
        # Submission is complete once the scene-upload widget is visible.
        wait = WebDriverWait(self.driver, 60)
        wait.until(expected_conditions.visibility_of_element_located(
            (By.CSS_SELECTOR, "div[class*='qq-upload-button']")))
        self.log.info("Submit done")
|
import os
import shutil
import subprocess
import sys
# Resolve a minidump: lay out breakpad .sym files in the directory structure
# minidump_stackwalk expects, then run it against the given .dmp file.
if (len(sys.argv) < 2):
    print("Usage: python3 walk_stack.py <.dmp file>")
    exit(1)

dmp = sys.argv[1]
syms = [f for f in os.listdir(os.getcwd()) if f.endswith(".sym")]
minidump_stackwalk = os.path.dirname(os.path.realpath(__file__)) + "/minidump_stackwalk"

for sym in syms:
    with open(sym, "r") as f:
        # First line: "MODULE <os> <arch> <id> <name>".
        info = f.readline()[:-1].split()
    if len(info) == 5:
        # SECURITY/BUG FIX: the original built "mkdir -p ..." / "cp ..." shell
        # strings (shell=True), which broke on spaces and was injection-prone;
        # use os.makedirs and shutil.copy instead. ("dir" also shadowed the
        # builtin.)
        sym_dir = os.path.join("sym", info[-1], info[-2])
        os.makedirs(sym_dir, exist_ok=True)
        shutil.copy(sym, sym_dir)

# List-form argv avoids the shell entirely.
subprocess.run([minidump_stackwalk, dmp, "sym"], check=True)
|
print("Hey how is it going?")
# Echo every line the user types until they say the magic phrase.
while True:
    statement = input()
    if statement == "stop copying me":
        break
    print(statement)
print("UGH FINE YOU WIN")
|
#!/usr/bin/env python
# coding=utf-8
import sys
#find result
import sys
# Streaming reducer: stdin lines are "key\teventFile,Pt", grouped by key.
# Emits "key,<distinct event files>,<mean Pt>" per group.
count = 0.0
current_key = -1
Pt_sum = 0.0
event_files = set()
for line in sys.stdin:
    key,value = line.split('\t')
    key = int(key)
    if key != current_key:
        # Key changed: flush the aggregate for the previous group.
        if current_key != -1:
            print("{0},{1},{2}".format(current_key, len(event_files), Pt_sum/count))
        current_key = key
        count = 0.0
        Pt_sum = 0.0
        event_files = set()
    event_file, Pt = value.split(',')
    event_files.add(int(event_file))
    Pt_sum += float(Pt)
    count += 1.0
# Flush the final group. BUG FIX: guard against empty input, which previously
# raised ZeroDivisionError (count == 0.0) on this line.
if current_key != -1:
    print("{0},{1},{2}".format(current_key, len(event_files), Pt_sum/count))
#INPUT:
"""
key -
0 - antiNucleus INT
value -
1 - eventFile UINT
2 - Pt FLOAT
"""
#OUTPUT:
# antiNucleus, number of eventFiles, mean of Pt |
from django.shortcuts import redirect, render, get_object_or_404
from todoapp.models import Category, TodoList
from .forms import CreateTask
def index(request):
    """List every TodoList item."""
    return render(request, "todoapp/todo.html",
                  {'todos': TodoList.objects.all()})
def create_task(request):
    """Display (GET) and process (POST) the task-creation form."""
    query_results = Category.objects.all()
    if request.method != "POST":
        form = CreateTask()
    else:
        form = CreateTask(request.POST)
        if form.is_valid():
            form.save()
            return redirect("index")
        # Invalid form falls through and re-renders with errors.
    return render(request, "todoapp/create.html", {"form": form, "category": query_results})
def task_detail(request, pk):
    """Show a single task, or 404 when it does not exist."""
    return render(request, "todoapp/detail.html",
                  {'task': get_object_or_404(TodoList, pk=pk)})
def edit_task(request, pk):
    """Display (GET) and process (POST) the edit form for an existing task."""
    task = get_object_or_404(TodoList, pk=pk)
    if request.method == "POST":
        form = CreateTask(request.POST, instance=task)
        if form.is_valid():
            form.save()
            # NOTE(review): the 'detail' URL usually requires the pk argument
            # (cf. task_detail); confirm redirect("detail") resolves without it.
            return redirect("detail")
    else:
        form = CreateTask(instance=task)
    return render(request, "todoapp/edit.html", {"form": form})
def delete_task(request, pk):
    """Confirm (GET) and perform (POST) deletion of a task."""
    task = get_object_or_404(TodoList, pk=pk)
    if request.method != "POST":
        return render(request, "todoapp/delete.html", {'task': task})
    task.delete()
    return redirect("index")
|
"""
MWHair - Mediawiki wrapper
Description - This is a mediawiki client written by Hairr <hairrazerrr@gmail.com>
It was orignally created to be used at http://runescape.wikia.com/
This library is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free
Software Foundation; either version 2.1 of the License, or (at your option)
any later version.
This library is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
import urllib2
import urllib
import json
import sys
import time
from cookielib import CookieJar
__version__ = 2.0
# Shared cookie-aware opener used by every API call in this module.
cj = CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
# BUG FIX: OpenerDirector's attribute is ``addheaders`` (no underscore);
# the original assigned ``add_headers``, which silently did nothing, so the
# User-Agent header was never actually sent.
opener.addheaders = [('User-Agent','Python Mwhair')]
def site(site):
    """
    @description: Sets the wiki's api
    @use:
        import mwhair
        mwhair.site('http://foo.com/api.php')
    @other: You must specifiy the url of the api with the http protocol and without the www
    """
    # Stores the endpoint in the module-level ``wiki`` global that every
    # other call in this module POSTs to.
    global wiki
    wiki = site
def login(username, password):
    """
    @description: Used to login to the mediawiki through the API
    @use:
        import mwhair
        mwhair.login(username, password)
    """
    login_data = {
        'action' : 'login',
        'lgname' : username,
        "lgpassword": password,
        'format' : 'json'
    }
    data = urllib.urlencode(login_data)
    # First request: the API answers with a login token.
    response = opener.open(wiki, data)
    content = json.load(response)
    login_data['lgtoken'] = content['login']['token']
    data = urllib.urlencode(login_data)
    # Second request: resubmit the credentials together with the token.
    response = opener.open(wiki, data)
    content = json.load(response)
    if content['login']['result'] == 'Success':
        print 'Now logged in as %s' % content['login']['lgusername']
        # Pre-fetch the action tokens used by edit/delete/move/... later on.
        edittokens()
    elif content ['login']['result'] == 'NeedToken':
        # NOTE(review): a second NeedToken usually means the token round-trip
        # failed (e.g. cookies rejected); exits rather than retrying.
        print 'Error occured while trying to log in...'
        sys.exit(1)
    elif content ['login']['result'] == 'WrongPass':
        print 'Incorrect password.'
        sys.exit(1)
    else:
        print 'Error occured.'
        sys.exit(1)
def logout():
    """
    @description: Used to logout of the wiki through the API
    @use:
        import mwhair
        mwhair.logout()
    """
    logout_data = {
        'action':'logout',
        'format':'json'
    }
    data = urllib.urlencode(logout_data)
    response = opener.open(wiki, data)
    # Response body is parsed but intentionally unused.
    content = json.load(response)
    print "Successfully logged out"
def edittokens():
"""
@description: Used to gather tokens to edit, delete, protect, move, block, unblock, email, and import
@use: This shouldn't be used in a seperate script, the information is gathered on login and used throughout mwhair
"""
edit_token_data = {
'action':'query',
'prop':'info',
'titles':'Main Page',
'intoken':'edit|delete|protect|move|block|unblock|email|import',
'format':'json'
}
data = urllib.urlencode(edit_token_data)
response = opener.open(wiki, data)
content = json.load(response)
s = content['query']['pages']
thes = tuple(s.values())[0]
try:
warnings = content['warnings']['info']['*']
except:
warnings = None
if warnings != None:
if 'edit' in warnings:
print 'No edit token: Quitting....'
sys.exit(1)
else:
global edit_token
edit_token = thes['edittoken']
if 'delete' in warnings:
global delete_token
delete_token = None
else:
delete_token = thes['deletetoken']
if 'protect' in warnings:
global protect_token
protect_token = None
else:
protect_token = thes['protecttoken']
if 'move' in warnings:
global move_token
move_token = None
else:
move_token = thes['movetoken']
if 'block' in warnings:
global block_token
block_token = None
else:
block_token = thes['blocktoken']
if 'unblock' in warnings:
global unblock_token
unblock_token = None
else:
unblock_token = thes['unblocktoken']
if 'email' in warnings:
email_token = None
else:
email_token = thes['emailtoken']
if 'import' in warnings:
import_token = None
else:
import_token = thes['importtoken']
else:
edit_token = thes['edittoken']
delete_token = thes['deletetoken']
protect_token = thes['protecttoken']
move_token = thes['movetoken']
block_token = thes['blocktoken']
unblock_token = thes['unblocktoken']
email_token = thes['emailtoken']
import_token = thes['importtoken']
def purge(title):
    """
    @description: Purges the specified page
    @use:
        import mwhair
        mwhair.purge('foo')
    """
    request = {
        'action':'purge',
        'titles':title,
        'format':'json'
    }
    body = urllib.urlencode(request)
    reply = opener.open(wiki, body)
    # Drain and decode the response; purge has no useful payload to return.
    json.load(reply)
def edit(title, section=None):
    """
    @description: Gathers information about a specified page
    @use:
        import mwhair
        foo = mwhair.edit('bar')
    @other: This then makes the variable foo the contents of bar
    """
    request = {
        'action':'query',
        'prop':'revisions',
        'titles':title,
        'rvprop':'timestamp|content',
        'format':'json'
    }
    if section:
        request['rvsection'] = section
    reply = opener.open(wiki, urllib.urlencode(request))
    result = json.load(reply)
    page = tuple(result['query']['pages'].values())[0]
    try:
        # '*' holds the raw wikitext of the latest revision.
        return page['revisions'][0]['*']
    except KeyError:
        # Page has no revisions (e.g. it does not exist): empty contents.
        return ''
def save(title, text='',summary='',minor=False,bot=True,section=False):
    """
    @description: Saves the contents of the page
    @use:
        import mwhair
        mwhair.save('foo')
    @other: text needs to be specified, if not, the page will only be purged
    to create a non-bot edit, specifiy bot=False, otherwise, it'll be marked as a bot edit
    """
    payload = {
        'action':'edit',
        'title':title,
        'summary':summary,
        'token':edit_token,
        'format':'json'
    }
    try:
        payload['text'] = text.encode('utf-8')
    except:
        payload['text'] = text
    if bot is not False:
        payload['bot'] = 'True'
    if minor != False:
        payload['minor'] = minor
    if section != False:
        payload['section'] = section
    if not text:
        # No text supplied: only purge the page, do not send an edit.
        payload['text'] = purge(title)
        return
    reply = opener.open(wiki, urllib.urlencode(payload))
    return json.load(reply)
def move(fromp, to, movesubpages=True, movetalk=True,reason='',noredirect=False):
    """
    @description: Moves one page to another
    @use:
        import mwhair
        mwhair.move('Foo','Bar')
    """
    if move_token is None:
        print('You do not have permission to move.')
        return
    request = {
        'action':'move',
        'from':fromp,
        'to':to,
        'reason':reason,
        'token':move_token,
        'format':'json'
    }
    # Optional flags are only sent when explicitly enabled.
    for enabled, key in ((movesubpages, 'movesubpages'),
                         (movetalk, 'movetalk'),
                         (noredirect, 'noredirect')):
        if enabled == True:
            request[key] = True
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def upload(filename, url, comment=''):
    """
    @description: Uploads a file from another url
    @use:
        import mwhair
        mwhair.upload('File.png','http://foo.com/bar.png')
    """
    request = {
        'action':'upload',
        'filename':filename,
        'url':url,
        'comment':comment,
        'token':edit_token,   # uploads ride on the edit token
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def delete(title, reason=None):
    """
    @description: Deletes a specified page
    @use:
        import mwhair
        mwhair.delete('Foo')
    """
    if delete_token is None:
        print('You do not have permission to delete.')
        return
    request = {
        'action':'delete',
        'title':title,
        'token':delete_token,
        'format':'json'
    }
    if reason != None:
        request['reason'] = reason
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def protect(title,edit="all",move="all",expiry="infinite",reason=None):
    """
    @description: Protects the specified page
    @use:
        import mwhair
        mwhair.protect('foo')
    """
    if protect_token is None:
        print('You do not have permission to protect.')
        return
    request = {
        'action':'protect',
        'title':title,
        'protections':'edit=' + edit + '|move=' + move,
        'expiry':expiry,
        'token':protect_token,
        'format':'json'
    }
    if reason != None:
        request['reason'] = reason
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def unprotect(title,edit="all",move="all",reason=None):
    """
    @description: Removes the protection from the specified page
    @use:
        import mwhair
        mwhair.unprotect('foo')
    """
    unprotect_data = {
        'action':'protect',
        'title':title,
        'protections':'edit=' + edit +'|move=' + move,
        'token':protect_token,
        'format':'json'
    }
    if reason != None:
        unprotect_data['reason'] = reason
    # Bug fix: the original tested the undefined name 'protect_data', which
    # raised NameError on every call; the permission check belongs on the token.
    if protect_token is not None:
        data = urllib.urlencode(unprotect_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        return content
    else:
        print('You do not have permission to unprotect.')
def undo(title,summary=False,minor=False,bot=True):
    """
    @description: Undo's the current revision on the page
    @use:
        import mwhair
        mwhair.undo('foo')
    @other: If no summary is specified, an automatic summary will be put instead
    """
    request = {
        'action':'edit',
        'title':title,
        'token':edit_token,
        'undo':revnumber(title),   # revision id of the edit being undone
        'format':'json'
    }
    for enabled, key, value in ((summary != False, 'summary', summary),
                                (minor != False, 'minor', minor),
                                (bot == True, 'bot', '1')):
        if enabled:
            request[key] = value
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def rollback(title,summary=None,markbot=False):
    """
    @description: Rollbacks the last (or various) revisions on a page
    @use:
        import mwhair
        mwhair.rollback('Foo')
    @other: If no summary is provided, a default one will be used
    """
    # First request: fetch a rollback token and the last editor of the page.
    token_query = {
        'action':'query',
        'prop':'revisions',
        'titles':title,
        'rvtoken':'rollback',
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(token_query))
    result = json.load(reply)
    top_rev = tuple(result['query']['pages'].values())[0]['revisions'][0]
    # Second request: revert the consecutive edits by that user.
    revert = {
        'action':'rollback',
        'title':title,
        'user':top_rev['user'],
        'token':top_rev['rollbacktoken'],
        'format':'json'
    }
    if summary != None:
        revert['summary'] = summary
    if markbot != False:
        revert['markbot'] = 1
    reply = opener.open(wiki, urllib.urlencode(revert))
    return json.load(reply)
def block(user,expiry='infinite',reason=None,nocreate=False,
	autoblock=False,noemail=False,talkpage=True,reblock=False,watch=False):
    """
    @description: Blocks a specified user
    @use:
        import mwhair
        mwhair.block('Foo')
    @other: nocreate will prevent account creation, autoblock will block the last used IP's and
    any sussequent IP addresses, noemail will prevent the user from sending an email through the wiki,
    if talkpage is false, the user will not be able to edit their talk page, reblock will overwrite any
    existing blocks, if watch is false, you will watch the user/IP's user and talk pages
    """
    if block_token == None:
        print('You do not have permission to block.')
        return
    request = {
        'action':'block',
        'user':user,
        'expiry':expiry,
        'token':block_token,
        'format':'json'
    }
    if reason != None:
        request['reason'] = reason
    # Map each boolean option to its API flag; only send flags that are on.
    flags = ((nocreate != False, 'nocreate'),
             (autoblock != False, 'autoblock'),
             (noemail != False, 'noemail'),
             (talkpage != True, 'notalkpage'),
             (reblock != False, 'reblock'),
             (watch != False, 'watchuser'))
    for enabled, key in flags:
        if enabled:
            request[key] = True
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def unblock(user,reason=None):
    """
    @description: Unblocks a specified user
    @use:
        import mwhair
        mwhair.unblock('Foo')
    """
    if unblock_token == None:
        print('You do not have permission to unblock.')
        return
    request = {
        'action':'unblock',
        'user':user,
        'token':unblock_token,
        'format':'json'
    }
    if reason != None:
        request['reason'] = reason
    reply = opener.open(wiki, urllib.urlencode(request))
    return json.load(reply)
def recentchanges(bot=False,limit=20,start=False,end=False):
    """
    @description: Gets the last 20 pages edited on the recent changes
    @use:
        import mwhair
        foo = mwhair.recentchanges()
        for page in foo:
            ... tasks being performed ...
    @other: Start and End syntax is: YYYYMMDDHHMMSS or YYYY-MM-DD-HH-MM-SS
    """
    recent_changes_data = {
        'action':'query',
        'list':'recentchanges',
        'rcprop':'user|title',
        'rclimit':limit,
        'format':'json'
    }
    if bot is False:
        # Hide bot edits unless explicitly requested.
        recent_changes_data['rcshow'] = '!bot'
    if start != False:
        recent_changes_data['rcstart'] = start
    if end != False:
        recent_changes_data['rcend'] = end
    data = urllib.urlencode(recent_changes_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Build the title list directly: the original rebuilt the comprehension
    # once per page and raised NameError when the query returned no changes.
    return [change['title'] for change in content['query']['recentchanges']]
def allpages(limit=10,time=False,namespace=None):
    """
    @description: Gets the first (default: 10) pages listed in Special:AllPages
    @use:
        import mwhair
        pages = allpages()
    @other: limit='max' follows API continuation and returns every page;
    time=True sleeps 1s between continuation requests.
    """
    from time import sleep as _sleep  # the boolean 'time' parameter shadows the time module
    if limit != 'max':
        all_pages_data = {
            'action':'query',
            'list':'allpages',
            'aplimit':limit,
            'format':'json'
        }
        if namespace != None:
            all_pages_data['apnamespace'] = namespace
        data = urllib.urlencode(all_pages_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        # Direct comprehension instead of rebuilding the list once per page.
        return [page['title'] for page in content['query']['allpages']]
    returnlist = []
    # Build the request ONCE. The original recreated this dict on every pass of
    # the loop, dropping 'apcontinue' and re-fetching the first batch forever.
    all_pages_data = {
        'action':'query',
        'list':'allpages',
        'aplimit':'max',
        'format':'json'
    }
    if namespace != None:
        all_pages_data['apnamespace'] = namespace
    while 1:
        data = urllib.urlencode(all_pages_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        for page in content['query']['allpages']:
            returnlist.append(page['title'])
        try:
            # Absent continuation data means we have seen every page.
            all_pages_data['apcontinue'] = content['query-continue']['allpages']['apcontinue']
        except KeyError:
            return returnlist
        if time != False:
            _sleep(1)  # the original called time.sleep(1) on the boolean parameter
def newpages(bot=False,limit=20,start=False,end=False,namespace=None):
    """
    @description: Gets the last (default: 20) pages listed in Special:NewPages or are new pages
    @use:
        import mwhair
        pages = mwhair.newpages()
        for page in pages:
            ... tasks being performed ...
    """
    new_pages_data = {
        'action':'query',
        'list':'recentchanges',
        'rctype':'new',          # restrict recent changes to page creations
        'rclimit':limit,
        'format':'json'
    }
    if bot is False:
        new_pages_data['rcshow'] = '!bot'
    if namespace != None:
        new_pages_data['rcnamespace'] = namespace
    if start != False:
        new_pages_data['rcstart'] = start
    if end != False:
        new_pages_data['rcend'] = end
    data = urllib.urlencode(new_pages_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old per-page loop raised NameError when the
    # query returned no new pages.
    return [change['title'] for change in content['query']['recentchanges']]
def logs(letype=None,leaction=None,lelimit=50,lestart=None,leend=None):
    """
    @description: Gets (default: 50) entries in the specified log, if none specified, it'll list all the logs
    @use:
        import mwhair
        foo = mwhair.logs()
        for page in foo:
            ... tasks being performed ...
    @other: To specify a log, letype="logtype". leaction will override letype.
    """
    log_events_data = {
        'action':'query',
        'list':'logevents',
        'lelimit':lelimit,
        'format':'json'
    }
    # Optional filters: only sent when given (the original's else: pass
    # branches did nothing).
    if letype != None:
        log_events_data['letype'] = letype
    if leaction != None:
        log_events_data['leaction'] = leaction
    if lestart != None:
        log_events_data['lestart'] = lestart
    if leend != None:
        log_events_data['leend'] = leend
    data = urllib.urlencode(log_events_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old per-entry loop raised NameError on an
    # empty log.
    return [event['title'] for event in content['query']['logevents']]
def links(title,limit=10,namespace=None):
    """
    @description: Gets (default: 10) pages that the specified page links to
    @use:
        import mwhair
        foo = mwhair.links('bar')
    @other: limit='max' follows API continuation and returns every link.
    Returns None when the page has no 'links' entry at all.
    """
    if limit != 'max':
        links_data = {
            'action':'query',
            'prop':'links',
            'titles':title,
            'pllimit':limit,
            'format':'json'
        }
        if namespace != None: links_data['plnamespace'] = namespace
        data = urllib.urlencode(links_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        page = tuple(content['query']['pages'].values())[0]
        try:
            return [link['title'] for link in page['links']]
        except KeyError:
            # No 'links' key (e.g. missing page): preserve the None contract.
            return None
    returnlist = []
    # Build the request ONCE. The original recreated this dict inside the
    # loop, dropping 'plcontinue' and re-fetching the first batch forever.
    links_data = {
        'action':'query',
        'prop':'links',
        'titles':title,
        'pllimit':limit,
        'format':'json'
    }
    if namespace != None: links_data['plnamespace'] = namespace
    while 1:
        data = urllib.urlencode(links_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        page = tuple(content['query']['pages'].values())[0]
        for link in page['links']:
            returnlist.append(link['title'])
        try:
            links_data['plcontinue'] = content['query-continue']['links']['plcontinue']
        except KeyError:
            return returnlist
def backlinks(title,limit=10,namespace=None,redirects=True):
    """
    @description: Gets (default: 10) pages that link to the specified title
    @use:
        import mwhair
        foo = mwhair.backlinks('bar')
        for pages in foo:
            ... tasks being performed ...
    @other: limit='max' follows API continuation and returns every backlink
    """
    if limit != 'max':
        backlink_data = {
            'action':'query',
            'list':'backlinks',
            'bltitle':title,
            'bllimit':limit,
            'format':'json'
        }
        if namespace != None:
            backlink_data['blnamespace'] = namespace
        if redirects != True:
            backlink_data['blfilterredir'] = 'nonredirects'
        data = urllib.urlencode(backlink_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        # Direct comprehension: the old loop raised NameError on an empty result.
        return [link['title'] for link in content['query']['backlinks']]
    returnlist = []
    # Build the request ONCE. The original recreated this dict inside the
    # loop, dropping 'blcontinue' and re-fetching the first batch forever.
    backlink_data = {
        'action':'query',
        'list':'backlinks',
        'bltitle':title,
        'bllimit':'max',
        'format':'json'
    }
    if namespace != None:
        backlink_data['blnamespace'] = namespace
    while 1:
        data = urllib.urlencode(backlink_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        for page in content['query']['backlinks']:
            returnlist.append(page['title'])
        try:
            backlink_data['blcontinue'] = content['query-continue']['backlinks']['blcontinue']
        except KeyError:
            return returnlist
def imageusage(title,iulimit=10,iunamespace=None):
    """
    @description: Gets (default: 10) pages that use the specified image
    @use:
        import mwhair
        foo = mwhair.imageusage('File:Bar.png')
        for pages in foo:
            ... tasks being performed ...
    """
    imageusage_data = {
        'action':'query',
        'list':'imageusage',
        'iutitle':title,
        'iulimit':iulimit,
        'format':'json'
    }
    if iunamespace != None:
        imageusage_data['iunamespace'] = iunamespace
    data = urllib.urlencode(imageusage_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old per-page loop raised NameError when the
    # image was unused.
    return [page['title'] for page in content['query']['imageusage']]
def category(title,limit=10,cmnamespace=None,time=False):
    """
    @description: Gets (default: 10) pages that are used in the specified category
    @use:
        import mwhair
        foo = mwhair.category('Category:Bar')
        for pages in foo:
            ... tasks being performed ...
    @other: limit='max' walks the whole category via API continuation;
    time=True sleeps 5s between continuation requests to ease bandwidth.
    """
    from time import sleep as _sleep  # the boolean 'time' parameter shadows the time module
    if limit != 'max':
        category_data = {
            'action':'query',
            'list':'categorymembers',
            'cmtitle':title,
            'cmlimit':limit,
            'format':'json'
        }
        if cmnamespace != None:
            category_data['cmnamespace'] = cmnamespace
        data = urllib.urlencode(category_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        try:
            return [member['title'] for member in content['query']['categorymembers']]
        except KeyError:
            # No 'categorymembers' key in the reply: keep the None contract.
            return None
    returnlist = []
    # Build the request ONCE. The original recreated this dict inside the
    # loop, dropping 'cmcontinue' and re-fetching the first batch forever.
    category_data = {
        "action":"query",
        "list":"categorymembers",
        "cmtitle":title,
        "cmlimit":"max",
        "cmprop":"title",
        "format":"json"
    }
    if cmnamespace != None:
        category_data['cmnamespace'] = cmnamespace
    while 1:
        data = urllib.urlencode(category_data)
        response = opener.open(wiki,data)
        content = json.load(response)
        for page in content["query"]["categorymembers"]:
            returnlist.append(page["title"])
        try:
            category_data['cmcontinue'] = content["query-continue"]["categorymembers"]["cmcontinue"]
        except KeyError:
            return returnlist
        if time != False:
            _sleep(5)  # the original called time.sleep(5) on the boolean parameter
def template(title,eilimit=10,einamespace=None,eicontinue=None):
    """
    @description: Gets (default: 10) pages that use the specified template
    @use:
        import mwhair
        foo = mwhair.template('Template:Bar')
        for pages in foo:
            ... tasks being performed ...
    """
    template_data = {
        'action':'query',
        'list':'embeddedin',
        'eititle':title,
        'eilimit':eilimit,
        'format':'json'
    }
    if einamespace != None:
        template_data['einamespace'] = einamespace
    if eicontinue != None:
        template_data['eicontinue'] = eicontinue
    data = urllib.urlencode(template_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old per-page loop raised NameError when the
    # template was unused.
    return [page['title'] for page in content['query']['embeddedin']]
def usercontribs(title,limit=10,namespace=None,top=False):
    """
    @description: Gets (default: 10) pages last edited by the specified user
    @use:
        import mwhair
        foo = mwhair.usercontribs('Bar')
        for pages in foo:
            ... tasks being performed ...
    """
    user_contrib_data = {
        'action':'query',
        'list':'usercontribs',
        'ucuser':title,
        'uclimit':limit,
        'format':'json'
    }
    if namespace != None:
        user_contrib_data['ucnamespace'] = namespace
    if top != False:
        # Only contributions that are still the page's latest revision.
        user_contrib_data['uctoponly'] = True
    data = urllib.urlencode(user_contrib_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old loop raised NameError for users with no
    # contributions.
    return [contrib['title'] for contrib in content['query']['usercontribs']]
def prefix(title,aplimit=10,apprlevel=None,apnamespace=None):
    """
    @description: Gets (default: 10) pages that begin with the specified title
    @use:
        import mwhair
        foo = mwhair.prefix('bar')
        for pages in foo:
            ... tasks being performed ...
    @other: If this being done in another namespace (Talk, User, etc..) the title input
    would be the name of the page without the namespace specified (ex. User:Foo would only be Foo)
    with the appropriate namespace number in apnamespace. Apprlevel is the protection level,
    default None|Semi|Full
    """
    prefix_data = {
        'action':'query',
        'list':'allpages',
        'apprefix':title,
        'aplimit':aplimit,
        'format':'json'
    }
    if apprlevel != None:
        prefix_data['apprlevel'] = apprlevel
    if apnamespace != None:
        prefix_data['apnamespace'] = apnamespace
    data = urllib.urlencode(prefix_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Direct comprehension: the old loop raised NameError when no pages matched.
    return [page['title'] for page in content['query']['allpages']]
def userrights(title):
    """
    @description: Gets the userrights for the specified user
    @use:
        import mwhair
        foo = mwhair.userrights('Bar')
        for rights in foo:
            ... tasks being performed ...
    """
    user_right_data = {
        'action':'query',
        'list':'users',
        'ususers':title,
        'usprop':'groups',
        'format':'json'
    }
    data = urllib.urlencode(user_right_data)
    response = opener.open(wiki,data)
    content = json.load(response)
    # Return the group list directly: the old loop rebuilt the whole list once
    # per group and fell through (returning None) for users with no groups.
    return list(content['query']['users'][0]['groups'])
def pageid(title):
    """
    @description: Get's the page id for the specified page
    @use: NOTICE: This isn't necessarily supposed to be used for another script
        import mwhair
        pageid = mwhair.pageid('foo')
    """
    query = {
        'action':'query',
        'prop':'revisions',
        'titles':title,
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(query))
    result = json.load(reply)
    page = tuple(result['query']['pages'].values())[0]
    return page['pageid']
def revnumber(title):
    """
    @description: Get's the current revision number for the specified page
    @use: NOTICE: This isn't necessarily supposed to be used for another script
        import mwhair
        revnumber = mwhair.revnumber('foo')
    """
    query = {
        'action':'query',
        'prop':'revisions',
        'titles':title,
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(query))
    result = json.load(reply)
    page = tuple(result['query']['pages'].values())[0]
    # 'revid' of the newest revision.
    return page['revisions'][0]['revid']
def revuser(title):
    """
    @description: Gets the last user to edit the specified pages
    @use:
        import mwhair
        revuser = mwhair.revuser('Foo')
    """
    query = {
        'action':'query',
        'prop':'revisions',
        'titles':title,
        'rvprop':'user',
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(query))
    result = json.load(reply)
    page = tuple(result['query']['pages'].values())[0]
    return page['revisions'][0]['user']
def namespace(title):
    """Return the namespace number ('ns') of the specified page."""
    query = {
        'action':'query',
        'prop':'info',
        'titles':title,
        'format':'json'
    }
    reply = opener.open(wiki, urllib.urlencode(query))
    result = json.load(reply)
    page = tuple(result['query']['pages'].values())[0]
    return page['ns']
|
from atcoder.dsu import DSU

# Two circles are "connected" when their boundaries intersect or touch,
# i.e. |r1 - r2| <= distance-between-centers <= r1 + r2 (compared squared
# to stay in exact integer arithmetic).
N = int(input())
sx, sy, tx, ty = map(int, input().split())
circles = [tuple(map(int, input().split())) for _ in range(N)]

dsu = DSU(N)
for i in range(N):
    xi, yi, ri = circles[i]
    for j in range(i + 1, N):
        xj, yj, rj = circles[j]
        gap = (xi - xj) ** 2 + (yi - yj) ** 2
        # (ri - rj)^2 == |ri - rj|^2, so abs() is unnecessary after squaring.
        if (ri - rj) ** 2 <= gap <= (ri + rj) ** 2:
            dsu.merge(i, j)

def _circle_through(px, py):
    """Index of the first circle whose boundary passes exactly through (px, py)."""
    for idx, (x, y, r) in enumerate(circles):
        if (x - px) ** 2 + (y - py) ** 2 == r * r:
            return idx
    return None

start = _circle_through(sx, sy)
goal = _circle_through(tx, ty)
print("Yes" if dsu.same(start, goal) else "No")
|
import argparse
import csv
import os
import math
from scipy import stats
# Defects4J projects under evaluation; the full list is kept for reference.
# PROJECTS = ['Closure', 'Lang', 'Chart', 'Math', 'Mockito', 'Time']
PROJECTS = ['Math']
# Per-project bug id ranges matching the full PROJECTS list above.
# PROJECT_BUGS = [
#     [str(x) for x in range(1, 134)],
#     [str(x) for x in range(1, 66)],
#     [str(x) for x in range(1, 27)],
#     [str(x) for x in range(1, 107)],
#     [str(x) for x in range(1, 39)],
#     [str(x) for x in range(1, 28)]
# ]
# Bug ids for the single active project (Math: bugs 1..106).
PROJECT_BUGS = [
    [str(x) for x in range(1, 107)]
]
# Fault-localization formula(s) whose rankings are being compared.
#FORMULA = ['barinel', 'dstar2', 'jaccard', 'muse', 'ochiai', 'opt2', 'tarantula']
#FORMULA = ['tarantula']
FORMULA = ['dstar2']
def evaluate_algorithms(input_file_dstar2, input_file_addition, buggy_lines_file, output_file, top_N_lines, project, bug, formula):
    """
    Compute NDCG@top_N_lines for the plain dstar2 ranking and for the
    recency-weighted ("addition") ranking of suspicious lines.

    Parameters
    ----------
    input_file_dstar2 : str (sorted dstar2 suspiciousness CSV, relative path)
    input_file_addition : str (sorted weighted-addition suspiciousness CSV)
    buggy_lines_file : str (ground-truth buggy-lines file)
    output_file : str (kept for interface compatibility; currently unused)
    top_N_lines : int (rank cutoff for the NDCG computation)
    project, bug, formula : str (identifiers, kept for interface compatibility)

    Returns
    -------
    (ndcg_dstar2, ndcg_addition) : tuple of floats
    """
    # NOTE(review): the absolute prefix hard-codes one machine's layout;
    # consider making it configurable.
    base = "/home/kanag23/Desktop/Fault_loc/Python_scripts_July_25/"
    sorted_susp_lines_dstar2 = read_susp_lines_from_file(base + input_file_dstar2)
    sorted_susp_lines_addition = read_susp_lines_from_file(base + input_file_addition)
    buggy_lines = read_susp_lines_from_file(base + buggy_lines_file)
    output_file = base + output_file  # currently unused

    # Ideal DCG: every one of the first min(top_N, |buggy|) ranks is a hit.
    dcg_ideal = 0.0
    for i in range(min(top_N_lines, len(buggy_lines))):
        dcg_ideal += (2 ** 1 - 1) / math.log(i + 2, 2)

    def _dcg(ranked_lines):
        # Bound the loop by the ranking length: the original indexed past the
        # end of rankings shorter than top_N and raised IndexError (the
        # commented-out debug print hints this was hit in practice).
        total = 0.0
        for i in range(min(top_N_lines, len(ranked_lines))):
            hit = find_if_predicted_line_is_buggy(ranked_lines[i], buggy_lines)
            total += (2 ** hit - 1) / math.log(i + 2, 2)  # standard DCG term
        return total

    ndcg_dstar2_value = _dcg(sorted_susp_lines_dstar2) / dcg_ideal
    ndcg_dstar2_addition = _dcg(sorted_susp_lines_addition) / dcg_ideal
    return ndcg_dstar2_value, ndcg_dstar2_addition
def find_if_predicted_line_is_buggy(predicted_weighted_line, buggy_lines):
    """Return 1 when the predicted line's "file#lineno" id matches a known buggy line, else 0."""
    target = predicted_weighted_line[0]
    for row in buggy_lines:
        # Buggy-line rows look like "file#lineno#source"; compare on the
        # first two '#'-separated fields only.
        parts = row[0].split('#')
        if target == parts[0] + "#" + parts[1]:
            return 1
    return 0
def read_susp_lines_from_file(input_file):
    """
    reads the suspiciousness lines data from the sorted suspiciousness file

    Parameters:
    ----------
    input_file: str

    return:
    ------
    sorted_susp_lines: list (2D) — one list of CSV fields per line
    """
    # Use a context manager: the original passed a bare open() to csv.reader
    # and leaked the file handle.
    with open(input_file, encoding="latin-1") as fh:
        return [susp_line for susp_line in csv.reader(fh, delimiter=',')]
def wilcoxon_tests(ndcg_dstar2_for_each_bug, ndcg_new_for_each_bug):
    """Print sample sizes/values and the Wilcoxon signed-rank test between the two per-bug NDCG samples."""
    print("================ Stats Results Start: ====================")
    print(f"Length of ndcg_dstar2_for_each_bug: {len(ndcg_dstar2_for_each_bug)}")
    print(f"Values: {ndcg_dstar2_for_each_bug}")
    print(f"Length of ndcg_new_for_each_bug: {len(ndcg_new_for_each_bug)}")
    print(f"Values: {ndcg_new_for_each_bug}")
    # Paired non-parametric test; zero-differences are discarded ("wilcox").
    result = stats.wilcoxon(ndcg_dstar2_for_each_bug, ndcg_new_for_each_bug, zero_method="wilcox")
    print(result)
if __name__ == '__main__':
    # CLI: directories holding the ranked-suspiciousness CSVs, the ground-truth
    # buggy-lines files, and where to write results.
    parser = argparse.ArgumentParser()
    parser.add_argument('-d', '--suspiciousness-data-dir', required=True, help='Suspiciousness data directory')
    parser.add_argument('-b', '--buggy-lines-dir', required=True, help='Buggy Lines data directory')
    parser.add_argument('-o', '--output-dir', required=True, help='Output directory')
    args = parser.parse_args()
    # Rank cutoffs to evaluate; only the top-50 cutoff is currently active.
    # top_N_lines = [1, 5, 10, 20, 50]
    top_N_lines = [50] # top 10 lines are only considered from the final recommended ranked lines
    # Per-bug NDCG scores collected across all projects/bugs for the
    # Wilcoxon signed-rank test at the end.
    ndcg_dstar2_for_each_bug = []
    ndcg_new_for_each_bug = []
    print("\t\t\tdstar2 \t Product")
    for project, bugs in zip(PROJECTS, PROJECT_BUGS):
        # Weight pairs for combining suspiciousness and recency; only one
        # configuration is currently active.
        # for susp_weight, recency_weight in [(0.5, 0.5), (0.6, 0.4), (0.7, 0.3), (0.8, 0.2), (0.9, 0.1), (1.0, 1.0)]:
        for susp_weight, recency_weight in [(0.9, 0.1)]:
            print(f"======= susp weight: {susp_weight} and recency_weight: {recency_weight}======")
            for top_N in top_N_lines:
                total_ndcg_dstar2, total_ndcg_addition = 0.0, 0.0
                for bug in bugs:
                    for formula in FORMULA:
                        # Input/output CSV names follow the naming scheme of the
                        # upstream ranking scripts.
                        input_csv_dstar2 = f"{project}-{bug}-{formula}-sorted-weighted-susp-recency-{formula}-{susp_weight}-{recency_weight}"
                        input_csv_addition = f"{project}-{bug}-{formula}-sorted-weighted-susp-recency-addition-{susp_weight}-{recency_weight}"
                        output_csv = f"{project}-{bug}-{formula}-sorted-weighted-susp-recency"
                        buggy_lines_csv = f"{project}-{bug}.buggy.lines"
                        # Calling the main functionality
                        ndcg_dstar2, ndcg_addition = evaluate_algorithms(os.path.join(args.suspiciousness_data_dir, input_csv_dstar2),
                                                                         os.path.join(args.suspiciousness_data_dir, input_csv_addition),
                                                                         os.path.join(args.buggy_lines_dir, buggy_lines_csv),
                                                                         os.path.join(args.output_dir, output_csv),
                                                                         top_N, project, bug, formula)
                        ndcg_dstar2_for_each_bug.append(ndcg_dstar2)  # Adding
                        ndcg_new_for_each_bug.append(ndcg_addition)  # Adding
                        total_ndcg_dstar2 += ndcg_dstar2
                        total_ndcg_addition += ndcg_addition
                # print(f"NDCG score for top {top_N}\t dstar2 \t\t\t addition")
                # print(f"NDCG score for top {top_N}: \t\t\t\t\t{total_ndcg_dstar2:.4f} \t\t\t\t {total_ndcg_addition:.4f}", end="")
                # if total_ndcg_dstar2 >= total_ndcg_addition:
                #     print(f"\t\t\t dstar2")
                # else:
                #     print(f"\t\t\t addition")
                # Report the mean NDCG over all bugs and which ranking won.
                print(f"NDCG score for top {top_N}: \t\t\t\t\t{(total_ndcg_dstar2 / len(bugs)):.4f} \t\t\t\t {(total_ndcg_addition / len(bugs)):.4f}", end="")
                if (total_ndcg_dstar2 / len(bugs)) >= (total_ndcg_addition / len(bugs)):
                    print(f"\t\t\t dstar2")
                else:
                    print(f"\t\t\t addition")
                # print(f"Top {top_N} lines: \t {buggy_line_in_topN_dstar2_counter} \t\t\t {buggy_line_in_topN_addition_counter}", end="")
                # if buggy_line_in_topN_dstar2_counter > buggy_line_in_topN_addition_counter:
                #     print(f"\t\t dstar2")
                # else:
                #     print(f"\t\t Weighted Addition")
            print("===================\n\n")
    # Statistical significance of the per-bug score differences.
    wilcoxon_tests(ndcg_dstar2_for_each_bug, ndcg_new_for_each_bug)
|
import pymysql
def connect():
    """Create the MCD schema tables on the remote MySQL server if they do not already exist."""
    # NOTE(review): credentials are hard-coded here and in every other function
    # of this module; they should be moved to configuration.
    conn = pymysql.connect(host='opencab1.miniserver.com', user='ok_gopi', passwd='Optometry123', db='ok_mcd', charset='utf8', port=3306)
    cur = conn.cursor()
    try:
        cur.execute("CREATE TABLE IF NOT EXISTS `patient_data` (`id` int(10) NOT NULL,\
            `patient_name` varchar(50) DEFAULT NULL,\
            `year_of_birth` int(10) DEFAULT NULL,\
            PRIMARY KEY (`id`))")
        cur.execute("CREATE TABLE IF NOT EXISTS `family_history_enquiry` (`id` int(10) NOT NULL AUTO_INCREMENT,\
            `Jewish_ancestry` varchar(10) DEFAULT NULL,\
            `BRCA1_BRCA2_TP53` varchar(10) DEFAULT NULL,\
            `Childhood_adrenal_carcinomas_relative` varchar(10) DEFAULT NULL,\
            `Glioma_relative` varchar(10) DEFAULT NULL,\
            `Multiple_cancers_at_a_young_age_relative` varchar(10) DEFAULT NULL,\
            `Sarcoma_diagnosed_less_than_45_years_relative` varchar(10) DEFAULT NULL,\
            `patient_id` int(10) NOT NULL,\
            PRIMARY KEY (`id`))")
        cur.execute("CREATE TABLE IF NOT EXISTS `patient_history` (`id` int(10) NOT NULL AUTO_INCREMENT,\
            `Family_history_of_cancer` varchar(10) DEFAULT NULL,\
            `Personal_history_of_breast_cancer` varchar(10) DEFAULT NULL,\
            `Personal_history_of_melanoma` varchar(10) DEFAULT NULL,\
            `patient_id` int(10) NOT NULL,\
            PRIMARY KEY (`id`))")
        # Bug fix: this statement was missing its closing ')' after
        # PRIMARY KEY (`id`), so the CREATE failed and the bare except
        # silently rolled the whole schema creation back.
        cur.execute("CREATE TABLE IF NOT EXISTS `relative_bilateral_breast_cancer_info` (`id` int(10) NOT NULL AUTO_INCREMENT,\
            `Breast_cancer_bilateral` varchar(10) DEFAULT NULL,\
            `patient_id` int(10) NOT NULL,\
            PRIMARY KEY (`id`))")
        cur.execute("CREATE TABLE IF NOT EXISTS `relative_cancer_info` (`id` int(10) NOT NULL AUTO_INCREMENT,\
            `Name_of_relative_affected` varchar(50) DEFAULT NULL,\
            `Type_of_relative_affected` varchar(20) DEFAULT NULL,\
            `Type_of_cancer` varchar(20) DEFAULT NULL,\
            `Other_cancer_type` varchar(50) DEFAULT NULL,\
            `Age_of_diagnosis` int(10) DEFAULT NULL,\
            `Age_of_death` int(10) DEFAULT NULL,\
            `patient_id` int(10) NOT NULL,\
            PRIMARY KEY (`id`))")
        conn.commit()
    except Exception:
        # Best-effort schema creation: undo any partial work but do not crash.
        conn.rollback()
    conn.close()
def view():
    """Return every row of the patient_data table as a tuple of tuples."""
    conn = pymysql.connect(host='opencab1.miniserver.com', user='ok_gopi', passwd='Optometry123', db='ok_mcd', charset='utf8', port=3306)
    cursor = conn.cursor()
    cursor.execute("SELECT * FROM patient_data ")
    records = cursor.fetchall()
    conn.commit()
    conn.close()
    return records
def search(id="",patient_name="",year_of_birth=""):
    """Return patient_data rows matching any of the given id, name, or birth year."""
    query = "SELECT * FROM patient_data where id=%s OR patient_name=%s OR year_of_birth=%s"
    conn = pymysql.connect(host='opencab1.miniserver.com', user='ok_gopi', passwd='Optometry123', db='ok_mcd', charset='utf8', port=3306)
    cursor = conn.cursor()
    # Parameterized query keeps the user-supplied values safely escaped.
    cursor.execute(query, (id, patient_name, year_of_birth))
    records = cursor.fetchall()
    conn.commit()
    conn.close()
    return records
def update(patient_name,year_of_birth,id):
    """Overwrite the name and birth year of the patient_data row with the given id."""
    statement = "UPDATE patient_data SET patient_name=%s, year_of_birth=%s WHERE id=%s"
    conn = pymysql.connect(host='opencab1.miniserver.com', user='ok_gopi', passwd='Optometry123', db='ok_mcd', charset='utf8', port=3306)
    cursor = conn.cursor()
    cursor.execute(statement, (patient_name, year_of_birth, id))
    conn.commit()
    conn.close()
def delete(id):
    """Delete the patient with the given id along with all of their related history rows."""
    conn = pymysql.connect(host='opencab1.miniserver.com', user='ok_gopi', passwd='Optometry123', db='ok_mcd', charset='utf8', port=3306)
    cur = conn.cursor()
    # Bug fix: the parameter argument was '(id)', which is just a parenthesized
    # scalar, not a tuple; pymysql expects a sequence, so pass '(id,)'.
    cur.execute("DELETE a.*,b.*,c.*,d.* FROM patient_data a \
        LEFT JOIN family_history_enquiry b ON a.id = b.patient_id \
        LEFT JOIN patient_history c ON a.id = c.patient_id \
        LEFT JOIN relative_cancer_info d ON a.id = d.patient_id \
        WHERE a.id = %s",(id,))
    conn.commit()
    conn.close()
#connect()
#print(view())
#update('George',1985,1123)
#search(print(search(year_of_birth=1944)))
#delete(1123)
|
from scenario import *
import matplotlib
from matplotlib import animation
# Poincaré section plane at the origin with normal along the first phase-space
# axis (4D phase space: q1, q2, p1, p2).
pl = plane(np.array([0,0,0,0]), normal=np.array([1,0,0,0]))
import time
# Integrate the system with the Störmer–Verlet ("StV") scheme, recording the
# trajectory timeline and the crossings of the registered planes.
# NOTE(review): 'goof' and 'qp0' come from 'from scenario import *' — confirm
# they are the force function and initial condition.
rout = routine(f=goof, tInit=0,tFinal=10000, y0=qp0, ordinaryStepLen=1e-1,method="StV",timeline=True, savePlaneCuts=True, nopythonExe=True, timelineJumps=30)
startT = time.time()
rout.planes.append(pl)
y, times, cuts = rout.run()
endT = time.time()
# Wall-clock duration of the integration, in seconds.
print(endT-startT)
# Split the trajectory into position (q1, q2) and momentum (p1, p2) components.
q1,q2,p1,p2=y.T
q=np.array([q1,q2]).T
p=np.array([p1,p2]).T
import matplotlib.pyplot as plt
# Configuration-space trajectory; very thin line since it is densely sampled.
plt.plot(q1,q2, linewidth=0.03)
plt.show()
#plt.plot(times, K(p),label="K")
#plt.plot(times, U(q),label="U")
# Total energy over time — should stay (near-)constant for a symplectic scheme.
plt.plot(times, H(y),label="H")
plt.legend()
plt.show()
# Crossing points recorded for the first (and only) registered plane.
cuts = np.array(cuts[0])
from matplotlib.animation import FuncAnimation
fig, ax = plt.subplots(1,1)
# Artists updated per animation frame: the projected cut points plus the
# projected images of the q2/p1/p2 axes.
curve, = ax.plot([],[], "ko", markersize=0.5, label="PoincareCut")
xhat, = ax.plot([],[], "r-", label=r"$q_2$")
yhat, = ax.plot([],[], "g-", label=r"$p_1$")
zhat, = ax.plot([],[], "b-", label=r"$p_2$")
ax.set_xlim([-1.2,1.2])
ax.set_ylim([-1.2,1.2])
ax.set_aspect('equal')
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.legend(loc="upper right")
@jit(nopython=True)
def project(cutPoint, plposition, plbasis, sourceCrd, dim):
    """Perspective-project cutPoint from camera sourceCrd onto a hyperplane.

    plbasis[0] is the plane normal; plbasis[1:] span the plane. Returns
    (coords, True) with the landing point expressed in the in-plane basis,
    or (None, False) when the viewing ray is (nearly) parallel to the plane.
    """
    # Fixed: this was vec/np.sum(np.sqrt(vec**2)) — the L1 norm. The
    # parallel-ray test below compares a dot product against 1e-2 assuming
    # a unit-length (L2) direction, so use the Euclidean norm. The landing
    # point itself is unaffected: any uniform rescaling of `vector` cancels
    # in `scaling` below.
    def normalize(vec): return vec/np.sqrt(np.sum(vec**2))
    plnormal = plbasis[0]
    vector = normalize(cutPoint - sourceCrd)
    # Reject rays nearly parallel to the plane (no stable intersection).
    if(np.abs(np.sum(plnormal * vector)) < 1e-2):return None, False
    planeDistance = np.sum(plnormal * (plposition-sourceCrd))
    scaling = planeDistance/(np.sum(vector * plnormal))
    #if(scaling < 0): return None, False
    landingPoint = vector * scaling + sourceCrd
    def projectedHyperplaneCoordinates(position):
        # Coordinates along the (dim - 1) non-normal basis vectors.
        relpos = position - plposition
        return np.array([np.dot(plbasis[i + 1], relpos) for i in range(dim - 1)])
    return projectedHyperplaneCoordinates(landingPoint), True
def projectSet(dataSet, plane, source):
    """Project every point of dataSet onto `plane` as seen from `source`,
    keeping only the points whose projection succeeds."""
    kept = []
    for point in dataSet:
        coords, ok = project(point, plane.position, plane.basis, source, plane.dim)
        if ok:
            kept.append(coords)
    return np.array(kept)
def init():
    # FuncAnimation init callback: start from an empty scatter each cycle.
    curve.set_data([],[])
frames = 120  # one full camera revolution
r = 1         # camera orbit radius
def expose(i):
    """FuncAnimation frame callback: orbit the camera about the z axis and
    re-project the Poincare-cut points plus three short axis markers."""
    angle = 2*np.pi/frames * i
    normal = np.array([np.cos(angle), np.sin(angle), 0])
    crd = normal*r                    # camera position on the orbit
    zeta = normal                     # viewing direction (plane normal)
    eta = np.array([0,0,1])           # "up" vector
    xi = cross(np.array([eta, zeta])) # completes the basis
    basis = np.array([zeta,xi,eta])
    pl = plane(-crd, basis=basis)
    # Small segments marking the q2 / p1 / p2 directions near the origin.
    xhat.set_data(projectSet(np.array([[0,0,-0.5],[0.1,0,-0.5]]), pl, crd).T)
    yhat.set_data(projectSet(np.array([[0, 0, -0.5], [0, 0.1, -0.5]]), pl, crd).T)
    zhat.set_data(projectSet(np.array([[0, 0, -0.5], [0, 0, -0.4]]), pl, crd).T)
    curve.set_data(projectSet(cuts, pl, crd).T)
    return curve
# Point matplotlib at a local ffmpeg binary (not on PATH; Windows layout).
matplotlib.rcParams['animation.ffmpeg_path'] = r"../../ffmpegLibFiler/bin/ffmpeg.exe";
writer = animation.FFMpegWriter(fps=30);
fa = FuncAnimation(fig, expose, frames=frames,save_count=frames, init_func=init)
#fa.save("fig.gif", fps=30)
# Render the full revolution to an mp4 ("illustrasjon" = illustration).
fa.save("illustrasjon.mp4",writer=writer,dpi=200)
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import gym
import tflearn
from collections import deque
from policy_gradient.ddpg import Actor, Critic, OrnsteinUhlenbeckActionNoise
from policy_gradient.memory import SequentialMemory
# Command-line flags (TF1 flags API); this file targets TF1.x / Python 2.
tf.app.flags.DEFINE_string('checkpoint', '', 'load a checkpoint file for model')
tf.app.flags.DEFINE_string('save_checkpoint_dir', './models/ddpg_pendulum/', 'dir for storing checkpoints')
tf.app.flags.DEFINE_boolean('dont_save', False, 'whether to save checkpoints')
tf.app.flags.DEFINE_boolean('render', False, 'render of not')
tf.app.flags.DEFINE_boolean('train', True, 'train or not')
tf.app.flags.DEFINE_integer('seed', 0, 'seed for tf and numpy')
tf.app.flags.DEFINE_float('actor_lr', 0.0001, 'learning rate for actor')
tf.app.flags.DEFINE_float('critic_lr', 0.001, 'learning rate for critic')
tf.app.flags.DEFINE_float('tau', 0.001, 'tau')
FLAGS = tf.app.flags.FLAGS
# Seed both libraries for reproducibility (Python 2 print statement).
print 'seed is {}'.format(FLAGS.seed)
np.random.seed(FLAGS.seed)
tf.set_random_seed(FLAGS.seed)
env = gym.make('Pendulum-v0')
# gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.33)
# sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
sess = tf.Session()
# Separate optimizers: the critic typically learns faster than the actor.
actor_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.actor_lr)
critic_optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.critic_lr)
observation_shape = env.observation_space.shape[0]
action_shape = env.action_space.shape[0]
# Pendulum-v0 torque bounds — assumed to match env.action_space; TODO confirm.
ACTION_SCALE_MAX = [2.0]
ACTION_SCALE_MIN = [-2.0]
ACTION_SCALE_VALID = [True]
BATCH_SIZE = 64
def actor_network(states):
    """DDPG actor: maps states to a deterministic action scaled to the
    torque bound.

    Variable-scope names ('actor/stack', 'actor/full') end up in saved
    checkpoints — do not rename them.
    """
    with tf.variable_scope('actor'):
        net = slim.stack(states, slim.fully_connected, [400, 300], activation_fn=tf.nn.relu, scope='stack')
        # NOTE: make weights 0 so zero output always? for testing q function approximation
        # Tiny uniform init keeps the initial policy output near zero.
        net = slim.fully_connected(net, action_shape, activation_fn=tf.nn.tanh, scope='full', weights_initializer=tf.random_uniform_initializer(-3e-4, 3e-4))
        # net = tflearn.fully_connected(net, action_shape)
        # Rescale the tanh output in [-1, 1] to the action range.
        net = ACTION_SCALE_MAX * net
        return net
def critic_network(states, actions):
    """DDPG critic: maps (states, actions) to a scalar Q(s, a) per batch row.

    States pass through a 400-unit relu layer, the action is concatenated
    before a 300-unit relu layer, then a linear head emits one Q value.
    Variable-scope names ('critic/stack_state', 'critic/full', 'critic/last')
    end up in checkpoints — do not rename. (A large amount of commented-out
    tflearn/batch-norm experimentation was removed here for readability.)
    """
    with tf.variable_scope('critic'):
        state_net = slim.stack(states, slim.fully_connected, [400], activation_fn=tf.nn.relu, scope='stack_state')
        # Inject the action after the first hidden layer.
        net = tf.concat([state_net, actions], 1)
        net = slim.fully_connected(net, 300, activation_fn=tf.nn.relu, scope='full')
        # Linear head; tiny uniform init keeps initial Q estimates near zero.
        net = slim.fully_connected(net, 1, activation_fn=None, scope='last', weights_initializer=tf.random_uniform_initializer(-3e-4, 3e-4))
        # Drop the trailing unit dimension so Q values have shape [batch].
        net = tf.squeeze(net, axis=[1])
        return net
def actor_network_tflearn(states):
    """tflearn variant of the actor: 400-300 relu stack with batch norm
    and a tanh head scaled to the environment's action bound."""
    with tf.variable_scope('actor'):
        net = tflearn.fully_connected(states, 400)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        net = tflearn.fully_connected(net, 300)
        net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Final layer weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(
            net, action_shape, activation='tanh', weights_init=w_init)
        # Scale output to -action_bound to action_bound
        scaled_out = tf.multiply(out, env.action_space.high)
        return scaled_out
def critic_network_tflearn(states, actions):
    """tflearn variant of the critic: (states, actions) -> scalar Q value.

    The action tensor is merged into the second hidden layer through two
    temporary layers whose weights are combined manually.
    Fixed: removed a leftover Python-2 debug statement (`print t1.W, t2.W`)
    that dumped the weight tensors on every graph construction.
    """
    with tf.variable_scope('critic'):
        net = tflearn.fully_connected(states, 400)
        # net = tflearn.layers.normalization.batch_normalization(net)
        net = tflearn.activations.relu(net)
        # Add the action tensor in the 2nd hidden layer
        # Use two temp layers to get the corresponding weights and biases
        t1 = tflearn.fully_connected(net, 300)
        t2 = tflearn.fully_connected(actions, 300)
        net = tflearn.activation(
            tf.matmul(net, t1.W) + tf.matmul(actions, t2.W) + t2.b, activation='relu')
        # linear layer connected to 1 output representing Q(s,a)
        # Weights are init to Uniform[-3e-3, 3e-3]
        w_init = tflearn.initializations.uniform(minval=-0.003, maxval=0.003)
        out = tflearn.fully_connected(net, 1, weights_init=w_init)
        # Drop the trailing unit dimension so Q values have shape [batch].
        out = tf.squeeze(out, axis=[1])
        return out
def main(_):
    """Train DDPG on Pendulum-v0: actor/critic with target networks,
    Ornstein-Uhlenbeck exploration noise, and replay-memory mini-batches.

    `_` is the leftover argv list that tf.app.run() passes in.
    """
    actor = Actor(actor_network, actor_optimizer, sess, observation_shape, action_shape, tau=FLAGS.tau)
    critic = Critic(critic_network, critic_optimizer, sess, observation_shape, action_shape, tau=FLAGS.tau)
    # Temporally correlated exploration noise, standard for DDPG.
    actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_shape), sigma=0.2)
    writer = tf.summary.FileWriter("logs/ddpg", sess.graph)
    MAX_EPISODES = 10000
    MAX_STEPS = 1000
    saver = tf.train.Saver()
    if FLAGS.checkpoint:
        saver.restore(sess, FLAGS.checkpoint)
    else:
        sess.run(tf.global_variables_initializer())
        # hard update target networks
        actor.hard_update()
        critic.hard_update()
    episode_history = deque(maxlen=100)
    memory = SequentialMemory(limit=1000000, window_length=1)
    tot_rewards = deque(maxlen=10000)
    numsteps = 0
    for e in range(MAX_EPISODES):
        state = env.reset()
        cum_reward = 0
        ep_ave_max_q = 0
        ep_ave_q = 0
        tot_loss = 0
        actor_noises = []
        for j in range(MAX_STEPS):
            if FLAGS.render:
                env.render()
            # Exploration noise only while training.
            noise = actor_noise() if FLAGS.train else 0
            action = actor.predict([state])[0] + noise
            actor_noises.append(np.abs(noise))
            next_state, reward, done, _ = env.step(action)
            cum_reward += reward
            tot_rewards.append(reward)
            memory.append(state, action, reward, done)
            numsteps += 1
            # Start learning once a full mini-batch is available.
            if numsteps > BATCH_SIZE and FLAGS.train:
                states, actions, rewards, next_states, notdones = memory.sample_and_split(BATCH_SIZE)
                rewards, notdones = [np.squeeze(x) for x in [rewards, notdones]]
                # Critic target uses the *target* actor's next action.
                next_actions = actor.predict_target(next_states)
                qs, qloss, _ = critic.train(states=states,
                                            actions=actions,
                                            rewards=rewards,
                                            next_states=next_states,
                                            next_actions=next_actions,
                                            notdones=notdones
                                            )
                ep_ave_max_q += np.amax(qs)
                ep_ave_q += np.mean(qs)
                tot_loss += qloss
                # Actor update: ascend dQ/da evaluated at the actor's actions.
                predicted_actions = actor.predict(states)
                action_gradients = critic.get_action_gradients(states, predicted_actions)
                # "Inverting gradients" trick: scale each gradient by the
                # remaining headroom to the corresponding action bound.
                inverted_grads = []
                for grad, action in zip(action_gradients, predicted_actions):
                    # inverting gradients approach
                    newgrad = []
                    for delp, p, pmin, pmax, valid in zip(grad, action, ACTION_SCALE_MIN, ACTION_SCALE_MAX, ACTION_SCALE_VALID):
                        if not valid:
                            newgrad.append(delp)
                        else:
                            if delp > 0:
                                newgrad.append(delp * (pmax - p) / (pmax - pmin))
                            else:
                                newgrad.append(delp * (p - pmin) / (pmax - pmin))
                    inverted_grads.append(newgrad)
                # NOTE(review): inverted_grads is computed but unused — the raw
                # gradients are applied; the inverted variant stays commented out.
                actor.train(states=states, action_gradients=action_gradients)
                # don't train actor, just see if q function is learnt correctly
                # actor.train(states=states, action_gradients=inverted_grads)
                # Soft (tau-weighted) update of both target networks.
                actor.update_target()
                critic.update_target()
            if done:
                # Episode finished: log running statistics and move on.
                episode_history.append(cum_reward)
                print("episode: {}/{}, score: {}, avg score for 100 runs: {:.2f}, maxQ: {:.2f}, avg q: {:.2f}, avg loss: {:.5f}, avg noise: {:.3f}".format(
                    e,
                    MAX_EPISODES,
                    cum_reward,
                    np.mean(episode_history),
                    ep_ave_max_q / float(j),
                    ep_ave_q / float(j),
                    tot_loss / float(j),
                    np.mean(actor_noises)))
                break
            state = next_state
        # Periodic checkpointing (Python 2 print statement below).
        if e%100 == 0 and not FLAGS.dont_save:
            save_path = saver.save(sess, FLAGS.save_checkpoint_dir + 'model-' + str(e) + '.ckpt')
            print 'saved model ' + save_path
if __name__ == '__main__':
tf.app.run() |
import pandas as pd
from django.db import DatabaseError, transaction
from items import models as activoM
from proveedores.models import Proveedor
from operaciones.models import MANTENIMIENTO_CHOICES, Operacion
from django.contrib.auth.models import User
def choiceHelper(x,choices,default):
    """Return the first choice key containing the first two characters of
    `x` (case-insensitive), or `default` when nothing matches.

    Note: an empty `x` matches the first choice, since '' is a substring
    of everything.
    """
    prefix = x[:2].lower()
    return next((opt[0] for opt in choices if prefix in opt[0].lower()), default)
def localHelper(x,choices):
    """Return the first choice key that contains `x` (case-insensitive).

    Returns None when nothing matches — callers rely on that implicit value.
    Fixed: removed the per-comparison debug print, which spammed stdout once
    per choice for every imported row.
    """
    needle = x.lower()
    for choice in choices:
        if needle in choice[0].lower():
            return choice[0]
    return None
def inRevision(x):
    """True when the mapped status is anything other than 'ok'."""
    return x != 'ok'
def isOwned(x):
    """True when the ownership column reads exactly 'PROPIA'."""
    return x == 'PROPIA'
## Loop to insert Activos using the Django ORM.
# Source workbook. NOTE(review): absolute local path — machine-specific.
activosFrame = pd.read_excel('/Users/juanjosebonilla/Desktop/Sistemas/WebProjects/activos/src/utils/TablasSql.xlsx',sheet_name='activos')
operacionesFrame = pd.read_excel('/Users/juanjosebonilla/Desktop/Sistemas/WebProjects/activos/src/utils/TablasSql.xlsx',sheet_name='operaciones')
@transaction.atomic
def saveActivos():
    """Import Activo rows (and their Operacion history) from the Excel workbook.

    Runs inside a single transaction via @transaction.atomic. (The long
    commented-out model-field reference lists from the original were removed
    for readability.)
    """
    try:
        # Missing useful-life values become 0 so int() below cannot fail on NaN.
        activosFrame['vidaUtil'] = activosFrame['vidaUtil'].fillna(0)
        print(activosFrame)
        # NOTE(review): this shadows the module-level operacionesFrame with a re-read.
        operacionesFrame = pd.read_excel('/Users/juanjosebonilla/Desktop/Sistemas/WebProjects/activos/src/utils/TablasSql.xlsx',sheet_name='operaciones')
        print(operacionesFrame)
        operaciones = []
        for index, row in activosFrame.iterrows():
            # Map free-text spreadsheet cells onto the model's choice fields.
            estadoChoice = choiceHelper(row['estado'],activoM.ITEM_STATUS_CHOICES,None)
            pdvChoice = localHelper(row['local'],activoM.ITEM_PDV_CHOICES)
            areaChoice = choiceHelper(row['ubicacion2'],activoM.ITEM_AREA_CHOICES,'comedor')
            revBool = inRevision(estadoChoice)
            fields = {'descripcion':row['nombre'],'estado':estadoChoice,'enObservacion':revBool,'pdv':pdvChoice,'ubicacion':areaChoice,
                    'proveedor':None,'marca':row['marca'],'modelo':row['modelo'],'serie':row['serie'],'placa':row['placa'],'propio':isOwned(row['activoPropio']),
                    'vidautil':int(row['vidaUtil'])
                    }
            activo = activoM.Activo(**fields)
            activo.save()
            ## Attach this asset's operations (sheet rows matching its spreadsheet id).
            opActivoFr = operacionesFrame[operacionesFrame['activo_id'] == row['id']]
            if len(opActivoFr.index) > 0:
                for index2, row2 in opActivoFr.iterrows():
                    # Build the operation; supplier is looked up by name when given.
                    fields = {'mantenimiento':choiceHelper(row2['tipoOperacion'],MANTENIMIENTO_CHOICES,'correctivo'),'total':row2['costo'],'observacion':row2['descripción']}
                    op = Operacion(**fields)
                    provName = row2['proveedor']
                    if (provName != None) and (provName != "") :
                        proveedores = Proveedor.objects.filter(nombre = provName)
                        if len(proveedores) > 0:
                            proveedor = proveedores[0]
                            op.proveedor = proveedor
                    user = User.objects.filter(username = 'juanjosebonilla')[0]
                    op.user = user
                    op.activo = activo
                    op.timestamp = row2['fecha']
                    operaciones.append(op)
        # Persist operations oldest-first — presumably so ids follow chronology.
        operaciones.sort(key=lambda x: x.timestamp,reverse = False)
        print(operaciones)
        for operacion in operaciones:
            overDate = operacion.timestamp
            print(overDate)
            # Double save: the first insert lets auto_now_add stamp the row,
            # then the spreadsheet date is written back over it — presumably
            # a workaround for auto_now_add on timestamp; TODO confirm.
            operacion.save()
            operacion.timestamp = overDate
            operacion.save()
    except DatabaseError as e:
        # NOTE(review): the message is built but never logged, returned or
        # re-raised — import failures are silent. Also, exceptions have no
        # .message attribute on Python 3; this line itself would raise there.
        message = 'Database Error: ' + str(e.message)
def saveProveedores():
    """Create a Proveedor row for each distinct supplier name in the
    operations sheet.

    Fixed: pandas' unique() includes NaN (blank cells) and may include empty
    strings; those previously produced garbage Proveedor rows named "nan"/"".
    They are now skipped.
    """
    for prov in operacionesFrame['proveedor'].unique():
        if pd.isna(prov) or prov == "":
            continue
        Proveedor(nombre=prov).save()
saveProveedores()
saveActivos()
|
from typing import List, Iterable, Callable, Dict, Tuple, Set
from matplotlib import pyplot as plt
from sklearn.metrics import precision_recall_curve, auc
from contradiction.medical_claims.cont_classification.path_helper import load_raw_predictions
from contradiction.medical_claims.token_tagging.acc_eval.path_helper import load_sbl_binary_label
from contradiction.medical_claims.token_tagging.path_helper import get_binary_save_path_w_opt, get_save_path2
from contradiction.medical_claims.token_tagging.print_score.auc import load_run
from contradiction.token_tagging.acc_eval.defs import SentTokenLabel
from contradiction.token_tagging.acc_eval.eval_codes import calc_prec_rec_acc
from contradiction.token_tagging.acc_eval.parser import load_sent_token_binary_predictions
from list_lib import index_by_fn
from misc_lib import average, str_float_list
from tab_print import print_table
from trec.trec_parse import load_ranked_list_grouped
from trec.types import TrecRankedListEntry
def eval_micro_auc(
        rlg: Dict[str, List[TrecRankedListEntry]],
        labels: List[SentTokenLabel], adjust_to_prediction_length=False
) -> Tuple[List[float], List[float]]:
    """Pool token scores over all queries and compute a micro-averaged
    precision-recall curve.

    Fixed: the return annotation claimed ``float`` but the function has
    always returned the (precision, recall) pair from
    sklearn's precision_recall_curve.

    :param rlg: ranked-list entries grouped by query id; each doc_id is a
        stringified token index.
    :param labels: gold binary token labels per query.
    :param adjust_to_prediction_length: currently unused; kept for
        interface compatibility.
    :return: (precision values, recall values).
    """
    labels_d: Dict[str, SentTokenLabel] = index_by_fn(lambda x: x.qid, labels)
    all_labels = []
    all_predictions = []
    for qid in rlg:
        entries: List[TrecRankedListEntry] = rlg[qid]
        try:
            labels_for_qid: List[int] = labels_d[qid].labels
            doc_id_to_score = {}
            for e in entries:
                doc_id_to_score[e.doc_id] = e.score
            # doc_ids are token positions "0".."len(labels)-1".
            predictions: List[float] = [doc_id_to_score[str(i)] for i in range(len(labels_for_qid))]
            assert len(labels_for_qid) == len(predictions)
            all_labels.extend(labels_for_qid)
            all_predictions.extend(predictions)
        except KeyError:
            # Queries without gold labels (or with missing token ids) are skipped.
            pass
    prec_list, recall_list, _ = precision_recall_curve(all_labels, all_predictions)
    return prec_list, recall_list
from typing import List, Iterable, Callable, Dict, Tuple, Set
def main():
    """Plot micro-averaged precision-recall curves for two runs on the
    'mismatch' token-tagging task (validation split)."""
    tag = "mismatch"
    gold: List[SentTokenLabel] = load_sbl_binary_label(tag, "val")
    runs = [
        (load_run("nlits86", tag), 'blue'),
        (load_run("exact_match", tag), 'red'),
    ]
    fig, ax = plt.subplots()
    for rlg, color in runs:
        precision, recall = eval_micro_auc(rlg, gold)
        ax.plot(recall, precision, color=color)
    ax.set_title('Precision-Recall Curve')
    ax.set_ylabel('Precision')
    ax.set_xlabel('Recall')
    plt.show()
if __name__ == "__main__":
main() |
from typing import Tuple
import enum
class Side(enum.Enum):
    """Type stub for the six axis-aligned sides.

    NOTE(review): __new__ declares (tl, tr, axis) while the properties
    expose `tl` and `br` — `tr` looks like a typo for `br`; confirm against
    the runtime module.
    """
    def __new__(cls, tl, tr, axis) -> Side: ...
    UP = ()
    DOWN = ()
    NORTH = ()
    SOUTH = ()
    EAST = ()
    WEST = ()
    # Corner ids and axis label carried by each member (set in __new__).
    @property
    def tl(self) -> int: ...
    @property
    def br(self) -> int: ...
    @property
    def ids(self) -> Tuple[int, int]: ...
    @property
    def axis(self) -> str: ...
|
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 24 19:20:29 2017
@author: thodoris
"""
import pandas
import numpy
import os
import copy
import core
from sklearn.model_selection import ParameterGrid
from sklearn.model_selection import train_test_split
from sklearn.model_selection import StratifiedKFold
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
class Performance(object):
    """Classification metrics for one train/test split, rounded to 4 decimals."""

    def __init__(self, y_true, y_pred, pos_probas, pos_label, neg_label):
        recall = metrics.recall_score
        self.accuracy = round(metrics.accuracy_score(y_true, y_pred), 4)
        # Sensitivity/specificity are recall w.r.t. the positive/negative class.
        self.sensitivity = round(recall(y_true, y_pred, pos_label=pos_label), 4)
        self.specificity = round(recall(y_true, y_pred, pos_label=neg_label), 4)
        self.mcc = round(metrics.matthews_corrcoef(y_true, y_pred), 4)
        fpr, tpr, _ = metrics.roc_curve(y_true, pos_probas, pos_label=pos_label)
        self.auc = round(metrics.auc(fpr, tpr), 4)

    def summary(self):
        """Return [accuracy, sensitivity, specificity, mcc, auc]."""
        return [self.accuracy, self.sensitivity, self.specificity,
                self.mcc, self.auc]
class AveragePerformance(object):
    """Accumulates per-fold Performance metrics and keeps running means/stds."""

    def __init__(self, pos_label, neg_label):
        self.accuracy_list = []
        self.sensitivity_list = []
        self.specificity_list = []
        self.mcc_list = []
        self.auc_list = []
        self.confusion_matrix = []
        self.pos_label = pos_label
        self.neg_label = neg_label
        self.update()

    def mean_worker(self, tmp_list):
        """Rounded mean of tmp_list; 0 when empty."""
        return round(numpy.mean(tmp_list), 4) if tmp_list else 0

    def std_worker(self, tmp_list):
        """Rounded standard deviation of tmp_list; 0 when empty."""
        return round(numpy.std(tmp_list), 4) if tmp_list else 0

    def update(self):
        """Recompute the mean/std attributes from the raw per-fold lists."""
        for metric in ("accuracy", "sensitivity", "specificity", "mcc", "auc"):
            values = getattr(self, metric + "_list")
            setattr(self, metric, self.mean_worker(values))
            setattr(self, metric + "_std", self.std_worker(values))

    def append(self, y_true, y_pred, pos_probas):
        """Score one fold and fold its metrics into the running statistics."""
        fold = Performance(y_true, y_pred, pos_probas, self.pos_label, self.neg_label)
        self.accuracy_list.append(fold.accuracy)
        self.sensitivity_list.append(fold.sensitivity)
        self.specificity_list.append(fold.specificity)
        self.mcc_list.append(fold.mcc)
        self.auc_list.append(fold.auc)
        self.update()

    def summary(self):
        """Return [mean, std] pairs: accuracy, sensitivity, specificity, mcc, auc."""
        return [self.accuracy, self.accuracy_std,
                self.sensitivity, self.sensitivity_std,
                self.specificity, self.specificity_std,
                self.mcc, self.mcc_std,
                self.auc, self.auc_std]
class ModelValidation(object):
    """Cross-validation and hold-out validation around a single classifier."""

    def __init__(self, classifier, pos_label, neg_label):
        self.classifier = classifier
        self.pos_label = pos_label
        self.neg_label = neg_label

    def index_of_pos_class(self, classes, pos_class):
        """Column index of pos_class within the classifier's classes_."""
        return list(classes).index(pos_class)

    def cross_validation(self, learning_X, learning_y, n_splits=10):
        """Stratified k-fold CV; returns the AveragePerformance summary list."""
        # Small samples cannot support 10 folds.
        if learning_X.shape[0] < 30:
            n_splits = 5
        folds = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=1234)
        performance = AveragePerformance(self.pos_label, self.neg_label)
        for train_idx, test_idx in folds.split(learning_X, learning_y):
            self.classifier.fit(learning_X[train_idx], learning_y[train_idx])
            probas = self.classifier.predict_proba(learning_X[test_idx])
            predictions = self.classifier.predict(learning_X[test_idx])
            col = self.index_of_pos_class(self.classifier.classes_,
                                          self.pos_label)
            pos_probas = [sample[col] for sample in probas]
            performance.append(learning_y[test_idx], predictions, pos_probas)
        return performance.summary()

    def validation(self, learning_X, learning_y, validation_X, validation_y):
        """Fit on the learning split and score once on the validation split."""
        self.classifier.fit(learning_X, learning_y)
        probas = self.classifier.predict_proba(validation_X)
        predictions = self.classifier.predict(validation_X)
        col = self.index_of_pos_class(self.classifier.classes_, self.pos_label)
        pos_probas = [sample[col] for sample in probas]
        performance = Performance(validation_y, predictions, pos_probas,
                                  self.pos_label, self.neg_label)
        return performance.summary()
class GridSearchParam(object):
    """Base class: runs CV plus hold-out validation for every parameter
    combination and collects the result rows for later tabulation.

    Subclasses set name, classifier, parameters and param_combinations.
    """
    def __init__(self):
        self.parameters = []
        self.param_combinations = []
        self.name = None
        self.classifier = None
        self.results = {}
    def record_results(self, process, input_list):
        # Store one result row under "learning" (CV) or "validation" (hold-out).
        if process == "learning":
            self.results.setdefault("learning", []).append(input_list)
        elif process == "validation":
            self.results.setdefault("validation", []).append(input_list)
        else:
            pass
    def parameterization(self, x, y, pos_label, neg_label, cv=10,
        validation_size=0.3):
        """Split the data, then cross-validate and hold-out-validate the
        classifier for every parameter combination.

        NOTE(review): the `cv` argument is never forwarded to
        cross_validation (its default fold count is used instead).
        """
        numpy.random.seed(1234)
        # Stratified learning/validation split.
        splitted_data = train_test_split(x, y, test_size=validation_size, stratify=y)
        learning_X = splitted_data[0]
        learning_y = splitted_data[2].ravel()
        validation_X = splitted_data[1]
        validation_y = splitted_data[3].ravel()
        if self.param_combinations == []:
            # No grid: evaluate the classifier once with its defaults.
            param_id = self.name + "_None"
            model = ModelValidation(self.classifier, pos_label, neg_label)
            cv_results = model.cross_validation(learning_X, learning_y)
            valid_results = model.validation(learning_X, learning_y,
                                             validation_X, validation_y)
            # Prepend id / name / params so rows align with the table columns.
            cv_results.insert(0, {})
            cv_results.insert(0, self.name)
            cv_results.insert(0, param_id)
            self.record_results("learning", cv_results)
            valid_results.insert(0, {})
            valid_results.insert(0, self.name)
            valid_results.insert(0, param_id)
            self.record_results("validation", valid_results)
        else:
            for param_set in self.param_combinations:
                # Python 2 print statement — progress logging.
                print self.name, param_set
                param_id = "_".join([str(i) for i in param_set.values()])
                param_id = self.name + "_" +param_id
                self.classifier.set_parameters(param_set)
                model = ModelValidation(self.classifier, pos_label, neg_label)
                cv_results = model.cross_validation(learning_X, learning_y)
                valid_results = model.validation(learning_X, learning_y,
                                                 validation_X, validation_y)
                cv_results.insert(0, param_set)
                cv_results.insert(0, self.name)
                cv_results.insert(0, param_id)
                self.record_results("learning", cv_results)
                valid_results.insert(0, param_set)
                valid_results.insert(0, self.name)
                valid_results.insert(0, param_id)
                self.record_results("validation", valid_results)
    def get_cv_results(self):
        """Cross-validation rows as a DataFrame (mean and std per metric)."""
        columns = ["ID", "Classifier" ,"Parameters", "Accuracy", "Accuracy_std",
                   "Sensitivity", "Sensitivity_std", "Specificity", "Specificity_std",
                   "MCC", "MCC_std","AUC", "AUC_std"]
        df = pandas.DataFrame(self.results["learning"], columns=columns)
        return df
    def get_validation_results(self):
        """Hold-out validation rows as a DataFrame (single value per metric)."""
        columns = ["ID", "Classifier" ,"Parameters", "Accuracy", "Sensitivity",
                   "Specificity", "MCC", "AUC"]
        df = pandas.DataFrame(self.results["validation"], columns=columns)
        return df
class ChangeParameters(object):
    """Mixin that lets an estimator take hyper-parameters from a dict."""

    def set_parameters(self, input_dict):
        """Set every (key, value) of input_dict as an attribute on self."""
        for name, val in input_dict.items():
            setattr(self, name, val)
class NaiveBayesGridSearchParam(GridSearchParam):
    """Grid-search wrapper for Gaussian naive Bayes (nothing to tune)."""

    def __init__(self):
        self.name = "gaussian_naive_bayes"
        self.parameters = []
        self.classifier = NaiveBayes()
        self.results = {}

    def get_parameters(self):
        """GaussianNB has no tunable grid, so the combination list is empty."""
        self.param_combinations = []
        return self.param_combinations
class NaiveBayes(GaussianNB, ChangeParameters):
    """GaussianNB extended with dict-driven parameter setting."""
    pass
class LogisticRegressionGridSearchParam(GridSearchParam):
    """Grid search for SGD-based logistic regression.

    SGDClassifier notes: loss='log' gives logistic regression and
    loss='modified_huber' a smooth outlier-tolerant loss; both enable
    predict_proba. penalty='elasticnet' mixes L2 and L1 as
    (1 - l1_ratio) * L2 + l1_ratio * L1.
    """

    def __init__(self, dimensionality):
        self.name = "logistic_regression"
        self.dimensionality = dimensionality
        self.classifier = LogisticRegression()
        self.results = {}
        # Richer elastic-net mixing grids for higher-dimensional data.
        for bound, ratios in ((5, [0.]),
                              (10, [0., 0.2]),
                              (20, [0., 0.2, 0.4, 0.6])):
            if self.dimensionality <= bound:
                l1_ratios = ratios
                break
        else:
            l1_ratios = [0.2, 0.4, 0.6, 0.8, 1.0]
        alphas = [0.001, 0.005, 0.01, 0.05, 0.1, 0.5]
        self.parameters = [{'loss': ['log', 'modified_huber'],
                            'penalty': ['elasticnet'],
                            'l1_ratio': l1_ratios, 'alpha': alphas,
                            'class_weight': ['balanced', None]}]

    def get_parameters(self):
        """Expand self.parameters into the flat list of combinations."""
        self.param_combinations = list(ParameterGrid(self.parameters))
        return self.param_combinations
class LogisticRegression(SGDClassifier, ChangeParameters):
    """SGDClassifier (log loss = logistic regression) with dict-driven parameter setting."""
    pass
class DecisionTree(DecisionTreeClassifier, ChangeParameters):
    """DecisionTreeClassifier with dict-driven parameter setting."""
    pass
class DecisionTreeGridSearchParam(GridSearchParam):
    """Grid search for a single decision tree; the depth grid scales with
    the data's dimensionality."""

    def __init__(self, dimensionality):
        self.name = "decision_tree"
        self.classifier = DecisionTree()
        self.dimensionality = dimensionality
        self.results = {}
        highest_depth = 20
        if self.dimensionality < 3:
            valid_depths = [self.dimensionality]
        elif self.dimensionality < 5:
            valid_depths = [2, self.dimensionality]
        else:
            # Depth 2 plus even depths below min(20, dimensionality).
            valid_depths = [2] + list(range(4, min(highest_depth, self.dimensionality), 2))
        self.parameters = [{'criterion': ['gini'],
                            'max_depth': valid_depths,
                            'max_features': [0.6, 1.0],
                            'class_weight': ['balanced', None]}]
        # Combinations are precomputed once, unlike the other subclasses.
        self.param_combinations = list(ParameterGrid(self.parameters))

    def get_parameters(self):
        """Return the combinations precomputed in __init__."""
        return self.param_combinations
class SVM(SVC, ChangeParameters):
    """SVC with dict-driven parameter setting."""
    pass
class SVMGridSearchParam(GridSearchParam):
    """Grid search for SVC; the polynomial kernel is only offered beyond 1-D.

    Kernel overview:
    http://crsouza.com/2010/03/17/kernel-functions-for-machine-learning-applications/#linear
    """

    def __init__(self, dimensionality):
        self.name = "support_vector_machines"
        # probability=True is required for predict_proba downstream.
        self.classifier = SVM(probability=True)
        self.results = {}
        weights = ['balanced', None]
        sigmoid_grid = {"C": [0.1, 1.], 'kernel': ['sigmoid'],
                        'gamma': ['auto', 0.001, 0.01, 0.1],
                        'class_weight': weights}
        if dimensionality == 1:
            self.parameters = [
                {"C": [0.1, 1.], 'kernel': ['linear', 'rbf'], 'class_weight': weights},
                sigmoid_grid,
            ]
        else:
            self.parameters = [
                {"C": [0.1, 1., 10.], 'kernel': ['linear', 'rbf'], 'class_weight': weights},
                {"C": [0.1, 1.], 'kernel': ['poly'], 'degree': [2], 'class_weight': weights},
                sigmoid_grid,
            ]

    def get_parameters(self):
        """Expand self.parameters into the flat list of combinations."""
        self.param_combinations = list(ParameterGrid(self.parameters))
        return self.param_combinations
def max_estimators(input_x):
    """Map a dimensionality to an upper bound on ensemble size.

    Builds a lookup table from a discretized logistic curve: inputs are
    rescaled from [-100, 100] to [0, 300] and outputs from (0, 1) to
    [1, 300], both snapped up to multiples of 10; each input bucket keeps
    the largest output seen. input_x must fall in a populated bucket
    (roughly 10..300).
    """
    grid = numpy.linspace(-100, 100, 200)

    def _rescale(vals, old_min, old_max, new_min, new_max):
        # Linear map from [old_min, old_max] onto [new_min, new_max].
        return (vals - old_min) * (new_max - new_min) / (old_max - old_min) + new_min

    def _snap_up(vals):
        # Round up to the next multiple of 10.
        return numpy.ceil(numpy.ceil(vals) / 10.) * 10.

    keys = _snap_up(_rescale(grid, -100., 100., 0., 300.))
    logistic = 1 / (1 + numpy.exp(-0.04 * grid))
    outputs = _snap_up(_rescale(logistic, 0., 1., 1., 300.))
    lookup = {}
    for key, value in zip(keys, outputs):
        lookup[key] = max(lookup.get(key, value), value)
    return int(lookup[_snap_up(input_x)])
class RandomForest(RandomForestClassifier, ChangeParameters):
    """RandomForestClassifier with dict-driven parameter setting."""
    pass
class RandomForestGridSearchParam(GridSearchParam):
    """Grid search for a random forest.

    Reuses the decision-tree grid and extends each entry with an
    ensemble-size (n_estimators) grid that grows with dimensionality.
    """
    def __init__(self, dimensionality):
        self.name = "random_forest"
        self.classifier = RandomForest()
        self.dimensionality = dimensionality
        self.results = {}
        DTGSP = DecisionTreeGridSearchParam(dimensionality)
        tree_parameters = DTGSP.parameters
        if dimensionality <= 5:
            n_estimators = [2, 4]
            valid_depths = [2,3]
        elif dimensionality <= 10:
            n_estimators = [2, 5, 10]
            valid_depths = [2,3]
        else:
            if dimensionality >= 300:
                # Fixed: this was the float literal 300., which made the
                # range() call below raise TypeError (range needs ints).
                max_value = 300
            else:
                max_value = max_estimators(dimensionality)
            # Multiples of 20 from 20 up to max_value (the original built
            # range(0, ...) and then removed the leading 0).
            n_estimators = list(range(20, int(max_value) + 1, 20))
            valid_depths = [2,3,4]
        self.parameters = []
        for entry in tree_parameters:
            # NOTE(review): mutates the shared DTGSP grid in place and
            # overrides its max_depth with the forest-specific depths.
            entry.update({'n_estimators':n_estimators})
            entry.update({'max_depth':valid_depths})
            self.parameters.append(entry)
    def get_parameters(self):
        """Expand self.parameters into the flat list of combinations."""
        self.param_combinations = []
        for comb in ParameterGrid(self.parameters):
            self.param_combinations.append(comb)
        return self.param_combinations
class AdaBoost(AdaBoostClassifier, ChangeParameters):
    """AdaBoostClassifier combined with the ChangeParameters mixin.

    NOTE(review): both bases are defined outside this chunk; ChangeParameters
    presumably lets hyper-parameters be set in bulk on the estimator --
    confirm against its definition earlier in the file.
    """
    pass
class AdaBoostGridSearchParam(GridSearchParam):
    """Grid-search parameter space for AdaBoost.

    Mirrors RandomForestGridSearchParam: reuses the decision-tree grid and
    extends every entry with dimensionality-dependent ``n_estimators`` and
    ``max_depth`` ranges (ensemble size capped at 300).
    """
    def __init__(self, dimensionality):
        self.name = "adaboost"
        self.classifier = AdaBoost()
        self.dimensionality = dimensionality
        self.results = {}
        # Start from the decision-tree grid and widen it below.
        tree_parameters = DecisionTreeGridSearchParam(dimensionality).parameters
        if dimensionality <= 5:
            n_estimators = [2, 4]
            valid_depths = [2, 3]
        elif dimensionality <= 10:
            n_estimators = [2, 5, 10]
            valid_depths = [2, 3]
        else:
            # Cap the ensemble at 300; below the cap, derive it from the
            # sigmoid-shaped lookup of the dimensionality.
            # BUG FIX: was the float 300., which range() rejects on
            # Python 3 (and only accepts with a warning on Python 2).
            if dimensionality >= 300:
                max_value = 300
            else:
                max_value = max_estimators(dimensionality)
            # Multiples of 20 from 20 up to and including max_value.
            # (Replaces building range(0, ...) and removing 0; list() keeps
            # this a list on Python 3, where range() is lazy and has no
            # .remove().)
            n_estimators = list(range(20, max_value + 1, 20))
            valid_depths = [2, 3, 4]
        self.parameters = []
        for entry in tree_parameters:
            entry.update({'n_estimators': n_estimators})
            entry.update({'max_depth': valid_depths})
            self.parameters.append(entry)

    def get_parameters(self):
        """Return (and cache) every combination in the parameter grid."""
        self.param_combinations = list(ParameterGrid(self.parameters))
        return self.param_combinations
class TrainingDatathon(object):
    """Build the training data set for one EC number.

    Instantiating this class drives the whole training-data pipeline
    through ``core.ClassifierFramework``: framework construction,
    training-data selection, class-imbalance handling, dimensionality
    reduction and the final write-out.
    """
    def __init__(self, ec_number, mongo_db_name, hmm_db_name, main_dir):
        framework = core.ClassifierFramework(ec_number, hmm_db_name,
                                             mongo_db_name, "data",
                                             "ec_numbers",
                                             "training_" + hmm_db_name,
                                             main_dir)
        # Only run the pipeline when construct_framework() reports 0.
        if framework.construct_framework() == 0:
            framework.set_training_data()
            framework.deal_with_imbalance()
            framework.dimensionality_reduction()
            # summarization step intentionally disabled
            #framework.summarization()
            framework.write_training_data()
class Classificathon(object):
    """Run the classifier benchmark for a single EC number.

    Loads the training CSV written for ``ec_number`` (if present), fits a
    battery of classifier families over their grid-search parameter spaces,
    and writes cross-validation and validation score tables (sorted by MCC)
    next to the training data.

    data_frame: accepted for interface compatibility; currently unused.
    """
    def __init__(self, ec_number, main_dir, data_frame=None):
        ec_numbers_linkage = core.find_ec_number_linkage(ec_number)
        path = "/".join(ec_numbers_linkage)+"/"
        self.whole_dir = main_dir + path
        if os.path.isfile(self.whole_dir+ec_number+".csv"):
            self.csv = self.whole_dir+ec_number+".csv"
            self.df = pandas.read_csv(self.csv, index_col=0)
            # 'class' column holds the label; every other column is a feature.
            self.y = self.df['class'].as_matrix().ravel()
            self.x = self.df.ix[:, self.df.columns != 'class'].as_matrix()
            self.pos_label = ec_number
            self.neg_label = "no_"+ec_number
            self.learning_scores_file = self.whole_dir+"cv_results.csv"
            self.validation_scores_file = self.whole_dir+"valid_results.csv"
        else:
            print("no training data")
            return

    def construct_classifiers(self):
        """Grid-search every classifier family and persist the score tables.

        Writes validation results to ``valid_results.csv`` and
        cross-validation results to ``cv_results.csv``, both sorted by MCC
        descending, and keeps them on the instance as
        ``self.valid_results_df`` / ``self.cv_results_df``.
        """
        dimensionality = self.x.shape[1]
        # One factory per method; NaiveBayes takes no dimensionality.
        # (Replaces six copy-pasted if/elif branches doing identical work.)
        factories = [
            lambda: NaiveBayesGridSearchParam(),
            lambda: LogisticRegressionGridSearchParam(dimensionality),
            lambda: DecisionTreeGridSearchParam(dimensionality),
            lambda: SVMGridSearchParam(dimensionality),
            lambda: RandomForestGridSearchParam(dimensionality),
            lambda: AdaBoostGridSearchParam(dimensionality),
        ]
        cv_frames = []
        valid_frames = []
        for factory in factories:
            GSP = factory()
            GSP.get_parameters()
            GSP.parameterization(self.x, self.y, self.pos_label, self.neg_label)
            cv_frames.append(GSP.get_cv_results())
            valid_frames.append(GSP.get_validation_results())
        valid_results_df = pandas.concat(valid_frames, ignore_index=True)
        valid_results_df = valid_results_df.sort_values("MCC", ascending=False)
        self.valid_results_df = valid_results_df
        valid_results_df.to_csv(self.validation_scores_file)
        cv_results_df = pandas.concat(cv_frames, ignore_index=True)
        cv_results_df = cv_results_df.sort_values("MCC", ascending=False)
        self.cv_results_df = cv_results_df
        cv_results_df.to_csv(self.learning_scores_file)

    def get_best_estimator_params(self, top=20):
        """Blend CV and validation MCC for the ``top`` ranked estimators.

        Each estimator's final score is 0.4 * CV MCC + 0.6 * validation MCC
        (validation weighs more). Returns a dict mapping estimator ID to
        {"parameters": ..., "scores": [...], "final_score": ...}.
        """
        cv_top = self.cv_results_df.ix[:top][["ID", "MCC", "Parameters"]]
        valid_top = self.valid_results_df.ix[:top][["ID", "MCC", "Parameters"]]
        print(cv_top.shape)
        tmp_dict = {}
        for i in range(top):
            print(cv_top.ix[i, :])
            # BUG FIX: original called the non-existent dict.setfefault(),
            # which raised AttributeError on the first iteration.
            tmp_id = cv_top.ix[i, :]["ID"]
            cv_score = cv_top.ix[i, :]["MCC"]
            param = cv_top.ix[i, :]["Parameters"]
            tmp_dict.setdefault(tmp_id, {}).update({"parameters": param})
            tmp_dict.setdefault(tmp_id, {}).setdefault("scores", []).append(0.4*cv_score)
            tmp_id = valid_top.ix[i, :]["ID"]
            valid_score = valid_top.ix[i, :]["MCC"]
            param = valid_top.ix[i, :]["Parameters"]
            tmp_dict.setdefault(tmp_id, {}).update({"parameters": param})
            tmp_dict.setdefault(tmp_id, {}).setdefault("scores", []).append(0.6*valid_score)
        # BUG FIX: original unpacked (key, values) from .values(); it must
        # iterate .items(). The values dicts are mutated in place, so the
        # original's tmp_dict.update({key: values}) no-op is dropped.
        for key, values in tmp_dict.items():
            final_score = round(sum(values["scores"]), 4)
            values.update({"final_score": final_score})
        return tmp_dict
        # Reference sketch for construct_best_estimator (was commented out):
        #   best_params = ranked_parameters[0]
        #   best_estimator = self.estimator
        #   for key, value in best_params.items():
        #       setattr(best_estimator, key, value)
        #   best_estimator.fit(X, y)
        #   return best_estimator

    def construct_best_estimator(self):
        """TODO: fit and return the estimator with the best blended score."""
        pass
'''
#p = P('3.1.4.4')
#p = P('3.5.1.23')
#p = P('3.1.1.1')
#p = P('4.1.99.19')
p = P('1.14.13.165')
#p = P('3.4.22.10')
ecs = [
'6.2.1.36',
'2.7.1.72',
'4.1.1.21',
'3.2.1.93',
'4.1.99.19',
'4.1.1.44',
'2.4.1.251',
'3.5.2.2',
'1.3.8.1',
'3.5.1.24',
'3.5.1.23',
'2.4.1.175',
'1.11.1.18',
'1.7.7.-',
'1.7.1.6',
'1.7.1.7',
'1.7.1.4',
'2.4.1.230',
'6.3.5.9',
'3.4.14.12',
'1.1.1.309',
'3.4.14.11',
'1.14.13.114',
'1.14.13.113',
'1.7.7.1',
'1.7.7.2',
'1.5.8.2',
'2.1.1.218',
'2.1.1.219',
'2.1.1.210',
'2.1.1.213',
'2.1.1.215',
'2.1.1.216',
'2.1.1.217',
'3.5.4.3',
'3.7.1.4',
'3.5.4.1',
'1.2.1.-',
'1.2.3.3',
'1.14.12.14',
'1.14.12.15',
'1.14.12.17',
'1.14.12.10',
'1.14.12.11',
'1.14.12.12',
'1.2.1.8',
'1.2.1.7',
'1.2.1.5',
'1.2.1.4',
'1.2.1.3',
'1.14.12.19',
'1.4.3.3',
'4.2.1.19',
]
#for ec in ecs:
#p = P(ec)
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.