text stringlengths 38 1.54M |
|---|
# Generated by Django 3.0.5 on 2020-10-30 10:17
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter ``Attendance.date``: set ``auto_now_add`` and add a DB index."""

    # Must run after migration 0039 of the 'core' app.
    dependencies = [
        ('core', '0039_auto_20201030_1007'),
    ]

    operations = [
        migrations.AlterField(
            model_name='attendance',
            name='date',
            # auto_now_add stamps the date at row creation; db_index speeds
            # up date-range queries over attendance records.
            field=models.DateField(auto_now_add=True, db_index=True),
        ),
    ]
|
from KnuthAlgorithmR import knuthR
from MitchellGenerate import genDeBruijn
from MitchellGenerate import doublepuncture
class iterbruijn:
    """Re-iterable wrapper: each ``iter()`` yields a fresh de Bruijn
    bit stream of order ``n`` (delegates to ``iterbruijn_iter``)."""

    def __init__(self, n):
        self.n = n

    def __iter__(self):
        # A new iterator per call, so the same object can be consumed twice.
        return iterbruijn_iter(self.n)
class iterbruijn_iter:
    """Iterator yielding the bits of a de Bruijn sequence of order ``n``.

    Orders are reduced recursively:
      * even n > 2  -> Mitchell's doubling construction from order n/2
      * odd  n > 3  -> Knuth's Algorithm R from order n-1
      * n == 2      -> the fixed cycle 0011
      * otherwise   -> treated as n == 3, the fixed cycle 00010111
        (n in {0, 1} is degenerate and deliberately falls into this case,
        matching the original design)
    """

    def __init__(self, n):
        if n % 2 == 0 and n > 2:
            half = n // 2
            self.a = genDeBruijn(half, iterbruijn(half))
        elif n % 2 == 1 and n > 3:
            prev = n - 1
            self.a = knuthR(prev, iterbruijn(prev))
        elif n == 2:
            self.a = iter([0, 0, 1, 1])
        else:  # n == 3 (or degenerate n < 2)
            self.a = iter([0, 0, 0, 1, 0, 1, 1, 1])

    def __iter__(self):
        return self

    def __next__(self):
        # The original wrapped this in try/except StopIteration only to
        # immediately re-raise StopIteration -- a no-op; just delegate.
        return next(self.a)
class iterdpdB:
    """Re-iterable wrapper around a doubly-punctured de Bruijn stream of
    order ``n`` (delegates to ``iterdpdB_iter``)."""

    def __init__(self, n):
        self.n = n

    def __iter__(self):
        # Fresh iterator per call so the wrapper can be consumed repeatedly.
        return iterdpdB_iter(self.n)
class iterdpdB_iter:
    """Iterator over a doubly-punctured de Bruijn sequence of order ``v``.

    Yields exactly 2**v - 2 bits from the punctured stream.
    """

    def __init__(self, v):
        # The double puncture removes two bits from the full 2**v cycle.
        self.count = 2 ** v - 2
        self.a = doublepuncture(v, iterbruijn(v))

    def __iter__(self):
        return self

    def __next__(self):
        self.count -= 1
        if self.count < 0:
            raise StopIteration
        return next(self.a)
if __name__ == "__main__":
    # Smoke test: print full sequences, then the doubly-punctured ones.
    for order in [3, 4, 5, 6, 8]:
        print("{:4}: {}".format(order, list(iterbruijn(order))))
    print()
    for order in [3, 4, 5, 6, 8]:
        print("dp{:2}: {}".format(order, list(iterdpdB(order))))
|
"""area_n_points.py:
"""
__author__ = "Dilawar Singh"
__copyright__ = "Copyright 2017-, Dilawar Singh"
__version__ = "1.0.0"
__maintainer__ = "Dilawar Singh"
__email__ = "dilawars@ncbs.res.in"
__status__ = "Development"
import sys
import os
import numpy as np
def triangularization( pts ):
    """Decompose *pts* into triangles.

    Placeholder: not yet implemented, always returns an empty list.
    """
    return []
def area(pts, plot = True):
    """Return the area bounded by *pts* (triangulation-based; triangulation
    is a stub, so this currently evaluates to 0.0).

    When *plot* is true, also scatter-plot the points and save 'test.png'.
    """
    if plot:
        import matplotlib as mpl
        import matplotlib.pyplot as plt
        xs, ys = zip(*pts)
        mpl.style.use(['bmh', 'fivethirtyeight'])
        plt.plot(xs, ys, 'o')
        plt.savefig('test.png')
    total = 0.0
    tris = triangularization(pts)
    if plot:
        for tri in tris:
            print(tri)
    return total
def main():
    """Generate n random 2-D points and report the area they bound."""
    n = 6
    xs = np.random.randint(0, 100, n)
    ys = np.random.randint(0, 100, n)
    pts = list(zip(xs, ys))
    print(pts)
    a = area(pts)
    # Fixed typo in the user-facing message: 'bouded' -> 'bounded'.
    print('[INFO] Area bounded by points %f' % a)

if __name__ == '__main__':
    main()
|
# Author: Melanie Huynh
# Date: 27 January 2021
# Description: This program uses a binary search to find a target. If the target
# is not found, it raises an exception.
def bin_except(a_list, target):
    """Binary-search *a_list* (assumed sorted ascending) for *target*.

    Returns the index of an occurrence of target.
    Raises TargetNotFound if target is not present.  (The original
    docstring claimed a -1 return, but the code has always raised.)
    """
    low = 0
    high = len(a_list) - 1
    while low <= high:
        mid = (low + high) // 2
        if a_list[mid] == target:
            return mid
        if a_list[mid] > target:
            high = mid - 1
        else:
            low = mid + 1
    raise TargetNotFound
class TargetNotFound(Exception):
    """Raised by bin_except when the target value is absent from the list."""
def main():
    """
    Ad-hoc driver: searching for a value that is absent (11) exercises the
    TargetNotFound exception path of bin_except.
    """
    numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
    wanted = 11
    result = bin_except(numbers, wanted)
    print(result)

if __name__ == '__main__':
    main()
|
# get the data needed to plot the TEP with a quiver plot on top of it
import numpy as np
#import pickle
import os
import pandas as pd
import csv
from scipy.interpolate import interp1d
import sys
sys.path.insert(0,'../../functions') # so I can import the functions
from cycle_funcs import calc_fitness, calc_dfitness, sample_wsV_dimorph
# =====================================================================================================================
# model parameters
# ---
# which parameter set to find the isoclines for (comment out ones I've done)
# filename suffix selecting which parameter set this run analyses
suffix = '_1'
'''
# default parameter values
ID = 0
ngrid = 31 # number of grid points in log space along the resident1 axis
'''
# high dispersal cost parameter values
ID = 17     # row index into sing_strat<suffix>.csv selecting the parameter set
ngrid = 31  # number of grid points in log space along the resident1 axis

# where results will be stored
dir_results = '../../results/circular/'

# default algorithm parameters
params = {
    'tol_res': 1e-10,       # the euclidean distance in final resident structure before say it's equilibriated
    'tol_res_dimorph': 1e-6,# the euclidean distance in final dimorphic resident structure before say it's equilibriated
    'tol_mut': 1e-10,       # the euclidean distance in final mutant structure before say it's equilibriated
    'delta_m': 1e-9,        # step size to estimate gradients; default heuristic sqrt(machine error) * approx value
}

# read in the info from the sing_strat_x.csv file and repopulate params dictionary
# ---
par_names = ['suffix', 'layout', 'f', 'c', 'r', 'KL', 'h', 'KS_mean', 'p_cat'] # names of missing parameter values
fname = dir_results + 'sing_strat' + suffix + '.csv'
df = pd.read_csv(fname)
ss_res = df.iloc[ID] # the particular row we want
# copy the model parameters for this run from the csv row into params
for par_name in par_names:
    params[par_name] = ss_res[par_name]
# read in the isoclines to define the TEP region
# ---
fname = dir_results + 'isocline' + suffix + '_' + str(ID) + '.csv'
df = pd.read_csv(fname)
df = df.sort_values(by=['m_mut'])

# grab the isoclines
m_iso1v = df['m_res'].values
m_iso2v = df['m_mut'].values

# flip all points below the diagonal so that we define the TEP region
region = [ (m_mut, m_res) if m_res > m_mut else (m_res, m_mut) for m_res, m_mut in zip(m_iso1v, m_iso2v) ]

# make sure the first entry is 0,0
if region[0] != (0,0):
    # NOTE(review): '+=' concatenates [(0,0)] + region onto the existing list,
    # duplicating every boundary point; 'region = [(0,0)] + region' may have
    # been the intent -- confirm before relying on the boundary's point order.
    region += [(0,0)] + region

# make sure the last entry is 0,y_intercept
if region[-1][1] == 1:
    region[-1] = (0, ss_res['y_intercept'])
else:
    region += [(0, ss_res['y_intercept'])]

# split the region boundary into an upper and lower bound on resident 2
# ---

# find the extremal point of the region in the resident 1 dimension, the bulging out to the right of the PIP graph
m_iso1V, m_iso2V = zip(*region)
extremal_res1 = max(m_iso1V)
extremal_idx = m_iso1V.index( extremal_res1 )
extremal_res2 = m_iso2V[extremal_idx]

# split the region boundary into two lines, one for a lower bound on res2, and one for an upper bound
line_lo = [ (m_res1, m_res2) for m_res1, m_res2 in region if m_res2 <= extremal_res2 ]
line_hi = [ (m_res1, m_res2) for m_res1, m_res2 in region if m_res2 >= extremal_res2 ]

# create functions that will return the lower and upper bound on res2 for a given res1
m_res1V, m_res2V = zip(*line_lo)
f_lo = interp1d(m_res1V, m_res2V)
m_res1V, m_res2V = zip(*line_hi)
f_hi = interp1d(m_res1V, m_res2V)

# create a grid along the resident 1 dimension, and find the isocline at each point along that grid
# ---

# do it in log space; [:-1] drops the extremal res1 point itself
pwrV = np.linspace(-6, np.log10(extremal_res1), ngrid)[:-1]
m_res1V = [ 10**pwr for pwr in pwrV ]

# if the csv file doesn't exist yet, create it, and include the resident 1 = 0 point in the grid
# ---
fname = dir_results + 'dimorph_isocline' + suffix + '_' + str(ID) + '.csv'
if not os.path.isfile(fname):
    # add the zero point to our search
    m_res1V = [0] + m_res1V
    # write the column headers
    with open(fname, "w", newline="") as ftarget:
        writer = csv.writer(ftarget)
        writer.writerow( ['m_res1', 'm_res2', 'dfit2'] )
# for each resident 1 strategy, find where the resident 2 mutant invasion fitness gradient equals 0
# (I know that this is an attractor)
# ---
for m_res1 in m_res1V:

    # bounds on resident 2 from the TEP region boundary at this resident 1
    m_res2_hi_bnd = f_hi([m_res1])[0]
    m_res2_lo_bnd = f_lo([m_res1])[0]

    # find where resident 2 invasion fitness gradient goes positive
    # ---

    # initialise
    m_res2_lo = m_res2_hi_bnd
    dfit2_lo = -1 # at this point, the resident-2 fitness gradient is negative
    nL = None
    print('find where gradient goes positive')
    while dfit2_lo < 0:
        print(m_res2_lo)
        # update: the previous low estimate becomes the high bracket
        m_res2_hi = m_res2_lo
        dfit2_hi = dfit2_lo
        nL_hi = nL
        # halve distance to lower bound for the new low estimate
        m_res2_lo = (m_res2_lo_bnd + m_res2_hi) / 2
        # find the fitness gradient here; nL warm-starts the solver
        wsV, nL = sample_wsV_dimorph(m_res1, m_res2_lo, params, return_nT=True, nL=nL)
        dfit2_lo = calc_dfitness(m_res2_lo, wsV, params)
        nL_lo = nL
    print('m_res2_lo = ' + str(m_res2_lo))

    # use bisection method to find the root
    # ---
    print('find root')
    tol_dfit = 1e-7 # maximum derivative that we'll accept as being close enough to 0
    dfit2_mid = dfit2_lo
    while abs(dfit2_mid) > tol_dfit:
        m_res2_mid = (m_res2_lo + m_res2_hi) / 2
        # NOTE(review): nL_hi is None until the bracketing loop above has run
        # at least twice; the midpoint below assumes both endpoints have
        # solver states -- confirm this always holds in practice.
        nL_mid = [ (nL_lo[0]+nL_hi[0])/2 , (nL_lo[1]+nL_hi[1])/2 ]
        wsV_mid, nL_mid = sample_wsV_dimorph(m_res1, m_res2_mid, params, return_nT=True, nL=nL_mid)
        dfit2_mid = calc_dfitness(m_res2_mid, wsV_mid, params)
        # keep the half-interval that still brackets the sign change
        if np.sign(dfit2_lo) == np.sign(dfit2_mid):
            nL_lo = nL_mid
            wsV_lo = wsV_mid
            dfit2_lo = dfit2_mid
            m_res2_lo = m_res2_mid
        else:
            nL_hi = nL_mid
            wsV_hi = wsV_mid
            dfit2_hi = dfit2_mid
            m_res2_hi = m_res2_mid
        print(m_res2_mid)

    # write this isocline point to the csv
    # ---
    with open(fname, "a", newline="") as ftarget:
        writer = csv.writer(ftarget)
        writer.writerow([m_res1, m_res2_mid, dfit2_mid])
|
"""
This script is used
"""
from sqlalchemy import create_engine
import pandas as pd
import numpy as np
import time
# reminder: dont store your credentials like this, this is only for illustrative purpose
connect_string = 'mysql+pymysql://{}:{}@{}:{}/{}?charset=utf8mb4'.format("root", "MyP4ssword123!", "34.65.173.67", "3306", "webapp_data")
engine = create_engine(connect_string)

stores = ["Zurich", "Paris", "Berlin"]
items = {"Shirt": 50.0, "Trousers": 65.0, "Jumper": 80.0}
cols = ["Purchase_ID", "Location", "Item", "Amount", "Price", "Total"]
df = pd.DataFrame(columns=cols, data=np.zeros([0, 6]))

# Seed the table with 10 random purchases.
for i in range(10):
    store = np.random.choice(stores)
    item = np.random.choice(list(items.keys()))
    price = items[item]
    amount = np.random.choice([1, 2, 3])
    total = price * amount
    # BUGFIX: the row must follow `cols` order (Amount before Price); the
    # original wrote price into 'Amount' and amount into 'Price'.
    row = [int(i), store, item, amount, price, total]
    new_row = pd.DataFrame(data=[row], columns=cols)
    # DataFrame.append was removed in pandas 2.0; concat is its replacement.
    df = pd.concat([df, new_row], ignore_index=True)
df.to_sql("shop_analytics", con=engine, if_exists="replace", index=False)

# Stream 60 more purchases, one row every half second.
for i in range(60):
    if i % 10 == 0:
        print(i)
    store = np.random.choice(stores)
    item = np.random.choice(list(items.keys()))
    price = items[item]
    amount = np.random.choice([1, 2, 3])
    total = price * amount
    identifier = int(len(df) + i)
    row = [identifier, store, item, amount, price, total]
    new_row = pd.DataFrame(data=[row], columns=cols)
    new_row.to_sql("shop_analytics", con=engine, if_exists="append", index=False)
    time.sleep(0.5)

pd.read_sql("shop_analytics", con=engine)
|
people = {}
count = int(input())
# Prompt for a name and a birth date `count` times, collecting name -> date.
for _ in range(count):
    name = input("Nombre: ")
    birth = input("Fecha Nac.:")
    people[name] = birth
print(people)
|
from tkinter import *
from random import (choice)
SIZE = 20      # pixel size of one grid cell (snake segment / food square)
KEY = str("")  # last pressed keysym; drives the movement direction
EX, EY = 0, 0  # current head position, in pixels
DELAY = 100    # game-loop tick, in milliseconds
SNAKE = []     # tail segments (Frames), one added per food eaten
COORD = []     # recent head coordinates; the tail follows this trail
class Jogo:
    """Tkinter snake game on a 500x500 board with a 20-pixel grid.

    Game state (KEY, EX/EY, SNAKE, COORD) lives in module-level globals
    shared with the event handlers, mirroring the original design.
    """

    def __init__(self):
        self.window = Tk()
        self.window.geometry("500x500+400+100")
        # Every key press is routed to keypress(), which records the keysym.
        self.window.bind("<KeyPress>", self.keypress)
        self.map()

    def map(self):
        """Build the board, the snake head, the first food, and start the loop."""
        self.mapa = Frame(
            self.window, width=500, height=500, bg="teal"
        )
        self.mapa.pack(expand=True)
        self.snake()
        self.place()
        self.food()
        self.loop()

    def keypress(self, key):
        """Record the last pressed key; move() interprets it each tick."""
        global KEY
        KEY = key.keysym

    def move(self):
        """Advance the head one cell in the direction of the last arrow key."""
        global EX, EY
        if KEY == "Right":
            EX = EX + SIZE
        elif KEY == "Left":
            EX = EX - SIZE
        elif KEY == "Up":
            EY = EY - SIZE
        elif KEY == "Down":
            EY = EY + SIZE
        self.cobra.place(x=EX, y=EY)

    def snake(self):
        """Create the snake's head frame at the top-left corner."""
        self.cobra = Frame(
            self.mapa, width=SIZE, height=SIZE, bg="blue"
        )
        self.cobra.place(x=0, y=0)

    def place(self):
        """Pick a random grid-aligned position for the next food square."""
        lis = []
        for k in range(0, 500 - SIZE, SIZE):
            lis.append(k)
        self.x = choice(lis)
        self.y = choice(lis)

    def food(self):
        """Draw the food square in a randomly chosen colour."""
        cor = [
            "white", "gray", "black",
            "pink", "blue", "purple",
            "red", "violet", "yellow",
            "yellow", "orange", "pink",
            "#9900ff", "#cccc00", "#33bbff",
            "#4d4d00", "#ff80b3", "#77b300"
        ]
        self.fruit = Frame(
            self.mapa, width=SIZE, height=SIZE, bg=choice(cor)
        )
        self.fruit.place(x=self.x, y=self.y)

    def collision(self):
        """Classify the head's situation: 'saiu' (left the board),
        'comeu' (reached the food), 'morreu' (hit its own trail), or None."""
        if EX < 0 or EX > 500 - SIZE:
            return "saiu"
        if EY < 0 or EY > 500 - SIZE:
            return "saiu"
        if EX == self.x and EY == self.y:
            return "comeu"
        tup = tuple((EX, EY))
        if tup in COORD:
            return "morreu"

    def game_over(self):
        """Show the game-over banner, the score, and a replay button."""
        self.texto = Label(
            self.mapa, fg="red", text="Fim de Jogo",
            font=("Arial", 50, "bold"), bg="teal"
        )
        # Score: 5 points per tail segment.
        self.pontos = Label(
            self.mapa, fg="blue", text="Pontos: " +
            str(len(SNAKE) * 5) + "\nTamanho: " + str(len(SNAKE)),
            font=("Arial", 30, "bold"), bg="teal",
        )
        self.bt = Button(
            self.mapa, text="Jogar Novamente", relief="flat",
            cursor="exchange", font=("Arial", 20, "bold"),
            fg="blue", bg="green", command=self.clear
        )
        self.texto.place(relx=0.1, rely=0.1)
        self.pontos.place(relx=0.3, rely=0.3)
        self.bt.place(relx=0.25, rely=0.5)

    def clear(self):
        """Reset all global state, destroy the widgets, rebuild the board."""
        global EX, EY, SNAKE, COORD, KEY
        EX, EY, KEY = 0, 0, ""
        SNAKE, COORD = [], []
        for k in [
            self.mapa, self.fruit,
            self.texto, self.pontos,
            self.cobra
        ]:
            k.destroy()
        self.map()
        return

    def loop(self):
        """One tick: resolve collisions, grow/trail the tail, move, reschedule."""
        global SNAKE, COORD
        colisao = self.collision()
        if colisao == "saiu":
            self.game_over()
            return
        if colisao == "comeu":
            self.fruit.destroy()
            self.place()
            self.food()
            # Grow: new tail segment coloured like the newly spawned fruit.
            SNAKE.append(Frame(self.mapa, width=SIZE, height=SIZE, bg=str(self.fruit['bg'])))
        if colisao == "morreu":
            self.game_over()
            return
        # Record the head's trail; keep only as many points as tail segments.
        COORD.append((EX, EY))
        if len(COORD) > len(SNAKE):
            del(COORD[0])
        # Place each tail segment on successively newer trail points.
        diminu = len(SNAKE)
        for self.k in SNAKE:
            self.k.place(x=COORD[-diminu][0], y=COORD[-diminu][1])
            diminu = diminu - 1
        self.move()
        self.window.after(DELAY, self.loop)

    def run(self):
        """Enter the Tk main loop."""
        self.window.mainloop()
if __name__ == "__main__":
    # Build the game window and hand control to Tk's event loop.
    game = Jogo()
    game.run()
|
import metrics as mt
import dippykit as dip
import matplotlib.pyplot as plt
import skfuzzy as fuzz
import skimage
import exposure
import matlab.engine
import numpy as np
'''
def mu1(x,fh2):
return np.exp(-((255-x)**2)/(2*fh2))
def mu2(x,a,ex):
gamma = (10*(ex-0.5))**1.1
return 0.99*((x-a)/(255-a))**gamma
def restore_overexposure(img):
img2 = np.copy(img)
img2 = skimage.img_as_ubyte(skimage.color.rgb2hsv(img2))
h = skimage.exposure.histogram(img2[:,:,2],normalize=True)
img2 = img2.astype(np.double)
ex = (1/h[0].size)*(np.sum(h[0]*h[1])/np.sum(h[0]))
a =np.floor( h[0].size *(1-ex))
fh2 = 0.5 * (np.sum(((255-h[1])**4) * h[0]))/(np.sum(((255-h[1])**2) * h[0]))
for i in range(img2[:,:,2].shape[0]):
for j in range(img2[:,:,2].shape[1]):
if (img2[i,j,2] >= a):
img2[i,j,2] = mu2(img2[i,j,2],a,ex)
else:
img2[i,j,2] = mu1(img2[i,j,2],fh2)
for i in range(img2[:,:,1].shape[0]):
for j in range(img2[:,:,1].shape[1]):
img2[i,j,1] = (1/(1+np.exp(-12*(mu1(img2[i,j,1],fh2)-0.5))))* 0.7
img2[:,:,2] = np.floor(255*img2[:,:,2])
img2[:,:,1] = np.floor(255*img2[:,:,1])
img2 = img2.astype(np.uint8)
return skimage.color.hsv2rgb(img2)
'''
def restore_overexposure(img):
    """Restore an over-exposed image.

    Current implementation simply delegates to histogram equalisation from
    the project's `exposure` module; the fuzzy HSV-based method in this
    file is kept commented out for reference.
    """
    return exposure.histogram_equalization(img)
if __name__ == "__main__":
    # Base image names; index 2 (wiseonRocks) uses .png originals, the rest .jpg.
    f_basename = ("brussels3","espresso_square","wiseonRocks_square")
    rang1 = (0,1,2)     # which base images to process
    rang2 = (1,)        # which overexposure levels to process
    histograms = True   # save before/after histograms
    stats = True        # compute comparison statistics at the end
    show = False        # show figures interactively at the end
    paths =[]
    titles = []
    for j in rang1:
        # Restart the per-image bookkeeping for each base image.
        paths.clear()
        titles.clear()
        prev_path = "Data_students\\"+f_basename[j]+"\\"+f_basename[j]
        if j == 2:
            orig_path = prev_path + "_01_0.png"
        else:
            orig_path = prev_path + "_01_0.jpg"
        orig_img = dip.im_read(orig_path)
        #orig_img = skimage.img_as_ubyte(skimage.color.rgb2gray(orig_img))
        plt.figure()
        plt.title(f_basename[j] + 'Original Image')
        plt.imshow(orig_img)
        plt.axis("off")
        if histograms:
            orig_hist = mt.histogram(orig_img)
            plt.figure()
            plt.title(f_basename[j] + ' Original Image. Histogram')
            plt.stem(orig_hist[1],orig_hist[0],use_line_collection=True)
            plt.savefig('overexposure_restored\\'+f_basename[j]+"_hist.png")
        for i in rang2:
            # "_04_<i>" files are the overexposed variants of the base image.
            f = prev_path+"_04_"+str(i)+".jpg"
            img = dip.im_read(f)
            #img = skimage.img_as_ubyte(skimage.color.rgb2gray(img))
            title = f_basename[j] + " overexposure level: "+str(i)
            titles.append(title)
            rec_img = restore_overexposure(img)
            if histograms:
                # Side-by-side histograms: degraded vs recovered image.
                h_rec = mt.histogram(rec_img)
                h = mt.histogram(img)
                plt.figure()
                plt.suptitle(f_basename[j] + ' overexposure level: '+str(i))
                plt.subplot(1,2,1)
                plt.title( 'Image histogram')
                plt.stem(h[1],h[0],use_line_collection=True)
                plt.subplot(1,2,2)
                plt.title('Recovered image histogram')
                plt.stem(h_rec[1],h_rec[0],use_line_collection=True)
                fig_size = plt.gcf().get_size_inches()
                plt.gcf().set_size_inches(2 * fig_size)
                plt.savefig("overexposure_restored\\"+f_basename[j]+"level"+str(i)+"_hist.pdf")
            plt.figure()
            plt.imshow(img)
            plt.title(title+'. Image')
            plt.axis("off")
            plt.figure()
            plt.title(title+ '. Recovered image')
            plt.imshow(rec_img)
            plt.axis("off")
            # Persist both the degraded and the recovered image as .bmp.
            path = "overexposure_restored\\"+f_basename[j]+"level"+str(i)
            dip.im_write(rec_img, path+"_restored.bmp")
            dip.im_write(img, path+".bmp")
            paths.append(path+'_restored.bmp')
        if stats:
            mt.compute_stats(paths,titles,orig_path,False)
    if show:
        plt.show()
if __name__ == '__main__':
str = "ASDqweASD"
new_str = ""
for i in range(len(str)):
if ord(str[i]) >= 65 | ord(str[i]) <= 90 :
new_str += str[i]
print(new_str)
|
import time

# Wall-clock start; total runtime is reported at the bottom of the script.
start_time = time.time()

# ----------------------------------------------------------------
# http://stackoverflow.com/questions/4114167/checking-if-a-number-is-a-prime-number-in-python
# As the challenge here is not about finding prime numbers, I'll be using a nice clean option.
def is_prime(a):
    """Return True iff *a* is prime.

    Fixes the original, which returned True for a < 2 (the all() over an
    empty range), wrongly counting 0 and 1 as primes in the quadratic-prime
    search below.  Trial division now stops at sqrt(a) instead of a,
    and range() keeps it Python-2/3 compatible.
    """
    if a < 2:
        return False
    return all(a % i for i in range(2, int(a ** 0.5) + 1))
def calculate(n, a, b):
    """Evaluate the quadratic n**2 + a*n + b (factored as n*(n+a) + b)."""
    return n * (n + a) + b
# Project Euler 27: find coefficients a, b (-1000 <= a < 1000, 2 <= b < 1000)
# whose quadratic n^2 + a*n + b yields the longest run of consecutive primes
# for n = 0, 1, 2, ...  (Python 2 source: print statements, xrange.)
maxN = 0
product = 0
for a in xrange(-1000,1000):
    # Because negative numbers cannot be prime, by Googling.
    for b in xrange(2,1000):
        # n = 0 gives b itself, so b must be prime for any run to start.
        if(is_prime(b)):
            i = 0
            while(is_prime(abs(calculate(i,a,b)))):
                i+=1
            if i > maxN:
                print str(a) + " and " + str(b) + " have made " + str(i) + " consecutive primes."
                maxN = i
                product = a*b
print "maxN is " + str(maxN) + " and product is " + str(product)
# print "The largest product is " + str(product) + " with " + str(maxN) + " primes."
print("--- %s seconds ---" % (time.time() - start_time))
# ----------------------------------------------------------------
|
from flask import request, jsonify, json
from flask_restful import Resource
from flask_jwt import jwt_required, current_identity
from flasgger import swag_from
import pyexcel as p
from flask import make_response, jsonify
from services.report_service import ReportService
from utils.util import model_to_dict
class ReportMonthlyResource (Resource):
    """REST endpoint that exports the monthly report as a CSV attachment."""

    # Shared service instance used by all requests to this resource.
    report_service = ReportService()

    @swag_from('../../spec/reports/monthly.yml')
    def get(self):
        """Return the monthly report.

        Query parameters (request.args) are forwarded unchanged to
        ReportService.monthly().  With data: responds with a text/csv
        attachment named 'export.csv'.  Without data: a JSON status
        message.  On error: a JSON payload carrying the exception detail.
        """
        try:
            req_data = self.report_service.monthly(request.args)
            if (len(req_data)) != 0:
                # First row: column headers taken from the first record's
                # keys; remaining rows: the record values.
                res_data = [[i[0] for i in req_data[0].items()]] + [list(i) for i in req_data]
                sheet = p.Sheet(res_data)
                output = make_response(sheet.csv)
                output.headers["Content-Disposition"] = "attachment; filename=export.csv"
                output.headers["Content-type"] = "text/csv"
                return output
            else:
                res_json = {'status': 1, 'message': 'No Data found in the specified range'}
                return jsonify(res_json)
        except Exception as e:
            print(e)
            # e.args[0], when present, carries the service's error payload.
            if e.args:
                res_data = e.args[0]
            else:
                res_data = e
            res_json = {'status': 0, 'error': res_data}
            return jsonify(res_json)
import os
import mala
import numpy as np
from mala.datahandling.data_repo import data_repo_path
data_path = os.path.join(data_repo_path, "Be2")
"""
Shows how MALA can be used to optimize descriptor
parameters based on the ACSD analysis (see hyperparameter paper in the
documentation for mathematical details).
"""
####################
# 1. DETAILS OF THE ACSD ANALYSIS
# Define how many points should be used for the ACSD analysis
# and which values should be used for the bispectrum hyperparameters.
####################
parameters = mala.Parameters()

# Specify the details of the ACSD analysis.
parameters.descriptors.acsd_points = 100
hyperoptimizer = mala.ACSDAnalyzer(parameters)
# Candidate values for the two bispectrum hyperparameters under study.
hyperoptimizer.add_hyperparameter("bispectrum_twojmax", [2, 4])
hyperoptimizer.add_hyperparameter("bispectrum_cutoff", [1.0, 2.0])

####################
# 2. DATA
# When adding data for the ACSD analysis, add preprocessed LDOS data for
# and a calculation output for the descriptor calculation.
####################
hyperoptimizer.add_snapshot("espresso-out", os.path.join(data_path, "Be_snapshot1.out"),
                            "numpy", os.path.join(data_path, "Be_snapshot1.out.npy"),
                            target_units="1/(Ry*Bohr^3)")
hyperoptimizer.add_snapshot("espresso-out", os.path.join(data_path, "Be_snapshot2.out"),
                            "numpy", os.path.join(data_path, "Be_snapshot2.out.npy"),
                            target_units="1/(Ry*Bohr^3)")

# If you plan to plot the results (recommended for exploratory searches),
# the optimizer can return the necessary quantities to plot.
hyperoptimizer.perform_study(return_plotting=False)
hyperoptimizer.set_optimal_parameters()
|
from django.test import TestCase
class TestSuiteRunsTestCase(TestCase):
    """Smoke test: passes whenever the application imports and runs."""

    def test_suite_should_run(self):
        # Reaching this point proves the project loaded without syntax
        # or import errors; no further behaviour is exercised.
        pass
|
# Copyright (c) 2019-present, HuggingFace Inc.
# All rights reserved. This source code is licensed under the BSD-style license found in the LICENSE file in the root directory of this source tree.
import os
from argparse import ArgumentParser
import torch
import torch.nn.functional as F
from model import BertModel
from data_utils.tokenization import BertWordPieceTokenizer
from torch.serialization import default_restore_location
from data_utils.utils import move_to_cuda
import pdb
def read_file(path):
    """Yield each non-blank line of *path* (UTF-8), stripped of whitespace."""
    with open(path, encoding="utf-8") as handle:
        for raw in handle:
            line = raw.strip()
            if line:
                yield line
def sample_sequence(model, tokenizer, length, context=None, temperature=1, top_k=0, device='cuda', sample=False):
    """Extend *context* with up to *length* tokens generated by *model*.

    Stops early when the tokenizer's SEP (EOS) id is produced.  Returns the
    generated ids as a list, excluding the leading CLS token.

    Fix: removed a leftover ``pdb.set_trace()`` that froze every generation
    step, plus the dead commented-out sampling branch.

    NOTE(review): ``sample`` and ``device`` are currently unused, and the
    next token is taken as ``prev[0][2]`` (the 3rd-best candidate) -- this
    looks deliberate but is undocumented; confirm against the training setup.
    """
    input_tokens = context["input_tokens"]
    output = input_tokens
    past = None
    eos_id = tokenizer.sep()
    with torch.no_grad():
        model.eval()
        for i in range(length):
            logits, _, past = model(input_tokens=input_tokens, clm=True, past=past)
            logits = logits[:, -1, :] / temperature
            logits = top_k_logits(logits, k=top_k)
            log_probs = F.softmax(logits, dim=-1)
            _, prev = torch.topk(log_probs, k=top_k, dim=-1)
            next_id = prev[0][2].item()
            if next_id == eos_id:
                break
            input_tokens = prev
            output = torch.cat((output, prev), dim=1)
    return output[0, 1:].tolist()
def top_k_logits(logits, k):
    """Mask all but the k largest entries of each row to -1e10.

    -1e10 acts as -infinity under softmax: e^-1e10 contributes nothing to
    the denominator.  k == 0 disables masking and returns logits unchanged.
    """
    if k == 0:
        return logits
    kth_best = torch.topk(logits, k)[0][:, -1].view(-1, 1).expand_as(logits)
    neg_inf = torch.ones_like(logits) * -1e10
    return torch.where(logits < kth_best, neg_inf, logits)
def convert_content(tokenizer, text):
    """Encode *text* as [CLS] + token ids, wrapped in the model's input dict."""
    ids = [tokenizer.cls()] + tokenizer.convert_text_to_ids(text)
    return {'input_tokens': torch.LongTensor([ids])}
def convert_model(state_dict):
    """Strip DataParallel's 'module.' marker from checkpoint keys."""
    return {key.replace("module.", ""): value
            for key, value in state_dict.items()}
def generate(model, tokenizer, device, data_text, sample=True, top_k=5, beam_size=6, outlens=30):
    """Generate a continuation for every line of *data_text*; print the list."""
    result = []
    with torch.no_grad():
        model.eval()
        for line in read_file(data_text):
            context = move_to_cuda(convert_content(tokenizer, text=line), device)
            ids = sample_sequence(model, tokenizer, outlens, context=context, temperature=1, top_k=top_k, device=device, sample=True)
            decoded = tokenizer.convert_ids_to_text(ids).replace("##", "")
            result.append(decoded)
    print(result)
def main():
    """CLI entry point: load a checkpointed BertModel and run generation."""
    parser = ArgumentParser()
    parser.add_argument("--model-config", type=str, default="openai-gpt",
                        help="Path, url or short name of the model")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available()
                        else "cpu", help="Device (cuda or cpu)")
    parser.add_argument("--outlens", type=int, default=30)
    parser.add_argument("--beam", type=int, default=1)
    parser.add_argument("--checkpoints", type=str)
    parser.add_argument("--data", type=str, default="file")
    args = parser.parse_args()
    # BertModel reads this flag from args when constructing itself.
    args.load_model = True
    model = BertModel(None, args)
    # Checkpoints store the state dict under 'sd', possibly with
    # DataParallel 'module.' prefixes that convert_model strips.
    state_dict = convert_model(torch.load(args.checkpoints)['sd'])
    model.load_state_dict(state_dict)
    model.to(args.device)
    tokenizer = BertWordPieceTokenizer("bert-base-chinese", cache_dir="temp_cache_dir")
    generate(model, tokenizer, args.device, args.data, sample=True, top_k=5, beam_size=6, outlens=30)

if __name__ == "__main__":
    main()
|
'''
Author: Guanghan Ning
E-mail: guanghan.ning@jd.com
October 22th, 2018
Unit test for data preparation
'''
import sys, os
sys.path.append(os.path.abspath("../utils/"))
from keypoints_to_graph import *
import pickle
def test_load_data_for_gcn_train():
    """Build the training graph-pair set, pickle it, and check the round trip."""
    graph_pairs = load_data_for_gcn("posetrack_18", "train")
    print("graph_pair_list Top 10: {}".format(graph_pairs[0:10]))
    print("number of graph pairs collected: {}".format(len(graph_pairs)))
    output_folder = "."
    data_out_path = '{}/posetrack_train_data.pickle'.format(output_folder)
    with open(data_out_path, 'wb') as handle:
        pickle.dump(graph_pairs, handle)
    with open('./posetrack_train_data.pickle', 'rb') as handle:
        restore = pickle.load(handle)
    print(restore == graph_pairs)
def test_load_data_for_gcn_val():
    """Build the validation graph-pair set, pickle it, and check the round trip."""
    graph_pairs = load_data_for_gcn("posetrack_18", "val")
    print("graph_pair_list Top 10: {}".format(graph_pairs[0:10]))
    print("number of graph pairs collected: {}".format(len(graph_pairs)))
    output_folder = "."
    data_out_path = '{}/posetrack_val_data.pickle'.format(output_folder)
    with open(data_out_path, 'wb') as handle:
        pickle.dump(graph_pairs, handle)
    with open('./posetrack_val_data.pickle', 'rb') as handle:
        restore = pickle.load(handle)
    print(restore == graph_pairs)
if __name__ == "__main__":
    # Run both data-preparation smoke tests back to back.
    test_load_data_for_gcn_train()
    test_load_data_for_gcn_val()
|
import datetime
from isoweek import Week
import calendar
def get_week(timestamp):
    """Return the zero-based ISO week number of a UTC unix *timestamp*."""
    moment = datetime.datetime.utcfromtimestamp(float(timestamp))
    # isocalendar() -> (year, week, weekday); ISO weeks are 1-based.
    return moment.date().isocalendar()[1] - 1
def get_week_timestamp(year, week):
    """Return the UTC unix timestamp of midnight on Monday of the given ISO week."""
    monday = Week(year, week).monday()
    return calendar.timegm(monday.timetuple())
def day_week(timestamp):
    """Return (day, week) for a UTC unix *timestamp*.

    week is the zero-based ISO week number; day counts continuously across
    weeks (7 per week, Monday of week 0 being day 0).
    """
    moment = datetime.datetime.utcfromtimestamp(float(timestamp))
    iso_year, iso_week, iso_weekday = moment.date().isocalendar()
    week = iso_week - 1
    day = week * 7 + iso_weekday - 1
    return day, week
def month_year(timestamp):
    """Return the (year, month) pair of a UTC unix *timestamp*."""
    moment = datetime.datetime.utcfromtimestamp(int(timestamp))
    return (moment.year, moment.month)
def month_year_add(month_year, increment):
    """Return *month_year* (a (year, month) pair) advanced by *increment* months.

    Fix: the original used '/', which under Python 3 produces a float year
    (e.g. (2020.0, 2)); floor division '//' restores the intended
    Python-2 integer semantics for all integer increments.
    """
    year, month = month_year
    return (year + (increment + month - 1) // 12,
            (month + increment - 1) % 12 + 1)
def previous_month_year(month_year):
    """Return the (year, month) pair one month before *month_year*."""
    year, month = month_year
    month -= 1
    if month < 1:
        # Rolled past January: wrap to December of the previous year.
        return (year - 1, 12)
    return (year, month)
|
# -*- coding: UTF-8 -*-
from pluginsinterface.PluginLoader import on_message, Session, on_preprocessor, on_plugloaded
from pluginsinterface.PluginLoader import PlugMsgReturn, plugRegistered, PlugMsgTypeEnum, PluginsManage
from pluginsinterface.PluginLoader import PlugArgFilter
from pluginsinterface.PermissionGroup import permNameCheck, authLegalGetGroupListDes, authLegalGetList, authObjGetList
import asyncio
from helper import getlogger
# Module-level logger for this plugin.
logger = getlogger(__name__)
# The bare string below is a no-op banner: "permission management plugin".
"""
权限管理插件
"""
@plugRegistered('权限管理', 'permission')
def _():
    """Register this plugin with the loader and return its metadata."""
    return {
        'plugmanagement': '1.0',  # plugin-registration API version (kept verbatim)
        'version': '1.0',         # plugin version
        'auther': 'chenxuan',     # plugin author (key spelling is part of the API)
        'des': '用于管理权限的插件'  # plugin description (runtime string, kept as-is)
    }
@on_plugloaded()
def _(plug: PluginsManage):
    """On plugin load: register the permissions this plugin exposes."""
    if plug:
        # Register permissions: 'manage' guards the grant/revoke commands,
        # 'infocheck' guards the read-only listing commands.
        plug.registerPerm('manage',
                          des='管理权限',
                          defaultperm=PlugMsgTypeEnum.none)
        plug.registerPerm('infocheck',
                          des='信息查看权限',
                          defaultperm=PlugMsgTypeEnum.none)
@on_preprocessor()
async def _(session: Session) -> PlugMsgReturn:
    """Preprocessor: only messages starting with '!' (ASCII or fullwidth) pass.

    The leading marker is stripped before later handlers see the text.
    """
    msg: str = session.sourcefiltermsg
    if msg.startswith(('!', '!')):
        session.sourcefiltermsg = msg[1:]
        return PlugMsgReturn.Allow
    return PlugMsgReturn.Refuse
argfilter = PlugArgFilter()
argfilter.addArg(
    'page',
    '页码',
    '页码',
    verif='uintnozero',    # must be a positive integer
    canSkip=True,
    vlimit={'': 1}  # default value when the argument is omitted
)
@on_message(msgfilter='合法权限组列表',
            argfilter=argfilter,
            bindsendperm='infocheck',
            des='合法权限组列表 页码 - 合法权限组列表')
async def _(session: Session):
    """List the legal permission groups (paged)."""
    page = session.filterargs['page']
    msg = authLegalGetGroupListDes(page)
    session.send(msg)
argfilter = PlugArgFilter()
# Optional group-name filter; prefunc rejects invalid permission names.
argfilter.addArg('groupname',
                 '权限组',
                 '需要输入有效的权限组名称',
                 prefunc=(lambda arg: (arg if permNameCheck(arg) else None)),
                 vlimit={'': None},
                 canSkip=True)
argfilter.addArg(
    'page',
    '页码',
    '页码',
    verif='uintnozero',
    canSkip=True,
    vlimit={'': 1}  # default value when the argument is omitted
)
@on_message(msgfilter='合法权限列表',
            argfilter=argfilter,
            bindsendperm='infocheck',
            des='合法权限列表 页码 - 合法权限列表')
async def _(session: Session):
    """List legal permissions, optionally restricted to one group (paged)."""
    groupname = session.filterargs['groupname']
    page = session.filterargs['page']
    msg = authLegalGetList(groupname, page)
    session.send(msg)
@on_message(msgfilter='查看授权', argfilter=argfilter, des='查看授权 页码 - 查看授权权限')
async def _(session: Session):
    """Show the authorisations granted to the current session's own source."""
    page = session.filterargs['page']
    msg = authObjGetList(session.bottype, session.botuuid,
                         PlugMsgTypeEnum.getMsgtype(session.msgtype),
                         session.uuid, page)
    session.send(msg)
argfilter = PlugArgFilter()
# Message-source kind: Chinese aliases map to internal values; the internal
# names themselves are accepted verbatim (empty-string mapping).
argfilter.addArg('msgtype',
                 '消息来源',
                 '需要输入有效的消息来源名称',
                 canSkip=False,
                 vlimit={
                     '群聊': 'group',
                     '私聊': 'private',
                     'group': '',
                     'private': '',
                 })
argfilter.addArg(
    'uuid',
    '消息来源ID',
    '需要输入有效的消息来源名称',
    canSkip=False,
)
# Group and permission names are validated by permNameCheck via prefunc.
argfilter.addArg('groupname',
                 '权限组',
                 '需要输入有效的权限组名称',
                 prefunc=(lambda arg: (arg if permNameCheck(arg) else None)),
                 canSkip=False)
argfilter.addArg('perm',
                 '权限名',
                 '需要输入有效的权限名',
                 prefunc=(lambda arg: (arg if permNameCheck(arg) else None)),
                 canSkip=False)
@on_message(msgfilter='远程授权',
            argfilter=argfilter,
            bindsendperm='manage',
            des='远程授权 消息来源标识 消息来源ID 权限组 权限名 - 消息来源标识为群聊、私聊中的任意一项')
async def _(session: Session):
    """Remotely grant a permission to the given message source."""
    msgtype = session.filterargs['msgtype']
    uuid = session.filterargs['uuid']
    groupname = session.filterargs['groupname']
    perm = session.filterargs['perm']
    res = session.authAllow(session.bottype, session.botuuid, msgtype, uuid,
                            groupname, perm)
    # res is (status, human-readable message); only the message is sent back.
    session.send(res[1])
@on_message(msgfilter='远程取消授权',
            argfilter=argfilter,
            bindsendperm='manage',
            des='远程取消授权 消息来源标识 消息来源ID 权限组 权限名 - 消息来源标识为群聊、私聊中的任意一项')
async def _(session: Session):
    """Remotely revoke a previously granted permission."""
    msgtype = session.filterargs['msgtype']
    uuid = session.filterargs['uuid']
    groupname = session.filterargs['groupname']
    perm = session.filterargs['perm']
    res = session.authRemoval(session.bottype, session.botuuid, msgtype, uuid,
                              groupname, perm)
    session.send(res[1])
@on_message(msgfilter='远程授权禁用',
            argfilter=argfilter,
            bindsendperm='manage',
            des='远程授权禁用 消息来源标识 消息来源ID 权限组 权限名 - 消息来源标识为群聊、私聊中的任意一项')
async def _(session: Session):
    """Remotely deny (blacklist) a permission for the given message source."""
    msgtype = session.filterargs['msgtype']
    uuid = session.filterargs['uuid']
    groupname = session.filterargs['groupname']
    perm = session.filterargs['perm']
    res = session.authDeny(session.bottype, session.botuuid, msgtype, uuid,
                           groupname, perm)
    session.send(res[1])
argfilter = PlugArgFilter()
# Same message-source argument shape as the remote-grant commands above.
argfilter.addArg('msgtype',
                 '消息来源',
                 '需要输入有效的消息来源名称',
                 canSkip=False,
                 vlimit={
                     '群聊': 'group',
                     '私聊': 'private',
                     'group': '',
                     'private': '',
                 })
argfilter.addArg(
    'uuid',
    '消息来源ID',
    '需要输入有效的消息来源名称',
    canSkip=False,
)
argfilter.addArg(
    'page',
    '页码',
    '页码',
    verif='uintnozero',
    canSkip=True,
    vlimit={'': 1}  # default value when the argument is omitted
)
@on_message(msgfilter='查询授权',
            argfilter=argfilter,
            bindsendperm='infocheck',
            des='查询授权 消息来源标识 消息来源ID 页码 - 查询指定对象的权限')
async def _(session: Session):
    """Query the permissions granted to a specified message source (paged)."""
    msgtype = session.filterargs['msgtype']
    uuid = session.filterargs['uuid']
    page = session.filterargs['page']
    msg = authObjGetList(session.bottype, session.botuuid, msgtype, uuid, page)
    session.send(msg)
|
# -- coding: utf8 --
__author__ = 'elmira'
import MySQLdb as mdb
from heritage_corpus.settings import DATABASES
# MySQL connection credentials, taken from the Django settings module.
USER = DATABASES['default']['USER']
PASSWORD = DATABASES['default']['PASSWORD']
NAME = DATABASES['default']['NAME']
class Database(object):
    """Thin wrapper around a MySQL connection.

    (Docstrings translated to English from the original Russian.)
    """

    def __init__(self):
        """Open the database connection.

        Connection parameters: host (empty -> local default), login,
        password, database name, and the utf8 charset.
        """
        self._connection = mdb.connect('', USER, PASSWORD, NAME, charset='utf8')

    def commit(self):
        """Commit the current transaction."""
        self._connection.commit()

    def execute(self, q):
        """Run query *q* and return all resulting rows as a tuple of tuples.

        Each tuple is one database row produced by the query.
        """
        self.cur = self._connection.cursor()  # mdb.cursors.DictCursor
        try:
            self.cur.execute(q)
            return self.cur.fetchall()
        finally:
            # Always release the cursor, even when execute()/fetchall()
            # raises (the original leaked the cursor on error).
            self.cur.close()
|
def is_armstrong(number):
    """Return True if *number* equals the sum of its digits, each raised
    to the power of the digit count (an Armstrong/narcissistic number).
    """
    digits = len(str(number))
    total = 0
    j = number
    while j > 0:
        total += (j % 10) ** digits
        j //= 10
    # For number == 0 the loop never runs and total stays 0, so 0 -> True,
    # matching the original script's behavior.
    return number == total


if __name__ == "__main__":
    # Same CLI behavior as the original flat script.
    i = int(input())
    if is_armstrong(i):
        print("yes")
    else:
        print("no")
|
import pyodbc
import sys
import os.path
import csv
def csvInternalObject(inFile):
    """Read the CSV file at path *inFile* and return its rows as a list of
    plain dicts keyed by the header names.

    Fixes vs. the original:
    - the built list was never returned (the function always returned None);
    - the file handle was never closed (now a `with` block);
    - opened in 'rb' (the Python 2 csv idiom); Python 3's csv module needs
      text mode with newline=''.
    """
    with open(inFile, 'r', newline='') as mapFile:
        mapDict = csv.DictReader(mapFile)
        return [dict(row) for row in mapDict]
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2018-07-28 20:11
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Deletes the BannerCourse model added in course.0011.

    BannerCourse has a `course_ptr` parent link, i.e. it was a multi-table
    inheritance child of Course.
    """

    dependencies = [
        ('operation', '0003_usercomment_add_time'),
        ('course', '0011_bannercourse'),
    ]

    operations = [
        # Drop the implicit parent link first, then the model itself.
        migrations.RemoveField(
            model_name='bannercourse',
            name='course_ptr',
        ),
        migrations.DeleteModel(
            name='BannerCourse',
        ),
    ]
|
from typing import List
from collections import defaultdict
class Solution:
    def validPath(self, n: int, edges: List[List[int]], source: int, destination: int) -> bool:
        """Return True if source and destination are connected in the
        undirected graph described by *edges*.

        Uses an iterative DFS with an explicit stack: the original recursive
        version hits Python's recursion limit on long path graphs (the
        problem allows up to 2*10^5 nodes).
        """
        if source == destination:
            return True
        # Build an undirected adjacency map.
        edge_dict = defaultdict(set)
        for u, v in edges:
            edge_dict[u].add(v)
            edge_dict[v].add(u)
        visited = {source}
        stack = [source]
        while stack:
            node = stack.pop()
            for neighbor in edge_dict[node]:
                if neighbor == destination:
                    return True
                if neighbor not in visited:
                    visited.add(neighbor)
                    stack.append(neighbor)
        return False
|
# Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from concurrent import futures
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer
import multiprocessing
import time
import math
import logging
import argparse
import grpc
import jarvis_nlp_pb2
import jarvis_nlp_pb2_grpc
def get_args():
    """Parse the command-line options for the QA service."""
    parser = argparse.ArgumentParser(description="Jarvis Question Answering client sample")
    parser.add_argument("--listen", type=str, default="[::]:50052",
                        help="Address to listen to")
    parser.add_argument("--model-name", type=str, default="twmkn9/bert-base-uncased-squad2",
                        help="pretrained HF model to use")
    parser.add_argument("--model-cache", type=str, default="/data/models",
                        help="path to location to store downloaded checkpoints")
    return parser.parse_args()
class JarvisNLPServicer(jarvis_nlp_pb2_grpc.JarvisNLPServicer):
    """gRPC servicer backed by a HuggingFace question-answering pipeline."""

    def __init__(self, model_name, cache=None):
        """Load (or download into *cache*) the tokenizer and model, then
        build the QA pipeline."""
        self.model = pipeline(
            'question-answering',
            model=AutoModelForQuestionAnswering.from_pretrained(model_name, cache_dir=cache),
            tokenizer=AutoTokenizer.from_pretrained(model_name, cache_dir=cache))
        print(f"Model loaded, serving: {model_name}")

    def NaturalQuery(self, request, context):
        """NaturalQuery is a search function that enables querying one or more documents
        or contexts with a query that is written in natural language.
        """
        answer = self.model(
            {'question': str(request.query), 'context': str(request.context)},
            handle_impossible_answer=True)
        response = jarvis_nlp_pb2.NaturalQueryResponse()
        response.results.append(
            jarvis_nlp_pb2.NaturalQueryResult(answer=answer['answer'], score=answer['score']))
        return response
def serve(uri="[::]:50051", model="twmkn9/distilbert-base-uncased-squad2", model_cache=None):
    """Start the gRPC QA server on *uri* and block until termination."""
    worker_count = multiprocessing.cpu_count()
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=worker_count))
    servicer = JarvisNLPServicer(model, cache=model_cache)
    jarvis_nlp_pb2_grpc.add_JarvisNLPServicer_to_server(servicer, server)
    server.add_insecure_port(uri)
    server.start()
    server.wait_for_termination()
if __name__ == '__main__':
    # Parse CLI flags, configure root logging, then run the blocking server.
    args = get_args()
    logging.basicConfig()
    serve(uri=args.listen, model=args.model_name, model_cache=args.model_cache)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class Bottleneck(nn.Module):
    """Dual Path Network bottleneck block.

    The output merges a residual path (element-wise add on the first
    `out_planes` channels) with a dense path (`dense_depth` extra channels
    appended from both the shortcut and the conv branch).
    """

    def __init__(self, last_planes, in_planes, out_planes, dense_depth, stride, first_layer):
        super(Bottleneck, self).__init__()
        self.out_planes = out_planes
        self.dense_depth = dense_depth
        # 1x1 reduce -> 3x3 grouped conv (32 groups) -> 1x1 expand.
        self.conv1 = nn.Conv2d(last_planes, in_planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(in_planes)
        self.conv2 = nn.Conv2d(in_planes, in_planes, kernel_size=3, stride=stride, padding=1, groups=32, bias=False)
        self.bn2 = nn.BatchNorm2d(in_planes)
        self.conv3 = nn.Conv2d(in_planes, out_planes + dense_depth, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_planes + dense_depth)
        self.shortcut = nn.Sequential()
        if first_layer:
            # Projection shortcut to match channels/stride at a stage start.
            self.shortcut = nn.Sequential(
                nn.Conv2d(last_planes, out_planes + dense_depth, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_planes + dense_depth)
            )

    def forward(self, x):
        branch = F.relu(self.bn1(self.conv1(x)))
        branch = F.relu(self.bn2(self.conv2(branch)))
        branch = self.bn3(self.conv3(branch))
        skip = self.shortcut(x)
        d = self.out_planes
        # First d channels: residual sum; remaining channels: dense concat.
        residual = skip[:, :d, :, :] + branch[:, :d, :, :]
        merged = torch.cat([residual, skip[:, d:, :, :], branch[:, d:, :, :]], 1)
        return F.relu(merged)
class DPNInputLayer(nn.Module):
    """Stem: 3x3 conv (3 -> 64 channels) + BN + ReLU, spatial size preserved."""

    def __init__(self):
        super(DPNInputLayer, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)

    def forward(self, x):
        return F.relu(self.bn1(self.conv1(x)))
class DPNBlockLayer(nn.Module):
    """One DPN stage built from a 6-element config:
    [in_planes, out_planes, num_blocks, dense_depth, last_planes, stride].
    """

    def __init__(self, cfg):
        super(DPNBlockLayer, self).__init__()
        in_planes, out_planes, num_blocks, dense_depth, self.last_planes, stride = cfg
        self.layer = self._make_layer(in_planes, out_planes, num_blocks, dense_depth, stride)

    def _make_layer(self, in_planes, out_planes, num_blocks, dense_depth, stride):
        blocks = []
        # Only the first block strides / projects; the rest keep stride 1.
        for idx, s in enumerate([stride] + [1] * (num_blocks - 1)):
            blocks.append(Bottleneck(self.last_planes, in_planes, out_planes, dense_depth, s, idx == 0))
            # The dense path grows the channel count by dense_depth per block.
            self.last_planes = out_planes + (idx + 2) * dense_depth
        return nn.Sequential(*blocks)

    def forward(self, out):
        return self.layer(out)
class DPNOutputLayer(nn.Module):
    """Head: 4x4 average pool, flatten, linear classifier to 10 classes."""

    def __init__(self, out_planes, num_blocks, dense_depth):
        super(DPNOutputLayer, self).__init__()
        feature_dim = out_planes + (num_blocks + 1) * dense_depth
        self.linear = nn.Linear(feature_dim, 10)

    def forward(self, out):
        pooled = F.avg_pool2d(out, 4)
        flat = pooled.view(pooled.size(0), -1)
        return self.linear(flat)
class THDPNGroup0(nn.Module):
    """First pipeline stage of DPN-92: stem plus the first two block stages."""

    def __init__(self):
        super(THDPNGroup0, self).__init__()
        self.input_layer = DPNInputLayer()
        # cfg = [in_planes, out_planes, num_blocks, dense_depth, last_planes, stride]
        self.layer0 = DPNBlockLayer([96, 256, 3, 16, 64, 1])
        self.layer1 = DPNBlockLayer([192, 512, 4, 32, 320, 2])

    def forward(self, x):
        out = self.input_layer(x)
        return self.layer1(self.layer0(out))
class THDPNGroup1(nn.Module):
    """Middle pipeline stage of DPN-92: the 20-block third stage."""

    def __init__(self):
        super(THDPNGroup1, self).__init__()
        self.layer = DPNBlockLayer([384, 1024, 20, 24, 672, 2])

    def forward(self, x):
        return self.layer(x)
class THDPNGroup2(nn.Module):
    """Final pipeline stage of DPN-92: last block stage plus the classifier head."""

    def __init__(self):
        super(THDPNGroup2, self).__init__()
        cfg = [768, 2048, 3, 128, 1528, 2]
        self.layer0 = DPNBlockLayer(cfg)
        self.layer1 = DPNOutputLayer(cfg[1], cfg[2], cfg[3])

    def forward(self, x):
        return self.layer1(self.layer0(x))
"""
dpn92
torch.Size([1, 672, 16, 16])
torch.Size([1, 1528, 8, 8])
torch.Size([1, 10])
"""
def test():
    """Smoke-test the three DPN groups end to end on a CIFAR-sized input."""
    stages = [THDPNGroup0(), THDPNGroup1(), THDPNGroup2()]
    out = torch.randn(1, 3, 32, 32)
    for stage in stages:
        out = stage(out)
        print(out.size())
|
# coding: utf8
# Author: Wing Yung Chan (~wy)
# Date: 2017
#26 - Reciprocal Cycles
#Learnt some new maths here
import itertools
#finds the first number in the sequence (9,99,...) that is divisible by x
def find_divisible_repunit(x):
    """Return the first number of the sequence 9, 99, 999, ... divisible by x.

    Its digit count is the multiplicative order of 10 mod x, i.e. the length
    of the recurring cycle of 1/x. Requires gcd(x, 10) == 1.
    """
    assert x % 2 != 0 and x % 5 != 0
    for i in itertools.count(1):
        repunit = int("9" * i)
        if repunit % x == 0:
            return repunit

def form(denominator):
    """Return the recurring-cycle length of the decimal expansion of
    1/denominator (0 for terminating decimals).

    Fixes vs. the original: terminating decimals (denominator reduces to 1
    after stripping 2s and 5s) returned 1 instead of 0, and an unused
    `numerator` accumulated float garbage via true division.
    """
    # Factors of 2 and 5 only shift the decimal point; strip them.
    for x in (2, 5):
        while denominator % x == 0:
            denominator //= x
    if denominator == 1:
        return 0  # terminating decimal: no recurring cycle
    repunit = find_divisible_repunit(denominator)
    return len(str(repunit))
def problem26():
    """Project Euler 26: find the d with the longest recurring cycle in 1/d.

    Returns:
        (cycle_length, d) for the best denominator found in 1..1000.
    """
    # `best` replaces the original local name `max`, which shadowed the builtin.
    best = (1, 1)
    for d in range(1, 1001):
        cycle = form(d)
        if cycle > best[0]:
            best = (cycle, d)
    return best
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops
from keras.engine.topology import Layer
import keras.backend as K
from keras.layers import Activation
from keras.utils.generic_utils import get_custom_objects
def eednstep(x,name=None):
    """Binary step activation with a straight-through surrogate gradient.

    Forward value is 1.0 where x > 0, else 0.0. The gradient of the Identity
    op is remapped to "EednStepGrad" (registered below) inside this graph
    scope, so backprop sees a hard-tanh window instead of a zero derivative.
    """
    g = tf.get_default_graph()
    with g.gradient_override_map({"Identity": "EednStepGrad"}):
        y = tf.greater(x,0.0)
        y = tf.cast(y,x.dtype)
        # identity(x) carries the gradient; stop_gradient(y - x) shifts the
        # forward value to the binarized y without affecting backprop.
        return tf.identity(x) + tf.stop_gradient(y-x)
@ops.RegisterGradient("EednStepGrad")
def eednsteptestgrad(op, grad):
    """Surrogate gradient for the step activation: hard-tanh window max(0, 1-|x|)."""
    x = op.inputs[0]
    #x = tf.Print(x,[x])
    out = tf.maximum(0.0,1.0-tf.abs(x))
    return out*grad # scale incoming grad by the window (set to zero to compare)
# Register the activation under the name 'eednstep' so Keras model configs can use it.
get_custom_objects().update({'eednstep': Activation(eednstep)})
def switch(condition, t, e):
    """Backend-agnostic element-wise select: t where condition holds, else e.

    Raises:
        ValueError: for an unsupported Keras backend. (The original silently
        returned None here, which only failed later at the call site.)
    """
    backend = K.backend()
    if backend == 'tensorflow':
        import tensorflow as tf
        return tf.where(condition, t, e)
    if backend == 'theano':
        import theano.tensor as tt
        return tt.switch(condition, t, e)
    raise ValueError('Unsupported Keras backend: {}'.format(backend))
def _ternarize(W, W_old, hysteresis=0.1, H=1):
    '''The weights' ternarization function with hysteresis.

    Maps W to {-H, 0, +H}: values above 0.5+hysteresis become +1, below
    -0.5-hysteresis become -1; values inside the hysteresis bands around
    +/-0.5 keep the previous ternary value W_old; everything else becomes 0.
    The new state is written back into W_old via tf.assign.

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    - Adapted from https://github.com/DingKe/nn_playground/tree/master/ternarynet
    '''
    W /= H
    ones = K.ones_like(W)
    zeros = K.zeros_like(W)
    # Bug fix: the band test previously read (W >= -0.5+h) OR (W <= 0.5-h),
    # which holds for every W, so the zero branch was unreachable.  The
    # hysteresis bands are [0.5-h, 0.5+h] and [-0.5-h, -0.5+h].
    Wt = switch(W >= 0.5+hysteresis, ones,
                switch(W <= -0.5-hysteresis, -ones,
                       switch(tf.logical_or(W >= 0.5-hysteresis, W <= -0.5+hysteresis),
                              W_old, zeros)))
    Wt *= H
    tf.assign(W_old, Wt)
    return Wt
def ternarize(W, W_old, hysteresis=0.1, H=1):
    '''Straight-through ternarization: the forward pass uses the ternarized
    weights while the backward pass sees the identity, so gradients flow to W.

    # References:
    - [Recurrent Neural Networks with Limited Numerical Precision](http://arxiv.org/abs/1608.06902)
    - [Ternary Weight Networks](http://arxiv.org/abs/1605.04711)
    - Copied from https://github.com/DingKe/nn_playground/tree/master/ternarynet
    '''
    ternarized = _ternarize(W, W_old, hysteresis, H)
    # W + stop_gradient(Wt - W): value of Wt, gradient of W.
    return W + K.stop_gradient(ternarized - W)
def lengthOfLongestSubstring(s: str) -> int:
    """Length of the longest substring of s without repeating characters.

    Sliding window: `window_start` marks the left edge; `last_seen` maps each
    character to the index of its most recent occurrence.
    """
    last_seen = {}
    best = 0
    window_start = 0
    for idx, ch in enumerate(s):
        if ch in last_seen and last_seen[ch] >= window_start:
            # Repeat inside the window: move the left edge past it.
            window_start = last_seen[ch] + 1
        last_seen[ch] = idx
        best = max(best, idx - window_start + 1)
    return best
# Quick manual checks; the comment above each input is the expected output.
# expected: 3 ("abc")
input1 = "abcabcbb"
# expected: 1 ("b")
input2 = "bbbbb"
# expected: 3 ("wke")
input3 = "pwwkew"
print(lengthOfLongestSubstring(input1))
print(lengthOfLongestSubstring(input2))
print(lengthOfLongestSubstring(input3))
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values  # position level, kept 2-D for sklearn
y = dataset.iloc[:, 2].values    # salary

# Fit a degree-4 polynomial regression.
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(degree=4)
X_poly = poly.fit_transform(X)
linear = LinearRegression()
linear.fit(X_poly, y)

# Bug fix: predict/transform require a 2-D array-like; the original passed
# the bare scalar 6.5, which raises in sklearn.  Also use transform() —
# `poly` is already fitted above.
y_pred = linear.predict(poly.transform([[6.5]]))

plt.scatter(X, y, color='red')
plt.plot(X, linear.predict(X_poly), color='blue')  # X_poly == poly.fit_transform(X)
plt.title('Polynomial Linear Regression')
plt.xlabel('Position / Level of Employee')
plt.ylabel('Salary')
plt.show()
# Copyright 2017 Neural Networks and Deep Learning lab, MIPT
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import re
import time
from logging import getLogger
from typing import Tuple, List, Optional, Union, Dict, Any
import nltk
from deeppavlov.core.models.component import Component
from deeppavlov.core.models.serializable import Serializable
from deeppavlov.core.common.file import read_json
from deeppavlov.core.commands.utils import expand_path
from deeppavlov.models.kbqa.template_matcher import TemplateMatcher
from deeppavlov.models.kbqa.entity_linking import EntityLinker
from deeppavlov.models.kbqa.rel_ranking_infer import RelRankerInfer
from deeppavlov.models.kbqa.rel_ranking_bert_infer import RelRankerBertInfer
# Module-level logger.
log = getLogger(__name__)
class QueryGeneratorBase(Component, Serializable):
    """
    This class takes as input entity substrings, defines the template of the query and
    fills the slots of the template with candidate entities and relations.
    """

    def __init__(self, template_matcher: TemplateMatcher,
                 linker_entities: EntityLinker,
                 linker_types: EntityLinker,
                 rel_ranker: Union[RelRankerInfer, RelRankerBertInfer],
                 load_path: str,
                 rank_rels_filename_1: str,
                 rank_rels_filename_2: str,
                 sparql_queries_filename: str,
                 wiki_parser = None,
                 entities_to_leave: int = 5,
                 rels_to_leave: int = 7,
                 syntax_structure_known: bool = False,
                 return_answers: bool = False, *args, **kwargs) -> None:
        """
        Args:
            template_matcher: component deeppavlov.models.kbqa.template_matcher
            linker_entities: component deeppavlov.models.kbqa.entity_linking for linking of entities
            linker_types: component deeppavlov.models.kbqa.entity_linking for linking of types
            rel_ranker: component deeppavlov.models.kbqa.rel_ranking_infer
            load_path: path to folder with wikidata files
            rank_rels_filename_1: file with list of rels for first rels in questions with ranking
            rank_rels_filename_2: file with list of rels for second rels in questions with ranking
            sparql_queries_filename: file with sparql query templates
            wiki_parser: component deeppavlov.models.kbqa.wiki_parser
            entities_to_leave: how many entities to leave after entity linking
            rels_to_leave: how many relations to leave after relation ranking
            syntax_structure_known: if syntax tree parser was used to define query template type
            return_answers: whether to return answers or candidate answers
        """
        super().__init__(save_path=None, load_path=load_path)
        self.template_matcher = template_matcher
        self.linker_entities = linker_entities
        self.linker_types = linker_types
        self.wiki_parser = wiki_parser
        self.rel_ranker = rel_ranker
        self.rank_rels_filename_1 = rank_rels_filename_1
        self.rank_rels_filename_2 = rank_rels_filename_2
        # Populated by load() from the two rank-rels files.
        self.rank_list_0 = []
        self.rank_list_1 = []
        self.entities_to_leave = entities_to_leave
        self.rels_to_leave = rels_to_leave
        self.syntax_structure_known = syntax_structure_known
        self.sparql_queries_filename = sparql_queries_filename
        self.return_answers = return_answers
        self.load()

    def load(self) -> None:
        """Read the two relation rank lists (first TSV column of each line)
        and the SPARQL query template file."""
        with open(self.load_path / self.rank_rels_filename_1, 'r') as fl1:
            lines = fl1.readlines()
            self.rank_list_0 = [line.split('\t')[0] for line in lines]
        with open(self.load_path / self.rank_rels_filename_2, 'r') as fl2:
            lines = fl2.readlines()
            self.rank_list_1 = [line.split('\t')[0] for line in lines]
        self.template_queries = read_json(str(expand_path(self.sparql_queries_filename)))

    def save(self) -> None:
        # Nothing to persist: this component is read-only at inference time.
        pass

    def find_candidate_answers(self, question: str,
                               question_sanitized: str,
                               template_types: Union[List[str], str],
                               entities_from_ner: List[str],
                               types_from_ner: List[str]) -> Union[List[Tuple[str, Any]], List[str]]:
        """Produce candidate answers for *question*.

        First tries the template matcher; if the template path yields no
        candidates, falls back to the entities/types found by NER.
        """
        candidate_outputs = []
        self.template_nums = template_types

        # Normalize punctuation/whitespace before template matching.
        replace_tokens = [(' - ', '-'), (' .', ''), ('{', ''), ('}', ''), ('  ', ' '), ('"', "'"), ('(', ''),
                          (')', ''), ('–', '-')]
        for old, new in replace_tokens:
            question = question.replace(old, new)

        tm1 = time.time()
        entities_from_template, types_from_template, rels_from_template, rel_dirs_from_template, \
        query_type_template, template_found = self.template_matcher(question, entities_from_ner)
        self.template_nums = [query_type_template]

        log.debug(f"question: {question}\n")
        log.debug(f"template_type {self.template_nums}")

        if entities_from_template or types_from_template:
            tm1 = time.time()
            entity_ids = self.get_entity_ids(entities_from_template, "entities", template_found, question)
            type_ids = self.get_entity_ids(types_from_template, "types")
            log.debug(f"entities_from_template {entities_from_template}")
            log.debug(f"types_from_template {types_from_template}")
            log.debug(f"rels_from_template {rels_from_template}")
            log.debug(f"entity_ids {entity_ids}")
            log.debug(f"type_ids {type_ids}")

            tm1 = time.time()
            candidate_outputs = self.sparql_template_parser(question_sanitized, entity_ids, type_ids, rels_from_template,
                                                            rel_dirs_from_template)
        if not candidate_outputs and entities_from_ner:
            # Fallback: use NER output and the externally supplied template types.
            log.debug(f"(__call__)entities_from_ner: {entities_from_ner}")
            log.debug(f"(__call__)types_from_ner: {types_from_ner}")
            entity_ids = self.get_entity_ids(entities_from_ner, "entities", question=question)
            type_ids = self.get_entity_ids(types_from_ner, "types")
            log.debug(f"(__call__)entity_ids: {entity_ids}")
            log.debug(f"(__call__)type_ids: {type_ids}")
            self.template_nums = template_types
            log.debug(f"(__call__)self.template_nums: {self.template_nums}")
            if not self.syntax_structure_known:
                # Without syntax information, keep only the top-3 entity groups.
                entity_ids = entity_ids[:3]
            tm1 = time.time()
            candidate_outputs = self.sparql_template_parser(question_sanitized, entity_ids, type_ids)
        return candidate_outputs

    def get_entity_ids(self, entities: List[str],
                       what_to_link: str,
                       template_found: str = None,
                       question: str = None) -> List[List[str]]:
        """Link entity (or type) substrings and return up to 15 candidate IDs each."""
        entity_ids = []
        for entity in entities:
            entity_id = []
            if what_to_link == "entities":
                entity_id, confidences = self.linker_entities.link_entity(entity, context=question, template_found=template_found)
            if what_to_link == "types":
                entity_id, confidences = self.linker_types.link_entity(entity)
            entity_ids.append(entity_id[:15])

        return entity_ids

    def sparql_template_parser(self, question: str,
                               entity_ids: List[List[str]],
                               type_ids: List[List[str]],
                               rels_from_template: Optional[List[Tuple[str]]] = None,
                               rel_dirs_from_template: Optional[List[str]] = None) -> List[Tuple[str]]:
        """Select matching SPARQL templates and run query_parser over them."""
        candidate_outputs = []
        log.debug(f"(find_candidate_answers)self.template_nums: {self.template_nums}")
        templates = []
        # Match templates either by key (syntax known) or by "template_num" field.
        for template_num in self.template_nums:
            for num, template in self.template_queries.items():
                if (num == template_num and self.syntax_structure_known) or \
                        (template["template_num"] == template_num and not self.syntax_structure_known):
                    templates.append(template)
        # Without syntax info, additionally require the entity/type counts to match.
        templates = [template for template in templates if \
                     (not self.syntax_structure_known and [len(entity_ids), len(type_ids)] == template["entities_and_types_num"]) \
                     or self.syntax_structure_known]
        templates_string = '\n'.join([template["query_template"] for template in templates])
        log.debug(f"{templates_string}")
        if not templates:
            return candidate_outputs
        if rels_from_template is not None:
            # Template-matcher path: pick the template whose rel directions match.
            query_template = {}
            for template in templates:
                if template["rel_dirs"] == rel_dirs_from_template:
                    query_template = template
            if query_template:
                entities_and_types_select = query_template["entities_and_types_select"]
                candidate_outputs = self.query_parser(question, query_template, entities_and_types_select,
                                                      entity_ids, type_ids, rels_from_template)
        else:
            # NER path: try templates in order until one yields candidates.
            for template in templates:
                entities_and_types_select = template["entities_and_types_select"]
                candidate_outputs = self.query_parser(question, template, entities_and_types_select,
                                                      entity_ids, type_ids, rels_from_template)
                if candidate_outputs:
                    return candidate_outputs

            if not candidate_outputs:
                alternative_templates = templates[0]["alternative_templates"]
                for template_num, entities_and_types_select in alternative_templates:
                    candidate_outputs = self.query_parser(question, self.template_queries[template_num],
                                                          entities_and_types_select, entity_ids, type_ids, rels_from_template)
                    # NOTE(review): this return sits inside the loop, so only
                    # the FIRST alternative template is ever tried — confirm
                    # whether that is intentional.
                    return candidate_outputs

        log.debug("candidate_rels_and_answers:\n" + '\n'.join([str(output) for output in candidate_outputs[:5]]))

        return candidate_outputs

    def find_top_rels(self, question: str, entity_ids: List[List[str]], triplet_info: Tuple) -> List[str]:
        """Collect candidate relations per triplet_info (direction, source,
        rel_type) and return the top ones ranked against the question."""
        ex_rels = []
        direction, source, rel_type = triplet_info
        if source == "wiki":
            # Pull relations of the top candidate entities from the KB parser.
            for entity_id in entity_ids:
                for entity in entity_id[:self.entities_to_leave]:
                    ex_rels += self.wiki_parser.find_rels(entity, direction, rel_type)
            ex_rels = list(set(ex_rels))
            ex_rels = [rel.split('/')[-1] for rel in ex_rels]
        elif source == "rank_list_1":
            ex_rels = self.rank_list_0
        elif source == "rank_list_2":
            ex_rels = self.rank_list_1
        rels_with_scores = self.rel_ranker.rank_rels(question, ex_rels)
        return rels_with_scores[:self.rels_to_leave]
|
import numpy as np
import sync_generator as syncgen
import sbm_generator as sbmgen
import noise_generator as gen
import burer_monteiro as bm
from sklearn import cluster
import aux
def _spectral_gap(A, z):
    """
    Returns dual spectral gap given observation A and ground truth z.
    """
    # Second Laplacian eigenvalue is the dual gap.
    eigenvalues = aux.laplacian_eigs(A, z)
    gap = eigenvalues[1]
    print('Spectral gap: {}'.format(gap))
    return gap
def _gen_sync(n, percentage, snr):
    """
    Returns a random observation from synchronization problem.

    Thin wrapper over sync_generator.synchronization_usual; parameter
    semantics (percentage, snr) are defined there.
    """
    return syncgen.synchronization_usual(n, percentage, snr)
def _gen_sbm(n, a, b):
    """
    Returns a random observation from connected SBM.

    Thin wrapper over sbm_generator.sbm_logarithm; a and b are the
    intra/inter community rate parameters defined there.
    """
    return sbmgen.sbm_logarithm(n, a, b)
def _check_spectral_gap(A, z):
    """
    Returns True if the dual spectral gap exceeds exp(-8) (i.e. is
    numerically positive).
    """
    # Simplified from an if/else that returned the boolean literals.
    return _spectral_gap(A, z) > np.exp(-8)
def _gen_sparse_mat(n, level):
"""
Returns a sparse matrix with at most 4 elements non-zero.
"""
mat = np.zeros(n**2)
for i in range(2):
index = np.random.randint(n**2)
mat[index] = level
mat = mat.reshape((n, n))
triu = np.triu(mat)
mat = triu + triu.T
return mat
def _gen_row_mat(n, level):
"""
Returns a matrix with at most 4 rows and cols non-zero.
"""
mat = np.zeros((n, n))
for i in range(2):
index = np.random.randint(n)
mat[index, :] = level
mat[:, index] = level
return mat
def search_counter_eg(n, level, drift, n_iter, n_trail):
    """
    Returns instances where SDP recovers but BM only finds local maximizer.

    Loops forever generating noisy rank-one observations A = z z^T + N with
    row-structured noise; when the SDP dual gap is positive, runs trust-region
    Burer-Monteiro `n_trail` times looking for a non-global maximizer.
    NOTE(review): on success this calls exit(0) after printing A, so the
    process terminates; the trailing `return examples` is unreachable while
    the `while True` never breaks.
    """
    # found_target = False
    examples = []
    while True:  # level > 0
        # level += .05
        print('+++++++++++++++++++++++++++++++++++++++++++++++++')
        print('Starting loops with noise level = {}...'.format(level))
        print('+++++++++++++++++++++++++++++++++++++++++++++++++')
        n_tests = 0
        print('This is #{} loop...........'.format(n_tests))
        for i in range(n_iter):
            print('Loop #{}'.format(i + 1))
            # z = aux.rounding_with_prob(np.random.random_sample(n), .5)
            # z = 2 * z.reshape(n, 1) - 1
            # Planted signal: the all-ones labeling.
            z = np.ones(n).reshape((-1, 1))
            ground_truth = z.dot(z.T)
            # noise = [i for i in range(n)]
            # noise = np.array(noise).reshape((-1, 1))
            # mat = noise.dot(noise.T)
            # N_pre = aux.laplacian(mat)
            # N_pre = N_pre - np.diag(np.diag(N_pre))
            # N = - level * N_pre
            # N = gen.uniform_noise(n, level) + drift
            # Row/column structured noise with zeroed diagonal.
            N = _gen_row_mat(n, level)
            N = N - np.diag(np.diag(N))
            A = ground_truth + N
            # A = aux.demean_adversary(A)
            # A, z = _gen_sbm(n, 10, 2)
            if _check_spectral_gap(A, z):
                print('------------Found matrix where SDP tight------------')
                for j in range(n_trail):
                    print(
                        'Finding global optimizer with BM (trail {})...'.format(j + 1))
                    # Q = bm.augmented_lagrangian(
                    #     A, 2, plotting=False, printing=False)
                    result = bm.trust_region(
                        A, 2, plotting=False, printing=False)
                    Q_vec = result.x
                    Q = Q_vec.reshape((n, 2))
                    flag = result.success
                    print('Success: {}'.format(flag))
                    # kmeans = cluster.KMeans(
                    #     n_clusters=2, random_state=0).fit(Q)
                    # clustering = 2 * kmeans.labels_ - 1
                    # err = aux.error_rate(clustering, z.ravel())
                    # print('The error rate for BM is: {}...'.format(err))
                    # Compare the recovered Gram matrix with the planted one.
                    X_result = Q.dot(Q.T)
                    X = z.dot(z.T)
                    err = np.linalg.norm(X - X_result, 1)
                    corr = np.linalg.norm(np.dot(Q.T, z), 2)
                    largest_diff = np.max(np.abs(X - X_result))
                    # Largest pairwise distance between rows of Q: nonzero
                    # values indicate the rows are not aligned (local max).
                    pair_diff = 0
                    for k in range(n):
                        for l in range(n):
                            if k != l:
                                vec1 = Q[k, :]
                                vec2 = Q[l, :]
                                d = np.linalg.norm(vec1 - vec2, 2)
                                if d > pair_diff:
                                    pair_diff = d
                    print('>>>>>>The correlation factor is: {}...'.format(corr / n))
                    print('>>>>>>The norm 1 error for BM is: {}...'.format(err / n**2))
                    print('>>>>>>The largest element diff is: {}...'.format(
                        largest_diff))
                    print('>>>>>>The largest pairwise difference is: {}...'.format(
                        pair_diff))
                    # Spectral diagnostics of the realized noise.
                    N = A - z.dot(z.T)
                    diagN = np.diag(N.dot(z).ravel())
                    spectral_overall = np.sort(np.linalg.eigvals(N - diagN))
                    print('Max eigenvalue overall: {}'.format(
                        spectral_overall[-1]))
                    spectral_N = np.sort(np.linalg.eigvals(N))
                    print('###### Max eigenvalue of N: {} ######'.format(
                        spectral_N[-1]))
                    print('Min eigenvalue of N: {}'.format(spectral_N[0]))
                    spectral_diagN = np.sort(np.linalg.eigvals(diagN))
                    print('Max eigenvalue of diagN: {}'.format(
                        spectral_diagN[-1]))
                    print('###### Min eigenvalue of diagN: {} ######'.format(
                        spectral_diagN[0]))
                    if pair_diff > .1:
                        gap = aux.laplacian_eigs(A, z)[1]
                        if gap > .01:
                            # found_target = True
                            # BM rows disagree while SDP is tight: record it.
                            print(
                                'One instance found when noise level = {}!'.format(level))
                            example = CounterExample(A, z, Q, gap, level)
                            examples.append(example)
                            print(A)
                            exit(0)
            else:
                print('===SDP fails===')
                Q = bm.augmented_lagrangian(
                    A, 2, plotting=False, printing=False)
                kmeans = cluster.KMeans(
                    n_clusters=2, random_state=0).fit(Q)
                clustering = 2 * kmeans.labels_ - 1
                err = aux.error_rate(clustering, z.ravel())
                print('===Error rate for rounded BM is: {}==='.format(err))
                # Re-run BM and report the same diagnostics as above.
                Q = bm.augmented_lagrangian(
                    A, 2, plotting=False, printing=False)
                # kmeans = cluster.KMeans(
                #     n_clusters=2, random_state=0).fit(Q)
                # clustering = 2 * kmeans.labels_ - 1
                # err = aux.error_rate(clustering, z.ravel())
                # print('The error rate for BM is: {}...'.format(err))
                X_result = Q.dot(Q.T)
                X = z.dot(z.T)
                err = np.linalg.norm(X - X_result, 1)
                corr = np.linalg.norm(np.dot(Q.T, z), 2)
                largest_diff = np.max(np.abs(X - X_result))
                pair_diff = 0
                for k in range(n):
                    for l in range(n):
                        if k != l:
                            vec1 = Q[k, :]
                            vec2 = Q[l, :]
                            d = np.linalg.norm(vec1 - vec2, 2)
                            if d > pair_diff:
                                pair_diff = d
                print('The correlation factor is: {}...'.format(corr / n))
                print('The norm 1 error for BM is: {}...'.format(err / n**2))
                print('The largest element diff is: {}...'.format(largest_diff))
                print('The largest pairwise difference is: {}...'.format(pair_diff))
                N = A - z.dot(z.T)
                diagN = np.diag(N.dot(z).ravel())
                spectral_overall = np.sort(np.linalg.eigvals(N - diagN))
                print('Max eigenvalue overall: {}'.format(
                    spectral_overall[-1]))
                spectral_N = np.sort(np.linalg.eigvals(N))
                print('>>Max eigenvalue of N: {}'.format(spectral_N[-1]))
                print('Min eigenvalue of N: {}'.format(spectral_N[0]))
                spectral_diagN = np.sort(np.linalg.eigvals(diagN))
                print('Max eigenvalue of diagN: {}'.format(
                    spectral_diagN[-1]))
                print('>>Min eigenvalue of diagN: {}'.format(
                    spectral_diagN[0]))
    return examples
class CounterExample():
    """
    Records a found counter example: observation A, ground truth z,
    BM factor Q, dual gap, and the noise level (snr).
    """

    def __init__(self, A, z, Q, gap, snr):
        self.A = A
        self.z = z
        self.Q = Q
        self.gap = gap
        self.snr = snr

    def get_noise(self):
        # The noise is the observation minus the planted signal z z^T.
        return self.A - self.z.dot(self.z.T)

    def printing(self):
        for line in ('Noise Level: {}'.format(self.snr),
                     'Dual Gap: {}'.format(self.gap),
                     'Noise: '):
            print(line)
        print(self.get_noise())
if __name__ == '__main__':
    # Search with n=10, noise level 10, no drift, one sweep, 100 BM trails.
    examples = search_counter_eg(10, 10, 0, 1, 100)
    for example in examples:
        example.printing()
|
# Program simulating a humidity sensor.
print("Program symulujący czujnik wilgotności!")

# Pre-recorded humidity readings to cycle through:
humidities = [76.5, 79.4, 80.6, 81.0, 82.8, 83.7, 86.7, 60.8, 68.9, 70.1]
# Index of the next reading; renamed from `iter`, which shadowed the builtin.
reading_idx = 0

# Redis client library for pub/sub and stream output:
import redis

# Connect to the local Redis instance:
r = redis.Redis()
# Create a PubSub object and subscribe to the "devices" channel:
p = r.pubsub()
p.subscribe("devices")
# Block waiting for messages; emit one reading per received message:
for mes in p.listen():
    print("Odebrano wiadomość:")
    print(mes)
    print(humidities[reading_idx])
    # Append the reading to the "d_humi" stream (xadd returns the entry ID).
    print(r.xadd("d_humi", {"humidity": humidities[reading_idx]}))
    reading_idx = (reading_idx + 1) % len(humidities)
|
from wpilib.command import CommandGroup
from .open_claw import OpenClaw
from .set_wrist_setpoint import SetWristSetpoint
from .set_elevator_setpoint import SetElevatorSetpoint
class Place(CommandGroup):
    """Place a held soda can onto the platform."""

    def __init__(self, robot):
        super().__init__()
        # Raise the elevator, level the wrist, then release the can — in order.
        for command in (SetElevatorSetpoint(robot, 0.25),
                        SetWristSetpoint(robot, 0),
                        OpenClaw(robot)):
            self.addSequential(command)
|
# from vnpy.trader.ui import QtGui
from PyQt5 import QtGui
# RGB color tuples used by the chart widget.
WHITE_COLOR = (255, 255, 255)
BLACK_COLOR = (0, 0, 0)
GREY_COLOR = (100, 100, 100)
UP_COLOR = (178,34,34)      # firebrick red
DOWN_COLOR = (0,255,255)    # cyan
CURSOR_COLOR = (255, 245, 162)
# Drawing dimensions.
PEN_WIDTH = 1
BAR_WIDTH = 0.4
AXIS_WIDTH = 0.8
# Default font for chart text.
NORMAL_FONT = QtGui.QFont("Arial", 9)
# def to_int(value: float) -> int:
def to_int(value):
    """Round *value* to the nearest integer (Python 3 half-to-even rounding)
    and return it as an int.  (The original had an empty docstring.)
    """
    return int(round(value, 0))
|
from Crypto.Util.number import *
# RSA blinding demo (Python 2 script): build a 1024-bit modulus and keypair.
m = "flag{Gr34t!_y0u_h4v3_d0n3_it!!}"
p = getPrime(512)
q = getPrime(512)
n = p*q
e = 65537
phin = (p-1)*(q-1)
def egcd(a,b):
    """Greatest common divisor of a and b (Euclid's algorithm)."""
    if b == 0:
        return a
    else:
        return egcd(b,a%b)

def mod_inv(a,b,x1,x2,y1,y2):
    """Extended Euclid: return the Bezout x-coefficient of a modulo b.

    Call as mod_inv(e, phi, 1, 0, 0, 1) and reduce the result mod phi to get
    the modular inverse of e. Prints a message and returns None when the
    inverse does not exist.

    Fixes vs. the original:
    - checks egcd(a, b) instead of the module globals p, q (which made the
      invertibility test vacuous);
    - floor division `a // b` (the Python 2 `/` becomes float division on
      Python 3 and corrupts the coefficients);
    - print() function instead of the Python 2 print statement.
    """
    gcd = egcd(a, b)
    if gcd == 1:
        if b == 0:
            return x1
        quotient = a // b
        x = x1 - quotient * x2
        y = y1 - quotient * y2
        return mod_inv(b, a % b, x2, x, y2, y)
    else:
        print("mod_inv_doesn't_exist")
# NOTE(review): Python 2 script — the print statements below do not parse
# under Python 3, and bytes_to_long is fed a str (py2 bytes).
m = bytes_to_long(m)
d = (mod_inv(e,phin,1,0,0,1))%phin
r = 13907
# Blind the ciphertext with r^e, decrypt, then unblind by dividing out r.
c = pow(m,e,n)
ct = c * pow(r,e,n)
ct = pow(ct,d,n)
# NOTE(review): relies on Python 2 integer division and on r dividing ct exactly.
pt = ct/r
print long_to_bytes(pt)
|
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import AbstractBaseUser, PermissionsMixin
from .managers import UserManager
# Create your models here.
class Tag(models.Model):
    # Tag label text (nullable).
    name = models.CharField(max_length=200, null=True)
class BlogPost(models.Model):
    """A blog entry with authors, tags and an optional linked comment."""
    title = models.CharField(max_length=200, unique=True)
    created_at = models.DateField(default=timezone.now)
    updated_at = models.DateField(default=timezone.now)
    content = models.TextField(blank=False)
    author = models.ManyToManyField('BlogUser', related_name='posts')
    # A Tag instance reaches its posts back through related_name='posts'.
    tag = models.ManyToManyField(Tag, related_name='posts')
    # NOTE(review): a ForeignKey from post to a single Comment looks inverted
    # (Comment.post already points back here) — confirm the intended schema.
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory.
    comment = models.ForeignKey('Comment', related_name='posts', null=True,
                                on_delete=models.CASCADE)
class BlogUser(AbstractBaseUser,PermissionsMixin):
    """Custom user model that authenticates by email instead of username."""
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    email = models.EmailField(unique=True)
    is_active = models.BooleanField(default=True)
    is_staff = models.BooleanField(default=False)

    objects = UserManager()

    USERNAME_FIELD = 'email'

    def __str__(self):
        return str(self.email)
class Comment(models.Model):
    """A comment attached to a blog post."""
    author = models.ManyToManyField(BlogUser)
    created_at = models.DateField(default=timezone.now)
    content = models.TextField(blank=False)
    # on_delete made explicit: CASCADE was the implicit default before
    # Django 2.0, where the argument became mandatory.
    post = models.ForeignKey('BlogPost', related_name='comments', null=True,
                             on_delete=models.CASCADE)
|
#!/usr/bin/env python
"""
Calculate the density of low redshift objects
in magnitude space of SVA1 GOLD galaxies
using COSMOS photo-z's.
"""
from multiprocessing import Pool
import itertools
import time
import numpy as np
import os
from astropy.io import ascii,fits
import matplotlib.pyplot as plt
# Worker processes used by the multiprocessing pool in main().
num_threads = 4

home_dir = '/home/ckrawiec'
this_file = '{}/git/workspace/lo-z_mag_density.py'.format(home_dir)

#will be overwritten
output_file = '{}/DES/magnification/lbgselect/lo-z_mag_density.fits'.format(home_dir)

# Input catalogs: SVA1 GOLD galaxies, the SVA1/COSMOS coadd, and COSMOS2015.
sva1_gold_file = '{}/DES/data/sva1_gold_detmodel_gals.fits'.format(home_dir)
sva1_cosmos_file = '{}/DES/data/sva1_coadd_cosmos.fits'.format(home_dir)
cosmos_file = '{}/COSMOS/data/COSMOS2015_Laigle+_v1.1.fits'.format(home_dir)

#indices for sva1_gold_detmodel_gals.fits
gold_cosmos15_indices_file = '{}/DES/magnification/lbgselect/gold_cosmos15_indices.txt'.format(home_dir)
gold_no_cosmos_indices_file = '{}/DES/magnification/lbgselect/gold_no_cosmos_indices.txt'.format(home_dir)
cosmos15_indices_file = '{}/DES/magnification/lbgselect/cosmos15_indices.txt'.format(home_dir)
def nwrapper(args):
    """Unpack one (vals, errs, truevals) tuple and forward it to n().

    Needed because multiprocessing.Pool.map hands each worker a single
    argument, while n() takes three.
    """
    vals, errs, truevals = args
    return n(vals, errs, truevals)
def n(vals, errs, truevals):
    """Sum Gaussian likelihoods L(vals | truevals) over all truevals.

    Each row of `vals` is a data vector with per-component errors in the
    matching row of `errs`; the model is an independent (diagonal covariance)
    Gaussian centred on each row of `truevals`.

    Parameters
    ----------
    vals, errs : arrays of shape (N, d)
    truevals   : array of shape (M, d)

    Returns
    -------
    out : array of shape (N,), out[i] = sum_j N(vals[i]; truevals[j], diag(errs[i]**2))

    Fix: the original used Python-2-only ``xrange`` and ``itertools.izip``;
    ``range``/``zip`` behave identically here and also run under Python 3.
    """
    out = np.array([])
    # Process in chunks to bound the size of the (N, M, d) broadcast below.
    nchunks = 500
    ntruechunks = 1000
    chunks = zip([vals[i:i+nchunks] for i in range(0, len(vals), nchunks)],
                 [errs[i:i+nchunks] for i in range(0, len(vals), nchunks)])
    for chunk, errchunk in chunks:
        trueout = np.zeros(len(chunk))
        covIs = 1./errchunk**2.  # inverse diagonal covariance terms
        # Per-row Gaussian normalisation constant.
        A = 1./np.sqrt( (2.*np.pi)**len(vals[0]) * np.prod(errchunk**2., axis=1 ) )
        truechunks = (truevals[i:i+ntruechunks] for i in range(0, len(truevals), ntruechunks))
        for truechunk in truechunks:
            diff = chunk[:,np.newaxis,:]-truechunk[np.newaxis,:,:]
            B = -0.5 * np.sum(diff**2.*covIs[:,np.newaxis], axis=2)
            C = A[:,np.newaxis] * np.exp(B)
            trueout += np.sum(C, axis=1)
        out = np.concatenate((out, trueout))
    return out
def main():
    """Estimate, for each SVA1 GOLD galaxy outside the COSMOS field, the density
    of low-redshift (0 < z < 4) COSMOS objects at its position in griz
    magnitude space, then write the densities to a FITS table.

    NOTE(review): this script is Python 2 (print statements, itertools.izip)
    and will not run under Python 3 as-is.
    """
    now = time.strftime("%Y-%m-%d %H:%M")
    print "#"+now
    print "num_threads="+str(num_threads)
    os.environ['OMP_NUM_THREADS']=str(num_threads)
    setup_start = time.time()
    #data sets
    sva1_gold = fits.open(sva1_gold_file)[1].data
    sva1_cosmos = fits.open(sva1_cosmos_file)[1].data
    cosmos15 = fits.open(cosmos_file)[1].data
    data_time = time.time()
    print "Loaded data sets in {} s".format(data_time-setup_start)
    # Precomputed row-index files linking the catalogs (see paths at module top).
    gold_cosmos15 = np.loadtxt(gold_cosmos15_indices_file, dtype=int)
    gold_no_cosmos = np.loadtxt(gold_no_cosmos_indices_file, dtype=int)
    cosmos15_indices = np.loadtxt(cosmos15_indices_file, dtype=int)
    #COSMOS2015 photo-z
    #z_cosmos = 9.99 --> X-ray object, z_cosmos = 0 --> star
    z_cosmos = cosmos15['photoz'][cosmos15_indices]
    z0mask = (z_cosmos > 0) & (z_cosmos < 9.9)
    z3mask = (z_cosmos >= 3.) & (z_cosmos < 9.9)  # NOTE(review): unused below
    z4mask = (z_cosmos >= 4.) & (z_cosmos < 9.9)
    #cosmos fluxes and errors from sva1 gold
    # Build {band: values, band+'err': errors} dicts from the detmodel columns.
    # NOTE(review): mutable default `filters` — never mutated here, so harmless,
    # but a tuple would be safer.
    def maketable(datatype, mask=None, cosmos=False, filters=['g','r','i','z','Y']):
        table = {}
        for f in filters:
            if cosmos:
                table[f] = sva1_gold[datatype+'_detmodel_'+f][gold_cosmos15][mask]
                table[f+'err'] = sva1_gold[datatype+'err_detmodel_'+f][gold_cosmos15][mask]
            else:
                table[f] = sva1_gold[datatype+'_detmodel_'+f][gold_no_cosmos]
                table[f+'err'] = sva1_gold[datatype+'err_detmodel_'+f][gold_no_cosmos]
        return table
    cosmos_tab_time = time.time()
    # Template set: COSMOS-matched galaxies with 0 < z < 4.
    lo_z_mag_cosmos = maketable('mag', mask=(z0mask & ~z4mask), cosmos=True)
    cosmos_tab_end = time.time()
    print "Made COSMOS tables in {}s".format(cosmos_tab_end-cosmos_tab_time)
    lo_z_mags = np.array( zip(lo_z_mag_cosmos['g'],
                              lo_z_mag_cosmos['r'],
                              lo_z_mag_cosmos['i'],
                              lo_z_mag_cosmos['z']) )
    sva1_tab_time = time.time()
    mag_sva1_gold = maketable('mag')
    sva1_tab_end = time.time()
    print "Made SVA1 table in {}s".format(sva1_tab_end-sva1_tab_time)
    mags = np.array( zip(mag_sva1_gold['g'],
                         mag_sva1_gold['r'],
                         mag_sva1_gold['i'],
                         mag_sva1_gold['z']) )
    magerrs = np.array( zip(mag_sva1_gold['gerr'],
                            mag_sva1_gold['rerr'],
                            mag_sva1_gold['ierr'],
                            mag_sva1_gold['zerr']) )
    setup_time = time.time()-setup_start
    print "Total setup time took {} s".format(setup_time)
    print " "
    print "# sva1 gold, good region mask, not in COSMOS field: {}".format(len(gold_no_cosmos))
    print "# sva1 gold, good region mask, in COSMOS field: {}".format(len(gold_cosmos15))
    print "# sva1 gold/COSMOS2015 matched, z>4: {}".format(len(gold_cosmos15[z4mask]))
    print "# sva1 gold/COSMOS2015 matched, 0<z<4: {}".format(len(gold_cosmos15[z0mask & ~z4mask]))
    start = time.time()
    N_try = len(mags)
    print "Working on {} galaxies...".format(N_try)
    # Split the work into one chunk per worker process.
    n_per_process = int( np.ceil(N_try/num_threads) )
    mag_chunks = [mags[i:i+n_per_process] for i in xrange(0, N_try, n_per_process)]
    magerr_chunks = [magerrs[i:i+n_per_process] for i in xrange(0, N_try, n_per_process)]
    #multiprocessing
    pool = Pool(processes=num_threads)
    lo_results = pool.map(nwrapper, itertools.izip(mag_chunks, magerr_chunks, itertools.repeat(lo_z_mags)))
    # Normalize summed likelihoods by the number of templates to get a density.
    lo_final_results = np.concatenate(lo_results)/len(lo_z_mags)
    work_time = time.time() - start
    print "Work completed in {} s".format(work_time)
    #write results to fits file
    tbhdu = fits.BinTableHDU.from_columns(fits.ColDefs(
        [fits.Column(name='coadd_objects_id', format='K', array=sva1_gold['coadd_objects_id'][gold_no_cosmos]),
         fits.Column(name='lo-z_density', format='D', array=lo_final_results)]), nrows=len(lo_final_results))
    prihdr = fits.Header()
    prihdr['COMMENT'] = "Output from {}".format(this_file)
    prihdu = fits.PrimaryHDU(header=prihdr)
    thdulist = fits.HDUList([prihdu, tbhdu])
    # NOTE(review): `clobber` was renamed `overwrite` in newer astropy — confirm astropy version.
    thdulist.writeto(output_file, clobber=True)
    now = time.strftime("%Y-%m-%d %H:%M")
    print "#"+now
if __name__=="__main__":
    main()
|
import sys
# Read the target integer N from standard input.
n = int(sys.stdin.readline())
def find(target=None):
    """Print and return the minimum number of operations needed to reduce
    `target` to 1, where each operation is one of: subtract 1, divide by 2
    (if divisible), divide by 3 (if divisible).

    Generalization: `target` defaults to the module-level `n` read from
    stdin, so the original no-argument call still works; the answer is also
    returned (the original only printed it), making the function reusable.
    """
    if target is None:
        target = n
    # d[k] = minimal operation count to reduce k to 1 (bottom-up DP).
    d = [0 for _ in range(target + 1)]
    for k in range(2, target + 1):
        d[k] = d[k-1] + 1          # subtract one
        if k % 3 == 0:
            d[k] = min(d[k//3] + 1, d[k])
        if k % 2 == 0:
            d[k] = min(d[k//2] + 1, d[k])
    print(d[target])
    return d[target]
# Solve for the value read from stdin and print the answer.
find()
|
import math
import numpy as np
import random
def sigmoid(x, derivative=False):
    """Logistic sigmoid s(x) = 1 / (1 + e**-x).

    With derivative=True, returns s(x) * (1 - s(x)), the sigmoid's derivative
    at x.  Works for scalars and numpy arrays alike.

    Improvement: the original evaluated the sigmoid twice in the derivative
    branch; it is now computed once and reused (identical results).
    """
    s = 1 / (1 + math.e ** -x)
    if derivative:
        return s * (1 - s)
    return s
def relu(X, derivative=False):
    """Rectified linear unit, applied to the array `X` **in place** (and returned).

    derivative=False: X becomes elementwise max(X, 0).
    derivative=True:  X becomes the ReLU gradient — 1 where X > 0, else 0.
    """
    if derivative:
        positive = X > 0
        X[~positive] = 0
        X[positive] = 1
    else:
        np.maximum(X, 0, out=X)
    return X
def round_randomly(x):
    """Round float x to an int stochastically: round up with probability equal
    to the fractional part (an unbiased / stochastic rounding).

    Bug fix: the original used int(x), which truncates toward zero, so a
    negative input such as -1.5 could never round down to -2.  math.floor
    behaves correctly for both signs; results for non-negative x are unchanged.
    """
    base = math.floor(x)
    return base + (random.random() < x - base)
|
import os
import sys
import datetime
import configparser
from flask import Flask, render_template, request, flash, session, redirect, url_for
import mysql.connector
# Read configuration from file.
config = configparser.ConfigParser()
config.read('config.ini')
# Set up application server.
app = Flask(__name__)
# NOTE(review): secret key is hard-coded and committed; load it from config.ini or an env var.
app.secret_key = "adbi327fds"
# Create a function for fetching data from the database.
def sql_query(sql, params=None):
    """Run a SELECT and return all rows as a list of tuples.

    Opens a fresh connection per call (simple, not pooled).

    Generalization: `params` is an optional parameter tuple; passing values
    this way lets the driver escape them and prevents SQL injection.
    Existing callers that pass a fully formatted string keep working.
    """
    db = mysql.connector.connect(**config['mysql.connector'])
    cursor = db.cursor()
    if params is None:
        cursor.execute(sql)
    else:
        cursor.execute(sql, params)
    result = cursor.fetchall()
    cursor.close()
    db.close()
    return result
# Function for executing INSERT
def sql_execute(sql):
    """Execute a data-modifying statement and commit.

    NOTE: unlike sql_query/sql_delete, `sql` here is a 2-tuple of
    (statement, params) that is unpacked into cursor.execute(), so the
    driver escapes the values — the safe pattern the other helpers lack.
    """
    db = mysql.connector.connect(**config['mysql.connector'])
    cursor = db.cursor()
    cursor.execute(*sql)
    db.commit()
    cursor.close()
    db.close()
# Function for deleting data
def sql_delete(sql):
    """Execute a DELETE (or other modifying) statement given as a plain
    string, commit it, and close the connection."""
    connection = mysql.connector.connect(**config['mysql.connector'])
    cur = connection.cursor()
    cur.execute(sql)
    connection.commit()
    cur.close()
    connection.close()
# This route works for login
@app.route('/')
@app.route('/login', methods = ['GET', 'POST'])
def login():
    """Log a user in by checking username/password against the user table.

    SECURITY NOTE(review): the username is interpolated straight into the SQL
    string (injection risk) and passwords are stored/compared in plain text —
    switch to parameterized queries and password hashing.
    """
    if 'user' in session:
        return redirect(url_for('home'))
    message = None
    if request.method == "POST":
        usern = request.form.get("username")
        passw = request.form.get("password")
        sql = "SELECT user.password FROM user WHERE user.username = '{usern}'".format(usern=usern)
        result = sql_query(sql)
        if result:
            password = result[0]  # first row: a 1-tuple (password,)
            if password[0] == passw:
                session['user'] = usern
                return redirect(url_for('home'))
        message = "Username or password is incorrect."
    return render_template("login.html", message=message)
# route for account registration
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account if the username is free, then log the user in.

    SECURITY NOTE(review): the existence check interpolates the username into
    SQL (injection risk) and the password is stored in plain text; the INSERT
    itself is parameterized via sql_execute.
    """
    message = None
    if 'user' in session:
        return redirect(url_for('home'))
    if request.method == "POST":
        usern = request.form.get("username")
        sqlcheck = "select * from user where user.username = '{usern}'".format(usern=usern)
        res_check = sql_query(sqlcheck)
        if not res_check:
            passw = request.form.get("password")
            # (statement, params) tuple — sql_execute unpacks it into cursor.execute().
            sql = ("INSERT INTO user (username, password) VALUES (%s,%s)", (usern, passw))
            sql_execute(sql)
            session['user'] = usern
            return redirect(url_for('home'))
        else:
            flash('Username is already taken.')
            return redirect(url_for('register'))
    return render_template("registration.html", message=message)
# route for user's account
@app.route("/account", methods=['GET', 'POST'])
def account():
    """Show the logged-in user's reviews; handle edit/delete/home buttons.

    SECURITY NOTE(review): review_id and username are interpolated into SQL
    strings (injection risk) — prefer parameterized queries.
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    usern = session['user']
    if "edit-review" in request.form:
        # Stash the review being edited in the session for the /edit page.
        review_id = int(request.form["edit-review"])
        session['review'] = review_id
        return redirect(url_for('edit'))
    if "delete-review" in request.form:
        # Delete the review plus its relation rows (no FK cascade in the schema).
        review_id = int(request.form["delete-review"])
        sql = "delete from review where review_id = {review_id}".format(review_id=review_id)
        sql_delete(sql)
        delete_sql_ra = "DELETE FROM review_album WHERE review_id = {review_id}".format(review_id=review_id)
        sql_delete(delete_sql_ra)
        delete_sql_rb = "DELETE FROM review_by WHERE review_id = {review_id}".format(review_id=review_id)
        sql_delete(delete_sql_rb)
    if "home" in request.form:
        return redirect(url_for('home'))
    sqlID = "select user.user_id from user where user.username = '{usern}'".format(usern=usern)
    result = sql_query(sqlID)
    user_id = result[0]  # first row: a 1-tuple (user_id,)
    template_data = {}
    #sql = "select * from review, review_by where review_by.review_id=review.review_id and review_by.user_id={user_id}".format(user_id=user_id[0])
    sql = "select review.review_id, review.review_text, review.review_score, review.review_date, album.album_name from review, review_by, album, review_album where review_by.review_id=review.review_id and review_album.review_id = review.review_id and review_album.album_id = album.album_id and review_by.user_id={user_id}".format(user_id=user_id[0])
    reviews = sql_query(sql)
    template_data['reviews'] = reviews
    return render_template('account.html', template_data=template_data, name=usern)
#route for logout
@app.route("/logout")
def logout():
    """Clear the login session and return to the login page.

    Bug fix: the original called session.pop('user', session['user']); the
    default argument is evaluated eagerly, so hitting /logout while not
    logged in raised KeyError.  A None default makes logout safe at any
    time; behaviour for logged-in users is unchanged.
    """
    session.pop('user', None)
    return redirect(url_for('login'))
# Home page after login
@app.route('/home', methods=['GET', 'POST'])
def home():
    """Landing page: album search plus navigation to account/logout.

    SECURITY NOTE(review): the album name is interpolated into the SQL
    string (injection risk) — use sql_query with driver-side parameters.
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    usern = session['user']
    if request.method == "POST":
        if "search" in request.form:
            album = request.form['album']
            sql = "select album.album_id from album where album.album_name = '{album}'".format(album=album)
            result = sql_query(sql)
            if result:
                album_id = result[0]  # first row: a 1-tuple (album_id,)
                session['album'] = album_id[0]
                return redirect(url_for('album'))
            flash('No results could be found for your search, please try again.')
        if "account" in request.form:
            return redirect(url_for('account'))
        if "logout" in request.form:
            # Bug fix: pop with a None default — the original evaluated
            # session['user'] as the default, which raises KeyError when the
            # key is missing; None is always safe.
            session.pop('user', None)
            return redirect(url_for('login'))
    return render_template('home.html')
# Page for album info and reviews
@app.route('/album', methods=['GET', 'POST'])
def album():
    """Show one album (from session['album']) with its rating and reviews.

    NOTE(review): the five separate single-column SELECTs below could be one
    query; album_id comes from the session as an int so the injection risk
    is lower here, but parameterized queries would still be safer.
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    album_id = session['album']
    usern = session['user']
    if "createreview" in request.form:
        return redirect(url_for('createreview'))
    if "home" in request.form:
        # NOTE(review): default is evaluated eagerly; safe only because
        # session['album'] was just read above — session.pop('album', None) would be safer.
        session.pop('album', session['album'])
        return redirect(url_for('home'))
    sqlname = "select album.album_name from album where album.album_id = {album_id}".format(album_id=album_id)
    result_name = sql_query(sqlname)
    name = result_name[0]
    sqlart = "select album.album_artist from album where album.album_id = {album_id}".format(album_id=album_id)
    result_artist = sql_query(sqlart)
    artist = result_artist[0]
    sqlgen = "select album.album_genre from album where album.album_id = {album_id}".format(album_id=album_id)
    result_genre = sql_query(sqlgen)
    genre = result_genre[0]
    sqlrat = "select avg(review.review_score) from review, review_album, album where review.review_id = review_album.review_id and review_album.album_id = album.album_id and album.album_id = {album_id}".format(album_id=album_id)
    result_rating = sql_query(sqlrat)
    rating = result_rating[0]
    sqlnum = "select count(review.review_score) from review, review_album, album where review.review_id = review_album.review_id and review_album.album_id = album.album_id and album.album_id = {album_id}".format(album_id=album_id)
    result_num = sql_query(sqlnum)
    num = result_num[0]
    sqlrev = "select * from review, review_album, album, review_by, user where review_by.user_id = user.user_id and review_by.review_id = review.review_id and review.review_id = review_album.review_id and review_album.album_id = album.album_id and album.album_id = {album_id}".format(album_id=album_id)
    reviews = sql_query(sqlrev)
    if not reviews:
        flash('No reviews available')
    return render_template('album.html', name=name[0], artist=artist[0], genre=genre[0], rating=rating[0], num=num[0], template_data=reviews)
# Create review page
@app.route('/createreview', methods=["GET", "POST"])
def createreview():
    """Create a review for the album in session['album'] and link it to the
    album and the logged-in user.

    NOTE(review): fetching the new id via SELECT MAX(review_id) is racy under
    concurrent inserts — cursor.lastrowid would be correct.  The username is
    also interpolated into SQL (injection risk).
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    username = session['user']
    album_id = session['album']
    album_sql = "SELECT * FROM album WHERE album.album_id = {album_id}".format(album_id=album_id)
    result_album = sql_query(album_sql)
    album = result_album[0]
    if request.method == "POST":
        if "submit" in request.form:
            score = int(request.form['score'])
            comment = str(request.form['comment'])
            if score > 0 and score < 101:
                date = datetime.datetime.now()
                #date = 11112
                # Parameterized (statement, params) tuples for sql_execute.
                sql = ("INSERT INTO review (review_text, review_score, review_date) VALUES (%s, %s, %s)", (comment, score, date))
                sql_execute(sql)
                sql_rev = "SELECT MAX(review.review_id) FROM review"
                reviews = sql_query(sql_rev)
                review = reviews[0]
                sql_relation_ra = ("INSERT INTO review_album (review_id, album_id) VALUES (%s, %s)", (review[0], album[0]))
                sql_execute(sql_relation_ra)
                sqlID = "select user.user_id from user where user.username = '{username}'".format(username=username)
                user = sql_query(sqlID)
                user_id = user[0]
                sql_relation_rb = ("INSERT INTO review_by (review_id, user_id) VALUES (%s, %s)", (review[0], user_id[0]))
                sql_execute(sql_relation_rb)
                return redirect(url_for('album'))
            else:
                flash('Please enter an integer between 1 and 100')
                return redirect(url_for('createreview'))
        if "home" in request.form:
            session.pop('album', session['album'])
            return redirect(url_for('home'))
    return render_template('createreview.html', album=album[2])
# Edit Review Page
@app.route('/edit', methods=["GET", "POST"])
def edit():
    """Edit the review in session['review'] by deleting it and re-inserting
    a new row (the review therefore gets a new id and date).

    NOTE(review): delete-then-reinsert is not atomic and SELECT MAX(review_id)
    is racy under concurrency; an UPDATE would preserve the id.  The username
    is interpolated into SQL (injection risk).
    """
    if 'user' not in session:
        return redirect(url_for('login'))
    username = session['user']
    sqlID = "select user.user_id from user where user.username = '{username}'".format(username=username)
    user = sql_query(sqlID)
    user_id = user[0]
    review_id = session['review']
    # Pre-fill the form with the existing comment and score.
    prevcommentsql = "SELECT review.review_text FROM review WHERE review.review_id = {review_id}".format(review_id=review_id)
    result_prevcomment = sql_query(prevcommentsql)
    prevcomment = result_prevcomment[0]
    prevscoresql = "SELECT review.review_score FROM review WHERE review.review_id = {review_id}".format(review_id=review_id)
    result_prevscore = sql_query(prevscoresql)
    prevscore = result_prevscore[0]
    albumsql = "select * from album, review_album, review where album.album_id = review_album.album_id and review_album.review_id = {review_id}".format(review_id=review_id)
    albums = sql_query(albumsql)
    album = albums[0]
    if request.method == "POST":
        if "submit" in request.form:
            score = int(request.form['score'])
            comment = request.form['comment']
            if score > 0 and score < 101:
                date = datetime.datetime.now()
                #date = 11111111
                # Remove the old review and its relation rows...
                delete_sql = "DELETE FROM review WHERE review_id = {review_id}".format(review_id=review_id)
                sql_delete(delete_sql)
                delete_sql_ra = "DELETE FROM review_album WHERE review_id = {review_id}".format(review_id=review_id)
                sql_delete(delete_sql_ra)
                delete_sql_rb = "DELETE FROM review_by WHERE review_id = {review_id}".format(review_id=review_id)
                sql_delete(delete_sql_rb)
                # ...then insert the edited version and relink it.
                sql = ("INSERT INTO review (review_text, review_score, review_date) VALUES (%s, %s, %s)", (comment, score, date))
                sql_execute(sql)
                sql_rev = "SELECT MAX(review.review_id) FROM review"
                reviews = sql_query(sql_rev)
                review = reviews[0]
                sql_relation_ra = ("INSERT INTO review_album (review_id, album_id) VALUES (%s, %s)", (review[0], album[0]))
                sql_execute(sql_relation_ra)
                sql_relation_rb = ("INSERT INTO review_by (review_id, user_id) VALUES (%s, %s)", (review[0], user_id[0]))
                sql_execute(sql_relation_rb)
                session.pop('review', session['review'])
                return redirect(url_for('account'))
        if "cancel" in request.form:
            session.pop('review', session['review'])
            return redirect(url_for('account'))
    return render_template('edit.html', prevcomment=prevcomment[0], prevscore=prevscore[0], album=album[1])
if __name__ == '__main__':
    # Server options (host/port/debug, ...) come from the [app] section of config.ini.
    app.run(**config['app'])
|
# -*- coding: utf-8 -*-
"""
Demonstrates common image analysis tools.
Many of the features demonstrated here are already provided by the ImageView
widget, but here we present a lower-level approach that provides finer control
over the user interface.
"""
import initExample ## Add path to library (just for examples; you do not need this)
import pyqtgraph as pg
from pyqtgraph.Qt import QtCore, QtGui
import numpy as np
# Interpret image data as row-major instead of col-major
pg.setConfigOptions(imageAxisOrder='row-major')
pg.mkQApp()
win = pg.GraphicsLayoutWidget()
win.setWindowTitle('pyqtgraph example: Image Analysis')
# A plot area (ViewBox + axes) for displaying the image
p1 = win.addPlot()
# Item for displaying image data
img = pg.ImageItem()
p1.addItem(img)
# Custom ROI for selecting an image region
roi = pg.ROI([-8, 14], [6, 5])
roi.addScaleHandle([0.5, 1], [0.5, 0.5])
roi.addScaleHandle([0, 0.5], [0.5, 0.5])
p1.addItem(roi)
roi.setZValue(10)  # make sure ROI is drawn above image
# Isocurve drawing
iso = pg.IsocurveItem(level=0.8, pen='g')
iso.setParentItem(img)
iso.setZValue(5)
# Contrast/color control
hist = pg.HistogramLUTItem()
hist.setImageItem(img)
win.addItem(hist)
# Draggable line for setting isocurve level
isoLine = pg.InfiniteLine(angle=0, movable=True, pen='g')
hist.vb.addItem(isoLine)
hist.vb.setMouseEnabled(y=False) # makes user interaction a little easier
isoLine.setValue(0.8)
isoLine.setZValue(1000) # bring iso line above contrast controls
# Another plot area for displaying ROI data
win.nextRow()
p2 = win.addPlot(colspan=2)
p2.setMaximumHeight(250)
win.resize(800, 800)
win.show()
# Generate image data: smoothed noise with a brighter square in the middle
data = np.random.normal(size=(200, 100))
data[20:80, 20:80] += 2.
data = pg.gaussianFilter(data, (3, 3))
data += np.random.normal(size=(200, 100)) * 0.1
img.setImage(data)
hist.setLevels(data.min(), data.max())
# build isocurves from smoothed data
iso.setData(pg.gaussianFilter(data, (2, 2)))
# set position and scale of image
img.scale(0.2, 0.2)
img.translate(-50, 0)
# zoom to fit image
p1.autoRange()
# Callbacks for handling user interaction
def updatePlot():
    """Replot the ROI-selected slice of the image into the lower plot area.

    (Reads module-level `roi`, `data`, `img`, `p2`; the original declared
    them `global`, which is unnecessary for read-only access.)
    """
    region = roi.getArrayRegion(data, img)
    p2.plot(region.mean(axis=0), clear=True)
roi.sigRegionChanged.connect(updatePlot)
updatePlot()
def updateIsocurve():
    """Keep the image isocurve level in sync with the draggable line."""
    iso.setLevel(isoLine.value())
isoLine.sigDragged.connect(updateIsocurve)
## Start Qt event loop unless running in interactive mode or using pyside.
if __name__ == '__main__':
    import sys
    # Start the Qt event loop only when run as a script (skipped in interactive PyQt sessions).
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
|
from pycloudia.packages.interfaces import IEncoder
from pycloudia.packages.exceptions import InvalidEncodingError
class Encoder(IEncoder):
    """Serialize a package (headers dict + string content) into one message.

    Bug fixes relative to the original:
      * ``class Encoder(object, IEncoder)`` is an invalid base order — with
        ``object`` first Python cannot build a consistent MRO; deriving from
        ``IEncoder`` alone is equivalent and valid.
      * ``_create_message`` referenced ``self.delimiter``, which does not
        exist; the class attribute is ``content_delimiter``.
      * ``_encode_headers`` iterated ``headers.data.iteritems()`` although
        ``encode`` asserts headers is a plain dict (and ``iteritems`` is
        Python-2-only); ``headers.items()`` matches the assert and works on
        both Python versions.
    """
    encoding = None            # target byte encoding for non-ASCII messages
    content_delimiter = None   # separates the header block from the content
    headers_delimiter = None   # separates individual "name:value" pairs

    def encode(self, package):
        """Return the package rendered as a single string/bytes message."""
        assert isinstance(package.content, str)
        assert isinstance(package.headers, dict)
        message = self._create_message(package)
        message = self._convert_message(message)
        return message

    def _create_message(self, package):
        # Headers first, then the content, joined by the content delimiter.
        return '{headers}{delimiter}{content}'.format(
            headers=self._encode_headers(package.headers),
            content=package.content,
            delimiter=self.content_delimiter,
        )

    def _convert_message(self, message):
        # Try the cheap native conversion first; fall back to an explicit
        # encode, wrapping failures in the package-level error type.
        try:
            return str(message)
        except UnicodeEncodeError:
            try:
                return message.encode(self.encoding)
            except UnicodeEncodeError:
                raise InvalidEncodingError('Unable convert package to {0}'.format(self.encoding))

    def _encode_headers(self, headers):
        return self.headers_delimiter.join([
            '{0}:{1}'.format(name, value)
            for name, value
            in headers.items()
        ])
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun May 3 17:09:46 2020
@author: rohitmathew and guoyichen
"""
# make sure to install these packages before running:
# pip install sodapy
import pandas as pd
from sodapy import Socrata
from datetime import datetime,date
import matplotlib.pyplot as plt
## Below section of codes obtains data
# Unauthenticated Socrata client for the NY DOH open-data portal.
client = Socrata("health.data.ny.gov", None)
results = client.get("xdss-u53e", limit=50000)
data = pd.DataFrame.from_records(results)
## Below section cleans and makes data points into a readable format in Python
df = data.sort_values(by=['county', "test_date"])
# Socrata returns everything as strings; coerce the count columns to numbers
# (unparseable values become NaN) and the date column to datetime.
df["new_positives"] = pd.to_numeric(df["new_positives"],errors='coerce')
df["cumulative_number_of_positives"] = pd.to_numeric(df["cumulative_number_of_positives"],errors='coerce')
df["total_number_of_tests"] = pd.to_numeric(df["total_number_of_tests"],errors='coerce')
df["cumulative_number_of_tests"] = pd.to_numeric(df["cumulative_number_of_tests"],errors='coerce')
df['test_date']= pd.to_datetime(df['test_date'])
#df['test_date'] = df['test_date'].dt.date
# Asks for country and puts into correct format
def county_name(raw=None):
    """Return a county name normalized to the dataset's capitalization.

    Generalization: `raw` may be supplied directly; when omitted the user is
    prompted, preserving the original interactive behaviour.

    Bug fix: str.capitalize() lower-cases every character after the first
    ("new york" -> "New york"), which can never match multi-word county names
    such as "New York"; per-word title-casing is used instead.  Single-word
    names are unaffected.
    """
    if raw is None:
        raw = input("What county in NYS do you want info on? ")
    return raw.title()
# Asks for a date and puts into correct format
def what_date(raw=None):
    """Return a datetime parsed from an mmDDyyyy string.

    Generalization: `raw` may be passed directly; when omitted the user is
    prompted (original behaviour).  The local variable no longer shadows the
    `date` name imported at module level.
    """
    if raw is None:
        raw = input("Please enter a date in mmDDyyyy format: ")
    return datetime.strptime(raw, '%m%d%Y')
# For county information on the dataframe.
def county_df ():
    """Prompt for a county (via county_name) and return the matching rows
    of the module-level dataframe `df`."""
    selected = county_name()
    return df[(df.county == selected)]
# For total cases by day in NYS
def by_date():
    """Return state-wide new positives summed per test date (one row per day)."""
    daily = df.groupby('test_date', as_index=False)['new_positives'].sum()
    return daily
"""
# Returns dataframe on selected county
#def county_date():
#county_data = county(county_name)
"""
# Bar chart showing distribution of new cases. Needs some fixing.
#ax = county_data.plot.bar(x='test_date', y='new_positives', rot=0)
### CODE BELOW FOR MENU
def menu():
    """Interactive text menu over the module-level dataframe `df`.

    Offers county stats, per-date stats, min/max counties, and two optional
    bar charts.  NOTE(review): each selection re-enters menu() recursively
    instead of looping, so a very long session could hit the recursion limit.
    """
    print("")
    print ("MAIN MENU:","\n1. Current County Statistics \n2. County Information on Certain date \n3. County with highest and lowest total positive cases \n4. Number of New Cases per Day in NYS; Barchart \n5. Summary of new cases in county by date; Barchart")
    number = input("Enter your selection: ")
    number = int(number)
    # County statistics
    if number == 1:
        county_info = county_df()
        total_pos = county_info["new_positives"].sum()
        total_tests = county_info["total_number_of_tests"].sum()
        total_neg = total_tests - total_pos
        pencentage_of_positives= total_pos/total_tests *100
        print("Information as of: ",max(df['test_date']) ,"\nTotal Number of Postives Cases are: ", total_pos, "\nTotal Number of Negatives are: ", total_neg, "\nTotal number of tests are: ", total_tests,"\n% of positive cases: ", pencentage_of_positives)
        menu()
    # County Information on Certain date
    elif number == 2:
        county = county_name()
        date = what_date()
        date_info = df.loc[(df['test_date'] == date) & (df['county'] == county)]
        day_pos = (date_info["new_positives"]).sum()
        day_total = (date_info["total_number_of_tests"]).sum()
        day_neg = day_total - day_pos
        print("Cases for date: ", date, "\nNumber of Postives Cases: ", day_pos, "\nNumber of Negative Cases: ", day_neg, "\nNumber of tests done were: ", day_total)
        menu()
    # County with highest total cases
    elif number == 3:
        # Use only the most recent day's rows, then pick max/min cumulative counts.
        data = df[(df["test_date"] == max(df['test_date']))]
        data3 = (data[data.cumulative_number_of_positives == data.cumulative_number_of_positives.max()])
        opt_3 = data3[['county','cumulative_number_of_positives']]
        data4 = (data[data.cumulative_number_of_positives == data.cumulative_number_of_positives.min()])
        opt_4 = data4[['county','cumulative_number_of_positives']]
        print (opt_3.iat[0, 0], "has the hightest number of positive cases with", opt_3.iat[0, 1], "cases.")
        print (opt_4.iat[0, 0], "has the lowest number of positive cases with", opt_4.iat[0, 1], "cases.")
        menu()
    # Dataframe ~ BARCHART Number of Positive New Cases per Day in NYS
    elif number == 4:
        data4 = df.groupby(["test_date"])[["new_positives"]].sum()
        print(data4)
        ans = input("Would you like a barchart of this data? y or n: ").lower()
        if ans == "y":
            ax = data4.plot(kind='bar')
            ax.axes.get_xaxis().set_visible(False)
            plt.xlabel('test date')
            plt.ylabel('numbers of new positive cases')
            plt.title('Number of Positive New Cases per Day in NYS')
            plt.grid(True)
            plt.show()
            menu()
        else:
            menu()
    # BARCHART summary of new cases in county by date
    elif number == 5:
        county_info = county_df()
        data5 = county_info.groupby(["test_date"])[["new_positives"]].sum()
        print(data5)
        ans = input("Would you like a barchart of this data? y or n: ").lower()
        if ans == "y":
            ax = data5.plot(kind='bar')
            ax.axes.get_xaxis().set_visible(False)
            plt.xlabel('test date')
            plt.ylabel('numbers of new positive cases in this county')
            plt.title('Summary of New Cases in the County')
            plt.grid(True)
            plt.show()
            menu()
        else:
            menu()
    else:
        print("\nInvalid Selection, try again.")
        menu()
# Start the interactive menu loop.
menu()
|
import cv2
from fdet import io, RetinaFace
# Number of frames sent to the detector per batch.
BATCH_SIZE = 10
detector = RetinaFace(backbone='MOBILENET', cuda_devices=[0])
vid_cap = cv2.VideoCapture('test_video.mp4')
video_face_detections = []  # list to store all video face detections
image_buffer = []  # buffer to store the batch
while True:
    success, frame = vid_cap.read() # read the frame from video capture
    if not success:
        break # end of video
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) # convert to RGB
    image_buffer.append(frame) # add frame to buffer
    if len(image_buffer) == BATCH_SIZE: # if buffer is full, detect the batch
        batch_detections = detector.batch_detect(image_buffer)
        video_face_detections.extend(batch_detections)
        image_buffer.clear() # clear the buffer
# Flush the final partial batch (fewer than BATCH_SIZE frames left over).
if image_buffer: # checks if images remain in the buffer and detect it
    batch_detections = detector.batch_detect(image_buffer)
    video_face_detections.extend(batch_detections)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""mergeBedGraphs.py
This script can be used to merge BED graphs produced by bismark_SE/PE.sh and bamToBedGraph.sh and a sample annotation file (see below) into a single long-format table (for lm() in R).
NOTE: minCov >= 5, maxCov <= 100, and minGroupCount >= 2 are hard-coded.
sampleTable.csv has a header and contains the following columns:
<required>
\tsampleID
\tbedFile
\tgroup (should be the smallest grouping possible)
<optional>
\tother factors which will be used in the model later on
<example>
sampleID,bedFile,group,organ,sex
lm1,lm1.bed,liver_male,liver,male
lm2,lm2.bed,liver_male,liver,male
lm3,lm3.bed,liver_male,liver,male
lf1,lf1.bed,liver_female,liver,female
lf2,lf2.bed,liver_female,liver,female
lf3,lf3.bed,liver_female,liver,female
hm1,hm1.bed,heart_male,heart,male
hm2,hm2.bed,heart_male,heart,male
hm3,hm3.bed,heart_male,heart,male
hf1,hf1.bed,heart_female,heart,female
hf2,hf2.bed,heart_female,heart,female
hf3,hf3.bed,heart_female,heart,female
Notes:
\tgroup = <organ>_<sex>
\tthe bed files don't have to contain the sampleID, but you should provide the full path
Acknowledgements:
I would like to thank Önder Kartal. Most of this script is taken from his shannon script. See: gitlab.com/okartal/shannon
ToDo:
add requirements and a readme for the preprocessing.
"""
__author__ = "Marc W Schmid"
__version__ = "0"
import argparse
import collections
import os
import subprocess
import sys
import textwrap
import math
import numpy as np
import pandas as pd
import logging
logging.basicConfig(format="=== %(levelname)s === %(asctime)s === %(message)s", level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
def mergeBedGraphs(sampleTableFile, chrom, ctxt, outfileName, query=None, bedFileType="MethAn_bismark"):
    """The main driver function.

    Reads the sample table, optionally filters it with a pandas `query`
    expression, and merges the per-sample BED graphs for either a single
    chromosome (`chrom` is a name) or a list of chromosomes (`chrom` is a
    path to a file with one name per line).

    Cleanups vs the original: unused exception binding dropped, the loop
    variable no longer shadows the `chrom` argument, and the dead trailing
    `pass` was removed.  Behaviour is unchanged.
    """
    # read the sample table
    logging.info("Reading sample table.")
    try:
        sampleTab = pd.read_csv(sampleTableFile, comment='#', sep=',', header=0)
    except FileNotFoundError:
        logging.critical("Could not find the sampleTable.csv file.")
        sys.exit(66)
    # filter it if requested
    if query:
        logging.info("Selecting samples according to query.")
        sampleTab.query(query, inplace=True)
    else:
        logging.info("Selecting all samples.")
    # merge the bedGraphs
    logging.info("Merging BED files.")
    okCounter = 0
    chromCounter = 1
    if not os.path.isfile(chrom):
        # `chrom` is a single chromosome name
        if mergeFiles(sampleTab, chrom, ctxt, outfileName, bedFileType):
            okCounter += 1
    else:
        # `chrom` is a file listing chromosome names, one per line
        with open(chrom, "rb") as infile:
            chromList = [line[:-1].decode("ascii") for line in infile]
        chromCounter = len(chromList)
        for chromName in chromList:
            if mergeFiles(sampleTab, chromName, ctxt, outfileName, bedFileType):
                okCounter += 1
    # no return, just print a time information
    logging.info("Finished ("+str(okCounter)+"/"+str(chromCounter)+" with data/total)")
def mergeFiles(sampleTab, chrom, ctxt, outfileName, fileType="MethAn_bismark", imputation=False):
    """Merge individual BED files and the sample annotation into a long-format table.

    Returns True when data was written for `chrom`, False when the chromosome
    is missing or empty in all inputs.  `imputation` is kept for interface
    stability but unused (see impute()).

    Compatibility fixes vs the original (results identical on old pandas):
      * ``Series.get_value()`` was removed in pandas 1.0 -> ``.iloc``.
      * ``Series.count(level=...)`` was removed in pandas 2.0 ->
        ``.groupby(level=...).count()`` (equivalent).
      * ``== None`` comparisons replaced with ``is None``.
      * The unused local ``curSample`` was dropped.
    """
    inputFiles = sampleTab["bedFile"]
    labels = sampleTab["sampleID"]
    groups = sampleTab["group"]
    factorNames = list(sampleTab.columns.values)[3:]
    # initialize the output table
    logging.info("Initializing table.")
    column = collections.OrderedDict()
    if fileType == "MethAn_bismark":
        column[0] = "chrom"
        column[1] = "pos"
        #column[2] = "end"
        #column[3] = "pcentmeth"
        column[4] = 'M' # methylated coverage
        column[5] = 'U' # unmethylated coverage
        types = [str, np.int64, np.int32, np.int32]
        dtype = dict(zip(column.values(), types))
    else:
        logging.critical("User-provided file type is not implemented (see --bedFileType).")
        sys.exit(64)
    # Get the query region:
    # load is average number of expected sites in the region, this determines
    # the memory requirements
    logging.info("Preparing for sequential reading.")
    LOAD = 1e6
    supChrom = chromSupremum(inputFiles, chrom)
    if supChrom is None:
        logging.info("Skipping because chromosome is missing.")
        return False
    supNsites = nsitesSupremum(inputFiles, chrom)
    if supNsites is None or supNsites == 0:
        logging.info("Skipping because there are no entries.")
        return False
    stepSize = math.ceil(supChrom/supNsites * LOAD)
    if stepSize < supChrom:
        step = stepSize
        logging.info("step size: "+format(step))
    else:
        step = supChrom
        logging.info("step size: "+format(step)+" (max. for contig {0}).".format(chrom))
    posStart = list(range(0, supChrom, step + 1))
    posEnd = list(range(step, supChrom, step + 1)) + [supChrom]
    regions = zip(posStart, posEnd)
    logging.info("Merging data.")
    for interval in regions:
        region = chrom + ':{0}-{1}'.format(*interval)
        # load into data frame (one tabix stream per sample)
        logging.info("Loading "+region)
        tabixQuery = (subprocess.Popen(['tabix', f, region],
                                       stdout=subprocess.PIPE,
                                       universal_newlines=True)
                      for f in inputFiles)
        dataframes = (pd.read_table(query.stdout, comment='#', header=None,
                                    usecols=list(column.keys()),
                                    names=list(column.values()), dtype=dtype)
                      for query in tabixQuery)
        # add annotation
        logging.info("Adding annotation to "+region)
        rowNums = range(len(labels))
        reformDFs = []
        for rowNum, sample, group, df in zip(rowNums, labels, groups, dataframes):
            if ctxt != "FILE":
                df["ctxt"] = ctxt
            df["coverage"] = df['M']+df['U']
            df["pcentmeth"] = df['M']/df["coverage"]
            df["sample"] = sample
            df["group"] = group
            del df['M']
            del df['U']
            # copy the extra per-sample factors into the data frame
            for fn in factorNames:
                df[fn] = sampleTab[fn].iloc[rowNum]
            reformDFs.append(df)
        # merge
        logging.info("Merging "+region)
        mergedTab = pd.concat(reformDFs, ignore_index = True)
        # sort
        logging.info("Sorting "+region)
        mergedTab.sort_values(["pos"], ascending=True, inplace=True)
        # filter: keep positions where every group has >= minGroupCount samples
        # with coverage in [5, 100] (hard-coded thresholds, see module docstring)
        logging.info("Filtering "+region)
        minGroupCount = 2
        groupCounts = mergedTab.query("coverage>=5 & coverage<=100").groupby(by=["pos", "group"])["coverage"].size()
        posCounts = groupCounts[groupCounts>=minGroupCount].groupby(level="pos").count()
        okPositions = list(posCounts[posCounts == len(set(groups))].index)
        selectedPositions = mergedTab[mergedTab["pos"].isin(okPositions)]
        # add the output - write the header in in case the file does not exist
        logging.info("Writing "+region)
        writeHeader = not os.path.isfile(outfileName)
        selectedPositions.query("coverage>=5 & coverage<=100").to_csv(outfileName, header=writeHeader, sep='\t', index=False, mode='a')
        # write also all positions that have a coverage of at least 5 and max 100
        outfileNameNoGroupFilter = outfileName+".noGroupFilter"
        writeHeader = not os.path.isfile(outfileNameNoGroupFilter)
        mergedTab.query("coverage>=5 & coverage<=100").to_csv(outfileNameNoGroupFilter, header=writeHeader, sep='\t', index=False, mode='a')
    return True
def chromSupremum(tabixfiles, chrom):
    """Return the least upper bound for the chrom end coordinate.

    Runs ``tabix <file> <chrom> | tail -1 | cut -f3`` for every indexed
    file and takes the maximum of the parsed end positions.  Files whose
    pipeline yields nothing parseable are skipped.  Returns ``None``
    when no file produced a coordinate.
    """
    positions = []
    for path in tabixfiles:
        # Shell-style pipeline: tabix | tail -1 | cut -f3.
        p_tabix = subprocess.Popen(["tabix", path, chrom], stdout=subprocess.PIPE)
        p_tail = subprocess.Popen(["tail", "-1"], stdin=p_tabix.stdout, stdout=subprocess.PIPE)
        p_cut = subprocess.Popen(["cut", "-f3"], stdin=p_tail.stdout, stdout=subprocess.PIPE)
        # Close our handle so tabix receives SIGPIPE if a downstream
        # process exits early.
        p_tabix.stdout.close()
        raw = p_cut.communicate()[0]
        try:
            positions.append(int(raw))
        except ValueError:
            continue
    try:
        return np.max(positions)
    except ValueError:
        # np.max raises ValueError on an empty sequence.
        return None
def nsitesSupremum(tabixfiles, chrom):
    """Return the least upper bound for the number of covered sites.

    Counts the lines emitted by ``tabix <file> <chrom>`` for each file
    (via ``wc -l``) and returns the maximum count, or ``None`` when no
    count could be obtained.
    """
    counts = []
    for path in tabixfiles:
        p_tabix = subprocess.Popen(["tabix", path, chrom], stdout=subprocess.PIPE)
        p_wc = subprocess.Popen(["wc", "-l"], stdin=p_tabix.stdout, stdout=subprocess.PIPE)
        # Close our handle so tabix receives SIGPIPE if wc exits early.
        p_tabix.stdout.close()
        raw = p_wc.communicate()[0]
        try:
            counts.append(int(raw))
        except ValueError:
            continue
    try:
        return np.max(counts)
    except ValueError:
        # Empty sequence: no file yielded a line count.
        return None
def impute(data, method='pseudocount'):
    """Placeholder for imputation -- intentionally disabled.

    Always logs a critical message and terminates the process with exit
    status 64; the code after the exit is unreachable by design and only
    sketches the intended interface.
    """
    logging.critical("DO NOT USE IMPUTATION.")
    sys.exit(64)
    # Unreachable: kept as a sketch of the intended behavior.
    if method == 'pseudocount':
        value = 1
        return value
if __name__ == '__main__':
    # Command-line interface: parse arguments and run the merge.
    parser = argparse.ArgumentParser(
        prog="mergeBedGraphs.py",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description=textwrap.dedent("""\
            Merge BED graphs produced by bismark_SE/PE.sh and
            bamToBedGraph.sh and a sample annotation file into
            a single long-format table (for lm() in R).
            NOTE: minCov >= 5, maxCov <= 100, and minGroupCount >= 2 are hard-coded.
            ======================================================
            """),
        epilog=textwrap.dedent("""\
            ======================================================
            Format of the sampleTable:
            <required columns>
            * sampleID
            * bedFile
            * group (should be the smallest grouping possible)
            <optional columns>
            * other factors which will be used in the model later on
            <example>
            sampleID,bedFile,group,organ,sex
            lm1,lm1.bed,liver_male,liver,male
            lm2,lm2.bed,liver_male,liver,male
            lm3,lm3.bed,liver_male,liver,male
            lf1,lf1.bed,liver_female,liver,female
            lf2,lf2.bed,liver_female,liver,female
            lf3,lf3.bed,liver_female,liver,female
            hm1,hm1.bed,heart_male,heart,male
            hm2,hm2.bed,heart_male,heart,male
            hm3,hm3.bed,heart_male,heart,male
            hf1,hf1.bed,heart_female,heart,female
            hf2,hf2.bed,heart_female,heart,female
            hf3,hf3.bed,heart_female,heart,female
            Notes:
            * group = <organ>_<sex>
            * the bed files do not have to contain the sampleID,
            but you should provide the full path
            ======================================================
            Acknowledgements:
            I would like to thank Önder Kartal. Substantial parts
            of this script are taken from him.
            See: gitlab.com/okartal/shannon
            """))
    # __version__ is defined earlier in this file.
    parser.add_argument("-v", "--version", action="version",
                        version='%(prog)s {0}'.format(__version__))
    # Positional arguments.
    parser.add_argument("chromosome", metavar="chromOrFile", type=str,
                        help="""
                        Chromosome name or path to a file with several
                        chromosome names (one per line).
                        """)
    parser.add_argument("nucleotideContext", metavar="ctxt", type=str,
                        help="Nucleotide context (CG, CHG, CHH, FILE).")
    parser.add_argument("sampleTable", type=str,
                        help="""
                        A csv table with metadata for each sample/record.
                        Lines starting with # are ignored. The first line
                        is interpreted as the header. Details and example below.
                        """)
    parser.add_argument("outputFile", type=str,
                        help="Name of the output file.")
    # Optional arguments.
    parser.add_argument("-q", "--query", metavar='"STR"', type=str, required=False,
                        help="""
                        Query expression to select a subset of samples. The
                        expression has to be in double quotes. Examples: "organ ==
                        'heart'", "age >= 32".
                        """)
    parser.add_argument("-b", "--bedFileType", metavar='[MethAn_bismark]', type=str, required=False,
                        default="MethAn_bismark",
                        help="""
                        Describe the type of the bed file. Only MethAn_bismark
                        is currently implemented (default).
                        """)
    args = parser.parse_args()
    # mergeBedGraphs is defined earlier in this file.
    mergeBedGraphs(sampleTableFile=args.sampleTable,
                   chrom=args.chromosome,
                   ctxt=args.nucleotideContext,
                   outfileName=args.outputFile,
                   query=args.query,
                   bedFileType=args.bedFileType)
|
import sys
from .. import SetWallpaper
class DarwinSetWallpaper(SetWallpaper):
    """Set the desktop wallpaper on macOS by scripting the Finder."""

    @staticmethod
    def platform_check(config):
        # Active only on macOS ('darwin').
        return sys.platform == 'darwin'

    @staticmethod
    def set(config):
        """Ask Finder (via osascript) to use config['wallpaper'] as the
        desktop picture.  Fire-and-forget: the subprocess is not waited on.
        """
        import subprocess
        # Escape backslashes and double quotes so the path cannot break
        # out of the AppleScript string literal.
        path = config['wallpaper'].replace('\\', '\\\\').replace('"', '\\"')
        script = ('tell application "Finder"\n'
                  '    set desktop picture to POSIX file "%s"\n'
                  'end tell' % path)
        # Invoke osascript directly with an argument list (no shell) to
        # avoid shell injection through the wallpaper path.
        subprocess.Popen(['/usr/bin/osascript', '-e', script])
|
#!/usr/bin/env python
#ATTENTION: DO NOT MODIFY THIS CODE WITHOUT FIRST CONSULTING CARTER SHEAN
#(at least until Bruin 2 is finished)
#-----------------------------------------------------------------
#Python file for managing the GUI for the Bruin 2 Robot
#using PyQT5 modules and QT designer paired with the Pyuic command
#-----------------------------------------------------------------
#import the necessary modules
import sys
import os
import rospy
import rospkg
import std_msgs.msg
import subprocess
#TODO: I think we need to use the screeninfo module to move the robot (since QT's positioning is based on pixels and pixels are relative to the
#monitor, I thought moving the robot locally would need to be based on the pixels of each individual screen)
#from screeninfo import get_monitors
from compass.msg import CompassDataMsg
from state_machine.msg import MsgsForGUI
from master_gui.msg import GUImsg
from roboteq_msgs.msg import Command
from sensor_msgs.msg import NavSatFix
from PyQt5 import QtCore, QtWidgets, QtGui
#create absolute paths to resources and code folders
resource_path = os.path.join(rospkg.RosPack().get_path('master_gui'), 'resource')
src_path = os.path.join(rospkg.RosPack().get_path('master_gui'), 'src', "master_gui")
#auto generate the files we are trying to import from
os.system("pyuic5 -o " + os.path.join(src_path, "bruin2.py") + " " + os.path.join(resource_path, "bruin2.ui") )
os.system("pyuic5 -o " + os.path.join(src_path, "bruin2state.py") + " " + os.path.join(resource_path, "bruin2state.ui"))
os.system("pyuic5 -o " + os.path.join(src_path, "gostopscreen.py") + " " + os.path.join(resource_path, "gostopscreen.ui"))
#import the information we need from those files
from bruin2 import *
from bruin2state import *
from gostopscreen import *
#global variables shared between the ROS callbacks and the Qt windows
debuggingWindowText = ""       # accumulated log text shown in DebuggingInfo
currentStation = "None"        # name of the station currently selected in the GUI
x = 0                          # robot x position (set from GPS longitude)
y = 0                          # robot y position (set from GPS latitude)
turnDirection = ""             # last steering direction (set by SteerCallBack; currently unused)
direction = ""                 # compass heading as a string
coordinates = ""               # unused placeholder for a combined coordinate string
currentRobotState = "None"     # state name reported by the state machine
goToStation = False            # True once the user confirms the "go" button
#Constants (PYTHON DOES NOT ACTUALLY HAVE CONSTANTS
#SO BE CAREFUL ABOUT CHANGING THESE)
CONST_CLOCK_SPEED = 100        # timer period in milliseconds for all GUI timers
#class that intializes the GUI and handles events for the Main Window
class MainForm(QtWidgets.QMainWindow):
    """Main GUI window for the Bruin 2 robot.

    Shows the map with the robot marker and target buttons, publishes the
    currently selected station to the state machine, and mirrors ROS
    telemetry (compass, GPS, state machine) into the GUI and the global
    debugging text.
    """

    def startGUImoving(self):
        """Start the timer that periodically redraws the robot marker."""
        global CONST_CLOCK_SPEED
        self.moveRobotTimer.start(CONST_CLOCK_SPEED)

    def debugButtonPressEvent(self, event):
        """creates a new debugging window when
        the debug button is pressed"""
        if self.debuggingwindow is None:
            self.debuggingwindow = DebuggingInfo(self)
        self.debuggingwindow.show()

    def targetButton1PressEvent(self, event):
        """creates a new stop/go window when the first target button
        is clicked (or a similar signal was recieved from the combo box)"""
        global currentStation
        currentStation = "station1"
        #if the window is not already open, open a new window
        if self.stopgowindow is None:
            self.stopgowindow = StopGo(self)
        self.stopgowindow.show()

    def targetButton2PressEvent(self, event):
        """creates a new stop/go window when the second target button
        is clicked (or a similar signal was recieved from the combo box)"""
        global currentStation
        currentStation = "station2"
        # if the window is not already open, open a new window
        if self.stopgowindow is None:
            self.stopgowindow = StopGo(self)
        self.stopgowindow.show()

    def comboBoxPressEvent(self, event):
        """handle the text changing in the combo box, opens the
        corresponding target button stop/go window"""
        # BUGFIX: the original compared strings with 'is' (identity),
        # which is unreliable for runtime strings, and assigned
        # currentStation without declaring it global (a dead local).
        global currentStation
        if self.ui.cB_StationNum.currentText() == "None":
            currentStation = "None"
        elif self.ui.cB_StationNum.currentText() == "station1":
            self.targetButton1PressEvent(self)
        else:
            self.targetButton2PressEvent(self)

    def outputState(self):
        """publishes and logs to ros what state we are in,
        ticks every CONST_CLOCK_SPEED milliseconds"""
        self.msg.state = currentStation
        self.msg.goToNextState = str(goToStation)
        #rospy.loginfo(self.msg)
        self.pub.publish(self.msg)

    def moveGUI(self):
        """updates the GUI Robot object every time the timer
        hits CONST_CLOCK_SPEED milliseconds per second using the
        global X and Y variables"""
        #TODO: set the geometry equal to the values the GPS is giving out
        #adapted for the pixels on screen (will this value need to change based
        #on what the screen resolution is?)
        # NOTE(review): x/y are rebound to strings by GPSCallBack; QRect
        # expects ints — confirm before enabling this timer.
        self.bruin2.setGeometry(QtCore.QRect(x, y, 30, 30))
        # Query the screen resolution via xrandr (result currently unused).
        cmd = ['xrandr']
        cmd2 = ['grep', '*']
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        p2 = subprocess.Popen(cmd2, stdin=p.stdout, stdout=subprocess.PIPE)
        p.stdout.close()
        resolution_string, junk = p2.communicate()
        resolution = resolution_string.split()[0]
        width, height = resolution.split('x')
        print("unimplemented")

    def CompassCallBack(self, data):
        """Handles the call back from the compass,
        extracting the heading and adding this information to
        the text of the debugging window"""
        global direction
        direction = str(data.heading)
        #rospy.loginfo("%s" % (data.heading))
        #this is probably not necessary here
        global debuggingWindowText
        debuggingWindowText += str(data.heading) + "\n"

    def SteerCallBack(self, data):
        """unimplemented method that gets the current turn direction
        of the robot and logs it"""
        global turnDirection
        #rospy.loginfo("%s" % (data))

    def StateMachineCallBack(self, data):
        """handles a call from the state machine's output
        as to which state we're in and sets that state name to
        the corresponding state"""
        global currentRobotState
        #we get the message in the form 'current data: data_type'
        #that needs split, done below
        discard, currentRobotState = str(data).split(':')
        # BUGFIX: str.strip() returns a new string; the original call
        # discarded the result, leaving whitespace in the label text.
        currentRobotState = currentRobotState.strip()
        self.ui.lbl_StateType.setText(currentRobotState)
        #rospy.loginfo("%s" % (data))
        global debuggingWindowText
        debuggingWindowText += currentRobotState + "\n"

    def GPSCallBack(self, data):
        """handles the GPS's data output message,
        TODO: may need commented out whenever
        the GPS is not connected"""
        #rospy.loginfo("%s" % (data))
        global x
        global y
        x = str(data.longitude)
        y = str(data.latitude)
        self.ui.lbl_lattitudeNum.setText(y)
        self.ui.lbl_longitudeNum.setText(x)
        global debuggingWindowText
        debuggingWindowText += "coordinates: " + x + "," + y + "\n"

    def InitializeButtons(self):
        """set up the objects on the map (targets, bruin2)
        with scaled contents, desired text/pixmap and shape
        IMPORTANT NOTE: these will need changed every time the robot or targets change
        locations"""
        # Robot marker.
        self.bruin2 = QtWidgets.QLabel(self)
        self.bruin2.setGeometry(QtCore.QRect(350, 350, 30, 30))
        self.bruin2.setPixmap(QtGui.QPixmap(os.path.join(resource_path, "c_loc.png")))
        self.bruin2.setScaledContents(True)
        self.bruin2.setObjectName("lbl_bruin2")
        self.bruin2.show()
        # Target station buttons (positions are hard-coded map pixels).
        self.target1 = QtWidgets.QPushButton(self)
        self.target1.setGeometry(QtCore.QRect(230, 300, 25, 25))
        self.target1.setObjectName("target1")
        self.target1.setText("1")
        self.target1.setStyleSheet("background-color: rgb(255, 0, 0);")
        self.target2 = QtWidgets.QPushButton(self)
        self.target2.setGeometry(QtCore.QRect(200, 250, 25, 25))
        self.target2.setObjectName("target2")
        self.target2.setText("2")
        self.target2.setStyleSheet("background-color: rgb(255, 0, 0);")

    def __init__(self, parent=None):
        """Constructor for Mainwindow (possibly split into
        separate method?)"""
        QtWidgets.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        #call the initialize buttons method which creates the robot button
        #and target buttons
        self.InitializeButtons()
        #set up the publisher of the state to be sent to the state machine
        rospy.init_node('GUIIO', anonymous=True)
        self.pub = rospy.Publisher('GUIData', GUImsg, queue_size=10)
        self.msg = GUImsg()
        self.msg.state = currentStation
        #set a timer to update which station we're going to every CONST_CLOCK_SPEED
        self.stateTimer = QtCore.QTimer()
        self.stateTimer.timeout.connect(self.outputState)
        self.moveRobotTimer = QtCore.QTimer()
        self.moveRobotTimer.timeout.connect(self.moveGUI)
        global CONST_CLOCK_SPEED
        self.stateTimer.start(CONST_CLOCK_SPEED)
        #initialize the listeners to the various things we need to pull data from
        rospy.Subscriber("CompassData", CompassDataMsg, self.CompassCallBack)
        rospy.Subscriber("CurrentState", MsgsForGUI, self.StateMachineCallBack)
        rospy.Subscriber("steer/cmd", Command, self.SteerCallBack)
        rospy.Subscriber("GPSData", NavSatFix, self.GPSCallBack)
        #initialize the pop up windows to None (will be initialized when a button is clicked)
        self.debuggingwindow = None
        self.stopgowindow = None
        #connect the signals of the button presses to their respective events
        self.ui.btn_debug.clicked.connect(self.debugButtonPressEvent)
        self.target1.clicked.connect(self.targetButton1PressEvent)
        self.target2.clicked.connect(self.targetButton2PressEvent)
        self.ui.cB_StationNum.currentIndexChanged.connect(self.comboBoxPressEvent)
        #setup map's picture using the pixmap feature
        self.ui.lbl_map.setPixmap(QtGui.QPixmap(os.path.join(resource_path, "map.png")))
#class that intializes and contains logic for the debugging window
class DebuggingInfo(QtWidgets.QMainWindow):
    """Pop-up window that periodically displays the global debug text."""

    def __init__(self, parent=None):
        """Build the window and wire a timer that refreshes the debug
        label; the timer is armed when the log button is pressed."""
        QtWidgets.QWidget.__init__(self, parent)
        self.ui = Ui_DebuggingInfo()
        self.ui.setupUi(self)
        self.show()
        self.logFileTimer = QtCore.QTimer()
        self.logFileTimer.timeout.connect(self.timerHit)
        self.ui.btn_log.clicked.connect(self.timerStart)

    def timerStart(self, event):
        """Begin polling the ROS-fed debug text: fires timerHit every
        CONST_CLOCK_SPEED milliseconds once the log button is pressed."""
        global CONST_CLOCK_SPEED
        self.logFileTimer.start(CONST_CLOCK_SPEED)

    def timerHit(self):
        """Refresh the label with the accumulated global debug text."""
        global debuggingWindowText
        self.ui.label.setText(debuggingWindowText)
#class that contains the logic for stop/go on the robot itself
class StopGo(QtWidgets.QMainWindow):
    """Confirmation window that shows live position/heading data and lets
    the user confirm driving to the currently selected station."""

    def __init__(self, parent=None):
        """Build the window, seed the position labels from the current
        globals, and start the refresh timer."""
        QtWidgets.QWidget.__init__(self, parent)
        self.ui = Ui_Form()
        self.ui.setupUi(self)
        self.timer = QtCore.QTimer()
        self.ui.btn_go.setEnabled(True)
        self.timer.timeout.connect(self.timerHit)
        self.ui.lbl_latittudedata.setText(str(y))
        self.ui.lbl_longitudedata.setText(str(x))
        global CONST_CLOCK_SPEED
        self.timer.start(CONST_CLOCK_SPEED)
        self.ui.btn_go.clicked.connect(self.goButtonPress)

    def timerHit(self):
        """Refresh the displayed position and heading from the globals
        maintained by the ROS callbacks."""
        #TODO: stil need the turn info and speed
        global direction, x, y
        self.ui.lbl_latittudedata.setText(str(y))
        self.ui.lbl_longitudedata.setText(str(x))
        self.ui.lbl_directiondata.setText(direction)

    def goButtonPress(self):
        """Disable the go button and flag to the state machine that the
        user confirmed driving to the selected target."""
        #TODO: Renable this after the user closes the window
        self.ui.btn_go.setEnabled(False)
        global goToStation
        goToStation = True
if __name__ == '__main__':
    """create a new window and display it until the user
    closes the window"""
    # NOTE(review): 'global' at module scope is a no-op; kept as-is.
    global mainwin
    app = QtWidgets.QApplication(sys.argv)
    mainwin = MainForm()
    mainwin.show()
    # exec_() blocks until the Qt event loop exits.
    sys.exit(app.exec_())
|
"""
申万行业指数:训练数据生成
"""
import urllib.request
import json
import re as regex
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from earnmi.chart.KPattern2 import KPattern2
from earnmi.data.SWImpl import SWImpl
from earnmi.chart.Indicator import Indicator
def generateSWTrainData(kPatterns: list, start: datetime, end: datetime) -> pd.DataFrame:
    """Build a training DataFrame for the SW (Shenwan) industry indexes.

    For every daily bar whose preceding K-line pattern encoding is in
    *kPatterns*, one row is emitted containing the previous day's KDJ and
    MACD indicator values plus the current bar's open/short/long returns
    (in percent, relative to the previous close).

    :param kPatterns: encoded K-line pattern values to keep
    :param start: first trading day (inclusive)
    :param end: last trading day (inclusive)
    :return: DataFrame with columns
        code,name,kPattern,k,d,dif,dea,macd,open,short,long
    """
    sw = SWImpl()
    codes = sw.getSW2List()
    columns = ["code", "name", "kPattern", "k", "d", "dif", "dea", "macd", "open", "short", "long"]
    datas = []
    # Set membership is the idiomatic (and O(1)) replacement for the
    # original dict-of-True plus __contains__ call.
    wantedPatterns = set(kPatterns)
    macd_list = []
    for code in codes:
        name = sw.getSw2Name(code)
        barList = sw.getSW2Daily(code, start, end)
        indicator = Indicator(34)
        preBar = None
        for bar in barList:
            # First identify the K-line pattern formed before this bar.
            kEncodeValue = None
            if indicator.inited:
                tmpKEncodeValue = KPattern2.encode3KAgo1(indicator)
                if tmpKEncodeValue in wantedPatterns:
                    kEncodeValue = tmpKEncodeValue
            if kEncodeValue is None:
                indicator.update_bar(bar)
                preBar = bar
                continue
            # Yesterday's KDJ and MACD values.
            k, d, j = indicator.kdj(array=False)
            dif, dea, macd = indicator.macd(fast_period=12, slow_period=26, signal_period=9, array=False)
            # Next-day returns, in percent of the previous close.
            short_pct = 100 * ((bar.high_price + bar.close_price) / 2 - preBar.close_price) / preBar.close_price
            long_pct = 100 * ((bar.low_price + bar.close_price) / 2 - preBar.close_price) / preBar.close_price
            open_pct = 100 * (bar.open_price - preBar.close_price) / preBar.close_price
            datas.append([code, name, kEncodeValue, k, d, dif, dea, macd,
                          open_pct, short_pct, long_pct])
            macd_list.append(macd)
            indicator.update_bar(bar)
            preBar = bar
    macd_list = np.array(macd_list)
    print(f"total size : {len(datas)},mean ={macd_list.mean()},max={macd_list.max()},min={macd_list.min()}")
    return pd.DataFrame(datas, columns=columns)
if __name__ == "__main__":
    ## Data starts 2014-06-30 (original note: start time 2014-6-30).
    start = datetime(2014, 5, 1)
    end = datetime(2020, 8, 17)
    # Full candidate pattern list kept for reference; it is immediately
    # overridden below to generate the sample for pattern 33214 only.
    kPattrns = [33214, 33134, 33297, 39858, 33296, 39775, 33135, 26736, 39857, 39777, 33133, 46418, 39776, 33215, 33216, 39856, 39694, 26573, 33213, 39696, 33377, 33295, 26735, 39938, 46337, 33217, 46338, 39695, 26734, 26574, 33053]
    kPattrns = [33214]
    writer=pd.ExcelWriter('files/sw_train_data_sample_33214.xlsx')
    sampld_data_df = generateSWTrainData(kPattrns,start,end)
    sampld_data_df.to_excel(writer,sheet_name="sample",index=False)
    writer.save()
    writer.close()
'''
Created on 2017年7月16日
@author: jack
'''
# encoding=utf-8
import smtplib
from threading import Timer
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from Tools import MyDataBase
class MailSender():
    """Fetch the latest post from the local database and mail it to a
    list of recipients via the 126.com SMTP server."""

    def __init__(self):
        # NOTE(review/security): SMTP credentials are hard-coded here —
        # they should be moved to configuration or environment variables.
        self.smtpServer = "smtp.126.com"
        self.user = "z6185091@126.com"
        self.pwd = "zz6691978"
        self.fromAdddress = "z6185091@126.com"
        self.toAddresses = []   # recipient addresses, set via setToAddresses()
        self.msgs = []          # message bodies, filled by getMsgs()

    def setToAddresses(self, toAddresses):
        # Replace (not extend) the recipient list.
        self.toAddresses = toAddresses

    def getMsgs(self):
        """Load the newest post from the 'qiushibaike' table into self.msgs."""
        # NOTE(review): the connection/cursor are never closed — presumably
        # MyDataBase handles cleanup; verify.
        db = MyDataBase.MyDataBase(database='rubbish_letter')
        conn = db.getConn()
        executer = db.getExcuter()
        executer.execute('select content from qiushibaike order by id desc limit 1')
        for i in executer.fetchall():
            self.msgs.append(i[0])

    def sendmail(self,):
        """Send every queued message to every recipient, then disconnect."""
        s = smtplib.SMTP()
        s.connect(self.smtpServer)  # connect to the SMTP server
        s.login(self.user, self.pwd)  # log in to the mailbox
        # Send each message to each address in the list.
        for toAddress in self.toAddresses:
            for msg in self.msgs:
                mail_msg = MIMEMultipart()
                mail_msg['Subject'] = "不好意思,打扰了"
                mail_msg['From'] = self.fromAdddress
                # NOTE(review): if toAddress is a single string, join()
                # concatenates its characters — confirm the intended type.
                mail_msg['To'] = ','.join(toAddress)
                mail_msg.attach(MIMEText(msg, 'html', 'utf-8'))
                s.sendmail(self.fromAdddress, toAddress, mail_msg.as_string())  # send the mail
        s.quit()
if __name__ == '__main__':
    # Manual smoke test: mail the latest post to a single address.
    t = MailSender()
    t.setToAddresses(['z6185091@126.com'])
    t.getMsgs()
    t.sendmail()
|
import sys
import getopt
import os
import operator
from math import log
from collections import defaultdict
class NaiveBayes:
    """Naive Bayes sentiment classifier (Python 2).

    Supports three modes, chosen by boolean flags set by the caller:
      * default        -- multinomial naive Bayes with add-2 smoothing
      * naiveBayesBool -- binarized counts (each word counted once per doc)
      * bestModel      -- bigram interpolation plus negation ('NOT_' prefix)
                          and bonus-word weighting
    Optionally filters stop words when stopWordsFilter is True.
    """

    class TrainSplit:
        """
        Set of training and testing data
        """
        def __init__(self):
            self.train = []
            self.test = []

    class Document:
        """
        This class represents a document with a label. classifier is 'pos' or 'neg' while words is a list of strings.
        """
        def __init__(self):
            self.classifier = ''
            self.words = []

    def __init__(self):
        """
        Initialization of naive bayes
        """
        self.stopList = set(self.readFile('data/english.stop'))
        self.bestModel = False
        self.stopWordsFilter = False
        self.naiveBayesBool = False
        self.numFolds = 10
        #Custom data structures for training and clasifying
        self.posDocCount = 0.0
        self.totalDocCount = 0.0
        self.posFrequency = defaultdict(lambda: 2.0) #Laplace smoothing
        self.negFrequency = defaultdict(lambda: 2.0)
        self.posWordCount = 0.0
        self.negWordCount = 0.0
        self.posWordSet = set()
        self.negWordSet = set()
        #Data structures for highly optimized model
        self.posBigram = defaultdict(lambda: defaultdict(lambda: 1.0)) #Bigram counts and interpolation
        self.negBigram = defaultdict(lambda: defaultdict(lambda: 1.0))
        self.interpolation = 0.05
        self.negateWords = ["not", "didn't", "isn't", "no", "never", "didnt", "isnt"] #Negation in best model
        self.punctuation = ['.', ',', '!', '?', '-']
        self.bonusWords = ['very', 'really', 'exceptionally', 'extremely', 'hugely', 'truly'] #Bonus words give more weight to the following word

    def classify(self, words):
        """
        Classify a list of words and return a positive or negative sentiment
        """
        if self.stopWordsFilter:
            words = self.filterStopWords(words)
        #Probability that a document is positive or negative based on ratio of docs in training set
        probPos = -log(self.posDocCount / self.totalDocCount)
        probNeg = -log( (self.totalDocCount - self.posDocCount) / self.totalDocCount)
        # Vocabulary size used in the smoothing denominator differs per mode.
        vocab = len(self.posWordSet.union(self.negWordSet))
        if self.naiveBayesBool:
            vocab = self.posWordCount + self.negWordCount
        elif self.bestModel:
            vocab = len(self.posWordSet) + len(self.negWordSet)
        negateFlag = False
        bonus = False
        prevWord = '__START__' #Start of sentence in bigram
        for word in words:
            pos = self.posFrequency[word]
            neg = self.negFrequency[word]
            if self.bestModel: #Negate in best model
                if word in self.negateWords:
                    negateFlag = True
                if negateFlag and word in self.punctuation:
                    negateFlag = False
                if negateFlag:
                    word = 'NOT_' + word
                #Implement weighted interpolation for bigram
                pos = (self.interpolation * self.posFrequency[word] + (1 - self.interpolation) * self.posBigram[prevWord][word])
                neg = (self.interpolation * self.negFrequency[word] + (1 - self.interpolation) * self.negBigram[prevWord][word])
            #Calculate and add probability of each word given the category
            probPosWord = -log( pos / (self.posWordCount + 2 * vocab) )
            probNegWord = -log( neg / (self.negWordCount + 2 * vocab) )
            if self.bestModel: #Increase weight of any word immediately following very, really, etc
                if bonus:
                    if probPosWord < probNegWord:
                        probPosWord *= 0.9
                    else:
                        probNegWord *= 0.9
                    bonus = False
                if word in self.bonusWords:
                    bonus = True
            probPos += probPosWord
            probNeg += probNegWord
            prevWord = word
        #Smallest is most likely because we are adding -log
        if probPos < probNeg:
            return 'pos'
        else:
            return 'neg'

    def addDocument(self, classifier, words):
        """
        Train your model on a document with label classifier (pos or neg) and words (list of strings). You should
        store any structures for your classifier in the naive bayes class. This function will return nothing
        """
        if classifier == 'pos':
            self.posDocCount += 1
        self.totalDocCount += 1
        docSet = set() #Avoid duplicates in binary naive bayes
        negateFlag = False #Negate in best model
        prevWord = '__START__' #Start of sentence in bigram model
        for word in words:
            if self.bestModel: #Implement negation in best model
                if word in self.negateWords:
                    negateFlag = True
                if negateFlag and word in self.punctuation:
                    negateFlag = False
                if negateFlag:
                    word = 'NOT_' + word
            if classifier == 'pos':
                if self.naiveBayesBool:
                    if word not in docSet: #Check for duplicates
                        self.posWordCount += 1
                        docSet.add(word)
                        self.posFrequency[word] += 1
                else:
                    self.posWordCount += 1
                    self.posFrequency[word] += 1
                    if self.bestModel: #Keep track of bigram counts
                        self.posBigram[prevWord][word] += 1
                self.posWordSet.add(word)
            else: #classifier is 'neg'
                if self.naiveBayesBool:
                    if word not in docSet:
                        self.negWordCount +=1
                        docSet.add(word)
                        self.negFrequency[word] += 1
                else:
                    self.negWordCount += 1
                    self.negFrequency[word] += 1
                    if self.bestModel:
                        self.negBigram[prevWord][word] += 1
                self.negWordSet.add(word)
            prevWord = word

    def readFile(self, fileName):
        """
        Reads a file and segments.
        """
        contents = []
        f = open(fileName)
        for line in f:
            contents.append(line)
        f.close()
        # NOTE(review): 'str' shadows the builtin here (local scope only).
        str = '\n'.join(contents)
        result = str.split()
        return result

    def trainSplit(self, trainDir):
        """Takes in a trainDir, returns one TrainSplit with train set."""
        split = self.TrainSplit()
        posDocTrain = os.listdir('%s/pos/' % trainDir)
        negDocTrain = os.listdir('%s/neg/' % trainDir)
        for fileName in posDocTrain:
            doc = self.Document()
            doc.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
            doc.classifier = 'pos'
            split.train.append(doc)
        for fileName in negDocTrain:
            doc = self.Document()
            doc.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
            doc.classifier = 'neg'
            split.train.append(doc)
        return split

    def train(self, split):
        # Feed every training document into the model counters.
        for doc in split.train:
            words = doc.words
            if self.stopWordsFilter:
                words = self.filterStopWords(words)
            self.addDocument(doc.classifier, words)

    def crossValidationSplits(self, trainDir):
        """Returns a lsit of TrainSplits corresponding to the cross validation splits."""
        # Fold membership is decided by the third character of the file
        # name matching the fold digit.
        splits = []
        posDocTrain = os.listdir('%s/pos/' % trainDir)
        negDocTrain = os.listdir('%s/neg/' % trainDir)
        # for fileName in trainFileNames:
        for fold in range(0, self.numFolds):
            split = self.TrainSplit()
            for fileName in posDocTrain:
                doc = self.Document()
                doc.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
                doc.classifier = 'pos'
                if fileName[2] == str(fold):
                    split.test.append(doc)
                else:
                    split.train.append(doc)
            for fileName in negDocTrain:
                doc = self.Document()
                doc.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
                doc.classifier = 'neg'
                if fileName[2] == str(fold):
                    split.test.append(doc)
                else:
                    split.train.append(doc)
            yield split

    def test(self, split):
        """Returns a list of labels for split.test."""
        labels = []
        for doc in split.test:
            words = doc.words
            if self.stopWordsFilter:
                words = self.filterStopWords(words)
            guess = self.classify(words)
            labels.append(guess)
        return labels

    def buildSplits(self, args):
        """
        Construct the training/test split
        """
        # NOTE(review): the one-arg branch duplicates the logic of
        # crossValidationSplits (eagerly, into a list).
        splits = []
        trainDir = args[0]
        if len(args) == 1:
            print '[INFO]\tOn %d-fold of CV with \t%s' % (self.numFolds, trainDir)
            posDocTrain = os.listdir('%s/pos/' % trainDir)
            negDocTrain = os.listdir('%s/neg/' % trainDir)
            for fold in range(0, self.numFolds):
                split = self.TrainSplit()
                for fileName in posDocTrain:
                    doc = self.Document()
                    doc.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
                    doc.classifier = 'pos'
                    if fileName[2] == str(fold):
                        split.test.append(doc)
                    else:
                        split.train.append(doc)
                for fileName in negDocTrain:
                    doc = self.Document()
                    doc.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
                    doc.classifier = 'neg'
                    if fileName[2] == str(fold):
                        split.test.append(doc)
                    else:
                        split.train.append(doc)
                splits.append(split)
        elif len(args) == 2:
            # Separate train and test directories.
            split = self.TrainSplit()
            testDir = args[1]
            print '[INFO]\tTraining on data set:\t%s testing on data set:\t%s' % (trainDir, testDir)
            posDocTrain = os.listdir('%s/pos/' % trainDir)
            negDocTrain = os.listdir('%s/neg/' % trainDir)
            for fileName in posDocTrain:
                doc = self.Document()
                doc.words = self.readFile('%s/pos/%s' % (trainDir, fileName))
                doc.classifier = 'pos'
                split.train.append(doc)
            for fileName in negDocTrain:
                doc = self.Document()
                doc.words = self.readFile('%s/neg/%s' % (trainDir, fileName))
                doc.classifier = 'neg'
                split.train.append(doc)
            posDocTest = os.listdir('%s/pos/' % testDir)
            negDocTest = os.listdir('%s/neg/' % testDir)
            for fileName in posDocTest:
                doc = self.Document()
                doc.words = self.readFile('%s/pos/%s' % (testDir, fileName))
                doc.classifier = 'pos'
                split.test.append(doc)
            for fileName in negDocTest:
                doc = self.Document()
                doc.words = self.readFile('%s/neg/%s' % (testDir, fileName))
                doc.classifier = 'neg'
                split.test.append(doc)
            splits.append(split)
        return splits

    def filterStopWords(self, words):
        """
        Stop word filter
        """
        removed = []
        for word in words:
            if not word in self.stopList and word.strip() != '':
                removed.append(word)
        return removed
def test10Fold(args, stopWordsFilter, naiveBayesBool, bestModel):
    """Run numFolds-fold cross-validation over args[0] and print the
    per-fold and average accuracy.  A fresh classifier is trained per
    fold; the first NaiveBayes instance only builds the splits."""
    nb = NaiveBayes()
    splits = nb.buildSplits(args)
    avgAccuracy = 0.0
    fold = 0
    for split in splits:
        classifier = NaiveBayes()
        classifier.stopWordsFilter = stopWordsFilter
        classifier.naiveBayesBool = naiveBayesBool
        classifier.bestModel = bestModel
        accuracy = 0.0
        for doc in split.train:
            words = doc.words
            classifier.addDocument(doc.classifier, words)
        for doc in split.test:
            words = doc.words
            guess = classifier.classify(words)
            if doc.classifier == guess:
                accuracy += 1.0
        accuracy = accuracy / len(split.test)
        avgAccuracy += accuracy
        print '[INFO]\tFold %d Accuracy: %f' % (fold, accuracy)
        fold += 1
    avgAccuracy = avgAccuracy / fold
    print '[INFO]\tAccuracy: %f' % avgAccuracy
def classifyFile(stopWordsFilter, naiveBayesBool, bestModel, trainDir, testFilePath):
    """Train on trainDir and print the predicted label ('pos'/'neg') for
    the single document at testFilePath."""
    classifier = NaiveBayes()
    classifier.stopWordsFilter = stopWordsFilter
    classifier.naiveBayesBool = naiveBayesBool
    classifier.bestModel = bestModel
    trainSplit = classifier.trainSplit(trainDir)
    classifier.train(trainSplit)
    testFile = classifier.readFile(testFilePath)
    print classifier.classify(testFile)
def main():
    """Command-line entry point: parse -f/-b/-m flags and dispatch to
    single-file classification or 10-fold cross-validation."""
    stopWordsFilter = False
    naiveBayesBool = False
    bestModel = False
    (options, args) = getopt.getopt(sys.argv[1:], 'fbm')
    # NOTE(review): the elif chain means at most one flag takes effect
    # even when several are passed — presumably intentional; confirm.
    if ('-f', '') in options:
        stopWordsFilter = True
    elif ('-b', '') in options:
        naiveBayesBool = True
    elif ('-m', '') in options:
        bestModel = True
    if len(args) == 2 and os.path.isfile(args[1]):
        classifyFile(stopWordsFilter, naiveBayesBool, bestModel, args[0], args[1])
    else:
        test10Fold(args, stopWordsFilter, naiveBayesBool, bestModel)
if __name__ == "__main__":
    main()
|
# http://stackoverflow.com/questions/12507274/how-to-get-bounds-of-a-google-static-map
import math
MERCATOR_RANGE = 256
def bound(value, opt_min, opt_max):
    """Clamp *value* into [opt_min, opt_max]; either bound may be None
    to leave that side unconstrained."""
    if opt_min is not None and value < opt_min:
        value = opt_min
    if opt_max is not None and value > opt_max:
        value = opt_max
    return value
def degrees_to_radians(deg):
    """Convert an angle from degrees to radians."""
    radians_per_degree = math.pi / 180
    return deg * radians_per_degree
def radians_to_degrees(rad):
    """Convert an angle from radians to degrees."""
    radians_per_degree = math.pi / 180
    return rad / radians_per_degree
class Point:
    """A 2-D pixel coordinate with integer-formatted string forms."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __repr__(self):
        # %d truncates float coordinates in the printed form.
        return "Point(%d,%d)" % (self.x, self.y)

    def __str__(self):
        return "(x=%d,y=%d)" % (self.x, self.y)
class LatLng:
    """A geographic coordinate (latitude/longitude in degrees)."""

    def __init__(self, lt, ln):
        self.lat = lt
        self.lng = ln

    def __repr__(self):
        return "LatLng(%g,%g)" % (self.lat, self.lng)

    def __str__(self):
        return "(lat=%g,lng=%g)" % (self.lat, self.lng)
class MercatorProjection:
    """Spherical (web) Mercator conversion between geographic coordinates
    and zoom-0 'world' pixel coordinates."""

    def __init__(self):
        half_range = int(MERCATOR_RANGE / 2.0)
        self.pixelOrigin_ = Point(half_range, half_range)
        self.pixelsPerLonDegree_ = MERCATOR_RANGE / 360.0
        self.pixelsPerLonRadian_ = MERCATOR_RANGE / (2.0 * math.pi)

    def from_latlng_to_point(self, latlng, opt_point=None):
        """Project *latlng* to world-pixel space, writing into *opt_point*
        when one is supplied (a new Point is created otherwise)."""
        point = Point(0, 0) if opt_point is None else opt_point
        origin = self.pixelOrigin_
        point.x = origin.x + latlng.lng * self.pixelsPerLonDegree_
        # Clamping sin(lat) to +/-0.9999 limits latitude to ~89.189 deg —
        # about a third of a tile past the edge of the world tile.
        siny = bound(math.sin(degrees_to_radians(latlng.lat)), -0.9999, 0.9999)
        point.y = origin.y + 0.5 * math.log((1 + siny) / (1.0 - siny)) * -self.pixelsPerLonRadian_
        return point

    def from_point_to_latlng(self, point):
        """Inverse projection: world pixels back to latitude/longitude."""
        origin = self.pixelOrigin_
        lng = (point.x - origin.x) / self.pixelsPerLonDegree_
        lat_radians = (point.y - origin.y) / -self.pixelsPerLonRadian_
        lat = radians_to_degrees(2.0 * math.atan(math.exp(lat_radians)) - math.pi / 2.0)
        return LatLng(lat, lng)
def get_point(point, center, zoom, mapwidth, mapheight):
    """Pixel position of *point* on a mapwidth x mapheight image that is
    centered at *center* and rendered at the given *zoom*."""
    scale = 2.0 ** zoom
    proj = MercatorProjection()
    # World-pixel coordinates of both locations, scaled to this zoom.
    center_world = proj.from_latlng_to_point(center)
    subject_world = proj.from_latlng_to_point(point)
    cx = center_world.x * scale
    cy = center_world.y * scale
    sx = subject_world.x * scale
    sy = subject_world.y * scale
    # Offset from the image center.
    return Point((sx - cx) + mapwidth / 2.0, (sy - cy) + mapheight / 2.0)
def get_corners(center, zoom, mapwidth, mapheight):
    """Return the N/E/S/W bounds (in degrees) of a mapwidth x mapheight
    image centred on *center* at *zoom*."""
    proj = MercatorProjection()
    scale = 2.0 ** zoom
    center_px = proj.from_latlng_to_point(center)
    half_w = (mapwidth / 2.0) / scale
    half_h = (mapheight / 2.0) / scale
    # South-west corner is left/below the centre; north-east is right/above
    # (pixel y grows downward).
    sw_lat_lon = proj.from_point_to_latlng(Point(center_px.x - half_w, center_px.y + half_h))
    ne_lat_lon = proj.from_point_to_latlng(Point(center_px.x + half_w, center_px.y - half_h))
    return {'N': ne_lat_lon.lat, 'E': ne_lat_lon.lng, 'S': sw_lat_lon.lat, 'W': sw_lat_lon.lng, }
# https://wiki.openstreetmap.org/wiki/Slippy_map_tilenames
def get_tile_xy(latlng, zoom):
    """Return the fractional slippy-map tile coordinates of *latlng* at
    *zoom*, using the standard OSM tile-name formulas."""
    n = 2.0 ** zoom
    lat_rad = math.radians(latlng.lat)
    tile_x = (latlng.lng + 180.0) / 360.0 * n
    tile_y = ((1.0 - math.log(math.tan(lat_rad) + (1 / math.cos(lat_rad))) / math.pi) / 2.0 * n)
    return {'X': tile_x, 'Y': tile_y}
|
"""
Commonly-used queries
"""
from sqlalchemy.exc import SQLAlchemyError
from setup import Category, Item
import bleach
def getCategories(session):
    """
    Retrieve all categories.
    :param session: (DBSession) SQLAlchemy session
    :return:
        List of Category objects sorted by name, or False on a
        database error.
    """
    try:
        return session.query(Category).order_by(Category.name).all()
    except SQLAlchemyError:
        return False
def getItems(session):
    """
    Retrieve all items.
    :param session: (DBSession) SQLAlchemy session
    :return:
        List of Item objects from the greatest to lowest id, or False
        on a database error.
    """
    # Join condition keeps only items whose category actually exists.
    query = (session.query(Item)
             .filter(Item.category_id == Category.id)
             .order_by(Item.id.desc()))
    try:
        return query.all()
    except SQLAlchemyError:
        return False
def getCategory(category_name, session):
    """
    Retrieve a category based on category name
    :param category_name: (string)
    :param session: (DBSession) SQLAlchemy session
    :return:
        Category object, or False if the lookup fails.
    """
    # Sanitise untrusted input before using it in the query.
    clean_name = bleach.clean(category_name)
    try:
        return session.query(Category).filter_by(name=clean_name).one()
    except SQLAlchemyError:
        return False
def getCategoryItems(category_id, session):
    """
    Retrieve a category's items based on category id.
    :param category_id: (integer)
    :param session: (DBSession) SQLAlchemy session
    :return:
        List of Item objects (newest id first), or False on a
        database error.
    """
    query = (session.query(Item)
             .filter_by(category_id=category_id)
             .filter(Item.category_id == Category.id)
             .order_by(Item.id.desc()))
    try:
        return query.all()
    except SQLAlchemyError:
        return False
def getItem(category_id, item_name, session):
    """
    Retrieve item based on category id and item name.
    :param category_id: (integer) Category.id
    :param item_name: (string) Item.name
    :param session: (DBSession) SQLAlchemy session
    :return:
        Item object, or False if the lookup fails.
    """
    # Item names are stored lower-cased; sanitise the untrusted input too.
    clean_name = bleach.clean(item_name.lower())
    try:
        return (session.query(Item)
                .filter_by(category_id=category_id, name=clean_name)
                .one())
    except SQLAlchemyError:
        return False
|
from selenium.webdriver.common.by import By
# --- Contacts feature locators ---
add_contacts_button = By.ID, "com.android.contacts:id/floating_action_button"
input_name = By.XPATH, "//*[@text='姓名']"
input_phone_number = By.XPATH, "//*[@text='电话']"
# --- SMS feature locators ---
new_message = By.ID, "com.android.mms:id/action_compose_new"
message_recipient = By.XPATH, "//*[@text='接收者']"
message_text = By.XPATH, "//*[@text='键入信息']"
message_send = By.ID, "com.android.mms:id/send_button_sms"
|
from pyrefinebio import (
annotation as prb_annotation,
computed_file as prb_computed_file,
processor as prb_processor,
transcriptome_index as prb_transcriptome_index,
)
from pyrefinebio.api_interface import get_by_endpoint
from pyrefinebio.base import Base
from pyrefinebio.util import create_paginated_list, parse_date
class ComputationalResult(Base):
    """Computational Result

    Retrieve a ComputationalResult based on id

        >>> import pyrefinebio
        >>> id = 1
        >>> result = pyrefinebio.ComputationalResult.get(id)

    Retrieve a list of ComputationalResult based on filters

        >>> import pyrefinebio
        >>> results = pyrefinebio.ComputationalResult.search(processor_id=4)
    """
    def __init__(
        self,
        id=None,
        commands=None,
        processor=None,
        is_ccdl=None,
        annotations=None,
        files=None,
        organism_index=None,
        time_start=None,
        time_end=None,
        created_at=None,
        last_modified=None,
    ):
        super().__init__(identifier=id)
        self.id = id
        self.commands = commands
        self.is_ccdl = is_ccdl
        # Wrap nested API payloads in their typed pyrefinebio counterparts;
        # falsy payloads become None (or an empty list for collections).
        self.processor = prb_processor.Processor(**processor) if processor else None
        self.annotations = (
            [prb_annotation.Annotation(**a) for a in annotations] if annotations else []
        )
        self.files = [prb_computed_file.ComputedFile(**f) for f in files] if files else []
        self.organism_index = (
            prb_transcriptome_index.TranscriptomeIndex(**organism_index)
            if organism_index
            else None
        )
        # Timestamps arrive as ISO strings; normalise to datetime objects.
        self.time_start = parse_date(time_start)
        self.time_end = parse_date(time_end)
        self.created_at = parse_date(created_at)
        self.last_modified = parse_date(last_modified)
    @classmethod
    def get(cls, id):
        """Retrieve a computational result based on its id.

        Returns:
            ComputationalResult

        Parameters:
            id (int): The id for the computational result to be retrieved.
        """
        response = get_by_endpoint("computational_results/" + str(id)).json()
        return ComputationalResult(**response)
    @classmethod
    def search(cls, **kwargs):
        """Retrieve a list of computational results based on filters.

        Returns:
            list of ComputationalResult

        Keyword Arguments:
            processor__id (int): id of the Processor that processed the result
            limit (int): number of results to return per page
            offset (int): the initial index from which to return the results
        """
        response = get_by_endpoint("computational_results", params=kwargs)
        return create_paginated_list(cls, response)
|
# import
import timm
import torch
from torch.optim.lr_scheduler import CosineAnnealingLR, StepLR
from src.project_parameters import ProjectParameters
from pytorch_lightning import LightningModule
import torch.nn as nn
from torchmetrics import Accuracy, ConfusionMatrix
import pandas as pd
import numpy as np
from src.utils import load_checkpoint, load_yaml
import torch.optim as optim
from os.path import dirname, basename
import torch.nn.functional as F
import sys
# def
def _get_backbone_model_from_file(filepath, in_chans, num_classes):
"""Load a BackboneModel from a file .
Args:
filepath (str): the file path of the backbone model.
in_chans (int): number of input channels / colors.
num_classes (int): the number of classes.
Returns:
nn.Module: the self-defined backbone model.
"""
sys.path.append('{}'.format(dirname(filepath)))
class_name = basename(filepath).split('.')[0]
exec('from {} import {}'.format(*[class_name]*2))
return eval('{}(in_chans={}, num_classes={})'.format(class_name, in_chans, num_classes))
def _get_backbone_model(project_parameters):
    """Get the backbone model.

    Args:
        project_parameters (argparse.Namespace): the parameters for the project.

    Returns:
        timm.models or nn.Module: the timm model or the self-defined backbone model.
    """
    name = project_parameters.backbone_model
    # A known timm model name wins; otherwise a '.py' path means a
    # user-supplied model file; anything else is a configuration error.
    if name in timm.list_models():
        return timm.create_model(model_name=name, pretrained=True,
                                 num_classes=project_parameters.num_classes,
                                 in_chans=project_parameters.in_chans)
    if '.py' in name:
        return _get_backbone_model_from_file(
            filepath=name,
            in_chans=project_parameters.in_chans,
            num_classes=project_parameters.num_classes)
    assert False, 'please check the backbone model. the backbone model: {}'.format(name)
def _get_loss_function(project_parameters):
"""Get loss function .
Args:
project_parameters (argparse.Namespace): the parameters for the project.
Returns:
torch.nn.modules.loss.CrossEntropyLoss: It is useful when training a classification problem with `C` classes.
"""
if 'data_weight' in project_parameters:
weight = torch.Tensor(list(project_parameters.data_weight.values()))
else:
weight = None
return nn.BCELoss(weight=weight) if project_parameters.loss_function == 'BCELoss' else nn.CrossEntropyLoss(weight=weight)
def _get_optimizer(model_parameters, project_parameters):
    """Get optimizer.

    The optimizer config YAML maps one optimizer class name to either
    None (use defaults) or a dict of extra constructor kwargs.

    Args:
        model_parameters (iterable): iterable of parameters to optimize or dicts defining parameter groups
        project_parameters (argparse.Namespace): the parameters for the project.

    Returns:
        torch.optim: the optimization algorithm.
    """
    optimizer_config = load_yaml(
        filepath=project_parameters.optimizer_config_path)
    optimizer_name = list(optimizer_config.keys())[0]
    assert optimizer_name in dir(optim), \
        'please check the optimizer. the optimizer config: {}'.format(optimizer_config)
    # getattr + ** kwargs replaces the previous eval() of a generated source
    # string, which broke on non-literal values and was unsafe.
    optimizer_class = getattr(optim, optimizer_name)
    extra_kwargs = optimizer_config[optimizer_name]
    if extra_kwargs is None:
        return optimizer_class(params=model_parameters, lr=project_parameters.lr)
    if type(extra_kwargs) is dict:
        return optimizer_class(params=model_parameters,
                               lr=project_parameters.lr, **extra_kwargs)
    assert False, '{}: {}'.format(optimizer_name, extra_kwargs)
def _get_lr_scheduler(project_parameters, optimizer):
"""Returns the LR scheduler .
Args:
project_parameters (argparse.Namespace): the parameters for the project.
optimizer (Optimizer): Wrapped optimizer.
Returns:
torch.optim.lr_scheduler: the LR scheduler.
"""
if project_parameters.lr_scheduler == 'StepLR':
lr_scheduler = StepLR(optimizer=optimizer,
step_size=project_parameters.step_size, gamma=project_parameters.gamma)
elif project_parameters.lr_scheduler == 'CosineAnnealingLR':
lr_scheduler = CosineAnnealingLR(
optimizer=optimizer, T_max=project_parameters.step_size)
return lr_scheduler
def create_model(project_parameters):
    """Create a neural network model.

    Args:
        project_parameters (argparse.Namespace): the parameters for the project.

    Returns:
        LightningModule: a neural network model, with weights restored from
        the configured checkpoint when one is given.
    """
    model = Net(project_parameters=project_parameters)
    if project_parameters.checkpoint_path is None:
        return model
    return load_checkpoint(model=model,
                           num_classes=project_parameters.num_classes,
                           use_cuda=project_parameters.use_cuda,
                           checkpoint_path=project_parameters.checkpoint_path)
# class
class Net(LightningModule):
    """Constructs a LightningModule class .
    """
    def __init__(self, project_parameters):
        """Initialize the class.
        Args:
            project_parameters (argparse.Namespace): the parameters for the project.
        """
        super().__init__()
        self.project_parameters = project_parameters
        self.backbone_model = _get_backbone_model(
            project_parameters=project_parameters)
        # Softmax over the class dimension; used for BCELoss training and
        # for probability outputs at inference time.
        self.activation_function = nn.Softmax(dim=-1)
        self.loss_function = _get_loss_function(
            project_parameters=project_parameters)
        self.accuracy = Accuracy()
        self.confusion_matrix = ConfusionMatrix(
            num_classes=project_parameters.num_classes)
    def training_forward(self, x):
        """Defines the computation performed at every call in training.
        Args:
            x (torch.Tensor): the input data.
        Returns:
            torch.Tensor: the predict of neural network model.
        """
        # BCELoss expects probabilities; CrossEntropyLoss expects raw
        # logits (it applies log-softmax internally).
        if self.project_parameters.loss_function == 'BCELoss':
            return self.activation_function(self.backbone_model(x))
        elif self.project_parameters.loss_function == 'CrossEntropyLoss':
            return self.backbone_model(x)
    def forward(self, x):
        """Defines the computation performed at every call.
        Args:
            x (torch.Tensor): the input data.
        Returns:
            torch.Tensor: the predict of neural network model.
        """
        # Inference path always returns class probabilities.
        return self.activation_function(self.backbone_model(x))
    def get_progress_bar_dict(self):
        """Remove the step loss information from the progress bar .
        Returns:
            dict: Dictionary with the items to be displayed in the progress bar.
        """
        # don't show the loss value
        items = super().get_progress_bar_dict()
        items.pop('loss', None)
        return items
    def _parse_outputs(self, outputs, calculate_confusion_matrix):
        """Parse the outputs to get the epoch of loss and accuracy .
        Args:
            outputs (dict): the output contains loss and accuracy.
            calculate_confusion_matrix (bool): whether to calculate the confusion matrix.
        Returns:
            tuple: the tuple contains the epoch of loss and accuracy. And if calculate_confusion_matrix is True, the tuple will contain a confusion matrix.
        """
        epoch_loss = []
        epoch_accuracy = []
        if calculate_confusion_matrix:
            y_true = []
            y_pred = []
        for step in outputs:
            epoch_loss.append(step['loss'].item())
            epoch_accuracy.append(step['accuracy'].item())
            if calculate_confusion_matrix:
                y_pred.append(step['y_hat'])
                y_true.append(step['y'])
        if calculate_confusion_matrix:
            # Concatenate per-step predictions/targets along the batch axis
            # and render the confusion matrix as a labelled DataFrame.
            y_pred = torch.cat(y_pred, 0)
            y_true = torch.cat(y_true, 0)
            confmat = pd.DataFrame(self.confusion_matrix(y_pred, y_true).tolist(
            ), columns=self.project_parameters.classes, index=self.project_parameters.classes).astype(int)
            return epoch_loss, epoch_accuracy, confmat
        else:
            return epoch_loss, epoch_accuracy
    def training_step(self, batch, batch_idx):
        """Compute and return the training loss and accuracy.
        Args:
            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
            batch_idx (int): Integer displaying index of this batch.
        Returns:
            dict: the dictionary contains loss and accuracy.
        """
        x, y = batch
        y_hat = self.training_forward(x)
        loss = self.loss_function(y_hat, y)
        # BCELoss targets are one-hot, so both sides are argmax-ed;
        # CrossEntropyLoss targets are class indices.
        if self.project_parameters.loss_function == 'BCELoss':
            train_step_accuracy = self.accuracy(y_hat.argmax(-1), y.argmax(-1))
        elif self.project_parameters.loss_function == 'CrossEntropyLoss':
            train_step_accuracy = self.accuracy(F.softmax(y_hat, dim=-1), y)
        return {'loss': loss, 'accuracy': train_step_accuracy}
    def training_epoch_end(self, outputs):
        """Called at the end of the training epoch with the outputs of all training steps.
        Args:
            outputs (list): List of outputs defined in :meth:`training_step`.
        """
        epoch_loss, epoch_accuracy = self._parse_outputs(
            outputs=outputs, calculate_confusion_matrix=False)
        self.log('training loss', np.mean(epoch_loss),
                 on_epoch=True, prog_bar=True)
        self.log('training accuracy', np.mean(epoch_accuracy))
    def validation_step(self, batch, batch_idx):
        """Compute and return the validation loss and accuracy.
        Args:
            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
            batch_idx (int): Integer displaying index of this batch.
        Returns:
            dict: the dictionary contains loss and accuracy.
        """
        x, y = batch
        y_hat = self.training_forward(x)
        loss = self.loss_function(y_hat, y)
        if self.project_parameters.loss_function == 'BCELoss':
            val_step_accuracy = self.accuracy(y_hat.argmax(-1), y.argmax(-1))
        elif self.project_parameters.loss_function == 'CrossEntropyLoss':
            val_step_accuracy = self.accuracy(F.softmax(y_hat, dim=-1), y)
        return {'loss': loss, 'accuracy': val_step_accuracy}
    def validation_epoch_end(self, outputs):
        """Called at the end of the validation epoch with the outputs of all validation steps.
        Args:
            outputs (list): List of outputs defined in :meth:`validation_step`.
        """
        epoch_loss, epoch_accuracy = self._parse_outputs(
            outputs=outputs, calculate_confusion_matrix=False)
        self.log('validation loss', np.mean(epoch_loss),
                 on_epoch=True, prog_bar=True)
        self.log('validation accuracy', np.mean(epoch_accuracy))
    def test_step(self, batch, batch_idx):
        """Operates on a single batch of data from the test set.
        Args:
            batch (:class:`~torch.Tensor` | (:class:`~torch.Tensor`, ...) | [:class:`~torch.Tensor`, ...]): The output of :class:`~torch.utils.data.DataLoader`. A tensor, tuple or list.
            batch_idx (int): Integer displaying index of this batch.
        Returns:
            dict: the dictionary contains loss, accuracy, predicted, and ground truth.
        """
        x, y = batch
        y_hat = self.training_forward(x)
        loss = self.loss_function(y_hat, y)
        # y_hat/y are also returned so test_epoch_end can build the
        # confusion matrix over the whole test set.
        if self.project_parameters.loss_function == 'BCELoss':
            y_hat = y_hat.argmax(-1)
            y = y.argmax(-1)
            test_step_accuracy = self.accuracy(y_hat, y)
        elif self.project_parameters.loss_function == 'CrossEntropyLoss':
            y_hat = F.softmax(y_hat, dim=-1)
            test_step_accuracy = self.accuracy(y_hat, y)
        return {'loss': loss, 'accuracy': test_step_accuracy, 'y_hat': y_hat, 'y': y}
    def test_epoch_end(self, outputs):
        """Called at the end of a test epoch with the output of all test steps.
        Args:
            outputs (list): List of outputs defined in :meth:`test_step`.
        """
        epoch_loss, epoch_accuracy, confmat = self._parse_outputs(
            outputs=outputs, calculate_confusion_matrix=True)
        self.log('test loss', np.mean(epoch_loss))
        self.log('test accuracy', np.mean(epoch_accuracy))
        print(confmat)
    def configure_optimizers(self):
        """Choose what optimizers and learning-rate schedulers to use in your optimization.
        Returns:
            **Single optimizer** or **List of optimizers and LR schedulers.: the optimizer or the optimizer and LR schedulers.
        """
        optimizer = _get_optimizer(model_parameters=self.parameters(
        ), project_parameters=self.project_parameters)
        # step_size <= 0 disables LR scheduling entirely.
        if self.project_parameters.step_size > 0:
            lr_scheduler = _get_lr_scheduler(
                project_parameters=self.project_parameters, optimizer=optimizer)
            return [optimizer], [lr_scheduler]
        else:
            return optimizer
if __name__ == '__main__':
    # Parse project parameters and build the model.
    project_parameters = ProjectParameters().parse()
    model = create_model(project_parameters=project_parameters)
    # Display model information.
    model.summarize()
    # Run a dummy forward pass to sanity-check input/output shapes.
    x = torch.ones(project_parameters.batch_size,
                   project_parameters.in_chans, 224, 224)
    y = model.forward(x)
    print(x.shape)
    print(y.shape)
|
# MorseCodeWriter - Morse code visualisation on LED (RGB)
# Timing: https://en.wikipedia.org/wiki/Morse_code#Transmission
import RPi.GPIO as GPIO
import time
# LED setup (BCM pin numbers of the RGB LED channels)
red = 18
green = 24
blue = 23
# Settings
selectedColor = green # Selected color of LED (one of the pins above)
timeUnit = 0.1 # Duration of one Morse time unit [s]; a dot lasts one unit
# Morse code dictionary (ITU international Morse)
CODE = {"'": '.----.',
        '(': '-.--.',   # fixed: '(' is '-.--.'; the original duplicated ')' here
        ')': '-.--.-',
        ',': '--..--',
        '-': '-....-',
        '.': '.-.-.-',
        '/': '-..-.',
        '0': '-----',
        '1': '.----',
        '2': '..---',
        '3': '...--',
        '4': '....-',
        '5': '.....',
        '6': '-....',
        '7': '--...',
        '8': '---..',
        '9': '----.',
        ':': '---...',
        ';': '-.-.-.',
        '?': '..--..',
        'A': '.-',
        'B': '-...',
        'C': '-.-.',
        'D': '-..',
        'E': '.',
        'F': '..-.',
        'G': '--.',
        'H': '....',
        'I': '..',
        'J': '.---',
        'K': '-.-',
        'L': '.-..',
        'M': '--',
        'N': '-.',
        'O': '---',
        'P': '.--.',
        'Q': '--.-',
        'R': '.-.',
        'S': '...',
        'T': '-',
        'U': '..-',
        'V': '...-',
        'W': '.--',
        'X': '-..-',
        'Y': '-.--',
        'Z': '--..',
        '_': '..--.-'}
# Board setup
GPIO.setwarnings(False) # Suppress warnings when a previous run didn't exit cleanly
GPIO.setmode(GPIO.BCM) # Refer to pins by Broadcom SOC channel numbering
GPIO.setup(red,GPIO.OUT) # All pins as output
GPIO.setup(green,GPIO.OUT)
GPIO.setup(blue,GPIO.OUT)
# Common anode setup: HIGH switches the LED off, LOW switches it on
GPIO.output(red,GPIO.HIGH) # Common [anode = HIGH, cathode = LOW]
GPIO.output(green,GPIO.HIGH)
GPIO.output(blue,GPIO.HIGH)
# Timing setup (international timing: dash = 3 units, word gap = 7 units)
dotLength = timeUnit # For how long is LED visible [s]
dashLength = dotLength * 3
breakLength = dotLength # Silence between symbols of one character
letterGap = dotLength * 3
wordGap = dotLength * 7
# Dot visualisation: LED on for one unit, then one unit of silence
def dot():
    GPIO.output(selectedColor, GPIO.LOW)   # LOW lights a common-anode LED
    time.sleep(dotLength)
    GPIO.output(selectedColor, GPIO.HIGH)  # back off
    time.sleep(breakLength)
# Dash visualisation: LED on for three units, then one unit of silence
def dash():
    GPIO.output(selectedColor, GPIO.LOW)   # LOW lights a common-anode LED
    time.sleep(dashLength)
    GPIO.output(selectedColor, GPIO.HIGH)  # back off
    time.sleep(breakLength)
# Main loop: read a line, blink it out as Morse, Ctrl-C to quit
try:
    while True:
        # 'message' instead of 'input': the original shadowed the builtin
        message = raw_input('What would you like to send? ')
        for letter in message:
            time.sleep(letterGap)
            # Space between words
            if letter == ' ':
                time.sleep(wordGap)
                continue
            # Skip characters that have no Morse representation instead of
            # crashing with a KeyError
            symbols = CODE.get(letter.upper())
            if symbols is None:
                continue
            # Character visualisation
            for symbol in symbols:
                if symbol == '.':  # Dot
                    dot()
                elif symbol == '-':  # Dash
                    dash()
# GPIO safe exit
except KeyboardInterrupt:
    GPIO.cleanup()
|
import cv2
import numpy as np
from enum import Enum
class Color(Enum):
    """Named colours; get() yields the OpenCV BGR pixel for each."""
    RED = 1
    GREEN = 2
    YELLOW = 3
    BLUE = 4
    MIX = 0

    def get(self):
        """Return a 1x1x3 BGR ndarray for this colour, or None for MIX."""
        bgr_by_color = {
            Color.RED: [0, 0, 255],
            Color.GREEN: [0, 255, 0],
            Color.YELLOW: [0, 255, 255],
            Color.BLUE: [255, 0, 0],
        }
        bgr = bgr_by_color.get(self)
        return None if bgr is None else np.array([[bgr]])
def cvtLab(img):
    """Convert a BGR image to float32 CIE Lab.

    L is rescaled from OpenCV's 8-bit 0..255 range to the conventional
    0..100; a and b are shifted from 0..255 to the signed -128..127 range.
    """
    img = np.float32(cv2.cvtColor(np.uint8(img), cv2.COLOR_BGR2Lab))
    # 100.0/255 instead of 100/255: under Python 2 integer division the
    # original evaluated to 0 and zeroed the entire L channel.
    img[:,:,0] *= 100.0/255
    img[:,:,1] -= 128
    img[:,:,2] -= 128
    return img
|
import pandas as pd
import numpy as np
import json
import re
import copy
import itertools
import math
import re, string
import sqlite3
from collections import OrderedDict
from quantipy.core.helpers.constants import DTYPE_MAP
from quantipy.core.helpers.constants import MAPPED_PATTERN
from itertools import product
from quantipy.core.view import View
from quantipy.core.view_generators.view_mapper import ViewMapper
from quantipy.core.helpers import functions
from quantipy.core.tools.dp.dimensions.reader import quantipy_from_dimensions
from quantipy.core.tools.dp.decipher.reader import quantipy_from_decipher
from quantipy.core.tools.dp.spss.reader import parse_sav_file
from quantipy.core.tools.dp.spss.writer import save_sav
from quantipy.core.tools.dp.ascribe.reader import quantipy_from_ascribe
def load_json(path_json, hook=OrderedDict):
    ''' Returns a python object from the json file located at path_json,
    preserving key order via the given object_pairs_hook.
    '''
    with open(path_json) as f:
        return json.load(f, object_pairs_hook=hook)
def loads_json(json_text, hook=OrderedDict):
    ''' Returns a python object from the json string json_text,
    preserving key order via the given object_pairs_hook.
    '''
    return json.loads(json_text, object_pairs_hook=hook)
def load_csv(path_csv):
    """Load a CSV file into a DataFrame.

    ``pd.DataFrame.from_csv`` was deprecated and later removed from pandas;
    ``read_csv`` with ``index_col=0, parse_dates=True`` reproduces its
    defaults (first column as index, dates parsed in the index).
    """
    return pd.read_csv(path_csv, index_col=0, parse_dates=True)
def save_json(obj, path_json):
    """Serialize *obj* as JSON at *path_json*, coercing numpy scalars."""
    def represent(o):
        # np.asscalar was removed from numpy; ndarray/generic .item() is
        # the documented replacement.
        if isinstance(o, np.generic):
            return o.item()
        else:
            return "Unserializable object: %s" % (str(type(o)))
    with open(path_json, 'w+') as f:
        json.dump(obj, f, default=represent)
def df_to_browser(df, path_html='df.html', **kwargs):
    """Render *df* as an HTML file and open it in a new browser tab."""
    import webbrowser
    html = df.to_html(**kwargs)
    with open(path_html, 'w') as f:
        f.write(html)
    # new=2 requests a new browser tab where supported.
    webbrowser.open(path_html, new=2)
def verify_dtypes_vs_meta(data, meta):
    ''' Returns a df showing the pandas dtype for each column in data compared
    to the type indicated for that variable name in meta plus a 'verified'
    column indicating if quantipy determines the comparison as viable.
    data - (pandas.DataFrame)
    meta - (dict) quantipy meta object
    '''
    dtypes = data.dtypes
    dtypes.name = 'dtype'
    # One row per variable: the quantipy type declared in the meta.
    # (Python 2 code: dict.iteritems / print statement below.)
    var_types = pd.DataFrame({k: v['type'] for k, v in meta['columns'].iteritems()}, index=['meta']).T
    df = pd.concat([var_types, dtypes.astype(str)], axis=1)
    # Meta entries with no matching data column surface as NaN dtypes.
    missing = df.loc[df['dtype'].isin([np.NaN])]['meta']
    if missing.size>0:
        print '\nSome meta not paired to data columns was found (these may be special data types):\n', missing, '\n'
    df = df.dropna(how='any')
    # A pairing is 'verified' when the observed pandas dtype is among the
    # allowed dtypes for the declared quantipy type (see DTYPE_MAP).
    df['verified'] = df.apply(lambda x: x['dtype'] in DTYPE_MAP[x['meta']], axis=1)
    return df
def coerce_dtypes_from_meta(data, meta):
    ''' Returns a copy of data with columns coerced to match their meta type.

    Only 'int'/'single' columns currently stored as pandas 'object' are
    coerced; NaNs become 0 so the cast to int can succeed.
    data - (pandas.DataFrame)
    meta - (dict) quantipy meta object
    '''
    data = data.copy()
    verified = verify_dtypes_vs_meta(data, meta)
    # NOTE: the loop rebinds the `meta` parameter to the per-column meta
    # type string; the original dict is no longer needed at this point.
    for idx in verified[~verified['verified']].index:
        meta = verified.loc[idx]['meta']
        dtype = verified.loc[idx]['dtype']
        if meta in ["int", "single"]:
            if dtype in ["object"]:
                # convert_objects is the legacy (pandas < 0.21) numeric coercion.
                data[idx] = data[idx].convert_objects(convert_numeric=True)
                data[idx] = data[idx].replace(np.NaN, 0).astype(int)
    return data
def read_ddf(path_ddf, auto_index_tables=True):
    ''' Returns a raw version of the DDF in the form of a dict of
    pandas DataFrames (one for each table in the DDF).
    Parameters
    ----------
    path_ddf : string, the full path to the target DDF
    auto_index_tables : boolean (optional)
        if True, will set the index for all returned DataFrames using the most
        meaningful candidate column available. Columns set into the index will
        not be dropped from the DataFrame.
    Returns
    ----------
    dict of pandas DataFrames
    '''
    # Read in the DDF (which is a sqlite file) and retain all available
    # information in the form of pandas DataFrames.
    with sqlite3.connect(path_ddf) as conn:
        ddf = {}
        ddf['sqlite_master'] = pd.read_sql(
            'SELECT * FROM sqlite_master;',
            conn
        )
        # Data tables in a DDF are the ones whose names start with 'L'.
        ddf['tables'] = {
            table_name:
            pd.read_sql('SELECT * FROM %s;' % (table_name), conn)
            for table_name in ddf['sqlite_master']['tbl_name'].values
            if table_name.startswith('L')
        }
        ddf['table_info'] = {
            table_name:
            pd.read_sql("PRAGMA table_info('%s');" % (table_name), conn)
            for table_name in ddf['tables'].keys()
        }
    # If required, set the index for the expected Dataframes that should
    # result from the above operation.
    if auto_index_tables:
        try:
            ddf['sqlite_master'].set_index(
                ['name'],
                drop=False,
                inplace=True
            )
        except:
            print (
                "Couldn't set 'name' into the index for 'sqlite_master'."
            )
        for table_name in ddf['table_info'].keys():
            try:
                ddf['table_info'][table_name].set_index(
                    ['name'],
                    drop=False,
                    inplace=True
                )
            except:
                # %-formatting moved inside the parentheses: the previous
                # `print (...) % args` form breaks under Python 3.
                print (
                    "Couldn't set 'name' into the index for '%s'." % (table_name)
                )
        for table_name in ddf['tables'].keys():
            index_col = 'TableName' if table_name=='Levels' else ':P0'
            try:
                # BUGFIX: index the data table itself on its key column.
                # The original re-indexed ddf['table_info'][table_name] on
                # 'name' here, so index_col was computed but never used.
                ddf['tables'][table_name].set_index(
                    [index_col],
                    drop=False,
                    inplace=True
                )
            except:
                print (
                    ("Couldn't set '%s' into the index for the '%s' "
                     "Dataframe.") % (index_col, table_name)
                )
    return ddf
def read_dimensions(path_mdd, path_ddf):
    """Read a Dimensions MDD/DDF pair into a (meta, data) tuple."""
    return quantipy_from_dimensions(path_mdd, path_ddf)
def read_decipher(path_json, path_txt, text_key='main'):
    """Read a Decipher JSON/TXT export into a (meta, data) tuple."""
    return quantipy_from_decipher(path_json, path_txt, text_key)
def read_spss(path_sav, **kwargs):
    """Read an SPSS .sav file into a (meta, data) tuple."""
    return parse_sav_file(path_sav, **kwargs)
def write_spss(path_sav, meta, data, index=True, text_key=None, mrset_tag_style='__', drop_delimited=True):
    """Write quantipy meta/data out as an SPSS .sav file."""
    save_sav(path_sav, meta, data,
             index=index,
             text_key=text_key,
             mrset_tag_style=mrset_tag_style,
             drop_delimited=drop_delimited)
def read_ascribe(path_xml, path_txt, text_key='main'):
    """Read an Ascribe XML/TXT export into a (meta, data) tuple."""
    return quantipy_from_ascribe(path_xml, path_txt, text_key)
|
# Stock list comes in as "name qty name qty ..."; queries as "name name ...".
products = input().split()
searched_products = input().split()
product_dict = {}
# Walk the flat list two tokens at a time: name, then its quantity.
for idx in range(0, len(products), 2):
    product_dict[products[idx]] = int(products[idx + 1])
for product in searched_products:
    if product in product_dict:
        print(f"We have {product_dict[product]} of {product} left")
    else:
        print(f"Sorry, we don't have {product}")
|
# Squeeze with PyTorch
# squeeze(): removes every dimension of size 1
# squeeze(dim=n): removes dimension n only if it has size 1
import torch
import numpy as np
ft = torch.FloatTensor([[0], [1], [2]])
print(ft)
print(ft.shape)
print(ft.squeeze())
print(ft.squeeze().shape)
|
from django.shortcuts import render
from django.http import HttpResponse, HttpResponseRedirect, Http404
from .models import Article, Comment
from django.urls import reverse
def index(request):
    """Render the five most recently published articles."""
    latest_articles_list = Article.objects.order_by('-pub_date')[:5]
    context = {'latest_articles_list': latest_articles_list}
    return render(request, 'firstApp/list.html', context)
def detail(request, article_id):
    """Render one article with its 15 most recent comments, or 404."""
    try:
        a = Article.objects.get(id=article_id)
    except (Article.DoesNotExist, ValueError):
        # Bare except replaced: only a missing article or a malformed id
        # should become a 404; other errors must propagate.
        raise Http404('Статья не найдена')
    latest_comments_list = a.comment_set.all()[:15]
    return render(request, 'firstApp/detail.html', {'article': a, 'l_c_l': latest_comments_list})
def leave_comment(request, article_id):
    """Create a comment (text + author name from POST) on an article,
    then redirect back to the article's detail page."""
    try:
        a = Article.objects.get(id=article_id)
    except (Article.DoesNotExist, ValueError):
        # Bare except replaced: only a missing article or a malformed id
        # should become a 404; other errors must propagate.
        raise Http404('Статья не найдена')
    a.comment_set.create(text=request.POST['text'], author_name=request.POST['name'])
    return HttpResponseRedirect(reverse('firstApp:detail', args=(a.id,)))
import unittest.mock
from slack import WebClient
from programy.clients.polling.slack.client import SlackBotClient
from programy.clients.polling.slack.config import SlackConfiguration
from programy.clients.render.text import TextRenderer
from programytest.clients.arguments import MockArgumentParser
class MockSlackClient(object):
    """Test double for a Slack client: records its token and fakes the
    RTM connection outcome."""

    def __init__(self, token, should_connect=True):
        self._token = token
        self._should_connect = should_connect

    def rtm_connect(self, with_team_state=True, **kwargs):
        # Ignore all connection options; report the configured outcome.
        return self._should_connect
class MockSlackBotClient(SlackBotClient):
    """SlackBotClient test double with an injectable slack client, a
    canned answer, and capture of the last response sent."""

    def __init__(self, argument_parser=None, slack_client=None, id="slackid"):
        self.test_slack_client = slack_client
        self.test_question = None
        self._id = id
        self._connect = True
        self.response_sent = None
        self.channel_sent = None
        SlackBotClient.__init__(self, argument_parser)

    def set_question(self, question):
        self.test_question = question

    def get_license_keys(self):
        self._bot_token = "SLACK_BOT_TOKEN"

    def connect(self):
        return self._connect

    def ask_question(self, sessionid, question):
        # Canned answer when configured; otherwise defer to the real bot.
        if self.test_question is not None:
            return self.test_question
        return super(MockSlackBotClient, self).ask_question(sessionid, question)

    def create_client(self):
        # NOTE: the class originally defined create_client twice; the first
        # definition (returning MockSlackClient(self._bot_token)) was dead
        # code silently shadowed by this one and has been removed.
        if self.test_slack_client is not None:
            return self.test_slack_client
        return super(MockSlackBotClient, self).create_client()

    def get_bot_id(self):
        return self._id

    def send_response(self, response, channel):
        self.response_sent = response
        self.channel_sent = channel
class SlackBotClientTests(unittest.TestCase):
    """Unit tests for the Slack bot client built on mocked collaborators."""

    def _make_client(self):
        # Every test drives a fresh mocked client.
        return MockSlackBotClient(MockArgumentParser())

    def _make_answering_client(self):
        # Client primed with a bot id and a canned answer.
        client = self._make_client()
        client._starterbot_id = "U024BE7LH"
        client.test_question = "Hi there"
        return client

    def _assert_answered(self, client):
        self.assertIsNotNone(client.response_sent)
        self.assertEqual("Hi there", client.response_sent)
        self.assertIsNotNone(client.channel_sent)
        self.assertEqual("test", client.channel_sent)

    def test_slack_client_init(self):
        client = self._make_client()
        self.assertIsNotNone(client)
        self.assertEqual("SLACK_BOT_TOKEN", client._bot_token)
        self.assertEqual('ProgramY AIML2.0 Client', client.get_description())
        self.assertIsInstance(client.get_client_configuration(), SlackConfiguration)
        self.assertIsInstance(client._slack_client, WebClient)
        self.assertFalse(client._render_callback())
        self.assertIsInstance(client.renderer, TextRenderer)

    def test_parse_direct_message(self):
        client = self._make_client()
        userid, message = client.parse_direct_message("<@U024BE7LH> Hello")
        self.assertIsNotNone(userid)
        self.assertEqual("U024BE7LH", userid)
        self.assertIsNotNone(message)
        self.assertEqual("Hello", message)

    def test_parse_mention(self):
        client = self._make_client()
        userid, message = client.parse_mention("I told <@U024BE7LH> Hello")
        self.assertIsNotNone(userid)
        self.assertEqual("U024BE7LH", userid)
        self.assertIsNotNone(message)
        self.assertEqual("Hello", message)

    def test_handle_message(self):
        client = self._make_answering_client()
        client.handle_message("Hello", "test", "U024BE7LH")
        self._assert_answered(client)

    def test_parse_message(self):
        client = self._make_answering_client()
        client.parse_message({"text": "<@U024BE7LH> Hello", "channel": "test"})
        self._assert_answered(client)

    def test_parse_messages(self):
        client = self._make_answering_client()
        events = [{"type": "message", "text": "<@U024BE7LH> Hello", "channel": "test"}]
        client.parse_messages(events)
        self._assert_answered(client)

    def poll_and_answer(self):
        # NOTE(review): not named test_*, so unittest never collects it —
        # presumably deliberate, since poll_and_answer() would block.
        client = self._make_client()
        client.poll_and_answer()
|
# Contributors: Matt Ware
import numpy as np
from mpi4py import MPI
comm = MPI.COMM_WORLD  # global communicator shared by all users of this module
rank = comm.Get_rank()  # this process's rank within the communicator
size = comm.Get_size()  # total number of MPI processes
class arrayinfo(object):
    """Lightweight descriptor of an ndarray: its name, shape and dtype.

    Sent (pickled) ahead of the raw buffer so the receiver can allocate
    a matching array.
    """

    def __init__(self, name, array):
        self.name = name
        self.shape = array.shape
        self.dtype = array.dtype
class small(object):
    """Picklable metadata packet: one arrayinfo per array payload, plus
    an end-of-run flag."""

    def __init__(self):
        self.arrayinfolist = []  # arrayinfo entries, in send order
        self.endrun = False      # set True to tell rank 0 this worker is done

    def addarray(self, name, array):
        self.arrayinfolist.append(arrayinfo(name, array))
class mpidata(object):
    """Pairs a small metadata packet with the numpy arrays it describes and
    moves both over MPI: pickled metadata first, then the raw buffers."""

    def __init__(self):
        self.small = small()
        self.arraylist = []

    def endrun(self):
        """Tell the receiver that this worker has finished."""
        self.small.endrun = True
        comm.send(self.small, dest=0, tag=rank)

    def addarray(self, name, array):
        self.arraylist.append(array)
        self.small.addarray(name, array)

    def send(self):
        """Worker side: send metadata, then each array buffer in order."""
        assert rank != 0
        comm.send(self.small, dest=0, tag=rank)
        for arr in self.arraylist:
            # Fast buffer sends require contiguous memory.
            assert arr.flags['C_CONTIGUOUS']
            comm.Send(arr, dest=0, tag=rank)

    def recv(self):
        """Receiver side: accept one worker's metadata and array buffers,
        storing each array as an attribute named after it."""
        assert rank == size - 1
        status = MPI.Status()
        self.small = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        recvRank = status.Get_source()
        if not self.small.endrun:
            for arrinfo in self.small.arrayinfolist:
                # Reuse the existing receive buffer only when its shape and
                # dtype still match. BUGFIX: the original tested
                # `arr.shape`/`arr.dtype` here, but `arr` was not yet
                # assigned on the first pass, raising NameError whenever the
                # attribute already existed from a previous recv().
                existing = getattr(self, arrinfo.name, None)
                if existing is None or existing.shape != arrinfo.shape \
                        or existing.dtype != arrinfo.dtype:
                    setattr(self, arrinfo.name, np.empty(arrinfo.shape, dtype=arrinfo.dtype))
                arr = getattr(self, arrinfo.name)
                comm.Recv(arr, source=recvRank, tag=MPI.ANY_TAG)
|
import serial
import socket
import time
import struct
from threading import Thread
from openctrl import Packet, checksum
class Bus(object):
    """Frames openctrl packets and writes them to a serial port."""

    def __init__(self, ser):
        self.ser = ser
        self.packet = Packet()

    def send_welcome(self, recv_packet):
        """Reply to a node's hello with a welcome packet (type/len 252)."""
        pkt = self.packet
        pkt.src = [1, 1]
        pkt.dst = [0, recv_packet.src[1]]
        pkt.id = recv_packet.id
        pkt.len = 252
        pkt.type = 252
        pkt.make_checksum()
        self.send_packet()

    def send_ping(self, dst):
        """Send a ping (type/len 255) to node *dst*."""
        pkt = self.packet
        pkt.src = [1, 1]
        pkt.dst = [1, dst]
        pkt.id = 1
        pkt.len = 255
        pkt.type = 255
        pkt.make_checksum()
        self.send_packet()

    def send_packet(self):
        """Serialize header + data + checksum as raw chars and write them."""
        pkt = self.packet
        head = ''.join(map(chr, [pkt.src[0], pkt.src[1],
                                 pkt.dst[0], pkt.dst[1],
                                 pkt.id, pkt.len]))
        body = ''.join(map(chr, pkt.data))
        tail = ''.join(map(chr, pkt.checksum))
        self.ser.write("%s%s%s" % (head, body, tail))

    def read(self, size):
        """Read *size* bytes from the underlying serial port."""
        return self.ser.read(size)
|
from kivymd.app import MDApp
from usermapviewv2 import UserMapView
import sqlite3
from searchpopupmenuv2 import SearchPopupMenu
class MainApp(MDApp):
    """Map application: opens the SQLite store and builds the search popup on start."""
    # Populated by on_start(); class-level defaults keep attribute access safe
    # before the app has started.
    connection = None
    cursor = None
    search_menu = None
    def on_start(self):
        # init gps (placeholder from the original author — not implemented)
        # connect to db: creates store.db alongside the app on first run
        self.connection = sqlite3.connect("store.db")
        self.cursor = self.connection.cursor()
        # start search menu
        self.search_menu = SearchPopupMenu()
# Instantiates the app and enters the Kivy main loop immediately on import.
MainApp().run()
|
from django.shortcuts import render
from django.http import HttpResponse,Http404
from .models import Question
# Create your views here.
def index(request):
    """Render the five most recently published questions."""
    recent = Question.objects.order_by('-pub_date')[:5]
    return render(request, 'firstApp/index.html',
                  {'latest_question_list': recent})
def details(request, question_id):
    """Show one question's detail page, or 404 if the pk is unknown.

    Bug fix: the original caught ``Question.DoesNotExits`` (typo) — no
    such attribute exists on the model, so a missing question raised
    AttributeError instead of returning a 404.
    """
    try:
        question = Question.objects.get(pk=question_id)
    except Question.DoesNotExist:
        raise Http404('question does not exist')
    return render(request, 'firstApp/details.html', {'question': question})
def results(request, question_id):
    """Placeholder results page for a question."""
    template = "you're looking at the results of question %s"
    return HttpResponse(template % question_id)
def vote(request, question_id):
    """Placeholder vote page for a question.

    Bug fix: corrected the "questoin" typo in the user-facing response.
    """
    response = "you are looking at the result of question %s"
    return HttpResponse(response % question_id)
|
def part_1(path='input/day2_input'):
    """AoC 2017 day 2 part 1: checksum = sum over rows of (max - min).

    Generalized (backward compatible): the input path is a parameter
    defaulting to the original hard-coded location, and the checksum is
    returned in addition to being printed, so the function is testable.
    """
    total = 0
    with open(path) as f:
        for line in f:
            row = [int(x) for x in line.split('\t')]
            total += max(row) - min(row)
    print('Part 1: {}'.format(total))
    return total
def part_2(path='input/day2_input'):
    """AoC 2017 day 2 part 2: per row, sum the quotient of the single
    evenly-dividing pair.

    Generalized (backward compatible): parameterized input path and the
    checksum is returned as well as printed. ``x // y`` replaces
    ``int(x / y)`` — identical here since ``x % y == 0`` guarantees an
    exact division.
    """
    total = 0
    with open(path) as f:
        for line in f:
            row = [int(x) for x in line.split('\t')]
            for x in row:
                for y in row:
                    if x != y and x % y == 0:
                        total += x // y
    print('Part 2: {}'.format(total))
    return total
# Runs both puzzle parts on import; each reads ./input/day2_input.
part_1()
part_2()
|
# -*- coding: utf-8 -*-
"""
xyz_parser.py: Functions to preprocess dataset.
"""
import os
import networkx as nx
import numpy as np
from rdkit import Chem
from rdkit.Chem import ChemicalFeatures
from rdkit import RDConfig
def init_graph(prop):
    """Parse a QM9 property line into an (empty) molecule graph + targets.

    The line holds: tag, index, then 15 scalar properties (A, B, C, mu,
    alpha, homo, lumo, gap, r2, zpve, U0, U, H, G, Cv). The last 12 of
    them are returned as the regression labels.
    """
    fields = prop.split()
    tag = fields[0]
    index = int(fields[1])
    (A, B, C, mu, alpha, homo, lumo, gap,
     r2, zpve, U0, U, H, G, Cv) = [float(v) for v in fields[2:17]]
    labels = [mu, alpha, homo, lumo, gap, r2, zpve, U0, U, H, G, Cv]
    # All properties become graph-level attributes of the molecule.
    graph = nx.Graph(tag=tag, index=index, A=A, B=B, C=C, mu=mu, alpha=alpha,
                     homo=homo, lumo=lumo, gap=gap, r2=r2, zpve=zpve, U0=U0,
                     U=U, H=H, G=G, Cv=Cv)
    return graph, labels
def xyz_graph_decoder(xyzfile):
    """Parse one QM9 ``.xyz`` file into (adjacency, node feats, edge feats, labels).

    Reads atom count, the property line, per-atom records, the frequency
    line and the SMILES line; the molecule is rebuilt with RDKit so that
    donor/acceptor and aromaticity features can be attached to nodes.
    """
    with open(xyzfile, 'r') as f:
        # Number of atoms
        na = int(f.readline())
        # Graph properties (tag, index and the 12 regression targets)
        g, l = init_graph(f.readline())
        # Atom properties
        atom_property = []
        for i in range(na):
            a_properties = f.readline() #lines of Element types, coords, Mulliken partial charges in e
            # QM9 uses Fortran-style "*^" exponents; normalize to "e".
            a_properties = a_properties.replace('.*^', 'e')
            a_properties = a_properties.replace('*^', 'e')
            a_properties = a_properties.split()
            atom_property.append(a_properties)
        # Frequencies (unused)
        f.readline()
        # SMILES (first token of the line)
        smiles = f.readline()
        smiles = smiles.split()
        smiles = smiles[0]
        m = Chem.MolFromSmiles(smiles)
        m = Chem.AddHs(m)
        fdef_name = os.path.join(RDConfig.RDDataDir, 'BaseFeatures.fdef')
        factory = ChemicalFeatures.BuildFeatureFactory(fdef_name)
        feats = factory.GetFeaturesForMol(m)
        # Create nodes
        for i in range(m.GetNumAtoms()):
            atom_i = m.GetAtomWithIdx(i)
            # Add node(atom) attributes.
            # NOTE(review): np.float is removed in NumPy >= 1.24; kept here
            # for consistency with the rest of this (networkx 1.x era) file.
            g.add_node(i,
                       a_type=atom_i.GetSymbol(),
                       a_num=atom_i.GetAtomicNum(),
                       acceptor=0, donor=0,
                       aromatic=atom_i.GetIsAromatic(),
                       hybridization=atom_i.GetHybridization(),
                       num_h=atom_i.GetTotalNumHs(),
                       coord=np.array(atom_property[i][1:4]).astype(np.float),
                       pc=float(atom_property[i][4]))
        for i in range(len(feats)):
            if feats[i].GetFamily() == 'Donor':
                node_list = feats[i].GetAtomIds()
                for n in node_list:
                    g.node[n]['donor'] = 1
            elif feats[i].GetFamily() == 'Acceptor':
                node_list = feats[i].GetAtomIds()
                for n in node_list:
                    # BUG FIX: the original wrote g.node[i]['acceptor'],
                    # flagging the *feature* index i instead of atom id n.
                    g.node[n]['acceptor'] = 1
        # Create Edges (fully connected; unbonded pairs carry b_type=None)
        for i in range(m.GetNumAtoms()):
            for j in range(m.GetNumAtoms()):
                e_ij = m.GetBondBetweenAtoms(i, j)
                if e_ij is not None:
                    # Add edge(bond) attributes
                    g.add_edge(i, j,
                               b_type=e_ij.GetBondType(),
                               distance=np.linalg.norm(g.node[i]['coord'] - g.node[j]['coord']))
                else:
                    # Unbonded
                    g.add_edge(i, j,
                               b_type=None,
                               distance=np.linalg.norm(g.node[i]['coord'] - g.node[j]['coord']))
    h = _qm9_nodes(g)
    g, e = _qm9_edges(g)
    return g, h, e, l
def _qm9_nodes(g, hydrogen=False):
    """Return node embedding h_v: one feature vector per atom.

    Layout: one-hot element (H, C, N, O, F), atomic number, partial
    charge, acceptor flag, donor flag, aromatic flag, one-hot
    hybridization (SP, SP2, SP3) and, optionally, the hydrogen count.
    """
    atom_types = ['H', 'C', 'N', 'O', 'F']
    hybridizations = [Chem.rdchem.HybridizationType.SP,
                      Chem.rdchem.HybridizationType.SP2,
                      Chem.rdchem.HybridizationType.SP3]
    h = []
    for _, d in g.nodes(data=True):
        feats = [int(d['a_type'] == t) for t in atom_types]
        feats.append(d['a_num'])
        feats.append(d['pc'])
        feats.append(d['acceptor'])
        feats.append(d['donor'])
        feats.append(int(d['aromatic']))
        feats.extend(int(d['hybridization'] == hy) for hy in hybridizations)
        if hydrogen:
            feats.append(d['num_h'])
        h.append(feats)
    return h
def _qm9_edges(g, e_representation='raw_distance'):
    """Return adjacency matrix and distance of edges.

    Representations:
      - 'chem_graph':   1-based bond-type index; unbonded pairs removed.
      - 'distance_bin': bond-type index for bonded pairs; otherwise the
        distance binned into 8 steps over [2, 6), offset by +5.
      - 'raw_distance': [distance, one-hot bond type]; unbonded removed.
    """
    remove_edges = []
    edge = {}
    for n1, n2, d in g.edges(data=True):
        e_t = []
        # Raw distance function
        if e_representation == 'chem_graph':
            if d['b_type'] is None:
                remove_edges += [(n1, n2)]
            else:
                # 1-based index of the matching bond type.
                e_t += [i+1 for i, x in enumerate([Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]) if x == d['b_type']]
        elif e_representation == 'distance_bin':
            if d['b_type'] is None:
                # Bin width 0.5 over [2, 6); distances >= 6 fall in bin 9.
                step = (6-2)/8.0
                start = 2
                b = 9
                for i in range(0, 9):
                    if d['distance'] < (start+i*step):
                        b = i
                        break
                # +5 keeps distance codes disjoint from bond-type codes.
                e_t.append(b+5)
            else:
                e_t += [i+1 for i, x in enumerate([Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]) if x == d['b_type']]
        elif e_representation == 'raw_distance':
            if d['b_type'] is None:
                remove_edges += [(n1, n2)]
            else:
                e_t.append(d['distance'])
                e_t += [int(d['b_type'] == x) for x in [Chem.rdchem.BondType.SINGLE, Chem.rdchem.BondType.DOUBLE, Chem.rdchem.BondType.TRIPLE, Chem.rdchem.BondType.AROMATIC]]
        else:
            print('Incorrect Edge representation transform')
            quit()
        if e_t:
            edge[(n1, n2)] = e_t
    # Drop edges flagged above, then densify to a numpy adjacency matrix.
    for edg in remove_edges:
        g.remove_edge(*edg)
    g = nx.to_numpy_matrix(g)
    # Edge feature tensor, symmetric in the first two axes.
    e = np.zeros((g.shape[0], g.shape[1], len(list(edge.values())[0])))
    for edg in edge.keys():
        e[edg[0], edg[1], :] = edge[edg]
        e[edg[1], edg[0], :] = edge[edg]
    return g, e
##########################################
def mol_graph_decoder(xyzfile):
    """Parse an extended-xyz molecule file into (adjacency, node feats, edge feats, label).

    Layout (inferred from the parsing below — NOTE(review): confirm with
    the data generator): atom count, an integer label, ``na`` atom lines
    of (symbol, x, y, z, partial charge, atom-id), then ``na`` neighbor
    lines of (?, atom-id, neighbor-count, (neighbor-id, bond-order)...).
    """
    with open(xyzfile, 'r') as f:
        # Number of atoms
        na = int(f.readline())
        label = int(f.readline())
        g = nx.Graph()
        # Atom properties
        atom_property = []
        index_hash = {}
        for i in range(na):
            a_prop = f.readline()
            # Normalize Fortran-style "*^" exponents to "e".
            a_prop = a_prop.replace('.*^', 'e')
            a_prop = a_prop.replace('*^', 'e')
            a_prop = a_prop.split()
            atom_property.append(a_prop[:-1])
            # Map the file's atom id (last column) to our node index.
            index_hash[a_prop[-1]] = i
        # Add nodes
        for i in range(na):
            g.add_node(i,
                a_symbol=atom_property[i][0],
                coord=np.array(atom_property[i][1:4]).astype(np.float),
                pc=float(atom_property[i][4]))
        # Add edges
        for i in range(na):
            e_prop = f.readline()
            e_prop = e_prop.replace('.*^', 'e')
            e_prop = e_prop.replace('*^', 'e')
            e_prop = e_prop.split()
            atom_i = index_hash[e_prop[1]]
            num_neighbor = int(e_prop[2])
            for j in range(num_neighbor):
                try:
                    # Neighbor entries come as (id, bond-order) pairs.
                    atom_j = index_hash[e_prop[2*j+3]]
                    g.add_edge(atom_i, atom_j,
                        bo=float(e_prop[2*j+4]),
                        distance=np.linalg.norm(g.node[atom_i]['coord'] - g.node[atom_j]['coord']))
                except KeyError:
                    # Neighbor id not present in this frame; skip the bond.
                    continue
    h = _mol_nodes(g)
    g, e = _mol_edges(g)
    return g, h, e, label
def _mol_nodes(g):
h = []
for n, d in g.nodes(data=True):
h_t = []
h_t += [int(d['a_symbol'] == x) for x in ['Mo', 'S']]
h_t.append(d['pc'])
h.append(h_t)
return h
def _mol_edges(g):
    """Return (dense adjacency matrix, edge feature tensor).

    Each edge carries [bond order, distance]; the tensor is symmetric in
    its first two axes.
    """
    features = {}
    for u, v, data in g.edges(data=True):
        features[(u, v)] = [data['bo'], data['distance']]
    adjacency = nx.to_numpy_matrix(g)
    depth = len(list(features.values())[0])
    e = np.zeros((adjacency.shape[0], adjacency.shape[1], depth))
    for (u, v), vec in features.items():
        e[u, v, :] = vec
        e[v, u, :] = vec
    return adjacency, e
if __name__ == '__main__':
    # Smoke test: decode a single QM9 file given on the command line.
    import argparse
    arg_parser = argparse.ArgumentParser(description='Read a single XYZ file as input')
    arg_parser.add_argument('--path', '-p', nargs=1, help='Specify the path of XYZ file')
    opts = arg_parser.parse_args()
    g, h, e, l = xyz_graph_decoder(opts.path[0])
    print("Adjacency matrix: \n", g)
    print("Node embedding: \n", h)
    print("Edge: \n", e)
    print("Label: \n", l)
|
# Recognized AST operation names (JavaScript parser node types).
# Fix: use a set literal instead of set(tuple) and drop the duplicate
# entries the original carried ("NULL", "CATCH", "DEFAULT") — a set
# collapses them anyway, so membership behavior is unchanged.
ops = {
    "ARRAY-INIT",
    "NULL",
    "ASSIGN",
    "UPDATE",
    "BLOCK",
    "BREAK",
    "CONTINUE",
    "CALL",
    "CASE",
    "CATCH",
    "GUARDED-CATCH",
    "COMMA",
    "DEBUGGER",
    "DEFAULT",
    "DELETE",
    "TYPEOF",
    "NEW",
    "UNARY_MINUS",
    "NOT",
    "VOID",
    "BITWISE_NOT",
    "UNARY_PLUS",
    "DO",
    "DO-WHILE",
    "DOT",
    "ATTRIBUTE",
    "FUNCTION",
    "DEF-FUNCTION",
    "FOR",
    "FOR_IN",
    "GROUP",
    "HOOK",
    "TERNARY",
    "IDENTIFIER",
    "INITVAR",
    "IF",
    "IF-ELSE",
    "POST-INCREMENT",
    "POST-DECREMENT",
    "INDEX",
    "ARRAY-INDEX",
    "LABEL",
    "LABELED-STATEMENT",
    "LIST",
    "NEW_WITH_ARGS",
    "NUMBER",
    "TRUE",
    "FALSE",
    "THIS",
    "OBJECT_INIT",
    "PLUS",
    "LT",
    "EQ",
    "AND",
    "OR",
    "MINUS",
    "MUL",
    "LE",
    "NE",
    "STRICT_EQ",
    "DIV",
    "GE",
    "INSTANCEOF",
    "IN",
    "GT",
    "BITWISE_OR",
    "BITWISE_AND",
    "BITWISE_XOR",
    "STRICT_NE",
    "LSH",
    "RSH",
    "URSH",
    "MOD",
    "PROPERTY_INIT",
    "REGEXP",
    "RETURN",
    "SCRIPT",
    "SEMICOLON",
    "STRING",
    "SWITCH",
    "THROW",
    "TRY",
    "VAR",
    "CONST",
    "WITH",
    "WHILE",
}
|
# Write a function that takes in an array of unique integers and returns
# an array of all permutations of those integers in no particular order
def getPermutations(array):
    """Return every permutation of *array* (order unspecified)."""
    results = []
    permutationsHelper(array, [], results)
    return results
def permutationsHelper(array, currentPermutation, permutations):
    """Recursively extend currentPermutation with the elements left in array."""
    if len(array) == 0:
        # A complete permutation — but skip the initial empty one.
        if len(currentPermutation) > 0:
            permutations.append(currentPermutation)
        return
    for idx, value in enumerate(array):
        remaining = array[:idx] + array[idx + 1:]
        permutationsHelper(remaining, currentPermutation + [value], permutations)
# -*- coding: utf-8 -*-
import os
import re
from functions import *
# Path-level predicates (isNotNone comes from `functions`).
# NOTE(review): this module-level pattern uses "_D4D" while the
# ImageFile.is_deconvoluted property below uses "_D3D" — possible typo;
# confirm which suffix the deconvolution tool actually emits.
is_shifted = lambda path: isNotNone(re.match(r'^.*\.zs$', path))
is_deconvoluted = lambda path: isNotNone(re.match(r'^.*\.dv_decon.*$|^.*_D4D\.dv.*$', path))
class ImageFile:
    """Lazily-derived metadata about a microscopy image path."""

    def __init__(self, path):
        self._path = path
        # Caches filled on first property access.
        self._basename = None
        self._origin_name = None
        self._shifted = None
        self._deconvoluted = None

    @property
    def basename(self):
        """File name without its directory, computed once."""
        if self._basename is None:
            self._basename = os.path.basename(self._path)
        return self._basename

    @property
    def is_shifted(self):
        """True when the name carries a ``.zs`` suffix."""
        if self._shifted is None:
            self._shifted = re.match(r'^.*\.zs$', self.basename) is not None
        return self._shifted

    @property
    def is_deconvoluted(self):
        """True for ``.dv_decon`` or ``_D3D.dv`` style names."""
        if self._deconvoluted is None:
            matched = re.match(r'^.*\.dv_decon.*$|^.*_D3D\.dv.*$',
                               self.basename)
            self._deconvoluted = matched is not None
        return self._deconvoluted

    @property
    def origin_name(self):
        """Acquisition file name with shift/deconvolution suffixes stripped."""
        if self._origin_name is None:
            name = self.basename
            if self.is_shifted:
                # Drop the trailing ".zs".
                name = name[:-3]
            name = re.sub(r'^(.*)\.dv_decon$', r'\1.dv', name)
            name = re.sub(r'^(.*)_D3D\.dv$', r'\1.dv', name)
            self._origin_name = name
        return self._origin_name
|
###########################################################################
# TASK 1
# One value of each of several built-in types.
# BUG FIX: the original ended with print(type(type_list[6])) — the list
# has only six elements (indices 0-5), so that line raised IndexError.
type_list = [int(1), str("python"), bool(None), float(0.35), {0, 1, 2, 3}, tuple()]
print(type_list)
print(type(type_list))
# Print the type of every element (covers the former per-index prints).
for item in type_list:
    print(type(item))
###########################################################################
|
import os
import tempfile
from subprocess import Popen
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
import barcode
from barcode.writer import ImageWriter
tmp_path = 'BarcodeGenerator.png'
def generate_barcode(event):
    """Generate a barcode image from the UI fields into a temp file.

    Bound to <Return> on the text entry. Bug fix: the empty-input check
    used ``len(barcode_txt) is 0`` — identity comparison against an int
    literal, which is implementation-dependent; replaced with a plain
    truthiness test.
    """
    global tmp_path
    delete_temp_image()
    barcode_txt = ent_barcode_txt.get()
    code_type = ent_code_type.get()
    if not barcode_txt:
        notify('Nothing to do')
        return
    print(f'Code type: {code_type}')
    print(f'Text to generate: {barcode_txt}')
    b_class = barcode.get_barcode_class(code_type)
    iw = ImageWriter()
    iw.set_options({'dpi': 140})
    try:
        bar = b_class(str(barcode_txt), writer=iw)
        notify('Format success')
    except (barcode.errors.WrongCountryCodeError,
            barcode.errors.BarcodeError,
            barcode.errors.BarcodeNotFoundError,
            barcode.errors.IllegalCharacterError,
            barcode.errors.NumberOfDigitsError,
            ValueError) as e:
        # Show the library's validation message in the status label.
        return notify(str(e))
    # bar.save returns the final path including the extension.
    path = os.path.join(tempfile.gettempdir(), barcode_txt)
    tmp_path = bar.save(path, text=barcode_txt)
    print(f'temporary image: {tmp_path}')
def delete_temp_image():
    """Remove the last generated temp image, unless it is the bundled default."""
    global tmp_path
    if tmp_path != 'BarcodeGenerator.png':
        os.remove(tmp_path)
        # Fall back to the placeholder shipped with the app.
        tmp_path = 'BarcodeGenerator.png'
def preview_image():
    """Refresh the preview label from tmp_path, rescheduling every 200 ms."""
    image = PhotoImage(file=tmp_path)
    pnl_image.config(image=image)
    # Keep a reference on the widget so Tk does not garbage-collect it.
    pnl_image.image = image
    pnl_image.after(200, preview_image)
def open_image(event=None):
    """Open the current image in a platform viewer (MS Paint / Shotwell)."""
    if os.name == 'nt':
        try:
            Popen(['mspaint', tmp_path])
        except (FileNotFoundError, NameError):
            notify('MS Paint not found')
    elif os.name == 'posix':
        # for Ubuntu maybe others
        try:
            Popen(['shotwell', tmp_path])
        except (FileNotFoundError, NameError):
            notify('Shotwell not found')
def save_image(event=None):
    """Prompt for a destination and copy the current temp image there.

    Bug fix: the source image was opened with ``open(...).read()`` and
    the handle never closed; it is now read inside a context manager.
    """
    file = filedialog.asksaveasfile(
        mode="wb", title="Save Image", defaultextension=".png",
        initialfile=tmp_path.split(os.sep)[-1],
        filetypes=(("png files", "*.png"), ("all files", "*.*")))
    if file:
        try:
            with open(tmp_path, 'rb') as src:
                file.write(src.read())
            file.close()
            notify('Image saved')
        except AttributeError as e:
            notify(str(e))
def notify(string='Press Enter to generate barcode'):
    """Show *string* in the status label; defaults to the usage hint."""
    lbl_notify.configure(text=string)
# --- Main window and widget layout (fixed-pixel placement) ---
master = Tk()
lbl_barcode_txt = Label(master, text='Text to Generate')
lbl_barcode_txt.place(x=100, y=10)
ent_barcode_txt = Entry(master)
ent_barcode_txt.bind('<Return>', generate_barcode)
ent_barcode_txt.place(x=80, y=40)
ent_barcode_txt.focus()
lbl_code_type = Label(master, text='Format')
lbl_code_type.place(x=360, y=10)
# Combobox of every barcode format the python-barcode package provides.
ent_code_type = ttk.Combobox(master, values=barcode.PROVIDED_BARCODES)
ent_code_type.config(width=12)
ent_code_type.place(x=330, y=40)
ent_code_type.current(0)
lbl_preview = Label(master, text='Preview')
lbl_preview.place(x=240, y=180)
# Preview panel, initially showing the bundled placeholder image.
img = PhotoImage(file=tmp_path)
pnl_image = Label(master, image=img)
pnl_image.place(x=220, y=130)
lbl_notify = Label(master, text='None')
lbl_notify.place(x=5, y=290)
btn_open = Button(master, text='Open', command=open_image)
btn_open.bind('<Return>', open_image)
btn_open.config(width=10)
btn_open.place(x=60, y=120)
btn_save = Button(master, text='Save', command=save_image)
btn_save.bind('<Return>', save_image)
btn_save.config(width=10)
btn_save.place(x=60, y=190)
master.iconbitmap(r'icon.ico')
master.wm_title("BarcodeGenerator")
master.geometry("540x320")
# Start the 200 ms preview refresh loop and show the usage hint.
preview_image()
notify()
master.mainloop()
# Clean up the last generated temp file once the window is closed.
delete_temp_image()
|
#!/usr/bin/env python
# -- coding: utf-8 --#
# @Time : 2020/4/14 13:01
# @Author : Aries
# @Site :
# @File : numbers.py
# @Software: PyCharm
'''
一些数字相关的算法
'''
from typing import List
import sys
def merge(intervals: List[List[int]]) -> List[List[int]]:
    """Merge all overlapping intervals.

    Example: [[1,3],[2,6],[8,10],[15,18]] -> [[1,6],[8,10],[15,18]]
    ([1,3] and [2,6] overlap, so they collapse into [1,6]).
    """
    intervals.sort(key=lambda iv: iv[0])
    result: List[List[int]] = []
    for current in intervals:
        if result and result[-1][1] >= current[0]:
            # Overlaps the previous interval: extend its right edge.
            result[-1][1] = max(result[-1][1], current[1])
        else:
            # Disjoint from everything so far: start a new interval.
            result.append(current)
    return result
def numberOfSubarrays(nums: List[int], k: int) -> int:
    """Count "nice" subarrays: contiguous runs with exactly k odd numbers.

    Records the index of every odd element (plus sentinels at both
    ends); for each window of k consecutive odds, the number of valid
    subarrays is (left-extension choices) * (right-extension choices).

    Example: nums=[2,2,2,1,2,2,1,2,2,2], k=2 -> 16.
    """
    if len(nums) < k:
        return 0
    odd_positions = [-1]
    odd_positions.extend(i for i, v in enumerate(nums) if v % 2 == 1)
    odd_positions.append(len(nums))
    if len(odd_positions) < k:
        return 0
    total = 0
    for i in range(1, len(odd_positions) - k):
        left = odd_positions[i] - odd_positions[i - 1]
        right = odd_positions[i + k] - odd_positions[i + k - 1]
        total += left * right
    return total
def numberOfSubarrays2(nums: List[int], k: int) -> int:
    """Count "nice" subarrays (exactly k odd numbers) via prefix counts.

    cnt[m] is the number of prefixes seen so far containing exactly m
    odd values; every prefix with ``odd`` odds pairs with each earlier
    prefix holding ``odd - k`` odds to form one valid subarray.

    Bug fix: removed a leftover debug ``print(cnt)`` that wrote the
    whole counter array to stdout on every call.
    """
    if len(nums) < k:
        return 0
    cnt = [0] * (len(nums) + 1)
    cnt[0] = 1  # the empty prefix has zero odds
    odd, ans = 0, 0
    for value in nums:
        if value % 2 == 1:
            odd += 1
        if odd >= k:
            ans += cnt[odd - k]
        cnt[odd] += 1
    return ans
def maxSubArray(nums: List[int]) -> int:
    """Maximum sum of a non-empty contiguous subarray (Kadane's DP).

    Example: [-2,1,-3,4,-1,2,1,-5,4] -> 6 (subarray [4,-1,2,1]).
    Returns 0 for an empty input, matching the original contract.
    """
    if not nums:
        return 0
    # best_here: best sum of a subarray ending at the current index.
    best_here = nums[0]
    best = nums[0]
    for value in nums[1:]:
        best_here = max(best_here + value, value)
        best = max(best, best_here)
    return best
def maxSubArray2(nums: List[int]) -> int:
    """Maximum contiguous-subarray sum, greedy running-total variant.

    Keeps a running sum and resets it to zero whenever it goes negative,
    since a negative prefix can never help a later subarray.
    Returns 0 for an empty input, matching the original contract.
    """
    if not nums:
        return 0
    best = nums[0]
    running = 0
    for value in nums:
        running += value
        if running > best:
            best = running
        if running < 0:
            running = 0
    return best
def maxSubArray3(nums: List[int]) -> int:
    """Maximum contiguous-subarray sum via divide and conquer.

    For every segment four values are tracked: the best prefix sum, the
    best suffix sum, the best subarray sum anywhere, and the segment
    total. A parent's answer is the max of both children's answers and
    the sum straddling the split (left suffix + right prefix).
    Returns 0 for an empty input, matching the original contract.
    """
    def solve(lo: int, hi: int):
        if lo == hi:
            v = nums[lo]
            return v, v, v, v
        mid = (lo + hi) // 2
        lpre, lsuf, lbest, ltot = solve(lo, mid)
        rpre, rsuf, rbest, rtot = solve(mid + 1, hi)
        pre = max(lpre, ltot + rpre)
        suf = max(rsuf, rtot + lsuf)
        best = max(lbest, rbest, lsuf + rpre)
        return pre, suf, best, ltot + rtot

    if not nums:
        return 0
    return solve(0, len(nums) - 1)[2]
def add_two_nums(nums1: List[int], nums2: List[int]) -> List[int]:
    """Add two numbers given as digit lists, most significant digit first.

    Example: [9,9] + [1] -> [1,0,0]. An empty operand returns the other
    list unchanged (same object), matching the original contract.
    """
    if not nums1:
        return nums2
    if not nums2:
        return nums1

    def split_digit(total):
        # Decompose a column sum into (digit, carry).
        return (total % 10, 1) if total > 9 else (total, 0)

    digits = []
    carry = 0
    pos = 1  # 1-based offset from the least significant digit
    while pos <= len(nums1) and pos <= len(nums2):
        digit, carry = split_digit(carry + nums1[-pos] + nums2[-pos])
        digits.append(digit)
        pos += 1
    # Drain whichever operand still has digits left.
    while pos <= len(nums1):
        digit, carry = split_digit(carry + nums1[-pos])
        digits.append(digit)
        pos += 1
    while pos <= len(nums2):
        digit, carry = split_digit(carry + nums2[-pos])
        digits.append(digit)
        pos += 1
    if carry == 1:
        digits.append(1)
    digits.reverse()
    return digits
def myPow(x: float, n: int) -> float:
    """Compute x**n by recursive squaring, O(log |n|) multiplications."""
    def power(exp):
        if exp == 0:
            return 1.0
        half = power(exp // 2)
        squared = half * half
        return squared * x if exp % 2 else squared

    if x == 0:
        return 0
    # Negative exponents: invert the positive-power result.
    return power(n) if n > 0 else 1.0 / power(-n)
def myPow2(x: float, n: int) -> float:
    """Compute x**n iteratively via binary exponentiation, O(log |n|).

    The exponent's binary digits are consumed from the least significant
    end: whenever the current bit is set, the running contribution is
    folded into the answer, and the contribution is squared each step.
    """
    def power(exp):
        result = 1.0
        contribution = x
        while exp > 0:
            if exp % 2 == 1:
                result *= contribution
            contribution *= contribution
            exp //= 2
        return result

    if x == 0:
        return 0
    return power(n) if n > 0 else 1.0 / power(-n)
def productExceptSelf(nums: List[int]) -> List[int]:
    """Return output with output[i] = product of all nums except nums[i].

    Two sweeps: the forward pass stores prefix products directly in the
    answer; the backward pass folds in suffix products with a single
    running scalar. Example: [1,2,3,4] -> [24,12,8,6].
    """
    size = len(nums)
    answer = [1] * size
    for i in range(1, size):
        answer[i] = answer[i - 1] * nums[i - 1]
    suffix = 1
    for i in range(size - 2, -1, -1):
        suffix *= nums[i + 1]
        answer[i] *= suffix
    return answer
def productExceptSelf2(nums: List[int]) -> List[int]:
    """Product of all elements except self, explicit prefix/suffix arrays.

    prefix[i] holds the product of everything left of i, suffix[i] of
    everything right of i; both are filled in one loop from opposite
    ends. Example: [1,2,3,4] -> [24,12,8,6].
    """
    size = len(nums)
    prefix = [1] * size
    suffix = [1] * size
    for i in range(1, size):
        prefix[i] = prefix[i - 1] * nums[i - 1]
        suffix[size - i - 1] = suffix[size - i] * nums[size - i]
    return [prefix[i] * suffix[i] for i in range(size)]
def maxProduct(nums: List[int]) -> int:
    """Maximum product over all non-empty contiguous subarrays.

    Tracks both the maximum and minimum product ending at each index:
    a negative element can flip the running minimum into a new maximum.
    """
    if len(nums) == 1:
        return nums[0]
    cur_min = cur_max = best = nums[0]
    for value in nums[1:]:
        candidates = (value, cur_min * value, cur_max * value)
        cur_max = max(candidates)
        cur_min = min(candidates)
        best = max(best, cur_max)
    return best
if __name__ == '__main__':
    # print(maxSubArray3([-2,1,-3,4,-1,2,1,-5,4]))
    # Smoke test: a single-element array takes maxProduct's short-circuit path.
    print(maxProduct([-2]))
import redis
# Connection pool: keeps connections to the server alive; once
# max_connections are checked out, further requests wait for a free one.
# decode_responses=True makes the client return str instead of bytes.
pool = redis.ConnectionPool(host="127.0.0.1", port=6379, decode_responses=True, max_connections=10)
conn = redis.Redis(connection_pool=pool)
# Fetch the value stored under key "n1" (None if the key does not exist).
ret = conn.get("n1")
print(ret)
|
from django.db import models
# Create your models here.
''' Models for questions '''
class Question(models.Model):
    """A poll question together with its publication timestamp."""
    # user = models.ForeignKey(User)  # kept from the original, still disabled
    question_text = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def __str__(self):
        # Human-readable form shown in the admin and the shell.
        return self.question_text
class Choice(models.Model):
    """One selectable answer for a Question, with a running vote tally."""
    # Fix: ForeignKey requires an explicit on_delete in Django >= 2.0;
    # CASCADE matches the old implicit default, so behavior is unchanged.
    question = models.ForeignKey(Question, on_delete=models.CASCADE)
    choice_text = models.CharField(max_length=200)
    votes = models.IntegerField(default=0)

    def __str__(self):
        return self.choice_text
'''
class User(models.Model):
    user_name=models.CharField(max_length=200)
    password=models.CharField(max_length=200)
'''
|
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
# Long description is taken verbatim from the README.
with open('README.md') as f:
    readme = f.read()

setup(
    name='plasma',
    description='Plasma MVP',
    long_description=readme,
    author='David Knott',
    author_email='',
    # NOTE(review): `license=license` passes the *builtin* license object,
    # not a license string — confirm the intended SPDX identifier and
    # replace this with e.g. license='MIT'.
    license=license,
    # Fix: exclude must be an iterable of patterns; ('tests') is just the
    # string 'tests', which setuptools iterates character by character,
    # so the tests package was never actually excluded.
    packages=find_packages(exclude=('tests',)),
    include_package_data=True,
    install_requires=[
        'ethereum==2.3.0',
        'web3==4.8.2',
        'werkzeug==0.13',
        'json-rpc==1.10.8',
        'plyvel==1.0.4',
        'py-solc',
        'click==6.7',
        'pytest',
        'python-dotenv',
        'eth-abi==0.5.0',
        'eth-hash==0.1.0',
        'eth-keyfile==0.4.1',
        'eth-keys==0.1.0b4',
        'eth-tester==0.1.0b15',
        'eth-utils==0.7.3'
    ],
    entry_points={
        'console_scripts': ["omg=plasma.cli:cli"],
    }
)
|
from flask import Blueprint, render_template, session, redirect, url_for, \
request, flash, g, jsonify, abort
from gui.utils import do_create_mag, do_get_paginated_mag, do_get_mag, do_logout
import json
mod = Blueprint('mags', __name__)
@mod.route('/mags/')
def index():
    """Landing page for shops; requires an authenticated session."""
    if g.logged_in:
        return render_template("/mags/index.html")
    return redirect(url_for('users.login'))
@mod.route('/mags/get/<mag_id>', methods=['GET', 'POST'])
def get(mag_id):
    """Show one shop (mag) with its cashier and seat layout.

    Bug fix: the error branches called ``redirect(url, "error")`` — the
    second positional argument of ``redirect`` is the HTTP status code,
    so passing "error" raised a TypeError at runtime. The category
    string belongs only to ``flash``.
    """
    if not g.logged_in:
        return redirect(url_for('users.login'))
    if request.method == 'GET':
        result = do_get_mag(mag_id, request.cookies)
        if result.success:
            if result.response.status_code == 200:
                # The backend answers with two JSON documents separated by
                # a newline: the mag itself, then its cashier.
                tmp = str(result.response.content)
                list_sm = tmp.split('\\n')
                mag = list_sm[0]
                # Strip the leading b' of the bytes repr.
                mag = mag[2:]
                kassir = list_sm[1]
                # Strip the trailing quote of the bytes repr.
                kassir = kassir[0:-1]
                mag_d = json.loads(mag)
                datetime = str(mag_d["datetime"]).split("_")
                date = datetime[0]
                time = datetime[1]
                dictionary = {"date":date, "time":time}
                ar = mag_d["seats"]
                kassir_d = json.loads(kassir)
                return render_template("/mags/get.html", mag=mag_d, kassir=kassir_d, seats = ar,
                                       datetime = dictionary, number_of_seats = len(ar)+1)
            elif result.response.status_code == 403:
                do_logout()
                return redirect(url_for('users.login'))
            else:
                flash('Ошибка. Кассира магазина не существует.', "error")
                return redirect(url_for('mags.get_all'))
        else:
            flash(result.error, "error")
            return redirect(url_for('mags.get_all'))
@mod.route('/mags/create', methods=['GET', 'POST'])
def create():
    """Create a new showcase (mag) for a cashier.

    Fixes: the missing-time flash said "Время задано" ("time is set")
    instead of "Время не задано"; the final error branch passed the
    flash category to ``redirect`` and used 'kassirs/get_all' (slash)
    instead of the endpoint name 'kassirs.get_all'; the bare ``except``
    now catches only the errors ``int(...)`` / the form lookup can
    raise; the uncategorized ``flash(result.error)`` gets the "error"
    category used everywhere else.
    """
    if not g.logged_in:
        return redirect(url_for('users.login'))
    if request.method == 'GET':
        if 'kassir_id' in request.args:
            kassir_id = request.args['kassir_id']
            return render_template("/mags/create.html", kassir_id=kassir_id)
    else:
        failed = False
        if 'number_of_seats' not in request.form or request.form['number_of_seats']=='':
            flash('Количество товара не задано', "error")
            failed = True
        try:
            number_of_seats = int(request.form['number_of_seats'])
        except (KeyError, ValueError):
            # KeyError covers a missing form field (BadRequestKeyError),
            # ValueError a non-numeric value.
            flash('Количество товара выражается числом', 'error')
            return render_template("/mags/create.html", kassir_id=request.args['kassir_id'])
        if 'date' not in request.form or request.form['date']=='':
            flash('Дата не задана', "error")
            failed = True
        if 'time' not in request.form or request.form['time']=='':
            flash('Время не задано', "error")
            failed = True
        date_time = request.form['date'] + '_' + request.form['time']
        if failed:
            return render_template("/mags/create.html", kassir_id=request.args['kassir_id'])
        result = do_create_mag(request.args['kassir_id'], number_of_seats, date_time, request.cookies)
        if result.success:
            if result.response.status_code == 201:
                flash('Витрина в магазине успешно создана', "info")
                response = redirect('kassirs/get_all')
                return response
            elif result.response.status_code == 403:
                do_logout()
                return redirect(url_for('users.login'))
            else:
                st = result.response.content.decode('utf-8')
                if st=='':
                    st = str(result.response.content)
                flash(st, "error")
                return redirect(url_for('kassirs.get_all'))
        else:
            flash(result.error, "error")
            return redirect(url_for('kassirs.get_all'))
@mod.route('/mags/get_all')
def get_all():
    """Paginated listing of all shops (mags).

    NOTE(review): the parsing below assumes the backend returns one JSON
    document per mag separated by newlines, followed by a final JSON
    object with pagination flags — confirm against the backend
    serializer before changing the framing logic.
    """
    if not g.logged_in:
        return redirect(url_for('users.login'))
    if request.method == 'GET':
        if 'page' not in request.args:
            return redirect(url_for('mags.get_all', page=1))
        page = request.args.get('page', 1, type=int)
        result = do_get_paginated_mag(page, 10, request.cookies)
        if result.success:
            if result.response.status_code == 200:
                mags_obj = result.response.content
                # Split the bytes repr on the literal "\n" separators.
                mags_str = (str(mags_obj)).split('\\n')
                n = len(mags_str)
                # Strip the b' prefix and trailing quote of the repr.
                mags_str[0] = mags_str[0][2:]
                mags_str[n-1] = mags_str[n-1][0:-1]
                mags = []
                # The last chunk is the pagination descriptor.
                dictr = json.loads(mags_str[n-1])
                mags_str.remove(mags_str[n-1])
                for mag in mags_str:
                    if mag != "":
                        mag1 = json.loads(bytes(mag, 'utf8'))
                        ar = mag1["seats"]
                        number_of_seats = len(ar)
                        # Truthy entries in "seats" are free seats.
                        number_of_free_seats = 0
                        for item in ar:
                            if item:
                                number_of_free_seats = number_of_free_seats+1
                        # "datetime" is encoded as "<date>_<time>".
                        datetime = str(mag1["datetime"]).split("_")
                        date = datetime[0]
                        time = datetime[1]
                        dictionary = {"mag_id": mag1["mag_id"], "kassir_id": mag1["kassir_id"],
                        "number_of_seats": number_of_seats, "number_of_free_seats": number_of_free_seats,
                        "date":date, "time":time}
                        mags.append(dictionary)
                return render_template("/mags/get_all.html", mags=mags, prev_url=dictr['is_prev_page'],
                next_url=dictr['is_next_page'], next_page=page+1, prev_page=page-1)
            elif result.response.status_code == 403:
                do_logout()
                return redirect(url_for('users.login'))
            else:
                flash("Магазины не найдены", "error")
                return redirect(url_for('mags.index'))
        else:
            flash(result.error, "error")
            return redirect(url_for('mags.index'))
import pickle
import math
import ROOT
#What to read
# Python 2 script (uses print statements): plots L1 trigger rate vs pT
# threshold for several detector-aging scenarios using ROOT.
pileups = [200]
ss = ["Nu_PU200_aged3000","Nu_PU200_aged1000","Nu_PU200"]
samples = {"Nu_PU200_aged3000":ROOT.kBlue,"Nu_PU200_aged1000":ROOT.kRed,"Nu_PU200":ROOT.kBlack}
tags = {"Nu_PU200_aged1000":"1000 fb^{-1} Aging","Nu_PU200_aged3000":"3000 fb^{-1} Aging","Nu_PU200":"No Aging"}
qualities = ['_q12']
ROOT.gROOT.SetBatch(True)
ROOT.gROOT.ProcessLine('.L PlotTemplate.C+')
# Pre-tallied fired/total counts per sample, pickled by an earlier job.
with open('tdr_rates_aged_all.pickle', 'rb') as handle:
    bb = pickle.load(handle)
outFile = ROOT.TFile("tdr_rates.root","RECREATE")
outFile.cd()
#All pT bin edges again
bins = [0.,1.,2.,3.,4.,4.5,5.,6.,7.,8.,10.,12.,14.,16.,18.,20.,25.,30.,35.,40.,45.,50.,60.,70.,80.,90.,100.,120.,140.,200.,250.]
# Bin centers for graph points.
cbins = []
for i in range(len(bins)-1):
    cbins.append((bins[i] + bins[i+1])/2.)
#Filling scheme
# 2760 colliding bunches * 11.246 kHz revolution frequency -> rate in kHz.
coeff = 2760*11.246
hp = ROOT.TH1F('hp','',1,-0.5,0.5)
ht = ROOT.TH1F('ht','',1,-0.5,0.5)
for puValue in pileups:
    gr = {}
    #Canvas created with official template
    canvas = ROOT.CreateCanvas('name',True,True)
    leg = ROOT.TLegend(0.55,0.65,0.9,0.48)
    drawn = False
    for s in ss:
        for q in qualities:
            # NOTE(review): puHandle is computed but never used below.
            puHandle = ''
            if puValue == 0: puHandle ='NOPU'
            else: puHandle = 'PU' + str(puValue)
            gr[s] = ROOT.TGraphAsymmErrors(len(bins)-1)
            for i, b in enumerate(cbins):
                #This annoying way is set to run a Clopper Pearson estimation of efficiency
                hp.SetBinContent(1, bb[s]['fired'+str(bins[i])+ q] )
                ht.SetBinContent(1, bb[s]['total'] )
                eff = ROOT.TEfficiency(hp, ht)
                val = coeff * eff.GetEfficiency(1)
                val_up = coeff * eff.GetEfficiencyErrorUp(1)
                val_dn = coeff * eff.GetEfficiencyErrorLow(1)
                gr[s].SetPoint(i, b, val)
                gr[s].SetPointEYlow(i, val_dn)
                gr[s].SetPointEYhigh(i, val_up)
                # Horizontal error bars span the pT bin.
                gr[s].SetPointEXlow(i, b - bins[i])
                gr[s].SetPointEXhigh(i, bins[i+1] - b)
                print s, b, val
            #Set drawing options and draw the graph
            #canvas.SetLogx(True)
            gr[s].SetMarkerColor(samples[s])
            gr[s].SetLineColor(samples[s])
            gr[s].GetXaxis().SetLimits(0,100)
            gr[s].GetXaxis().SetTitle("L1 p_{T}^{cut} [GeV]")
            gr[s].GetYaxis().SetTitle("Rate [kHz]")
            gr[s].SetTitle("")
            if not(drawn):
                gr[s].Draw("AP")
                drawn = True
            else:
                gr[s].Draw("P same")
            leg.AddEntry(gr[s],tags[s],'l')
    leg.Draw("same")
    ROOT.DrawPrelimLabel(canvas)
    ROOT.DrawLumiLabel(canvas,'14 TeV, %i PU'%puValue)
    # NOTE(review): `q` leaks from the inner loop here; with a single
    # entry in `qualities` this is harmless, but with several only the
    # last quality would name the saved canvas.
    ROOT.SaveCanvas(canvas,'tdr_pt_rate'+ q + '_PU_log' + str(puValue))
outFile.Close()
|
import sys
from datetime import date, timedelta
def main():
    """
    Emit an HTML semester-schedule table to stdout.

    Args (positional, via sys.argv):
        year: year of start date
        month: month of start date
        day: day of start date
        instruction_days: number of weekly meetings (2 or 3)
        - debug mode: any additional parameter (optional)
    """
    year = int(sys.argv[1])
    month = int(sys.argv[2])
    day = int(sys.argv[3])
    instruction_days = int(sys.argv[4])
    WEEKS = 16 #weeks in a semester
    mydate = date(year, month, day)
    day_count = WEEKS * instruction_days + 1 #last day for final exam
    html = get_header(instruction_days)
    counter = 0
    week = 1
    for i in range(day_count):
        # First meeting of each week opens a row carrying the week number
        # (rowspan covers the week's remaining meetings).
        # NOTE: format arg {0} (instruction_days) is intentionally unused;
        # {1} selects the week number.
        if instruction_days == 2:
            if (i % 2 == 0):
                html += '\n\t\t<tr class="week">'
                html += '\n\t\t\t<td rowspan="2">{1}'.format(instruction_days, week)
            else:
                html += '\n\t\t<tr>'
        elif instruction_days == 3:
            if (i % 3 == 0):
                html += '\n\t\t<tr class="week">'
                html += '\n\t\t\t<td rowspan="3">{1}'.format(instruction_days, week)
            else:
                html += '\n\t\t<tr>'
        html += '\n\t\t\t<td>' + mydate.strftime('%a, %m/%d')
        # Three empty cells: Readings, Sessions & Labs, Assignments.
        for j in range(3):
            html += '\n\t\t\t<td>'
        # Advance to the next meeting date: Mon/Wed(+2,+5) for 2-day weeks,
        # Mon/Wed/Fri(+2,+2,+3) for 3-day weeks; bump the week counter when
        # wrapping to the next week.
        if instruction_days == 2:
            if counter == 0:
                mydate += timedelta(2)
                counter += 1
            else:
                mydate += timedelta(5)
                counter = 0
                week += 1
        elif instruction_days == 3:
            if counter == 2:
                mydate += timedelta(3)
                counter = 0
                week += 1
            else:
                mydate += timedelta(2)
                counter += 1
    html += '\n</table>'
    print(html)
def get_header(instruction_days):
    """Build the opening <table> markup (plus the debug stylesheet link)."""
    pieces = []
    if len(sys.argv) == 6:
        # A fifth CLI argument switches on the test stylesheet.
        pieces.append('<link rel="stylesheet" href="testing.css">\n\n')
    pieces.append('<table border=1 class="schedule days-{0}">'.format(instruction_days))
    pieces.append('\n\t<thead>')
    pieces.append('\n\t\t<tr>')
    pieces.append('\n\t\t\t<th>Week')
    pieces.append('\n\t\t\t<th>Date')
    pieces.append('\n\t\t\t<th>Readings')
    pieces.append('\n\t\t\t<th>Sessions & Labs')
    pieces.append('\n\t\t\t<th>Assignments')
    pieces.append('\n\t<tbody>')
    return ''.join(pieces)
# Runs immediately on import; expects the four CLI args described in main().
main()
|
from .fact import Fact
class Rule:
    """An inference rule made of left-side tokens, a conclusion operator and
    right-side tokens. Equality and hashing use the canonical textual form.
    """

    def __init__(self, left_side, conclusion_op, right_side):
        self.left_side = left_side
        self.conclusion_op = conclusion_op
        self.right_side = right_side
        # Canonical textual form, shared by __str__/__repr__/__hash__/__eq__.
        tokens = self.left_side + [self.conclusion_op] + self.right_side
        self.full_rule = ' '.join(str(token) for token in tokens)

    def __str__(self):
        return self.full_rule

    def __repr__(self):
        return self.full_rule

    def __hash__(self):
        return hash(self.full_rule)

    def __eq__(self, other):
        return self.full_rule == other.full_rule

    def get_left_side_facts(self):
        """Return only the Fact tokens appearing on the left side."""
        return [token for token in self.left_side if type(token) is Fact]

    def get_right_side_facts(self):
        """Return only the Fact tokens appearing on the right side."""
        return [token for token in self.right_side if type(token) is Fact]
|
# coding:utf-8
#! /usr/bin/env python
from scapy.all import *
import random
import datetime
# Interfaces and addresses used to forge the ICMPv6 Neighbor Solicitations.
ifaceNames = ["enp0s31f6","docker0"]
linkSrcAddr = "fe80::437f:2137:3e16:b6ea"
# Destination is a solicited-node style multicast address (ff02::1:ff...).
linkDstAddr = "ff02::1:ff21:41f"
macSrcAddr = "8c:ec:4b:73:25:8d"
macOtherSrcAddr = "7c:76:35:de:0c:79"
# IPv6 multicast MAC (33:33:...) used as the Ethernet destination for DAD.
macMultiAddr = "33:33:ff:e4:89:00"
def send_dad_ns_pkt(ifaceName):
    # DAD (Duplicate Address Detection) probe: a Neighbor Solicitation sent
    # from the unspecified address "::" asking about linkSrcAddr.
    ether=Ether(src=macSrcAddr,dst=macMultiAddr)
    a=IPv6(src="::", dst=linkDstAddr)
    b=ICMPv6ND_NS(tgt=linkSrcAddr)
    print "send DAD NS packet target address:",linkSrcAddr
    sendp(ether/a/b,iface=ifaceName)
def send_ns_pkt(ifaceName):
    # Regular Neighbor Solicitation, forged as coming from a second MAC and
    # sourced from the (already configured) link-local address.
    ether=Ether(src=macOtherSrcAddr,dst=macSrcAddr)
    a=IPv6(src=linkSrcAddr, dst=linkDstAddr)
    b=ICMPv6ND_NS(tgt=linkSrcAddr)
    print "send NS packet target address:",linkSrcAddr
    sendp(ether/a/b,iface=ifaceName)
if __name__ == "__main__":
#for i in range(5) :
# send_dad_ns_pkt(ifaceNames[0])
send_dad_ns_pkt(ifaceNames[0])
while True:
send_ns_pkt(ifaceNames[0])
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 8 14:04:43 2017
@author: firojalam
"""
import optparse
import datetime
import aidrtokenize;
from gensim.models import Word2Vec
from nltk.corpus import stopwords
from gensim.similarities.docsim import WmdSimilarity
import warnings
import datetime
import optparse
import os, errno
stop_words = stopwords.words('english')
def graph_dist(tweetlist, model, outFile):
    """Write a dense similarity matrix: one line per tweet, each line being
    the row index followed by 1 - WMD(tweet_row, tweet_col) for every column.

    Bug fix: ``index`` was initialised to 0 but never incremented, so every
    row was written with index 0; the row number is now tracked by enumerate.
    """
    of = open(outFile, "w")
    model.init_sims(replace=True)  # Normalizes the vectors in the word2vec class.
    for index, tweetR in enumerate(tweetlist):
        rVec = aidrtokenize.tokenize(tweetR)
        rVec = [w for w in rVec if w not in stop_words]
        colVector = []
        for tweetC in tweetlist:
            cVec = aidrtokenize.tokenize(tweetC)
            cVec = [w for w in cVec if w not in stop_words]
            # Word Mover's Distance between the two tokenized tweets.
            distance = model.wmdistance(rVec, cVec)
            colVector.append(distance)
        vector = str(index) + " "
        for val in colVector:
            # Stored as a similarity (1 - distance).
            vector = vector + str(1 - val) + " "
        of.write(vector + "\n")
    of.close()
def graph_sim(tweetlist, model, outFile):
    """Write an adjacency list linking tweets whose WMD similarity >= 0.3."""
    print("Number of tweets to generate the graph: " + str(len(tweetlist)))
    out = open(outFile, "w")
    model.init_sims(replace=True)  # Normalizes the vectors in the word2vec class.
    # Tokenize every tweet and drop stopwords up front.
    tokenized = []
    for tweet in tweetlist:
        tokens = aidrtokenize.tokenize(tweet)
        tokenized.append([w for w in tokens if w not in stop_words])
    similarity_index = WmdSimilarity(tokenized, model, num_best=None)
    print("Writing into the file....")
    for row, similarities in enumerate(similarity_index):
        neighbours = ""
        for col, score in enumerate(similarities):
            if row != col and score >= 0.3:
                neighbours = neighbours + str(col) + " "
        out.write(str(row) + " " + neighbours.strip() + "\n")
    out.close()
if __name__ == '__main__':
    a = datetime.datetime.now().replace(microsecond=0)
    modelFile="/Users/firojalam/QCRI/w2v/GoogleNews-vectors-negative300.bin"
    # NOTE(review): Word2Vec.load_word2vec_format was removed in gensim >= 2;
    # newer gensim uses KeyedVectors.load_word2vec_format -- confirm version.
    model = Word2Vec.load_word2vec_format(modelFile, binary=True) #, binary=False
    # Small demo corpus for the similarity graph.
    sentList=[]
    sentList.append("disease affected people")
    sentList.append("Reports of affected people due to the disease")
    sentList.append("disease prevention")
    sentList.append("Questions or suggestions related to the prevention of disease or mention of a new prevention strategy")
    graph_sim(sentList,model,"test.graph.txt")
|
"""
Created by lgc on 2020/2/5 15:05.
微信公众号:泉头活水
"""
import pytest,os
import allure
from time import sleep
from Api.cloudparking_service import cloudparking_service
from Api.information_service.information import Information
from Api.sentry_service.carInOutHandle import CarInOutHandle
from common.BaseCase import BaseCase
from common.utils import YmlUtils
from common.Assert import Assertions
args_item = "send_data,expect"
test_data, case_desc = YmlUtils("/test_data/sentryDutyRoom/carInOutHandle/messageInOut.yml").getData
@pytest.mark.parametrize(args_item, test_data)
@allure.feature("岗亭收费处")
@allure.story('PC岗亭进出场消息处理')
class TestSentryMessage(BaseCase):
    """Sentry booth charging: car-entry and car-exit message handling."""

    def test_mockCarIn(self, sentryLogin, send_data, expect):
        """Simulate a car entering the car park."""
        response = cloudparking_service().mockCarInOut(
            send_data["carNum"], 0, send_data["StrictRule_inClientID"])
        Assertions().assert_in_text(response, expect["mockCarInMessage"])

    def test_checkMessageIn(self, sentryLogin, send_data, expect):
        """Register the entry and let the car through."""
        response = CarInOutHandle(sentryLogin).carInOutHandle(
            send_data['carNum'], send_data['carInHandleType'], send_data['carIn_jobId'])
        Assertions().assert_in_text(response['screen'], expect["checkCarInScreen"])
        Assertions().assert_in_text(response['voice'], expect["checkCarInVoice"])
        Assertions().assert_in_text(response['open_gate'], expect["checkCarInOpenGate"])

    def test_mockCarOut(self, send_data, expect):
        """Simulate a car leaving the car park."""
        response = cloudparking_service().mockCarInOut(
            send_data["carNum"], 1, send_data["StrictRule_outClientID"])
        Assertions().assert_in_text(response, expect["mockCarOutMessage"])

    def test_checkMessageOut(self, sentryLogin, send_data, expect):
        """Register the exit and let the car through."""
        response = CarInOutHandle(sentryLogin).carInOutHandle(
            send_data['carNum'], send_data['carOutHandleType'], send_data['carOut_jobId'])
        Assertions().assert_in_text(response['screen'], expect['checkCarOutScreen'])
        Assertions().assert_in_text(response['voice'], expect['checkCarOutVoice'])
        Assertions().assert_in_text(response['open_gate'], expect['checkCarOutOpenGate'])

    def test_carLeaveHistory(self, userLogin, send_data, expect):
        """Look up the departure record after the exit."""
        response = Information(userLogin).getCarLeaveHistory(
            send_data["parkName"], send_data["carNum"])
        Assertions().assert_in_text(response, expect["carLeaveHistoryMessage"])
|
import os
import json

# Merge per-video numbered skeleton files into one JSON list, re-numbering
# frame indices so they are globally unique across files.
#
# Bug fixes: the output file was opened at module start ('w+'), never used
# and never closed, and its handle was shadowed by the per-file `with open`
# below; the handle leak is removed and the misspelled `labesls` renamed.
path = r"../data_proc/raw_skeletons/numbered/"
OUT_PATH = "../data_proc/raw_skeletons/skeletons_info.txt"
Classes = {'clap':1,
           'hit':2,
           'jump':3,
           'kick':4,
           'punch':5,
           'push':6,
           'run':7,
           'shake':8,
           'sit':9,
           'situp':10,
           'stand':11,
           'turn':12,
           'walk':13,
           'wave':14,
           }
count = 0
full = []
labels = {}
ImageCount = 0
for subdir, dirs, files in os.walk(path, topdown=True):
    # Sort numerically by file name so "2.txt" precedes "10.txt".
    files = sorted(files, key=lambda x: int(x.replace('.txt', '')))
    for file in files:
        itemCount = 0
        count += 1
        print("proccessing file#" + file)
        pathtofile = os.path.join(subdir, file)
        with open(pathtofile) as fin:
            items = json.load(fin)
        # Record the label of the file (item[3] is the label field).
        labels[items[0][3]] = 0
        for item in items:
            # Shift the frame index (item[2]) to a global numbering.
            item[2] = int(item[2]) + ImageCount
            itemCount += 1
            full.append(item)
        ImageCount += itemCount + 1
print(labels)
with open(OUT_PATH, 'w+') as outfile:
    json.dump(full, outfile)
|
import matplotlib.image as mpimg
from PIL import Image
import numpy as np
import os
import shutil
# Cut each aerial image into a 20x20 grid of 250-px tiles, resized to
# 128x128, and save them into the example directories below.
NUM = 5
mypath = '/Users/zhangjunwei/Downloads/AerialImageDataset/'
test_img = mypath + 'train/images/'
#test_img = mypath + 'test/images/'
tests_img = mypath + 'test_small/images/'
tests_img_eg = mypath + 'test_small_eg/images_eg_%d/' % NUM
tests_pred_eg = mypath + 'test_small_eg/predict_eg_%d/' % NUM
tests_pred = mypath + 'test_small/predict/'
'''
try:
    shutil.rmtree(mypath + 'test_small')
except FileNotFoundError:
    pass
os.mkdir(mypath + 'test_small')
os.mkdir(tests_pred)
os.mkdir(tests_img)
'''
# NOTE(review): os.mkdir raises FileExistsError on re-runs -- the target
# directories must not already exist.
os.mkdir(tests_img_eg)
os.mkdir(tests_pred_eg)
#img_names = os.listdir(test_img)
img_names = ['austin1.tif']
s = 0
for name in img_names:
    s += 1
    # NOTE(review): "180" assumes the full image set; here only 1 image.
    print("%d/180"%s)
    img = Image.open(test_img + name)
    count = 0
    for i in range(20):
        for j in range(20):
            count += 1
            # Tile (i, j): 250x250 crop, downscaled to the network input size.
            crop_img = img.crop((i*250, j*250, (i+1)*250, (j+1)*250))
            crop_img = crop_img.resize((128,128))
            crop_img.save(tests_img_eg + '%s_%d.tif' % (os.path.splitext(name)[0], count))
|
import urllib.request
import re
# Python Challenge "equality" page: collect lowercase letters that are
# guarded by three capitals on each side, then print the hidden word.
url = "http://www.pythonchallenge.com/pc/def/equality.html"
response = urllib.request.urlopen(url)
html = response.read()
# print(html)
ss = str(html)
print(ss[:2000])
# NOTE(review): this pattern does not exclude a 4th capital adjacent to the
# runs (the stricter form is [^A-Z][A-Z]{3}[a-z][A-Z]{3}[^A-Z]) -- confirm
# the extra matches are acceptable for this page.
f = re.findall(r'[a-z][A-Z]{3}[a-z][A-Z]{3}[a-z]', ss)
print(f)
new_ss = ""
for i in f:
    # i[4] is the guarded (middle) lowercase character of each match.
    new_ss += i[4]
print(new_ss)
|
# -*- coding: utf-8 -*-
""" Administration classes for the presidencies application. """
# standard library
# django
from django.contrib import admin
from django.urls import reverse
from django.shortcuts import redirect
# parler
from parler.admin import TranslatableAdmin
from institutions.admin import GovernmentStructureFilter
# Aldryn
from aldryn_translation_tools.admin import AllTranslationsMixin
# models
from .models import Presidency, PresidencyURL
@admin.register(Presidency)
class PresidencyAdmin(AllTranslationsMixin, TranslatableAdmin):
    """Admin for Presidency; the changelist is always scoped to a
    government structure (redirecting to a filtered URL when missing).
    """
    list_filter = (
        ('government_structure', GovernmentStructureFilter),
    )
    list_display = (
        'name',
        'title',
        'government_structure',
        'twitter',
        'url',
    )
    filter_horizontal = (
        'urls',
    )

    def changelist_view(self, request, extra_context=None):
        """Force a government-structure filter on the changelist.

        Bug fix: the parameter was named ``extra_content``; Django invokes
        this hook with the keyword ``extra_context`` (ModelAdmin API), which
        would have raised TypeError. Renamed to match Django's signature.
        """
        if not request.GET.get('government_structure__id__exact'):
            # request.government_structure is presumably attached by
            # middleware -- TODO confirm where it is set.
            return redirect(
                reverse('admin:presidencies_presidency_changelist') +
                '?government_structure__id__exact=' +
                str(request.government_structure.pk)
            )
        else:
            return super().changelist_view(request, extra_context)
@admin.register(PresidencyURL)
class PresidencyURLAdmin(AllTranslationsMixin, TranslatableAdmin):
    # Columns shown on the PresidencyURL changelist.
    list_display = ('url', 'name', 'order')
|
# import subprocess
# # import os
# with open('a.txt','a')as f:
#     t = subprocess.Popen('curl -X POST -k -L www.baidu.com '
#                          '',stdout=f)
# # t = subprocess.Popen('ls', stdout=f)
# # tl = t.split('\n')
# # print tl
# # print t
# Format a storage-usage status line (Python 2): u GB used of a GB total.
a = 2.25
s = "%.2fG%.1fG"%(a,a)
u = 1.00
# %% escapes the percent sign inside the %-format string.
t = "Your Storage Status: %.2fG/%.2fG (%.2f%%)"%(u,a,u/a*100)
print t
from __future__ import print_function
from builtins import input
import numpy as np
import os
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from scipy.spatial import ConvexHull, Delaunay
import random
# from sklearn import metrics
import ensure_segmappy_is_installed
from segmappy import Dataset
from segmappy.tools.hull import point_in_hull, n_points_in_hull, are_in_hull
from segmappy.core.config import get_default_dataset_dir
DATASET_FOLDER = "KAIST02"
FILE_PATH = os.path.join(get_default_dataset_dir(), DATASET_FOLDER + "/matches_database.csv")
# Start from a clean matches database.
if os.path.isfile(FILE_PATH):
    os.remove(FILE_PATH)
dataset = Dataset(folder=DATASET_FOLDER, use_matches=False, normalize_classes=False)
segments, _, ids, n_ids, features, matches, labels_dict, _, _, _ = dataset.load()
# Keep only "interesting" views of each segment: the first view of an id,
# the last view of an id, and views that grew by at least
# RATIO_POINTS_TO_TAKE_VIEW relative to the last kept view.
filtered_segments = []
filtered_ids = []
filtered_features = []
previous_id = 9999
last_n_points = 0
RATIO_POINTS_TO_TAKE_VIEW = 0.1
for i in range(ids.size):
    save_view = False
    # First view of a new segment id.
    if ids[i] != previous_id:
        previous_id = ids[i]
        save_view = True
    # Last view of this segment id.
    # Bug fix: the original condition `ids.size == i + 1 | ids[i] != ids[i + 1]`
    # parsed as a chained comparison around a bitwise-or (| binds tighter than
    # == / !=), so it never tested what was intended. The intent mirrors the
    # "last duplicate" check used in the hull loop below; `or` also
    # short-circuits so ids[i + 1] is never read out of bounds.
    if i + 1 == ids.size or ids[i] != ids[i + 1]:
        save_view = True
    # View grew substantially since the last saved view.
    if float(segments[i].shape[0]) >= float(last_n_points) * (
        1.0 + RATIO_POINTS_TO_TAKE_VIEW
    ):
        save_view = True
    if save_view:
        last_n_points = segments[i].shape[0]
        filtered_segments.append(segments[i])
        filtered_ids.append(ids[i])
        filtered_features.append(features[i])
print("size before ", ids.size)
print("size after ", np.array(filtered_ids).size)
segments = filtered_segments
ids = np.array(filtered_ids)
features = np.array(filtered_features)
# Find the convex hulls of the last view of each segment.
unique_ids = []
unique_segments = []
hulls = []
unique_centroids = []
centroids = []
for i in range(ids.size):
    segment = segments[i]
    centroids.append(np.mean(segment, 0))
    # skip if it's not the last duplicate
    if i + 1 < ids.size and ids[i] == ids[i + 1]:
        continue
    # Last view of this id: keep it together with its hull and centroid.
    unique_ids.append(ids[i])
    unique_segments.append(segment)
    hull = ConvexHull(segment)
    hulls.append(hull)
    unique_centroids.append(np.mean(segment, 0))
# Two segments are declared a match when their centroids are within
# CENTROID_DISTANCE_THRESHOLD and the convex-hull overlap exceeds
# HULL_VOLUME_THRESHOLD in both directions; candidate pairs must be at
# least N_ID_TO_SKIP ids apart.
CENTROID_DISTANCE_THRESHOLD = 3.0
SUBSAMPLING_RATE = 5
HULL_VOLUME_THRESHOLD = 0.33
N_ID_TO_SKIP = 100
# PLOT_3D = True
# PLOT_MAP = False
PLOT_3D = False
PLOT_MAP = True
matches = []
n_unique_ids = len(unique_ids)
for i in range(n_unique_ids):
    segment1 = unique_segments[i]
    hull1 = hulls[i]
    # Only pair with ids at least N_ID_TO_SKIP further along the sequence.
    if i + N_ID_TO_SKIP > n_unique_ids:
        continue
    range_j = range(n_unique_ids - i - 1 - N_ID_TO_SKIP)
    range_j = [x + i + 1 + N_ID_TO_SKIP for x in range_j]
    print("i: ", i)
    for j in range_j:
        if (
            np.linalg.norm(unique_centroids[i] - unique_centroids[j])
            <= CENTROID_DISTANCE_THRESHOLD
        ):
            # print('Close centroids for j = ', j)
            segment2 = unique_segments[j]
            hull2 = hulls[j]
            subsampled_segment1 = segment1[0 : segment1.shape[0] : SUBSAMPLING_RATE]
            subsampled_segment2 = segment2[0 : segment2.shape[0] : SUBSAMPLING_RATE]
            # Mutual containment: points of each segment inside the other's hull.
            n_points_1_in_2 = n_points_in_hull(segment1, hull2)
            n_points_2_in_1 = n_points_in_hull(segment2, hull1)
            ratio_1_in_2 = float(n_points_1_in_2) / float(segment1.shape[0])
            ratio_2_in_1 = float(n_points_2_in_1) / float(segment2.shape[0])
            ins, outs = are_in_hull(segment1, hull2)
            if len(ins) < 5:
                continue
            points_1_in_2 = segment1[ins, :]
            ins, outs = are_in_hull(segment2, hull1)
            if len(ins) < 5:
                continue
            points_2_in_1 = segment2[ins, :]
            # Hull of the mutual overlap region of the two segments.
            intersection_hull = ConvexHull(
                np.concatenate((points_1_in_2, points_2_in_1))
            )
            volume_ratio_1 = intersection_hull.volume / hull1.volume
            volume_ratio_2 = intersection_hull.volume / hull2.volume
            # print("Source volume ratio: ", volume_ratio_1)
            # print("Target volume ratio: ", volume_ratio_2)
            if (
                volume_ratio_1 > HULL_VOLUME_THRESHOLD
                and volume_ratio_2 > HULL_VOLUME_THRESHOLD
            ):
                matches.append([unique_ids[i], unique_ids[j]])
                # Optional interactive visualisation of the accepted match.
                if PLOT_3D:
                    x_min = min(segment1[:, 0].min(), segment2[:, 0].min())
                    x_max = max(segment1[:, 0].max(), segment2[:, 0].max())
                    y_min = min(segment1[:, 1].min(), segment2[:, 1].min())
                    y_max = max(segment1[:, 1].max(), segment2[:, 1].max())
                    z_min = min(segment1[:, 2].min(), segment2[:, 2].min())
                    z_max = max(segment1[:, 2].max(), segment2[:, 2].max())
                    print("Source volume ratio: ", volume_ratio_1)
                    print("Target volume ratio: ", volume_ratio_2)
                    fig = plt.figure(1)
                    ax = fig.add_subplot(1, 2, 1, projection="3d")
                    ins, outs = are_in_hull(segment2, hull1)
                    ax.scatter(
                        segment2[ins, 0],
                        segment2[ins, 1],
                        segment2[ins, 2],
                        color="blue",
                        marker=".",
                    )
                    ax.scatter(
                        segment2[outs, 0],
                        segment2[outs, 1],
                        segment2[outs, 2],
                        color="red",
                        marker=".",
                    )
                    for simplex in hull2.simplices:
                        plt.plot(
                            segment2[simplex, 0],
                            segment2[simplex, 1],
                            segment2[simplex, 2],
                            "k-",
                        )
                    ax.set_xlim(x_min, x_max)
                    ax.set_ylim(y_min, y_max)
                    ax.set_zlim(z_min, z_max)
                    ax = fig.add_subplot(1, 2, 2, projection="3d")
                    ax.scatter(
                        segment1[:, 0], segment1[:, 1], segment1[:, 2]
                    )
                    for simplex in hull1.simplices:
                        plt.plot(
                            segment1[simplex, 0],
                            segment1[simplex, 1],
                            segment1[simplex, 2],
                            "k-",
                        )
                    ax.set_xlim(x_min, x_max)
                    ax.set_ylim(y_min, y_max)
                    ax.set_zlim(z_min, z_max)
                    plt.draw()
                    plt.pause(0.001)
                    input("Segment: ")
                    plt.clf()
                elif PLOT_MAP:
                    fig = plt.figure(1)
                    ax = fig.add_subplot(1, 2, 1)
                    plt.plot(segment1[:, 0], segment1[:, 1], "o")
                    # for simplex in target_hulls[i].simplices:
                    #    plt.plot(segment2[simplex, 0], segment2[simplex, 1], 'k-')
                    ax = fig.add_subplot(1, 2, 2)
                    plt.plot(segment2[:, 0], segment2[:, 1], "o")
                    # plt.draw()
                    # plt.pause(0.001)
                    # input('Segment: ')
if PLOT_MAP:
    plt.draw()
    plt.pause(0.001)
    input("Segment: ")
    plt.clf()
print("Number of matches: ", len(matches))
matches = np.asarray(matches)
# Persist the matched id pairs as the matches database.
np.savetxt(FILE_PATH, (matches), delimiter=" ")
|
from Tollapp.models import main
import datetime

# Count vehicles with "permit" status in the inclusive date range [a, b].
a = "2017-02-10"
b = "2017-02-24"
c = a.split('-')
start = int(c[0]),int(c[1]),int(c[2])
d = b.split('-')
end = int(d[0]),int(d[1]),int(d[2])
# Bug fix: datetime.date() takes year, month, day as separate arguments;
# passing the (y, m, d) tuple directly raised TypeError. Unpack with *.
f = main.objects.filter(timestamp__gte=datetime.date(*start),
                        timestamp__lte=datetime.date(*end),
                        vehicle_status="permit").count()
|
import unittest
from unique_occurrences import unique_occurrences
class TestUnique_Occurrences(unittest.TestCase):
    """unique_occurrences: True iff every value occurs a distinct number of times."""

    def test_unique_occurrences(self):
        cases = [
            ([1, 2, 2, 1, 1, 3], True),
            ([1, 2], False),
            ([-3, 0, 1, -3, 1, 1, 1, -3, 10, 0], True),
        ]
        for values, expected in cases:
            self.assertEqual(unique_occurrences(values), expected)
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
from __future__ import absolute_import
import argparse
import logging
import pkg_resources
import sys
import textwrap
from workspace.commands.bump import Bump
from workspace.commands.checkout import Checkout
from workspace.commands.clean import Clean
from workspace.commands.commit import Commit
from workspace.commands.diff import Diff
from workspace.commands.log import Log
from workspace.commands.merge import Merge
from workspace.commands.publish import Publish
from workspace.commands.push import Push
from workspace.commands.update import Update
from workspace.commands.status import Status
from workspace.commands.setup import Setup
from workspace.commands.test import Test
from workspace.utils import log_exception
log = logging.getLogger(__name__)
class Commander(object):
    """
    Tools to simplify workspace / scm management when working with multiple repositories.
    It helps you do more with less work by seamlessly integrating all workspace tooling into one where
    you can simply run one command instead of many native commands individually to do common tasks.
    To get started
    ---------------
    * Optionally setup workspace environment/shortcuts, run "wst setup -h" for options.
      - "wst setup -a" is recommended. :)
    * To checkout a product, run: wst checkout <git repository url> [<url2> ...]
    * All commands are named appropriately for what they do, but see its --help for additional info.
    * For more info, read the docs at http://workspace-tools.readthedocs.org
    """
    @classmethod
    def commands(cls):
        """
        Map of command name to command classes.
        Override commands to replace any command name with another class to customize the command.
        """
        cs = [Bump, Checkout, Clean, Commit, Diff, Log, Merge, Publish, Push, Setup, Status, Test, Update]
        return dict((c.name(), c) for c in cs)
    @classmethod
    def command(cls, name):
        """ Get command class for name """
        return cls.commands().get(name)
    @classmethod
    def main(cls):
        """ CLI entry point: configure basic logging and run the parser. """
        logging.basicConfig(level=logging.INFO, format='[%(levelname)s] %(message)s')
        cls()._run()
    def _run(self):
        """
        Sets up logging, parser, and creates the necessary command sequences to run, and runs
        the command given by the user.
        """
        self.setup_parsers()
        args, extra_args = self.parser.parse_known_args()
        if not args.command:
            # No sub-command given: show usage and exit cleanly.
            self.parser.print_help()
            sys.exit()
        # Commands that declare 'extra_args' in their docs accept unknown
        # arguments; for all other commands leftovers are an error.
        if args.command not in [c.name() for c in list(self.commands().values()) if 'extra_args' in c.docs()[1]] and extra_args:
            log.error('Unrecognized arguments: %s', ' '.join(extra_args))
            sys.exit(1)
        if args.debug:
            logging.root.setLevel(logging.DEBUG)
        with log_exception(exit=True, stack=args.debug):
            args_dict = args.__dict__
            args_dict['extra_args'] = extra_args
            return self.run(args.command, **args_dict)
    def run(self, name=None, **kwargs):
        """
        Run the command by name with given args.
        :param str name: Name of command to run. If not given, this calls self._run()
        :param kwargs: Args to pass to the command constructor
        """
        if not name:
            return self._run()
        if name in self.commands():
            # Commands receive a back-reference to this commander instance.
            kwargs['commander'] = self
            return self.command(name)(**kwargs).run()
        else:
            log.error('Command "%s" is not registered. Override Commander.commands() to add.', name)
            sys.exit(1)
    def _setup_parser(self):
        """
        Sets up the main parser.
        To show version of your customized wst package when --version is invoked, set cls.package_name to your package name.
        """
        self.parser = argparse.ArgumentParser(description=textwrap.dedent(self.__doc__),
                                              formatter_class=argparse.RawDescriptionHelpFormatter)
        self.parser.register('action', 'parsers', AliasedSubParsersAction)
        versions = []
        # Report versions for the customized package (if any) and workspace-tools.
        for pkg in [_f for _f in [getattr(self, 'package_name', None), 'workspace-tools'] if _f]:
            try:
                versions.append('%s %s' % (pkg, pkg_resources.get_distribution(pkg).version))
            except Exception:
                # Package not installed: skip its version entry.
                pass
        self.parser.add_argument('-v', '--version', action='version', version='\n'.join(versions))
        self.parser.add_argument('--debug', action='store_true', help='Turn on debug mode')
    def setup_parsers(self):
        """
        Sets up parsers for all commands
        """
        self._setup_parser()
        self.subparsers = self.parser.add_subparsers(title='sub-commands', help='List of sub-commands', dest='command')
        # Patch in a remove_parser helper so commands can be unregistered later.
        self.subparsers.remove_parser = lambda *args, **kwargs: _remove_parser(self.subparsers, *args, **kwargs)
        for name, command in sorted(self.commands().items()):
            doc, _ = command.docs()
            # First non-empty docstring line doubles as the short help text.
            help = list(filter(None, doc.split('\n')))[0]
            aliases = [command.alias] if command.alias else None
            parser = self.subparsers.add_parser(name, aliases=aliases, description=textwrap.dedent(doc), help=help,
                                                formatter_class=argparse.RawDescriptionHelpFormatter)
            cmd_args = command.arguments()
            # arguments() returns either plain args or (args, chaining args).
            if isinstance(cmd_args, tuple):
                normal_args, chain_args = cmd_args
            else:
                normal_args = cmd_args
                chain_args = []
            for args, kwargs in normal_args:
                parser.add_argument(*args, **kwargs)
            if chain_args:
                group = parser.add_argument_group('chaining options')
                for args, kwargs in chain_args:
                    group.add_argument(*args, **kwargs)
# Copied from https://gist.github.com/sampsyo/471779
class AliasedSubParsersAction(argparse._SubParsersAction):
    """Sub-parsers action supporting command aliases (e.g. "co" for checkout)."""
    class _AliasedPseudoAction(argparse.Action):
        # Help-only stand-in so the command list shows "name (alias, ...)".
        def __init__(self, name, aliases, help):
            dest = name
            if aliases:
                dest += ' (%s)' % ','.join(aliases)
            sup = super(AliasedSubParsersAction._AliasedPseudoAction, self)
            sup.__init__(option_strings=[], dest=dest, help=help)
    def add_parser(self, name, **kwargs):
        # Extract our custom 'aliases' kwarg before the base class sees it.
        if 'aliases' in kwargs:
            aliases = kwargs['aliases'] or []
            del kwargs['aliases']
        else:
            aliases = []
        parser = super(AliasedSubParsersAction, self).add_parser(name, **kwargs)
        # Make the aliases work.
        for alias in aliases:
            self._name_parser_map[alias] = parser
        # Make the help text reflect them, first removing old help entry.
        if 'help' in kwargs:
            help = kwargs.pop('help')
            self._choices_actions.pop()
            pseudo_action = self._AliasedPseudoAction(name, aliases, help)
            self._choices_actions.append(pseudo_action)
        return parser
def _remove_parser(self, name, **kwargs):
    """Remove a previously added sub-parser (and its aliases) from *self*.

    *self* is the sub-parsers action; mirrors add_parser's kwargs.
    """
    # Drop the help entry for the command and any "name (alias, ...)" entry.
    if 'help' in kwargs:
        alias_prefix = '%s (' % name
        self._choices_actions = [
            action
            for action in self._choices_actions
            if action.dest != name and not action.dest.startswith(alias_prefix)
        ]
    # Forget the parser itself, then each of its aliases.
    self._name_parser_map.pop(name, None)
    for alias in kwargs.pop('aliases', ()):
        self._name_parser_map.pop(alias, None)
|
# -*- coding: utf-8 -*-
"""
Created on 2018
@author:
"""
# ####### text-data preconditions ################
# # Input: raw Chinese corpus text.
# #
# # This script trains word2vec on the Chinese corpus and saves the model.
# #
# # Output: the trained model files.
# # Usage: python word2vec_train.py std_zh_wiki_00
# #
# #
import os
import sys
import multiprocessing
import logging
import gensim
#from gensim.corpora import WikiCorps
from gensim.models import Word2Vec
from gensim.models.word2vec import LineSentence
# Default file locations and Word2Vec hyper-parameters.
vec_modelPath = './models/'
vec_filePath = './'
vec_size = 400      # embedding (hidden-layer) dimensionality
vec_window = 5      # context window size
vec_minCount = 5    # words rarer than this are dropped
vec_workers = 2     # NOTE(review): unused; call sites use cpu_count() instead
# Train word2vec on a single corpus file.
def vec_train_fun(modelpath, input_file, outmodel):
    """Train a Word2Vec model on one corpus file and save it.

    modelpath: directory for saved models
    input_file: corpus file, one (pre-tokenized) sentence per line
    outmodel: model file name; a C-format copy is saved with a 'c_' prefix
    """
    inp1 = input_file
    outp1 = modelpath + outmodel
    outp2 = modelpath + 'c_' + outmodel
    # Bug fix: '%(leveltime)s' is not a LogRecord attribute and makes the
    # logging configuration invalid; the intended attribute is levelname.
    logging.basicConfig(format = '%(asctime)s:%(levelname)s:%(message)s',level = logging.INFO)
    # ######### train
    sentences = LineSentence(inp1)
    cpuworkers = multiprocessing.cpu_count()
    # size: hidden-layer dimensionality (default 100); window: context size;
    # min_count: words rarer than this are dropped (default 5).
    model = Word2Vec(sentences, size = vec_size, window = vec_window, min_count = vec_minCount,
                     workers = cpuworkers)
    model.save(outp1)
    # Also export in the original C word2vec vector format.
    model.wv.save_word2vec_format(outp2, binary = False)
# (No initial model) incremental training over multiple corpus files.
def vec_increment_train_of_no_model(model_path, incr_filedata):
    """Train a fresh Word2Vec model on the first corpus file, then keep
    training it on each remaining file; save as 'zhs_incr.vec.model'.

    incr_filedata: corpus file names relative to vec_filePath.
    """
    wikifile = vec_filePath + incr_filedata[0]
    print('corpuse file:', wikifile)
    cpuworkers = multiprocessing.cpu_count()
    print('cpu counter:', cpuworkers)
    sentences = LineSentence(wikifile)
    print('training init model!')
    model = Word2Vec(sentences, size=vec_size, window=vec_window, min_count=vec_minCount,
                     workers=cpuworkers)
    for file in incr_filedata[1:]:
        tempfile = vec_filePath + file
        # Bug fix: the corpus file handle was opened and never closed; a
        # context manager releases it as soon as training on it is done.
        with open(tempfile, u'r', encoding="utf-8") as corpusSingleFile:
            trainedWordCount = model.train(LineSentence(corpusSingleFile),
                                           total_examples=model.corpus_count, epochs=model.epochs)
        print('update model, update words num is: ', trainedWordCount)
    outp1 = model_path + 'zhs_incr.vec.model'
    model.save(outp1)
    return True
# (With an initial model) incremental training over multiple corpus files.
#   incr_filedata: corpus file names, path included
#   oldmodel: existing model file name, without path
#   newmodel: new model file name, without path
def vec_increment_train_fun_on_basemodel(model_path, oldmodel, incr_filedata, newmodel):
    """Continue training an existing Word2Vec model on more corpus files.

    incr_filedata: corpus file names (path included)
    oldmodel: existing model file name (no path) under model_path
    newmodel: output model file name (no path) under model_path
    """
    # Load the existing base model.
    model = Word2Vec.load(model_path + oldmodel)
    for file in incr_filedata:
        # Bug fix: each corpus file handle was opened and never closed; the
        # context manager releases it once vocab update and training finish.
        with open(file, u'r', encoding="utf-8") as corpusSingleFile:
            more_sentences = LineSentence(corpusSingleFile)
            # Extend the vocabulary with the new corpus, then train on it.
            model.build_vocab(more_sentences, update=True)
            model.train(more_sentences, total_examples=model.corpus_count, epochs=model.epochs)
        print('new corpuse is training...')
    outp1 = model_path + newmodel
    model.save(outp1)
    return True
# #######################
def vec_model_test_func():
    """Smoke-test queries against a saved Word2Vec model."""
    # Load the model.
    model = Word2Vec.load("text8.model")
    # Similarity / relatedness between two words.
    y1 = model.similarity(u"不错", u"好")
    print(u"【不错】和【好】的相似度为:", y1)
    print("--------\n")
    # Words most related to a given word.
    y2 = model.most_similar(u"书", topn=20) # top-20 most related
    print(u"和【书】最相关的词有:\n")
    for item in y2:
        print(item[0], item[1])
    print("--------\n")
    # Analogy query (positive: 质量, 不错; negative: 书).
    print(u"书-不错,质量-")
    y3 = model.most_similar([u'质量', u'不错'], [u'书'], topn=3)
    for item in y3:
        print(item[0], item[1])
    print("--------\n")
    # Find the word that does not belong in the set.
    y4 = model.doesnt_match(u"书 书籍 教材 很".split())
    print(u"不合群的词:", y4)
    print("--------\n")
    # Cosine similarity between two word lists.
    list1 = ['我','走','我','学校']
    list2 = ['我','去','家']
    list_sim1 = model.n_similarity(list1,list2)
    print(list_sim1)
if __name__ == '__main__':
    #if len(sys.argv) != 2:
    #    print('Usage: python script.py inputfile')
    #    sys.exit()
    #input_file = sys.argv[1]
    # input_file = 'cut_std_zhs_wiki_00'
    input_file = './csvdata/ali_nlp_sim.dat'
    outmodel = 'ali.text.vec.model'
    model_path = './models/'
    print('start train text data')
    # Train on a single corpus file:
    #vec_train_fun(model_path,input_file,outmodel)
    # Train on multiple corpus files (no base model):
    #incr_corpuses = ['test_00.txt','test_01.txt']
    #vec_increment_train_of_no_model(model_path, incr_corpuses)
    # Continue incremental training from an existing base model:
    #incr_data2 = ["./csvdata/ali_nlp_sim.dat"]
    incr_data2 = ["cut_std_zhs_wiki_00","cut_std_zhs_wiki_01","cut_std_zhs_wiki_02",
                  "cut_std_zhs_wiki_03","cut_std_zhs_wiki_04","cut_std_zhs_wiki_05"]
    #oldmodel = 'word2vec_wx'
    #newmodel = 'word2vec_wx_ali.vec.model'
    oldmodel = 'word2vec_wx_ali.vec.model'
    newmodel = 'wiki_wx_ali.vec.model'
    vec_increment_train_fun_on_basemodel(model_path, oldmodel,incr_data2,newmodel)
    print('train end text data')
|
from Router import *
from PyQt4 import QtCore
class yRouter(Router):
    # Device type identifier used by the surrounding topology framework.
    device_type="yRouter"
    def __init__(self):
        # NOTE(review): initialises Interfaceable directly rather than
        # calling Router.__init__ -- presumably intentional to skip Router's
        # own setup; confirm against the Router class hierarchy.
        Interfaceable.__init__(self)
        self.setProperty("WLAN", "False")
        self.setProperty("mac_type", "MAC 802.11 DCF")
        # Offset of the status-light within the device icon.
        self.lightPoint = QPoint(-14,15)
|
import os
import sys
import json
import argparse
import numpy as np
import pandas as pd
from copy import copy
import warnings
import utils.general_functions as gn
from sklearn.linear_model import LinearRegression
from sklearn import metrics
from utils.data_preprocess_version_control import generate_version_params
from sklearn.externals import joblib
sys.path.insert(0, os.path.abspath(os.path.join(sys.path[0], '4EBaseMetal')))
'''
This file is the live deployment of linear regression. It has 1 class.
Linear_online: Has two functions: train and test.
'''
class Linear_online():
    """
    lag: the window size of the data feature
    horizon: the time horizon of the predict target
    version: the version of the feature
    gt: the ground_truth metal name
    date: the last date of the prediction
    source: the data source
    """
    def __init__(self,
                 lag,
                 horizon,
                 version,
                 gt,
                 date,
                 source,
                 path):
        # NOTE(review): the 'path' parameter is accepted but never stored;
        # train()/test() recompute self.path from the version. Confirm it is
        # kept only for interface compatibility.
        self.lag = lag
        self.horizon = horizon
        self.version = version
        self.gt = gt
        self.date = date
        self.source = source
    #this function is used to train the model and save it
    def train(self):
        """Train one LinearRegression per date in self.date and pickle it."""
        print("begin to train")
        #assert that the configuration path is correct
        self.path = gn.generate_config_path(self.version)
        #read the data from the 4E or NExT database
        time_series,LME_dates,config_length = gn.read_data_with_specified_columns(self.source,self.path,"2003-11-12")
        for date in self.date.split(","):
            #generate list of dates for today's model training period
            today = date
            length = 5
            if gn.even_version(self.version) and self.horizon > 5:
                length = 4
            start_time,train_time,evalidate_date = gn.get_relevant_dates(today,length,"train")
            split_dates = [train_time,evalidate_date,str(today)]
            #generate the version
            version_params = generate_version_params(self.version)
            print("the train date is {}".format(split_dates[0]))
            print("the test date is {}".format(split_dates[1]))
            #toggle metal id
            metal_id = False
            ground_truth_list = [self.gt]
            # Even versions train a single model across all six LME metals.
            if gn.even_version(self.version):
                metal_id = True
                ground_truth_list = ["LME_Cu_Spot","LME_Al_Spot","LME_Ni_Spot","LME_Xi_Spot","LME_Zn_Spot","LME_Pb_Spot"]
            #extract copy of data to process
            ts = copy(time_series.loc[start_time:split_dates[2]])
            #load data for use
            final_X_tr, final_y_tr, final_X_va, final_y_va, val_dates, column_lag_list = gn.prepare_data(ts,LME_dates,self.horizon,ground_truth_list,self.lag,copy(split_dates),version_params,metal_id_bool = metal_id)
            LR= LinearRegression(n_jobs = -1)
            LR.fit(final_X_tr,final_y_tr[:,0])
            # Persist the model; "_ALL_" marks the multi-metal variant.
            if gn.even_version(self.version):
                joblib.dump(LR,os.path.join(os.getcwd(),'result','model','linear',self.version+"_ALL_"+str(self.horizon)+"_"+str(self.lag)+"_"+evalidate_date+'.pkl'))
            else:
                joblib.dump(LR,os.path.join(os.getcwd(),'result','model','linear',self.version+"_"+self.gt+"_"+str(self.horizon)+"_"+str(self.lag)+"_"+evalidate_date+'.pkl'))
    #-------------------------------------------------------------------------------------------------------------------------------------#
    #this function is used to predict the date
    def test(self):
        """Load the pickled model for each date and write predictions to CSV."""
        #split the date
        print("begin to test")
        #assert that the configuration path is correct
        self.path = gn.generate_config_path(self.version)
        #read the data from the 4E or NExT database
        time_series,LME_dates,config_length = gn.read_data_with_specified_columns(self.source,self.path,"2003-11-12")
        for date in self.date.split(","):
            #generate list of dates for today's model training period
            today = date
            length = 5
            if gn.even_version(self.version) and self.horizon > 5:
                length = 4
            start_time,train_time,evalidate_date = gn.get_relevant_dates(today,length,"test")
            split_dates = [train_time,evalidate_date,str(today)]
            # Load the model saved by train() for this evaluation date.
            if gn.even_version(self.version):
                model = joblib.load(os.path.join(os.getcwd(),'result','model','linear',self.version+"_ALL_"+str(self.horizon)+"_"+str(self.lag)+"_"+evalidate_date+'.pkl'))
            else:
                model = joblib.load(os.path.join(os.getcwd(),'result','model','linear',self.version+"_"+self.gt+"_"+str(self.horizon)+"_"+str(self.lag)+"_"+evalidate_date+'.pkl'))
            #generate the version
            version_params=generate_version_params(self.version)
            metal_id = False
            if gn.even_version(self.version):
                metal_id = True
            #extract copy of data to process
            ts = copy(time_series.loc[start_time:split_dates[2]])
            #load data for use
            final_X_tr, final_y_tr, final_X_va, final_y_va,val_dates, column_lag_list = gn.prepare_data(ts,LME_dates,self.horizon,[self.gt],self.lag,copy(split_dates),version_params,metal_id_bool = metal_id,live = True)
            # Convert the predicted return into a price level via column 1.
            prob = (1+model.predict(final_X_va))*final_y_va[:,1]
            final_list = []
            piece_list = []
            for i,val_date in enumerate(val_dates):
                piece_list.append(val_date)
                piece_list.append(prob[i])
                final_list.append(piece_list)
                piece_list=[]
            final_dataframe = pd.DataFrame(prob, columns=['prediction'],index=val_dates)
            final_dataframe.to_csv(os.path.join("result","prediction","linear",self.version,"_".join([self.gt,date,str(self.horizon),self.version])+".csv"))
|
import datetime
from DataPoints import *
def generateDataPoints(startTime):
    """Build DataPoint objects from the ./tweepyData and ./Articles files.

    startTime: epoch seconds used as the origin; each point's time is the
    whole number of hours elapsed since startTime (integer floor division).

    Returns a list of ``subclassList`` instances (tweets first, then
    articles).

    Fixes over the original: files are closed via ``with`` (the first
    handle used to leak when ``file`` was rebound), and a file that does
    not end with a "*" sentinel no longer causes an infinite loop --
    ``readline()`` returning "" at EOF never contains "*".
    """
    points = []
    # tweepyData format: a "#" line advances to the next subclass bucket;
    # any other line is an epoch timestamp; "*" (or EOF) ends the section.
    with open("./tweepyData", "r") as tweet_file:
        bucket = 0
        for line in tweet_file:
            if "*" in line:
                break
            if "#" in line:
                bucket += 1
            else:
                points.append(subclassList[bucket](
                    (float(line) - startTime) // 3600,
                    "I am a Tweet about BTC"))
    # Articles format: "<subclass-index> <HHMM> <MonthLetter+Day>", e.g.
    # "2 1430 J12". A month token containing "J" means June, otherwise May
    # (2018 data set) -- TODO confirm this convention against the data.
    with open("./Articles", "r") as article_file:
        for line in article_file:
            if "*" in line:
                break
            info = line.split(" ")
            timestamp = datetime.datetime(
                2018,
                6 if "J" in info[2] else 5,
                int(info[2][1:]),
                int(info[1]) // 100).timestamp()
            points.append(subclassList[int(info[0])](
                (timestamp - startTime) // 3600,
                "I am an Article that affects BTC prices"))
    return points
|
import pyaudio
import numpy
import scipy
# Enumerate all PyAudio host APIs and print, for each, its name, device
# count, and (when available) the names of its default output/input
# devices. Python 2 script (print statements).
p = pyaudio.PyAudio()
host_api_count = p.get_host_api_count()
print "Number of available Host API: %d" % host_api_count
for i in range(host_api_count):
    host_api = p.get_host_api_info_by_index (i)
    # NOTE(review): this compares a device COUNT against paNoDevice, which
    # is a device-index sentinel -- presumably intended to skip APIs with
    # no devices; confirm against the PyAudio documentation.
    if host_api['deviceCount'] != pyaudio.paNoDevice:
        # Branch on which default-device keys the API info dict exposes,
        # resolving each default device index to its human-readable name.
        if 'defaultOutputDevice' in host_api and 'defaultInputDevice' in host_api:
            print "Name: %s\t Device Count: %d\t Default Output: %s\t Default Input: %s" % (host_api['name'], host_api['deviceCount'], p.get_device_info_by_host_api_device_index(i, host_api['defaultOutputDevice'])['name'] , p.get_device_info_by_host_api_device_index(i, host_api['defaultInputDevice'])['name'])
        elif 'defaultOutputDevice' in host_api:
            print "Name: %s\t Device Count: %d\t Default Output: %s" % (host_api['name'], host_api['deviceCount'], p.get_device_info_by_host_api_device_index(i, host_api['defaultOutputDevice'])['name'])
        elif 'defaultInputDevice' in host_api:
            print "Name: %s\t Device Count: %d\t Default Input: %s" % (host_api['name'], host_api['deviceCount'], p.get_device_info_by_host_api_device_index(i, host_api['defaultInputDevice'])['name'])
        else:
            print "Name: %s\t Device Count: %d" % (host_api['name'], host_api['deviceCount'])
# Release PortAudio resources.
p.terminate()
|
"""
Figure 11: Why dictionaries are needed
"""
from weakref import WeakKeyDictionary
class NonNegative(object):
    """A descriptor that forbids negative values.

    Values are stored per owning instance in a WeakKeyDictionary, so
    distinct instances never share state and entries vanish when an
    instance is garbage collected.
    """

    def __init__(self, default):
        self.default = default
        self.data = WeakKeyDictionary()

    def __get__(self, instance, owner):
        # Attribute read (x.d): return this instance's stored value, or
        # the shared default if it was never assigned.
        try:
            return self.data[instance]
        except KeyError:
            return self.default

    def __set__(self, instance, value):
        # Attribute write (x.d = val): validate, then record the value
        # keyed by exactly this instance.
        if value < 0:
            raise ValueError("Negative value not allowed: %s" % value)
        self.data[instance] = value
class BrokenNonNegative(object):
    """A deliberately broken non-negative descriptor.

    The value is kept on the descriptor object itself rather than per
    owning instance, so every instance of the owner class shares one
    value -- the bug this figure demonstrates.
    """

    def __init__(self, default):
        self.value = default

    def __get__(self, instance, owner):
        # Every reader sees the single shared slot.
        return self.value

    def __set__(self, instance, value):
        # Validation is fine; the assignment is what clobbers shared state.
        # (not (value < 0) rather than value >= 0 keeps NaN behavior
        # identical to the original guard.)
        if not (value < 0):
            self.value = value
        else:
            raise ValueError("Negative value not allowed: %s" % value)
class Foo(object):
    # 'bar' is a class-level descriptor; because BrokenNonNegative stores
    # its value on the descriptor itself, every Foo instance shares one
    # bar value.
    bar = BrokenNonNegative(5)
# It seems to work!?  (validation does reject negatives)
f = Foo()
try:
    f.bar = -1
except ValueError:
    print "Caught the invalid assignment"
# Uh, oh not in this case!  Assigning through one instance leaks into all
# others because the descriptor keeps a single shared value.
f = Foo()
g = Foo()
print "f.bar is %s\ng.bar is %s" % (f.bar, g.bar)
print "Setting f.bar to 10"
f.bar = 10
print "f.bar is %s\ng.bar is %s" % (f.bar, g.bar) #ouch
|
from __future__ import division
import serial
import time
from array_devices import array3710
__author__ = 'JoeSacher'
"""
This is a crude script to play with PC baud rates while the load
is set to a fixed baud rate.
"""
# RS-485/serial address of the load on the bus.
load_addr = 1
# This should match load
base_baud_rate = 9600
# Put the load into remote-control mode at its configured baud rate.
serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = True
serial_conn.close()
# Set this to sufficient range to get possible valid connections
min_rate = 3500
max_rate = 20000
print("Walking from {} to {} for {}".format(min_rate, max_rate, base_baud_rate))
# Walk the PC-side baud rate in 100-baud steps while the load stays fixed
# at base_baud_rate, recording how reliably each mismatched rate talks.
for baud_rate in xrange(min_rate, max_rate, 100):
    time.sleep(0.1)
    # Reopen the port at the candidate PC-side rate for this probe.
    serial_conn = serial.Serial('COM4', baud_rate, timeout=0.5)
    try:
        load = array3710.Load(load_addr, serial_conn, print_errors=False)
    except IOError:
        print("Baud_Rate: {} Error: Can't creating load".format(baud_rate))
    else:
        # Exercise the load ten times and count communication failures.
        error_count = 0
        for i in range(10):
            try:
                # True division (from __future__ import division), so this
                # sets a fractional resistance proportional to the rate.
                load.set_load_resistance(baud_rate/1000)
                load.update_status(retry_count=1)
                # print(load.voltage)
            except IOError:
                error_count += 1
            try:
                # Toggle the load output as an extra round-trip check.
                load.load_on = True
                load.load_on = False
                time.sleep(0.05)
            except IOError:
                error_count += 1
        print("Baud_Rate: {} - Errors: {}".format(baud_rate, error_count))
    serial_conn.close()
# Restore local (front-panel) control at the real baud rate before exiting.
serial_conn = serial.Serial('COM4', base_baud_rate, timeout=1)
load = array3710.Load(load_addr, serial_conn)
load.remote_control = False
serial_conn.close()
"""
Results for both of my loads.
I found the multiple baud responses when the load was set at 9600 very interesting.
When I get time, I want to scope the wild mismatches and see what is going on.
4800(L1): 4700-5200 (All 0 errors)
4800(L2): 4700-5200 (All 0 errors)
9600(L1): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
9600(L2): 4000-4600, 5300-6400, 7600-12400, 15100-17400 (All 0 errors)
19200(L1): 17500-24000 (~30% with 1 error, 1 with 2 errors)
19200(L2): 17500-24000 (~20% with 1 error, 5% with 2 errors)
38400(L1): 35900-44200 (Errors of 1-4 throughout range, pretty evenly spread, 20% with 0 errors)
38400(L2): 35900-44200 (same distribution of errors as L1 at this baud rate)
""" |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.