index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
10,800 | 4763e6940db1ad4d8032071bfd6a4528d59bdf41 | from .. import pyplot as plt
def plot_grid2d(grid, *args, ax=None, boundaries_on=False, **kwargs):
    """Draw the grid lines of a structured 2-D grid on a matplotlib axes.

    Parameters
    ----------
    grid : array, shape (ncomp, rows, cols)
        Grid-point coordinates; only the first two components (x, y) are used.
    *args, **kwargs
        Forwarded verbatim to ``ax.plot`` (line style, colour, ...).
    ax : axes, optional
        Target axes; defaults to the current axes via ``plt.gca()``.
    boundaries_on : bool, optional
        True draws every row/column polyline including the outer boundary;
        False draws interior lines only (first/last row and column skipped).

    Returns
    -------
    The axes the grid was drawn on.
    """
    _, rows, cols = grid.shape
    if ax is None:
        ax = plt.gca()
    if boundaries_on:
        # One polyline per grid row, then one per grid column.
        for i in range(rows):
            ax.plot(*grid[:2, i, :], *args, **kwargs)
        for i in range(cols):
            ax.plot(*grid[:2, :, i], *args, **kwargs)
    else:
        # Interior lines only: boundary rows/columns are omitted.
        for i in range(1, rows-1):
            ax.plot(*grid[:2, i, :], *args, **kwargs)
        for i in range(1, cols-1):
            ax.plot(*grid[:2, :, i], *args, **kwargs)
    return ax
def plot_grid3d(grid, *args, ax=None, boundaries_on=False, **kwargs):
    """Draw the grid lines of a structured grid in 3-D.

    Same contract as ``plot_grid2d`` but all coordinate components of
    ``grid`` are passed through, and a 3-D axes is created when ``ax`` is
    None.

    NOTE(review): ``plt.gcfa3d()`` and ``plt.plot3d`` are not standard
    matplotlib.pyplot API — presumably wrappers added by the package's
    ``pyplot`` module imported at the top of this file; confirm.
    """
    _, rows, cols = grid.shape
    if ax is None:
        # assumes gcfa3d() returns a (figure, 3-D axes) pair — TODO confirm
        _, ax = plt.gcfa3d()
    if boundaries_on:
        for i in range(rows):
            plt.plot3d(*grid[:, i, :], *args, ax=ax, **kwargs)
        for i in range(cols):
            plt.plot3d(*grid[:, :, i], *args, ax=ax, **kwargs)
    else:
        # Interior lines only: boundary rows/columns are omitted.
        for i in range(1, rows-1):
            plt.plot3d(*grid[:, i, :], *args, ax=ax, **kwargs)
        for i in range(1, cols-1):
            plt.plot3d(*grid[:, :, i], *args, ax=ax, **kwargs)
    return ax |
10,801 | 7d2c6b339bcb88f9da28454b76600ac8119cb619 |
### copied by hand all laws and ordos from https://www.legifrance.gouv.fr/liste/lois
### in "french law initial data.pages" and "french ordos initial data.pages"
### !!! for the next scraping, just add the new laws and ordos issued since the last scrape
### copy everything into Excel/Google Sheets, sort alphabetically, delete extra rows, clean (replace "n °")
### copy everything onto a second column and name the columns "ordo" and "ordo_date"
### save as "french laws as of [insert scraping date].csv" and as "french ordos as of [insert scraping date].csv" |
10,802 | ab2efe6b0cd740b38910269f716ac2314f9cc970 | # _*_ coding: utf-8 _*_
# descrption: some small functions
import os
import cv2
from finger import *
# get all images path from a folder
def get_all_image(folder):
    """Return a list of '<folder>/<entry>' paths for every entry in *folder*."""
    return [folder + '/' + name for name in os.listdir(folder)]
# Batch check (Python 2): for every image in folder '2', locate the
# fingerprint core point and verify that a 75-pixel window to the
# right/below the core still fits inside the image.
success = 0
failed = 0
images_path = get_all_image('2')
for image in images_path:
    img = cv2.imread(image)
    print image
    # Core (central) point of the fingerprint — provided by the project's
    # `finger` module imported above.
    core_x, core_y = Get_central_point(img)
    rows, cols = img.shape[:2]
    if core_x != 0:
        # NOTE(review): bitwise '|' between the two comparisons happens to
        # work on bools, but 'or' is presumably the intended operator.
        if (core_x + 75 > cols) | (core_y + 75 > rows):
            print "failed"
            failed += 1
        else:
            print "success"
            print image
            success += 1
    else:
        # core_x == 0 is treated as "no core point detected".
        print "failed"
        failed += 1
print "success:", success
print "failed:", failed
|
10,803 | e18d050df152f7e91111624b25eb71d209152850 | import tensorflow as tf
from tensorflow.keras.models import load_model, model_from_json
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description="Export a Keras checkpoint as a TensorFlow SavedModel.")
    # nargs='?' is required for a positional argument's default to take
    # effect; without it argparse treats 'model' as mandatory and the
    # default= keyword is ignored.
    parser.add_argument('model', nargs='?', default='model')
    parser.add_argument('--resume', '-r')
    args = parser.parse_args()
    if args.resume is not None:
        model = load_model(args.resume)
        # Export only when a checkpoint was actually loaded; previously the
        # save call ran unconditionally and raised NameError whenever
        # --resume was omitted.  (tf.contrib is TF 1.x-only API.)
        tf.contrib.saved_model.save_keras_model(model, args.model)
|
10,804 | a54cf8aecc3bca63f8c75aaaa1b0ff27f309f380 | # 647. Palindromic Substrings
# Medium
# Given a string, your task is to count how many palindromic substrings
# in this string.
# The substrings with different start indexes or end indexes are counted
# as different substrings even they consist of same characters.
# Example 1:
# Input: "abc"
# Output: 3
# Explanation: Three palindromic strings: "a", "b", "c".
# Example 2:
# Input: "aaa"
# Output: 6
# Explanation: Six palindromic strings: "a", "a", "a", "aa", "aa", "aaa".
# Note:
# The input string length won't exceed 1000.
class Solution:
    def countSubstrings(self, s: str) -> int:
        """Count palindromic substrings of *s*.

        Substrings at different (start, end) positions count separately even
        when their characters are equal.  Expands around every center: each
        index for odd-length palindromes and each adjacent pair for
        even-length ones.  O(n^2) time, O(1) extra space.

        The original tallied with ``res = res + 1 if s[p:q+1] else res``,
        which allocates a throwaway slice that is always non-empty inside the
        loop — replaced with a plain increment.
        """
        n = len(s)

        def _expand(p: int, q: int) -> int:
            # Grow outward from center (p, q) while the ends match; each
            # successful step is one more palindrome.
            count = 0
            while p >= 0 and q < n and s[p] == s[q]:
                count += 1
                p, q = p - 1, q + 1
            return count

        res = 0
        for i in range(n):
            res += _expand(i, i)          # odd-length centers
        for i in range(n - 1):
            res += _expand(i, i + 1)      # even-length centers
        return res
# Quick smoke test of the solution on a few sample strings.
runner = Solution()
for sample in ("abc", "aaa", "abcdcba", "xabceece"):
    print(runner.countSubstrings(sample))
|
10,805 | c776b4ee78e2f45cf028436a2d1f706bc2efa073 | # -*- coding:utf-8 -*-
"""
复用浏览器
"""
from time import sleep
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
class TestDemo():
    """Reuses an already-running Chrome session via its remote-debugging port."""

    def setup(self):
        option = Options()
        # Attach to a Chrome instance started with --remote-debugging-port=9222.
        option.debugger_address = '127.0.0.1:9222'
        self.driver = webdriver.Chrome(options=option)
        # Implicit wait: polls for elements dynamically; best set immediately
        # after the driver is instantiated.
        self.driver.implicitly_wait(5)
        # Maximize the browser window.
        self.driver.maximize_window()

    def teardown(self):
        # Fixed typo: the method was named 'tesrdown', so the test framework
        # never invoked it and the browser session leaked after each test.
        self.driver.quit()

    def test_demo(self):
        # self.driver.get("https://ceshiren.com/")
        self.driver.find_element(By.LINK_TEXT, "所有分类").click()
        sleep(3)
        category = self.driver.find_element(By.LINK_TEXT, "所有分类")
        assert 'active' == category.get_attribute("class")
|
10,806 | 96de1dd2fa80a9118d3cd22f0340264a66198d55 | TOKEN="TOKEN"
# Command prefix (trailing space intended: commands are typed as "fjo help").
PREFIX="fjo "
DESCRIPTION="A fun discord bot."
# Discord user IDs granted owner-level access to the bot.
OWNERS=[104933285506908160, 224895364480630784]
# Extension modules loaded at startup; "jishaku" is a debugging/eval cog.
extensions = [
    "jishaku",
    "cogs.commands.utility",
    "cogs.events.error_handler",
    "cogs.commands.prose_edda"
]
|
10,807 | d7113822b9045b51c1e16aed70dc67faec52f3d2 | # coding=utf-8
import sys
import argparse
import time
import os
from helpers.ssh_manager import SSHManager
from vm_manager.models.environment import Environment
#
env = Environment()
vm_names = []
ssh = SSHManager()
# Step 1. Prepare vms: create one VM per available image, let it boot,
# suspend it, and record its name in a plain-text registry file.
for image in env.get_images_list():
    vm_name = env.get_vm_name_from_config(env.create_vm(image=image))
    vm_names.append(vm_name)
    # Give the freshly created VM a minute to boot before suspending it.
    time.sleep(60)
    vm = env.vm_conn(vm_name=vm_name)
    vm.suspend()
    writepath = '/home/msamoylov/vm_manager/vms'
    # 'a+' creates the file when it is missing.  The previous code opened
    # with 'r+' when the file did not exist, which raises IOError on the
    # very first run, and compared the bare vm_name against readlines()
    # output that still carries '\n', so the duplicate check never matched.
    with open(writepath, 'a+') as f:
        f.seek(0)  # 'a+' may leave the read cursor at EOF — rewind first
        existing = {line.rstrip('\n') for line in f}
        if vm_name not in existing:
            f.write('{}\n'.format(vm_name))
# Step 2. Prepare snapshot 'ready'
# for vm in env.get_vm_ids():
#     with open('/home/msamoylov/vm_manager/vms') as f:
#         if env.get_vm_name(vm) in f.read():
#             if len(env.snapshots_vm(vm)) == 0 or 'ready' not in env.snapshots_vm(vm):
#                 env.create_snapshot(vm, 'ready')
#                 env.suspend(vm)
# Step 3. Resume VM, revert snapshot, upload and run script, suspend vm
# with open('/home/msamoylov/vm_manager/vms') as f:
#     for vm in f.readlines():
#         try:
#             virt = vm.rstrip('\n')
#             env.resume(vm_name=virt)
#             env.revert_snapshot_name(vm_name=virt, snapshot_name='ready')
#             vm_ip = env.get_vm_ip(vm_name=virt)
#             ssh.upload_to_remote(vm_ip, 'root', 'TestRoot1',
#                                  '/home/msamoylov/statistics_sender/client.py',
#                                  '/tmp/client.py')
#             cmd = 'python /tmp/client.py'
#             result = ssh.exec_cmd(vm_ip, cmd)
#             env.suspend(vm_name=virt)
#         except Exception as e:
#             print("Cannot connect to vm {}".format(virt), e) |
10,808 | 24279a007dfae71c2debe9511d8937a87b472ebb | # -*- coding: utf-8 -*
from sklearn.linear_model import LogisticRegression, LogisticRegressionCV
from sklearn.metrics import classification_report, accuracy_score
from sklearn.preprocessing import StandardScaler
from model.load_data import load_data, split_test_data
from sklearn.metrics import roc_auc_score
def LR(X_train, X_test, y_train, y_test):
    """Fit a class-balanced logistic regression and print evaluation metrics
    (coefficients, ROC AUC, classification report, accuracy)."""
    classifier = LogisticRegression(random_state=1995, class_weight='balanced')
    classifier.fit(X_train, y_train)
    print(classifier.coef_)
    predictions = classifier.predict(X_test)
    # Historical scores recorded on earlier runs:
    # 20k dataset: 0.4998749061796347
    # 40k dataset: 0.7124093493934135
    # 40k dataset CV5: 0.8502412526760958
    print('ROC_AUC_SCORE:', roc_auc_score(y_test, predictions))
    print('classification_report:\n', classification_report(y_test, predictions))
    print('accuracy_score:', accuracy_score(y_test, predictions))
if __name__ == '__main__':
    # Load the full dataset and split it into train/test partitions.
    allData = load_data()
    X_train, X_test, y_train, y_test = split_test_data(allData)
    # Preprocessing (currently disabled): standardize the features.
    # ss = StandardScaler()
    # X_train = ss.fit_transform(X_train)
    # X_test = ss.transform(X_test)
    LR(X_train, X_test, y_train, y_test)
|
10,809 | 94b2104c2199c60deffcc62ba8339c8f8f907b53 | from dolfin import *
import numpy as np
import scipy.sparse as sp
import scipy.interpolate
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.colors import BoundaryNorm
from matplotlib.ticker import MaxNLocator
from datetime import date
import os
import math
import sys
import h5py
import ipdb
#########
## generated plots for model_new
#########
# drt = "./results/modelNew2_lin/2015-09-29(whole)"
# drt2 = "./results/modelNew2_lin/2015-09-29(whole)"
drt = "./results/modelNew2/2016-03-07(whole_3)"
# drt = "./results/modelNew2/2016-03-01(local03)"
drt2 = drt
mesh = Mesh("./test_geo/test2.xml")
subdomains = MeshFunction("size_t", mesh,
"./test_geo/test2_physical_region.xml")
boundaries = MeshFunction("size_t", mesh,
"./test_geo/test2_facet_region.xml")
V0 = FunctionSpace(mesh,"DG",0)
V = VectorFunctionSpace(mesh,"CG",2)
P = FunctionSpace(mesh, "CG", 1)
W = V * P
T = FunctionSpace(mesh, "CG", 1)
t_final = 300
# ##############
# t_final = t_final/3 # only works for MPC
# ##############
dt = 10
time_axis = range(0,t_final+dt,dt)
time_axis = np.array(time_axis)
def retrieve_result( filename_lin, filename_final ):
    """Load problem sizes, DOF index ranges and control/target point indices
    from the linearization HDF5 file, plus the final solution vector from a
    .npy file.

    Returns a 20-tuple in exactly the order the module-level unpacking
    below expects.

    NOTE(review): ``Dataset.value`` was removed in h5py 3.0 (use ``ds[()]``
    there), and ``fdata`` is never closed — confirm the pinned h5py version.
    """
    fdata = h5py.File( filename_lin, "r" )
    # Problem sizes (time steps and per-field DOF counts).
    n_f = fdata[ "n_f" ].value
    n_t = fdata[ "n_t" ].value
    n_u = fdata[ "n_u" ].value
    n_p = fdata[ "n_p" ].value
    num_t = fdata[ "num_t" ].value
    num_u = fdata[ "num_u" ].value
    num_p = fdata[ "num_p" ].value
    n_e1 = fdata[ "n_e1" ].value
    n_e2 = fdata[ "n_e2" ].value
    n_e3 = fdata[ "n_e3" ].value
    # Index ranges of the free DOFs for each field.
    t_range = fdata[ "t_range" ].value
    v_range = fdata[ "v_range" ].value
    p_range = fdata[ "p_range" ].value
    # Boundary-control and target-area point indices.
    vbc_point = fdata[ "vbc_point" ].value
    vbc_point2 = fdata[ "vbc_point2" ].value
    vbc2_point = fdata[ "vbc2_point" ].value
    vbc2_point2 = fdata[ "vbc2_point2" ].value
    tq_point = fdata[ "tq_point" ].value
    tq_point2 = fdata[ "tq_point2" ].value
    tq_point3 = fdata[ "tq_point3" ].value
    # ipdb.set_trace()
    final_array = np.load( filename_final )
    return ( n_f, n_t, n_u, n_p,
             num_t, num_u, num_p,
             n_e1, n_e2, n_e3,
             t_range, v_range, p_range,
             vbc_point, vbc_point2, vbc2_point, vbc2_point2,
             tq_point, tq_point2, tq_point3,
             final_array )
( n_f, n_t, n_u, n_p,
num_t, num_u, num_p,
n_e1, n_e2, n_e3,
t_range, v_range, p_range,
vbc_point, vbc_point2, vbc2_point, vbc2_point2,
tq_point, tq_point2, tq_point3,
final_array ) = retrieve_result( "model_new2_lin.data",
(drt + "/results1.npy") )
final_array2 = np.load( (drt2 + "/results1.npy") )
# #############
# n_f = n_f/3 # for MPC only
# #############
num_lp = 1
n_total = n_f*( num_t+1+1 ) + num_u + num_p + ( 1 + 1 )*2
n_constraint = n_f*n_e1 + n_e2 + n_e3
tidx = np.arange( 0, n_f*num_t ).reshape( ( n_f, num_t ) ) # temperature indx
uidx = ( tidx.size +
np.arange( 0, num_u ) ) # velocity indx
pidx = ( tidx.size + uidx.size +
np.arange( 0, num_p ) ) # pressure indx
vidx = ( tidx.size + uidx.size + pidx.size +
np.arange( 0, n_f ) ) # heater control, indx
vuidx = ( tidx.size + uidx.size + pidx.size + vidx.size +
np.arange( 0, 1 ) ) # velocity control 1, indx
vu2idx = ( tidx.size + uidx.size + pidx.size + vidx.size + vuidx.size +
np.arange( 0, 1 ) ) # velocity control 2, indx
v2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
np.arange( 0, n_f ) ) # heater control, indx
v2uidx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size + v2idx.size +
np.arange(0,1) ) # velocity control 1 of N2, indx
v2u2idx = ( tidx.size + uidx.size + pidx.size +
vidx.size + vuidx.size + vu2idx.size +
v2idx.size + v2uidx.size +
np.arange(0,1) ) # velocity control 2 of N2, indx
e1idx = np.arange( 0, n_f*n_e1 ).reshape( ( n_f, n_e1 ) )
e2idx = ( e1idx.size +
np.arange( 0, n_e2 ) )
e3idx = ( e1idx.size + e2idx.size +
np.arange( 0, n_e3 ) )
# Map each target-area coordinate onto its position inside t_range.
_t_list = t_range.tolist()
tqidx = np.array([_t_list.index(point) for point in tq_point])    # target area 1
tq2idx = np.array([_t_list.index(point) for point in tq_point2])  # target area 2
tq3idx = np.array([_t_list.index(point) for point in tq_point3])  # target area 3
finalT = np.zeros( (n_f+1,n_t) )
for i in range(1,n_f+1):
finalT[ i,t_range ] = final_array[tidx[i-1,:]]
finalU = np.zeros( (n_u,) )
finalU[v_range] = final_array[uidx]
finalU[vbc_point] = final_array[vuidx]
finalU[vbc_point2] = final_array[vu2idx]
finalU[vbc2_point] = final_array[v2uidx]
finalU[vbc2_point2] = final_array[v2u2idx]
finalP = np.zeros( (n_p,) )
finalP[p_range] = final_array[pidx]
# finalV = np.zeros( (n_f+1,) )
finalV = 1000.0*final_array[vidx]
finalV2 = 1000.0*final_array[v2idx]
final2V = 1000.0*final_array2[vidx]
final2V2 = 1000.0*final_array2[v2idx]
finalVU = final_array[vuidx]
finalVU2 = final_array[vu2idx]
finalV2U = final_array[v2uidx]
finalV2U2 = final_array[v2u2idx]
eng_p = finalP.max()
eng_f1 = eng_p * 2.0/0.1 * t_final**2 * (finalVU**2 + finalVU2**2)**0.5
eng_h1 = np.sum(finalV) * dt
eng_f2 = eng_p * 2.0/0.1 * t_final**2 * (finalV2U**2 + finalV2U2**2)**0.5
eng_h2 = np.sum(finalV2) * dt
# tem = np.mean( final_array[ tidx[ 0:n_f/3, tqidx ] ] ) + np.mean( final_array[ tidx[ n_f/3:2*n_f/3, tq2idx ] ] ) + np.mean( final_array[ tidx[ 2*n_f/3:, tq3idx ] ] )
# tem = tem/3
# import ipdb; ipdb.set_trace()
# plot controls for the two cases
'''
plt.figure()
heat1_moving = np.zeros( (n_f+1,) )
heat1_moving[1:] = finalV
heat1_moving[0] = finalV[0]
heat2_moving = np.zeros( (n_f+1,) )
heat2_moving[1:] = finalV2
heat2_moving[0] = finalV2[0]
heat1_whole = np.zeros( (n_f+1,) )
heat1_whole[1:] = final2V
heat1_whole[0] = final2V[0]
heat2_whole = np.zeros( (n_f+1,) )
heat2_whole[1:] = final2V2
heat2_whole[0] = final2V2[0]
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
line1, = plt.step(time_axis,heat1_moving, color='b')
line2, = plt.step(time_axis,heat2_moving,color='b',linestyle="--")
line3, = plt.step(time_axis,heat1_whole,color='r')
line4, = plt.step(time_axis,heat2_whole,color='r',linestyle='--')
plt.xlabel('Time (s)')
plt.ylim(0.0,300)
plt.grid()
plt.savefig((drt + '/linear_heat.pdf'), dpi=1000, format='pdf')
plt.close()
# import ipdb; ipdb.set_trace()
'''
# plot velocity in matplot
plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
########################
contalpha = 0.5
wallthick = 0.5
wallalpha = 0.25
wallcolor = '#2e3436'
heateralpha = 0.4
heatercolor = '#3465A4'
omegazdict = { 'width': 2,
'height': 2,
'boxstyle': patches.BoxStyle('Round', pad=0.15),
'linewidth': 1.0,
'color': 'black',
'zorder': 15,
'fill': False }
heaterdict = { 'width': 1,
'height': 1,
'boxstyle': patches.BoxStyle('Round',pad=0.15),
'linewidth': 1.0,
'edgecolor': 'black',
'alpha': heateralpha,
'facecolor': heatercolor,
'zorder': 5,
'fill': True }
walldict = { 'fill': True,
'color': wallcolor,
'linewidth': 0,
'zorder': 5,
'alpha': wallalpha }
#############
XU = V.dofmap().tabulate_all_coordinates(mesh)
v_dim = V.dim()
XU.resize((V.dim(),2))
xu_cor = XU[::2,0]
# xv_cor = XU[1::2,0]
yu_cor = XU[::2,1]
# yv_cor = XU[1::2,1]
dx = 0.3
dy = 0.3
( xm, ym ) = np.meshgrid( np.arange( xu_cor.min(), xu_cor.max(), dx ),
np.arange( yu_cor.min(), yu_cor.max(), dy ) )
# linear interplation
u_x = finalU[::2]
u_y = finalU[1::2]
ipdb.set_trace()
for i in range( len( u_x ) ):
u_x[i] = np.sign( u_x[i] ) * abs( u_x[i] )**(0.7)
u_y[i] = np.sign( u_y[i] ) * abs( u_y[i] )**(0.7)
Ux = scipy.interpolate.Rbf(xu_cor, yu_cor, u_x, function='linear')
Uy = scipy.interpolate.Rbf(xu_cor, yu_cor, u_y, function='linear')
u_xi = Ux(xm, ym)
u_yi = Uy(xm, ym)
# speed = np.sqrt( u_xi*u_xi + u_yi*u_yi )
( fig, ax ) = plt.subplots( num = 1,
# figsize=(6,3),
dpi=150 )
q_plot = plt.quiver( xm, ym, u_xi, u_yi, pivot = 'tip', color = 'b' )
# plt.streamplot(yu_cor, xu_cor, v_y, u_x)
# plt.colorbar()
q_plot.ax.axes.get_xaxis().set_visible(False)
q_plot.ax.axes.get_yaxis().set_visible(False)
# qk = plt.quiverkey(q_plot, 0.1, 0.1, 0.1,
# r'$0.1 \frac{m}{s}$',
# fontproperties={'weight': 'bold', 'size':20} )
###########
## omega_z
# ax.add_patch( patches.FancyBboxPatch( xy=(1.5, 2.25), ## bottom-left corner
# **omegazdict ) )
## heaters
# ax.add_patch( patches.FancyBboxPatch( xy=(0.75,3.25), ##bottom-left corner
# **heaterdict ) )
# ax.add_patch( patches.FancyBboxPatch( xy=(8.25,3.25), ##bottom-left corner
# **heaterdict ) )
## walls
ax.add_patch( patches.Rectangle( xy=(0,wallthick), ##bottom-left corner
width=wallthick,
height=4-wallthick,
**walldict ) )
ax.add_patch( patches.Rectangle( xy=(5.5,1.5), ##bottom-left corner
width=wallthick,
height=3.5-wallthick,
**walldict ) )
ax.add_patch( patches.Rectangle( xy=(10,1), ##bottom-right corner
width=-wallthick,
height=3,
**walldict ) )
ax.add_patch( patches.Rectangle( xy=(0,0), ##bottom-left corner
width=10,
height=wallthick,
**walldict ) )
ax.add_patch( patches.Rectangle( xy=(0,5), ##top-left corner
width=10,
height=-wallthick,
**walldict ) )
ax.axis( 'equal' )
ax.axis( 'off' )
# plt.tight_layout()
# fig.subplots_adjust( left=0.03, bottom=0.05, right=1.0, top=0.95 )
# plt.savefig((drt + '/velocity' + str(num_lp) + '.pdf'), dpi=1000, format='pdf')
plt.savefig(('./results/modelNew2/acc/velocity_wh.pdf'),
dpi=1000, format='pdf')
plt.show()
# plt.close()
import ipdb; ipdb.set_trace()
# plot temperature in matplot
nx = 100
ny = 100
X = T.dofmap().tabulate_all_coordinates(mesh)
X.resize((T.dim(),2))
x_cor = X[:,0]
y_cor = X[:,1]
xi, yi = np.linspace(x_cor.min(), x_cor.max(), nx+1), np.linspace(y_cor.min(), y_cor.max(), ny+1)
xi, yi = np.meshgrid(xi, yi)
tmp_idx = [30]
finalT = finalT[tmp_idx,:]
levels = MaxNLocator(nbins=15).tick_values(finalT.min(), finalT.max())
cmap = plt.get_cmap('Reds')
for i in range( len( tmp_idx ) ):
fig = plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
ax = fig.add_subplot(111, aspect="equal")
temp_T = finalT[i,:]
rbf = scipy.interpolate.Rbf(x_cor, y_cor, temp_T, function='linear')
temp_zi = rbf(xi, yi)
CS = ax.contourf(xi, yi, temp_zi, levels=levels, cmap=cmap)
CS2 = ax.contour(CS, levels=CS.levels, colors = 'r', hold='on')
cbar = fig.colorbar(CS)
cbar.add_lines(CS2)
CS.ax.axes.get_xaxis().set_visible(False)
CS.ax.axes.get_yaxis().set_visible(False)
CS2.ax.axes.get_xaxis().set_visible(False)
CS2.ax.axes.get_yaxis().set_visible(False)
fig.savefig((drt + '/temperature' + str(num_lp) + str(i).zfill(2)+'.pdf'), dpi=1000, format='pdf')
plt.close()
# import ipdb; ipdb.set_trace()
# plot pressure in matplot
plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
XQ = P.dofmap().tabulate_all_coordinates(mesh)
XQ.resize((T.dim(),2))
xq_cor = XQ[:,0]
yq_cor = XQ[:,1]
temp_P = finalP
rbf_p = scipy.interpolate.Rbf(xq_cor, yq_cor, temp_P, function='linear')
temp_zi = rbf_p(xi, yi)
cmap = plt.get_cmap('Blues')
levels = MaxNLocator(nbins=15).tick_values(finalP.min(), finalP.max())
CS = plt.contourf(xi, yi, temp_zi, levels=levels, cmap=cmap)
plt.colorbar()
CS.ax.axes.get_xaxis().set_visible(False)
CS.ax.axes.get_yaxis().set_visible(False)
plt.savefig( ( drt + '/pressure' + str(num_lp) + '.pdf' ), dpi=1000, format='pdf' )
plt.close()
# plot control
plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
heat_time = np.zeros( (n_f+1,) )
heat_time[1:] = finalV
heat_time[0] = finalV[0]
pl_v, = plt.step( time_axis, abs( heat_time ) )
plt.xlabel('Time (s)')
plt.ylabel('Input (W)')
plt.grid()
plt.savefig((drt + '/heat' + str(num_lp) + '.pdf'), dpi=1000, format='pdf')
plt.close()
# plot control
plt.figure()
plt.rcParams['ps.useafm'] = True
plt.rcParams['pdf.use14corefonts'] = True
plt.rcParams['text.usetex'] = True
heat_time = np.zeros( (n_f+1,) )
heat_time[1:] = finalV2
heat_time[0] = finalV2[0]
pl_v, = plt.step( time_axis, abs( heat_time ) )
plt.xlabel('Time (s)')
plt.ylabel('Input (W)')
plt.grid()
plt.savefig((drt + '/heat2' + str(num_lp) + '.pdf'), dpi=1000, format='pdf')
plt.close()
|
10,810 | b4bb96f2395f0e522724256837a81136974123b0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Uninformed Search Algorithm
__author__ = "Gonzalo Chacaltana Buleje"
__email__ = "gchacaltanab@gmail.com"
from Node import Node
class UnniformedSearchAlgorithm(object):
    """Skeleton for uninformed (blind) search over a list of named nodes.

    The frontier lives in ``self.queue``; subclasses choose the insertion
    policy by overriding ``addQueue`` and drive the search via ``search``.
    """

    def __init__(self, listNodes, searched):
        self.listNodes = listNodes
        self.searched = searched
        self.createQueue()

    def createQueue(self):
        # The frontier starts with the first node of the list.
        self.queue = [self.listNodes[0]]

    def addQueue(self, node):
        # Insertion policy is left to subclasses.
        pass

    def readQueue(self):
        # FIFO removal from the front of the frontier.
        return self.queue.pop(0)

    def getQueueLen(self):
        return len(self.queue)

    def getNodeFromList(self, name):
        # Linear scan for the node with a matching name; None when absent.
        return next((candidate for candidate in self.listNodes
                     if candidate.name == name), None)

    def validateLenQueue(self):
        if not self.queue:
            raise Exception("La cola esta vacia")

    def matchSearched(self, nodeName):
        # Success is signalled by raising — callers treat this as "found".
        if nodeName == self.searched:
            raise Exception("Ciudad encontrada: %s" % nodeName)

    def search(self):
        pass

    def insertNodeChildQueue(self, node):
        for child in node.getChildrenNodes():
            resolved = self.getNodeFromList(child.name)
            if (isinstance(resolved, Node)):
                self.addQueue(resolved)
|
10,811 | fef95e5b425674268f5146b5b22e777fdcbcbec5 | class Solution:
def createTargetArray(self, nums, index):
i, res = 0, []
while i < len(index):
res.insert(index[i], nums[i])
i += 1
return res
if __name__ == '__main__':
nums = [0,1,2,3,4]
index = [0,1,2,2,1]
sol = Solution().createTargetArray(nums, index)
print(sol) |
10,812 | 139f8cdcb7a9e0730130800285a9908d3cffa3ab | import random
class Dices:
    """A pair of six-sided dice."""

    def __init__(self):
        # Face values of the most recent roll; 0 until roll() is called.
        self.dice1 = 0
        self.dice2 = 0

    def roll(self):
        """Roll both dice, store the face values, and return them as a tuple."""
        self.dice1, self.dice2 = random.randint(1, 6), random.randint(1, 6)
        return (self.dice1, self.dice2)
|
10,813 | ac6ed5e43342dbe0178c7a7443a6c6163517a2de | from meses_2021 import meses
# Departure times: regular days 5 pm (17), Fridays 4 pm (16).
general = 17
viernes = 16
# Per-day record of entered departure times, keyed by the day string.
datos_almacenados={}
print("*****Control de Horario*****")
print("")
mes_in = input("Indique el mes: ")
while True:
    # meses() (project module) returns the valid day strings for the month.
    mes = meses(mes_in)
    dia = input("Ingrese el dia (formato: dia 00)")
    if dia in mes:
        print("el dia existe")
        entrada = float(input("Ingrese horario entrada (ej: 8.00): "))
        salida = float(input("Ingrese horario salida: "))
        datos_almacenados[dia] = salida
        # The day name's first letter selects the schedule:
        # l/m/j = Monday-Thursday, v = Friday, anything else = Saturday.
        prueba = dia[0]
        #print(prueba)
        # NOTE(review): the duplicated prueba == 'm' test is redundant —
        # martes and miercoles share the same initial letter.
        if prueba == 'l' or prueba == 'm' or prueba == 'm' or prueba == 'j':
            # Overtime Monday-Thursday: hours worked past the 17:00 departure.
            horas_extras_porDia = salida - general
            # Round to 2 decimal places.
            horas_extras = round(horas_extras_porDia,2)
            print("Horas extras trabajadas el dia {} fueron: {}".format(dia,horas_extras))
        elif prueba == 'v':
            # Overtime Friday: hours worked past the 16:00 departure.
            horas_extras_Viernes = salida - viernes
            horas_extras_v = round(horas_extras_Viernes,2)
            print("Horas extras trabajdas el dia {} fueron: {}".format(dia,horas_extras_v))
        else:
            # Saturday: the whole attendance span counts as overtime.
            horas_extras_sabado = salida - entrada
            print("Horas extras trabajadas el dia sabado fueron: {}".format(horas_extras_sabado))
        continuar = int(input("Desea continuar? (1=Si 0=No)"))
        if continuar == 1:
            continue
        #dia = input("Ingrese el dia (formato: dia 00)")
        else:
            break
    else:
        print("No existe, reintentar")
        # NOTE(review): this value is discarded — the loop restarts and asks
        # for the day again, so the user is effectively prompted twice.
        dia = input("Ingrese el dia (formato: dia 00)")
print("\n>>>>>Datos mes {}<<<<<".format(mes_in))
print("------------------------------------")
print(datos_almacenados)
|
10,814 | 3ab6b4cef47371b680599211546245647e68d624 | import os
import PIL.Image
import time
from Tkinter import *
# =============================================Initialize Variables=============================================#
size = 256, 256 # Size of thumbnail image displayed
newValue = list((0, 0, 0))
convMask = 3
normalizer = 1
errorMessage = ""
previewBox = 0
convMatrix = [[0 for x in range(convMask)] for x in range(convMask)] # matrix used for 2D image convolution
newColor = list((0, 0, 0))
for x in range(0, convMask):
for y in range(0, convMask):
convMatrix[x][y] = 0
# cnt = cnt+1
convMatrix[1][1] = 1
# ----------------------------------------------Load Images----------------------------------------------#
image = PIL.Image.open("bumbleKoda.png") # Open default image to memory
thumbnailImage = PIL.Image.open("bumbleKoda.png") # Open another copy of image, to be used as thumbnail
thumbnailImage.thumbnail(size, PIL.Image.ANTIALIAS) # Turn thumbnailImage into a image with max 'size' of size
# ----------------------------------------------Pre Process Images----------------------------------------------#
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if other
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = image.convert('RGB')
pixels = image.load() # Holds all pixel data as a 3 tuple in a 2D array
thumbnailPixels = thumbnailImage.load()
newPixels = pixels # To be used when processing, will hold new image while processing
imageWidth = image.size[0]
imageHeight = image.size[1]
# =============================================Initialize GUI=============================================#
root = Tk() # Initialize Tkinter for GUI
# ----------------------------------------------GUI Functions----------------------------------------------#
def image_load(): # loads the image and displays it on screen
global thumbnailImage
global pixels
global thumbnailPixels
global newPixels
global image
global imageWidth
global imageHeight
global size
global errorMessage
global previewBox
global newImage
filePath = path.get() # Retrieve file path from UI
start = time.clock() # timer (debug message)
if filePath == "":
errorMessage = "Error: Image path is blank"
update_error()
elif os.path.isfile(filePath) == FALSE:
errorMessage = "Error: File does not exist"
update_error()
else:
image = PIL.Image.open(filePath) # Open image to memory
newImage = image
thumbnailImage = PIL.Image.open(filePath) # Open another copy of image, to be used as thumbnail
if image.mode != 'RGB': # Removes alpha channel if RGBA, sets to RGB if grayscale/monotone
image = image.convert('RGB')
if thumbnailImage.mode != 'RGB':
thumbnailImage = image.convert('RGB')
imageWidth = image.size[0]
imageHeight = image.size[1]
pixels = image.load() # 2D array containing all of the pixel data in image
thumbnailPixels = thumbnailImage.load() # 2D array containing all fo the pixel data in thumbnailImage
newPixels = newImage.load() # to be used in processing, holds new image while processing
thumbnailImage.thumbnail(size,
PIL.Image.ANTIALIAS) # Turn thumbnailImage into a image with max width and height of 'size'
thumbnailImage.save("tempThumbnail.gif") # image to be loaded to UI
photo = PhotoImage(file="tempThumbnail.gif") # load image to UI
display_image.configure(image=photo)
display_image.photo = photo
stop = time.clock() # timer (debug message)
print "Image loaded and displayed in %f seconds." % (stop - start) # debug message
errorMessage = "" # Clears error message on UI
update_error()
def apply_matrix(): # Need to properly set this up!
global pixels
global newPixels
global image
global imageHeight
global imageWidth
global newImage
global convMatrix
global convMask
global normalizer
global previewBox
if previewBox:
imageStart = 2
imageStopWidth = 128
imageStopHeight = 128
else:
imageStart = 2
imageStopWidth = imageWidth-2
imageStopHeight = imageHeight-2
start = time.clock() # timer (debug message)
for x in range(imageStart, imageStopWidth): # Image Rows, ignore outside pixels
print x,"/",(imageStopWidth)
for y in range(imageStart, imageStopHeight): # Image Columns, ignore outside pixels
newColor = list((0, 0, 0)) # clear newColor for next loop
for r in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- X values for convolution
for q in range((-convMask + 1)/2, (convMask - 1)/2 + 1): # +/- Y values for convolution
color = list(pixels[x + r, y + q]) # receive color of pixel being weighted and added
for i in range(0, 3): # for each R, G, and B
newValue[i] = color[i] * convMatrix[q + 1][r + 1] / normalizer
newColor[i] = newColor[i] + newValue[i] # sum all in r and q area
for j in range(0, 3): # clip R,G,B channels
if newColor[j] > 255:
newColor[j] = 255
elif newColor[j] < 0:
newColor[j] = 0
newPixels[x, y] = tuple(newColor) # convert back to tuple, store in new location
newImage.save("processedImage.png")
newImage.thumbnail(size, PIL.Image.ANTIALIAS) # processed image to be displayed to UI
newImage.save("processedImageThumbnail.gif")
newImage = PIL.Image.open("processedImage.png") #reload to avoid resize issues
update_image()
stop = time.clock() # timer (debug message)
print "Image processed in", (stop - start), "seconds." # debug message
def update_image():  # Updates image displayed on UI to most recently processed one
    """Reload the processed-image thumbnail GIF into the display label."""
    photo = PhotoImage(file="processedImageThumbnail.gif")
    display_image.configure(image=photo)
    # Keep a reference on the widget so Tkinter's GC does not drop the image.
    display_image.photo = photo
def update_matrix():  # updates the normalizer and each value of the convolution matrix to what was entered by user
    """Copy the nine 3x3 UI entry fields into convMatrix and read the normalizer.

    NOTE(review): int() raises ValueError on blank or non-numeric fields —
    presumably all ten entries must be filled before clicking Update; confirm.
    """
    global normalizer
    global convMatrix
    convMatrix[0][0] = int(matrix_1_1.get())
    convMatrix[0][1] = int(matrix_1_2.get())
    convMatrix[0][2] = int(matrix_1_3.get())
    convMatrix[1][0] = int(matrix_2_1.get())
    convMatrix[1][1] = int(matrix_2_2.get())
    convMatrix[1][2] = int(matrix_2_3.get())
    convMatrix[2][0] = int(matrix_3_1.get())
    convMatrix[2][1] = int(matrix_3_2.get())
    convMatrix[2][2] = int(matrix_3_3.get())
    normalizer = int(normalizer_entry.get())
def update_error():  # updates the error message displayed on screen
    """Push the current errorMessage string into the UI error label."""
    global error_message
    error_message.configure(text=errorMessage)  # updates text displayed
def swap_checkbox_value():
    """Toggle the module-level preview flag (0 <-> 1) when the checkbox changes."""
    global previewBox
    if previewBox == 1:
        previewBox=0;
    else:
        previewBox=1;
    print previewBox  # debug: show the new flag state (Python 2 print)
# ----------------------------------------------GUI Widgets----------------------------------------------#
# -------------------------Left Side Widgets-------------------------#
frame = Frame(root, bg="white") # base frame for other elements
frame.pack(side=LEFT)
quit_button = Button(frame, text="QUIT", command=frame.quit)
quit_button.pack(side=BOTTOM, fill=X)
apply_filter = Button(frame, text="Apply Matrix Filter", command=apply_matrix)
apply_filter.pack(side=TOP, fill=X)
preview_checkbox = Checkbutton(frame, text="Small Section Preview", command=swap_checkbox_value)
preview_checkbox.pack(side=TOP, fill=X)
load_image = Button(frame, text="Load Image", command=image_load)
load_image.pack(side=TOP, fill=X)
path = Entry(frame) # text entry field, for Load image
path.pack(side=TOP, fill=X)
photo = PhotoImage(file="blankThumbnail.gif")
display_image = Label(frame, image=photo)
display_image.photo = photo
display_image.pack(side=BOTTOM)
# -------------------------Right Side Widgets-------------------------#
frame_right = Frame(root) #main right frame
frame_right.pack(side=RIGHT)
frame_right_first = Frame(frame_right) #holds Update button and normalizer entry
frame_right_first.pack(side=TOP)
frame_right_second = Frame(frame_right) #holds first row of convolution matrix
frame_right_second.pack(side=TOP)
frame_right_third = Frame(frame_right) #holds second row of convolution matrix
frame_right_third.pack(side=TOP)
frame_right_fourth = Frame(frame_right) #holds third row of convolution matrix
frame_right_fourth.pack(side=TOP)
frame_right_fifth = Frame(frame_right) #hold error message
frame_right_fifth.pack(side=TOP)
update_matrix_button = Button(frame_right_first, text="Update Matrix", command=update_matrix)
update_matrix_button.pack(side=LEFT)
normalizer_entry = Entry(frame_right_first, width=2)
normalizer_entry.pack(side=LEFT)
matrix_1_1 = Entry(frame_right_second, width=2)
matrix_1_1.pack(side=LEFT)
matrix_1_2 = Entry(frame_right_second, width=2)
matrix_1_2.pack(side=LEFT)
matrix_1_3 = Entry(frame_right_second, width=2)
matrix_1_3.pack(side=LEFT)
matrix_2_1 = Entry(frame_right_third, width=2)
matrix_2_1.pack(side=LEFT)
matrix_2_2 = Entry(frame_right_third, width=2)
matrix_2_2.pack(side=LEFT)
matrix_2_3 = Entry(frame_right_third, width=2)
matrix_2_3.pack(side=LEFT)
matrix_3_1 = Entry(frame_right_fourth, width=2)
matrix_3_1.pack(side=LEFT)
matrix_3_2 = Entry(frame_right_fourth, width=2)
matrix_3_2.pack(side=LEFT)
matrix_3_3 = Entry(frame_right_fourth, width=2)
matrix_3_3.pack(side=LEFT)
error_message = Label(frame_right_fifth, relief=RIDGE, wraplength=150)
error_message.pack(side=LEFT)
# =============================================Run GUI=============================================#
root.mainloop() # main loop for Tkint
root.destroy() # clears the window, fully ending task
if os.path.isfile("tempThumbnail.gif"): # clean up working directory of temp files
os.remove("tempThumbnail.gif")
if os.path.isfile("processedImageThumbnail.gif"):
os.remove("processedImageThumbnail.gif")
|
10,815 | bf1293b005fc57cb9116e972342f909a39d63004 | import re;
from importlib.metadata import requires
from django.shortcuts import render, redirect
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.db.models import Q, Prefetch
from .models import Project, Tag
from .utils import searchProjects, paginateProjects
from .forms import ProjectForm, ReviewForm
from django.http import HttpResponse
def projects(request):
    """List view: searchable, paginated grid of projects.

    Projects whose owner profile is null (orphans) are hidden.
    """
    project_list, search_query = searchProjects(request)
    project_list = project_list.exclude(owner__isnull=True)
    per_page = 6
    custom_range, project_list = paginateProjects(request, project_list, per_page)
    return render(request, 'projects/projects.html', {
        'projects': project_list,
        'search_query': search_query,
        'custom_range': custom_range,
    })
def project(request, pk):
    """Detail view for a single project; handles review submission on POST.

    On a valid review POST, attaches the review to the project and the
    signed-in user's profile, refreshes the project's vote count, and
    redirects back to this page.
    """
    projectObj = Project.objects.get(id=pk)
    form = ReviewForm()
    if request.method =='POST':
        form = ReviewForm(request.POST)
        # BUG FIX: the form was previously saved without calling is_valid(),
        # so an invalid submission raised a 500 instead of re-rendering the
        # page with the bound form and its errors.
        if form.is_valid():
            review = form.save(commit=False)
            review.project = projectObj
            review.owner = request.user.profile
            review.save()
            #update project vote count
            projectObj.getVoteCount
            messages.success(request, 'Your review was successfully submited!')
            return redirect('project', pk = projectObj.id)
    context = {'project': projectObj, 'form': form}
    return render(request,'projects/single-project.html', context )
@login_required(login_url="login")
def createProject(request):
    """Create a new project owned by the signed-in user's profile.

    Tags can be selected from the user's existing tags ('tags' checkboxes)
    or typed free-form into 'newtags', which are created on the fly.
    """
    profile = request.user.profile
    form = ProjectForm()
    #get all distinct tags used by the User on their other projects.
    tagsId = Project.objects.filter(owner=profile.id).values_list('tags', flat=True).exclude(tags__isnull=True).order_by().distinct()
    otherTags = Tag.objects.filter(id__in=tagsId)
    if request.method == 'POST':
        # BUG FIX: default to '' so a POST without the 'newtags' field does
        # not pass None into re.sub (TypeError).
        newtags = request.POST.get('newtags', '')
        #remove non word characters
        newtags = re.sub('[^A-Za-z0-9-]+', " ", newtags).split()
        #get selected tags
        tagsChecked = request.POST.getlist('tags')
        form = ProjectForm(request.POST, request.FILES)
        if form.is_valid():
            project = form.save(commit=False)
            project.owner = profile
            project.save()
            project.tags.set(tagsChecked)
            for tag in newtags:
                tag,created = Tag.objects.get_or_create(name=tag)
                project.tags.add(tag)
            return redirect('account')
    context={'form':form, 'otherTags':otherTags}
    return render(request, 'projects/project_form.html', context)
@login_required(login_url="login")
def updateProject(request, pk):
    """Edit one of the signed-in user's projects, including its tag set.

    Tags typed into 'newtags' are created on the fly; after a successful
    save, tags no longer referenced by any project are purged.
    """
    profile = request.user.profile
    try:
        project = profile.project_set.get(id=pk )
    # FIX: a bare `except:` also trapped SystemExit/KeyboardInterrupt;
    # Exception keeps the original "not found" behavior for lookup errors.
    except Exception:
        messages.error(request,"Project not found")
        return redirect('account')
    form = ProjectForm(instance= project)
    #get all distinct tags used by the User on their other projects.
    tagsId = Project.objects.filter(owner=profile.id).values_list('tags', flat=True).exclude(tags__isnull=True).order_by().distinct()
    otherTags = Tag.objects.filter(id__in=tagsId).exclude( id__in=project.tags.all())
    if request.method == 'POST':
        # BUG FIX: default to '' so a missing 'newtags' field does not pass
        # None into re.sub (TypeError).
        newtags = request.POST.get('newtags', '')
        #remove non word characters
        newtags = re.sub('[^A-Za-z0-9-]+', " ", newtags).split()
        #get selected tags
        tagsChecked = request.POST.getlist('tags')
        form = ProjectForm(request.POST, request.FILES, instance = project)
        if form.is_valid():
            project = form.save()
            project.tags.set(tagsChecked)
            for tag in newtags:
                tag,created = Tag.objects.get_or_create(name=tag)
                project.tags.add(tag)
            #list of tags used in all projects
            allUsedTags = Project.objects.all().values_list('tags', flat=True).exclude(tags__isnull=True).order_by().distinct()
            #list of tags not linked to any project, then delete them.
            unusedTags = Tag.objects.all().exclude(id__in=allUsedTags)
            unusedTags.delete()
            return redirect('account')
    context={'form':form, 'project':project, "otherTags":otherTags}
    return render(request, 'projects/project_form.html', context)
@login_required(login_url="login")
def deleteProject(request, pk):
    """Confirm-and-delete view for one of the signed-in user's projects.

    GET renders the confirmation template; POST performs the delete and
    redirects to the account page.
    """
    profile = request.user.profile
    try:
        project = profile.project_set.get(id=pk )
    # FIX: a bare `except:` also trapped SystemExit/KeyboardInterrupt;
    # Exception keeps the original "not found" behavior for lookup errors.
    except Exception:
        messages.error(request,"Project not found")
        return redirect('account')
    if request.method == 'POST':
        project.delete()
        return redirect('account')
    context={'object':project}
    return render(request, 'delete_template.html', context)
|
10,816 | 081457ccf83814231237a64e7f9f94def27cde91 | #代理模式 Proxy
'''
为其他对象提供一种代理以控制这个对象的访问
适用性:
在需要比较通用复杂的对象指针代替简单的指针的时候,使用Proxy模式。
1.远程代理 为一个对象在不同的地址空间提供局部代表
2.虚代理 根据需要创建开销很大的对象
3.保护代理 控制对原始对象的访问。
4.智能指引 取代了简单的指针,它在访问对象时只需一些附加操作。
组成:
抽象角色:通过接口或抽象类声明真实角色的业务方法。
代理角色:实现抽象角色,是真实角色的代理,通过真实角色的业务逻辑方法
来实现抽象方法,并可以附件自己的操作。
真实角色:实现抽象角色,定义真实角色所要实现的业务逻辑,供代理角色调用。
'''
class Jurisdiction:
    """Real subject of the proxy pattern: one business method per
    privilege level, each simply reporting the level it represents."""

    def level1(self):
        print('权限等级1')

    def level2(self):
        print('权限等级2')

    def level3(self):
        print('权限等级3')

    def level4(self):
        print('权限等级4')
class Proxy:
    """Protection proxy: routes a user to the Jurisdiction method matching
    their privilege and denies access for any unknown user."""

    def __init__(self, name):
        self.user = name
        self._jurisdiction = Jurisdiction()

    def level(self):
        """Invoke the privilege-level method for self.user, or report
        "no permission" for users outside a-d."""
        dispatch = {
            'a': self._jurisdiction.level1,
            'b': self._jurisdiction.level2,
            'c': self._jurisdiction.level3,
            'd': self._jurisdiction.level4,
        }
        handler = dispatch.get(self.user)
        if handler is None:
            print('你咩有权限。')
            return None
        return handler()
if __name__ == "__main__":
    # Demo: exercise every privilege level ('a'-'d') plus one unknown
    # user ('e'), which triggers the access-denied branch.
    test = Proxy('a')
    test.level()
    test.user = 'b'
    test.level()
    test.user = 'c'
    test.level()
    test.user = 'd'
    test.level()
    test.user = 'e'
    test.level()
|
10,817 | def90130940db577a708008753641bb09025d10d | # Question Link:- https://codeforces.com/contest/1430/problem/A
# Precomputed tables: can[v] is True iff v = 3a + 5b + 7c for some
# non-negative a, b, c, and save[v] stores one such triple [a, b, c].
mex = 1005
save = [[] for _ in range(mex)]
can = [False] * mex

def init():
    """Fill the can/save lookup tables for every value up to 1000."""
    for a in range(mex // 3 + 2):
        for b in range(mex // 5 + 2):
            for c in range(mex // 7 + 2):
                total = a * 3 + b * 5 + c * 7
                if total <= 1000:
                    can[total] = True
                    save[total] = [a, b, c]

init()
# Answer each query from stdin: print one decomposition of n as counts of
# 3s, 5s and 7s, or -1 when no decomposition exists.
for _ in range(int(input())):
    n = int(input())
    if can[n]:
        print(*save[n])
    else:
        print(-1)
|
10,818 | b6cad51218d2b168570ebe444e43c0847f9da90c | # seleniumbase package
# Package version string (single source of truth for the distribution).
__version__ = "1.61.0"
|
10,819 | 91299e3b1ec77937b2810a1198b696c4f3793a20 | from scrapy import cmdline
#
# cmdline.execute('scrapy crawl Dmu'.split())
# cmdline.execute('scrapy crawl arts'.split())
# Launch the HarperAdams_g spider in-process; equivalent to running
# `scrapy crawl HarperAdams_g` from the shell (commented lines above/below
# select other spiders).
cmdline.execute('scrapy crawl HarperAdams_g'.split())
# cmdline.execute('scrapy crawl Southampton'.split())
# cmdline.execute('scrapy crawl StAndrews_g'.split())
# cmdline.execute('scrapy crawl Hud'.split())
# cmdline.execute('scrapy crawl brunel'.split())
# cmdline.execute('scrapy crawl York'.split())
# cmdline.execute('scrapy crawl text'.split())
# cmdline.execute('scrapy crawl work'.split())
|
10,820 | fd85289b6b40e72dcd59e270cd2d849cbd571b1c | #
# Definition for binary tree:
# class Tree(object):
# def __init__(self, x):
# self.value = x
# self.left = None
# self.right = None
'''
def restoreBinaryTree(inorder, preorder):
PO_0=preorder.pop(0) #preorder naught
IO_0=inorder.pop(0) #inorder naught
t=Tree(PO_0) #tree bottom root
root=t #pointer to current root
leftStack=dict() #stack left of current root :: d[value_IO] = index_entry_queue
while PO_0:
if IO_0==PO_0:
LBP=root #left branch pointer
while PO_0 in leftStack:
#set root
#Create left tree
#next PO_0 is right branch
PO_0=preorder.pop(0)
root.right=Tree(PO_0)
else:
leftStack.append(IO_0)
IO_0=inorder.pop(0)
return t
'''
def restoreBinaryTree(inorder, preorder):
    """Rebuild a binary tree from its inorder and preorder traversals.

    NOTE: both traversal lists are consumed (popped empty) as a side
    effect of the reconstruction.
    """
    return tInit(inorder, preorder)
def leftSubTree(t,inorder,preorder):
    """Recursively rebuild a (left) subtree from inorder/preorder lists.

    NOTE(review): the parameter ``t`` is immediately rebound and never
    read, and this body is byte-identical to rightSubTree and tInit --
    TODO: collapse the three into one helper.
    Both lists are consumed destructively via pop(0).
    """
    #print('Left Branch')
    # The next preorder element is this subtree's root.
    t=Tree(preorder.pop(0))
    iLeft=inorder.pop(0)
    Left=[]
    #find all left branches
    # Inorder elements before the root value belong to the left subtree.
    while iLeft!=t.value:
        Left.append(iLeft)
        iLeft=inorder.pop(0)
    # The next len(Left) preorder elements are the left subtree's preorder.
    Right=preorder[:len(Left)]
    preorder=preorder[len(Left):]
    #print('CR:',t.value)
    #print('LR:',Left,Right)
    #print('IP:',inorder,preorder)
    if Left:
        t.left=leftSubTree(t,Left,Right)
    if preorder:
        t.right=rightSubTree(t,inorder,preorder)
    return t
def rightSubTree(t,inorder,preorder):
    """Recursively rebuild a (right) subtree from inorder/preorder lists.

    NOTE(review): duplicate of leftSubTree/tInit; parameter ``t`` is
    rebound before use. Both lists are consumed destructively.
    """
    #print("Right Branch")
    # The next preorder element is this subtree's root.
    t=Tree(preorder.pop(0))
    iLeft=inorder.pop(0)
    Left=[]
    #find all left branches
    while iLeft!=t.value:
        Left.append(iLeft)
        iLeft=inorder.pop(0)
    Right=preorder[:len(Left)]
    preorder=preorder[len(Left):]
    #print('CR:',t.value)
    #print('LR:',Left,Right)
    #print('IP:',inorder,preorder)
    if Left:
        t.left=leftSubTree(t,Left,Right)
    if preorder:
        t.right=rightSubTree(t,inorder,preorder)
    return t
def tInit(inorder,preorder):
    """Entry point of the reconstruction: build the whole tree.

    NOTE(review): body is identical to leftSubTree/rightSubTree -- the
    three could be a single recursive helper. Lists are consumed.
    """
    #print("create tree")
    # First preorder element is the overall root.
    t=Tree(preorder.pop(0))
    iLeft=inorder.pop(0)
    Left=[]
    #find all left branches
    while iLeft!=t.value:
        Left.append(iLeft)
        iLeft=inorder.pop(0)
    Right=preorder[:len(Left)]
    preorder=preorder[len(Left):]
    #print('CR:',t.value)
    #print('LR:',Left,Right)
    #print('IP:',inorder,preorder)
    if Left:
        t.left=leftSubTree(t,Left,Right)
    if preorder:
        t.right=rightSubTree(t,inorder,preorder)
    return t
|
10,821 | 393e481f8e21d2b3f4a36bb50dc48d330a7b8733 | import sys, os
from db.interface import *
from learning import interface
from analysis import graphutils
from learning import consolidateFeatures
from mlabwrap import mlab
import numpy as np
import datetime
# Directory holding the MATLAB learning scripts and output .mat files.
LEARNING_ROOT="learning/"
# Suffixes for the feature / label matrix names inside the .mat file.
FEATURES="features"
LABELS="ytrain"
def main(args):
    """Build a training set from the DB and hand it to the MATLAB trainer.

    args: [db, date1, date2, date3, k, basename] where the base collab
    graph spans date3..date1, the delta graph spans date1..date2, k is the
    hop count for feature consolidation, and basename names the .mat file
    and its matrices.
    """
    db = args[0]
    date1 = args[1]
    date2 = args[2]
    date3 = args[3]
    k = int(args[4])
    basename = args[5]
    reader = DBReader(db)
    print("Getting uid")
    uid = reader.uid()
    print("Getting all the feature graphs")
    feature_graphs = graphutils.get_feat_graphs(db, uid, None, date2)
    print("Getting Gcollab_delta graph")
    Gcollab_delta = graphutils.get_collab_graph(db, uid, date1, date2)
    Gcollab_base = graphutils.get_collab_graph(db, uid, date3, date1)
    base_graphs = graphutils.get_base_dict(Gcollab_base, feature_graphs)
    graphutils.print_stats(base_graphs)
    graphutils.print_graph_stats("Gcollab_delta", Gcollab_delta)
    filepath = os.path.join(LEARNING_ROOT, basename + ".mat")
    features_matrix_name = "%s_%s"%(basename, FEATURES)
    labels_matrix_name = "%s_%s"%(basename, LABELS)
    # Features come from the base graphs; labels from edges that appear in
    # the delta graph.
    features = consolidateFeatures.consolidate_features_add(base_graphs, k, Gcollab_delta)
    #features = consolidateFeatures.consolidate_features(base_graphs, Gcollab_delta, k)
    labels = consolidateFeatures.consolidate_labels(features, Gcollab_delta)
    np_train, np_output = interface.matwrapTrain(features, labels)
    interface.writeTrain(np_train, np_output, filepath, features_matrix_name, labels_matrix_name)
    # Add learning root to mlab path so that all .m functions are available as mlab attributes
    mlab.path(mlab.path(), LEARNING_ROOT)
    mlab.training(np_train, np_output)
# NOTE base graph = till date3 to date1
# delta graph = date1 to date2
# This file calls consolidate_features for the base graph, consolidate_labels for the delta graph and writes the .mat file
# based on the basename. It also needs the k (number of hops) parameter
if __name__=="__main__":
    # CLI entry point: require all six positional arguments (see main()).
    if len(sys.argv)<6:
        print("Usage: program.py <db> <date1> <date2> <date3> <k hops> <basename mat>")
        sys.exit(1)
    main(sys.argv[1:])
|
10,822 | 3703a96e6dd3e199a9a2cbe2c92d961a095743e2 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration: adds the Checklist.name column
    (forwards) and removes it again (backwards). The frozen ORM state
    below is generated -- do not edit by hand."""

    def forwards(self, orm):
        # Adding field 'Checklist.name'
        db.add_column(u'core_checklist', 'name',
                      self.gf('django.db.models.fields.CharField')(default='placeholder name', max_length=200),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Checklist.name'
        db.delete_column(u'core_checklist', 'name')

    models = {
        u'core.aircraft': {
            'Meta': {'object_name': 'Aircraft'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'core.checklist': {
            'Meta': {'object_name': 'Checklist'},
            'aircraft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'checklists'", 'to': u"orm['core.Aircraft']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'core.checklistphase': {
            'Meta': {'object_name': 'ChecklistPhase'},
            'checklist': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': u"orm['core.Checklist']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        u'core.checkliststep': {
            'Meta': {'object_name': 'ChecklistStep'},
            'action': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'checklist_phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'steps'", 'to': u"orm['core.ChecklistPhase']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        }
    }

    complete_apps = ['core']
10,823 | 0bd126c5b7caee9b5c4c7746699bfba5ede9d0f5 | import numpy as np, omnical, aipy
import subprocess, datetime, os
from astropy.io import fits
import copy
import heracal
from scipy.io.idl import readsav
def unwrap(arr):
    """Doubly unwrap a phase sequence.

    The values are unwrapped, then their first differences are unwrapped
    and re-centred so the first step carries no whole 2*pi turns, and the
    sequence is rebuilt cumulatively from the first sample.
    """
    phases = np.unwrap(arr)
    steps = np.unwrap([phases[k] - phases[k - 1] for k in range(1, phases.size)])
    # Strip whole 2*pi turns from the first step so the rebuilt series
    # does not start with a spurious wrap.
    turns = np.round(steps[0] / (2 * np.pi))
    steps -= (turns * 2. * np.pi)
    rebuilt = np.zeros(phases.shape) + phases[0]
    for k in range(steps.size):
        rebuilt[k + 1] += np.sum(steps[:k + 1])
    return rebuilt
def output_mask_array(flag_array):
invf = 1 - flag_array
sf = np.sum((np.sum(invf,axis=0)),axis=0).astype(bool)
st = np.sum((np.sum(invf,axis=1)),axis=1).astype(bool)
mask_array = 1 - np.outer(st,sf)
mask_array = mask_array.astype(bool)
return mask_array
def find_ex_ant(uvdata):
    """Return antennas listed in the array layout of *uvdata* that never
    appear in any baseline, i.e. antennas to exclude from calibration."""
    firsts = uvdata.ant_1_array
    seconds = uvdata.ant_2_array
    return [ant for ant in uvdata.antenna_numbers
            if ant not in firsts and ant not in seconds]
def scale_gains(g0, amp_ave=1.):
    """Return a deep copy of the gain dict *g0* rescaled so that, per
    polarization, the mean gain amplitude over antennas equals *amp_ave*.

    Channels whose mean amplitude is exactly zero are left untouched to
    avoid dividing by zero.
    """
    scaled = copy.deepcopy(g0)
    for ants in scaled.values():
        mean_amp = sum(np.abs(gain) for gain in ants.values()) / len(ants)
        ratio = mean_amp / amp_ave
        live = np.where(mean_amp != 0)
        for gain in ants.values():
            gain[live] /= ratio[live]
    return scaled
def uv_wrap_fc(uv,redbls,pols=['xx','yy']):
    """Repack a pyuvdata-like object into per-pol firstcal input dicts.

    For each requested polarization, time-averages the (masked) visibility
    of every baseline found in *redbls* (conjugating when the baseline is
    stored reversed) and returns a list of dicts with keys 'pol', 'data'
    and 'flag', each keyed by baseline tuple then polarization.

    NOTE(review): uses dict.has_key(), which only exists on Python 2.
    """
    wrap_list = []
    a1 = uv.ant_1_array[:uv.Nbls]
    a2 = uv.ant_2_array[:uv.Nbls]
    data = uv.data_array
    flag = uv.flag_array
    for jj in range(uv.Npols):
        pp = aipy.miriad.pol2str[uv.polarization_array[jj]]
        if not pp in pols: continue
        wrap = {}
        wrap['pol'] = pp
        wrap['data'] = {}
        wrap['flag'] = {}
        for ii in range(uv.Nbls):
            # Keep the baseline orientation used in redbls; conjugate the
            # data if it is stored the other way round.
            if (a1[ii],a2[ii]) in redbls: bl = (a1[ii],a2[ii])
            elif (a2[ii],a1[ii]) in redbls: bl = (a2[ii],a1[ii])
            else: continue
            if not wrap['data'].has_key(bl):
                if bl == (a1[ii],a2[ii]): dat_temp = data[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii]
                else: dat_temp = data[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii].conj()
                flg_temp = flag[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii]
                # Average over time with flagged samples masked out.
                dat_ma = np.ma.masked_array(dat_temp, mask=flg_temp)
                dat_ma = np.mean(dat_ma,axis=0)
                wrap['data'][bl] = {pp: np.complex64([dat_ma.data])}
                wrap['flag'][bl] = {pp: np.array([dat_ma.mask])}
        wrap_list.append(wrap)
    return wrap_list
def uv_wrap_omni(uv,pols=['xx','yy']):
    """Repack a pyuvdata-like object into per-pol omnical input dicts.

    Returns {pol: wrap} where each wrap holds cross-correlation 'data' and
    'flag' per baseline, per-antenna normalized 'auto' amplitudes, and a
    2-D (time, freq) 'mask' built from the flags.
    """
    data_wrap = {}
    a1 = uv.ant_1_array[:uv.Nbls]
    a2 = uv.ant_2_array[:uv.Nbls]
    data = uv.data_array
    flag = uv.flag_array
    for jj in range(uv.Npols):
        pp = aipy.miriad.pol2str[uv.polarization_array[jj]]
        if not pp in pols: continue
        wrap = {}
        wrap['pol'] = pp
        wrap['data'] = {}
        wrap['flag'] = {}
        wrap['auto'] = {}
        wrap['mask'] = output_mask_array(flag[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs))
        auto_scale = 0
        for ii in range(uv.Nbls):
            if a1[ii] == a2[ii]:
                # Autocorrelation: time-averaged sqrt amplitude, with a tiny
                # floor so later divisions never hit zero.
                auto_m = np.ma.masked_array(data[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii].real,mask=wrap['mask'])
                wrap['auto'][a1[ii]] = np.sqrt(np.mean(auto_m,axis=0).data) + 1e-10
                auto_scale += np.nanmean(wrap['auto'][a1[ii]])
            else:
                bl = (a1[ii],a2[ii])
                wrap['data'][bl] = {pp: np.complex64(data[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii])}
                wrap['flag'][bl] = {pp: np.array(flag[:,0][:,:,jj].reshape(uv.Ntimes,uv.Nbls,uv.Nfreqs)[:,ii])}
        # Normalize autos so their array-wide mean amplitude is 1.
        auto_scale /= len(wrap['auto'].keys())
        for a in wrap['auto'].keys(): wrap['auto'][a] /= auto_scale
        data_wrap[pp] = wrap
    return data_wrap
def polyfunc(x, z):
    """Evaluate a polynomial via Horner's rule.

    Parameters
    ----------
    x : ndarray of sample points.
    z : coefficient array, highest power first (np.polyfit convention):
        z[0]*x**(len(z)-1) + ... + z[-1].

    Returns
    -------
    ndarray of the polynomial evaluated at each point of *x*.
    """
    # Renamed the accumulator from `sum`, which shadowed the builtin.
    acc = np.zeros(x.size)
    for coeff in z:
        acc *= x
        acc += coeff
    return acc
def mwa_bandpass_fit(gains0, auto, tile_info, amp_order=2, phs_order=1, fit_reflection=True):
    """Smooth MWA bandpass gains with per-coarse-band polynomial fits.

    Amplitude is fit per 24 coarse bands (14 usable fine channels each,
    edge channels excluded) relative to the autocorrelation *auto*; phase
    is fit globally with a polynomial of order *phs_order*. For tiles on
    150 m cables, a single cable-reflection mode near the expected delay
    is optionally retained and re-applied. Returns a smoothed deep copy.
    """
    gains = copy.deepcopy(gains0)
    # 384 fine channels spanning the MWA band (MHz).
    fqs = np.linspace(167.075,197.715,384)
    freq = np.arange(384)
    for p in gains.keys():
        for ant in gains[p].keys():
            x = np.where(gains[p][ant]!=0)[0]
            if x.size == 0: continue
            # NOTE(review): np.float is removed in modern numpy (>=1.24).
            A = np.zeros((384),dtype=np.float)
            for n in range(0,24):
                # Channels 1..14 of each 16-channel coarse band (skip edges).
                chunk = np.arange(16*n+1,16*n+15)
                induse = np.where(gains[p][ant][chunk]!=0)
                z1 = np.polyfit(freq[chunk[induse]],np.abs(gains[p][ant][chunk[induse]])/auto[ant][chunk[induse]],amp_order)
                A[chunk[induse]] = auto[ant][chunk[induse]]*polyfunc(freq[chunk[induse]],z1)
            y2 = np.angle(gains[p][ant][x])
            y2 = np.unwrap(y2)
            z2 = np.polyfit(x,y2,phs_order)
            rp = np.zeros((384))
            cable = tile_info[ant]['cable']
            if fit_reflection and cable==150:
                # Expected round-trip reflection delay (us) for this cable.
                vf = tile_info[ant]['vf']
                t0 = 2*cable/299792458.0/vf*1e6
                rp[x] = y2 - polyfunc(x,z2)
                tau = np.fft.fftfreq(384,(fqs[-1]-fqs[0])/383)
                fftrp = np.fft.fft(rp,n=384)
                # Keep only the strongest delay mode within 0.05 us of t0.
                inds = np.where(abs(np.abs(tau)-t0)<0.05)
                imax = np.argmax(np.abs(fftrp[inds]))
                ind = np.where(np.abs(tau)==np.abs(tau[inds][imax]))
                mask =np.zeros((384))
                mask[ind] = 1.
                fftrp *= mask
                rp = np.fft.ifft(fftrp)
            gains[p][ant][x] = A[x]*np.exp(1j*polyfunc(x,z2))
            gains[p][ant][x] *= np.exp(1j*rp[x])
    return gains
def poly_bandpass_fit(gains0,fit_order=4):
    """Smooth complex gains by fitting real and imaginary parts separately
    with an order-*fit_order* polynomial per 24 coarse bands (channels
    1..14 of each 16-channel band; edge channels untouched).

    Returns a smoothed deep copy of *gains0*.
    """
    gains = copy.deepcopy(gains0)
    for p in gains.keys():
        for a in gains[p].keys():
            g = np.copy(gains[p][a])
            for ff in range(24):
                chunk = np.arange(16*ff+1,16*ff+15)
                z1 = np.polyfit(chunk,g.real[chunk],fit_order)
                z2 = np.polyfit(chunk,g.imag[chunk],fit_order)
                gains[p][a][chunk] = polyfunc(chunk,z1) + 1j*polyfunc(chunk,z2)
    return gains
def amp_bandpass_fit(gains0,fit_order=4):
    """Replace each gain's values with a polynomial fit of its amplitude,
    per 24 coarse bands (channels 1..14 of each band).

    NOTE(review): the fitted amplitude is assigned directly, so any phase
    information in the affected channels is discarded.
    """
    gains = copy.deepcopy(gains0)
    for p in gains.keys():
        for a in gains[p].keys():
            g = np.abs(gains[p][a])
            for ff in range(24):
                chunk = np.arange(16*ff+1,16*ff+15)
                z = np.polyfit(chunk,g[chunk],fit_order)
                gains[p][a][chunk] = polyfunc(chunk,z)
    return gains
def ampproj(g_input, g_target):
    """Amplitude degeneracy projector.

    For each polarization, returns the per-channel average over antennas
    (present in both dicts and NaN-free) of |g_target| / |g_input|.
    Channels where the input amplitude is zero contribute weight 0 rather
    than dividing by zero; channels with no contributing antenna yield 0.
    """
    amppar = {}
    for p in g_input.keys():
        # FIX: `dict.keys()[0]` is Python-2-only (dict views are not
        # indexable on Python 3); grab an arbitrary key via the iterator.
        SH = g_input[p][next(iter(g_input[p]))].shape
        s = np.zeros(SH)
        n = np.zeros(SH)
        for a in g_input[p].keys():
            if not a in g_target[p].keys(): continue
            if np.isnan(np.mean(g_target[p][a])): continue
            if np.isnan(np.mean(g_input[p][a])): continue
            num = np.ones(SH)
            amp_in = np.abs(g_input[p][a])
            amp_ta = np.resize(np.abs(g_target[p][a]), SH)
            # Zero-amplitude input channels: neutralize the ratio and drop
            # them from the averaging weights.
            ind = np.where(amp_in == 0)
            amp_in[ind] = 1.
            amp_ta[ind] = 0.
            num[ind] = 0
            s += amp_ta / amp_in
            n += num
        ind = np.where(n == 0)
        n[ind] = 1.
        s[ind] = 0.
        amppar[p] = s / n
    return amppar
def phsproj(g_input,g_target,antpos,EastHex,SouthHex): #only returns slopes
    """Fit per-channel phase-gradient degeneracies between two gain sets.

    Fits the phase of g_target * conj(g_input) along the rows (ax1) and
    columns (ax2) of the East and South hexes and returns, per pol, the
    median slopes 'phi1' and 'phi2' as length-384 arrays (coarse-band
    edge channels ff%16 in {0,15} are set to 0).

    NOTE(review): uses the Python-2-only `dict.keys()[0]`, and collapses
    2-D gains to their time average *in place* inside g_input.
    """
    phspar = {}
    ax1,ax2 = [],[]
    # Row/column index lists of the two hexes; index 3 (the centre row/
    # column) is skipped in both directions.
    for ii in range(EastHex.shape[0]):
        if ii == 3: continue
        ind_east = EastHex[ii]
        ind_south = SouthHex[ii]
        ax1.append(ind_east)
        ax1.append(ind_south)
    for jj in range(EastHex.shape[1]):
        if jj == 3: continue
        ind_east = EastHex[:,jj]
        ind_south = SouthHex[:,jj]
        ax2.append(ind_east)
        ax2.append(ind_south)
    for p in g_input.keys():
        phspar[p] = {}
        a0 = g_input[p].keys()[0]
        SH = g_input[p][a0].shape
        if len(SH) == 2:
            for a in g_input[p].keys(): g_input[p][a] = np.mean(g_input[p][a],axis=0)
        slp1 = []
        slp2 = []
        for ff in range(0,384):
            if ff%16 in [0,15]:
                slp1.append(0)
                slp2.append(0)
                continue
            #***** East-West direction fit *****#
            slope = []
            for inds in ax1:
                x,tau = [],[]
                for ii in inds:
                    if not ii in g_input[p].keys(): continue
                    if not ii in g_target[p].keys(): continue
                    if np.isnan(g_input[p][ii][ff]): continue
                    if np.isnan(g_target[p][ii][ff]): continue
                    x.append(float(np.argwhere(inds==ii)))
                    tau.append(np.angle(g_target[p][ii][ff]*g_input[p][ii][ff].conj()))
                # Require at least 3 contiguous antennas for a stable fit.
                if len(tau) < 3: continue
                if np.round(x[-1])-np.round(x[0])+1 != len(x): continue
                tau = unwrap(tau)
                z = np.polyfit(x,tau,1)
                slope.append(z[0])
            slope = np.unwrap(slope)
            slp1.append(np.median(slope))
            #***** 60 deg East-South direction fit *****#
            slope = []
            for inds in ax2:
                x,tau = [],[]
                for ii in inds:
                    if not ii in g_input[p].keys(): continue
                    if not ii in g_target[p].keys(): continue
                    if np.isnan(g_input[p][ii][ff]): continue
                    if np.isnan(g_target[p][ii][ff]): continue
                    x.append(float(np.argwhere(inds==ii)))
                    tau.append(np.angle(g_target[p][ii][ff]*g_input[p][ii][ff].conj()))
                if len(tau) < 3: continue
                if np.round(x[-1])-np.round(x[0])+1 != len(x): continue
                tau = unwrap(tau)
                z = np.polyfit(x,tau,1)
                slope.append(z[0])
            slope = np.unwrap(slope)
            slp2.append(np.median(slope))
        phspar[p]['phi1'] = np.array(slp1)
        phspar[p]['phi2'] = np.array(slp2)
    return phspar
def plane_fitting(gains,antpos):
    """Least-squares fit of a phase plane per channel across the array.

    Per polarization and per channel, solves for an East-West slope
    ('phix'), a North-South slope ('phiy') and separate constant offsets
    for the East hex (antennas 57-92, 'offset_east') and South hex
    (antennas 93-127, 'offset_south') by accumulating the 4x4 normal
    equations. Coarse-band edge channels (f%16 in {0,15}) are set to 0.

    Note: results are appended NEGATED, i.e. they are the corrections to
    apply rather than the fitted plane itself.
    """
    phspar = {}
    for p in gains.keys():
        phspar[p] = {}
        phix,phiy,offset_east,offset_south = [],[],[],[]
        for f in range(384):
            if f%16 in [0,15]:
                phix.append(0)
                phiy.append(0)
                offset_east.append(0)
                offset_south.append(0)
                continue
            # Normal equations M0 * C = p0 for [slope_x, slope_y,
            # offset_east, offset_south].
            M0 = np.zeros((4,4))
            p0 = np.zeros((4,1))
            for a in gains[p].keys():
                x = antpos[a]['top_x']
                y = antpos[a]['top_y']
                if gains[p][a].ndim == 2:
                    z = np.angle(np.mean(gains[p][a],axis=0)[f])
                else:
                    z = np.angle(gains[p][a][f])
                if 56 < a < 93:
                    M0 += np.array([[x*x, x*y, x , 0 ],
                                    [x*y, y*y, y , 0 ],
                                    [ x , y , 1 , 0 ],
                                    [ 0 , 0 , 0 , 0 ]])
                    p0 += np.array([[z*x],
                                    [z*y],
                                    [ z ],
                                    [ 0 ]])
                if 92 < a < 128:
                    M0 += np.array([[x*x, x*y, 0 , x ],
                                    [x*y, y*y, 0 , y ],
                                    [ 0 , 0 , 0 , 0 ],
                                    [ x , y , 0 , 1 ]])
                    p0 += np.array([[z*x],
                                    [z*y],
                                    [ 0 ],
                                    [ z ]])
            C = np.linalg.inv(M0).dot(p0)
            #Attention: append negative results here
            phix.append(-C[0][0])
            phiy.append(-C[1][0])
            offset_east.append(-C[2][0])
            offset_south.append(-C[3][0])
        phspar[p]['phix'] = np.array(phix)
        phspar[p]['phiy'] = np.array(phiy)
        phspar[p]['offset_east'] = np.array(offset_east)
        phspar[p]['offset_south'] = np.array(offset_south)
    return phspar
def degen_project_OF(gomni,gfhd,antpos,EastHex,SouthHex,v2={}):
    """Project omnical gains onto the FHD degeneracy frame.

    Steps: (1) phase-reference each hex to its lowest/highest antenna,
    (2) apply amplitude (ampproj) and hex phase-slope (phsproj)
    projections toward *gfhd*, (3) remove the residual phase plane
    (plane_fitting) of gains*conj(gfhd). If model visibilities *v2* are
    supplied, the inverse corrections are applied to them in place.

    NOTE(review): mutable default argument v2={} -- harmless here since
    it is only mutated when a caller passes a non-empty dict, but fragile.
    """
    gains = copy.deepcopy(gomni)
    for p in gains.keys():
        # Per-hex phase reference antennas (East: min key, South: max key).
        ref1 = min(gains[p].keys())
        ref2 = max(gains[p].keys())
        ref_exp1 = np.exp(1j*np.angle(gains[p][ref1]*gfhd[p][ref1].conj()))
        ref_exp2 = np.exp(1j*np.angle(gains[p][ref2]*gfhd[p][ref2].conj()))
        for a in gains[p].keys():
            if a < 93: gains[p][a] /= ref_exp1
            else: gains[p][a] /= ref_exp2
        amppar = ampproj(gains,gfhd)
        phspar = phsproj(gains,gfhd,antpos,EastHex,SouthHex)
        for a in gains[p].keys():
            if a < 93:
                dx = antpos[a]['top_x']-antpos[ref1]['top_x']
                dy = antpos[a]['top_y']-antpos[ref1]['top_y']
            else:
                dx = antpos[a]['top_x']-antpos[ref2]['top_x']
                dy = antpos[a]['top_y']-antpos[ref2]['top_y']
            # Hex lattice coordinates (14 m spacing) along the two axes.
            nx = dx/14.-dy/np.sqrt(3)/14.
            ny = -2*dy/np.sqrt(3)/14.
            proj = amppar[p]*np.exp(1j*(nx*phspar[p]['phi1']+ny*phspar[p]['phi2']))
            gains[p][a] *= proj
        ratio = {p:{}}
        for a in gains[p].keys():
            r = gains[p][a]*gfhd[p][a].conj()
            if np.isnan(np.mean(r)): continue
            ratio[p][a] = r
        phspar2 = plane_fitting(ratio,antpos)
        for a in gains[p].keys():
            dx = antpos[a]['top_x']
            dy = antpos[a]['top_y']
            proj = np.exp(1j*(dx*phspar2[p]['phix']+dy*phspar2[p]['phiy']))
            if a > 92: proj *= np.exp(1j*phspar2[p]['offset_south'])
            else: proj *= np.exp(1j*phspar2[p]['offset_east'])
            gains[p][a] *= proj
        if not v2 == {}:
            # Apply the inverse of every gain correction to the model
            # visibilities so gain*model products are preserved.
            pp = p+p
            for bl in v2[pp].keys():
                i,j = bl
                if i < 93: v2[pp][bl] *= (ref_exp1*np.exp(-1j*phspar2[p]['offset_east']))
                else: v2[pp][bl] *= (ref_exp2*np.exp(-1j*phspar2[p]['offset_south']))
                if j < 93: v2[pp][bl] *= (ref_exp1.conj()*np.exp(1j*phspar2[p]['offset_east']))
                else: v2[pp][bl] *= (ref_exp2.conj()*np.exp(1j*phspar2[p]['offset_south']))
                dx = antpos[i]['top_x']-antpos[j]['top_x']
                dy = antpos[i]['top_y']-antpos[j]['top_y']
                nx = dx/14.-dy/np.sqrt(3)/14.
                ny = -2*dy/np.sqrt(3)/14.
                proj = amppar[p]*amppar[p]*np.exp(1j*(nx*phspar[p]['phi1']+ny*phspar[p]['phi2']))*np.exp(1j*(dx*phspar2[p]['phix']+dy*phspar2[p]['phiy']))
                proj = np.resize(proj,v2[pp][bl].shape)
                ind = np.where(proj!=0)
                v2[pp][bl][ind] /= proj[ind]
    return gains
def degen_project_FO(gomni, antpos, v2=None):
    """Remove omnical degeneracies (overall amplitude, phase gradients and
    per-hex phase offsets) from the gains; optionally apply the inverse
    corrections to the model visibilities *v2* in place.

    Returns the projected copy of *gomni* (the input is not modified).
    """
    # FIX: mutable default argument replaced with a None sentinel.
    if v2 is None:
        v2 = {}
    gains = scale_gains(gomni)
    phspar = plane_fitting(gains, antpos)
    for p in gains.keys():
        for a in gains[p].keys():
            dx = antpos[a]['top_x']
            dy = antpos[a]['top_y']
            proj = np.exp(1j*(dx*phspar[p]['phix']+dy*phspar[p]['phiy']))
            # Antennas above 92 belong to the South hex.
            if a > 92: proj *= np.exp(1j*phspar[p]['offset_south'])
            else: proj *= np.exp(1j*phspar[p]['offset_east'])
            gains[p][a] *= proj
        if not v2 == {}:
            pp = p+p
            for bl in v2[pp].keys():
                i,j = bl
                if i < 93: v2[pp][bl] *= np.exp(-1j*phspar[p]['offset_east'])
                else: v2[pp][bl] *= np.exp(-1j*phspar[p]['offset_south'])
                if j < 93: v2[pp][bl] *= np.exp(1j*phspar[p]['offset_east'])
                else: v2[pp][bl] *= np.exp(1j*phspar[p]['offset_south'])
                dx = antpos[i]['top_x']-antpos[j]['top_x']
                dy = antpos[i]['top_y']-antpos[j]['top_y']
                proj = np.exp(-1j*(dx*phspar[p]['phix']+dy*phspar[p]['phiy']))
                # BUG FIX: `ind` was never defined in this function, so any
                # call that supplied model visibilities raised NameError.
                # The complex exponential is never zero, so no masking is
                # needed -- apply the factor to the full array.
                v2[pp][bl] *= proj
    return gains
def degen_project_simple(g_input,g_target,antpos):
    """Project *g_input* toward *g_target* using only the amplitude
    projector (ampproj) and a fitted phase plane of
    g_input * conj(g_target) (plane_fitting).

    Returns a corrected deep copy of *g_input*.
    """
    g_output = copy.deepcopy(g_input)
    amppar = ampproj(g_input,g_target)
    for p in g_output.keys():
        ratio = {p:{}}
        for a in g_output[p].keys():
            # Skip antennas whose ratio contains NaNs.
            r = g_input[p][a]*g_target[p][a].conj()
            if np.isnan(np.mean(r)): continue
            ratio[p][a] = r
        phspar = plane_fitting(ratio,antpos)
        for a in g_input[p].keys():
            dx = antpos[a]['top_x']
            dy = antpos[a]['top_y']
            proj = amppar[p]*np.exp(1j*(dx*phspar[p]['phix']+dy*phspar[p]['phiy']))
            # Antennas above 92 belong to the South hex.
            if a > 92: proj *= np.exp(1j*phspar[p]['offset_south'])
            else: proj *= np.exp(1j*phspar[p]['offset_east'])
            g_output[p][a] *= proj
    return g_output
def cal_var_wgt(v,m,w):
n = np.ma.masked_array(v-m,mask=w,fill_value=0.+0.j)
var = np.var(n,axis=0).data
zeros = np.where(var==0)
var[zeros] = 1.
inv = 1./var
inv[zeros] = 0.
return inv
def pos_to_info(position, pols=['x'], fcal=False, **kwargs):
    """Build a heracal RedundantInfo (or FirstCalRedundantInfo when
    *fcal* is True) from a {ant: {'top_x','top_y'}, 'nant': N} layout.

    Antennas missing from *position* keep the sentinel position -1 and are
    added to kwargs['ex_ants']; remaining kwargs are passed to
    heracal.omni.filter_reds.
    """
    nant = position['nant']
    # -1 marks antennas with no known position (excluded below).
    antpos = -np.ones((nant*len(pols),3))
    xmin,ymin = 0,0
    for key in position.keys():
        if key == 'nant': continue
        if position[key]['top_x'] < xmin: xmin = position[key]['top_x']
        if position[key]['top_y'] < ymin: ymin = position[key]['top_y']
    for ant in range(0,nant):
        try:
            # Shift all positions positive (omnical requires x, y > 0).
            x = position[ant]['top_x'] - xmin + 0.1
            y = position[ant]['top_y'] - ymin + 0.1
        except(KeyError): continue
        for z, pol in enumerate(pols):
            # Separate polarizations in z by powers of two.
            z = 2**z
            i = heracal.omni.Antpol(ant,pol,nant)
            antpos[i.val,0],antpos[i.val,1],antpos[i.val,2] = x,y,z
    reds = heracal.omni.compute_reds(nant, pols, antpos[:nant],tol=0.01)
    ex_ants = [heracal.omni.Antpol(i,nant).ant() for i in range(antpos.shape[0]) if antpos[i,0] < 0]
    kwargs['ex_ants'] = kwargs.get('ex_ants',[]) + ex_ants
    reds = heracal.omni.filter_reds(reds, **kwargs)
    if fcal:
        from heracal.firstcal import FirstCalRedundantInfo
        info = FirstCalRedundantInfo(nant)
    else:
        info = heracal.omni.RedundantInfo(nant)
    info.init_from_reds(reds, antpos)
    return info
def cal_reds_from_pos(position,**kwargs):
    """Compute redundant baseline groups directly with omnical from a
    {ant: {'top_x','top_y'}, 'nant': N} layout dict.

    Antennas without positions keep the -1 sentinel and are appended to
    kwargs['ex_ants']; remaining kwargs go to omnical's filter_reds.
    """
    nant = position['nant']
    antpos = -np.ones((nant,3))
    xmin = 0
    ymin = 0
    for key in position.keys():
        if key == 'nant': continue
        if position[key]['top_x'] < xmin: xmin = position[key]['top_x']
        if position[key]['top_y'] < ymin: ymin = position[key]['top_y']
    for ant in range(0,nant):
        try:
            # Shift all positions positive, as omnical expects.
            x = position[ant]['top_x'] - xmin + 0.1
            y = position[ant]['top_y'] - ymin + 0.1
        except(KeyError): continue
        z = 0
        i = ant
        antpos[i,0],antpos[i,1],antpos[i,2] = x,y,z
    reds = omnical.arrayinfo.compute_reds(antpos,tol=0.01)
    kwargs['ex_ants'] = kwargs.get('ex_ants',[]) + [i for i in range(antpos.shape[0]) if antpos[i,0] < 0]
    reds = omnical.arrayinfo.filter_reds(reds,**kwargs)
    return reds
def get_phase(fqs, tau, offset=False):
    """Model a firstcal gain phasor from a delay (and optional offset).

    Parameters
    ----------
    fqs : 1-D frequency array; reshaped to a column so the result
        broadcasts along a trailing axis.
    tau : the delay, or the pair (delay, phase offset) when *offset* is
        True.
    offset : interpret *tau* as (delay, offset) when True.

    Returns
    -------
    Unit-magnitude complex phasor array of shape (Nfreqs, 1).
    """
    fqs = fqs.reshape(-1,1) #need the extra axis
    if offset:
        delay = tau[0]
        offset = tau[1]
        # BUG FIX: the offset was previously applied OUTSIDE the 1j factor
        # (np.exp(-1j*(2*np.pi*fqs*delay) - offset)), which scaled the
        # amplitude by exp(-offset) instead of rotating the phase. A
        # firstcal solution is a pure phasor, so the offset belongs inside
        # the phase term (matching heracal's firstcal convention).
        return np.exp(-1j*(2*np.pi*fqs*delay - offset))
    else:
        return np.exp(-2j*np.pi*fqs*tau)
def save_gains_fc(s,fqs,outname):
    """Write firstcal solutions to an .npz file.

    For each antenna key in *s*, stores the modelled gain phasor under
    "<ant>", the delay under "d<ant>" and, when a (delay, offset) pair was
    solved, the offset under "o<ant>".
    """
    s2 = {}
    # FIX: .iteritems() is Python-2-only; .items() behaves identically
    # here and also works on Python 3.
    for k, i in s.items():
        if len(i) > 1:
            s2[str(k)] = get_phase(fqs,i,offset=True).T
            s2['d'+str(k)] = i[0]
            s2['o'+str(k)] = i[1]
        else:
            s2[str(k)] = get_phase(fqs,i).T
            s2['d'+str(k)] = i
    np.savez(outname,**s2)
def load_gains_fc(fcfile):
    """Load firstcal gains from an .npz written by save_gains_fc.

    Returns {pol: {ant: gain_array}}. Gain entries are keyed "<ant><pol>"
    (e.g. "12x"); delay/offset entries ("d12x", "o12x") are skipped.
    """
    g0 = {}
    fc = np.load(fcfile)
    for k in fc.keys():
        if k[0].isdigit():
            a = int(k[:-1])
            p = k[-1]
            # FIX: dict.has_key() was removed in Python 3; use `in`.
            if p not in g0: g0[p] = {}
            g0[p][a] = fc[k]
    return g0
def save_gains_omni(filename, meta, gains, vismdl, xtalk):
    """Write an omnical solution to an .npz file.

    Key conventions: metadata keys starting with 'chisq', 'jds', 'lsts',
    'freqs' or 'history' are copied through; gains become "<ant><pol>";
    model visibilities become "<i,j> pol"; crosstalk becomes "(i,j) pol".
    """
    d = {}
    metakeys = ['jds','lsts','freqs','history']
    for key in meta:
        if key.startswith('chisq'): d[key] = meta[key] #separate if statements pending changes to chisqs
        for k in metakeys:
            if key.startswith(k): d[key] = meta[key]
    for pol in gains:
        for ant in gains[pol]:
            d['%d%s' % (ant,pol)] = gains[pol][ant]
    for pol in vismdl:
        for bl in vismdl[pol]:
            d['<%d,%d> %s' % (bl[0],bl[1],pol)] = vismdl[pol][bl]
    for pol in xtalk:
        for bl in xtalk[pol]:
            d['(%d,%d) %s' % (bl[0],bl[1],pol)] = xtalk[pol][bl]
    np.savez(filename,**d)
def load_gains_omni(filename):
    """Load an omnical .npz written by save_gains_omni.

    Returns (meta, gains, vismdl, xtalk) following the key conventions of
    save_gains_omni: "<ant><pol>" gains, "<i,j> pol" model visibilities,
    "(i,j) pol" crosstalk, and metadata keys starting with chi/hist/j/l/f.
    """
    meta, gains, vismdl, xtalk = {}, {}, {}, {}
    def parse_key(k):
        # "<0,1> xx" -> ('xx', (0, 1)); raises ValueError for other keys.
        bl,pol = k.split()
        bl = tuple(map(int,bl[1:-1].split(',')))
        return pol,bl
    npz = np.load(filename)
    for k in npz.files:
        if k[0].isdigit():
            pol,ant = k[-1:],int(k[:-1])
            # FIX: dict.has_key() was removed in Python 3; use `in`.
            if pol not in gains: gains[pol] = {}
            gains[pol][ant] = npz[k]
        try: pol,bl = parse_key(k)
        except(ValueError): continue
        if k.startswith('<'):
            if pol not in vismdl: vismdl[pol] = {}
            vismdl[pol][bl] = npz[k]
        elif k.startswith('('):
            if pol not in xtalk: xtalk[pol] = {}
            xtalk[pol][bl] = npz[k]
    kws = ['chi','hist','j','l','f']
    for kw in kws:
        for k in [f for f in npz.files if f.startswith(kw)]: meta[k] = npz[k]
    return meta, gains, vismdl, xtalk
def quick_load_gains(filename):
    """Load only the per-antenna gains from an omnical/firstcal .npz,
    ignoring model visibilities, crosstalk and metadata.

    Returns {pol: {ant: gain_array}} from keys of the form "<ant><pol>".
    """
    d = np.load(filename)
    gains = {}
    for k in d.keys():
        if k[0].isdigit():
            p = k[-1]
            # FIX: dict.has_key() was removed in Python 3; use `in`.
            if p not in gains: gains[p] = {}
            a = int(k[:-1])
            gains[p][a] = d[k]
    return gains
def load_gains_fhd(fhdsav):
    """Load FHD calibration gains from an IDL .sav file.

    Returns {'x': {ant: gain}, 'y': {ant: gain}}; FHD stores the two
    polarizations as GAIN[0] and GAIN[1] per tile.
    """
    fhd_cal = readsav(fhdsav,python_dict=True)
    gfhd = {'x':{},'y':{}}
    for a in range(fhd_cal['cal']['N_TILE'][0]):
        gfhd['x'][a] = fhd_cal['cal']['GAIN'][0][0][a]
        gfhd['y'][a] = fhd_cal['cal']['GAIN'][0][1][a]
    return gfhd
def fill_flags(data,flag,fit_order = 4):
    """Interpolate flagged samples with per-coarse-band polynomial fits.

    For each time with more than half of its channels unflagged, fits
    real and imaginary parts of channels 1..14 of every 16-channel coarse
    band and overwrites only the flagged channels with the fitted values.
    Returns a patched copy of *data*.
    """
    dout = np.copy(data)
    wgt = np.logical_not(flag)
    SH = data.shape
    time_stack = np.sum(wgt,axis=1)
    for ii in range(SH[0]):
        # Skip times that are mostly flagged: too few points to fit.
        if time_stack[ii] <= (SH[1]/2 + 1) : continue
        for jj in range(24):
            chunk = np.arange(16*jj+1,16*jj+15)
            ind = np.where(wgt[ii][chunk])
            # Nothing to fill when the whole coarse band is unflagged.
            if ind[0].size == 14: continue
            x = chunk[ind]
            y = dout[ii][chunk][ind]
            z1 = np.polyfit(x,y.real,fit_order)
            z2 = np.polyfit(x,y.imag,fit_order)
            zeros = np.where(flag[ii][chunk])
            d_temp = dout[ii][chunk]
            d_temp[zeros] = (polyfunc(chunk,z1) + 1j*polyfunc(chunk,z2))[zeros]
            dout[ii][chunk] = d_temp
    return dout
def fit_data(data,fit_order=2):
    """Return a per-coarse-band polynomial fit of a (possibly 2-D) complex
    spectrum.

    2-D input is time-averaged first; real and imaginary parts are fit
    separately over channels 1..14 of each of the 24 coarse bands. Edge
    channels remain zero in the output.
    """
    if data.ndim == 2: d = np.mean(data,axis=0)
    else: d = data
    # fq = np.arange(d.size)
    # zr = np.polyfit(fq,d.real,fit_order)
    # zi = np.polyfit(fq,d.imag,fit_order)
    # fit_data = polyfunc(fq,zr) + 1j*polyfunc(fq,zi)
    fit_data = np.zeros(d.shape,dtype=np.complex64)
    for ii in range(24):
        chunk = np.arange(16*ii+1,16*ii+15)
        dr = d.real[chunk]
        di = d.imag[chunk]
        zr = np.polyfit(chunk,dr,fit_order)
        zi = np.polyfit(chunk,di,fit_order)
        fit_data[chunk] = polyfunc(chunk,zr)+1j*polyfunc(chunk,zi)
    return fit_data
def rough_cal(data,info,pol='xx'): #The data has to be the averaged over time axis
    """Propagate rough per-antenna phases through the two largest
    redundant baseline groups.

    Fixes the phase of one reference antenna per hex (the minimum antenna
    number, and the minimum above 92), then walks the redundant baselines
    of reds[0] and reds[1], solving each unknown antenna's phase against
    the group's fitted average visibility (gamma0/gamma1). Returns
    {pol[0]: {ant: exp(-1j*phi)}}.

    NOTE(review): dict.has_key() is Python-2-only; unsolvable baselines
    are re-appended, so disconnected arrays would loop forever.
    """
    p = pol[0]
    g0 = {p: {}}
    phi = {}
    reds = info.get_reds()
    reds[0].sort()
    reds[1].sort()
    redbls = reds[0] + reds[1]
    redbls.sort()
    SH = data[reds[0][0]][pol].shape
    # Smoothed group-average visibilities used as phase references.
    gamma0 = fit_data(data[reds[0][0]][pol])
    gamma1 = fit_data(data[reds[1][0]][pol])
    subsetant = info.subsetant
    fixants = (min(subsetant), min(subsetant[np.where(subsetant>92)]))
    for a in fixants: phi[a] = np.zeros(SH)
    while len(redbls) > 0:
        i,j = redbls[0]
        r = (i,j)
        redbls.remove(r)
        if phi.has_key(i) and phi.has_key(j): continue
        elif phi.has_key(i) and not phi.has_key(j):
            if r in reds[0]:
                phi[j] = np.angle(fit_data(data[r][pol])*np.exp(1j*phi[i])*gamma0.conj())
            elif r in reds[1]:
                phi[j] = np.angle(fit_data(data[r][pol])*np.exp(1j*phi[i])*gamma1.conj())
        elif phi.has_key(j) and not phi.has_key(i):
            if r in reds[0]:
                phi[i] = np.angle(fit_data(data[r][pol]).conj()*np.exp(1j*phi[j])*gamma0)
            elif r in reds[1]:
                phi[i] = np.angle(fit_data(data[r][pol]).conj()*np.exp(1j*phi[j])*gamma1)
        # Neither end solved yet: push the baseline back for a later pass.
        else: redbls.append(r)
    if len(phi.keys()) != subsetant.size: raise IOError('Missing antennas')
    for a in phi.keys():
        g0[p][a] = np.exp(-1j*phi[a])
    return g0
def run_omnical(data, info, gains0=None, xtalk=None, maxiter=500, conv=1e-3,
                stepsize=.3, trust_period=1):
    """Run omnical's two-stage solver: logcal for a starting point, then
    lincal refinement seeded with logcal's gains and visibilities.

    Returns the lincal (meta, gains, visibilities) triple.
    """
    m1,g1,v1 = omnical.calib.logcal(data, info, xtalk=xtalk, gains=gains0,
                                    maxiter=maxiter, conv=conv, stepsize=stepsize,
                                    trust_period=trust_period)
    m2,g2,v2 = omnical.calib.lincal(data, info, xtalk=xtalk, gains=g1, vis=v1,
                                    maxiter=maxiter, conv=conv, stepsize=stepsize,
                                    trust_period=trust_period)
    return m2,g2,v2
def remove_degen_hex(gomni, antpos):
    """Project phase degeneracies out of an omnical gain solution on the hex array.

    Works per polarization: the overall phase of each sub-array is pinned by
    referencing antennas 57 (<93 group) and 93 (>=93 group); then the two
    phase-gradient degeneracies are removed using hex-lattice coordinates
    (nx, ny) built from the antenna positions (lattice spacing of 14 --
    presumably metres; confirm against `antpos`).  Finally the amplitudes are
    renormalized via `scale_gains`.
    """
    g2 = copy.deepcopy(gomni)
    for p in g2.keys():
        # Phase-reference each sub-array to its anchor antenna (57 resp. 93).
        ref_exp1 = np.exp(-1j*np.angle(g2[p][57]))
        ref_exp2 = np.exp(-1j*np.angle(g2[p][93]))
        for a in g2[p].keys():
            if a < 93: g2[p][a] *= ref_exp1
            else: g2[p][a] *= ref_exp2
        # Residual phases at antennas 58 and 61 define the two slope terms
        # to be projected out below.
        phi58 = g2[p][58]
        phi61 = g2[p][61]
        phi1 = np.angle(phi58)
        phi2 = np.angle(phi61)
        for a in g2[p].keys():
            # Position offset from the sub-array anchor antenna.
            if a < 93:
                dx = antpos[a]['top_x'] - antpos[57]['top_x']
                dy = antpos[a]['top_y'] - antpos[57]['top_y']
            else:
                dx = antpos[a]['top_x'] - antpos[93]['top_x']
                dy = antpos[a]['top_y'] - antpos[93]['top_y']
            # Hex-lattice coordinates of the offset (sqrt(3) from hex geometry).
            nx = dx/14.-dy/np.sqrt(3)/14.
            ny = -2*dy/np.sqrt(3)/14.
            g2[p][a] *= np.exp(-1j*(phi1*nx+phi2*ny))
    g2 = scale_gains(g2)
    return g2
|
10,824 | fd04968d347bf6a7ccf5efd19c93f38bf54db831 | class CascadeFaceDetectorConfig:
classifier_path = 'cascades/haarcascade_frontalface_default.xml'
scale_factor = 1.2
min_neighbors = 5
min_size = (20, 20)
class OpencvFaceDetectorConfig:
    """Settings for the OpenCV DNN face detector (ResNet-10 300x300 SSD, Caffe)."""
    # Network architecture definition (Caffe deploy prototxt).
    prototxt_path = 'models/deploy.prototxt.txt'
    # Pre-trained weights for the res10 300x300 SSD model.
    model_weights_path = 'models/res10_300x300_ssd_iter_140000.caffemodel'
    # Detections scoring below this value are discarded -- presumably;
    # confirm against the detector implementation.
    confidence_threshold = 0.5
|
10,825 | 7c36d46285274339814e0a709853aea2ecfabab9 | import pandas as pd
import numpy as np
from os import path
from sklearn.preprocessing import StandardScaler
from sklearn.cross_validation import KFold
from sklearn.metrics import silhouette_score
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier as RF
from sklearn.neighbors import KNeighborsClassifier as KNN
from sklearn.linear_model import LogisticRegression as LREG
from flask import Flask, json, Response
import bokeh
from crossdomain import crossdomain
from string import Template
import regex as re
from utils import BucketedFileRefresher
app = Flask(__name__)
DATASET_BUCKET = 'datasets'
BFR = BucketedFileRefresher()
DEFAULT_CONTENT_TEMPLATE = Template("\n".join([
" <p>Predicción: ${pred}</p>",
" <p>Biomarcadores de riesgo:</p>",
" <ul>",
" ${biom_l}",
" </ul>"
]))
## Utilidades para el manejo de diccionarios
# dict_find - reverse lookup: first key in `d` whose value equals `value`, else None
def dict_find(d, value):
    if isinstance(d, pd.DataFrame):
        d = d.to_dict()
    inverted = {v: k for k, v in d.items()}
    return safe_dict_get(inverted, value)
# safe_dict_get - subscript access that yields None instead of raising KeyError
def safe_dict_get(d, index):
    return d[index] if index in d else None
# Definimos la funcion de prognosis
def estimate_prognosis_weight_vector(cell, malignants, c_min=2, c_max=20, metric='euclidean'):
    """Weight each feature by how close `cell` lies to its nearest malignant cluster.

    A K-Means model is fit on `malignants` for every cluster count in
    [c_min, c_max) and the silhouette score picks the best clustering.  The
    centroid closest (euclidean) to `cell` is then located, and the returned
    vector is the normalized inverse per-feature distance to that centroid
    (features where the cell sits close to the centroid get more weight).
    """
    # K-Means cluster search on malignant cells.
    # BUG FIX: the search range was hard-coded to range(2, 20), silently
    # ignoring the c_min/c_max parameters (defaults keep the old behaviour).
    silh = []
    km_model = []
    for t in range(c_min, c_max):
        km_model.append(KMeans(n_clusters=t).fit(malignants))
        silh.append(silhouette_score(malignants, km_model[-1].labels_, metric=metric))
    idx = int(np.argmax(silh))
    cluster_lbl = km_model[idx].labels_
    # Cluster centroid retrieval and distance calculation.
    distance, centroid = {}, {}
    for each in np.unique(cluster_lbl).tolist():
        centroid[each] = np.mean(malignants[cluster_lbl == each], 0)
        distance[each] = np.sqrt(np.sum(np.power(centroid[each] - cell, 2)))
    # Pick the closest centroid by key; this is robust on Python 2 and 3,
    # unlike the previous np.argmin(dict.values()) indexing trick.
    nearest = min(distance, key=distance.get)
    dist_v = np.abs(centroid[nearest] - cell)
    inv_dist = 1 / dist_v
    weights = inv_dist / np.sum(inv_dist)
    return weights
# Descargamos el dataset de cancer del bucket de datasets
filename = 'breast-cancer-wisconsin.data_total.txt'
filepath = path.join(path.dirname(path.realpath(__file__)), filename)
BFR(DATASET_BUCKET, filename, filepath)
# Cargamos los datos y aplicamos alguna transformacion
df = pd.read_csv(filename)
labels = {'benign': 2, 'malignant': 4}
df.drop(['id'], 1, inplace=True)
df.replace('?',-99999, inplace=True)
for col in df.columns:
df = df[df[col] != -99999]
field_names = [field for field in df.drop(['Class'], 1)]
flds = [dict(title=f, start=float(df[f].min()), end=float(df[f].max()), step=.5) for f in field_names]
flds = [dict(zip(list(f.keys())+['value'], list(f.values())+[f['start']])) for f in flds]
# Separamos vectores de caracteristicas y etiquetas
X = np.array(df.drop(['Class'], 1), dtype=float)
y = np.array(df['Class'], dtype=float)
# Preparamos version normalizada para utilizar en los clasificadores
scalerX = StandardScaler()
scalery = StandardScaler()
Xn = np.apply_along_axis(scalerX.fit_transform,0,X)
yn = np.apply_along_axis(scalery.fit_transform,0,y)
# Definimos los clasificadores y los entrenamos
classifiers = {
'SVC': SVC(probability=True).fit(Xn, y),
'RF': RF().fit(Xn, y),
'KNN': KNN().fit(Xn, y),
'LREG': LREG().fit(Xn, y)
}
# Preparamos los vectores para la representacion 3D
#pca = PCA(n_components=3).fit(X)
pca = PCA(n_components=2).fit(X)
Xt = pca.transform(X)
xx1 = Xt[y == labels['benign']][:, 0]
yy1 = Xt[y == labels['benign']][:, 1]
#zz1 = Xt[y == labels['benign']][:, 2]
xx2 = Xt[y == labels['malignant']][:, 0]
yy2 = Xt[y == labels['malignant']][:, 1]
#zz2 = Xt[y == labels['malignant']][:, 2]
#base = dict(x=np.hstack((xx1, xx2)), y=np.hstack((yy1, yy2)), z=np.hstack((yy1, yy2)),
# color=[1] * xx1.size + [3] * xx2.size)
base = dict(x=np.hstack((xx1, xx2)), y=np.hstack((yy1, yy2)), color=['blue'] * xx1.size + ['red'] * xx2.size)
@app.route('/predict/<chars>', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'OPTIONS'], headers=None)
def predict(chars):
    """Classify a feature vector and report risk biomarkers as HTML.

    `chars` is a JSON-encoded feature vector; anything that does not start
    like a JSON container falls back to the per-column minima of the training
    data.  Returns {'results': <html>} (empty string on any error).
    """
    chars = np.amin(X, axis=0) if re.match(r"[\[\(\{].*", chars) is None else json.loads(chars)
    aux = ""
    try:
        # BUG FIX: the incoming sample must be standardized with the TRAINING
        # statistics.  The old code called scalerX.fit_transform on the sample,
        # which re-fits the scaler on a single observation (meaningless) and
        # mutates the shared scaler.  Standardize exactly the way Xn was
        # built: per-column (x - mean) / std over the training matrix X.
        chars = ((np.asarray(chars, dtype=float) - X.mean(axis=0)) / X.std(axis=0)).tolist()
        # Highest predicted class probability per classifier.
        clsf = {n: float(np.amax(f.predict_proba(chars))) for n, f in classifiers.items()}
        probs = dict(enumerate(clsf.values()))
        names = dict(enumerate(clsf.keys()))
        p_max_i = int(np.argmax(list(probs.values())))
        # Report the label predicted by the most confident classifier.
        pred = "%s (%.2f%%, %s)" % (
            dict_find(labels, int(classifiers[names[p_max_i]].predict(chars).tolist()[0])),
            probs[p_max_i]*100,
            names[p_max_i]
        )
        # Per-feature risk weights relative to the nearest malignant cluster.
        prog_vec = estimate_prognosis_weight_vector(chars, Xn[y == labels['malignant']])
        fnms = dict(enumerate(field_names))
        biom_l = "\n".join(["<li>%s: %.2f%%</li>" % (fnms[i], v*100) for i, v in enumerate(prog_vec)])
        aux = DEFAULT_CONTENT_TEMPLATE.substitute(**dict(pred=pred, biom_l=biom_l))
    except Exception as e:
        print("Error with input '%s': %s" % (chars, e))
    return json.jsonify({'results': aux})
@app.route('/compute/<chars>', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'OPTIONS'], headers=None)
def compute(chars):
    """Project a JSON feature vector through the fitted PCA and append it
    (colored green) to the 2-D scatter of the training data."""
    chars = json.loads(chars)
    aux = {}
    try:
        projected = pca.transform(chars)[0]
        chars = projected  # keep the transformed sample visible to the error log
        aux['x'] = np.hstack((base['x'], projected[0])).tolist()
        aux['y'] = np.hstack((base['y'], projected[1])).tolist()
        aux['color'] = base['color'] + ['green']
    except Exception as e:
        print("Error with input '%s': %s" % (chars, e))
    return json.jsonify({'results': aux})
@app.route('/defaults', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'OPTIONS'], headers=None)
def defaults():
    """Scatter data for the default view: training points plus the per-column
    minimum feature vector projected through the PCA (colored green)."""
    aux = {}
    try:
        # NOTE(review): pca.transform on a 1-D vector relies on old
        # scikit-learn behaviour; newer versions require a 2-D array.
        chars = pca.transform(np.amin(X, axis=0))[0]
        aux['x'] = np.hstack((base['x'], chars[0])).tolist()
        aux['y'] = np.hstack((base['y'], chars[1])).tolist()
        #aux['z'] = np.hstack((base['z'], chars[0])).tolist()
        aux['color'] = base['color'] + ['green']# + ["rgba(0,255,0,1)"]
    except Exception as e:
        print("Error in 'defaults': %s" % (e,))
    return json.jsonify({'results': aux})
@app.route('/fields', methods=['POST', 'OPTIONS'])
@crossdomain(origin='*', methods=['POST', 'OPTIONS'], headers=None)
def fields():
    """Return slider metadata (title/start/end/step/value) for each feature column."""
    return json.jsonify({'results': flds})
if __name__ == '__main__':
app.run(port=50000)
|
10,826 | b520bed919f3841a14721fdbf74567eea85293e1 | # Generated by Django 3.1.7 on 2021-02-23 13:42
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `audios` app.

    Adds human-readable verbose names to AudioClass / AudioClassWeek and a
    required `audio_file` upload field (empty-string default so existing rows
    remain valid).
    """
    dependencies = [
        ('audios', '0001_initial'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='audioclass',
            options={'verbose_name': 'Audio class', 'verbose_name_plural': 'Audio classes'},
        ),
        migrations.AlterModelOptions(
            name='audioclassweek',
            options={'verbose_name': 'Audio class week', 'verbose_name_plural': 'Audio class weeks'},
        ),
        migrations.AddField(
            model_name='audioclassweek',
            name='audio_file',
            field=models.FileField(default='', upload_to='audios'),
        ),
    ]
|
10,827 | b5a02d33e00c317e96f403f2029fc578253368b9 | #!/usr/bin/env python3
import wpilib
import wpilib.buttons
from wpilib import RobotDrive
import networktables
from robotpy_ext.common_drivers import navx
class MyRobot(wpilib.IterativeRobot):
    '''Mecanum-drive robot with a two-motor winch and pneumatic solenoid controls.'''
    # Channels for the wheels and motors
    frontLeftChannel = 2
    rearLeftChannel = 3
    frontRightChannel = 1
    rearRightChannel = 0
    winchMotor1 = 4
    winchMotor2 = 5
    # The channel on the driver station that the joystick is connected to
    joystickChannel = 0
    def robotInit(self):
        '''Robot initialization function - Define your inputs, and what channels they connect to'''
        self.robotDrive = wpilib.RobotDrive(self.frontLeftChannel,
                                            self.rearLeftChannel,
                                            self.frontRightChannel,
                                            self.rearRightChannel)
        self.robotDrive.setExpiration(0.1)
        # Invert the left-side motors so both sides drive the same direction.
        self.robotDrive.setInvertedMotor(RobotDrive.MotorType.kFrontLeft, True)
        self.robotDrive.setInvertedMotor(RobotDrive.MotorType.kRearLeft, True)
        self.winch_motor2 = wpilib.Talon(self.winchMotor2)
        self.winch_motor1 = wpilib.Talon(self.winchMotor1)
        self.stick = wpilib.Joystick(self.joystickChannel)
        # Joystick buttons 1-3 drive the pneumatics.
        # NOTE(review): button 3 is ALSO polled directly in teleopPeriodic to
        # run the winch forward -- confirm this double assignment is intended.
        self.fire_single_piston = wpilib.buttons.JoystickButton(self.stick, 1)
        self.fire_double_forward = wpilib.buttons.JoystickButton(self.stick, 2)
        self.fire_double_backward = wpilib.buttons.JoystickButton(self.stick, 3)
        self.single_solenoid = wpilib.Solenoid(1)
        self.double_solenoid = wpilib.DoubleSolenoid(2,3)
    def autonomousInit(self):
        '''Runs once each time the robot enters in Auto Mode'''
        self.auto_loop_counter = 0
    def autonomousPeriodic(self):
        '''called periodically during Autonomous'''
        # Drive backwards at half speed for the first 100 iterations, then stop.
        if self.auto_loop_counter < 100:
            self.robotDrive.drive(-0.5, 0)
            self.auto_loop_counter +=1
        else:
            self.robotDrive.drive(0,0)
    def teleopInit(self):
        ''' runs Sensors and timers etc'''
        pass
    def teleopPeriodic(self):
        '''Runs the motors, Button controls, solenoids etc'''
        # NOTE(review): mecanumDrive_Cartesian takes (x, y, rotation, gyroAngle)
        # in wpilib -- here raw axis 4 is x, getY() is y and getX() is the
        # rotation input; confirm the axis mapping matches the driver setup.
        self.robotDrive.mecanumDrive_Cartesian(self.stick.getRawAxis(4),
                                               self.stick.getY(),
                                               self.stick.getX(), 0);
        # Winch: button 3 runs both motors forward, button 4 reverse, else stop.
        if self.stick.getRawButton(3):
            self.winch_motor2.set(1)
            self.winch_motor1.set(1)
        elif self.stick.getRawButton(4):
            self.winch_motor1.set(-1)
            self.winch_motor2.set(-1)
        else:
            self.winch_motor1.set(0)
            self.winch_motor2.set(0)
        # Single solenoid fires only while button 1 is held.
        if (self.fire_single_piston.get()):
            self.single_solenoid.set(True)
        else:
            self.single_solenoid.set(False)
        # Double solenoid: button 2 extends, button 3 retracts; with neither
        # pressed no new command is issued (it keeps its last position).
        if (self.fire_double_forward.get()):
            self.double_solenoid.set(wpilib.DoubleSolenoid.Value.kForward)
        elif (self.fire_double_backward.get()):
            self.double_solenoid.set(wpilib.DoubleSolenoid.Value.kReverse)
    def testPeriodic(self):
        '''Function called periodically during Test Mode'''
        wpilib.LiveWindow.run()
if __name__ == '__main__':
wpilib.run(MyRobot)
|
10,828 | 9f33f05a4b5b9da95a0facd1000222dc450b42a9 | __author__ = 'Filip'
# Advent-of-Code style light grid: each instruction adjusts per-light
# brightness ("turn on" +1, "turn off" -1 floored at 0, "toggle" +2) over an
# inclusive rectangle; the answer is the total brightness of the 1000x1000 grid.
TURN_ON = 0
TURN_OFF = 1
TOGGLE = 3
with open('input.txt') as f:
    lines = f.readlines()
lights = [[0] * 1000 for _ in range(1000)]
for line in lines:
    split_line = line.split(' ')
    if 'turn' in line:
        # "turn on x1,y1 through x2,y2" / "turn off x1,y1 through x2,y2"
        start_coord = split_line[2]
        stop_coord = split_line[4]
        action = TURN_ON if split_line[1] == 'on' else TURN_OFF
    elif 'toggle' in line:
        # "toggle x1,y1 through x2,y2"
        start_coord = split_line[1]
        stop_coord = split_line[3]
        action = TOGGLE
    x1, y1 = [int(x) for x in start_coord.split(',')]
    x2, y2 = [int(x) for x in stop_coord.split(',')]
    for x in range(x1, x2 + 1):
        for y in range(y1, y2 + 1):
            if action == TURN_ON:
                lights[x][y] += 1
            elif action == TURN_OFF:
                lights[x][y] = max(lights[x][y] - 1, 0)
            else:
                lights[x][y] += 2
count = sum(sum(row) for row in lights)
# BUG FIX: `print count` is Python-2-only syntax; the call form below is
# valid (and equivalent) on both Python 2 and 3.
print(count)
|
10,829 | 6294170ebab420501ef25d4fdafc661337d1ca2d | # # -*- coding:utf-8 -*-
# from splinter import Browser
# browser = Browser("firefox")
# browser.visit("http://google.com")
# browser.fill("q", "splinter - python acceptance testing for web applications")
# button = browser.find_by_name("btnG")
# button.click()
# assert browser.is_text_present("splinter.readthedocs.org")
|
10,830 | 10fa599628f66bd409ebcfd501fa9a23a3a1b99d | import sqlalchemy
from .db_session import SqlAlchemyBase
class Notice(SqlAlchemyBase):
    """ORM model for a notice: a named text entry in the `notices` table."""
    __tablename__ = 'notices'
    # Surrogate primary key.
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True)
    # NOTE(review): `name` and `text` are declared as Integer columns, but the
    # field names suggest string content -- confirm whether these should be
    # sqlalchemy.String / sqlalchemy.Text before relying on them.
    name = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
    text = sqlalchemy.Column(sqlalchemy.Integer, nullable=False)
|
10,831 | 53471d8f7241403eb3a041fd408387835fe90d4f | class Solution:
def findNthDigit(self, n: int) -> int:
k, l, cnt = 1, 1, 9
while n > cnt:
n -= cnt
k *= 10
l += 1
cnt = 9*k*l
n -= 1
q, r = n // l, n % l
k += q
return int(str(k)[r])
|
10,832 | 53d83c394a56200be0039622b3c166aa26719031 | class Aula:
def __init__(self, professor, quantidade, sala, disciplina, id=0):
self.id = id
self.professor = professor
self.quantidade = quantidade
self.sala = sala
self.disciplina = disciplina
|
10,833 | f99d89404cc802a76a20cde6d3ba2ae3ad2ca938 | # Generated by Django 3.2 on 2021-05-11 07:43
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: constrain Route.delivery to the choices
    'internal' (default), 'in' and 'out'."""
    dependencies = [
        ('api', '0008_auto_20210510_1805'),
    ]
    operations = [
        migrations.AlterField(
            model_name='route',
            name='delivery',
            field=models.CharField(choices=[('internal', 'internal'), ('in', 'in'), ('out', 'out')], default='internal', max_length=10),
        ),
    ]
|
10,834 | 849b38a243a764d7a12cfe586f4f187624cca96d | import numpy as np
import h5py
# datafile = '/net/liuwenran/datasets/DEAP/experiment/ex3_cnn_face/finalExData_shuffled/test.h5'
# originFile = h5py.File(datafile,'r')
# keys = originFile.keys()
# originLabel = originFile[keys[2]].value
result = np.load('output/result_deap_seperate6w_savefirst.npy')
originLabel = np.load('output/label_deap_seperate6w_savefirst.npy')
firstPath = np.load('output/firstPath_deap_seperate6w_savefirst.npy')
num = result.shape[0]
session = []
for i in range(num):
sessionNum = len(session)
sampleNow = firstPath[i]
ind = sampleNow.rindex('/')
sessionNow = sampleNow[:ind]
flag = 0
for j in range(sessionNum):
if sessionNow == session[j]:
flag = 1
break
if flag == 1:
continue
session.append(sessionNow)
sessionCount = len(session)
personLabel = {}
personResult = {}
for i in range(sessionCount):
personLabel[session[i]] = []
personResult[session[i]] = []
for i in range(num):
sampleNow = firstPath[i]
ind = sampleNow.rindex('/')
sessionNow = sampleNow[:ind]
personLabel[sessionNow].append(originLabel[i])
personResult[sessionNow].append(result[i])
# Collapse to one value per session by averaging within each session.
# BUG FIX: the two assignments inside the loop were swapped -- `result`
# (predictions) was being filled from personLabel (ground truth) and
# `originLabel` from personResult, which flips the sign of diffMean and makes
# MERP normalize by the predictions instead of the labels.
result = np.zeros(sessionCount)
originLabel = np.zeros(sessionCount)
for i, name in enumerate(session):
    result[i] = np.mean(personResult[name])
    originLabel[i] = np.mean(personLabel[name])
# Undo the (x - 54) / 100 scaling, then convert via 60 / (x / 128) --
# presumably frames at 128 Hz to beats/events per minute; confirm against
# how the labels were generated.
result = 100 * result + 54
result = 60 / (result / 128)
originLabel = 100 * originLabel + 54
originLabel = 60 / (originLabel / 128)
diff = result - originLabel
diffMean = np.mean(diff)
diffStd = np.std(diff)
diffabs = np.absolute(diff)
RMSE = diffabs
RMSE = RMSE * RMSE
RMSE = np.sqrt(np.sum(RMSE) / diffabs.shape[0])
MERP = diffabs / originLabel
MERP = np.sum(MERP) / diffabs.shape[0]
num = len(result)
resultMean = np.mean(result)
originLabelMean = np.mean(originLabel)
cov = np.sum((result - resultMean) * (originLabel - originLabelMean))
resultVar = np.var(result)
originLabelVar = np.var(originLabel)
COR = cov / np.sqrt(resultVar * num * originLabelVar * num)
print 'diffMean is ' + str(diffMean)
print 'diffStd is ' + str(diffStd)
print 'RMSE is ' + str(RMSE)
print 'MERP is ' + str(MERP)
print 'COR is ' + str(COR)
|
10,835 | 56e46c61b7dc783cd9453cab6bcddefd6cf13746 | # coding:utf-8
'''
Created on Sep 26, 2013
@author: likaiguo.happy@gmail.com
'''
WRITE_LOG = True
STAR_LIST = [i / 2.0 for i in range(1, 11)]
# 评分相关全局变量
INDUSTRY_CATAGORY_LIST = [u'互联网/电子商务' , u'计算机软件', u'IT服务(系统/数据/维护)/多领域经营', u'通信/电信/网络设备', \
u'计算机硬件及网络设备' , u'通信/电信运营、增值服务', u'网络游戏', u'计算机软件', u'其它']
SPECIAL_INDUSTRY_DICT = {u'网络游戏':5, u'互联网/电子商务':3}
POSTION_TITLE_DICT = {u'CTO':12, u'CEO':12 , u'首席技术官CTO':12, u'首席信息官CIO':12, \
u"总经理":11, u'总监':9, u'资深经理':7, u'高级经理':7 , \
u'产品经理':1, u'经理':5, u'组长':3 , u'leader':3, \
u'负责人':5, u'主管':5, u'team leader':3}
responsibility_importance_dict = {r'.*负责.*?(项目|产品)':7,
r'.*(独立负责|独自负责|主程|主美|主力程序员|主力开发).*':5,
r'.*(独立完成|独自完成|独立实现|独自实现|独立设计|独自设计).*':3
}
import re
RESPONSIBILITY_REGEX_LIST = [ (re.compile(regex) , weight)for regex , weight in responsibility_importance_dict.items()]
DEGREE_TUPLE_LIST = [(u'大专', 1.5), (u'本科', 7), (u'硕士', 10), (u'博士', 15)]
PROFICIENCY_DICT = {u'了解':0.8, u'一般': 1.2, u'良好':1.5 , u'熟练':2 , u'精通':3}
|
10,836 | 5a1279fbabe887f17d6030091714ce10c60c1752 | #!env python
import time
import numpy as np
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtOpenGL import *
from Bee.gui.base.delta import delta
from Bee.util import resources
from Bee.gui.base.delta.read_stl import read_stl
from Bee.gui.style.spin import spin
from Bee.util import profile
class GLWidget(QGLWidget):
xRotationChanged = pyqtSignal(int)
yRotationChanged = pyqtSignal(int)
zRotationChanged = pyqtSignal(int)
def __init__(self, *args, **kwargs):
super(GLWidget, self).__init__()
self.setMinimumSize(600, 600)
self.delta_robot = delta.DeltaRobot()
self.xRot = -2500
self.yRot = 2000
self.zRot = 0.0
self.z_zoom = 35
self.xTran = 0
self.yTran = 0
self.h = -0.4
self.isDrawGrid = True
self.bottel_cap = read_stl.loader(resources.get_path_for_stl('bottel_cap.stl'))
self.real_local = []
def setXRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.xRot:
self.xRot = angle
self.xRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.updateGL()
def setYRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.yRot:
self.yRot = angle
self.yRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
# self.updateGL()
def setZRotation(self, angle):
self.normalizeAngle(angle)
if angle != self.zRot:
self.zRot = angle
self.zRotationChanged.emit(angle)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
self.updateGL()
def setXYTranslate(self, dx, dy):
self.xTran += dx
self.yTran -= dy
self.updateGL()
def setZoom(self, zoom):
self.z_zoom = zoom
self.updateGL()
def updateJoint(self):
self.updateGL()
def initializeGL(self):
lightPos = (5.0, 5.0, 10.0, 1.0)
reflectance1 = (0.8, 0.1, 0.0, 1.0)
reflectance2 = (0.0, 0.8, 0.2, 1.0)
reflectance3 = (0.2, 0.2, 1.0, 1.0)
ambientLight = [0.7, 0.7, 0.7, 1.0]
diffuseLight = [0.7, 0.8, 0.8, 1.0]
specularLight = [0.4, 0.4, 0.4, 1.0]
positionLight = [20.0, 20.0, 20.0, 0.0]
glLightfv(GL_LIGHT0, GL_AMBIENT, ambientLight)
glLightfv(GL_LIGHT0, GL_DIFFUSE, diffuseLight)
glLightfv(GL_LIGHT0, GL_SPECULAR, specularLight)
glLightModelf(GL_LIGHT_MODEL_TWO_SIDE, 1.0)
glLightfv(GL_LIGHT0, GL_POSITION, positionLight)
glEnable(GL_LIGHTING)
glEnable(GL_LIGHT0)
glEnable(GL_DEPTH_TEST)
glEnable(GL_NORMALIZE)
glEnable(GL_BLEND)
glClearColor(178.0/255, 213.0/255, 214.0/255, 1.0)
def drawBottelCap(self,local):
glPushMatrix()
glTranslatef(local[0], local[1], self.h)
self.bottel_cap.draw()
glPopMatrix()
def drawGL(self):
glPushMatrix()
if self.isDrawGrid:
self.drawGrid()
if len(self.real_local)>0:
for local in self.real_local:
self.drawBottelCap(local)
B = self.delta_robot.get_B_B()
b = self.delta_robot.get_B_b()
P = self.delta_robot.get_P_P()
A = self.delta_robot.get_vector_B_A()
position = self.delta_robot.Position
base_P = P
base_P[:, 0] += position
base_P[:, 1] += position
base_P[:, 2] += position
color = [108.0/255, 108.0/255, 162.0/255]
self.setupColor(color)
glLineWidth(20)
glColor3f(1,1,0)
glBegin(GL_TRIANGLES)
for i in range(3):
glVertex3f(*P[:,i])
glEnd()
color = [255.0/255, 255.0/255, 255.0/255]
self.setupColor(color)
glColor3f(1,1,1)
for i in [0,2]:
glBegin(GL_LINES)
glVertex3f(*B[:,i])
glVertex3f(*A[:,i])
glEnd()
glBegin(GL_LINES)
glVertex3f(*B[:,i])
glVertex3f(*A[:,i])
glEnd()
glBegin(GL_LINES)
glVertex3f(*P[:,i])
glVertex3f(*A[:,i])
glEnd()
average = lambda array: np.array([sum(array[0]) / 3, sum(array[1]) / 3, sum(array[2]) / 3]).T
cb_P = average(base_P)
glBegin(GL_LINES)
glVertex3f(0,0,0)
glVertex3f(*cb_P)
glEnd()
color = [206.0/255, 207.0/255, 196.0/255]
self.setupColor(color)
glBegin(GL_LINES)
glVertex3f(*cb_P)
cb_P[2] -= 0.02
glVertex3f(*cb_P)
glEnd()
color = [255.0/255, 0.0/255, 255.0/255]
self.setupColor(color)
glBegin(GL_TRIANGLES)
for i in range(3):
glVertex3f(*b[:,i])
glEnd()
color = [255.0/255, 255.0/255, 255.0/255]
self.setupColor(color)
glColor3f(1,1,1)
for i in [1]:
glBegin(GL_LINES)
glVertex3f(*B[:,i])
glVertex3f(*A[:,i])
glEnd()
glBegin(GL_LINES)
glVertex3f(*B[:,i])
glVertex3f(*A[:,i])
glEnd()
glBegin(GL_LINES)
glVertex3f(*P[:,i])
glVertex3f(*A[:,i])
glEnd()
glFlush()
glPopMatrix()
def paintGL(self):
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glPushMatrix()
glTranslate(0, 0, self.z_zoom)
glTranslate(self.xTran, self.yTran, 0)
glRotated(self.xRot/16.0, 1.0, 0.0, 0.0)
glRotated(self.yRot/16.0, 0.0, 1.0, 0.0)
glRotated(self.zRot/16.0, 0.0, 0.0, 1.0)
glRotated(+90.0, 1.0, 0.0, 0.0)
self.drawGL()
glPopMatrix()
def resizeGL(self, w, h):
side = min(w, h)
if side < 0:
return
glViewport(0, 0, w, h)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
gluPerspective(10.0, w / float(h), 1.0, 20000.0)
glMatrixMode(GL_MODELVIEW)
glLoadIdentity()
glTranslated(0.0, 0.0, -40.0)
def mousePressEvent(self, event):
self.lastPos = event.pos()
def drawGrid(self):
glPushMatrix()
glLineWidth(2)
color = [8.0/255, 108.0/255, 162.0/255]
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color)
step = 0.05
num = 10
for i in range(-num, num+1):
glBegin(GL_LINES)
glVertex3f(i*step, -num * step, self.h)
glVertex3f(i*step, num*step, self.h)
glVertex3f(-num * step, i*step, self.h)
glVertex3f(num*step, i*step, self.h)
glEnd()
glPopMatrix()
def mouseMoveEvent(self, event):
dx = event.x() - self.lastPos.x()
dy = event.y() - self.lastPos.y()
if event.buttons() & Qt.LeftButton:
self.setXRotation(self.xRot + dy)
self.setYRotation(self.yRot - dx)
elif event.buttons() & Qt.RightButton:
if (self.z_zoom + dy) < 35:
self.setZoom(self.z_zoom + dy)
elif event.buttons() & Qt.MidButton:
self.setXYTranslate(dx/100, dx/100)
self.lastPos = event.pos()
def setupColor(self, color):
glMaterialfv(GL_FRONT, GL_AMBIENT_AND_DIFFUSE, color)
def xRotation(self):
return self.xRot
def yRotation(self):
return self.yRot
def zRotation(self):
return self.zRot
def normalizeAngle(self, angle):
while (angle < 0):
angle += 360 * 16
while (angle > 360 * 16):
angle -= 360 * 16
def setDegree(self,degree):
self.delta_robot.Degree = degree
def setPosition(self,position):
self.delta_robot.Position = position
class DeltaGL(QWidget):
    """GL view of the delta robot combined with calibration-error controls.

    Three spin boxes (EW = error width, EH = error height, D = motor delay)
    hold their value multiplied by 1000; values are divided by 1000 and
    rounded to 3 decimals whenever read or persisted through the `profile`
    module -- presumably a unit conversion; confirm against
    `profile.get_scale()`.
    """
    def __init__(self, *args, **kwargs):
        super(QWidget, self).__init__()
        para_error = profile.get_scale()
        self.widget_gl = GLWidget(self)
        # Spin boxes are seeded from the persisted profile values (x1000).
        self.error_width = spin.MSpin(text="EW",value=0)
        self.error_width.spin.setMaximum(10000)
        self.error_width.setValue(para_error['ew']*1000)
        self.error_height = spin.MSpin(text="EH",value=0)
        self.error_height.spin.setMaximum(10000)
        self.error_height.setValue(para_error['eh']*1000)
        self.delay_motor = spin.MSpin(text="D",value=0)
        self.delay_motor.spin.setMaximum(10000)
        self.delay_motor.setValue(para_error['delay_motor']*1000)
        # "LOAD" persists the three values back into the profile.
        self.load_error = QPushButton("LOAD")
        self.load_error.clicked.connect(self.upload_error)
        # Layout: GL widget on top, spin boxes and the button in a row below.
        vbox_error = QVBoxLayout(self)
        vbox_error.addWidget(self.widget_gl)
        hbox = QHBoxLayout()
        hbox.addWidget(self.error_width)
        hbox.addWidget(self.error_height)
        hbox.addWidget(self.delay_motor)
        hbox.addStretch(1)
        hbox.addWidget(self.load_error)
        vbox_error.addLayout(hbox)
    def upload_error(self):
        # Persist the (scaled-back) spin-box values and notify the user.
        ew = round(self.error_width.getValue()/1000,3)
        eh = round(self.error_height.getValue()/1000,3)
        delay_motor = round(self.delay_motor.getValue()/1000,3)
        profile.set_error_width(ew)
        profile.set_error_height(eh)
        profile.set_error_delay_motor(delay_motor)
        QMessageBox.information(self," ","Updated")
    def get_error_width(self):
        # Current EW value, scaled back from the x1000 spin-box representation.
        return round(self.error_width.getValue()/1000,3)
    def get_error_height(self):
        return round(self.error_height.getValue()/1000,3)
    def get_delay_motor(self):
        return round(self.delay_motor.getValue()/1000,3)
    def upload_local_delta(self,real_local):
        # Push new cap positions into the GL scene and repaint.
        self.widget_gl.real_local = real_local
        self.widget_gl.updateJoint()
    def upload_degree_delta(self,degree):
        self.widget_gl.delta_robot.Degree = degree
        self.widget_gl.updateJoint()
    def upload_position_delta(self,position):
        self.widget_gl.delta_robot.Position = position
        self.widget_gl.updateJoint()
def reload_position_gl(self,degree_list):
self.widget_gl.setDegree(degree_list)
for position in self.widget_gl.delta_robot.get_point_on_line():
self.widget_gl.updateGL()
time.sleep(0.001) |
10,837 | 0c0a3298259eb206d181dcd7c72eecb25c0e600f | import os
import discord
import requests
import json
from discord.ext import commands
from main_cog import main_cog
from music_cog import music_cog
bot = commands.Bot(command_prefix='/')
bot.remove_command('help')
bot.add_cog(main_cog(bot))
bot.add_cog(music_cog(bot))
token = os.getenv('TOKEN')
bot.run(token) |
10,838 | 4075f45482d512263bfb8c607c74559e35f591da | import discord
from discord.ext import commands
import os
import sys
import passwords as keys
client = commands.Bot(command_prefix = '!')
client.remove_command('help')
@client.event
async def on_ready():
servers = client.guilds
for server in servers:
for channel in server.text_channels:
if(channel.name == 'general'):
await channel.send("I am online! Type !help for list of commands!")
print('Bot is online!')
@client.event
async def on_member_join(member):
servers = client.guilds
for server in servers:
for channel in server.text_channels:
if(channel.name == 'general'):
await channel.send(f'Welcome to {server.name}! You can see my commands by typing !help.')
print(f'{member} has joined the server!')
@client.command()
async def ping(ctx):
await ctx.send(f'Pong! {round(client.latency * 1000)}ms')
for filename in os.listdir('./cogs'):
if filename.endswith('.py'):
client.load_extension(f'cogs.{filename[:-3]}')
client.run(keys.get_test_token())
|
10,839 | 26be7335a42081fd516cbef552a02d99d7fad91d | import unittest
import math
from unittest import mock
from typing import Optional, Dict, Set, Any, Union
import sympy
from qupulse.parameter_scope import Scope, DictScope
from qupulse.utils.types import ChannelID
from qupulse.expressions import Expression, ExpressionScalar
from qupulse.pulses import ConstantPT, FunctionPT, RepetitionPT, ForLoopPT, ParallelChannelPT, MappingPT,\
TimeReversalPT, AtomicMultiChannelPT
from qupulse.pulses.pulse_template import AtomicPulseTemplate, PulseTemplate, UnknownVolatileParameter
from qupulse.pulses.multi_channel_pulse_template import MultiChannelWaveform
from qupulse.program.loop import Loop
from qupulse._program.transformation import Transformation
from qupulse._program.waveforms import TransformingWaveform
from tests.pulses.sequencing_dummies import DummyWaveform
from tests._program.transformation_tests import TransformationStub
class PulseTemplateStub(PulseTemplate):
"""All abstract methods are stubs that raise NotImplementedError to catch unexpected calls. If a method is needed in
a test one should use mock.patch or mock.patch.object"""
def __init__(self, identifier=None,
defined_channels=None,
duration=None,
parameter_names=None,
measurement_names=None,
registry=None):
super().__init__(identifier=identifier)
self._defined_channels = defined_channels
self._duration = duration
self._parameter_names = parameter_names
self._measurement_names = set() if measurement_names is None else measurement_names
self.internal_create_program_args = []
self._register(registry=registry)
@property
def defined_channels(self) -> Set[ChannelID]:
if self._defined_channels:
return self._defined_channels
else:
raise NotImplementedError()
@property
def parameter_names(self) -> Set[str]:
if self._parameter_names is None:
raise NotImplementedError()
return self._parameter_names
def get_serialization_data(self, serializer: Optional['Serializer']=None) -> Dict[str, Any]:
# required for hashability
return {'id_self': id(self)}
@classmethod
def deserialize(cls, serializer: Optional['Serializer']=None, **kwargs) -> 'AtomicPulseTemplateStub':
raise NotImplementedError()
@property
def duration(self) -> Expression:
if self._duration is None:
raise NotImplementedError()
return self._duration
def _internal_create_program(self, *,
scope: Scope,
measurement_mapping: Dict[str, Optional[str]],
channel_mapping: Dict[ChannelID, Optional[ChannelID]],
global_transformation: Optional[Transformation],
to_single_waveform: Set[Union[str, 'PulseTemplate']],
parent_loop: Loop):
raise NotImplementedError()
@property
def measurement_names(self):
return self._measurement_names
@property
def integral(self) -> Dict[ChannelID, ExpressionScalar]:
raise NotImplementedError()
@property
def initial_values(self) -> Dict[ChannelID, ExpressionScalar]:
raise NotImplementedError()
@property
def final_values(self) -> Dict[ChannelID, ExpressionScalar]:
raise NotImplementedError()
def get_appending_internal_create_program(waveform=DummyWaveform(),
                                          always_append=False,
                                          measurements: list=None):
    """Build a fake `_internal_create_program` implementation for mocking.

    The returned function appends `waveform` (and, when given, `measurements`)
    to the `parent_loop` it receives -- either unconditionally
    (`always_append=True`) or only when the scope contains the magic
    'append_a_child' parameter.

    NOTE: the default `waveform=DummyWaveform()` is evaluated once at
    definition time, so all callers relying on the default share one instance.
    """
    def internal_create_program(*, scope, parent_loop: Loop, **_):
        if always_append or 'append_a_child' in scope:
            if measurements is not None:
                parent_loop.add_measurements(measurements=measurements)
            parent_loop.append_child(waveform=waveform)
    return internal_create_program
class AtomicPulseTemplateStub(AtomicPulseTemplate):
def __init__(self, *, duration: Expression=None, measurements=None,
parameter_names: Optional[Set] = None, identifier: Optional[str]=None,
registry=None) -> None:
super().__init__(identifier=identifier, measurements=measurements)
self._duration = duration
self._parameter_names = parameter_names
self._register(registry=registry)
def build_waveform(self, parameters, channel_mapping):
raise NotImplementedError()
@property
def defined_channels(self) -> Set['ChannelID']:
raise NotImplementedError()
@property
def parameter_names(self) -> Set[str]:
if self._parameter_names is None:
raise NotImplementedError()
return self._parameter_names
def get_serialization_data(self, serializer: Optional['Serializer']=None) -> Dict[str, Any]:
raise NotImplementedError()
@property
def measurement_names(self):
raise NotImplementedError()
@classmethod
def deserialize(cls, serializer: Optional['Serializer']=None, **kwargs) -> 'AtomicPulseTemplateStub':
raise NotImplementedError()
@property
def duration(self) -> Expression:
return self._duration
@property
def integral(self) -> Dict[ChannelID, ExpressionScalar]:
raise NotImplementedError()
def _as_expression(self) -> Dict[ChannelID, ExpressionScalar]:
raise NotImplementedError()
class PulseTemplateTest(unittest.TestCase):
    """Tests for ``PulseTemplate.create_program`` / ``_create_program``:
    argument forwarding to the internal hook, volatile-parameter handling,
    single-waveform compilation, defaults, and the operator overloads."""

    def test_create_program(self) -> None:
        # create_program must evaluate parameters into a DictScope, forward all
        # arguments to _create_program, and leave the caller's dicts untouched.
        template = PulseTemplateStub(defined_channels={'A'}, parameter_names={'foo'})
        parameters = {'foo': 2.126, 'bar': -26.2, 'hugo': 'exp(sin(pi/2))', 'append_a_child': '1'}
        previous_parameters = parameters.copy()
        measurement_mapping = {'M': 'N'}
        previos_measurement_mapping = measurement_mapping.copy()
        channel_mapping = {'A': 'B'}
        previous_channel_mapping = channel_mapping.copy()
        volatile = {'foo'}
        # String parameters are evaluated ('exp(sin(pi/2))' -> float, '1' -> int).
        expected_scope = DictScope.from_kwargs(foo=2.126, bar=-26.2, hugo=math.exp(math.sin(math.pi/2)),
                                               volatile=volatile, append_a_child=1)
        to_single_waveform = {'voll', 'toggo'}
        global_transformation = TransformationStub()
        expected_internal_kwargs = dict(scope=expected_scope,
                                        measurement_mapping=measurement_mapping,
                                        channel_mapping=channel_mapping,
                                        global_transformation=global_transformation,
                                        to_single_waveform=to_single_waveform)
        dummy_waveform = DummyWaveform()
        expected_program = Loop(children=[Loop(waveform=dummy_waveform)])
        with mock.patch.object(template,
                               '_create_program',
                               wraps=get_appending_internal_create_program(dummy_waveform)) as _create_program:
            program = template.create_program(parameters=parameters,
                                              measurement_mapping=measurement_mapping,
                                              channel_mapping=channel_mapping,
                                              to_single_waveform=to_single_waveform,
                                              global_transformation=global_transformation,
                                              volatile=volatile)
            _create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=program)
        self.assertEqual(expected_program, program)
        # Input mappings/parameters must not have been mutated.
        self.assertEqual(previos_measurement_mapping, measurement_mapping)
        self.assertEqual(previous_channel_mapping, channel_mapping)
        self.assertEqual(previous_parameters, parameters)

    def test__create_program(self) -> None:
        # _create_program forwards verbatim to _internal_create_program and
        # must not touch the parent loop itself.
        scope = DictScope.from_kwargs(a=1., b=2., volatile={'c'})
        measurement_mapping = {'M': 'N'}
        channel_mapping = {'B': 'A'}
        global_transformation = TransformationStub()
        to_single_waveform = {'voll', 'toggo'}
        parent_loop = Loop()
        template = PulseTemplateStub()
        with mock.patch.object(template, '_internal_create_program') as _internal_create_program:
            template._create_program(scope=scope,
                                     measurement_mapping=measurement_mapping,
                                     channel_mapping=channel_mapping,
                                     global_transformation=global_transformation,
                                     to_single_waveform=to_single_waveform,
                                     parent_loop=parent_loop)
            _internal_create_program.assert_called_once_with(
                scope=scope,
                measurement_mapping=measurement_mapping,
                channel_mapping=channel_mapping,
                global_transformation=global_transformation,
                to_single_waveform=to_single_waveform,
                parent_loop=parent_loop)
        self.assertEqual(parent_loop, Loop())
        # Volatile parameters of a template that is compiled to a single
        # waveform are not supported and must raise.
        with self.assertRaisesRegex(NotImplementedError, "volatile"):
            template._parameter_names = {'c'}
            template._create_program(scope=scope,
                                     measurement_mapping=measurement_mapping,
                                     channel_mapping=channel_mapping,
                                     global_transformation=global_transformation,
                                     to_single_waveform={template},
                                     parent_loop=parent_loop)

    def test__create_program_single_waveform(self) -> None:
        # Selecting the template (by object or by identifier) for
        # to_single_waveform compiles the inner program into one waveform;
        # a global transformation is applied to that single waveform only.
        template = PulseTemplateStub(identifier='pt_identifier', parameter_names={'alpha'})
        for to_single_waveform in ({template}, {template.identifier}):
            for global_transformation in (None, TransformationStub()):
                scope = DictScope.from_kwargs(a=1., b=2., volatile={'a'})
                measurement_mapping = {'M': 'N'}
                channel_mapping = {'B': 'A'}
                parent_loop = Loop()
                wf = DummyWaveform()
                single_waveform = DummyWaveform()
                measurements = [('m', 0, 1), ('n', 0.1, .9)]
                expected_inner_program = Loop(children=[Loop(waveform=wf)], measurements=measurements)
                appending_create_program = get_appending_internal_create_program(wf,
                                                                                 measurements=measurements,
                                                                                 always_append=True)
                if global_transformation:
                    final_waveform = TransformingWaveform(single_waveform, global_transformation)
                else:
                    final_waveform = single_waveform
                expected_program = Loop(children=[Loop(waveform=final_waveform)],
                                        measurements=measurements)
                with mock.patch.object(template, '_internal_create_program',
                                       wraps=appending_create_program) as _internal_create_program:
                    with mock.patch('qupulse.pulses.pulse_template.to_waveform',
                                    return_value=single_waveform) as to_waveform:
                        template._create_program(scope=scope,
                                                 measurement_mapping=measurement_mapping,
                                                 channel_mapping=channel_mapping,
                                                 global_transformation=global_transformation,
                                                 to_single_waveform=to_single_waveform,
                                                 parent_loop=parent_loop)
                        # The transformation must NOT be forwarded to the inner
                        # program; it is applied after to_waveform().
                        _internal_create_program.assert_called_once_with(scope=scope,
                                                                         measurement_mapping=measurement_mapping,
                                                                         channel_mapping=channel_mapping,
                                                                         global_transformation=None,
                                                                         to_single_waveform=to_single_waveform,
                                                                         parent_loop=expected_inner_program)
                        to_waveform.assert_called_once_with(expected_inner_program)
                        # Normalize measurement containers before comparing.
                        expected_program._measurements = set(expected_program._measurements)
                        parent_loop._measurements = set(parent_loop._measurements)
                        self.assertEqual(expected_program, parent_loop)

    def test_create_program_defaults(self) -> None:
        # With no arguments: empty scope, identity mappings, no transformation.
        template = PulseTemplateStub(defined_channels={'A', 'B'}, parameter_names={'foo'}, measurement_names={'hugo', 'foo'})
        expected_internal_kwargs = dict(scope=DictScope.from_kwargs(),
                                        measurement_mapping={'hugo': 'hugo', 'foo': 'foo'},
                                        channel_mapping={'A': 'A', 'B': 'B'},
                                        global_transformation=None,
                                        to_single_waveform=set())
        dummy_waveform = DummyWaveform()
        expected_program = Loop(children=[Loop(waveform=dummy_waveform)])
        with mock.patch.object(template,
                               '_internal_create_program',
                               wraps=get_appending_internal_create_program(dummy_waveform, True)) as _internal_create_program:
            program = template.create_program()
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=program)
        self.assertEqual(expected_program, program)

    def test_create_program_channel_mapping(self) -> None:
        # Partial channel mappings are completed with identity entries.
        template = PulseTemplateStub(defined_channels={'A', 'B'})
        expected_internal_kwargs = dict(scope=DictScope.from_kwargs(),
                                        measurement_mapping=dict(),
                                        channel_mapping={'A': 'C', 'B': 'B'},
                                        global_transformation=None,
                                        to_single_waveform=set())
        with mock.patch.object(template, '_internal_create_program') as _internal_create_program:
            template.create_program(channel_mapping={'A': 'C'})
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=Loop())

    def test_create_program_volatile(self) -> None:
        # volatile may be given as a single name or a set; unknown volatile
        # names trigger an UnknownVolatileParameter warning.
        template = PulseTemplateStub(defined_channels={'A', 'B'})
        parameters = {'abc': 1.}
        expected_internal_kwargs = dict(scope=DictScope.from_kwargs(volatile={'abc'}, **parameters),
                                        measurement_mapping=dict(),
                                        channel_mapping={'A': 'A', 'B': 'B'},
                                        global_transformation=None,
                                        to_single_waveform=set())
        with mock.patch.object(template, '_internal_create_program') as _internal_create_program:
            template.create_program(parameters=parameters, volatile='abc')
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=Loop())
        with mock.patch.object(template, '_internal_create_program') as _internal_create_program:
            template.create_program(parameters=parameters, volatile={'abc'})
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=Loop())
        expected_internal_kwargs = dict(scope=DictScope.from_kwargs(volatile={'abc', 'dfg'}, **parameters),
                                        measurement_mapping=dict(),
                                        channel_mapping={'A': 'A', 'B': 'B'},
                                        global_transformation=None,
                                        to_single_waveform=set())
        with mock.patch.object(template, '_internal_create_program') as _internal_create_program:
            with self.assertWarns(UnknownVolatileParameter):
                template.create_program(parameters=parameters, volatile={'abc', 'dfg'})
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=Loop())

    def test_create_program_none(self) -> None:
        # If the internal hook appends nothing, create_program returns None.
        template = PulseTemplateStub(defined_channels={'A'}, parameter_names={'foo'})
        parameters = {'foo': 2.126, 'bar': -26.2, 'hugo': 'exp(sin(pi/2))'}
        measurement_mapping = {'M': 'N'}
        channel_mapping = {'A': 'B'}
        volatile = {'hugo'}
        scope = DictScope.from_kwargs(foo=2.126, bar=-26.2, hugo=math.exp(math.sin(math.pi/2)), volatile=volatile)
        expected_internal_kwargs = dict(scope=scope,
                                        measurement_mapping=measurement_mapping,
                                        channel_mapping=channel_mapping,
                                        global_transformation=None,
                                        to_single_waveform=set())
        with mock.patch.object(template,
                               '_internal_create_program') as _internal_create_program:
            program = template.create_program(parameters=parameters,
                                              measurement_mapping=measurement_mapping,
                                              channel_mapping=channel_mapping,
                                              volatile=volatile)
            _internal_create_program.assert_called_once_with(**expected_internal_kwargs, parent_loop=Loop())
        self.assertIsNone(program)

    def test_matmul(self) -> None:
        # a @ b delegates to SequencePulseTemplate.concatenate(a, b).
        a = PulseTemplateStub()
        b = PulseTemplateStub()
        from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate
        with mock.patch.object(SequencePulseTemplate, 'concatenate', return_value='concat') as mock_concatenate:
            self.assertEqual(a @ b, 'concat')
            mock_concatenate.assert_called_once_with(a, b)

    def test_pow(self) -> None:
        # pt ** n is shorthand for pt.with_repetition(n).
        pt = PulseTemplateStub()
        pow_pt = pt ** 5
        self.assertEqual(pow_pt, pt.with_repetition(5))

    def test_rmatmul(self) -> None:
        # Reflected matmul: non-template left operand is forwarded as-is.
        a = PulseTemplateStub()
        b = (1, 2, 3)
        from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate
        with mock.patch.object(SequencePulseTemplate, 'concatenate', return_value='concat') as mock_concatenate:
            self.assertEqual(b @ a, 'concat')
            mock_concatenate.assert_called_once_with(b, a)

    def test_format(self) -> None:
        # format spec lists the attributes to render, separated by ';'.
        a = PulseTemplateStub(identifier='asd', duration=Expression(5))
        self.assertEqual("PulseTemplateStub(identifier='asd')", str(a))
        self.assertEqual("PulseTemplateStub(identifier='asd')", format(a))
        self.assertEqual("PulseTemplateStub(identifier='asd', duration='5')",
                         "{:identifier;duration}".format(a))
class WithMethodTests(unittest.TestCase):
    """Verify that the ``with_*`` convenience methods construct the
    equivalent composite pulse templates, including the flattening
    optimizations for chained parallel-channel and repetition calls."""

    def setUp(self) -> None:
        self.fpt = FunctionPT(1.4, 'sin(f*t)', 'X')
        self.cpt = ConstantPT(1.4, {'Y': 'start + idx * step'})

    def test_parallel_channels(self):
        expected = ParallelChannelPT(self.fpt, {'K': 'k'})
        self.assertEqual(expected, self.fpt.with_parallel_channels({'K': 'k'}))

    def test_parallel_channels_optimization(self):
        # Two stacked calls must collapse into a single ParallelChannelPT.
        expected = ParallelChannelPT(self.fpt, {'K': 'k', 'C': 'c'})
        chained = self.fpt.with_parallel_channels({'K': 'k'}).with_parallel_channels({'C': 'c'})
        self.assertEqual(expected, chained)

    def test_iteration(self):
        expected = ForLoopPT(self.cpt, 'idx', 'n_steps')
        self.assertEqual(expected, self.cpt.with_iteration('idx', 'n_steps'))

    def test_appended(self):
        expected = self.fpt @ self.fpt.with_time_reversal()
        self.assertEqual(expected, self.fpt.with_appended(self.fpt.with_time_reversal()))

    def test_repetition(self):
        expected = RepetitionPT(self.fpt, 6)
        self.assertEqual(expected, self.fpt.with_repetition(6))

    def test_repetition_optimization(self):
        # unstable test due to flimsy expression equality :(
        expected = RepetitionPT(self.fpt, ExpressionScalar(6) * 2)
        self.assertEqual(expected, self.fpt.with_repetition(6).with_repetition(2))

    def test_time_reversal(self):
        expected = TimeReversalPT(self.fpt)
        self.assertEqual(expected, self.fpt.with_time_reversal())

    def test_parallel_atomic(self):
        expected = AtomicMultiChannelPT(self.fpt, self.cpt)
        self.assertEqual(expected, self.fpt.with_parallel_atomic(self.cpt))
class AtomicPulseTemplateTests(unittest.TestCase):
    """Tests for ``AtomicPulseTemplate._internal_create_program``:
    waveform building, measurement mapping, global transformation,
    empty-waveform handling, and the volatile-parameter guard."""

    def test_internal_create_program(self) -> None:
        measurement_windows = [('M', 0, 5)]
        single_wf = DummyWaveform(duration=6, defined_channels={'A'})
        wf = MultiChannelWaveform([single_wf])
        template = AtomicPulseTemplateStub(measurements=measurement_windows, parameter_names={'foo'})
        # 'gutes_zeuch' is volatile but is NOT a parameter of the template,
        # so it must not trip the volatile assertion.
        scope = DictScope.from_kwargs(foo=7.2, volatile={'gutes_zeuch'})
        measurement_mapping = {'M': 'N'}
        channel_mapping = {'B': 'A'}
        program = Loop()
        # Measurement 'M' is renamed to 'N' via the mapping.
        expected_program = Loop(children=[Loop(waveform=wf)],
                                measurements=[('N', 0, 5)])
        with mock.patch.object(template, 'build_waveform', return_value=wf) as build_waveform:
            template._internal_create_program(scope=scope,
                                              measurement_mapping=measurement_mapping,
                                              channel_mapping=channel_mapping,
                                              parent_loop=program,
                                              to_single_waveform=set(),
                                              global_transformation=None)
            build_waveform.assert_called_once_with(parameters=scope, channel_mapping=channel_mapping)
        self.assertEqual(expected_program, program)
        # MultiChannelProgram calls cleanup
        program.cleanup()

    def test_internal_create_program_transformation(self) -> None:
        # A global transformation wraps the built waveform in a
        # TransformingWaveform before it is appended.
        inner_wf = DummyWaveform()
        template = AtomicPulseTemplateStub(parameter_names=set())
        program = Loop()
        global_transformation = TransformationStub()
        scope = DictScope.from_kwargs()
        expected_program = Loop(children=[Loop(waveform=TransformingWaveform(inner_wf, global_transformation))])
        with mock.patch.object(template, 'build_waveform', return_value=inner_wf):
            template._internal_create_program(scope=scope,
                                              measurement_mapping={},
                                              channel_mapping={},
                                              parent_loop=program,
                                              to_single_waveform=set(),
                                              global_transformation=global_transformation)
        self.assertEqual(expected_program, program)

    def test_internal_create_program_no_waveform(self) -> None:
        # build_waveform returning None means "no content": nothing is
        # appended and measurement windows are never requested.
        measurement_windows = [('M', 0, 5)]
        template = AtomicPulseTemplateStub(measurements=measurement_windows, parameter_names={'foo'})
        scope = DictScope.from_kwargs(foo=3.5, bar=3, volatile={'bar'})
        measurement_mapping = {'M': 'N'}
        channel_mapping = {'B': 'A'}
        program = Loop()
        expected_program = Loop()
        with mock.patch.object(template, 'build_waveform', return_value=None) as build_waveform:
            with mock.patch.object(template,
                                   'get_measurement_windows',
                                   wraps=template.get_measurement_windows) as get_meas_windows:
                template._internal_create_program(scope=scope,
                                                  measurement_mapping=measurement_mapping,
                                                  channel_mapping=channel_mapping,
                                                  parent_loop=program,
                                                  to_single_waveform=set(),
                                                  global_transformation=None)
                build_waveform.assert_called_once_with(parameters=scope, channel_mapping=channel_mapping)
                get_meas_windows.assert_not_called()
        self.assertEqual(expected_program, program)

    def test_internal_create_program_volatile(self) -> None:
        # A volatile parameter that IS a template parameter must trigger the
        # assertion and leave the parent loop unchanged.
        template = AtomicPulseTemplateStub(parameter_names={'foo'})
        scope = DictScope.from_kwargs(foo=3.5, bar=3, volatile={'foo'})
        measurement_mapping = {'M': 'N'}
        channel_mapping = {'B': 'A'}
        program = Loop()
        with self.assertRaisesRegex(AssertionError, "volatile"):
            template._internal_create_program(scope=scope,
                                              measurement_mapping=measurement_mapping,
                                              channel_mapping=channel_mapping,
                                              parent_loop=program,
                                              to_single_waveform=set(),
                                              global_transformation=None)
        self.assertEqual(Loop(), program)
|
10,840 | ff82da4c6cf77fe9b5bc0c4569c4cd89bc8f982b | # -*- coding: utf-8 -*-
"""
@author: C. J. F. Delcourt
"""
#%% importing required packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# setting the working directory in which the excel files from
# Delcourt and Veraverbeke (2022) were saved
# NOTE(review): paths below are built with '+', so a non-empty wdir must end
# with a path separator.
wdir = ""
# loading size-class-specific G and MSD values created from larix_fwd.R script
df_fwd = pd.read_csv(wdir+"outputs/table3_fwd_summary.csv")
#%% values of the multiplication factor M
# storing size-class-specific G and MSD values from our study and those
# from other tree species in the Canadian Northwest Territories and
# Saskatchewan by diameter size class
boreal_dict = {'lcajYA': {'G': df_fwd.iloc[1:,3].to_list(),
                          'MSD': [round(x,2) for x in df_fwd.iloc[1:,1].to_list()]},
               'llarSK' : {'G' : [0.51, 0.51, 0.49, 0.55],
                           'MSD' : [0.475, 2.60, 14.1, 40.6]},
               'pmarSK' : {'G' : [0.56, 0.51, 0.49, 0.49],
                           'MSD' : [0.487, 3.49, 15.7, 33.8]},
               'pglaSK' : {'G' : [0.54, 0.54, 0.46, 0.41],
                           'MSD' : [0.528, 3.28, 15.8, 34.2]},
               'pmarNT' : {'G' : [0.62, 0.59, 0.55, 0.52],
                           'MSD' : [0.491, 3.573, 15.0, 34.7]},
               'pglaNT' : {'G' : [0.56, 0.54, 0.49, 0.45],
                           'MSD' : [0.498, 3.248, 15.5, 36.5]}}
# computing M values using Eq. (8) from our paper
# (1.13 is the tilt correction factor; see fwd_pre below)
for keys in boreal_dict.keys():
    boreal_dict[keys]['M'] = []
    for i in range(4):
        temp = (boreal_dict[keys]['G'][i]*1.13*boreal_dict[keys]['MSD'][i]*np.pi**2)/8
        boreal_dict[keys]['M'].append(temp)
del temp, i, keys
# computing differences between values of M from this study and those from
# other tree species and boreal regions for each size class
boreal_dict.keys()  # NOTE(review): no-op expression, likely interactive leftover
diff_M = {}
for keys in list(boreal_dict.keys())[1:]:
    diff_M[keys] = ((np.array(boreal_dict['lcajYA']['M'])-np.array(boreal_dict[keys]['M']))/np.array(boreal_dict['lcajYA']['M']))*100
del keys
# mean, mean-absolute, max and min percentage difference pooled across all
# non-reference species and size classes
print(np.mean(np.concatenate((diff_M['llarSK'], diff_M['pmarSK'],
                              diff_M['pglaSK'], diff_M['pmarNT'],
                              diff_M['pglaNT']))))
print(np.mean(np.abs(np.concatenate((diff_M['llarSK'], diff_M['pmarSK'],
                                     diff_M['pglaSK'], diff_M['pmarNT'],
                                     diff_M['pglaNT'])))))
print(np.max(np.concatenate((diff_M['llarSK'], diff_M['pmarSK'],
                             diff_M['pglaSK'], diff_M['pmarNT'],
                             diff_M['pglaNT']))))
print(np.min(np.concatenate((diff_M['llarSK'], diff_M['pmarSK'],
                             diff_M['pglaSK'], diff_M['pmarNT'],
                             diff_M['pglaNT']))))
#%% FWD biomass estimates per size class in 47 larch forest stands
# loading plot characteristics
df_plot = pd.read_excel(wdir+'YA2019_plots.xlsx', sheet_name='plots_summary')
# calculating slope correction factor (s) using Eq. (2) from our paper
# (slope is in degrees; converted to radians before tan)
df_plot['slope_corr'] = np.sqrt(1+(np.tan(np.radians(df_plot['slope'])))**2)
# storing s values per plot in a dictionary
slope_dict = dict(zip(df_plot.plotID, df_plot.slope_corr))
# loading FWD inventory data collected using the line-intersect method
df_count = pd.read_excel(wdir+"YA2019_fwd_transects.xlsx",
                         sheet_name="fwd_count")
# working only with larch FWD and pieces larger than 0.5 cm in diameter
df_count = df_count[(df_count.species=='LC') & (df_count.size_class!=1)]
# creating a function to calculate FWD biomass per size class and plot
# using Eq (4) from our paper
def fwd_pre(df, ref, tilt_corr):
    """Fine woody debris biomass for one inventory row (Eq. 4 of the paper).

    `df` is a row of the FWD count table, `ref` a key of `boreal_dict`
    (species/region whose G and MSD values are used), `tilt_corr` the
    particle tilt correction factor.  Uses the module-level `boreal_dict`
    and `slope_dict` lookups; 30 is the transect length, 8 the Eq. (4)
    denominator constant.
    """
    size_idx = df['size_class'] - 2                 # size classes 2..5 -> list indices 0..3
    g_value = boreal_dict[ref]['G'][size_idx]
    msd_value = boreal_dict[ref]['MSD'][size_idx]
    slope_corr = slope_dict[df['plotID']]
    numerator = (np.pi**2) * df['count'] * g_value * msd_value * tilt_corr * slope_corr
    return numerator / (8 * 30)
# deriving FWD biomass estimates using values of G and MSD from our study and
# those from other tree species and boreal regions
for ref in boreal_dict.keys():
    df_count['prefwd_'+ref] = df_count.apply(lambda row: fwd_pre(row,ref,1.13),
                                             axis=1)
#%% creating Table A1
# one sub-frame per plot, reindexed so every plot has rows for classes 2-5
table_a1 = df_count.set_index('size_class')
table_a1 = [pd.DataFrame(y).reindex([2,3,4,5]) for x, y in table_a1.groupby('plotID',
                                                                            as_index=False)]
for i in range(len(table_a1)):
    # fill plot/species identifiers into the rows created by reindexing,
    # and zero out the missing counts/biomass values
    # NOTE(review): fillna(method=...) is deprecated in recent pandas
    # (use .bfill()/.ffill()) — left unchanged here.
    table_a1[i].plotID.fillna(method='bfill', inplace=True)
    table_a1[i].plotID.fillna(method='pad', inplace=True)
    table_a1[i].species.fillna(method='bfill', inplace=True)
    table_a1[i].species.fillna(method='pad', inplace=True)
    table_a1[i].iloc[:,2:] = table_a1[i].iloc[:,2:].fillna(0)
del i
table_a1 = pd.concat(table_a1)
table_a1.reset_index(drop=False,inplace=True)
# number of plots in which each size class was actually observed
class_count = table_a1[table_a1['count']!=0]
print(class_count.groupby('size_class')['plotID'].count())
table_a1 = table_a1.groupby('size_class').agg({'count':['mean', 'min', 'max'],
                                               'prefwd_llarSK': ['mean', 'std'],
                                               'prefwd_pmarSK': ['mean', 'std'],
                                               'prefwd_pglaSK': ['mean', 'std'],
                                               'prefwd_pmarNT': ['mean', 'std'],
                                               'prefwd_pglaNT': ['mean', 'std'],
                                               'prefwd_lcajYA': ['mean', 'std']})
# per-plot totals across all size classes
df_all = df_count.groupby('plotID', as_index=False).agg({'count':sum,
                                                         'prefwd_llarSK':sum,
                                                         'prefwd_pmarSK':sum,
                                                         'prefwd_pglaSK':sum,
                                                         'prefwd_pmarNT':sum,
                                                         'prefwd_pglaNT':sum,
                                                         'prefwd_lcajYA':sum})
all_classes = df_all.agg({'count':['mean', 'min', 'max'],
                          'prefwd_llarSK': ['mean', 'std'],
                          'prefwd_pmarSK': ['mean', 'std'],
                          'prefwd_pglaSK': ['mean', 'std'],
                          'prefwd_pmarNT': ['mean', 'std'],
                          'prefwd_pglaNT': ['mean', 'std'],
                          'prefwd_lcajYA': ['mean', 'std']})
#%% differences in FWD biomass estimates
# calculating percentage difference in FWD biomass estimates in the 47 larch
# forest stands near Yakutsk using M factors derived for other species and
# boreal regions.
for ref in list(boreal_dict.keys())[1:]:
    df_all['diff_'+ref] = ((df_all['prefwd_lcajYA']-df_all['prefwd_'+ref])/df_all['prefwd_lcajYA'])*100
# the last five columns of df_all are the diff_* series just added
data = [df_all.iloc[:,i].to_list() for i in np.arange(df_all.shape[1]-5,
                                                      df_all.shape[1])]
# NOTE(review): presumably reorders the series to match spplabels below — confirm
data = [data[x] for x in [0,2,1,4,3]]
#%% plotting Figure 3
plt.rcParams["font.family"] = "Arial"
plt.rcParams['figure.dpi'] = 300
cm = 1/2.54  # inch-per-centimetre factor for figsize
spplabels = ['$\it{L. laricina}$, SK', '$\it{P. glauca}$, SK',
             '$\it{P. mariana}$, SK', '$\it{P. glauca}$, NT',
             '$\it{P. mariana}$, NT']
sppcolors = ['#285185', '#3669AC', '#6081D0', '#979AE6', '#B9B3F0']
xtickpos = [1,2,3,4,5]
fig1, ax1 = plt.subplots(figsize=(9*cm,9.85*cm))
bp = ax1.boxplot(data, patch_artist=True, showmeans=True)
# changing color of boxes
for patch, color in zip(bp['boxes'], sppcolors):
    patch.set_facecolor(color)
#changing linewidth of boxes
for box in bp['boxes']:
    box.set(linewidth=0.8)
#changing linewidth of whiskers
for whsk in bp['whiskers']:
    whsk.set(linewidth=0.8)
# changing color of medians
for median in bp['medians']:
    median.set(color ='w', linewidth=0.8)
# changing style and color of means
for mean in bp['means']:
    mean.set(marker="*",
             markersize=5,
             markerfacecolor = "white",
             markeredgecolor = "white")
# changing style and color of fliers
for flier, color in zip(bp['fliers'], sppcolors):
    flier.set_markeredgecolor(color)
    flier.set(marker='+',
              markersize=5,
              markeredgewidth=0.5)
ax1.set_ylabel("Percentage difference (%)", fontsize=7)
ax1.set_xticks(xtickpos)
ax1.set_xticklabels(spplabels, rotation = 45, fontsize=7)
ax1.tick_params(axis='y', which='major', direction='in',
                length=3, right=True, labelsize=7)
ax1.tick_params(axis='x', which='both', bottom=False, top=False)
fig1.tight_layout()
fig1.savefig(wdir+'outputs/Figures/figure3.png')
|
"""Run Tesseract OCR on ocr2.png and save the recognized text to out.txt."""
import pytesseract as tess
from PIL import Image
import cv2            # NOTE(review): unused in this script — candidate for removal
import numpy as np    # NOTE(review): unused in this script — candidate for removal

# Path to the Tesseract binary (Windows install location).
tess.pytesseract.tesseract_cmd = r'C:\Program Files\Tesseract-OCR\tesseract.exe'

# OCR the input image.
imgtext = Image.open('ocr2.png')
text = tess.image_to_string(imgtext)

# `with` guarantees the output file handle is flushed and closed.
with open('out.txt', 'w') as output:
    output.write("CHEMICAL ATTACHED :" + text + '\n')
import scipy as sp
import numpy as np
# `log`, `exp` and `sqrt` were deprecated and then removed from the top-level
# scipy namespace; import them from numpy (which provides them natively) and
# keep only `stats` from scipy.
from scipy import stats
from numpy import log, exp, sqrt

s0 = 100             # Stock price today
x = 100              # Strike price
barrier = 150        # Barrier level
T = 1                # Maturity in years
r = 0.08             # Risk-free rate (continuous compounding)
sigma = 0.3          # Annualized volatility
n_simulation = 1000  # number of Monte-Carlo simulations
def bs_call(S, X, T, r, sigma):
    """Black-Scholes price of a European call option.

    S: spot price, X: strike, T: maturity in years,
    r: continuously-compounded risk-free rate, sigma: annualized volatility.
    Uses numpy explicitly instead of the removed top-level scipy aliases
    (scipy.log/exp/sqrt), so it also works on modern SciPy.
    """
    d1 = (np.log(S/X)+(r+sigma*sigma/2)*T)/(sigma*np.sqrt(T))
    d2 = d1-sigma*np.sqrt(T)
    # Risk-neutral valuation: S*N(d1) - X*e^{-rT}*N(d2)
    return S*stats.norm.cdf(d1)-X*np.exp(-r*T)*stats.norm.cdf(d2)
def up_and_out_call(s0, x, T, r, sigma, n_simulation, barrier):
    """Returns: Call value of an up-and-out barrier option with European call

    Paths follow geometric Brownian motion monitored at 12 steps; any path
    that exceeds `barrier` is knocked out.  Each surviving path is valued
    with the analytic Black-Scholes price (the original author's
    approximation, kept for backward compatibility).  Uses np.random/np.exp
    instead of the removed scipy aliases (sp.random.normal, sp.exp, sp.sqrt).
    """
    n_steps = 12  # Define number of steps (monthly monitoring).
    dt = T/n_steps
    # Loop-invariant quantities hoisted out of the simulation loops.
    drift = (r-0.5*sigma**2)*dt
    vol = sigma*np.sqrt(dt)
    plain_call = bs_call(s0, x, T, r, sigma)  # identical for every surviving path
    total = 0.0
    for _ in range(0, n_simulation):
        sT = s0
        out = False
        for _ in range(0, int(n_steps)):
            e = np.random.normal()
            sT *= np.exp(drift + vol*e)
            if sT > barrier:
                out = True
        if out == False:
            total += plain_call
    return total/n_simulation
# Price the knock-out option with the market parameters defined above.
result = up_and_out_call (s0,x,T,r,sigma,n_simulation,barrier)
print('Price for the Up-and-out Call = ', result)
10,843 | 882cd84ee499ff0e10ccfacae057e9c0423a6701 | from unsup_vvs.network_training.tpu_old_dps.full_imagenet_input import ImageNetInput
from unsup_vvs.network_training.tpu_old_dps.rp_imagenet_input import RP_ImageNetInput
from unsup_vvs.network_training.tpu_old_dps.rp_pbrscenenet_input import PBRSceneNetDepthMltInput
from unsup_vvs.network_training.tpu_old_dps.col_imagenet_input import Col_ImageNetInput
from unsup_vvs.network_training.tpu_old_dps.col_pbrscenenet_input import Col_PBRSceneNetInput
from unsup_vvs.network_training.tpu_old_dps.col_pbr_input import Col_PBRNetInput
from unsup_vvs.network_training.tpu_old_dps.depth_pbrscenenet_input import PBRSceneNetDepthInput
from unsup_vvs.network_training.tpu_old_dps.depth_pbr_input import PBRNetDepthInput
from unsup_vvs.network_training.tpu_old_dps.rp_pbr_input import PBRNetDepthMltInput
from unsup_vvs.network_training.tpu_old_dps.rp_ps_zip_input import PBRSceneNetZipInput
from unsup_vvs.network_training.tpu_old_dps.depth_pbr_zip_input import PBRNetZipDepthInput
from unsup_vvs.network_training.tpu_old_dps.depth_ps_zip_input import PBRSceneNetZipDepthInput
from unsup_vvs.network_training.tpu_old_dps.col_tl_imagenet_input import Col_Tl_Input
from unsup_vvs.network_training.tpu_old_dps.combine_depth_imn_input import DepthImagenetInput
from unsup_vvs.network_training.tpu_old_dps.combine_rp_imagenet_input import Combine_RP_ImageNet_Input
from unsup_vvs.network_training.tpu_old_dps.combine_rp_col_input import Combine_RP_Color_Input
from unsup_vvs.network_training.tpu_old_dps.combine_rci_input import Combine_RCI_Input
from unsup_vvs.network_training.tpu_old_dps.combine_rp_col_ps_input import Combine_RP_Color_PS_Input
from unsup_vvs.network_training.tpu_old_dps.combine_rp_col_input_new import Combine_RP_Color_Input_New
from unsup_vvs.network_training.tpu_old_dps.combine_rdc_input import Combine_RDC_Input
from unsup_vvs.network_training.tpu_old_dps.combine_rdc_imn_input import Combine_RDC_ImageNet_Input
from unsup_vvs.network_training.tpu_data_provider import TPUCombineWorld
from unsup_vvs.network_training.utilities.data_path_utils import get_TPU_data_path
def get_deprecated_val_tpu_topn_dp_params(args):
    """Return the validation-set ``input_fn`` matching ``args.tpu_task``.

    Plain (non-elif) dispatch on the task name; a few branches are refined
    by a secondary flag (``args.rp_zip`` / ``args.depth_zip``) that
    overwrites the first assignment.  The first positional argument of every
    provider is ``is_training`` (False here).  If no branch matches, the
    final ``return`` raises UnboundLocalError (pre-existing behavior).
    """
    if args.tpu_task=='imagenet_rp':
        val_input_fn = ImageNetInput(False, args.sm_loaddir, std=False).input_fn
    if args.tpu_task=='rp':
        val_input_fn = RP_ImageNetInput(False, args.sm_loaddir).input_fn
    if args.tpu_task=='rp_pbr':
        val_input_fn = PBRSceneNetZipInput(
            False, args.sm_loaddir, args.sm_loaddir2).input_fn
        # non-zip variant overrides the zip-based provider
        if args.rp_zip==0:
            val_input_fn = PBRSceneNetDepthMltInput(
                False, args.sm_loaddir, args.sm_loaddir2).input_fn
    if args.tpu_task=='rp_only_pbr':
        val_input_fn = PBRNetDepthMltInput(False, args.sm_loaddir).input_fn
    if args.tpu_task=='colorization':
        val_input_fn = Col_ImageNetInput(
            False, args.sm_loaddir, down_sample=args.col_down,
            col_knn=(args.col_knn==1), col_tl=(args.col_tl==1)).input_fn
    if args.tpu_task=='color_ps':
        val_input_fn = Col_PBRSceneNetInput(
            False, args.sm_loaddir, args.sm_loaddir2,
            down_sample=args.col_down, col_knn=(args.col_knn==1)).input_fn
    if args.tpu_task=='color_pbr':
        val_input_fn = Col_PBRNetInput(
            False, args.sm_loaddir,
            down_sample=args.col_down, col_knn=(args.col_knn==1)).input_fn
    if args.tpu_task=='color_tl':
        val_input_fn = Col_Tl_Input(
            False, args.sm_loaddir,
            down_sample=args.col_down, col_knn=(args.col_knn==1),
            combine_rp=(args.combine_rp==1)).input_fn
    if args.tpu_task=='depth':
        val_input_fn = PBRSceneNetZipDepthInput(
            False, args.sm_loaddir, args.sm_loaddir2,
            ab_depth=(args.ab_depth==1), down_sample=args.depth_down,
            color_dp_tl=(args.color_dp_tl==1), rp_dp_tl=(args.rp_dp_tl==1),
            rpcol_dp_tl=(args.combine_col_rp==1)).input_fn
        if args.depth_zip==0:
            val_input_fn = PBRSceneNetDepthInput(
                False, args.sm_loaddir, args.sm_loaddir2).input_fn
    if args.tpu_task=='depth_pbr':
        val_input_fn = PBRNetZipDepthInput(False, args.sm_loaddir).input_fn
        if args.depth_zip==0:
            val_input_fn = PBRNetDepthInput(False, args.sm_loaddir).input_fn
    if args.tpu_task=='combine_depth_imn':
        val_input_fn = DepthImagenetInput(
            False, args.sm_loaddir, args.sm_loaddir2).input_fn
    if args.tpu_task=='combine_rp_imn':
        val_input_fn = Combine_RP_ImageNet_Input(False, args.sm_loaddir).input_fn
    if args.tpu_task=='combine_rp_col':
        val_input_fn = Combine_RP_Color_Input(
            False, args.sm_loaddir, num_grids=1).input_fn
    if args.tpu_task=='combine_rci':
        val_input_fn = Combine_RCI_Input(False, args.sm_loaddir, num_grids=1).input_fn
    if args.tpu_task=='combine_rp_col_ps':
        val_input_fn = Combine_RP_Color_PS_Input(
            False, args.sm_loaddir, args.sm_loaddir2, num_grids=1).input_fn
    if args.tpu_task=='combine_rdc':
        val_input_fn = Combine_RDC_Input(
            False, args.sm_loaddir, args.sm_loaddir2).input_fn
    if args.tpu_task=='combine_rdc_imn':
        val_input_fn = Combine_RDC_ImageNet_Input(
            False, args.sm_loaddir, args.sm_loaddir2, args.sm_loaddir3).input_fn
    return val_input_fn
def get_deprecated_tpu_train_dp_params(args):
    """Return the training-set ``input_fn`` matching ``args.tpu_task``.

    Mirror of ``get_deprecated_val_tpu_topn_dp_params`` with
    ``is_training=True`` and the additional training-only flags
    (``g_noise``, ``rp_std``, ...).  Note: unlike the validation variant,
    there are no branches for 'color_pbr'-adjacent tasks missing here —
    only the tasks listed below are supported; anything else raises
    UnboundLocalError at the final ``return`` (pre-existing behavior).
    """
    if args.tpu_task=='imagenet_rp':
        data_provider_func = ImageNetInput(
            True, args.sm_loaddir,
            std=False).input_fn
    if args.tpu_task=='rp':
        data_provider_func = RP_ImageNetInput(
            True, args.sm_loaddir,
            g_noise=args.g_noise, std=(args.rp_std==1),
            sub_mean=(args.rp_sub_mean==1),
            grayscale=(args.rp_grayscale==1)).input_fn
    if args.tpu_task=='rp_pbr':
        data_provider_func = PBRSceneNetZipInput(
            True, args.sm_loaddir,
            args.sm_loaddir2, g_noise=args.g_noise,
            std=(args.rp_std==1)).input_fn
        # non-zip variant overrides the zip-based provider
        if args.rp_zip==0:
            data_provider_func = PBRSceneNetDepthMltInput(
                True, args.sm_loaddir,
                args.sm_loaddir2, g_noise=args.g_noise,
                std=(args.rp_std==1)).input_fn
    if args.tpu_task=='rp_only_pbr':
        data_provider_func = PBRNetDepthMltInput(
            True, args.sm_loaddir,
            g_noise=args.g_noise, std=(args.rp_std==1)).input_fn
    if args.tpu_task=='colorization':
        data_provider_func = Col_ImageNetInput(
            True, args.sm_loaddir,
            down_sample=args.col_down, col_knn=args.col_knn==1,
            col_tl=(args.col_tl==1),
            combine_rp=(args.combine_rp==1)).input_fn
    if args.tpu_task=='color_ps':
        data_provider_func = Col_PBRSceneNetInput(
            True, args.sm_loaddir,
            args.sm_loaddir2, down_sample=args.col_down,
            col_knn=args.col_knn==1).input_fn
    if args.tpu_task=='color_pbr':
        data_provider_func = Col_PBRNetInput(
            True, args.sm_loaddir,
            down_sample=args.col_down,
            col_knn=args.col_knn==1).input_fn
    if args.tpu_task=='color_tl':
        data_provider_func = Col_Tl_Input(
            True, args.sm_loaddir,
            down_sample=args.col_down,
            col_knn=args.col_knn==1,
            combine_rp=(args.combine_rp==1)).input_fn
    if args.tpu_task=='depth':
        data_provider_func = PBRSceneNetZipDepthInput(
            True, args.sm_loaddir, args.sm_loaddir2,
            ab_depth=(args.ab_depth==1), down_sample=args.depth_down,
            color_dp_tl=(args.color_dp_tl==1),
            rp_dp_tl=(args.rp_dp_tl==1),
            rpcol_dp_tl=(args.combine_col_rp==1)).input_fn
        if args.depth_zip== 0:
            data_provider_func = PBRSceneNetDepthInput(
                True, args.sm_loaddir, args.sm_loaddir2).input_fn
    if args.tpu_task=='combine_rp_imn':
        data_provider_func = Combine_RP_ImageNet_Input(
            True, args.sm_loaddir).input_fn
    if args.tpu_task=='combine_rp_col':
        data_provider_func = Combine_RP_Color_Input(
            True, args.sm_loaddir, num_grids=1).input_fn
    if args.tpu_task=='combine_rci':
        data_provider_func = Combine_RCI_Input(
            True, args.sm_loaddir,
            num_grids=1).input_fn
    if args.tpu_task=='combine_rp_col_ps':
        data_provider_func = Combine_RP_Color_PS_Input(
            True, args.sm_loaddir,
            args.sm_loaddir2, num_grids=1).input_fn
    if args.tpu_task=='combine_rdc':
        data_provider_func = Combine_RDC_Input(
            True, args.sm_loaddir,
            args.sm_loaddir2).input_fn
    if args.tpu_task=='combine_rdc_imn':
        data_provider_func = Combine_RDC_ImageNet_Input(
            True, args.sm_loaddir,
            args.sm_loaddir2, args.sm_loaddir3).input_fn
    return data_provider_func
|
10,844 | c0d791e4888b59683013ec76d71665693fed722d | #script in Python 3.7
import numpy as np
import matplotlib.pyplot as plt
import math
# S = susceptible individuals
# I = infectious individuals
# β = infectious rate, controls the rate of spread which represents the probability of transmitting disease between a susceptible and an infectious individual
# γ = recovery rate, is determined by the inverse of the average duration of infection
# N = S + I totall population (constant)
# R = β / γ basic reproduction number
#N = int(input("Enter totall population:"))
#print("Totall population :" + N)
N = 1000
# Time series of both compartments; index = time step.
population_S = []
population_S += [N]   # everyone starts susceptible
population_I = []
population_I += [0]   # no initial infections
beta = np.random.rand()    # infection rate, drawn uniformly from [0, 1)
gamma = np.random.rand()   # recovery rate, drawn uniformly from [0, 1)
print(beta / gamma)        # basic reproduction number R = beta/gamma
for time in range(0,1000):
    delta_12 = 0 #from Susceptible to Infectious
    delta_21 = 0 #from Infectious to Susceptible
    R = beta / gamma
    #print(beta)
    if R > 1:
        print("Need of an intervention (R>1)")
        # beta must decrease
        # NOTE(review): math.exp(-time) is ~0 for time >= 1, so the
        # "intervention" effectively zeroes beta after the first step —
        # confirm this is the intended model.
        beta = math.exp(-time)
        R = beta / gamma
    if 0.87 < R < 0.97:
        print("The intervention was effective")
    # NOTE(review): standard SIR uses beta*I/N for the per-susceptible
    # infection probability; this uses beta*S/N — verify against the model.
    prob_of_infection = beta*population_S[-1]/N
    #Susceptible
    for atoms in range(0,population_S[-1]):
        if(np.random.rand() < prob_of_infection):
            delta_12 += 1
    prob_of_recovery = gamma
    #Infectious
    for atoms in range(0,population_I[-1]):
        if(np.random.rand() < prob_of_recovery):
            delta_21 += 1
    #calculating new populations
    N_1 = delta_21 - delta_12
    N_2 = delta_12 - delta_21
    #adding the populations to their populations
    population_S += [population_S[-1] + N_1]
    population_I += [population_I[-1] + N_2]
plt.figure(figsize=(16,9))
plt.rc('font', size=22)
plt.xlabel('Time')
plt.ylabel('Population I, S')
plt.plot(population_S, color='green', linestyle='-', linewidth=4)
plt.plot(population_I, color='red', linestyle='-', linewidth=4)
plt.show()
|
10,845 | 57bbde84ead319e3ab1edcf9548f2934057d6172 | '''
URL handler functions.
'''
import asyncio
import logging;logging.basicConfig(level=logging.INFO)
from coroutine_web import get,post
from models import User,Blog,Comment
__author__='luibebetter'
@get('/')
async def index(request):
    """Handle GET '/': render test.html with all users from the database."""
    logging.info('hello')
    # findwhere() with no filter fetches every User row.
    users=await User.findwhere()
    # '__template__' tells the response middleware which template to render.
    return {
        '__template__':'test.html',
        'users':users
    }
|
10,846 | 9486430f7f6ee00e352f6d5f192d86156a356d1e | import pygame
class label:
    """A simple text label that can be drawn onto a pygame surface.

    Kept lowercase ``label`` for backward compatibility with existing callers.
    """

    def __init__(self, posX, posY, text, fontSize):
        # Top-left position (pixels), text content and font size of the label.
        self.posX = posX
        self.posY = posY
        self.text = text
        self.fontSize = fontSize

    def display(self, fenetre):
        """Render the label text in black and blit it onto ``fenetre``.

        Requires pygame.font to be initialized before the first call.
        """
        # NOTE(review): the font is recreated on every call; if display()
        # runs per frame, consider caching the Font object.
        font = pygame.font.SysFont("bitstreamverasans", self.fontSize)
        # Renamed from `label`, which shadowed the class name.
        rendered = font.render(self.text, 1, (0, 0, 0))
        fenetre.blit(rendered, (self.posX, self.posY))
10,847 | 8057326fabd43ea2771007bf7b39ed087ca051b3 | print('='*20+' REAL PARA DÓLAR '+'='*20)
# Convert an amount in Brazilian reais to US dollars at a fixed 4.15 rate.
real = float(input('Quanto dinheiro você tem em reais:'))
# NOTE(review): floor division truncates to whole dollars, yet the result is
# printed with two decimal places below — true division '/' may have been
# intended.  TODO confirm before changing behaviour.
dolar = real//4.15
print('Com R${:.2f} reais você pode comprar US${:.2f}! :D'.format(real,dolar))
|
10,848 | ed3dcf5949459b2d2683c6e00fe32173c172c5b9 | import os
config = 1          # when truthy, every corpus path below is prefixed with base_dir
base_dir = '/data'  # filesystem root that holds the CORPUS tree

# Relative locations of the corpus, topics and embedding artefacts.
TRAIN_DATA_FOLDER_PATH = '/CORPUS/collection_2018/*.txt'
TOPICS_FOLDER_PATH = '/CORPUS/topics/'
TRAIN_TWEETS_2018 = '/CORPUS/training_data_embeddings/train_data_2018.json'
STATS_SKIP_GRAM = '/CORPUS/embeddings/word2vec/skip-gram/stats.txt'
SKIP_GRAM_VECTORS = '/CORPUS/embeddings/word2vec/skip-gram/vectors.txt'
STATS_CBOW = '/CORPUS/embeddings/word2vec/cbow/stats.txt'
CBOW_VECTORS = '/CORPUS/embeddings/word2vec/cbow/vectors.txt'
STATS_FASTTEXT = '/CORPUS/embeddings/fasttext/stats.txt'
FASTTEXT_VECTORS = '/CORPUS/embeddings/fasttext/vectors.txt'
FASTTEXT_VECTORS_FULL_SG = '/CORPUS/embeddings/full/fasttext/SKIP-GRAM/vectors.txt'
FASTTEXT_STATS_FULL_SG = '/CORPUS/embeddings/full/fasttext/SKIP-GRAM/stats.txt'
FASTTEXT_VECTORS_FULL_CBOW = '/CORPUS/embeddings/full/fasttext/CBOW/vectors.txt'
FASTTEXT_STATS_FULL_CBOW = '/CORPUS/embeddings/full/fasttext/CBOW/stats.txt'
CBOW_VECTORS_FULL = '/CORPUS/embeddings/full/CBOW/vectors.txt'
CBOW_STATS_FULL = '/CORPUS/embeddings/full/CBOW/stats.txt'
TRAIN_TWEETS = '/CORPUS/training_data_embeddings'

if config:
    # Prefix every corpus path with the data root, exactly once each.
    _PATH_NAMES = (
        'TRAIN_DATA_FOLDER_PATH', 'TOPICS_FOLDER_PATH', 'TRAIN_TWEETS_2018',
        'STATS_SKIP_GRAM', 'SKIP_GRAM_VECTORS', 'STATS_CBOW', 'CBOW_VECTORS',
        'STATS_FASTTEXT', 'FASTTEXT_VECTORS', 'FASTTEXT_VECTORS_FULL_SG',
        'FASTTEXT_STATS_FULL_SG', 'FASTTEXT_VECTORS_FULL_CBOW',
        'FASTTEXT_STATS_FULL_CBOW', 'CBOW_VECTORS_FULL', 'CBOW_STATS_FULL',
        'TRAIN_TWEETS',
    )
    for _name in _PATH_NAMES:
        globals()[_name] = base_dir + globals()[_name]
10,849 | 051115cb4f92a9e2190a137f6f962f91b14a8c4b | # Generated by Django 3.1.7 on 2021-04-19 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration altering the ``rating`` field of ``Product``."""

    # Must run after the previous auto-generated migration of the shop app.
    dependencies = [
        ('shop', '0002_auto_20210419_1203'),
    ]

    operations = [
        # Redefine Product.rating as a nullable, blank-able, indexed integer.
        migrations.AlterField(
            model_name='product',
            name='rating',
            field=models.IntegerField(blank=True, db_index=True, null=True, verbose_name='Популярность товара'),
        ),
    ]
|
10,850 | 9ca2d51106f1e0ba4336627b682a4a6804d3d780 |
from nose.plugins import Plugin
import warnings
import sys
import logging
log = logging.getLogger()
def import_item(name):
    """Import and return ``bar`` given the string ``foo.bar``.

    Calling ``bar = import_item("foo.bar")`` is the functional equivalent of
    executing the code ``from foo import bar``.

    Parameters
    ----------
    name : string
        The fully qualified name of the module/package being imported.

    Returns
    -------
    mod : module object
        The module (or attribute of a module) that was imported.
    """
    # On Python 2 __import__ wants byte strings.
    if sys.version_info < (3,):
        if not isinstance(name, bytes):
            name = name.encode()
    package, dot, obj = name.rpartition('.')
    if dot:
        # Dotted path: import the package and fetch the final attribute.
        module = __import__(package, fromlist=[obj])
        try:
            target = getattr(module, obj)
        except AttributeError:
            raise ImportError('No module named %s' % obj)
        return target
    # Un-dotted string: a plain module import.
    return __import__(name)
# Version-dependent lookup of a builtin by name: on Python 2 the builtins live
# in the __builtins__ mapping, on Python 3 in the ``builtins`` module.
if sys.version_info < (3,):
    def from_builtins(k):
        """Return the Python 2 builtin named *k*."""
        return __builtins__[k]
else:
    import builtins

    def from_builtins(k):
        """Return the Python 3 builtin named *k*."""
        return getattr(builtins, k)
class InvalidConfig(Exception):
    """Raised when one or more warning-filter configuration options are invalid."""
class WarningFilter(Plugin):
    """Nose plugin that installs ``warnings`` filters before tests run.

    Filters arrive through ``--warningfilters`` as newline-separated entries
    of pipe-delimited fields: ``action|message|category|module|lineno``
    (trailing fields optional), mirroring ``warnings.filterwarnings``.
    """

    def options(self, parser, env):
        """
        Add options to command line.
        """
        super(WarningFilter, self).options(parser, env)
        parser.add_option("--warningfilters",
                          default=None,
                          help="Treat warnings that occur WITHIN tests as errors.")

    def configure(self, options, conf):
        """
        Configure plugin.

        Parses each ``--warningfilters`` entry, resolves the category field
        (dotted import path or builtin name) to a class, and registers the
        filter.  Raises InvalidConfig if any entry is unusable.
        """
        invalid_config = False
        if not getattr(options, 'warningfilters', None):
            return
        for opt in options.warningfilters.split('\n'):
            values = [s.strip() for s in opt.split('|')]
            # if message empty match all messages.
            # BUG FIX: a category is only present from the third field on; the
            # previous guard (len(values) >= 2) still indexed values[2] and
            # raised IndexError for two-field filters such as "ignore|msg".
            if len(values) >= 3:
                if '.' in values[2]:
                    # Dotted path: import the warning category class.
                    try:
                        values[2] = import_item(values[2])
                    except ImportError:
                        log.warning('The following config value seem to be wrong: %s'%opt, exc_info=True)
                        invalid_config = True
                        continue
                else:
                    # Bare name: look the category up among the builtins.
                    values[2] = from_builtins(values[2])
            try:
                warnings.filterwarnings(*values)
            except AssertionError:
                # filterwarnings validates its arguments with assertions.
                log.warning('The following configuration option seem to use an error: %s' % opt, exc_info=True)
                invalid_config = True
        if invalid_config:
            raise InvalidConfig('One or more configuration option where wrong, aborting.')
        super(WarningFilter, self).configure(options, conf)

    def prepareTestRunner(self, runner):
        """
        Treat warnings as errors.
        """
        return WarningFilterRunner(runner)
class WarningFilterRunner(object):
    """Transparent wrapper that delegates test execution to another runner."""

    def __init__(self, runner):
        # The wrapped nose runner; all work is forwarded to it.
        self.runner = runner

    def run(self, test):
        """Run *test* on the wrapped runner and return its result."""
        return self.runner.run(test)
|
10,851 | f5c2886a1db5a8dff6fe87414c4b8be1486a68da | import datetime
from django.db import models
from django.utils import timezone
# Create your models here.
class Issue(models.Model):
    """A tracked work item that owns zero or more related Task rows."""
    issue_title = models.CharField(max_length=200)
    # Workflow states an issue can be in; stored value == display value.
    STATUS_LIST = (('Draft', 'Draft'),
                   ('Ready to review', 'Ready to review'),
                   ('Approved', 'Approved'),
                   ('In Progress', 'In Progress'),
                   ('Done', 'Done'))
    status = models.CharField(max_length=15, choices=STATUS_LIST, default='Draft')
    priority = models.IntegerField(default=1)
    submitted_date = models.DateTimeField('submitted date', default=timezone.now)
    objective = models.TextField(max_length=400, default='Objective is not defined.')
    description = models.TextField(max_length=800, default='description is not defined.')

    def __str__(self):
        return self.issue_title

    def flow_check(self):
        """Return the status of the first related Task (None when there are none).

        NOTE(review): ``check_status`` is assigned but never used, and the loop
        returns on its first iteration — presumably some aggregation over all
        tasks was intended.  TODO confirm before relying on this.
        """
        check_status = self.status
        for t in self.task_set.all():
            return t.status
class Task(models.Model):
    """A unit of work belonging to exactly one Issue (cascade-deleted with it)."""
    task_title = models.CharField(max_length=200)
    # Workflow states a task can be in; stored value == display value.
    STATUS_LIST = (('Open', 'Open'),
                   ('In Progress', 'In Progress'),
                   ('Done', 'Done'))
    status = models.CharField(max_length=8, choices=STATUS_LIST, default='Open')
    priority = models.IntegerField(default=1)
    submitted_date = models.DateTimeField('date submitted', default=timezone.now)
    issue = models.ForeignKey(Issue, on_delete=models.CASCADE)
    description = models.TextField(max_length=800, default='description is not defined.')

    def __str__(self):
        return self.task_title
|
10,852 | 71315a3c867bc0c386139cb2e2ada264b26bdaa8 | # -*- coding: utf-8 -*-
"""
opentelematicsapi
This file was automatically generated by APIMATIC v2.0 ( https://apimatic.io ).
"""
class Driver(object):
    """Data model for a single driver record returned by the telematics API.

    Attributes:
        id (string): The id of this Driver object
        provider_id (string): The unique 'Provider ID' of the TSP
        server_time (string): Date and time when this object was received at
            the TSP
        username (string): a username of this driver
        driver_license_number (string): the driver's license number
        country (string): short code for the country of the region dictating
            the specific break rules
        region (string): short code for the country's
            region/state/province/territory dictating the specific break rules
        driver_home_terminal (string): the home terminal of the driver
    """

    # Maps Python attribute names to their wire (API) property names.
    _names = {
        "id":'id',
        "provider_id":'providerId',
        "server_time":'serverTime',
        "username":'username',
        "driver_license_number":'driverLicenseNumber',
        "country":'country',
        "region":'region',
        "driver_home_terminal":'driverHomeTerminal'
    }

    def __init__(self,
                 id=None,
                 provider_id=None,
                 server_time=None,
                 username=None,
                 driver_license_number=None,
                 country=None,
                 region=None,
                 driver_home_terminal=None):
        """Store every driver field; each one defaults to None."""
        self.id = id
        self.provider_id = provider_id
        self.server_time = server_time
        self.username = username
        self.driver_license_number = driver_license_number
        self.country = country
        self.region = region
        self.driver_home_terminal = driver_home_terminal

    @classmethod
    def from_dictionary(cls, dictionary):
        """Create a Driver from a deserialized API response dictionary.

        Args:
            dictionary (dict): mapping whose keys use the API property names
                declared in ``_names``; missing keys become None.

        Returns:
            Driver or None: a populated instance, or None when *dictionary*
            is None.
        """
        if dictionary is None:
            return None
        # Look up each attribute by its wire name; absent keys default to None.
        return cls(**{attr: dictionary.get(api_key)
                      for attr, api_key in cls._names.items()})
|
10,853 | 37750bbb733b92c7e0da72e695b34c8130356b4d | money=int(input("小明身上有多少錢:"))
# Count how many of the machine's drinks Xiao Ming can afford individually.
# NOTE(review): depends on `money` being read from stdin just above this span.
kind=int(input("販賣機有幾種飲料:"))
list1=[]   # price of each drink, one entry per kind
total=0    # number of drinks whose price is within budget
for i in range(kind):
    # One price per line on stdin.
    price=int(input())
    list1.append(price)
for i in range(kind):
    if(money>=list1[i]):
        total+=1
print(total)
10,854 | 951162ce55c2729b6ec1a7667761d1074ad2ecc0 | import os
from lxml.cssselect import CSSSelector
from lxml import html
import utils
import unicodedata
import string
import re
def read_sentence(path):
    """Extract normalised phrases from the HTML document at *path*.

    Parses the file with lxml, pulls the text of every paragraph inside the
    ``.DocumentPage-content`` container, normalises each one with
    ``format_phrase`` and returns only those kept by ``filter_phrases``
    (a lazy filter object).
    """
    with open(path, "r") as file:
        doc = html.fromstring("".join(file.readlines()))
    phrases = []
    for el in doc.cssselect(".DocumentPage-content p"):
        phrases.append(format_phrase(el.text_content()))
    return filter_phrases(phrases)
# Characters stripped before tokenising: all ASCII punctuation, newlines and
# the euro sign; digits are removed separately.
re_punctua = re.compile('[%s\n€]' % re.escape(string.punctuation))
re_numbers = re.compile('[0-9]')


def filter_phrases(phrases):
    """Lazily keep only phrases containing at least three space-separated tokens."""
    return filter(lambda phrase: len(phrase.split(" ")) > 2, phrases)


def format_phrase(phrase):
    """Normalise *phrase* for downstream processing.

    Lowercases, strips accents, replaces punctuation and digits with spaces,
    collapses the legal abbreviations fl/fls -> "flsn" and art/arts -> "artn",
    drops tokens of one or two characters and re-joins with single spaces.
    """
    cleaned = strip_accents(phrase.lower())
    cleaned = re_punctua.sub(' ', cleaned)
    cleaned = re_numbers.sub(' ', cleaned)
    tokens = cleaned.split()
    tokens = ["flsn" if t in ("fl", "fls") else t for t in tokens]
    tokens = ["artn" if t in ("art", "arts") else t for t in tokens]
    return " ".join(t for t in tokens if len(t) > 2)


def strip_accents(text):
    """Return *text* with accents removed (NFD-decompose, drop non-ASCII)."""
    decomposed = unicodedata.normalize('NFD', text)
    ascii_bytes = decomposed.encode('ascii', 'ignore')
    return str(ascii_bytes.decode("utf-8"))
def generate(input_path, output_path):
    """Write every normalised phrase from the documents under *input_path*
    to *output_path*, one phrase per line.

    Each processed file's path is printed as simple progress output.
    """
    with open(output_path, "w") as output:
        for file_path in utils.list_files(input_path):
            print(file_path)
            for phrase in read_sentence(file_path):
                output.write(phrase + "\n")
if __name__ == "__main__":
    # Default corpus locations when run as a script.
    generate(
        "data/sentences",
        "data/phrases.csv"
    )
10,855 | f6e4572f12ec6583ef0eb06fd4467796bbca8feb | import shutil, random, os
import zipfile
# Move a random 30% sample of every class folder under ./Data into a
# parallel folder under ./Testing (train/test split on disk).
for foldername in os.listdir("./Data"):
    data_source = f'./Data/{foldername}/'
    testing_dir = f'./Testing/{foldername}/'
    no_of_images = len(os.listdir(data_source))
    # int() truncates, so the sample is at most 30% of the images.
    testing_sample_size = int(no_of_images * 0.3)
    images = random.sample(os.listdir(data_source), testing_sample_size)
    # Create the destination folder once per class instead of once per image
    # (the original called makedirs inside the inner loop).
    os.makedirs(os.path.dirname(testing_dir), exist_ok=True)
    for image in images:
        data_source_path = os.path.join(data_source, image)
        shutil.move(data_source_path, testing_dir + image)
# shutil.make_archive('Training', 'zip', 'Data')
# shutil.make_archive('Testing', 'zip', 'Testing')
10,856 | 7ba4d2eeb9c7244a7fd4a997a65c700af9b17299 | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import Length, DataRequired
class SubmitForm(FlaskForm):
    """Flask-WTF form with a single free-text 'Item' field.

    NOTE(review): SubmitField and the validators are imported above but not
    used here — presumably the submit button is rendered manually; confirm.
    """
    item_field = StringField('Item')
|
10,857 | f25006477f15e19100de67bde91c876b692eef1d | '''The MNIST dataset
'''
from decaf.layers.data import ndarraydata
import numpy as np
import os
class MNISTDataLayer(ndarraydata.NdarrayDataLayer):
    """Data layer that serves the MNIST digit images and labels.

    Images are loaded from the raw IDX files, scaled to [0, 1] and exposed
    4-dimensionally as (num, 28, 28, 1); labels are integer class ids.
    """
    NUM_TRAIN = 60000
    NUM_TEST = 10000
    IMAGE_DIM = (28, 28)

    def __init__(self, **kwargs):
        """Initialize the mnist dataset.

        kwargs:
            is_training: whether to load the training data. Default True.
            rootfolder: the folder that stores the mnist data.
            dtype: the data type. Default numpy.float64.
        """
        is_training = kwargs.get('is_training', True)
        rootfolder = kwargs['rootfolder']
        dtype = kwargs.get('dtype', np.float64)
        self._load_mnist(rootfolder, is_training, dtype)
        # normalize data to [0, 1].
        self._data /= 255.
        ndarraydata.NdarrayDataLayer.__init__(
            self, sources=[self._data, self._label], **kwargs)

    def _load_mnist(self, rootfolder, is_training, dtype):
        """Read the train or test images and labels from the raw IDX files.

        BUG FIX: the labels were cast with ``np.int``, an alias removed in
        NumPy 1.24 — the builtin ``int`` keeps the original behaviour.
        """
        if is_training:
            self._data = self._read_byte_data(
                os.path.join(rootfolder, 'train-images-idx3-ubyte'),
                16, (MNISTDataLayer.NUM_TRAIN,) + \
                MNISTDataLayer.IMAGE_DIM).astype(dtype)
            self._label = self._read_byte_data(
                os.path.join(rootfolder, 'train-labels-idx1-ubyte'),
                8, [MNISTDataLayer.NUM_TRAIN]).astype(int)
        else:
            self._data = self._read_byte_data(
                os.path.join(rootfolder, 't10k-images-idx3-ubyte'),
                16, (MNISTDataLayer.NUM_TEST,) + \
                MNISTDataLayer.IMAGE_DIM).astype(dtype)
            self._label = self._read_byte_data(
                os.path.join(rootfolder, 't10k-labels-idx1-ubyte'),
                8, [MNISTDataLayer.NUM_TEST]).astype(int)
        # In the end, we will make the data 4-dimensional (num * 28 * 28 * 1)
        self._data.resize(self._data.shape + (1,))

    def _read_byte_data(self, filename, skipbytes, shape):
        """Read raw uint8 values from *filename* after a header of
        *skipbytes* bytes and reshape them to *shape*."""
        fid = open(filename, 'rb')
        fid.seek(skipbytes)
        nbytes = np.prod(shape)
        data = np.fromfile(fid, dtype=np.uint8, count=nbytes)
        data.resize(shape)
        return data
10,858 | c28994b0232595fa99bf9b9d03778a6612ccd065 | from collections import namedtuple
# Demonstrate namedtuple field access with a 2-D dot product.
Point = namedtuple('Point', 'x,y')
pt1 = Point(1, 2)
pt2 = Point(3, 4)
dot_product = sum(a * b for a, b in zip(pt1, pt2))
print(dot_product)

# Field names may also be given space-separated.
Car = namedtuple('Car', 'Price Mileage Color Class')
xyz = Car(Price=100000, Mileage=30, Color='Red', Class='Y')
print(xyz.Color)
# HackerRank-style exercise: read n student rows whose columns are named on
# the second stdin line, then print the average of the MARKS column.
n = int(input())
a = input()  # space-separated column names, must include MARKS
total = 0
Student = namedtuple('Student', a)
for _ in range(n):
    student = Student(*input().split())
    # BUG FIX: read MARKS from the row instance, not from the class —
    # `Student.MARKS` is the field descriptor and int() of it raises.
    total += int(student.MARKS)
print('{:.2f}'.format(total/n))
10,859 | 3254a9559c2dd2dc18dc2d11ac176a513c2f785f | """
题目描述
输入一个链表,按链表从尾到头的顺序返回一个ArrayList。
"""
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.val = x      # payload carried by this node
        self.next = None  # successor node; None marks the tail
class Solution1:
    """Return a linked list's values tail-first using a reversed list."""

    def printListFromTailToHead(self, listNode):
        """Collect node values head-to-tail, then return them reversed.

        Brute force: walk the list once appending each value, and return the
        collected list reversed with a [::-1] slice.

        :param listNode: head node of the list (may be None)
        :return: list of values ordered tail to head
        """
        values = []
        node = listNode
        while node:
            values.append(node.val)
            node = node.next
        return values[::-1]
class Solution2:
    """Return a linked list's values tail-first using an explicit stack."""

    def printListFromTailToHead(self, listNode):
        """Push every value onto a stack, then pop them all into the result.

        "Tail to head" naturally suggests LIFO order: values pushed while
        walking head-to-tail come back off the stack tail-to-head.

        :param listNode: head node of the list (may be None)
        :return: list of values ordered tail to head
        """
        stack = []
        node = listNode
        while node:
            stack.append(node.val)
            node = node.next
        result = []
        while stack:
            result.append(stack.pop())
        return result
class Solution3:
    # Recursive variants.
    def __init__(self):
        # Shared accumulator used by printListFromTailToHead_1.
        self.res = []

    def printListFromTailToHead_1(self, listNode):
        """
        Since a stack works, recursion is the natural next step: for each node,
        first recursively emit everything after it, then append the node's own
        value, so the output comes out reversed.

        Drawback: for a very long list the recursion depth grows with the list
        length and can overflow the call stack, so this is less robust than the
        explicit-stack version.

        NOTE(review): self.res is never cleared, so calling this twice on the
        same instance appends the second traversal after the first — confirm
        that is acceptable to callers.

        :param listNode: head node of the list (may be None)
        :return: list of values ordered tail to head
        """
        if listNode:
            self.printListFromTailToHead_1(listNode.next)
            self.res.append(listNode.val)
        return self.res

    def printListFromTailToHead_2(self, listNode):
        """
        Second recursive style: an inner helper closes over a fresh local
        list, so repeated calls do not share state.

        :param listNode: head node of the list (may be None)
        :return: list of values ordered tail to head
        """
        res = []
        def printListNode(listNode):
            if listNode:
                printListNode(listNode.next)
                res.append(listNode.val)
        printListNode(listNode)
        return res
if __name__ == '__main__':
    # Build the list 1 -> 2 -> 3 -> 4 and print its values tail-first.
    listNode = ListNode(1)
    listNode_1 = ListNode(2)
    listNode_2 = ListNode(3)
    listNode_3 = ListNode(4)
    listNode.next = listNode_1
    listNode_1.next = listNode_2
    listNode_2.next = listNode_3
    s = Solution3()
    res = s.printListFromTailToHead_2(listNode)
    print(res)  # expected: [4, 3, 2, 1]
|
10,860 | 4e40288a1ae1dd00c32a3503f50aa12f6ab15be1 | import logging
import unittest
from unittest import mock
from pika.exceptions import AMQPConnectionError, NackError, UnroutableError
from sdc.rabbit import DurableExchangePublisher, ExchangePublisher, QueuePublisher
from sdc.rabbit.exceptions import PublishMessageError
from sdc.rabbit.test.test_data import test_data
# Broker endpoints used throughout the tests: the "good" URLs use the default
# AMQP port 5672 (these tests assume a reachable broker there — TODO confirm),
# the "bad" URLs use port 672 so connecting fails, and loop_urls start with a
# bad URL to exercise fail-over to the good one.
good_urls = ['amqp://guest:guest@0.0.0.0:5672', 'amqp://guest:guest@0.0.0.0:5672']
bad_urls = ['amqp://guest:guest@0.0.0.0:672', 'amqp://guest:guest@0.0.0.0:672']
loop_urls = ['amqp://guest:guest@0.0.0.0:672', 'amqp://guest:guest@0.0.0.0:5672']

# Names of the rabbit objects the publishers declare.
queue_name = 'test_queue'
exchange_name = 'test_exchange'
durable_exchange_name = 'test_durable'
class TestPublisher(unittest.TestCase):
    """Integration tests for the queue, exchange and durable-exchange
    publishers: construction defaults, connect/disconnect behaviour, URL
    fail-over, delivery confirmation and publish error handling.

    NOTE(review): the tests that connect successfully require a live broker
    at good_urls, and the assertions on ``cm.output[8]`` depend on the exact
    number of log records emitted before the message of interest.
    """
    logger = logging.getLogger(__name__)

    # One publisher of each flavour against good, bad and confirm-delivery
    # configurations, shared by all tests.
    queue_publisher = QueuePublisher(good_urls, queue_name)
    bad_queue_publisher = QueuePublisher(bad_urls, queue_name)
    confirm_delivery_queue_publisher = QueuePublisher(good_urls, queue_name, confirm_delivery=True)
    exchange_publisher = ExchangePublisher(good_urls, exchange_name)
    bad_exchange_publisher = ExchangePublisher(bad_urls, exchange_name)
    confirm_delivery_exchange_publisher = ExchangePublisher(good_urls, exchange_name, confirm_delivery=True)
    durable_exchange_publisher = DurableExchangePublisher(good_urls, durable_exchange_name)
    bad_durable_exchange_publisher = DurableExchangePublisher(bad_urls, durable_exchange_name)
    confirm_delivery_durable_exchange_publisher = DurableExchangePublisher(good_urls, durable_exchange_name, confirm_delivery=True)

    def test_incomplete_publisher(self):
        # A Publisher subclass that overrides nothing must raise
        # NotImplementedError from the abstract hooks.
        from sdc.rabbit.publishers import Publisher

        class BadPublisher(Publisher):
            pass

        this_publisher = BadPublisher(good_urls[:1])
        with self.assertRaises(NotImplementedError):
            this_publisher._do_publish('test')
        with self.assertRaises(NotImplementedError):
            this_publisher._declare()
        with self.assertRaises(PublishMessageError):
            this_publisher.publish_message('test')

    def test_queue_init(self):
        this_publisher = QueuePublisher(good_urls, queue_name)
        self.assertEqual(this_publisher._urls, good_urls)
        self.assertEqual(this_publisher._queue, queue_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertEqual(this_publisher._durable_queue, True)

    def test_exchange_init(self):
        this_publisher = ExchangePublisher(good_urls, exchange_name)
        self.assertEqual(this_publisher._urls, good_urls)
        self.assertEqual(this_publisher._exchange, exchange_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertEqual(this_publisher._durable_exchange, False)

    def test_durable_exchange_init(self):
        this_publisher = DurableExchangePublisher(good_urls, durable_exchange_name)
        self.assertEqual(this_publisher._urls, good_urls)
        self.assertEqual(this_publisher._exchange, durable_exchange_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertEqual(this_publisher._durable_exchange, True)

    # The loop_urls publishers must fall through the failing first URL and
    # connect on the second.
    def test_queue_connect_loops_correctly(self):
        this_publisher = QueuePublisher(loop_urls, queue_name)
        self.assertEqual(this_publisher._urls, loop_urls)
        self.assertEqual(this_publisher._queue, queue_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertTrue(this_publisher._connect())

    def test_exchange_connect_loops_correctly(self):
        this_publisher = ExchangePublisher(loop_urls, exchange_name)
        self.assertEqual(this_publisher._urls, loop_urls)
        self.assertEqual(this_publisher._exchange, exchange_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertTrue(this_publisher._connect())

    def test_durable_exchange_connect_loops_correctly(self):
        this_publisher = DurableExchangePublisher(loop_urls, durable_exchange_name)
        self.assertEqual(this_publisher._urls, loop_urls)
        self.assertEqual(this_publisher._exchange, durable_exchange_name)
        self.assertEqual(this_publisher._arguments, {})
        self.assertEqual(this_publisher._connection, None)
        self.assertEqual(this_publisher._channel, None)
        self.assertTrue(this_publisher._connect())

    # Bad URLs must surface AMQPConnectionError; confirm-delivery publishers
    # must log that delivery confirmation was enabled.
    def test_queue_connect_amqp_connection_error(self):
        with self.assertRaises(AMQPConnectionError):
            self.bad_queue_publisher._connect()

    def test_queue_connect_confirm_delivery_true(self):
        with self.assertLogs(level='INFO') as cm:
            self.confirm_delivery_queue_publisher._connect()
        msg = 'Enabled delivery confirmation'
        self.assertIn(msg, cm.output[8])

    def test_exchange_connect_amqp_connection_error(self):
        with self.assertRaises(AMQPConnectionError):
            self.bad_exchange_publisher._connect()

    def test_exchange_connect_confirm_delivery_true(self):
        with self.assertLogs(level='INFO') as cm:
            self.confirm_delivery_exchange_publisher._connect()
        msg = 'Enabled delivery confirmation'
        self.assertIn(msg, cm.output[8])

    def test_durable_exchange_connect_amqp_connection_error(self):
        with self.assertRaises(AMQPConnectionError):
            self.bad_durable_exchange_publisher._connect()

    def test_durable_exchange_connect_confirm_delivery_true(self):
        with self.assertLogs(level='INFO') as cm:
            self.confirm_delivery_durable_exchange_publisher._connect()
        msg = 'Enabled delivery confirmation'
        self.assertIn(msg, cm.output[8])

    # Happy-path connect/disconnect for each flavour.
    def test_queue_connect_amqpok(self):
        result = self.queue_publisher._connect()
        self.assertEqual(result, True)

    def test_queue_disconnect_ok(self):
        self.queue_publisher._connect()
        with self.assertLogs(level='DEBUG') as cm:
            self.queue_publisher._disconnect()
        msg = 'Disconnected from rabbit'
        self.assertIn(msg, cm[1][-1])

    def test_exchange_connect_amqpok(self):
        result = self.exchange_publisher._connect()
        self.assertEqual(result, True)

    def test_exchange_disconnect_ok(self):
        self.exchange_publisher._connect()
        with self.assertLogs(level='DEBUG') as cm:
            self.exchange_publisher._disconnect()
        msg = 'Disconnected from rabbit'
        self.assertIn(msg, cm[1][-1])

    def test_durable_exchange_connect_amqpok(self):
        result = self.durable_exchange_publisher._connect()
        self.assertEqual(result, True)

    def test_durable_exchange_disconnect_ok(self):
        self.durable_exchange_publisher._connect()
        with self.assertLogs(level='DEBUG') as cm:
            self.durable_exchange_publisher._disconnect()
        msg = 'Disconnected from rabbit'
        self.assertIn(msg, cm[1][-1])

    # Disconnecting twice must be tolerated and logged, not raised.
    def test_queue_disconnect_already_closed_connection(self):
        self.queue_publisher._connect()
        self.queue_publisher._disconnect()
        with self.assertLogs(level='DEBUG') as cm:
            self.queue_publisher._disconnect()
        msg = 'Close called on closed connection'
        self.assertIn(msg, cm.output[1])

    def test_exchange_disconnect_already_closed_connection(self):
        self.exchange_publisher._connect()
        self.exchange_publisher._disconnect()
        with self.assertLogs(level='DEBUG') as cm:
            self.exchange_publisher._disconnect()
        msg = 'Close called on closed connection'
        self.assertIn(msg, cm.output[1])

    def test_durable_exchange_disconnect_already_closed_connection(self):
        self.durable_exchange_publisher._connect()
        self.durable_exchange_publisher._disconnect()
        with self.assertLogs(level='DEBUG') as cm:
            self.durable_exchange_publisher._disconnect()
        msg = 'Close called on closed connection'
        self.assertIn(msg, cm.output[1])

    # Publish behaviour: success logging, and NackError / UnroutableError /
    # generic exceptions wrapped (or propagated) appropriately.
    def test_queue_publish_message_no_connection(self):
        with self.assertRaises(PublishMessageError):
            self.bad_queue_publisher.publish_message(test_data['valid'])

    def test_queue_publish(self):
        """Test that when a message is successfully published, a result of True is given and
        the correct messages are logged.
        """
        self.queue_publisher._connect()
        with self.assertLogs(level='INFO') as cm:
            result = self.queue_publisher.publish_message(test_data['valid'])
        self.assertEqual(True, result)
        self.assertIn('Published message to queue', cm.output[8])

    def test_queue_publish_nack_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = NackError('a')
            self.queue_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.queue_publisher.publish_message(test_data['valid'])

    def test_queue_publish_unroutable_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = UnroutableError('a')
            self.queue_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.queue_publisher.publish_message(test_data['valid'])

    def test_queue_publish_generic_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = Exception()
            self.queue_publisher._connect()
            with self.assertRaises(Exception):
                self.queue_publisher.publish_message(test_data['valid'])

    def test_exchange_publish_message_no_connection(self):
        with self.assertRaises(PublishMessageError):
            self.bad_exchange_publisher.publish_message(test_data['valid'])

    def test_exchange_publish(self):
        """Test that when a message is successfully published, a result of True is given and
        the correct messages are logged.
        """
        self.exchange_publisher._connect()
        with self.assertLogs(level='INFO') as cm:
            result = self.exchange_publisher.publish_message(test_data['valid'])
        self.assertEqual(True, result)
        self.assertIn('Published message to exchange', cm.output[8])

    def test_exchange_publish_nack_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = NackError('a')
            self.exchange_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.exchange_publisher.publish_message(test_data['valid'])

    def test_exchange_publish_unroutable_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = UnroutableError('a')
            self.exchange_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.exchange_publisher.publish_message(test_data['valid'])

    def test_exchange_publish_generic_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = Exception()
            self.exchange_publisher._connect()
            with self.assertRaises(Exception):
                self.exchange_publisher.publish_message(test_data['valid'])

    def test_durable_exchange_publish_message_no_connection(self):
        with self.assertRaises(PublishMessageError):
            self.bad_durable_exchange_publisher.publish_message(test_data['valid'])

    def test_durable_exchange_publish(self):
        """Test that when a message is successfully published, a result of True is given and
        the correct messages are logged.
        """
        self.durable_exchange_publisher._connect()
        with self.assertLogs(level='INFO') as cm:
            result = self.durable_exchange_publisher.publish_message(test_data['valid'])
        self.assertEqual(True, result)
        self.assertIn('Published message to exchange', cm.output[8])

    def test_durable_exchange_publish_nack_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = NackError('a')
            self.durable_exchange_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.durable_exchange_publisher.publish_message(test_data['valid'])

    def test_durable_exchange_publish_unroutable_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = UnroutableError('a')
            self.durable_exchange_publisher._connect()
            with self.assertRaises(PublishMessageError):
                self.durable_exchange_publisher.publish_message(test_data['valid'])

    def test_durable_exchange_publish_generic_error(self):
        mock_method = 'pika.adapters.blocking_connection.BlockingChannel.basic_publish'
        with mock.patch(mock_method) as barMock:
            barMock.side_effect = Exception()
            self.durable_exchange_publisher._connect()
            with self.assertRaises(Exception):
                self.durable_exchange_publisher.publish_message(test_data['valid'])
|
10,861 | c6cb57b7892c4db9ba6402271cd23ba3fbba4f41 | from django.contrib import admin
from .models import Key_Words
from .models import Entry_Linkpool
# Expose both models in the Django admin with the default ModelAdmin options.
admin.site.register(Key_Words)
admin.site.register(Entry_Linkpool)
# Register your models here.
10,862 | 73d2cc2daa8b6e7e8f626a92c9c5b21200bea732 | """log_importeds table
Revision ID: 552caa2b2519
Revises:
Create Date: 2018-12-05 19:16:15.071723
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '552caa2b2519'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the ``log_imported`` table with indexes on id and sender_address."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('log_imported',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('date', sa.DateTime(), nullable=True),
    sa.Column('sender_address', sa.String(length=255), nullable=True),
    sa.Column('recipient_address', sa.String(length=255), nullable=True),
    sa.Column('recipient_count', sa.Integer(), nullable=True),
    sa.Column('return_path', sa.String(length=255), nullable=True),
    sa.Column('client_hostname', sa.String(length=255), nullable=True),
    sa.Column('client_ip', sa.String(length=100), nullable=True),
    sa.Column('server_hostname', sa.String(length=255), nullable=True),
    sa.Column('server_ip', sa.String(length=100), nullable=True),
    sa.Column('original_client_ip', sa.String(length=100), nullable=True),
    sa.Column('original_server_ip', sa.String(length=100), nullable=True),
    sa.Column('event_id', sa.String(length=50), nullable=True),
    sa.Column('total_bytes', sa.Integer(), nullable=True),
    sa.Column('connector_id', sa.String(length=50), nullable=True),
    sa.Column('message_subject', sa.String(length=255), nullable=True),
    sa.Column('source', sa.String(length=50), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_log_imported_id'), 'log_imported', ['id'], unique=False)
    op.create_index(op.f('ix_log_imported_sender_address'), 'log_imported', ['sender_address'], unique=False)
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: drop the indexes, then the ``log_imported`` table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_log_imported_sender_address'), table_name='log_imported')
    op.drop_index(op.f('ix_log_imported_id'), table_name='log_imported')
    op.drop_table('log_imported')
    # ### end Alembic commands ###
|
10,863 | 443619613a6007ee3fad4eba3ae5f9c60f5bd23f | """
Em python um módulo é lido como se fosse um script. Deste modo, sempre que
um módulo for importado será executado como um script. Por isso tomar cuidado
ao importar módulos.
Se o objetivo for criar um módulo que funcione como um script, sendo executando
diretamente, pode ser necessário utilizar __name__ == "__main__". Com isso,
ao executar o módulo, a ordem de execução do script será definida de acordo com
o conteúdo desta condição.
The simplest explanation for the __name__ variable (imho) is the following:
Create the following files.
modulo a
# a.py
import b
and
modulo b
# b.py
print "Hello World from %s!" % __name__
if __name__ == '__main__':
print "Hello World again from %s!" % __name__
Running them will get you this output:
$ python a.py
Hello World from b!
As you can see, when a module is imported, Python sets globals()['__name__'] in this module to the module's name.
Executando o módulo b como se fosse um script.
$ python b.py
Hello World from __main__!
Hello World again from __main__!
As you can see, when a file is executed, Python sets globals()['__name__'] in this file to "__main__".
Só é executado o que está dentro da condição.
font: https://stackoverflow.com/questions/419163/what-does-if-name-main-do
"""
import sys
def erro(msg):
    """Print an error message and abort the process with exit status 1."""
    print("Erro:", msg)
    raise SystemExit(1)  # equivalent to sys.exit(1)
def inc(x):
    """Return *x* incremented by one."""
    return x + 1
def dec(x):
    """Return *x* decremented by one."""
    return x - 1
def quadrado(x):
    """Return the square of *x*."""
    return x ** 2
if __name__ == "__main__":
print(inc(10))
print(dec(10))
print(quadrado(5))
input("Presione ENTER para sair...")
|
10,864 | 6f155a5fd253c8a8b8c6b7f17b3d00143af4c983 | #Irma Gómez Carmona, A01747743
#Menú con ciclos while para ejecutar las opciones
def seleccionarOpcion():
    """Print the menu and return the option number typed by the user.

    Raises ValueError if the typed input is not an integer (no validation).
    """
    print("")
    print("Misión 07. Ciclos While")
    print("Autor: Irma Gómez Carmona ")
    print("Matrícula: A01747743")
    print("1. Calcular divisiones") # menu options
    print("2. Encontrar el mayor")
    print("3. Salir")
    opcionM = int(input("Teclea tu opción:"))
    print("")
    return opcionM
def dividir(dividendo, divisor):
    """Integer division by repeated subtraction; prints 'a / b = q , sobra r'.

    Returns (quotient, remainder) so the result is also usable
    programmatically (callers that ignore the return are unaffected).
    A divisor <= 0 previously made the while-loop spin forever; it now
    prints an error and returns None instead.
    """
    if divisor <= 0:
        print("ERROR: el divisor debe ser positivo")
        return None
    contador = 0  # quotient: how many times divisor fits in dividendo
    resto = dividendo
    while resto >= divisor:
        resto -= divisor
        contador += 1
    print("%d / %d = %d , sobra %d" % (dividendo, divisor, contador, resto))
    return (contador, resto)
def encontrarMayor(num1, num2):
    """Return the larger of the two numbers."""
    return max(num1, num2)
def main():
    """Menu loop: repeat until the user picks option 3 (exit)."""
    opcionM=seleccionarOpcion()
    while opcionM!=3: # keep showing the menu while the choice is not 3
        if opcionM==1:
            dividendo=int(input("Teclea el dividendo: "))
            divisor = int(input("Teclea el divisor: "))
            dividir(dividendo,divisor)
        elif opcionM==2:
            num2 = 0
            cont=0
            num1 = int(input("Teclea un número [-1 para salir]: "))
            if num1==-1:
                print("No hay valor mayor")
            else:
                # Track the running maximum until the -1 sentinel is typed.
                while num1 !=-1:
                    num2 = encontrarMayor(num1, num2)
                    num1 = int(input("Teclea un número [-1 para salir]: "))
                print("El mayor es: ", num2)
        elif opcionM!=3:
            print("ERROR, teclea 1, 2 o 3") # any other value is invalid
        opcionM = seleccionarOpcion()
    print("Gracias por usar este programa, regrese pronto") # program ends
|
10,865 | 20ca751f5eec92f4d7b8e94c03c284a72168c6d0 | from django import forms
import inspect
from ...University.models import University, consignment,representativePush, representative
from ...account.models import User
class CreateUniversity(forms.ModelForm):
    """ModelForm exposing every field of the University model."""
    class Meta():
        model = University
        fields = '__all__'
class CreateRepr(forms.ModelForm):
    """ModelForm for representative; restricts the user choices to staff accounts."""
    class Meta():
        model = representative
        fields = '__all__'
    def __init__(self, *args, **kwargs):
        super(CreateRepr, self).__init__(*args, **kwargs)
        # Only staff users may be assigned as representatives.
        self.fields['user'].queryset = User.objects.filter(is_staff=True)
class consignmentForm(forms.ModelForm):
    """Consignment form; server-managed fields (user, totals, id) are excluded."""
    class Meta():
        model = consignment
        fields = '__all__'
        exclude = ('user','totalPaid','consignmentID','totalCommission')
class representativePushForm(forms.ModelForm):
    """ModelForm exposing every field of representativePush."""
    class Meta():
        model = representativePush
        fields = '__all__'
class addMoneyForm(forms.ModelForm):
    """Narrow form for representativePush that edits only pushMoney."""
    class Meta():
        model = representativePush
        fields = ('pushMoney',)
10,866 | f171b4ecb91994b93430f81f99fb8130482f31f4 | ''' Contains game server code '''
import logging
import socket
from gevent import Greenlet
from game.game import GameSession
class TelnetServer():
    ''' Listener handling for Telnet server, creates session greenlets.

    Binds an IPv4/TCP listening socket at construction; run() then accepts
    connections forever, handing each one to a TelnetServerHandler greenlet.
    '''
    def __init__(self, host='', port=5555):
        # IPv4 socket (socket.AF_INET), TCP (socket.SOCK_STREAM).
        self._listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        logging.basicConfig()
        self._logger = logging.getLogger('server')
        self._logger.setLevel(logging.DEBUG)
        # https://docs.python.org/3/library/socket.html#example
        # Explicitly assign host and port to this socket ('bind' required
        # when explicitly specifying port).
        self._listener.bind((host, port))
        # Start listening.
        self._listener.listen()
    def log(self, message):
        ''' Helper function for logging '''
        self._logger.debug(message)
    def run(self):
        ''' Listener loop; blocks forever accepting connections. '''
        while True:
            # Via listener socket, new server socket spawned to handle unique
            # connection with client, so it can continue listening.
            # Blocks thread on 'accept' waiting for new connection attempts.
            conn, _ = self._listener.accept()
            TelnetServerHandler(conn)
class TelnetServerHandler(Greenlet): #pylint: disable=too-few-public-methods
    ''' Wrapper class for game session Greenlet.

    Starts itself on construction; the greenlet body runs one GameSession
    over the accepted socket and closes the socket when the session ends.
    '''
    def __init__(self, sock):
        self._socket = sock
        self._game = GameSession(sock)
        super().__init__(self.handle)
        self.start()
    def handle(self):
        ''' Main execution body for game session greenlet '''
        with self._socket: # Kill conn. if exception, finishes, etc.
            self._game.run()
|
10,867 | f96ecf16e551856c48c698f9ecb7b92cd515cf7d | from account.models import NewFeed
from django.core.exceptions import ObjectDoesNotExist
def newfeed_serialize(user):
    """Return the user's NewFeed settings as a dict, creating the row lazily.

    If the user has no related NewFeed yet (reverse one-to-one missing),
    one is created with model defaults before serializing.
    """
    try:
        newfeed = user.newfeed
    except ObjectDoesNotExist:
        newfeed = NewFeed.objects.create(user = user)
    return {
        'isMinimizedFeed': newfeed.isMinimizedFeed
    }
|
10,868 | 3ca46be5e5871ad590360dc1e3ba653a8ddf4d98 | ''' Type your code here. '''
string = input()
numbers = list(map(int, string.split()))
if len(numbers) > 9:
print('Too many inputs')
else:
print(numbers[len(numbers)//2])
|
10,869 | c37f40d2a207efd31a3faef04dd5bcc3f175c1cf | import matplotlib.pyplot as plt
import datetime
from sklearn.svm import SVR
from util.data_io import load_csv
def svr(df):
    """Fit RBF and linear SVRs of average stars on distance-from-central,
    plot both fits over the data, show the figure, and save it.

    Expects *df* to carry 'distance_from_central' and 'stars' columns.
    """
    # Use only one feature
    df_X = df.distance_from_central.values
    df_X = df_X.reshape(len(df_X), 1)
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    svr_lin = SVR(kernel='linear', C=1e3)
    y_rbf = svr_rbf.fit(df_X, df.stars.values).predict(df_X)
    print("RBF DONE: {}".format(datetime.datetime.now()))
    y_lin = svr_lin.fit(df_X, df.stars.values).predict(df_X)
    print("LINEAR DONE: {}".format(datetime.datetime.now()))
    # Plotting
    lw = 2
    fig = plt.scatter(df_X, df.stars.values, color='darkorange', label='data')
    # BUG FIX: plt.hold('on') was removed in Matplotlib 3.0 (deprecated in
    # 2.0); hold-on is the default behaviour, so the call is simply dropped.
    plt.plot(df_X, y_rbf, color='navy', lw=lw, label='RBF model')
    plt.plot(df_X, y_lin, color='c', lw=lw, label='Linear model')
    plt.xlabel('Distance from central Las Vegas')
    plt.ylabel('Average stars')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()
    fig.figure.savefig('../project_data/stars vs distance.png')
def random_forest(df):
    """Fit a RandomForest regressor of stars on distance and display the fit."""
    from sklearn.ensemble import RandomForestRegressor
    features = df.distance_from_central.values
    features = features.reshape(len(features), 1)
    labels = df.stars.values
    model = RandomForestRegressor(n_estimators=10, max_features=1)
    model.fit(features, labels)
    plt.scatter(features, df.stars.values, color='darkorange', label='data')
    plt.plot(features, model.predict(features))
    plt.show()
def group_by_distance(df):
    """Mean star rating per distance bucket, keeping only buckets with >10 rows.

    Grouping happens twice because pandas' groupby().filter() hands back a
    plain DataFrame, so the grouping must be re-applied before aggregating.
    """
    populous = df.groupby('distance_from_central', as_index=False).filter(lambda g: len(g) > 10)
    return populous.groupby('distance_from_central', as_index=False)['stars'].mean()
def t_test(df):
    """Print an independent-samples t-test of the stars vs. distance columns."""
    from scipy import stats
    result = stats.ttest_ind(df.stars, df.distance_from_central)
    print(result)
if __name__ == '__main__':
    # Pipeline: load the business table, drop noisy low-review rows,
    # aggregate by distance bucket, then report a t-test and the SVR fit.
    df = load_csv("distance_col_yelp_business.csv")
    df = df[df['review_count'] > 25] # Filter low review counts
    df = group_by_distance(df)
    t_test(df)
    svr(df)
    # random_forest(df)
|
10,870 | b1f601652877b42768c4326b478ef978aa72f5e4 | import gzip
import sys
import csv
if len( sys.argv ) != 2:
print "Usage: %s ACCESSIONFILE " % sys.argv[0]
sys.exit(1)
genomes = []
accessions = []
accession_file_name = sys.argv[1]
genome_db = {}
with gzip.GzipFile( './data/1000genomes_samples.csv.gz' ) as fobj:
reader = csv.reader( fobj )
genomes = list(reader)
genome_names = [x[1] for x in genomes]
genome_db = dict(zip(genome_names, genomes))
with open('./data/' + accession_file_name) as accessions:
reader = csv.reader( accessions )
accessions = list(reader)
def test(accession, genomes):
    """Return True iff *accession* appears as a sample name in genome_db.

    Consults the module-level ``genome_db`` index (built from column 1 of
    the 1000 Genomes samples CSV).  The ``genomes`` parameter is kept only
    for call-site compatibility and is not consulted.
    """
    # 'in' replaces dict.has_key(), which was removed in Python 3.
    return accession in genome_db
with open('./data/' + accession_file_name + "-results", "w") as resultsFile:
writer = csv.writer(resultsFile, delimiter=" ")
for accession in accessions:
writer.writerow([accession[0], test(accession[0], genomes)])
|
10,871 | 781110f6180b093185028db2502fff90042064c6 | /Users/noah/anaconda3/lib/python3.7/linecache.py |
10,872 | 40026eb553c509a5b41a09496ebf25275e16bbfc | from alerts.modules.ef.m1_m2.vertedero_emergencia.base import VertederoEmergenciaController
from alerts.modules.utils import single_state_create
from alerts.modules.base_states import EVENT_STATES
from alerts.modules.event_types import DAILY_INSPECTION
from base.fields import StringEnum
class Controller(VertederoEmergenciaController):
    """Alert controller: failure or blockage of the emergency spillway.

    Single-state ("A1") event created from daily inspections; TEMPLATE is
    the trigger topic string this controller is wired to.
    """
    name = "Falla o bloqueo del vertedero de emergencia"
    event_type = DAILY_INSPECTION
    states = StringEnum(*EVENT_STATES, "A1")
    TEMPLATE = "ef-mvp.m1.triggers.vertedero"
    create = single_state_create("A1")
|
10,873 | 5150bac9a42949389f32d5c6b709ae24217f2d2f | import numpy as np
import os
import pandas as pd
from Bio.Seq import Seq
from Bio import SeqIO
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import uuid
from joblib import Parallel, delayed
import argparse
import matplotlib
matplotlib.use('agg')
import seaborn as sns
import matplotlib.pyplot as plt
from pkg_resources import resource_filename
from janggu.data import Bioseq
from janggu.data import ReduceDim
import numpy as np
from janggu import inputlayer
from janggu import outputconv
from janggu import DnaConv2D
from janggu.data import ReduceDim
from janggu.data import Cover
try:
from StringIO import StringIO ## for Python 2
except ImportError:
from io import StringIO ## for Python 3
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.colors as clr
import numpy as np
from matplotlib.colors import ListedColormap
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap, LinearSegmentedColormap
import pandas as pd
import matplotlib.pylab as plt
import numpy as np
import scipy
import seaborn as sns
import glob
from sklearn.model_selection import KFold,StratifiedKFold
import warnings
from sklearn.metrics import roc_curve,roc_auc_score,average_precision_score,accuracy_score
import warnings
warnings.filterwarnings('ignore')
# warnings.simplefilter(action='ignore', category=FutureWarning)
from sklearn.ensemble import RandomForestRegressor,GradientBoostingRegressor,RandomForestClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics.scorer import make_scorer
from sklearn.model_selection import train_test_split
from sklearn.base import TransformerMixin
from sklearn.datasets import make_regression
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor,GradientBoostingClassifier
from sklearn.neighbors import KNeighborsRegressor
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression, Ridge
import scipy
import numpy as np
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import LeaveOneOut
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from sklearn.kernel_ridge import KernelRidge
from sklearn.svm import SVR
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import Lasso
from sklearn.linear_model import Ridge,Lars,BayesianRidge
from copy import deepcopy as dp
"""
Feature extraction (Top motif scores)
1. using janggu get DNA one-hot
3. read meme get motif PWMs in both strands
4. scan motifs get score_list, max(pos_strand,neg_strand)
with tree-based methods, we don't need to do normalization here
5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
Dependency
----------
meme (to get motif revcomp)
bedtools (to get fasta sequences for gkm_svm)
python library
--------------
janggu (tensorflow + keras)
biopython
sklearn
joblib
"""
def read_fasta(f):
    """Map record id -> upper-cased sequence string for FASTA file *f*."""
    return {record.id: str(record.seq).upper() for record in SeqIO.parse(f, "fasta")}
def read_motif(meme_file):
    """Load PWMs from a MEME file, pairing each motif with its reverse complement.

    Shells out to the external `meme-get-motif` tool (must be on PATH) to
    write the reverse-complement motifs into a temp file under /tmp.
    Returns {motif_name: [forward 4xW array, revcomp 4xW array]}.
    """
    revcomp_file = "/tmp/"+str(uuid.uuid4())
    os.system("meme-get-motif -rc -all %s > %s"%(meme_file,revcomp_file))
    original_motif_label = "++original++"
    revcomp_motif_label = "--revcomp--"
    dict1 = parse_meme(meme_file,label=original_motif_label)
    dict2 = parse_meme(revcomp_file,label=revcomp_motif_label)
    myDict = {}
    for k in dict1:
        motif_name = k.replace(original_motif_label,"")
        # .T: parse_meme rows are positions x [A,C,G,T]; transpose to 4 x W.
        myDict[motif_name]=[dict1[k].T.values,dict2[k.replace(original_motif_label,revcomp_motif_label)].T.values]
    return myDict
def parse_meme(file,label=""):
    """Parse a MEME motif file into {motif_name+label: DataFrame(A,C,G,T)}.

    Walks the file line by line looking for MOTIF headers.  The motif width
    is taken from field 5 of the description line; `flag` records whether a
    blank line separates the header from its description (this shifts the
    offset of the letter-probability matrix by one line).  Motifs whose
    width field cannot be parsed are skipped with a diagnostic print.
    """
    lines = open(file).readlines()
    i = 0
    myDict = {}
    while i < len(lines):
        myList = lines[i].strip().split()
        if len(myList) < 1:
            i = i + 1
            continue
        if myList[0] == "MOTIF":
            # flag=True: blank line follows the MOTIF line, so the
            # description sits one line further down.
            if lines[i+1].strip() == "":
                desc = lines[i+2].strip().split()
                flag = True
            else:
                desc = lines[i+1].strip().split()
                flag = False
            try:
                motifLength = int(desc[5])
            except:
                print (desc)
                i = i+1
                continue
            if flag:
                # Normalize whitespace so the matrix parses as TSV.
                myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+3:i+3+motifLength])).replace(" "," ")
                df = pd.read_csv(StringIO(myString), sep="\t",header=None)
                df.columns=['A','C','G','T']
                myDict[myList[1]+label] = df
                if df.shape[0] != motifLength or df.shape[1] !=4:
                    print ("something is wrong")
                i = i+3+motifLength
                continue
            else:
                myString = "\n".join(map(lambda x:"\t".join(x.strip().split()),lines[i+2:i+2+motifLength])).replace(" "," ")
                df = pd.read_csv(StringIO(myString), sep="\t",header=None)
                df.columns=['A','C','G','T']
                myDict[myList[1]+label] = df
                i = i+2+motifLength
                if df.shape[0] != motifLength or df.shape[1] !=4:
                    print ("something is wrong")
                continue
        i = i+1
    return myDict
def motif_scan(s, m):
    """Slide PWM *m* (4 x W) along one-hot sequence *s* (L x 4).

    Returns a list of L-W window scores, each the trace of window @ m
    (i.e. the sum of per-position base/PWM products).

    NOTE(review): the final alignment at offset L-W is never scored
    (range(L-W), not L-W+1).  Downstream, get_adjusted_motif_score recovers
    the motif width from exactly this length difference, so the loop bound
    is deliberately left as-is.
    """
    L = s.shape[0]
    W = m.shape[1]
    score_list = []
    for i in range(L - W):
        # trace(window @ m) sums the matched base probabilities per position.
        score_list.append(np.trace(np.matmul(s[i:i + W, :], m)))
    return score_list
def DNA_motif_scan(DNA_array, m1, m2):
    """Scan every one-hot sequence in *DNA_array* (N x L x 4) with a motif.

    m1/m2 are the forward- and reverse-complement PWMs (4 x W); at every
    window the better of the two strand scores is kept.
    Returns an N x (L-W) numpy array (see motif_scan for the window count).
    """
    per_seq_scores = []
    for i in range(DNA_array.shape[0]):
        fwd = motif_scan(DNA_array[i, :, :], m1)
        rev = motif_scan(DNA_array[i, :, :], m2)
        # Keep the stronger strand at every offset.
        per_seq_scores.append([max(f, r) for f, r in zip(fwd, rev)])
    out = np.array(per_seq_scores)
    print ("DNA scanning out shape",out.shape)
    return out
def get_roi(myList):
    """Parse region strings like 'chr19:13180899-13180900+' into ROI lists.

    Returns (roi_A, roi): stranded BED-like rows
    [chrom, start, end, name, '.', strand] and bare [chrom, start, end]
    triples, in input order.  (ROI = region of interest, janggu's term.)
    """
    roi_A = []
    roi = []
    for region in myList:
        strand = region[-1]          # trailing +/- character
        coords = region[:-1]
        chrom = coords.split(":")[0]
        span = coords.split(":")[-1].split("-")
        start = int(span[0])
        end = int(span[1])
        roi_A.append([chrom, start, end, region, ".", strand])
        roi.append([chrom, start, end])
    return roi_A, roi
def get_high_low_data(input,pos_cutoff,neg_cutoff):
    """Split the scores CSV into high/low HbF classes.

    Returns (subset_df, pos_names, neg_names): pos rows have
    HbFBase > pos_cutoff, neg rows have HbFBase <= neg_cutoff; rows in
    between are dropped.  *input* is a CSV path whose first column is the
    index.  NOTE(review): the parameter shadows the builtin input().
    """
    df = pd.read_csv(input,index_col=0)
    # pos = df[df['HbFBase']>=pos_cutoff].index.tolist()
    pos = df[df['HbFBase']>pos_cutoff].index.tolist()
    neg = df[df['HbFBase']<=neg_cutoff].index.tolist()
    print ("Pos size %s. Neg size %s"%(len(pos),len(neg)))
    return df.loc[pos+neg],pos,neg
def roi2fasta(roi,genome_fa,flank):
    """Fetch strand-aware sequences for the regions, extended by *flank* bp.

    Writes a temporary BED file, shells out to `bedtools getfasta` (must be
    on PATH), and returns {region_name: sequence} via read_fasta.  Temp
    files tmp.bed/tmp.fa are created and removed in the current directory.
    """
    df = pd.DataFrame(roi)
    df[1] = df[1]-flank
    df[2] = df[2]+flank
    df.to_csv("tmp.bed",sep="\t",header=False,index=False)
    os.system("bedtools getfasta -fi %s -fo tmp.fa -bed tmp.bed -s -name"%(genome_fa))
    seq = read_fasta("tmp.fa")
    os.system("rm tmp.fa tmp.bed")
    return seq
## Define parameters
# NOTE(review): refgenome/bw_file are hard-coded absolute cluster paths;
# the script only runs where those files exist.
# high_hbf = 50
high_hbf = 0
low_hbf = 0
# NOTE(review): `input` shadows the builtin input() for the rest of the script.
input = "Editable_A_scores.combined.scores.csv"
flank = 100
refgenome="/home/yli11/Data/Human/hg19/fasta/hg19.fa"
bw_file="/home/yli11/Projects/Li_gRNA/footprint/H1_H2_GM12878_Tn5_bw/Hudep2.bw"
meme_file = "selected_motifs.meme"
top_n=5 # number of features for each motif
## read data
data,high,low = get_high_low_data(input,high_hbf,low_hbf)
roi_A,roi = get_roi(high+low)
seq = roi2fasta(roi_A,refgenome,flank)
test = pd.DataFrame.from_dict(seq,orient='index')
data['seq'] = test[0]
# 1. using janggu get DNA one-hot
## get one-hot data and ATAC feature matrix
dna_A = Bioseq.create_from_refgenome(name='dna',refgenome=refgenome,roi=roi_A,flank=flank)
Tn5 = Cover.create_from_bigwig('bigwig_coverage',bigwigfiles=bw_file,roi=roi,binsize=1,stepsize=1,flank=flank)
## ReShape
# One row per region; each region spans flank*2+1 positions.
dna_A=np.reshape(dna_A,(len(high+low),flank*2+1,4))
bw_values=np.reshape(Tn5,(len(high+low),flank*2+1))
## get motif PWM, 3. read meme get motif PWMs in both strands
motifs = read_motif(meme_file)
# 4. scan motifs get score_list, max(pos_strand,neg_strand)
score_list_A = Parallel(n_jobs=-1)(delayed(DNA_motif_scan)(dna_A,motifs[m][0],motifs[m][1]) for m in motifs)
def get_footprint_score(s, l, footprint_score):
    """Footprint occupancy score for one motif hit.

    s               -- start offset of the hit inside *footprint_score*
    l               -- motif length
    footprint_score -- per-base Tn5 coverage for one region (1-D sequence)

    Returns mean(flanking coverage) - mean(coverage under the motif); a
    positive value means the site is protected (footprinted).  Up to 2 bp
    on each side form the flanks; a flank that would fall outside the
    array contributes a single NaN, which np.nanmean ignores.

    (Rewrite note: the original reused one local name `flanking` for both
    the pad width and the flank mean; the two meanings are now separate.)
    """
    pad = 2
    left_start = s - pad
    if left_start >= 0:
        left = list(footprint_score[left_start:s])
    else:
        left = [np.nan]
    right_end = s + l + pad
    if right_end <= len(footprint_score):
        right = list(footprint_score[s + l:right_end])
    else:
        right = [np.nan]
    flank_mean = np.nanmean(left + right)
    occupancy = np.nanmean(footprint_score[s:s + l])
    return flank_mean - occupancy
def get_top_n_motif_scores(score_list, top_n):
    """Return (positions, values) of the *top_n* highest scores.

    *score_list* is a 1-D numpy array of length L.  Both returned arrays
    are ordered lowest-to-highest among the selected scores, mirroring
    numpy argsort order.
    """
    top_idx = np.argsort(score_list)[-top_n:]
    return top_idx, score_list[top_idx]
# 5. for each seq, get top N scores from (4) and their footprint score (given their positions), get adjusted score
def get_adjusted_motif_score(motif_score, footprint_score, n):
    """Weight each sequence's top-n motif scores by its footprint score.

    motif_score     -- N x (L-W) motif scan scores
    footprint_score -- N x L per-base coverage (same N, same regions)
    n               -- number of top hits to keep per sequence

    The motif width is recovered from the column-count difference of the
    two matrices (motif_scan emits L-W columns).  Returns an N-element
    list of n-element lists of score * footprint products.
    """
    adjusted = []
    motif_length = footprint_score.shape[1] - motif_score.shape[1]
    for i in range(motif_score.shape[0]):
        pos, value = get_top_n_motif_scores(motif_score[i], n)
        fos_list = [get_footprint_score(start, motif_length, footprint_score[i]) for start in pos]
        adjusted.append([v * f for v, f in zip(value, fos_list)])
    return adjusted
adjusted_scores = Parallel(n_jobs=-1)(delayed(get_adjusted_motif_score)(motif_score,bw_values,top_n) for motif_score in score_list_A)
def set_col_names(motifs, top_n, label):
    """Build '<label>_<motif>_<rank>' column names, motif-major order."""
    return ["%s_%s_%s" % (label, motif, rank) for motif in motifs for rank in range(top_n)]
## get feature table
# adjusted_scores arrives as (n_motifs, N, top_n); swapaxes makes it
# sequence-major before flattening to one row per sequence.
adjusted_scores = np.array(adjusted_scores)
adjusted_scores = np.swapaxes(adjusted_scores,0,1)
adjusted_scores = adjusted_scores.reshape((len(high+low),top_n*len(motifs)))
adjusted_scores = pd.DataFrame(adjusted_scores)
adjusted_scores.columns = set_col_names(motifs,top_n,"motif_footprint_score")
adjusted_scores.index = high+low
# Join the engineered features onto the original score table by region name.
df = pd.concat([adjusted_scores,data],axis=1)
# df.to_csv("ML_data.csv")
df.to_csv("all_A_features.csv")
10,874 | 7c427c41ca50e5ed19b35300731bf06ec4e67611 | # Tuple utilities for 2 int tuples
def tadd(a, b):
return (a[0] + b[0], a[1] + b[1])
def tsub(a, b):
return (a[0] - b[0], a[1] - b[1])
def tmul(a, b):
return (a[0] * b, a[1] * b)
def tdiv(a, b):
return (a[0] / b, a[1] / b)
|
10,875 | 2226b9ab8d866aef448d467e2fea973ad01427ed | import csv
import random
input_file = "raw_poem_quality_data.csv"
output_file = "qc_poem_quality_data.csv"
num_poems = 10
votes_per_poem = 11
random.seed(213)

### sample results from QC HIT: one row per (poem, voter) with three 0/1 votes
with open(input_file, 'w') as fh:
    writer = csv.writer(fh, delimiter=',')
    writer.writerow(['title', 'is_english', 'embodies_keywords', 'embodies_mood'])
    for i in range(num_poems):
        for j in range(votes_per_poem):
            writer.writerow(['poem%d' % i, random.randrange(2), random.randrange(2), random.randrange(2)])

# Aggregate the per-voter rows into one majority-vote row per poem.
with open(input_file, 'r') as fh:
    with open(output_file, 'w') as qc:
        reader = csv.reader(fh, delimiter=',')
        writer = csv.writer(qc, delimiter=',')
        next(reader)  # skip the header row
        writer.writerow(['title', 'is_english', 'embodies_keywords', 'embodies_mood'])
        votes = {}
        for line in reader:
            title = line[0]
            is_english = int(line[1])
            embodies_keywords = int(line[2])
            embodies_mood = int(line[3])
            if title in votes:
                votes[title] = (is_english + votes[title][0], embodies_keywords + votes[title][1], embodies_mood + votes[title][2])
            else:
                votes[title] = (is_english, embodies_keywords, embodies_mood)
        for title in votes:
            # A label wins when it gets a strict majority of the votes.
            is_english = 1 if votes[title][0] > votes_per_poem / 2 else 0
            embodies_keywords = 1 if votes[title][1] > votes_per_poem / 2 else 0
            embodies_mood = 1 if votes[title][2] > votes_per_poem / 2 else 0
            # BUG FIX: embodies_mood was computed but dropped from the output
            # row, producing 3-column rows under a 4-column header.
            writer.writerow([title, is_english, embodies_keywords, embodies_mood])
10,876 | a72b486161704375521f30fd7925330ca588286e | #!/usr/bin/env python
# coding: utf-8
import torch
import torch2trt
from torch2trt import TRTModule
import json
import trt_pose.coco
import trt_pose.models
import time
# Convert a trt_pose densenet169 checkpoint to a TensorRT engine, save it,
# then time 50 inferences as a quick benchmark.
print("Loading topology...")
with open('human_pose.json', 'r') as f:
    human_pose = json.load(f)
topology = trt_pose.coco.coco_category_to_topology(human_pose)
num_parts = len(human_pose['keypoints'])
num_links = len(human_pose['skeleton'])
print("Loading model backbone...")
# 2 * num_links output channels: presumably x/y part-affinity pairs — TODO confirm.
model = trt_pose.models.densenet169_baseline_att(num_parts, 2 * num_links).cuda().eval()
print("Loading model weight...")
# path to model to convert
MODEL_WEIGHTS = './models/densenet169_256x256_epoch130.pth'
model.load_state_dict(torch.load(MODEL_WEIGHTS))
print("Generating data...")
# change to model width and height
WIDTH =256
HEIGHT=256
data = torch.zeros((1, 3, HEIGHT, WIDTH)).cuda()
print("Start converting...")
# set fp16 precision, and workspace size when converting
model_trt = torch2trt.torch2trt(model, [data], fp16_mode=True, max_workspace_size=1<<24)
print("Loading trt model...")
# change to designated trt model path
OPTIMIZED_MODEL = './models/densenet169_256x256_epoch130_trt.pth'
print("Saving trt model in",OPTIMIZED_MODEL)
torch.save(model_trt.state_dict(), OPTIMIZED_MODEL )
print("Running trt benchmark...")
model_trt = TRTModule()
model_trt.load_state_dict(torch.load(OPTIMIZED_MODEL))
t0 = time.time()
torch.cuda.current_stream().synchronize()
for i in range(50):
    y = model_trt(data)
torch.cuda.current_stream().synchronize()
t1 = time.time()
# Frames per second over 50 runs.
print(50.0 / (t1 - t0))
10,877 | d90241c2ef75c92d7d406cb6c8bf54ca3294c978 | from django.shortcuts import render
# Create your views here.
from rest_framework import exceptions
from rest_framework.utils import json
from rest_framework.views import APIView
from rest_framework.serializers import ModelSerializer
from rest_framework.authentication import BaseAuthentication
from rest_framework.pagination import PageNumberPagination
from utils.sendCmd import SendCmd
from .models import manage as WorkAreaModel
from .models import load as BindEquipment
from equipment.models import info as EquipmentInfo
from django.http import HttpResponse
from rest_framework_jwt.settings import api_settings
jwt_decode_handler = api_settings.JWT_DECODE_HANDLER
'''
{
"token":token,
"workArea_id"id,
"workArea_name":name,
"workArea_type":xxx,
"long_lat_itude":(xxx,xxx),
"area_size":xxx,
"time":xxx,
"location":xxx,
"status":xxx,
"duty_person":xxx
"company":xxx
}
'''
from user.models import info as userinfo
class WorkAreaAuthentication(BaseAuthentication):
    """JWT auth for work-area endpoints; rejects missing/frozen/plain users.

    Reads the token from the request body, falling back to the query string.
    Failure responses carry {code, msg} dicts (messages in Chinese).
    """
    def authenticate(self, request):
        token = request.data.get("token")
        if not token:
            token = request.query_params.get("token")
        # NOTE(review): if the token is absent from both places this raises
        # AttributeError on None.replace — TODO confirm intended behavior.
        token = token.replace("\"", "")
        user1 = jwt_decode_handler(token)
        user = userinfo.objects.filter(username=user1.get("username")).first()
        if not user:
            ret = {
                "code":410,
                "msg":"用户不存在"
            }
            raise exceptions.AuthenticationFailed(ret)
        else:
            if user.user_frozen==True:
                ret = {
                    "code": 400,
                    "msg": "用户已经冻结"
                }
                raise exceptions.AuthenticationFailed(ret)
            else:
                # Plain "user" accounts lack permission for these endpoints.
                if user.user_permission=="user":
                    ret = {
                        "code": 410,
                        "msg": "权限不够"
                    }
                    raise exceptions.AuthenticationFailed(ret)
                else:
                    return (user, None)
class WorkAreaSerializer(ModelSerializer):
    """Serializer for work areas; supports create and field-by-field update."""
    class Meta:
        model = WorkAreaModel
        fields = ["workArea_id","workArea_name","workArea_type","long_lat_itude","area_size","workArea_time","location","workArea_status","duty_person","company"]
        #exclude = ["time",]
    def validate(self, attrs):
        # No cross-field validation; accept the attrs unchanged.
        return attrs
    def create(self, validated_data):
        print(validated_data)
        work = WorkAreaModel(**validated_data)
        work.save()
        return work
    def update(self, instance, validated_data):
        # Copy each provided field onto the instance, keeping old values
        # for anything missing from validated_data.
        instance.workArea_id = validated_data.get("workArea_id",instance.workArea_id)
        instance.workArea_name = validated_data.get("workArea_name", instance.workArea_name)
        instance.workArea_type = validated_data.get("workArea_type",instance.workArea_type)
        instance.long_lat_itude = validated_data.get("long_lat_itude", instance.long_lat_itude)
        instance.area_size = validated_data.get("area_size",instance.area_size)
        instance.workArea_time = validated_data.get("workArea_time",instance.workArea_time)
        instance.location = validated_data.get("location",instance.location)
        instance.workArea_status = validated_data.get("workArea_status",instance.workArea_status)
        instance.duty_person = validated_data.get("duty_person",instance.duty_person)
        instance.company = validated_data.get("company",instance.company)
        instance.save()
        return instance
class MyPagination(PageNumberPagination):
    """Fixed page size of 5; page/size come from the query string."""
    page_size = 5
    page_query_param = "page"
    page_size_query_param = "size"
    max_page_size = 5
class WorkArea(APIView):
    """CRUD endpoints for work areas.

    POST creates, GET lists non-frozen areas (paginated), PUT updates by
    workArea_id, DELETE soft-deletes by setting workArea_frozen=True.
    All responses are JSON {code, msg} payloads (messages in Chinese).
    """
    authentication_classes = [WorkAreaAuthentication,]
    def post(self,request,*args,**kwargs):
        print(request.data)
        myser = WorkAreaSerializer(data=request.data)
        if myser.is_valid():
            try:
                myser.save()
            except Exception as e:
                # Save failure is treated as a duplicate workArea_id.
                print(e)
                ret = {
                    "code": 410,
                    "msg": "工程id已经存在"
                }
                return HttpResponse(json.dumps(ret))
            ret = {
                "code":200,
                "msg":"添加成功"
            }
            return HttpResponse(json.dumps(ret))
        else:
            ret = {
                "code":500,
                "msg":"数据添加失败"
            }
            return HttpResponse(json.dumps(ret))
    def get(self,request,*args,**kwargs):
        # Only list areas that have not been soft-deleted.
        work = WorkAreaModel.objects.filter(workArea_frozen=False)
        print("hhhhhh")
        pg = MyPagination()
        pager = pg.paginate_queryset(view = self,request = request,queryset = work)
        print("------------")
        print(pager)
        print("------------")
        myser = WorkAreaSerializer(instance=pager,many=True)
        print(myser.data)
        work1 = WorkAreaModel.objects.filter(company__contains="")
        print(work1)
        return HttpResponse(json.dumps(myser.data))
    def put(self,request,*args,**kwargs):
        # NOTE(review): "跟新" in the messages below looks like a typo for
        # "更新" (update) — left unchanged as it is user-facing output.
        workArea_id = request.data.get("workArea_id")
        work = WorkAreaModel.objects.filter(workArea_id=workArea_id).first()
        myser = WorkAreaSerializer(instance=work,data=request.data)
        if myser.is_valid():
            try:
                myser.save()
            except Exception as e:
                print(e)
                ret = {
                    "code": 410,
                    "msg": "跟新失败"
                }
                return HttpResponse(json.dumps(ret))
            ret = {
                "code": 200,
                "msg": "跟新成功"
            }
            return HttpResponse(json.dumps(ret))
        else:
            ret = {
                "code":400,
                "msg":"信息格式不正确"
            }
            return HttpResponse(json.dumps(ret))
    def delete(self,request,*args,**kwargs):
        workArea_id = request.data.get("workArea_id")
        work = WorkAreaModel.objects.filter(workArea_id=workArea_id).first()
        if not work:
            ret = {
                "code":410,
                "msg":"工程不存在"
            }
            return HttpResponse(json.dumps(ret))
        else:
            # Soft delete: flag the row instead of removing it.
            work.workArea_frozen = True
            work.save()
            ret = {
                "code": 200,
                "msg": "删除成功"
            }
            return HttpResponse(json.dumps(ret))
class WorkAreaBind(APIView):
    """Bind or unbind an equipment unit to a work area.

    Expects equipment_id, equipment_password, workArea_id and bind in the
    body; bind=="0" means unbind (workArea_id forced to -1).  On success an
    "active" command is pushed to the device via SendCmd.
    """
    authentication_classes = [WorkAreaAuthentication,]
    def post(self,request,*args,**kwargs):
        equipment_id = request.data.get("equipment_id")
        equipment_password = request.data.get("equipment_password")
        workArea_id = request.data.get("workArea_id")
        bind = request.data.get("bind")
        bind_status = True
        if bind=="0":
            # Unbind: detach from any work area.
            bind_status = False
            workArea_id = -1
        if equipment_id and equipment_password and workArea_id:
            bind1 = BindEquipment.objects.filter(equipment_id_id=equipment_id).first()
            if not bind1:
                ret = {
                    "code":410,
                    "msg":"设备不存在"
                }
                return HttpResponse(json.dumps(ret))
            else:
                # Re-query with the password to verify the credential.
                bind2 = BindEquipment.objects.filter(equipment_id_id=equipment_id,
                                                     equipment_password=equipment_password).first()
                if not bind2:
                    ret = {
                        "code": 411,
                        "msg": "密码错误"
                    }
                    return HttpResponse(json.dumps(ret))
                else:
                    equip = EquipmentInfo.objects.filter(equipment_id=equipment_id).first()
                    equip.workArea_id = workArea_id
                    equip.bind = bind_status
                    equip.save()
                    # Notify the device asynchronously that its state changed.
                    th = SendCmd(option="active",serialnum=equipment_id)
                    th.start()
                    ret = {
                        "code": 200,
                        "msg": "绑定/解绑成功"
                    }
                    return HttpResponse(json.dumps(ret))
        else:
            ret = {
                "code":400,
                "msg":"信息格式有误"
            }
            return HttpResponse(json.dumps(ret))
class SearchWorkArea(APIView):
    """Paginated fuzzy search of work areas by name (substring match)."""
    authentication_classes = [WorkAreaAuthentication,]
    def get(self,request,*args,**kwargs):
        # Fuzzy match on the work-area name.
        workArea_name = request.query_params.get("workArea_name")
        work = WorkAreaModel.objects.filter(workArea_name__contains=workArea_name)
        pg = MyPagination()
        pager = pg.paginate_queryset(view=self, request=request, queryset=work)
        myser = WorkAreaSerializer(instance=pager, many=True)
        return HttpResponse(json.dumps(myser.data))
class CensusAuthentication(BaseAuthentication):
    """JWT auth for census endpoints.

    Same flow as WorkAreaAuthentication but without the permission check
    and without stripping quote characters from the token.
    """
    def authenticate(self, request):
        token = request.data.get("token")
        if not token:
            token = request.query_params.get("token")
        user1 = jwt_decode_handler(token)
        user = userinfo.objects.filter(username=user1.get("username")).first()
        if not user:
            ret = {
                "code":410,
                "msg":"用户不存在"
            }
            raise exceptions.AuthenticationFailed(ret)
        else:
            if user.user_frozen==True:
                ret = {
                    "code": 400,
                    "msg": "用户已经冻结"
                }
                raise exceptions.AuthenticationFailed(ret)
            else:
                return (user, None)
class Census(APIView):
    """Return the total number of work areas (frozen ones included)."""
    # NOTE(review): authentication is commented out; this endpoint is open.
    #authentication_classes = [CensusAuthentication,]
    def get(self,request,*args,**kwargs):
        ret = {
            "code":200,
            "msg":"获取成功",
            # .count() issues SELECT COUNT(*) instead of materialising
            # every row just to take len() of the list.
            "workArea_num": WorkAreaModel.objects.count()
        }
        return HttpResponse(json.dumps(ret))
class ActiveSerializer(ModelSerializer):
    """Minimal serializer exposing only the work-area name."""
    class Meta:
        fields = ["workArea_name",]
        model = WorkAreaModel
    def validate(self, attrs):
        # No extra validation; pass attrs through unchanged.
        return attrs
class Active(APIView):
    """List the names of all currently active work areas."""
    #authentication_classes = [CensusAuthentication,]
    def get(self,request,*args,**kwargs):
        work = WorkAreaModel.objects.filter(workArea_status=True)
        myser = ActiveSerializer(instance=work,many=True)
        return HttpResponse(json.dumps(myser.data))
10,878 | adeb6fc44a5bd0e539fa56a25a219cf9f79bf314 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2017-18 Richard Hull and contributors
# See LICENSE.md for details.
"""
Display a TOTP code based on some stored secrets
"""
import time
import RPi.GPIO as GPIO
from luma.core.serial import spi
from luma.core.virtual import viewport
from luma.led_matrix.device import max7219, sevensegment
from secret import get_token
def scroll_message(device, msg, delay=0.2):
    """Scroll *msg* right-to-left across the seven-segment device.

    The message is padded with one device-width of blanks on each side so
    it scrolls fully in and out of view; *delay* is seconds per one-column
    step.  Blocks until the scroll completes.
    """
    width = device.width
    padding = " " * width
    msg = padding + msg + padding
    n = len(msg)
    virtual = viewport(device, width=n, height=8)
    sevensegment(virtual).text = msg
    for i in reversed(list(range(n - width))):
        virtual.set_position((i, 0))
        time.sleep(delay)
class display(object):
    """Cycle TOTP codes for a list of secrets on a MAX7219 7-segment display.

    token() and dot() alternate via loop.call_later to blink a trailing
    dot; message() scrolls the secret's name and shows its code;
    next()/prev() step through the secrets list.
    """
    def __init__(self, loop, secrets):
        self.seg = sevensegment(max7219(spi()))
        self.secrets = secrets
        self.loop = loop
        self.current = 0

    def token(self):
        n = self.current % len(self.secrets)
        # BUG FIX: the last-digit computation referenced an undefined name
        # `token` (NameError at runtime); fetch the code once and reuse it.
        code = get_token(self.secrets[n])
        self.seg.text = " %s" % code
        last_digit = code % 10
        self.loop.call_later(0.8, self.dot, last_digit)

    def dot(self, last_digit):
        # Re-draw the last digit with a trailing dot to create a blink.
        self.seg.text[7:] = str(last_digit) + "."
        self.loop.call_later(0.2, self.token)

    def message(self, next=None):
        n = self.current % len(self.secrets)
        token = get_token(self.secrets[n])
        self.seg.device.clear()
        scroll_message(self.seg.device, self.secrets[n].name)
        self.seg.text = " %06d" % token
        if next:
            self.loop.call_soon(next)

    def next(self):
        self.current += 1
        self.loop.call_soon(self.message)

    def prev(self):
        self.current -= 1
        self.loop.call_soon(self.message)
def init(loop, secrets):
    """Wire two GPIO buttons to prev/next and schedule the first display.

    Button A (BCM 17) steps backwards, button B (BCM 26) forwards; a title
    banner shows immediately and the first secret after 3 seconds.
    """
    dispatch = display(loop, secrets)
    # GPIO buttons
    import const as button
    button.a = 17
    button.b = 26
    def cb(channel):
        # GPIO callbacks run off-loop; hand the work back to the event loop.
        method = dispatch.prev if channel == button.a else dispatch.next
        loop.call_soon(method)
    def title(msg):
        dispatch.seg.text = msg
    GPIO.setmode(GPIO.BCM)
    GPIO.setup(button.a, GPIO.IN)
    GPIO.setup(button.b, GPIO.IN)
    GPIO.add_event_detect(button.a, GPIO.RISING, callback=cb, bouncetime=200)
    GPIO.add_event_detect(button.b, GPIO.RISING, callback=cb, bouncetime=200)
    loop.call_soon(title, "- ZAUP -")
    loop.call_later(3, dispatch.message, dispatch.token)
|
10,879 | 22f1f519caa8b2e60e50dbafa9355fd402eb099b | import faulthandler; faulthandler.enable()
from config import imagenet_alexnet_config as config
import mxnet as mx
import argparse
import json
import os
# Evaluate a serialized AlexNet checkpoint on the ImageNet test record file,
# reporting rank-1 and rank-5 accuracy.
ap = argparse.ArgumentParser()
ap.add_argument("-c", "--checkpoints", required=True, help="path to output checkpoint directory")
ap.add_argument("-p", "--prefix", required=True, help="name of model prefix")
ap.add_argument("-e", "--epoch", type=int, required=True, help="epoch # to load")
args = vars(ap.parse_args())
# Per-channel RGB means computed over the training set, used for mean
# subtraction at evaluation time.
means = json.loads(open(config.DATASET_MEAN).read())
testIter = mx.io.ImageRecordIter(
    path_imgrec=config.TEST_MX_REC,
    data_shape=(3, 227, 227),
    batch_size=config.BATCH_SIZE,
    mean_r=means["R"],
    mean_g=means["G"],
    mean_b=means["B"]
)
print("[INFO] loading model...")
checkpointsPath = os.path.sep.join([args["checkpoints"], args["prefix"]])
model = mx.model.FeedForward.load(checkpointsPath, args["epoch"])
# Rebuild the model on GPU 0 with the loaded symbol and weights.
model = mx.model.FeedForward(
    ctx=[mx.gpu(0)],
    symbol=model.symbol,
    arg_params=model.arg_params,
    aux_params=model.aux_params
)
print("[INFO] predicting on test data...")
metrics = [mx.metric.Accuracy(), mx.metric.TopKAccuracy(top_k=5)]
(rank1, rank5) = model.score(testIter, eval_metric=metrics)
print("[INFO] rank-1: {:.2f}%".format(rank1 * 100))
print("[INFO] rank-5: {:.2f}%".format(rank5 * 100))
|
10,880 | d1c1bf8eebe7eac1569b050f8a330ae2bcbf990e | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Alter `Discussor.reply_to` into a ForeignKey onto DiscussionReply."""

    dependencies = [
        ('discussion', '0010_auto_20150805_2052'),
    ]
    operations = [
        migrations.AlterField(
            model_name='discussor',
            name='reply_to',
            field=models.ForeignKey(related_name='reply_to', to='discussion.DiscussionReply'),
        ),
    ]
|
10,881 | 5b55a296c7ddcebf9518a910375e930e68e15f84 | """ MARKDOWN
---
YamlDesc: CONTENT-ARTICLE
Title: Python Pickle Serialization and De-Serialization
MetaDescription: Python Pickle Serialization and De-Serialization
MetaKeywords: Python Pickle Serialization and De-Serialization
Author: Sreelakshmi Radhakrishnan
ContentName: python-pickle-serialization
---
MARKDOWN """
""" MARKDOWN
# Python Pickle Serialization and De-Serialization
* Serialization is the process of converting an object state into a
binary file, this file can be stored on filesystem or transmitted across
network. or it can be persisted(stored) and later use.
* Serialization of an object is also known as deflating or marshalling.
* In Python we use the **PICKLE** module to Serialize an Object.
* In order to resurrect an Object from a Pickle file it needs to be deserialized
* Deserialization of a file into an object also known as inflating or
unmarshalling.
MARKDOWN """
# MARKDOWN ```
import pickle;
#############################
# Serialization of an Object
#############################
# Create a Class
class PickleTest():
    """Tiny demo class whose two attributes are round-tripped through pickle."""

    # Class-level defaults, shadowed by instance attributes in __init__.
    a = 0
    b = 0

    def __init__(self, i_a, i_b):
        """Store the two demo values on the instance."""
        self.a, self.b = i_a, i_b
# Create an Object
Obj1 = PickleTest(1,2)
# Serialize an Object
# NOTE(review): the output path is a hard-coded Windows directory; the demo
# only runs where c:\Personal\tinitiate exists.
with open('c:\\Personal\\tinitiate\\object.pickle', 'wb') as f:
    pickle.dump(Obj1, f)
# DeSerialize File to Object
with open('c:\\Personal\\tinitiate\\object.pickle', 'rb') as f:
    ObjFile = pickle.load(f)
print(ObjFile)
print(ObjFile.a)
print(ObjFile.b)
################################
# Serialization of a Dictionary
################################
# Create a Dict
Dict1={'APPLE':'FRUIT', 'POTATO':'ROOT', 'OKRA':'VEGETABLE'}
# Serialize an Dict
with open('c:\\Personal\\tinitiate\\dict.pickle', 'wb') as f:
    pickle.dump(Dict1, f)
# DeSerialize File to Object
with open('c:\\Personal\\tinitiate\\dict.pickle', 'rb') as f:
    DictFile = pickle.load(f)
print(DictFile)
# MARKDOWN ```
|
10,882 | 0c12fa5f3f93e2f89ad2f977a7bc4f0b6b14031c | import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import TensorDataset, DataLoader
import argparse
from tqdm import tqdm
import os
import numpy as np
from net.ae import AE, KMEANS
from net.vae import VRAE
import random
from deeplog.model import *
# Device configuration
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def generate_bgl(name, window_size):
    """Load BGL log-key sequences from a windowed split file.

    Each line is parsed into a tuple of ints and collected into a set, so
    duplicate sequences are dropped; `num_sessions` counts lines read, not
    unique sequences.
    """
    num_sessions = 0
    inputs = set()
    with open('bgl/window_'+str(window_size)+'future_0/' + name, 'r') as f_len:
        # NOTE(review): file_len is computed but never used.
        file_len = len(f_len.readlines())
    with open('bgl/window_'+str(window_size)+'future_0/' + name, 'r') as f:
        for line in f.readlines():
            # The inner lambda is an identity map; values are kept as ints.
            line = tuple(map(lambda n: n, map(int, line.strip().split())))
            inputs.add(line)
            num_sessions += 1
    print('Number of sessions({}): {}'.format(name, num_sessions))
    return inputs
def generate_hdfs(name, window_size):
    """Build the set of sliding-window log-key sequences from an HDFS split.

    Keys are shifted to 0-based (n - 1); short lines are padded with the
    sentinel key 28 so at least one window can be extracted per line.
    """
    hdfs = set()
    with open('data/' + name, 'r') as f:
        for line in f.readlines():
            line = list(map(lambda n: n - 1, map(int, line.strip().split())))
            ### pad 28 if the sequence len is less than the window_size (log key start from 0 to 27)
            line = line + [28] * (window_size + 1 - len(line))
            for i in range(len(line) - window_size):
                seq = line[i:i + window_size]
                hdfs.add(tuple(seq))
    print('Number of sessions({}): {}'.format(name, len(hdfs)))
    return hdfs
def generate_random_hdfs(window_size, num_samples):
hdfs = []
for i in range(num_samples):
line = [random.randint(0, 28) for j in range(window_size)]
hdfs.append(line)
return hdfs
if __name__ == '__main__':
    # Hyperparameters
    batch_size = 2048
    input_size = 1
    model_dir = 'model'
    parser = argparse.ArgumentParser()
    # ae
    parser.add_argument('-model', type=str, default='ae', choices=['ae', 'vae', 'dl'])
    parser.add_argument('-num_layers', default=2, type=int)
    parser.add_argument('-hidden_size', default=128, type=int)
    parser.add_argument('-latent_length', default=20, type=int)
    parser.add_argument('-window_size', default=20, type=int)
    parser.add_argument('-dropout', default=0.0, type=float)
    # training
    parser.add_argument('-dataset', type=str, default='hd', choices=['hd', 'bgl'])
    parser.add_argument('-epoch', default=150, type=int)
    parser.add_argument('-lr', default=0.001, type=float)
    # k-means
    parser.add_argument('-k', default=10, type=int)
    parser.add_argument('-threshold', default=0.1, type=float)
    args = parser.parse_args()
    num_layers = args.num_layers
    hidden_size = args.hidden_size
    latent_length = args.latent_length
    window_size = args.window_size
    num_epochs = args.epoch
    dropout = args.dropout
    k = args.k
    threshold = args.threshold
    if args.dataset == 'hd':
        train_normal_loader = generate_hdfs('hdfs_train', window_size)
        test_normal_loader = generate_hdfs('hdfs_test_normal', window_size)
        test_abnormal_loader = generate_hdfs('hdfs_test_abnormal', window_size)
        num_classes = 28
        if args.model != 'dl':
            num_classes +=1
    elif args.dataset == 'bgl':
        # NOTE(review): this branch never sets train_normal_loader, so the
        # "train normal" pass below raises NameError for -dataset bgl.
        test_normal_loader = generate_bgl('normal_test.txt', window_size)
        test_abnormal_loader = generate_bgl('abnormal_test.txt', window_size)
        num_classes = 1848
    len_train_normal = len(train_normal_loader)
    len_normal = len(test_normal_loader)
    len_abnormal = len(test_abnormal_loader)
    model_path = 'model/'
    # Reconstruct the checkpoint filename from the hyperparameters so it
    # matches what the training script saved.
    if args.model == 'ae' or args.model == 'vae':
        log = model_path + \
            'dataset=' + args.dataset + \
            '_window_size=' + str(window_size) + \
            '_hidden_size=' + str(hidden_size) + \
            '_latent_length=' + str(latent_length) + \
            '_num_layer=' + str(num_layers) + \
            '_epoch=' + str(num_epochs) + \
            '_dropout=' + str(dropout)
        log = log + '_lr=' + str(args.lr) if args.lr != 0.001 else log
        log = log + '_' + args.model + '.pt'
    else:
        log = 'model/num_layer=' + str(num_layers) + \
            '_window_size=' + str(window_size) + \
            '_hidden=' + str(hidden_size) + \
            '_dataset=' + args.dataset +\
            '_epoch='+str(args.epoch)
        log = log + '_' + args.model
        log = log + '.pt'
    print('retrieve model from: ', log)
    if args.model == 'ae':
        model = AE(input_size, hidden_size, latent_length, num_layers, num_classes, window_size)
    elif args.model == 'vae':
        model = VRAE(sequence_length=window_size,
                     number_of_features=1,
                     num_classes=num_classes,
                     hidden_size=hidden_size,
                     latent_length=latent_length,
                     training=False)
    elif args.model == 'dl':
        model = DL(input_size, hidden_size, num_layers, num_classes)
    model = model.to(device)
    model.load_state_dict(torch.load(log))
    model.eval()
    # Pre-computed k-means cluster centers live next to the checkpoint.
    k_means_path = log[:-3] + '_' + str(k) + '/'
    # normal_embedded
    clusters = []
    for i in range(k):
        cluster = np.load(k_means_path + 'center_' + str(i) + '.npy')
        cluster = torch.from_numpy(cluster).cuda()
        # print(cluster.data)
        clusters.append(cluster)
    FP = 0
    tbar = tqdm(train_normal_loader)
    with torch.no_grad():
        normal_min_dist = 0.0
        for index, line in enumerate(tbar):
            line = list(line)
            # NOTE(review): the last log key is deliberately randomized here,
            # presumably to probe detector sensitivity — confirm intent.
            line[-1] = random.randint(0, 28)
            line = tuple(line)
            seq = torch.tensor(line, dtype=torch.float).view(-1, window_size, input_size).to(device)
            latent = model.get_latent(seq)
            # A sequence is flagged anomalous when its latent vector is farther
            # than `threshold` from every cluster center.
            min_dist = 100.0
            for i, cluster in enumerate(clusters):
                dist = torch.sqrt(torch.sum(torch.mul(latent - cluster, latent - cluster)))
                min_dist = dist.item() if dist.item() < min_dist else min_dist
            if min_dist > threshold:
                FP += 1
            normal_min_dist += min_dist
            tbar.set_description('train normal min dist: %.3f' % (normal_min_dist / (index + 1)))
    print('accuracy:')
    print(FP/len_train_normal)
    TP = 0
    FP = 0
    # Test the model
    tbar = tqdm(test_normal_loader)
    with torch.no_grad():
        normal_min_dist = 0.0
        for index, line in enumerate(tbar):
            line = list(line)
            line[-1] = random.randint(0, 28)
            line = tuple(line)
            seq = torch.tensor(line, dtype=torch.float).view(-1, window_size, input_size).to(device)
            latent = model.get_latent(seq)
            min_dist = 100.0
            for i, cluster in enumerate(clusters):
                dist = torch.sqrt(torch.sum(torch.mul(latent - cluster, latent - cluster)))
                min_dist = dist.item() if dist.item() < min_dist else min_dist
            if min_dist > threshold:
                FP += 1
            normal_min_dist += min_dist
            tbar.set_description('normal min dist: %.3f' % (normal_min_dist / (index + 1)))
    tbar = tqdm(test_abnormal_loader)
    with torch.no_grad():
        abnormal_min_dist = 0.0
        for index, line in enumerate(tbar):
            seq = torch.tensor(line, dtype=torch.float).view(-1, window_size, input_size).to(device)
            latent = model.get_latent(seq)
            min_dist = 100.0
            for i, cluster in enumerate(clusters):
                dist = torch.sqrt(torch.sum(torch.mul(latent - cluster, latent - cluster)))
                min_dist = dist.item() if dist.item() < min_dist else min_dist
            if min_dist > threshold:
                TP += 1
            abnormal_min_dist += min_dist
            tbar.set_description('abnormal min dist: %.3f' % (abnormal_min_dist / (index + 1)))
    print('normal_avg_dist:')
    print(normal_min_dist/len_normal)
    print('abnormal_avg_dist:')
    print(abnormal_min_dist/len_abnormal)
    # Compute precision, recall and F1-measure
    FN = len(test_abnormal_loader) - TP
    P = 100 * TP / (TP + FP)
    R = 100 * TP / (TP + FN)
    F1 = 2 * P * R / (P + R)
    print('false positive (FP): {}, false negative (FN): {}, Precision: {:.3f}%, Recall: {:.3f}%, F1-measure: {:.3f}%'.format(FP, FN, P, R, F1))
    print('Finished Predicting')
    # generate random sequence
    random_hdfs = generate_random_hdfs(window_size, 10000)
    # test random seq
    avg_dist = 0.0
    with torch.no_grad():
        for index, line in enumerate(random_hdfs):
            seq = torch.tensor(line, dtype=torch.float).view(-1, window_size, input_size).to(device)
            latent = model.get_latent(seq)
            min_dist = 100.0
            for i, cluster in enumerate(clusters):
                dist = torch.sqrt(torch.sum(torch.mul(latent - cluster, latent - cluster)))
                min_dist = dist.item() if dist.item() < min_dist else min_dist
            # print('random seq: ', line, '~~min_distance: ', min_dist)
            avg_dist += min_dist
    print('average dist ', avg_dist/10000)
|
10,883 | 975f263eed5ce5a9ac237e5367a1515f521da966 | import PySimpleGUI as sg
from searchEngine import SearchEngine
sg.theme("LightGrey3")
# Single-window layout: a query row on top and a scrolling output pane below.
layout = [
    [sg.Text("Enter your query"), sg.Input(key="IN"), sg.Button("search", bind_return_key=True, key="search")],
    [sg.Output(size=(100,30))]]
def main():
    """Run the search-engine GUI event loop until the window is closed."""
    searchEn = SearchEngine()
    searchEn.startSearchEngine()
    window = sg.Window("My Search Engine", layout)
    while True:
        event, values = window.read()
        if event is None:
            # Window closed: release the engine's resources before exiting.
            searchEn.closeConnection()
            break
        if event == "search":
            searchEn.searchInterface(values["IN"])
    window.close()
main()
|
10,884 | 5a4f10718debe93c385119a160150497f1447202 | from framework.Logger import Logger
from testsutes.base_testcase import BaseTestCase
from pageobject.login import Homepage
import time
logger=Logger(logger="testmanagercase").getlog()
class managerCase(BaseTestCase):
    """End-to-end selenium scenario: admin manages a model, then a second
    user posts and replies to it."""

    def test_manage(self):
        homepage = Homepage(self.driver)
        # Log in as admin; `login` presumably returns the displayed user name
        # — TODO confirm against the Homepage page object.
        name = homepage.login("admin", "sa")
        if "admin" in name:
            # homepage.deltie()
            time.sleep(10)
            homepage.managermodel("sa", "ddd")
        # homepage.quit_browser()
        # Switch to a regular user account.
        namenew = homepage.login("fwz", "15935622817")
        if "fwz" in namenew:
            self.driver.switch_to.window(self.driver.current_window_handle)
            time.sleep(5)
            homepage.newmodelsend('小半小半', '空空留遗憾多孤单心伤')
            time.sleep(5)
            homepage.newmodelreply("好好听好好听好好听好好听好好")
            time.sleep(5)
|
10,885 | 6c48a67f9514918cebdefa37f1542b2e8b024a03 | """
publish Python objects as various API formats
"""
import tornado.web
import datetime
import json
import csv
import xmlrpclib
class Output():
    """Render a {'data': [row_dict, ...]} payload as HTML, CSV (xls) or JSON."""

    _format = "html"
    JSON = 'json'

    # output format -> HTTP Content-Type header value
    _ftype_map = {
        'html' : 'text/html',
        'xls' : 'application/excel',
        'json' : 'text/html',
        'xml' : 'application/rdf+xml'
    }

    def __init__(self, format):
        self._format = format

    def _dumpJSON(self, lst, callback):
        """Serialize to JSON; wrap in a JSONP callback call when given."""
        if callback:
            out = "%s (%s) " % (callback, json.dumps(lst))
        else:
            out = json.dumps(lst)
        return out

    def _dumpHTML(self, lst):
        """Render the rows as a simple bordered HTML table."""
        out = "<table border=1 width=100%>"
        for row in lst['data']:
            out = out + "<tr>"
            # .items() works on both Python 2 and 3 (iteritems was Py2-only).
            for k, v in row.items():
                out = out + "<td>%s</td>" % v
            out = out + "</tr>"  # fixed: was "<tr>", rows were never closed
        out = out + "</table>"
        return out

    def _dumpCSV(self, data):
        """Render the rows tab-separated, one line per row; blank cell for falsy values."""
        out = ""
        lst = data.get('data')
        for row in lst:
            for k, v in row.items():
                if v:
                    out = out + str(v) + "\t"
                else:
                    out = out + "\t"
            out = out + "\n"
        return out

    def getMimeType(self):
        """Return the Content-Type matching the configured format."""
        return self._ftype_map[self._format]

    def render(self, lst, callback=None):
        """Dispatch to the dumper for the configured format.

        :raises ValueError: for formats without a renderer (e.g. 'xml').
        """
        if self._format == 'html':
            return self._dumpHTML(lst)
        elif self._format == 'xls':
            return self._dumpCSV(lst)
        elif self._format == 'json':
            return self._dumpJSON(lst, callback)
        else:
            raise ValueError('unknown format')
10,886 | a10975b98d6f73dc981e6dea6216f4bbc3b753eb | pi = 2.
delta = 1.
i = 0
# Wallis product: pi/2 = prod_{i>=1} 4i^2 / (4i^2 - 1).
# Iterate until the estimate moves by less than 1e-11 between steps.
while delta > 0.00000000001:
    i = i + 1
    delta = abs(pi - pi * (4. * i ** 2. / (4. * i ** 2. - 1.)))
    pi = pi * (4. * i ** 2. / (4. * i ** 2. - 1.))
print(pi)  # fixed: Python 2 `print pi` statement -> print() call (2/3 compatible)
|
10,887 | 52f80aef9c0f4e86f7e25657398f1dc4470080ac | #------------------------------
# functional_tests.test_lists.test_layout_and_styling
#------------------------------
# Author: TangJianwei
# Create: 2019-02-25
#------------------------------
from selenium.webdriver.common.keys import Keys
from .base_lists import ListsTest
class LayoutAndStylingTest(ListsTest):
    ''' Page layout and styling tests.
    '''
    def test_001(self):
        ''' The input box is displayed centered.
        Mainly checks that Bootstrap has been loaded.
        '''
        self.browser.get(self.live_server_url)
        self.browser.set_window_size(600, 900)
        # The input box on the home page is centered (within 10px of x=300).
        input_box = self.get_item_input_box()
        self.assertAlmostEqual( \
            input_box.location['x'] + input_box.size['width'] / 2, \
            300, \
            delta=10 \
        )
        # The input box on the list page is centered as well.
        self.add_list_item('testing')
        input_box = self.get_item_input_box()
        self.assertAlmostEqual( \
            input_box.location['x'] + input_box.size['width'] / 2, \
            300, \
            delta=10 \
        )
10,888 | 4f18d01b26b56023abd8198ef050331d2805b367 | import torch
import torch.nn as nn
import torch.nn.functional as F
class Attention(nn.Module):
    """Additive (Bahdanau-style) attention over an encoder memory.

    Scores are v . tanh(W_q q + W_c m) for every (query step, memory step)
    pair, masked and softmax-normalized over the memory axis.
    """

    def __init__(self, query_size, context_size, hidden_size=None):
        super(Attention, self).__init__()
        # Default the attention hidden size to the context size. Fixed: the
        # original only assigned self.hidden_size inside the `is None` branch,
        # so passing an explicit hidden_size raised AttributeError below.
        self.hidden_size = context_size if hidden_size is None else hidden_size
        self.W_q = nn.Linear(query_size, self.hidden_size, bias=False)
        self.W_c = nn.Linear(context_size, self.hidden_size, bias=False)
        self.v = nn.Parameter(torch.normal(mean=torch.zeros(self.hidden_size),
                                           std=torch.ones(self.hidden_size)))

    def forward(self, query, memory, memory_mask):
        """
        :param query: mel data (batch_size, audio_len // r, input_dim)
        :param memory: encoder output (batch_size, text_len, embed_dim)
        :param memory_mask: mask (batch_size, audio_len, text_len)
        :return: align (batch_size, audio_len, text_len)
        """
        batch_size = memory.size(0)
        text_len = memory.size(1)
        embed_dim = memory.size(2)
        audio_len = query.size(1)
        input_dim = query.size(2)
        # Tile queries across memory steps and memory across query steps so
        # every (audio, text) pair can be scored with a single matmul.
        query_tiled = query.unsqueeze(2)
        query_tiled = query_tiled.repeat(1, 1, text_len, 1)
        query_tiled = query_tiled.view(-1, input_dim)  # (batch_size * audio_len * text_len, input_dim)
        memory_tiled = memory.unsqueeze(1)
        memory_tiled = memory_tiled.repeat(1, audio_len, 1, 1)
        memory_tiled = memory_tiled.view(-1, embed_dim)  # (batch_size * audio_len * text_len, embed_dim)
        info_matrix = torch.tanh(self.W_q(query_tiled) + self.W_c(memory_tiled))
        v_tiled = self.v.unsqueeze(0).repeat(batch_size * audio_len * text_len, 1)
        energy = torch.sum(v_tiled * info_matrix, dim=1)
        energy = energy.view(batch_size, audio_len, text_len)  # (batch_size, audio_len, text_len)
        # Masked positions get -inf so they contribute zero after softmax.
        energy = energy.float().masked_fill(memory_mask==0, float('-inf')).type_as(energy)
        alignment = F.softmax(energy.float(), dim=2).type_as(energy)  # (batch_size, audio_len, text_len)
        return alignment
|
10,889 | 52be2d7f625412d134ae45785cea8562002e8505 | import cv2
cap = cv2.VideoCapture(0)  # this is webcam
# Show live frames until the user presses 'q'; the last frame is saved.
while cap.isOpened():
    ret, back = cap.read()
    # back is what the camera is reading and ret is bascially that if a bool like whatever u r reading is successful/not
    if ret:
        cv2.imshow("image", back)
        if cv2.waitKey(10) == ord('q'):
            # save the image
            cv2.imwrite('image.jpg', back)
            break;
# Release the camera handle and close the preview window.
cap.release()
cv2.destroyAllWindows()
|
10,890 | f0e4594080481ea9cd10d1049864c1caff69facd | #!/usr/bin/env python
from transitions import Machine
from src.modes import Inactive, RTD, Autospray
import rospy
from agrodrone.srv import SetCompanionMode
from agrodrone.msg import CompanionMode
DEFAULT_MODE_PUBLISH_RATE = 1
class Modes(Machine):
    """
    This class holds all the modes and also functions as a state machine
    Transitions to a new mode are triggered by self.to_'mode name'()
    """

    def set_new_mode(self):
        """after_state_change callback: log the switch and cache the new mode object."""
        if self.cur_mode is not None:
            rospy.loginfo("Companion mode switch: from [%s] -> [%s]"
                          % (self.cur_mode.name, self.state))
        self.cur_mode = self.states[self.state]

    def __init__(self, vehicle):
        self.cur_mode = None
        self.mode_pub = None
        self.mode_pub_rate = None
        self.prev_publish_time = None
        self.set_mode_service = None
        self.vehicle = vehicle
        modes = [
            Inactive(self.vehicle),
            RTD(self.vehicle),
            Autospray(self.vehicle)
        ]
        self.initial_state = modes[0].name
        Machine.__init__(self,
                         states=modes,
                         initial=self.initial_state,
                         after_state_change='set_new_mode')
        self.set_new_mode()
        self.setup_services()
        self.setup_publisher()
        # TODO could be included as a transition, however, this might be more
        # resource friendly
        rospy.Timer(rospy.Duration(0.5), self.check_manual_mode_change)

    def check_manual_mode_change(self, event):
        """
        This method is used to regulary check if the fcu flight mode has been changed
        by the user manually instead of by the software self.
        If this is the case the state should be interrupted and set back to pending.
        """
        if self.vehicle.get_manual_mode_change(reset=True):
            # Fake a service-request object that only carries `mode_to_set`.
            data = lambda: None
            data.mode_to_set = "Inactive"
            self.set_companion_mode(data)

    def setup_publisher(self):
        """Create the companion-mode publisher and its rate limiter."""
        self.mode_pub = rospy.Publisher('/commander/companion_mode', CompanionMode, queue_size=3)
        mode_pub_rate = rospy.get_param("~mode_pub_rate", DEFAULT_MODE_PUBLISH_RATE)
        self.mode_pub_rate = rospy.Duration(1/mode_pub_rate)
        self.prev_publish_time = rospy.get_rostime()

    def setup_services(self):
        """Expose the set_companion_mode service."""
        self.set_mode_service = rospy.Service(
            '/commander/set_companion_mode',
            SetCompanionMode, self.set_companion_mode
        )

    def publish_mode(self):
        """Publish the current mode/state, throttled to mode_pub_rate."""
        now = rospy.get_rostime()
        if now - self.prev_publish_time > self.mode_pub_rate:
            info = CompanionMode()
            info.mode = self.cur_mode.name
            info.state = self.cur_mode.cur_state.name
            self.mode_pub.publish(info)
            self.prev_publish_time = now

    def set_companion_mode(self, data):
        """
        Service callback to set companion computer modes
        :param data: String that represents a mode
        :return: True/False when mode switch has taken place
        """
        mode_name = data.mode_to_set
        # Fixed: compare strings with !=, not `is not` (identity), which can
        # spuriously mismatch equal, non-interned strings.
        if self.cur_mode.name != mode_name:
            if mode_name == "Inactive":
                result = self.to_Inactive()
            elif mode_name == "RTD":
                result = self.to_RTD()
            elif mode_name == "Autospray":
                result = self.to_Autospray()
            else:
                rospy.logerr("Service mode transition: Mode (%s) not found." % mode_name)
                result = False
        else:
            rospy.logerr("Service mode transition: Already in this mode, not transitioning.")
            result = False
        return result

    def run(self):
        """Periodic tick: publish state, then delegate to the active mode."""
        self.publish_mode()
        self.cur_mode.run()
|
10,891 | b31c9db61ce58da20e3f6aca3b3a36d48b1cffb1 | from . import db
class PostModel(db.Model):
    """ORM model for a blog post stored in the `posts` table."""
    __tablename__ = "posts"
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String, nullable=False)
    content = db.Column(db.Text, nullable=False)
    # Author of the post; references users.id.
    user_id = db.Column(db.Integer, db.ForeignKey("users.id"), nullable=False)
|
10,892 | 65c3bdab8e21ed881f34248f1985f1cb9c3b6ef4 | from PIL import Image
def main():
    """Demo driver: exercise the Pixel class, then run grayscale detection
    on eliza.jpg and show the result."""
    image = Image.open("eliza.jpg")
    # image.show()
    my_pixel = Pixel((40, 100, 254))
    print(my_pixel)
    print("R component:", my_pixel.r)
    print("RGB tuple:", my_pixel.get_tuple())
    # my_pixel.set_rgb(255, 255, 0)
    # print("RGB tuple after changing it:", my_pixel.get_tuple())
    print()
    my_pixel.make_grayscale()
    print("After making it grayscale:", my_pixel.get_tuple())
    # Highlight grayscale pixels in red (mutates `image` in place).
    detect_grayscale(image)
    image.show()
    # gray_pixel = Pixel((80, 80, 80))
    # print(gray_pixel)
    # print("gray_pixel's R component:", gray_pixel.r)
    #
    # not_gray = Pixel((54, 54, 190))
    #
    # print()
    # print("my_pixel grayscale?", my_pixel.is_grayscale())
    # print("gray_pixel grayscale?", gray_pixel.is_grayscale())
    # print("not_gray grayscale?", not_gray.is_grayscale())
    # for y in range(image.height):
    #     for x in range(image.width):
    #         pixel_tuple = image.getpixel((x,y))
    #         pixel = Pixel(pixel_tuple)
    #
    #         ### Print all locations that are grayscale in image
    #         if pixel.is_grayscale():
    #             print("Location", (x, y), "is grayscale with a value of",
    #                   pixel.get_tuple())
class Pixel:
    """Represents a pixel in an image as mutable r, g, b components."""

    def __init__(self, rgb):
        """Create a pixel from an (r, g, b) tuple (extra elements ignored)."""
        self.r, self.g, self.b = rgb[0], rgb[1], rgb[2]

    def get_tuple(self):
        """Returns the tuple corresponding with this pixel."""
        return (self.r, self.g, self.b)

    def is_grayscale(self):
        """Return True if this is a grayscale pixel, and False otherwise."""
        return self.r == self.g and self.g == self.b

    def set_rgb(self, r, g, b):
        """Set the r, g, and b components of this pixel.

        Mutates in place; deliberately returns nothing.
        """
        self.r, self.g, self.b = r, g, b

    def luminance(self):
        """Return the luminance (integer mean of the three components)."""
        return (self.r + self.g + self.b) // 3

    def make_grayscale(self):
        """Replace every component with the pixel's luminance."""
        lum = self.luminance()
        self.set_rgb(lum, lum, lum)

    def __str__(self):
        """Return a string like (R: 40, G: 53, B: 214)."""
        return f"(R: {self.r}, G: {self.g}, B: {self.b})"
def detect_grayscale(image):
    """ Detects grayscale pixels in an image, making them bright red. All other
    pixels are turned into grayscale to make it easier to see the red pixels.

    Mutates `image` (a PIL Image) in place, one pixel at a time."""
    for y in range(image.height):
        print("y:", y)  # progress indicator: one line per pixel row
        for x in range(image.width):
            pixel = Pixel(image.getpixel((x, y)))
            if pixel.is_grayscale():
                ## Make pixel bright red
                pixel.set_rgb(255, 0, 0)
            else:
                ## Make this pixel grayscale
                pixel.make_grayscale()
            image.putpixel((x, y), pixel.get_tuple())
if __name__ == "__main__":
main()
|
10,893 | de669fdb077f7b04ceeb81a15d11c6195e73519a | '''
-Medium-
*Math*
*GCD*
*Binary Search*
Write a program to find the n-th ugly number.
Ugly numbers are positive integers which are divisible by a or b or c.
Example 1:
Input: n = 3, a = 2, b = 3, c = 5
Output: 4
Explanation: The ugly numbers are 2, 3, 4, 5, 6, 8, 9, 10... The 3rd is 4.
Example 2:
Input: n = 4, a = 2, b = 3, c = 4
Output: 6
Explanation: The ugly numbers are 2, 3, 4, 6, 8, 9, 10, 12... The 4th is 6.
Example 3:
Input: n = 5, a = 2, b = 11, c = 13
Output: 10
Explanation: The ugly numbers are 2, 4, 6, 8, 10, 11, 12, 13... The 5th is 10.
Example 4:
Input: n = 1000000000, a = 2, b = 217983653, c = 336916467
Output: 1999999984
Constraints:
1 <= n, a, b, c <= 10^9
1 <= a * b * c <= 10^18
It's guaranteed that the result will be in range [1, 2 * 10^9]
'''
class Solution(object):
    def nthUglyNumber(self, n, a, b, c):
        """
        Binary-search the smallest x such that at least n positive integers
        <= x are divisible by a, b, or c (counted by inclusion-exclusion).

        :type n: int
        :type a: int
        :type b: int
        :type c: int
        :rtype: int
        """
        def lcm(x, y):
            # Iterative Euclid for gcd, then lcm = x*y/gcd.
            g, r = x, y
            while r:
                g, r = r, g % r
            return x * y // g

        ab, bc, ac = lcm(a, b), lcm(b, c), lcm(a, c)
        abc = lcm(a, bc)

        def count(x):
            # How many "ugly" numbers lie in [1, x].
            return (x // a + x // b + x // c
                    - x // ab - x // bc - x // ac
                    + x // abc)

        low, high = 1, 2 * 10 ** 9
        while low < high:
            mid = (low + high) // 2
            if count(mid) >= n:
                high = mid
            else:
                low = mid + 1
        return low
if __name__ == "__main__":
print(Solution().nthUglyNumber(3, 2, 3, 5)) |
10,894 | 94249c16b35cc12105202be06b30c3925f5a8409 | #
# @lc app=leetcode id=110 lang=python3
#
# [110] Balanced Binary Tree
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    def isBalanced(self, root: TreeNode) -> bool:
        """Return True iff, at every node, the two subtree heights differ by at most 1."""
        def height_or_sentinel(node: TreeNode) -> int:
            # Returns the subtree height, or -1 meaning "imbalance found below"
            # (short-circuits further work once detected).
            if node is None:
                return 0
            left = height_or_sentinel(node.left)
            if left == -1:
                return -1
            right = height_or_sentinel(node.right)
            if right == -1:
                return -1
            return max(left, right) + 1 if abs(left - right) <= 1 else -1

        return height_or_sentinel(root) != -1
# @lc code=end
|
10,895 | aa8b7a846cb3261f2a5d5e8a48c49a89868ce6c9 | """
Copyright (C) 2020 Piek Solutions LLC
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import requests
import urllib.parse
class UartHttpInstrument:
    """Client for a UART device exposed over an HTTP bridge at /uart/.

    NOTE(review): the `except ValueError` guards below do not catch network
    failures from requests (e.g. ConnectionError), which will propagate.
    """

    def __init__(self, ip):
        # gpib address 29 is hardcoded for UART
        self.url = 'http://' + ip + '/uart/'

    def read(self):
        """
        read uart device
        :return: response string from device
        """
        try:
            req_url = self.url + 'read/'
            resp = requests.get(url=req_url)
            return resp.content.decode('utf-8')
        except ValueError:
            print("uart failed read")

    def query(self, command):
        """
        query uart device with command string, adding newline to the end
        :param command: (str)
        :return: response string from device
        """
        try:
            command += '\\n'
            cmd = urllib.parse.quote(command)  # escape special chars
            req_url = self.url + 'query/' + cmd
            resp = requests.get(url=req_url)
            return resp.content.decode('utf-8')
        except ValueError:
            print("uart failed query")

    def queryBytes(self, command):
        """
        query uart device with command string, adding newline to the end
        :param command: (str) hex-encoded, with 2 hex digits per byte
        :return: (bytes) response bytes from device
        """
        try:
            command += '0a'  # hex-encoded trailing newline
            req_url = self.url + 'bquery/' + command
            resp = requests.get(url=req_url)
            return resp.content
        except ValueError:
            print("uart failed queryBytes")

    def write(self, command):
        """
        write command string to uart instrument
        :param command: (str)
        :return: success
        """
        try:
            cmd = urllib.parse.quote(command)  # escape special chars
            req_url = self.url + 'write/' + cmd
            requests.get(url=req_url)
        except ValueError:
            print("uart failed write")

    def writeBytes(self, command):
        """
        write command string to uart instrument
        :param command: (str) hex-encoded, with 2 hex digits per byte
        :return: None
        """
        try:
            command += '0a'  # hex-encoded trailing newline
            req_url = self.url + 'bwrite/' + command
            requests.get(url=req_url)
        except ValueError:
            print("uart failed write")

    def set_config(self, data_rate, num_bits, parity, stop_bits, msg_timeout, byte_timeout):
        """
        set uart configuration
        :param data_rate: (int) baud rate
        :param num_bits: (int) number of bits in a message (7 or 8)
        :param parity: (int) 0=None, 1=Odd, 2=Even
        :param stop_bits: (int) stopbit value
        :param msg_timeout: (int) message timeout in ms
        :param byte_timeout: (int) byte read timeout in us
        :return: None
        """
        params = 'baud=%d&numbits=%d&parity=%d&stopbits=%d&m_timo=%d&b_timo=%d' \
            % (data_rate, num_bits, parity, stop_bits, msg_timeout, byte_timeout)
        try:
            req_url = self.url + 'config/?' + params
            requests.get(req_url)
        except ValueError:
            print('uart device failed set config')

    def get_config(self):
        """Fetch the current UART configuration as a dict (JSON decoded)."""
        try:
            req_url = self.url + 'getconfig/'
            resp = requests.get(req_url).json(strict=False)
            return resp
        except ValueError:
            print('uart device failed get config')
class Agilent_E3631(UartHttpInstrument):
    """Agilent E3631 power supply driven through the HTTP/UART bridge."""

    def _get_outPutOnOff(self):
        """Query the instrument's output state (:outp?) and cache it."""
        try:
            resp = self.query(':outp?')
            # Fixed: the original stored the parsed response into an unrelated
            # `_startWavelength` attribute (copy-paste from another driver),
            # so the cached output state was never refreshed from hardware.
            self._outpuOnOff = int(resp)
        except ValueError:
            print('Agilent E3631 query fails')
        return self._outpuOnOff

    def _set_outPutOnOff(self, x):
        """Switch the instrument output on (1) or off (0) and cache the value."""
        try:
            cmd = 'outp ' + str(x)
            self.write(cmd)
        except ValueError:
            print('Agilent E3631 write fails')
        self._outpuOnOff = x

    outputOnOff = property(_get_outPutOnOff, _set_outPutOnOff, "outputOnOff property")

    def queryCurrent(self):
        """Return the measured DC current (:meas:curr:dc?) as a float."""
        try:
            resp = self.query(':meas:curr:dc?')
        except ValueError:
            print('Agilent E3631 query failure')
        return float(resp)

    def queryVoltage(self):
        """Return the measured DC voltage (:meas:volt:dc?) as a float."""
        try:
            resp = self.query(':meas:volt:dc?')
        except ValueError:
            print('Agilent E3631 query failure')
        return float(resp)
|
10,896 | aa4bfd8f63df8690c7507234be5de1e61516a5ee | import sys
import random
# Gene (chromosome) class
class gen:
    """One chromosome: a binary-string sample plus its fitness share."""

    # Class-level defaults, shadowed per instance in __init__.
    sample = None
    fitness = None

    def __init__(self, sample_value, fitness_value):
        self.sample, self.fitness = sample_value, fitness_value
# Define the objective function
def calculate_fitness(value):
    """Fitness objective f(x) = 15x - x^2 (maximum at x = 7.5)."""
    return 15 * value - value * value
def generate_random_generation(length, quantity):
    """Build an initial population of `quantity` binary strings of `length` bits.

    NOTE(review): the randomly generated `generation_list` is currently dead
    code — the function returns a hard-coded `test` population instead,
    apparently left over from debugging. Confirm whether the random path
    should be restored.
    """
    max_value = 15
    min_value = 0
    generation_list = []
    for i in range(0, quantity):
        value = int(random.uniform(min_value, max_value + 1))
        binary = bin(value)[2:]
        binary = binary.zfill(length)
        generation_list.append(binary)
    test = ['1100', '0100', '0001', '1110', '0111', '1001']
    return test
    # return generation_list  (disabled: the random population above is unused)
def fitness_calculation(generation_list):
    """Return each gene's fitness as a percentage of the generation total.

    Genes are binary strings; each is decoded to an int, scored with
    calculate_fitness, and expressed as a share of the sum, rounded to 2 dp.
    """
    scores = [calculate_fitness(int(bits, 2)) for bits in generation_list]
    generation_sum = sum(scores)
    return [round((score / (generation_sum * 1.0)) * 100.0, 2) for score in scores]
def min_index(generation_fitness, invalid):
    """Return the index of the smallest fitness, skipping index `invalid`.

    Returns -1 when the list is empty or every index is excluded.
    Fixed: the original used the nonexistent `sys.minint` (AttributeError)
    and an inverted comparison that made it always return -1.
    """
    min_idx = -1
    min_value = float('inf')
    for i in range(0, len(generation_fitness)):
        if generation_fitness[i] < min_value and i != invalid:
            min_value = generation_fitness[i]
            min_idx = i
    return min_idx
def sort_gene_list(gene_list):
    """Return a new list of genes ordered from highest to lowest fitness."""
    from operator import attrgetter
    return sorted(gene_list, key=attrgetter('fitness'), reverse=True)
def crossover(generation_list, generation_fitness, length):
    """Produce the next generation via single-point crossover.

    The top 70% of chromosomes (ranked by fitness) are split at one random
    breaking point; offspring combine the best chromosome's halves with the
    halves of the other ranked chromosomes.

    Bug fix: gene objects were constructed with the undefined name ``gene``
    (the class is called ``gen``), which raised NameError on every call.
    """
    probability_value_in_number = int(len(generation_list) * .7)
    gene_list = []
    for i in range(0, len(generation_list)):
        gene_list.append(gen(generation_list[i], generation_fitness[i]))
    gene_list = sort_gene_list(gene_list)
    # Single random crossover point strictly inside the chromosome.
    breaking_point = random.randrange(1, length)
    first_portion = []
    second_portion = []
    for i in range(0, probability_value_in_number):
        first_portion.append(gene_list[i].sample[0:breaking_point])
        second_portion.append(gene_list[i].sample[breaking_point:length])
    semi_new_generation = []
    start = 1
    for i in range(0, len(generation_list)):
        if i <= probability_value_in_number - 1 and (i + 1) < len(second_portion):
            # Head of the best chromosome + tail of the next-ranked one.
            semi_new_generation.append(first_portion[0] + second_portion[i + 1])
        else:
            # Tail of the best chromosome + heads of the remaining ones.
            semi_new_generation.append(second_portion[0] + first_portion[start])
            start = start + 1
    return semi_new_generation
def mutation(new_generation):
    """Flip the leading bit of one randomly chosen chromosome.

    The list is modified in place and also returned for chaining.
    """
    target = random.randrange(0, len(new_generation))
    chromosome = new_generation[target]
    head, tail = chromosome[0:1], chromosome[1:]
    # Toggle the first bit only; the rest of the chromosome is untouched.
    if head == '1':
        new_head = head.replace('1', '0')
    else:
        new_head = head.replace('0', '1')
    new_generation[target] = new_head + tail
    return new_generation
def genetic_algorithm(length,quantity,iteration):
    """Run the genetic algorithm for `iteration` generations.

    length: bit-length of each chromosome.
    quantity: population size.
    iteration: number of selection/crossover/mutation rounds.

    Each round prints the current population with its fitness percentages
    (the printed label is Turkish: "generation and their fitness values").
    """
    new_generation=generate_random_generation(length, quantity)
    for i in range (0,iteration):
        generation_fitness=fitness_calculation(new_generation)
        print ('----------------------------------')
        print ('----------------------------------')
        print (i,'.nesil ve onların fitness değeri:')
        for j in range(0,len(new_generation)):
            print ('Gen',new_generation[j],'Fitness',generation_fitness[j])
        # Breed (crossover) and mutate to form the next generation.
        semi_new_generation=crossover(new_generation,generation_fitness,length)
        new_generation=mutation(semi_new_generation)

# NOTE(review): runs at import time (4-bit chromosomes, population 6,
# 100 generations); consider guarding with `if __name__ == "__main__":`.
genetic_algorithm(4,6,100)
|
10,897 | ddd7a2de462086904ea33d207db4ddfe97a5f090 | import random
import sys
import time
def _simulate(n, p):
return len([1 for _ in range(n) if random.random() < p])
def main():
    """CLI entry point: print a geometric-decay series of removals.

    Usage: script [N] [p] [--simulate]

    Picks N (population) and p (removal probability) from argv or at random,
    then repeatedly removes a fraction p of the remainder — exactly, or by
    Bernoulli simulation with --simulate — until less than 20% of N remains
    or 35 steps have elapsed. Prints N, p, and the per-step removal counts.
    """
    p = 0
    sim = "--simulate" in sys.argv
    args = [a for a in sys.argv if a != "--simulate"]

    if len(args) > 1:
        N = int(args[1])
    else:
        N = random.randint(5, 250)
    print(f"N = {N}")

    if len(args) > 2:
        p = float(args[2])
    # Re-roll until p is strictly inside (0.01, 0.99); this also rejects an
    # out-of-range p supplied on the command line.
    while not 0.01 < p < 0.99:
        p = round(random.random(), 2)
    print(f"p = {p}")

    remaining = N
    data = []
    while remaining > (N * 0.2) and len(data) < 35:
        removed = _simulate(remaining, p) if sim else int(remaining * p)
        data.append(removed)
        remaining -= removed
    print(" ".join(str(count) for count in data))
# Standard script guard: run main() only when executed directly, not on import.
if __name__ == "__main__":
    main()
|
10,898 | b668874db5535af924577f8abae3623385287b59 | def f(x):
from math import sqrt
return sqrt(1-x**2)
import matplotlib.pyplot as plt
import time  # NOTE(review): imported but unused in this script

# Sample f (sqrt(1 - x^2), the upper unit-circle arc) on [-0.5, 0.5] in steps
# of 0.1.  Floating-point accumulation of dx means the final x may slightly
# over/undershoot 0.5.
x, dx = -0.5, 0.1
X, Y, Points = [], [], []
while x <= 0.5:
    y = f(x)
    point = (x, y)
    X.append(x)
    Y.append(y)
    Points.append(point)  # collected but never used below
    x += dx

# Plot the sampled curve in yellow ('y'); save to file instead of showing.
plt.plot(X, Y, 'y')
plt.grid()
#plt.show()
plt.savefig('figure2.png')
10,899 | dc9fed3b80188ff3e257deae452721d973b7a4b7 | """Various non-core functions."""
import tensorflow as tf
import numpy as np
import pdb
import constants
####################
### THRESHOLDING ###
####################
def get_threshold_mask(hparams, x):
    """Threshold the mixtures to 1 or 0 for each TF bin.

    A bin is kept (1) when its value is at least
    min + hparams.threshold_factor * (max - min), where min/max are taken
    per example; bins below that cutoff become 0.

    Input:
        X_mixtures: B x T x F
    Output:
        X_mixtures: B x T x F \in {0,1}
    """
    # Reduce over every axis except batch so each example gets its own
    # min/max; keepdims=True lets the threshold broadcast against x.
    axis = list(range(1, x.shape.ndims))
    min_val = tf.reduce_min(x, axis=axis, keepdims=True)
    max_val = tf.reduce_max(x, axis=axis, keepdims=True)
    thresh = min_val + hparams.threshold_factor * (max_val - min_val)
    cond = tf.less(x, thresh)
    # Below threshold -> 0, otherwise -> 1.
    return tf.where(cond, tf.zeros(tf.shape(x)), tf.ones(tf.shape(x)))
def np_get_threshold_mask(hparams, x):
    """NumPy analogue of get_threshold_mask, returning an int32 0/1 mask.

    Note: unlike the TF version, min/max are taken over the whole array
    (not per example) and the comparison is strict (x > threshold).
    """
    lo, hi = np.min(x), np.max(x)
    cutoff = lo + hparams.threshold_factor * (hi - lo)
    return (x > cutoff).astype(np.int32)
def get_attractors(hparams, threshold_mask, embeddings, oracle_mask):
    """Calculate the attractors of the embeddings.

    An attractor is the mean embedding of the non-thresholded TF bins
    assigned to each class by the oracle mask.

    Input:
        threshold_mask: BxN - Binary Mask indicating non-thresholded TF bins
        embeddings: BxNxK - All N K-dimensional embeddings
        oracle_mask: BxNxC - Binary Mask indicating classification of each TF bin
    Output:
        attractors: BxCxK - C attractor points in the embedding space
    """
    # Keep only non-thresholded bins, split per class: BxNxC.
    threshold_mask = tf.expand_dims(threshold_mask, -1) * oracle_mask
    bin_count = tf.reduce_sum(threshold_mask, axis=1) # Count of non-threshold TF bins
    bin_count = tf.expand_dims(bin_count, -1)
    # Sum the embeddings belonging to each class: BxCxK.
    unnormalized_attractors = tf.einsum("bik,bic->bck", embeddings, threshold_mask)
    attractors = tf.divide(unnormalized_attractors, bin_count + 1e-6) # epsilon avoids division by zero
    return attractors
############
### MISC ###
############
def np_collapse_freq_into_time(x):
    """Merge the time and freq axes of x into one axis (NumPy version).

    (B, T, F) -> (B, T*F); (B, T, F, C) -> (B, T*F, C).
    """
    batch, time_bins, freq_bins = x.shape[0], x.shape[1], x.shape[2]
    if x.ndim == 4:
        return np.reshape(x, [batch, time_bins * freq_bins, -1])
    return np.reshape(x, [batch, time_bins * freq_bins])
def collapse_freq_into_time(x):
    """Collapse the freq and time dimensions (TF tensor version).

    (B, T, F) -> (B, T*F); (B, T, F, C) -> (B, T*F, C).
    NOTE(review): uses static x.shape[0..2], so these dimensions must be
    known at graph-construction time — confirm against callers.
    """
    if x.shape.ndims == 4:
        return tf.reshape(x, [x.shape[0], x.shape[1] * x.shape[2], -1])
    return tf.reshape(x, [x.shape[0], x.shape[1] * x.shape[2]])
def uncollapse_freq_into_time(hparams, x):
    """UNCollapse the freq and time dimensions.

    Inverse of collapse_freq_into_time: restores the (time, freq) axes using
    hparams.ntimebins and the global constants.nfreqbins.
    (B, T*F) -> (B, T, F); (B, T*F, C) -> (B, T, F, C).
    """
    if x.shape.ndims == 3:
        return tf.reshape(x, [x.shape[0], hparams.ntimebins, constants.nfreqbins, -1])
    return tf.reshape(x, [x.shape[0], hparams.ntimebins, constants.nfreqbins])
def collapse_time_into_batch(x):
    """Collapse the batch and time dimensions into one leading axis.

    (B, T, ...) -> (B*T, ...); -1 lets TF infer the merged length.
    """
    return tf.reshape(x, [-1] + x.shape.as_list()[2:])
def uncollapse_time_from_batch(hparams, x):
    """Separate the batch and time dimensions (inverse of collapse_time_into_batch).

    (B*T, ...) -> (hparams.batch_size, T, ...), with T inferred via -1.
    """
    return tf.reshape(x, [hparams.batch_size, -1] + x.shape.as_list()[1:])
def model_is_recurrent(model):
    """Return True if the model name contains 'lstm' (case-insensitive)."""
    lowered = model.lower()
    return lowered.find("lstm") != -1
def model_is_convolutional(model):
    """Return True if the model name contains 'cnn' (case-insensitive)."""
    lowered = model.lower()
    return lowered.find("cnn") != -1
def get_oracle_waveform_savedir(hparams):
    """Directory name for oracle-mask waveforms, keyed by the data source."""
    return "ORACLE_{}".format(hparams.data_source)
def get_kmeans_waveform_savedir(hparams):
    """Directory name for k-means-separated waveforms.

    Encodes the model, data source, and context length; convolutional models
    additionally encode filter width and channel count. A 'white_noise_'
    prefix is added when white-noise augmentation is enabled.
    """
    if model_is_convolutional(hparams.model):
        fields = (hparams.model, hparams.filter_shape[1], hparams.channels[0],
                  hparams.data_source, hparams.ntimebins)
        name = "%s_%d_c%d_%s_%d" % fields
    else:
        name = "%s_%s_%d" % (hparams.model, hparams.data_source, hparams.ntimebins)
    if hparams.add_white_noise:
        name = "white_noise_" + name
    return name
def flush(*args):
    """Call .flush() on every stream passed in (e.g. stdout plus log files)."""
    for stream in args:
        stream.flush()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.