seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8660980424 | import numpy as np
from ctypes import * # c 类型库
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.table import Table, vstack
import os
from scipy.stats import *
import time
z4figpre = '../z4/figs/'
z4datapre = '../z4/data/'
z5figpre = '../z5/figs/'
z5datapre = '../z5/data/'
z6figpre = '../z6/figs/'
z6datapre = '../z6/data/'
datapre = '../data/'
figpre = '../figs/'
#os.system("g++ evol.cpp -L/usr/local/lib class_gas.o LE_iso.o read_aTree.o class_halo.o dyn.o thermo.o reaction.o Newton5.o my_linalg.o gsl_inverse.o RK4.o -lgsl -lgslcblas -lm -o cc.so -shared -fPIC")
#libc = CDLL('cc.so') # 装入动态链接库 ## 居然必须放在这里
global G, h0, H0, Omega_m0, Omega_L0, m_H, mu, Ms, pi, km, pc, Myr, alpha_T
G, c, k_B, m_H = 6.67408e-8, 2.9979245e10, 1.38064852e-16, 1.66053904e-24
pi = 3.141593
mu = 1.2
Ms = 2.e33
Lsun = 3.828e33
pc = 3.e18
Mpc = 1.e6*pc
km = 1.e5
yr = 365*24*3600
Myr = 1.e6*(365*24*3600)
Omega_m0 = 0.307
Omega_L0 = 1 - Omega_m0
h0 = .677
H0 = h0*100*km/Mpc
t_Edd = 1./(4*pi*G/.4/(0.1*c))
fbol_1450 = 4.4
n_base = [1.63,1.09e-01,4.02e-03,3.87e-05,1.07e-08]
# n_base = [4.41e-01, 2.33e-02, 5.05e-04, 1.29e-06]
# n_base = [4.02e-03,3.87e-05,1.07e-08]
f_bsm = [.6,.4]
f_seed = 1.
W37 = 1e44
alpha_T = 2.324e4
Nbsm = 4
fstick = 20
fstxt = 20
fslabel = 23
fstitle = 20
fslegend = 20
my_cmap = plt.get_cmap("viridis")  # shared colormap for all plots in this module
rescale = lambda y: (y - np.min(y)) / (np.max(y) - np.min(y))  # min-max normalise an array to [0, 1]
def MF(M, z=6):
    """Schechter mass function Phi_star*(M/M_star)^alpha * exp(-M/M_star).

    Parameters
    ----------
    M : float or ndarray
        Mass in the same units as M_star (presumably Msun -- TODO confirm).
    z : int
        Redshift; only z=6 and z=4 are calibrated.

    Raises
    ------
    ValueError for any other redshift (previously the function fell through
    and silently returned None).
    """
    alpha = -1.03
    Phi_star = 1.23e-8
    M_star = 2.24e9
    if z == 6:
        return Phi_star*pow(M/M_star, alpha)*np.exp(-M/M_star)
    if z == 4:
        M_star *= 10  # break mass is ten times larger at z=4
        return Phi_star*pow(M/M_star, alpha)*np.exp(-M/M_star)
    raise ValueError("MF: only z=4 and z=6 are supported, got z=%r" % (z,))
def L_M(M, Edd_ratio):
    """Bolometric luminosity (erg/s) of a black hole of mass M (Msun)
    radiating at the given Eddington ratio."""
    eddington_per_msun = 1.25e38  # erg/s per solar mass
    return eddington_per_msun * Edd_ratio * M
def Mdot2M(Mdot):
    """Map an accretion rate onto a mass via a broken-fit relation.

    Below 0.04 the relation is linear; above 0.1 it is a log fit; in between
    the mass is log-linearly interpolated between the two regime boundaries.
    """
    eta = 1
    beta = 2.775e-6 * (1.5) ** .5
    low_cut = 0.04
    high_cut = 0.1
    if Mdot < low_cut:
        return eta * Mdot / beta
    if Mdot > high_cut:
        return (0.83 * np.log10(Mdot) + 2.48) * 1.e5
    # interpolate in log space between the masses at the two cut points
    mass_low = eta * low_cut / beta
    mass_high = (0.83 * np.log10(high_cut) + 2.48) * 1.e5
    frac = (np.log(Mdot) - np.log(low_cut)) / (np.log(high_cut) - np.log(low_cut))
    return np.exp(frac * np.log(mass_high) + (1 - frac) * np.log(mass_low))
def LF(l):  # dn/dlogL in Mpc^-3 dex^-1
    """Bolometric quasar luminosity function: a double power law with a
    linear-in-logL prefactor ramp (2 at L_1 up to 3 at L_2).

    l : bolometric luminosity in erg/s.
    """
    Phi_M_star = 1.14e-8
    M_star = -25.13
    alpha = -1.5; beta = -2.81
    Phi_L_star = Phi_M_star * 2.5  # per-magnitude -> per-dex normalisation

    def _mag_to_L(mag):
        # convert an M1450 magnitude into a bolometric luminosity (erg/s)
        return pow(10, -.4 * (mag - 34.1)) * 3e18 / 1450 * 1e7 * fbol_1450

    L_star = _mag_to_L(M_star)
    L_1 = _mag_to_L(-27.2)
    L_2 = _mag_to_L(-20.7)
    t = (np.log10(l) - np.log10(L_1)) / (np.log10(L_2) - np.log10(L_1))
    double_power_law = pow(l / L_star, -(alpha + 1)) + pow(l / L_star, -(beta + 1))
    return Phi_L_star / double_power_law * (2 * (1 - t) + 3 * t)
def LF_M1450(M, z=6):  # dn/dmag in Mpc^-3 mag^-1
    """Quasar UV (M1450) luminosity function, double power law.

    Parameters: M -- absolute magnitude at 1450 A; z -- redshift (4, 5 or 6).
    Returns dn/dmag in Mpc^-3 mag^-1.
    Raises ValueError for an unsupported redshift (previously this printed a
    message and then crashed with NameError on the unset parameters).
    """
    if z == 6:
        # Willot 2010 CFHQS + SDSS (kept for reference; superseded below)
        Phi_M_star = 1.14e-8
        M_star = -25.13
        alpha = -1.5; beta = -2.81
        # Matsuoka 2018 -- the values actually used
        Phi_M_star = 1.09e-8
        M_star = -24.9
        alpha = -1.23; beta = -2.73
    elif z == 5:
        # McGreer 2018 data (kept for reference; superseded below)
        Phi_M_star = pow(10., -8.97+0.47)
        M_star = -27.47
        alpha = -1.97; beta = -4.
        # refit by Matsuoka 2018 (beta & M_star); me: (alpha & Phi_M_star)
        Phi_M_star = 3.8e-8
        M_star = -25.6
        alpha = -1.23; beta = -3.
    elif z == 4:  # Akiyama 2018
        Phi_M_star = 2.66e-7
        M_star = -25.36
        alpha = -1.3; beta = -3.11
    else:
        raise ValueError("LF_M1450: unsupported redshift z=%r (expected 4, 5 or 6)" % (z,))
    return Phi_M_star/( pow(10., 0.4*(alpha+1)*(M-M_star)) + pow(10., 0.4*(beta+1)*(M-M_star)) )
def M1450_Lbol(L):
    """Absolute UV magnitude M1450 for a bolometric luminosity L (erg/s)."""
    flux_norm = fbol_1450 * 3e18 / 1450 * 1e7
    return 34.1 - 2.5 * np.log10(L / flux_norm)
def Lbol_M1450(M):
    """Bolometric luminosity (erg/s) for magnitude M1450 (inverse of M1450_Lbol)."""
    flux_norm = fbol_1450 * 3e18 / 1450 * 1e7
    return pow(10., -0.4 * (M - 34.1)) * flux_norm
# X-ray bolometric correction; Hopkins+07 & Duras+20
def K_AVE07(Lbol):
    """Hopkins+07 average X-ray bolometric correction Lbol/Lx."""
    ratio = Lbol / (1e10 * Lsun)
    return 10.83 * pow(ratio, 0.28) + 6.08 * pow(ratio, -0.02)
def K_AVE20(Lbol):
    """Duras+20 average X-ray bolometric correction Lbol/Lx."""
    a, b, c = 10.96, 11.93, 17.79
    return a * (1 + pow(np.log10(Lbol / Lsun) / b, c))
# obscured fraction = Type II AGN fraction
def f_obsc_U14(logLx, z):  # Ueda 14; 22< log NH < 24 fraction; as a func of Lx
    """Obscured (type II) AGN fraction from Ueda+14: a linear decline with
    X-ray luminosity, clipped into [phimin, phimax], normalised at
    logLx = 43.75 with a (1+z)^0.48 evolution."""
    eta = 1.7
    evol_index = .48
    phi_4375 = .43 * (1 + z) ** evol_index  # normalisation at logLx = 43.75
    phimax = (1 + eta) / (3 + eta)
    phimin = .2
    slope = .24
    fraction = phi_4375 - slope * (logLx - 43.75)
    fraction = max(fraction, phimin)
    fraction = min(fraction, phimax)
    # this is the summed fraction over the 22 < log NH < 24 column range
    return fraction
# constant obscured fraction; motivated by Vito+ 2018
f_obsc_const = .8
# correction factor including Compton thick AGNs; different fbol_Xray
def corr_U14H07(M1450): # Ueda+14 & Shankar+09
    """Correction factor (1+f_CTK)/(1-f_obscured) from observed to intrinsic
    quasar counts, using the Hopkins+07 bolometric correction and the
    Ueda+14 obscured fraction evaluated at fixed z = 2."""
    L_bol = Lbol_M1450(M1450)
    f_bol = K_AVE07(L_bol)
    Lx = L_bol/f_bol  # X-ray luminosity from the bolometric correction
    eta = 1.7
    a1 = .48  # redshift-evolution index of the obscured fraction
    phi4375_0 = .43
    phi4375_z = phi4375_0*(1+2.)**a1  # normalisation fixed at z = 2
    phimax = (1+eta)/(3+eta)
    phimin = .2
    beta = .24  # decline of the obscured fraction per dex of Lx
    phi = min( phimax, max(phi4375_z - beta*(np.log10(Lx)-43.75), phimin))
    f_obsc_sum = phi # sum over 22< log NH < 24 range
    f_CTK = phi  # Compton-thick fraction taken equal to phi -- TODO confirm vs Ueda+14
    return (1+f_CTK)/(1-f_obsc_sum)
def corr_U14D20(M1450): # Ueda 14
    """Correction factor (1+f_CTK)/(1-f_obscured) from observed to intrinsic
    quasar counts, using the Duras+20 bolometric correction and the Ueda+14
    obscured fraction evaluated at fixed z = 2.

    Accepts scalars or arrays. The previous implementation dispatched on
    isinstance(M1450, float), which broke for plain ints and 0-d arrays
    (the else branch called len() on them); np.clip handles both cases.
    """
    L_bol = Lbol_M1450(M1450)
    f_bol = K_AVE20(L_bol)
    Lx = L_bol/f_bol  # X-ray luminosity from the bolometric correction
    eta = 1.7
    a1 = .48  # redshift-evolution index of the obscured fraction
    phi4375_0 = .43
    phi4375_z = phi4375_0*(1+2.)**a1  # normalisation fixed at z = 2
    phimax = (1+eta)/(3+eta)
    phimin = .2
    beta = .24  # decline of the obscured fraction per dex of Lx
    # vectorised clip replaces the scalar/loop branches
    phi = np.clip(phi4375_z - beta*(np.log10(Lx)-43.75), phimin, phimax)
    f_obsc_sum = phi # sum over 22< log NH < 24 range
    f_CTK = phi  # Compton-thick fraction taken equal to phi -- TODO confirm vs Ueda+14
    return (1+f_CTK)/(1-f_obsc_sum)
def LF_M1450_CO(M, z):  # dn/dmag in Mpc^-3 mag^-1
    """Observed LF corrected for a constant obscured fraction."""
    visible_fraction = 1 - f_obsc_const
    return LF_M1450(M, z) / visible_fraction
def LF_M1450_DO(M, z):  # dn/dmag in Mpc^-3 mag^-1
    """Observed LF corrected with the luminosity-dependent Ueda+14/Duras+20 factor."""
    return corr_U14D20(M) * LF_M1450(M, z)
def t_freefall(nH):
    """Free-fall time (s) of gas with hydrogen number density nH (cm^-3)."""
    coeff = np.sqrt(32 * G * (mu * m_H) / (3 * pi))
    return 1. / coeff / np.sqrt(nH)
def t_from_z(z):  # age of universe at redshift z: tH = 2/(3Hz)
    """Age (s) of a matter-dominated universe at redshift z."""
    return 2. / (3 * H0 * np.sqrt(Omega_m0)) * pow(1 + z, -1.5)
def Tv(Mh, z):
    """Virial temperature (K) of a halo of mass Mh at redshift z."""
    return alpha_T * (Mh / 1.e8) ** (2. / 3.) * (1 + z) / 11.
def Mh_Tv(Tv, z):
    """Halo mass whose virial temperature is Tv at redshift z (inverse of Tv)."""
    return 1.e8 * (Tv / alpha_T / (1 + z) * 11.) ** 1.5
def Omega_mz(z):
    """Matter density parameter Omega_m at redshift z."""
    matter = Omega_m0 * (1 + z) ** 3
    return matter / (matter + Omega_L0)
def Hz(z):
    """Hubble parameter at redshift z."""
    return H0 * np.sqrt(Omega_m0 * (1 + z) ** 3 + Omega_L0)
def RHO_crit(z):
    """Mean matter density at redshift z (critical density scaled by Omega_m)."""
    return 3 * pow(H0, 2) / (8 * pi * G) * (1 + z) ** 3 * Omega_m0 / Omega_mz(z)
class HALO:
    """NFW dark-matter halo of mass Mh at redshift z0.

    Precomputes the concentration, characteristic overdensity, virial
    radius/velocity/temperature and related profile constants from the
    module-level cosmology globals. Units follow the module's cgs constants
    (G, Ms, ...), so Mh is presumably in grams -- TODO confirm with callers.
    """
    def __init__(self,M,z0):
        self.Mh = M
        self.z = z0
        self.c = 18*pow(self.Mh/(1.e11*Ms), -0.13)/(1+self.z) #concentration parameter c from Dekel & Birnboim 2006 Eq(22)
        c, z = self.c, self.z
        self.d = Omega_mz(z) - 1
        d = self.d
        self.Delta_crit = 18.0*pi*pi + 82*d - 39*d*d # Delta_crit ~ 200, overdensity
        Delta_crit = self.Delta_crit
        self.delta0 = self.Delta_crit/3.*pow(c,3)/(-c/(1+c) + np.log(1+c)) # characteristic overdensity parameter
        delta0 = self.delta0
        self.rho_crit = RHO_crit(z) # mean density of DM at z
        self.rho_c = self.rho_crit * delta0  # NFW central density scale
        self.Rvir = pow( self.Mh/(4./3*pi*Delta_crit*self.rho_crit),1./3. )
        self.Rs = self.Rvir/self.c  # NFW scale radius
        self.Vc = np.sqrt(G*self.Mh/self.Rvir)  # circular velocity at Rvir
        self.t_dyn = self.Rvir/self.Vc  # dynamical time at the virial radius
        self.Tvir = G*self.Mh*(mu*m_H)/(2.*k_B*self.Rvir)  # virial temperature
        self.gc = 2*c/(np.log(1+c) - c/(1+c))
        self.alpha = self.Tvir/self.Mh**(2./3)  # Tvir--mass normalisation
    def Rho_r(self, r):
        """NFW density at radius r (same length unit as Rvir)."""
        rho_crit, delta0, Rvir = self.rho_crit, self.delta0, self.Rvir
        c, x = self.c, r/Rvir
        return rho_crit*delta0/( c*x * (1+c*x)**2 )
    # x = r/Rvir c = Rvir/Rs
    def F_NFW(self,x):
        """Dimensionless NFW enclosed-mass function of x = r/Rvir."""
        c = self.c
        return -c*x/(1+c*x) + np.log(1+c*x)
    def M_enc(self,r):
        """Mass enclosed within radius r of the NFW profile."""
        rho_crit, delta0, Rs, Rvir = self.rho_crit, self.delta0, self.Rs, self.Rvir
        M_r = 4*pi*rho_crit*delta0*pow(Rs,3)*self.F_NFW(r/Rvir)
        return M_r
    def Phi(self, r):
        """NFW gravitational potential (negative) at radius r."""
        # lim r -> 0
        #return -4*pi*G*rho_crit*delta0*Rs*Rs
        rho_crit, delta0, Rs = self.rho_crit, self.delta0, self.Rs
        return -4*pi*G*rho_crit*delta0*(Rs**3)/r*np.log(1+r/Rs)
| lovetomatoes/BHMF | PYmodule/__init__.py | __init__.py | py | 8,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.get_cmap",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 61,
"usage_type": "name"
},
{
"api_name": "numpy.min"... |
2628647448 | import os
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
from ipdb import set_trace as bp
size_h, size_w = 600, 600
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False, dtype='uint8')
obj_xml = '''
<object>
<name>{}</name>
<bndbox>
<xmin>{}</xmin>
<ymin>{}</ymin>
<xmax>{}</xmax>
<ymax>{}</ymax>
</bndbox>
</object>\
'''
ann_xml = '''\
<annotation>
<filename>{}</filename>
<size>
<width>{}</width>
<height>{}</height>
<depth>3</depth>
</size>{}
</annotation>\
'''
def writedata(idx, image, label, group):
    """Write one two-digit sample to disk: a PNG plus a Pascal-VOC style XML.

    Parameters
    ----------
    idx : int       sample index used for the zero-padded file names.
    image : ndarray image to save, shape (H, W[, C]).
    label : list    [bbox0, bbox1, class0, class1] where each bbox is
                    ((xmin, ymin), (xmax, ymax)).
    group : str     dataset split name, e.g. 'train' or 'test'.
    """
    imgdir = 'data/' + group + '/images/'
    lbldir = 'data/' + group + '/labels/'
    if not os.path.exists(imgdir):
        os.makedirs(imgdir)
    if not os.path.exists(lbldir):
        os.makedirs(lbldir)
    imgname = '{:05d}.png'.format(idx)
    xmlname = '{:05d}.xml'.format(idx)
    cv2.imwrite(imgdir + imgname, image)
    xml = ''
    for i in [0, 1]:
        true_label = label[i + 2]
        xmin, ymin = label[i][0]
        xmax, ymax = label[i][1]
        xml += obj_xml.format(
            true_label, xmin, ymin, xmax, ymax)
    # BUG FIX: ann_xml has four placeholders (filename, width, height,
    # objects); the width/height arguments were missing, so every call
    # raised IndexError from str.format.
    xml = ann_xml.format(imgname, image.shape[1], image.shape[0], xml)
    with open(lbldir + xmlname, 'x') as file:
        file.write(xml)
def preprocess(data, targets):
    """Pair every digit image with a random partner and overlay them.

    Returns (images, labels) where each label is
    [bbox1, bbox2, class1, class2].
    """
    images, labels = [], []
    for first in range(data.shape[0]):
        second = np.random.choice(data.shape[0])
        merged, boxes = combine(pickimg(data, first), pickimg(data, second))
        images.append(merged)
        boxes.extend([targets[first], targets[second]])
        labels.append(boxes)
    return images, labels
def plotbbox(img, xmin, ymin, xmax, ymax):
    """Draw one magenta bounding box on a copy of `img` and show it in a
    blocking OpenCV window (press any key to close)."""
    img = np.copy(img)  # do not mutate the caller's image
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 255), 2)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # plt.imshow(img)
    # plt.show()
def combine(img1, img2, w=40):
    '''Overlay 2 images
    Returns:
        New image (w x w uint8) and bounding box locations
        [[tl1, br1], [tl2, br2]] where each corner is (x, y) = (col, row)
    '''
    img1 = croptobbox(img1)
    img2 = croptobbox(img2)
    img3 = np.zeros([w, w], 'uint8')  # blank output canvas
    # random centre positions for the two digits inside the canvas
    i1, i2 = np.random.randint(15, 25, size=2)
    j1, j2 = np.random.randint(12, 16, size=2)
    mask = img2 > 0  # only non-zero pixels of the second digit overwrite
    h1, w1 = img1.shape
    h2, w2 = img2.shape
    # convert centres to top-left corners; the second digit is shifted
    # 12 px to the right so the pair overlaps only partially
    i1 = i1 - h1 // 2
    i2 = i2 - h2 // 2
    j1 = j1 - w1 // 2
    j2 = j2 - w2 // 2 + 12
    img3[i1:i1 + h1, j1:j1 + w1] = img1
    img3[i2:i2 + h2, j2:j2 + w2][mask] = img2[mask]
    tl1 = j1, i1 # top-left as (x, y) = (column, row)
    br1 = j1 + w1, i1 + h1 # bottom right
    tl2 = j2, i2
    br2 = j2 + w2, i2 + h2
    return img3, [[tl1, br1], [tl2, br2]]
# <xmin>81</xmin>
def pickimg(images, index):
    """Return an independent 28x28 copy of the flattened image at `index`."""
    flat = images[index]
    return np.array(flat, copy=True).reshape(28, 28)
def findbbox(img):
    """Bounding box of all non-zero pixels as (row_min, row_max, col_min, col_max)."""
    rows, cols = np.where(img != 0)
    return np.min(rows), np.max(rows), np.min(cols), np.max(cols)
def croptobbox(img):
    """Crop `img` to the tight bounding box of its non-zero pixels.

    The slice ends are +1 so the max row/column are included: previously the
    bottom row and right column of the digit were cut off, and a digit
    occupying a single row or column cropped to an empty array.
    """
    rows, cols = np.where(img != 0)
    ymin, ymax = np.min(rows), np.max(rows)
    xmin, xmax = np.min(cols), np.max(cols)
    return np.copy(img[ymin:ymax + 1, xmin:xmax + 1])
def placeincanvas(canvas, img, i, j):
    """Paste the non-zero pixels of `img` onto `canvas` with top-left corner
    at row i, column j. Mutates `canvas` in place."""
    h, w = img.shape[:2]
    region = canvas[i:i + h, j:j + w]  # a view -- writes land in canvas
    nonzero = img > 0
    region[nonzero] = img[nonzero]
def applyscaling(img, size=None):
    """Randomly upscale `img` by independent x/y factors in [1, 2), or
    resize to an explicit (width, height) `size` when given."""
    fx = 2 ** np.random.sample()
    fy = 2 ** np.random.sample()
    if size is not None:
        x, y = size  # NOTE(review): unpacked but never used -- confirm intent
        return cv2.resize(np.copy(img), size, interpolation=cv2.INTER_CUBIC)
    return cv2.resize(np.copy(img), None, fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
# dataset = {
# 'train': {},
# 'test': {}
# }
#
# for group, data in [("train", mnist.train),("test", mnist.test)]:
# images, labels = preprocess(data.images, data.labels)
# dataset[group]['images'] = images
# dataset[group]['labels'] = labels
#
#
# for group in ['train', 'test']:
# images = dataset[group]['images']
# labels = dataset[group]['labels']
# for i in range(len(images)):
# writedata(i, images[i], labels[i], group)
# NOTE(review): `list` shadows the Python builtin. It holds a shuffled
# permutation of training indices, but ffg() below advances the bare
# `count` cursor and never indexes through this permutation -- confirm intent.
list = np.arange(len(mnist.train.images))
random.shuffle(list)
count = 0  # global cursor into the training set, advanced by ffg()
def ffg(path, size_h, size_w, sample_idx):
    """Render one size_h x size_w canvas on a 10x10 grid: each cell receives
    a randomly scaled MNIST digit with probability ~0.3, then the inverted
    canvas is written as PNG alongside a Pascal-VOC style XML label file."""
    global list
    global count
    imgdir = path + '/images/'
    lbldir = path + '/labels/'
    canvas = np.zeros((size_h, size_w), 'uint8')
    for path in [imgdir, lbldir]:
        if not os.path.exists(path):
            os.makedirs(path)
    step_size_h = int(size_h / 10)
    step_size_w = int(size_w / 10)
    xml = ''
    img_name = '{:05d}.png'.format(sample_idx)
    xml_name = '{:05d}.xml'.format(sample_idx)
    for h in range(0, size_h, step_size_h):
        for w in range(0, size_w, step_size_w):
            # wrap the global cursor and reshuffle once the set is exhausted
            if count == len(mnist.train.images)-1:
                count = 0
                random.shuffle(list)
            else:
                count += 1
            x = random.random()
            if x>0.7:
                # NOTE(review): indexes by the raw cursor, not via the
                # shuffled `list` permutation -- confirm intent.
                img = pickimg(mnist.train.images, count)
                lbl = mnist.train.labels[count]
                img = applyscaling(img)
                img = croptobbox(img)
                # bp()
                # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
                # cv2.imshow('image', img)
                # cv2.waitKey(0)w
                # cv2.destroyAllWindows()
                # small random jitter inside the grid cell
                tl_i = h+int(random.randint(0,5))
                tl_j = w+int(random.randint(0,5))
                br_i = tl_i + img.shape[0]
                br_j = tl_j + img.shape[1]
                placeincanvas(canvas, img, tl_i, tl_j)
                xml += obj_xml.format(lbl, tl_j, tl_i, br_j, br_i)
        # print(h)
    xml = ann_xml.format(img_name, *(size_h, size_w), xml)
    canvas = 255 - canvas  # invert: white background, dark digits
    cv2.imwrite(imgdir + img_name, canvas)
    with open(lbldir + xml_name, 'w+') as f:
        f.write(xml)
# print(len(mnist.train.images))
def preprocess2(path, images, labels, size, num_samples=1, digit_range=(1, 2)):
    """Render `num_samples` canvases, each with a random number of randomly
    scaled digits at random positions, writing a PNG plus a VOC-style XML
    per sample under path/images and path/labels.

    BUG FIX: the body referenced an undefined name `data` (left over from a
    removed loop) instead of the `images`/`labels` parameters, which raised
    NameError unless a global `data` happened to exist.
    """
    imgdir = path + '/images/'
    lbldir = path + '/labels/'
    for path in [imgdir, lbldir]:
        if not os.path.exists(path):
            os.makedirs(path)
    for sample_idx in tqdm(range(1, num_samples + 1)):
        img_name = '{:05d}.png'.format(sample_idx)
        xml_name = '{:05d}.xml'.format(sample_idx)
        canvas = np.zeros(size, 'uint8')
        xml = ''
        num_digits = np.random.randint(*digit_range)
        for i in np.random.randint(0, len(images), size=num_digits):
            img = pickimg(images, i)
            lbl = labels[i]
            img = applyscaling(img)
            img = croptobbox(img)
            # random top-left that keeps the digit fully on the canvas
            tl_i = np.random.randint(0, canvas.shape[0] - img.shape[0])
            tl_j = np.random.randint(0, canvas.shape[1] - img.shape[1])
            br_i = tl_i + img.shape[0]
            br_j = tl_j + img.shape[1]
            placeincanvas(canvas, img, tl_i, tl_j)
            xml += obj_xml.format(lbl, tl_j, tl_i, br_j, br_i)
        xml = ann_xml.format(img_name, *size, xml)
        canvas = 255 - canvas  # invert: white background, dark digits
        cv2.imwrite(imgdir + img_name, canvas)
        with open(lbldir + xml_name, 'x') as f:
            f.write(xml)
# f.write(xml)
def generate(num_img):
    """Create `num_img` composite canvases under ./data/data/."""
    for sample_idx in tqdm(range(num_img)):
        ffg('./data/data/', size_h, size_w, sample_idx)
# for data in [(mnist.train), (mnist.test)]:
generate(100)
# for group, data in [("train", mnist.train), ("test", mnist.test)]:
# preprocess2('./data/256-simple/' + group, data.images, data.labels, (size_h, size_w) , num_samples=data.images.shape[0])
# img = cv2.imread('data/toy/images/00000.png')
#
# # < xmin > 0 < / xmin >
# # < ymin > 0 < / ymin >
# # < xmax > 22 < / xmax >
# # < ymax > 34 < / ymax >
# plotbbox(img, 64, 1, 87, 34 )
| nguyenvantui/mnist-object-detection | mnist_gen.py | mnist_gen.py | py | 8,145 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.examples.tutorials.mnist.input_data",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": ... |
39076799665 | from string import ascii_lowercase
class Node:
    """A word node in the BFS ladder graph, linked to its parents on the
    previous BFS level so all shortest paths can be reconstructed."""

    def __init__(self, val, parents=None):
        # BUG FIX: the old `parents=[]` default was a single shared list,
        # mutated across every Node created without explicit parents.
        self.val = val
        self.parents = [] if parents is None else parents

    def __str__(self):
        return self.val
from collections import deque
from string import ascii_lowercase
class Solution:
    """LeetCode 126 (Word Ladder II): BFS level by level, recording every
    parent of each discovered word, then walking the parent DAG backwards
    to enumerate all shortest transformation sequences."""
    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Return every shortest beginWord -> endWord ladder, or []."""
        if not endWord in wordList:
            return []
        root = Node(beginWord)
        dq = deque()  # BFS queue of (node, level)
        dq.append((root, 0))
        wl = set(wordList)  # words still usable on this or later levels
        endNode = None
        pre_level = -1
        add = {}  # word -> Node for words discovered on the current frontier
        while len(dq):
            (head, level) = dq.popleft()
            if level != pre_level:
                # new BFS level: words used on the previous frontier may not
                # be reused, since ladders must be shortest
                for _, value in add.items():
                    wl.remove(value.val)
                add = {}
                pre_level = level
            if self.diffByOne(head.val, endWord):
                endNode = Node(endWord, [head])
                # drain the rest of this level to collect all of endWord's parents
                while len(dq) and dq[0][1] == level:
                    (head1, level1) = dq.popleft()
                    if self.diffByOne(endWord, head1.val):
                        endNode.parents += [head1]
                break
            # expand head: try every single-letter substitution
            for i in range(len(head.val)):
                for s in ascii_lowercase:
                    newWord = head.val[:i] + s + head.val[1 + i:]
                    if newWord in wl:
                        if not newWord in add:
                            add[newWord] = Node(newWord,[head])
                            dq.append((add[newWord], level + 1))
                        else:
                            # already seen on this frontier: just add a parent
                            add[newWord].parents += [head]
        if endNode:
            return self.formLists(endNode)
        else:
            return []
    def formLists(self, endNode):
        """Recursively expand the parent DAG into explicit word-list ladders."""
        if len(endNode.parents) == 0:
            return [[endNode.val]]
        ret = []
        for p in endNode.parents:
            l = self.formLists(p)
            ret += [x + [endNode.val] for x in l]
        return ret
    def diffByOne(self, s1, s2):
        """True when s1 and s2 have equal length and differ in exactly one position."""
        if len(s1) != len(s2): return False
        diff = 0
        for i in range(len(s1)):
            if s1[i] != s2[i]:
                diff += 1
        return diff == 1
return diff == 1 | YuxiLiuAsana/LeetCodeSolution | q0126.py | q0126.py | py | 2,196 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "string.ascii_lowercase",
"line_number": 36,
"usage_type": "name"
}
] |
74274579943 | import pytest
import ruleset
import util
import os
def get_testdata(rulesets):
    """
    In order to do test-level parametrization (is this a word?), we have to
    bundle the test data from rulesets into tuples so py.test can understand
    how to run tests across the whole suite of rulesets.

    Returns a list of (ruleset, test) tuples for the enabled tests only.
    """
    testdata = []
    # NOTE: the loop variable was previously named `ruleset`, shadowing the
    # imported `ruleset` module inside this function.
    for rs in rulesets:
        for test in rs.tests:
            if test.enabled:
                testdata.append((rs, test))
    return testdata
def test_id(val):
    """
    Dynamically names tests, useful for when we are running dozens to hundreds
    of tests. Returns "<ruleset name> -- <test title>", or None for values of
    other types (pytest then falls back to its default id).
    """
    if isinstance(val, (dict,ruleset.Test,)):
        # We must be careful here because errors are swallowed and defaults returned
        # NOTE(review): a plain dict has no .ruleset_meta / .test_title
        # attributes, so the dict branch would raise AttributeError if ever
        # taken -- confirm the intended input types.
        if 'name' in val.ruleset_meta.keys():
            return '%s -- %s' % (val.ruleset_meta['name'], val.test_title)
        else:
            return '%s -- %s' % ("Unnamed_Test", val.test_title)
@pytest.fixture
def destaddr(request):
    """
    Destination address override for tests (value of the --destaddr CLI option)
    """
    return request.config.getoption('--destaddr')
@pytest.fixture
def port(request):
    """
    Destination port override for tests (value of the --port CLI option)
    """
    return request.config.getoption('--port')
@pytest.fixture
def protocol(request):
    """
    Destination protocol override for tests (value of the --protocol CLI option)
    """
    return request.config.getoption('--protocol')
@pytest.fixture
def http_serv_obj():
    """
    Return an HTTP server bound to localhost port 80 for testing.
    """
    # BUG FIX: HTTPServer and SimpleHTTPRequestHandler were never imported,
    # so using this fixture raised NameError.
    from http.server import HTTPServer, SimpleHTTPRequestHandler
    return HTTPServer(('localhost', 80), SimpleHTTPRequestHandler)
@pytest.fixture
def with_journal(request):
    """
    Return full path of the testing journal (value of the --with-journal CLI option)
    """
    return request.config.getoption('--with-journal')
@pytest.fixture
def tablename(request):
    """
    Table name for journaling (value of the --tablename CLI option)
    """
    return request.config.getoption('--tablename')
def pytest_addoption(parser):
    """
    Adds command line options to py.test. Each option defaults to None so the
    plugin can tell whether the user supplied it.
    """
    parser.addoption('--ruledir', action='store', default=None,
                    help='rule directory that holds YAML files for testing')
    parser.addoption('--destaddr', action='store', default=None,
                    help='destination address to direct tests towards')
    parser.addoption('--rule', action='store', default=None,
                    help='fully qualified path to one rule')
    parser.addoption('--ruledir_recurse', action='store', default=None,
                    help='walk the directory structure finding YAML files')
    parser.addoption('--with-journal', action='store', default=None,
                    help='pass in a journal database file to test')
    parser.addoption('--tablename', action='store', default=None,
                    help='pass in a tablename to parse journal results')
    parser.addoption('--port', action='store', default=None,
                    help='destination port to direct tests towards', choices=range(1,65536),
                    type=int)
    parser.addoption('--protocol', action='store',default=None,
                    help='destination protocol to direct tests towards', choices=['http','https'])
def pytest_generate_tests(metafunc):
    """
    Pre-test configurations, mostly used for parametrization: loads rulesets
    from whichever of --ruledir/--ruledir_recurse/--rule was supplied and
    parametrizes tests that request the (ruleset, test) fixtures.
    """
    options = ['ruledir','ruledir_recurse','rule']
    args = metafunc.config.option.__dict__
    # Check if we have any arguments by creating a list of supplied args we want
    if [i for i in options if i in args and args[i] != None] :
        # NOTE(review): when more than one of these options is supplied,
        # later branches silently override the earlier rulesets.
        if metafunc.config.option.ruledir:
            rulesets = util.get_rulesets(metafunc.config.option.ruledir, False)
        if metafunc.config.option.ruledir_recurse:
            rulesets = util.get_rulesets(metafunc.config.option.ruledir_recurse, True)
        if metafunc.config.option.rule:
            rulesets = util.get_rulesets(metafunc.config.option.rule, False)
        if 'ruleset' in metafunc.fixturenames and 'test' in metafunc.fixturenames:
            metafunc.parametrize('ruleset,test', get_testdata(rulesets),
                ids=test_id)
| fastly/ftw | ftw/pytest_plugin.py | pytest_plugin.py | py | 3,940 | python | en | code | 263 | github-code | 36 | [
{
"api_name": "ruleset.tests",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "ruleset.Test",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixture",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "pytest.fixtu... |
18701687808 | import numpy as np
from numpy import linalg as LA
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from PIL import Image
from cv2 import imread,resize,cvtColor,COLOR_BGR2RGB,INTER_AREA,imshow
'''
VGG16模型,权重由ImageNet训练而来
使用vgg16模型提取特征
输出归一化特征向量
'''
class feature():
    """
    Feature extraction via VGG16 (ImageNet weights, global max pooling):
    produces an L2-normalised feature vector for an image file.
    """
    def __init__(self):
        # PERF FIX: build the (expensive) model once here instead of
        # re-instantiating VGG16 on every extract_feat() call.
        self.input_shape = (272, 480, 3)
        self.model = VGG16(input_shape=self.input_shape, pooling='max', include_top=False)

    def extract_feat(self, img_path):
        """Read the image at `img_path`, resize it to the model input size,
        run VGG16 and return the L2-normalised 1-D feature vector."""
        height, width = self.input_shape[0], self.input_shape[1]
        _img = imread(img_path)
        res = resize(_img, (width, height), interpolation=INTER_AREA)
        img = Image.fromarray(cvtColor(res, COLOR_BGR2RGB))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        feat = self.model.predict(img)
        norm_feat = feat[0] / LA.norm(feat[0])
        return norm_feat
if __name__ == '__main__':
print("local run .....")
# models = VGG16 (weights='imagenet', pooling = 'max', include_top=False)
# img_path = './database/001_accordion_image_0001.jpg'
# img = image.load_img (img_path, target_size=(224, 224))
# x = image.img_to_array (img)
# x = np.expand_dims (x, axis=0)
# x = preprocess_input (x)
# features = models.predict (x)
# norm_feat = features[0]/LA.norm(features[0])
# feats = np.array(norm_feat)
# print(norm_feat.shape)
# print(feats.shape)
img_path = "H:/datasets/testingset/19700102125648863.JPEG"
f = feature()
norm_feat = f.extract_feat(img_path)
print(norm_feat)
print(norm_feat.shape)
| 935048000/ImageSearch | core/feature_extraction.py | feature_extraction.py | py | 1,808 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "keras.applications.vgg16.VGG16",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",... |
43135278080 |
# Mnemonic: em.py
# Abstract: Run em (Expectation Maximisation)
#
# Author: E. Scott Danies
# Date: 06 March 2019
#
# Acknowledgements:
# This code is based in part on information gleaned from, or
# code examples from the following URLs:
# https://github.com/minmingzhao?tab=repositories
#
# ------------------------------------------------------------------
import sys
from time import time
import numpy as np
import scipy as sp
import pandas as pd
import sklearn
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# ---- my tools -----------------------------------------------------------
from tools import printf,fprintf,sprintf
from tools import parse_args,bool_flag,val_flag,str_flag,int_flag
from tools import print_pd
from plotter import c_cplot,gt_cplot
def usage() :
    """Print the command-line usage string for this script."""
    # BUG FIX: the message previously named the wrong script (kcluster.py)
    # and omitted the -d and -s flags defined below.
    printf( "usage: em.py [-d output-dir] [-f] [-i iterations] [-k k-value] [-s samp-rate] train-path test-path\n" )
# ----------------------------------------------------------------------------------------------------------
# -- parse command line and convert to convenience variables -----------------------------------------------
# if you programme in go, then you recognise the beauty here :)
flags = { # define possible flags and the default: map key, type, default value
    "-d": ("output-dir", str_flag, "/tmp"),
    "-f": ("classfirst", bool_flag, False),
    "-i": ("iterations", int_flag, 10),
    "-k": ("k-components", int_flag, 2), # number of clusters to divide into
    "-s": ("plot-samp-rate", int_flag, 10) # to keep plot output sizes reasonable, sample at x% for plots
}
opts = { } # map where option values or defaults come back
pparms = parse_args( flags, opts, "training-file [testing-file]" )
if pparms == None or len( pparms ) < 2 :
    printf( "missing filenames on command line (training testing)\n" )
    sys.exit( 1 )
train_fn = pparms[0] # file names; training validation (test)
test_fn = pparms[1];
components = opts["k-components"]
out_dir = opts["output-dir"]
np.random.seed( 17 )  # fixed seed for reproducible runs
# -----------------------------------------------------------------------------------------
train_data = pd.read_csv( train_fn, sep= ',' ) # suck in datasets
test_data = pd.read_csv( test_fn, sep= ',' )
train_n, train_p = train_data.shape # number of training instances and parameters
test_n, test_p = test_data.shape
if opts["classfirst"] : # class target value is in col 0
    data = train_data.iloc[:,1:train_p]
    labels = train_data.values[:,0] # get just the first column which has 'truth'
else :
    data = train_data.iloc[:,0:train_p-1]
    labels = train_data.values[:,-1] # get just the last column which has 'truth'
data = data.values
data_n, data_p = data.shape # data instances and parameters
printf( "data: %d instances %d parameters\n", data_n, data_p )
#--------------------------------------------------------------------------------------------
printf( "#%-5s %-5s %-5s %-5s %-5s %-5s %-5s\n", "ACC", "HOMO", "COMPL", "VM", "ARAND", "MI", "CH-idx" )
# fit a Gaussian mixture (expectation maximisation) and score it against the
# true labels, once per requested iteration; only the first fit is plotted
for i in range( opts["iterations"] ) :
    em = GaussianMixture( n_components=components, n_init=13, covariance_type="full" ).fit( data )
    guess = em.predict( data )
    acc = metrics.accuracy_score( labels, guess )
    homo = metrics.homogeneity_score( labels, guess ) # compare the true lables to those em predicted
    comp = metrics.completeness_score( labels, guess )
    vm = metrics.v_measure_score( labels, guess )
    arand = metrics.adjusted_rand_score( labels, guess )
    mi = metrics.adjusted_mutual_info_score( labels, guess, average_method="arithmetic" )
    ch = metrics.calinski_harabaz_score( data, guess );  # NOTE(review): renamed calinski_harabasz_score in newer sklearn -- confirm version
    printf( " %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f\n", acc, homo, comp, vm, arand, mi, ch)
    if i == 0 : # just plot the first
        tokens = train_fn.split( "/" ); # build file name as emax_<data-type>_<clusters>.eps
        tokens = tokens[-1].split( "_" )
        title = sprintf( "Exp Max %s k=%d", tokens[0], components )
        gfname = sprintf( "%s/emax_%s_%d.eps", out_dir, tokens[0], components )
        # pretend guess is ground truth and plot predicted cluster
        gt_cplot( data, guess, components, gfname, title, sample_pct=opts["plot-samp-rate"], cpattern=2 )
sys.exit( 0 )
| ScottDaniels/gtcs7641 | a3/em.py | em.py | py | 4,623 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tools.printf",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "tools.str_flag",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "tools.bool_flag",
"line_number": 47,
"usage_type": "name"
},
{
"api_name": "tools.int_flag",
"lin... |
1054566885 | # -*- coding = utf-8 -*-
# @Time : 2021/5/5 18:25
# @Author : 水神与月神
# @File : 灰度转彩色.py
# @Software : PyCharm
import cv2 as cv
import numpy as np
import os
import mypackage.dip_function as df
# demo
# path = r"C:\Users\dell\Desktop\8.png"
#
# image = cv.imread(path, cv.IMREAD_UNCHANGED)
#
# image1 = image[:, :, 0]
# image2 = image[:, :, 1]
# image3 = image[:, :, 2]
#
# gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# gray2 = cv.bitwise_not(gray)
# image4 = image3 - image2
# im_color = cv.applyColorMap(gray2, cv.COLORMAP_PARULA)
# # COLORMAP_SUMMER
# # COLORMAP_RAINBOW
# # COLORMAP_PARULA
# cv.imshow("images", im_color)
# cv.waitKey()
# cv.destroyWindow('images')
# Batch job: histogram-equalise every grayscale image under each source
# folder, map it to a JET pseudo-colour image, then damp the red channel
# globally and the green channel row-dependently before saving.
path = r'G:\colour_img'
save = r'G:\colour_img\processed'
folders = os.listdir(path)
for folder in folders[0:-1]:  # last directory (the output dir) is skipped
    path_read = os.path.join(path, folder)
    path_save = os.path.join(save, folder)
    paths = df.get_path(path_read, path_save)  # pairs of (input, output) file paths
    for p in paths:
        image = cv.imread(p[0], cv.IMREAD_UNCHANGED)
        # NOTE(review): equalizeHist requires an 8-bit single-channel image;
        # IMREAD_UNCHANGED may load multi-channel data -- confirm inputs.
        gray = cv.equalizeHist(image)
        colour = cv.applyColorMap(gray, cv.COLORMAP_JET)
        # red channel: subtract 150 with a floor of 70
        r = colour[:, :, 2]
        shaper = r.shape
        for i in range(shaper[0]):
            for j in range(shaper[1]):
                if r[i][j] - 150 < 0:
                    r[i][j] = 70
                else:
                    r[i][j] = r[i][j] - 150
        temp = r
        colour[:, :, 2] = temp
        # green channel: top 600 rows subtract 100 (floor 70); remaining
        # rows are capped at 160
        g = colour[:, :, 1]
        shapeg = g.shape
        for i in range(shapeg[0]):
            if i < 600:
                for j in range(shapeg[1]):
                    if g[i][j] - 100 < 70:
                        g[i][j] = 70
                    else:
                        g[i][j] = g[i][j] - 100
            else:
                for j in range(shapeg[1]):
                    if g[i][j] > 160:
                        g[i][j] = 160
                    # elif g[i][j] < 70:
                    #     g[i][j] = 70
        temp = g
        colour[:, :, 1] = temp
        cv.imwrite(p[1], colour)
        print("保存成功")
| mdwalu/previous | 数字图像处理/灰度转彩色.py | 灰度转彩色.py | py | 2,065 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number"... |
3572388059 | ##
# @file mathlib.py
# @package mathlib
# @brief Module with functions to convert and evaluate expression using expression tree
import treeClass
import logging
# """Priorities of operators"""
priority = {
'!' : 3,
'^' : 2,
'*' : 1,
'/' : 1,
'%' : 1,
'+' : 0,
'-' : 0,
}
# """Associativity of operators"""
associativity = {
'!' : "unar",
'^' : "RL",
'*' : "LR",
'/' : "LR",
'%' : "LR",
'+' : "LR",
'-' : "LR",
}
##
# @brief Check if element is an operator
# @param element Element of expression to be checked
# @return True if element is an operator, False - otherwise
def is_operator(element):
return element in priority.keys()
##
# @brief Check if priority of the first is higher/lower/equal than priority of the second operator
# @param oper1 The first operator
# @param oper2 The second operator
# @return 1 if priority of oper1 is higher than the priority of oper2, -1 - if lower, 0 - if equal
def check_priority(oper1, oper2):
    """Compare operator priorities: 1 if oper1 binds tighter than oper2,
    -1 if looser, 0 if equal."""
    p1, p2 = priority[oper1], priority[oper2]
    return (p1 > p2) - (p1 < p2)
##
# @brief Convert infix expression into postfix expression
# @param infix_expr Given infix expression
def to_postfix(infix_expr):
    """Shunting-yard conversion of a tokenisable infix sequence into a list
    of postfix tokens. Multi-character operands are accumulated greedily
    until the next operator or ')'."""
    oper_stack = treeClass.Stack()
    postfix_expr = []
    position = 0
    while position < len(infix_expr):
        # Adding operand to postfix
        elem = infix_expr[position]
        if is_operator(elem):
            top_stack = oper_stack.top()
            if top_stack == None or top_stack == '(': # stack empty or open paren: just push
                oper_stack.push(elem)
                position += 1
            else:
                if check_priority(elem, top_stack) == 1:
                    # incoming operator binds tighter: push it
                    oper_stack.push(elem)
                    position += 1
                elif check_priority(elem, top_stack) == 0:
                    # equal priority: pop for left-associative, push for
                    # right-associative operators
                    if associativity[elem] == "LR":
                        postfix_expr.append(oper_stack.pop())
                    elif associativity[infix_expr[position]] == "RL":
                        oper_stack.push(elem)
                        position += 1
                elif check_priority(elem, top_stack) == -1:
                    # looser binding: emit the tighter operator first
                    postfix_expr.append(oper_stack.pop())
        elif elem == '(':
            oper_stack.push(elem)
            position += 1
        elif elem == ')':
            # pop everything down to (and including) the matching '('
            top_stack = oper_stack.top()
            while top_stack != '(':
                postfix_expr.append(oper_stack.pop())
                top_stack = oper_stack.top()
            oper_stack.pop()
            position += 1
        else:
            # accumulate a (possibly multi-character) operand
            operand = ''
            temp_position = position
            while temp_position < len(infix_expr) and (not is_operator(infix_expr[temp_position])
                    and infix_expr[temp_position] != ')'):
                operand += infix_expr[temp_position]
                logging.debug(f"{temp_position} : <{operand}>")
                temp_position += 1
            postfix_expr.append(operand)
            position += (temp_position - position)
    # flush any operators still on the stack
    while oper_stack.size() != 0:
        postfix_expr.append(oper_stack.pop())
    return postfix_expr
##
# @brief Function to be called from side modules. Evaluates an input expression. Converting into postfix, building expression tree and evaluating it
# @param input_expr Given input expression
# @return Float value of evaluated expression
def evaluate(input_expr):
    """Evaluate an infix expression: infix -> postfix -> tree -> value."""
    logging.debug(f"INPUT: <{input_expr}>")
    postfix_expr = to_postfix(input_expr)
    logging.debug(f"POSTFIX INPUT: <{postfix_expr}>")
    tree = treeClass.EqTree(list(postfix_expr))
    return tree.evaluate_tree(tree.root)
if __name__ == "__main__":
    # Manual smoke test: read one expression from stdin and print its value,
    # with debug logging enabled to trace the postfix conversion.
    logging.basicConfig(level=logging.DEBUG)
    line = input()
    print(evaluate(line))
| Hedgezi/jenna_calcutega | src/mathlib.py | mathlib.py | py | 4,055 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "treeClass.Stack",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 100,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"line_number": 117,
"usage_type": "call"
},
{
"api_name": "logging.debug",
"li... |
70167417064 | from mongoengine import Q
from django_pds.conf import settings
from django_pds.core.managers import UserReadableDataManager, GenericReadManager, UserRoleMapsManager
from django_pds.core.rest.response import error_response, success_response_with_total_records
from django_pds.core.utils import get_fields, get_document, is_abstract_document
from django_pds.core.utils import print_traceback
from django_pds.serializers import GenericSerializerAlpha
from ..parser.query import QueryParser
from ..parser.terms import FILTER, WHERE, SELECT, PAGE_SIZE, PAGE_NUM, ORDER_BY, RAW_WHERE
NOT_SELECTABLE_ENTITIES_BY_PDS = settings.SELECT_NOT_ALLOWED_ENTITIES
SECURITY_ATTRIBUTES = settings.SECURITY_ATTRIBUTES
def basic_data_read(document_name, fields='__all__',
                    page_size=10, page_num=1, order_by=None,
                    include_security_fields=False,
                    error_track=False):
    """Read one page of documents without any permission filtering.

    Returns an ``(error, response)`` tuple: ``error`` is True when
    ``response`` is an error payload, False on success.
    """
    try:
        document = get_document(document_name)
        if not document or not document_name:
            return True, error_response(f'document by name `{document_name}` doesn\'t exists')
        if fields != '__all__' and not isinstance(fields, (list, tuple)):
            return True, error_response('fields must be a list or tuple')

        # Unfiltered read: empty query object, plain pagination.
        reader = GenericReadManager()
        rows, total = reader.read(document_name, Q(), page_size, page_num, order_by)
        if total == 0:
            return False, success_response_with_total_records([], total)

        serializer = GenericSerializerAlpha(document_name=document_name)
        if fields == '__all__':
            selected = get_fields(document_name)
            if not include_security_fields:
                # Strip ACL/bookkeeping attributes from the projection.
                selected = tuple(set(selected) - set(SECURITY_ATTRIBUTES))
            serializer.fields(selected)
        else:
            for name in fields:
                serializer.select(name)

        payload = serializer.serialize(rows)
        return False, success_response_with_total_records(payload.data, total)
    except BaseException as e:
        if error_track:
            print_traceback()
        return True, error_response(str(e))
def data_read(
        document_name, sql_text, user_id=None,
        roles=None, checking_roles=True,
        readable=True, security_attributes=True,
        selectable=True, read_all=False, exclude_default=False,
        page_number=1, _size=10, error_track=False):
    """
    Permission-aware paged read driven by a PDS query string.

    Parses ``sql_text``, validates the WHERE/SELECT clauses against the
    document schema and the caller's readable fields, appends row-level
    read permissions (IdsAllowedToRead / RolesAllowedToRead), then runs
    the query and serializes the selected fields.

    :param page_number: fallback page number when the query carries none
    :param _size: fallback page size when the query carries none
    :param checking_roles: include role-based row-level permission filters
    :param document_name: name of the mongoengine document to query
    :param sql_text: PDS query string (where/select/order by/paging)
    :param user_id: requesting user's id; enables id-based row checks
    :param roles: explicit role list; looked up from user_id when omitted
    :param readable: enforce the user-readable-fields whitelist on SELECT
    :param security_attributes: reject security fields in the WHERE clause
    :param selectable: honor the globally non-selectable entity list
    :param read_all: select every document field instead of the SELECT list
    :return: (error, response) tuple; ``error`` True for an error payload
    """
    document = get_document(document_name)
    # checking either model exists
    # or entity exists in not selectable entities
    # NOTE(review): these early rejections return False (success flag) while
    # later validation failures return True — confirm callers expect this.
    if not document:
        return False, error_response('document model not found')
    if is_abstract_document(document_name):
        return False, error_response('document model not found')
    if selectable and document_name in NOT_SELECTABLE_ENTITIES_BY_PDS:
        return False, error_response('document model is not selectable')
    try:
        parser = QueryParser(sql_text)
        dictionary = parser.parse()
        # filtering fields in where clause: every filtered attribute must be
        # a real document field.
        _filters = []
        if dictionary.get(FILTER, None):
            _filters = dictionary[FILTER]
        filter_fields = set(_filters)
        document_fields = set(get_fields(document_name))
        if len(filter_fields - document_fields) > 0:
            return True, error_response('Where clause contains unknown attribute to this Document')
        if security_attributes:
            # Callers may not filter on ACL/security attributes directly.
            security_attr = set(SECURITY_ATTRIBUTES)
            contains_security_attributes = filter_fields.intersection(security_attr)
            if len(contains_security_attributes) > 0:
                return True, error_response('Security attributes found in where clause')
        # checking user readable data from database for this particular request;
        # default projection is just the ItemId.
        fields = ['ItemId']
        if dictionary.get(SELECT, None):
            fields = dictionary[SELECT]
        if read_all:
            fields = document_fields
        urm_ctrl = UserRoleMapsManager()
        if readable:
            # Column-level permission: every selected field must be in the
            # user's readable-fields set for this document.
            urds_ctrl = UserReadableDataManager()
            __roles = None
            if user_id and not roles:
                __roles = urm_ctrl.get_user_roles(user_id)
            err, _fields = urds_ctrl.get_user_readable_data_fields(document_name, __roles, exclude_default)
            if err:
                msg = f'Entity \'{document_name}\' is missing from user readable data\'s'
                return True, error_response(msg)
            diff = set(fields) - _fields  # _fields are already a set
            if len(diff) > 0:
                return True, error_response("Select clause contains non readable attributes")
        sql_ctrl = GenericReadManager()
        # NOTE(review): __raw__where is computed but never used — confirm.
        __raw__where = dictionary.get(RAW_WHERE, {})
        page_num = dictionary.get(PAGE_NUM, page_number)
        page_size = dictionary.get(PAGE_SIZE, _size)
        q = Q()
        if dictionary.get(WHERE, None):
            q = dictionary[WHERE]
        # checking for row level permission starts
        q2 = Q()
        if user_id:
            q2 = Q(IdsAllowedToRead=user_id)
        if checking_roles:
            if not roles and user_id:
                roles = urm_ctrl.get_user_roles(user_id)
            if roles and not isinstance(roles, (list, tuple)):
                return True, error_response('roles must be a list or a tuple.')
            # NOTE(review): if user_id is None and no roles were supplied,
            # `roles` is still None here and this loop raises TypeError,
            # which the broad except below converts into an error response.
            for role in roles:
                q2 = q2.__or__(Q(RolesAllowedToRead=role))
        if user_id or (checking_roles and roles):
            q = q.__and__(q2)
        # checking for row level permission ends
        order_by = []
        if dictionary.get(ORDER_BY, None):
            order_by = dictionary[ORDER_BY]
        data, cnt = sql_ctrl.read(document_name, q, page_size, page_num, order_by)
        if cnt == 0:
            return False, success_response_with_total_records([], cnt)
        gsa = GenericSerializerAlpha(document_name=document_name)
        for field in fields:
            gsa.select(field)
        json = gsa.serialize(data)
        res = success_response_with_total_records(json.data, cnt)
        return False, res
    except BaseException as e:
        if error_track:
            print_traceback()
        return True, error_response(str(e))
| knroy/django-pds | django_pds/core/pds/generic/read.py | read.py | py | 6,463 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django_pds.conf.settings.SELECT_NOT_ALLOWED_ENTITIES",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django_pds.conf.settings",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "django_pds.conf.settings.SECURITY_ATTRIBUTES",
"line_numbe... |
20145639624 | """
Cobweb plot function
"""
import numpy as np
import matplotlib.pyplot as plt
__all__ = [
'cobweb'
]
def cobweb(func, initial_conditon, nsteps, limits, args=(), ax=None):
    """
    Plot a cobweb diagram for the one-dimensional iterated map
    ``x[n+1] = func(x[n])``.

    Parameters
    ----------
    func : callable
        Function computing the next system state from the current one.
    initial_conditon : float
        Simulation initial condition.
    nsteps : int
        Number of steps displayed on the cobweb diagram.
    limits : 2 elements array_like
        Lower and upper limits for the cobweb diagram.
    args : tuple, optional
        Extra arguments passed to ``func``.
    ax : matplotlib axis object, optional
        Axis to draw on; a new one is created when omitted.
    """
    if ax is None:
        _, ax = plt.subplots()

    # Identity line (black) and the map itself (blue) over the window.
    xs = np.linspace(limits[0], limits[1], 1000)
    ys = [func(v, *args) for v in xs]
    ax.plot(xs, xs, linewidth=1.5, color='black')
    ax.plot(xs, ys, linewidth=1.5, color='blue')

    # Staircase segments: (x, x) -> (x, f(x)) -> (f(x), f(x)).
    current = initial_conditon
    for _ in range(nsteps):
        nxt = func(current, *args)
        ax.plot([current, current, nxt],
                [current, nxt, nxt],
                color='red',
                marker='o',
                markersize=3,
                markerfacecolor='black')
        current = nxt
| antonior92/dynamic-system-plot | dynplt/cobweb.py | cobweb.py | py | 1,586 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 40,
"usage_type": "call"
}
] |
import pyautogui
import schedule
import time
import datetime
import random

# Disable the corner-abort safety so automated moves never raise FailSafeException.
pyautogui.FAILSAFE = False
screenWidth, screenHeight = pyautogui.size() # Get the size of the primary monitor.
currentMouseX, currentMouseY = pyautogui.position() # Get the XY position of the mouse.
datetime.datetime.now()  # NOTE(review): result discarded — likely leftover debug
print(datetime.datetime.now())
print("Starting")
#activehours
# Random "work day" window: morning start (date, 10:10-11:59) and an
# after-midnight end (date2, 00:10-02:59).
hours=random.randint(10,11)
minuts=random.randint(10,59)
date = f"{hours}:{minuts}"
hourst=random.randint(0,2)
minutst=random.randint(10,59)
date2 = f"0{hourst}:{minutst}"
#launchtime
# Random lunch window (date3..date4); a 1-in-3 roll narrows it to ~14:30-15:59.
food=random.randint(13,14)
foodminuts=random.randint(10,59)
endfood=random.randint(15,16)
endfoodminuts=random.randint(10,59)
launch = random.randint(0, 2)
if launch == 0:
    food=14
    foodminuts=random.randint(30,59)
    endfood=15
    endfoodminuts=random.randint(21,59)
date3 = f"{food}:{foodminuts}"
date4 = f"{endfood}:{endfoodminuts}"
# Day-off flags, set by the click handlers below. NOTE(review): once set to
# True they are never reset to False anywhere — confirm that is intended.
freemorning = None
freeevening = None
#random stops
#
#
#Morning clicking, copy this one for every click action you want
def click():
    """Morning activity: 1-in-4 chance to declare a day off, otherwise
    double-click and take one randomized work/break cycle.

    Side effects: blocking sleeps, mouse clicks via pyautogui, and may set
    the module-global `freemorning` flag.
    """
    global freemorning
    global date
    global date2
    rwork=random.randint(0,3)
    if rwork == 0:
        freemorning = True
        print(datetime.datetime.now())
        print ("Will take a break next morning")
    # When rwork == 0 the flag was just set, so the click below is skipped.
    if not freemorning:
        print(datetime.datetime.now())
        print("Morning click")
        rdelay=random.randint(1,59)
        time.sleep(rdelay)
        pyautogui.doubleClick(500,100)
        print(f"Active from {date} to {date2}")
        # Work rsleep seconds, then "break" for rback seconds (click marks
        # the start and end of the break).
        rsleep=random.randint(300,1400)
        rback=random.randint(360,2500)
        print(datetime.datetime.now())
        print(f"Will take a break of {rback} seconds after {rdelay+rsleep} seconds")
        time.sleep(rsleep)
        pyautogui.doubleClick(500,100)
        time.sleep(rback)
        pyautogui.doubleClick(500,100)
        print(datetime.datetime.now())
        print("End of the break")
#Launch
def click3():
    """Lunch-time click; skipped entirely when the morning was a day off."""
    global freemorning
    if not freemorning:
        launchdelay=random.randint(1,60)
        time.sleep(launchdelay)
        pyautogui.doubleClick(500,100)
        print(datetime.datetime.now())
        print ("Launch time click")
def click4():
    """Afternoon routine after lunch: 1-in-6 chance to take the evening off,
    otherwise click and run up to three randomized work/break cycles.

    Re-rolls the `date`/`date2` strings for "tomorrow".
    NOTE(review): the `schedule` jobs were registered once at startup with
    the old values and are not re-registered, so the regenerated times take
    no effect — confirm intended.
    """
    global freeevening
    global date3
    global date4
    global date2
    global date
    hours=random.randint(10,11)
    minuts=random.randint(10,59)
    date = f"{hours}:{minuts}"
    hourst=random.randint(0,2)
    minutst=random.randint(10,59)
    date2 = f"0{hourst}:{minutst}"
    rsiesta =random.randint(0,5)
    if rsiesta == 0:
        freeevening = True
        print(datetime.datetime.now())
        print ("Will take a break next evening")
    else:
        print(datetime.datetime.now())
        print ("Click after launch")
        launchdelay=random.randint(1,60)
        time.sleep(launchdelay)
        pyautogui.doubleClick(500,100)
        #randomstops
        # breaksevening in 0..3 gates three optional break cycles:
        # cycle k is taken unless breaksevening == k (so a roll of 3 takes all).
        breaksevening=random.randint(0,3)
        if breaksevening != 0:
            rsleep=random.randint(300,2500)
            rback=random.randint(360,5000)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback} seconds after {rsleep} seconds")
            time.sleep(rsleep)
            pyautogui.doubleClick(500,100)
            time.sleep(rback)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
        if breaksevening !=1:
            rsleep2=random.randint(300,3000)
            rback2=random.randint(360,9900)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback2} seconds after {rsleep2} seconds")
            time.sleep(rsleep2)
            pyautogui.doubleClick(500,100)
            time.sleep(rback2)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
        if breaksevening !=2:
            rsleep3=random.randint(400,1500)
            rback3=random.randint(360,8500)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback3} seconds after {rsleep3} seconds")
            time.sleep(rsleep3)
            pyautogui.doubleClick(500,100)
            time.sleep(rback3)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
#
#
#Evening click
#Remember to add a zero to single decimal ints (or parse it properly to time format)
def click2():
    """End-of-day click: re-roll tomorrow's lunch window, then either sleep
    early (when `freeevening` is set) or perform a final delayed click.

    NOTE(review): the regenerated date3/date4 are not re-registered with
    `schedule`, and single-digit minutes would yield e.g. "13:5" — confirm.
    """
    global freeevening
    global date2
    global date3
    global date4
    launch = random.randint(0,2)
    if launch == 0:
        food=14
        foodminuts=random.randint(30,59)
        endfood=15
        endfoodminuts=random.randint(21,59)
    else:
        food=random.randint(13,14)
        foodminuts=random.randint(10,59)
        endfood=random.randint(15,16)
        endfoodminuts=random.randint(10,59)
    date3 = f"{food}:{foodminuts}"
    date4 = f"{endfood}:{endfoodminuts}"
    if freeevening:
        print(datetime.datetime.now())
        print ("Sleeping early without click")
    else:
        print(datetime.datetime.now())
        print("Click and Sleep")
        rdelay=random.randint(1,2200)
        time.sleep(rdelay)
        pyautogui.doubleClick(500,100)
        print(f"Next click at {date} and {date2}")
# Report the randomized schedule, register the four daily jobs, then poll
# the scheduler once per second forever.
print(f"Active from {date} to {date2}")
print(f"Launch from {date3} to {date4}")
schedule.every().day.at(date).do(click)
schedule.every().day.at(date2).do(click2)
schedule.every().day.at(date3).do(click3)
schedule.every().day.at(date4).do(click4)
while True:
    schedule.run_pending()
    time.sleep(1)
| jbernax/autoclicktimer | autoclick.py | autoclick.py | py | 5,721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyautogui.FAILSAFE",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pyautogui.size",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pyautogui.position",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datet... |
73313388265 | # %%
import csv
import sys
import numpy as np
import pandas as pd
import os
from matplotlib import pyplot as plt
# plt.rcParams['font.sans-serif'] = ['Times New Roman']
# 找到全局最优 10 20 41
# 不断找到最优 15 34
filename1 = "results/Cifar10-center/LeNet/record_sgd weight.csv"
filename2 = "results/Cifar10-center/LeNet/record_spf weight.csv"
filename3 = "painting/csv/freezing_weight.csv"
colors = ['#FD6D5A', '#FEB40B', '#6DC354', '#994487', '#518CD8', '#443295']
df1 = pd.read_csv(filename1)
df2 = pd.read_csv(filename3)
# 33
# %%
# s = list(range(40, 45))
s = [41]
df1_v = df1.iloc[:35000, s].values.T
df2_v = df2.iloc[:, s].values.T
for i in range(len(s)):
plt.clf()
plt.title(f"weight changed of {s[i]}")
# plt.plot(df1_v[i], linewidth=0.5, color=colors[0], label="sgd")
plt.plot(df2_v[i], linewidth=0.5, color=colors[3], label="spf")
plt.legend()
plt.show()
# %% 保存图片
plt.clf()
plt.figure(figsize=(6, 4))
# plt.title(f"Weight Changed of Parameter in SPF")
plt.xticks(size=20)
plt.yticks(size=20)
plt.xlabel("Time", fontsize=25)
plt.ylabel("Value", fontsize=25)
plt.plot(df2_v[1][:2150], linewidth=0.5, color='r', label="spf")
# plt.savefig(f"./painting/pic/weight_change_spf_41.svg", bbox_inches='tight')
plt.show()
| zhengLabs/FedLSC | painting/painting_weight_change.py | painting_weight_change.py | py | 1,279 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.clf",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
from zipfile import ZipFile
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import StandardScaler
import seaborn as sns

# Hourly bike-sharing data; dteday becomes a DatetimeIndex so `hour` can be derived.
bikes = pd.read_csv('bsd/hour.csv', index_col='dteday', parse_dates=True)
bikes['hour'] = bikes.index.hour
bikes.head()
bikes.tail()
# - **hour** ranges from 0 (midnight) through 23 (11pm)
# - **workingday** is either 0 (weekend or holiday) or 1 (non-holiday weekday)
# ## Task 1
#
# Run these two `groupby` statements and figure out what they tell you about the data.
# mean rentals for each value of "workingday"
# sns.set(style='whitegrid', context='notebook')
# cols = ['hr', 'temp', 'atemp', 'hum', 'windspeed', 'cnt']
# sns.pairplot(bikes[cols], size=2.5)
# plt.show()
#WARNING: dont run code below(mem overflow)
# cm = np.corrcoef(bikes[cols])
#
# hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size':15}, yticklabels=cols, xticklabels=cols)
# plt.show()
# bikes.groupby('workingday').cnt.mean()
# mean rentals for each value of "hour"
# bikes.groupby('hour').cnt.mean()
# bikes.groupby(['holiday', 'season']).cnt.mean().unstack().plot()
# NOTE(review): `casual` is a component of `cnt` in this dataset, so
# regressing cnt on casual leaks the target — confirm this is intentional.
feature_cols = ['casual']
X = bikes[feature_cols].values
y = bikes.cnt.values
# X = StandardScaler().fit(X.reshape(-1, 1)).transform(X.reshape(-1, 1))
# y_scaler = StandardScaler().fit(y.reshape(-1, 1))
# y = y_scaler.transform(y.reshape(-1, 1))
# Chronological hold-out: last 5% of the rows form the test set.
X_len = len(X)
test_value = round(X_len * 0.05)
X_train, X_test = X[:-test_value], X[-test_value:]
y_train, y_test = y[:-test_value], y[-test_value:]
linreg = linear_model.LinearRegression()
linreg.fit(X_train.reshape(-1,1), y_train.reshape(-1,1))
y_pred = linreg.predict(X_test.reshape(test_value, 1))
plt.scatter(X_test.reshape(-1,1), y_test, color='b')
plt.plot(X_test.reshape(-1,1), y_pred, color='red',linewidth=1)
plt.show()
# pred = linreg.predict(X_test)
#
# # scores = cross_val_score(linreg, X, y, cv=10, scoring='mean_squared_error')
#
# # The coefficients
print('Coefficients: \n', linreg.coef_)
# # The mean squared error
print("Mean squared error: %.2f"
      % mean_squared_error(y_test, y_pred))
# # Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % r2_score(y_test, y_pred))
#
# pass
# pass | Kamkas/Bike-Sharing-Data-Analysis | lr.py | lr.py | py | 2,416 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 62,
"usage_type": "name"
},
{
"api_na... |
11370165603 | import numpy as np
import torch
import torch.nn as nn
from ml.modules.backbones import Backbone
from ml.modules.bottoms import Bottom
from ml.modules.heads import Head
from ml.modules.layers.bifpn import BiFpn
from ml.modules.tops import Top
class BaseModel(nn.Module):
    """Depth-prediction network assembled as bottom (stem) -> backbone ->
    top (feature fusion) -> head; each sub-module is built from the matching
    section of `config`.
    """
    def __init__(self, config):
        super().__init__()
        # bottom
        self.bottom = Bottom(config.bottom)  # conv bn relu
        # get backbone
        self.backbone = Backbone(config.backbone)
        # get top
        self.top = Top(config.top)
        # get head, i wish ;P
        self.head = Head(config.head)
        # Legacy DORN/regression layer construction, kept for reference:
        # if 'dorn' in self.head:
        #     self.dorn_layer = torch.nn.Sequential(torch.nn.Conv2d(in_channels=config['backbone_features'],
        #                                                           out_channels=self.ord_num * 2,
        #                                                           kernel_size=1,
        #                                                           stride=1),
        #                                           OrdinalRegressionLayer())
        #     self.dorn_criterion = OrdinalRegressionLoss(self.ord_num, self.beta, self.discretization)
        # if 'reg' in self.head:
        #     self.reg_layer = torch.nn.Conv2d(in_channels=config['backbone_features'],
        #                                      out_channels=1,
        #                                      kernel_size=1,
        #                                      stride=1)
        #
        #     self.reg_criterion = get_regression_loss(config['regression_loss'])

    def forward(self, image, depth=None, target=None):
        """
        :param image: RGB image, torch.Tensor, Nx3xHxW
        :param depth: optional sparse/extra depth input consumed by the bottom stem
        :param target: ground truth depth, torch.Tensor, NxHxW; forwarded to the head
        :return: output: if training, return loss, torch.Float,
                 else return {"target": depth, "prob": prob, "label": label},
                 depth: predicted depth, torch.Tensor, NxHxW
                 prob: probability of each label, torch.Tensor, NxCxHxW, C is number of label
                 label: predicted label, torch.Tensor, NxHxW
        """
        input_feature = self.bottom(image, depth)
        # Backbone yields a 4-level feature pyramid that the top fuses.
        p0, p1, p2, p3 = self.backbone(input_feature)
        feature = self.top([p0, p1, p2, p3])
        pred = self.head([feature, target])
        return pred

    def get_prediction_and_loss(self, feat, target):
        """Combine DORN (ordinal) and regression depth predictions and losses.

        NOTE(review): appears to be legacy code — it is not called from
        forward(), and it references attributes (dorn_layer, reg_layer,
        ord_num, beta, gamma, discretization, criteria) and membership
        tests `'dorn' in self.head` that __init__ no longer sets up.
        Confirm before relying on it.
        """
        # predicion
        # dorn prediction
        if 'dorn' in self.head:
            prob, label = self.dorn_layer(feat)
            if self.discretization == "SID":
                # Spacing-increasing discretization: label -> depth via exp.
                t0 = torch.exp(np.log(self.beta) * label.float() / self.ord_num)
                t1 = torch.exp(np.log(self.beta) * (label.float() + 1) / self.ord_num)
            else:
                t0 = 1.0 + (self.beta - 1.0) * label.float() / self.ord_num
                t1 = 1.0 + (self.beta - 1.0) * (label.float() + 1) / self.ord_num
            dorn_depth = (t0 + t1) / 2 - self.gamma
        else:
            dorn_depth = torch.as_tensor([0], device=torch.device('cuda'))
        # regression prediction
        if 'reg' in self.head:
            reg_depth = self.reg_layer(feat).squeeze(1)
        else:
            reg_depth = torch.as_tensor([0], device=torch.device('cuda'))
        # the full depth
        depth = dorn_depth + reg_depth
        # loss
        if self.training and target is not None:
            # dorn loss
            if 'dorn' in self.head:
                dorn_loss = self.dorn_criterion(prob, target)
            else:
                dorn_loss = torch.as_tensor([0], device=torch.device('cuda'))
            # regression loss
            if 'reg' in self.head:
                reg_loss = self.reg_criterion(depth, target)
            else:
                reg_loss = torch.as_tensor([0], device=torch.device('cuda'))
            # full loss
            loss = dorn_loss + reg_loss
        else:
            loss = torch.as_tensor([0], device=torch.device('cuda'))
        return depth, loss
| gregiberri/DepthPrediction | ml/models/base_model.py | base_model.py | py | 4,058 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "ml.modules.bottoms.Bottom",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "ml.modules.ba... |
44575910113 | from dbmanager import DatabaseManager
from tgbot import Bot
from market import Market
from plot_provider import PlotProvider
import threading
import sys
import logging
import logging.handlers
import queue
from apscheduler.schedulers.background import BackgroundScheduler
class MarketManager:
    """Coordinates per-market prediction threads, scheduled background jobs
    and the queue of outgoing Telegram messages."""

    def __init__(self, path, bot_token):
        self._bot_token = bot_token
        # Errors are forwarded to the local syslog daemon.
        self._logger = logging.getLogger('MarketManagerLogger')
        self._logger.setLevel(logging.ERROR)
        handler = logging.handlers.SysLogHandler(address='/dev/log')
        self._logger.addHandler(handler)
        self._db = DatabaseManager()
        self._path = path
        # Cron-style jobs: daily plots at midnight, predictions hourly,
        # bot chat collection every minute.
        self._scheduler = BackgroundScheduler()
        self._scheduler.add_job(self._daily_market_plot_job, trigger='cron', hour='0')
        self._scheduler.add_job(self._predictions_job, trigger='cron', hour='*')
        self._scheduler.add_job(self._bot_job, trigger='cron', minute='*')
        self._markets = dict()  # market symbol -> worker Thread
        self._message_queue = queue.Queue()  # messages produced by market threads

    def process_market_message(self):
        """Block until a message is queued, then broadcast it to all chats."""
        try:
            db = DatabaseManager()
            bot = Bot(self._bot_token)
            message = self._message_queue.get()
            chats = db.get_chat_list()
            if message["type"] == "text":
                bot.send_text_message(message["data"], chats)
            elif message["type"] == "image":
                bot.send_image(message["data"], chats)
            self._message_queue.task_done()
        except Exception:
            self._logger.exception(f"Failed to process market message.")

    def _predictions_job(self):
        """Spawn one worker thread per market; skip markets whose previous
        run is still alive (and log that as an error)."""
        try:
            db = DatabaseManager()
            markets_list = db.get_markets()
            # Create thread for each market
            for m in markets_list:
                if m in self._markets and self._markets[m].is_alive():
                    self._logger.error(f"Thread for market {m} is still alive.")
                    continue
                else:
                    t = threading.Thread(target=market_thread_func, args=(m, self._path, self._message_queue))
                    t.start()
                    self._markets[m] = t
        except Exception:
            self._logger.exception("Failed to start predictions job.")

    def _bot_job(self):
        """Persist any Telegram chats the bot currently knows about."""
        try:
            db = DatabaseManager()
            bot = Bot(self._bot_token)
            chats = bot.get_chat_list()
            for c in chats:
                db.add_chat(c)
        except Exception:
            self._logger.exception("Failed to collect bot chats.")

    def _daily_market_plot_job(self):
        """Queue a 24-hour plot image for every market."""
        try:
            db = DatabaseManager()
            pp = PlotProvider()
            markets = db.get_markets()
            for m in markets:
                data = db.get_24h_plot_data(m)
                # m[1:] drops the first character of the symbol for display —
                # presumably a prefix like '$'; TODO confirm.
                image = pp.get_market_24plot(data, m[1:])
                self._message_queue.put({'type': 'image', 'data': image})
        except Exception:
            self._logger.exception("Failed to push daily market plots.")

    def start(self):
        """Start the background scheduler (returns immediately)."""
        self._scheduler.start()
def market_thread_func(market_symbol, path, queue):
    """Thread target: run one Genotick predict-and-train cycle for a market,
    pushing any resulting messages onto *queue*."""
    Market(path, market_symbol, queue).genotick_predict_and_train()
def main(argv):
    """CLI entry point: ``prog path bot_token`` — start the scheduler, then
    pump queued market messages forever."""
    usage = "usage: {} path bot_token".format(argv[0])
    if len(argv) != 3:
        print(usage)
        sys.exit(1)
    manager = MarketManager(argv[1], argv[2])
    manager.start()
    # Block forever, forwarding queued market messages to Telegram chats.
    while True:
        manager.process_market_message()


if __name__ == "__main__":
    main(sys.argv)
| hype-ecosystem/predictions_bot | market_manager.py | market_manager.py | py | 3,531 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "logging.handlers.SysLogHandler",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "l... |
73895256743 | import numpy as np
import torch
from torch.utils.data import Dataset
import pickle as pkl
def valence_map(elements: list, valences: list):
    """
    ## Map each element to its (signed, float) valence.

    ### Args:
    - elements: element objects or symbols, e.g. [Element F, Element Pb].
    - valences: valence strings, e.g. ['Pb2+', 'F-'].

    ### Return:
    Dict keyed by element symbol, e.g. {'F': -1.0, 'Pb': 2.0}.
    If a symbol matches several valence strings, the last match wins.
    """
    mapping = {}
    for element in elements:
        symbol = str(element)
        for valence in valences:
            if symbol not in valence:
                continue
            charge = valence.replace(symbol, "")
            if charge[0].isalpha():
                # Substring hit on a different element, e.g. 'F' inside 'Fe2+'.
                continue
            if charge == "-":
                charge = "1-"
            elif charge == "+":
                charge = "1+"
            # Move the trailing sign to the front: '2+' -> '+2' -> 2.0.
            mapping[symbol] = float(charge[-1] + charge[:-1])
    return mapping
class ValenceDataset(Dataset):
    """
    The ValenceDataset class is a PyTorch Dataset that loads and preprocesses X-ray absorption near edge structure (XANES) spectra data for machine learning tasks.
    It takes an annotation file as input, which contains the paths to the data files to be loaded. The class unpacks the data files,
    extracts the XANES spectra and corresponding valences of the elements in the spectra, and returns them as a tuple of data and label for each sample.

    Only Fe K-edge spectra are kept (see unpack_data).

    ## Args:
    - annotation: the path of the annotation text file which contains the paths of data samples to be used to train/test the model.
    - xy_label: when True, keep both the energy axis and intensity; otherwise intensity only.
    """
    def __init__(self, annotation="",xy_label=False):
        super().__init__()
        # One pickle path per line.
        with open(annotation, "r") as f:
            self.mp_list = f.readlines()
        self.dataset = []
        self.xy_label=xy_label
        self.unpack_data()

    def unpack_data(self):
        """Load every pickle listed in the annotation and collect
        (spectrum, valence) pairs for Fe K-edge spectra."""
        for i in range(len(self.mp_list)):
            # Strip the trailing newline from each listed path.
            self.mp_list[i] = self.mp_list[i].split("\n")[0]
            with open(self.mp_list[i], "rb") as f:
                info = pkl.load(f)
            valences = valence_map(info["elements"], info["valences"])
            spectrum = info["xanes"]
            for sub_spec in spectrum.keys():
                # Spectrum keys look like "...-<element>-<edge>"; pick the element part.
                element = sub_spec.split("-")[-2]
                # if element == "Fe" and valences[element].is_integer() and sub_spec.endswith('K'):
                if element == "Fe" and sub_spec.endswith('K'):
                    if self.xy_label:
                        spec=np.array(spectrum[sub_spec])
                    else:
                        # Intensity channel only (index 1); index 0 is the energy axis.
                        spec=np.array(spectrum[sub_spec][1])
                    # NOTE(review): the `-0` offset looks like a leftover from a
                    # label-shift experiment — confirm before removing.
                    self.dataset.append(
                        [spec, int(valences[element])-0]
                    )

    def __getitem__(self, index):
        data, label = self.dataset[index]
        data = torch.from_numpy(data).type(torch.FloatTensor)
        label = torch.LongTensor([label])
        # label=torch.Tensor([float(label)]).type(torch.FloatTensor)
        return data, label

    def __len__(self):
        return len(self.dataset)
class ValenceDatasetV2(Dataset):
    """
    PyTorch Dataset over pre-packed XANES spectra.

    Unlike ValenceDataset, a single pickle file holds every sample as a
    (valence, spectrum) pair; __getitem__ converts them to float tensors.

    ## Args:
    - annotation: path of the pickle file containing all samples.
    - xy_label: when True, the spectrum keeps its (energy, intensity) rows and
      the energy row is rescaled by 1/10000; otherwise only intensity is used.
    """

    def __init__(self, annotation="", xy_label=True):
        super().__init__()
        self.xy_label = xy_label
        with open(annotation, "rb") as f:
            self.dataset = pkl.load(f)

    def __getitem__(self, index):
        label, data = self.dataset[index]
        if self.xy_label:
            data = np.array(data)
            # Normalize the energy axis (row 0) from eV-scale to O(1).
            data[0] = data[0] / 10000.0
        else:
            data = data[1]
        data = torch.from_numpy(data).type(torch.FloatTensor)
        label = torch.Tensor([float(label)]).type(torch.FloatTensor)
        return data, label

    def __len__(self):
        return len(self.dataset)
| Airscker/DeepMuon | DeepMuon/dataset/XASData.py | XASData.py | py | 4,337 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pickle.load",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"l... |
43069956711 | import csv
import io
from Crypto.Signature import pkcs1_15
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256, SHA
gSigner = "signer@stem_app"
def loadVoters(fname):
    """Load the voter register from *fname*, keyed by student number.

    Returns an empty dict when the file is missing, unsigned or malformed.
    """
    try:
        reader = csv.DictReader(loadFile(fname), delimiter=';')
        return {row['studNr']: row for row in reader}
    except Exception:
        return {}
def loadCandidates(fname):
    """Load the candidate list from *fname*, keyed by staff id (mdwId).

    Returns an empty dict when the file is missing, unsigned or malformed.
    """
    try:
        reader = csv.DictReader(loadFile(fname), delimiter=';')
        return {row['mdwId']: row for row in reader}
    except Exception:
        return {}
def sign(data, signer=gSigner, sfx='.prv'):
    """Sign *data* with the RSA private key in keys/private.key.

    Returns a signature trailer line of the form
    ``#sign:sha256-PKCS1-rsa2048:<signer>:<hex>``.
    NOTE: the `sfx` parameter is currently unused (key path is fixed).
    """
    if isinstance(data, io.StringIO):
        data = data.read()
    if not isinstance(data, bytes):
        data = bytes(data, encoding='utf-8')
    # Close the key file deterministically instead of leaking the handle.
    with open('keys/private.key') as key_file:
        key = RSA.import_key(key_file.read())
    h = SHA256.new(data)
    signature = pkcs1_15.new(key).sign(h)
    return ':'.join(['#sign', 'sha256-PKCS1-rsa2048', signer, signature.hex()])
def verify(data, signature, signer=gSigner, sfx='.pub'):
    """Verify a ``#sign:...`` trailer produced by sign() against *data*.

    Returns True/False for a valid/invalid signature, or None when the
    trailer's algorithm tag or signer is not recognized.
    """
    if isinstance(data, io.StringIO):
        data = data.read()
    if not isinstance(data, bytes):
        data = bytes(data, encoding='utf-8')
    flds = signature.split(':')
    # Reject when EITHER the algorithm tag OR the signer mismatches.
    # (Bug fix: the original used `and`, so a trailer with a wrong signer
    # but correct algorithm — or vice versa — slipped through.)
    if flds[1] != 'sha256-PKCS1-rsa2048' or flds[2] != signer:
        print('Error: Unknown signature:', signature)
        return None
    sig_bytes = bytes.fromhex(flds[3])  # renamed: `sign` shadowed the sign() helper
    # Close the key file deterministically instead of leaking the handle.
    with open('keys/public.pub') as key_file:
        key = RSA.import_key(key_file.read())
    h = SHA256.new(data)
    try:
        pkcs1_15.new(key).verify(h, sig_bytes)
        print("The signature is valid.")
        return True
    except (ValueError, TypeError):
        print("The signature is not valid.")
        return False
def saveFile(fname, data, signer=gSigner, useSign=True):
    """ Save file check signature """
    # Write *data* to *fname* as UTF-8, optionally appending a fresh
    # signature trailer. Any existing '#sign' trailer is stripped first so
    # the payload is never double-signed.
    if isinstance(data, io.StringIO):
        data = data.read()
    n = data.find('#sign')
    if n > 0:
        # NOTE(review): a trailer at offset 0 (n == 0) is kept as-is — the
        # `> 0` test mirrors loadFile(); confirm that is intended.
        data = data[0:n]
    if useSign:
        data += sign(data, signer) + '\n'
    # Close the file deterministically instead of leaking the handle.
    with io.open(fname, 'w', encoding='UTF-8') as f:
        f.write(data)
    return
def loadFile(fname, useSign=True, signer=gSigner):
    """ Load file check signature """
    # Read *fname* as UTF-8, strip its '#sign' trailer, and (when useSign)
    # verify the trailer against the payload. Returns a StringIO over the
    # payload, or None when verification fails.
    # Close the file deterministically instead of leaking the handle.
    with io.open(fname, 'r', encoding='UTF-8') as f:
        data = f.read()
    n = data.find('#sign')
    if n > 0:
        trailer = data[n:].strip()  # renamed: `sign` shadowed the sign() helper
        data = data[0:n]
        if useSign:
            # NOTE(review): a file with NO trailer skips verification entirely
            # and is returned as-is — confirm that is the intended policy.
            if not verify(data, trailer, signer, sfx='.pub'):
                return None
    return io.StringIO(data)
| Tataturk/stem_app | audit.py | audit.py | py | 2,468 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.DictReader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "Crypto.PublicKey.RSA.i... |
71910544423 | from collections import OrderedDict
from Models.Utils.FRRN_utils import *
class FRRNet(nn.Module):
    """
    implementation table A of Full-Resolution Residual Networks

    Two parallel streams: a pooled "residual" stream (y) processed by
    FRRLayers at progressively lower resolution, and a full-resolution
    32-channel stream (z); they are fused by `compress` at the end.
    """
    def __init__(self, in_channels=3, out_channels=21, layer_blocks=(3, 4, 2, 2)):
        super(FRRNet, self).__init__()
        # 5×5 stem conv -> BN -> ReLU
        self.first = nn.Sequential(
            OrderedDict([
                ('conv', nn.Conv2d(in_channels=in_channels, out_channels=48, kernel_size=5, padding=2)),
                ('bn', nn.BatchNorm2d(48)),
                ('relu', nn.ReLU()),
            ]))
        # Shared 2x down/up sampling ops reused between stages.
        self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.up = nn.UpsamplingBilinear2d(scale_factor=2)
        self.relu = nn.ReLU()
        # 3×48 Residual Unit
        self.reslayers_in = nn.Sequential(*[BasicBlock(48, 48, efficient=False) for _ in range(3)])
        # divide: split off the 32-channel full-resolution stream
        self.divide = nn.Conv2d(in_channels=48, out_channels=32, kernel_size=1)
        # frrlayer 1 (encoder, 1/2 resolution)
        self.frrnlayer1 = FRRLayer(48, 96, factor=2, num_blocks=layer_blocks[0])
        # frrlayer2 (1/4)
        self.frrnlayer2 = FRRLayer(96, 192, factor=4, num_blocks=layer_blocks[1])
        # frrnlayer3 (1/8)
        self.frrnlayer3 = FRRLayer(192, 384, factor=8, num_blocks=layer_blocks[2])
        # frrnlayer4 (1/16)
        self.frrnlayer4 = FRRLayer(384, 384, factor=16, num_blocks=layer_blocks[3])
        # defrrnlayer1 (decoder, back to 1/8)
        self.defrrnlayer1 = FRRLayer(384, 192, factor=8, num_blocks=2)
        # defrrnlayer2 (1/4)
        self.defrrnlayer2 = FRRLayer(192, 192, factor=4, num_blocks=2)
        # defrrnlayer3 (1/2)
        self.defrrnlayer3 = FRRLayer(192, 96, factor=2, num_blocks=2)
        # join: fuse residual (96ch) and full-resolution (32ch) streams
        self.compress = nn.Conv2d(96 + 32, 48, kernel_size=1)
        # 3×48 reslayer
        self.reslayers_out = nn.Sequential(*[BasicBlock(48, 48, efficient=True) for _ in range(3)])
        self.out_conv = nn.Conv2d(48, out_channels, 1)

    def forward(self, x):
        """Run the two-stream encoder/decoder; returns per-pixel class logits
        with `out_channels` channels at the input resolution."""
        x = self.first(x)
        y = self.reslayers_in(x)
        z = self.divide(y)  # z: full-resolution stream; y: pooled stream
        y = self.pool(y)
        y, z = self.frrnlayer1(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer2(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer3(y, z)
        y = self.pool(y)
        y, z = self.frrnlayer4(y, z)
        y = self.up(y)
        y, z = self.defrrnlayer1(y, z)
        y = self.up(y)
        y, z = self.defrrnlayer2(y, z)
        y = self.up(y)
        y, z = self.defrrnlayer3(y, z)
        y = self.up(y)
        # Concatenate both streams and refine at full resolution.
        refine = self.compress(torch.cat((y, z), 1))
        out = self.reslayers_out(refine)
        out = self.out_conv(out)
        return out
| akshatgarg99/FRR-Net | Models/FRRN.py | FRRN.py | py | 2,658 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.OrderedDict",
"line_number": 15,
"usage_type": "call"
}
] |
37664896770 | from page.E_confirm_order_page import ConfirmOrderPage
class MyOrderPage(ConfirmOrderPage):
    """Page object for the "My Orders" screen."""
    order_sn_loc = ('id', 'com.tpshop.malls:id/order_sn_tv') # order serial number
    to_be_received_loc = ('id','com.tpshop.malls:id/status_receive_tv') # "to be received" tab
    back_loc = ('id','com.tpshop.malls:id/title_back_img') # back icon
    confirm_received_loc = ('id','com.tpshop.malls:id/id_index_gallery_item_button') # "confirm receipt" button
    positive_button_loc = ('id','com.tpshop.malls:id/positiveButton') # "OK" button
    cart_loc = ('id','com.tpshop.malls:id/bottom_cart_img') # shopping cart
    def get_order_sn(self):
        """Return the order serial number text."""
        return self.get_ele_text(self.order_sn_loc)
    def click_to_be_received(self):
        """Tap the "to be received" tab."""
        self.click(self.to_be_received_loc)
    def click_confirm_received(self):
        """Tap the "confirm receipt" button."""
        self.click(self.confirm_received_loc)
    def click_positive_Button(self):
        """Tap the "OK" button in the confirmation dialog."""
        self.click(self.positive_button_loc)
    def click_back(self):
        """Tap the back icon."""
        self.click(self.back_loc)
    def click_cart(self):
        """Tap the shopping-cart icon."""
        self.click(self.cart_loc)
if __name__ == '__main__':
    from common.base_app import open_app
    from time import sleep
    # Manual smoke test: log in, buy a fridge, then read back the order number.
    driver = open_app() # open TPShop and land on the login page
    confirm = ConfirmOrderPage(driver)
    confirm.input_account_num('13730626896') # enter account number
    confirm.input_password('123456') # enter password
    confirm.click_confirm_login() # tap login
    confirm.wait_page() # wait for the page to load
    confirm.click_search() # tap the search box
    confirm.input_search_content('容声冰箱') # search for the Ronshen fridge
    confirm.click_search_button() # tap the search button
    confirm.wait_page() # wait for the page to load
    sleep(2)
    confirm.click_RSfridge() # tap the Ronshen fridge result
    confirm.wait_page() # wait for the page to load
    sleep(3)
    confirm.click_buy() # tap "buy now"
    confirm.click_confrim_buy() # tap "confirm purchase"
    confirm.click_address_RS() # tap to choose a delivery address
    confirm.choose_consignee_RS() # choose the consignee
    confirm.click_order_balance_RS() # tap "use balance"
    confirm.wait_page() # wait for the page to load
    confirm.click_sub_order_RS() # tap "submit order"
    confirm.input_pay_pwd_RS('123456') # enter the payment password
    confirm.click_confirm_pay_pwd_RS() # confirm the payment password
    order = MyOrderPage(driver)
    sn = order.get_order_sn() # fetch the new order's serial number
    print(sn)
    sleep(3)
    confirm.quit()
| 15008477526/- | APP_aaaaaaaa/page/F_my_order.py | F_my_order.py | py | 2,633 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "page.E_confirm_order_page.ConfirmOrderPage",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "common.base_app.open_app",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "page.E_confirm_order_page.ConfirmOrderPage",
"line_number": 43,
"usage... |
18482251122 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
    """Residual unit built from two 1x1 conv/BN stages.

    When the channel count changes, the identity path is replaced by a
    1x1 conv + BN projection so the shapes match for the addition.
    """
    expansion = 1

    def __init__(self, inplanes, planes, stride=1):
        super(BasicBlock, self).__init__()
        # first stage keeps the channel count
        self.conv1 = nn.Conv2d(inplanes, inplanes, 1, stride, bias=False)
        self.bn1 = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU(inplace=True)
        # second stage maps to the output channel count
        self.conv2 = nn.Conv2d(inplanes, planes, 1, stride, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        # projection shortcut, only applied when channels differ
        self.downsample = nn.Sequential(
            nn.Conv2d(inplanes, planes,
                      kernel_size=1, stride=stride, bias=False),
            nn.BatchNorm2d(planes))
        self.isdownsample = inplanes != planes

    def forward(self, x):
        shortcut = self.downsample(x) if self.isdownsample else x
        h = self.relu(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        h += shortcut
        return self.relu(h)
class Decoder(nn.Module):
    """FPN-style top-down decoder.

    Lateral 1x1 convs project backbone stages to a common 256 channels;
    the top-down path bilinearly upsamples and adds, and each fused level
    is refined by a BasicBlock, yielding four output feature maps.
    """
    def __init__(self, backbone):
        super(Decoder, self).__init__()
        if backbone == 'resnet':
            # channel[1..3] are resnet layer1..layer3 widths; layer4 features
            # are assumed to already be 256-channel (channel[4] is unused)
            channel = [0, 256, 512, 1024, 256]
        else:
            raise NotImplementedError
        # lateral projections to the common 256-channel width
        self.layer1 = nn.Sequential(nn.Conv2d(channel[1], 256, 1, 1, bias=False),
                                    nn.BatchNorm2d(256),
                                    nn.ReLU())
        self.layer2 = nn.Sequential(nn.Conv2d(channel[2], 256, 1, 1, bias=False),
                                    nn.BatchNorm2d(256),
                                    nn.ReLU())
        self.layer3 = nn.Sequential(nn.Conv2d(channel[3], 256, 1, 1, bias=False),
                                    nn.BatchNorm2d(256),
                                    nn.ReLU())
        plances = [256, 256, 256, 256]  # sic ("planes"): per-level refinement widths
        self.last_conv1 = BasicBlock(256, plances[0])
        self.last_conv2 = BasicBlock(256, plances[1])
        self.last_conv3 = BasicBlock(256, plances[2])
        self.last_conv4 = BasicBlock(256, plances[3])
        self.plances = plances
        self._init_weight()
    def forward(self, layer1_feat, layer2_feat, layer3_feat, layer4_feat):
        """Top-down fusion; returns the refined features, finest first."""
        x = layer4_feat
        dssdlayer4 = self.last_conv4(x)
        y = self.layer3(layer3_feat)
        # upsample the running map to the lateral's spatial size before adding
        if x.size()[2:] != y.size()[2:]:
            x = F.interpolate(x, size=y.size()[2:], mode='bilinear', align_corners=True)
        x = x + y
        dssdlayer3 = self.last_conv3(x)
        y = self.layer2(layer2_feat)
        if x.size()[2:] != y.size()[2:]:
            x = F.interpolate(x, size=y.size()[2:], mode='bilinear', align_corners=True)
        x = x + y
        dssdlayer2 = self.last_conv2(x)
        y = self.layer1(layer1_feat)
        if x.size()[2:] != y.size()[2:]:
            x = F.interpolate(x, size=y.size()[2:], mode='bilinear', align_corners=True)
        x = x + y
        dssdlayer1 = self.last_conv1(x)
        return dssdlayer1, dssdlayer2, dssdlayer3, dssdlayer4
    def _init_weight(self):
        """Kaiming init for convs; BN scaled to identity (weight=1, bias=0)."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight)
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
def build_decoder(backbone):
    """Factory helper: construct a Decoder for the given backbone name."""
    return Decoder(backbone)
if __name__ == "__main__":
model = build_decoder('resnet')
layer4_feat = torch.rand((1, 256, 16, 16))
layer3_feat = torch.rand((1, 1024, 16, 16))
layer2_feat = torch.rand((1, 512, 32, 32))
layer1_feat = torch.rand((1, 256, 64, 64))
output = model(layer1_feat, layer2_feat, layer3_feat, layer4_feat)
pass | TWSFar/DSSD | model/decoder.py | decoder.py | py | 3,824 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
33078013582 | # pylint: disable=W0102
# pylint: disable=W0212
# pylint: disable=W0221
# pylint: disable=W0231
# pylint: disable=W0640
# pylint: disable=C0103
"""Module for representing UDS corpora."""
import os
import json
import requests
from pkg_resources import resource_filename
from os.path import basename, splitext
from logging import warn
from glob import glob
from random import sample
from functools import lru_cache
from typing import Union, Optional, Any, TextIO
from typing import Dict, List, Set
from io import BytesIO
from zipfile import ZipFile
from rdflib.query import Result
from rdflib.plugins.sparql.sparql import Query
from ..predpatt import PredPattCorpus
from .document import UDSDocument
from .annotation import UDSAnnotation
from .annotation import RawUDSAnnotation
from .annotation import NormalizedUDSAnnotation
from .graph import UDSSentenceGraph
from .metadata import UDSCorpusMetadata
from .metadata import UDSAnnotationMetadata
from .metadata import UDSPropertyMetadata
Location = Union[str, TextIO]  # a filesystem path or an open text handle
class UDSCorpus(PredPattCorpus):
"""A collection of Universal Decompositional Semantics graphs
Parameters
----------
sentences
the predpatt sentence graphs to associate the annotations with
documents
the documents associated with the predpatt sentence graphs
sentence_annotations
additional annotations to associate with predpatt nodes on
sentence-level graphs; in most cases, no such annotations
will be passed, since the standard UDS annotations are
automatically loaded
document_annotations
additional annotations to associate with predpatt nodes on
document-level graphs
version
the version of UDS datasets to use
split
the split to load: "train", "dev", or "test"
annotation_format
which annotation type to load ("raw" or "normalized")
"""
    # URL of the UD-EWT r1.2 release used when the corpus must be (re)built
    UD_URL = 'https://github.com/UniversalDependencies/' +\
             'UD_English-EWT/archive/r1.2.zip'
    # packaged annotation data and the on-disk JSON cache share one directory
    ANN_DIR = resource_filename('decomp', 'data/')
    CACHE_DIR = resource_filename('decomp', 'data/')
    # NOTE(review): the mutable default lists below are only iterated, never
    # mutated, so sharing them across calls is harmless here
    def __init__(self,
                 sentences: Optional[PredPattCorpus] = None,
                 documents: Optional[Dict[str, UDSDocument]] = None,
                 sentence_annotations: List[UDSAnnotation] = [],
                 document_annotations: List[UDSAnnotation] = [],
                 version: str = '2.0',
                 split: Optional[str] = None,
                 annotation_format: str = 'normalized'):
        self._validate_arguments(sentences, documents,
                                 version, split, annotation_format)
        self.version = version
        self.annotation_format = annotation_format
        self._metadata = UDSCorpusMetadata()
        # methods inherited from Corpus that reference the self._graphs
        # attribute will operate on sentence-level graphs only
        self._graphs = self._sentences = {}
        self._documents = {}
        self._initialize_paths(version, annotation_format)
        all_built = self._check_build_status()
        # construction modes:
        #   1. requested split already cached            -> load that split
        #   2. no split requested and all splits cached  -> load everything
        #   3. nothing cached                            -> download and build
        #   4. prebuilt sentences/documents supplied     -> wrap them directly
        if sentences is None and split in self._sentences_paths:
            self._load_split(split)
        elif sentences is None and split is None and all_built:
            for split in ['train', 'dev', 'test']:
                self._load_split(split)
        elif sentences is None:
            # download UD-EWT
            udewt = requests.get(self.UD_URL).content
            if sentence_annotations or document_annotations:
                warn("sentence and document annotations ignored")
            self._process_conll(split, udewt)
        else:
            self._sentences = sentences
            self._documents = documents
            self.add_annotation(sentence_annotations, document_annotations)
    def _validate_arguments(self, sentences, documents,
                            version, split, annotation_format):
        """Raise ValueError if the constructor arguments are inconsistent."""
        # neither documents nor graphs should be supplied to the constructor
        # without the other
        if sentences is None and documents is not None:
            raise ValueError('UDS documents were provided without sentences. '
                             'Cannot construct corpus.')
        elif sentences is not None and documents is None:
            raise ValueError('UDS sentences were provided without documents. '
                             'Cannot construct corpus.')
        if not (split is None or split in ['train', 'dev', 'test']):
            errmsg = 'split must be "train", "dev", or "test"'
            raise ValueError(errmsg)
        if annotation_format not in ['raw', 'normalized']:
            # NOTE(review): the two f-string pieces concatenate without a
            # space ("....Must be...")
            errmsg = f'Unrecognized annotation format {annotation_format}.'\
                     f'Must be either "raw" or "normalized".'
            raise ValueError(errmsg)
    def _initialize_paths(self, version, annotation_format) -> None:
        """Locate cached split JSON and annotation JSON, unzipping annotations
        shipped as .zip on first use.

        Only sets path attributes; returns nothing (return annotation
        corrected from the original ``-> bool``).
        """
        # cache files are named 'uds-ewt-<kind>-<split>-<format>.json', so
        # index [-2] of the '-'-split stem is the split name
        self._sentences_paths = {splitext(basename(p))[0].split('-')[-2]: p
                                 for p
                                 in glob(os.path.join(self.CACHE_DIR,
                                                      version,
                                                      annotation_format,
                                                      'sentence',
                                                      '*.json'))}
        self._documents_paths = {splitext(basename(p))[0].split('-')[-2]: p
                                 for p
                                 in glob(os.path.join(self.CACHE_DIR,
                                                      version,
                                                      annotation_format,
                                                      'document',
                                                      '*.json'))}
        self._sentences_annotation_dir = os.path.join(self.ANN_DIR,
                                                      version,
                                                      annotation_format,
                                                      'sentence',
                                                      'annotations')
        self._documents_annotation_dir = os.path.join(self.ANN_DIR,
                                                      version,
                                                      annotation_format,
                                                      'document',
                                                      'annotations')
        sent_ann_paths = glob(os.path.join(self._sentences_annotation_dir,
                                           '*.json'))
        doc_ann_paths = glob(os.path.join(self._documents_annotation_dir,
                                          '*.json'))
        # out of the box, the annotations are stored as zip files and the
        # JSON they contain must be extracted
        if not sent_ann_paths:
            zipped_sent_paths = os.path.join(self._sentences_annotation_dir,
                                             '*.zip')
            zipped_sentence_annotations = glob(zipped_sent_paths)
            for zipped in zipped_sentence_annotations:
                ZipFile(zipped).extractall(path=self._sentences_annotation_dir)
            sent_ann_paths = glob(os.path.join(self._sentences_annotation_dir,
                                               '*.json'))
        if not doc_ann_paths:
            zipped_doc_paths = os.path.join(self._documents_annotation_dir,
                                            '*.zip')
            zipped_document_annotations = glob(zipped_doc_paths)
            for zipped in zipped_document_annotations:
                ZipFile(zipped).extractall(path=self._documents_annotation_dir)
            doc_ann_paths = glob(os.path.join(self._documents_annotation_dir,
                                              '*.json'))
        self._sentence_annotation_paths = sent_ann_paths
        self._document_annotation_paths = doc_ann_paths
    def _check_build_status(self):
        """Truthy iff all three splits are cached for both sentence- and
        document-level graphs.

        NOTE(review): the chained ``and``s can yield a dict rather than a
        bool; callers only use the result in boolean context.
        """
        sentences_built = self._sentences_paths and \
                          all(s in self._sentences_paths
                              for s in ['train', 'dev', 'test'])
        documents_built = self._documents_paths and \
                          all(s in self._documents_paths
                              for s in ['train', 'dev', 'test'])
        return sentences_built and documents_built
    def _load_split(self, split):
        """Load one cached split from JSON and merge it into this corpus."""
        sentence_fpath = self._sentences_paths[split]
        doc_fpath = self._documents_paths[split]
        # NOTE: rebinds `split` to a temporary per-split UDSCorpus
        split = self.__class__.from_json(sentence_fpath, doc_fpath)
        self._metadata += split.metadata
        self._sentences.update(split._sentences)
        self._documents.update(split._documents)
    def _process_conll(self, split, udewt):
        """Build the corpus from a downloaded UD-EWT zip and cache it as JSON.

        Parameters
        ----------
        split
            the split to keep ('train'/'dev'/'test'), or None to keep all
        udewt
            raw bytes of the UD-EWT release zip
        """
        with ZipFile(BytesIO(udewt)) as zf:
            conll_names = [fname for fname in zf.namelist()
                           if splitext(fname)[-1] == '.conllu']
            for fn in conll_names:
                with zf.open(fn) as conll:
                    conll_str = conll.read().decode('utf-8')
                    # e.g. 'en-ud-train.conllu' -> 'train'
                    sname = splitext(basename(fn))[0].split('-')[-1]
                    spl = self.__class__.from_conll(conll_str,
                                                    self._sentence_annotation_paths,
                                                    self._document_annotation_paths,
                                                    annotation_format=self.annotation_format,
                                                    version=self.version,
                                                    name='ewt-'+sname)
                    if sname == split or split is None:
                        # add metadata
                        self._metadata += spl.metadata
                        # prepare sentences
                        sentences_json_name = '-'.join(['uds', 'ewt', 'sentences',
                                                        sname, self.annotation_format]) +\
                                              '.json'
                        sentences_json_path = os.path.join(self.__class__.CACHE_DIR,
                                                           self.version,
                                                           self.annotation_format,
                                                           'sentence',
                                                           sentences_json_name)
                        self._sentences.update(spl._sentences)
                        self._sentences_paths[sname] = sentences_json_path
                        # prepare documents
                        documents_json_name = '-'.join(['uds', 'ewt', 'documents',
                                                        sname, self.annotation_format]) +\
                                              '.json'
                        documents_json_path = os.path.join(self.__class__.CACHE_DIR,
                                                           self.version,
                                                           self.annotation_format,
                                                           'document',
                                                           documents_json_name)
                        self._documents.update(spl._documents)
                        self._documents_paths[sname] = documents_json_path
                        # serialize both
                        spl.to_json(sentences_json_path, documents_json_path)
    @classmethod
    def from_conll(cls,
                   corpus: Location,
                   sentence_annotations: List[Location] = [],
                   document_annotations: List[Location] = [],
                   annotation_format: str = 'normalized',
                   version: str = '2.0',
                   name: str = 'ewt') -> 'UDSCorpus':
        """Load UDS graph corpus from CoNLL (dependencies) and JSON (annotations)

        This method should only be used if the UDS corpus is being
        (re)built. Otherwise, loading the corpus from the JSON shipped
        with this package using UDSCorpus.__init__ or
        UDSCorpus.from_json is suggested.

        Parameters
        ----------
        corpus
            (path to) Universal Dependencies corpus in conllu format
        sentence_annotations
            a list of paths to JSON files or open JSON files containing
            sentence-level annotations
        document_annotations
            a list of paths to JSON files or open JSON files containing
            document-level annotations
        annotation_format
            Whether the annotation is raw or normalized
        version
            the version of UDS datasets to use
        name
            corpus name to be appended to the beginning of graph ids
        """
        if annotation_format == 'raw':
            loader = RawUDSAnnotation.from_json
        elif annotation_format == 'normalized':
            loader = NormalizedUDSAnnotation.from_json
        else:
            # NOTE(review): the message pieces concatenate without a space
            # ("either\"raw\"")
            raise ValueError('annotation_format must be either'
                             '"raw" or "normalized"')
        predpatt_corpus = PredPattCorpus.from_conll(corpus, name=name)
        # NOTE: the comprehension variable `name` shadows the parameter here
        predpatt_sentence_graphs = {name: UDSSentenceGraph(g, name)
                                    for name, g in predpatt_corpus.items()}
        predpatt_documents = cls._initialize_documents(predpatt_sentence_graphs)
        # process sentence-level graph annotations
        processed_sentence_annotations = []
        for ann_path in sentence_annotations:
            ann = loader(ann_path)
            processed_sentence_annotations.append(ann)
        # process document-level graph annotations
        processed_document_annotations = []
        for ann_path in document_annotations:
            ann = loader(ann_path)
            processed_document_annotations.append(ann)
        return cls(predpatt_sentence_graphs, predpatt_documents,
                   processed_sentence_annotations,
                   processed_document_annotations,
                   version=version,
                   annotation_format=annotation_format)
    @classmethod
    def _load_ud_ids(cls, sentence_ids_only: bool = False) -> Dict[str, Dict[str, str]]:
        """Load the UD document/sentence ID mapping shipped with the package.

        When ``sentence_ids_only`` is True the values are plain sentence-ID
        strings rather than per-graph dicts.
        """
        # load in the document and sentence IDs for each sentence-level graph
        ud_ids_path = os.path.join(cls.ANN_DIR, 'ud_ids.json')
        with open(ud_ids_path) as ud_ids_file:
            ud_ids = json.load(ud_ids_file)
        if sentence_ids_only:
            return {k: v['sentence_id'] for k, v in ud_ids.items()}
        else:
            return ud_ids
    @classmethod
    def from_json(cls, sentences_jsonfile: Location,
                  documents_jsonfile: Location) -> 'UDSCorpus':
        """Load annotated UDS graph corpus (including annotations) from JSON

        This is the suggested method for loading the UDS corpus.

        Parameters
        ----------
        sentences_jsonfile
            file containing Universal Decompositional Semantics corpus
            sentence-level graphs in JSON format
        documents_jsonfile
            file containing Universal Decompositional Semantics corpus
            document-level graphs in JSON format
        """
        sentences_ext = splitext(basename(sentences_jsonfile))[-1]
        documents_ext = splitext(basename(documents_jsonfile))[-1]
        sent_ids = cls._load_ud_ids(sentence_ids_only=True)
        # process sentence-level graphs; each jsonfile argument may be a
        # .json path, a raw JSON string, or an open file handle
        if isinstance(sentences_jsonfile, str) and sentences_ext == '.json':
            with open(sentences_jsonfile) as infile:
                sentences_json = json.load(infile)
        elif isinstance(sentences_jsonfile, str):
            sentences_json = json.loads(sentences_jsonfile)
        else:
            sentences_json = json.load(sentences_jsonfile)
        sentences = {name: UDSSentenceGraph.from_dict(g_json, name)
                     for name, g_json in sentences_json['data'].items()}
        # process document-level graphs
        if isinstance(documents_jsonfile, str) and documents_ext == '.json':
            with open(documents_jsonfile) as infile:
                documents_json = json.load(infile)
        elif isinstance(documents_jsonfile, str):
            documents_json = json.loads(documents_jsonfile)
        else:
            documents_json = json.load(documents_jsonfile)
        documents = {name: UDSDocument.from_dict(d_json, sentences,
                                                 sent_ids, name)
                     for name, d_json in documents_json['data'].items()}
        corpus = cls(sentences, documents)
        metadata_dict = {'sentence_metadata': sentences_json['metadata'],
                         'document_metadata': documents_json['metadata']}
        metadata = UDSCorpusMetadata.from_dict(metadata_dict)
        corpus.add_corpus_metadata(metadata)
        return corpus
    def add_corpus_metadata(self, metadata: UDSCorpusMetadata) -> None:
        """Merge additional corpus-level metadata into this corpus."""
        self._metadata += metadata
    def add_annotation(self, sentence_annotation: UDSAnnotation,
                       document_annotation: UDSAnnotation) -> None:
        """Add annotations to UDS sentence and document graphs

        Parameters
        ----------
        sentence_annotation
            the annotations to add to the sentence graphs in the corpus
        document_annotation
            the annotations to add to the document graphs in the corpus

        NOTE(review): despite the singular names and annotations, both
        arguments are iterated, i.e. treated as iterables of UDSAnnotation
        (see the constructor's list defaults).
        """
        for ann in sentence_annotation:
            self.add_sentence_annotation(ann)
        for ann in document_annotation:
            self.add_document_annotation(ann)
    def add_sentence_annotation(self, annotation: UDSAnnotation) -> None:
        """Add annotations to UDS sentence graphs

        Parameters
        ----------
        annotation
            the annotations to add to the graphs in the corpus
        """
        self._metadata.add_sentence_metadata(annotation.metadata)
        # graph names not present in this corpus are silently skipped
        for gname, (node_attrs, edge_attrs) in annotation.items():
            if gname in self._sentences:
                self._sentences[gname].add_annotation(node_attrs,
                                                      edge_attrs)
    def add_document_annotation(self, annotation: UDSAnnotation) -> None:
        """Add annotations to UDS documents

        Parameters
        ----------
        annotation
            the annotations to add to the documents in the corpus
        """
        self._metadata.add_document_metadata(annotation.metadata)
        # document names not present in this corpus are silently skipped
        for dname, (node_attrs, edge_attrs) in annotation.items():
            if dname in self._documents:
                self._documents[dname].add_annotation(node_attrs,
                                                      edge_attrs)
    @classmethod
    def _initialize_documents(cls, graphs: Dict[str, 'UDSSentenceGraph']) -> Dict[str, UDSDocument]:
        """Group sentence graphs into UDSDocuments keyed by UD document ID."""
        # Load the UD document and sentence IDs
        ud_ids = cls._load_ud_ids()
        # Add each graph to the appropriate document
        documents = {}
        for name, graph in graphs.items():
            doc_id = ud_ids[name]['document_id']
            sent_id = ud_ids[name]['sentence_id']
            graph.document_id = doc_id
            graph.sentence_id = sent_id
            # Add the graph to an existing document
            if doc_id in documents:
                documents[doc_id].add_sentence_graphs({name: graph}, {name: sent_id})
            # Create a new document
            else:
                # document IDs start with the genre, e.g. 'weblog-...'
                genre = doc_id.split('-')[0]
                timestamp = UDSDocument._get_timestamp_from_document_name(doc_id)
                documents[doc_id] =\
                    UDSDocument({name: graph}, {name: sent_id}, doc_id, genre, timestamp)
        return documents
    def to_json(self,
                sentences_outfile: Optional[Location] = None,
                documents_outfile: Optional[Location] = None) -> Optional[str]:
        """Serialize corpus to json

        Parameters
        ----------
        sentences_outfile
            file to serialize sentence-level graphs to; when None, the
            sentence JSON is returned as a string instead
        documents_outfile
            file to serialize document-level graphs to; when None, the
            document JSON is returned as a string instead

        NOTE(review): when ``sentences_outfile`` is None this returns the
        sentence JSON immediately, so the document graphs are never
        serialized in that case.
        """
        metadata_serializable = self._metadata.to_dict()
        # convert graphs to dictionaries
        sentences_serializable = {'metadata': metadata_serializable['sentence_metadata'],
                                  'data': {name: graph.to_dict()
                                           for name, graph
                                           in self._sentences.items()}}
        if sentences_outfile is None:
            return json.dumps(sentences_serializable)
        elif isinstance(sentences_outfile, str):
            with open(sentences_outfile, 'w') as out:
                json.dump(sentences_serializable, out)
        else:
            json.dump(sentences_serializable, sentences_outfile)
        # Serialize documents (Note: we serialize only the *graphs*
        # for each document — not the metadata, which is loaded by
        # other means when calling UDSDocument.from_dict)
        documents_serializable = {'metadata': metadata_serializable['document_metadata'],
                                  'data': {name: doc.document_graph.to_dict()
                                           for name, doc
                                           in self._documents.items()}}
        if documents_outfile is None:
            return json.dumps(documents_serializable)
        elif isinstance(documents_outfile, str):
            with open(documents_outfile, 'w') as out:
                json.dump(documents_serializable, out)
        else:
            json.dump(documents_serializable, documents_outfile)
    @lru_cache(maxsize=128)
    def query(self, query: Union[str, Query],
              query_type: Optional[str] = None,
              cache_query: bool = True,
              cache_rdf: bool = True) -> Union[Result,
                                               Dict[str,
                                                    Dict[str, Any]]]:
        """Query all graphs in the corpus using SPARQL 1.1

        Parameters
        ----------
        query
            a SPARQL 1.1 query
        query_type
            whether this is a 'node' query or 'edge' query. If set to
            None (default), a Results object will be returned. The
            main reason to use this option is to automatically format
            the output of a custom query, since Results objects
            require additional postprocessing.
        cache_query
            whether to cache the query. This should usually be set to
            True. It should generally only be False when querying
            particular nodes or edges--e.g. as in precompiled queries.
        cache_rdf
            passed through to each graph's ``query``; the original
            docstring described this (as ``clear_rdf``) as controlling
            whether the RDF constructed for querying against is kept,
            trading memory for faster future queries

        NOTE(review): ``lru_cache`` on an instance method keys on ``self``
        (which must be hashable) and keeps the corpus alive for the
        cache's lifetime.
        """
        return {gid: graph.query(query, query_type,
                                 cache_query, cache_rdf)
                for gid, graph in self.items()}
    @property
    def documents(self) -> Dict[str, UDSDocument]:
        """The documents in the corpus"""
        return self._documents
    @property
    def documentids(self):
        """The document ID for each document in the corpus"""
        return list(self._documents)
    @property
    def ndocuments(self):
        """The number of documents in the corpus"""
        return len(self._documents)
def sample_documents(self, k: int) -> Dict[str, UDSDocument]:
"""Sample k documents without replacement
Parameters
----------
k
the number of documents to sample
"""
return {doc_id: self._documents[doc_id]
for doc_id
in sample(self._documents.keys(), k=k)}
    @property
    def metadata(self):
        """The corpus metadata (sentence- and document-level)."""
        return self._metadata
    @property
    def sentence_node_subspaces(self) -> Set[str]:
        """The UDS sentence node subspaces in the corpus"""
        raise NotImplementedError
    @property
    def sentence_edge_subspaces(self) -> Set[str]:
        """The UDS sentence edge subspaces in the corpus"""
        raise NotImplementedError
    @property
    def sentence_subspaces(self) -> Set[str]:
        """The UDS sentence subspaces in the corpus"""
        # NOTE(review): both operands currently raise NotImplementedError,
        # so accessing this property always raises as well
        return self.sentence_node_subspaces |\
               self.sentence_edge_subspaces
    @property
    def document_node_subspaces(self) -> Set[str]:
        """The UDS document node subspaces in the corpus"""
        raise NotImplementedError
    @property
    def document_edge_subspaces(self) -> Set[str]:
        """The UDS document edge subspaces in the corpus"""
        # NOTE(review): self._document_edge_subspaces is never assigned in
        # this class, so this raises AttributeError if accessed
        return self._document_edge_subspaces
    @property
    def document_subspaces(self) -> Set[str]:
        """The UDS document subspaces in the corpus"""
        return self.document_node_subspaces |\
               self.document_edge_subspaces
    def sentence_properties(self, subspace: Optional[str] = None) -> Set[str]:
        """The properties in a sentence subspace (not yet implemented)"""
        raise NotImplementedError
    def sentence_property_metadata(self, subspace: str,
                                   prop: str) -> UDSPropertyMetadata:
        """The metadata for a property in a sentence subspace
        (not yet implemented)

        Parameters
        ----------
        subspace
            The subspace the property is in
        prop
            The property in the subspace
        """
        raise NotImplementedError
    def document_properties(self, subspace: Optional[str] = None) -> Set[str]:
        """The properties in a document subspace (not yet implemented)"""
        raise NotImplementedError
    def document_property_metadata(self, subspace: str,
                                   prop: str) -> UDSPropertyMetadata:
        """The metadata for a property in a document subspace
        (not yet implemented)

        Parameters
        ----------
        subspace
            The subspace the property is in
        prop
            The property in the subspace
        """
        raise NotImplementedError
| decompositional-semantics-initiative/decomp | decomp/semantics/uds/corpus.py | corpus.py | py | 26,248 | python | en | code | 56 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.TextIO",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "predpatt.PredPattCorpus",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "pkg_resources.reso... |
30629658909 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import json, urllib
import plotly.graph_objects as go
import pandas as pd
import numpy as np
# In[2]:
# load the prepared Category -> ASI measure count table
asi_measures = pd.read_csv('final-data.csv')
asi_measures.head()
# In[4]:
# one node per Category / ASI label; each row's endpoints are mapped to
# indices into the combined node list for the Sankey source/target arrays
all_nodes = asi_measures.Category.values.tolist() + asi_measures.ASI.values.tolist()
source_indices = [all_nodes.index(Category) for Category in asi_measures.Category]
target_indices = [all_nodes.index(ASI) for ASI in asi_measures.ASI]
fig = go.Figure(data=[go.Sankey(
    node = dict(
      pad = 20,
      thickness = 20,
      line = dict(color = "black", width = 1.0),
      label = all_nodes,
      ),
    link = dict(
      source = source_indices,
      target = target_indices,
      value = asi_measures.Count,
  ))])
fig.update_layout(title_text="Transport mitigation actions in the context of Avoid, Shift and Improve",
                  font_size=10)
fig.show()
| nikolamedi/sankey-diagram | Sankey diagram with plotly.py | Sankey diagram with plotly.py | py | 912 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects.Figure",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "plotly.graph_objects",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "plot... |
34091963292 | from loader import dp, bot
from aiogram.types import ContentType, Message
from pathlib import Path
# incoming media (photos/videos/documents/...) are saved under downloads/categories
download_path = Path().joinpath("downloads","categories")
download_path.mkdir(parents=True, exist_ok=True)
@dp.message_handler()
async def text_handler(message: Message):
    """Handle plain text messages (no content_types filter)."""
    await message.reply("Siz matn yubordingiz!")
@dp.message_handler(content_types=ContentType.DOCUMENT)
# @dp.message_handler(content_types='document')
async def doc_handler(message: Message):
    """Save an incoming document to download_path and echo its file_id."""
    await message.document.download(destination=download_path)
    doc_id = message.document.file_id
    await message.reply("Siz hujjat yubordingiz!\n"
                        f"file_id = {doc_id}")
# @dp.message_handler(content_types=ContentType.VIDEO)
@dp.message_handler(content_types='video')
async def video_handler(message: Message):
    """Save an incoming video to download_path and echo its file_id."""
    await message.video.download(destination=download_path)
    await message.reply("Video qabul qilindi\n"
                        f"file_id = {message.video.file_id}")
@dp.message_handler(content_types='photo')
async def photo_handler(message: Message):
    """Save the largest size of an incoming photo and echo its file_id.

    Renamed from ``video_handler``: the original name duplicated the video
    handler above and shadowed it at module level (registration through the
    decorator still worked, but the duplicate name was misleading).
    """
    # message.photo is a list of sizes; [-1] is the highest resolution
    await message.photo[-1].download(destination=download_path)
    await message.reply("Rasm qabul qilindi\n"
                        f"file_id = {message.photo[-1].file_id}")
# Bu yerga yuqoridagi 3 turdan boshqa barcha kontentlar tushadi
@dp.message_handler(content_types=ContentType.ANY)
async def any_handler(message: Message):
await message.reply(f"{message.content_type} qabul qilindi") | BakhtiyarTayir/mukammal-bot | handlers/users/docs_handlers.py | docs_handlers.py | py | 1,536 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "aiogram.types.Message",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "loader.dp.message_handler",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "loader.dp"... |
35896409907 | from typing import NamedTuple, Optional, List, Dict, Any, Union
from enum import Enum, auto
import pysam
def _build_filter(rec: pysam.VariantRecord) -> List[Union[str, int]]:
    """Materialize the record's FILTER entries as a plain list."""
    return list(rec.filter)
def _build_info(rec: pysam.VariantRecord) -> Dict[str, Any]:
    """Copy the record's INFO mapping into a plain dict."""
    return {key: value for key, value in rec.info.items()}
def _build_format(rec: pysam.VariantRecord) -> List[str]:
    """Materialize the record's FORMAT keys as a plain list."""
    return list(rec.format)
def _build_samples(rec: pysam.VariantRecord) -> Dict[str, Dict[str, Any]]:
    """Copy per-sample genotype data into plain nested dicts."""
    return {
        sample_name: dict(rec.samples[sample_name].items())
        for sample_name in rec.samples
    }
class VariantType(Enum):
    """Closed set of variant classes a VCF record can describe."""
    SNV = 1  # single-nucleotide variant
    DEL = 2  # deletion
    INS = 3  # insertion
    DUP = 4  # duplication
    INV = 5  # inversion
    CNV = 6  # copy-number variant
    TRA = 7  # translocation
    SGL = 8  # single breakend
class BreakendSVRecord(NamedTuple):
    """Decomposed fields of a breakend-notation (BND) SV ALT allele.

    Example: ``G]17:198982]`` splits into prefix ``G``, bracket ``]``,
    contig ``17``, position ``198982`` and no suffix.
    """
    prefix: Optional[str]   # bases before the bracket, if any
    bracket: str            # ']' or '['
    contig: str             # mate contig name
    pos: int                # mate position
    suffix: Optional[str]   # bases after the bracket, if any
class ShorthandSVRecord(NamedTuple):
    """Decomposed fields of a shorthand SV ALT allele such as ``<DUP:TANDEM:AA>``."""
    type: str                    # 'DEL', 'INS', 'DUP', 'INV' or 'CNV'
    extra: Optional[List[str]]   # trailing qualifiers, e.g. ['TANDEM', 'AA']
def _str_value(value):
if isinstance(value, str):
return value
elif isinstance(value, float):
return f'{value:.2f}'
elif hasattr(value, '__iter__'):
return ','.join([_str_value(v) for v in value])
elif value is None:
return '.'
else:
return str(value)
def _convert_info_key_value(key, value):
    """Render one INFO entry as VCF text.

    Flag-style entries collapse to the bare key (or to None when the flag
    is False, meaning the entry should be omitted).
    """
    if value is None:
        return key
    if isinstance(value, bool):
        # boolean INFO fields are flags: present when True, dropped when False
        return key if value else None
    return f'{key}={_str_value(value)}'
def _convert_sample_value(key, value):
    """Render one FORMAT field value; GT alleles are joined with '/'."""
    if key != 'GT':
        return _str_value(value)
    return '/'.join(_str_value(allele) for allele in value)
class VariantRecord():
    """Normalized view of a single variant parsed from a VCF file.

    Wraps a :class:`pysam.VariantRecord`; INFO, FORMAT and per-sample
    data are parsed lazily from the wrapped record on first access and
    cached, so unmodified records can be re-serialized from the
    original text in :meth:`__str__` without paying the parsing cost.
    """
    contig: str
    """Contig name"""
    pos: int
    """Position of the variant in the contig"""
    end: int
    """End position of the variant in the contig (same as `pos` for TRA and SNV)"""
    length: int
    """Length of the variant"""
    id: Optional[str]
    """Record identifier"""
    ref: str
    """Reference sequence"""
    alt: str
    """Alternative sequence"""
    qual: Optional[float]
    """Quality score for the assertion made in ALT"""
    filter: List[Union[str, int]]
    """Filter status. PASS if this position has passed all filters. Otherwise, it contains the filters that failed"""
    variant_type: VariantType
    """Variant type"""
    alt_sv_breakend: Optional[BreakendSVRecord]
    """Breakend SV info, present only for SVs with breakend notation. For example, :code:`G]17:198982]`"""
    alt_sv_shorthand: Optional[ShorthandSVRecord]
    """Shorthand SV info, present only for SVs with shorthand notation. For example, :code:`<DUP:TANDEM>`"""

    def __init__(self, rec: pysam.VariantRecord, contig: str, pos: int, end: int,
                 length: int, id: Optional[str], ref: str,
                 alt: str, variant_type: VariantType,
                 alt_sv_breakend: Optional[BreakendSVRecord] = None,
                 alt_sv_shorthand: Optional[ShorthandSVRecord] = None):
        # Keep the underlying pysam record: INFO/FORMAT/samples are
        # parsed from it lazily, and __str__ reuses its original text.
        self._rec = rec
        self.contig = contig
        self.pos = pos
        self.end = end
        self.length = length
        self.id = id
        self.ref = ref
        self.alt = alt
        self.qual = rec.qual
        self.filter = _build_filter(rec)
        self.variant_type = variant_type
        self.alt_sv_breakend = alt_sv_breakend
        self.alt_sv_shorthand = alt_sv_shorthand
        # Lazy caches; None means "not yet parsed from self._rec".
        self._info = None
        self._format = None
        self._samples = None

    @property
    def info(self):
        """Additional information (parsed lazily on first access)"""
        if self._info is None:
            self._info = _build_info(self._rec)
        return self._info

    @info.setter
    def info(self, value):
        self._info = value

    @property
    def format(self):
        """Specifies data types and order of the genotype information"""
        if self._format is None:
            self._format = _build_format(self._rec)
        return self._format

    @format.setter
    def format(self, value):
        self._format = value

    @property
    def samples(self):
        """Genotype information for each sample"""
        if self._samples is None:
            self._samples = _build_samples(self._rec)
        return self._samples

    @samples.setter
    def samples(self, value):
        self._samples = value

    def _replace(self, **kwargs):
        """Return a copy of this record with the given fields overridden
        (NamedTuple-style API)."""
        new_record = VariantRecord(self._rec, self.contig, self.pos, self.end,
                                   self.length, self.id, self.ref, self.alt,
                                   self.variant_type, self.alt_sv_breakend,
                                   self.alt_sv_shorthand)
        for key, value in kwargs.items():
            setattr(new_record, key, value)
        return new_record

    def _info_str(self, rec_str: List[str]) -> str:
        """Serialize the INFO column, reusing the original text when the
        INFO dict was never parsed/modified."""
        # If info has not been loaded, return the original info string
        if self._info is None and len(rec_str) > 7:
            return rec_str[7]
        info_list = []
        for key, value in self.info.items():
            info_str = _convert_info_key_value(key, value)
            if info_str is None:
                # False flag values are dropped entirely.
                continue
            info_list.append(info_str)
        if self.alt_sv_shorthand:
            # Shorthand SVs carry an explicit END in INFO.
            info_list.insert(0, 'END='+str(self.end))
        info = ";".join(info_list)
        return info

    def _format_str(self, rec_str: List[str]) -> str:
        """Serialize the FORMAT column, reusing the original text when
        unparsed."""
        # If format has not been loaded, return the original format string
        if self._format is None and len(rec_str) > 8:
            return rec_str[8]
        return ":".join(self.format)

    def _samples_str(self, rec_str: List[str]) -> str:
        """Serialize the per-sample columns, reusing the original text
        when neither samples nor format were parsed."""
        # If samples and format have not been loaded, return the original samples string
        if self._samples is None and self._format is None and len(rec_str) > 9:
            return '\t'.join(rec_str[9:])
        samples_list = [":".join([_convert_sample_value(k, self.samples[sample_name][k])
                                  for k in self.format]) for sample_name in self.samples]
        samples = "\t".join(samples_list)
        return samples

    def __str__(self):
        # Split the original pysam record text so unmodified columns can
        # be copied through verbatim.
        rec_str_split = str(self._rec).split('\t')
        contig = self.contig
        pos = self.pos
        id_ = self.id if self.id else '.'
        ref = self.ref
        alt = self.alt
        qual = _str_value(self.qual)
        filter_ = ";".join(map(str, self.filter)) if self.filter else '.'
        info = self._info_str(rec_str_split)
        format_ = self._format_str(rec_str_split)
        samples = self._samples_str(rec_str_split)
        return f'{contig}\t{pos}\t{id_}\t{ref}\t{alt}\t{qual}\t{filter_}\t{info}\t{format_}\t{samples}'.strip()
| EUCANCan/variant-extractor | src/variant_extractor/variants.py | variants.py | py | 8,031 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "pysam.VariantRecord",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pysam.VariantRecord",
... |
25046698571 | #!/usr/bin/python
import json
import re
import os
import sys
# Get data.json from Twitter for
# Followers: https://oauth-playground.glitch.me/?id=usersIdFollowers¶ms=%28%27user.fields%21%27description%27%29_
# Followings: https://oauth-playground.glitch.me/?id=usersIdFollowing¶ms=%28%27user.fields%21%27description%27%29_
# Load the Twitter API response (followers/followings, each entry
# carrying a free-text "description" bio) from data.json next to this
# script.
with open(os.path.join(sys.path[0], "data.json"), "r") as fileData:
    jsonData = json.load(fileData)

# Two shapes a Mastodon handle may take inside a Twitter bio:
# @user@instance.tld, or a profile URL like https://instance.tld/@user.
# NOTE(review): \w* permits empty user/instance parts — presumably
# these should be \w+; confirm against real bios.
mastodonUrlRegex = re.compile(r'@\w*@\w*\.\w*')
webUrlRegex = re.compile(r'http(s?)://.*/@\w*')

for follower in jsonData['data']:
    name = follower['name']
    username = follower['username']
    description = follower['description']
    # Print one line per matching pattern (a bio matching both styles
    # is printed twice).
    match1 = mastodonUrlRegex.search(description)
    if match1:
        print("%s (@%s) - %s" % (name, username, match1.group()))
    match2 = webUrlRegex.search(description)
    if match2:
        print("%s (@%s) - %s" % (name, username, match2.group()))
| tjosten/twitter-mastodon-finder | finder.py | finder.py | py | 983 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number"... |
4197719073 | """Utilities for plotting the results of the experiments."""
import os
import json
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("pdf")
# Avoid trouble when generating pdf's on a distant server
# matplotlib.use("TkAgg") # Be able to import matplotlib in ipython
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def plot_cost_acc(params, lim_acc=None, lim_cost=None):
    """Plots the cost value and accuracy for test and train.

    Args:
        params: dict of logged metrics containing "step", "cost_test",
            "cost_train", "acc_test" and "acc_train", all sequences of
            equal length.
        lim_acc: optional (ymin, ymax) for the miss-rate axis.
        lim_cost: optional (ymin, ymax) for the SCE axis.
    """
    # Left axis: softmax cross-entropy (SCE), in red.
    plt.plot(params["step"], params["cost_test"],
             label="SCE test", color="red")
    plt.plot(params["step"], params["cost_train"],
             label="SCE train", color="red", linestyle="--")
    plt.grid()
    plt.legend(loc="lower left")
    plt.ylabel("SCE", color="red")
    if lim_cost:
        plt.ylim(lim_cost)
    # Right axis: miss rate (1 - accuracy), in blue.
    plt.twinx()
    plt.plot(params["step"], 1 - np.array(params["acc_test"]),
             label="miss test", color="blue")
    plt.plot(params["step"], 1 - np.array(params["acc_train"]),
             label="miss train", color="blue", linestyle="--")
    plt.ylabel("misses", color="blue")
    if lim_acc:
        plt.ylim(lim_acc)
    plt.legend(loc="upper right")
    plt.tight_layout()
def plot_norm(params):
    """Plots the regularization norm (``params["norm_mat"]``) against
    the training step."""
    plt.plot(params["step"], params["norm_mat"], label="norm", color="red")
    plt.tight_layout()
# --- Quantile plotting functions ---
def quantile(X, q, axis=0):
    """Empirical q-quantile along axis 0.

    Backport helper: np.quantile only exists on numpy 1.15 and higher.
    ``X`` is a 2-D array-like (e.g. runs x steps); the quantile is
    taken across axis 0, returning one value per column.

    Args:
        X: 2-D array-like of samples.
        q: quantile level in [0, 1].
        axis: only 0 is supported.

    Returns:
        1-D array with the q-quantile of each column.
    """
    assert axis == 0
    X = np.array(X)
    # Clamp the rank: the original int(n * q) equals n when q == 1.0,
    # which indexed past the end of the sorted array.
    rank = min(int(X.shape[0] * q), X.shape[0] - 1)
    return np.sort(X, axis=0)[rank, :]
def param_list_to_quant(key, q, p_list):
    """Return the q-quantile across runs of the metric ``key``.

    Accuracy-like metrics (keys starting with "acc" or "top") are
    converted to error rates (1 - value) before taking the quantile.
    """
    stacked = np.array([run[key] for run in p_list])
    if key.startswith(("acc", "top")):
        # We plot the error rates 1-acc.
        stacked = 1 - stacked
    return quantile(stacked, q)
def plot_quant(params_list, param_name, label, color,
               linestyle="-", alpha=0.05):
    """Plots quantile intervals with the desired value.

    Shades the [alpha/2, 1-alpha/2] quantile band of ``param_name``
    across runs and draws its median curve on top.
    """
    p_list = params_list
    # All runs share the same "step" axis; take it from the first run.
    params = p_list[0]
    # Plot the result
    plt.fill_between(params["step"],
                     param_list_to_quant(param_name, (1-alpha/2), p_list),
                     param_list_to_quant(param_name, alpha/2, p_list),
                     color=color, alpha=0.25)
    plt.plot(params["step"], param_list_to_quant(param_name, 0.5, p_list),
             label=label, color=color, linestyle=linestyle)
def plot_quant_cost_acc(params_list, alpha, lim_acc=None, lim_cost=None,
                        left_label_remove=False, right_label_remove=False):
    """Plots quantile intervals of the cost value and accuracy.

    Args:
        params_list: list of per-run metric dicts (same keys and steps).
        alpha: significance level; bands cover the [alpha/2, 1-alpha/2]
            quantile range across runs.
        lim_acc, lim_cost: optional y-limits for each axis.
        left_label_remove, right_label_remove: hide the corresponding
            axis label (used for compact camera-ready panels).
    """
    p_list = params_list
    params = p_list[0]
    # Left axis: SCE band (test) + medians (test/train), in red.
    plt.fill_between(params["step"],
                     param_list_to_quant("cost_test", (1-alpha/2), p_list),
                     param_list_to_quant("cost_test", alpha/2, p_list),
                     color="red", alpha=0.25)
    plt.plot(params["step"], param_list_to_quant("cost_test", 0.5, p_list),
             label="SCE test", color="red")
    plt.plot(params["step"], param_list_to_quant("cost_train", 0.5, p_list),
             label="SCE train", color="red", linestyle="--")
    plt.grid()
    plt.legend(loc="lower left")
    if not left_label_remove:
        plt.ylabel("SCE", color="red")
    else:
        plt.gca().yaxis.set_ticklabels([])
    if lim_cost:
        plt.ylim(lim_cost)
    # Right axis: miss-rate band + medians, in blue.
    # (param_list_to_quant already converts acc_* to error rates.)
    plt.twinx()
    plt.fill_between(params["step"],
                     param_list_to_quant("acc_test", (1-alpha/2), p_list),
                     param_list_to_quant("acc_test", alpha/2, p_list),
                     color="blue", alpha=0.25)
    plt.plot(params["step"],
             np.array(param_list_to_quant("acc_test", 0.5, p_list)),
             label="miss test", color="blue")
    plt.plot(params["step"],
             np.array(param_list_to_quant("acc_train", 0.5, p_list)),
             label="miss train", color="blue", linestyle="--")
    if not right_label_remove:
        plt.ylabel("misses", color="blue")
    else:
        plt.gca().yaxis.set_ticklabels([])
    if lim_acc:
        plt.ylim(lim_acc)
    plt.legend(loc="upper right")
    plt.tight_layout()
def plot_quantiles(root_exp_folder,
                   subfolds=("uniform", "prediction", "stratum"),
                   alpha=0.05, figsize=(3, 3),
                   lim_acc=None, lim_cost=None,
                   camera_ready=True):
    """
    Plot the quantile graphs for a standardized experiment.

    If the experiment data is in a folder named exp, it expects to find
    subfolders subfolds in which there are folders with runs and a
    params.json file that contains the result of the runs. It will generate
    a quantile plot (saved as quantiles.pdf) for each of the experiments.
    """
    for type_weight in subfolds:
        cur_dir = "{}/tw_{}/".format(root_exp_folder, type_weight)
        params_list = get_param_list_for_quantiles(root_exp_folder,
                                                   type_weight)
        plt.figure(figsize=figsize)
        # Camera-ready layout drops redundant axis labels depending on
        # which panel (reweighting scheme) is being drawn.
        right_label_remove = camera_ready and type_weight == "uniform"
        left_label_remove = camera_ready and type_weight == "prediction"
        plot_quant_cost_acc(params_list, alpha=alpha,
                            lim_acc=lim_acc, lim_cost=lim_cost,
                            left_label_remove=left_label_remove,
                            right_label_remove=right_label_remove)
        if not camera_ready:
            plt.title(type_weight)
        plt.savefig("{}/{}.pdf".format(cur_dir, "quantiles"), format="pdf")
def get_param_list_for_quantiles(root_exp_folder, type_weight):
    """Accumulates all of the json files for different runs and reweighting.

    Scans ``<root_exp_folder>/tw_<type_weight>/<run>/params.json`` for
    every run folder and returns the list of parsed JSON dicts; run
    folders without a params.json are skipped.
    """
    cur_dir = "{}/tw_{}/".format(root_exp_folder, type_weight)
    params_list = list()
    for cur_run in os.listdir(cur_dir):
        cur_file = cur_dir + cur_run + "/params.json"
        if os.path.exists(cur_file):
            # Use a context manager so the handle is closed
            # deterministically (the original leaked open files).
            with open(cur_file, "rt") as f:
                params_list.append(json.load(f))
    return params_list
def plot_quantiles_cost_n_miss(root_exp_folder,
                               subfolds=("uniform", "prediction", "stratum"),
                               sf_style={"uniform": {"color": "blue",
                                                     "label": "Uniform"},
                                         "prediction": {"color": "green",
                                                        "label": "Weighted"},
                                         "stratum": {"color": "green",
                                                     "label": "Stratum"},
                                         },
                               alpha=0.05, figsize=(3, 3), lim_acc=None,
                               lim_cost=None, lim_top=None, camera_ready=True):
    """
    Plot the quantile graphs for a standardized experiment.

    If the experiment data is in a folder named exp, it expects to find
    subfolders subfolds in which there are folders with runs and a
    params.json file that contains the result of the runs. It will generate
    a quantile plot for each of the experiments (one figure per metric:
    miss rate, SCE, top-5 error), comparing the reweighting schemes.

    NOTE(review): sf_style is a mutable default argument; it is only
    read here, but callers must not mutate it.
    """
    print("Loading the data...")
    # One list of run dicts per reweighting scheme, same order as subfolds.
    list_params_list = list()
    for type_weight in subfolds:
        list_params_list.append(get_param_list_for_quantiles(
            root_exp_folder, type_weight))
    print("Plotting results...")
    for plotted_val in ("acc", "cost", "topk"):
        plt.figure(figsize=figsize)
        for itw, type_weight in enumerate(subfolds):
            params_list = list_params_list[itw]
            params = params_list[0]
            color = sf_style[type_weight]["color"]
            # Test curve with quantile band; train median as dashed line.
            plot_quant(params_list, "{}_test".format(plotted_val), "",
                       color=color)
            plt.plot(params["step"], param_list_to_quant(
                "{}_train".format(plotted_val), 0.5, params_list),
                color=color, linestyle="--")
        # Per-metric y-limits and axis label.
        if plotted_val == "acc" and lim_acc:
            plt.ylim(lim_acc)
            plt.ylabel("Miss rate")
        if plotted_val == "cost" and lim_cost:
            plt.ylim(lim_cost)
            plt.ylabel("SCE")
        if plotted_val == "topk" and lim_top:
            plt.ylim(lim_top)
            plt.ylabel("Top-5 error")
        if not camera_ready:
            plt.title(plotted_val)
        # Two legends: train/test linestyle, and one color per scheme.
        train_lgd = Line2D([0, 0], [1, 1], color="black", linestyle="--")
        test_lgd = Line2D([0, 0], [1, 1], color="black", linestyle="-")
        legend1 = plt.gca().legend([train_lgd, test_lgd], ["Train", "Test"],
                                   loc="upper right")
        legend_lines = [Line2D([0, 0], [1, 1],
                               color=sf_style[k]["color"], linestyle="-")
                        for k in subfolds]
        legend_names = [sf_style[k]["label"] for k in subfolds]
        plt.gca().legend(legend_lines, legend_names, loc="lower left")
        # add_artist keeps the first legend alive after the second call.
        plt.gca().add_artist(legend1)
        plt.grid()
        plt.tight_layout()
        # elif plotted_val == "acc":
        #     plt.title("Miss rate")
        # else:
        #     plt.title("SCE")
        plt.savefig("{}/{}_{}.pdf".format(root_exp_folder, "quant",
                                          plotted_val), format="pdf")
    print("Done !")
def plot_class_probas(params, with_ticklabels=True):
    """Plots the probabilities of each class for the train and test set
    as side-by-side bar charts (blue = train, green = test)."""
    n_classes = len(params["p_y_train"])
    width = 0.35
    ind = np.arange(n_classes)
    ax = plt.gcf().subplots()
    rects1 = ax.bar(ind, params["p_y_train"], width, color="blue")
    rects2 = ax.bar(ind + width, params["p_y_test"], width, color="green")
    ax.set_ylabel("Probability")
    ax.set_xlabel("Class")
    # Center the tick between the paired bars.
    ax.set_xticks(ind+width/2)
    if with_ticklabels:
        ax.set_xticklabels(ind)
    else:
        ax.set_xticklabels(["" for _ in ind])
    ax.legend((rects1[0], rects2[0]), ("Train", "Test"))
    plt.grid()
    plt.gcf().tight_layout()
def plot_strata_probas(params, with_ticklabels=True):
    """Plot the probabilities of each strata for train and test as
    side-by-side bar charts (blue = train, green = test)."""
    n_stratas = len(params["p_z_train"])
    width = 0.35
    ind = np.arange(n_stratas)
    ax = plt.gcf().subplots()
    rects1 = ax.bar(ind, params["p_z_train"], width, color="blue")
    rects2 = ax.bar(ind + width, params["p_z_test"], width, color="green")
    ax.set_ylabel("Probability")
    ax.set_xlabel("Strata")
    # Center the tick between the paired bars.
    ax.set_xticks(ind+width/2)
    if with_ticklabels:
        ax.set_xticklabels(ind)
    else:
        ax.set_xticklabels(["" for _ in ind])
    ax.legend((rects1[0], rects2[0]), ("Train", "Test"))
    # Horizontal grid lines only, unlike plot_class_probas.
    plt.gca().yaxis.grid(True)
    # plt.grid()
    plt.gcf().tight_layout()
| RobinVogel/Weighted-Empirical-Risk-Minimization | plot_utils.py | plot_utils.py | py | 10,788 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyp... |
9896960203 | #!/usr/bin/env python3
#-*- coding: utf-8 -*-
import os
import sys
import rospy
import numpy as np
import cv2
from sensor_msgs.msg import Image, CompressedImage
from cv_bridge import CvBridge, CvBridgeError
from darknet_ros_msgs.msg import BoundingBoxes, ObjectCount
class Bridge(object):
    """Bridges compressed camera images and darknet detections.

    Decodes the compressed color stream into an OpenCV BGR image and
    keeps the latest set of bounding boxes published by darknet_ros.
    """

    def __init__(self):
        # Shared state written by the subscriber callbacks.
        self.bridge = CvBridge()
        self.bounding_boxes = BoundingBoxes()
        self.image = None
        # Publisher for the (re-)compressed detection image.
        self.compressed_detection_image_pub = rospy.Publisher("/detection_image/compressed", CompressedImage, queue_size=1)
        # Subscribers: compressed color frames and darknet bounding boxes.
        compressed_color_image_sub = rospy.Subscriber("camera/color/image_raw/compressed", CompressedImage, self.bridge_color_image)
        bounding_boxes_sub = rospy.Subscriber('darknet_ros/bounding_boxes', BoundingBoxes, self.update_bounding_boxes)

    def bridge_color_image(self, data):
        """Decode a CompressedImage message into a BGR OpenCV array."""
        # np.frombuffer replaces the deprecated np.fromstring for raw bytes.
        np_arr = np.frombuffer(data.data, np.uint8)
        self.image = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

    def update_bounding_boxes(self, data):
        """Store the latest detections and scan them for 'person' boxes."""
        # Bug fix: the original assigned to a *local* variable
        # (bounding_boxes = data), so self.bounding_boxes was never
        # updated and the loop below always iterated the empty initial
        # message.
        self.bounding_boxes = data
        for i in range(len(self.bounding_boxes.bounding_boxes)):
            try:
                if self.bounding_boxes.bounding_boxes[i].Class == 'person':
                    probability = self.bounding_boxes.bounding_boxes[i].probability
                    xmin = self.bounding_boxes.bounding_boxes[i].xmin
                    ymin = self.bounding_boxes.bounding_boxes[i].ymin
                    xmax = self.bounding_boxes.bounding_boxes[i].xmax
                    ymax = self.bounding_boxes.bounding_boxes[i].ymax
                    _id = i + 1
                    _class = self.bounding_boxes.bounding_boxes[i].Class
                    # TODO(review): these extracted values are currently
                    # unused; presumably meant to be stored or published.
            except Exception:
                # Best-effort: skip malformed box entries.
                pass
if __name__ == '__main__':
    # Initialize the ROS node, set up the bridge, and spin until shutdown.
    rospy.init_node('bridge', anonymous=False)
    bridge = Bridge()
    rospy.spin()
| Taemin0707/minibot_control | pedestrian_tracking/src/visualizing.py | visualizing.py | py | 2,884 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv_bridge.CvBridge",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "darknet_ros_msgs.msg.BoundingBoxes",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": ... |
39154979229 | from typing import Callable
def format_number(number: int) -> str:
    """Format *number* in human-readable form with a metric suffix.

    The value is first rounded to three significant figures, then
    scaled down by factors of 1000 and suffixed ('', K, M, G, T, P),
    e.g. ``1234 -> '1.23K'``.  Trailing zeros and the trailing dot are
    stripped from the mantissa.
    """
    value = float(f"{number:.3g}")
    suffixes = ["", "K", "M", "G", "T", "P"]
    index = 0
    while abs(value) >= 1000:
        value /= 1000.0
        index += 1
    mantissa = f"{value:f}".rstrip("0").rstrip(".")
    return f"{mantissa}{suffixes[index]}"
| SkyLissh/skylet-discord | app/utils/format_number.py | format_number.py | py | 358 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Callable",
"line_number": 8,
"usage_type": "name"
}
] |
12685785417 | import itertools
from itertools import izip, cycle
import os
import string
import glob
from moduleBaseClass import ModuleBaseClass
class XorStuff:
    # NOTE: Python 2 code (itertools.izip, byte strings handled as str).
    """Brute-forces repeating-key XOR on a file whose type is known.

    File-type modules (loaded from modules/) provide the expected
    header template plus per-byte and final plausibility checks.
    """

    def __init__(self, filepath=None):
        """Constructor : set xored file (optional)
        """
        self.file_type = None
        self.list_types = self.load_files_types('modules/')
        if filepath is not None:
            self.file_content = self.set_file_content(filepath)

    def load_files_types(self, path):
        """Load all modules from modules/ and make them available.

        Returns a dict mapping each module's ``name`` attribute to an
        instance of its ``Module`` class (ModuleBaseClass subclasses).
        """
        list_types = {}
        files = glob.glob(path + "*")
        for file in files:
            file_name, file_extension = os.path.splitext(file)
            if not file_name.endswith("__init__") and file_extension == ".py":
                # Turn "modules/foo.py" into the dotted name "modules.foo"
                # and walk down to the leaf module object.
                module_name = file_name.replace("/", ".")
                mod = __import__(module_name)
                modules = module_name.split('.')
                for module in modules[1:]:
                    mod = getattr(mod, module)
                if issubclass(mod.Module, ModuleBaseClass):
                    instance = mod.Module()
                    list_types[instance.name] = instance
        return list_types

    def xor(self, data, key, file_type=None):
        """Perform a simple xor with data and key
        file_type is an instance of modules and provide file checking

        Returns the xored string, or None as soon as a decoded byte
        fails the module's live check (early abort for wrong keys).
        """
        result = []
        # cycle(key) repeats the key to cover the whole data.
        for data, char_key in izip(data, cycle(key)):
            byte = chr(ord(data) ^ ord(char_key))
            if file_type is not None:
                if not file_type.live_check(byte):
                    return None
            result.append(byte)
        return ''.join(result)

    def set_file_content(self, filepath, length=None):
        """Open xored file and store content
        Optional : can store n bytes only
        """
        bin_file = ''
        with open(filepath, "rb") as f:
            # Read byte by byte until EOF (empty read in Python 2).
            byte = f.read(1)
            index = 0
            while byte != "":
                bin_file = bin_file + byte
                byte = f.read(1)
                if length is not None:
                    if index == length:
                        break
                    index = index + 1
        self.file_content = bin_file

    def get_pass(self, key_length, grep=None):
        """Try to recover key(s) for a given length and yield them
        Optional : can grep bytes in result

        Generator: brute-forces the '%s' placeholders of the file-type
        header template, derives a candidate key by xoring the known
        header against the file's first bytes, and yields keys whose
        full decryption passes the module's checks.
        """
        # Padding of header with %s if key length > header length
        if int(key_length) > len(self.file_type.header):
            header_no_formatters = self.file_type.header.replace('%s', '?')
            formatters = '%s' * (int(key_length) - len(header_no_formatters))
            self.file_type.header = "%s%s" % (self.file_type.header,
                                              formatters)
        bf_length = self.file_type.header.count('%s')
        header_length = len(self.file_type.header.replace('%s', '?'))
        bin_header = self.file_content[:header_length]
        # Candidate bytes for unknown header positions: all 7-bit chars.
        charset = ''.join([chr(i) for i in range(128)])
        # Keys are assumed printable (letters, digits, punctuation).
        key_charset = string.ascii_letters + string.digits + string.punctuation
        # generate keys
        for char in itertools.product(charset, repeat=bf_length):
            generated_header = self.file_type.header % char
            output = self.xor(bin_header, generated_header)
            key = output[0: key_length]
            if not [c for c in key if c not in key_charset]:
                raw = self.xor(self.file_content, key, self.file_type)
                if raw is not None:
                    if self.file_type.final_check(raw):
                        if grep is not None:
                            if grep in raw:
                                yield key
                        else:
                            yield key

    def set_file_type(self, file_type):
        """Load correct file type module according to file extension name
        """
        self.file_type = self.list_types[file_type]
| tengwar/xorstuff | xorstuff.py | xorstuff.py | py | 4,032 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "moduleBaseClass.ModuleBaseCla... |
8036114944 | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
#from django.template import loader # no longer needed b/c of render shortcut
from .models import Question, Choice
# Create your views here.
class IndexView(generic.ListView):
    """Generic list view showing the latest published questions.

    Renders polls/index.html with the queryset exposed to the template
    as ``latest_question_list``.
    """
    template_name = 'polls/index.html'
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """
    Generic detail view for a single question. Each generic view needs
    to know what model it will be acting upon; this is provided using
    the model attribute. The detail view expects the primary key value
    captured from the url to be called 'pk', so the URL pattern uses
    'pk' instead of 'question_id' for the generic views.
    """
    model = Question
    template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
    """
    Generic detail view rendering the vote results of one question
    (looked up by 'pk' from the URL) with polls/results.html.
    """
    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Register one vote for a choice of the given question.

    Looks up the question (404 if missing), increments the vote count
    of the choice submitted in the POST data, and redirects to the
    results page. If no choice was submitted (or it does not exist),
    the detail page is re-rendered with an error message. Redirecting
    after a successful POST prevents the data from being posted twice
    when the user hits the back button.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        # request.POST['choice'] is the id of the selected choice, as a string.
        choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # No valid choice submitted: show the form again with an error.
        context = {'question': question,
                   'error_message': "You didn't select a choice."}
        return render(request, 'polls/detail.html', context)
    choice.votes += 1
    choice.save()
    results_url = reverse('polls:results', args=(question_id,))
    return HttpResponseRedirect(results_url)
#################################################################################################################3
### Previous versions of functions below!
#################################################################################################################3
#def index(request):
#"""
#index will display the latest 5 poll questions in the system, separated by commans,
#according to the publication date.
#"""
#latest_question_list = Question.objects.order_by('-pub_date')[:5]
#template = loader.get_template('polls/index.html')
#context = {
#'latest_question_list': latest_question_list
#}
# httpresponse is common, but render is a shortcut!
#return HttpResponse(template.render(context, request))
#def index(request):
#"""
#The simplest view possible in Django
#"""
#return HttpResponse("Hello world! You're at the polls index!")
# one way of writing a 404 error
#def detail(request, question_id):
#"""
#Returns a simple template response.
#"""
#try:
#question = Question.objects.get(pk=question_id)
#except Question.DoesNotExist:
#raise Http404("Question does not exist!")
#return render(request, 'polls/detail.html', {'question': question})
| Alex-Bishka/Languages | Django/mysite/polls/views.py | views.py | py | 3,562 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.views.generic",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.Question.objects.order_by",
"line_number": 18,
"usage_type": "call"
},
... |
11473243619 | import os
import zipfile
from abc import ABCMeta
from pathlib import Path
from typing import Optional, Union
from urllib.request import urlretrieve
class BaseDownloader(metaclass=ABCMeta):
    """Base downloader for all Movielens datasets.

    Subclasses define ``DOWNLOAD_URL`` (location of the zip archive)
    and ``DEFAULT_PATH`` (where the extracted dataset lives). On
    construction the archive is downloaded and extracted unless the
    target path already exists.
    """

    DOWNLOAD_URL: str
    DEFAULT_PATH: str

    def __init__(self, zip_path: Optional[Union[Path, str]] = None):
        target = self.DEFAULT_PATH if zip_path is None else zip_path
        self.zip_path = Path(target)
        if not self.zip_path.exists():
            self._retrieve()

    def _retrieve(self) -> None:
        """Download the archive, extract it beside zip_path, delete it."""
        archive: str = str(self.zip_path) + ".zip"
        urlretrieve(self.DOWNLOAD_URL, filename=archive)
        with zipfile.ZipFile(archive) as zf:
            zf.extractall(self.zip_path.parent)
        os.remove(archive)
| smartnews/rsdiv | src/rsdiv/dataset/base.py | base.py | py | 876 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "abc.ABCMeta",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_numb... |
16539248084 | import markovify
import sys
import argparse
import configparser
import twitter
model_depth_default = 2
model_depth = model_depth_default
def main():
    """Command-line entry point: train a Markov model or post a tweet."""
    arg_parser = argparse.ArgumentParser(description="Generate text with Markov chains based on a source corpus.")
    subparser = arg_parser.add_subparsers(dest="subparser_name")
    subparser_train = subparser.add_parser("train")
    subparser_train.add_argument("corpus", help="Path to a corpus to train with.")
    subparser_train.add_argument("savepath", help="Path to where to save the model, in JSON format.")
    subparser_tweet = subparser.add_parser("tweet")
    subparser_tweet.add_argument("corpus", help="Path to a corpus.")
    subparser_tweet.add_argument("modelpath", help="Path to a model built with \"train\"")
    subparser_tweet.add_argument("--no-post", help="Do not post to Twitter, write to stdout and exit.", action="store_true")
    args = arg_parser.parse_args()

    # Twitter API credentials come from the local ini file.
    config = configparser.ConfigParser()
    config.read("poorlytrained.ini")
    twitter_consumer_key = config["keys"]["consumerkey"]
    twitter_consumer_secret = config["keys"]["consumersecret"]
    twitter_access_token = config["keys"]["accesstoken"]
    twitter_access_token_secret = config["keys"]["accesstokensecret"]

    # Bug fix: the original assigned the raw config *string* inside the
    # try and never set the local on failure, so a missing key led to an
    # UnboundLocalError later (despite the warning claiming a default
    # was used), and a present key passed a str where markovify expects
    # an int state size.
    try:
        model_depth = int(config["markov"]["modeldepth"])
    except (KeyError, ValueError):
        model_depth = model_depth_default
        sys.stderr.write("WARNING: Could not read model depth from configuration file. Defaulting to {}.\n".format(model_depth_default))

    if(args.subparser_name == "train"):
        # Train a model on the corpus and persist its chain as JSON.
        with open(args.corpus) as f:
            text = f.read()
        text_model = markovify.Text(text)
        with open(args.savepath, "w") as f:
            f.write(text_model.chain.to_json())
    elif(args.subparser_name == "tweet"):
        # Rebuild the model from the saved chain plus the corpus, then
        # generate a tweet-sized sentence.
        with open(args.corpus) as corpus:
            with open(args.modelpath) as model:
                model_chain = markovify.Chain.from_json(model.read())
                text_model = markovify.Text(corpus.read(), model_depth, model_chain)
                tweet_message = text_model.make_short_sentence(140)
                print(tweet_message)
                if(args.no_post == False):  # If --no-post was not specified, go ahead and post.
                    tapi = twitter.Api(twitter_consumer_key, twitter_consumer_secret, twitter_access_token, twitter_access_token_secret)
                    tapi.PostUpdate(tweet_message)
# Script entry point.
if __name__ == "__main__":
    main()
| nanovad/poorlytrained | poorlytrained.py | poorlytrained.py | py | 2,216 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "configparser.ConfigParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "sy... |
33066511083 | """
enums.py
Contains the different types of objects for the application.
"""
# Import modules and libraries
from enum import Enum
class UserType(Enum):
    """
    Represents the different types of users in the application.
    Values are the plain strings stored/serialized for each role;
    ``user`` is the default role.
    """
    admin = "admin"
    manager = "manager"
    inspector = "inspector"
    maintenance_worker = "maintenance_worker"
    repair_tech = "repair_tech"
    user = "user"  # Default value
    archived = "archived"
class FireClass(Enum):
    """
    Represents the common fire types an extinguisher can put out in the
    United States; ``other`` is the fallback when none applies.
    """
    A = "A"  # Ordinary solid combustibles
    B = "B"  # Flammable liquids and gases
    C = "C"  # Energized electrical equipment
    D = "D"  # Combustible metals
    K = "K"  # Oils and fats
    ABC = "ABC"  # Multi-purpose dry chemical
    other = "other"  # Default value
class TicketType(Enum):
    """
    Represents the type of ticket: inspections, damage reports and
    repairs for extinguishers ("ext") and their boxes.
    """
    inspect = "inspect"
    damaged_ext = "damaged_ext"
    repair_ext = "repair_ext"
    damaged_box = "damaged_box"
    repair_box = "repair_box"
    other = "other"  # Default value
class TicketState(Enum):
    """
    Represents the state of a ticket in its lifecycle
    (open -> in_progress -> closed, with archived for retired tickets).
    """
    open = "open"
    in_progress = "in_progress"
    closed = "closed"
    archived = "archived"
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 44,
"... |
28555182089 | # This is a sample Python script.
# Press ⌃R to execute it or replace it with your code.
# Press Double ⇧ to search everywhere for classes, files, tool windows, actions, and settings.
import sys
import asyncio
import datetime
import time
import collections
import json
# function to execute aws cli 'iam' command
async def getAwsIamData(require_data: str) -> tuple:
    """Run ``aws iam <require_data>`` and return (stdout, stderr) bytes.

    Note: the annotation was corrected from ``list`` — communicate()
    returns a (stdout, stderr) tuple.
    NOTE(review): the command is built by string concatenation and run
    through the shell; require_data must come from trusted code only.
    """
    command = "aws iam "
    command += require_data
    process = await asyncio.create_subprocess_shell(
        command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE)
    return await process.communicate()
# function to get user list
async def getAwsUserList() -> str:
    """Return the JSON user list from ``aws iam list-users``.

    Returns the decoded stdout on success, or the sentinel string
    "error" if the CLI wrote anything to stderr.
    """
    output, errs = await getAwsIamData("list-users")
    if errs:
        print("Error occurred to get user list")
        return "error"
    return output.decode()
# function to get access key list
async def getAwsAccessKeys(userName) -> str:
    """Return the JSON access-key list for ``userName`` from
    ``aws iam list-access-keys``.

    Returns the decoded stdout on success, or the sentinel string
    "error" if the CLI wrote anything to stderr.
    """
    output, errs = await getAwsIamData("list-access-keys --user-name " + userName)
    if errs:
        print("Error occurred to get access key list:", errs)
        return "error"
    return output.decode()
# get access key list per user and arranging them in the dictionary
# get access key list per user and arranging them in the dictionary
async def arrangeUserAccessKeyData(user_map, userName: str, current_time: float, valid_time: int):
    """Fetch ``userName``'s access keys and record the expired ones.

    ``user_map`` is a defaultdict(dict) keyed by user name; for every
    key older than ``valid_time`` seconds, the user name, key id and
    creation date are stored (a later expired key for the same user
    overwrites an earlier one).
    NOTE(review): checkIsValid returns True when the key has *expired*
    despite its name — confirm before renaming.
    """
    # request access key list assigned to {userName}
    keys = json.loads(await getAwsAccessKeys(userName))
    # iterate access keys and check if it is valid.
    for key in keys["AccessKeyMetadata"]:
        if await checkIsValid(key["CreateDate"], current_time, valid_time):
            user_map[key["UserName"]]["UserName"] = key["UserName"]
            user_map[key["UserName"]]["AccessKeyId"] = key["AccessKeyId"]
            user_map[key["UserName"]]["CreateDate"] = key["CreateDate"]
# check if create_time + valid_time is older than current_time
async def checkIsValid(created_time: str, current_time: float, valid_time: int) -> bool:
    """Return True if the key created at ``created_time`` has expired.

    Args:
        created_time: ISO timestamp with a literal "+00:00" (UTC)
            offset, e.g. "2020-01-01T00:00:00+00:00", as returned by
            the AWS CLI.
        current_time: reference time in epoch seconds (time.time()).
        valid_time: validity window in seconds.

    Returns:
        True when ``created_time + valid_time`` is older than
        ``current_time`` (i.e. the key is no longer valid).
    """
    date = datetime.datetime.strptime(created_time, '%Y-%m-%dT%H:%M:%S+00:00')
    # Bug fix: the timestamp is explicitly UTC, but time.mktime()
    # interprets a struct_time in *local* time, skewing the comparison
    # by the machine's UTC offset.  Attach the UTC timezone and use
    # datetime.timestamp() to get correct epoch seconds.
    created_at = date.replace(tzinfo=datetime.timezone.utc).timestamp()
    if created_at + valid_time < current_time:  # key has expired
        return True
    else:  # key is still valid
        return False
async def detectInvalidUserkey(valid_time:int):
    """Find IAM access keys older than *valid_time* hours and write a report.

    Writes the expired keys, one block per user, to invalid_access_keys.txt.

    :param valid_time: validity window in hours
    """
    # Fetch the raw user list first: getAwsUserList returns the sentinel
    # string "error" on failure, which must be checked *before* json.loads.
    # (The original parsed first, so the error branch was unreachable and a
    # failed CLI call raised ValueError from json.loads instead.)
    raw_users = await getAwsUserList()
    if raw_users == "error":
        print("Error occurred to get User data")
        return
    users = json.loads(raw_users)
    # dictionary to save user/key data
    user_data = collections.defaultdict(dict)
    # current time to be base
    current_time = time.time()
    valid_time *= 3600  # hours -> seconds
    tasks = []
    # get access keys per user and arrange them in 'user_data' dictionary
    for user in users["Users"]:
        tasks.append(arrangeUserAccessKeyData(user_data, user["UserName"], current_time, valid_time))
    # running the tasks asynchronously
    await asyncio.gather(*tasks)
    # write result(user_data) on the file
    with open("invalid_access_keys.txt", "w") as fd:
        fd.write(" - Get Invalid User List - At: " + datetime.datetime.fromtimestamp(current_time).strftime("%m/%d/%Y, %H:%M:%S") + "\n\n")
        for key, val in user_data.items():
            fd.write(" " + "{\n")
            for nkey, nval in val.items():
                fd.write(" \t" + nkey + ": " + nval + "\n")
            fd.write(" }\n")
# Press the green button in the gutter to run the script.
if __name__ == '__main__':
    # Guard against a missing or non-numeric CLI argument instead of
    # crashing with IndexError/ValueError; 0 also falls through to the
    # usage message.
    try:
        limit = int(sys.argv[1])
    except (IndexError, ValueError):
        limit = 0
    if not limit:
        print("please put validation time to filter access keys.")
    else:
        asyncio.run(detectInvalidUserkey(limit))
| SeungwookE/InvalidAccessKeyDetector | main.py | main.py | py | 3,645 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "asyncio.create_subprocess_shell",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "asyncio.subprocess",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "asyncio.subprocess",
"line_number": 19,
"usage_type": "attribute"
},
{
"a... |
10663941067 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 21 16:38:00 2016
@author: Neo
Oct 25, 2016: updated by Niu.
"""
import numpy as np
import matplotlib.pyplot as plt
res_dir = '../results/'
dat_fil = ['OVrot.dat', 'GRrot.dat']
#glide for the three special catalog
# Rotation values for the special comparison catalogs, one row per catalog.
# NOTE(review): rows up to w3[3] are indexed below, so SpecialSets.rot
# presumably holds four catalogs (212 ICRF, 295 ICRF, 247 MFV, 260 AMS).
w3 = np.loadtxt(res_dir + 'SpecialSets.rot', usecols=(1,3,5,7))
# Number of sources used in each solution (common x-axis of all panels).
Num = np.loadtxt(res_dir + dat_fil[0], usecols=(0,), dtype=int)
# Rotation magnitude/components and their uncertainties for the OV solution.
W, W_E, WX, WX_E, WY, WY_E, WZ, WZ_E = \
np.loadtxt(res_dir + dat_fil[0], usecols=list(range(1,9)), unpack=True)
# Same quantities for the GR solution.
Wg, W_Eg, WXg, WX_Eg, WYg, WY_Eg, WZg, WZ_Eg = \
np.loadtxt(res_dir + dat_fil[1], usecols=list(range(1,9)), unpack=True)
i = Num
# Constant vector used to draw the horizontal catalog reference lines.
y = np.ones(Num.size)
#############################################################################
#plot, writted a year ago.
#i = range(100,len(Sou)+1)
#y = np.ones(len(i))
#
#fig, ax = plt.subplots(2, 2)
#((ax1, ax2), (ax3, ax4)) = ax
#
#ax1.plot(i, WX, 'b')
##ax1.plot(i, WXg, 'r')
#ax1.set_ylabel('$\omega_x$',fontsize = 25)
#ax1.plot(i, w3[0][1]*y, ':', label = '212 ICRF' )
#ax1.plot(i, w3[1][1]*y, '-.', label = '295 ICRF' )
#ax1.plot(i, w3[2][1]*y, '--', label = '247 MFV' )
#
#ax2.plot(i, WY, 'b')
##ax2.plot(i, WYg, 'r')
#ax2.set_ylabel('$\omega_y$',fontsize = 25)
#ax2.plot(i, w3[0][2]*y, ':', label = '212 ICRF' )
#ax2.plot(i, w3[1][2]*y, '-.', label = '295 ICRF' )
#ax2.plot(i, w3[2][2]*y, '--', label = '247 MFV' )
#
#ax3.plot(i, WZ, 'b')
##ax3.plot(i, WZg, 'r')
#ax3.set_ylabel('$\omega_z$',fontsize = 25)
#ax3.plot(i, w3[0][3]*y, ':', label = '212 ICRF' )
#ax3.plot(i, w3[1][3]*y, '-.', label = '295 ICRF' )
#ax3.plot(i, w3[2][3]*y, '--', label = '247 MFV' )
#
#ax4.plot(i, W, 'b')
##ax4.plot(i, Wg, 'r')
#ax4.set_ylabel('$\omega$',fontsize = 25)
#ax4.plot(i, w3[0][0]*y, ':' , label = '212 ICRF' )
#ax4.plot(i, w3[1][0]*y, '-.', label = '295 ICRF' )
#ax4.plot(i, w3[2][0]*y, '--', label = '247 MFV' )
#
#ax1.legend()
#ax2.legend()
#ax3.legend()
#ax4.legend()
#
#ax1.set_xlabel('No. Sources',fontsize = 15)
#ax2.set_xlabel('No. Sources',fontsize = 15)
#ax3.set_xlabel('No. Sources',fontsize = 15)
#ax4.set_xlabel('No. Sources',fontsize = 15)
#
#plt.show()
##plt.savefig('../plot/OARank_rot.eps')
#plt.savefig('../plot/GRank_rot.eps')
##############################################################################
plt.figure()
#set the size of subplots
# Layout: three thin strip panels (r1, r2, r3) stacked above one
# double-height panel for the total rotation r, all sharing the x-axis.
left,width = 0.10,0.85
bottom,height = 0.1, 0.17
bottom_3 = bottom + height*2 + 0.01
bottom_2 = bottom_3 + height + 0.01
bottom_1 = bottom_2 + height + 0.01
scale4 = [left, bottom, width, height*2]
scale3 = [left, bottom_3, width, height]
scale2 = [left, bottom_2, width, height]
scale1 = [left, bottom_1, width, height]
ax1 = plt.axes(scale1)
ax2 = plt.axes(scale2, sharex = ax1)
ax3 = plt.axes(scale3, sharex = ax1)
ax4 = plt.axes(scale4)
# Panel r1: |wx| for both solutions (blue: OV file, red: GR file) plus
# dashed horizontal lines at the reference-catalog values.
ax1.plot(i, np.abs(WX) , 'b', linewidth=3)
ax1.plot(i, np.abs(WXg), 'r', linewidth=3)
ax1.set_ylabel('$r_1$',fontsize = 25)
ax1.set_xlim([100, max(i)])
ax1.set_xticks([100,150,200,250,300,350,400,450,500,550])
ax1.set_xticklabels(['','','','','','','','','',''])
ax1.set_ylim([0,15])
ax1.set_yticks(np.arange(0, 15, 5))
ax1.set_yticklabels(['0','5','10'],fontsize = 12)
ax1.plot(i, np.abs(w3[0][1])*y, 'b--', label = '212 ICRF' )
ax1.plot(i, np.abs(w3[1][1])*y, 'g--', label = '295 ICRF')
ax1.plot(i, np.abs(w3[2][1])*y, 'y--', label = '247 MFV' )
ax1.plot(i, np.abs(w3[3][1])*y, 'k--', label = '260 AMS' )
# Panel r2: |wy|.
ax2.plot(i, np.abs(WY) , 'b', linewidth=3)
ax2.plot(i, np.abs(WYg), 'r', linewidth=3)
ax2.set_ylabel('$r_2$',fontsize = 25)
ax2.set_ylim([0,20])
ax2.set_yticks([0,5,10,15])
ax2.set_yticklabels(['0','5','10','15'],fontsize = 12)
ax2.plot(i, np.abs(w3[0][2])*y, 'b--', label = '212 ICRF' )
ax2.plot(i, np.abs(w3[1][2])*y, 'g--', label = '295 ICRF')
ax2.plot(i, np.abs(w3[2][2])*y, 'y--', label = '247 MFV' )
ax2.plot(i, np.abs(w3[3][2])*y, 'k--', label = '260 AMS' )
# Panel r3: |wz|.
ax3.plot(i, np.abs(WZ) , 'b', linewidth=3)
ax3.plot(i, np.abs(WZg), 'r', linewidth=3)
ax3.set_ylabel('$r_3$',fontsize = 25)
ax3.set_ylim([0, 15])
ax3.set_yticks(np.arange(0, 15, 5))
ax3.set_yticklabels(['0','5','10'],fontsize = 12)
# NOTE(review): unlike ax1/ax2, the catalog lines here and in ax4 are
# plotted without np.abs — confirm whether that is intentional.
ax3.plot(i, w3[0][3]*y, 'b--', label = '212 ICRF' )
ax3.plot(i, w3[1][3]*y, 'g--', label = '295 ICRF')
ax3.plot(i, w3[2][3]*y, 'y--', label = '247 MFV' )
ax3.plot(i, w3[3][3]*y, 'k--', label = '260 AMS' )
# Bottom panel: total rotation magnitude r.
ax4.plot(i, W, 'b', linewidth=3)
ax4.plot(i, Wg, 'r', linewidth=3)
ax4.set_ylabel('$r$', fontsize=25)
ax4.set_ylim([0,20])
ax4.set_yticks(np.arange(0, 20, 5))
ax4.set_yticklabels(['0','5','10','15'],fontsize = 12)
ax4.plot(i, w3[0][0]*y, 'b--' , label = '212 ICRF' )
ax4.plot(i, w3[1][0]*y, 'g--', label = '295 ICRF' )
ax4.plot(i, w3[2][0]*y, 'y--', label = '247 MFV' )
ax4.plot(i, w3[3][0]*y, 'k--', label = '260 AMS' )
ax4.set_xlim([100,max(i)])
ax4.set_xticks([100,150,200,250,300,350,400,450,500,550, max(i)])
ax4.set_xticklabels(['100','','200','','300','','400','','500', ''],fontsize = 15)
ax4.legend(loc=0, fontsize=10)
ax4.set_xlabel('No. Sources', fontsize=15)
plt.show()
# NOTE(review): savefig after plt.show() may write an empty figure in
# non-interactive backends — confirm intended order.
plt.savefig('../plot/Rotation_No.eps', dpi=100)
plt.close()
print('Done!')
{
"api_name": "numpy.loadtxt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.loadtxt",
"line_n... |
15193997907 | from enum import Enum
from dynsimf.models.helpers.ConfigValidator import ConfigValidator
from dynsimf.models.components.conditions.Condition import Condition
__author__ = "Mathijs Maijer"
__email__ = "m.f.maijer@gmail.com"
class UpdateType(Enum):
    '''
    An Enum to specify the type of the update
    '''
    STATE = 0        # update targets node states
    NETWORK = 1      # update targets the network itself
    EDGE_VALUES = 2  # update targets values stored on edges
class UpdateConfiguration(object):
    '''
    Configuration for Updates

    :var config: The dictionary containing the key/value pairs of the members of this class,
        if no key/value pair is provided, a default value is used instead
    :vartype config: dict
    :var arguments: A dictionary with arguments for the update function, defaults to empty dict
    :vartype arguments: dict
    :var condition: A condition for nodes that must be met before the update is executed on them
    :vartype condition: Condition or None
    :var get_nodes: A boolean indicating whether the update function should receive a list of sampled nodes as argument,
        defaults to None
    :vartype get_nodes: bool or None
    :var update_type: A value from the `UpdateType` enum, indicating what kind of update is being performed,
        defaults to `UpdateType.STATE`
    :vartype update_type: UpdateType
    '''
    def __init__(self, config=None):
        '''
        Read the member values from *config* and validate them

        :param config: dictionary with values for the class members
        :type config: dict, optional
        '''
        self.set_config(config)
        self.validate()

    def set_config(self, config):
        '''
        Set the values for the members of the class by reading them from the config or setting their default values

        :param dict config: The configuration dictionary with the key/value pairs for the class members
        '''
        # Read every option from self.config (not the raw argument) so a
        # None/empty config consistently falls back to the defaults; the
        # previous mix of config[...] and self.config lookups was fragile.
        self.config = config if config else {}
        self.arguments = self.config.get('arguments', {})
        self.condition = self.config.get('condition', None)
        self.get_nodes = self.config.get('get_nodes', None)
        self.update_type = self.config.get('update_type', UpdateType.STATE)

    def validate(self):
        '''
        Validate the update configuration

        :raises ValueError: if the `update_type` member is not of type `UpdateType`
        '''
        ConfigValidator.validate('arguments', self.arguments, dict)
        ConfigValidator.validate('condition', self.condition, Condition, optional=True)
        ConfigValidator.validate('get_nodes', self.get_nodes, bool, optional=True)
        ConfigValidator.validate('update_type', self.update_type, UpdateType)
        # Defensive re-check in case ConfigValidator does not raise itself
        # (fixed message grammar: "should of" -> "should be of").
        if not isinstance(self.update_type, UpdateType):
            raise ValueError('Update type should be of enum UpdateType')
class Update(object):
    """
    A single update operation: wraps the update function together with the
    settings taken from its :class:`UpdateConfiguration`.

    :var function: the update function to execute
    :vartype function: function
    :var config: the configuration the settings were read from
    :vartype config: UpdateConfiguration
    :var arguments: keyword arguments passed to the update function
    :vartype arguments: dict
    :var condition: optional node condition gating the update
    :vartype condition: Condition or None
    :var get_nodes: whether the function expects the sampled nodes as its
        first positional argument
    :vartype get_nodes: bool or None
    :var update_type: the kind of update (see `UpdateType`)
    :vartype update_type: UpdateType
    """
    def __init__(self, fun, config=None):
        '''
        Store the function and copy the relevant settings out of the
        configuration (a default UpdateConfiguration is built when none is
        given).

        :param function fun: the update function to execute
        :param config: configuration providing the member values
        :type config: UpdateConfiguration, optional
        '''
        self.function = fun
        self.config = config if config else UpdateConfiguration()
        cfg = self.config
        self.arguments = cfg.arguments
        self.condition = cfg.condition
        self.get_nodes = cfg.get_nodes
        self.update_type = cfg.update_type

    def execute(self, nodes=None):
        '''
        Run the update function, forwarding the sampled nodes only when
        `get_nodes` is set, and return its result.

        :param nodes: nodes selected by the scheme/condition, forwarded as
            first argument when `get_nodes` is truthy
        :type nodes: list, optional
        '''
        if not self.get_nodes:
            return self.function(**self.arguments)
        return self.function(nodes, **self.arguments)
| Tensaiz/DyNSimF | dynsimf/models/components/Update.py | Update.py | py | 4,795 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "dynsimf.models.helpers.ConfigValidator.ConfigValidator.validate",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "dynsimf.models.helpers.ConfigValidator.ConfigValidator",
"line_numbe... |
73550814184 | #-*- coding: utf-8 -*-
'''
1. Add department: adddepartment
2. Add class: addclass
3. Delete department: deldepartment
4. Delete class: delclass
5. Edit department: editdepartment
6. Edit class: editclass
'''
from django.shortcuts import render_to_response
from django.template import RequestContext, Template, Context
from classes.models import Class, Department
from teachers.models import Teacher
from students.models import Student
from django.http import HttpResponse, Http404, HttpResponseRedirect
import datetime
import json
import logging
logger = logging.getLogger('mysite.log')
def adddepartment(request):
    """Create a Department from POST['deptname'] (unless the name already exists), then redirect to /students/."""
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    deptls = Department.objects.order_by('id')
    classls = Class.objects.order_by('id')  # NOTE(review): unused in this view
    judgeadd=0
    errors = []  # NOTE(review): collected but never reported back to the user
    if request.method == 'POST':
        if not request.POST.get('deptname',''):
            errors.append('deptname')
        # duplicate check: refuse creation when a department with the same name exists
        for dept in deptls:
            if(dept.deptname==request.POST['deptname']):
                judgeadd=1
                break;
        if(judgeadd==0):
            dept = Department(deptname=request.POST['deptname'],teacherid_id= request.session['userid'],createtime=datetime.datetime.now(),edittime=datetime.datetime.now())
            dept.save()
            deptls = Department.objects.order_by('id')
    departments = Department.objects.order_by('id')  # NOTE(review): unused
    return HttpResponseRedirect('/students/')
def addclass(request):
    """Create a Class from POSTed claname/grade/deptid (unless an identical class exists), then redirect."""
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    deptls = Department.objects.order_by('id')  # NOTE(review): unused in this view
    classls = Class.objects.order_by('id')
    judgeadd=0
    errors = []  # NOTE(review): collected but never reported back to the user
    if request.method == 'POST':
        if not request.POST.get('claname',''):
            errors.append('claname')
        # duplicate check: same name AND grade AND department
        for clas in classls:
            if(clas.claname == request.POST['claname']):
                if(clas.grade == int(request.POST['grade'])):
                    if(clas.departmentid_id ==int(request.POST['deptid'])):
                        judgeadd=1
                        break;
        if(judgeadd==0):
            cla = Class(claname=request.POST['claname'],departmentid_id=int(request.POST['deptid']),grade=int(request.POST['grade']),teacherid_id= request.session['userid'],createtime=datetime.datetime.now(),edittime=datetime.datetime.now())
            cla.save()
            deptls = Department.objects.order_by('id')
    classes = Class.objects.order_by('id')  # NOTE(review): unused
    return HttpResponseRedirect('/students/')
def delclass(request, did):
    """Delete the Class with primary key *did*, then redirect to /students/.

    Raises Http404 when *did* is not an integer or no such class exists.
    """
    global logger
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    try:
        did = int(did)
        cla = Class.objects.get(id=did)
    except (ValueError, Class.DoesNotExist):
        # Previously only ValueError was caught, so a missing id leaked an
        # unhandled Class.DoesNotExist (HTTP 500) instead of returning 404.
        logger.error("classes")
        raise Http404()
    cla.delete()
    return HttpResponseRedirect('/students/')
def deldepartment(request, did):
    """Delete the Department with primary key *did*, then redirect to /students/.

    Raises Http404 when *did* is not an integer or no such department exists.
    """
    global logger
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    try:
        did = int(did)
        dept = Department.objects.get(id=did)
    except (ValueError, Department.DoesNotExist):
        # Previously only ValueError was caught, so a missing id leaked an
        # unhandled Department.DoesNotExist (HTTP 500) instead of a 404.
        logger.error("classes")
        raise Http404()
    dept.delete()
    return HttpResponseRedirect('/students/')
def editdepartment(request, did):
    """Rename Department *did* from POST['deptname'] unless the new name collides, then redirect."""
    global logger
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    errors = []  # NOTE(review): collected but never reported back to the user
    deptls = Department.objects.order_by('id')
    classls = Class.objects.order_by('id')  # NOTE(review): unused in this view
    judgeadd=0
    try:
        did = int(did)
        dept= Department.objects.get(id=did)
    except ValueError:
        # NOTE(review): Department.DoesNotExist from .get() is not caught
        # here and would propagate as a 500 rather than this 404 — confirm.
        logger.error("classes")
        raise Http404()
    if request.method == 'POST':
        if not request.POST.get('deptname',''):
            errors.append('deptname')
        # collision check: another department already uses the new name
        for deptl in deptls:
            if(deptl.deptname==request.POST['deptname']):
                if(dept.deptname!=request.POST['deptname']):
                    judgeadd=1
                    break;
        if(judgeadd==0):
            dept.deptname = request.POST['deptname']
            dept.edittime=datetime.datetime.now()
            dept.save()
            deptls = Department.objects.order_by('id')
    departments = Department.objects.order_by('id')  # NOTE(review): unused
    return HttpResponseRedirect('/students/')
def editclass(request, did):
    """Update Class *did* from POSTed claname/grade/deptid unless the new combination collides, then redirect."""
    global logger
    if not 'username' in request.session:
        return HttpResponseRedirect('/Login/')
    errors = []  # NOTE(review): collected but never reported back to the user
    deptls = Department.objects.order_by('id')  # NOTE(review): unused in this view
    classls = Class.objects.order_by('id')
    judgeadd=0
    try:
        did = int(did)
        clas=Class.objects.get(id=did)
    except ValueError:
        # NOTE(review): Class.DoesNotExist from .get() is not caught here
        # and would propagate as a 500 rather than this 404 — confirm.
        logger.error("classes")
        raise Http404()
    if request.method == 'POST':
        if not request.POST.get('claname',''):
            errors.append('claname')
        # collision check: same name AND grade AND department already exists.
        # NOTE(review): unlike editdepartment, the record being edited is not
        # excluded, so saving with unchanged values skips the update — confirm
        # this is intentional.
        for clasl in classls:
            if(clasl.claname == request.POST['claname']):
                if(clasl.grade == int(request.POST['grade'])):
                    if(clasl.departmentid_id ==int(request.POST['deptid'])):
                        judgeadd=1
                        break;
        if(judgeadd==0):
            clas.claname = request.POST['claname']
            clas.grade = request.POST['grade']
            clas.departmentid_id = request.POST['deptid']
            clas.edittime=datetime.datetime.now()
            clas.save()
            deptls = Department.objects.order_by('id')
    classes = Class.objects.order_by('id')  # NOTE(review): unused
    return HttpResponseRedirect('/students/')
def deptnamecheck(request):
    """AJAX endpoint: report whether POST['deptname'] is already taken.

    Returns JSON {"judgedeptname": 1} when a Department with that name
    exists, {"judgedeptname": 0} otherwise (including non-POST requests).
    """
    judgedeptname = 0
    if request.method == 'POST':
        # Let the database do the lookup instead of loading and scanning
        # every Department row in Python.
        if Department.objects.filter(deptname=request.POST['deptname']).exists():
            judgedeptname = 1
    data = {}
    data["judgedeptname"] = judgedeptname
    return HttpResponse(json.dumps(data))
def clanamecheck(request):
    """AJAX endpoint: report whether the POSTed class already exists.

    A class is a duplicate when name, grade AND department all match.
    Returns JSON {"judgeclaname": 1} on a match, {"judgeclaname": 0}
    otherwise (including non-POST requests).
    """
    judgeclaname = 0
    if request.method == 'POST':
        # Let the database do the combined lookup instead of scanning all
        # Class rows in Python.
        duplicate = Class.objects.filter(
            claname=request.POST['claname'],
            grade=int(request.POST['grade']),
            departmentid_id=int(request.POST['deptid'])).exists()
        if duplicate:
            judgeclaname = 1
    data = {}
    data["judgeclaname"] = judgeclaname
    return HttpResponse(json.dumps(data))
| Luokun2016/QuickSort | classes/views.py | views.py | py | 7,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponseRedirect",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "classes.models.Department.objects.order_by",
"line_number": 24,
"usage_type": "call"
... |
21662366530 | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Time: 2020/4/5 21:14
# @Author: qyh
import matplotlib.pyplot as plt
import numpy.random as rdm
import networkx as nx
node_num = 100
probability = 0.01
er_graph = nx.erdos_renyi_graph(node_num, probability)
susceptible = 'S'
infected = 'I'
recovered = 'R'
# Init
def onset(graph):
    """Reset the epidemic: mark every node of *graph* as susceptible."""
    for node in graph.nodes:
        graph.nodes[node]['state'] = susceptible
# Set infection rate
def infect_prop(graph, proportion):
    """Seed the epidemic: infect each node independently with probability *proportion*."""
    for node in graph.nodes:
        if rdm.random() <= proportion:
            graph.nodes[node]['state'] = infected
# Model building
def build_model(p_infect, p_recover):
    """Return an SIR step function closed over the given probabilities.

    :param p_infect: probability an infected node infects a susceptible neighbour
    :param p_recover: probability an infected node recovers in the same step
    """
    def model(graph, i):
        # Only infected nodes act: first try to infect neighbours, then
        # possibly recover (R nodes never change state again).
        if graph.nodes[i]['state'] == infected:
            for m in graph.neighbors(i):
                if graph.nodes[m]['state'] == susceptible:
                    if rdm.random() <= p_infect:
                        graph.nodes[m]['state'] = infected
            if rdm.random() <= p_recover:
                graph.nodes[i]['state'] = recovered
    return model
# Single model run
def model_run(graph, model):
    """Apply *model* once to every node of *graph*."""
    for node in graph.nodes.keys():
        model(graph, node)
# Multiple model cycles
def model_iter(graph, model, iter_num):
    """Run *model* over the whole graph for *iter_num* successive steps."""
    for _ in range(iter_num):
        model_run(graph, model)
def draw(graph):
    """Render *graph* with a spring layout (no axis ticks) and show the figure."""
    fig, ax = plt.subplots(figsize=(12, 10))
    ax.set_xticks([])
    # Fix: the original called set_xticks([]) twice; the second call was
    # clearly meant to clear the y-axis ticks as well.
    ax.set_yticks([])
    pos = nx.spring_layout(graph, k=0.2)
    nx.draw_networkx_edges(graph, pos, alpha=0.5, width=1)
    nx.draw_networkx_nodes(graph, pos, node_size=80)
    plt.show()
def calc_infection_rate(graph):
    """Run one SIR simulation on *graph* and print the final epidemic size.

    Seeds 5% of nodes as infected, uses p_infect=0.2 and p_recover=0.8,
    iterates 10 steps, then reports recovered nodes as a fraction of
    node_num.
    """
    onset(graph)
    infect_prop(graph, 0.05)
    model = build_model(0.2, 0.8)
    model_iter(graph, model, 10)
    # Count recovered nodes as "was infected".
    # NOTE(review): nodes still infected at the cutoff are not counted —
    # confirm this under-count is acceptable.
    infect = [v for (v, attr) in graph.nodes(data=True) if attr['state'] == recovered]
    infection_rate = len(infect) / node_num
    print(infection_rate)
if __name__ == '__main__':
    # Visualise the random network, then run one epidemic and print its size.
    draw(er_graph)
    calc_infection_rate(er_graph)
| QCloudHao/COVID19 | development.py | development.py | py | 2,014 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "networkx.erdos_renyi_graph",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random.random",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "numpy.ran... |
5741967370 | import os
import pandas as pd
from sklearn.model_selection import KFold, train_test_split
# Paths are resolved relative to this script's location.
root_path = os.path.dirname(__file__)
asset_path = os.path.join(root_path, '../assets')
# load the full titanic example data
data = pd.read_csv(os.path.join(root_path, '../data/train.csv'))
# train / test split
train_data, test_data = train_test_split(data, test_size=0.2)
# number of data samples for the train and test sets
N_TRAIN_DATA_SAMPLES = 10
N_TEST_DATA_SAMPLES = 2
# One config per split: source frame, number of samples, output root, and a
# list that will be filled with the generated sample frames.
train_test_configs = [
    {
        'data': train_data,
        'n_samples': N_TRAIN_DATA_SAMPLES,
        'data_samples_root': os.path.join(asset_path, 'train_data_samples'),
        'data_samples_content': [],
    },
    {
        'data': test_data,
        'n_samples': N_TEST_DATA_SAMPLES,
        'data_samples_root': os.path.join(asset_path, 'test_data_samples'),
        'data_samples_content': [],
    },
]
# generate data samples
# KFold is used only as a convenient way to partition each split into
# n_samples disjoint, shuffled subsets: each fold's held-out indices
# become one data sample.
for conf in train_test_configs:
    kf = KFold(n_splits=conf['n_samples'], shuffle=True)
    splits = kf.split(conf['data'])
    for _, index in splits:
        conf['data_samples_content'].append(conf['data'].iloc[index])
# save data samples
# Each sample goes to <root>/data_sample_<i>/data_sample_<i>.csv.
for conf in train_test_configs:
    for i, data_sample in enumerate(conf['data_samples_content']):
        filename = os.path.join(conf['data_samples_root'], f'data_sample_{i}/data_sample_{i}.csv')
        # exist_ok=True makes the script re-runnable: the original makedirs
        # raised FileExistsError when the sample directories already existed.
        os.makedirs(os.path.dirname(filename), exist_ok=True)
        with open(filename, 'w') as f:
            data_sample.to_csv(f)
| Esadruhn/owkin_elixir_hackathon | substra_materials/titanic_example/titanic/scripts/generate_data_samples.py | generate_data_samples.py | py | 1,475 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7... |
39642904721 | """ CONTROLLER.PY
    The site controller controls the production of the response
    for requests to the website. The controller creates and interacts
    with both the SiteModel and the SiteViews. A call to the controller
    calls start_response and returns the contents of the response.
Upgrade to multi-site:
./ set up with config in apache config
./ changes to main
./ read and open config
./ set paths from config
./ check if import weberror works else set flag
./ changes to controller
./ look for script_name split into sitename and responsearr
(keep older code arround but commented out)
./ update config if section with name exists
./ clean up baseurlpath or replace with siteurl where appropriate
./ test on current site
./ make 2 sites with 2 different folders and names (also check if basename works)
./ also check if old system with scriptalias still works
./ changes to main with extra error
./ make changes to have both error functions, call of correct application at bottom
./ test both errors
- look at main <-> controller: does split make sense?
- update log in controller
Add search tab
"""
### Preparation
# Import
import sys # System Library
import os # OS Library
import time # Time Library
import logging.config # Logging Config Object
#import urlparse # URLParse Library (should be used instead of CGI)
import cgi # CGI library (needed because urlparse.parse_qs broken in py2.5)
import hashlib # Hashlib Library
import shelve # Shelve Module
from configobj import ConfigObj # Configuration Object
from views import SiteViews # Site Views Object
from model import SiteModel # Site Model Object
class SiteController(object):
""" Controller object that formulates the response to the http
request.
"""
def __init__(self):
    """ Constructor: declares the variables

        All members are populated later, on each __call__; the defaults
        here only make the attributes exist (and debuggable) beforehand.
    """
    # Declare Variables
    self.env = {'No Env':0}    # WSGI environment of the current request
    self.conf = None           # Configuration Object (ConfigObj)
    self.output = 'No Output'  # accumulated HTML response body
    self.log = None            # Logging Object
    self.sid = ''              # Session ID (sha1 hex digest)
    self.session = {}          # Session Variables (shelve, dict-like)
    self.request = {}          # Request Variables (parsed query/POST)
    self.views = None          # The views object (SiteViews)
    self.model = None          # The model object (SiteModel)
def __call__(self,environ,start_response):
    """ Object Call: creates the response

        WSGI entry point: parses the request URI into site name, page and
        request type, loads configuration and session state, dispatches to
        the matching view, and returns the response body after calling
        start_response.
    """
    ### Setup / Preparation
    errormsg = ''
    # Set Environment
    self.env = environ
    # Split URI into sitename and RESPONSEARR with path info -> fill / update session vars
    # NOTE(review): environ.get('REQUEST_URI') would raise AttributeError
    # if the key is missing (no default) — confirm the server always sets it.
    # response = environ.get('PATH_INFO','list') # Old code: PATH_INFO not available using SciptAliasMatch
    #responsearr = environ.get('SCRIPT_NAME').strip('/').split('/') # Does not work with older browsers
    responsearr = environ.get('REQUEST_URI').strip('/').split('/')
    if len(responsearr) > 0:
        siteurl = responsearr[0]
        responsearr = responsearr[1:]
    else: siteurl = ''
    # Load Configuration
    self.conf = ConfigObj(environ.get('WEBVIEW_CONFIG'))
    # Add configuration from site_sitename
    if 'site_'+siteurl in self.conf:
        self.conf.merge(self.conf['site_'+siteurl])
    # Edit siteurl in config
    if len(siteurl):
        self.conf['path']['siteurl'] = '/'+siteurl
    # Set up logging & print message
    logfile = os.path.join(self.conf['path']['basepath'],
                           self.conf['ctrl']['logfile'])
    logging.basicConfig(level='DEBUG',filename = logfile,
                        format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    self.log = logging.getLogger('webview.control')
    self.log.info('********* Started Controller')
    self.log.info(' Request from %s to %s'
                  % (environ.get('REMOTE_ADDR'),
                     environ.get('REQUEST_URI')))
    # Get Post request parameters (decode if needed)
    try:
        request_size = int(environ.get('CONTENT_LENGTH', 0))
    except (ValueError):
        request_size = 0
    request_body = environ['wsgi.input'].read(request_size)
    try:
        request_body = request_body.decode()
    except(UnicodeDecodeError,AttributeError):
        pass
    # NOTE(review): cgi.parse_qs is long deprecated (removed with the cgi
    # module in py3.13) — urllib.parse.parse_qs is the replacement.
    request_params = cgi.parse_qs(request_body)
    # Attach GET request parameters
    query_string = environ['QUERY_STRING']
    request_params.update(cgi.parse_qs(query_string))
    self.request = request_params
    # Get session id from Post request
    self.log.debug('Request Params = ' + repr(request_params))
    self.sid = request_params.get('sid',[''])[0]
    if len(self.sid):
        self.log.info('Existing Session SID = %s' % self.sid)
    else:
        # New session: sid = sha1(time + client address)
        self.log.info('New Session')
        self.sid = hashlib.sha1((repr(time.time())+environ['REMOTE_ADDR']).encode('utf-8')).hexdigest()
    # Get session information / make new session file
    sessionfile = os.path.join(self.conf['path']['basepath'],
                               self.conf['path']['session'],
                               'sess_%s' % self.sid)
    self.session = shelve.open(sessionfile, writeback = True)
    self.session['sid'] = self.sid
    # Make other objects
    self.views = SiteViews(self.env, self.session, self.conf)
    self.model = SiteModel(self.env, self.session, self.conf)
    self.views.model = self.model
    ###### Compute response (format is page/request)
    # Select Response and Query from URI -> fill / update session vars
    if len(responsearr) > 0: self.session['page'] = responsearr[0].lower()
    else: self.session['page'] = 'list'
    # Unknown page names fall back to the folder list.
    if not (self.session['page'] in ['data','error','log', 'list', 'search', 'test']):
        self.session['page'] = 'list'
    self.log.info('Page Type is = %s' % self.session['page'])
    #-- DATA Response: update session variables and validate request
    self.session['request'] = '' # Clear request
    if self.session['page'] == 'data':
        responsefolder = '' # variable to allow check if folder is valid
        # Get and Validate request
        if responsearr[-1].lower() in ['raw']:
            self.session['request'] = responsearr[-1].lower()
            responsearr = responsearr[0:-1]
        # FOLDER selection from request parameters or response path array
        if 'folder_selection' in request_params:
            self.session['folder'] = request_params.get('folder_selection')[0]
        elif len(responsearr) > 1:
            responsefolder = os.path.join(*responsearr[1:])
            self.session['folder'] = responsefolder
        # FILE selection from request parameters
        if 'file_selection' in request_params:
            self.session['file'] = request_params.get('file_selection')[0]
        # STEP selection from request parameters
        if 'step_selection' in request_params:
            self.session['step'] = request_params.get('step_selection')[0]
        # DATA selection from request parameters
        if 'data_selection' in request_params:
            self.session['data'] = request_params.get('data_selection')[0]
        # PLANE selection from request parameters
        if 'plane_selection' in request_params:
            self.session['plane'] = request_params.get('plane_selection')[0]
        # Validate / Set session variables
        self.model.set_selection()
        # if no data available -> error
        if len(self.session['data']) == 0:
            self.session['page'] = 'error'
            errormsg = 'No FITS data avaialable:<br> '
            errormsg += ' folder = <%s>' % (self.session['folder'])
            errormsg += ' file = <%s>' % (self.session['file'])
            errormsg += ' step = <%s>' % (self.session['step'])
            errormsg += ' data = <%s>' % (self.session['data'])
            errormsg += ' plane = <%s>' % (self.session['plane'])
        # if responsefolder was invalid raise error
        if ( len(responsefolder) and not responsefolder in self.session['folder'] and
             int(self.conf['ctrl']['erronbadurl']) ):
            self.session['page'] = 'error'
            errormsg = 'Nonexistent or empty folder requested: %s is not available or contains no data' % responsefolder
    #-- LOG Response: update session variables and validate request
    if self.session['page'] == 'log':
        # Get and Validate request
        if responsearr[-1].lower() in ['update']:
            self.session['request'] = responsearr[-1].lower()
        # LOG_LEVEL selection from request parameters
        if 'log_level' in request_params:
            level = request_params.get('log_level')[0]
            # NOTE(review): substring test accepts partial names (e.g. 'FO');
            # confirm the client only sends exact level names.
            if level in 'DEBUG INFO WARNING ERROR CRITICAL':
                self.session['loglevel'] = level
        elif not 'loglevel' in self.session:
            self.session['loglevel'] = 'INFO'
    #-- LIST Response: update session variables and validate request
    if self.session['page'] == 'list':
        # LIST_FOLDER selection from response path array
        responsefolder = '' # variable to allow check if folder is valid
        if len(responsearr) > 1:
            responsefolder = responsearr[1]
            self.session['listfolder'] = responsefolder
        else:
            self.session['listfolder'] = ''
        # Get Folder list and make sure there's something there
        folderlist = self.model.folderlist(0)
        if len(folderlist) == 0:
            self.session['page'] = 'error'
            errormsg = '<b>NO Data Folders Available</b><p> No folders were found under '
            errormsg += os.path.join(self.conf['path']['basepath'], self.conf['path']['datapath'])
            errormsg += '. Check the server settings or contact the administrator.'
        elif ( len(responsefolder) and not responsefolder in folderlist and
               int(self.conf['ctrl']['erronbadurl'])):
            self.session['page'] = 'error'
            errormsg = 'Nonexistent folder requested: %s is not available or contains no data' % responsefolder
        else:
            # Set list_folder (default to the newest folder)
            if not self.session['listfolder'] in folderlist:
                self.session['listfolder'] = folderlist[-1]
    #-- TEST Response: log the messages
    if self.session['page'] == 'test':
        if 'messagetext' in request_params:
            self.log.debug('Test - Message from %s: %s' %
                           (environ.get('REMOTE_ADDR'), request_params['messagetext'][0]) )
    # Print request if it came up
    if len(self.session['request']) > 0:
        self.log.info('Request Type is = %s' % self.session['request'])
    ###### Make response page
    # Initialize Response
    status = '200 OK'
    response_headers = [('Content-Type','text/html')]
    self.output = ''
    # If there is no request -> return regular page
    if len(self.session['request']) == 0 or self.session['page'] == 'error':
        # Create Response Header
        self.output += self.views.header()
        # Create Text
        if self.session['page'] == 'data':
            # Request is to see data
            self.output += self.views.data()
        elif self.session['page'] == 'error':
            # Request is an Error
            self.output += self.views.error(errormsg)
        elif self.session['page'] == 'log':
            # Request is log
            self.output += self.views.pipelog()
        elif self.session['page'] == 'list':
            # Request is list
            self.output += self.views.folderlist()
        elif self.session['page'] == 'search':
            # Request is search
            self.output += self.views.search()
        elif self.session['page'] == 'test':
            # Request is test
            self.output += self.views.test()
        # Close Response (append debug info when configured or on test page)
        self.log.debug('debuginfo = %d' % int(self.conf['ctrl']['debuginfo']) )
        if( int(self.conf['ctrl']['debuginfo']) > 0 or
            self.session['page'] == 'test' ):
            self.list_env()
        self.output += '</body></html>'
    # If there is a querry -> return request text instead
    else:
        # Data, Raw request
        if self.session['page']+'-'+self.session['request'] == 'data-raw':
            self.output += self.views.dataraw()
        # Logging, Update request
        if self.session['page']+'-'+self.session['request'] == 'log-update':
            self.output += self.views.logupdate()
    # Return
    start_response(status, response_headers)
    self.log.info('********* Finished Controller')
    return self.output
def list_env(self):
""" Creates a response containing path and environment variables.
"""
# Initialize Output
output = "<hr>\n <h2>Environment Setup</h2>\n"
# Add request text
reqtext = ['<li>%s: %s' % (key, self.request[key])
for key in self.request]
reqtext = '\n'.join(reqtext)
output += '<b>Request:</b><ul>\n' + reqtext + '</ul>\n'
# Add current path
output += '<b>Current Path:</b> %s<p>\n' % os.getcwd()
# Add session variables
sesstext = ['<li>%s: %s' % (key, self.session[key])
for key in self.session]
sesstext = '\n'.join(sesstext)
output += '<b>Session Variables:</b><ul>\n' + sesstext + '</ul>\n'
# Add environment Variables
envstr = ['<li>%s: %s' % (key,self.env[key])
for key in sorted(self.env.keys())]
envstr = '\n'.join(envstr)
output += '<b>Environment Variables:</b><ul>\n' + envstr + '</ul>\n'
# Add path
pathstr = ['<li>%s' % p for p in sorted(sys.path) ]
pathstr = '\n'.join(pathstr)
output += '<b>Path Settings:</b><ul>\n' + pathstr + '</ul>\n'
# Return answer
self.output += output
""" === History ===
2021-4 Marc Berthoud, remove use of logconfig
2020 Marc Berthoud, Upgrade to multi-site
2020-1-10 Marc Berthoud,
* removed [path][baseurlpath from config: Either use absolute paths
or use siteurl (which is set automatically), also in logscripts.js
* Config now comes from environment variable WEBVIEW_CONFIG
* Load site_siteurl preferences from config section into config
to allow multiple sites on a server.
* Main.py now loads pythonpaths from config file
* Main.py checks if weberror.errormiddleware exists else uses
simpler error reporting function
2015-2-20 Marc Berthoud, Various improvements
* Update code for using astropy.io.fits
2014-4-3 Marc Berthoud, Added self.infohead to model object to specify
which header contains main information.
2012-11-13 Marc Berthoud, Ability to specify instrument name and icons
* Ability to have information come from specific headers
* Configuration file name is now in main.py
2012-9-13 Marc Berthoud, Added file name and format flexibility
* Added flexible detection of pipe step in file name (model.filelist)
* Added ability to have no image in primary FITS header
2012-6-15 Marc Berthoud, Added use of jQuery for JavaScript elements
* New ['scripts'] section in the configuration, scripts are now
loaded in the page header
* Updated logscripts.js for use of jQuery
2012-4-12 Marc Berthoud, Various improvements during system testing
* Validate flights and aors to make sure data is present
* Add INSTMODE to the end of AOR entries in data
2011-11-23 Marc Berthoud, Ver0.2: Added imageanalysis javascript object
to the viewer to manage client side user interface.
2011-1-31 Marc Berthoud, Ver0.1: Wrote and Tested
"""
| berthoud/webfitsviewer | webfitsviewer/src/controller.py | controller.py | py | 16,483 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "configobj.ConfigObj",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 88,
"usage_type": "attribute"
},
{
"api_name": "logging.config.basicCon... |
from sense_hat import SenseHat
from PIL import Image
from random import randint
import time

# Cycle forever through randomly chosen avatar frames on the Sense HAT's
# 8x8 LED matrix, one frame per second. Frames are pic1.png..picN.png in
# the working directory.
sense = SenseHat()
amount_of_pics = 3
while True:
    # Pick one of the numbered picture files at random.
    pic_nr = str(randint(1, amount_of_pics))
    img = Image.open('pic' + pic_nr + '.png')
    # Keep only the first three (R, G, B) channels of every pixel;
    # set_pixels expects 64 [r, g, b] triples.
    pixels = [list(px[:3]) for px in img.getdata()]
    sense.set_pixels(pixels)
    time.sleep(1)
| gdmgent-IoT-1920/labo-2-sensehat-hansvertriest | avatar_animated.py | avatar_animated.py | py | 425 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sense_hat.SenseHat",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line... |
74518602024 | #import os
from typing import Union
import torch
import numpy as np
from . import torch_knn
gpu_available = torch_knn.check_for_gpu()
if not gpu_available:
print("The library was not successfully compiled using CUDA. Only the CPU version will be available.")
_transl_torch_device = {"cpu": "CPU", "cuda": "GPU"}
class TorchKDTree:
    """CPU/GPU KD-tree over a reference point cloud with a KNN ``query``.

    Wraps one of the precompiled ``torch_knn.KDTree{CPU,GPU}<dims>D[F]``
    classes. The tree is always built on the CPU from a numpy copy of the
    points; for CUDA devices its lookup tables are moved to the GPU after
    construction.
    """
    def __init__(self, points_ref : torch.Tensor, device : torch.device, levels : int, squared_distances : bool):
        """Builds the KDTree. See :ref:`build_kd_tree` for more details.
        """
        assert(device.type in ['cpu', 'cuda'])
        # Indices are returned as int32, so the point count must fit in one.
        assert points_ref.shape[0] < 2**31, "Only 32 bit signed indexing implemented"
        self.dtype = points_ref.dtype
        self.dims = points_ref.shape[-1]
        self.nr_ref_points = points_ref.shape[0]
        # Name of the compiled class, e.g. "KDTreeGPU3DF" for float32 on
        # CUDA (the trailing "F" marks single precision).
        kdtree_str = "KDTree" + _transl_torch_device[device.type] + "%dD" % (self.dims) + ("F" if self.dtype == torch.float32 else "")
        try:
            self.kdtree = getattr(torch_knn, kdtree_str)(points_ref.detach().cpu().numpy(), levels)
        except AttributeError as err:
            raise RuntimeError("Could not find the KD-Tree for your specified options. This probably means the library was not compiled for the specified dimensionality, precision or the targeted device. Original error:", err)
        # Points reordered into the tree's internal layout, plus the
        # permutation mapping internal indices back to the caller's order.
        self.structured_points = torch.from_numpy(self.kdtree.get_structured_points())
        self.shuffled_ind = torch.from_numpy(self.kdtree.get_shuffled_inds()).long()
        self.use_gpu = use_gpu = (device.type == 'cuda')
        self.device = device
        self.dtype_idx = torch.int32 #Restriction in the compiled library
        # Kept so query() can rebuild a differentiable distance if needed.
        self.ref_requires_grad = points_ref.requires_grad
        self.points_ref_bak = points_ref #.clone()
        self.squared_distances = squared_distances
        if self.use_gpu:
            self.structured_points = self.structured_points.to(self.device)
            self.shuffled_ind = self.shuffled_ind.to(self.device)
    def _search_kd_tree_gpu(self, points_query, nr_nns_searches, result_dists, result_idx):
        # NOTE(review): references self.part_nr, which is never assigned in
        # this class -- calling this would raise AttributeError. query()
        # goes through self.kdtree.query() instead; confirm this is dead code.
        torch_knn.searchKDTreeGPU(points_query, nr_nns_searches, self.part_nr, result_dists, result_idx)
    def _search_kd_tree_cpu(self, points_query, nr_nns_searches, result_dists, result_idx):
        # NOTE(review): same undefined self.part_nr issue as the GPU variant.
        torch_knn.searchKDTreeCPU(points_query, nr_nns_searches, self.part_nr, result_dists, result_idx)
    def query(self, points_query : torch.Tensor, nr_nns_searches : int=1,
              result_dists : torch.Tensor=None, result_idx : torch.Tensor=None):
        """Searches the specified KD-Tree for KNN of the given points

        Parameters
        ----------
        points_query : torch.Tensor of float or double precision
            Points for which the KNNs will be computed
        nr_nns_searches : int, optional
            How many closest nearest neighbors will be queried (=k), by default 1
        result_dists : torch.Tensor of float or double precision, optional
            Target array that will hold the resulting distance. If not specified, this will be dynamically created.
        result_idx : torch.Tensor of dtype_idx type, optional
            Target array that will hold the resulting KNN indices. If not specified, this will be dynamically created.

        Returns
        -------
        tuple
            Returns the tuple containing

            * dists (ndarray of float or double precision) : Quadratic distance of KD-Tree points to the queried points
            * inds (ndarray of type int) : Indices of the K closest neighbors

        Raises
        ------
        RuntimeError
            If the requested KDTree can not be constructed.
        """
        if nr_nns_searches > self.nr_ref_points:
            raise RuntimeError("You requested more nearest neighbors than there are in the KD-Tree")
        points_query = points_query.to(self.device)
        # Allocate output buffers when the caller did not provide them.
        if result_dists is None:
            result_dists = torch.empty(size=[points_query.shape[0], nr_nns_searches], dtype=self.dtype, device=self.device)
        if result_idx is None:
            result_idx = torch.empty(size=[points_query.shape[0], nr_nns_searches], dtype=self.dtype_idx, device=self.device)
        assert(list(result_dists.shape) == [points_query.shape[0], nr_nns_searches])
        assert(result_dists.dtype == self.dtype)
        assert(list(result_idx.shape) == [points_query.shape[0], nr_nns_searches])
        assert(result_idx.dtype == self.dtype_idx)
        assert(points_query.dtype == self.dtype)
        # The compiled kernel consumes raw pointers, so every tensor it
        # touches must be contiguous in memory.
        if not result_dists.is_contiguous():
            result_dists = result_dists.contiguous()
        if not result_idx.is_contiguous():
            result_idx = result_idx.contiguous()
        if not points_query.is_contiguous():
            points_query = points_query.contiguous()
        #Get pointer as int
        points_query_ptr = points_query.data_ptr()
        dists_ptr = result_dists.data_ptr()
        knn_idx_ptr = result_idx.data_ptr()
        self.kdtree.query(points_query_ptr, points_query.shape[0], nr_nns_searches, dists_ptr, knn_idx_ptr)
        dists = result_dists
        # Map tree-internal indices back to the caller's point order.
        inds = self.shuffled_ind[result_idx.long()]
        # Recompute distances differentiably when gradients are required,
        # since the compiled kernel is not autograd-aware.
        if (points_query.requires_grad or self.ref_requires_grad) and torch.is_grad_enabled():
            dists = torch.sum((points_query[:, None] - self.points_ref_bak[inds])**2, dim=-1)
        if not self.squared_distances:
            dists = torch.sqrt(dists)
        return dists, inds
def build_kd_tree(points_ref : Union[torch.Tensor, np.ndarray], device : torch.device = None,
                  squared_distances = True, levels : int=None):
    """Builds the KD-Tree for subsequent queries using searchKDTree

    Builds the KD-Tree for subsequent queries using searchKDTree. Note that the
    tree is always built on the CPU and then transferred to the GPU if necessary.

    Parameters
    ----------
    points_ref : torch.Tensor or np.ndarray
        Points from which to build the KD-Tree
    device : torch.device
        Specify a target torch device where the KD-Tree will be located.
        Will automatically pick points_ref.device if not specified
        (CPU for numpy input).
    squared_distances : bool
        If true, the squared euclidean distances will be returned, by default True
    levels : int, optional
        Levels of the KD-Tree (currently between 1 and 13 levels). If None is specified, will pick an appropriate value.

    Returns
    -------
    TorchKDTree
        Returns a kdtree with a query method to find the nearest neighbors inside a point-cloud
    """
    # Convert numpy input first so .device below exists even when the
    # caller relies on the automatic device selection.
    if isinstance(points_ref, np.ndarray):
        points_ref = torch.from_numpy(points_ref)
    if isinstance(device, str):
        device = torch.device(device)
    if device is None:
        device = points_ref.device
    if levels is None:
        # Heuristic: roughly log2(N) - 3, clamped to the supported [1, 13].
        levels = np.maximum(1, np.minimum(13, int(np.log(int(points_ref.shape[0])) / np.log(2))-3))
    assert(levels >= 1 and levels <= 13)
    assert isinstance(points_ref, torch.Tensor)
    assert device.type != 'cuda' or gpu_available, "You requested the KD-Tree on the GPU, but the library was compiled with CPU support only"
    assert(device.type in ['cuda', 'cpu'])
    return TorchKDTree(points_ref, device, levels, squared_distances)
| thomgrand/torch_kdtree | torch_kdtree/nn_distance.py | nn_distance.py | py | 7,259 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.device",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.float32",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "torch.from_num... |
9135106371 | #!/usr/bin/env python3
import sqlite3
def nice_print(sql_table):
    """Print each row of a query result, columns separated by two tabs."""
    for record in sql_table:
        print('\t\t'.join(str(field) for field in record))
# Connect to the bundled Northwind sample database (file in the repo root).
conn = sqlite3.connect('northwind.sqlite3')
curs = conn.cursor()
# Basic Questions
# Ten most expensive products by unit price.
ten_expensive = """
SELECT ProductName, UnitPrice
from Product
ORDER BY UnitPrice DESC
LIMIT 10;
"""
# Average of HireDate - BirthDate over all employees.
avg_age = """
SELECT ROUND(AVG(HireDate - BirthDate)) as "average hire age"
from Employee;
"""
# Same average, broken down by the employee's city.
avg_age_city = """
SELECT City, ROUND(AVG(HireDate - BirthDate)) as "average hire age by City"
from Employee
GROUP BY City;
"""
print("\n\ntop 10 most expensive items")
nice_print(curs.execute(ten_expensive).fetchall())
print("\n\navg. age at hire")
nice_print(curs.execute(avg_age).fetchall())
print("\n\navg. age by city")
nice_print(curs.execute(avg_age_city).fetchall())
# Advanced Questions
# Ten most expensive products together with their supplier's company name.
ten_expensive_supplier = """
SELECT ProductName, UnitPrice, CompanyName
from Product JOIN Supplier
ON Product.SupplierID = Supplier.Id
ORDER BY UnitPrice DESC
LIMIT 10;
"""
# Category containing the largest number of products.
largest_category = """
SELECT CategoryName, COUNT(ProductName) as "count"
FROM Product JOIN Category
ON Product.CategoryId = Category.Id
GROUP BY CategoryName
ORDER BY "count" DESC
LIMIT 1;
"""
# Employee assigned to the most distinct territories.
give_raise_to_this_employee = """
SELECT FirstName || ' ' || LastName as fullname
FROM Employee JOIN EmployeeTerritory
ON Employee.Id = EmployeeTerritory.EmployeeId
GROUP BY fullname
ORDER BY COUNT( DISTINCT TerritoryId) DESC
LIMIT 1;
"""
print("\n\nWhat are the 10 most expensive items AND their Supplier?")
nice_print(curs.execute(ten_expensive_supplier).fetchall())
print("\n\nWhat is the largest category (by unique products)")
nice_print(curs.execute(largest_category).fetchall())
print("\n\nWho's the employee with the most territories?")
nice_print(curs.execute(give_raise_to_this_employee).fetchall())
# NOTE(review): the connection is never closed; harmless for a read-only
# script, but conn.close() at the end would be tidier.
| Tclack88/Lambda | DS-3-2-SQL-and-Databases/sc/northwind.py | northwind.py | py | 2,024 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 13,
"usage_type": "call"
}
] |
7413102812 | """Sorts duplicate photos/files and places all copies in their own folder."""
import click
import halo
from hashlib import sha256
from pathlib import Path
from shutil import copyfile, move
""" Return the SHA256 sum of the provided file name.
:param file_name - Name of the file to check
:return Hexdigst of SHA sum
"""
def sha256sum(file_name):
with open(file_name, 'rb') as file_bytes:
return sha256(file_bytes.read()).hexdigest()
return ''
@click.command()
@click.argument('directory', type=click.Path(file_okay=False, exists=True))
@click.option('--move/--copy', '-m/-c', 'do_move', default=False)
def main(directory, do_move):
    """Group duplicate files in DIRECTORY (detected via SHA-256) into a
    'duplicate_files' subdirectory, either moving or copying them.
    """
    dup_dir = Path('{}/duplicate_files'.format(directory))
    if not dup_dir.exists():
        dup_dir.mkdir()
    click.echo('This script will iterate through the directory \'{0}\' and move all duplicate files to a subdirectory named \'{1}\'.'.format(directory, dup_dir))
    path = Path(directory)
    # Map each content hash to the list of files sharing that content.
    hash_dict = dict()
    for file_path in path.iterdir():
        if file_path.is_file():
            hash_dict.setdefault(sha256sum(file_path), []).append(file_path)
    # Only hashes seen more than once denote duplicates.
    for sha_sum in [h for h, files in hash_dict.items() if len(files) > 1]:
        for filepath in hash_dict[sha_sum]:
            target = dup_dir / filepath.parts[-1]
            if do_move:
                # BUG FIX: the flag parameter used to be named 'move',
                # shadowing shutil.move so this branch called a bool.
                # Renaming the click parameter to 'do_move' (CLI flags
                # unchanged) makes the imported shutil.move reachable.
                move(filepath, target)
            else:
                copyfile(filepath, target)


if __name__ == '__main__':
    main()
| poiriermike/sort_dup_photos | sort_dupes.py | sort_dupes.py | py | 1,698 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "hashlib.sha256",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "click.echo",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_numbe... |
40066321475 | import pandas as pd
from decouple import config
import numpy as np
import os
import nilearn.image as img
from nilearn.glm.second_level import non_parametric_inference
import nibabel
import argparse
def options() -> dict:
    '''
    Parse the command line flags.

    -t/--task   task name (happy, eft or fear)
    -p/--perms  number of permutations to run

    Parameters
    ---------
    None

    Returns
    -------
    dict: dictionary object
        Dictionary with keys 'task' and 'perms'
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('-t', '--task', dest='task',
                        help='Task name. Either happy, eft or fear')
    parser.add_argument('-p', '--perms', dest='perms',
                        help='number of permutations to run')
    return vars(parser.parse_args())
def paths(task: str) -> dict:
    '''
    Build the load/save paths for a task.

    Parameters
    ----------
    task: str
        one of 'happy', 'fear', 'eft' (used as the decouple config key)

    Returns
    -------
    dict with keys 'base_path' and 'mixed_model'
    '''
    root = config(task)
    mixed_model_dir = os.path.join(root, '2ndlevel', 'mixed_model')
    return {'base_path': root, 'mixed_model': mixed_model_dir}
def subject_scans(base_path: str) -> pd.DataFrame:
    '''
    Load the csv listing each subject's first-level scan locations.

    Drops the one subject whose 't1' entry is 75 (no T1 scan available).

    Parameters
    ----------
    base_path: str
        absolute path to the task directory

    Returns
    -------
    subject_scans_df: pd.DataFrame
        csv of subjects scans locations
    '''
    scans = pd.read_csv(f"{base_path}/1stlevel_location.csv")
    missing_t1 = scans['t1'] == 75
    return scans.drop(scans[missing_t1].index)
def create_desgin_matrix(subjects_scans: dict) -> pd.DataFrame:
    '''
    Build the single-column group design matrix.

    Healthy controls ('HC') are coded -1 and the 'AN' group +1,
    concatenated in that order.

    Parameters
    ----------
    subjects_scans: dict,
        dictionary of subject images with keys of group.

    Returns
    -------
    design matrix: pd.DataFrame,
        one 'Group' column of -1s followed by 1s.
    '''
    group_codes = np.concatenate([
        np.full(len(subjects_scans['HC']), -1.0),
        np.full(len(subjects_scans['AN']), 1.0),
    ])
    return pd.DataFrame(data={'Group': group_codes})
def mean_img(subject_scans: pd.DataFrame) -> dict:
    '''
    Average each subject's two time points into a single image.

    Parameters
    ----------
    subject_scans: pd.DataFrame.
        Dataframe of location of subjects scans of T1, T2

    Returns
    -------
    dict mapping 'HC'/'AN' to lists of per-subject mean images
    '''
    averaged = {
        'HC' : [],
        'AN' : []
    }
    for row in range(0, subject_scans.shape[0]):
        t1_path = subject_scans['t1'].iloc[row]
        t2_path = subject_scans['t2'].iloc[row]
        try:
            scan_pair = [img.load_img(t1_path), img.load_img(t2_path)]
            averaged_scan = img.mean_img(scan_pair)
        except Exception as e:
            # Skip subjects whose images cannot be loaded or averaged.
            print(e)
            continue
        # Group membership is encoded in the scan path ('G1' == control).
        group = 'HC' if 'G1' in t1_path else 'AN'
        averaged[group].append(averaged_scan)
    return averaged
def ols(subjects_to_analyse: list,
        design_matrix: pd.DataFrame,
        masks_2ndlevel: nibabel.nifti1.Nifti1Image,
        perm: int) -> dict:
    '''
    Run nilearn's permutation-based second-level inference (with TFCE).

    Parameters
    ----------
    subjects_to_analyse: list
        list of nibabel.nifti1.Nifti1Image scans
    design_matrix: pd.DataFrame
        (92 x 1) design matrix of group
    masks_2ndlevel: nibabel.nifti1.Nifti1Image
        mask of 1st level inputs
    perm: int
        Number of permutations

    Returns
    -------
    dictionary of nibabel.nifti1.Nifti1Image
    '''
    inference_options = dict(
        second_level_input=subjects_to_analyse,
        design_matrix=design_matrix,
        second_level_contrast="Group",
        mask=masks_2ndlevel,
        model_intercept=True,
        n_perm=int(perm),
        n_jobs=6,
        tfce=True,
        verbose=3,
    )
    return non_parametric_inference(**inference_options)
if __name__ == "__main__":
print('Starting up permutated ols for group differences')
flags = options()
path = paths(flags['task'])
scans_location = subject_scans(path['base_path'])
mean_images = mean_img(scans_location)
design_matrix = create_desgin_matrix(mean_images)
mask = img.load_img(os.path.join(path['mixed_model'], 'mask_img.nii.gz' ))
print(f'Running OLS with {flags["perms"]} permutations for {flags["task"]} task')
subjects_to_analyse = mean_images['HC'] + mean_images['AN']
group_diff = ols(subjects_to_analyse, design_matrix, mask, flags["perms"])
print(f'Saving scans to {path["mixed_model"]}')
group_diff['logp_max_tfce'].to_filename(f'{path["mixed_model"]}/tfce_fwep_group.nii.gz')
group_diff['tfce'].to_filename(f'{path["mixed_model"]}/tfce_tstat_group.nii.gz')
group_diff['t'].to_filename(f'{path["mixed_model"]}/vox_tstat_group.nii.gz')
group_diff['logp_max_t'].to_filename(f'{path["mixed_model"]}/vox_fwep_group.nii.gz') | WMDA/socio-emotion-cognition | task_fmri/modelling/nilearn_notebooks/second_level_group_differences_nilearn.py | second_level_group_differences_nilearn.py | py | 5,119 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "decouple.config",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "os.path",
"l... |
21811338637 | from elasticsearch import Elasticsearch
from services.caption_processor import split_captions
es = Elasticsearch()
def index_captions(captions, video_id):
    """Split raw captions into (time, text) pairs and index each one."""
    for caption_time, caption_text in split_captions(captions):
        document = {
            'time': caption_time,
            'text': caption_text,
            'video': video_id
        }
        es.index(index="simple-captions", doc_type='caption', body=document)
def index_caption_pause_splitted(captions, video_id, index_name = "pause-splitted-captions"):
    """Index already-split (time, text) caption pairs into index_name."""
    for caption_time, caption_text in captions:
        document = {
            'time': caption_time,
            'text': caption_text,
            'video': video_id,
        }
        es.index(index=index_name, doc_type='caption', body=document)
| veotani/youtube-caption-search | server/services/caption_indexator.py | caption_indexator.py | py | 696 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "services.caption_processor.split_captions",
"line_number": 8,
"usage_type": "call"
}
] |
31061471225 |
from ..utils import Object
class GetLanguagePackStrings(Object):
    """
    Returns strings from a language pack in the current localization target by their keys. Can be called before authorization

    Attributes:
        ID (:obj:`str`): ``GetLanguagePackStrings``

    Args:
        language_pack_id (:obj:`str`):
            Language pack identifier of the strings to be returned
        keys (List of :obj:`str`):
            Language pack keys of the strings to be returned; leave empty to request all available strings

    Returns:
        LanguagePackStrings

    Raises:
        :class:`telegram.Error`
    """
    ID = "getLanguagePackStrings"

    def __init__(self, language_pack_id, keys, extra=None, **kwargs):
        self.extra = extra
        self.language_pack_id = language_pack_id  # str
        self.keys = keys  # list of str

    @staticmethod
    def read(q: dict, *args) -> "GetLanguagePackStrings":
        # Rebuild the request object directly from the raw dict fields.
        return GetLanguagePackStrings(q.get('language_pack_id'), q.get('keys'))
| iTeam-co/pytglib | pytglib/api/functions/get_language_pack_strings.py | get_language_pack_strings.py | py | 1,077 | python | en | code | 20 | github-code | 36 | [
{
"api_name": "utils.Object",
"line_number": 6,
"usage_type": "name"
}
] |
import csv
import datetime
import json
import time

import boto3
import pg8000
# Create an S3 client
s3 = boto3.client('s3')
# Create a CloudWatch client
cloudwatch = boto3.client('cloudwatch')
def ingest_database_to_s3(bucket_name):
    """Dump new/changed tables of the totesys source DB to CSVs in S3.

    Connects with credentials from AWS Secrets Manager, decides from the
    CloudWatch log history whether this is the first ever ingestion, and
    uploads one CSV per table that changed in the last 5 minutes (or every
    table on the first run).

    :param bucket_name: name of the target "ingestion" S3 bucket
    """
    # Retrieve the database connection details from AWS Secrets Manager
    secretsmanager = boto3.client("secretsmanager")
    secret_value_response = secretsmanager.get_secret_value(SecretId="db-creds-source")  # Change this to match
    secret_dict = json.loads(secret_value_response["SecretString"])

    # Connect to the PostgreSQL database
    conn = pg8000.connect(
        host=secret_dict["host"],
        port=secret_dict["port"],
        user=secret_dict["username"],
        password=secret_dict["password"],
        database=secret_dict["database"]
    )
    cursor = conn.cursor()

    # Inspect the most recent CloudWatch log stream: if no previous
    # "[INGESTION] Ingestion completed" message exists, this is the first
    # run and every table must be ingested.
    logs = boto3.client('logs')
    log_groups = logs.describe_log_groups()
    log_group = log_groups['logGroups'][-1]['logGroupName']
    log_streams = logs.describe_log_streams(logGroupName=log_group)
    log_stream = log_streams['logStreams'][0]['logStreamName']
    log_events = logs.get_log_events(logGroupName=log_group, logStreamName=log_stream)
    first_ingestion = not any(
        "[INGESTION] Ingestion completed" in event['message']
        for event in log_events['events']
    )

    try:
        # Retrieve all table names from the totesys database
        cursor.execute("SELECT table_name FROM information_schema.tables WHERE table_schema = 'public'")
        table_names = [row[0] for row in cursor.fetchall()]

        for table_name in table_names:
            # Only re-ingest tables modified in the last 5 minutes.
            # BUG FIX: guard against last_update being None (empty table),
            # which previously raised a TypeError on comparison.
            cursor.execute(f"SELECT max(last_updated) FROM {table_name}")
            last_update = cursor.fetchone()[0]
            recently_modified = (
                last_update is not None
                and last_update > datetime.datetime.utcnow() - datetime.timedelta(minutes=5)  # Change this to what you decide on
            )
            if first_ingestion or recently_modified:
                # Retrieve column names from the current table
                cursor.execute(f"SELECT column_name FROM (SELECT * FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_NAME = N'{table_name}') AS column_schema")
                column_names = [column[0] for column in cursor.fetchall()]
                # Retrieve the data from the current table
                cursor.execute(f"SELECT * FROM {table_name}")
                rows = cursor.fetchall()
                # Write with the csv module so commas, quotes and newlines
                # inside cell values are escaped correctly (the previous
                # hand-rolled quoting only handled embedded commas).
                local_path = f"/tmp/{table_name}.csv"
                with open(local_path, "w", newline="") as file:
                    writer = csv.writer(file)
                    writer.writerow(column_names)
                    for row in rows:
                        # str() every cell to keep the previous textual
                        # representation (e.g. None -> "None").
                        writer.writerow([str(cell) for cell in row])
                s3.upload_file(local_path, bucket_name, f"{table_name}.csv")
                print(f'[INGESTION] MODIFIED: {table_name} was last modified at {last_update}')
            else:
                # Log a message to CloudWatch indicating no modification
                print(f'[INGESTION] {table_name} was last modified at {last_update}')
        # Close the database connection
        cursor.close()
        conn.close()
    except Exception as e:
        # Log the error message to CloudWatch
        print(f'[INGESTION] ERROR: {e}')
def lambda_handler(event, context):
    """AWS Lambda entry point for the ingestion step.

    Expects ``event['ingested_bucket']`` to name the target S3 bucket.
    ``context`` is the standard Lambda context object (unused).
    """
    # Log the start time of the function execution
    print(f'[INGESTION] Ingestion started')
    # Allow time for cloudwatch log to be created
    time.sleep(15)
    # Ingest the database to S3
    ingest_database_to_s3(event['ingested_bucket'])
    # Log the end time of the function execution
print(f'[INGESTION] Ingestion completed') | vasilecondrea/lake-cabin-project | extract/src/extract.py | extract.py | py | 4,151 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "boto3.client",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number":... |
# import the packages
import matplotlib.pyplot as plt
import networkx as nx
# Define the data structures of vertices and edges
vertices = range(1, 10)
edges = [(7, 2), (2, 3), (7, 4), (4, 5), (7, 3), (7, 5),
         (1, 6), (1, 7), (2, 8), (2, 9)]
# Let's first instantiate the graph
G = nx.Graph()
# let's draw the graph
G.add_nodes_from(vertices)
G.add_edges_from(edges)
pos = nx.spring_layout(G)
# Let's define the NF nodes
# (labelled "NF" below -- drawn in green)
nx.draw_networkx_nodes(G, pos,
                       nodelist=[1, 4, 3, 8, 9],
                       label=True,
                       node_color='g',
                       node_size=1300)
# let's create the nodes that are known to be involved in fraud
# (labelled "F" below -- drawn in red)
nx.draw_networkx_nodes(G, pos,
                       nodelist=[2, 5, 6, 7],
                       label=True,
                       node_color='r',
                       node_size=1300)
# Let's create labels for the nodes
nx.draw_networkx_edges(G, pos, edges, width=3, alpha=0.5, edge_color='b')
# Creating labels names
labels = {1: r'1 NF', 2: r'2 F', 3: r'3 NF', 4: r'4 NF', 5: r'5 F', 6: r'6 F', 7: r'7F', 8: r'8 NF', 9: r'9 NF'}
nx.draw_networkx_labels(G, pos, labels={n: lab for n, lab in labels.items() if n in pos})
plt.show()
# NOTE(review): this second label draw runs after plt.show() and without the
# labels filter -- it looks redundant; confirm whether it can be removed.
nx.draw_networkx_labels(G, pos)
| amrmabdelazeem/40-Algorithms-to-Know | Fraud Analytics.py | Fraud Analytics.py | py | 1,258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "networkx.Graph",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "networkx.spring_layout",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "networkx.draw_networkx_nodes",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ne... |
2760395481 | import os
from datetime import datetime, timedelta
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import googleapiclient.discovery
import googleapiclient.errors
import httplib2.error
import youtube_dl
import logging
class LiveBroadcast():
    """A single YouTube live broadcast and the URLs derived from its ids."""

    def __init__(self, broadcast_id, title, channel_id, channel_name="", m3u8_url=None, protocol="m3u8", mine=False):
        # Broadcast identity and derived watch URL
        self.id = broadcast_id
        self.title = title
        self.url = "https://www.youtube.com/watch?v=" + broadcast_id
        # Stream source details
        self.m3u8_url = m3u8_url
        self.protocol = protocol
        # Owning channel and derived channel URL
        self.channel_id = channel_id
        self.channel_name = channel_name
        self.channel_url = "https://www.youtube.com/channel/" + channel_id
        # Whether the broadcast belongs to the authorized user
        self.mine = mine
class GoogleApis:
    """Base wrapper around googleapiclient service construction and auth."""

    class NetworkException(Exception):
        # Raised when the Google endpoint cannot be reached.
        pass

    class HttpException(Exception):
        # Raised when the API responds with an HTTP error.
        pass

    class AuthException(Exception):
        # Raised when a call requires authorization that is missing.
        pass

    def __init__(self, api_name, api_version, scopes):
        self.api_name = api_name
        self.api_version = api_version
        self.scopes = scopes
        # Populated by auth_key()/auth_oauth(); None means "not authorized".
        self.service = None

    def is_authorized(self):
        """Return True once a service object has been built."""
        return self.service is not None

    def get_credentials(self, token_file, client_secrets_file, force_new=False):
        """Load cached OAuth credentials, refreshing or re-running the
        interactive flow as needed, and persist them back to token_file."""
        creds = None
        # Reuse previously stored credentials unless a fresh auth is forced
        if os.path.exists(token_file) and not force_new:
            creds = Credentials.from_authorized_user_file(token_file, self.scopes)
        if creds and creds.valid:
            return creds
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            # No usable credentials: run the interactive OAuth console flow
            flow = InstalledAppFlow.from_client_secrets_file(client_secrets_file, self.scopes)
            creds = flow.run_console()
        with open(token_file, "w") as token:
            token.write(creds.to_json())
        return creds

    def auth_key(self, api_key):
        """Authorize with a simple API key (no user context)."""
        self.service = googleapiclient.discovery.build(
            self.api_name, self.api_version, developerKey=api_key)

    def auth_oauth(self, token_file, client_secrets_file, force_new=False):
        """Authorize via OAuth user credentials."""
        credentials = self.get_credentials(token_file, client_secrets_file, force_new)
        self.service = googleapiclient.discovery.build(
            self.api_name, self.api_version, credentials=credentials)
class YoutubeApis(GoogleApis):
    def __init__(self):
        # YouTube Data API v3 with the force-ssl scope, which is required
        # for managing live streams and broadcasts.
        super().__init__("youtube", "v3", ["https://www.googleapis.com/auth/youtube.force-ssl"])
# Not recommended to use: costs 100 quota units and takes ~5 minutes to detect newly started broadcasts
def search_livebroadcasts_ytapi(self, channel_id):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.search().list(part="snippet", eventType="live", type="video", channelId=channel_id)
livestreams = []
try:
res = request.execute()
items = res.get("items", [])
for item in items:
single_stream = LiveBroadcast(
item.get("id").get("videoId"),
item.get("snippet").get("title"),
channel_id
)
livestreams.append(single_stream)
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return livestreams
def search_livebroadcasts(self, channel_id):
channel_url = f"https://www.youtube.com/channel/{channel_id}"
options = {
"playlistend": 1, # only the first item
"quiet": True
}
livestreams = []
with youtube_dl.YoutubeDL(options) as yt_dl:
try:
res = yt_dl.extract_info(channel_url, download=False)
res_item = res["entries"][0]["entries"][0]
if res_item["protocol"] == "m3u8":
single_stream = LiveBroadcast(
res_item["id"],
res_item["title"],
channel_id,
channel_name=res_item["channel"],
m3u8_url=res_item["url"]
)
livestreams.append(single_stream)
except youtube_dl.utils.DownloadError as e:
raise GoogleApis.NetworkException(f"youtube-dl failed to search live broadcasts: {str(e)}")
except (IndexError, KeyError):
pass # no livestreams found
return livestreams
def parse_livestream_res(self, res):
ingestion_info = res.get("cdn").get("ingestionInfo")
res_data = {
"id": res.get("id", ""), # ex 'AniW-ozy_koWoLjDw3F2Rg1618885401806773'
"rtmp_url": ingestion_info.get("ingestionAddress", ""),
"rtmp_key": ingestion_info.get("streamName", "")
}
return res_data
def list_videos(self, video_id):
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.videos().list(
part="contentDetails,id,snippet,status",
id=video_id
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return res.get("items")[0]
# Creates the RTMP ingestion point that can be reused for every stream
def insert_livestream(self, title, fps="variable", resolution="variable"):
# fps can be "30fps", "60fps"
# resolution "1080p", "720p", "480p", etc
# both can be set to "variable" for automatic detection
if not self.is_authorized():
raise GoogleApis.AuthException("Requires OAuth")
request = self.service.liveStreams().insert(
# part="snippet,cdn,id,status",
part = "id,cdn",
body={
"cdn": {
"ingestionType": "rtmp",
"resolution": resolution,
"frameRate": fps
},
"snippet": {
"title": title
}
}
)
res = None
try:
res = request.execute()
except googleapiclient.errors.HttpError as e:
raise GoogleApis.HttpException(str(e))
except httplib2.error.ServerNotFoundError as e:
raise GoogleApis.NetworkException(str(e))
return self.parse_livestream_res(res)
def create_variable_livestream(self, title):
    """Return ingestion data for the variable-resolution liveStream.

    Reuses the first existing "variable" stream; creates a new one only
    when none is found.
    """
    existing = next(
        (
            stream for stream in self.list_livestream()
            if stream.get("cdn").get("resolution") == "variable"
        ),
        None,
    )
    # Seems like YT will always create a default variable stream if deleted
    if existing is None:
        logging.info("Variable livestream not found, creating new one")
        return self.insert_livestream(title)
    return self.parse_livestream_res(existing)
def list_livestream(self):
    """Return every liveStream resource owned by the authorized user."""
    if not self.is_authorized():
        raise GoogleApis.AuthException("Requires OAuth")
    request = self.service.liveStreams().list(
        part="id,cdn,snippet,status",
        mine=True
    )
    try:
        response = request.execute()
    except googleapiclient.errors.HttpError as err:
        raise GoogleApis.HttpException(str(err))
    except httplib2.error.ServerNotFoundError as err:
        raise GoogleApis.NetworkException(str(err))
    return response.get("items", [])
# Creates the actual stream video instance that viewers see
def insert_broadcast(self, title, description=None, archive=True, privacy="public"):
    """Create a liveBroadcast scheduled to start now; return {"id": ...}.

    privacy may be "public", "private" or "unlisted".  The broadcast
    auto-starts when the bound ingestion stream goes live.
    """
    if not self.is_authorized():
        raise GoogleApis.AuthException("Requires OAuth")
    # BUG FIX: datetime.utcnow() is deprecated and yields a *naive*
    # timestamp, so isoformat() had no UTC offset; the API expects an
    # RFC 3339 timestamp.  Use an aware UTC datetime instead.
    from datetime import timezone
    broadcast_date = datetime.now(timezone.utc)
    request = self.service.liveBroadcasts().insert(
        part="id,snippet,contentDetails,status",
        body={
            "contentDetails": {
                "enableDvr": archive,      # lets viewers seek/replay
                "enableAutoStart": True,   # go live as soon as ingestion starts
                "enableAutoStop": False
            },
            "snippet": {
                "scheduledStartTime": broadcast_date.isoformat(),
                "title": title,
                "description": description
            },
            "status": {
                "privacyStatus": privacy
            }
        }
    )
    try:
        res = request.execute()
    except googleapiclient.errors.HttpError as e:
        raise GoogleApis.HttpException(str(e))
    except httplib2.error.ServerNotFoundError as e:
        raise GoogleApis.NetworkException(str(e))
    res_data = {
        "id": res.get("id", "")  # ex '1b9GoutrU7k'
    }
    return res_data
def list_broadcast(self):
    """Return every liveBroadcast resource owned by the authorized user."""
    if not self.is_authorized():
        raise GoogleApis.AuthException("Requires OAuth")
    request = self.service.liveBroadcasts().list(
        part="id,snippet,contentDetails,status",
        mine=True
    )
    try:
        response = request.execute()
    except googleapiclient.errors.HttpError as err:
        raise GoogleApis.HttpException(str(err))
    except httplib2.error.ServerNotFoundError as err:
        raise GoogleApis.NetworkException(str(err))
    return response.get("items", [])
def transition_broadcast(self, broadcast_id, status):
    """Move a broadcast to *status* ("complete", "live" or "testing")."""
    if not self.is_authorized():
        raise GoogleApis.AuthException("Requires OAuth")
    request = self.service.liveBroadcasts().transition(
        broadcastStatus=status,
        id=broadcast_id,
        part="id"
    )
    try:
        return request.execute()
    except googleapiclient.errors.HttpError as err:
        raise GoogleApis.HttpException(str(err))
    except httplib2.error.ServerNotFoundError as err:
        raise GoogleApis.NetworkException(str(err))
def bind_broadcast(self, broadcast_id, stream_id):
    """Attach the ingestion stream *stream_id* to broadcast *broadcast_id*."""
    if not self.is_authorized():
        raise GoogleApis.AuthException("Requires OAuth")
    request = self.service.liveBroadcasts().bind(
        id=broadcast_id,
        part="id,snippet,contentDetails,status",
        streamId=stream_id
    )
    try:
        return request.execute()
    except googleapiclient.errors.HttpError as err:
        raise GoogleApis.HttpException(str(err))
    except httplib2.error.ServerNotFoundError as err:
        raise GoogleApis.NetworkException(str(err))
def create_rtmp_broadcast(self, title, description, privacy):
    """Create a broadcast bound to the reusable variable stream.

    Returns the new video id together with the RTMP url/key to stream to.
    """
    # First, make sure an ingestion stream exists (reused across broadcasts).
    stream = self.create_variable_livestream("Variable stream")
    broadcast = self.insert_broadcast(title, description, privacy=privacy)
    data = {
        "video_id": broadcast["id"],
        "rtmp_url": stream["rtmp_url"],
        "rtmp_key": stream["rtmp_key"]
    }
    self.bind_broadcast(data["video_id"], stream["id"])
    return data
# TODO support other quality levels?
# TODO distinguish between net and param exceptions
def get_stream_m3u8_url(self, video_url):
    """Resolve a live video URL to its m3u8 playlist URL via youtube-dl."""
    with youtube_dl.YoutubeDL({"noplaylist": True}) as yt_dl:
        try:
            info = yt_dl.extract_info(video_url, download=False)
            return info["url"]
        except youtube_dl.utils.DownloadError as e:
            raise GoogleApis.NetworkException(f"youtube-dl failed to download m3u8: {str(e)}")
| vachau/youtube-restreamer | utils/apis.py | apis.py | py | 12,777 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.credentials.Credentials.from_authorized_user_file",
"line_number": 49,
"usage_type": "call"
}... |
3984159879 | from django.http import HttpResponse
from django.shortcuts import render
from .models import *
def home_view(request):
    """Render home.html with paired (name, address) rows."""
    names = ['Jitendra', 'Rimaljit', 'Mohit', 'Deepak']
    address = ['Chandigarh', 'Ludhiana', 'Ludhiana', 'Ludhian']
    # Template iterates the zipped pairs once.
    context = {'info': zip(names, address)}
    return render(request, 'home.html', context=context)
def about_view(request):
    """Render about.html with every AdmissionDetails row."""
    context = {'queryset': AdmissionDetails.objects.all()}
    return render(request, 'about.html', context=context)
def login_view(request):
    """Render the login form; on POST, persist a new AdmissionDetails row."""
    data = {}
    if request.method == 'POST':
        # Submitted form fields.
        ad_no = request.POST.get('adno_text')
        name = request.POST.get('name_text')
        section = request.POST.get('class_text')
        # Create and save the record in a single ORM call.
        AdmissionDetails.objects.create(
            application_no = ad_no,
            name = name,
            section = section
        )
        data['result'] = "your record has been saved"
    return render(request, 'login.html', context=data)
| jitendra5581/cms-training | studentapp/views.py | views.py | py | 1,601 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 61,
"usage_type": "call"
}
] |
19112292097 |
import torch as T
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
class DeepQNetwork(nn.Module):
    """Three-layer fully connected Q-value estimator with its own optimizer."""

    def __init__(self, lr, input_dims, fc1_dims, fc2_dims, n_actions):
        super().__init__()
        self.input_dims = input_dims
        self.fc1_dims = fc1_dims
        self.fc2_dims = fc2_dims
        self.n_actions = n_actions
        # Observation -> hidden -> hidden -> one Q-value per action.
        self.fc1 = nn.Linear(*self.input_dims, self.fc1_dims)
        self.fc2 = nn.Linear(self.fc1_dims, self.fc2_dims)
        self.fc3 = nn.Linear(self.fc2_dims, self.n_actions)
        self.optimizer = optim.Adam(self.parameters(), lr=lr)
        self.loss = nn.MSELoss()
        # Run on GPU when available, otherwise CPU.
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.to(self.device)

    def forward(self, state):
        """Return raw Q-value estimates, one per action."""
        hidden = F.relu(self.fc1(state))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)
class Agent():
    """Epsilon-greedy DQN agent with a uniform-sampling replay ring buffer."""

    def __init__(self, gamma, epsilon, lr, input_dims, batch_size, n_actions, epsilon_min):
        self.gamma = gamma              # reward discount factor
        self.epsilon = epsilon          # current exploration probability
        self.epsilon_decay = 0.99       # multiplicative decay applied per learn()
        self.lr = lr
        self.input_dims = input_dims
        self.n_actions = n_actions
        self.batch_size = batch_size
        self.mem_size = 10000           # replay buffer capacity
        self.epsilon_min = epsilon_min
        self.action_space = [i for i in range(self.n_actions)]
        self.mem_cntr = 0               # total transitions ever stored
        self.Q_eval = DeepQNetwork(self.lr, n_actions=self.n_actions, input_dims=self.input_dims, fc1_dims=256, fc2_dims=256)
        # Pre-allocated transition buffers (overwritten cyclically).
        self.state_memory = np.zeros((self.mem_size, *input_dims), dtype=np.float32)
        self.new_state_memory = np.zeros((self.mem_size, *input_dims), dtype=np.float32)
        self.action_memory = np.zeros(self.mem_size, dtype=np.int32)
        self.reward_memory = np.zeros(self.mem_size, dtype=np.float32)
        # BUG FIX: np.bool was deprecated and removed in NumPy 1.24; the
        # builtin bool is the correct dtype alias here.
        self.terminal_memory = np.zeros(self.mem_size, dtype=bool)

    def store_transitions(self, state, action, reward, new_state, done):
        """Write one (s, a, r, s', done) transition into the ring buffer."""
        index = self.mem_cntr % self.mem_size  # wraps to 0 at mem_size
        self.state_memory[index] = state
        self.new_state_memory[index] = new_state
        self.reward_memory[index] = reward
        self.action_memory[index] = action
        self.terminal_memory[index] = done
        self.mem_cntr += 1

    def act(self, observation):
        """Epsilon-greedy action selection for *observation*."""
        # (Removed leftover debug prints of the raw observation.)
        if np.random.random() > self.epsilon:
            state = T.tensor([observation]).to(self.Q_eval.device)
            actions = self.Q_eval.forward(state)
            action = T.argmax(actions).item()
        else:
            action = np.random.choice(self.action_space)
        return action

    def learn(self):
        """One TD(0) update on a random minibatch, then decay epsilon."""
        if self.mem_cntr < self.batch_size:  # skip learning till enough samples
            return
        self.Q_eval.optimizer.zero_grad()
        max_mem = min(self.mem_cntr, self.mem_size)
        batch = np.random.choice(max_mem, self.batch_size, replace=False)
        batch_index = np.arange(self.batch_size, dtype=np.int32)
        state_batch = T.tensor(self.state_memory[batch]).to(self.Q_eval.device)
        new_state_batch = T.tensor(self.new_state_memory[batch]).to(self.Q_eval.device)
        reward_batch = T.tensor(self.reward_memory[batch]).to(self.Q_eval.device)
        terminal_batch = T.tensor(self.terminal_memory[batch]).to(self.Q_eval.device)
        action_batch = self.action_memory[batch]
        # Q(s, a) restricted to the actions actually taken.
        q_eval = self.Q_eval.forward(state_batch)[batch_index, action_batch]
        # Bootstrap value of the next state (TODO: use a separate target network).
        q_next = self.Q_eval.forward(new_state_batch)
        q_next[terminal_batch] = 0.0  # terminal states have no future value
        q_target = reward_batch + self.gamma * T.max(q_next, dim=1)[0]
        loss = self.Q_eval.loss(q_target, q_eval).to(self.Q_eval.device)
        loss.backward()
        self.Q_eval.optimizer.step()
        self.epsilon = max(self.epsilon * self.epsilon_decay, self.epsilon_min)
import gym


if __name__ == '__main__':
    # Training driver for the DQN agent on LunarLander.
    # NOTE(review): env.step() is unpacked into 4 values, so this assumes
    # the pre-0.26 gym API (no truncation flag) -- confirm the gym version.
    env = gym.make('LunarLander-v2')
    agent = Agent(gamma=0.99, epsilon=1.0, batch_size=3, n_actions=4, epsilon_min=0.01, input_dims=[8], lr=0.003)
    scores = []        # per-episode returns
    eps_history = []   # epsilon value after each episode
    episodes = 500
    for episode in range(episodes):
        score = 0
        done = False
        cur_state = env.reset()
        while not done:
            print(cur_state)  # debug: raw observation each step
            action = agent.act(cur_state)
            new_state, reward, done, info = env.step(action)
            score += reward
            agent.store_transitions(cur_state, action, reward, new_state, done)
            # One gradient step per environment step.
            agent.learn()
            cur_state = new_state
        scores.append(score)
        eps_history.append(agent.epsilon)
        # Running average over the last 100 episodes.
        avg_score = np.mean(scores[-100:])
        print("Epsiode", episode, "Score %.2f" % score, "Average Score %.2f" % avg_score, "Epsilon %.2f" %agent.epsilon)
| miczed/learny-mc-learnface | DQN/Examples/DQN_Lunar_MLwithPhil.py | DQN_Lunar_MLwithPhil.py | py | 5,392 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
15083780449 | import ray
import numpy as np
import gym
import tensorflow as tf
import tensorflow.contrib.slim as slim
import time
import sys
sys.path.insert(0, "/home/ubuntu/pong_py")
from pongjsenv import PongJSEnv
# Start Ray on the local machine; all work happens in remote actors.
ray.init(num_workers=0)

# Network / training hyperparameters.
n_obs = 8 # dimensionality of observations
n_h = 256 # number of hidden layer neurons
#n_actions = 2 # number of available actions
n_actions = 3 # number of available actions
learning_rate = 5e-4 # how rapidly to update parameters
gamma = .99 # reward discount factor
def make_policy(observation_placeholder):
    """Build a softmax policy head: one hidden layer over the observation.

    NOTE(review): relies on tf.contrib.slim, which only exists in
    TensorFlow 1.x -- confirm the targeted TF version before upgrading.
    """
    hidden = slim.fully_connected(observation_placeholder, n_h)
    # Small-variance init keeps the initial policy close to uniform.
    log_probability = slim.fully_connected(hidden, n_actions, activation_fn=None, weights_initializer=tf.truncated_normal_initializer(0.001))
    return tf.nn.softmax(log_probability)
def discounted_normalized_rewards(r):
    """Take 1D float array of rewards and compute normalized discounted reward."""
    out = np.zeros_like(r)
    acc = 0
    # Walk backwards so each entry accumulates the discounted future sum.
    for t in range(r.size - 1, -1, -1):
        acc = acc * gamma + r[t]
        out[t] = acc
    # Standardize to zero mean / unit variance.
    return (out - np.mean(out)) / np.std(out)
@ray.remote
class Env(object):
    """Ray actor that owns one Pong environment plus a local policy network.

    Each actor holds its own TF graph/session; weights are pushed in from
    the driver via load_weights() before every rollout.
    """
    def __init__(self):
        self.env = env = PongJSEnv()
        self.input_observation = tf.placeholder(dtype=tf.float32, shape=[None, n_obs])
        # These two placeholders are only needed to build train_op; the
        # actor itself never runs the training step.
        input_probability = tf.placeholder(dtype=tf.float32, shape=[None, n_actions])
        input_reward = tf.placeholder(dtype=tf.float32, shape=[None,1])
        # The policy network.
        self.action_probability = make_policy(self.input_observation)
        loss = tf.nn.l2_loss(input_probability - self.action_probability)
        optimizer = tf.train.AdamOptimizer(learning_rate)
        self.train_op = optimizer.minimize(loss, grad_loss=input_reward)
        # Create TensorFlow session and initialize variables.
        self.sess = tf.InteractiveSession()
        tf.global_variables_initializer().run()
        # Helper that gets/sets all policy weights as numpy arrays.
        self.variables = ray.experimental.TensorFlowVariables(self.action_probability, self.sess)

    def rollout(self):
        """Play one episode; return (observations, normalized returns, action labels)."""
        observation = self.env.reset()
        observations, rewards, labels = [], [], []
        reward_sum = 0
        # NOTE(review): reward_sums/episode_number/num_timesteps/start_time
        # are never used below -- presumably leftovers from a merged loop.
        reward_sums = []
        episode_number = 0
        num_timesteps = 0
        done = False
        start_time = time.time()
        # Training loop
        while not done:
            # stochastically sample a policy from the network
            probability = self.sess.run(self.action_probability, {self.input_observation: observation[np.newaxis, :]})[0,:]
            action = np.random.choice(n_actions, p = probability)
            # One-hot label of the sampled action.
            label = np.zeros_like(probability) ; label[action] = 1
            observations.append(observation)
            labels.append(label)
            observation, reward, done, info = self.env.step(action)
            reward_sum += reward
            rewards.append(reward)
        return np.vstack(observations), discounted_normalized_rewards(np.vstack(rewards)), np.vstack(labels)

    def load_weights(self, weights):
        """Overwrite this actor's policy weights with the driver's copy."""
        self.variables.set_weights(weights)
# Driver: four rollout actors plus one central copy of the policy that
# performs the gradient updates.
agents = [Env.remote() for _ in range(4)]
input_observation = tf.placeholder(dtype=tf.float32, shape=[None, n_obs])
input_probability = tf.placeholder(dtype=tf.float32, shape=[None, n_actions])
input_reward = tf.placeholder(dtype=tf.float32, shape=[None, 1])
action_probability = make_policy(input_observation)
# L2 policy-gradient surrogate, weighted by the discounted returns.
loss = tf.nn.l2_loss(input_probability - action_probability)
optimizer = tf.train.AdamOptimizer(learning_rate)
train_op = optimizer.minimize(loss, grad_loss=input_reward)
sess = tf.Session()
tf.global_variables_initializer().run(session=sess)
variables = ray.experimental.TensorFlowVariables(loss, sess)
num_timesteps = 0
reward_sums = []
# Barrier for the timing (TODO(pcm): clean this up)
weights = ray.put(variables.get_weights())
ray.get([agent.load_weights.remote(weights) for agent in agents])
start_time = time.time()
for _ in range(100):
    # Broadcast the current weights to every actor.
    weights = ray.put(variables.get_weights())
    # EXERCISE: Set weights on the remote agents
    [agent.load_weights.remote(weights) for agent in agents]
    # EXERCISE: Call agent.rollout on all the agents, get results, store them in variable "trajectories"
    trajectories = ray.get([agent.rollout.remote() for agent in agents])
    reward_sums.extend([trajectory[0].shape[0] for trajectory in trajectories])
    timesteps = np.sum([trajectory[0].shape[0] for trajectory in trajectories])
    # Print progress roughly every 5000 environment steps.
    if (num_timesteps + timesteps) // 5000 > num_timesteps // 5000:
        print('time: {:4.1f}, timesteps: {:7.0f}, reward: {:7.3f}'.format(
            time.time() - start_time, num_timesteps + timesteps, np.mean(reward_sums)))
    num_timesteps += timesteps
    # Concatenate the per-actor (obs, returns, labels) arrays and train.
    results = [np.concatenate(x) for x in zip(*trajectories)]
    sess.run(train_op, {input_observation: results[0], input_reward: results[1], input_probability: results[2]})
| robertnishihara/ray-tutorial-docker | rl_exercises/pong_py_no_git/pong_py/parallel_train.py | parallel_train.py | py | 4,946 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "ray.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.contrib.slim.fully_co... |
14489788093 | # Lab 24 - Rain Data
import datetime
import matplotlib.pyplot as plt
def open_file():
    """opens the file and returns its contents as a list separated by each row"""
    with open('lab24_ankeny_rain.txt', 'r') as handle:
        contents = handle.read()
    return contents.split("\n")
def get_dates(data):
    """accepts data rows and returns a list of just the date tokens.

    A token counts as a date exactly when it is 11 characters long.
    """
    dates = []
    for row in data:
        dates.extend(token for token in row.split(" ") if len(token) == 11)
    return dates
def get_daily_totals(data):
    """Return the first integer found in each row (a "-" counts as 0).

    Rows containing no numeric token contribute nothing to the result.
    """
    daily_totals = []
    for row in data:
        for value in row.split(" "):
            if value == "-":
                value = 0  # "-" marks a day with no measurable rain
            try:
                daily_totals.append(int(value))
                break  # only the first numeric token per row matters
            # BUG FIX: bare `except: pass` also swallowed KeyboardInterrupt
            # and real errors; int() on a non-numeric string raises ValueError.
            except ValueError:
                continue
    return daily_totals
def get_merged_data(dates, daily_totals):
    """accepts two lists and zips together to return as a list of tuples"""
    return [pair for pair in zip(dates, daily_totals)]
def calculate_mean(daily_totals):
    """calculates the mean: the total sum divided by the number of entries"""
    return sum(daily_totals) / len(daily_totals)
def calculate_variance(mean, daily_totals):
    """population variance: the average squared deviation from *mean*"""
    total_sq_diff = sum((num - mean) ** 2 for num in daily_totals)
    return total_sq_diff / len(daily_totals)
def get_max_rain_date(merged_data):
    """Return the date with the highest rain total (first one on ties).

    *merged_data* is a list of (date, total) tuples.
    """
    # Single pass with max() instead of materializing three helper lists;
    # max() returns the first maximal element, matching the old behavior.
    return max(merged_data, key=lambda pair: pair[1])[0]
def print_results(mean, variance, max_rain_date):
    """accepts results and prints each back to the user"""
    lines = [
        f"\nThe mean of the data is {mean}.",
        f"The variance of the data is {variance}.",
        f"The date which had the most rain was {max_rain_date}.\n",
    ]
    print("\n".join(lines))
def plot_results(dates, daily_totals):
    """creates an x, y graph with the dates along the x-axis and the daily totals along the y-axis"""
    # One line per (date, total) series; show() blocks until the window closes
    # on interactive backends.
    plt.plot(dates, daily_totals)
    plt.ylabel("Daily Totals")
    plt.xlabel("Dates")
    plt.show()
def main():
    """Drive the full pipeline: load, parse, analyze, report, plot."""
    rows = open_file()
    dates = get_dates(rows)
    totals = get_daily_totals(rows)
    merged = get_merged_data(dates, totals)
    avg = calculate_mean(totals)
    var = calculate_variance(avg, totals)
    wettest = get_max_rain_date(merged)
    print_results(avg, var, wettest)
    plot_results(dates, totals)


main()
| mjhcodes/pdxcodeguild | python/lab24.py | lab24.py | py | 2,881 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 68,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.ylabel",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "mat... |
23777096489 | import re
import sys
from random import randrange, randint, choices, shuffle
from typing import List, Dict, Tuple
import numpy as np
import pandas as pd
from pepfrag import ModSite, IonType, pepfrag
from pyteomics.mass import calculate_mass
from src.fragment_matching import write_matched_fragments
from src.model.fragment import Fragment
from src.model.modification import IAA_ALKYLATION, CYS_BOND
from src.model.peptide import Peptide
from src.model.precursor import Precursor
from src.model.scan import Scan
from src.precursor_matching import write_matched_precursors
from src.utilities.constants import PROTON
from src.utilities.dataloading import cleave_protein
def intersects(t, u):
    """True when the half-open intervals t=(x, y) and u=(a, b) overlap."""
    x, y = t
    a, b = u
    # Overlap iff each interval starts before the other ends.
    return x < b and a < y
def remove_peptide_duplicates(xs):
    """Deduplicate a list of dicts (values must be hashable); order is arbitrary."""
    unique_items = {tuple(peptide.items()) for peptide in xs}
    return [dict(items) for items in unique_items]
def connected_cys_count(prec):
    """Number of cysteines in *prec* not consumed by alkylation (i.e. bond-capable)."""
    total_cys = prec.sequence.count("C")
    return total_cys - prec.alkylation_count
def generate_simple_peptides(
    tryptides: List[Peptide],
    cys_bond_tryptides,
    base_count=10_000,
    max_mc_count=5,
):
    """Randomly sample contiguous tryptide runs as single-chain precursors.

    Returns (peptides_without_bonded_cys, peptides_with_bonded_cys), both
    deduplicated.  *cys_bond_tryptides* lists tryptide indices whose
    cysteine participates in a disulfide bond and is therefore not
    alkylated.  Each entry is {"charge": int, "precursor": Precursor}.
    """
    peptide_without_bond_cys: List[Dict] = []
    peptide_with_bond_cys: List[Dict] = []
    for _ in range(0, base_count):
        # Random window [b, e); width bounded by the allowed missed cleavages.
        b = randrange(0, len(tryptides) - 1)
        e = randrange(b + 1, min(len(tryptides), b + 2 + max_mc_count))
        if b < e:
            charge = randint(1, 5)
            sequence = "".join(t.sequence for t in tryptides[b:e])
            # Every cysteine is alkylated unless it belongs to a bond.
            alkylations = sum(res == "C" for res in sequence)
            cys_overlap = [i for i in cys_bond_tryptides if i in range(b, e)]
            if cys_overlap:
                alkylations -= len(cys_overlap)
            mass = calculate_mass(sequence) + alkylations * IAA_ALKYLATION.mass
            prec: Dict = {
                "charge": charge,
                "precursor": Precursor(
                    sequence=sequence,
                    mass=mass,
                    mz=mass / charge + PROTON,
                    segments=[(b, e)],
                    residue_ranges=[(tryptides[b].beginning, tryptides[e - 1].end)],
                    cys_bond_count=0,
                    alkylation_count=alkylations,
                    modifications=[],
                    error_ppm=0,
                ),
            }
            if cys_overlap:
                peptide_with_bond_cys.append(prec)
            else:
                peptide_without_bond_cys.append(prec)
    return (
        remove_peptide_duplicates(peptide_without_bond_cys),
        remove_peptide_duplicates(peptide_with_bond_cys),
    )
def generate_dipeptides(peptides: List[Dict], max_charge=5):
    """Join every pair of non-overlapping cys-peptides into a disulfide-linked dipeptide.

    *peptides* entries are {"charge": int, "precursor": Precursor}; the
    returned list has the same shape and is deduplicated.
    """
    dipeptides = []
    for i, s in enumerate(peptides):
        prec: Precursor = s["precursor"]
        for t in peptides[i:]:
            qrec: Precursor = t["precursor"]
            if not intersects(prec.segments[0], qrec.segments[0]):
                charge = randint(1, max_charge)
                # Order the two chains by their starting tryptide index.
                ps = sorted([prec, qrec], key=lambda p: p.segments[0][0])
                # Disulfide formation changes the combined mass by CYS_BOND.mass.
                mass = prec.mass + qrec.mass + CYS_BOND.mass
                joined = Precursor(
                    sequence=ps[0].sequence + "+" + ps[1].sequence,
                    mass=mass,
                    mz=mass / charge + PROTON,
                    segments=ps[0].segments + ps[1].segments,
                    residue_ranges=ps[0].residue_ranges + ps[1].residue_ranges,
                    cys_bond_count=1,
                    alkylation_count=prec.alkylation_count + qrec.alkylation_count,
                    modifications=ps[0].modifications + ps[1].modifications,
                    error_ppm=0,
                )
                dipeptides.append({"charge": charge, "precursor": joined})
    return remove_peptide_duplicates(dipeptides)
def generate_unipeptides(peptides: List[Dict]):
    """Close an intra-chain disulfide on peptides with exactly two free cysteines.

    Only the mass/mz change (by CYS_BOND.mass); sequence and segments stay
    the same.  Result is deduplicated.
    """
    unipeptides = []
    for s in peptides:
        p: Precursor = s["precursor"]
        if connected_cys_count(p) == 2:
            charge = s["charge"]
            # Positional Precursor args mirror the field order used elsewhere.
            precursor = Precursor(
                p.sequence,
                p.mass + CYS_BOND.mass,
                (p.mass + CYS_BOND.mass) / charge + PROTON,
                p.segments,
                p.residue_ranges,
                p.cys_bond_count,
                p.alkylation_count,
                p.modifications,
                p.error_ppm,
            )
            unipeptides.append({"charge": charge, "precursor": precursor})
    return remove_peptide_duplicates(unipeptides)
def valid_frags(frags, cys, length):
    """Keep only fragments whose span reaches past position *cys* in a chain of *length*."""
    def reaches_cys(frag):
        _, code, size = frag
        if "b" in code:
            # b-ions grow from the N-terminus.
            return size > cys
        # y-ions grow from the C-terminus.
        return size >= (length - cys)
    return [frag for frag in frags if reaches_cys(frag)]
def charge_from_code(code):
    """Parse the charge out of a pepfrag ion code, e.g. "y3[2+]" -> 2.

    Codes ending in "[+]" (no digit) are singly charged; a code with no
    charge suffix at all is also treated as charge 1.
    """
    match = re.match(r".*\[(\d+)?\+]$", code)
    # BUG FIX: a code without a "[n+]" suffix previously crashed with
    # AttributeError on match.group(); default to a single charge instead.
    if match is None or match.group(1) is None:
        return 1
    return int(match.group(1))
def split_on_simple_frags(seq, frags, cysteines):
    """Partition *frags* into (safe, unsafe) by whether their span covers a cysteine.

    *seq* is the fragment's absolute residue range (b, e); b-ions span the
    first i residues, y-ions the last i residues.
    """
    b, e = seq
    safe, unsafe = [], []
    for frag in frags:
        _, code, size = frag
        if "b" in code:
            covers_cys = any(b <= c < b + size for c in cysteines)
        else:
            covers_cys = any(e - size <= c < e for c in cysteines)
        (unsafe if covers_cys else safe).append(frag)
    return safe, unsafe
def simple_fragment(
    id, sequence, residue_range, charge, mz, break_count, intensity_ratio, intensity=10
):
    """Build a Fragment record for a single contiguous (non-bonded) ion."""
    return Fragment(
        id=id,
        sequence=sequence,
        residue_ranges=residue_range,
        intensity=intensity,
        intensity_ratio=intensity_ratio,
        # Neutral mass back-computed from the singly-referenced m/z.
        target_mass=(mz - PROTON) * charge,
        mass=(mz - PROTON) * charge,
        target_mz=mz,
        mz=mz,
        charge=charge,
        break_count=break_count,
        error_ppm=0,
        # Every cysteine in a simple fragment carries the IAA alkylation.
        modifications=[IAA_ALKYLATION for res in sequence if res == "C"],
        connected_bonds=[],
        disconnected_cys=[],
    )
def fragment_sequence(seq, frag, residue_range):
    """Return (subsequence, absolute residue range) for a b- or y-ion of *seq*."""
    _, code, size = frag
    b, e = residue_range
    if "b" in code:
        # b-ion: prefix of length *size*.
        return seq[:size], (b, b + size)
    # y-ion: suffix of length *size*.
    return seq[-size:], (e - size, e)
def simple_frags_to_fragments(frags, prec_sequence, prec_residue_range, precursor):
    """Wrap raw (mz, code, length) pepfrag tuples into fragment-match records.

    Each returned entry is {"fragment": Fragment, "precursor": precursor,
    "var_bonds": []}; intensity is split evenly across all *frags*.
    """
    fragments = []
    for id, frag in enumerate(frags):
        mz, code, i = frag
        frag_charge = charge_from_code(code)
        frag_sequence, frag_residue_range = fragment_sequence(
            prec_sequence, frag, prec_residue_range
        )
        fragment = simple_fragment(
            id=id,
            sequence=frag_sequence,
            residue_range=[frag_residue_range],
            charge=frag_charge,
            mz=mz,
            # 0 when the fragment is the intact precursor, 1 otherwise.
            break_count=int(prec_residue_range != frag_residue_range),
            intensity_ratio=1 / len(frags),
        )
        fragments.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": []}
        )
    return fragments
def safe_choose_n(fragments, n=50):
    """Sample up to *n* fragments (with replacement), deduplicated and sorted."""
    sample = set(choices(fragments, k=n))
    return sorted(sample)
def pepfrag_fragments(
    sequence: str,
    residue_range: Tuple[int, int],
    charge: int,
    ion_types,
    bond_cys_res: List[int],
    count=50,
):
    """Fragment *sequence* with pepfrag and return up to *count* sampled ions.

    Cysteines are modeled with the IAA alkylation modification unless their
    absolute residue index appears in *bond_cys_res* (bonded cysteines stay
    unmodified).  ModSite positions are 1-based within the peptide.
    """
    frags = pepfrag.Peptide(
        sequence,
        charge=charge,
        modifications=[
            ModSite(IAA_ALKYLATION.mass, ri + 1, IAA_ALKYLATION.description)
            # BUG FIX: the tuple was unpacked as (ai, res), so `res` was the
            # absolute index and `res == "C"` was always False -- no ModSite
            # was ever attached.  zip yields (residue_char, absolute_index).
            for ri, (res, ai) in enumerate(zip(sequence, range(*residue_range)))
            if res == "C" and ai not in bond_cys_res
        ],
    ).fragment(ion_types=ion_types)
    return safe_choose_n(frags, count)
def fragment_simple_peptide(
    peptide: Dict,
    bond_cys_res: List[int],
    count=50,
    ion_types=None,
):
    """Fragment a linear, bond-free peptide into b-/y-/precursor ions.

    *peptide* is {"charge": int, "precursor": Precursor}; returns the
    record list produced by simple_frags_to_fragments.
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    precursor: Precursor = peptide["precursor"]
    sequence = precursor.sequence
    residue_range = precursor.residue_ranges[0]
    # if connected_cys_count(precursor) == 0:
    frags = pepfrag_fragments(
        sequence=precursor.sequence,
        residue_range=residue_range,
        charge=peptide["charge"],
        bond_cys_res=bond_cys_res,
        ion_types=ion_types,
        count=count,
    )
    return simple_frags_to_fragments(frags, sequence, residue_range, precursor)
def fragment_dipeptide(
    peptide: Dict, bond_cys_res: List[int], ion_types=None, count=50
):
    """Fragment a two-chain, disulfide-linked precursor.

    Cysteine-free ions from each chain are emitted directly; cysteine-
    containing b/y ions from the two chains are randomly paired into
    bonded two-part fragments.
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    max_charge = peptide["charge"]
    precursor: Precursor = peptide["precursor"]
    # The joined sequence uses "+" to separate the two chains.
    ps, qs = precursor.sequence.split("+")
    prr, qrr = precursor.residue_ranges
    result = []
    building_fragments = []
    for sequence, residue_range in [(ps, prr), (qs, qrr)]:
        frags = pepfrag_fragments(
            sequence=sequence,
            residue_range=residue_range,
            charge=1,
            ion_types=ion_types,
            bond_cys_res=bond_cys_res,
            count=count,
        )
        simple_frags, cys_frags = split_on_simple_frags(
            residue_range, frags, bond_cys_res
        )
        # Cysteine-free ions go straight to the output.
        result += simple_frags_to_fragments(
            simple_frags, sequence, residue_range, precursor
        )
        shuffle(cys_frags)
        building_fragments.append(
            [
                fr["fragment"]
                for fr in simple_frags_to_fragments(
                    cys_frags, sequence, residue_range, precursor
                )
            ]
        )
    for i, (pf, qf) in enumerate(
        choices(list(zip(building_fragments[0], building_fragments[1])), k=count)
    ):
        total_charge = randint(1, max_charge)
        # Neutral mass of the joined fragment; the disulfide adds CYS_BOND.mass.
        total_mass = pf.mz + qf.mz + CYS_BOND.mass - 2 * PROTON
        if "C" not in pf.sequence or "C" not in qf.sequence:
            continue
        fragment = Fragment(
            # NOTE(review): id is always 0 here (unipeptide uses i) -- confirm.
            id=0,
            sequence=pf.sequence + "+" + qf.sequence,
            residue_ranges=pf.residue_ranges + qf.residue_ranges,
            intensity=10,
            intensity_ratio=1,
            mass=total_mass,
            target_mass=total_mass,
            mz=total_mass / total_charge + PROTON,
            target_mz=total_mass / total_charge + PROTON,
            charge=total_charge,
            break_count=pf.break_count + qf.break_count,
            error_ppm=0,
            modifications=qf.modifications + pf.modifications,
            # TODO: bond positions are hard-coded here (and elsewhere).
            connected_bonds=tuple([(72, 119)]),
            disconnected_cys=tuple([]),
        )
        result.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": [(72, 119)]}
        )
    return result
def fragment_unipeptide(
    peptide: Dict, bond_cys_res: List[int], ion_types=None, count=50
):
    """Fragment a single-chain peptide whose disulfide closes an internal loop.

    b-ions and y-ions that span a bonded cysteine are paired into two-part
    fragments joined by the disulfide; pairs whose residue ranges overlap
    are discarded as physically impossible.
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    max_charge = peptide["charge"]
    precursor: Precursor = peptide["precursor"]
    sequence = precursor.sequence
    residue_range = precursor.residue_ranges[0]
    frags = pepfrag_fragments(
        sequence=sequence,
        residue_range=residue_range,
        charge=1,
        ion_types=ion_types,
        bond_cys_res=bond_cys_res,
        count=count,
    )
    simple_frags, cys_frags = split_on_simple_frags(residue_range, frags, bond_cys_res)
    # NOTE(review): simple_frags is discarded here, unlike fragment_dipeptide
    # which emits them -- confirm whether that is intentional.
    result = []
    b_ions, y_ions = [], []
    for frag in cys_frags:
        if "b" in frag[1]:
            b_ions.append(frag)
        else:
            y_ions.append(frag)
    fragments = []
    for ions in [b_ions, y_ions]:
        fragments.append(
            [
                fr["fragment"]
                for fr in simple_frags_to_fragments(
                    # BUG FIX: this previously passed b_ions for both loop
                    # iterations, so fragments[1] duplicated the b-ions and
                    # the y-ions were never used; pass the loop variable.
                    ions, sequence, residue_range, precursor
                )
            ]
        )
    for i, (pf, qf) in enumerate(
        choices(list(zip(fragments[0], fragments[1])), k=count)
    ):
        if "C" not in pf.sequence or "C" not in qf.sequence:
            continue
        total_charge = randint(1, max_charge)
        pr, qr = pf.residue_ranges[0], qf.residue_ranges[0]
        if intersects(pr, qr):
            # Overlapping b/y spans cannot coexist in one fragment.
            continue
        # Neutral mass of the joined fragment; the disulfide adds CYS_BOND.mass.
        total_mass = pf.mz + qf.mz + CYS_BOND.mass - 2 * PROTON
        fragment = Fragment(
            id=i,
            sequence=pf.sequence + "+" + qf.sequence,
            residue_ranges=pf.residue_ranges + qf.residue_ranges,
            intensity=10,
            intensity_ratio=1,
            mass=total_mass,
            target_mass=total_mass,
            mz=total_mass / total_charge + PROTON,
            target_mz=total_mass / total_charge + PROTON,
            charge=total_charge,
            # Two breaks unless the pieces abut exactly at one cut point.
            break_count=2 if pr[1] != qr[0] else 1,
            error_ppm=0,
            modifications=qf.modifications + pf.modifications,
            # TODO: bond positions are hard-coded here (and elsewhere).
            connected_bonds=tuple([(72, 119)]),
            disconnected_cys=tuple([]),
        )
        result.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": [(72, 119)]}
        )
    return result
def generate_fragments(peptide: Dict, **kwargs):
    """Dispatch to the right fragmenter based on the precursor's topology."""
    precursor: Precursor = peptide["precursor"]
    if precursor.cys_bond_count == 0:
        # No disulfide: plain linear peptide.
        return fragment_simple_peptide(peptide, **kwargs)
    if len(precursor.segments) == 2:
        # One bond joining two separate chains.
        return fragment_dipeptide(peptide, **kwargs)
    # One bond within a single chain (internal loop).
    return fragment_unipeptide(peptide, **kwargs)
if __name__ == "__main__":
    # CLI driver: generate synthetic precursors/fragments, then run the
    # matching pipeline and dump CSVs.
    import argparse

    args = argparse.ArgumentParser(description="Generate precursors and fragments")
    args.add_argument(
        "--protein",
        type=str,
        required=True,
        help="protein code (usually three letters)",
    )
    args.add_argument(
        "--kind",
        type=str,
        choices=["AT", "RAT"],
        required=True,
        help="measurement type (AT/RAT)",
    )
    args.add_argument(
        "--prec_error",
        type=int,
        required=True,
        help="allowed precursor error in ppm",
    )
    args.add_argument(
        "--frag_error",
        type=int,
        required=True,
        help="allowed fragment error in ppm",
    )
    args.add_argument(
        "--prec_segments",
        type=int,
        required=True,
        help="upper bound of segment count in matched precursors",
    )
    args.add_argument(
        "--frag_breaks",
        type=int,
        required=True,
        help="upper bound of break count in matched fragments",
    )
    args = args.parse_args()
    tryptides = cleave_protein(args.protein)
    print(f"Generating precursors...")
    # Tryptide indices 7 and 10 carry the bonded cysteines (hard-coded).
    precursors, cys_peptides = generate_simple_peptides(tryptides, [7, 10])
    if args.kind == "AT":
        precursors += generate_dipeptides(cys_peptides)
        precursors += generate_unipeptides(cys_peptides)
    print(f"In total there's {len(precursors)} precursors.")
    # NOTE(review): sys.exit() here makes everything below unreachable --
    # presumably a debugging stop; confirm before relying on the output files.
    sys.exit()
    scans: List[Scan] = []
    fragment_records = []
    precursor_records = []
    for i, peptide in enumerate(precursors):
        precursor: Precursor = peptide["precursor"]
        # Bonded cysteine residue positions are hard-coded (as elsewhere).
        fragments = generate_fragments(peptide, bond_cys_res=[72, 119])
        fragment_objects: List[Fragment] = [f["fragment"] for f in fragments]
        # One synthetic Scan per precursor; index doubles as id and time.
        scan = Scan(
            nth_in_order=i,
            id=i,
            time=i,
            charge=peptide["charge"],
            prec_mz=precursor.mz,
            prec_intensity=100,
            prec_mass=precursor.mass,
            fragments_mz=np.array(sorted([f.mz for f in fragment_objects])),
            fragments_intensity=np.array([f.intensity for f in fragment_objects]),
            threshold=0,
        )
        scans.append(scan)
        precursor_records.append(scan.to_dict() | precursor.to_dict())
        fragment_records += [
            scan.to_dict()
            | fr["precursor"].to_dict()
            | {"var_bonds": fr["var_bonds"]}
            | fr["fragment"].to_dict()
            for fr in fragments
        ]
    precursor_path = (
        "../out/precursor_matches/{}_{}_segments={}_error={}ppm.pickle".format(
            args.protein, args.kind, args.prec_segments, args.prec_error
        )
    )
    precursor_matches = write_matched_precursors(
        tryptides,
        scans,
        precursor_path,
        max_segments=args.prec_segments,
        error_ppm=args.prec_error,
    )
    precursor_match_records = []
    for pm in precursor_matches:
        precursor_match_records.append(pm["scan"].to_dict() | pm["precursor"].to_dict())
    prec_df = pd.DataFrame(precursor_match_records)
    # NOTE(review): the "csv" paths end in ".pickle" -- likely a copy/paste
    # slip in the filename templates; confirm intended extension.
    precursor_csv_path = (
        "../out/csv/precursor_matches_{}_{}_segments={}_error={}ppm.pickle".format(
            args.protein, args.kind, args.prec_segments, args.prec_error
        )
    )
    print(f"Saving precursor csv to {precursor_csv_path}")
    prec_df.to_csv(precursor_csv_path, index=False)
    fragment_path = (
        "../out/fragment_matches/{}_{}_segments={}_breaks={}_error={}ppm.pickle".format(
            args.protein,
            args.kind,
            args.prec_segments,
            args.frag_breaks,
            args.frag_error,
        )
    )
    print(f"Computing fragments...")
    fragment_matches = write_matched_fragments(
        precursor_matches=precursor_matches,
        tryptides=tryptides,
        output_path=fragment_path,
        max_allowed_breaks=args.frag_breaks,
        error_ppm=args.frag_error,
    )
    fragment_match_records = []
    for fm in fragment_matches:
        fragment_match_records.append(
            fm["scan"].to_dict()
            | fm["precursor"].to_dict()
            | fm["variant"].to_dict()
            | (fm["fragment"].to_dict() if fm["fragment"] is not None else {})
            | {"prec_variant_count": fm["variant_count"]}
        )
    frag_df = pd.DataFrame(fragment_match_records)
    fragment_csv_path = "../out/fragment_matches/fragment_matches_{}_{}_segments={}_breaks={}_error={}ppm.pickle".format(
        args.protein,
        args.kind,
        args.prec_segments,
        args.frag_breaks,
        args.frag_error,
    )
    print(f"Saving fragments csv to {fragment_csv_path}")
    frag_df.to_csv(fragment_csv_path, index=False)
| Eugleo/dibby | src/generate_data.py | generate_data.py | py | 18,105 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "src.model.peptide.Peptide",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"... |
import os
from setuptools import setup


def read(fname):
    """Return the text of *fname*, resolved relative to this setup script.

    BUG FIX: use a context manager so the file handle is closed (the
    original opened the file and never closed it).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()


setup(
    name='snmpdesk',
    version='0.0.91',
    description='Scripts for easy get snmp data',
    author='Svintsov Dmitry',
    author_email='spam@19216801.ru',
    url='http://github.com/uralbash/snmpdesk/',
    keywords="snmp",
    install_requires=['pysnmp'],
    license='GPL',
    packages=['snmpdesk'],
    long_description=read('README'),
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Natural Language :: English',
        'Natural Language :: Russian',
        'Operating System :: Unix',
        'Programming Language :: Python',
        'Topic :: System :: Networking :: Monitoring',
    ],
)
| uralbash/snmpdesk | setup.py | setup.py | py | 927 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_n... |
29432851011 | from asyncore import read
#JMP:xkalou03
__author__ = 'xkalou03'
import sys
import argparse
import re
import reader
import table
def checkParameters():
    """Parse command-line arguments and return the argparse namespace.

    Exits with code 1 on invalid parameters or when --help is combined
    with other options, and with code 0 after printing the help text.
    """
    parser = argparse.ArgumentParser(description = 'Projekt do IPP.', add_help = False)
    parser.add_argument('--help', action = "count", default = 0, help = 'Prints help')
    parser.add_argument('--input=', action = "store", default = [], dest = "input", nargs = 1, help = 'Input file')
    parser.add_argument('--output=', action = "store", default = [], dest = "output", nargs = 1, help = 'Output file')
    parser.add_argument('--cmd=', action = "store", default = "", nargs = '+', dest = "text", help = 'Input text')
    parser.add_argument('-r', action = "store_true", dest = "redef", default = False, help = 'Redefination macros')
    try:
        args = parser.parse_args()
    # argparse signals a parse error by raising SystemExit; catching only
    # that (instead of a bare except) no longer hides unrelated bugs.
    except SystemExit:
        print("Parameters Error", file = sys.stderr)
        exit(1)
    if(args.help == 1):
        if len(sys.argv) == 2:
            # print_help() already writes to stdout and returns None;
            # wrapping it in print() used to emit a spurious "None" line.
            parser.print_help()
            exit(0)
        else:
            print("Zadany help + jine parametry", file = sys.stderr)
            exit(1)
    return args
def readInput(input, reader, table, params):
    """Consume characters from *reader*, expanding macros, and return the result.

    '@' starts a macro call (or an escape for @, {, }, $); '{' reads a whole
    block verbatim via reader.readBlock; a bare '$' or '}' is a syntax error
    (exit 55).  *input* is kept for interface compatibility but is unused.
    Removed the unused locals stringIn/rest/count from the original.
    """
    outputString = ""
    c = reader.getc()
    while c:
        if c == '@':
            x = macro(reader, table, params, c)
            if x in {'@', '{', '}', '$'}:
                # Escape sequence: emit the escaped character literally.
                outputString += x
        elif c in {'$', '}'}:
            exit(55)
        elif c == '{':
            block = reader.readBlock(False)
            if block is not None:
                outputString += block
            else:
                exit(55)
        else:
            outputString += c
        c = reader.getc()
    return outputString
def macro(reader, table, params, x):
    """Read a macro name from *reader* and dispatch it.

    *x* is the character that triggered the call ('@').  If the very next
    character is itself one of @ { } $ this is an escape sequence and that
    character is returned literally.  Otherwise characters are consumed
    while they still form a valid identifier; the accumulated name is then
    looked up in *table* and handed to argumentsMacro.  Returns '%' as a
    dummy after a successful match, or the escaped character.
    Exits 55 when '@' is followed by a non-identifier character, 56 when
    the macro name is unknown.
    """
    macroString = ""
    # Identifier pattern: the growing name must always match in full.
    reg = '^[a-zA-Z_][0-9a-zA-Z_]*$'
    i = 0
    c = reader.getc()
    if c in {'@', '{', '}', '$'} and x == '@': # check whether this is an escape sequence
        return c
    while c:
        i += 1
        if re.match(reg, macroString + c):
            macroString += c
            c = reader.getc()
            if not c:
                # Input ended right after the name: look it up and expand.
                find = table.readMacro(macroString)
                if find:
                    argumentsMacro(reader, find, c, table, params)
                    c = '%'
                    break
                else:
                    print("Toto makro neexistuje", file = sys.stderr)
                    exit(56)
            continue
        else:
            # Name fully read (next char no longer fits an identifier):
            # look it up in the table and dispatch.
            find = table.readMacro(macroString)
            if find:
                argumentsMacro(reader, find, c, table, params)
                c = '%'
                break
            else:
                if i == 1:
                    # '@' immediately followed by a non-identifier character.
                    exit(55)
                print("Toto makro neexistuje", file = sys.stderr)
                exit(56)
        # NOTE(review): both branches above either continue, break or exit,
        # so these two lines appear unreachable -- confirm before removing.
        macroString += c
        c = reader.getc()
    return c
def argumentsMacro(reader, find, x, table, params):
    """Dispatch the matched macro *find*.

    The built-ins def/undef/set (and their __name__ aliases) mutate the
    macro table directly; any other macro is expanded and its expansion is
    pushed back onto *reader* so it gets re-scanned.  *x* is the character
    that followed the macro name.  Removed the unused locals name/c from
    the original.
    """
    # -r flag: allow redefinition of existing macros.
    temp = bool(params.redef)
    if find.name in ('def', '__def__'):
        table.insertMacro(reader, x, temp)
    elif find.name in ('undef', '__undef__'):
        table.deleteMacro(reader, x)
    elif find.name in ('set', '__set__'):
        table.setMacro(reader, x)
    else:
        stringExpand = table.expandMacro(reader, x, find.name)
        reader.attachString(stringExpand)
    return
def main():
    """Entry point: open input/output streams, build the reader and macro
    table, expand the input and print the result.

    Exits 2 when a named input/output file cannot be opened; falls back to
    stdin/stdout when --input=/--output= are not given.
    """
    params = checkParameters()
    if params.input: # open the input file
        try:
            inputFile = open(params.input[0], 'r')
        except IOError:
            print("Vstupni soubor nejde otevrit", file = sys.stderr)
            exit(2)
    else:
        inputFile = sys.stdin
    if params.output: # open the output file
        try:
            outputFile = open(params.output[0], 'w')
        except IOError:
            print("Soubor nejde otevrit", file = sys.stderr)
            exit(2)
    else:
        outputFile = sys.stdout
    r = reader.Reader(inputFile) # create the reader
    if params.text:
        # --cmd= text is processed before the stream contents.
        r.attachString(str(params.text[0]))
    macroTable = table.Table() # create the macro table
    stringInput = readInput(inputFile, r, macroTable, params) # start reading the input
    if params.output:
        print(stringInput, file = outputFile, end="") # write the output
    else:
        # NOTE(review): unlike the file branch this adds a trailing newline
        # (no end="") -- confirm whether that asymmetry is intended.
        print(stringInput, file = sys.stdout)
if __name__ == "__main__":
    main()
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "sys.stderr",
... |
16128248265 | import functools
from importlib import import_module
from pyws.public import InvalidPath
def route(path):
    """Decorator that tags a handler with *path* via a __route__ attribute,
    so Route.add_routes can discover and register it."""
    def decorator(func):
        @functools.wraps(func)
        def inner(*args, **kwargs):
            return func(*args, **kwargs)
        inner.__route__ = path
        return inner
    return decorator
class Route:
    """Registry mapping path strings to the handler functions that were
    decorated with @route."""

    routes = {}

    @classmethod
    def add_routes(cls, module_name):
        """Scan *module_name* and register every public callable carrying a
        __route__ attribute.  Existing registrations are kept (setdefault)."""
        dot = module_name.rfind('.')
        if dot == -1:
            module = import_module(module_name)
        else:
            parent = import_module(module_name[:dot])
            module = getattr(parent, module_name[dot + 1:])
        for name in dir(module):
            if name.startswith('_'):
                continue
            candidate = getattr(module, name)
            path = getattr(candidate, '__route__', None)
            if path and callable(candidate):
                cls.routes.setdefault(path, candidate)

    @classmethod
    def get(cls, path):
        """Return the handler registered for *path*; raise InvalidPath when
        none is registered."""
        handler = cls.routes.get(path)
        if not handler:
            raise InvalidPath
        return handler
| czasg/ScrapyLearning | czaSpider/dump/bootstrap_test/blogs_v1/pyws/route.py | route.py | py | 1,042 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "functools.wraps",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "importlib.import_module",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pyws.... |
22450428766 | """
Team 46
Haoyue Xie 1003068 @Melbourne
Jiayu Li 713551 @Melbourne
Ruqi Li 1008342 @Melbourne
Yi Zhang 1032768 @Melbourne
Zimeng Jia 978322 @Hebei, China
"""
import json
# NOTE(review): hard-coded absolute Windows path -- adjust per machine or
# move to a CLI argument.
path = "E:/Unimelb/2020semester1/COPM90024 Cluster and Cloud Computing/assignment2/code/"
filename = path + 'SA4_2016_AUST.json'
# Load the ABS SA4 2016 boundary GeoJSON; 'features' is the list of SA4
# regions that the merge loop below groups into cities/states.
with open(filename, 'r') as f:
    jsonfile = json.load(f) #jsonfile is a dict
print("---------------reading json file done--------------------------------------")
print(jsonfile.keys()) #dict_keys(['type', 'features'])
features = jsonfile['features']
'''
print("type of features:",type(features)) #features is a list
print(len(features))
print("type of features[0]:",type(features[0])) #each feature is a dict
feature = features[0]
print("keys of feature:",feature.keys()) #dict_keys(['type', 'geometry', 'properties'])
properities = feature['properties']
geometry = feature['geometry']
print("type of properities:",type(properities))
print("keys of properities:",properities.keys()) #dict_keys(['SA4_CODE', 'SA4_CODE16', 'SA4_NAME', 'STATE_CODE', 'STATE_NAME', 'AREA_SQKM'])
print("type of geometry:",type(geometry)) #<class 'dict'>
print("keys of geometry:",geometry.keys()) #dict_keys(['type', 'coordinates'])
coordinates = geometry['coordinates']
print("type of coordinates",type(coordinates)) #<class 'list'>
print(len(coordinates)) # len=1
'''
# Per-region accumulators: merged polygon coordinate lists (one distinct
# list per region) and summed areas in square kilometres.
(coordinates_Melbourne, coordinates_Sydney, coordinates_Brisbane,
 coordinates_GoldCoast, coordinates_Adelaide, coordinates_Perth,
 coordinates_Canberra, coordinates_ACT, coordinates_NSW, coordinates_NT,
 coordinates_QLD, coordinates_SA, coordinates_TAS, coordinates_VIC,
 coordinates_WA) = ([] for _ in range(15))
# Area totals start at zero; chained assignment of an immutable int is safe.
area_sqkm_Sydney = area_sqkm_Melbourne = area_sqkm_Brisbane = 0
area_sqkm_GoldCoast = area_sqkm_Adelaide = area_sqkm_Perth = 0
area_sqkm_Canberra = area_sqkm_NT = area_sqkm_WA = 0
area_sqkm_TAS = area_sqkm_SA = area_sqkm_QLD = 0
area_sqkm_VIC = area_sqkm_NSW = area_sqkm_ACT = 0
def _merge_geometry(geometry, target):
    """Flatten *geometry*'s coordinates into *target*.

    A Polygon contributes its coordinate array as one entry; a MultiPolygon
    contributes one entry per member polygon, so *target* always ends up
    holding MultiPolygon-style coordinates.  (This logic was duplicated 15x
    in the original loop.)
    """
    coordinates = geometry['coordinates']
    if geometry["type"] == "Polygon":
        target.append(coordinates)
    else:
        for coordinate in coordinates:
            target.append(coordinate)


# SA4 code ranges identify the seven greater-capital-city regions; every
# other SA4 is grouped per state as an "other regions" feature below.
for feature in features:
    properities = feature['properties']
    sa4_code16 = properities['SA4_CODE16']
    print("+++++++++++++++"+sa4_code16+"++++++++++++")
    geometry = feature['geometry'] #geometry has two keys: "type" and "coordinates"
    print(properities)
    code = int(sa4_code16)
    if 115 <= code <= 128:  # ------- merge for Sydney
        if geometry is not None:  # some features carry no geometry
            _merge_geometry(geometry, coordinates_Sydney)
            area_sqkm_Sydney += properities['AREA_SQKM']
    elif 206 <= code <= 213:  # ------- merge for Melbourne
        if geometry is not None:
            _merge_geometry(geometry, coordinates_Melbourne)
            area_sqkm_Melbourne += properities['AREA_SQKM']
    elif 401 <= code <= 404:  # ------- merge for Adelaide
        if geometry is not None:
            _merge_geometry(geometry, coordinates_Adelaide)
            area_sqkm_Adelaide += properities['AREA_SQKM']
    elif 301 <= code <= 305:  # ------- merge for Brisbane
        if geometry is not None:
            _merge_geometry(geometry, coordinates_Brisbane)
            area_sqkm_Brisbane += properities['AREA_SQKM']
    elif code == 801:  # ------- Canberra: a single SA4, keep its geometry whole
        if geometry is not None:
            geometry_Canberra = geometry
            area_sqkm_Canberra += properities['AREA_SQKM']
    elif code == 309:  # ------- merge for Gold Coast
        if geometry is not None:
            _merge_geometry(geometry, coordinates_GoldCoast)
            area_sqkm_GoldCoast += properities['AREA_SQKM']
    elif 503 <= code <= 507:  # ------- merge for Perth
        if geometry is not None:
            _merge_geometry(geometry, coordinates_Perth)
            area_sqkm_Perth += properities['AREA_SQKM']
    else:  # remaining SA4s: group by state into "other regions"
        if geometry is not None:
            state = properities['STATE_CODE']
            if state == "8":  # ACT
                _merge_geometry(geometry, coordinates_ACT)
                area_sqkm_ACT += properities['AREA_SQKM']
            elif state == "1":  # NSW
                _merge_geometry(geometry, coordinates_NSW)
                area_sqkm_NSW += properities['AREA_SQKM']
            elif state == "7":  # NT
                _merge_geometry(geometry, coordinates_NT)
                area_sqkm_NT += properities['AREA_SQKM']
            elif state == "3":  # QLD
                _merge_geometry(geometry, coordinates_QLD)
                area_sqkm_QLD += properities['AREA_SQKM']
            elif state == "4":  # SA
                _merge_geometry(geometry, coordinates_SA)
                area_sqkm_SA += properities['AREA_SQKM']
            elif state == "6":  # TAS
                _merge_geometry(geometry, coordinates_TAS)
                area_sqkm_TAS += properities['AREA_SQKM']
            elif state == "2":  # VIC
                _merge_geometry(geometry, coordinates_VIC)
                area_sqkm_VIC += properities['AREA_SQKM']
            elif state == "5":  # WA
                _merge_geometry(geometry, coordinates_WA)
                area_sqkm_WA += properities['AREA_SQKM']
# Create a new region code and build one GeoJSON Feature per merged region.
def _build_feature(region_code, area_sqkm, state_code, state_name, city_name,
                   coordinates=None, geometry=None):
    """Assemble one GeoJSON Feature dict for a merged region.

    Supply either *coordinates* (wrapped as a MultiPolygon) or a ready-made
    *geometry* dict.  *state_name* may be None -- the original Canberra
    feature carried no STATE_NAME key; key insertion order matches the
    original hand-built dicts so the JSON output is unchanged.
    (This replaces ~220 lines of duplicated dict construction.)
    """
    properties = {}
    properties["REGION_CODE"] = region_code
    properties["AREA_SQKM"] = area_sqkm
    properties["STATE_CODE"] = state_code
    if state_name is not None:
        properties["STATE_NAME"] = state_name
    properties["CITY_NAME"] = city_name
    if geometry is None:
        geometry = {"type": "MultiPolygon", "coordinates": coordinates}
    return {"type": "Feature", "geometry": geometry, "properties": properties}


# Greater-capital-city regions (codes 01-07).
feature_Melbbourne = _build_feature("01", area_sqkm_Melbourne, "2", "VIC",
                                    "Melbourne", coordinates_Melbourne)
feature_Sydney = _build_feature("02", area_sqkm_Sydney, "1", "NSW",
                                "Sydney", coordinates_Sydney)
feature_Brisbane = _build_feature("03", area_sqkm_Brisbane, "3", "QLD",
                                  "Brisbane", coordinates_Brisbane)
feature_GoldCoast = _build_feature("04", area_sqkm_GoldCoast, "3", "QLD",
                                   "Gold Coast", coordinates_GoldCoast)
feature_Adelaide = _build_feature("05", area_sqkm_Adelaide, "4", "SA",
                                  "Adelaide", coordinates_Adelaide)
feature_Perth = _build_feature("06", area_sqkm_Perth, "5", "WA",
                               "Perth", coordinates_Perth)
# Canberra keeps the single geometry captured in the merge loop.
# NOTE(review): the original Canberra properties carried no STATE_NAME;
# preserved as-is.
feature_Canberra = _build_feature("07", area_sqkm_Canberra, "8", None,
                                  "Canberra", geometry=geometry_Canberra)
# Per-state "other regions" features (codes 08-15).
# NOTE(review): feature_ACT's geometry was left as an empty dict in the
# original (and it is excluded from the output list); preserved as-is.
feature_ACT = _build_feature("08", area_sqkm_ACT, "8", "ACT",
                             "Australian Capital Territory Other Regions",
                             geometry={})
feature_NSW = _build_feature("09", area_sqkm_NSW, "1", "NSW",
                             "New South Wales Other Regions", coordinates_NSW)
feature_VIC = _build_feature("10", area_sqkm_VIC, "2", "VIC",
                             "Victoria Other Regions", coordinates_VIC)
feature_QLD = _build_feature("11", area_sqkm_QLD, "3", "QLD",
                             "Queensland Other Regions", coordinates_QLD)
feature_SA = _build_feature("12", area_sqkm_SA, "4", "SA",
                            "South Australia Other Regions", coordinates_SA)
feature_TAS = _build_feature("13", area_sqkm_TAS, "6", "TAS",
                             "Tasmania Other Regions", coordinates_TAS)
feature_WA = _build_feature("14", area_sqkm_WA, "5", "WA",
                            "Western Australia Other Regions", coordinates_WA)
feature_NT = _build_feature("15", area_sqkm_NT, "7", "NT",
                            "Northern Territory Other Regions", coordinates_NT)
#=============================Add feature into features and output====================================
# feature_ACT is intentionally excluded (its geometry is empty).
# BUG FIX: the original list contained feature_WA twice (first and last
# position); each region should appear exactly once.
new_features = [feature_WA, feature_Perth, feature_Adelaide, feature_Brisbane,
                feature_Canberra, feature_GoldCoast, feature_Melbbourne,
                feature_Sydney, feature_NSW, feature_NT, feature_QLD,
                feature_SA, feature_TAS, feature_VIC]
newjsonfile = {}
newjsonfile["type"] = "FeatureCollection"
newjsonfile["features"] = new_features
outputfilename = path + "City_geojson.json"
with open(outputfilename, 'w') as json_file:
    # Stream the encoding in chunks to limit peak memory for the large
    # coordinate arrays.
    for chunk in json.JSONEncoder().iterencode(newjsonfile):
        json_file.write(chunk)
print("All Done!")
{
"api_name": "json.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "json.JSONEncoder",
"line_number": 516,
"usage_type": "call"
}
] |
from pyspark.sql import SparkSession
from pyspark.sql import functions as fs

# Count word occurrences in book.txt with Spark DataFrames.
spark = SparkSession.builder.appName("word_count").getOrCreate()
data = spark.read.text("book.txt")
# One row per word: split every line on runs of non-word characters.
pro_data = data.select(fs.explode(fs.split(data.value, "\\W+")).alias("words"))
# BUG FIX: DataFrames are immutable -- filter() returns a new DataFrame and
# the original discarded it, so empty strings leaked into the counts.
pro_data = pro_data.filter(pro_data.words != "")
# show() prints the counts and returns None, so its result is not bound.
pro_data.select("words").groupBy("words").count().show()
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 4,
"usage_type": "... |
15783422482 | # -*- coding: utf-8 -*-
"""
Created on Sat May 8 12:16:46 2021
@author: tamon
"""
import csv
import numpy as np
from scipy.interpolate import griddata
from scipy.interpolate import interp1d
import matplotlib.pyplot as plt
# Columns pulled from the FEA results export: per the axis labels below,
# row[12] is radius [mm], row[13] is angle [deg], row[20] is equivalent
# maximum stress [Pa].
radius = []
angles = []
points = []
result = []
with open('angrad.csv', newline='') as csvfile:
    readfile = csv.reader(csvfile, quotechar='|')
    for row in readfile:
        radius.append(row[12])
        angles.append(row[13])
        result.append(row[20])
# Drop the CSV header row from each column.
radius.pop(0)
angles.pop(0)
result.pop(0)
radius = [int(i) for i in radius]
angles = [int(i) for i in angles]
for i in range(len(radius)):
    points.append([angles[i], radius[i]])
result = [np.float64(i) for i in result]
# Dense (angle, radius) grid covering 10-90 deg x 30-240 mm for cubic
# interpolation of the scattered sample points.
xgrid, ygrid = np.mgrid[10:90:1000j, 30:240:1000j]
grid = griddata(points, result, (xgrid, ygrid), method='cubic')
fig = plt.figure()
ax = fig.add_subplot(111)
# Overlay the raw sample locations on the interpolated stress map.
ax.plot(angles, radius, 'k.', ms=1)
sp = ax.imshow(grid.T, cmap='jet', extent=(10, 90, 30, 240), origin='lower')
# Ratio of the axis ranges (80 deg by 210 mm) so the plot is square.
ax.set_aspect(80/(210))
ax.set_xlabel('Angle [deg]')
ax.set_ylabel('Radius [mm]')
clb = fig.colorbar(sp)
clb.set_label('Equivelant Maximum Stress [Pa]')
fig.savefig('angrad.pdf', format='pdf', bbox_inches='tight')
plt.show()
# Fixed-radius slices (radius = 30/60/90 mm) of the stress field as a
# function of sheet angle.  result is laid out angle-major: 8 radii per
# angle, so result[8*j + k] is the j-th angle at the k-th radius.
# (Removed the dead "j = 1" assignment and the triplicated slice/plot code.)
angs = np.linspace(10, 90, 9)
xnew = np.linspace(10, 90, 200)
for k, (color, radius_mm) in enumerate([('r', 30), ('b', 60), ('g', 90)]):
    angslice = [result[8 * j + k] for j in range(9)]
    f = interp1d(angs, angslice, kind='cubic')
    plt.plot(xnew, f(xnew), color, label='Radius={0} [mm]'.format(radius_mm))
plt.grid('major')
plt.legend(loc='lower right')
plt.xlabel('Angle [deg]')
plt.ylabel('Equivelant Maximum Stress [Pa]')
plt.savefig('angslice.pdf', format='pdf', bbox_inches='tight')
# angslice1 = []
# angslice2 = []
# angslice3 = []
# angs = np.linspace(10, 90, 9)
# j = 1
# for j in range(9):
# angslice1.append(result[8*j + 0])
# angslice2.append(result[8*j + 1])
# angslice3.append(result[8*j + 2])
# xnew = np.linspace(10, 90, 200)
# f1 = interp1d(angs, angslice1, kind='cubic')
# f2 = interp1d(angs, angslice2, kind='cubic')
# f3 = interp1d(angs, angslice3, kind='cubic')
# plt.plot(xnew, np.gradient(f1(xnew)), 'r', label='Radius=30 [mm]')
# plt.plot(xnew, np.gradient(f2(xnew)), 'b', label='Radius=60 [mm]')
# plt.plot(xnew, np.gradient(f3(xnew)), 'g', label='Radius=90 [mm]')
# plt.grid('major')
# plt.legend(loc='lower right')
# plt.xlabel('Angle [deg]')
# plt.ylabel('Equivelant Maximum Stress [Pa]')
# plt.savefig('angslice.pdf', format='pdf', bbox_inches='tight')
# Fixed-angle slices (10..70 deg in 10 deg steps) of the stress field as a
# function of radius; result[8*i : 8*(i+1)] is the i-th angle across the 8
# radii (30..240 mm).
rads = np.linspace(30, 240, 8)
xnew = np.linspace(30, 240, 200)
fig2 = plt.figure()
ax2 = plt.subplot(111)
# BUG FIX: these curves are fixed-*angle* slices plotted against radius,
# but the original legend labelled them "Radius=10..70 [mm]".
for i, color in enumerate(['r', 'b', 'g', 'y', 'orange', 'cyan', 'purple']):
    radslice = result[8 * i:8 * (i + 1)]
    f = interp1d(rads, radslice, kind='cubic')
    ax2.plot(xnew, f(xnew), color, label='Angle={0} [deg]'.format(10 * (i + 1)))
ax2.grid('major')
# Shrink the axes so the legend fits to the right of the plot.
chartBox = ax2.get_position()
ax2.set_position([chartBox.x0, chartBox.y0, chartBox.width*0.6, chartBox.height])
ax2.legend(loc='upper center', bbox_to_anchor=(1.4, 0.8), shadow=True, ncol=1)
ax2.set_xlabel('Radius [mm]')
ax2.set_ylabel('Equivelant Maximum Stress [Pa]')
fig2.savefig('radslice.pdf', format='pdf', bbox_inches='tight')
| Maselko/individual-project | Angrad.py | Angrad.py | py | 4,228 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.float64",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.mgrid",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "scipy.interpolate.griddata"... |
74120455144 | import pygame
from constantes import *
from auxiliar import Auxiliar
class Background:
    '''
    Static background image for a Pygame game.

    Attributes:
        image (pygame.Surface): the loaded and scaled background surface.
        rect (pygame.Rect): position/size rectangle used when blitting.
        collition_rect (pygame.Rect): rectangle outlined in DEBUG mode.
    '''
    def __init__(self, x, y, width, height, path):
        '''
        Load the image at *path*, scale it to (width, height) and place its
        top-left corner at (x, y).
        '''
        self.image = pygame.image.load(path).convert()
        self.image = pygame.transform.scale(self.image,(width,height))
        self.rect = self.image.get_rect()
        self.rect.x = x
        self.rect.y = y
        # BUG FIX: draw() references collition_rect when DEBUG is set, but
        # it was never assigned, raising AttributeError; mirror the blit rect.
        self.collition_rect = self.rect

    def draw(self,screen):
        '''
        Blit the background onto *screen*; outline its collision rect in red
        when DEBUG is enabled.
        '''
        screen.blit(self.image,self.rect)
        if(DEBUG):
            pygame.draw.rect(screen,color=ROJO,rect=self.collition_rect)
{
"api_name": "pygame.image.load",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "pygame.transform.scale",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pygame.tra... |
39055642818 | #!/usr/bin/env python
from __future__ import print_function
import os, sys, json
import numpy as np
from ase.build import bulk, surface
from ase.units import Rydberg, Bohr
from ase.io import read
from ase.visualize import view
from ase.spacegroup import crystal
from ase.calculators.espresso import Espresso
# Read run parameters from the JSON file given as the first CLI argument.
infile = sys.argv[1]
print(infile)
with open(infile) as handle:
    system = json.loads(handle.read())
# Surface and calculation parameters supplied by the input JSON.
face = system['face']
layers = system['layers']
vacuum = system['vacuum']
kpts = system['kpts']
ecut = system['ecut']
mode = system['mode']
cwd = os.getcwd()
def cassiterite(show=False):
    """Build the rutile-structure SnO2 (cassiterite) bulk crystal.

    Uses lattice parameters a=4.7382, c=3.1871 with spacegroup P 4 2/mnm.
    When *show* is True the structure is opened in the ASE viewer (the
    parameter was previously accepted but silently ignored; the default
    keeps the old behaviour).
    """
    a = 4.7382
    c = 3.1871
    sno2 = crystal(['Sn','O'], basis=[(0, 0, 0), (0.3, 0.3, 0.0)],
                   spacegroup='P 4 2/mnm', cellpar=[a,a,c,90,90,90],
                   pbc=True)
    if show:
        view(sno2)
    return sno2
def create_surface(atoms,face=(1,1,0),layers=3,vacuum=10.813,kpts=([6,3,1])):
mySurface = surface( atoms, face, layers)
mySurface.center(vacuum=vacuum, axis=2)
kpts = np.asarray(kpts)
return mySurface
sno2 = cassiterite()
sno2_surface = create_surface(sno2,
face=face,
layers=layers,
vacuum=vacuum,
kpts=kpts)
# Put together QE input dict
input_dict = {
'control': {
'calculation': 'scf',
'etot_conv_thr': 1e-6,
'nstep': 100,
'outdir': 'sno2_test_face_{0}{1}{2}'.format(face[0], face[1], face[2]),
},
'system': {
'ecutwfc': ecut,
},
'electrons': {
'diagonalization': 'cg',
},
}
# Put together pseudopotential dict
psp_dict = {'Sn': 'Sn.UPF',
'O': 'O.UPF',
}
calc = Espresso(input_data=input_dict,
kpts=kpts,
pseudo_dir=cwd + "/../pseudo",
pseudopotentials=psp_dict,
)
sno2_surface.set_calculator(calc)
if mode == 'view':
view(sno2_surface)
elif mode == 'calc':
calc.calculate(sno2_surface)
print('SnO2 PE:', sno2_surface.get_potential_energy())
| marshallmcdonnell/sno2_ase_espresso | surfaces/surfaces_sno2.py | surfaces_sno2.py | py | 2,092 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "ase.spacegroup.crystal",
"line... |
34450828117 | import pytest
from PyQt6.QtTest import QTest
from PyQt6.QtWidgets import QLineEdit
from pytestqt import qtbot
from main import OLXWork, OLXSettings
from PyQt6 import QtCore
def test_olxwork_button_stop_clicked(qtbot):
parent = OLXSettings()
widget = OLXWork(parent= parent)
widget.show()
qtbot.addWidget(widget)
assert widget.isVisible()
widget.start_timer()
qtbot.mouseClick(widget.button_stop, QtCore.Qt.MouseButton.LeftButton)
assert not widget.isVisible()
assert parent.isVisible()
def test_olxwork_animation_label(qtbot):
widget = OLXWork()
assert widget.in_progress_label.text() == "Program w trakcie pracy"
widget.animation_label_counter = 0
widget.animation_label()
assert widget.in_progress_label.text() == "Program w trakcie pracy."
for _ in range(3):
widget.animation_label()
assert widget.in_progress_label.text() == "Program w trakcie pracy"
def test_olxwork_start_timer(qtbot):
widget = OLXWork()
qtbot.addWidget(widget)
widget.start_timer()
assert widget.animation_label_timer.isActive()
assert widget.room_olx_timer.isActive()
def test_olxwork_stop_timer(qtbot):
widget = OLXWork()
qtbot.addWidget(widget)
widget.start_timer()
widget.stop_timer()
assert not widget.animation_label_timer.isActive()
assert not widget.room_olx_timer.isActive()
def test_olxwork_update_data_olxwork(qtbot):
widget = OLXWork()
qtbot.addWidget(widget)
widget.update_data_olxwork("http://something.pl","City")
assert widget.city == "City"
assert widget.url == "http://something.pl"
def test_olxsettings_button_start_clicked(qtbot):
widget = OLXSettings()
son = OLXWork(parent=widget)
widget.olx_work = son
qtbot.keyClicks(widget.city, "Zakopane")
widget.show()
assert widget.isVisible()
qtbot.addWidget(widget)
qtbot.mouseClick(widget.button_start,QtCore.Qt.MouseButton.LeftButton)
assert son.isVisible()
assert not widget.isVisible()
def test_olxsettings_check_city(qtbot):
widget = OLXSettings()
widget.city = QLineEdit()
widget.city.setText("Warszawa")
assert widget.check_city() == 0
def test_olxsettings_check_city_empty(qtbot):
widget = OLXSettings()
widget.city = QLineEdit()
widget.city.setText("")
assert widget.check_city() == 1
def test_olxsettings_check_city_invalid(qtbot):
widget = OLXSettings()
widget.city = QLineEdit()
widget.city.setText("InvalidCity")
assert widget.check_city() == 1
def test_olxsettings_main_window_type_index_changed_visible(qtbot):
widget = OLXSettings()
qtbot.addWidget(widget)
widget.show()
assert widget.type.currentIndex() == 0
assert not widget.rooms.isVisible()
assert not widget.rooms_label.isVisible()
assert widget.m_2_from.isVisible()
assert widget.m_2_to.isVisible()
qtbot.waitUntil(lambda: widget.type.count() > 0)
qtbot.mouseClick(widget.type, QtCore.Qt.MouseButton.LeftButton)
qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Down)
qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Return)
assert widget.rooms.isVisible()
assert widget.rooms_label.isVisible()
assert widget.m_2_from.isVisible()
assert widget.m_2_to.isVisible()
qtbot.waitUntil(lambda: widget.type.count() > 0)
qtbot.mouseClick(widget.type, QtCore.Qt.MouseButton.LeftButton)
qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Down)
qtbot.keyClick(widget.type, QtCore.Qt.Key.Key_Return)
assert not widget.rooms.isVisible()
assert not widget.rooms_label.isVisible()
assert not widget.m_2_from.isVisible()
assert not widget.m_2_to.isVisible() | Kandel269/OLXroom | test_main.py | test_main.py | py | 3,676 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "main.OLXSettings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "main.OLXWork",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytestqt.qtbot.addWidget",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pytestqt.qtbot... |
22353260658 | import torch
from .logger_utils import get_logger
import matplotlib.pyplot as plt
import numpy as np
import itertools
logger = get_logger()
def prediction(*, test_data, model, device):
"""Predict on test data and generate confusion matrix.
Args:
test_data (torch.utils.data.Dataset): Test dataset.
model (torch.nn.Module): Model.
device (str): Device (cpu or gpu or mps)
"""
num_classes = 10
class_names = [
"T-shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
confusion_matrix = torch.zeros(num_classes, num_classes)
with torch.no_grad():
for X, y in test_data:
X = X.unsqueeze(0).to(device)
y = torch.tensor([y]).to(device)
outputs = model(X)
_, predicted = torch.max(outputs.data, 1)
for t, p in zip(y.view(-1), predicted.view(-1)):
confusion_matrix[t.long(), p.long()] += 1
plt.figure(figsize=(10,10))
plt.imshow(confusion_matrix, interpolation='nearest', cmap=plt.cm.Blues)
plt.title('Confusion matrix')
plt.colorbar()
tick_marks = np.arange(len(class_names))
plt.xticks(tick_marks, class_names, rotation=45)
plt.yticks(tick_marks, class_names)
thresh = confusion_matrix.max() / 2.
for i, j in itertools.product(range(confusion_matrix.shape[0]), range(confusion_matrix.shape[1])):
plt.text(j, i, format(confusion_matrix[i, j], '.1f'),
horizontalalignment="center",
color="white" if confusion_matrix[i, j] > thresh else "black")
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.savefig('confusion_matrix.png')
logger.info('Confusion Matrix saved as confusion_matrix.png')
| abhijitramesh/hpc-demo | utils/prediction_utils.py | prediction_utils.py | py | 1,910 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logger_utils.get_logger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "torch.zeros",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "torch.no_grad",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.tensor",
"... |
11130997604 | import csv
import os
from flask import Blueprint, redirect, render_template, request
import sqlalchemy
from forms.addTeacherForm import AddTeacherForm
from models import GroupOfTeacher, Teacher, TeacherInGroup, db
from forms.editForm import EditTeacherForm
ROWS_PER_PAGE = 5
teachers_blueprint = Blueprint("teachers_blueprint", __name__)
@teachers_blueprint.route("/teachers", methods=["get", "post"])
def teachers():
page = request.args.get('page', 1, type=int)
teachers = Teacher.query.order_by(Teacher.last_name).paginate(page=page, per_page=ROWS_PER_PAGE)
addTeacherFormData = AddTeacherForm()
return render_template("teachers/teachers.html",
teachers=teachers,
form=addTeacherFormData)
add_teachers_blueprint = Blueprint("add_teachers_blueprint", __name__)
@add_teachers_blueprint.route("/teachers/add", methods=["get", "post"])
def add_teachers():
addTeacherFormData = AddTeacherForm()
# teachers = db.session.query(Teacher).all()
if True: # request.method == "POST":
if addTeacherFormData.validate_on_submit():
teacherData = Teacher()
teacherData.first_name = addTeacherFormData.first_name.data
teacherData.last_name = addTeacherFormData.last_name.data
teacherData.birthdate = addTeacherFormData.birthdate.data
# teacherData.teacher_in_group_Id = addTeacherFormData.teacher_in_group_Id.data
db.session.add(teacherData)
db.session.commit()
return redirect("/teachers")
else:
return render_template("teachers/addTeacherForm.html",
form=addTeacherFormData)
show_edit_teachers_blueprint = Blueprint(
"show_edit_teachers_blueprint", __name__)
@show_edit_teachers_blueprint.route("/teachers/edit")
def show_edit_teachers():
editTeacherFormData = EditTeacherForm()
global current_data
current_data = editTeacherFormData
# itemId auslesen
teacher_Id = request.args["teacher_Id"]
# Item laden
teacher_to_edit = db.session.query(Teacher).filter(
Teacher.teacher_Id == teacher_Id).first()
# Form befüllen
editTeacherFormData.teacher_Id.data = teacher_Id
editTeacherFormData.first_name.data = teacher_to_edit.first_name
editTeacherFormData.last_name.data = teacher_to_edit.last_name
editTeacherFormData.birthdate.data = teacher_to_edit.birthdate
# editTeacherFormData.teacher_in_group_Id.data = teacher_to_edit.teacher_in_group_Id
group_of_teachers = db.session.query(GroupOfTeacher).filter(
sqlalchemy.and_(TeacherInGroup.group_of_teachers_Id == GroupOfTeacher.group_of_teachers_Id,
TeacherInGroup.teacher_Id == teacher_Id))
return render_template("/teachers/editTeacherForm.html", form=editTeacherFormData,
group_of_teachers=group_of_teachers)
submit_edit_teachers_blueprint = Blueprint(
"submit_edit_teachers_blueprint", __name__)
@submit_edit_teachers_blueprint.route("/teachers/edit", methods=["post"])
def submit_edit_teachers():
editTeacherFormData = EditTeacherForm()
create_report_file(editTeacherFormData)
if editTeacherFormData.validate_on_submit():
# daten aus Form auslesen
teacher_Id = editTeacherFormData.teacher_Id.data
teacher_to_edit = db.session.query(Teacher).filter(
Teacher.teacher_Id == teacher_Id).first()
# daten mit update in DB speichern
teacher_to_edit.first_name = editTeacherFormData.first_name.data
teacher_to_edit.last_name = editTeacherFormData.last_name.data
teacher_to_edit.birthdate = editTeacherFormData.birthdate.data
# teacher_to_edit.teacher_in_group_Id = editTeacherFormData.teacher_in_group_Id.data
db.session.commit()
return redirect("/teachers")
else:
raise("Fatal Error")
def create_report_file(teacher_data):
header = ["Data", "Previous Data", "New Data"]
teacher_id = ["Teacher ID", current_data.teacher_Id.data,
teacher_data.teacher_Id.data]
first_name = ["First Name", current_data.first_name.data,
teacher_data.first_name.data]
last_name = ["Last Name", current_data.last_name.data,
teacher_data.last_name.data]
birthdate = ["Birthdate", current_data.birthdate.data,
teacher_data.birthdate.data]
i = 0
while os.path.exists("TeacherDataEdit%s.csv" % i):
i += 1
f = open(f"TeacherDataEdit{i}.csv", "w")
writer = csv.writer(f)
writer.writerow(header)
writer.writerow(teacher_id)
writer.writerow(first_name)
writer.writerow(last_name)
writer.writerow(birthdate)
| IngNoN/School_App | controllers/teachers.py | teachers.py | py | 4,762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.... |
16154091028 | from django.contrib import admin
from .models import InstrumentItem
# Register your models here.
class InstrumentItemAdmin(admin.ModelAdmin):
search_fields = ['definition']
list_filter = ['instrument']
list_display = ['definition','instrument','discrimination','difficulty','guessing','upper_asymptote']
admin.site.register(InstrumentItem, InstrumentItemAdmin)
| langcog/web-cdi | webcdi/cdi_forms/cat_forms/admin.py | admin.py | py | 375 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.contrib.admin.ModelAdmin",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.contrib.admin.site.register",
"line_number": 10,
"usage_type": "call"
},... |
15065246438 | import asyncio
import base64
import collections
import json
import struct
import sys
import aiohttp
import pytest
import six
from pytest_httpserver import RequestHandler
import consul
import consul.aio
Check = consul.Check
@pytest.fixture
def local_server(httpserver):
from pytest_httpserver import RequestHandler
handler = httpserver.expect_request('/v1/agent/services')
assert isinstance(handler, RequestHandler)
handler.respond_with_data(json.dumps({"foo": "bar"}), status=599)
port = httpserver.port
LocalServer = collections.namedtuple('LocalServer', ['port'])
yield LocalServer(port)
httpserver.stop()
@pytest.fixture
async def local_timeout_server(httpserver):
async def func():
return json.dumps({"foo": "bar"})
handler = httpserver.expect_request('/v1/agent/services')
assert isinstance(handler, RequestHandler)
handler.respond_with_data(await func(), status=200)
LocalServer = collections.namedtuple('LocalServer', ['port', 'server'])
return LocalServer(httpserver.port, httpserver)
@pytest.fixture
def loop(request):
asyncio.set_event_loop(None)
loop = asyncio.new_event_loop()
def fin():
loop.close()
request.addfinalizer(fin)
return loop
class TestAsyncioConsul(object):
def test_kv(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
index, data = await c.kv.get('foo')
assert data is None
response = await c.kv.put('foo', 'bar')
assert response is True
response = await c.kv.put('foo-2', 'bar')
assert response is True
index, data = await c.kv.get('foo')
assert data['Value'] == six.b('bar')
loop.run_until_complete(main())
def test_consul_ctor(self, loop, consul_port):
# same as previous but with global event loop
async def main():
c = consul.aio.Consul(port=consul_port)
assert c._loop is loop
await c.kv.put('foo', struct.pack('i', 1000))
index, data = await c.kv.get('foo')
assert struct.unpack('i', data['Value']) == (1000,)
asyncio.set_event_loop(loop)
loop.run_until_complete(main())
def test_kv_binary(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
await c.kv.put('foo', struct.pack('i', 1000))
index, data = await c.kv.get('foo')
assert struct.unpack('i', data['Value']) == (1000,)
loop.run_until_complete(main())
def test_kv_missing(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
fut = asyncio.ensure_future(put(), loop=loop)
await c.kv.put('index', 'bump')
index, data = await c.kv.get('foo')
assert data is None
index, data = await c.kv.get('foo', index=index)
assert data['Value'] == six.b('bar')
await fut
async def put():
c = consul.aio.Consul(port=consul_port, loop=loop)
await asyncio.sleep(2.0 / 100, loop=loop)
await c.kv.put('foo', 'bar')
loop.run_until_complete(main())
def test_kv_put_flags(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
await c.kv.put('foo', 'bar')
index, data = await c.kv.get('foo')
assert data['Flags'] == 0
response = await c.kv.put('foo', 'bar', flags=50)
assert response is True
index, data = await c.kv.get('foo')
assert data['Flags'] == 50
loop.run_until_complete(main())
def test_kv_delete(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
await c.kv.put('foo1', '1')
await c.kv.put('foo2', '2')
await c.kv.put('foo3', '3')
index, data = await c.kv.get('foo', recurse=True)
assert [x['Key'] for x in data] == ['foo1', 'foo2', 'foo3']
response = await c.kv.delete('foo2')
assert response is True
index, data = await c.kv.get('foo', recurse=True)
assert [x['Key'] for x in data] == ['foo1', 'foo3']
response = await c.kv.delete('foo', recurse=True)
assert response is True
index, data = await c.kv.get('foo', recurse=True)
assert data is None
loop.run_until_complete(main())
def test_kv_subscribe(self, loop, consul_port):
async def get():
c = consul.aio.Consul(port=consul_port, loop=loop)
fut = asyncio.ensure_future(put(), loop=loop)
index, data = await c.kv.get('foo')
assert data is None
index, data = await c.kv.get('foo', index=index)
assert data['Value'] == six.b('bar')
await fut
async def put():
c = consul.aio.Consul(port=consul_port, loop=loop)
await asyncio.sleep(1.0 / 100, loop=loop)
response = await c.kv.put('foo', 'bar')
assert response is True
loop.run_until_complete(get())
def test_transaction(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
value = base64.b64encode(b"1").decode("utf8")
d = {"KV": {"Verb": "set", "Key": "asdf", "Value": value}}
r = await c.txn.put([d])
assert r["Errors"] is None
d = {"KV": {"Verb": "get", "Key": "asdf"}}
r = await c.txn.put([d])
assert r["Results"][0]["KV"]["Value"] == value
loop.run_until_complete(main())
def test_agent_services(self, loop, consul_port):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
services = await c.agent.services()
assert services == {}
response = await c.agent.service.register('foo')
assert response is True
services = await c.agent.services()
assert services == {
'foo': {'ID': 'foo',
'Service': 'foo',
'Tags': [],
'Meta': {},
'Port': 0,
'Address': '',
'Weights': {'Passing': 1, 'Warning': 1},
'EnableTagOverride': False}, }
response = await c.agent.service.deregister('foo')
assert response is True
services = await c.agent.services()
assert services == {}
loop.run_until_complete(main())
def test_catalog(self, loop, consul_port):
async def nodes():
c = consul.aio.Consul(port=consul_port, loop=loop)
fut = asyncio.ensure_future(register(), loop=loop)
index, nodes = await c.catalog.nodes()
assert len(nodes) == 1
current = nodes[0]
index, nodes = await c.catalog.nodes(index=index)
nodes.remove(current)
assert [x['Node'] for x in nodes] == ['n1']
index, nodes = await c.catalog.nodes(index=index)
nodes.remove(current)
assert [x['Node'] for x in nodes] == []
await fut
async def register():
c = consul.aio.Consul(port=consul_port, loop=loop)
await asyncio.sleep(1.0 / 100, loop=loop)
response = await c.catalog.register('n1', '10.1.10.11')
assert response is True
await asyncio.sleep(50 / 1000.0, loop=loop)
response = await c.catalog.deregister('n1')
assert response is True
loop.run_until_complete(nodes())
def test_session(self, loop, consul_port):
async def monitor():
c = consul.aio.Consul(port=consul_port, loop=loop)
fut = asyncio.ensure_future(register(), loop=loop)
index, services = await c.session.list()
assert services == []
await asyncio.sleep(20 / 1000.0, loop=loop)
index, services = await c.session.list(index=index)
assert len(services)
index, services = await c.session.list(index=index)
assert services == []
await fut
async def register():
c = consul.aio.Consul(port=consul_port, loop=loop)
await asyncio.sleep(1.0 / 100, loop=loop)
session_id = await c.session.create()
await asyncio.sleep(50 / 1000.0, loop=loop)
response = await c.session.destroy(session_id)
assert response is True
loop.run_until_complete(monitor())
@pytest.mark.skipif(sys.version_info < (3, 4, 1),
reason="Python <3.4.1 doesnt support __del__ calls "
"from GC")
def test_httpclient__del__method(self, loop, consul_port, recwarn):
async def main():
c = consul.aio.Consul(port=consul_port, loop=loop)
_, _ = await c.kv.get('foo')
del c
import gc
# run gc to ensure c is collected
gc.collect()
w = recwarn.pop(ResourceWarning)
assert issubclass(w.category, ResourceWarning)
loop.run_until_complete(main())
def test_root(self, loop, local_server):
async def test_timeout():
time_out = False
c = consul.aio.Consul(port=local_server.port, loop=loop)
try:
await c.agent.services()
except consul.Timeout:
time_out = True
assert time_out
loop.run_until_complete(test_timeout())
def test_http_session(self, loop, local_timeout_server, consul_port):
async def test_session_close():
http_server = await local_timeout_server
c = consul.aio.Consul(port=http_server.port, loop=loop)
c.agent.services()
c.http._session = aiohttp.ClientSession()
assert not c.http._session.closed
c.http.__del__()
await c.http.close()
assert c.http._session.closed
http_server.server.stop()
...
loop.run_until_complete(test_session_close())
| poppyred/python-consul2 | tests/test_aio.py | test_aio.py | py | 10,482 | python | en | code | 125 | github-code | 36 | [
{
"api_name": "consul.Check",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pytest_httpserver.RequestHandler",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "json.dumps",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "col... |
30055311792 | from collections import defaultdict
import os
import pandas as pd
path = '/home/djf/djf/POI/CDRF/data/Foursquare_NYC.txt'
dic = defaultdict(int)
f = open(path, 'r')
lines = f.readlines()
for line in lines:
user, t, lat, lon, POI = line.strip().split('\t')
dic[int(POI)] += 1
counts = [item[1] for item in dic.items()]
counts = sorted(counts)
with open('counts.txt', 'w') as f:
for item in counts[:-10]:
f.write('{}\n'.format(item))
| Mediocre250/CTMR | long_tail.py | long_tail.py | py | 462 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 8,
"usage_type": "call"
}
] |
6410410688 | import random
import os
import argparse
from typing import DefaultDict
# from paper "Learning Unknown from Correlations:
# Graph Neural Network for Inter-novel-protein Interaction Prediction"
standard_acids = [
('A', 1), ('C', 6), ('D', 5), ('E', 7), ('F', 2),
('G', 1), ('H', 4), ('I', 2), ('K', 5), ('L', 2),
('M', 3), ('N', 4), ('P', 2), ('Q', 7), ('R', 4),
('S', 3), ('T', 3), ('V', 1), ('W', 4), ('Y', 3)]
class PPI:
def __init__(self, fst_pro, sec_pro, label=1):
self.fst_pro = fst_pro
self.sec_pro = sec_pro
self.label = label
assert label == 1 or label == 0
def __eq__(self, __o: object) -> bool:
if (self.fst_pro == __o.fst_pro and self.sec_pro == __o.sec_pro) or \
(self.fst_pro == __o.sec_pro and self.sec_pro == __o.fst_pro):
return True
else:
return False
def __str__(self) -> str:
return f"{self.fst_pro}\t{self.sec_pro}\t{str(self.label)}\n"
def handle(pairfp, fastafp):
with open(pairfp, "r") as f:
ppi_lines = f.readlines()
with open(fastafp, "r") as f:
seq_lines = f.readlines()
# 存储正样本邻居,tsv文件中负样本总在最后
pneighbors = DefaultDict(set)
ppis = []
proteins = set()
for ppiline in ppi_lines:
fpro, spro, label = ppiline.split()
proteins.update((fpro, spro))
label = int(float(label))
if label == 0:
if spro in pneighbors[fpro] or fpro in pneighbors[spro]:
# 标签冲突,作为正样本
continue
elif label == 1:
pneighbors[fpro].add(spro)
pneighbors[spro].add(fpro)
else:
raise ValueError
ppis.append(PPI(fpro, spro, label))
acid_seqs = {}
for idx in range(0, len(seq_lines), 2):
key = seq_lines[idx].strip()[1:].strip()
value = seq_lines[idx+1].strip()
if key in proteins:
acid_seqs[key] = value
assert len(acid_seqs) == len(proteins)
return ppis, acid_seqs
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("--pair_dir", type=str, default="./data/dscript/data/pairs")
parser.add_argument("--seq_dir", type=str, default="./data/dscript/data/seqs")
parser.add_argument('--processed_dir', type=str, default="./data/dscript/processed")
# 过滤样本的参数
parser.add_argument('--max_length', type=int, default=800)
parser.add_argument('--min_length', type=int, default=50)
args = parser.parse_args()
if not os.path.exists(args.processed_dir):
os.mkdir(args.processed_dir)
os.mkdir(os.path.join(args.processed_dir, "pairs"))
os.mkdir(os.path.join(args.processed_dir, "seqs"))
pair_fns = os.listdir(args.pair_dir)
for pairfn in pair_fns:
organism = pairfn.split("_")[0].strip()
pairfp = os.path.join(args.pair_dir, pairfn)
fastafp = os.path.join(args.seq_dir, organism + ".fasta")
ppis, acid_seqs = handle(pairfp, fastafp)
# 被丢弃的蛋白质
dropout_proteins = {"Too short": [], "Too long": [],} # "With non-standard acids": []}
# 蛋白质长度
protein_lengths = []
for pro, acid_seq in acid_seqs.items():
# 筛选蛋白质
qualified = False
if len(acid_seq) < args.min_length:
dropout_proteins['Too short'].append(pro)
elif len(acid_seq) > args.max_length:
dropout_proteins['Too long'].append(pro)
# elif len(set(list(acid_seq)) - set([acid[0] for acid in standard_acids])) > 0:
# dropout_proteins['With non-standard acids'].append(pro)
else:
qualified = True
if qualified:
protein_lengths.append((pro, len(acid_seq)))
# 输出蛋白质的数量等信息
if sum([len(value) for value in dropout_proteins.values()]) > 0:
print(f"============{pairfn.split('.')[0]} Dataset Filter============")
print(f"Total {len(acid_seqs)} proteins.")
print(f"\tFilter {len(dropout_proteins['Too short'])} because they are too short.")
print(f"\tFilter {len(dropout_proteins['Too long'])} because they are too long.")
# print(f"\tFilter {len(dropout_proteins['With non-standard acids'])} because they have non-standard acids.")
# 集合过滤掉的蛋白质
dropout_proteins = set(dropout_proteins['Too long'] + dropout_proteins['Too short'])
# + dropout_proteins['With non-standard acids'])
# 删除不合格的蛋白质和ppi
ppis = [ppi for ppi in ppis if ppi.fst_pro not in dropout_proteins and ppi.sec_pro not in dropout_proteins]
acid_seqs = {key: value for key, value in acid_seqs.items() if key not in dropout_proteins}
with open(os.path.join(args.processed_dir, "pairs", pairfn), "w") as f:
f.writelines([str(ppi) for ppi in ppis])
with open(os.path.join(args.processed_dir, "seqs", pairfn.split(".")[0] + ".fasta"), "w") as f:
f.writelines([f"{pro}\t{sequence}\n" for pro, sequence in acid_seqs.items()])
# print statistics
print(f"============{pairfn.split('.')[0]} Dataset Statistics============")
print(f'Total {len(ppis)} positive samples:')
print(f'\t Positive: {len([ppi for ppi in ppis if ppi.label == 1])}')
print(f'\t Negative: {len([ppi for ppi in ppis if ppi.label == 0])}')
print(f"Total {len(acid_seqs)} Proteins:" )
print(f"\tMax length of protein: {max([pro[1] for pro in protein_lengths])}")
print(f"\tMin length of protein: {min([pro[1] for pro in protein_lengths])}")
print(f"\tAvg length of protein: {round(sum([pro[1] for pro in protein_lengths])/len(protein_lengths), 3)}")
| LtECoD/PPITrans | data/dscript/builddataset.py | builddataset.py | py | 5,962 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "typing.DefaultDict",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 79,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
10423506179 | # coding: utf-8
import xlrd
def getProgramList(filepath):
program_list = dict()
program_list[u'网络剧'] = []
program_list[u'网络电影'] = []
program_list[u'网络综艺'] = []
data = xlrd.open_workbook(filepath)
table = data.sheet_by_name(u'网络剧')
nrows = table.nrows
ncols = table.ncols
for i in range(2, nrows):
one_program = dict()
one_program[u'节目名称'] = table.cell(i, 1).value
one_program[u'播出网站'] = table.cell(i, 3).value
program_list[u'网络剧'].append(one_program)
table = data.sheet_by_name(u'网络电影')
nrows = table.nrows
ncols = table.ncols
for i in range(2, nrows):
one_program = dict()
one_program[u'节目名称'] = table.cell(i, 1).value
one_program[u'播出网站'] = table.cell(i, 3).value
program_list[u'网络电影'].append(one_program)
table = data.sheet_by_name(u'网络综艺')
nrows = table.nrows
ncols = table.ncols
for i in range(2, nrows):
one_program = dict()
one_program[u'节目名称'] = table.cell(i, 1).value
one_program[u'播出网站'] = table.cell(i, 3).value
program_list[u'网络综艺'].append(one_program)
return program_list
def get_next_keyword(filepath, idx):
data = xlrd.open_workbook(filepath)
table = data.sheet_by_name('Sheet1')
nrows = table.nrows
ncols = table.ncols
return table.cell(idx, 0).value
if __name__ == "__main__":
program_list = getProgramList(u"征片情况汇总0213.xlsx")
print(program_list)
| LayneIns/CrawlerProject | crawl2/dataHelper/fetchProgram.py | fetchProgram.py | py | 1,465 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 45,
"usage_type": "call"
}
] |
37213151621 | #!/usr/bin/env python3
import argparse
from pprint import pprint
import json
import hlib
def check_temp(cl, device_id):
device_name = None
# first, try and find the device as a device
url = f"/clip/v2/resource/device/{device_id}"
resp = cl.get(url)
if resp.status_code == 200:
data = resp.json()
device = data['data'][0]
# see if the device has a temperature device
temp_id = None
services = device.get('services', [])
for service in services:
if service['rtype'] == 'temperature':
temp_id = service['rid']
break
if temp_id is None:
print("Device has no temperature sensor")
for service in services:
print(f"- {service['rtype']}")
return None, None
device_name = device['metadata']['name']
device_id = temp_id
# now get the temperature
url = f"/clip/v2/resource/temperature/{device_id}"
resp = cl.get(url)
if resp.status_code != 200:
print(f"Request failed with {resp.status_code} {resp.reason}")
return None
data = resp.json()
device = data['data'][0]
temp = device['temperature']['temperature']
return float(temp), device_name
def main():
parser = argparse.ArgumentParser()
parser.add_argument('device_id', help='id of the device to query', type=str, nargs='+')
args = parser.parse_args()
bridge = hlib.find_bridge()
if bridge is None:
print("Error: failed to locate a bridge")
return
cfg = hlib.load_config()
cl = hlib.new_client(bridge, cfg['user_name'])
print("Temperatures:")
for idx, device_id in enumerate(args.device_id):
temp, device_name = check_temp(cl, device_id)
if temp is None:
continue
if device_name is None:
print(f"{idx:02d} {temp}")
else:
print(f"{idx:02d} {temp} at {device_name}")
# check for pushover configuratin
# token = cfg.get('pushover_token', None)
# clients = cfg.get('pushover_clients', None)
# if token and clients:
# messages = []
# if len(red):
# messages.append("RED: incorrectly configured lights")
# for light_id, light_name in red.items():
# messages.append(f" - {light_id} {light_name}")
#
# if len(green):
# messages.append("GREEN: correctly configured lights")
# for light_id, light_name in green.items():
# messages.append(f" - {light_id} {light_name}")
#
# if len(red):
# title = "RED: incorrectly configured lights"
# else:
# title = "GREEN: correctly configured lights"
#
# message = "\n".join(messages)
# hlib.send_message(token, clients, message, title)
if __name__ == "__main__":
main()
| parlaynu/hue-utilities | bin/check-temp.py | check-temp.py | py | 2,946 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "hlib.find_bridge",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "hlib.load_config",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "hlib.new_cl... |
41366597699 | import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
def MyPlotSO(CompMethod,trials,MaxFuncEvals,method,problem):
##% Plot
figcount=0
Convergence = np.zeros((int(CompMethod.shape[0]/trials), MaxFuncEvals))
count=-1
for i in range(0,int(CompMethod.shape[0]),trials):
count+=1
for j in range(trials):
a=np.array(CompMethod[i+j,4])
b=np.array(Convergence[count,:])
Convergence[count,:] = a + b
Convergence=problem['Max/Min']*Convergence
Convergence= Convergence / trials
##% Convergence curves
PlotConvergence(Convergence,method)
figcount+=1
savefigures(figcount,plt)
#savefigures(plots)
##% Violin Plots
for i in range(2,4):
#print(i)
vec = CompMethod[:, i]
matrix = vec.reshape((int(CompMethod.shape[0]/trials)),trials)
matrix=matrix.T
ViolinPlotting(matrix,trials,method)
figcount+=1
savefigures(figcount,plt)
return
def PlotConvergence(Convergence,method):
    """Draw one convergence curve per method on a shared, labelled axis.

    Convergence: 2-D array, one averaged convergence history per row.
    method: legend label for each row.
    Returns the pyplot module so callers can save the active figure.
    """
    # Fixed per-curve styling; row k of Convergence uses entry k of each list.
    palette = ['red', 'blue', 'green', 'orange', 'purple']
    dashes = ['-', '--', '-.', ':', '-']
    #markers = ['o', 's', 'D', '*', '^']
    fig, axis = plt.subplots()
    for row in range(Convergence.shape[0]):
        axis.plot(Convergence[row], color=palette[row], linestyle=dashes[row], label=method[row])
    # Decorate the axis: legend, grid, titles and axis labels.
    axis.legend()
    axis.grid()
    axis.set_title('Convergence characteristic curves')
    axis.set_xlabel('Function evaluations')
    axis.set_ylabel('Objective function value')
    # Display the plot
    #ax.show()
    return plt
def ViolinPlotting(matrix,trials,method):
    """Draw horizontal violin + strip plots, one violin per method column.

    matrix: (trials, methods) array of scalar outcomes.
    trials: unused here; kept for interface compatibility with callers.
    method: column names, used as violin labels.
    Returns the pyplot module so callers can save the active figure.
    """
    frame = pd.DataFrame(matrix, columns=method)
    sns.set_context("talk", font_scale=1)
    # Height grows with the number of methods so violins do not overlap.
    figure_height = 2 * matrix.shape[1]
    plt.figure(figsize=(7, figure_height))
    plt.grid()
    sns.violinplot(data=frame, palette='pastel', bw=.5, orient="h")
    sns.stripplot(data=frame, color="black", edgecolor="gray", orient="h")
    return plt
def plot_3d(func, points_by_dim = 50, title = '', bounds = None, cmap = 'twilight', plot_surface = True, plot_heatmap = True):
    """
    Plots function surface and/or heatmap
    Parameters
    ----------
    func : class callable object
        Object which can be called as function.
    points_by_dim : int, optional
        points for each dimension of plotting (50x50, 100x100...). The default is 50.
    title : str, optional
        title of plot with LaTeX notation. The default is ''.
    bounds : tuple, optional
        space bounds with structure (xmin, xmax, ymin, ymax). The default is None.
    cmap : str, optional
        color map of plot. The default is 'twilight'.
    plot_surface : boolean, optional
        plot 3D surface. The default is True.
    plot_heatmap : boolean, optional
        plot 2D heatmap. The default is True.
    Returns
    -------
    matplotlib.figure.Figure with the requested subplot(s).
    """
    # NOTE: the docstring must precede this import to be a real docstring.
    from matplotlib.ticker import MaxNLocator, LinearLocator
    assert (plot_surface or plot_heatmap), "should be plotted at least surface or heatmap!"
    xmin, xmax, ymin, ymax = bounds
    x = np.linspace(xmin, xmax, points_by_dim)
    y = np.linspace(ymin, ymax, points_by_dim)
    a, b = np.meshgrid(x, y)
    # Evaluate func on the grid point-by-point (func takes a 2-vector).
    data = np.empty((points_by_dim, points_by_dim))
    for i in range(points_by_dim):
        for j in range(points_by_dim):
            data[i,j] = func(np.array([x[i], y[j]]))
    # Transpose the meshes so axis 0 of `data` lines up with the x axis.
    a = a.T
    b = b.T
    l_a, r_a, l_b, r_b = xmin, xmax, ymin, ymax
    l_c, r_c = data.min(), data.max()
    levels = MaxNLocator(nbins=15).tick_values(l_c,r_c)
    if plot_heatmap and plot_surface:
        fig = plt.figure(figsize=(16, 6))
        ax1 = fig.add_subplot(1,2,1)
        ax2 = fig.add_subplot(1,2,2, projection='3d')
    else:
        fig = plt.figure()
        if plot_heatmap:
            ax1 = fig.gca()
        else:
            # Figure.gca() no longer accepts a projection kwarg (removed in
            # Matplotlib 3.6); add_subplot is the supported way to get 3D axes.
            ax2 = fig.add_subplot(projection='3d')
    #title = r"$\bf{" + title+ r"}$"
    #min_title = title[::]
    def base_plot():
        # Filled-contour heatmap with a shared colorbar.
        c = ax1.contourf(a, b, data , cmap=cmap, levels = levels, vmin=l_c, vmax=r_c)
        name = title
        ax1.set_title( name, fontsize = 15)
        ax1.axis([l_a, r_a, l_b, r_b])
        fig.colorbar(c)
    if plot_surface:
        # Plot the surface.
        surf = ax2.plot_surface(a, b, data, cmap = cmap, linewidth=0, antialiased=False)
        ax2.contour(a, b, data, zdir='z', levels=30, offset=np.min(data), cmap=cmap)
        # Customize the z axis.
        ax2.set_xlabel('1st dim', fontsize=15)
        ax2.set_ylabel('2nd dim', fontsize=15)
        #ax2.set_zlabel('second dim', fontsize=10)
        ax2.set_zlim(l_c, r_c)
        ax2.zaxis.set_major_locator(LinearLocator(5))
        #ax2.zaxis.set_major_formatter(FormatStrFormatter('%.2f'))
        ax2.tick_params(axis='z', pad=10)
        # Add a color bar which maps values to colors.
        if not plot_heatmap: fig.colorbar(surf)#, shrink=0.5, aspect=5)
        ax2.contour(a, b, data, zdir='z', offset=0, cmap = cmap)
        ax2.view_init(30, 50)
        #ax2.set_title( min_title , fontsize = 15, loc = 'right')
    if plot_heatmap: base_plot()
    fig.tight_layout()
    #if save_as != None:
    #    plt.savefig(save_as, dpi = 900)
    plt.show()
    return fig
def savefigures(figcount, plt):
    """Save the current figure of `plt` as results/figure_<figcount>.png.

    figcount: running figure index used in the output file name.
    plt: the pyplot module (or any object exposing a compatible savefig).
    """
    # exist_ok avoids the race between a separate exists() check and makedirs()
    # and is a no-op when the directory is already there.
    os.makedirs('results', exist_ok=True)
    plt.savefig('results/figure_{}.png'.format(figcount), dpi=900, bbox_inches="tight")
return | KZervoudakis/Mayfly-Optimization-Algorithm-Python | plotting.py | plotting.py | py | 5,858 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_num... |
15130917408 | # %%
# Notebook-style cells (# %% markers) exercising the local `ekonspacing.py`
# dataset-loading script against the small pre-split text files.
from pprint import pprint
from datasets import load_dataset
# Relative paths to the splits of the "small" configuration.
test_file = "../data/ekonspacing/test_small.txt"
val_file = "../data/ekonspacing/val_small.txt"
train_file = "../data/ekonspacing/train_small.txt"
# %%
# force_redownload bypasses the HF datasets cache so edits to the
# loading script take effect on every run.
dataset = load_dataset(
    "ekonspacing.py",
    name="small",
    data_files={"train": str(train_file), "validation": str(val_file), "test": str(test_file)},
    download_mode="force_redownload",
)
print(dataset)
# %%
# Spot-check the first record of the train and test splits.
pprint(dataset["train"][0])
pprint(dataset["test"][0])
# %%
| entelecheia/transformer-datasets | datasets/ekonspacing/ekonspacing_test.py | ekonspacing_test.py | py | 500 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datasets.load_dataset",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pprint.pprint",
"line_number": 20,
"usage_type": "call"
}
] |
17079442964 | import logging
from .html import extract_html_text
from .pdf import extract_pdf_text
logger = logging.getLogger(__name__)
def extract_text(file_path: str) -> str:
    """Extract text from any kind of file as long as it's html or pdf.

    Returns the extracted text, or '' on any failure (including an
    unsupported extension) so results can be pipelined safely.
    """
    # Map supported extensions to their extractor; unknown types raise below.
    extractors = {'.html': extract_html_text, '.pdf': extract_pdf_text}
    try:
        for suffix, extractor in extractors.items():
            if file_path.endswith(suffix):
                return extractor(file_path)
        raise ValueError(f'Unknown file type {file_path}')
    except Exception as an_exception:  # pylint: disable=W0718
        logger.warning(f'Failed to extract {file_path}: {an_exception}')
        return ''  # don't return None or else you can't pipeline this
| amy-langley/tracking-trans-hate-bills | lib/util/misc.py | misc.py | py | 691 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "html.extract_html_text",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pdf.extract_pdf_text",
"line_number": 17,
"usage_type": "call"
}
] |
71038521063 | import numpy as np
try:
import mc
except Exception:
pass
import cv2
import os
from PIL import Image
import torch
from torch.utils.data import Dataset
import torchvision.transforms as transforms
import utils
from . import reader
class PartialCompDataset(Dataset):
    """Dataset for partial-completion training.

    Each item pairs an instance's modal mask with a randomly chosen
    occluder ("eraser") mask; the network must recover the full modal
    mask from the erased one.  Returns (rgb, erased_modal, eraser, target).
    """

    def __init__(self, config, phase):
        # `phase` ('train'/'val'/...) selects which annotation files and
        # image roots are read out of `config`.
        self.dataset = config['dataset']
        if self.dataset == 'COCOA':
            self.data_reader = reader.COCOADataset(config['{}_annot_file'.format(phase)])
        elif self.dataset == 'Mapillary':
            self.data_reader = reader.MapillaryDataset(
                config['{}_root'.format(phase)], config['{}_annot_file'.format(phase)])
        else:
            self.data_reader = reader.KINSLVISDataset(
                self.dataset, config['{}_annot_file'.format(phase)])
        if config['load_rgb']:
            self.img_transform = transforms.Compose([
                transforms.Normalize(config['data_mean'], config['data_std'])
            ])
        self.eraser_setter = utils.EraserSetter(config['eraser_setter'])
        self.sz = config['input_size']
        self.eraser_front_prob = config['eraser_front_prob']
        self.phase = phase
        self.config = config
        self.memcached = config.get('memcached', False)
        self.initialized = False
        self.memcached_client = config.get('memcached_client', None)

    def __len__(self):
        return self.data_reader.get_instance_length()

    def _init_memcached(self):
        # Lazily connect once per worker process.
        if not self.initialized:
            assert self.memcached_client is not None, "Please specify the path of your memcached_client"
            server_list_config_file = "{}/server_list.conf".format(self.memcached_client)
            client_config_file = "{}/client.conf".format(self.memcached_client)
            self.mclient = mc.MemcachedClient.GetInstance(server_list_config_file, client_config_file)
            self.initialized = True

    def _load_image(self, fn):
        """Load an RGB PIL image, from memcached when enabled, else from disk."""
        if self.memcached:
            try:
                img_value = mc.pyvector()
                self.mclient.Get(fn, img_value)
                img_value_str = mc.ConvertBuffer(img_value)
                img = utils.pil_loader(img_value_str)
            except Exception:
                # Was a bare `except:`, which would also swallow
                # KeyboardInterrupt/SystemExit; narrow to Exception.
                print('Read image failed ({})'.format(fn))
                raise Exception("Exit")
            else:
                return img
        else:
            return Image.open(fn).convert('RGB')

    def _get_inst(self, idx, load_rgb=False, randshift=False):
        """Crop one instance's modal mask (and optionally its RGB patch).

        Resamples a random instance when the box is tiny or the mask empty.
        Returns (modal, category, rgb-or-None), all resized to self.sz.
        """
        modal, bbox, category, imgfn, _ = self.data_reader.get_instance(idx)
        centerx = bbox[0] + bbox[2] / 2.
        centery = bbox[1] + bbox[3] / 2.
        size = max([np.sqrt(bbox[2] * bbox[3] * self.config['enlarge_box']), bbox[2] * 1.1, bbox[3] * 1.1])
        if size < 5 or np.all(modal == 0):
            # Degenerate instance: retry with a random one instead.
            return self._get_inst(
                np.random.choice(len(self)), load_rgb=load_rgb, randshift=randshift)
        # shift & scale aug
        if self.phase == 'train':
            if randshift:
                centerx += np.random.uniform(*self.config['base_aug']['shift']) * size
                centery += np.random.uniform(*self.config['base_aug']['shift']) * size
            size /= np.random.uniform(*self.config['base_aug']['scale'])
        # crop (nearest-neighbour keeps the mask binary)
        new_bbox = [int(centerx - size / 2.), int(centery - size / 2.), int(size), int(size)]
        modal = cv2.resize(utils.crop_padding(modal, new_bbox, pad_value=(0,)),
            (self.sz, self.sz), interpolation=cv2.INTER_NEAREST)
        # flip
        if self.config['base_aug']['flip'] and np.random.rand() > 0.5:
            flip = True
            modal = modal[:, ::-1]
        else:
            flip = False
        if load_rgb:
            rgb = np.array(self._load_image(os.path.join(
                self.config['{}_image_root'.format(self.phase)], imgfn))) # uint8
            rgb = cv2.resize(utils.crop_padding(rgb, new_bbox, pad_value=(0,0,0)),
                (self.sz, self.sz), interpolation=cv2.INTER_CUBIC)
            if flip:
                rgb = rgb[:, ::-1, :]
            # astype copies, so the negative-stride flip is made contiguous
            # before torch.from_numpy.
            rgb = torch.from_numpy(rgb.astype(np.float32).transpose((2, 0, 1)) / 255.)
            rgb = self.img_transform(rgb) # CHW
        if load_rgb:
            return modal, category, rgb
        else:
            return modal, category, None

    def __getitem__(self, idx):
        if self.memcached:
            self._init_memcached()
        randidx = np.random.choice(len(self))
        modal, category, rgb = self._get_inst(
            idx, load_rgb=self.config['load_rgb'], randshift=True) # modal, uint8 {0, 1}
        if not self.config.get('use_category', True):
            category = 1
        eraser, _, _ = self._get_inst(randidx, load_rgb=False, randshift=False)
        eraser = self.eraser_setter(modal, eraser) # uint8 {0, 1}
        # erase: place the eraser in front of or behind the instance at random
        erased_modal = modal.copy().astype(np.float32)
        if np.random.rand() < self.eraser_front_prob:
            erased_modal[eraser == 1] = 0 # eraser above modal
        else:
            eraser[modal == 1] = 0 # eraser below modal
        erased_modal = erased_modal * category
        # shrink eraser by a random number of pixels via erosion (dilating the complement)
        max_shrink_pix = self.config.get('max_eraser_shrink', 0)
        if max_shrink_pix > 0:
            shrink_pix = np.random.choice(np.arange(max_shrink_pix + 1))
            if shrink_pix > 0:
                shrink_kernel = shrink_pix * 2 + 1
                eraser = 1 - cv2.dilate(
                    1 - eraser, np.ones((shrink_kernel, shrink_kernel), dtype=np.uint8),
                    iterations=1)
        eraser_tensor = torch.from_numpy(eraser.astype(np.float32)).unsqueeze(0) # 1HW
        # erase rgb (zero out occluded pixels)
        if rgb is not None:
            rgb = rgb * (1 - eraser_tensor)
        else:
            rgb = torch.zeros((3, self.sz, self.sz), dtype=torch.float32) # 3HW
        erased_modal_tensor = torch.from_numpy(
            erased_modal.astype(np.float32)).unsqueeze(0) # 1HW
        # np.int was removed in NumPy 1.24; it was an alias of builtin int.
        target = torch.from_numpy(modal.astype(int)) # HW
        return rgb, erased_modal_tensor, eraser_tensor, target
| XiaohangZhan/deocclusion | datasets/partial_comp_dataset.py | partial_comp_dataset.py | py | 6,088 | python | en | code | 764 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torchvision.transforms.Compose",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 31,
"usage_type": "name"
},
{
"ap... |
23751441892 | import os
import argparse
import matplotlib.pyplot as plt
from matplotlib import cm
import torch
import numpy as np
import statistics as st
import csv
import seaborn as sns
from timeseries import EchoCard
from quality_classification import predict_single as predict_acq
from quality_classification import CardioNet
from heart_segmentation import QuickNat
from heart_segmentation import predict_single as predict_vol
def lighten_color(color, amount=0.5):
    """
    Lightens the given color by multiplying (1-luminosity) by the given amount.
    Input can be matplotlib color string, hex string, or RGB tuple.
    Examples:
    >> lighten_color('g', 0.3)
    >> lighten_color('#F034A3', 0.6)
    >> lighten_color((.3,.55,.1), 0.5)
    """
    import colorsys
    import matplotlib.colors as mc
    # Resolve named colors through matplotlib's table; anything else
    # (hex string, RGB tuple) is passed to to_rgb() unchanged.
    try:
        resolved = mc.cnames[color]
    except Exception:
        resolved = color
    hue, lum, sat = colorsys.rgb_to_hls(*mc.to_rgb(resolved))
    lightened_lum = 1 - amount * (1 - lum)
    return colorsys.hls_to_rgb(hue, lightened_lum, sat)
class Graph(object):
    '''
    This class collects all information needed to plot graphs and has functions which creates and saves graphs
    Parameters
    ----------
    time_per_pixel: float
        pixel resolution in x axis (time in seconds)
    labels: list of ints with values 0 or 1
        A list containing the model's quality assessment classification results
    sigmoids: list of floats between [0,1]
        A list containing the model's quality assessment sigmoid outputs
    BEtimes: A list of tuples of floats
        A list containing the Begin and End times (in pixels) of the windowing performed on the original timeseries image in
        order to perform classification
    Attributes
    ----------
    time_per_pixel: float
        pixel resolution in x axis (time in seconds)
    labels: list of ints with values 0 or 1
        A list containing the model's quality assessment classification results
    sigmoids: list of floats between [0,1]
        A list containing the model's quality assessment sigmoid outputs
    BEtimes: A list of tuples of floats
        A list containing the Begin and End times (in pixels) of the windowing performed on the original timeseries image in
        order to perform classification
    tot_time: float
        The total time of acquisition
    heatmap: numpy array with the same width as the original timeseries image and 1/3 of its height
        An "image" showing the sigmoid outputs of the network in the regions of the original image
    '''
    def __init__(self, time_per_pixel, labels, sigmoids, BEtimes):
        self.time_per_pixel = time_per_pixel
        self.BEtimes = BEtimes
        self.labels = labels
        self.sigmoids = sigmoids
        # Span from the start of the first window to the end of the last one,
        # converted from pixels to seconds.
        self.tot_time = (self.BEtimes[-1][1]-self.BEtimes[0][0])*self.time_per_pixel
        print('Total time of acquisition:', self.tot_time)
    def add_axvspan(self):
        """
        This function is called to add the classification results of the quality assessment as colors in regions of plots
        """
        # NOTE: draws on the *current* pyplot axes, so callers must select the
        # target subplot before calling this.
        i=0
        j=0
        for BEtime, label in zip(self.BEtimes, self.labels):
            timeB = BEtime[0]*self.time_per_pixel #int(BEtime[0]*time_per_pixel*len(volumes)/tot_time)
            timeE = BEtime[1]*self.time_per_pixel #int(BEtime[1]*time_per_pixel*len(volumes)/tot_time)
            # The '_'*i prefix hides duplicate legend entries after the first
            # one of each kind (matplotlib skips labels starting with '_').
            if label == 1:
                plt.axvspan(timeB, timeE, facecolor='gold', alpha=1, label='_'*i +'Good')
                i+=1
            else:
                plt.axvspan(timeB, timeE, facecolor='midnightblue', alpha=1, label='_'*j +'Bad')
                j+=1
    def make_graph(self, points, volumes, lvids, title, output_path):
        """
        This function creates and saves a graph with two subplots. The first shows the LV Volume over time and
        the second shows the LVID over time. The quality of the image is represented with colors on the graph
        according to the classification results by calling the add_axvspan function.
        Parameters
        ----------
        points: numpy array
            contains the corresponding points of the occurences in volumes and lvids
        volumes: list of floats
            A list containing the LV Volume either for systole, diastole, or all points
        lvids: list of floats
            A list containing the LV Inner Diameters either for systole, diastole, or all points
        title: string
            The title of the figure to be saved
        output_path: string
            The name of the file to be saved
        """
        #f = plt.figure(figsize[12.8, 9.6])
        volume = np.array(volumes)
        lvid = np.array(lvids)
        plt.figure(figsize=[12.8, 9.6])
        # plot LV Vol
        plt.subplot(121)  # plt
        #plt.plot(points*self.time_per_pixel, volume) #*10**(9) # [::3] to take every third
        # NOTE(review): positional x/y arguments to sns.lineplot are removed in
        # seaborn >= 0.13 -- confirm the pinned seaborn version.
        sns.lineplot(points*self.time_per_pixel, volume)
        self.add_axvspan()
        plt.legend()
        plt.grid(True)
        plt.ylabel('LV Vol [mm^3]')
        plt.xlabel('Time [sec]')
        plt.xticks(np.arange(0, self.tot_time, 0.5))
        plt.title('LV Volume')
        # and LVID
        plt.subplot(122)
        #plt.plot(points*self.time_per_pixel, lvid)
        sns.lineplot(points*self.time_per_pixel, lvid)
        self.add_axvspan()
        plt.legend()
        plt.grid(True)
        plt.ylabel('LVID [mm]')
        plt.xlabel('Time [sec]')
        plt.xticks(np.arange(0, self.tot_time, 0.5))
        plt.title('LV Inner Diameters')
        plt.suptitle(title)
        plt.savefig(output_path)
        plt.close() #'all'
    def make_custom_heatmap(self, img):
        """
        This function creates a heatmap with the same width as the original timeseries image and 1/3 of its height
        It is an "image" showing the sigmoid outputs of the network in the regions of the original image
        Parameters
        ----------
        img: numpy array
            The original timeseries image
        """
        # Every row is identical: this is a 1-D quality strip stretched
        # vertically for display next to the timeseries image.
        self.heatmap = np.zeros((img.shape[0]//3, img.shape[1]))
        for (Btime, Etime), label in zip(self.BEtimes, self.sigmoids):
            self.heatmap[:,Btime:Etime] = label #255*
    def map_sigs_to_colors(self, peaks):
        """
        This function calculates the sigmoid value (continuous value of quality acquisition) at each time point in the list peaks
        Parameters
        ----------
        peaks: numpy array
            A list containing points in time (represented in pixels) during which a diastole occurs
        Returns
        -------
        new_labels: list of floats
            A list containing the corresponding sigmoid value (quality of acquisition) for each point in peaks
        """
        # NOTE(review): a peak falling outside every window is silently
        # skipped, so len(new_labels) can be < len(peaks) -- confirm callers
        # rely on windows covering all peaks.
        new_labels = []
        for peak in peaks:
            for (Btime, Etime), label in zip(self.BEtimes, self.sigmoids):
                if peak >= Btime and peak < Etime:
                    new_labels.append(label)
                    break
        return new_labels
    def make_hr_graph(self, heartrate, peaks, vols, output_path):
        """
        This function creates a graph with two subplots. The first sublots shows the heartrate over time and the second subplot
        show the LV Vol;d over the heartrates. The quality of the image is represented as an image with continuos colors under the first
        sbuplot according to the classification results. In the second subplot the quality of the image is represented as a heatmap
        where each point in the plot is represented by a different color representing the quality of acquisition during the time the measurement
        was mede
        Parameters
        ----------
        heartrate: list of ints
            Contains a list of the heartrate calculated for each heart beat in [bpm]
        peaks: numpy array
            Contains the points (pixels) corresponding to when the heart is in diastole which were used for the heartrate calculation
        vols: list of floats
            Contains the LV Vol in diastole
        output_path: string
            The name of the file to be saved
        """
        plt.figure(figsize=[12.8, 9.6])
        # 6x2 grid: 5 rows for the curve, 1 thin row for the quality strip.
        grid = plt.GridSpec(6, 2, hspace=0.0, wspace=0.2)
        ax_hrt = plt.subplot(grid[:-1, 0]) # grid for graph heartrate-time
        ax_h = plt.subplot(grid[-1, 0]) # grid for classification regions
        ax_vhr = plt.subplot(grid[:, 1]) # grid for graph volume-heartrate
        sig_colors = self.map_sigs_to_colors(peaks)
        ax_hrt.set_xlabel('Time [sec]')
        ax_hrt.set_ylabel('Heart rate [bpm]]')
        ax_hrt.set_xticks(np.arange(0, peaks[-1]*self.time_per_pixel, 0.5))
        ax_hrt.grid()
        ax_hrt.plot(peaks*self.time_per_pixel, np.array(heartrate), '-o')
        ax_h.axis('off')
        h = ax_h.imshow(self.heatmap)
        plt.colorbar(h, ax=ax_h, orientation='horizontal')
        v = ax_vhr.scatter(heartrate, vols, c=sig_colors, cmap='viridis')
        ax_vhr.set_xlabel('Heart rate [bpm]]')
        ax_vhr.set_ylabel('LV Vol;d [mm^3]]')
        ax_vhr.grid()
        plt.colorbar(v, ax=ax_vhr, orientation='horizontal')
        plt.suptitle("Heart rate plots")
        plt.savefig(output_path)
        plt.close()
    def plot_img_mask(self, img, mask, output_path):
        """
        This function plots and saves the original timeseries image and the superimposed segmentation mask of the heart
        Parameters
        ----------
        img: numpy array
            The original timeseries image
        mask: numpy array
            The segmentation mask of the heart inner diameter
        output_path: string
            The name of the file to be saved
        """
        fig, ax = plt.subplots()
        ax.imshow(img, cmap='gray')
        ax.imshow(mask, cmap='winter', alpha=0.3)
        # Relabel the pixel x axis in seconds, one tick every 0.5 s.
        xt = np.arange(0, img.shape[1], step=int(0.5/self.time_per_pixel))
        ax.set_xticks(xt)
        xl = np.round_(xt*self.time_per_pixel, 1)
        ax.set_xticklabels(xl)
        ax.set_yticks([])
        plt.xlabel('Time [sec]')
        plt.savefig(output_path, bbox_inches = 'tight', dpi=1200)
        plt.close()
    def plot_img(self, img, output_path):
        """
        This function plots and saves the original timeseries image and above that the heatmap created by the function make_custom_heatmap
        Parameters
        ----------
        img: numpy array
            The original timeseries image
        output_path: string
            The name of the file to be saved
        """
        # Scale the figure so heatmap strip and image keep their pixel ratio.
        heights = [a.shape[0] for a in [self.heatmap, img]]
        widths = [self.heatmap.shape[1]]
        fig_width = 8
        fig_height = fig_width*sum(heights)/sum(widths)
        f, axarr = plt.subplots(2,1, figsize=(fig_width, fig_height+0.4), gridspec_kw={'height_ratios': heights})
        ax = axarr[0].imshow(self.heatmap, cmap='viridis')
        axarr[0].axis('off')
        axarr[1].imshow(img, cmap='gray')
        xt = np.arange(0, img.shape[1], step=int(0.5/self.time_per_pixel))
        axarr[1].set_xticks(xt)
        xl = np.round_(xt*self.time_per_pixel, 1)
        axarr[1].set_xticklabels(xl)
        axarr[1].set_yticks([])
        axarr[1].set_xlabel('Time [sec]')
        plt.subplots_adjust(wspace=0, hspace=0, left=0, right=1, bottom=0, top=1)
        plt.colorbar(ax, ax=axarr[:]) #, orientation='horizontal'
        plt.savefig(output_path, bbox_inches = 'tight', dpi=1200)
        plt.close()
''' ----------- DONE WITH GRAPH CLASS ----------- '''
''' ----------- NEXT COME HELPER FUNCTIONS FOR GETTING STATISTICS AND LOADING MODELS ----------- '''
def get_stats_good(labels, times, peaks, ds, time_res):
    """
    This function calculates various statistics for either the LVIDs in diastole or systole during good quality of acquisition
    Parameters
    ----------
    labels: list of ints, 0 or 1
        Holds the acquisition quality classification result fror each time window
    times: list of tuples
        Each tuple holds the begin and end time of the window cut from the timeseries for the acquisition quality classification
    peaks: numpy array
        Holds the time (in pixels) of the events in ds
    ds: list of floats
        Holds either LVID;d, LVID;s or heartrates
    time_res: float
        The resolution on the x axis, i.e. to how many seconds one pixel corresponds
    Returns
    -------
    med_ds: float
        The median value of all measurements captured during good quality of acquisition (0 if none were found)
    avg_ds: float
        The average value of all measurements captured during good quality of acquisition (0 if none were found)
    max(good_ds): float
        The maximum of all measurements captured during good quality of acquisition (0 if none were found)
    min(good_ds): float
        The minimum of all measurements captured during good quality of acquisition (0 if none were found)
    good_ds: list of floats
        The measurements falling inside windows classified as good (empty if none were found)
    good_times: list of floats
        The corresponding times (in seconds) of good_ds (empty if none were found)
    """
    good_ds = []
    good_times = []
    # `value` (not `ds`) as loop variable: the original shadowed the `ds`
    # parameter, clobbering it on the first iteration.
    for peak, value in zip(peaks, ds):
        for label, (Btime, Etime) in zip(labels, times):
            if peak >= Btime and peak < Etime:
                # Keep the measurement only if its window was classified good;
                # either way, stop scanning windows for this peak.
                if label == 1:
                    good_ds.append(value)
                    good_times.append(peak*time_res)
                break
    try:
        med_ds = st.median(good_ds)
        avg_ds = sum(good_ds)/len(good_ds)
        return med_ds, avg_ds, max(good_ds), min(good_ds), good_ds, good_times
    except (ZeroDivisionError, st.StatisticsError):
        # No measurement fell inside a good window.
        return 0, 0, 0, 0, good_ds, good_times
def load_model_device(network):
    """Build the requested network, move it to the available device and load its weights.

    Parameters
    ----------
    network: string
        'quicknat' selects the segmentation model; anything else selects the
        quality-classification model.

    Returns
    -------
    net: QuickNat or CardioNet instance with its checkpoint loaded
    device: torch.device (cuda when available, otherwise cpu)
    """
    # Prefer the GPU whenever one is visible.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if network == 'quicknat':
        checkpoint_path = './heart_segmentation/checkpoints/heart-seg-net.pt'
        # Architecture hyper-parameters expected by QuickNat.
        quicknat_params = {
            'num_channels': 1,
            'num_class': 1,
            'num_filters': 64,
            'kernel_h': 5,
            'kernel_w': 5,
            'kernel_c': 1,
            'stride_conv': 1,
            'pool': 2,
            'stride_pool': 2,
            'se_block': "NONE",
            'drop_out': 0.2,
        }
        net = QuickNat(quicknat_params)
        net.to(device=device)
        checkpoint = torch.load(os.path.join(checkpoint_path), map_location=device)
        net.load_state_dict(checkpoint['net_state_dict'])
    else:
        checkpoint_path = './quality_classification/checkpoints/quality-clas-net.pth'
        net = CardioNet(n_channels=1, n_classes=1)
        net.to(device=device)
        net.load_state_dict(torch.load(checkpoint_path, map_location=device))
    return net, device
''' ----------- DONE WITH HELPER FUNCTIONS ----------- '''
''' ----------- NEXT COMES THE FUNCTION WHERE EVERYTHING IS RUN ----------- '''
def run(input_path, output_path, weight, graphs=True, write=None, write_file=None):
    """
    This function is where the end2end framework is run.
    Parameters
    ----------
    input_path: string
        Path of file to be loaded
    output_path: string
        Directory to save results
    weight: int
        The weight of the mouse we are evaluating
    graphs: bool
        If true graphs will be created and saved in the ouput_path directory
    write: string
        If 'stats' then values such as max, min, median etc of the LVIDs etc are written to a csv file
        If 'all' then LVIDs etc. are written for all good classified regions
    write_file: string
        The csv file to write results to according to what has been given to write
    """
    labels = []
    sigs = []
    masks = []
    # create an echo card instance
    ec = EchoCard(input_path)
    # fill in timeseries attribute of class - a numpy array of entire time of acquisition
    ec.make_timeseries()
    # split timeseries to get images for segmentation network
    vol_windows = ec.make_seg_windows()
    # load models for testing
    echo_net, device = load_model_device('echo')
    quicknat_net, _ = load_model_device('quicknat')
    print("Using device ", device)
    print("Loaded models")
    print('Image shape:', ec.image.shape)
    '''-----------SEGMENTATION PART-----------'''
    # get masks
    for img in vol_windows:
        mask, _ = predict_vol(quicknat_net, device, img, 256)
        masks.append(mask)
    # connect back to one timeseries
    ec.connect_masks(masks)
    # compute volumes and lvids for all points in timeseries
    ec.get_vols()
    # get diastole and systole lvid, lv vol and time of occurence (in pixel values)
    dpeaks, dlvids, dvols = ec.get_diastoles()
    speaks, slvids, svols = ec.get_systoles()
    # get heartrate in [bpm]
    heartrate = ec.get_heartrate(dpeaks)
    '''-----------QUALITY ACQUISITION PART-----------'''
    # split timeseries to get images for quality classification
    # two lists are returned - one with numpy arrays (image) one with a tuple (startTime, endTime)
    ec.weight_to_size(weight)
    qual_windows, BEtimes = ec.make_quality_windows_man()
    # classify each window as good or bad (keep raw sigmoid and rounded label)
    for img in qual_windows:
        label, _ = predict_acq(echo_net, device, img, 256)
        sigs.append(label)
        labels.append(np.round(label))
    '''-----------GRAPHS PART-----------'''
    if graphs:
        # exist_ok avoids a race between checking for and creating the directory
        os.makedirs(output_path, exist_ok=True)
        # local renamed from `graphs` to avoid rebinding the boolean parameter
        grapher = Graph(ec.time_res, labels, sigs, BEtimes)
        grapher.make_custom_heatmap(ec.image)
        grapher.make_graph(dpeaks, dvols, dlvids, 'Diastole', os.path.join(output_path, 'output_diastole.png'))
        grapher.make_graph(speaks, svols, slvids, 'Systole', os.path.join(output_path, 'output_systole.png'))
        grapher.plot_img_mask(ec.image, ec.mask, os.path.join(output_path, 'output_img_mask.png'))
        grapher.plot_img(ec.image, os.path.join(output_path, 'output_img.png'))
        grapher.make_hr_graph(heartrate, dpeaks[:-1], dvols[:-1], os.path.join(output_path, 'output_heartrate.png'))
    '''-----------WRITING TO FILES PART-----------'''
    med_diastole, avg_diastole, max_diastole, min_diastole, good_lvid_d, times_lvid_d = get_stats_good(labels, BEtimes, dpeaks, dlvids, ec.time_res)
    med_systole, avg_systole, max_systole, min_systole, good_lvid_s, times_lvid_s = get_stats_good(labels, BEtimes, speaks, slvids, ec.time_res)
    med_heartrate, avg_heartrate, max_heartrate, min_heartrate, good_heartrates, times_hr = get_stats_good(labels, BEtimes, dpeaks[:-1], heartrate, ec.time_res)
    print('Average lvid;d is: ', avg_diastole, ' mm and average lvid;s is: ', avg_systole, ' mm')
    print('Median lvid;d is: ', med_diastole, ' mm and median lvid;s is: ', med_systole, ' mm')
    print('The average heart rate is: ', avg_heartrate, ' bpm and the median heart rate is: ', med_heartrate, ' bpm')
    # append results to file if a csv file has been given in write
    # either stats such as mean, median etc. (1 value for each file)
    if write=='stats':
        filename = input_path.split('/')[-1]
        # if the file doesn't already exist add first row with column names first
        if not os.path.isfile(write_file):
            with open(write_file, 'w', newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(['file','median_diastole', 'median_systole', 'median_heartrate', 'avg_diastole', 'avg_systole', 'avg_heartrate', 'max_diastole', 'max_systole', 'max_heartrate', 'min_diastole', 'min_systole', 'min_heartrate'])
        # append new line to file
        with open(write_file, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            writer.writerow([filename, med_diastole, med_systole, med_heartrate, avg_diastole, avg_systole, avg_heartrate, max_diastole, max_systole, max_heartrate, min_diastole, min_systole, min_heartrate])
    # or heartrate, lvid;d etc. during all good acquisition regions
    elif write=='all':
        filename = input_path.split('/')[-1]
        # if the file doesn't already exist add first row with column names first
        if not os.path.isfile(write_file):
            with open(write_file, 'w', newline='') as csv_file:
                writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
                writer.writerow(['file','lvid;d', 'lvid;d time', 'lvid;s', 'lvid;s time', 'heart rate', 'heart rate time'])
        # append new lines to file
        with open(write_file, 'a', newline='') as csv_file:
            writer = csv.writer(csv_file, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL)
            # depending on windowing and classification these will not necessarily have the exact same length - take the smallest
            # fixed: the original took len(good_lvid_s) twice and never
            # considered good_lvid_d, risking an IndexError below
            min_len = min([len(good_lvid_d), len(good_lvid_s), len(good_heartrates)])
            for i in range(min_len):
                writer.writerow([filename, good_lvid_d[i], times_lvid_d[i], good_lvid_s[i], times_lvid_s[i], good_heartrates[i], times_hr[i]])
def get_args():
'''
Required arguments
------------------
-i: The path to the dicom file you wish to extract features from
-m: The body mass in grams of the current mouse you wish to extract features from
Optional arguments
------------------
-o: The name of the directory to save graphs, images and csv to. Default is the current working directory.
-g: if True output graphs and images will be created and saved, if False they will not. Default is True
-w: If 'all' all features are extracted and saved to a csv, if 'stats' only statistics from echocardiogram are extracted and saved
(one row per image). Default value is 'all'
-f: The name of the csv file to write features to. The default value is 'output_all.csv'. If the file already exists then the new
features will be appended as new rows to the file, but if the file doesn't already exist then it is automatically created.
'''
parser = argparse.ArgumentParser(description='Run the end 2 end framework to extract useful heart features from an echocardiogram',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--input', '-i', metavar='INPUT', required=True,
help='Specify path of input image - must be in DICOM format')
parser.add_argument('--mass', '-m', type=int, required=True,
help='Specify the body mass of the mouse')
parser.add_argument('--output', '-o', default='.',
help='Specify output path to save graphs')
parser.add_argument('--graphs', '-g', default=True,
help='Specify True or False depending on whether you want to save figures')
parser.add_argument('--write', '-w', default='all',
help='Specify wheter to save all good features or statistics of features. Give all or stars as input.')
parser.add_argument('--writefile', '-f', default='output_all.csv',
help='Specify in which file to save features')
return parser.parse_args()
if __name__ == '__main__':
args = get_args()
run(args.input, args.output, args.mass, graphs=args.graphs, write=args.write, write_file=args.writefile) | HelmholtzAI-Consultants-Munich/Echo2Pheno | Module I/run4single.py | run4single.py | py | 25,479 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.colors.cnames",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.colors",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "colorsys.rgb_to_hls",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "m... |
14940809637 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
try:
import petsclinter as pl
except ModuleNotFoundError as mnfe:
try:
petsc_dir = os.environ['PETSC_DIR']
except KeyError as ke:
raise RuntimeError('Must set PETSC_DIR environment variable') from ke
sys.path.insert(0, os.path.join(petsc_dir, 'lib', 'petsc', 'bin', 'maint', 'petsclinter'))
import petsclinter as pl
def __prepare_ns_args(ns_args, parser):
slepc_mansecs = ['eps','lme','mfn','nep','pep','svd','sys']
slepc_aux_mansecs = ['bv','ds','fn','rg','st']
if ns_args.slepc_dir is None:
raise RuntimeError('Could not determine SLEPC_DIR from environment, please set via options')
extra_compiler_flags = [
'-I' + os.path.join(ns_args.slepc_dir, 'include'),
'-I' + os.path.join(ns_args.slepc_dir, ns_args.petsc_arch, 'include')
]
with open(os.path.join(ns_args.slepc_dir, ns_args.petsc_arch, 'lib', 'slepc', 'conf', 'slepcvariables'), 'r') as sv:
line = sv.readline()
while line:
if 'INCLUDE' in line:
for inc in line.split('=', 1)[1].split():
extra_compiler_flags.append(inc)
line = sv.readline()
extra_header_includes = []
mansecimpls = [m + 'impl.h' for m in slepc_mansecs + slepc_aux_mansecs] + [
'slepcimpl.h', 'vecimplslepc.h'
]
for header_file in os.listdir(os.path.join(ns_args.slepc_dir, 'include', 'slepc', 'private')):
if header_file in mansecimpls:
extra_header_includes.append(f'#include <slepc/private/{header_file}>')
if ns_args.src_path == parser.get_default('src_path'):
ns_args.src_path = os.path.join(ns_args.slepc_dir, 'src')
if ns_args.patch_dir == parser.get_default('patch_dir'):
ns_args.patch_dir = os.path.join(ns_args.slepc_dir, 'slepcLintPatches')
# prepend these
ns_args.extra_compiler_flags = extra_compiler_flags + ns_args.extra_compiler_flags
# replace these
if not ns_args.extra_header_includes:
ns_args.extra_header_includes = extra_header_includes
return ns_args
def command_line_main():
import argparse
import petsclinter.main
slepc_classid_map = {
'_p_BV *' : 'BV_CLASSID',
'_p_DS *' : 'DS_CLASSID',
'_p_FN *' : 'FN_CLASSID',
'_p_RG *' : 'RG_CLASSID',
'_p_ST *' : 'ST_CLASSID',
'_p_EPS *' : 'EPS_CLASSID',
'_p_PEP *' : 'PEP_CLASSID',
'_p_NEP *' : 'NEP_CLASSID',
'_p_SVD *' : 'SVD_CLASSID',
'_p_MFN *' : 'MFN_CLASSID',
'_p_LME *' : 'LME_CLASSID',
}
for struct_name, classid_name in slepc_classid_map.items():
pl.checks.register_classid(struct_name, classid_name)
parser = argparse.ArgumentParser(prog='slepclinter', add_help=False)
group_slepc = parser.add_argument_group(title='SLEPc location settings')
group_slepc.add_argument('--SLEPC_DIR', required=False, default=os.environ.get('SLEPC_DIR', None), help='if this option is unused defaults to environment variable $SLEPC_DIR', dest='slepc_dir')
args, parser = pl.main.parse_command_line_args(parent_parsers=[parser])
args = __prepare_ns_args(args, parser)
return pl.main.namespace_main(args)
if __name__ == '__main__':
sys.exit(command_line_main())
| firedrakeproject/slepc | lib/slepc/bin/maint/slepcClangLinter.py | slepcClangLinter.py | py | 3,156 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line... |
41896395893 | import cv2
import numpy as np
import csv
import math
img = cv2.imread(".//new_dataset//250.jpg")
img = cv2.GaussianBlur(img , (5 , 5) , 0)
n = 64
div = 256//n #n is the number of bins, here n = 64
rgb = cv2.split(img)
q = []
for ch in rgb:
vf = np.vectorize(lambda x, div: int(x//div)*div)
quantized = vf(ch, div)
q.append(quantized.astype(np.uint8))
img = cv2.merge(q)
row , col , channels = img.shape
connectivity = 8
tau = 0
if tau == 0:
tau = row*col*0.1
rgb = cv2.split(img)
q = []
for ch in rgb:
vf = np.vectorize(lambda x, div: int(x//div)*div)
quantized = vf(ch, div)
q.append(quantized.astype(np.uint8))
img = cv2.merge(q)
bgr = cv2.split(img)
total = []
for ch in bgr:
for k in range(0 , 256 , div):
temp = ch.copy()
temp = (temp == k).astype(np.uint8)
output = cv2.connectedComponentsWithStats(temp , connectivity , cv2.CV_32S)
num_labels = output[0]
labels = output[0]
stats = output[2]
centroids = output[3]
alpha = 0
beta = 0
req = stats[1:]
for r in req:
if(r[4] >= tau):
alpha += r[4]
else:
beta += r[4]
total.append(alpha)
total.append(beta)
dist = []
name = []
with open('ccv_feat.csv' , 'r') as csvFile:
reader = csv.reader(csvFile)
i = 0
for row in reader:
distance = math.sqrt(sum([(float(a) - float(b)) ** 2 for a , b in zip(row , total)]))
dist.append(distance)
nam = str(i)+".jpg"
name.append(nam)
i += 1
di = {}
for i in range(len(name)):
di[name[i]] = dist[i]
sorted_di = sorted(di.items() , key = lambda kv : kv[1])
pic = []
t = 0
first_key = list(sorted_di)[:10]
for key , val in first_key:
key = ".//new_dataset//"+key
res = cv2.imread(key)
na = str(t)
cv2.imshow(na , res)
t += 1
| kumar6rishabh/cbir-search-engine | ccv_searcher.py | ccv_searcher.py | py | 1,968 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.GaussianBlur",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.split",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.vectorize",
"line_numbe... |
71534616424 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from icalendar import Calendar, Event
from datetime import *
from dateutil.parser import parse
from pytz import UTC # timezone
cal_file = '/Users/gyyoon/Desktop/pentaa.ics'
MEETING_STR = '[회의]'
TEATIME_STR = '[Tea-Time]'
WORK_STR = '[업무]'
NONWORK_STR = '[비업무]'
LUNCH_STR = '[점심]'
CLUB_STR = '[동호회]'
WORKCLEAN_STR = '[업무정리]'
TALK_STR = '[면담]'
OUTWORK_STR = '[외근]'
BREAK_STR = '[휴식]'
TOGETHER_CTR = '[회식]'
SEATCLEAN_STR = '[자리정리]'
EVENT_STR = '[행사]'
LATE_SRT = '[지연출근]'
# Count global var
total_cnt = 0
work_cnt = 0
nonwork_cnt = 0
meeting_cnt = 0
lunch_cnt = 0
club_cnt = 0
workclean_cnt = 0
outwork_cnt = 0
unknown_cnt = 0
break_cnt = 0
together_cnt = 0
seatclean_cnt = 0
event_cnt = 0
# Duration global var
work_dur = timedelta(hours=0, minutes=0)
nonwork_dur = timedelta(hours=0, minutes=0)
meeting_dur = timedelta(hours=0, minutes=0)
lunch_dur = timedelta(hours=0, minutes=0)
club_dur = timedelta(hours=0, minutes=0)
workclean_dur = timedelta(hours=0, minutes=0)
outwork_dur = timedelta(hours=0, minutes=0)
break_dur = timedelta(hours=0, minutes=0)
together_dur = timedelta(hours=0, minutes=0)
seatclean_dur = timedelta(hours=0, minutes=0)
event_dur = timedelta(hours=0, minutes=0)
late_dur = timedelta(hours=0, minutes=0)
def process_work_event(comp):
# Duration global var
global work_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
work_dur += duration
def process_nonwork_event(comp):
# Duration global var
global nonwork_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
nonwork_dur += duration
def process_meeting_event(comp):
# Duration global var
global meeting_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
meeting_dur += duration
def process_lunch_event(comp):
# Duration global var
global lunch_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
lunch_dur += duration
def process_club_event(comp):
# Duration global var
global club_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
club_dur += duration
def process_workclean_event(comp):
# Duration global var
global workclean_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
workclean_dur += duration
def process_outwork_event(comp):
# Duration global var
global outwork_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
outwork_dur += duration
def process_break_event(comp):
# Duration global var
global break_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
break_dur += duration
def process_together_event(comp):
# Duration global var
global together_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
together_dur += duration
def process_seatclean_event(comp):
# Duration global var
global seatclean_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
seatclean_dur += duration
def process_event_event(comp):
# Duration global var
global event_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
event_dur += duration
def process_late_event(comp):
global late_dur
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
late_dur += duration
def print_results():
print('----------------------------------')
print('total count : ' + str(total_cnt))
print('meeting : ' + str(meeting_cnt))
print('work : ' + str(work_cnt))
print('nonwork : ' + str(nonwork_cnt))
print('lunch : ' + str(lunch_cnt))
print('unknown ' + str(unknown_cnt))
print('----------------------------------')
print('workdur : ' + str(work_dur))
print('nonwork dur : ' + str(nonwork_dur))
print('meeting dur : ' + str(meeting_dur))
print('lunch dur : ' + str(lunch_dur))
print('club dur : ' + str(club_dur))
print('workclean dur : ' + str(workclean_dur))
print('outwork dur : ' + str(outwork_dur))
print('break dur : ' + str(break_dur))
print('together dur : ' + str(together_dur))
print('seatclean dur : ' + str(seatclean_dur))
print('event dur : ' + str(event_dur))
print('late dur : ' + str(late_dur))
print('----------------------------------')
class EventProcessor:
def __init__(self):
print('Initialize')
def calc_event_duration(self, comp):
start_dt = comp.get('dtstart').dt
end_dt = comp.get('dtend').dt
duration = end_dt - start_dt
return duration
def main():
# Count global var
global total_cnt
global work_cnt
global nonwork_cnt
global meeting_cnt
global lunch_cnt
global club_cnt
global workclean_cnt
global outwork_cnt
global break_cnt
global together_cnt
global seatclean_cnt
global event_cnt
global unknown_cnt
global late_dur
g = open(cal_file, 'rb')
gcal = Calendar.from_ical(g.read())
for component in gcal.walk():
if component.name == "VEVENT":
start_date = component.get('dtstart').dt
end_date = component.get('dtend').dt
# I don't know why, but same formatted strs have differecne types,
# date or datetime, so I unified them to date type(datetime requires
# timezone set...)
if type(start_date) != type(date(2018, 4, 4)):
start_date = start_date.date()
if type(end_date) != type(date(2018, 4, 4)):
end_date = end_date.date()
# Maybe someday I might get inputs from user that specifies
# the date range...
if start_date >= date(2018, 1, 1) and end_date <= date(2018, 12, 31):
total_cnt += 1
event_summary = component.get('summary')
if WORK_STR in event_summary:
work_cnt += 1
process_work_event(component)
print(event_summary)
elif MEETING_STR in event_summary or TEATIME_STR in \
event_summary or TALK_STR in event_summary:
meeting_cnt += 1
process_meeting_event(component)
elif NONWORK_STR in event_summary:
nonwork_cnt += 1
process_nonwork_event(component)
elif LUNCH_STR in event_summary:
lunch_cnt += 1
process_lunch_event(component)
elif CLUB_STR in event_summary:
club_cnt += 1
process_club_event(component)
elif WORKCLEAN_STR in event_summary:
workclean_cnt += 1
process_workclean_event(component)
elif OUTWORK_STR in event_summary:
outwork_cnt += 1
process_outwork_event(component)
elif BREAK_STR in event_summary:
break_cnt += 1
process_break_event(component)
elif TOGETHER_CTR in event_summary:
together_cnt += 1
process_together_event(component)
elif SEATCLEAN_STR in event_summary:
seatclean_cnt += 1
process_seatclean_event(component)
elif EVENT_STR in event_summary:
event_cnt += 1
process_event_event(component)
elif LATE_SRT in event_summary:
event_cnt += 1
process_late_event(component)
else:
unknown_cnt += 1
# print(event_summary)
# print(component.get('dtstart').dt)
# print(component.get('dtend').dt)
# print(component.get('dtstamp').dt)
g.close()
print_results()
if __name__ == "__main__":
main()
| Dry8r3aD/ics_parser | run.py | run.py | py | 8,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "icalendar.Calendar.from_ical",
"line_number": 245,
"usage_type": "call"
},
{
"api_name": "icalendar.Calendar",
"line_number": 245,
"usage_type": "name"
}
] |
22706853086 | import code
import gym
import torch
from tqdm import trange
import numpy as np
import components.prioritized_memory
import components.memory
from components.filesys_manager import ExperimentPath
class BaseTrainer:
def __init__(self, args):
# init experiment hyper-parameters
self.args = args
self.debug = self.args.debug
self.allow_printing = True
self.exp_path = ExperimentPath(f"{self.args.exp_root}/{self.args.name}/{self.args.env}/{args.trainer}/{self.args.seed}")
self.print("log directory initialized")
# env init
self.expl_env = gym.make(self.args.env)
self.eval_env = gym.make(self.args.env)
# replay buffer
if args.prioritized:
self.replay_buffer = components.prioritized_memory.ReplayMemory(self.args)
else:
self.replay_buffer = components.memory.ReplayMemory(self.args)
# global time steps
self.t = 0
# exploration environment
self.s = torch.from_numpy(self.expl_env.reset()).to(dtype=torch.float32, device=self.args.device)
self.done = False
self.episode_num = 0
self.episode_len = 0
self.episode_return = 0
self.episode_discounted_return = 0
# init model
self.init_model()
# log
self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "model initialized"])
self.exp_path['config'].json_write(vars(self.args))
self.exp_path['model_info'].txt_write(str(self.q_learner))
self.save('initial')
def print(self, *args, **kwargs):
if self.allow_printing:
print(",", self.exp_path.now(), self.exp_path.str(), ",", *args, **kwargs)
def init_model(self):
""" init model """
self.print("warning: init model not implemented")
self.q_learner = None
self.q_target = None
self.policy = None
self.q_optimizer = None
self.policy_optimizer = None
def sample_batch(self, batch_size):
return self.replay_buffer.sample(batch_size)
def expl_action(self, obs) -> torch.Tensor:
self.print("warning: expl action not implemented")
a = np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0])
return torch.from_numpy(a)
def eval_action(self, obs) -> torch.Tensor:
self.print("warning: eval action not implemented")
a = np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0])
return torch.from_numpy(a)
def before_learn_on_batch(self):
self.print("warning: before learn on batch not implemented")
def learn_on_batch(self, treeidx, s, a, r, s_, d, w):
""" learn on batch """
self.print("warning: learn on batch not implemented")
def after_learn_on_batch(self):
self.print("warning: after learn on batch not implemented")
def state_value_pred(self, s):
""" log the state value of initial state """
self.print("warning: state value pred not implemented")
return -1
def reset_expl_env(self):
""" reset exploration env """
self.s = self.expl_env.reset()
self.s = torch.from_numpy(self.s).to(dtype=torch.float32, device=self.args.device)
self.done = False
self.episode_num += 1
self.episode_len = 0
self.episode_return = 0
self.episode_discounted_return = 0
def advance_expl_env(self, next_obs, reward, done):
""" advance exploration env for next step """
self.s = next_obs
self.done = done
self.episode_return += reward
if self.args.reward_clip:
reward = min(max(reward, -1), 1)
self.episode_discounted_return += reward * self.args.discount ** self.episode_len
self.episode_len += 1
def epsilon(self):
""" compute current epsilon (linear annealing 1M steps) """
return 1 - self.t * ((1 - 0.1) / self.args.epsilon_steps) if self.t < self.args.epsilon_steps else 0.1
def collect_init_steps(self, render=False):
""" fill the replay buffer before learning """
for _ in trange(self.args.min_num_steps_before_training):
if self.done:
self.reset_expl_env()
# sample transition
a = torch.from_numpy(np.random.uniform(low=-1, high=1, size=self.expl_env.action_space.shape[0]))
s_, r, d, _ = self.expl_env.step(a)
s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
r = r.item()
# store into replay
if self.args.reward_clip:
r = min(max(r, -1), 1)
if 'reward_std' in self.args:
r += np.random.normal(0, self.args.reward_std) # clip, add noise
if self.args.prioritized:
self.replay_buffer.append(self.s, a, r, d)
else:
self.replay_buffer.append(self.s, a, r, s_, d)
# render
if render:
self.expl_env.render()
# sample next state
self.advance_expl_env(s_, r, d)
self.t += 1
self.reset_expl_env()
self.print("finished collect init steps")
def sample_transition(self, render=False):
""" train one step """
if self.done:
self.reset_expl_env()
# sample transition
a = self.expl_action(self.s)
s_, r, d, _ = self.expl_env.step(a)
s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
r = r.item()
# store into replay
if self.args.reward_clip:
r = min(max(r, -1), 1)
if 'reward_std' in self.args:
r += np.random.normal(0, self.args.reward_std) # clip, add noise
if self.args.prioritized:
self.replay_buffer.append(self.s, a, r, d)
else:
self.replay_buffer.append(self.s, a, r, s_, d)
# render
if render:
self.expl_env.render()
# sample next state
self.advance_expl_env(s_, r, d)
# log status
if self.done:
self.exp_path['expl_episode_stats[t,return,discounted_return]'].csv_writerow(
[self.t, self.episode_return, self.episode_discounted_return])
if self.args.prioritized:
self.exp_path['debug[t,buffer_size,epsilon]'].csv_writerow(
[self.t, self.replay_buffer.data_buffer.index, self.epsilon()]
)
else:
self.exp_path['debug[t,buffer_size]'].csv_writerow(
[self.t, len(self.replay_buffer)]
)
self.t += 1
def evaluate(self, render=False, random=False):
""" evaluate current policy """
self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "evaluation started"])
self.print(f"evaluation started t={self.t}")
# init eval
s = self.eval_env.reset()
s = torch.from_numpy(s).to(dtype=torch.float32, device=self.args.device)
done = False
episode_len = 0
episode_return = 0
episode_discounted_return = 0
v_initials = []
v_initials.append(self.state_value_pred(s))
returns = []
discounted_returns = []
episode_lens = []
# start eval
for _ in trange(self.args.num_eval_steps_per_epoch):
if done:
# record data
returns.append(episode_return)
discounted_returns.append(episode_discounted_return)
episode_lens.append(episode_len)
# reset env
s = self.eval_env.reset()
s = torch.from_numpy(s).to(dtype=torch.float32, device=self.args.device)
v_initials.append(self.state_value_pred(s))
episode_len = 0
episode_return = 0
episode_discounted_return = 0
if len(returns) >= self.args.eval_max_episode:
# check if enough episodes simulated
break
# sample transition
a = self.eval_action(s)
if random:
a = np.random.randint(self.eval_env.action_space())
s_, r, d, _ = self.eval_env.step(a)
s_ = torch.from_numpy(s_).to(dtype=torch.float32, device=self.args.device)
r = r.item()
# advance env
s = s_
done = d
episode_return += r
if self.args.reward_clip:
r = min(max(r, -1), 1)
episode_discounted_return += r * self.args.discount ** episode_len
episode_len += 1
# render
if render:
print(f"s={s.shape} a={a} r={r} r_clip={r}")
self.eval_env.render()
# finished eval
self.print(f"returns={returns}\nmean={np.mean(returns) if len(returns) > 0 else 0}")
if len(returns) == 0:
self.exp_path['eval_episode_stats[t,v_init,return,discounted_return,length]'].csv_writerow(
[self.t, v_initials[0], episode_return, episode_discounted_return, episode_len])
self.exp_path['eval_mean_stats[t,num_episode,v_init,return,discounted_return,length]'].csv_writerow(
[self.t, 1, v_initials[0], episode_return, episode_discounted_return, episode_len])
else:
for V_init, R, discounted_R, length in zip(v_initials, returns, discounted_returns, episode_lens):
self.exp_path['eval_episode_stats[t,v_init,return,discounted_return,length]'].csv_writerow(
[self.t, V_init, R, discounted_R, length])
self.exp_path['eval_mean_stats[t,num_episode,v_init,return,discounted_return,length]'].csv_writerow(
[self.t, len(returns), np.mean(v_initials), np.mean(returns), np.mean(discounted_returns), np.mean(episode_lens)])
self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "evaluation finished"])
def save(self, name='trainer'):
""" save this trainer """
buffer = self.replay_buffer
expl_env = self.expl_env
eval_env = self.eval_env
# skip the things not saving
self.replay_buffer = None
self.expl_env = None
self.eval_env = None
self.exp_path["checkpoint"][f"{name}_{self.t}.pth"].save_model(self)
self.exp_path['timestamp[timenow,t,msg]'].csv_writerow([self.exp_path.now(), self.t, "model saved"])
self.replay_buffer = buffer
self.expl_env = expl_env
self.eval_env = eval_env
| APM150/Continuous_Envs_Experiments | mujoco/trainers/base_trainer.py | base_trainer.py | py | 10,727 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "components.filesys_manager.ExperimentPath",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "gym.make",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "component... |
22926496902 | import cv2
import math
# Source: https://richardpricejones.medium.com/drawing-a-rectangle-with-a-angle-using-opencv-c9284eae3380
# Made slight adjustments to color
def draw_angled_rec(x0, y0, width, height, angle, img, color):
_angle = angle * math.pi / 180.0
b = math.cos(_angle) * 0.5
a = math.sin(_angle) * 0.5
pt0 = (int(x0 - a * height - b * width),
int(y0 + b * height - a * width))
pt1 = (int(x0 + a * height - b * width),
int(y0 - b * height - a * width))
pt2 = (int(2 * x0 - pt0[0]), int(2 * y0 - pt0[1]))
pt3 = (int(2 * x0 - pt1[0]), int(2 * y0 - pt1[1]))
if color == 'green':
cv2.line(img, pt0, pt1, (0, 255, 0), 5)
cv2.line(img, pt1, pt2, (0, 255, 0), 5)
cv2.line(img, pt2, pt3, (0, 255, 0), 5)
cv2.line(img, pt3, pt0, (0, 255, 0), 5)
elif color == 'red':
cv2.line(img, pt0, pt1, (0, 0, 255), 5)
cv2.line(img, pt1, pt2, (0, 0, 255), 5)
cv2.line(img, pt2, pt3, (0, 0, 255), 5)
cv2.line(img, pt3, pt0, (0, 0, 255), 5)
else:
cv2.line(img, pt0, pt1, (255, 0, 255), 5)
cv2.line(img, pt1, pt2, (255, 0, 255), 5)
cv2.line(img, pt2, pt3, (255, 0, 255), 5)
cv2.line(img, pt3, pt0, (255, 0, 255), 5)
img = cv2.imread('minAreaRect_Test.png')
img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(img_gray, 150, 255, cv2.THRESH_BINARY)
# detect the contours on the binary image using cv2.CHAIN_APPROX_NONE
contours, hierarchy = cv2.findContours(image=thresh, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_NONE)
slender_rat = 3
min_width = 10
max_width = 120
min_len = 120
first = 1
image_copy = img.copy()
cv2.imwrite('minAreaRect_Test_Result.png',image_copy)
# cv2.drawContours(image=image_copy, contours=contours, contourIdx=-1, color=(0, 255, 0), thickness=2, lineType=cv2.LINE_AA)
# Used to add text on pattern
counter = 0
for cnt in contours:
if counter == 0: # First contour encompasses entire image
counter += 1
continue
heir = hierarchy[0][counter][3] # [next, previous, first child, parent].
if heir == 0:
rect = cv2.minAreaRect(cnt)
x = int(rect[0][0])
y = int(rect[0][1])
w = int(rect[1][0])
h = int(rect[1][1])
theta = int(rect[2])
draw_angled_rec(x, y, w, h, theta, image_copy, 'green')
image_tmp = cv2.putText(img=image_copy, text=str(theta)+'[deg]', org=(x, y), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
image_tmp = cv2.putText(img=image_copy, text='w='+str(w), org=(x, y+100), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
image_tmp = cv2.putText(img=image_copy, text='h='+str(h), org=(x, y+200), fontFace=cv2.FONT_HERSHEY_SIMPLEX, fontScale=3, color=(0,0,0), thickness=5)
cv2.imwrite('minAreaRect_Test_Result.png',image_copy)
counter += 1 | MatanPazi/opt_fabric_layout | minAreaRect_Test.py | minAreaRect_Test.py | py | 2,974 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "math.pi",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "math.cos",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "math.sin",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "cv2.line",
"line_number": 20,
"... |
15733506905 | __docformat__ = "reStructuredText"
__author__ = "davidh"
import logging
import os
import re
import sys
from collections import namedtuple
from datetime import datetime, timedelta
from glob import glob
LOG = logging.getLogger(__name__)
FILENAME_RE = r"HS_H08_(?P<date>\d{8})_(?P<time>\d{4})_(?P<band>B\d{2})_FLDK_(?P<res>R\d+)\.(?P<ext>.+)"
fn_re = re.compile(FILENAME_RE)
DT_FORMAT = "%Y%m%d_%H%M"
CASE_NAME_FORMAT = "{start}_{end}_{delta:02d}"
DataCase = namedtuple("DataCase", ["topic_title", "start", "end", "delta", "bands"])
### Guam Cases ###
guam_cases: dict = {}
# Kathy's Cases
guam_cases["Introduction"] = []
guam_cases["Introduction"].append(
DataCase(
"Introduction", datetime(2015, 7, 17, 21, 0, 0), datetime(2015, 7, 18, 20, 0, 0), timedelta(minutes=60), "all"
)
)
guam_cases["Introduction"].append(
DataCase(
"Introduction", datetime(2015, 7, 18, 1, 0, 0), datetime(2015, 7, 18, 3, 20, 0), timedelta(minutes=10), "all"
)
)
guam_cases["Introduction"].append(
DataCase(
"Introduction", datetime(2015, 7, 18, 14, 0, 0), datetime(2015, 7, 18, 16, 0, 0), timedelta(minutes=10), "all"
)
)
guam_cases["Introduction"].append(
DataCase("Introduction", datetime(2016, 3, 9, 0, 0, 0), datetime(2016, 3, 9, 4, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Introduction"].append(
DataCase(
"Introduction", datetime(2016, 3, 9, 1, 30, 0), datetime(2016, 3, 9, 4, 0, 0), timedelta(minutes=10), "all"
)
)
# Scott's Cases
guam_cases["Water Vapor"] = []
guam_cases["Water Vapor"].append(
DataCase(
"Water Vapor", datetime(2015, 10, 7, 0, 0, 0), datetime(2015, 10, 8, 0, 0, 0), timedelta(minutes=30), "all"
)
)
guam_cases["Water Vapor"].append(
DataCase(
"Water Vapor", datetime(2016, 2, 19, 19, 0, 0), datetime(2016, 2, 20, 5, 0, 0), timedelta(minutes=60), "all"
)
)
# Tim's Cases
guam_cases["Weighting Functions"] = []
guam_cases["Weighting Functions"].append(
DataCase(
"Weighting Functions",
datetime(2015, 9, 20, 2, 30, 0),
datetime(2015, 9, 20, 2, 30, 0),
timedelta(minutes=0),
"all",
)
)
guam_cases["Weighting Functions"].append(
DataCase(
"Weighting Functions",
datetime(2015, 9, 20, 0, 0, 0),
datetime(2015, 9, 20, 6, 0, 0),
timedelta(minutes=60),
"all",
)
)
guam_cases["Weighting Functions"].append(
DataCase(
"Weighting Functions",
datetime(2015, 9, 20, 1, 30, 0),
datetime(2015, 9, 20, 2, 30, 0),
timedelta(minutes=10),
"all",
)
)
guam_cases["Weighting Functions"].append(
DataCase(
"Weighting Functions",
datetime(2015, 9, 20, 1, 0, 0),
datetime(2015, 9, 20, 3, 0, 0),
timedelta(minutes=10),
"all",
)
)
# Jordan's Cases
guam_cases["Extra"] = []
guam_cases["Extra"].append(
DataCase("Extra", datetime(2015, 8, 17, 12, 0, 0), datetime(2015, 8, 18, 12, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Extra"].append(
DataCase("Extra", datetime(2015, 8, 17, 22, 0, 0), datetime(2015, 8, 18, 1, 0, 0), timedelta(minutes=10), "all")
)
guam_cases["Extra"].append(
DataCase("Extra", datetime(2015, 8, 24, 15, 0, 0), datetime(2015, 8, 15, 21, 0, 0), timedelta(minutes=60), "all")
)
guam_cases["Extra"].append(
DataCase("Extra", datetime(2015, 8, 25, 2, 0, 0), datetime(2015, 8, 25, 5, 0, 0), timedelta(minutes=10), "all")
)
def main():
    """Mirror AHI geotiff files into per-case directories via hardlinks.

    For every configured case, creates ``<base>/<section>/<case_name>/`` and
    hardlinks each matching geotiff (one time step per ``case.delta``) into it.
    Returns None (exit status 0 via ``sys.exit(main())``).
    """
    import argparse
    parser = argparse.ArgumentParser(description="Regenerate or generate mirrored AHI data structure")
    parser.add_argument(
        "base_ahi_dir",
        default="/odyssey/isis/tmp/davidh/sift_data/ahi",
        help="Base AHI directory for the geotiff data files " "(next child directory is the full dated directory)",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        dest="verbosity",
        action="count",
        default=int(os.environ.get("VERBOSITY", 2)),
        help="each occurrence increases verbosity 1 level through " "ERROR-WARNING-Info-DEBUG (default Info)",
    )
    parser.add_argument("--overwrite", action="store_true", help="Overwrite existing hardlinks")
    args = parser.parse_args()
    levels = [logging.ERROR, logging.WARN, logging.INFO, logging.DEBUG]
    # Cap verbosity at DEBUG no matter how many -v flags were given.
    level = levels[min(3, args.verbosity)]
    logging.basicConfig(level=level)
    if not os.path.isdir(args.base_ahi_dir):
        raise NotADirectoryError("Directory does not exist: %s" % (args.base_ahi_dir,))
    # Glob patterns below are relative to the base directory.
    os.chdir(args.base_ahi_dir)
    for section_name, cases in guam_cases.items():
        for case in cases:
            start_str = case.start.strftime(DT_FORMAT)
            end_str = case.end.strftime(DT_FORMAT)
            # Note this only uses the minutes!
            case_name = CASE_NAME_FORMAT.format(
                start=start_str, end=end_str, delta=int(case.delta.total_seconds() / 60.0)
            )
            case_dir = os.path.join(args.base_ahi_dir, section_name, case_name)
            if not os.path.isdir(case_dir):
                LOG.debug("Creating case directory: %s", case_dir)
                os.makedirs(case_dir)
            else:
                # An existing case dir is treated as already-mirrored: skip it.
                LOG.error("Case directory already exists: %s", case_dir)
                continue
            # Walk the time range one delta at a time, linking matching files.
            t = case.start
            while t <= case.end:
                glob_pattern = t.strftime("%Y_%m_%d_%j/%H%M/*_%Y%m%d_%H%M_B??_*.merc.tif")
                t = t + case.delta
                matches = glob(glob_pattern)
                if len(matches) == 0:
                    LOG.error("Zero files found matching pattern: %s", glob_pattern)
                    continue
                for input_pathname in matches:
                    fn = os.path.basename(input_pathname)
                    link_path = os.path.join(case_dir, fn)
                    if os.path.exists(link_path) and not args.overwrite:
                        LOG.debug("Link '%s' already exists, skipping...", link_path)
                        continue
                    LOG.debug("Creating hardlink '%s' -> '%s'", link_path, input_pathname)
                    os.link(input_pathname, link_path)
                # A zero delta means a single snapshot; avoid an infinite loop.
                if int(case.delta.total_seconds()) == 0:
                    LOG.debug("Only one file needed to meet delta of 0")
                    break
    LOG.info("done mirroring files")
if __name__ == "__main__":
    # main() returns None on success -> exit status 0.
    sys.exit(main())
| ssec/sift | uwsift/project/organize_data_topics.py | organize_data_topics.py | py | 6,413 | python | en | code | 45 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime... |
29290663147 | from django.db import models
from wagtail.admin.edit_handlers import MultiFieldPanel, RichTextFieldPanel, StreamFieldPanel
from wagtail.core.fields import RichTextField, StreamField
from wagtail.snippets.models import register_snippet
from ..modules import text_processing
from .. import configurations
from ..blogs.blocks import SectionBlock
@register_snippet
class BlogPost(models.Model):
    """A blog post snippet: rich-text title/summary/intro/conclusion plus a
    stream of section blocks, editable as a single 'Post Content' panel."""
    # Title is rich text with no formatting features enabled (plain text only).
    post_title = RichTextField(
        features=[], blank=False, null=True,
    )
    post_summary = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=False, null=True,
    )
    post_introduction = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=True, null=True,
    )
    post_conclusion = RichTextField(
        features=configurations.RICHTEXT_FEATURES, blank=True, null=True,
    )
    # Body sections; each stream child is a SectionBlock.
    sections = StreamField(
        [
            ('section', SectionBlock()),
        ], blank=False
    )
    # Wagtail admin edit form layout.
    panels = [
        MultiFieldPanel(
            [
                RichTextFieldPanel('post_title'),
                RichTextFieldPanel('post_summary'),
                RichTextFieldPanel('post_introduction'),
                StreamFieldPanel('sections'),
                RichTextFieldPanel('post_conclusion'),
            ], heading='Post Content'
        ),
    ]
    @property
    def sections_with_title(self):
        """Return only the stream sections whose 'title' value is truthy."""
        # Comprehension replaces the previous manual append loop (same result).
        return [section for section in self.sections if section.value['title']]
    def __str__(self):
        # Strip the rich-text markup so admin listings show plain text.
        return text_processing.html_to_str(
            self.post_title
        )
| VahediRepositories/AllDota | dotahub/home/blogs/models.py | models.py | py | 1,626 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "wagtail.core.fields.RichTextField",
"line_number": 13,
"usage_type": "call"
},
{
"ap... |
7182836122 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @Author: José Sánchez-Gallego (gallegoj@uw.edu)
# @Date: 2023-01-19
# @Filename: test_callback.py
# @License: BSD 3-clause (http://www.opensource.org/licenses/BSD-3-Clause)
import unittest.mock
import click
from click.testing import CliRunner
from unclick.core import build_command_string
@click.command()
@click.argument("ARG1", type=str)
@click.argument("ARG2", type=int, required=False)
@click.option("--flag1", "-f", is_flag=True, help="A flag.")
def my_command1(*args, **kwrgs):
    """Test command."""
    # Intentionally a no-op: the tests below patch this command's callback
    # and only inspect the arguments click parses and forwards.
    return
def _command_invoke(command: click.Command, string: str):
    """Checks that command is called and returns a mock of the callback."""
    # Patch the callback so the command body never runs; we only verify the
    # parsed arguments click would pass to it.
    with unittest.mock.patch.object(command, "callback") as mock_callback:
        runner = CliRunner()
        # Strip the leading command name: CliRunner.invoke expects args only.
        result = runner.invoke(command, string[len(command.name or "") :])
        assert result.exit_code == 0
        mock_callback.assert_called()
    return mock_callback
def test_callback():
    # Round-trip: build a command string with all args, invoke, check callback kwargs.
    command_string = build_command_string(my_command1, "hi", 2, flag1=True)
    mock_callback = _command_invoke(my_command1, command_string)
    mock_callback.assert_called_once_with(arg1="hi", arg2=2, flag1=True)
def test_callback2():
    # Omitted optional argument/flag must arrive as their defaults (None/False).
    command_string = build_command_string(my_command1, "hi")
    mock_callback = _command_invoke(my_command1, command_string)
    mock_callback.assert_called_once_with(arg1="hi", arg2=None, flag1=False)
def test_callback_string_with_spaces():
    # A value containing spaces must survive quoting/parsing intact.
    command_string = build_command_string(my_command1, "hi how are you")
    mock_callback = _command_invoke(my_command1, command_string)
    mock_callback.assert_called_once_with(arg1="hi how are you", arg2=None, flag1=False)
| albireox/unclick | tests/test_callback.py | test_callback.py | py | 1,732 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "click.command",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "click.argument",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "click.option",
"line_... |
27549684390 | """
Query builder examples.
NOTES:
# Infix notation (natural to humans)
NOT ((FROM='11' OR TO="22" OR TEXT="33") AND CC="44" AND BCC="55")
# Prefix notation (Polish notation, IMAP version)
NOT (((OR OR FROM "11" TO "22" TEXT "33") CC "44" BCC "55"))
# Python query builder
NOT(AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55'))
# python to prefix notation steps:
1. OR(1=11, 2=22, 3=33) ->
"(OR OR FROM "11" TO "22" TEXT "33")"
2. AND("(OR OR FROM "11" TO "22" TEXT "33")", cc='44', bcc='55') ->
"AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55')"
3. NOT("AND(OR(from_='11', to='22', text='33'), cc='44', bcc='55')") ->
"NOT (((OR OR FROM "1" TO "22" TEXT "33") CC "44" BCC "55"))"
"""
import datetime as dt
from imap_tools import AND, OR, NOT, A, H, U
# Each example below builds an IMAP search string; the expected result is
# shown in the comment that follows it.
# date in the date list (date=date1 OR date=date3 OR date=date2)
q1 = OR(date=[dt.date(2019, 10, 1), dt.date(2019, 10, 10), dt.date(2019, 10, 15)])
# '(OR OR ON 1-Oct-2019 ON 10-Oct-2019 ON 15-Oct-2019)'
# date not in the date list (NOT(date=date1 OR date=date3 OR date=date2))
q2 = NOT(OR(date=[dt.date(2019, 10, 1), dt.date(2019, 10, 10), dt.date(2019, 10, 15)]))
# 'NOT ((OR OR ON 1-Oct-2019 ON 10-Oct-2019 ON 15-Oct-2019))'
# subject contains "hello" AND date greater than or equal dt.date(2019, 10, 10)
q3 = A(subject='hello', date_gte=dt.date(2019, 10, 10))
# '(SUBJECT "hello" SINCE 10-Oct-2019)'
# from contains one of the address parts
q4 = OR(from_=["@spam.ru", "@tricky-spam.ru"])
# '(OR FROM "@spam.ru" FROM "@tricky-spam.ru")'
# marked as seen and not flagged
q5 = AND(seen=True, flagged=False)
# '(SEEN UNFLAGGED)'
# (text contains tag15 AND subject contains tag15) OR (text contains tag10 AND subject contains tag10)
q6 = OR(AND(text='tag15', subject='tag15'), AND(text='tag10', subject='tag10'))
# '(OR (TEXT "tag15" SUBJECT "tag15") (TEXT "tag10" SUBJECT "tag10"))'
# (text contains tag15 OR subject contains tag15) OR (text contains tag10 OR subject contains tag10)
q7 = OR(OR(text='tag15', subject='tag15'), OR(text='tag10', subject='tag10'))
# '(OR (OR TEXT "tag15" SUBJECT "tag15") (OR TEXT "tag10" SUBJECT "tag10"))'
# header IsSpam contains '++' AND header CheckAntivirus contains '-'
q8 = A(header=[H('IsSpam', '++'), H('CheckAntivirus', '-')])
# '(HEADER "IsSpam" "++" HEADER "CheckAntivirus" "-")'
# UID range
q9 = A(uid=U('1034', '*'))
# '(UID 1034:*)'
# complex from README
q10 = A(OR(from_='from@ya.ru', text='"the text"'), NOT(OR(A(answered=False), A(new=True))), to='to@ya.ru')
# '((OR FROM "from@ya.ru" TEXT "\\"the text\\"") NOT ((OR (UNANSWERED) (NEW))) TO "to@ya.ru")'
| ikvk/imap_tools | examples/search.py | search.py | py | 2,613 | python | en | code | 608 | github-code | 36 | [
{
"api_name": "imap_tools.OR",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "imap_tools.NOT",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "imap_tools.OR",
"line_... |
15857685473 | # -*- coding: utf-8 -*-
import os
import sys
import webbrowser
from invoke import task
# Sphinx source directory and its build output directory.
docs_dir = 'docs'
build_dir = os.path.join(docs_dir, '_build')
@task
def readme(ctx, browse=False):
    """Render README.rst to README.html (requires docutils' rst2html.py)."""
    ctx.run("rst2html.py README.rst > README.html")
    if browse:
        webbrowser.open_new_tab('README.html')
def build_docs(ctx, browse):
    """Run a one-shot sphinx build, optionally opening the result in a browser."""
    command = "sphinx-build %s %s" % (docs_dir, build_dir)
    ctx.run(command, echo=True)
    if browse:
        browse_docs(ctx)
@task
def clean_docs(ctx):
    """Remove the generated documentation build directory."""
    ctx.run('rm -rf %s' % build_dir)
@task
def browse_docs(ctx):
    """Open the built documentation's index page in a new browser tab."""
    path = os.path.join(build_dir, 'index.html')
    webbrowser.open_new_tab(path)
@task
def docs(ctx, clean=False, browse=False, watch=False):
    """Build the docs."""
    if clean:
        clean_docs(ctx)
    # Either rebuild continuously on changes, or do a single build.
    builder = watch_docs if watch else build_docs
    builder(ctx, browse=browse)
@task
def watch_docs(ctx, browse=False, port=1234):
    """Run build the docs when a file changes."""
    # sphinx-autobuild is an optional dev dependency; fail with guidance if absent.
    try:
        import sphinx_autobuild  # noqa
    except ImportError:
        print('ERROR: watch task requires the sphinx_autobuild package.')
        print('Install it with:')
        print('    pip install sphinx-autobuild')
        sys.exit(1)
    # '--open-browser' makes sphinx-autobuild launch the page itself.
    ctx.run('sphinx-autobuild {0} --port={port} {1} {2}'.format(
        '--open-browser' if browse else '', docs_dir, build_dir, port=port),
        echo=True, pty=True)
| CenterForOpenScience/COSDev | tasks.py | tasks.py | py | 1,383 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "webbrowser.open_new_tab",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "invoke.task",
"li... |
74160129703 | '''
Algorithm: just count how many characters(frequency more than one regard it as one)
'''
#!/bin/python3
import sys
from collections import Counter
def stringConstruction(s):
    """Return the number of distinct characters in ``s``.

    Per the header note, a character occurring more than once still counts
    as one.  ``len(set(s))`` expresses this directly and replaces the
    roundabout ``len(Counter(s).values())`` (identical result).
    """
    return len(set(s))
if __name__ == "__main__":
    # q = number of queries; each subsequent line is one string to process.
    q = int(input().strip())
    for a0 in range(q):
        s = input().strip()
        result = stringConstruction(s)
        print(result)
| CodingProgrammer/HackerRank_Python | (Strings)String_Construction(Counter_FK1).py | (Strings)String_Construction(Counter_FK1).py | py | 413 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 10,
"usage_type": "call"
}
] |
33531980673 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by Lucas Sinclair and Paul Rougieux
JRC biomass Project.
Unit D1 Bioeconomy.
"""
# Built-in modules #
# Third party modules #
# First party modules #
import autopaths
from autopaths import Path
from autopaths.auto_paths import AutoPaths
from autopaths.tmp_path import new_temp_dir
from plumbing.cache import property_cached
from tqdm import tqdm
# Internal modules #
from cbmcfs3_runner.reports.scenario import ScenarioReport
from cbmcfs3_runner.pump.dataframes import concat_as_df
###############################################################################
class Scenario(object):
    """
    This object represents a modification of the input data for the purpose.
    A scenario can be harvest and economic scenario.
    Actual scenarios should inherit from this class.
    """
    # Sub-paths auto-created under base_dir by AutoPaths (one entry per line).
    all_paths = """
    /logs_summary.md
    """
    # Iterating a scenario yields its country runners.
    def __iter__(self): return iter(self.runners.values())
    def __len__(self): return len(self.runners.values())
    def __getitem__(self, key):
        """Return a runner based on a country code."""
        return self.runners[key]
    def __init__(self, continent):
        # Save parent #
        self.continent = continent
        # This scenario dir #
        # NOTE(review): relies on `self.short_name` being defined by the
        # concrete subclass -- confirm when adding new scenarios.
        self.base_dir = Path(self.scenarios_dir + self.short_name + '/')
        # Automatically access paths based on a string of many subpaths #
        self.paths = AutoPaths(self.base_dir, self.all_paths)
    def __repr__(self):
        return '%s object with %i runners' % (self.__class__, len(self))
    def __call__(self, verbose=False):
        """Run every runner of every country, then compile the log summary.
        NOTE(review): `self.runners` appears to map country code -> list of
        runner steps -- confirm in the concrete subclass."""
        for code, steps in tqdm(self.runners.items()):
            for runner in steps:
                # Keep going on failures; problems surface in the log summary.
                runner(interrupt_on_error=False, verbose=verbose)
        self.compile_log_tails()
    @property
    def runners(self):
        # Concrete scenarios must provide the country-code -> runners mapping.
        msg = "You should inherit from this class and implement this property."
        raise NotImplementedError(msg)
    @property
    def scenarios_dir(self):
        """Shortcut to the scenarios directory."""
        return self.continent.scenarios_dir
    @property_cached
    def report(self):
        """Lazily-built report for this scenario (computed once, then cached)."""
        return ScenarioReport(self)
    def compile_log_tails(self, step=-1):
        """Concatenate the log tail of each runner (at the given step index)
        into `logs_summary.md`."""
        summary = self.paths.summary
        summary.open(mode='w')
        summary.handle.write("# Summary of all log file tails\n\n")
        summary.handle.writelines(r[step].tail for r in self.runners.values() if r[step])
        summary.close()
    # ------------------------------ Others ----------------------------------#
    def make_csv_zip(self, csv_name, dest_dir):
        """
        Will make a zip file will the specified CSV file from every country
        together and place it in the given destination directory.
        For instance you can do:
            >>> f = scenario.make_csv_zip('ipcc_pools', '~/exports/for_sarah/')
            >>> print(f)
        """
        # Files to put in the zip #
        files = {iso: rl[-1].post_processor.csv_maker.paths(csv_name)
                 for iso, rl in self.runners.items()}
        # Actual name of CSV file #
        csv_full_name = next(iter(files.items()))[1].name
        # Destination directory #
        dest_dir = Path(dest_dir)
        # If it's not a directory #
        assert isinstance(dest_dir, autopaths.dir_path.DirectoryPath)
        # Destination zip file #
        dest_zip = dest_dir + csv_full_name + '.zip'
        # Temporary directory #
        tmp_dir = new_temp_dir()
        zip_dir = tmp_dir + csv_full_name + '/'
        zip_dir.create()
        # Copy #
        for iso, f in files.items():
            try:
                f.copy(zip_dir + iso + '.csv')
            except Exception as e:
                # NOTE(review): broad catch is deliberate best-effort -- a
                # country with missing data is reported but does not abort.
                print("no data in ", iso)
                print('Error loading data: '+ str(e))
        # Compress #
        zip_dir.zip_to(dest_zip)
        # Remove #
        tmp_dir.remove()
        # Return #
        return dest_zip
    def concat_as_df(self, *args, **kwargs):
        """A data frame with many countries together, crucial for analysis"""
        return concat_as_df(self, *args, **kwargs)
| xapple/cbmcfs3_runner | cbmcfs3_runner/scenarios/base_scen.py | base_scen.py | py | 4,131 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "autopaths.Path",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "autopaths.auto_paths.AutoPaths",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "cbmcfs3_runne... |
22330605569 | import asyncio
import logging
import os
import re
import warnings
from asyncio import Future
from functools import wraps
from inspect import signature
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union
from tqdm.auto import tqdm
from rubrix._constants import (
DATASET_NAME_REGEX_PATTERN,
DEFAULT_API_KEY,
RUBRIX_WORKSPACE_HEADER_NAME,
)
from rubrix.client.apis.datasets import Datasets
from rubrix.client.apis.metrics import MetricsAPI
from rubrix.client.apis.searches import Searches
from rubrix.client.datasets import (
Dataset,
DatasetForText2Text,
DatasetForTextClassification,
DatasetForTokenClassification,
)
from rubrix.client.metrics.models import MetricResults
from rubrix.client.models import (
BulkResponse,
Record,
Text2TextRecord,
TextClassificationRecord,
TokenClassificationRecord,
)
from rubrix.client.sdk.client import AuthenticatedClient
from rubrix.client.sdk.commons.api import async_bulk
from rubrix.client.sdk.commons.errors import RubrixClientError
from rubrix.client.sdk.datasets import api as datasets_api
from rubrix.client.sdk.datasets.models import CopyDatasetRequest, TaskType
from rubrix.client.sdk.metrics import api as metrics_api
from rubrix.client.sdk.metrics.models import MetricInfo
from rubrix.client.sdk.text2text import api as text2text_api
from rubrix.client.sdk.text2text.models import (
CreationText2TextRecord,
Text2TextBulkData,
Text2TextQuery,
)
from rubrix.client.sdk.text_classification import api as text_classification_api
from rubrix.client.sdk.text_classification.models import (
CreationTextClassificationRecord,
LabelingRule,
LabelingRuleMetricsSummary,
TextClassificationBulkData,
TextClassificationQuery,
)
from rubrix.client.sdk.token_classification import api as token_classification_api
from rubrix.client.sdk.token_classification.models import (
CreationTokenClassificationRecord,
TokenClassificationBulkData,
TokenClassificationQuery,
)
from rubrix.client.sdk.users import api as users_api
from rubrix.client.sdk.users.models import User
from rubrix.utils import setup_loop_in_thread
_LOGGER = logging.getLogger(__name__)
class _RubrixLogAgent:
    """Background agent that schedules ``Api.log_async`` calls onto a private
    asyncio event loop running in a separate thread."""
    def __init__(self, api: "Api"):
        self.__api__ = api
        # Dedicated loop + thread so log() can be fire-and-forget.
        self.__loop__, self.__thread__ = setup_loop_in_thread()
    @staticmethod
    async def __log_internal__(api: "Api", *args, **kwargs):
        # Await the real async bulk log; log any failure before re-raising it
        # so the error also propagates to the Future returned by log().
        try:
            return await api.log_async(*args, **kwargs)
        except Exception as ex:
            _LOGGER.error(
                f"Cannot log data {args, kwargs}\n"
                f"Error of type {type(ex)}\n: {ex}. ({ex.args})"
            )
            raise ex
    def log(self, *args, **kwargs) -> Future:
        # NOTE(review): run_coroutine_threadsafe actually returns a
        # concurrent.futures.Future; the asyncio.Future annotation is loose.
        return asyncio.run_coroutine_threadsafe(
            self.__log_internal__(self.__api__, *args, **kwargs), self.__loop__
        )
class Api:
    """Stateful client for the Rubrix REST API.

    Holds an authenticated HTTP client, the resolved user and active
    workspace, plus a background agent used by :meth:`log`.  The module-level
    functions below delegate to a singleton instance of this class.
    """
    # Larger sizes will trigger a warning
    _MAX_CHUNK_SIZE = 5000
    def __init__(
        self,
        api_url: Optional[str] = None,
        api_key: Optional[str] = None,
        workspace: Optional[str] = None,
        timeout: int = 60,
        extra_headers: Optional[Dict[str, str]] = None,
    ):
        """Init the Python client.
        We will automatically init a default client for you when calling other client methods.
        The arguments provided here will overwrite your corresponding environment variables.
        Args:
            api_url: Address of the REST API. If `None` (default) and the env variable ``RUBRIX_API_URL`` is not set,
                it will default to `http://localhost:6900`.
            api_key: Authentification key for the REST API. If `None` (default) and the env variable ``RUBRIX_API_KEY``
                is not set, it will default to `rubrix.apikey`.
            workspace: The workspace to which records will be logged/loaded. If `None` (default) and the
                env variable ``RUBRIX_WORKSPACE`` is not set, it will default to the private user workspace.
            timeout: Wait `timeout` seconds for the connection to timeout. Default: 60.
            extra_headers: Extra HTTP headers sent to the server. You can use this to customize
                the headers of Rubrix client requests, like additional security restrictions. Default: `None`.
        Examples:
            >>> import rubrix as rb
            >>> rb.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y")
            >>> # Customizing request headers
            >>> headers = {"X-Client-id":"id","X-Secret":"secret"}
            >>> rb.init(api_url="http://localhost:9090", api_key="4AkeAPIk3Y", extra_headers=headers)
        """
        api_url = api_url or os.getenv("RUBRIX_API_URL", "http://localhost:6900")
        # Checking that the api_url does not end in '/'
        api_url = re.sub(r"\/$", "", api_url)
        api_key = api_key or os.getenv("RUBRIX_API_KEY", DEFAULT_API_KEY)
        workspace = workspace or os.getenv("RUBRIX_WORKSPACE")
        headers = extra_headers or {}
        self._client: AuthenticatedClient = AuthenticatedClient(
            base_url=api_url,
            token=api_key,
            timeout=timeout,
            headers=headers.copy(),
        )
        # Resolve the authenticated user eagerly; also validates credentials.
        self._user: User = users_api.whoami(client=self._client)
        if workspace is not None:
            self.set_workspace(workspace)
        # Background agent that performs the actual asynchronous logging.
        self._agent = _RubrixLogAgent(self)
    def __del__(self):
        # Release the HTTP client and background agent when the Api is dropped.
        if hasattr(self, "_client"):
            del self._client
        if hasattr(self, "_agent"):
            del self._agent
    @property
    def client(self):
        """The underlying authenticated client"""
        return self._client
    @property
    def datasets(self) -> Datasets:
        """Dataset-level API (delete records, settings, ...)."""
        return Datasets(client=self._client)
    @property
    def searches(self):
        """Search API bound to this client."""
        return Searches(client=self._client)
    @property
    def metrics(self):
        """Metrics API bound to this client."""
        return MetricsAPI(client=self.client)
    def set_workspace(self, workspace: str):
        """Sets the active workspace.
        Args:
            workspace: The new workspace
        """
        if workspace is None:
            raise Exception("Must provide a workspace")
        if workspace != self.get_workspace():
            if workspace == self._user.username:
                # The private user workspace needs no header at all.
                self._client.headers.pop(RUBRIX_WORKSPACE_HEADER_NAME, workspace)
            elif (
                self._user.workspaces is not None
                and workspace not in self._user.workspaces
            ):
                raise Exception(f"Wrong provided workspace {workspace}")
            self._client.headers[RUBRIX_WORKSPACE_HEADER_NAME] = workspace
    def get_workspace(self) -> str:
        """Returns the name of the active workspace.
        Returns:
            The name of the active workspace as a string.
        """
        # Fall back to the private user workspace when no header is set.
        return self._client.headers.get(
            RUBRIX_WORKSPACE_HEADER_NAME, self._user.username
        )
    def copy(self, dataset: str, name_of_copy: str, workspace: str = None):
        """Creates a copy of a dataset including its tags and metadata
        Args:
            dataset: Name of the source dataset
            name_of_copy: Name of the copied dataset
            workspace: If provided, dataset will be copied to that workspace
        Examples:
            >>> import rubrix as rb
            >>> rb.copy("my_dataset", name_of_copy="new_dataset")
            >>> rb.load("new_dataset")
        """
        datasets_api.copy_dataset(
            client=self._client,
            name=dataset,
            json_body=CopyDatasetRequest(name=name_of_copy, target_workspace=workspace),
        )
    def delete(self, name: str) -> None:
        """Deletes a dataset.
        Args:
            name: The dataset name.
        Examples:
            >>> import rubrix as rb
            >>> rb.delete(name="example-dataset")
        """
        datasets_api.delete_dataset(client=self._client, name=name)
    def log(
        self,
        records: Union[Record, Iterable[Record], Dataset],
        name: str,
        tags: Optional[Dict[str, str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        chunk_size: int = 500,
        verbose: bool = True,
        background: bool = False,
    ) -> Union[BulkResponse, Future]:
        """Logs Records to Rubrix.
        The logging happens asynchronously in a background thread.
        Args:
            records: The record, an iterable of records, or a dataset to log.
            name: The dataset name.
            tags: A dictionary of tags related to the dataset.
            metadata: A dictionary of extra info for the dataset.
            chunk_size: The chunk size for a data bulk.
            verbose: If True, shows a progress bar and prints out a quick summary at the end.
            background: If True, we will NOT wait for the logging process to finish and return an ``asyncio.Future``
                object. You probably want to set ``verbose`` to False in that case.
        Returns:
            Summary of the response from the REST API.
            If the ``background`` argument is set to True, an ``asyncio.Future`` will be returned instead.
        Examples:
            >>> import rubrix as rb
            >>> record = rb.TextClassificationRecord(
            ...     text="my first rubrix example",
            ...     prediction=[('spam', 0.8), ('ham', 0.2)]
            ... )
            >>> rb.log(record, name="example-dataset")
            1 records logged to http://localhost:6900/datasets/rubrix/example-dataset
            BulkResponse(dataset='example-dataset', processed=1, failed=0)
            >>>
            >>> # Logging records in the background
            >>> rb.log(record, name="example-dataset", background=True, verbose=False)
            <Future at 0x7f675a1fffa0 state=pending>
        """
        # Delegate to the background agent; it returns a Future immediately.
        future = self._agent.log(
            records=records,
            name=name,
            tags=tags,
            metadata=metadata,
            chunk_size=chunk_size,
            verbose=verbose,
        )
        if background:
            return future
        # Synchronous mode: block on the result, cancelling on interruption.
        try:
            return future.result()
        finally:
            future.cancel()
    async def log_async(
        self,
        records: Union[Record, Iterable[Record], Dataset],
        name: str,
        tags: Optional[Dict[str, str]] = None,
        metadata: Optional[Dict[str, Any]] = None,
        chunk_size: int = 500,
        verbose: bool = True,
    ) -> BulkResponse:
        """Logs Records to Rubrix with asyncio.
        Args:
            records: The record, an iterable of records, or a dataset to log.
            name: The dataset name.
            tags: A dictionary of tags related to the dataset.
            metadata: A dictionary of extra info for the dataset.
            chunk_size: The chunk size for a data bulk.
            verbose: If True, shows a progress bar and prints out a quick summary at the end.
        Returns:
            Summary of the response from the REST API
        Examples:
            >>> # Log asynchronously from your notebook
            >>> import asyncio
            >>> import rubrix as rb
            >>> from rubrix.utils import setup_loop_in_thread
            >>> loop, _ = setup_loop_in_thread()
            >>> future_response = asyncio.run_coroutine_threadsafe(
            ...     rb.log_async(my_records, dataset_name), loop
            ... )
        """
        tags = tags or {}
        metadata = metadata or {}
        if not name:
            raise InputValueError("Empty dataset name has been passed as argument.")
        if not re.match(DATASET_NAME_REGEX_PATTERN, name):
            raise InputValueError(
                f"Provided dataset name {name} does not match the pattern {DATASET_NAME_REGEX_PATTERN}. "
                "Please, use a valid name for your dataset"
            )
        if chunk_size > self._MAX_CHUNK_SIZE:
            _LOGGER.warning(
                """The introduced chunk size is noticeably large, timeout errors may occur.
                Consider a chunk size smaller than %s""",
                self._MAX_CHUNK_SIZE,
            )
        # Normalize a single record into a one-element list.
        if isinstance(records, Record.__args__):
            records = [records]
        records = list(records)
        try:
            record_type = type(records[0])
        except IndexError:
            raise InputValueError("Empty record list has been passed as argument.")
        # Pick the SDK request/record classes matching the record type.
        if record_type is TextClassificationRecord:
            bulk_class = TextClassificationBulkData
            creation_class = CreationTextClassificationRecord
        elif record_type is TokenClassificationRecord:
            bulk_class = TokenClassificationBulkData
            creation_class = CreationTokenClassificationRecord
        elif record_type is Text2TextRecord:
            bulk_class = Text2TextBulkData
            creation_class = CreationText2TextRecord
        else:
            raise InputValueError(
                f"Unknown record type {record_type}. Available values are {Record.__args__}"
            )
        # Upload in chunks, accumulating processed/failed counts.
        processed, failed = 0, 0
        progress_bar = tqdm(total=len(records), disable=not verbose)
        for i in range(0, len(records), chunk_size):
            chunk = records[i : i + chunk_size]
            response = await async_bulk(
                client=self._client,
                name=name,
                json_body=bulk_class(
                    tags=tags,
                    metadata=metadata,
                    records=[creation_class.from_client(r) for r in chunk],
                ),
            )
            processed += response.parsed.processed
            failed += response.parsed.failed
            progress_bar.update(len(chunk))
        progress_bar.close()
        # TODO: improve logging policy in library
        if verbose:
            _LOGGER.info(
                f"Processed {processed} records in dataset {name}. Failed: {failed}"
            )
            workspace = self.get_workspace()
            if (
                not workspace
            ):  # Just for backward comp. with datasets with no workspaces
                workspace = "-"
            print(
                f"{processed} records logged to {self._client.base_url}/datasets/{workspace}/{name}"
            )
        # Creating a composite BulkResponse with the total processed and failed
        return BulkResponse(dataset=name, processed=processed, failed=failed)
    def delete_records(
        self,
        name: str,
        query: Optional[str] = None,
        ids: Optional[List[Union[str, int]]] = None,
        discard_only: bool = False,
        discard_when_forbidden: bool = True,
    ) -> Tuple[int, int]:
        """Delete records from a Rubrix dataset.
        Args:
            name: The dataset name.
            query: An ElasticSearch query with the `query string syntax
                <https://rubrix.readthedocs.io/en/stable/guides/queries.html>`_
            ids: If provided, deletes dataset records with given ids.
            discard_only: If `True`, matched records won't be deleted. Instead, they will be marked as `Discarded`
            discard_when_forbidden: Only super-user or dataset creator can delete records from a dataset.
                So, running "hard" deletion for other users will raise an `ForbiddenApiError` error.
                If this parameter is `True`, the client API will automatically try to mark as ``Discarded``
                records instead. Default, `True`
        Returns:
            The total of matched records and real number of processed errors. These numbers could not
            be the same if some data conflicts are found during operations (some matched records change during
            deletion).
        Examples:
            >>> ## Delete by id
            >>> import rubrix as rb
            >>> rb.delete_records(name="example-dataset", ids=[1,3,5])
            >>> ## Discard records by query
            >>> import rubrix as rb
            >>> rb.delete_records(name="example-dataset", query="metadata.code=33", discard_only=True)
        """
        return self.datasets.delete_records(
            name=name,
            query=query,
            ids=ids,
            mark_as_discarded=discard_only,
            discard_when_forbidden=discard_when_forbidden,
        )
    def load(
        self,
        name: str,
        query: Optional[str] = None,
        ids: Optional[List[Union[str, int]]] = None,
        limit: Optional[int] = None,
        id_from: Optional[str] = None,
        as_pandas=None,
    ) -> Dataset:
        """Loads a Rubrix dataset.
        Parameters:
        -----------
            name:
                The dataset name.
            query:
                An ElasticSearch query with the
                `query string syntax <https://rubrix.readthedocs.io/en/stable/guides/queries.html>`_
            ids:
                If provided, load dataset records with given ids.
            limit:
                The number of records to retrieve.
            id_from:
                If provided, starts gathering the records starting from that Record. As the Records returned with the
                load method are sorted by ID, ´id_from´ can be used to load using batches.
            as_pandas:
                DEPRECATED! To get a pandas DataFrame do ``rb.load('my_dataset').to_pandas()``.
        Returns:
        --------
            A Rubrix dataset.
        Examples:
            **Basic Loading**: load the samples sorted by their ID
            >>> import rubrix as rb
            >>> dataset = rb.load(name="example-dataset")
            **Iterate over a large dataset:**
                When dealing with a large dataset you might want to load it in batches to optimize memory consumption
                and avoid network timeouts. To that end, a simple batch-iteration over the whole database can be done
                employing the `from_id` parameter. This parameter will act as a delimiter, retrieving the N items after
                the given id, where N is determined by the `limit` parameter. **NOTE** If
                no `limit` is given the whole dataset after that ID will be retrieved.
            >>> import rubrix as rb
            >>> dataset_batch_1 = rb.load(name="example-dataset", limit=1000)
            >>> dataset_batch_2 = rb.load(name="example-dataset", limit=1000, id_from=dataset_batch_1[-1].id)
        """
        # `as_pandas` is deprecated: warn when explicitly disabled, hard-fail
        # when enabled (the DataFrame conversion is now a Dataset method).
        if as_pandas is False:
            warnings.warn(
                "The argument `as_pandas` is deprecated and will be removed in a future version. "
                "Please adapt your code accordingly. ",
                FutureWarning,
            )
        elif as_pandas is True:
            raise ValueError(
                "The argument `as_pandas` is deprecated and will be removed in a future version. "
                "Please adapt your code accordingly. ",
                "If you want a pandas DataFrame do `rb.load('my_dataset').to_pandas()`.",
            )
        response = datasets_api.get_dataset(client=self._client, name=name)
        task = response.parsed.task
        # Per-task tuple: (data-fetch function, query model, client dataset class).
        task_config = {
            TaskType.text_classification: (
                text_classification_api.data,
                TextClassificationQuery,
                DatasetForTextClassification,
            ),
            TaskType.token_classification: (
                token_classification_api.data,
                TokenClassificationQuery,
                DatasetForTokenClassification,
            ),
            TaskType.text2text: (
                text2text_api.data,
                Text2TextQuery,
                DatasetForText2Text,
            ),
        }
        try:
            get_dataset_data, request_class, dataset_class = task_config[task]
        except KeyError:
            raise ValueError(
                f"Load method not supported for the '{task}' task. Supported tasks: "
                f"{[TaskType.text_classification, TaskType.token_classification, TaskType.text2text]}"
            )
        response = get_dataset_data(
            client=self._client,
            name=name,
            request=request_class(ids=ids, query_text=query),
            limit=limit,
            id_from=id_from,
        )
        records = [sdk_record.to_client() for sdk_record in response.parsed]
        try:
            records_sorted_by_id = sorted(records, key=lambda x: x.id)
        # record ids can be a mix of int/str -> sort all as str type
        except TypeError:
            records_sorted_by_id = sorted(records, key=lambda x: str(x.id))
        return dataset_class(records_sorted_by_id)
    def dataset_metrics(self, name: str) -> List[MetricInfo]:
        """List the metrics available for the given dataset's task."""
        response = datasets_api.get_dataset(self._client, name)
        response = metrics_api.get_dataset_metrics(
            self._client, name=name, task=response.parsed.task
        )
        return response.parsed
    def get_metric(self, name: str, metric: str) -> Optional[MetricInfo]:
        """Return the metric info with id ``metric``, or None if not found."""
        metrics = self.dataset_metrics(name)
        for metric_ in metrics:
            if metric_.id == metric:
                return metric_
    def compute_metric(
        self,
        name: str,
        metric: str,
        query: Optional[str] = None,
        interval: Optional[float] = None,
        size: Optional[int] = None,
    ) -> MetricResults:
        """Compute ``metric`` for dataset ``name``, optionally over a query subset."""
        response = datasets_api.get_dataset(self._client, name)
        metric_ = self.get_metric(name, metric=metric)
        assert metric_ is not None, f"Metric {metric} not found !!!"
        response = metrics_api.compute_metric(
            self._client,
            name=name,
            task=response.parsed.task,
            metric=metric,
            query=query,
            interval=interval,
            size=size,
        )
        # Merge the metric's static info with the computed results.
        return MetricResults(**metric_.dict(), results=response.parsed)
    def fetch_dataset_labeling_rules(self, dataset: str) -> List[LabelingRule]:
        """Fetch all labeling rules defined for a text-classification dataset."""
        response = text_classification_api.fetch_dataset_labeling_rules(
            self._client, name=dataset
        )
        return [LabelingRule.parse_obj(data) for data in response.parsed]
    def rule_metrics_for_dataset(
        self, dataset: str, rule: LabelingRule
    ) -> LabelingRuleMetricsSummary:
        """Compute coverage/precision metrics of a labeling rule on a dataset."""
        response = text_classification_api.dataset_rule_metrics(
            self._client, name=dataset, query=rule.query, label=rule.label
        )
        return LabelingRuleMetricsSummary.parse_obj(response.parsed)
# Module-level singleton used by the delegating functions below.
__ACTIVE_API__: Optional[Api] = None
def active_api() -> Api:
    """Returns the active API.
    If Active API is None, initialize a default one.
    """
    global __ACTIVE_API__
    if __ACTIVE_API__ is None:
        __ACTIVE_API__ = Api()
    return __ACTIVE_API__
def api_wrapper(api_method: Callable):
    """Decorator to wrap the API methods in module functions.
    Propagates the docstrings and adapts the signature of the methods.
    """
    method_signature = signature(api_method)
    # Drop the bound ``self`` parameter so the module-level function
    # advertises the signature a caller actually uses.
    exposed_parameters = [
        parameter
        for parameter_name, parameter in method_signature.parameters.items()
        if parameter_name != "self"
    ]
    is_coroutine = asyncio.iscoroutinefunction(api_method)
    def decorator(func):
        # Mirror sync/async nature of the wrapped API method.
        if is_coroutine:
            @wraps(api_method)
            async def wrapped_func(*args, **kwargs):
                return await func(*args, **kwargs)
        else:
            @wraps(api_method)
            def wrapped_func(*args, **kwargs):
                return func(*args, **kwargs)
        wrapped_func.__signature__ = method_signature.replace(parameters=exposed_parameters)
        return wrapped_func
    return decorator
@api_wrapper(Api.__init__)
def init(*args, **kwargs):
    # Rebuild the module-level singleton with the given configuration.
    global __ACTIVE_API__
    __ACTIVE_API__ = Api(*args, **kwargs)


# Module-level conveniences: each function below proxies to the active API
# singleton and borrows the docstring/signature of the matching Api method.

@api_wrapper(Api.set_workspace)
def set_workspace(*args, **kwargs):
    return active_api().set_workspace(*args, **kwargs)


@api_wrapper(Api.get_workspace)
def get_workspace(*args, **kwargs):
    return active_api().get_workspace(*args, **kwargs)


@api_wrapper(Api.copy)
def copy(*args, **kwargs):
    return active_api().copy(*args, **kwargs)


@api_wrapper(Api.delete)
def delete(*args, **kwargs):
    return active_api().delete(*args, **kwargs)


@api_wrapper(Api.log)
def log(*args, **kwargs):
    return active_api().log(*args, **kwargs)


@api_wrapper(Api.log_async)
def log_async(*args, **kwargs):
    return active_api().log_async(*args, **kwargs)


@api_wrapper(Api.load)
def load(*args, **kwargs):
    return active_api().load(*args, **kwargs)


@api_wrapper(Api.delete_records)
def delete_records(*args, **kwargs):
    return active_api().delete_records(*args, **kwargs)
class InputValueError(RubrixClientError):
    """Client error raised for invalid input values."""

    pass
| Skumarh89/rubrix | src/rubrix/client/api.py | api.py | py | 24,714 | python | en | code | null | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "rubrix.utils.setup_loop_in_thread",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "asyncio.run_coroutine_threadsafe",
"line_number": 87,
"usage_type": "call"
},
{
... |
947951498 | #!/home/shailja/.virtualenv/my_env/bin/python3
import requests
import bs4
import sys
content = sys.argv[1]
def display_actual_text(text, para_no):
    """Print the visible text of paragraph ``para_no`` from ``text``.

    text: sequence of parsed tags (e.g. the result of ``soup.select('p')``).
    para_no: index of the paragraph to show; IndexError propagates when out
        of range (callers rely on that to detect the end of the article).

    Non-visible markup nodes are removed from the tag in place before
    printing, so the entry in ``text`` is mutated.
    """
    paragraph = text[para_no]
    # Explicit loop instead of the original list comprehension that was
    # used purely for its side effects.
    for node in paragraph(['style', 'script', '[document]', 'head', 'title']):
        node.extract()
    visible_text = paragraph.getText()
    print(visible_text)
wiki_url = 'https://en.wikipedia.org/wiki/{}'
request = requests.get(wiki_url.format(content))
soup = bs4.BeautifulSoup(request.text, 'lxml')
actual_text = soup.select('p')

# Handle the case where Wikipedia has no article with this exact name.
# NOTE(review): the bare ``except`` also hides network and parsing errors;
# consider catching IndexError explicitly.
try:
    display_actual_text(actual_text, 1)
except:
    print(f"Wikipedia does not have an article with this exact name.")
    exit(1)

# wiki_url = 'https://en.wikipedia.org/wiki/Special:Search?go=Go&search={}&ns0=1'
# request = requests.get(wiki_url.format(content))
# soup = bs4.BeautifulSoup(request.text,'lxml')
# #relevant_text=soup.select("div", {"id": "bodyContent"}) #soup.select('#mw-body')
# #relevant_text=relevant_text.select("div.searchresults")
# content = soup.select(".mw-search-result")[0].select('td')[1]
# title=content.a['title']
# relevant_text=content.select(".searchresult")[0]
# [s.extract() for s in relevant_text(['style', 'script', '[document]', 'head', 'title'])]
# visible_text = relevant_text.getText()
# print(title)
# print(visible_text)

# Paginate through the remaining paragraphs: each prompt shows the next one
# until the user enters any non-empty input.
show_next = False
para_no = 2
# Stop when the article runs out of paragraphs (IndexError from indexing).
try:
    while not show_next:
        show_next = input("!")
        print()
        display_actual_text(actual_text, para_no)
        para_no += 1
except:
    print("End")
| SKT27182/web_scaping | wiki_search.py | wiki_search.py | py | 1,728 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 19,
"usage_type": "call"
}
] |
9866560419 | from mlagents_envs.environment import UnityEnvironment
from mlagents_envs.environment import ActionTuple
from mlagents_envs.side_channel.engine_configuration_channel import EngineConfigurationChannel
import numpy as np
import mlagents.trainers
from collections import namedtuple
# Observation container: flat state vector, four camera views and raycast hits.
obs = namedtuple(
    'obs',
    ['vector', 'front', 'right', 'back', 'left', 'raycast'])
class Drone(object):
    """Thin wrapper around a Unity ML-Agents drone environment.

    Exposes a gym-like ``reset()`` / ``step()`` API; observations are returned
    as an ``obs`` namedtuple built from the six observation arrays the
    environment provides.
    """

    def __init__(self, time_scale=1.0, filename='mac.app', port=11000):
        """Launch the Unity build and cache its (single) behavior spec.

        time_scale: Unity engine time scale (lower = slower simulation).
        filename: path of the Unity executable.
        port: worker id used to derive the communication port.
        """
        self.engine_configuration_channel = EngineConfigurationChannel()
        print(f"VERSION : {mlagents.trainers.__version__}")
        self.env = UnityEnvironment(
            file_name=filename,
            worker_id=port,
            side_channels=[self.engine_configuration_channel])
        self.env.reset()
        # Single-behavior environment: grab the first behavior name and spec.
        self.behavior_name = list(self.env.behavior_specs.keys())[0]
        self.spec = self.env.behavior_specs[self.behavior_name]
        self.engine_configuration_channel.set_configuration_parameters(time_scale=time_scale)
        self.dec, self.term = self.env.get_steps(self.behavior_name)

    def reset(self):
        """Reset the simulation and return the first observation tuple."""
        self.env.reset()
        self.dec, self.term = self.env.get_steps(self.behavior_name)
        # Resolved to a real agent id on the first step() call.
        self.tracked_agent = -1
        self.state = [self.dec.obs[i][0] for i in range(6)]
        self.state = obs(
            vector=self.state[0], front=self.state[1],
            right=self.state[2], back=self.state[3],
            left=self.state[4], raycast=self.state[5])
        return self.state

    def step(self, action):
        """Apply a continuous action (clipped to [-1, 1]).

        Returns ``(state, reward, done)``; on termination the previously
        cached state is returned since no fresh observation is available.
        """
        if self.tracked_agent == -1 and len(self.dec) >= 1:
            self.tracked_agent = self.dec.agent_id[0]
        action = np.clip(action, -1, 1)
        action_tuple = ActionTuple()
        action_tuple.add_continuous(np.array([action]))
        self.env.set_actions(self.behavior_name, action_tuple)
        self.env.step()
        self.dec, self.term = self.env.get_steps(self.behavior_name)
        reward = 0
        done = False
        # Decision steps carry intermediate rewards; a terminal step ends
        # the episode.
        if self.tracked_agent in self.dec:
            reward += self.dec[self.tracked_agent].reward
        if self.tracked_agent in self.term:
            reward += self.term[self.tracked_agent].reward
            done = True
        if done:
            return self.state, reward, done
        self.state = [self.dec.obs[i][0] for i in range(6)]
        self.state = obs(
            vector=self.state[0], front=self.state[1],
            right=self.state[2], back=self.state[3],
            left=self.state[4], raycast=self.state[5])
        return self.state, reward, done
if __name__ == '__main__':
    import matplotlib.pyplot as plt

    # Smoke test: drive the environment with random actions forever and
    # print observation shapes plus the per-episode score.
    env = Drone(
        time_scale=0.1,
        filename='/Users/chageumgang/Desktop/baselines/mac.app')
    episode = 0
    while True:
        state = env.reset()
        done = False
        score = 0
        episode += 1
        while not done:
            action = np.random.rand(3)
            next_state, reward, done = env.step(action)
            score += reward
            print(next_state.vector.shape)
            print(next_state.raycast.shape)
            print(next_state.front.shape)
            '''
            fig = plt.figure()
            ax1 = fig.add_subplot(2, 2, 1)
            ax1.imshow(state.front)
            ax1 = fig.add_subplot(2, 2, 2)
            ax1.imshow(state.back)
            ax1 = fig.add_subplot(2, 2, 3)
            ax1.imshow(state.right)
            ax1 = fig.add_subplot(2, 2, 4)
            ax1.imshow(state.left)
            plt.show(block=False)
            plt.pause(0.1)
            '''
            state = next_state
        print(episode, score)
| chagmgang/baselines | baselines/env/simple_drone.py | simple_drone.py | py | 3,699 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.namedtuple",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "mlagents_envs.side_channel.engine_configuration_channel.EngineConfigurationChannel",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "mlagents.trainers.trainers",
"line_... |
15006294998 | import pandas as pd
from bs4 import BeautifulSoup as bs
#Criando objeto BS
def get_file(file_name):
    """Parse an XML file and return its BeautifulSoup tree.

    file_name: path of the XML file to parse.
    """
    # Read the file in one call instead of readlines() + join.
    with open(file_name, 'r') as file:
        content = file.read()
    soup = bs(content, 'xml')
    return soup
#Buscando parents
def get_parents(soup):
    """Collect the NAME attribute of every ancestor of ``soup`` that both
    carries a NAME and contains a TRANSFORMATION element."""
    return [
        ancestor["NAME"]
        for ancestor in soup.parents
        if ancestor.has_attr("NAME") and ancestor.find('TRANSFORMATION')
    ]
#Buscando valores
def get_values(soup, tag1):
    """Return ``[parents, names, descriptions]`` for every ``tag1`` element.

    Each sub-list is aligned by element order; parents come from
    ``get_parents`` applied to the element itself.
    """
    names = []
    descriptions = []
    parents = []
    for element in soup.find_all(tag1):
        names.append(element.get('NAME'))
        descriptions.append(element.get('DESCRIPTION'))
        parents.append(get_parents(element))
    return [parents, names, descriptions]
#Criando DF
def create_df(list_values):
    """Build a DataFrame from the ``[parents, names, descriptions]`` lists.

    list_values: sequence of three equal-length lists in the order produced
        by ``get_values``.
    Returns a DataFrame with columns PARENTS, NAME and DESCRIPTION.
    """
    # Renamed from ``dict``, which shadowed the builtin.
    columns = {
        'PARENTS': list_values[0],
        'NAME': list_values[1],
        'DESCRIPTION': list_values[2]
    }
    df = pd.DataFrame(columns)
    return df
#Criando excel
def create_file(filename, df):
    """Write ``df`` to ``filename`` as an Excel workbook.

    The single sheet is named after the file. Parameter order is
    (filename, df) — NOTE(review): verify call sites, some appear to pass
    the arguments swapped.
    """
    return df.to_excel(excel_writer=filename, sheet_name=filename, header=True)
#Separando colunas
def transform_column_parents(file):
    """Split the stringified PARENTS column of an exported sheet into columns.

    file: path of an .xlsx produced by ``create_file`` whose PARENTS column
        holds stringified lists (2 or 3 elements per row).
    Returns a DataFrame where PARENTS is replaced by MAPPING/FOLDER/REPOSITORY
    (or FOLDER/REPOSITORY when the lists only have two elements).

    Fixes over the original: the two chained ``drop`` calls each started from
    ``df_origin``, so the second silently discarded the PARENTS drop; the
    ``else`` clause duplicated the work of the ``try`` body; ``eval`` is
    replaced by ``ast.literal_eval``; the bare ``except`` is narrowed to the
    ValueError raised by a column-count mismatch.
    """
    from ast import literal_eval

    df_origin = pd.read_excel(file)
    parent_lists = [literal_eval(item) for item in df_origin['PARENTS'].values.tolist()]
    try:
        # Three-level hierarchy: mapping -> folder -> repository.
        df_column = pd.DataFrame(parent_lists, columns=['MAPPING', 'FOLDER', 'REPOSITORY'])
    except ValueError:
        # Two-level hierarchy (e.g. sources): folder -> repository.
        df_column = pd.DataFrame(parent_lists, columns=['FOLDER', 'REPOSITORY'])
    df_clean = df_origin.drop(['PARENTS', 'Unnamed: 0'], axis=1)
    df_complete = pd.concat([df_column, df_clean], axis=1)
    return df_complete
if __name__ == '__main__':
    # Input XML and the tags/worksheets to extract.
    name = '../mapping.XML'
    tag_mapping = 'MAPPING'
    tag_transformation = 'TRANSFORMATION'
    tag_target = 'TARGET'
    tag_source = 'SOURCE'
    filename_mapping = 'mapping.xlsx'
    filename_transformation = 'transformation.xlsx'
    filename_target = 'target.xlsx'
    filename_source = 'source.xlsx'

    # Parse the XML once.
    soup = get_file(name)

    # First pass: extract each tag type, build a DataFrame, dump to Excel.
    list_mapping = get_values(soup, tag_mapping)
    df_mapping = create_df(list_mapping)
    create_file(filename_mapping, df_mapping)

    list_transformation = get_values(soup, tag_transformation)
    df_transformation = create_df(list_transformation)
    create_file(filename_transformation, df_transformation)

    list_target = get_values(soup, tag_target)
    df_target = create_df(list_target)
    create_file(filename_target, df_target)

    list_source = get_values(soup, tag_source)
    df_source = create_df(list_source)
    create_file(filename_source, df_source)

    # Second pass: split the stringified PARENTS column into real columns.
    file1 = '../source.xlsx'
    file2 = '../mapping.xlsx'
    file3 = '../transformation.xlsx'
    file4 = '../target.xlsx'

    # BUG FIX: create_file takes (filename, df); the four calls below had the
    # arguments swapped, which passed DataFrames where file names belong.
    df1 = transform_column_parents(file1)
    create_file('source.xlsx', df1)

    df2 = transform_column_parents(file2)
    create_file('mapping.xlsx', df2)

    df3 = transform_column_parents(file3)
    create_file('transformation.xlsx', df3)

    df4 = transform_column_parents(file4)
    create_file('target.xlsx', df4)
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame... |
71592171944 | #!/bin/python3
import math
import os
import random
import re
import sys
from collections import deque
# Complete the bfs function below.
def bfs(n, m, edges, s):
    """Breadth-first search distances (6 per hop) from vertex ``s``.

    n: number of vertices, labelled 1..n in ``edges`` and ``s``.
    m: declared edge count (unused; ``edges`` is authoritative).
    edges: list of [u, v] undirected edges.
    Returns the distances to every vertex except ``s`` itself, in vertex
    order; unreachable vertices get -1.

    Cleanups over the original: removed leftover debug prints, the redundant
    ``seen`` array (``distance == -1`` already marks unvisited vertices) and
    the odd double-loop adjacency-list construction.
    """
    neighbors = [[] for _ in range(n)]
    for u, v in edges:
        # Convert the 1-indexed endpoints to 0-indexed adjacency entries.
        neighbors[u - 1].append(v - 1)
        neighbors[v - 1].append(u - 1)

    start = s - 1
    distance = [-1] * n
    distance[start] = 0

    q = deque([start])
    while q:
        v = q.popleft()
        for u in neighbors[v]:
            if distance[u] == -1:
                distance[u] = distance[v] + 6
                q.append(u)

    return [distance[i] for i in range(n) if i != start]
if __name__ == '__main__':
    # HackerRank harness: results are written to the path in OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')

    q = int(input())  # number of queries

    for q_itr in range(q):
        nm = input().split()
        n = int(nm[0])  # vertex count
        m = int(nm[1])  # edge count
        edges = []
        for _ in range(m):
            edges.append(list(map(int, input().rstrip().split())))
        s = int(input())  # start vertex (1-indexed)
        result = bfs(n, m, edges, s)
        fptr.write(' '.join(map(str, result)))
        fptr.write('\n')

    fptr.close()
{
"api_name": "collections.deque",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 46,
"usage_type": "attribute"
}
] |
521538009 | #imports
import numpy as np
import matplotlib.pyplot as plt
import scipy.constants as const
from scipy.special import iv as I0
from scipy.special import kv as K0
#Define Global Variables
L_geo = 55.6e-9
Z0 = 50.0
F0_base = 0.95e9 #At lowest Temp
squares= 27223
c_couple = 1.5e-14
TC = 1.5
Delta_0 = (3.5*const.Boltzmann*TC)/2
sigma_n = 6.0e7 # Normal stae conductvity if superconducting film
Thick = 20e-9 # Thickness of superconducting fil
w = 2 * np.pi * F0_base
me = const.m_e
miu_0 = 4*np.pi*10**-7
pi = np.pi
#Main code
def main():
    """Sweep S21 of a simulated KID over temperature and save the plots.

    Produces three figures: |S21| vs frequency per temperature, Q-vs-I
    resonance circles, and F0 vs temperature compared against the dF0
    ("magic formula") estimate.
    """
    # Temperature grid (K). NOTE(review): the original comment said step
    # 0.01 K but the code uses 0.1 K, which yields a single temperature
    # in [0.2, 0.3).
    step = 0.1
    temp = np.arange(0.2, 0.3, step)

    # Mattis-Bardeen conductivities, total internal inductance and loss.
    sigma1, sigma2 = find_sigma1_sigma2(sigma_n, Thick, TC, Delta_0, w, temp)
    Lint = find_Lint_square(Thick, w, sigma2) * squares
    Lk = find_lk(Thick, w, sigma2)
    sigma12Ratio = sigma1/sigma2
    Res = Lk*w*sigma12Ratio*squares

    # Capacitor sized so the resonator sits at F0_base at the lowest temperature.
    Ltot_lowest = Lint[0] + L_geo
    IDC = find_IDC(w, Ltot_lowest, c_couple)

    # Frequency sweep of S21 for every temperature.
    Sweep_points = 20000
    BW = 5e6
    I_raw = np.zeros((Sweep_points, len(temp)), dtype="float")
    Q_raw = np.copy(I_raw)
    Phase = np.copy(Q_raw)
    S21_Volt = np.copy(I_raw)
    for i in range(0, len(Lint)):
        Sweep, S21_Volt[:,i], Phase[:,i], I_raw[:,i], Q_raw[:,i],_,_,_,_,_ = Capacitive_Res_Sim(F0_base, c_couple, Z0, L_geo, Lint[i], Res[i], BW, Sweep_points, IDC)
        plt.plot(Sweep/1e9, S21_Volt[:,i], label=str("{:.2f}".format(temp[i])))

    # |S21| amplitude figure.
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, title="Temperature / K")
    plt.xlabel('Frequency / GHz', fontsize=13)
    plt.ylabel('Output Amplitude', fontsize=13)
    #plt.title("S21 Amplitude For Varying Temperatures")
    plt.xlim(0.9490, 0.9505)
    plt.locator_params(nbins=6)
    plt.savefig("S21 Plot with Resistance")
    plt.rcParams['figure.dpi'] = 300
    plt.figure()

    # Q vs I resonance circles.
    for i in range(0, len(Lint)):
        plt.plot(I_raw[:,i], Q_raw[:,i], linewidth=1, label=str("{:.2f}".format(temp[i])))

    # Resonance of the lowest-temperature sweep (minimum of |S21|).
    S21_Base = min(S21_Volt[:,0])
    I_Base = np.zeros(len(temp), dtype="float")
    Q_Base = np.copy(I_Base)
    for i in range(0, len(S21_Volt[:,0])):
        if S21_Base == S21_Volt[i,0]:
            F0_Base = Sweep[i]

    # Mark each temperature's I/Q read out at the base resonance frequency.
    for i in range(0, len(temp)):
        for j in range(0, len(Sweep)):
            if F0_Base == Sweep[j]:
                I_Base[i] = I_raw[j,i]
                Q_Base[i] = Q_raw[j,i]
                plt.plot(I_Base[i], Q_Base[i], markersize=4, marker="x", color='black')

    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True, title="Temperature / K")
    plt.xlabel('I / V', fontsize=13)
    plt.ylabel('Q / V', fontsize=13)
    #plt.title("Q vs I Plot for Varying Temperature")
    plt.savefig("Q vs I plot for varying temp")
    plt.figure()

    # Resonance frequency for each temperature (minimum of |S21|).
    F0 = np.zeros(len(temp))
    for i in range(0, len(temp)):
        S21_min = min(S21_Volt[:,i])
        for j in range(0, len(Sweep)):
            if S21_min == S21_Volt[j,i]:
                F0[i] = Sweep[j]

    plt.plot(temp, F0/1e9, color='k', linewidth="1", label="Minimum Of S21")
    plt.xlabel('Temperature / K', fontsize=13)
    plt.ylabel('F0 / GHz', fontsize=13)
    plt.rcParams['figure.dpi'] = 300
    #plt.title("F0 vs Temperature")

    # Numerical dI/dF and dQ/dF at the base resonance (central differences).
    step = abs((Sweep[0]-Sweep[-1])/Sweep_points)
    for i in range(0, len(Sweep)):
        if Sweep[i] == F0_Base:
            didf = (I_raw[i+1,0] - I_raw[i-1,0])/(2*step)
            dqdf = (Q_raw[i+1,0] - Q_raw[i-1,0])/(2*step)

    # Frequency shift estimated from the IQ displacement ("magic formula").
    di = np.zeros(len(temp))
    dq = np.copy(di)
    di = abs(I_Base - I_Base[0])
    dq = abs(Q_Base - Q_Base[0])
    dF0 = Magic_Formula(di, dq, didf, dqdf)

    F0_Magic = F0_Base - abs(dF0)
    plt.plot(temp, F0_Magic/1e9, label="dF0 Formula")
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5), fancybox=True)
    plt.ticklabel_format(useOffset=False)
    plt.rcParams['figure.dpi'] = 1000
    plt.xlim(0.20, 0.22)
    plt.ylim(0.949980, 0.95)
    plt.savefig("Magic Formula plot")
#KID Simulating Function
def Capacitive_Res_Sim(F0, C_couple, Z0, L_geo, L_int, Res, Sweep_BW, Sweep_points, Capacitance):
    """Simulate the S21 transmission of a capacitively coupled LC resonator.

    F0: nominal resonance frequency (Hz) the sweep is centred on.
    C_couple: coupling capacitance; Z0: feedline impedance.
    L_geo / L_int: geometric / internal inductance; Res: series loss.
    Sweep_BW / Sweep_points: sweep bandwidth and number of frequency points.
    Capacitance: resonator capacitance.

    Returns (Sweep, S21_Volt, Phase, I_raw, Q_raw, I_cent, Q_cent, QU, QL,
    I_offset): frequency axis, |S21|, phase, raw and centred IQ components,
    unloaded and loaded quality factors, and the I-centring offset.
    """
    j = complex(0, 1)
    Cc = C_couple
    F_min = F0-(Sweep_BW/2.0)
    F_max = F0+(Sweep_BW/2.0)
    Sweep = np.linspace(F_min, F_max, Sweep_points)
    W = Sweep*2.0*pi
    W0 = 2.0*pi*F0
    L = L_geo+L_int
    C = Capacitance
    Zres = 1.0/((1./((j*W*L)+Res))+(j*W*C))  # impedance of resonator section
    Zc = 1.0/(j*W*Cc)  # impedance of coupler
    ZT = Zres+Zc
    YT = 1.0/ZT
    S21 = 2.0/(2.0+(YT*Z0))
    I_raw = S21.real
    Q_raw = S21.imag
    # Shift so the resonance circle is centred on the I axis.
    shift = ((1.0-min(I_raw))/2.0)+min(I_raw)
    I_cent = I_raw-shift
    Q_cent = Q_raw
    Phase = Atan(abs(Q_cent/I_cent))
    QU = (W0*L)/Res  # unloaded quality factor
    QL = (C*2)/(W0*(Cc**2)*Z0)  # loaded (coupling-limited) quality factor
    S21_Volt = abs(S21)
    I_offset = shift
    return (Sweep, S21_Volt, Phase, I_raw, Q_raw, I_cent, Q_cent, QU, QL, I_offset)
#Function to find sigma1 and sigma2
def find_sigma1_sigma2(sigma_n ,Thick, TC, Delta_0, w, T):
#An interpolation formula for delta_T
delta_T = Delta_0*np.tanh(1.74*np.sqrt((TC/T)-1))
#Define constants to simplify eqn
multiplying_constant = delta_T/(const.hbar * w)
e_const_1 = - Delta_0/(const.Boltzmann*T)
e_const_2 = (const.hbar*w)/(2*const.Boltzmann*T)
#Parts of the sigma1 Ratio
A = 2*multiplying_constant
B = np.exp(e_const_1)
C = K0(0, e_const_2)
D = 2*(np.sinh(e_const_2))
#Find Sigma 1 and Sigma 2
sigma1Ratio = A * B * C * D
sigma2Ratio = np.pi*multiplying_constant*(1 - (2*np.exp(e_const_1)*np.exp(-e_const_2)*I0(0,e_const_2)))
sigma2 = sigma2Ratio * sigma_n
sigma1 = sigma1Ratio * sigma_n
return sigma1, sigma2
def find_lk(Thick, w, sigma2):
    """Kinetic inductance per square of the film.

    Uses the penetration depth derived from sigma2 and the thin-film
    correction coth(t/2λ) + (t/2λ)·csch²(t/2λ).
    """
    penetration_depth = (1/(miu_0*sigma2*w))**0.5
    half_thickness_ratio = Thick/(2*penetration_depth)
    prefactor = (miu_0*penetration_depth)/4
    correction = coth(half_thickness_ratio) + half_thickness_ratio*(csch(half_thickness_ratio))**2
    return prefactor*correction
def find_Lint_square(Thick, w, sigma2):
    """Internal (kinetic) inductance per square: (μ0·λ/2)·coth(t/2λ)."""
    penetration_depth = (1/(miu_0*sigma2*w))**0.5
    half_thickness_ratio = Thick/(2*penetration_depth)
    return (miu_0*penetration_depth/2)*coth(half_thickness_ratio)
#Define coth and csch
def coth(x):
    """Hyperbolic cotangent (element-wise)."""
    return np.cosh(x)/np.sinh(x)


def csch(x):
    """Hyperbolic cosecant (element-wise)."""
    return 1/np.sinh(x)


def Atan(x):
    """Arctangent; thin wrapper over numpy."""
    return np.arctan(x)
#Find IDC function
def find_IDC(w0, Ltot, Cc):
    """Capacitance that resonates ``Ltot`` at angular frequency ``w0``,
    net of the coupling capacitance ``Cc``."""
    total_capacitance = 1/((w0**2)*Ltot)
    return total_capacitance - Cc
def Magic_Formula(di, dq, didf, dqdf):
    """Project an IQ displacement onto the frequency axis using the local
    dI/dF and dQ/dF slopes (least-squares frequency-shift estimate)."""
    numerator = di*didf + dq*dqdf
    return numerator/(didf**2 + dqdf**2)
main() | Ashleyyyt/Characterizing-KIDs | Simulate KID.py | Simulate KID.py | py | 7,177 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.constants.Boltzmann",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "scipy.constants",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "numpy.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "scipy.co... |
28886388336 | # from urllib import request
from django.shortcuts import render, redirect
from .models import Post, Comment
from .forms import CommentForm, PostUpdateForm
# from django.http import HttpResponseRedirect
from django.contrib.auth.decorators import login_required
# LoginRequiredMixin is simply the class based version for login_required decorator.
# because we cannot use decorators with classes, we are using mixins instead.
# UserPassesTestMixin mixin is for making sure that only the author can edit the posts
from django.contrib.auth.mixins import (LoginRequiredMixin,
UserPassesTestMixin)
from django.views.generic import (ListView,
# DetailView,
CreateView,
UpdateView,
DeleteView)
#! FBV for listing blog posts
def home(request):
    """Render the blog home page with every post."""
    return render(request, 'blog/home.html', {'posts': Post.objects.all()})
#! CBV for listing blog posts
class PostListView(ListView):
    """Feed of all posts, newest first, rendered by blog/home.html."""

    model = Post
    template_name = 'blog/home.html'  # default would be blog/post_list.html
    context_object_name = 'posts'  # default context name is 'object_list'
    ordering = ['-date_posted']  # newest posts first
#! CBV for individual blog posts
# class PostDetailView(LoginRequiredMixin, DetailView):
# model = Post
# form = CommentForm
# # actually, for this one let's create the defualt html template file django is looking for. And since this
# # is a detailview, django's gonna look for a template named 'blog/post_detail.html'.
# # not defining our context_object_name, we'll have to use 'object' for every post in our blog/post_detail.html template
# #? view count part
# def get_object(self):
# views = super().get_object()
# views.blog_view += 1
# views.save()
# print(CommentForm)
# print(Post.objects.get(id = views.pk) == views)
# # if request.method == 'POST':
# # print(CommentForm(request.POST))
# # if form.is_valid():
# # comment=form.save(commit=False)
# # comment.blog = views
# # comment.save()
# return views
#! FBV for individual blog posts
@login_required
def blog_detail(request, pk):
    """Show a single post with its comments; handle new-comment POSTs.

    Every request increments the post's view counter; a valid comment POST
    redirects back here (post/redirect/get pattern).
    """
    post = Post.objects.get(id=pk)
    print(post.post_image)  # NOTE(review): leftover debug output
    form = CommentForm()
    form_blog = PostUpdateForm()  # NOTE(review): built but never used
    comments = Comment.objects.filter(post=post.id)
    post.blog_view += 1
    post.save()
    if request.method == 'POST':
        form = CommentForm(request.POST)
        print('yay1')  # NOTE(review): leftover debug output
        if form.is_valid():
            print('yay2')
            comment = form.save(commit=False)
            comment.post = post
            post.blog_comment += 1
            comment.user = request.user
            # NOTE(review): appears to compensate for the extra view counts
            # incurred by the POST + redirect cycle — confirm intent.
            post.blog_view -= 2
            post.save()
            comment.save()
            return redirect("post-detail", pk)
    return render(request, 'blog/post_detail.html', {'post': post, 'form': form, 'comments': comments})
#! CBV for creating blog posts
class PostCreateView(LoginRequiredMixin, CreateView):
    """Create a post; the logged-in user is attached as author on save."""

    model = Post
    fields = ('title', 'content', 'post_image')
    success_url = '/'

    def form_valid(self, form):
        # The form has no author field; set the requesting user so the
        # NOT NULL constraint on Post.author is satisfied.
        form.instance.author = self.request.user
        return super().form_valid(form)
#! CBV for updating blog posts
class PostUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
    """Edit a post; only its author passes the access test."""

    model = Post
    fields = ('title', 'content', 'post_image')

    def form_valid(self, form):
        # Keep the requesting user as author on save.
        form.instance.author = self.request.user
        return super().form_valid(form)

    def test_func(self):
        # UserPassesTestMixin hook: return the comparison directly instead
        # of the original if/return-True/return-False ladder.
        return self.request.user == self.get_object().author
#! CBV for deleting blog posts
class PostDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
    """Delete a post; only its author passes the access test."""

    model = Post
    success_url = '/'

    def test_func(self):
        # Return the comparison directly instead of the original
        # if/return-True/return-False ladder.
        return self.request.user == self.get_object().author
def like_post(request, id):
    """Toggle the requesting user's like on post ``id`` (POST only).

    Returns the refreshed likes partial; non-POST requests fall through and
    return None, matching the original behavior.
    """
    if request.method == "POST":
        instance = Post.objects.get(id=id)
        # The two original branches were mirror images; toggle in one place.
        if instance.likes.filter(id=request.user.id).exists():
            instance.likes.remove(request.user)
        else:
            instance.likes.add(request.user)
        instance.save()
        return render(request, 'blog/likes_area.html', context={'post': instance})
# def post_comment(request, id):
# model = Comment.objects.get(id=id)
# comment = CommentForm(instance=model)
# if request.method == "POST":
# comment = CommentForm(request.POST, instance=model)
# # instance = CommentForm(request, id=id)
# if comment.is_valid():
# comment.save()
# return render( request, 'blog/post_comment.html', context={'comment':comment})
# class CommentView(CreateView):
# model = Comment
# template_name = 'blog/post_comment.html'
# fields = ('post', 'body','date_added')
def about(request):
    """Render the static about page."""
    template = 'blog/about.html'
    return render(request, template)
{
"api_name": "models.Post.objects.all",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "django.s... |
74090977062 | import math
from datetime import datetime
import firebase_admin
from firebase_admin import credentials
from firebase_admin import db
from firebase_admin import firestore
import parseIntervalFiles as pif
import parseActivityFiles as paf
hervdir = "C:\\Users\\Ju\\GDrive\\Projects\\HeRV\\"
## Firestore connection parameters
keyfile = hervdir + "Docs\\herv-3c5ea-firebase-adminsdk-99tjk-98193df3d9.json"
databaseURL = 'https://herv-3c5ea.firebaseio.com'
## CSV file reading parameters
source = hervdir + "Data\\Raw\\"
start_dt = datetime(2017, 10, 29)
end_dt = datetime(2018, 11, 1)
def u_ref(db, uid):
    """Return the Firestore document reference for user ``uid``."""
    users = db.collection('users')
    return users.document(str(uid))
def add_sessions(uid, start_dt, end_dt, source, dest):
    """Parse a user's activity CSVs and write their sessions to Firestore.

    Documents are named after each session's start time and stored under
    users/<uid>/sessions, committed in a single write batch.
    NOTE(review): Firestore batches are limited to 500 writes — verify the
    session count per user stays below that.
    """
    u_sess = paf.get_sessions(uid, start_dt, end_dt, source, verbose=False)
    print('adding', len(u_sess), 'sessions for user', uid)
    s_ref = u_ref(dest, uid).collection('sessions')
    b = dest.batch()
    for sess in u_sess:
        name = paf.csvu.string_from_time_filename(sess['start'])
        doc = s_ref.document(name)
        b.set(doc, sess)
    b.commit()
def add_intervals(uid, start_dt, end_dt, source, dest):
    """Upload RR intervals for every day in the date range for one user."""
    for day in pif.csvu.gendays(start_dt, end_dt):
        add_day_intervals(uid, day, source, dest)
    print("finished adding intervals for user", uid)
def add_day_intervals(uid, day, source, dest):
    """Write one day's RR intervals under users/<uid>/rr/<YYYYMMDD>/minutes.

    Minute documents are grouped via ``group_by_minute`` and committed in
    batches (``batch`` caps them at 500, the Firestore per-batch limit).
    Days with no intervals write nothing.
    """
    day_rr = pif.get_day_intervals(uid, day, source)
    if len(day_rr) > 0:
        dayname = datetime.strftime(day, "%Y%m%d")
        print(len(day_rr), 'RR intervals in', dayname)
        rr_ref = u_ref(dest, uid).collection('rr')
        # Record the day's total interval count on the day document itself.
        rr_ref.document(dayname).set({'rr_count': len(day_rr)})
        mref = rr_ref.document(dayname).collection('minutes')
        for min_batch in batch(group_by_minute(day_rr)):
            print('adding batch with', len(min_batch), 'minutes')
            gr = dest.batch()
            for (k, v) in min_batch:
                doc = mref.document(k)
                gr.set(doc, v)
            gr.commit()
def batch(d, n=500):
    """Yield the items of mapping ``d`` as lists of at most ``n`` pairs."""
    items = list(d.items())
    total = len(items)
    for start in range(0, total, n):
        # Slicing past the end is safe; Python clamps it automatically.
        yield items[start:start + n]
def group_by_minute(dayrr):
    """Group a day's RR intervals into nested dicts: 'HHMM' -> {'S': [intervals]}.

    dayrr: list of dicts with a ``date`` (datetime) and an ``interval`` value.
    Returns only minutes/seconds that actually contain intervals; interval
    order within a second follows the order in ``dayrr``, and keys are emitted
    in chronological order, matching the previous implementation.

    Resolves the original TODO: one pass over the data instead of rescanning
    the whole list for each of the 1440 minutes of the day.
    """
    from collections import defaultdict

    by_minute = defaultdict(lambda: defaultdict(list))
    for entry in dayrr:
        stamp = entry['date']
        by_minute[(stamp.hour, stamp.minute)][stamp.second].append(entry['interval'])

    d = {}
    for (h, m) in sorted(by_minute):
        miname = str(h).zfill(2) + str(m).zfill(2)
        seconds = by_minute[(h, m)]
        d[miname] = {str(s): seconds[s] for s in sorted(seconds)}
    return d
## Initializing a client to communicate with Firestore
cred = credentials.Certificate(keyfile)
default_app = firebase_admin.initialize_app(cred, options={'databaseURL': databaseURL})
client = firestore.client()
print("Connected to Firestore...")

## For each user id already present in the database, import that user's
## sessions and RR intervals from the CSV files on disk.
users = client.collection('users')
userlist = [int(doc.id) for doc in users.get()]
for uid in userlist:
    print("\n\nUSER", uid, "\n\n")
    add_sessions(uid, start_dt, end_dt, source, client)
    add_intervals(uid, start_dt, end_dt, source, client)
| jucc/HeRV_analysis | pipeline/convert_csv_firestore.py | convert_csv_firestore.py | py | 3,459 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "firebase_admin.db.collection",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "fire... |
1693307394 | import numpy as np
import math
import cv2
# Tracker state: object id -> last centroid, plus the boxes+ids matched in
# the current frame.
center_points = {}
objects_bbs_ids = []
id_count = 0
vechical_count = 0  # running total of counted vehicles (sic)
count = 0
person_id = 0  # NOTE(review): never used

camera = cv2.VideoCapture("video.mp4")
# NOTE(review): created but never used — ``fgbg`` below is the active subtractor.
object_detector = cv2.createBackgroundSubtractorMOG2(history=None, varThreshold=None)
kernelOp = np.ones((3, 3), np.uint8)
kernelC1 = np.ones((11, 11), np.uint8)
fgbg = cv2.createBackgroundSubtractorMOG2(detectShadows=True)
kernel_e = np.ones((5, 5), np.uint8)

while True:
    ret, frame = camera.read()
    if not ret: break
    frame = cv2.resize(frame, None, fx=0.5, fy=0.5)
    # NOTE(review): frame.shape is (rows, cols, channels) — these names are
    # swapped; neither is used afterwards.
    width, height, _ = frame.shape
    # Region of interest: crop to the road area.
    roi = frame[50: 540, 200:960]

    # Foreground mask -> binarize -> open/close -> erode, then find contours.
    fgmask = fgbg.apply(roi)
    ret, imBin = cv2.threshold(fgmask, 200, 255, cv2.THRESH_BINARY)
    mask1 = cv2.morphologyEx(imBin, cv2.MORPH_OPEN, kernelOp)
    mask2 = cv2.morphologyEx(mask1, cv2.MORPH_CLOSE, kernelC1)
    e_img = cv2.erode(mask2, kernel_e)
    contours, _ = cv2.findContours(e_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    detections = []
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 1000:  # ignore small blobs (noise)
            x, y, w, h = cv2.boundingRect(cnt)
            cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
            detections.append([x, y, w, h])

    # Greedy centroid tracker: match each detection to the nearest known id
    # (within 70 px); otherwise register a new id.
    for rect in detections:
        x, y, w, h = rect
        cx = (x + x + w) // 2
        cy = (y + y + h) // 2
        same_object_detected = False
        for id, pt in center_points.items():
            distance = math.hypot(cx - pt[0], cy - pt[1])
            if distance < 70:
                center_points[id] = (cx, cy)
                objects_bbs_ids.append([x, y, w, h, id])
                same_object_detected = True
                # Count a vehicle crossing the virtual line (y in 235..255).
                # NOTE(review): the ``count != 1`` guard seems meant to limit
                # counting to one object per frame — confirm intent.
                if (y >= 235 and y <= 255) and count != 1:
                    count += 1
                    vechical_count += count
        if same_object_detected is False and count != 1:
            center_points[id_count] = (cx, cy)
            objects_bbs_ids.append([x, y, w, h, id_count])
            count += 1
            vechical_count += 1
            id_count += 1

    # Keep only ids that were matched this frame.
    new_center_point = {}
    for obj_bb_id in objects_bbs_ids:
        _, _, _, _, object_id = obj_bb_id
        center = center_points[object_id]
        new_center_point[object_id] = center
    center_points = new_center_point.copy()

    box_ids = objects_bbs_ids
    objects_bbs_ids = []
    count = 0
    for box_id in box_ids:
        x, y, w, h, id = box_id
        cv2.rectangle(roi, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(roi, str(id), (x + 15, y + 15), cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 4)

    # Overlay the running vehicle total in the top-left corner.
    cv2.rectangle(roi, (10, 10), (75, 75), (0, 255, 0), cv2.FILLED)
    cv2.putText(roi, str(vechical_count), (20, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 4)
    cv2.imshow("counter", roi)
    if cv2.waitKey(1) == ord('q'): break

camera.release()
cv2.destroyAllWindows()
| Computer4062/Python-Projects | Road Tracker/counter.py | counter.py | py | 2,880 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.createBackgroundSubtractorMOG2",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.... |
19199461708 | from torch.utils.data import Dataset
import numpy as np
from pathlib import Path
import pandas as pd
import torch
from dpipe.io import load_numpy
class BraTSDataset(Dataset):
    """2-D BraTS slice dataset backed by compressed .npy files.

    meta: table with columns sample_id, relative_path, is_mask and
        is_nonzero_mask; image and mask rows are paired by identical
        sort order on sample_id.
    source_folder: root folder the relative paths are resolved against.
    nonzero_mask: if True, keep only samples whose mask is flagged non-empty.
    transform: optional callable applied to the (image, mask) pair.
    """

    def __init__(self, meta: pd.DataFrame, source_folder: "str | Path", nonzero_mask=False, transform=None):
        if isinstance(source_folder, str):
            source_folder = Path(source_folder)
        if nonzero_mask:
            # Keep only sample_ids that have a non-empty mask.
            meta = meta[meta.sample_id.isin(meta.query('is_nonzero_mask == True').sample_id)]
        self.source_folder = source_folder
        # Images and masks are aligned row-by-row through the shared sort key.
        self.meta_images = meta.query('is_mask == False').sort_values(by='sample_id').reset_index(drop=True)
        self.meta_masks = meta.query('is_mask == True').sort_values(by='sample_id').reset_index(drop=True)
        self.transform = transform

    def __len__(self):
        return self.meta_images.shape[0]

    def __getitem__(self, i):
        image = load_numpy(self.source_folder / self.meta_images.iloc[i]['relative_path'], allow_pickle=True, decompress=True)
        mask = load_numpy(self.source_folder / self.meta_masks.iloc[i]['relative_path'], allow_pickle=True, decompress=True)
        sample = image, mask
        if self.transform:
            image, mask = self.transform(sample)
        # NOTE(review): assumes 240x240 slices (BraTS axial size); the
        # reshape fails for any other shape — confirm upstream guarantees.
        return torch.from_numpy(image).reshape(1, 240, 240), torch.from_numpy(mask).reshape(1, 240, 240).double()
| kurmukovai/hse_projects | 2020/Anvar/data_loader.py | data_loader.py | py | 1,392 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pathlib.Pa... |
27194927025 | import logging
import numpy as np
from sklearn.model_selection import train_test_split, StratifiedKFold
from utils import logging as lg
from heatmap_tutorial import utils as ht_utils
lg.set_logging()
def get_mnist(dataset, dir_path='./data/mnist'):
    """Load an MNIST-format idx dataset from `dir_path`.

    `dataset` is 'train' or 'test' (test files use the 't10k' prefix).
    Returns (x, y): x is (N, 784) float64 scaled to [-1, 1]; y is (N, 10)
    one-hot floats. Raises ValueError for an unknown dataset name.
    """
    if dataset == 'train':
        prefix = 'train'
    elif dataset == 'test':
        prefix = 't10k'
    else:
        raise ValueError('No dataset MNIST - %s' % dataset)
    logging.debug('Load %s : %s' % (dir_path, dataset))
    x_path = '%s/%s-images-idx3-ubyte' % (dir_path, prefix)
    y_path = '%s/%s-labels-idx1-ubyte' % (dir_path, prefix)
    # idx files are binary: open in 'rb' mode (text mode corrupts the data on
    # Windows and np.fromfile expects a binary stream).
    with open(x_path, 'rb') as xf, open(y_path, 'rb') as yf:
        # skip the 16-byte image-file header / 8-byte label-file header
        x = 2.0*np.fromfile(xf, dtype='ubyte', count=-1)[16:].reshape((-1, 784)) / 255 - 1
        y = np.fromfile(yf, dtype='ubyte', count=-1)[8:]
        y = (y[:, np.newaxis] == np.arange(10)) * 1.0
    return x, y
def get_empty_data():
    """Return a blank 28x28 sample filled with -1 (the background value)."""
    blank = np.full((28, 28), -1.0)
    return blank
def fill_left_right_digit(x, y, seed=71):
    """Widen each (28, 28) sample in `x` to (28, 84) by placing it in the
    middle and flanking it left/right with randomly drawn samples from
    OTHER classes. Labels `y` (one-hot) are returned unchanged.

    The draw order is seeded and class-by-class, so output is deterministic
    for a given `seed`.
    """
    new_x = np.zeros((x.shape[0], 28, 28*3))
    # middle slot holds the original, labeled sample
    new_x[:, :, 28:(28*2)] = x
    def plot_sample_propotions(indices, label):
        # debugging helper (only invoked from the commented-out calls below)
        y_not_in_class_i = classes[indices]
        counts = dict()
        for jj in range(10):
            counts[jj] = np.sum(y_not_in_class_i == jj)
        logging.info('%s | sample propotions' % (label))
        logging.info(counts)
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    # plot_sample_propotions(range(classes.shape[0]), 'total')
    for i in range(10):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        # left/right flankers are drawn (with replacement) from other classes
        left_indices = np.random.choice(samples_not_in_class_i, total)
        # plot_sample_propotions(left_indices, 'left-%d' % i)
        right_indices = np.random.choice(samples_not_in_class_i, total)
        # plot_sample_propotions(right_indices, 'right-%d' % i)
        new_x[samples_in_class_i, :, :28] = x[left_indices, :, :]
        new_x[samples_in_class_i, :, -28:] = x[right_indices, :, :]
    return new_x, y
def expand_samples(x, y, n=7, seed=71):
    """Generalization of the 3-slot layout: widen each sample to `n` slots,
    keep the labeled sample in the middle slot (index floor(n/2)) and fill
    every other slot with seeded random draws from OTHER classes.

    Returns (new_x, y) where new_x has shape (N, H, W*n); `y` is unchanged.
    """
    new_x = np.zeros((x.shape[0], x.shape[1], x.shape[2]*n))
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    original_sample_idx = np.floor(n / 2).astype(int)
    # middle slot holds the original, labeled sample
    new_x[:, :, x.shape[2]*original_sample_idx:x.shape[2]*(original_sample_idx+1)] = x
    for i in range(y.shape[1]):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        for j in range(n):
            if j == original_sample_idx:
                continue
            # draw (with replacement) fillers from other classes for slot j
            indices = np.random.choice(samples_not_in_class_i, total)
            new_x[samples_in_class_i, :, j*x.shape[2]:(j+1)*x.shape[2]] = x[indices, :, :]
    return new_x, y
def create_majority_data(x, y, seed=71):
    """Build 3-slot composites where two of the three slots belong to the
    labeled class (the original sample plus another same-class draw) and
    one slot is a sample from a different class, in a random slot order.

    Returns (new_x, y, mark) where mark is an (N, 3) boolean array that is
    True for the slots holding the majority (labeled) class: positions 0
    and 1 of the pre-permutation list are the same-class samples, so the
    mark is `digit_positions <= 1`.
    """
    np.random.seed(seed)
    classes = np.argmax(y, axis=1)
    # triple the width; middle/left/right content is overwritten per sample below
    new_x = np.tile(x, (1, 3))
    digit_positions = np.zeros((new_x.shape[0], 3))
    for i in range(10):
        samples_in_class_i = np.squeeze(np.argwhere(classes == i))
        total = samples_in_class_i.shape[0]
        samples_not_in_class_i = np.squeeze(np.argwhere(classes != i))
        fake_digit_idx = np.random.choice(samples_not_in_class_i, total)
        same_class_digit_idx = np.random.choice(samples_in_class_i, total)
        for j, idx in zip(range(total), samples_in_class_i):
            # dd = [original, same-class draw, other-class draw]
            dd = [x[idx, :, :], x[same_class_digit_idx[j], :, :], x[fake_digit_idx[j], :, :]]
            permuted_pos = np.random.permutation(range(3))
            digit_positions[idx] = permuted_pos
            dd_permuted = [dd[jj] for jj in permuted_pos]
            new_x[idx, :, :] = np.concatenate(dd_permuted, axis=1)
    return new_x, y, digit_positions <= 1
def create_middle_mark(no_x, no_digit=3):
    """Return an (no_x, no_digit) array with 1 in the middle column, 0 elsewhere."""
    mark = np.zeros((no_x, no_digit))
    mark[:, no_digit // 2] = 1
    return mark
def build_cvdataset(data, k=10):
    """Pool all splits of `data` and return `k` stratified (train, test, val)
    folds.

    All of train2d/test2d/val2d are stacked into one pool, then split with a
    seeded StratifiedKFold on the argmax class label. The val entry of each
    tuple aliases the test fold (no separate validation split is made).
    """
    xar = []
    yar = []
    mar = []
    # (removed: a `total_data` accumulator that was computed but never used)
    for d in [data.train2d, data.test2d, data.val2d]:
        xar.append(d.x)
        yar.append(d.y)
        mar.append(d.correct_digit_mark)
    datasets = []
    x = np.vstack(xar)
    logging.info('total x shape')
    logging.info(x.shape)
    y = np.vstack(yar)
    logging.info('total y shape')
    logging.info(y.shape)
    mark = np.vstack(mar)
    logging.info('total mark shape')
    logging.info(mark.shape)
    # stratify on the class index so each fold keeps the label distribution
    skf = StratifiedKFold(n_splits=k, random_state=71, shuffle=True)
    for train_indices, test_indices in skf.split(x, np.argmax(y, axis=1)):
        dtrain = DataSet(x=x[train_indices, ...], y=y[train_indices, ...], correct_digit_mark=mark[train_indices, ...])
        dtest = DataSet(x=x[test_indices, ...], y=y[test_indices, ...], correct_digit_mark=mark[test_indices, ...])
        datasets.append((dtrain, dtest, dtest))
    return datasets
class DatasetLoader():
    """Constructs named datasets on demand and memoizes them per name."""

    def __init__(self, data_dir):
        self.prepend_dir = lambda p: '%s/%s' % (data_dir, p)
        self.cache = dict()

    def load(self, dataset_name):
        """Return the dataset registered under `dataset_name`.

        The dataset is built on first request and cached; an unknown name
        raises SystemError.
        """
        cached = self.cache.get(dataset_name)
        if cached:
            return cached
        # lambdas keep construction lazy: only the requested dataset is built
        factories = {
            'mnist': lambda: MNISTData(dir_path=self.prepend_dir('mnist')),
            'fashion-mnist': lambda: FashionMNISTData(dir_path=self.prepend_dir('fashion-mnist')),
            'ufi-cropped': lambda: UFICroppedData(dir_path=self.prepend_dir('ufi-cropped')),
            'mnist-3-digits': lambda: MNIST3DigitsData(dir_path=self.prepend_dir('mnist')),
            'mnist-3-digits-maj': lambda: MNIST3DigitsWithMajorityData(dir_path=self.prepend_dir('mnist')),
            'fashion-mnist-3-items': lambda: FashionMNIST3ItemsData(dir_path=self.prepend_dir('fashion-mnist')),
            'fashion-mnist-3-items-maj': lambda: FashionMNIST3DigitsWithMajorityData(dir_path=self.prepend_dir('fashion-mnist')),
            'mnist-7-digits': lambda: MNISTMiddleSampleProblem(n=7, seed=5, dir_path=self.prepend_dir('mnist')),
            'fashion-mnist-7-items': lambda: MNISTMiddleSampleProblem(n=7, seed=15, dir_path=self.prepend_dir('fashion-mnist')),
        }
        if dataset_name not in factories:
            raise SystemError('No dataset name `%s`' % dataset_name)
        self.cache[dataset_name] = factories[dataset_name]()
        return self.cache[dataset_name]
class DataSet:
    """Plain (x, y) sample container with an optional per-sample slot mark."""

    def __init__(self, x, y, correct_digit_mark=None):
        self.x = x
        self.y = y
        self.correct_digit_mark = correct_digit_mark

    def get_batch(self, no_batch, seed=71):
        """Yield (x, y) mini-batches of size `no_batch` after a seeded shuffle.

        The last batch may be smaller when the sample count is not a
        multiple of `no_batch`.
        """
        n_samples = len(self.x)
        np.random.seed(seed)
        order = np.random.permutation(n_samples)
        shuffled_x = self.x[order, :, :]
        shuffled_y = self.y[order, :]
        for start in range(0, n_samples, no_batch):
            stop = min(start + no_batch, n_samples)
            yield (shuffled_x[start:stop], shuffled_y[start:stop])
class MNISTData:
    """MNIST digit dataset with train/val/test splits in flat and 2-D form."""

    def __init__(self, dir_path='./data/mnist'):
        self.dir_path = dir_path
        x_train, y_train = get_mnist('train', dir_path=dir_path)
        x_test, y_test = get_mnist('test', dir_path=dir_path)
        # carve a fixed 20% validation split out of the training set
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=71)
        self.no_classes = 10
        self.dims = (28, 28)
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        # same data reshaped to (N, 28, 28) for image-shaped consumers
        self.train2d = DataSet(x_train.reshape(-1, 28, 28), y_train)
        self.val2d = DataSet(x_val.reshape(-1, 28, 28), y_val)
        self.test2d = DataSet(x_test.reshape(-1, 28, 28), y_test)
        self.labels = {digit: 'Digit %d' % digit for digit in range(10)}

    def get_text_label(self, label_index):
        """Human-readable name for a class index."""
        return 'Digit %d' % label_index

    def get_samples_for_vis(self, n=12):
        """Return `n` seeded-random test samples for visualization."""
        np.random.seed(1234)
        picks = np.random.randint(0, self.test2d.y.shape[0], n)
        return self.test2d.x[picks, :, :], self.test2d.y[picks]
class MNIST3DigitsData(MNISTData):
    """MNIST variant where each sample is three digits side by side: the
    labeled digit in the middle, flanked by digits from other classes
    (built via fill_left_right_digit); correct_digit_mark flags the
    middle slot."""

    def __init__(self, **kwargs):
        super(MNIST3DigitsData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # distinct seeds per split so the flanking digits differ across splits
        self.train2d = DataSet(*fill_left_right_digit(self.train2d.x, self.train2d.y, seed=0),
                               correct_digit_mark=create_middle_mark(self.train2d.x.shape[0]))
        self.val2d = DataSet(*fill_left_right_digit(self.val2d.x, self.val2d.y, seed=1),
                             correct_digit_mark=create_middle_mark(self.val2d.x.shape[0])
                             )
        self.test2d = DataSet(*fill_left_right_digit(self.test2d.x, self.test2d.y, seed=3),
                              correct_digit_mark=create_middle_mark(self.test2d.x.shape[0])
                              )
        # the flat views are replaced by the widened 2-D datasets
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class MNIST3DigitsWithMajorityData(MNISTData):
    """MNIST variant where each sample is three digits side by side and two
    of the three belong to the labeled (majority) class; correct_digit_mark
    flags which slots hold the majority class (see create_majority_data)."""

    def __init__(self, **kwargs):
        super(MNIST3DigitsWithMajorityData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # distinct seeds per split so the composites differ across splits
        x, y, train2d_correct_digit_mark = create_majority_data(self.train2d.x, self.train2d.y, seed=0)
        self.train2d = DataSet(x, y, correct_digit_mark=train2d_correct_digit_mark)
        assert self.train2d.correct_digit_mark.shape[0] == self.train2d.y.shape[0]
        x, y, val2d_correct_digit_mark = create_majority_data(self.val2d.x, self.val2d.y, seed=1)
        self.val2d = DataSet(x, y, correct_digit_mark=val2d_correct_digit_mark)
        assert self.val2d.correct_digit_mark.shape[0] == self.val2d.y.shape[0]
        x, y, test2d_correct_digit_mark = create_majority_data(self.test2d.x, self.test2d.y, seed=3)
        self.test2d = DataSet(x, y, correct_digit_mark=test2d_correct_digit_mark)
        assert self.test2d.correct_digit_mark.shape[0] == self.test2d.y.shape[0]
        # the flat views are replaced by the widened 2-D datasets
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class MNISTMiddleSampleProblem(MNISTData):
    """Dataset where each sample is `n` images side by side; the label
    belongs to the middle image and the other n-1 slots hold samples from
    other classes (see expand_samples). Also used for Fashion-MNIST via
    dir_path."""

    def __init__(self, n=7, seed=1, **kwargs):
        super(MNISTMiddleSampleProblem, self).__init__(**kwargs)
        self.dims = (28, 28*n)
        # widen every split to n side-by-side samples, distinct seed per split
        self.train2d = DataSet(*expand_samples(self.train2d.x, self.train2d.y, n, seed=seed))
        self.train2d_correct_digit_mark = create_middle_mark(self.train2d.x.shape[0], no_digit=n)
        self.val2d = DataSet(*expand_samples(self.val2d.x, self.val2d.y, n, seed=seed+1))
        self.val2d_correct_digit_mark = create_middle_mark(self.val2d.x.shape[0], no_digit=n)
        self.test2d = DataSet(*expand_samples(self.test2d.x, self.test2d.y, n, seed=seed+2))
        self.test2d_correct_digit_mark = create_middle_mark(self.test2d.x.shape[0], no_digit=n)
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
        labels = {
            0: 'Digit 0',
            1: 'Digit 1',
            2: 'Digit 2',
            3: 'Digit 3',
            4: 'Digit 4',
            5: 'Digit 5',
            6: 'Digit 6',
            7: 'Digit 7',
            8: 'Digit 8',
            9: 'Digit 9'
        }
        # FIX: use kwargs.get so the class still works when the caller relies
        # on the parent's default dir_path (kwargs['dir_path'] raised KeyError).
        if 'fashion' in kwargs.get('dir_path', ''):
            labels = {
                0: 'T-shirt/top',
                1: 'Trouser',
                2: 'Pullover',
                3: 'Dress',
                4: 'Coat',
                5: 'Sandal',
                6: 'Shirt',
                7: 'Sneaker',
                8: 'Bag',
                9: 'Ankle boot'
            }
        self.labels = labels
class FashionMNISTData:
    """Fashion-MNIST dataset with train/val/test splits in flat and 2-D form."""

    def __init__(self, dir_path='./data/fashion-mnist'):
        x_train, y_train = get_mnist('train', dir_path=dir_path)
        x_test, y_test = get_mnist('test', dir_path=dir_path)
        # carve a fixed 20% validation split out of the training set
        x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.2, random_state=71)
        self.dims = (28, 28)
        self.no_classes = 10
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        # same data reshaped to (N, 28, 28) for image-shaped consumers
        self.train2d = DataSet(x_train.reshape(-1, 28, 28), y_train)
        self.val2d = DataSet(x_val.reshape(-1, 28, 28), y_val)
        self.test2d = DataSet(x_test.reshape(-1, 28, 28), y_test)
        self.labels = {
            0: 'T-shirt/top',
            1: 'Trouser',
            2: 'Pullover',
            3: 'Dress',
            4: 'Coat',
            5: 'Sandal',
            6: 'Shirt',
            7: 'Sneaker',
            8: 'Bag',
            9: 'Ankle boot'
        }

    def get_text_label(self, label_index):
        """Human-readable garment name for a class index."""
        return self.labels[label_index]

    def get_samples_for_vis(self, n=12):
        """Return up to `n` hand-picked test samples for visualization."""
        chosen = [588, 314, 47, 145, 258, 641, 561, 3410, 1094, 4059, 518, 9304][:n]
        return self.test2d.x[chosen, :], self.test2d.y[chosen]
class FashionMNIST3ItemsData(FashionMNISTData):
    """Fashion-MNIST variant where each sample is three items side by side:
    the labeled item in the middle, flanked by items from other classes
    (built via fill_left_right_digit); correct_digit_mark flags the
    middle slot."""

    def __init__(self, **kwargs):
        super(FashionMNIST3ItemsData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # distinct seeds per split so the flanking items differ across splits
        self.train2d = DataSet(*fill_left_right_digit(self.train2d.x, self.train2d.y, seed=20),
                               correct_digit_mark=create_middle_mark(self.train2d.x.shape[0]))
        self.val2d = DataSet(*fill_left_right_digit(self.val2d.x, self.val2d.y, seed=21),
                             correct_digit_mark=create_middle_mark(self.val2d.x.shape[0])
                             )
        self.test2d = DataSet(*fill_left_right_digit(self.test2d.x, self.test2d.y, seed=23),
                              correct_digit_mark=create_middle_mark(self.test2d.x.shape[0])
                              )
        # the flat views are replaced by the widened 2-D datasets
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class FashionMNIST3DigitsWithMajorityData(FashionMNISTData):
    """Fashion-MNIST variant where each sample is three items side by side
    and two of the three belong to the labeled (majority) class;
    correct_digit_mark flags which slots hold the majority class
    (see create_majority_data)."""

    def __init__(self, **kwargs):
        super(FashionMNIST3DigitsWithMajorityData, self).__init__(**kwargs)
        self.dims = (28, 28*3)
        # distinct seeds per split so the composites differ across splits
        x, y, train2d_correct_digit_mark = create_majority_data(self.train2d.x, self.train2d.y, seed=0)
        self.train2d = DataSet(x, y, correct_digit_mark=train2d_correct_digit_mark)
        assert self.train2d.correct_digit_mark.shape[0] == self.train2d.y.shape[0]
        x, y, val2d_correct_digit_mark = create_majority_data(self.val2d.x, self.val2d.y, seed=1)
        self.val2d = DataSet(x, y, correct_digit_mark=val2d_correct_digit_mark)
        assert self.val2d.correct_digit_mark.shape[0] == self.val2d.y.shape[0]
        x, y, test2d_correct_digit_mark = create_majority_data(self.test2d.x, self.test2d.y, seed=3)
        self.test2d = DataSet(x, y, correct_digit_mark=test2d_correct_digit_mark)
        assert self.test2d.correct_digit_mark.shape[0] == self.test2d.y.shape[0]
        # the flat views are replaced by the widened 2-D datasets
        self.train = self.train2d
        self.val = self.val2d
        self.test = self.test2d
class UFICroppedData:
    """UFI cropped-faces dataset: 128x128 grayscale images average-pooled to
    64x64 and augmented with horizontal flips. The test split doubles as the
    validation split."""

    def __init__(self, dir_path='./data/ufi-cropped'):
        # subsampling_indices = list(np.arange(0, 128, 2))
        def avg_pooling(x):
            # 2x2 average pooling: (N, 128, 128) -> (N, 64, 64)
            new_x = np.zeros((x.shape[0], 64, 64))
            for i in range(0, x.shape[1], 2):
                for j in range(0, x.shape[2], 2):
                    new_x[:, int(i/2), int(j/2)] = np.mean(x[:, i:(i+2), j:(j+2)].reshape(-1, 4), axis=1)
            return new_x
        def flip_data(x, y):
            # append horizontally mirrored copies, then shuffle with a fixed seed
            total = x.shape[0]
            new_x = np.tile(x, (2, 1, 1))
            new_y = np.tile(y, (2, 1))
            new_x[total:, :, :] = x[:, :, ::-1]
            np.random.seed(0)
            shuffled_indices = np.arange(total*2)
            np.random.shuffle(shuffled_indices)
            return new_x[shuffled_indices, :, :], new_y[shuffled_indices, :]
        x_train = avg_pooling(np.load('%s/train-x.npy' % dir_path).reshape(-1, 128, 128))
        y_train = np.load('%s/train-y.npy' % dir_path)
        # print(x_train[0])
        # print(np.argmax(y_train[0]))
        x_train, y_train = flip_data(x_train, y_train)
        # print('Train data', x_train.shape)
        x_test = avg_pooling(np.load('%s/test-x.npy' % dir_path).reshape(-1, 128, 128))
        y_test = np.load('%s/test-y.npy' % dir_path)
        x_test, y_test = flip_data(x_test, y_test)
        # print('Test data', x_test.shape)
        self.dims = (64, 64)
        # This is a bad idea but we have limited amount of data
        x_val, y_val = x_test, y_test
        self.no_classes = y_test.shape[1]
        self.train = DataSet(x_train, y_train)
        self.val = DataSet(x_val, y_val)
        self.test = DataSet(x_test, y_test)
        # x is already (N, 64, 64), so the 2-D views alias the same arrays
        self.train2d = DataSet(x_train, y_train)
        self.val2d = DataSet(x_val, y_val)
        self.test2d = DataSet(x_test, y_test)

    def get_samples_for_vis(self, n=12):
        # NOTE(review): the hard-coded index list ignores `n`, and samples come
        # from the TRAINING set (see the warning) — confirm this is intended.
        print("WARNING! this is data sampled from training set not testing one")
        indices = [2785, 2973, 57, 906, 393, 3666, 3502, 1222, 731, 2659, 3400, 656]
        return self.train2d.x[indices, :], self.train2d.y[indices]

    def get_text_label(self, label_index):
        """Human-readable name for a class (person) index."""
        return 'Person %d' % label_index
| p16i/thesis-designing-recurrent-neural-networks-for-explainability | src/utils/data_provider.py | data_provider.py | py | 17,506 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "utils.logging.set_logging",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "utils.logging",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "logging.debug",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.fromfile"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.