seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
# seq_id: 39455791168
def even_odd(data, redata):
    """Partition `data` by parity into `redata` (mutated in place and returned).

    Even values are pushed onto the front (so they end up in reverse input
    order); odd values are appended to the back (keeping input order), e.g.
    [1,2,3,4] -> [4,2,1,3].

    Iterative rewrite of the original tail recursion: avoids Python's
    recursion limit and the O(n^2) cost of re-slicing `data` on every call.
    """
    for item in data:
        if item % 2 == 0:
            redata.insert(0, item)   # evens accumulate reversed at the front
        else:
            redata.append(item)      # odds keep their relative order at the back
    return redata

if __name__ == '__main__':
    print(even_odd([1, 2, 3, 4, 5, 6, 7], []))
    print(even_odd([1, 3, 5, 7, 2, 4, 6, 8], []))
| huyuan95/Learn-Python | data structure/ch04/C4_19.py | C4_19.py | py | 401 | python | en | code | 0 | github-code | 36 |
# seq_id: 38567808559
# Depth-first search over a small undirected graph stored as an adjacency list.
# (A commented-out duplicate implementation using a visited dict was removed.)
adjList = {
    'a' : ['b','c','d'],
    'b' : ['a','c'],
    'c' : ['a','b','d','e'],
    'd' : ['a','c','e'],
    'e' : ['c','d','f'],
    'f' : ['e']
}

visited = set()      # vertices already explored
dfsTraversal = []    # visit order

def dfs(source):
    """Recursively visit `source` and every unvisited vertex reachable from it.

    Appends each vertex to the module-level `dfsTraversal` in visit order and
    records it in `visited`. Neighbours are explored in adjacency-list order.
    """
    visited.add(source)
    dfsTraversal.append(source)
    for vertex in adjList[source]:  # exploring adjacent vertex of source
        if vertex not in visited:
            dfs(vertex)

dfs('a')
print(dfsTraversal)
| archanakalburgi/Algorithms | Graphs/depthFirstSeaarch.py | depthFirstSeaarch.py | py | 863 | python | en | code | 1 | github-code | 36 |
# seq_id: 34338800542
# https://leetcode.com/problems/n-ary-tree-level-order-traversal/
from typing import List

class Node:
    """N-ary tree node (LeetCode-provided definition)."""
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children

class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Breadth-first traversal: return node values grouped level by level."""
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            # Record this level's values, then gather every child for the next.
            levels.append([node.val for node in frontier])
            nxt = []
            for node in frontier:
                if node.children:
                    nxt.extend(node.children)
            frontier = nxt
        return levels
| 0x0400/LeetCode | p429.py | p429.py | py | 714 | python | en | code | 0 | github-code | 36 |
34734577549 | import numpy as np
import time
pi = np.pi
naxis = np.newaxis
F_2D = lambda x: np.fft.fft2(x, axes=(0, 1))
IF_2D = lambda x: np.fft.ifft2(x, axes=(0, 1))
F_3D = lambda x: np.fft.fftn(x, axes=(0, 1, 2))
IF_3D = lambda x: np.fft.ifftn(x, axes=(0, 1, 2))
def pupilGen(fxlin, fylin, wavelength, na, na_in=0.0):
    '''
    pupilGen create a circular (optionally annular) pupil function in Fourier space.
    Inputs:
        fxlin     : 1D spatial frequency coordinate in horizontal direction
        fylin     : 1D spatial frequency coordinate in vertical direction
        wavelength: wavelength of incident light
        na        : numerical aperture of the imaging system
        na_in     : put a non-zero number smaller than na to generate an annular function
    Output:
        pupil     : pupil function (float32, 1.0 inside the passband, 0.0 outside)
    '''
    # Squared radial spatial frequency on the 2D grid (broadcast row x column).
    freq_sq = fxlin[np.newaxis, :]**2 + fylin[:, np.newaxis]**2
    # Pass everything inside the NA cutoff radius na/wavelength.
    pupil = (freq_sq <= (na/wavelength)**2).astype("float32")
    if na_in != 0.0:
        # Carve out the inner disk to form an annulus.
        pupil[freq_sq < (na_in/wavelength)**2] = 0.0
    return pupil
def _genGrid(size, dx):
'''
_genGrid create a 1D coordinate vector.
Inputs:
size : length of the coordinate vector
dx : step size of the 1D coordinate
Output:
grid : 1D coordinate vector
'''
xlin = np.arange(size, dtype='complex64')
return (xlin-size//2)*dx
class Solver3DDPC:
    '''
    Solver3DDPC class provides methods to preprocess 3D DPC measurements and recovers 3D refractive index with Tikhonov or TV regularization.
    '''
    def __init__(self, dpc_imgs, wavelength, na, na_in, pixel_size, pixel_size_z, rotation, RI_medium):
        '''
        Initialize system parameters and functions for DPC phase microscopy.
        '''
        self.wavelength = wavelength
        self.na = na
        self.na_in = na_in
        self.pixel_size = pixel_size
        self.pixel_size_z = pixel_size_z
        self.rotation = rotation
        self.dpc_num = len(rotation)
        # Spatial-frequency axes (ifftshifted so index 0 corresponds to DC).
        self.fxlin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[1], 1.0/dpc_imgs.shape[1]/self.pixel_size))
        self.fylin = np.fft.ifftshift(_genGrid(dpc_imgs.shape[0], 1.0/dpc_imgs.shape[0]/self.pixel_size))
        self.dpc_imgs = dpc_imgs.astype('float32')
        self.RI_medium = RI_medium
        # Hamming window along z to suppress ringing in the axial FFT.
        self.window = np.fft.ifftshift(np.hamming(dpc_imgs.shape[2]))
        self.pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na)
        # Defocus phase and obliquity factor, both restricted to the pupil support.
        self.phase_defocus = self.pupil*2.0*pi*((1.0/wavelength)**2-self.fxlin[naxis, :]**2-self.fylin[:, naxis]**2)**0.5
        self.oblique_factor = self.pupil/4.0/pi/((RI_medium/wavelength)**2-self.fxlin[naxis, :]**2-self.fylin[:, naxis]**2)**0.5
        self.normalization()
        self.sourceGen()
        self.WOTFGen()
    def normalization(self):
        '''
        Normalize the 3D intensity stacks by their average illumination intensities, and subtract the mean.
        '''
        self.dpc_imgs /= np.mean(self.dpc_imgs, axis=(0, 1, 2), keepdims=True)
        self.dpc_imgs -= 1.0
    def sourceGen(self):
        '''
        Generate DPC source patterns based on the rotation angles and numerical aperture of the illuminations.
        '''
        self.source = []
        pupil = pupilGen(self.fxlin, self.fylin, self.wavelength, self.na, na_in=self.na_in)
        for rot_index in range(self.dpc_num):
            self.source.append(np.zeros((self.dpc_imgs.shape[:2]), dtype='float32'))
            rotdegree = self.rotation[rot_index]
            # Half-plane mask at the given rotation angle; the 1e-15 bias keeps
            # the axis pixels on a consistent side of the divide.
            if rotdegree < 180:
                self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15>=
                                self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = 1.0
                self.source[-1] *= pupil
            else:
                # Angles >= 180 deg: build the complementary half (pupil minus masked half).
                self.source[-1][self.fylin[:, naxis]*np.cos(np.deg2rad(rotdegree))+1e-15<
                                self.fxlin[naxis, :]*np.sin(np.deg2rad(rotdegree))] = -1.0
                self.source[-1] *= pupil
                self.source[-1] += pupil
        self.source = np.asarray(self.source)
    def sourceFlip(self, source):
        '''
        Flip the sources in vertical and horizontal directions, since the coordinates of the source plane and the pupil plane are opposite.
        '''
        source_flip = np.fft.fftshift(source)
        source_flip = source_flip[::-1, ::-1]
        # For even dimensions the flip moves the DC pixel by one; roll it back.
        if np.mod(source_flip.shape[0], 2)==0:
            source_flip = np.roll(source_flip, 1, axis=0)
        if np.mod(source_flip.shape[1], 2)==0:
            source_flip = np.roll(source_flip, 1, axis=1)
        return np.fft.ifftshift(source_flip)
    def WOTFGen(self):
        '''
        Generate the absorption (imaginary part) and phase (real part) weak object transfer functions (WOTFs) using the sources and the pupil.
        '''
        dim_x = self.dpc_imgs.shape[1]
        dim_y = self.dpc_imgs.shape[0]
        dfx = 1.0/dim_x/self.pixel_size
        dfy = 1.0/dim_y/self.pixel_size
        # Axial coordinate and angular-spectrum propagation kernel per defocus plane.
        z_lin = np.fft.ifftshift(_genGrid(self.dpc_imgs.shape[2], self.pixel_size_z))
        prop_kernel = np.exp(1.0j*z_lin[naxis, naxis, :]*self.phase_defocus[:, :, naxis])
        self.H_real = []
        self.H_imag = []
        for rot_index in range(self.dpc_num):
            source_flip = self.sourceFlip(self.source[rot_index])
            # Cross-correlation term F{S*P*G} * conj(F{P*G*oblique}) of the WOTF model.
            FSP_cFPG = F_2D(source_flip[:, :, naxis]*self.pupil[:, :, naxis]*prop_kernel)*\
                       F_2D(self.pupil[:, :, naxis]*prop_kernel*self.oblique_factor[:, :, naxis]).conj()
            self.H_real.append(2.0*IF_2D(1.0j*FSP_cFPG.imag*dfx*dfy))
            self.H_real[-1] *= self.window[naxis, naxis, :]
            self.H_real[-1] = np.fft.fft(self.H_real[-1], axis=2)*self.pixel_size_z
            self.H_imag.append(2.0*IF_2D(FSP_cFPG.real*dfx*dfy))
            self.H_imag[-1] *= self.window[naxis, naxis, :]
            self.H_imag[-1] = np.fft.fft(self.H_imag[-1], axis=2)*self.pixel_size_z
            # Normalize by the total transmitted source energy.
            total_source = np.sum(source_flip*self.pupil*self.pupil.conj())*dfx*dfy
            self.H_real[-1] *= 1.0j/total_source
            self.H_imag[-1] *= 1.0/total_source
            print("3D weak object transfer function {:02d}/{:02d} has been evaluated.".format(rot_index+1, self.dpc_num), end="\r")
        self.H_real = np.array(self.H_real).astype('complex64')
        self.H_imag = np.array(self.H_imag).astype('complex64')
    def _V2RI(self, V_real, V_imag):
        '''
        Convert the complex scattering potential (V) into the refractive index. Imaginary part of the refractive index is dumped.
        '''
        # Solve the quadratic relating scattering potential and refractive index
        # (quadratic formula on n^2); only the real refractive index is returned.
        wavenumber = 2.0*pi/self.wavelength
        B = -1.0*(self.RI_medium**2-V_real/wavenumber**2)
        C = -1.0*(-1.0*V_imag/wavenumber**2/2.0)**2
        RI_obj = ((-1.0*B+(B**2-4.0*C)**0.5)/2.0)**0.5
        return np.array(RI_obj)
    def setRegularizationParameters(self, reg_real=5e-5, reg_imag=5e-5, tau=5e-5, rho = 5e-5):
        '''
        Set regularization parameters for Tikhonov deconvolution and total variation regularization.
        '''
        # Tikhonov regularization parameters
        self.reg_real = reg_real
        self.reg_imag = reg_imag
        # TV regularization parameters
        self.tau = tau
        # ADMM penalty parameter
        self.rho = rho
    def _prox_LASSO(self, V1_k, y_DV_k, use_gpu):
        '''
        _prox_LASSO performs the proximal operator and solves the LASSO problem with L1 norm for total variation regularization.
        Inputs:
            V1_k    : complex scattering potential
            y_DV_k  : Lagrange multipliers for the gradient vectors of the scattering potential
            use_gpu : flag to specify gpu usage
        Output:
            DV_k         : soft-thresholded gradient vectors of the scattering potential
            DV_k_or_diff : difference between the thresholded gradient vectors and the original ones
        '''
        # Channels 0..2: forward differences (x, y, z) of the imaginary part;
        # channels 3..5: same for the real part. Periodic wrap via roll/shift.
        if use_gpu:
            shape_3d = self.dpc_imgs.shape[:3]
            DV_k_or_diff = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 6, dtype=af.Dtype.f32)
            DV_k_or_diff[:,:,:,0] = V1_k[:,:,:,0] - af.shift(V1_k[:,:,:,0], 0, -1)
            DV_k_or_diff[:,:,:,1] = V1_k[:,:,:,0] - af.shift(V1_k[:,:,:,0], -1)
            DV_k_or_diff[:,:,:,2] = V1_k[:,:,:,0] - af.shift(V1_k[:,:,:,0], 0, 0, -1)
            DV_k_or_diff[:,:,:,3] = V1_k[:,:,:,1] - af.shift(V1_k[:,:,:,1], 0, -1)
            DV_k_or_diff[:,:,:,4] = V1_k[:,:,:,1] - af.shift(V1_k[:,:,:,1], -1)
            DV_k_or_diff[:,:,:,5] = V1_k[:,:,:,1] - af.shift(V1_k[:,:,:,1], 0, 0, -1)
        else:
            DV_k_or_diff = np.zeros(self.dpc_imgs.shape[:3]+ (6, ), dtype='float32')
            DV_k_or_diff[:,:,:,0] = V1_k[:,:,:,0] - np.roll(V1_k[:,:,:,0], -1, axis=1)
            DV_k_or_diff[:,:,:,1] = V1_k[:,:,:,0] - np.roll(V1_k[:,:,:,0], -1, axis=0)
            DV_k_or_diff[:,:,:,2] = V1_k[:,:,:,0] - np.roll(V1_k[:,:,:,0], -1, axis=2)
            DV_k_or_diff[:,:,:,3] = V1_k[:,:,:,1] - np.roll(V1_k[:,:,:,1], -1, axis=1)
            DV_k_or_diff[:,:,:,4] = V1_k[:,:,:,1] - np.roll(V1_k[:,:,:,1], -1, axis=0)
            DV_k_or_diff[:,:,:,5] = V1_k[:,:,:,1] - np.roll(V1_k[:,:,:,1], -1, axis=2)
        DV_k = DV_k_or_diff - y_DV_k
        # Soft-thresholding (prox of the L1 norm) with threshold tau/rho.
        if use_gpu:
            DV_k = af.maxof(DV_k-self.tau/self.rho, 0.0) - af.maxof(-DV_k-self.tau/self.rho, 0.0)
        else:
            DV_k = np.maximum(DV_k-self.tau/self.rho, 0.0) - np.maximum(-DV_k-self.tau/self.rho, 0.0)
        DV_k_or_diff = DV_k - DV_k_or_diff
        return DV_k, DV_k_or_diff
    def _prox_projection(self, V1_k, V2_k, y_V2_k, boundary_constraint):
        '''
        _prox_projection performs Euclidean norm projection to impose positivity or negativity constraints on the scattering potential.
        Inputs:
            V1_k                : complex scattering potential
            V2_k                : splitted complex scattering potential
            y_V2_k              : Lagrange multipliers for the splitted scattering potential
            boundary_constraint : indicate whether to use positive or negative constraint on the scattering potential
        Output:
            V2_k : updated splitted complex scattering potential
        '''
        V2_k = V1_k + y_V2_k
        V_real = V2_k[:,:,:,1]
        V_imag = V2_k[:,:,:,0]
        if boundary_constraint["real"]=="positive":
            V_real[V_real<0.0] = 0.0
        elif boundary_constraint["real"]=="negative":
            V_real[V_real>0.0] = 0.0
        # NOTE(review): the imaginary-part branches mask on V_real, not V_imag;
        # this may be intentional coupling but looks like a typo -- confirm.
        if boundary_constraint["imag"]=="positive":
            V_imag[V_real<0.0] = 0.0
        elif boundary_constraint["imag"]=="negative":
            V_imag[V_real>0.0] = 0.0
        V2_k[:,:,:,0] = V_imag
        V2_k[:,:,:,1] = V_real
        return V2_k
    def _deconvTikhonov(self, AHA, AHy, determinant, use_gpu):
        '''
        _deconvTikhonov solves a Least-Squares problem with L2 regularization.
        '''
        # Invert the 2x2 block system in Fourier space via Cramer's rule,
        # then return the real parts of the inverse 3D FFTs.
        if use_gpu:
            V_real = af.real(af.ifft3((AHA[0]*AHy[1]-AHA[2]*AHy[0])/determinant))
            V_imag = af.real(af.ifft3((AHA[3]*AHy[0]-AHA[1]*AHy[1])/determinant))
        else:
            V_real = IF_3D((AHA[0]*AHy[1]-AHA[2]*AHy[0])/determinant).real
            V_imag = IF_3D((AHA[3]*AHy[0]-AHA[1]*AHy[1])/determinant).real
        return V_real, V_imag
    def _deconvTV(self, AHA, determinant, fIntensity, fDx, fDy, fDz, tv_max_iter, boundary_constraint, use_gpu):
        '''
        _deconvTV solves the 3D DPC deconvolution with total variation regularization and boundary value constraints using the ADMM algorithm.
        '''
        AHy =[(self.H_imag.conj()*fIntensity).sum(axis=0), (self.H_real.conj()*fIntensity).sum(axis=0)]
        # ADMM state: V1 (primal), V2 (split copy for the projection), DV
        # (gradient split for TV), and the matching scaled dual variables y_*.
        if use_gpu:
            shape_3d = self.dpc_imgs.shape[:3]
            V1_k = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 2, dtype=af.Dtype.f32)
            V2_k = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 2, dtype=af.Dtype.f32)
            DV_k = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 6, dtype=af.Dtype.f32)
            y_DV_k = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 6, dtype=af.Dtype.f32)
            y_V2_k = af.constant(0.0, shape_3d[0], shape_3d[1], shape_3d[2], 2, dtype=af.Dtype.f32)
            AHy = [af.to_array(AHy_i) for AHy_i in AHy]
        else:
            V1_k = np.zeros(self.dpc_imgs.shape[:3]+ (2, ), dtype='float32')
            V2_k = np.zeros(self.dpc_imgs.shape[:3]+ (2, ), dtype='float32')
            DV_k = np.zeros(self.dpc_imgs.shape[:3]+ (6, ), dtype='float32')
            y_DV_k = np.zeros(self.dpc_imgs.shape[:3]+ (6, ), dtype='float32')
            y_V2_k = np.zeros(self.dpc_imgs.shape[:3]+ (2, ), dtype='float32')
        t_start = time.time()
        for iteration in range(tv_max_iter):
            # solve Least-Squares
            if use_gpu:
                AHy_k = [AHy[0]+self.rho*(af.fft3(V2_k[:,:,:,0]-y_V2_k[:,:,:,0])+ af.conjg(fDx)*af.fft3(DV_k[:,:,:,0]+y_DV_k[:,:,:,0])\
                         + af.conjg(fDy)*af.fft3(DV_k[:,:,:,1]+y_DV_k[:,:,:,1])\
                         + af.conjg(fDz)*af.fft3(DV_k[:,:,:,2]+y_DV_k[:,:,:,2])),\
                         AHy[1]+self.rho*(af.fft3(V2_k[:,:,:,1]-y_V2_k[:,:,:,1])+ af.conjg(fDx)*af.fft3(DV_k[:,:,:,3]+y_DV_k[:,:,:,3])\
                         + af.conjg(fDy)*af.fft3(DV_k[:,:,:,4]+y_DV_k[:,:,:,4])\
                         + af.conjg(fDz)*af.fft3(DV_k[:,:,:,5]+y_DV_k[:,:,:,5]))]
            else:
                AHy_k = [AHy[0]+self.rho*(F_3D(V2_k[:,:,:,0]-y_V2_k[:,:,:,0])+ fDx.conj()*F_3D(DV_k[:,:,:,0]+y_DV_k[:,:,:,0])\
                         + fDy.conj()*F_3D(DV_k[:,:,:,1]+y_DV_k[:,:,:,1])\
                         + fDz.conj()*F_3D(DV_k[:,:,:,2]+y_DV_k[:,:,:,2])),\
                         AHy[1]+self.rho*(F_3D(V2_k[:,:,:,1]-y_V2_k[:,:,:,1])+ fDx.conj()*F_3D(DV_k[:,:,:,3]+y_DV_k[:,:,:,3])\
                         + fDy.conj()*F_3D(DV_k[:,:,:,4]+y_DV_k[:,:,:,4])\
                         + fDz.conj()*F_3D(DV_k[:,:,:,5]+y_DV_k[:,:,:,5]))]
            V1_k[:,:,:,1],\
            V1_k[:,:,:,0] = self._deconvTikhonov(AHA, AHy_k, determinant, use_gpu)
            # solve LASSO proximal step
            DV_k, DV_k_diff = self._prox_LASSO(V1_k, y_DV_k, use_gpu)
            # solve Euclidean proximal step
            V2_k = self._prox_projection(V1_k, V2_k, y_V2_k, boundary_constraint)
            # dual update
            y_DV_k += DV_k_diff;
            y_V2_k += V1_k - V2_k;
            print("elapsed time: {:5.2f} seconds, iteration : {:02d}/{:02d}".format(time.time()-t_start, iteration+1, tv_max_iter), end="\r")
        return V1_k[:,:,:,1], V1_k[:,:,:,0]
    def solve(self, method="Tikhonov", tv_max_iter=20, boundary_constraint={"real":"negative", "imag":"negative"}, use_gpu=False):
        '''
        Recover the 3D refractive index from the normalized DPC measurements.
        Inputs:
            method              : select "Tikhonov" or "TV" deconvolution methods.
            tv_max_iter         : If "TV" method is used, specify the number of iterations of the ADMM algorithm
            boundary_constraint : indicate whether to use positive or negative constraint on the scattering potential
            use_gpu             : flag to specify gpu usage
        Output:
            RI_obj : reconstructed 3D refractive index
        '''
        # NOTE(review): the mutable dict default for boundary_constraint is shared
        # across calls; safe as long as callers never mutate it.
        if use_gpu:
            # Import arrayfire lazily so CPU-only users need not install it.
            globals()["af"] = __import__("arrayfire")
        # Normal-equation blocks of the stacked WOTF system (2x2 per frequency).
        AHA = [(self.H_imag.conj()*self.H_imag).sum(axis=0), (self.H_imag.conj()*self.H_real).sum(axis=0),\
               (self.H_real.conj()*self.H_imag).sum(axis=0), (self.H_real.conj()*self.H_real).sum(axis=0)]
        fIntensity = F_3D(self.dpc_imgs).transpose(3, 0, 1, 2).astype('complex64')
        if method == "Tikhonov":
            print("="*10+" Solving 3D DPC with Tikhonov regularization "+"="*10)
            AHA[0] += self.reg_imag
            AHA[3] += self.reg_real
            AHy = [(self.H_imag.conj()*fIntensity).sum(axis=0), (self.H_real.conj()*fIntensity).sum(axis=0)]
            if use_gpu:
                AHA = [af.to_array(AHA_i) for AHA_i in AHA]
                AHy = [af.to_array(AHy_i) for AHy_i in AHy]
            determinant = AHA[0]*AHA[3]-AHA[1]*AHA[2]
            V_real, V_imag = self._deconvTikhonov(AHA, AHy, determinant, use_gpu)
        elif method == "TV":
            print("="*10+" Solving 3D DPC with total variation regularization and boundary value constraint "+"="*10)
            # Frequency-domain forward-difference operators along x, y, z.
            fDx = np.zeros(self.dpc_imgs.shape[:3], dtype='complex64')
            fDy = np.zeros(self.dpc_imgs.shape[:3], dtype='complex64')
            fDz = np.zeros(self.dpc_imgs.shape[:3], dtype='complex64')
            fDx[0, 0, 0] = 1.0; fDx[0, -1, 0] = -1.0; fDx = F_3D(fDx).astype('complex64')
            fDy[0, 0, 0] = 1.0; fDy[-1, 0, 0] = -1.0; fDy = F_3D(fDy).astype('complex64')
            fDz[0, 0, 0] = 1.0; fDz[0, 0, -1] = -1.0; fDz = F_3D(fDz).astype('complex64')
            AHA[0] += self.rho*(fDx*fDx.conj() + fDy*fDy.conj() + fDz*fDz.conj() + 1.0)
            AHA[3] += self.rho*(fDx*fDx.conj() + fDy*fDy.conj() + fDz*fDz.conj() + 1.0)
            if use_gpu:
                AHA = [af.to_array(AHA_i) for AHA_i in AHA]
                fDx = af.to_array(fDx)
                fDy = af.to_array(fDy)
                fDz = af.to_array(fDz)
            determinant = AHA[0]*AHA[3]-AHA[1]*AHA[2]
            V_real, V_imag = self._deconvTV(AHA, determinant, fIntensity, fDx, fDy, fDz, tv_max_iter, boundary_constraint, use_gpu)
        RI_obj = self._V2RI(V_real, V_imag)
        return RI_obj
| Waller-Lab/3DQuantitativeDPC | python_code/algorithm_3ddpc.py | algorithm_3ddpc.py | py | 16,007 | python | en | code | 11 | github-code | 36 |
35000621242 | import os
import torch
import pandas as pd
import torchaudio
import cv2
import torchaudio.transforms as T
from torch.utils.data import Dataset
import numpy as np
from .utils_dataset import get_transform
class BatvisionV2Dataset(Dataset):
    """Echo-to-depth dataset for BatvisionV2.

    Aggregates the per-location annotation CSVs found under
    cfg.dataset.dataset_dir and yields (audio, depth) pairs, where audio is a
    binaural waveform or a (mel) spectrogram depending on
    cfg.dataset.audio_format.
    """
    def __init__(self, cfg, annotation_file, location_blacklist=None):
        self.cfg = cfg
        self.root_dir = cfg.dataset.dataset_dir
        self.audio_format = cfg.dataset.audio_format
        # Each sub-directory of the dataset root is one capture location;
        # optionally drop blacklisted locations.
        location_list = os.listdir(self.root_dir)
        if location_blacklist:
            location_list = [location for location in location_list if location not in location_blacklist]
        location_csv_paths = [os.path.join(self.root_dir, location, annotation_file) for location in location_list]
        self.instances = []
        for location_csv in location_csv_paths:
            self.instances.append(pd.read_csv(location_csv))
        # One concatenated DataFrame; each row describes one (audio, depth) pair.
        self.instances = pd.concat(self.instances)
    def __len__(self):
        return len(self.instances)
    def __getitem__(self, idx):
        # Access instance
        instance = self.instances.iloc[idx]
        # Load path
        depth_path = os.path.join(self.root_dir,instance['depth path'],instance['depth file name'])
        audio_path = os.path.join(self.root_dir,instance['audio path'],instance['audio file name'])
        ## Depth
        # Load depth map
        depth = np.load(depth_path).astype(np.float32)
        depth = depth / 1000 # to go from mm to m
        # Clip everything beyond the configured maximum range.
        if self.cfg.dataset.max_depth:
            depth[depth > self.cfg.dataset.max_depth] = self.cfg.dataset.max_depth
        # Transform
        depth_transform = get_transform(self.cfg, convert = True, depth_norm = self.cfg.dataset.depth_norm)
        gt_depth = depth_transform(depth)
        ## Audio
        # Load audio binaural waveform
        waveform, sr = torchaudio.load(audio_path)
        # STFT parameters for full length audio
        win_length = 200
        n_fft = 400
        hop_length = 100
        # Cut audio to fit max depth
        if self.cfg.dataset.max_depth:
            # Keep only echoes within the round-trip time of max_depth,
            # i.e. 2*d / 340 m/s (speed of sound), converted to samples.
            cut = int((2*self.cfg.dataset.max_depth / 340) * sr)
            waveform = waveform[:,:cut]
            # Update STFT parameters
            win_length = 64
            n_fft = 512
            hop_length=64//4
        # Process sound
        if 'spectrogram' in self.audio_format:
            if 'mel' in self.audio_format:
                spec = self._get_melspectrogram(waveform, n_fft = n_fft, power = 1.0, win_length = win_length)
            else:
                spec = self._get_spectrogram(waveform, n_fft = n_fft, power = 1.0, win_length = win_length, hop_length = hop_length)
            spec_transform = get_transform(self.cfg, convert = False) # convert False because already a tensor
            audio2return = spec_transform(spec)
        elif 'waveform' in self.audio_format:
            audio2return = waveform
        # NOTE(review): if audio_format matches neither branch, audio2return is
        # unbound and this raises UnboundLocalError -- confirm allowed formats.
        return audio2return, gt_depth
    # audio transformation: spectrogram
    def _get_spectrogram(self, waveform, n_fft = 400, power = 1.0, win_length = 400, hop_length=100):
        # power=1.0 yields a magnitude (not power) spectrogram.
        spectrogram = T.Spectrogram(
            n_fft=n_fft,
            win_length=win_length,
            power=power,
            hop_length=hop_length,
        )
        #db = T.AmplitudeToDB(stype = 'magnitude')
        return spectrogram(waveform)
    # audio transformation: mel spectrogram
    def _get_melspectrogram(self, waveform, n_fft = 400, power = 1.0, win_length = 400, f_min = 20.0, f_max = 20000.0):
        # NOTE(review): sample_rate is hard-coded to 44100 -- confirm it matches
        # the loaded files' sampling rate.
        melspectrogram = T.MelSpectrogram(sample_rate = 44100,
                                          n_fft=n_fft,
                                          win_length=win_length,
                                          power=power,
                                          f_min = f_min,
                                          f_max = f_max,
                                          n_mels = 32,
                                          )
        return melspectrogram(waveform)
| AmandineBtto/Batvision-Dataset | UNetSoundOnly/dataloader/BatvisionV2_Dataset.py | BatvisionV2_Dataset.py | py | 3,853 | python | en | code | 6 | github-code | 36 |
44033939735 | import sys
import heapq
# BOJ 12764: assign each customer (start p, end q) to the lowest-index free
# seat; report how many seats were ever used and each seat's customer count.
n = int(sys.stdin.readline())
heap = []
computers = [0 for _ in range(n)]  # end time of the session occupying seat i (0 = never used)
count = [0 for _ in range(n)]      # customers served per seat
su = 0                             # number of distinct seats ever used
for _ in range(n):
    p,q = map(int, sys.stdin.readline().split())
    heapq.heappush(heap, [p,q])    # heap orders customers by start time
while heap:
    temp = heapq.heappop(heap)
    # Linear scan for the first seat whose previous session has ended.
    for i in range(len(computers)):
        if computers[i] <= temp[0]:
            # NOTE(review): a seat is counted as newly used only when its end
            # time is still 0 -- assumes real end times are always positive.
            if computers[i] == 0:
                su += 1
            computers[i] = temp[1]
            count[i] += 1
            break
print(su)
for i in count:
    if i == 0:
        pass
    else:
        print(i, end= " ")
# seq_id: 40776965677
def minimumSwaps(arr):
    """Return the minimum number of swaps to sort `arr` ascending.

    `arr` must be a permutation of 1..n. Uses cycle-chasing: while position i
    does not hold i+1, swap its value into the slot where it belongs.
    NOTE: sorts `arr` in place as a side effect.
    """
    swaps = 0
    for i in range(len(arr)):
        while arr[i] != i + 1:
            target = arr[i] - 1  # index where arr[i] belongs
            arr[i], arr[target] = arr[target], arr[i]
            swaps += 1
    return swaps
# Driver: read n (unused beyond the input protocol) and the permutation,
# then print the minimum swap count.
n=int(input())
a=list(map(int,input().split()))
print(minimumSwaps(a))
72489930665 | import numpy as np
import itertools
import argparse
# Card ranks (high to low) and suit letters.
cards = ['A', 'K', 'Q', 'J', '10', '9', '8', '7', '6', '5', '4', '3', '2']
suits = ['S', 'H', 'C', 'D']

class Card:
    """A playing card: a rank string (e.g. 'A', '10') and a suit letter."""
    def __init__(self, val, suit):
        self.val = val
        self.suit = suit
    def __str__(self):
        return f'{self.val} {self.suit}'

class Hand:
    """Two hole cards plus a tally of every 5-card hand category they made."""
    def __init__(self, card1, card2):
        self.card1 = card1
        self.card2 = card2
        # category rank (9 = best) -> [occurrences, display name]
        self.hand_counts = {
            9 : [0, 'Straight Flush'],
            8 : [0, 'Four of a Kind'],
            7 : [0, 'Full House'],
            6 : [0, 'Flush'],
            5 : [0, 'Straight'],
            4 : [0, 'Three of a Kind'],
            3 : [0, 'Two Pair'],
            2 : [0, 'Pair'],
            1 : [0, 'High Card'],
        }
    def count(self, n):
        """Record one more occurrence of hand category `n`."""
        self.hand_counts[n][0] += 1
    def __str__(self):
        res = f'{self.card1} {self.card2} makes:\n'
        pct = np.asarray([_[0] for _ in self.hand_counts.values()])
        pct = pct/pct.sum()*100
        for i, hand in enumerate(self.hand_counts.values()):
            res += f'{hand[1]}: {round(pct[i], 4)}%\n'
        return res

# Rank lookup: 'A' -> 13 down to '2' -> 1.
value = {c:13-i for i, c in enumerate(cards)}

def high_card(cards):
    """Highest rank value present in `cards`."""
    return max([value[c.val] for c in cards])

def same_suit(cards):
    """True when all cards share one suit (flush condition)."""
    return len(set([c.suit for c in cards])) == 1

def _straight_high(cards):
    """Top-card value of a straight, treating A-2-3-4-5 as 5-high."""
    vals = sorted(value[c.val] for c in cards)
    if vals == [1, 2, 3, 4, 13]:
        return 4  # the ace plays low, so the 5 (value 4) tops the wheel
    return vals[-1]

def is_straight(cards):
    """True for five consecutive ranks, including the ace-low "wheel".

    BUG FIX: the original sorted scan never detected A-2-3-4-5 because the
    ace always sorts high (values [1, 2, 3, 4, 13]); its ace-low special
    case was unreachable.
    """
    vals = sorted(value[c.val] for c in cards)
    if vals == [1, 2, 3, 4, 13]:  # ace-low straight
        return True
    return all(vals[j] == vals[j-1] + 1 for j in range(1, len(vals)))

def is_4_of_a_kind(cards):
    """Four cards of one rank (distinct-rank signature: 2 groups, max count 4)."""
    _, counts = get_same_cards(cards)
    if len(counts) == 2 and counts.max() == 4:
        return True
    return False

def is_full_house(cards):
    """Three of one rank and two of another (2 groups, max count 3)."""
    _, counts = get_same_cards(cards)
    if len(counts) == 2 and counts.max() == 3:
        return True
    return False

def is_trio(cards):
    """Three of a kind with two kickers (3 groups, max count 3)."""
    _, counts = get_same_cards(cards)
    if len(counts) == 3 and counts.max() == 3:
        return True
    return False

def is_2_pair(cards):
    """Two pairs plus a kicker (3 groups, max count 2)."""
    _, counts = get_same_cards(cards)
    if len(counts) == 3 and counts.max() == 2:
        return True
    return False

def is_pair(cards):
    """Exactly one pair (4 distinct ranks among 5 cards)."""
    _, counts = get_same_cards(cards)
    if len(counts) == 4:
        return True
    return False

def get_same_cards(cards):
    """Return (unique rank values, their multiplicities) for `cards`."""
    vals = np.asarray([value[c.val] for c in cards])
    return np.unique(vals, return_counts=True)

def get_val(vals, counts, c):
    """Rank values occurring exactly `c` times, highest first."""
    return sorted(vals[counts == c], key=lambda x:-x)

def hand(cards):
    """Classify a 5-card hand.

    Returns (category, tiebreakers): category is 9 (straight flush) down to
    1 (high card); tiebreakers is a list of rank values, most significant
    first, so the tuples compare correctly with < / >.

    BUG FIX: straights now recognize the wheel (see is_straight) and rank it
    as 5-high rather than ace-high.
    """
    hc = high_card(cards)
    vals, counts = get_same_cards(cards)
    if same_suit(cards) and is_straight(cards):
        return (9, [_straight_high(cards)])
    elif is_4_of_a_kind(cards):
        f = get_val(vals, counts, 4)
        return (8, f)
    elif is_full_house(cards):
        f1 = get_val(vals, counts, 3)
        f2 = get_val(vals, counts, 2)
        return (7, f1 + f2)
    elif same_suit(cards):
        return (6, [hc])
    elif is_straight(cards):
        return (5, [_straight_high(cards)])
    elif is_trio(cards):
        f1 = get_val(vals, counts, 3)
        f2 = get_val(vals, counts, 1)
        return (4, f1 + f2)
    elif is_2_pair(cards):
        f1 = get_val(vals, counts, 2)
        f2 = get_val(vals, counts, 1)
        return (3, f1 + f2)
    elif is_pair(cards):
        f1 = get_val(vals, counts, 2)
        f2 = get_val(vals, counts, 1)
        return (2, f1 + f2)
    else:
        return (1, get_val(vals, counts, 1))

def get_best_hand(all_cards):
    """Best (category, tiebreakers) over all 5-card subsets of `all_cards`."""
    all_hands = []
    for cards in itertools.combinations(all_cards, 5):
        all_hands.append(hand(cards))
    return max(all_hands)

def draw_cards(Deck, n):
    """Pop `n` cards off the top of `Deck` (mutates Deck)."""
    cards = []
    for _ in range(n):
        cards.append(Deck.pop(0))
    return cards

def comp_two_hands(hand1, hand2, Deck):
    """Deal a 5-card board from `Deck` and compare two hole-card hands.

    Returns (result, best1, best2) where result is -1 if hand1 wins, 1 if
    hand2 wins, 0 on a tie.
    """
    table = draw_cards(Deck, 5)
    bh1 = get_best_hand(hand1+table)
    bh2 = get_best_hand(hand2+table)
    if bh1 > bh2:
        return -1, bh1, bh2
    elif bh1 < bh2:
        return 1, bh1, bh2
    else:
        return 0, bh1, bh2
# Command-line interface. Hands are written as "RANK SUIT & RANK SUIT",
# e.g. --hand1 "A S & K D".
parser = argparse.ArgumentParser()
parser.add_argument('--hand1', type=str, required=True)        # player 1 hole cards
parser.add_argument('--hand2', type=str)                       # player 2 hole cards (used with --comp)
parser.add_argument('--comp', action='store_true')             # head-to-head equity of hand1 vs hand2
parser.add_argument('--overall_equity', action='store_true')   # hand1 vs a random opponent
args = parser.parse_args()
def parse_hand(hand):
    """Turn a hand string like "A S & K D" into [['A', 'S'], ['K', 'D']]."""
    halves = hand.split(' & ')
    return [half.split(' ') for half in halves]
def create_deck(remove_cards=()):
    """Build a fresh 52-card deck, skipping any (rank, suit) in remove_cards.

    BUG FIX: callers pass remove_cards as lists of lists (from parse_hand),
    but the original tested `(card, suit) in remove_cards` -- a tuple never
    equals a list, so the membership check was always False and hole cards
    were never actually removed from the deck (they could be dealt twice).
    Normalizing every entry to a tuple makes the check work for both list
    and tuple inputs. (Also drops the mutable-default-argument pitfall.)
    """
    removed = {tuple(rc) for rc in remove_cards}
    Deck = []
    for card in cards:
        for suit in suits:
            if (card, suit) in removed:
                continue
            Deck.append(Card(card, suit))
    return Deck
if args.comp:
    # --comp: Monte-Carlo head-to-head equity of two fixed hole-card hands.
    hand1 = parse_hand(args.hand1)
    hand2 = parse_hand(args.hand2)
    Deck = create_deck(hand1+hand2)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    hand2 = [f(_) for _ in hand2]
    hand1_count = 0
    hand2_count = 0
    tie_count = 0
    nseeds = 10   # independent RNG seeds, nsims boards each
    nsims = 200
    Hand1 = Hand(*hand1)
    Hand2 = Hand(*hand2)
    for seed in range(nseeds):
        np.random.seed(seed)
        for sim in range(nsims):
            # Shuffle the deck and deal a fresh 5-card board each simulation.
            deck_shuffled = np.random.permutation(Deck).tolist()
            res, h1, h2 = comp_two_hands(hand1, hand2, deck_shuffled.copy())
            Hand1.count(h1[0])
            Hand2.count(h2[0])
            if res == -1:
                hand1_count += 1
            elif res == 1:
                hand2_count += 1
            else:
                tie_count += 1
    h1_win = hand1_count/nseeds/nsims*100
    h2_win = hand2_count/nseeds/nsims*100
    tie = tie_count/nseeds/nsims*100
    print(f'Hand1: {hand1[0]}, {hand1[1]} wins {h1_win}%')
    print(f'Hand2: {hand2[0]}, {hand2[1]} wins {h2_win}%')
    print(f'Tie happens: {tie}%')
    print(Hand1)
    print(Hand2)
elif args.overall_equity:
    # --overall_equity: hand1 against a uniformly random opponent hand.
    hand1 = parse_hand(args.hand1)
    Deck = create_deck(hand1)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    nseeds = 10
    nsims = 500
    hand1_count = 0
    hand2_count = 0
    tie_count = 0
    Hand1 = Hand(*hand1)
    for seed in range(nseeds):
        np.random.seed(seed)
        for i in range(nsims):
            # Draw two distinct random cards for the opponent.
            rnd_card = np.random.randint(0, len(Deck))
            rnd_card2 = np.random.randint(0, len(Deck))
            while rnd_card2 == rnd_card:
                rnd_card2 = np.random.randint(0, len(Deck))
            hand2 = [Deck[rnd_card], Deck[rnd_card2]]
            Hand2 = Hand(*hand2)
            # NOTE(review): deck_shuffled still contains the opponent's two
            # cards, so the dealt board can duplicate them -- confirm intended.
            deck_shuffled = np.random.permutation(Deck).tolist()
            res, h1, h2 = comp_two_hands(hand1, hand2, deck_shuffled.copy())
            Hand1.count(h1[0])
            Hand2.count(h2[0])
            if res == -1:
                hand1_count += 1
            elif res == 1:
                hand2_count += 1
            else:
                tie_count += 1
    h1_win = hand1_count/nseeds/nsims*100
    h2_win = hand2_count/nseeds/nsims*100
    tie = tie_count/nseeds/nsims*100
    print(f'Hand1: {hand1[0]}, {hand1[1]} wins {h1_win}%')
    print(f'Hand2 wins {h2_win}%')
    print(f'Tie happens: {tie}%')
    print(Hand1)
    # print(Hand2)
else:
    # Default: distribution of hand categories hand1 makes over random boards.
    hand1 = parse_hand(args.hand1)
    Deck = create_deck(hand1)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    nseeds = 10
    nsims = 500
    Hand1 = Hand(*hand1)
    for seed in range(nseeds):
        np.random.seed(seed)
        for sim in range(nsims):
            deck_shuffled = np.random.permutation(Deck).tolist()
            table = draw_cards(deck_shuffled, 5)
            h1 = get_best_hand(table+hand1)
            Hand1.count(h1[0])
    print(Hand1)
| arpit-1110/Poker | poker_odds.py | poker_odds.py | py | 7,581 | python | en | code | 0 | github-code | 36 |
3642645644 | #!/usr/bin/python
import json
import sys
# Join a TREC-style run file with per-document entity annotations and a JSON
# query file, printing up to 5 entities for each top-10 document per query.
out_file = sys.argv[1]                 # run file: query _ document rank _ _
document_entities_file = sys.argv[2]   # whitespace-separated: document entity _
query_file = sys.argv[3]               # JSON with {"queries": [{"title", "text"}, ...]}

query_docs = {}
docs = set()
with open(out_file) as f:
    for line in f:
        query, _, document, rank, _, _ = line.split()
        rank = int(rank)
        # Keep only the top-10 ranked documents per query.
        if rank > 10:
            continue
        if query not in query_docs:
            query_docs[query] = []
        query_docs[query].append(document)
        docs.add(document)

doc_entities = {}
with open(document_entities_file) as f:
    for line in f:
        document, entity, _ = line.split()
        # Skip documents not retrieved above.
        if document not in docs:
            continue
        if document not in doc_entities:
            doc_entities[document] = []
        doc_entities[document].append(entity)

# Map query title -> query text.
queries = {}
j = json.load(open(query_file))
for q in j['queries']:
    queries[q['title']] = q['text']

for query in query_docs:
    for doc in query_docs[query]:
        i = 0
        if doc in doc_entities:
            # Emit at most the first 5 entities of each document.
            for entity in doc_entities[doc]:
                i += 1
                if i <= 5:
                    print('\t'.join([query, '"{}"'.format(queries[query]), entity]))
| gtsherman/entities-experiments | src/query_entities.py | query_entities.py | py | 992 | python | en | code | 0 | github-code | 36 |
74105621545 | from django.shortcuts import render
# se importan los modelos
from .models import Author, Genre, Book, BookInstance
# se crea la funcion index
def index(request):
    """Render the catalog home page with collection-wide counts.

    Queries the number of books, book copies, and authors, plus how many
    copies are currently available (status exactly 'a'), and passes them to
    the index.html template.
    """
    num_books = Book.objects.all().count()
    num_inctances = BookInstance.objects.all().count()
    num_authors = Author.objects.all().count()
    # Copies whose status field is exactly the letter 'a' (available).
    disponivles = BookInstance.objects.filter(status__exact="a").count()
    # Template context keys must match what index.html expects.
    context = {
        "num_books": num_books,
        "num_inctances": num_inctances,
        "num_authors": num_authors,
        "disponivles": disponivles,
    }
    return render(request, "index.html", context=context)
| MallicTesla/Mis_primeros_pasos | Programacion/002 ejemplos/002 - 13 django catalogo/catalog/views.py | views.py | py | 1,201 | python | es | code | 1 | github-code | 36 |
31508455076 | import asyncio
import json
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils.markdown import hbold, hunderline, hcode, hlink
from aiogram.dispatcher.filters import Text
from config import token
from test import morph
from main import check_news_update
bot = Bot(token=token, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot)

def get_json(ner, date):
    """Scan the cached news dump for items matching the given entities/date.

    ner  : list of entity strings that must all appear in an article's text
    date : parsed date-like object (or empty); assumes .day/.month/.year
           attributes when non-empty -- TODO confirm against morph()'s output
    Returns a list of [article_data, article_full_desc] pairs.
    """
    dict_news = list()
    with open("news_dict.json", "r", encoding="utf-8") as read_file:
        data = json.load(read_file)
    day = None
    month = None
    year = None
    print('gf')  # NOTE(review): leftover debug output
    for new in data.keys():
        # Entities from the article body; dates from its date field.
        ner_tec, _ = morph(str(data[new]['article_full_desc']))
        _, data_tec = morph(str(data[new]['article_data']))
        c = True
        # Every requested entity must be present in the article.
        for n in ner:
            if n not in ner_tec:
                c = False
        if len(date) != 0:
            day = date.day
            month = date.month
            year = date.year
        for d in data_tec:
            if day is not None and day != d.day:
                c = False
            # NOTE(review): month and year are also compared against d.day --
            # almost certainly meant d.month / d.year; confirm before relying on
            # date filtering.
            if month is not None and month != d.day:
                c = False
            if year is not None and year != d.day:
                c = False
        if c == True:
            dict_news.append([data[new]['article_data'], data[new]['article_full_desc']])
    print(dict_news)
    return dict_news
@dp.message_handler(content_types=types.ContentTypes.TEXT)
async def process_text_message(message: types.Message):
    """Reply to a text message with every stored news item matching it.

    The message text is run through `morph` to extract entities and a date,
    which are matched against the cached news dump via `get_json`.

    BUG FIX: the original used `for ... else`, whose else-branch runs
    whenever the loop finishes without `break` -- i.e. always -- so the
    "no such news" reply was sent after every query, even successful ones.
    A stray debug reply (message.answer("1")) was removed as well.
    """
    entities, date = morph(message.text)
    list_news = get_json(entities, date)
    if list_news:
        for news in list_news:
            # Each item is a [date, full_description] pair from get_json.
            await message.answer(str(news[0]) + "\n" + str(news[1]))
    else:
        await message.answer("Нет таких новостей")
# @dp.message_handler(Text(equals="Сводка"))
# async def get_news_summary(message: types.Message):
# with open("news_dict.json",encoding="utf-8") as file:
# news_dict = json.load(file)
#
# for k, v in sorted(news_dict.items())[-5:]:
# text = prediction(message.text)
if __name__ == '__main__':
    # Start long-polling the Telegram Bot API; blocks until interrupted.
    executor.start_polling(dp)
6568975593 | #6588
from sys import stdin
array = [True for i in range(1000001)] # sieve flags: index i stays True while i is still considered prime
for i in range(2, 1001): # 1001 = int(math.sqrt(1000000)) + 1; sieving up to the square root is sufficient
    if array[i]:
        for k in range(i + i, 1000001, i): # mark every multiple of i (from 2*i to 1000000) as composite
            array[k] = False
while True:
    n = int(stdin.readline())
    if n == 0: # a lone 0 terminates the input
        break
    for i in range(3, len(array)): # start at 3: problem 6588 wants odd-prime pairs (skip 1 and 2)
        if array[i] and array[n-i]: # i + (n-i) == n and both are prime
            print(n, "=", i, "+", n-i)
            break # the smallest i yields the pair with the largest difference b-a
"""
* 에라토스테네스의 체(소수 판별 알고리즘)
에라토스테네스의 체에서는 1을 제거
--> 지워지지 앟는 수 중에 제일 작은 2를 소수로 선택한다
--> 나머지 2의 배수를 모두 지운다 --> 지워지지 않는 수 중에 제일 작은 3을 소수로 선택한다
--> 나머지 3의 배수를 지운다 ... 5, 7, 11, 13 등으로 반복을 한다.
제곱근까지만 약수의 여부를 검증해도 소수인지 아닌지 알 수 있는 이유는 6과 같은 수의 경우 2*3=3*2로 대칭을 이루기 때문이다.
* 시간초가 코드
import sys
def isPrime(num):
if num == 1:
return False
else:
for i in range(2, int(num ** 0.5) + 1):
if num % i == 0:
return False
return True
prime = []
for i in range(3,100001):
if i % 2 == 0:
continue
else:
if isPrime(i):
prime.append(i)
while True:
find = 0
n = int(sys.stdin.readline())
if n == 0:
break
for i in range(len(prime)):
for j in reversed(range(len(prime))):
if prime[i] + prime[j] == n:
find = 1
break
if find == 1:
break
if find == 1:
print(n, " = ", prime[i], " + ", prime[j])
else:
print("Goldbach's conjecture is wrong.")
""" | jjun-ho/Baekjoon | 알고리즘 기초 1/2-1. 수학 1/6_골드바흐의_추측.py | 6_골드바흐의_추측.py | py | 2,100 | python | ko | code | 0 | github-code | 36 |
33939924493 | import unittest
import random
from sortedkeycollections import AVLTree, SortedKeyList, SortedArrayList
class TestSortedKeyList(unittest.TestCase):
    """Behavioural tests for SortedKeyList.

    Subclasses override setUp() to run the same suite against other
    implementations (SortedArrayList, AVLTree).
    """
    def setUp(self):
        # The implementation under test; replaced by subclasses.
        self.test_class = SortedKeyList
    def test_insert(self):
        # Items must come back sorted by key; equal keys keep insertion order.
        values_integer = [(21, '113'), (71, 'wcwf'), (-6, (121, 32, 'x')), (11, 232), (-6, 'seq'), (21, -50),
                          (-600000, 'adf'), (11, 'wwe')]
        prioritized_values_integer = [(-600000, 'adf'), (-6, (121, 32, 'x')), (-6, 'seq'), (11, 232), (11, 'wwe'),
                                      (21, '113'), (21, -50), (71, 'wcwf')]
        collection_integer = self.test_class()
        for pair in values_integer:
            collection_integer.insert(*pair)
        self.assertEqual(list(iter(collection_integer)), prioritized_values_integer)
        self.assertEqual(len(collection_integer), len(values_integer))
        # Same contract with string keys.
        values_str = [('xYz', '93242sax'), ('xYz', ('x', 823)), ('a97', -23), ('a', 'xx'), ('HH', -23.6), ('a', 132),
                      ('77', 32)]
        prioritized_values_str = [('77', 32), ('HH', -23.6), ('a', 'xx'), ('a', 132), ('a97', -23), ('xYz', '93242sax'),
                                  ('xYz', ('x', 823))]
        collection_str = self.test_class()
        for priority, obj in values_str:
            collection_str.insert(priority, obj)
        self.assertEqual(list(iter(collection_str)), prioritized_values_str)
        self.assertEqual(len(collection_str), len(values_str))
    def test_popleft(self):
        # popleft() must return entries in ascending key order (FIFO on ties).
        if self.test_class is None:
            return
        values = [((12, 'x'), 78.5), ((-10, 'X'), 'wca'), ((12, '8'), '87'), ((-10, 'X'), 212121)]
        # Ordered so that .pop() (from the end) yields the expected popleft sequence.
        expected_values = [((12, 'x'), 78.5), ((12, '8'), '87'), ((-10, 'X'), 212121), ((-10, 'X'), 'wca')]
        collection = self.test_class()
        for pair in values:
            collection.insert(*pair)
        while len(collection) > 0:
            self.assertEqual(collection.popleft(), expected_values.pop())
    def test_popleft_exception(self):
        # popleft() on an empty collection must raise KeyError.
        collection = self.test_class()
        self.assertRaises(KeyError, lambda: collection.popleft())
class TestSortedArrayList(TestSortedKeyList):
    """Runs the shared SortedKeyList suite against SortedArrayList."""
    def setUp(self):
        self.test_class = SortedArrayList
class TestAVLTree(TestSortedKeyList):
    """Runs the shared suite against AVLTree, plus an AVL-invariant check."""
    def setUp(self):
        self.test_class = AVLTree
    def test_balance(self):
        # Randomized churn: inserts, removals, more inserts -- then verify
        # every node's cached height and the AVL balance invariant (|lh-rh| < 2).
        tree = AVLTree()
        for _ in range(50):
            tree.insert(random.randint(-100, 100), random.randint(-500, 500))
        for _ in range(40):
            tree.popleft()
        for i in range(20):
            tree.insert(i, random.randint(-900, 900))
        for node in tree.node_iter():
            lh, rh = node.left_tree_height(), node.right_tree_height()
            height = max(lh, rh) + 1
            self.assertEqual(node.height, height)
            self.assertGreater(2, abs(lh - rh))
if __name__ == '__main__':
    # Discover and run all TestCase subclasses in this module.
    unittest.main()
| i1red/oop-3rd-sem | lab1/tests/test_sortedkeycollections.py | test_sortedkeycollections.py | py | 2,935 | python | en | code | 0 | github-code | 36 |
71578942183 | import vtk
def main():
    """Demonstrate vtkDeformPointSet: deform an elevation-colored sphere by
    moving one vertex of an octahedral control mesh that encloses it."""
    colors = vtk.vtkNamedColors()
    # Set the background color.
    colors.SetColor("bkg", [0.2, 0.3, 0.4, 1.0])
    # Create a sphere to deform
    sphere = vtk.vtkSphereSource()
    sphere.SetThetaResolution(51)
    sphere.SetPhiResolution(17)
    sphere.Update()
    bounds = sphere.GetOutput().GetBounds()
    # Create a filter to color the sphere by elevation (z position).
    ele = vtk.vtkElevationFilter()
    ele.SetInputConnection(sphere.GetOutputPort())
    # NOTE(review): these two calls are immediately overridden by the
    # bounds-based calls below and appear to be dead code -- confirm.
    ele.SetLowPoint(0,0,-0.5);
    ele.SetHighPoint(0,0,0.5);
    ele.SetLowPoint((bounds[1] + bounds[0]) / 2.0,
                    (bounds[3] + bounds[2]) / 2.0,
                    -bounds[5]);
    ele.SetHighPoint((bounds[1] + bounds[0]) / 2.0,
                     (bounds[3] + bounds[2]) / 2.0,
                     bounds[5]);
    ele.Update()
    # Create a control mesh: 6 points just outside the sphere's bounds
    # (one beyond each face), forming an octahedron around it.
    pts = vtk.vtkPoints()
    pts.SetNumberOfPoints(6)
    pts.SetPoint(0,
                 bounds[0] - 0.1 * (bounds[1] - bounds[0]),
                 (bounds[3] + bounds[2]) / 2.0,
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(1,
                 bounds[1] + 0.1 * (bounds[1] - bounds[0]),
                 (bounds[3] + bounds[2]) / 2.0,
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(2,
                 (bounds[1] + bounds[0]) / 2.0,
                 bounds[2] - 0.1 * (bounds[3] - bounds[2]),
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(3,
                 (bounds[1] + bounds[0]) / 2.0,
                 bounds[3] + 0.1 * (bounds[3] - bounds[2]),
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(4,
                 (bounds[1] + bounds[0]) / 2.0,
                 (bounds[3] + bounds[2]) / 2.0,
                 bounds[4] - 0.1 * (bounds[5] - bounds[4]))
    pts.SetPoint(5,
                 (bounds[1] + bounds[0]) / 2.0,
                 (bounds[3] + bounds[2]) / 2.0,
                 bounds[5] + 0.1 * (bounds[5] - bounds[4]))
    # Triangulate the 6 control points into the 8 octahedron faces.
    tris = vtk.vtkCellArray()
    cells = [[2, 0, 4], [1, 2, 4], [3, 1, 4], [0, 3, 4], [0, 2, 5], [2, 1, 5], [1, 3, 5], [3, 0, 5]]
    for cell in cells:
        tris.InsertNextCell(3)
        for c in cell:
            tris.InsertCellPoint(c)
    pd = vtk.vtkPolyData()
    pd.SetPoints(pts)
    pd.SetPolys(tris)
    # Show the control mesh as a black wireframe.
    meshMapper = vtk.vtkPolyDataMapper()
    meshMapper.SetInputData(pd)
    meshActor = vtk.vtkActor()
    meshActor.SetMapper(meshMapper)
    meshActor.GetProperty().SetRepresentationToWireframe()
    meshActor.GetProperty().SetColor(colors.GetColor3d("Black"))
    # Bind the sphere to the control mesh.
    deform = vtk.vtkDeformPointSet()
    deform.SetInputData(ele.GetOutput())
    deform.SetControlMeshData(pd)
    deform.Update()
    # Pull the top control point (index 5) further up; the deform filter
    # drags the sphere's surface along with it.
    controlPoint = pts.GetPoint(5)
    pts.SetPoint(5, controlPoint[0],
                 controlPoint[1],
                 bounds[5] + .8 * (bounds[5] - bounds[4]))
    pts.Modified()
    polyMapper = vtk.vtkPolyDataMapper()
    polyMapper.SetInputConnection(deform.GetOutputPort())
    polyActor = vtk.vtkActor()
    polyActor.SetMapper(polyMapper)
    # Standard render window + interactor setup.
    renderer = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)
    renderer.AddActor(polyActor)
    renderer.AddActor(meshActor)
    renderer.GetActiveCamera().SetPosition(1,1,1)
    renderer.ResetCamera()
    renderer.SetBackground(colors.GetColor3d("bkg"))
    renWin.SetSize(300,300)
    renWin.Render()
    iren.Start()
if __name__ == '__main__':
    # Opens an interactive render window; blocks until it is closed.
    main()
| lorensen/VTKExamples | src/Python/Meshes/DeformPointSet.py | DeformPointSet.py | py | 3,526 | python | en | code | 319 | github-code | 36 |
45556966628 | """Calcula el precio de la energía diario a partir de los precio horarios"""
import pathlib
from utils import read_format_hourly_prices, resample_hourly_prices
base_path = pathlib.Path.cwd()
cleansed_path = base_path.joinpath("data_lake/cleansed")
business_path = base_path.joinpath("data_lake/business")
def compute_daily_prices(
    source_path=cleansed_path,
    target_path=business_path,
    source_filename="precios-horarios.csv",
    target_namefile="precios-diarios.csv",
):
    """Compute average daily prices from the hourly price file.

    Reads ``source_filename`` (hourly prices) from ``source_path``, resamples
    it to one value per day (delegated to ``resample_hourly_prices`` with
    freq="D" -- presumably the mean over the 24 hours; confirm in utils), and
    writes ``target_namefile`` under ``target_path``.

    Output columns:

    * fecha: date in YYYY-MM-DD format
    * precio: average daily electricity price on the national market
    """
    df_hourly_prices = read_format_hourly_prices(source_path, filename=source_filename)
    df_daily_prices = resample_hourly_prices(df_hourly_prices, freq="D")
    df_daily_prices.to_csv(target_path.joinpath(target_namefile))
if __name__ == "__main__":
import doctest
compute_daily_prices()
doctest.testmod()
| productos-de-datos/proyecto-albetancurqu42 | src/data/compute_daily_prices.py | compute_daily_prices.py | py | 1,216 | python | es | code | 0 | github-code | 36 |
def minigame():
    """Side-scrolling pygame mini-game: dodge zombies with WASD/arrow keys.

    Survive until the score reaches 20 to win; colliding with a zombie shows
    the game-over screen. Runs its own event loop until the window is closed.
    """
    import pygame
    import sys
    import pictures
    import random
    status = 'alive'
    # Candidate vertical spawn positions for zombies (y coordinates).
    zombie_size = [50,100,150,200,250,300]
    obstacle_list = []
    bg_pos = 0
    move= 0
    side = 0
    score = 0
    game_screen = pygame.display.set_mode((608,342)) # creates a screen 608 pixels wide and 342 pixels tall
    clock = pygame.time.Clock() # creates clock object, used later to cap the fps
    def move_bg():
        # Draw two copies of the background side by side for seamless scrolling.
        game_screen.blit(background,(bg_pos,0))
        game_screen.blit(background,(bg_pos+608,0))
    def new_obstacle():
        # Spawn a zombie just off the right edge at a random height.
        z_size = random.choice(zombie_size)
        new_obstacle = obstacle.get_rect(center =(800,z_size))
        return new_obstacle
    def move_obstacle(obstacle):
        # Scroll every zombie one pixel left.
        # NOTE(review): the parameter is immediately shadowed by the loop
        # variable; the function really operates on obstacle_list.
        for obstacle in obstacle_list:
            obstacle.centerx = obstacle.centerx -1
    def obstacle_screen(obstacle):
        # Draw the zombie sprite at every rect in obstacle_list.
        for obstacles in obstacle_list:
            game_screen.blit(obstacle, obstacles)
    def death (obstacle):
        # Return 'dead' if the avatar overlaps any zombie, else 'alive'.
        status = 'alive'
        for obstacle in obstacle_list:
            if avatar_bound.colliderect(obstacle):
                status = 'dead'
        return status
    pygame.init() #initialize pygame module
    #uploading all the necessary images
    background = pygame.image.load('pictures/bg.jpg').convert()
    avatar = pygame.image.load('pictures/avatar.png').convert_alpha()
    obstacle = pygame.image.load('pictures/zombie.png').convert_alpha()
    gameover = pygame.image.load('pictures/gameover.jpg').convert()
    gamewin = pygame.image.load('pictures/win.webp').convert()
    #transforming and scaling images
    avatar = pygame.transform.scale(avatar, (58, 62))
    obstacle = pygame.transform.scale(obstacle, (60, 80))
    gameover= pygame.transform.scale(gameover,(608,342))
    gamewin= pygame.transform.scale(gamewin,(608,342))
    avatar_bound = avatar.get_rect(center =(50,150)) # get rectangle around surface this will help us check for collisions
    add_obstacle = pygame.USEREVENT
    pygame.time.set_timer(add_obstacle, 2200) # fire a spawn event every 2200 milliseconds
    while True:
        # Event handling: quit, movement keys, and the periodic spawn timer.
        for event in pygame.event.get():
            if event.type == pygame.QUIT: #or score reaches 10
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP or event.key == ord('w'):
                    move =0
                    move = move - 3
                if event.key == pygame.K_DOWN or event.key == ord('s'):
                    move =0
                    move = move + 3
                if event.key == pygame.K_LEFT or event.key == ord('a'):
                    side = 0
                    side = side - 3
                if event.key == pygame.K_RIGHT or event.key == ord('d'):
                    side = 0
                    side = side + 3
            if event.type == pygame.KEYUP:
                # Stop moving when the key is released.
                side = 0
                move = 0
            if event.type == add_obstacle :
                obstacle_list.append(new_obstacle())
        # Scroll the background and wrap it every 608 px (one image width).
        bg_pos = bg_pos-1
        game_screen.blit(background,(bg_pos,0)) # block transfer of bg image, upper left corner at (0,0)
        move_bg()
        if bg_pos <= -608:
            bg_pos = 0
        if status == 'alive' and score<= 20:
            # Normal gameplay: advance zombies, draw, move avatar, check death.
            move_obstacle(obstacle_list)
            obstacle_screen(obstacle)
            game_screen.blit(avatar, avatar_bound)
            # controlling movements of avatar
            avatar_bound.centery = avatar_bound.centery + move
            avatar_bound.centerx = avatar_bound.centerx + side
            status = death(obstacle_list)
            score = score + 0.01
        elif status == 'dead':
            game_screen.blit(gameover,(-20,0))
        elif score >= 20:
            # Reached only while still alive: victory screen.
            game_screen.blit(gamewin,(0,0))
        pygame.display.update() # updating the display screen
        clock.tick(100) # cap the loop at 100 updates per second
37677063476 | #!/usr/bin/env python3
import os
import sys
import urllib.request
from flask import (
Flask,
flash,
jsonify,
make_response,
redirect,
render_template,
request,
)
from werkzeug.utils import secure_filename
from scripts import predict_model
from scripts import mongodb
from scripts import train_model
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
ENVIRONMENT_WINDOWS = True
@app.route("/")
def index():
return "Application Alive"
@app.route("/api/v1.0/users", methods=["GET"])
def get_users():
return jsonify({"Users": mongodb.get_list_users()})
@app.route("/api/v1.0/experiments/<user>", methods=["GET"])
def get_experiments(user):
user = user.lower()
file_dir_data = os.listdir("data/")
if user not in file_dir_data:
return make_response(jsonify({"error": "User Not Found"}), 404)
else:
file_dir_user = os.listdir("data/{}".format(user))
return jsonify({"user": user, "experiments": file_dir_user})
@app.route("/api/v1.0/train/<user>/<experiment_name>", methods=["GET"])
def train_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
file_dir_data = os.listdir("data/")
if user not in file_dir_data:
return make_response(jsonify({"error": "User Not Found"}), 404)
file_dir_user = os.listdir("data/{}".format(user))
if experiment_name not in file_dir_user:
return make_response(jsonify({"error": "Experiment Not Found"}), 404)
model_def = "data/{}/{}/model_definition.yaml".format(user, experiment_name)
data_csv = "data/{}/{}/train_data.csv".format(user, experiment_name)
log_file = "data/{}/{}/training.log".format(user, experiment_name)
if ENVIRONMENT_WINDOWS:
output_dir = "data\{}\{}".format(user, experiment_name)
else:
output_dir = "data/{}/{}".format(user, experiment_name)
res = train_model.train_model(
model_def, output_dir, data_csv, experiment_name, log_file
)
if res != True:
return jsonify(
{
"user": user,
"response": res,
"model_definition": model_def,
"data_csv": data_csv,
"log_file": log_file,
"output_dir": output_dir,
}
)
return jsonify({"user": user, "response": "Training in Progress"})
@app.route("/api/v1.0/predict/<user>/<experiment_name>", methods=["POST"])
def predict_experiment(user, experiment_name):
file = request.files["file"]
if file.filename != "":
filename = secure_filename(file.filename)
if "predict" not in os.listdir("data/{}/{}".format(user, experiment_name)):
os.mkdir("data/{}/{}/predict/".format(user, experiment_name))
file.save("data/{}/{}/predict/{}".format(user, experiment_name, filename))
return jsonify(
{"result": {"dog": 0.85, "cat": 0.15}, "exp": experiment_name, "user": user}
)
return jsonify({"response": "Error"})
@app.route("/api/v1.0/register/<user>", methods=["GET"])
def register_user(user):
user = user.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
return make_response(jsonify({"error": "User Already Exists"}), 420)
else:
os.mkdir("data/{}".format(user))
return jsonify({"user": user, "response": "User Successfully Created"})
@app.route("/api/v1.0/register/<user>/<experiment_name>", methods=["GET"])
def register_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
dir_list_experiments = os.listdir("data/{}/".format(user))
if experiment_name in dir_list_experiments:
return make_response(jsonify({"error": "Experiment Already Exists"}), 420)
else:
os.mkdir("data/{}/{}".format(user, experiment_name))
return jsonify(
{
"user": user,
"experiment": experiment_name,
"response": "Experiment Successfully Created",
}
)
else:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
@app.route("/api/v1.0/remove/<user>", methods=["GET"])
def remove_user(user):
user = user.lower()
dir_list_users = os.listdir("data/")
if user not in dir_list_users:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
else:
dir_list_experiments = os.listdir("data/{}".format(user))
if len(dir_list_experiments) == 0:
os.rmdir("data/{}".format(user))
return jsonify({"user": user, "response": "User Successfully Removed"})
else:
return jsonify(
{
"user": user,
"response": "User Experiments still exist",
"experiments": dir_list_experiments,
}
)
@app.route("/api/v1.0/remove/<user>/<experiment_name>", methods=["GET"])
def remove_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
dir_list_experiments = os.listdir("data/{}/".format(user))
if experiment_name in dir_list_experiments:
os.rmdir("data/{}/{}".format(user, experiment_name))
return jsonify(
{
"user": user,
"experiment": experiment_name,
"response": "Experiment Successfully Removed",
}
)
else:
return make_response(jsonify({"error": "Experiment Does Not Exist"}), 420)
else:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
@app.errorhandler(404)
def not_found(error):
    # Uniform JSON body for unknown routes instead of Flask's HTML 404 page.
    return make_response(jsonify({"error": "Request Not Found"}), 404)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port="5000")
| projectasteria/PlaceholderAPI | app.py | app.py | py | 6,137 | python | en | code | 0 | github-code | 36 |
74267310505 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.actions module."""
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
def get_action(action):
    """Get action.

    Converts a raw action dict into the matching Action subclass instance,
    dispatching on the dict's ``type`` field.
    """
    action_obj = Base.get_or_new_from_json_dict_with_types(
        action, {
            'postback': PostbackAction,
            'message': MessageAction,
            'uri': URIAction,
            'datetimepicker': DatetimePickerAction,
            'camera': CameraAction,
            'cameraRoll': CameraRollAction,
            'location': LocationAction,
            'richmenuswitch': RichMenuSwitchAction,
        }
    )
    return action_obj
def get_actions(actions):
    """Get actions.

    Converts each raw action in *actions* via get_action(), dropping any
    entries that could not be converted. Returns [] for a falsy input.
    """
    converted = []
    for raw_action in (actions or []):
        converted_action = get_action(raw_action)
        if converted_action:
            converted.append(converted_action)
    return converted
class Action(with_metaclass(ABCMeta, Base)):
    """Abstract base class of Action."""
    def __init__(self, **kwargs):
        """__init__ method.
        :param kwargs:
        """
        super(Action, self).__init__(**kwargs)
        # Overwritten by every concrete subclass with its API type string.
        self.type = None
class PostbackAction(Action):
    """PostbackAction.
    https://developers.line.me/en/docs/messaging-api/reference/#postback-action
    When a control associated with this action is tapped,
    a postback event is returned via webhook with the specified string in the data property.
    """
    def __init__(
            self,
            label=None,
            data=None,
            display_text=None,
            text=None,
            input_option=None,
            fill_in_text=None,
            **kwargs
    ):
        """__init__ method.
        :param str label: Label for the action.
        :param str data: String returned via webhook
            in the postback.data property of the postback event.
        :param str display_text: Text displayed in the chat as a message sent by
            the user when the action is performed.
        :param str text: Deprecated. Text displayed in the chat as a message sent by
            the user when the action is performed. Returned from the server through a webhook.
        :param str input_option: Display method for the postback
            (forwarded to the API as-is).
        :param str fill_in_text: Text to pre-fill (forwarded to the API as-is).
        :param kwargs:
        """
        super(PostbackAction, self).__init__(**kwargs)
        self.type = 'postback'
        self.label = label
        self.data = data
        self.display_text = display_text
        self.text = text
        self.input_option = input_option
        self.fill_in_text = fill_in_text
class MessageAction(Action):
    """MessageAction.
    https://developers.line.me/en/docs/messaging-api/reference/#message-action
    When a control associated with this action is tapped,
    the string in the text property is sent as a message from the user.
    """
    def __init__(self, label=None, text=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action.
        :param str text: Text sent when the action is performed.
        :param kwargs:
        """
        super(MessageAction, self).__init__(**kwargs)
        # API discriminator for this action type.
        self.type = 'message'
        self.label = label
        self.text = text
class URIAction(Action):
    """URIAction.
    https://developers.line.me/en/docs/messaging-api/reference/#uri-action
    When a control associated with this action is tapped,
    the URI specified in the uri property is opened.
    """
    def __init__(self, label=None, uri=None, alt_uri=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
            Max: 20 characters
        :param str uri: URI opened when the action is performed.
        :param alt_uri: URI opened when the desktop app.
        :type alt_uri: T <= :py:class:`linebot.models.actions.AltUri`
        :param kwargs:
        """
        super(URIAction, self).__init__(**kwargs)
        self.type = 'uri'
        self.label = label
        self.uri = uri
        # Accepts either an AltUri instance or a raw dict; normalized here.
        self.alt_uri = self.get_or_new_from_json_dict(alt_uri, AltUri)
class AltUri(with_metaclass(ABCMeta, Base)):
    """AltUri.
    https://github.com/line/line-bot-sdk-python/issues/155
    URI opened when the desktop app.
    """
    def __init__(self, desktop=None, **kwargs):
        """__init__ method.
        :param str desktop: URI opened on LINE for macOS and Windows
            when the action is performed.
            If the altUri.desktop property is set,
            the uri property is ignored on LINE for macOS and Windows.
        :param kwargs:
        """
        super(AltUri, self).__init__(**kwargs)
        self.desktop = desktop
class DatetimePickerAction(Action):
    """DatetimePickerAction.
    https://developers.line.me/en/docs/messaging-api/reference/#datetime-picker-action
    When a control associated with this action is tapped,
    a postback event is returned via webhook with the date and time
    selected by the user from the date and time selection dialog.
    The datetime picker action does not support time zones.
    """
    # NOTE: the max/min parameter names mirror the LINE API field names and
    # intentionally shadow the builtins inside __init__ only.
    def __init__(self, label=None, data=None, mode=None,
                 initial=None, max=None, min=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
        :param str data: String returned via webhook
            in the postback.data property of the postback event
        :param str mode: Action mode
            date: Pick date
            time: Pick time
            datetime: Pick date and time
        :param str initial: Initial value of date or time
        :param str max: Largest date or time value that can be selected.
            Must be greater than the min value.
        :param str min: Smallest date or time value that can be selected.
            Must be less than the max value.
        :param kwargs:
        """
        super(DatetimePickerAction, self).__init__(**kwargs)
        self.type = 'datetimepicker'
        self.label = label
        self.data = data
        self.mode = mode
        self.initial = initial
        self.max = max
        self.min = min
class CameraAction(Action):
    """CameraAction.
    https://developers.line.me/en/reference/messaging-api/#camera-action
    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the camera screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
        :param kwargs:
        """
        super(CameraAction, self).__init__(**kwargs)
        # API discriminator for this action type.
        self.type = 'camera'
        self.label = label
class CameraRollAction(Action):
    """CameraRollAction.
    https://developers.line.me/en/reference/messaging-api/#camera-roll-action
    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the camera roll screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
        :param kwargs:
        """
        super(CameraRollAction, self).__init__(**kwargs)
        # Note the camelCase type string required by the API.
        self.type = 'cameraRoll'
        self.label = label
class LocationAction(Action):
    """LocationRollAction.
    https://developers.line.me/en/reference/messaging-api/#location-action
    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the location screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
        :param kwargs:
        """
        super(LocationAction, self).__init__(**kwargs)
        # API discriminator for this action type.
        self.type = 'location'
        self.label = label
class RichMenuSwitchAction(Action):
    """RichMenuSwitchAction.
    https://developers.line.biz/en/reference/messaging-api/#richmenu-switch-action
    This action can be configured only with rich menus.
    It can't be used for Flex Messages or quick replies.
    When you tap a rich menu associated with this action,
    you can switch between rich menus,
    and a postback event including the rich menu alias ID selected
    by the user is returned via a webhook.
    """
    def __init__(self, label=None, rich_menu_alias_id=None, data=None, **kwargs):
        """__init__ method.
        :param str label: Label for the action
        :param str rich_menu_alias_id: Rich menu alias ID to switch to.
        :param str data: String returned by the postback.data property
            of the postback event via a webhook
        :param kwargs:
        """
        super(RichMenuSwitchAction, self).__init__(**kwargs)
        # API discriminator for this action type.
        self.type = 'richmenuswitch'
        self.label = label
        self.rich_menu_alias_id = rich_menu_alias_id
        self.data = data
| line/line-bot-sdk-python | linebot/models/actions.py | actions.py | py | 9,405 | python | en | code | 1,739 | github-code | 36 |
30788452362 | import sys
input = sys.stdin.readline
from queue import PriorityQueue
queue = PriorityQueue() # min-oriented priority queue; values stored negated to act as a max-heap
N = int(input())
for i in range(N):
    a = int(input())
    if a == 0:
        if queue.empty():
            print(0)
        else:
            print(-queue.get()) # pop the largest value; negate back since values were stored negated
    else:
        queue.put(-a) # insert negated so the smallest stored value is the largest original
class SLList():
    """Singly linked list storing arbitrary elements.

    ``head`` starts as a placeholder node whose element is None; the first
    insert overwrites that placeholder's element (mirroring the original
    design), and later inserts link real nodes.
    """
    class node():
        def __init__(self, data):
            self.element = data
            self.next = None  # reference to the following node (None = tail)

    def __init__(self):
        self.head = self.node(None)  # placeholder until the first insert
        self.size = 0

    def isEmpty(self):
        """Return True when the list holds no elements."""
        return self.size == 0

    def insertFirst(self, data):
        """Prepend ``data`` to the list."""
        newNode = self.node(data)
        if self.size == 0:
            # Reuse the placeholder node instead of linking a new one.
            self.head.element = newNode.element
        else:
            newNode.next = self.head
            self.head = newNode
        self.size += 1

    def insertLast(self, data):
        """Append ``data`` to the list."""
        newNode = self.node(data)
        # BUG FIX: the original tested ``self.size == None`` (never true), so
        # appending to an empty list left the placeholder None element in
        # front of the data.
        if self.size == 0:
            self.head.element = newNode.element
        else:
            currentNode = self.head
            while currentNode.next is not None:
                currentNode = currentNode.next
            currentNode.next = newNode  # link the new tail
        self.size += 1

    def printList(self):
        """Print all elements on one line, or "List is Empty"."""
        if self.isEmpty():
            print("List is Empty")
        else:
            currentNode = self.head
            while currentNode is not None:
                print(currentNode.element, end=" ")
                currentNode = currentNode.next
            print("")

    def deleteFirst(self):
        """Remove the first element (prints a notice when empty)."""
        if self.isEmpty():
            print("List is Empty")
        else:
            if self.head.next is None:
                # Single element: revert head to the empty placeholder.
                self.head.element = None
            else:
                self.head = self.head.next
            self.size -= 1

    def deleteLast(self):
        """Remove the last element (prints a notice when empty).

        BUG FIX: the original crashed with AttributeError on empty or
        single-element lists and never decremented ``size``.
        """
        if self.isEmpty():
            print("List is Empty")
        else:
            if self.head.next is None:
                # Single element: revert head to the empty placeholder.
                self.head.element = None
            else:
                currentNode = self.head
                # Stop at the second-to-last node, then drop its successor.
                while currentNode.next.next is not None:
                    currentNode = currentNode.next
                currentNode.next = None
            self.size -= 1

    def listSize(self):
        """Return the number of stored elements."""
        return self.size
# Demo: build the list 30 20 10 40 50 60, then trim both ends.
sll = SLList()
print(sll.isEmpty())
sll.insertFirst(10)
sll.insertFirst(20)
sll.insertFirst(30)
sll.insertLast(40)
sll.insertLast(50)
sll.insertLast(60)
sll.printList()
print(sll.listSize())
sll.deleteFirst()
sll.deleteLast()
sll.printList()
print(sll.listSize())
print(sll.isEmpty())
| abi2189/DBMS-Electricity-Bill-Payment-System | AP-WORK/python/linkedList.py | linkedList.py | py | 2,831 | python | en | code | 0 | github-code | 36 |
24788364259 | import collections
import heapq
class Solution:
    def networkDelayTime(self, times: list[list[int]], n: int, k: int) -> int:
        """Dijkstra from node ``k``: return the time for a signal sent from
        ``k`` to reach all ``n`` nodes, or -1 if some node is unreachable.

        ``times`` holds directed edges [u, v, w] with travel time ``w``.
        """
        graph = collections.defaultdict(list)
        for u, v, w in times:
            graph[u].append((w, v))
        hq = [(0, k)]
        # Settled shortest distances. Uses a plain dict: the original used
        # collections.defaultdict(int), which would silently insert 0 on any
        # accidental lookup of an unvisited node.
        dist = {}
        while hq:
            weight, node = heapq.heappop(hq)
            if node in dist:
                continue  # stale heap entry; node already settled
            dist[node] = weight
            for next_weight, nxt in graph[node]:
                if nxt not in dist:
                    heapq.heappush(hq, (weight + next_weight, nxt))
        return max(dist.values()) if len(dist) == n else -1
# Demo run: from node 2 the signal reaches nodes {1, 3, 4}; expected output: 2.
temp = Solution()
print(temp.networkDelayTime([[2,1,1],[2,3,1],[3,4,1]], 4, 2))
| inhyeokJeon/AALGGO | Python/LeetCode/shortest_path/743_network_delay_time.py | 743_network_delay_time.py | py | 772 | python | en | code | 0 | github-code | 36 |
70943557223 | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('test_rdd').getOrCreate()
sc = spark.sparkContext
class TestRDD():
    """Exploratory assertions for PySpark RDD basics: creation,
    transformations, actions, pair RDDs, broadcast variables, accumulators.

    Not a unittest/pytest class; methods are invoked manually from __main__.
    """
    # Creations
    def test_create_from_dataframe(self):
        # A DataFrame exposes its underlying RDD of Row objects via .rdd.
        df = spark.range(10).toDF('id')
        rdd = df.rdd
        rows = rdd.collect()
        assert len(rows) == 10
        assert rows[9]['id'] == 9
    def test_create_from_collection(self):
        data = [1, 2, 3, 4]
        rdd = sc.parallelize(data, 2)
        list_1 = rdd.collect()
        assert list_1 == [1, 2, 3, 4]
        # glom() groups elements per partition -- here 2 partitions of 2.
        list_2 = rdd.glom().collect()
        assert list_2 == [[1, 2], [3, 4]]
    def test_create_from_file(self):
        pass
    # Transformations
    def test_map(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.map(lambda word: (word, word[0], len(word)))
        list_1 = rdd_1.collect()
        assert list_1[0] == ('The', 'T', 3)
    def test_filter(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.map(lambda word: (word, word[0], len(word)))
        # Keep only the 5-letter words.
        rdd_2 = rdd_1.filter(lambda record: record[2] == 5)
        list_2 = rdd_2.collect()
        assert list_2 == [('quick', 'q', 5), ('brown', 'b', 5), ('jumps', 'j', 5)]
    def test_sortBy(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.sortBy(lambda word: len(word))
        list_1 = rdd_1.take(5)
        assert list_1 == ['The', 'fox', 'the', 'dog', 'over']
    # Partition Transformations
    def test_mapPartitions(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        # The function receives an iterator over a whole partition at once.
        rdd_1 = rdd.mapPartitions(lambda part: [word[::-1] for word in part])
        list_1 = rdd_1.collect()
        assert list_1 == ['ehT', 'kciuq', 'nworb', 'xof', 'spmuj', 'revo', 'eht', 'yzal', 'god']
    def test_foreachPartition(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        def func(partition):
            # Side-effect only: printing happens on the executors.
            for word in partition:
                print(word[::-1])
        rdd.foreachPartition(func)
    # Actions
    def test_count(self):
        data = range(1, 5)
        rdd = sc.parallelize(data)
        cnt = rdd.count()
        assert cnt == 4
    def test_reduce(self):
        data = range(1, 5)
        rdd = sc.parallelize(data)
        product = rdd.reduce(lambda x, y: x * y)
        assert product == 24
    # Pair RDDs
    def test_keyBy_and_mapValues(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        # keyBy produces (lowercased-first-letter, word) pairs.
        pair_rdd = sc.parallelize(words).keyBy(lambda word: word.lower()[0])
        rdd_1 = pair_rdd.mapValues(lambda word: word.upper())
        list_1 = rdd_1.take(3)
        assert list_1 == [('t', 'THE'), ('q', 'QUICK'), ('b', 'BROWN')]
        list_2 = rdd_1.keys().collect()
        assert list_2 == ['t', 'q', 'b', 'f', 'j', 'o', 't', 'l', 'd']
        list_3 = rdd_1.values().collect()
        assert list_3[0] == 'THE'
    def test_countByKey(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        pair_rdd = sc.parallelize(words).map(lambda word: (word.lower()[0], word.upper()))
        d = pair_rdd.countByKey()
        assert list(d.items()) == [('t', 2), ('q', 1), ('b', 1), ('f', 1), ('j', 1), ('o', 1), ('l', 1), ('d', 1)]
    def test_reduceByKey(self):
        pair_rdd = sc.parallelize([('a', 1), ('b', 2), ('c', 3), ('b', 2), ('a', 1)])
        rdd_1 = pair_rdd.reduceByKey(lambda x, y: x*y)
        list_1 = rdd_1.collect()
        assert list_1 == [('a', 1), ('b', 4), ('c', 3)]
    # Broadcast Variable
    def test_BV(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        bv_data = {'e': 5, 'j': 10, 'o': 15, 't': 20, 'y': 25}
        # Broadcast ships the lookup table once per executor, not per task.
        bv = sc.broadcast(bv_data)
        bv_value = bv.value
        rdd_1 = rdd.map(lambda word: bv_value.get(word.lower()[0], -1))
        list_1 = rdd_1.collect()
        assert list_1 == [20, -1, -1, -1, 10, 15, 20, -1, -1]
    # Accumulator
    def test_accumulator(self):
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        first_acc = sc.accumulator(value=0)
        def func(word):
            # Count the three-letter words across all partitions.
            if len(word) == 3:
                first_acc.add(1)
        rdd.foreach(func)
        assert first_acc.value == 4
if __name__ == '__main__':
    test = TestRDD()
    # Manually invoke one test method (no pytest runner is used here),
    # then release the Spark session.
    test.test_accumulator()
    spark.stop()
71578941543 | #!/usr/bin/env python
import os.path
import vtk
def get_program_parameters():
    """Parse CLI arguments: optional input filename and decimation target reduction."""
    import argparse
    description = 'Decimate polydata.'
    epilogue = '''
    This is an example using vtkDecimatePro to decimate input polydata, if provided, or a sphere otherwise.
    '''
    parser = argparse.ArgumentParser(description=description, epilog=epilogue)
    parser.add_argument('filename', nargs='?', default=None,
                        help='Optional input filename e.g Torso.vtp.')
    parser.add_argument('reduction', nargs='?', type=float, default=.9,
                        help='Sets the decimation target reduction, (default is 0.9).')
    parsed = parser.parse_args()
    return parsed.filename, parsed.reduction
def main():
    # Decimate an input polydata (or a default sphere) and show the original
    # and decimated surfaces side by side in one window with a shared camera.
    filePath, reduction = get_program_parameters()

    # Define colors
    colors = vtk.vtkNamedColors()
    backFaceColor = colors.GetColor3d("gold")
    inputActorColor = colors.GetColor3d("flesh")
    decimatedActorColor = colors.GetColor3d("flesh")
    colors.SetColor('leftBkg', [0.6, 0.5, 0.4, 1.0])
    colors.SetColor('rightBkg', [0.4, 0.5, 0.6, 1.0])

    # Use the given file if it is readable; otherwise fall back to a sphere.
    if filePath and os.path.isfile(filePath):
        inputPolyData = ReadPolyData(filePath)
        if not inputPolyData:
            inputPolyData = GetSpherePD()
    else:
        inputPolyData = GetSpherePD()

    print("Before decimation")
    print(f"There are {inputPolyData.GetNumberOfPoints()} points.")
    print(f"There are {inputPolyData.GetNumberOfPolys()} polygons.")

    # Decimate down to the requested fraction of polygons, preserving topology
    # (no holes or disconnections introduced).
    decimate = vtk.vtkDecimatePro()
    decimate.SetInputData(inputPolyData)
    decimate.SetTargetReduction(reduction)
    decimate.PreserveTopologyOn()
    decimate.Update()

    # Shallow-copy so the result is decoupled from the filter's output port.
    decimated = vtk.vtkPolyData()
    decimated.ShallowCopy(decimate.GetOutput())

    print("After decimation")
    print(f"There are {decimated.GetNumberOfPoints()} points.")
    print(f"There are {decimated.GetNumberOfPolys()} polygons.")
    print(f"Reduction: {(inputPolyData.GetNumberOfPolys() - decimated.GetNumberOfPolys()) / inputPolyData.GetNumberOfPolys()}")

    # Mapper/actor pair for the original surface.
    inputMapper = vtk.vtkPolyDataMapper()
    inputMapper.SetInputData(inputPolyData)

    # Back-facing polygons are drawn in gold on both actors.
    backFace = vtk.vtkProperty()
    backFace.SetColor(backFaceColor)

    inputActor = vtk.vtkActor()
    inputActor.SetMapper(inputMapper)
    inputActor.GetProperty().SetInterpolationToFlat()
    inputActor.GetProperty().SetColor(inputActorColor)
    inputActor.SetBackfaceProperty(backFace)

    # Mapper/actor pair for the decimated surface.
    decimatedMapper = vtk.vtkPolyDataMapper()
    decimatedMapper.SetInputData(decimated)

    decimatedActor = vtk.vtkActor()
    decimatedActor.SetMapper(decimatedMapper)
    decimatedActor.GetProperty().SetColor(decimatedActorColor)
    decimatedActor.GetProperty().SetInterpolationToFlat()
    decimatedActor.SetBackfaceProperty(backFace)

    # There will be one render window
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(600, 300)

    # And one interactor
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)

    # Define viewport ranges
    # (xmin, ymin, xmax, ymax)
    leftViewport = [0.0, 0.0, 0.5, 1.0]
    rightViewport = [0.5, 0.0, 1.0, 1.0]

    # Setup both renderers
    leftRenderer = vtk.vtkRenderer()
    renderWindow.AddRenderer(leftRenderer)
    leftRenderer.SetViewport(leftViewport)
    leftRenderer.SetBackground((colors.GetColor3d('leftBkg')))

    rightRenderer = vtk.vtkRenderer()
    renderWindow.AddRenderer(rightRenderer)
    rightRenderer.SetViewport(rightViewport)
    rightRenderer.SetBackground((colors.GetColor3d('rightBkg')))

    # Original surface on the left, decimated surface on the right.
    leftRenderer.AddActor(inputActor)
    rightRenderer.AddActor(decimatedActor)

    # Shared camera
    # Shared camera looking down the -y axis
    camera = vtk.vtkCamera()
    camera.SetPosition(0, -1, 0)
    camera.SetFocalPoint(0, 0, 0)
    camera.SetViewUp(0, 0, 1)
    camera.Elevation(30)
    camera.Azimuth(30)

    leftRenderer.SetActiveCamera(camera)
    rightRenderer.SetActiveCamera(camera)

    leftRenderer.ResetCamera()
    leftRenderer.ResetCameraClippingRange()

    renderWindow.Render()
    renderWindow.SetWindowName('Decimation')

    interactor.Start()
def ReadPolyData(file_name):
    """Read a polydata file, choosing the reader from the file extension.

    Supported: .ply, .vtp, .obj, .stl, .vtk, .g (BYU).
    Returns the vtkPolyData, or None for an unknown extension.
    """
    import os
    extension = os.path.splitext(file_name)[1].lower()
    if extension == ".ply":
        reader = vtk.vtkPLYReader()
        reader.SetFileName(file_name)
    elif extension == ".vtp":
        # Bug fix: class is vtkXMLPolyDataReader; the original referenced the
        # non-existent vtk.vtkXMLpoly_dataReader, raising AttributeError.
        reader = vtk.vtkXMLPolyDataReader()
        reader.SetFileName(file_name)
    elif extension == ".obj":
        reader = vtk.vtkOBJReader()
        reader.SetFileName(file_name)
    elif extension == ".stl":
        reader = vtk.vtkSTLReader()
        reader.SetFileName(file_name)
    elif extension == ".vtk":
        # Bug fix: class is vtkPolyDataReader (was vtk.vtkpoly_dataReader).
        reader = vtk.vtkPolyDataReader()
        reader.SetFileName(file_name)
    elif extension == ".g":
        # BYU files use a dedicated geometry-file setter.
        reader = vtk.vtkBYUReader()
        reader.SetGeometryFileName(file_name)
    else:
        # Return a None if the extension is unknown.
        return None
    reader.Update()
    return reader.GetOutput()
def GetSpherePD():
    """Return the vtkPolyData of a sphere with fixed theta/phi resolution."""
    sphere = vtk.vtkSphereSource()
    sphere.SetThetaResolution(30)
    sphere.SetPhiResolution(15)
    sphere.Update()
    return sphere.GetOutput()
# Script entry point.
if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/Meshes/Decimation.py | Decimation.py | py | 5,683 | python | en | code | 319 | github-code | 36 |
43034357184 | import datetime
import os
import time
import xarray as xr
from app.dataprocessing.benchmark import Timer
from app.dataprocessing.datasource_interface import IDatasource
from app.dataprocessing.local.local_reader import LocalReader
from app.dataprocessing.remote.opendap_access_cas import OpendapAccessCAS
from app.datastructures.datastructure_interface import INode, IStructure, get_bounds
from app.datastructures.n_dimensional.kd_tree import KDTree
from app.datastructures.three_dimensional.octree import Octree
from app.datastructures.two_dimensional.quad_tree import QuadTree
from dotenv import load_dotenv
from sympy import im
class CachedDS:
    """Record of a locally cached dataset chunk: data, bounds, age, resolution."""

    def __init__(self, node: INode):
        # The chunk's xarray dataset as held by the tree node.
        self.ds = node.ds
        self.bounds = get_bounds(node.ds)
        # Creation time — presumably used for cache ageing; verify with callers.
        self.time_stamp = datetime.datetime.now()
        self.resolution = node.resolution

    def __str__(self) -> str:
        return f"\tBounds:{self.bounds}\n\tCreated:{self.time_stamp}\n\tResolution:{self.resolution:.2f}"
class DataHandler:
    """Defines the data source and selects the proper data structure.

    Wires a data source (local NetCDF file or remote OPeNDAP endpoint behind
    a CAS login) to a spatial tree (QuadTree/Octree/KDTree) chosen from the
    dataset's dimensionality, and serves size-bounded NetCDF chunks.
    """

    def __init__(self) -> None:
        self.ds = None  # xarray.Dataset backing the structure
        self.data_source: IDatasource = None
        self.data_structure: IStructure = None
        self.max_chunk_size = 50  # target size (MB) of one served chunk
        self.on_demand_data = False  # True when chunks are streamed remotely
        self.custom_rules = None  # optional splitting rules for KDTree
        self.cache: list[CachedDS] = []  # chunks fetched from the remote source

    def set_max_chunk_size(self, chunk_size):
        """Set the maximum chunk size (MB) served per request."""
        self.max_chunk_size = chunk_size

    def get_cache(self):
        """Return the list of locally cached remote chunks."""
        return self.cache

    def set_custom_rules(self, custom_rules):
        """Set custom splitting rules, forwarded to a KDTree structure."""
        self.custom_rules = custom_rules

    def set_opendap_cas(
        self,
        cas_url,
        ds_url,
        username,
        password,
        file_size=None,
        constraints=None,
        struct=None,
    ):
        """Attach a remote OPeNDAP dataset behind a CAS login (on-demand mode)."""
        self.on_demand_data = True
        # Idiomatic identity check (was `== None`); only warns, then proceeds.
        if username is None or password is None:
            print("please save credentials to .env")
        self.data_source = OpendapAccessCAS(
            username,
            password,
            ds_url,
            cas_url,
            file_size_MB=file_size,
            constraints=constraints,
        )
        self.ds = self.data_source.get_dataset()
        with Timer("Creating data structure"):
            self.data_structure = self.__set_data_structure(struct)

    def set_local_netcdf_reader(self, file_path, constraints=None, struct=None):
        """Attach a local NetCDF file as the data source."""
        self.data_source = LocalReader(file_path, constraints)
        with Timer("Loading dataset"):
            self.ds = self.data_source.get_dataset()
        with Timer("Creating data structure"):
            self.data_structure = self.__set_data_structure(struct)

    def get_inital_netcdf(self):
        """Write the initial (coarse) dataset to a temp NetCDF file and return its path.

        NOTE: the method name keeps the historical 'inital' spelling for
        backward compatibility with existing callers.
        """
        ds, bounds, node = self.data_structure.get_initial_dataset()
        file_name = "tmp/nc/data_" + str(time.time()) + ".nc"  # TODO: revisit.
        ds.to_netcdf(file_name)
        if self.on_demand_data:
            self.__node_stream_to_local_src(node, file_name)
        return file_name

    def get_initial_ds(self):
        """Return (dataset, bounds, node) for the initial coarse view."""
        ds, bounds, node = self.data_structure.get_initial_dataset()
        return ds, bounds, node

    def request_data_netcdf(self, bounds, return_xr_chunk=False, fit_bounds=False):
        """Fetch one chunk covering *bounds*, write it to NetCDF, return the path.

        When return_xr_chunk is True, also return the (possibly adjusted)
        bounds and the tree node that served the chunk.
        """
        ds, bounds, node = self.data_structure.request_data_single_chunk(
            bounds, fit_bounds=fit_bounds
        )
        file_name = "tmp/nc/data_" + str(time.time())[-5:] + ".nc"  # TODO: revisit.
        ds.to_netcdf(file_name)
        # Fitted extracts are one-off crops and are not cached.
        if self.on_demand_data and not fit_bounds:
            self.__node_stream_to_local_src(node, file_name)
        if return_xr_chunk:
            return file_name, bounds, node
        else:
            return file_name

    def get_file_size_MB(self, file_path):
        """Return the size of *file_path* in mebibytes."""
        return os.path.getsize(file_path) / (1024 * 1024)

    def get_node_resolution(self, node):
        """Return the node's resolution as a percentage of full resolution."""
        return self.data_structure.get_node_resolution(node) * 100

    def get_node_spatial_resolution(self, node) -> dict:
        """Return the node's per-dimension spatial resolution."""
        return self.data_structure.get_node_spatial_resolution(node)

    def get_full_xr_ds(self) -> xr.Dataset:
        """Return the full xarray dataset held by the structure."""
        return self.data_structure.ds

    def __node_stream_to_local_src(self, node, file_path):
        """Re-point the node at the local copy and record it in the cache."""
        node.ds = xr.open_dataset(file_path)
        self.cache.append(CachedDS(node))

    def __set_data_structure(self, custom):
        """Build the tree structure: explicit KDTree request, else by dimensionality."""
        if custom:
            if custom == "KDTree":
                return KDTree(
                    self.ds,
                    full_file_size=self.data_source.get_file_size_MB(),
                    max_chunk_size=self.max_chunk_size,
                    custom_rules=self.custom_rules,
                )
        ds_dims = self.__get_num_dimensions()
        if ds_dims == 2:
            return QuadTree(
                self.ds, self.data_source.get_file_size_MB(), self.max_chunk_size
            )
        elif ds_dims == 3:
            return Octree(
                self.ds, self.data_source.get_file_size_MB(), self.max_chunk_size
            )
        elif ds_dims > 3:
            return KDTree(
                self.ds,
                full_file_size=self.data_source.get_file_size_MB(),
                max_chunk_size=self.max_chunk_size,
                custom_rules=self.custom_rules,
            )
        else:
            raise Exception("DataHandler: unsupported number of dimensions")

    def __get_num_dimensions(self):
        """Return the number of dimensions of the loaded dataset."""
        return len(self.ds.dims)
| oyjoh/adaptive-data-retrieval | app/dataprocessing/data_handler.py | data_handler.py | py | 5,439 | python | en | code | 0 | github-code | 36 |
16173071673 | """
Count the number of occurrences of each character and return it as a (list of tuples) in order of appearance.
For empty output return (an empty list).
Consult the solution set-up for the exact data structure implementation depending on your language.
Example:
ordered_count("abracadabra") == [('a', 5), ('b', 2), ('r', 2), ('c', 1), ('d', 1)]
"""
# def ordered_count(inp):
# def count(inp, i):
# return (i, inp.count(i))
# [count(inp, n) for n in inp]
# [l.append((n, inp.count(n))) for n in inp if ]
def ordered_count(inp):
    """Count character occurrences, returning (char, count) tuples in
    first-appearance order; [] for empty input.

    Single O(n) pass over the string (the original called str.count per
    character, an O(n^2) scan). Relies on dict insertion order (3.7+).
    """
    counts = {}
    for ch in inp:
        counts[ch] = counts.get(ch, 0) + 1
    return list(counts.items())
# Ad-hoc smoke check: print the counts for a scratch string.
inp = "nlkugiluhpouywo8eyrp hv wg vcyewqgirdyqb"
print(ordered_count(inp))
| genievy/codewars | tasks_from_codewars/7kyu/Ordered Count of Characters.py | Ordered Count of Characters.py | py | 752 | python | en | code | 0 | github-code | 36 |
9108378468 | from setuptools import setup, find_packages
from os import path
from io import open
here = path.abspath(path.dirname(__file__))

reqs = []

# The long description shown on PyPI comes straight from the README.
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()

# Install requirements: one package spec per line in requirements.txt.
with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
    read_lines = f.readlines()
    reqs = [each.strip() for each in read_lines]

setup(
    name="decimaljs",
    version="1.0.4",
    description="An arbitrary-precision Decimal type for JavaScript to Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/kirankotari/decimaljs",
    author="Kiran Kumar Kotari",
    author_email="kirankotari@live.com",
    install_requires=reqs,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    keywords="decimal.js decimal decimaljs",
    # Ship every package found beside setup.py except the test suite.
    packages=find_packages(where=".", exclude=["tests"]),
    include_package_data=True,
)
| kirankotari/decimaljs | setup.py | setup.py | py | 1,323 | python | en | code | 1 | github-code | 36 |
451086909 | #!/usr/bin/python3
from .config_utils import get_base_config
from .crypto_utils import hash_file
from .file_utils import profile_url_file, clean_up
from .filter_utils import filter_url_list
from .log_utils import get_module_logger
from .plugin_utils import load_plugins
from .viper_utils import upload_to_viper
from .virus_total import get_urls_for_ip, get_class_for_hash
from threading import Thread
import numpy
import os
import sys
CDIR = os.path.dirname(os.path.realpath(__file__))
ROOTDIR = os.path.abspath(os.path.join(CDIR, os.pardir))
BASECONFIG = get_base_config(ROOTDIR)
LOGGING = get_module_logger(__name__)
def get_malware_urls():
    """Produce a list of malware URLs from daily updated feeds.

    Combines URLs discovered via malware-host plugins (each host resolved to
    URLs through VirusTotal) with URLs reported directly by malware-URL
    plugins, then filters the merged list.

    Returns:
    - result: (type: MalwareUrl list) filtered malware URL list.
    """
    # (Removed the unused `url_list` local from the original.)
    return_list = []

    # URLs hosted on known-bad IPs.
    for host_object in get_plugin_malware_hosts():
        host_url_list = get_urls_for_ip(host_object.address, host_object.source)
        if host_url_list:
            return_list.extend(host_url_list)

    # URLs reported directly by feed plugins (extending with [] is a no-op).
    return_list.extend(get_plugin_malware_urls())

    return filter_url_list(return_list)
def get_plugin_malware_hosts():
    """Produce a list of malware hosts.

    Returns:
    - host_list: (type: MalwareHost list) list of malware hosts.
    """
    host_list = []

    LOGGING.info('Loading malware host plugins...')

    for plugin in load_plugins():
        try:
            # Skip plugins that are disabled or not of the malware-host type.
            if 'malware-host' not in plugin.TYPES or plugin.DISABLED != False:
                continue
            LOGGING.info('Running plugin: {0}'.format(plugin.NAME))
            plugin_list = plugin.get_malwarehost_list()
            if len(plugin_list) > 0:
                host_list.extend(plugin_list)
        except Exception as e:
            LOGGING.error('Problem running plugin. Aborting task.')
            LOGGING.exception(sys.exc_info())
            LOGGING.exception(type(e))
            LOGGING.exception(e.args)
            LOGGING.exception(e)

    return host_list
def get_plugin_malware_urls():
    """Produce a list of malware URLs from regularly updated feeds.

    De-duplicates by URL while preserving first-seen order.

    Returns:
    - result: (type: MalwareUrl list) de-duplicated malware URL list.
    """
    # Set gives O(1) membership; the original scanned a list per URL (O(n^2)).
    seen_urls = set()
    return_list = []

    LOGGING.info('Loading malware URL plugins...')

    for plugin in load_plugins():
        try:
            if 'malware-url' in plugin.TYPES and plugin.DISABLED == False:
                LOGGING.info('Running plugin: {0}'.format(plugin.NAME))
                plugin_list = plugin.get_malwareurl_list()

                for url_object in plugin_list:
                    if url_object.url not in seen_urls:
                        return_list.append(url_object)
                        seen_urls.add(url_object.url)
        except Exception as e:
            LOGGING.error('Problem running plugin. Aborting task.')
            LOGGING.exception(sys.exc_info())
            LOGGING.exception(type(e))
            LOGGING.exception(e.args)
            LOGGING.exception(e)

    return return_list
def process_malware_list(url_list):
    """Fetch and store all malware URLs in a list.

    For each URL: download/profile the sample, optionally classify it via
    VirusTotal for tagging, submit it to Viper, then remove the temp file.

    Params:
    - url_list: (type: MalwareUrl list) malware URL list.
    """
    for mal_url in url_list:
        # Download the sample to a temp file; falsy result means the fetch failed.
        tmp_file_path = profile_url_file(mal_url.url)

        if not tmp_file_path:
            LOGGING.warning('URL: {0} did not successfully download. Continuing...'.format(mal_url.url))
            continue

        try:
            if BASECONFIG.tag_samples:
                # Classify by hash so the Viper upload gets a specific tag.
                file_hash = hash_file(tmp_file_path)
                mal_class = get_class_for_hash(file_hash)

                if mal_class:
                    upload_to_viper(mal_url, tmp_file_path, mal_class)
                else:
                    LOGGING.warning('Failed to classify file. Tag will be generic.')
                    upload_to_viper(mal_url, tmp_file_path)
            else:
                upload_to_viper(mal_url, tmp_file_path)

        except Exception as e:
            LOGGING.error('Problem preparing submission to Viper. Aborting task.')
            LOGGING.exception(sys.exc_info())
            LOGGING.exception(type(e))
            LOGGING.exception(e.args)
            LOGGING.exception(e)

        # Temp file is removed whether or not the upload succeeded.
        clean_up(tmp_file_path)
def queue_malware_list(full_list):
    """Partition and process a malware URL list.

    Splits *full_list* across BASECONFIG.malware_workers threads, starts
    them, and waits for completion.

    Params:
    - full_list: (type: MalwareUrl list) filtered malware URL list.
    """
    url_lists = numpy.array_split(full_list, BASECONFIG.malware_workers)

    worker_threads = []
    for url_list in url_lists:
        LOGGING.info('Queueing {0} items...'.format(len(url_list)))
        # Daemon threads die with the main process, giving Ctrl+C the intended
        # "kill the workers" behaviour (threading.Thread has no terminate()).
        list_thread = Thread(target=process_malware_list, args=[url_list], daemon=True)
        worker_threads.append(list_thread)

    for worker_thread in worker_threads:
        worker_thread.start()

    try:
        LOGGING.info('Spawning {0} ph0neutria spiders. Press CTRL+C to terminate.'.format(BASECONFIG.malware_workers))
        for worker_thread in worker_threads:
            worker_thread.join()
    except KeyboardInterrupt:
        # Bug fix: the original called worker_thread.terminate(), which does
        # not exist on threading.Thread and raised AttributeError here.
        # The daemon flag above lets the process exit and take workers along.
        LOGGING.warning('Mother spider received Ctrl+C. Killing babies.')
| phage-nz/ph0neutria | core/malware_utils.py | malware_utils.py | py | 5,478 | python | en | code | 299 | github-code | 36 |
30714122215 | import math
from re import L
import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose

# Input video and output folder layout.
# NOTE(review): paths are hard-coded to one subject/trial; parameterise
# before batch use.
subjectpath = '/Volumes/Transcend/data/2020醒吾華橋 科技部/DT006-f/'
videopath = subjectpath + 'DT006 t1 前測 側.MP4'
folderpath = 'DT006_t1_S_1_1/'
txtpath = subjectpath + folderpath + 'log.txt'

cap = cv2.VideoCapture(videopath)
clip = VideoFileClip(videopath)
width = cap.get(3)   # frame width in pixels
height = cap.get(4)  # frame height in pixels
TotalFrame = cap.get(cv2.CAP_PROP_FRAME_COUNT)

framelist = []
RAnklePosX = []  # per-frame right-ankle x position (pixels)
LAnklePosX = []  # per-frame left-ankle x position (pixels)

# Pass 1: run MediaPipe pose on every frame and record ankle x positions.
with mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as pose:
    while(True):
        success, image = cap.read()
        if not success:
            # NOTE(review): once the video is exhausted cap.read() keeps
            # failing, so this `continue` can spin forever if the break
            # below is never hit — confirm the exit condition.
            continue
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        results = pose.process(image)

        # Draw the pose annotation on the image.
        image.flags.writeable = False
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
        framelist.append(frame)

        # On frame 1, measure the left ankle from the right edge to decide
        # which side of the frame the subject starts on.
        # NOTE(review): firstX is recomputed every iteration, so the frame-1
        # measurement is NOT persisted to later frames — confirm intent.
        if int(frame) == 1 and results.pose_landmarks:
            firstX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width  # 0 at the right edge
        else:
            firstX = 5000  # default: treat the left edge as 0
        if firstX < 2500 and results.pose_landmarks:
            LankleX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width  # 0 at the right edge
            RankleX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ANKLE].x * width
        elif results.pose_landmarks:
            LankleX = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width  # 0 at the left edge
            RankleX = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ANKLE].x * width
        else:
            LankleX = 0
            RankleX = 0
        LAnklePosX.append(LankleX)
        RAnklePosX.append(RankleX)
        # NOTE(review): `&` binds tighter than `==`, so this compares
        # (waitKey(5) & frame) with TotalFrame; it only works because
        # waitKey returns -1 (all bits set) when no key is pressed — verify.
        if cv2.waitKey(5) & int(frame) == int(TotalFrame):
            break

# Moving average (window Amp) of both ankle trajectories.
Amp = 30  # Arithmetic mean parameter
LAm_X = []
RAm_X = []
Lcal_X = 0
Rcal_X = 0
for i in range(Amp):
    Lcal_X = Lcal_X + LAnklePosX[i]
    Rcal_X = Rcal_X + RAnklePosX[i]
for i in range(Amp, len(LAnklePosX)):
    LAm_X.append(Lcal_X / Amp)
    RAm_X.append(Rcal_X / Amp)
    # Slide the window: drop the oldest sample, add the newest.
    Lcal_X = Lcal_X - LAnklePosX[i-Amp]
    Lcal_X = Lcal_X + LAnklePosX[i]
    Rcal_X = Rcal_X - RAnklePosX[i-Amp]
    Rcal_X = Rcal_X + RAnklePosX[i]
Am_framelist = framelist[Amp:]

plt.figure()
plt.plot(Am_framelist, RAm_X)
plt.plot(Am_framelist, LAm_X)
plt.savefig(subjectpath + folderpath + 'ankle_trajectory.png')

# Frame-to-frame displacement (velocity) of the smoothed trajectories.
Ldx_list = [0]
Rdx_list = [0]
for i in range(1,len(Am_framelist)):
    Rdx = RAm_X[i]-RAm_X[i-1]
    Ldx = LAm_X[i]-LAm_X[i-1]
    Rdx_list.append(Rdx)
    Ldx_list.append(Ldx)

plt.figure()
plt.plot(Am_framelist, Rdx_list)
plt.plot(Am_framelist, Ldx_list)
plt.savefig(subjectpath + folderpath + 'delta_ankle.png')

# Ankle: find local velocity peaks as candidate slice points.
Rslice_frame = []
Lslice_frame = []
label = []
# Rmaxframe = 1
# Rmax = 0
# for i in range(1, 85):
#     if Rdx_list[i] > Rmax:
#         Rmax = Rdx_list[i]
#         Rmaxframe = i
# Rslice_frame.append(Am_framelist[Rmaxframe])
# Lmaxframe = 1
# Lmax = 0
# for i in range(1, 85):
#     if Ldx_list[i] > Lmax:
#         Lmax = Ldx_list[i]
#         Lmaxframe = i
# Lslice_frame.append(Am_framelist[Lmaxframe])

# A frame is a peak when its velocity strictly exceeds the surrounding
# +/-84 frames and the smoothed position has passed 1000 px.
for i in range(85,len(Am_framelist)-85):
    Rhighest = 1
    Lhighest = 1
    for j in range(1,85):
        if RAm_X[i] <= 1000 or Rdx_list[i] <= Rdx_list[i+j] or Rdx_list[i] <= Rdx_list[i-j]:
            Rhighest = 0
            break
    if Rhighest == 1:
        Rslice_frame.append(Am_framelist[i])
    for k in range(1,85):
        if LAm_X[i] <= 1000 or Ldx_list[i] <= Ldx_list[i+k] or Ldx_list[i] <= Ldx_list[i-k]:
            Lhighest = 0
            break
    if Lhighest == 1:
        Lslice_frame.append(Am_framelist[i])

# Tail window: also take the single largest velocity in the last 85 frames.
Rmaxframe = len(Am_framelist)-85
Rmax = 0
for i in range(len(Am_framelist)-85, len(Am_framelist)):
    if Rdx_list[i] > Rmax:
        Rmax = Rdx_list[i]
        Rmaxframe = i
Rslice_frame.append(Am_framelist[Rmaxframe])
Lmaxframe = len(Am_framelist)-85
Lmax = 0
for i in range(len(Am_framelist)-85, len(Am_framelist)):
    if Ldx_list[i] > Lmax:
        Lmax = Ldx_list[i]
        Lmaxframe = i
Lslice_frame.append(Am_framelist[Lmaxframe])

print("Slice frame calculated by the right ankle:", Rslice_frame)
print("Slice frame calculated by the left ankle::", Lslice_frame)

# Merge the two ordered lists; when left/right frames differ by <= 100,
# keep only the earlier of the pair.
Kslice_frame = []
i=0
j=0
while i < len(Lslice_frame) and j < len(Rslice_frame):
    if abs(Lslice_frame[i] - Rslice_frame[j]) <= 100:
        if Lslice_frame[i] <= Rslice_frame[j]:
            Kslice_frame.append(Lslice_frame[i])
            label.append("L")
        else:
            Kslice_frame.append(Rslice_frame[j])
            label.append("R")
        i += 1
        j += 1
    elif Lslice_frame[i] < Rslice_frame[j]:
        Kslice_frame.append(Lslice_frame[i])
        label.append("L")
        i += 1
    else:
        Kslice_frame.append(Rslice_frame[j])
        label.append("R")
        j += 1
print("total slice frame:", Kslice_frame)

# Persist the computed slice frames for later inspection.
with open(txtpath, 'w') as f:
    f.write(f"Slice frame calculated by the right ankle: {Rslice_frame}\n")
    f.write(f"Slice frame calculated by the left ankle: {Lslice_frame}\n")
    f.write(f"Total sliced frame: {Kslice_frame}\n")

# Export an 85-frame clip around each slice point (40 frames of lead-in).
fps=clip.fps
n=1
b=1
l=1
r=1
for i in range(len(Kslice_frame)):
    start_frame = Kslice_frame[i]- 40
    if start_frame + 85 <= TotalFrame:
        end_frame = start_frame + 85
    else:
        end_frame = TotalFrame
    start_time = start_frame / fps
    print("start",start_time)
    end_time = end_frame / fps
    print("end",end_time)
    clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'{n}.mp4')
    n+=1
    # if i <= 3:
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'B{b}.mp4')
    #     b+=1
    # elif label[i] == "L":
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'L{l}.mp4')
    #     l+=1
    # elif label[i] == "R":
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'R{r}.mp4')
    #     r+=1
74050760424 | import unittest
import parlai.utils.testing as testing_utils
class TestAlice(unittest.TestCase):
def test_alice_runs(self):
"""
Test that the ALICE agent is stable over time.
"""
valid, test = testing_utils.eval_model(dict(task='convai2', model='alice'))
self.assertEqual(valid['f1'], 0.01397)
# Allow running this regression test directly.
if __name__ == '__main__':
    unittest.main()
| facebookresearch/ParlAI | tests/nightly/cpu/test_alice.py | test_alice.py | py | 389 | python | en | code | 10,365 | github-code | 36 |
13038148522 | import fresh_tomatoes
import media
import requests
import json
import config
# API key for the YouTube Data API (kept out of source control in config).
youtube_suffix = config.youtube_key
youtube_prefix = 'https://www.youtube.com/watch?v='

# Movie list -- Here you can add and subtract movies as your tastes change
movie_list = ["There Will Be Blood", "The Life Aquatic", "Unforgiven",
              "Gladiator", "About Time", "The 'Burbs"]

# Populated by get_info(); consumed by fresh_tomatoes at the bottom.
movies = []
def get_info(video):
    """Fetch trailer and metadata for *video*; append a media.Movie to movies.

    Queries the YouTube Data API for the first trailer hit and the Open
    Movie Database for film metadata.

    Params:
    - video: movie title string.
    """
    # Request data from Youtube. Query parameters go through ``params`` so
    # titles containing spaces/punctuation are URL-encoded correctly (the
    # original string concatenation also fused the title and 'trailer').
    youtube = requests.get(
        'https://www.googleapis.com/youtube/v3/search',
        params={'part': 'snippet', 'q': video + ' trailer',
                'maxResults': 1, 'key': youtube_suffix},
        timeout=20)
    youtube_dict = youtube.json()
    video_id = youtube_dict['items'][0]['id']['videoId']
    video_url = youtube_prefix + video_id

    # Request data from OMDB
    result = requests.get(
        'http://www.omdbapi.com/',
        params={'t': video, 'y': '', 'plot': 'short', 'r': 'json'},
        timeout=20)
    resp_dict = result.json()

    trailer = video_url
    title = resp_dict["Title"]
    poster = resp_dict["Poster"]
    release = resp_dict["Released"]
    rating = resp_dict["Rated"]
    runtime = resp_dict["Runtime"]
    genre = resp_dict["Genre"]
    director = resp_dict["Director"]
    plot = resp_dict["Plot"]
    actors = resp_dict["Actors"]

    movies.append(media.Movie(trailer, title, poster, release, rating, runtime,
                              genre, director, plot, actors))
# Create movie instances and add them to the movies list
for movie in movie_list:
    get_info(movie)

# Render the static HTML trailer page and open it in a browser.
fresh_tomatoes.open_movies_page(movies)
| aaronbjohnson/movie-trailer-website | entertainment_center.py | entertainment_center.py | py | 1,862 | python | en | code | 0 | github-code | 36 |
43612785268 | # -*- coding: utf-8 -*-
from odoo import api, fields, models
class reader(models.Model):
    """An RFID reader device, identified by its MAC address."""
    _name = 'rfid.reader'
    _description = 'RFID Reader'

    name = fields.Char(string="Reader Name")
    alias = fields.Char(string="Reader Alias")
    mac_address = fields.Char(string="MAC Address")
    # Tag read events reported by this reader.
    tag_reads = fields.One2many(
        comodel_name="rfid.reads.tag",
        inverse_name="reader"
    )
    # Keep-alive (heartbeat) messages received from this reader.
    heartbeat_reads = fields.One2many(
        comodel_name="rfid.reads.heartbeat",
        inverse_name="reader"
    )
class tag_read(models.Model):
    """A single RFID tag read event reported by a reader."""
    _name = 'rfid.reads.tag'
    _description = "RFID Tag Reads"

    name = fields.Char()
    # Reader that produced this read; kept (with a null link) if the reader
    # record is deleted.
    reader = fields.Many2one(
        string="Reader",
        comodel_name="rfid.reader",
        ondelete="set null"
    )
    reader_alias = fields.Char(related='reader.alias', string="Reader Alias")
    epc = fields.Text(
        string="Electronic Product Code",
        help="""The Tag Electronic Product Code (EPC) Data field"""
    )
    pc = fields.Text(
        string="Protocol Control",
        help="""Protocol Control (PC) Bits"""
    )
    antennaPort = fields.Integer(
        string="Antenna",
        help="""Antenna ID on which Tag is read."""
    )
    peakRssi = fields.Integer(
        string="Peak RSSI",
        help="""Peak Received Signal Strength Indicator (RSSI)"""
    )
    seenCount = fields.Integer(
        string="Seen Count",
        help="""The number of times the Tag is read (in-case of periodic reporting)."""
    )
    timeStamp = fields.Datetime(
        string="Time Stamp",
        help="""Timestamp when Tag is seen."""
    )
    phase = fields.Float(
        string="Phase",
        help="""Phase information reported by the reader when the tag is seen."""
    )
    channelIndex = fields.Integer(
        string="Channel Index",
        help="""Index of the first channel when the tag is seen."""
    )
class heartbeat_read(models.Model):
    """A periodic keep-alive message received from a reader."""
    _name = 'rfid.reads.heartbeat'
    _description = "RFID Heartbeat"

    # Reader that sent the heartbeat; link is nulled if the reader is deleted.
    reader = fields.Many2one(
        string="Reader",
        comodel_name="rfid.reader",
        ondelete="set null"
    )
    timeStamp = fields.Datetime(
        string="Time Stamp",
        help="""Timestamp when Tag is seen."""
    )
| Maralai/fx-connect | models/models.py | models.py | py | 2,276 | python | en | code | 0 | github-code | 36 |
23420997070 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated schema migration for the `ventas` app.

    Renames the comanda estimated-time field and updates verbose names /
    relations on several comanda fields.

    NOTE(review): Django migrations are historical records — change schema
    via a new migration, never by editing this file.
    """

    dependencies = [
        ('ventas', '0075_auto_20161016_1522'),
    ]

    operations = [
        migrations.RenameField(
            model_name='comanda',
            old_name='tiempo_estimado_elaboracion',
            new_name='tiempo_estimado_procesamiento',
        ),
        migrations.AlterField(
            model_name='comanda',
            name='area_solicitante',
            field=models.ForeignKey(related_name='area_solicitante_comanda', verbose_name=b'Area Solicitante', to='bar.Sector'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='fecha_hora_pedido_comanda',
            field=models.DateTimeField(verbose_name=b'Fecha/hora Comanda'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='numero_pedido',
            field=models.ForeignKey(verbose_name=b'Numero de Pedido', to='ventas.Pedido'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='producto_a_entregar',
            field=models.ForeignKey(verbose_name=b'Producto Solicitado', to='stock.ProductoVenta'),
        ),
    ]
| pmmrpy/SIGB | ventas/migrations/0076_auto_20161016_1554.py | 0076_auto_20161016_1554.py | py | 1,288 | python | en | code | 0 | github-code | 36 |
40885362078 | import numpy as np
# Things that were changed from the original:
# - Reformatted code and variable names to conform with PEP8
# - Added legal header
# This file contains routines from Lisbon Machine Learning summer school.
# The code is freely distributed under a MIT license. https://github.com/LxMLS/lxmls-toolkit/
def parse_proj(scores, gold=None):
    # pylint: disable=too-many-locals
    """
    Parse using Eisner's algorithm.

    Params:
    - scores: (N+1)x(N+1) arc score matrix; scores[h, m] is the score of the
      arc from head h to modifier m, with index 0 as the root.
    - gold: optional gold head sequence; a constant 1.0 margin is added to
      every arc that is not the gold arc (cost-augmented decoding).

    Returns the list of predicted heads; heads[0] is unused (-1).
    """
    nr, nc = np.shape(scores)
    if nr != nc:
        raise ValueError("scores must be a squared matrix with nw+1 rows")
    N = nr - 1  # Number of words (excluding root).

    # CKY-style tables for complete/incomplete spans plus backpointers,
    # indexed by [s, t, direction] with direction right=1.
    complete = np.zeros([N + 1, N + 1, 2])
    incomplete = np.zeros([N + 1, N + 1, 2])
    complete_backtrack = -np.ones([N + 1, N + 1, 2], dtype=int)
    incomplete_backtrack = -np.ones([N + 1, N + 1, 2], dtype=int)

    # The root can never be a left-incomplete span's endpoint.
    incomplete[0, :, 0] -= np.inf

    # Loop from smaller items to larger items.
    for k in range(1, N + 1):
        for s in range(N - k + 1):
            t = s + k

            # First, create incomplete items.
            # left tree
            incomplete_vals0 = (
                complete[s, s:t, 1]
                + complete[(s + 1) : (t + 1), t, 0]
                + scores[t, s]
                + (0.0 if gold is not None and gold[s] == t else 1.0)
            )
            incomplete[s, t, 0] = np.max(incomplete_vals0)
            incomplete_backtrack[s, t, 0] = s + np.argmax(incomplete_vals0)
            # right tree
            incomplete_vals1 = (
                complete[s, s:t, 1]
                + complete[(s + 1) : (t + 1), t, 0]
                + scores[s, t]
                + (0.0 if gold is not None and gold[t] == s else 1.0)
            )
            incomplete[s, t, 1] = np.max(incomplete_vals1)
            incomplete_backtrack[s, t, 1] = s + np.argmax(incomplete_vals1)

            # Second, create complete items.
            # left tree
            complete_vals0 = complete[s, s:t, 0] + incomplete[s:t, t, 0]
            complete[s, t, 0] = np.max(complete_vals0)
            complete_backtrack[s, t, 0] = s + np.argmax(complete_vals0)
            # right tree
            complete_vals1 = incomplete[s, (s + 1) : (t + 1), 1] + complete[(s + 1) : (t + 1), t, 1]
            complete[s, t, 1] = np.max(complete_vals1)
            complete_backtrack[s, t, 1] = s + 1 + np.argmax(complete_vals1)

    # Recover heads by following the backpointers from the full right span.
    heads = [-1] * (N + 1)
    _backtrack_eisner(incomplete_backtrack, complete_backtrack, 0, N, 1, 1, heads)
    # (The original recomputed the projected score here and discarded it;
    # that dead computation has been removed.)
    return heads
# pylint: disable=too-many-arguments
def _backtrack_eisner(incomplete_backtrack, complete_backtrack, s, t, direction, complete, heads):
"""
Backtracking step in Eisner's algorithm.
- incomplete_backtrack is a (NW+1)-by-(NW+1) numpy array indexed by a start
position, an end position, and a direction flag (0 means left, 1 means
right). This array contains the arg-maxes of each step in the Eisner
algorithm when building *incomplete* spans.
- complete_backtrack is a (NW+1)-by-(NW+1) numpy array indexed by a start
position, an end position, and a direction flag (0 means left, 1 means
right). This array contains the arg-maxes of each step in the Eisner
algorithm when building *complete* spans.
- s is the current start of the span
- t is the current end of the span
- direction is 0 (left attachment) or 1 (right attachment)
- complete is 1 if the current span is complete, and 0 otherwise
- heads is a (NW+1)-sized numpy array of integers which is a placeholder
for storing the head of each word.
"""
if s == t:
return
if complete:
r = complete_backtrack[s][t][direction]
if direction == 0:
_backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 0, 1, heads)
_backtrack_eisner(incomplete_backtrack, complete_backtrack, r, t, 0, 0, heads)
return
_backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 0, heads)
_backtrack_eisner(incomplete_backtrack, complete_backtrack, r, t, 1, 1, heads)
return
r = incomplete_backtrack[s][t][direction]
if direction == 0:
heads[s] = t
_backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 1, heads)
_backtrack_eisner(incomplete_backtrack, complete_backtrack, r + 1, t, 0, 1, heads)
return
heads[t] = s
_backtrack_eisner(incomplete_backtrack, complete_backtrack, s, r, 1, 1, heads)
_backtrack_eisner(incomplete_backtrack, complete_backtrack, r + 1, t, 0, 1, heads)
return
| IntelLabs/nlp-architect | nlp_architect/models/bist/decoder.py | decoder.py | py | 4,909 | python | en | code | 2,921 | github-code | 36 |
40067300798 | import pandas as pd
import numpy as np
from sklearn import preprocessing
# Index of the imputed dataset to process (files are numbered per imputation run).
i = 5
train_mice = '../data/five_imps/train_mice_%d.csv' % i
all_mice = '../data/five_imps/all_mice_%d.csv' % i
train_hot = '../data/five_imps/train_mice_hot_%d.csv' % i
test_hot = '../data/five_imps/test_mice_hot_%d.csv' % i
# Read dataframes: training rows only, and train+test stacked together.
X = pd.read_csv(train_mice)
X_all = pd.read_csv(all_mice)
y = np.ravel(X[['y']])
X.drop('y', axis=1, inplace=True)
# Specify categorical columns to one-hot encode.
is_cat = ['workclass', 'edu', 'married', 'occupation', 'relationship',
          'race', 'sex', 'country']
# One-hot encode on the combined frame so train and test share the same
# dummy columns even when a category is absent from one split.
X_all_enc = pd.get_dummies(X_all, columns=is_cat)
# .iloc replaces the long-removed DataFrame.ix indexer: the first len(X)
# rows are the training portion, the remainder is the test portion.
X_train_enc = X_all_enc.iloc[:len(X), :].copy()
X_train_enc['y'] = y
X_train_enc.to_csv(train_hot, index=False)
X_test_enc = X_all_enc.iloc[len(X):, :]
X_test_enc.to_csv(test_hot, index=False)
| Pold87/ml-final-ass | Python/onehot.py | onehot.py | py | 915 | python | en | code | 0 | github-code | 36 |
6798346511 | import celery
import logging
import requests
from django.conf import settings
from ..models import Job
from ..helper import data_job_for_applicant
# URL path templates appended to the jobs-service host (see
# ApplicantJobMixin.get_url); the detail template is filled with a job uuid.
JOB_URL_CREATE = 'api/admin/'
JOB_URL_DETAIL = 'api/admin/{}/'
# Logger shared by the tasks in this module.
logger = logging.getLogger('celery-task')
class ApplicantJobMixin:
    """Shared helpers for tasks that sync applicant Jobs with the jobs service."""

    # Base URL of the external jobs service; requests authenticate with a
    # shared secret header.
    host = settings.EXOLEVER_HOST + settings.SERVICE_JOBS_HOST
    headers = {'USERNAME': settings.AUTH_SECRET_KEY}

    def get_url(self, uuid=None):
        """Return the collection URL, or the detail URL when a uuid is given."""
        if uuid is None:
            return self.host + JOB_URL_CREATE
        # str.format already stringifies the argument; no explicit __str__ needed.
        return self.host + JOB_URL_DETAIL.format(uuid)

    def get_job(self, *args, **kwargs):
        """Return the Job referenced by kwargs['job_id'].

        Raises:
            Exception: when no Job with that id exists (the original raised a
                bare, message-less Exception; the message and cause are kept
                so failures are diagnosable).
        """
        try:
            return Job.objects.get(id=kwargs.get('job_id'))
        except Job.DoesNotExist as exc:
            logger.error('Job does not exist')
            raise Exception('Job does not exist') from exc
class ApplicantJobCreate(ApplicantJobMixin, celery.Task):
    """Celery task that registers an applicant's Job with the jobs service."""

    name = 'ApplicantJobCreate'

    def run(self, *args, **kwargs):
        if settings.POPULATOR_MODE:
            return
        job = self.get_job(*args, **kwargs)
        payload = data_job_for_applicant(job.applicant)
        url = self.get_url()
        try:
            response = requests.post(url, json=payload, headers=self.headers)
            assert response.status_code == requests.codes.created
            # Remember the uuid the remote service assigned to this job.
            job.uuid = response.json().get('uuid')
            job.save()
        except AssertionError:
            message = 'Exception: {}-{}'.format(response.content, url)
            logger.error(message)
            self.retry(countdown=120, max_retries=20)
class ApplicantJobUpdate(ApplicantJobMixin, celery.Task):
    """Celery task that pushes updated applicant Job data to the jobs service."""

    name = 'ApplicantJobUpdate'

    def run(self, *args, **kwargs):
        if settings.POPULATOR_MODE:
            return
        job = self.get_job(*args, **kwargs)
        payload = data_job_for_applicant(job.applicant)
        url = self.get_url(uuid=str(job.uuid))
        try:
            response = requests.put(url, json=payload, headers=self.headers)
            assert response.status_code == requests.codes.ok
        except AssertionError:
            message = 'Exception: {}-{}'.format(response.content, url)
            logger.error(message)
            self.retry(countdown=120, max_retries=20)
class ApplicantJobDelete(ApplicantJobMixin, celery.Task):
    """Celery task that removes an applicant's Job from the jobs service."""

    name = 'ApplicantJobDelete'

    def run(self, *args, **kwargs):
        uuid = kwargs.get('job_uuid', None)
        # Nothing to delete when no uuid was ever assigned (or when populating).
        if settings.POPULATOR_MODE or uuid is None:
            return
        # Reuse the local instead of re-reading kwargs a second time.
        url = self.get_url(uuid=uuid)
        try:
            response = requests.delete(url, headers=self.headers)
            assert response.status_code == requests.codes.no_content
        except AssertionError:
            message = 'Exception: {}-{}'.format(response.content, url)
            logger.error(message)
            self.retry(countdown=120, max_retries=20)
| tomasgarzon/exo-services | service-exo-opportunities/jobs/tasks/applicant.py | applicant.py | py | 2,899 | python | en | code | 0 | github-code | 36 |
28692702951 | # Demander à l'utilisateur de saisir une phrase
def count_words(sentence):
    """Count whitespace-separated words by scanning character by character.

    A word is counted when a non-space character follows a space (or the
    start of the string), so runs of spaces are not over-counted.
    """
    word_count = 0
    inside_word = False
    for char in sentence:
        if char != ' ':
            if not inside_word:
                inside_word = True
                word_count += 1
        else:
            inside_word = False
    return word_count


def to_uppercase(sentence):
    """Return the sentence with ASCII lowercase letters converted to uppercase."""
    result = ""
    for char in sentence:
        if 'a' <= char <= 'z':
            # Uppercase ASCII letters sit 32 code points below lowercase ones.
            result += chr(ord(char) - 32)
        else:
            result += char
    return result


def to_lowercase(sentence):
    """Return the sentence with ASCII uppercase letters converted to lowercase."""
    result = ""
    for char in sentence:
        if 'A' <= char <= 'Z':
            result += chr(ord(char) + 32)
        else:
            result += char
    return result


def main():
    """Prompt for a sentence, then print its case variants and word count."""
    phrase_utilisateur = input("Veuillez saisir une phrase : ")
    # Bug fix: the original added 1 extra word whenever the sentence did not
    # end with a space, double-counting the last word (it had already been
    # counted when its first character was seen).
    nombre_mots = count_words(phrase_utilisateur)
    phrase_majuscules = to_uppercase(phrase_utilisateur)
    phrase_minuscules = to_lowercase(phrase_utilisateur)
    print(f"La phrase en majuscules : {phrase_majuscules}")
    print(f"La phrase en minuscules : {phrase_minuscules}")
    print(f"Le nombre de mots dans la phrase : {nombre_mots}")


if __name__ == "__main__":
    main()
| felixgetaccess/dc5-freyss-felix-baignoire-data | ex1-b.py | ex1-b.py | py | 1,386 | python | fr | code | 0 | github-code | 36 |
20360966046 | from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from PIL import Image
# =================================================================================
def convertion():
    """Open the image chosen in t1 and save it with the folder (t2),
    name (t3) and extension (types) picked by the user.

    The original read the four fields into locals it never used and then
    re-read the widgets; the values are now read once and actually used.
    """
    source_path = t1.get()
    target_path = rf"{t2.get()}\{t3.get()}{types.get()}"
    im = Image.open(source_path)
    im.save(target_path)
def clear():
    """Empty every input field of the form."""
    for field in (t1, t2, t3, types):
        field.delete(0, 'end')
def brsfnc():
    """Ask for a destination folder and put its path into t2."""
    location = askdirectory()
    # Deleting from an empty entry is a no-op, so the original's
    # already-empty special case is unnecessary.
    t2.delete(0, 'end')
    t2.insert(0, location)
def brspic():
    """Ask for the source image file and put its path into t1."""
    f_loc = askopenfilename()
    # Unconditional delete+insert is equivalent to the original's two branches.
    t1.delete(0, 'end')
    t1.insert(0, f_loc)
# =================================================================================
# Main window setup.
win = Tk()
win.title("Image Converter")
win.iconbitmap(r"C:\Users\Public\Pictures\Sample Pictures\Treetog-Junior-Monitor-desktop.ico")
win.geometry("900x500")
win.maxsize(900, 500)
win.minsize(900, 500)
win['bg'] = "#83a2f2"

heading = Label(win, text="Image Converter", font=("verdana", 35, "bold"), bg="#83a2f2", fg="gold")
heading.place(x=150, y=10)

# Source image path row.  grid()/place() return None, so widget creation and
# layout are kept on separate lines to keep real widget references.
l1 = Label(win, text="Enter The Image Path", font=("verdana", 15, "bold"))
l1.grid(row=0, column=0, padx=20, pady=120)
t1 = Entry(win, width=25, borderwidth=5, font=("verdana", 15, "bold"))
t1.grid(row=0, column=1, pady=120)
brs = Button(win, text="Browse File", font=("verdan", 8, "bold"), borderwidth=5, width=10, command=brspic)
brs.place(x=660, y=120)

# Destination folder row.
l2 = Label(win, text="Enter Saving Path", font=("verdana", 15, "bold"))
l2.place(x=20, y=200)
t2 = Entry(win, width=25, borderwidth=5, font=("verdana", 15, "bold"))
t2.place(x=293, y=200)
brsbtn = Button(win, text="Browse Folder", font=("verdan", 8, "bold"), borderwidth=5, width=14, command=brsfnc)
brsbtn.place(x=660, y=200)

# Output file name row.
l3 = Label(win, text="Enter Saving Name", font=("verdana", 15, "bold"))
l3.place(x=20, y=280)
t3 = Entry(win, width=20, borderwidth=5, font=("verdana", 10, "bold"))
t3.place(x=293, y=280)

# Output format selector.  '.jgp' in the original was a typo: PIL cannot
# infer an image format from that extension, so saving would fail.
combo = ttk.Label(win, text="Enter image type", font=("verdana", 14, "bold"))
combo.place(x=20, y=360)
types = ttk.Combobox(win, width=27, font=("verdana", 12, "bold"))
types['values'] = ('.jpg', '.png', '.ico', '.jpeg', '.gif')
types.place(x=293, y=360)
types.current()

# Action buttons.  The original bound both buttons to the same name (b1),
# silently shadowing the Convert button's reference.
b1 = Button(win, text="Convert", font=("verdan", 12, "bold"), borderwidth=5, width=12, command=convertion)
b1.place(x=730, y=370)
b2 = Button(win, text="Clear", font=("verdan", 12, "bold"), borderwidth=5, width=12, command=clear)
b2.place(x=730, y=440)

win.mainloop()
39366415760 | from data_preparation import Preprocessing
import pickle
import tensorflow as tf
import os
from tensorflow.python.framework import ops
from sklearn.metrics.classification import accuracy_score
from sklearn.metrics import precision_recall_fscore_support
import warnings
warnings.filterwarnings("ignore")
# Silence TensorFlow's C++ log output (level 3 = errors only); must be set
# before the TF runtime starts emitting messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
ops.reset_default_graph()
# --- Script configuration --------------------------------------------------
checkpoints_dir = 'checkpoints/1519576475' # Change this path based on the output from '$ python3 train.py' script "Model saved in: checkpoints/1517781236"
data_dir = 'data/' # Data directory containing 'data.csv' file with 'SentimentText' and 'Sentiment\'. Intermediate files will automatically be stored here'
stopwords_file = 'data/stopwords.txt' # Path to stopwords file. If stopwords_file is None, no stopwords will be used'
sequence_len = None # Maximum sequence length
n_samples= None # Set n_samples=None to use the whole dataset
test_size = 0.2
batch_size = 100 #Batch size
random_state = 0 # Random state used for data splitting. Default is 0
# Guard kept from the CLI version of this script; checkpoints_dir is
# hard-coded just above, so this branch never fires here.
if checkpoints_dir is None:
    raise ValueError('Please, a valid checkpoints directory is required (--checkpoints_dir <file name>)')
# Load data
data_lstm = Preprocessing(data_dir=data_dir,
                       stopwords_file=stopwords_file,
                       sequence_len=sequence_len,
                       n_samples=n_samples,
                       test_size=test_size,
                       val_samples=batch_size,
                       random_state=random_state,
                       ensure_preprocessed=True)
# Import graph and evaluate the model using test data
original_text, x_test, y_test, test_seq_len = data_lstm.get_test_data(original_text=True)
graph = tf.Graph()
with graph.as_default():
    sess = tf.Session()
    # Import graph and restore its weights
    print('Restoring graph ...')
    saver = tf.train.import_meta_graph("{}/model.ckpt.meta".format(checkpoints_dir))
    saver.restore(sess, ("{}/model.ckpt".format(checkpoints_dir)))
    # Recover input/output tensors
    # NOTE: `input` shadows the builtin of the same name; kept as-is because
    # it mirrors the graph's op name.
    input = graph.get_operation_by_name('input').outputs[0]
    target = graph.get_operation_by_name('target').outputs[0]
    seq_len = graph.get_operation_by_name('lengths').outputs[0]
    dropout_keep_prob = graph.get_operation_by_name('dropout_keep_prob').outputs[0]
    predict = graph.get_operation_by_name('final_layer/softmax/predictions').outputs[0]
    accuracy = graph.get_operation_by_name('accuracy/accuracy').outputs[0]
    # Perform prediction
    # dropout_keep_prob of 1 disables dropout for evaluation.
    pred, acc = sess.run([predict, accuracy],
                         feed_dict={input: x_test,
                                    target: y_test,
                                    seq_len: test_seq_len,
                                    dropout_keep_prob: 1})
    print("Evaluation done.")
    # Print results
    print('\nAccuracy: {0:.4f}\n'.format(acc))
    # Show the first 100 samples; assumes the test split has at least 100
    # rows -- TODO confirm for small datasets.
    for i in range(100):
        print('Sample: {0}'.format(original_text[i]))
        print('Predicted sentiment: [{0:.4f}, {1:.4f}]'.format(pred[i, 0], pred[i, 1]))
        print('Real sentiment: {0}\n'.format(y_test[i]))
| PacktPublishing/Deep-Learning-with-TensorFlow-Second-Edition | Chapter06/LSTM_Sentiment/predict.py | predict.py | py | 3,018 | python | en | code | 48 | github-code | 36 |
15138075498 | """
test cli module
"""
import subprocess
from typing import List, Tuple
def capture(command: List[str]) -> Tuple[bytes, bytes, int]:
    """Run *command* and return its (stdout, stderr, returncode) as bytes.

    Uses subprocess.run, which waits for the process and closes its pipes,
    instead of the original hand-rolled Popen/communicate pair (which left
    the Popen object unclosed).  check=False preserves the original
    behaviour of never raising on a non-zero exit status.
    """
    result = subprocess.run(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        check=False,
    )
    return result.stdout, result.stderr, result.returncode
def test_cli() -> None:
    """The bare `sdwaddle` command exits successfully."""
    out, err, exitcode = capture(["sdwaddle"])
    assert exitcode == 0
| entelecheia/super-duper-waddle | tests/sdwaddle/test_cli.py | test_cli.py | py | 469 | python | en | code | 19 | github-code | 36 |
18515803588 | class Solution(object):
    def removeComments(self, source):
        """
        Strip C-style comments from a program given as a list of lines.

        Line comments (//) discard the rest of the line; block comments
        (/* ... */) may span lines, and lines joined by a removed block
        comment are emitted as one line.  Empty output lines are dropped.

        :type source: List[str]
        :rtype: List[str]
        """
        inblock = False  # True while inside an unterminated /* ... */ block
        result = []
        remain = []  # characters kept so far for the current output line
        for line in source:
            # Positions where a block comment opened/closed on this line.
            # Initialized to -2 (not -1) so the `i-2 != ...` guards below can
            # never match accidentally at the start of a line (i >= 1 gives
            # i-2 >= -1).
            blockStart = -2
            blockEnd = -2
            for i,c in enumerate(line):
                if not inblock:
                    remain.append(c)
                    # "//" starts a line comment -- unless the '/' at i-1 is
                    # the tail of a just-closed "*/" (its '*' was at i-2 ==
                    # blockEnd), in which case that '/' is not a new opener.
                    if i > 0 and i-2 != blockEnd and line[i-1] == "/" and c == "/": # //
                        remain.pop()
                        remain.pop()
                        break
                    # "/*" opens a block comment, with the same guard against
                    # reusing the '/' from a preceding "*/".
                    if i > 0 and i-2 != blockEnd and line[i-1] == "/" and c == "*": # /*
                        inblock = True
                        remain.pop()
                        remain.pop()
                        blockStart = i-1
                # "*/" closes the block -- unless the '*' belongs to the "/*"
                # that just opened it (prevents "/*/" from closing at once).
                elif i > 0 and i-2 != blockStart and line[i-1] == "*" and c == "/": # */
                    inblock = False
                    blockEnd = i -1
            if not inblock:
                # Emit only non-empty lines.  While a block comment is still
                # open, `remain` carries over so the text before "/*" joins
                # the text after the eventual "*/" on a later line.
                if len(remain):
                    result.append("".join(remain))
                remain = []
        return result
#Remove Comments
#https://leetcode.com/problems/remove-comments/description/ | jimmy623/LeetCode | Solutions/Remove Comments.py | Remove Comments.py | py | 1,081 | python | en | code | 0 | github-code | 36 |
16645056447 | import re
import pandas as pd
pd.set_option('display.max_rows', None)
from pathlib import Path
# Regex for one anthology section: "##<title>", blank line, <author>, blank
# line, then the introductory paragraph (non-greedy, up to a blank line).
SECTION_PATTERN = r'^##(.+?)\n\n(.+?)\n\n((?:.|\n)*?)\n\n'


def tidy_anthology(text):
    """Rebuild the anthology text, stripping the author-repeated preamble.

    For each "##title / author / intro" section matched by SECTION_PATTERN,
    a leading chunk of the intro that starts with the author's name (up to
    the next blank line) is removed.

    Args:
        text: the full anthology text.

    Returns:
        The reassembled text with the unwanted preambles removed.
    """
    new_text = ''
    for match in re.finditer(SECTION_PATTERN, text, re.MULTILINE | re.DOTALL):
        title = match.group(1)
        author = match.group(2)
        intro = match.group(3)
        # re.escape keeps regex metacharacters in the author's name (e.g.
        # initials with dots, or parentheses) from corrupting the pattern --
        # the original interpolated the raw name into the regex.
        new_intro = re.sub(r'^' + re.escape(author) + r'(?:.|\n)*?\n\n', '', intro)
        # NOTE(review): the section pattern captures the intro lazily up to
        # the first blank line, so `intro` never contains "\n\n" and this
        # sub rarely (if ever) fires -- verify the intended behaviour.
        new_text += '##' + title + '\n\n' + author + '\n\n' + new_intro
    return new_text


def main():
    """Read the raw anthology, tidy it, and write the result to disk."""
    file_path = Path('texts/wf_anthology.txt')
    with open(file_path, 'r', encoding='utf-8') as f:
        text = f.read()
    with open('wf_anthology_manually_tidied_v1.txt', 'w', encoding='utf-8') as outfile:
        outfile.write(tidy_anthology(text))


if __name__ == '__main__':
    main()
| kspicer80/weird_fiction_experiments | author_story_similarity.py | author_story_similarity.py | py | 1,727 | python | en | code | 0 | github-code | 36 |
25546091528 | # def sum_of_intervals(intervals):
# sum_of_length = []
# for char in intervals:
# length_of_intervals = char[1]-char[0]
# print(sum_of_length)
# def sum_of_intervals(intervals):
# # Sort the intervals
# sorted_intervals = sorted(intervals)
# # Initialize variables to keep track of the current interval and the total sum
# total_sum = 0
# for interval in sorted_intervals:
# length_of_intervals = interval[1]-interval[0]
# total_sum += length_of_intervals
# return total_sum
# result = sum_of_intervals([(6, 10), (1, 9)])
# print(result)
def merge_overlapping_intervals(intervals):
    """Merge overlapping (start, end) intervals.

    Args:
        intervals: iterable of (start, end) tuples.

    Returns:
        A sorted list of tuples with overlapping or touching intervals merged.
    """
    if not intervals:
        # The original indexed sorted_intervals[0] and crashed on [].
        return []
    # Sort the intervals so overlaps are always with the previous merged one.
    sorted_intervals = sorted(intervals)
    merged_intervals = [sorted_intervals[0]]
    # Iterate from index 1 and merge each interval into the running list.
    for interval in sorted_intervals[1:]:
        last_start, last_end = merged_intervals[-1]
        if last_end >= interval[0]:
            # Overlapping (or touching): extend the last merged interval.
            merged_intervals[-1] = (last_start, max(last_end, interval[1]))
        else:
            # Disjoint: start a new merged interval.
            merged_intervals.append(interval)
    return merged_intervals
def sum_of_intervals(intervals):
    """Return the total length covered by the intervals, counting overlaps once.

    Args:
        intervals: iterable of (start, end) tuples.

    Returns:
        Sum of (end - start) over the merged, non-overlapping intervals.
    """
    # Merge overlapping intervals first so shared stretches count only once.
    merged_intervals = merge_overlapping_intervals(intervals)
    # (The original also printed the merged list here -- leftover debug output.)
    total_sum = 0
    for start, end in merged_intervals:
        total_sum += end - start
    return total_sum
# Demo: (1, 4) and (1, 6) overlap and merge to (1, 6), so the covered length is 5.
result = sum_of_intervals([(1, 4), (1, 6)])
print(result)
18767587589 | # Задача 26: Напишите программу, которая на вход принимает два числа A и B,
# и возводит число А в целую степень B с помощью рекурсии.
import my_functions
# Banner: the program raises a user-supplied number to a user-supplied
# integer power using recursion (per the task description above).
my_functions.show_header("Программа возводит введенное число в введеную степень")
# Read the base and the exponent from the user (prompts are in Russian).
num = my_functions.get_number("Введите число:")
degree = my_functions.get_number("Введите степень:")
# Degree_Recursion presumably computes num ** degree recursively -- the
# helper lives in my_functions; verify there.
print(f'\nЧисло {num} в степени {degree} = {my_functions.Degree_Recursion(num, degree)}')
my_functions.end()
| AntonkinAnton/Python_HomeWork | task026.py | task026.py | py | 639 | python | ru | code | 0 | github-code | 36 |
72170366505 | import pandas as pd
import numpy as np
import joblib
import pickle
import warnings
import os
from data.make_dataset import preprocess_train_df
warnings.filterwarnings("ignore")
def make_categorical_dataset(processed_dfs, proteins_df):
    """Add feature counts and categorical severity labels to each UPDRS frame.

    Severity cutoffs (inclusive upper bounds for mild/moderate; above the
    moderate bound is severe):
        updrs_1: mild <= 10, moderate <= 21
        updrs_2: mild <= 12, moderate <= 29
        updrs_3: mild <= 32, moderate <= 58
        updrs_4: mild <= 4,  moderate <= 12

    Args:
        processed_dfs: dict mapping "updrs_1".."updrs_4" to dataframes with
            one row per visit containing all protein and peptide columns.
        proteins_df: dataframe with a UniProt column listing the proteins.

    Returns:
        dict mapping "updrs_1".."updrs_4" to the same dataframes, modified in
        place with num_prot_pep / num_prot / num_pept sums and a categorical
        "<target>_cat" column.
    """
    protein_list = list(proteins_df["UniProt"].unique())

    # Non-feature bookkeeping columns.
    info_cols = [
        "visit_id",
        "patient_id",
        "visit_month",
        "updrs_1",
        "updrs_2",
        "updrs_3",
        "updrs_4",
        "kfold",
    ]

    # Peptide columns are whatever remains after proteins and info columns.
    # Computed once, before any new columns are appended below.
    peptide_list = [
        col
        for col in processed_dfs["updrs_1"].columns
        if col not in protein_list and col not in info_cols
    ]
    prot_pep_cols = protein_list + peptide_list

    # (mild upper bound, moderate upper bound) per UPDRS part; the original
    # repeated the same four statements once per target.
    severity_cutoffs = {
        "updrs_1": (10, 21),
        "updrs_2": (12, 29),
        "updrs_3": (32, 58),
        "updrs_4": (4, 12),
    }

    categorical_dfs = {}
    for target, (mild_max, moderate_max) in severity_cutoffs.items():
        df = processed_dfs[target]
        # Row-wise sums over the feature columns (a presence count when the
        # features are 0/1 indicators -- matches the original's naming).
        df["num_prot_pep"] = df[prot_pep_cols].sum(axis=1)
        df["num_prot"] = df[protein_list].sum(axis=1)
        df["num_pept"] = df[peptide_list].sum(axis=1)
        # Map the numeric score onto the clinical severity category.
        df[f"{target}_cat"] = np.where(
            df[target] <= mild_max,
            "mild",
            np.where(df[target] <= moderate_max, "moderate", "severe"),
        )
        categorical_dfs[target] = df

    return categorical_dfs
def add_med_data(clin_df, updrs_df):
    """Attach medication-state dummy columns to the UPDRS dataframe.

    Missing values in upd23b_clinical_state_on_medication are treated as
    "Unknown"; the column is one-hot encoded (first category dropped) and
    the resulting On/Unknown indicators are merged onto updrs_df by visit_id.

    Args:
        clin_df: dataframe with visit_id and the medication-state column.
        updrs_df: dataframe with visit_id plus feature columns.

    Returns:
        updrs_df joined with the two medication dummy columns.
    """
    med_col = "upd23b_clinical_state_on_medication"
    clin_df[med_col] = clin_df[med_col].fillna("Unknown")
    # One-hot encode the medication state; drop_first removes the first
    # (alphabetical) category so the remaining dummies are On / Unknown.
    dummies = pd.get_dummies(clin_df, columns=[med_col], drop_first=True)
    keep_cols = [
        "visit_id",
        med_col + "_On",
        med_col + "_Unknown",
    ]
    return pd.merge(updrs_df, dummies[keep_cols], on="visit_id")
def predict_updrs1(df):
    """Predict the binary updrs_1 severity category with the saved CatBoost model.

    Args:
        df: dataframe with the feature columns plus the updrs_1 bookkeeping
            columns (updrs_1, updrs_1_cat, kfold, visit_id, patient_id).

    Returns:
        df with an added binary "updrs_1_cat_preds" column.

    Raises:
        AttributeError: if the loaded object does not support predict_proba.
    """
    # Load the saved model.
    model_path = os.path.join(
        "..", "models", "catboost_updrs_1_model_hyperopt_smote.sav"
    )
    model = joblib.load(model_path)

    # Drop identifiers and target columns before scoring.
    X = df.drop(columns=["updrs_1_cat", "kfold", "visit_id", "patient_id", "updrs_1"])
    try:
        preds = model.predict_proba(X)[:, 1]
    except AttributeError as e:
        # The original swallowed this error and then crashed with a NameError
        # on the unbound `preds`; log and re-raise so the real failure shows.
        print(f"Error: {e}")
        raise

    # Tuned decision threshold of 0.46 for the positive class.
    df["updrs_1_cat_preds"] = np.where(preds >= 0.46, 1, 0)
    return df
def predict_updrs2(df):
    """Predict the binary updrs_2 severity category with the saved CatBoost model.

    Args:
        df: dataframe with the feature columns plus the updrs_2 bookkeeping
            columns (updrs_2, updrs_2_cat, kfold, visit_id, patient_id).

    Returns:
        df with an added binary "updrs_2_cat_preds" column.

    Raises:
        AttributeError: if the loaded object does not support predict_proba.
    """
    model_path = os.path.join(
        "..", "models", "catboost_updrs_2_model_hyperopt_smote_meds.sav"
    )
    model = joblib.load(model_path)

    # Drop identifiers and target columns before scoring.
    X = df.drop(columns=["updrs_2_cat", "kfold", "visit_id", "patient_id", "updrs_2"])
    try:
        preds = model.predict_proba(X)[:, 1]
    except AttributeError as e:
        # The original swallowed this error and then crashed with a NameError
        # on the unbound `preds`; log and re-raise so the real failure shows.
        print(f"Error: {e}")
        raise

    # Tuned decision threshold of 0.22 for the positive class.
    df["updrs_2_cat_preds"] = np.where(preds >= 0.22, 1, 0)
    return df
def predict_updrs3(df):
    """Predict the binary updrs_3 severity category with the saved LightGBM model.

    Args:
        df: dataframe with the feature columns plus the updrs_3 bookkeeping
            columns (updrs_3, updrs_3_cat, kfold, visit_id, patient_id).

    Returns:
        df with an added binary "updrs_3_cat_preds" column.

    Raises:
        AttributeError: if the loaded object does not support predict_proba.
    """
    # Load the saved model.
    filename = os.path.join(
        "..", "models", "lgboost_updrs_3_model_hyperopt_smote_meds.sav"
    )
    model = joblib.load(filename)

    # Drop identifiers and target columns before scoring.
    X = df.drop(columns=["updrs_3_cat", "kfold", "visit_id", "patient_id", "updrs_3"])
    try:
        # verbose=-100: suppress LightGBM's prediction-time logging.
        preds = model.predict_proba(X, verbose=-100)[:, 1]
    except AttributeError as e:
        # The original swallowed this error and then crashed with a NameError
        # on the unbound `preds`; log and re-raise so the real failure shows.
        print(f"Error: {e}")
        raise

    # Tuned decision threshold of 0.28 for the positive class.
    df["updrs_3_cat_preds"] = np.where(preds >= 0.28, 1, 0)
    return df
if __name__ == "__main__":
    # read in the data
    train_clin_path = os.path.join("..", "data", "raw", "train_clinical_data.csv")
    train_prot_path = os.path.join("..", "data", "raw", "train_proteins.csv")
    train_pep_path = os.path.join("..", "data", "raw", "train_peptides.csv")
    train_clin_df = pd.read_csv(train_clin_path)
    train_prot_df = pd.read_csv(train_prot_path)
    train_pep_df = pd.read_csv(train_pep_path)
    # Merge/clean the raw tables into one dataframe per UPDRS target.
    proc_dfs = preprocess_train_df(
        train_clin_df, train_prot_df, train_pep_df, save_data=False
    )
    # convert to only 12 month data since that was what was used for training
    for updrs in ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]:
        temp_df = proc_dfs[updrs]
        proc_dfs[updrs] = temp_df[temp_df["visit_month"] <= 12]
    cat_dfs = make_categorical_dataset(proc_dfs, train_prot_df)
    # Only the updrs_2 / updrs_3 models use medication features (their saved
    # model files carry a "_meds" suffix), so only those frames get them.
    cat_dfs["updrs_2"] = add_med_data(train_clin_df, cat_dfs["updrs_2"])
    cat_dfs["updrs_3"] = add_med_data(train_clin_df, cat_dfs["updrs_3"])
    pred_updrs1_df = predict_updrs1(cat_dfs["updrs_1"])
    pred_updrs2_df = predict_updrs2(cat_dfs["updrs_2"])
    pred_updrs3_df = predict_updrs3(cat_dfs["updrs_3"])
    # combine prediction columns into one dataframe
    updrs_preds = pd.merge(
        pred_updrs1_df,
        pred_updrs2_df[["visit_id", "updrs_2_cat", "updrs_2_cat_preds"]],
        on="visit_id",
    )
    # Left join: keep every visit even when it has no updrs_3 prediction.
    updrs_preds = pd.merge(
        updrs_preds,
        pred_updrs3_df[["visit_id", "updrs_3_cat", "updrs_3_cat_preds"]],
        on="visit_id",
        how="left",
    )
    # save the dataframe as a csv
    file_path = os.path.join("..", "data", "predictions", "full_updrs_preds.csv")
    updrs_preds.to_csv(file_path, index=False)
| dagartga/Boosted-Models-for-Parkinsons-Prediction | src/pred_pipeline.py | pred_pipeline.py | py | 9,293 | python | en | code | 0 | github-code | 36 |
17585289452 | import requests
from starwhale import Link, Image, Point, dataset, Polygon, MIMEType # noqa: F401
from starwhale.utils.retry import http_retry
# Root of the mirrored Cityscapes files on OSS object storage.
PATH_ROOT = "https://starwhale-examples.oss-cn-beijing.aliyuncs.com/dataset/cityscapes"
# Remote sub-paths for the disparity masks and the left/right camera images.
ANNO_PATH = "disparity/train"
DATA_PATH_LEFT = "leftImg8bit/train"
DATA_PATH_RIGHT = "rightImg8bit/train"
# Filename suffixes used to pair a mask with its two camera images.
SUFFIX_MASK = "_disparity.png"
SUFFIX_DATA_LEFT = "_leftImg8bit.png"
SUFFIX_DATA_RIGHT = "_rightImg8bit.png"
@http_retry
def request_link_json(anno_link):
    """Fetch *anno_link* and return the decoded JSON body (retried on failure)."""
    response = requests.get(anno_link, timeout=10)
    return response.json()
def mask_image(_name, dir_name):
    """Build a PNG mask Image artifact linking to the remote annotation file."""
    uri = f"{PATH_ROOT}/{ANNO_PATH}/{dir_name}/{_name}"
    return Image(
        display_name=_name,
        mime_type=MIMEType.PNG,
        as_mask=True,
        link=Link(uri=uri),
    )
def build_ds():
    """Build and publish the 'cityscapes_disparity' Starwhale dataset.

    Walks the remote tree.json listing and, for every '*_disparity.png' mask
    found, appends a row linking the mask with the matching left/right 8-bit
    camera images.  Rows store remote links only; no image data is
    downloaded here.
    """
    ds = dataset("cityscapes_disparity")
    # Dataset-level metadata (stereo baseline value; unit not stated here --
    # confirm against the Cityscapes documentation).
    ds.info["baseline"] = 22
    ds.info["homepage"] = "https://www.cityscapes-dataset.com"
    # Remote JSON directory listing of the disparity annotations.
    tree = request_link_json(f"{PATH_ROOT}/{ANNO_PATH}/tree.json")
    for d in tree:
        if d["type"] != "directory":
            continue
        dir_name = d["name"]
        for f in d["contents"]:
            if f["type"] != "file":
                continue
            _name = str(f["name"])
            # Only disparity masks drive row creation; image names derive
            # from the mask name below.
            if not _name.endswith(SUFFIX_MASK):
                continue
            disparity_mask = mask_image(_name, dir_name)
            # Common stem shared by the mask and both camera images.
            name = _name.replace(SUFFIX_MASK, "")
            right_image = Image(
                display_name=name,
                link=Link(
                    uri=f"{PATH_ROOT}/{DATA_PATH_RIGHT}/{dir_name}/{name}{SUFFIX_DATA_RIGHT}"
                ),
                mime_type=MIMEType.JPEG,
            )
            left_image = Image(
                display_name=name,
                link=Link(
                    uri=f"{PATH_ROOT}/{DATA_PATH_LEFT}/{dir_name}/{name}{SUFFIX_DATA_LEFT}"
                ),
                mime_type=MIMEType.JPEG,
            )
            ds.append(
                {
                    "left_image_8bit": left_image,
                    "right_image_8bit": right_image,
                    "disparity_mask": disparity_mask,
                }
            )
    # Persist the rows, then reopen by URI to sanity-check the stored info.
    ds.commit()
    load_ds = dataset(ds.uri)
    print(load_ds.info)
    ds.close()
ds.close()
if __name__ == "__main__":
    # Allow running this module directly to (re)build the dataset.
    build_ds()
| star-whale/starwhale | example/datasets/cityscapes/disparity/dataset.py | dataset.py | py | 2,278 | python | en | code | 171 | github-code | 36 |
17883497405 | import time
t_start_script = time.time()
print(__name__)
import matplotlib.pyplot as plt
import numpy as np
a = np.random.randn(1000 * 100* 100)
print('start_time and prepare data:', time.time() - t_start_script)
print(a.shape)
print(sys.argv)
# >>>
#print('11123',a)
if __name__ == '__main__':
t0 = time.time()
# plt.plot([1, 2, 3, 4, 5, 1])
plt.hist(a, bins=20,color='red')
plt.xlabel('haha')
plt.ylabel('y is lalala')
print(time.time() - t0)
print('full time', time.time() - t_start_script)
plt.show()
| pyminer/pyminer | pyminer/packages/applications_toolbar/apps/cftool/test1.py | test1.py | py | 542 | python | en | code | 77 | github-code | 36 |
15442911590 | import argparse
from . import completion_helpers
class ArgumentParser(argparse.ArgumentParser):
def enable_print_header(self):
self.add_argument(
'-q', action='store_true',
help="Suppresses printing of headers when multiple tasks are " +
"being examined"
)
def task_argument(self, optional=False):
kwargs = {
"default": "",
"type": str,
"help": "ID of the task. May match multiple tasks (or all)"
}
if optional:
kwargs["nargs"] = "?"
self.add_argument('task', **kwargs).completer = completion_helpers.task
def file_argument(self):
self.add_argument(
'file', nargs="*", default=["stdout"],
help="Path to the file inside the task's sandbox."
).completer = completion_helpers.file
def path_argument(self):
self.add_argument(
'path', type=str, nargs="?", default="",
help="""Path to view."""
).completer = completion_helpers.file
| mesosphere-backup/mesos-cli | mesos/cli/parser.py | parser.py | py | 1,067 | python | en | code | 116 | github-code | 36 |
15948222705 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def get_data_splits(X, Y, train_size=0.8):
"""This function splits the whole dataset into train, test and validation sets.
Args:
X (pd.DataFrame): DataFrame containing feature values of data points of shape (num_samples,num_features)
Y (pd.DataFrame): DataFrame containing labels for samples of shape (num_samples,)
train_size (float, optional): size of the training set in (0,1), Defaults to 0.8.
Returns:
tuple: tuple containing X_train, Y_train, X_val, Y_val, X_test, Y_test
"""
X_train, X_, Y_train, Y_ = train_test_split(X, Y, train_size=train_size, stratify=Y)
X_val, X_test, Y_val, Y_test = train_test_split(X_, Y_, train_size=0.5, stratify=Y_)
return X_train, Y_train, X_val, Y_val, X_test, Y_test
def rename_columns(df):
"""This function renames the columns of features DataFrame into cat_ and num_ styles.
Args:
df (pd.DataFrame): A DataFrame containing features as columns and samples as rows
Returns:
pd.DataFrame: A DataFrame with renamed columns.
"""
categorical_cols = ["0", "1", "2", "3", "13"]
numerical_cols = ["4", "5", "6", "7", "8", "9", "10", "11", "12"]
new_names = []
for col_name in df.columns.astype(str).values:
if col_name in numerical_cols:
df[col_name] = pd.to_numeric(df[col_name])
new_names.append((col_name, "num_" + col_name))
elif col_name in categorical_cols:
new_names.append((col_name, "cat_" + col_name))
else: # pragma: no cover, other data
new_names.append((col_name, col_name))
df.rename(columns=dict(new_names), inplace=True)
return df
def preprocess(X_df, Y_df=None, label_col="18", enc=None):
"""This function preprocess the features and labels DataFrames by encoding categorical features and relabeling. If Y_df is not None, normal samples get label 1 and anomalies get label -1.
Args:
X_df (pd.DataFrame): DataFrame containing features with renamed columns.
Y_df (pd.DataFrame, optional): DataFrame containing the labels for samples in X_df. Defaults to None.
label_col (str, optional): Name of the label column in Y_df. Defaults to "18".
enc (sklearn.preprocessing.OneHotEncoder, optional): Fitted OneHotEncoder, a new encoder will be fit to the data if none is given. Defaults to None.
Returns:
tuple: df_new, Y_df, enc, the encoded features, the labels, and the OneHotEncoder
"""
X_df = X_df.reset_index(drop=True)
if Y_df is not None:
Y_df = Y_df.reset_index(drop=True)
if not enc:
enc = OneHotEncoder(handle_unknown="ignore")
enc.fit(X_df.loc[:, ["cat_" in i for i in X_df.columns]])
num_cat_features = enc.transform(X_df.loc[:, ["cat_" in i for i in X_df.columns]]).toarray()
df_catnum = pd.DataFrame(num_cat_features)
df_catnum = df_catnum.add_prefix("catnum_")
df_new = pd.concat([X_df, df_catnum], axis=1)
if Y_df is not None:
filter_clear = Y_df[label_col] == 1
filter_infected = Y_df[label_col] < 0
Y_df[label_col][filter_clear] = 1
Y_df[label_col][filter_infected] = -1
return df_new, Y_df, enc
def get_preprocessed_train(X_df, Y_df, label_col="18"):
"""This function prepares the dataset for training. Returns only normal samples.
Args:
X_df (pd.DataFrame): A DataFrame containing features for samples of shape (num_samples, num_features)
Y_df (pd.DataFrame): A DataFrame containing labels for data samples of shape (n_samples,)
label_col (str, optional): Name of the label column in Y_df. Defaults to "18"
Returns:
tuple: X_train_num, Y_train_clear, numerical_cols, ohe_enc: features and labels of normal samples together with names of the columns for training and the OneHotEncoder used in preprocessing
"""
count_norm = X_df[Y_df[label_col] == 1].shape[0]
count_anomaly = X_df[Y_df[label_col] != 1].shape[0]
print("normal:", count_norm, "anomalies:", count_anomaly)
X_df = rename_columns(X_df)
X_df, Y_df, ohe_enc = preprocess(X_df, Y_df)
# select numerical features
numerical_cols = X_df.columns.to_numpy()[["num_" in i for i in X_df.columns]]
X_train_clear = X_df[Y_df[label_col] == 1]
Y_train_clear = Y_df[Y_df[label_col] == 1]
X_train_num = X_train_clear[numerical_cols]
return X_train_num, Y_train_clear, numerical_cols, ohe_enc
def get_test(X_df, enc, Y_df=None):
    """This function works similar to get_preprocessed_train, but prepares the test dataset for evaluation or inference purposes.
    Args:
        X_df (pd.DataFrame): A DataFrame containing features for samples of shape (num_samples, num_features)
        enc (sklearn.preprocessing.OneHotEncoder): A fitted OneHotEncoder for transforming features
        Y_df (pd.DataFrame, optional): A DataFrame containing labels for data samples of shape (n_samples,), is none for inference purposes. Defaults to None.
    Returns:
        tuple: X_df, Y_df, preprocessed features and labels
    """
    X_df = rename_columns(X_df)
    # Reuse the encoder fitted on the training split so the one-hot columns
    # line up between train and test.
    X_df, Y_df, _ = preprocess(X_df, Y_df, enc=enc)
    # Keep only numerical features (columns whose name contains "num_").
    numerical_cols = X_df.columns.to_numpy()[["num_" in i for i in X_df.columns]]
    X_df = X_df[numerical_cols]
    return X_df, Y_df
| rahbararman/AnoShiftIDS | IDSAnoShift/data.py | data.py | py | 5,408 | python | en | code | 0 | github-code | 36 |
15009189438 | import pathlib
from typing import Optional
import essentia.standard as es
import numpy as np
import pyrubberband as pyrb
from madmom.features.downbeats import DBNDownBeatTrackingProcessor, RNNDownBeatProcessor
from mixer.logger import logger
SAMPLE_RATE = 44100 # Sample rate fixed for essentia
class TrackProcessor:
    """Wraps a single audio file: loading, tempo/downbeat analysis,
    time-stretching to a target BPM, and cropping on downbeat boundaries."""
    # Class-level alias of the module constant (essentia works at 44100 Hz).
    SAMPLE_RATE = SAMPLE_RATE
    def __init__(self, file_path: str, name: Optional[str] = None) -> None:
        """
        Parameters
        ----------
        file_path : str
            absolute or relative location of track audio file
        name : Optional[str]
            name to give to track if not present in file path
        """
        self._file_path = pathlib.Path(file_path)
        if name is None:
            self._name = self._file_path.stem
        else:
            self._name = name
        # Lazily populated by load() / calculate_bpm() / calculate_downbeats().
        self._audio = np.array([])
        self._bpm = None
        self._downbeats = np.array([])
    def __str__(self):
        return self._name
    @property
    def audio(self) -> np.ndarray:
        return self._audio
    @property
    def downbeats(self) -> np.ndarray:
        return self._downbeats
    @property
    def bpm(self) -> Optional[float]:
        return self._bpm
    @bpm.setter
    def bpm(self, bpm: float) -> np.ndarray:
        """
        Time stretch audio file to increase BPM to target
        NOTE(review): the property-setter protocol discards return values,
        so the np.ndarray returned here is unreachable via `track.bpm = x`.
        Parameters
        ----------
        bpm : float
            intended BPM of audio
        Returns
        -------
        np.ndarray
            time-stretched audio
        """
        if self._bpm is None:
            self.calculate_bpm()
        assert self._bpm is not None
        stretch_factor = bpm / self._bpm
        self._audio = pyrb.time_stretch(
            self._audio, SAMPLE_RATE, stretch_factor
        ).astype(np.float32)
        # Re-estimate so self._bpm reflects the stretched audio.
        self.calculate_bpm()
        logger.info(f"Tempo for {self} set to {round(self._bpm, 2)}")
        return self._audio
    def load(self, path: Optional[str] = None) -> np.ndarray:
        """
        Load an audio file from a given path.
        Parameters
        ----------
        path : Optional[str]
            local path to audio file
            if None, file_path attribute value used
        Returns
        -------
        np.ndarray
            mono representation of audio file
        """
        if path is None:
            path = str(self._file_path.resolve())
        loader = es.MonoLoader(filename=path, sampleRate=SAMPLE_RATE)
        self._audio = loader()
        logger.info(f"Loaded audio for {self}")
        return self._audio
    def crop(self, offset: int, length: int) -> None:
        """
        Crop track using number of downbeats.
        NOTE(review): assumes offset + length is a valid index into the
        detected downbeats; an IndexError is raised otherwise -- confirm
        callers validate this.
        Parameters
        ----------
        offset : int
            number of downbeats into original audio to crop from
        length : int
            number of downbeats that new audio will contain
        """
        if self.downbeats.size == 0:
            self.calculate_downbeats()
        start_sample = int(self._downbeats[offset] * SAMPLE_RATE)
        end_sample = int(self._downbeats[offset + length] * SAMPLE_RATE)
        self._audio = self._audio[start_sample : end_sample + 1]
        logger.info(
            f"Cropped {self} audio between downbeats {offset} and {offset + length}"
        )
    def calculate_bpm(self) -> float:
        """
        Determine BPM for audio using essentia
        Returns
        -------
        bpm : float
            tempo of audio file
        """
        rhythm_extractor = es.RhythmExtractor2013(method="degara")
        self._bpm, _, _, _, _ = rhythm_extractor(self._audio)
        assert self._bpm is not None
        logger.info(f"Calculated tempo for {self} at {round(self._bpm, 2)}")
        return self._bpm
    def calculate_downbeats(self) -> None:
        """
        Use madmom downbeat tracking to estimate downbeat time points for audio file.
        """
        proc = DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], fps=100)
        act = RNNDownBeatProcessor()(self._audio)
        proc_res = proc(act)
        # Column 1 is the beat position within the bar; keep times of beat 1.
        self._downbeats = proc_res[proc_res[:, 1] == 1, 0].astype(np.float32)
        logger.info(f"Calculated downbeats for {self}")
class TrackGroupProcessor:
    """Holds a collection of tracks and keeps their average tempo up to date."""
    def __init__(self) -> None:
        # Member tracks and the cached group-average BPM.
        self._tracks: list[TrackProcessor] = []
        self._bpm: Optional[float] = None
    @property
    def bpm(self) -> Optional[float]:
        """Average tempo of the group (None until a track is added)."""
        return self._bpm
    @property
    def tracks(self) -> list[TrackProcessor]:
        """Tracks currently in the group."""
        return self._tracks
    def add_track(self, track: TrackProcessor) -> None:
        """
        Add a track to the track group.
        Parameters
        ----------
        track : TrackProcessor
            track to be added
        """
        self._tracks.append(track)
        # Keep the cached average in sync with the new membership.
        self.calculate_bpm()
    def calculate_bpm(self):
        """
        Calculate average bpm of current tracks in group.
        """
        # Arithmetic mean over every member's tempo.
        total = sum(track.bpm for track in self._tracks)
        self._bpm = total / len(self._tracks)
| joekitsmith/mixer | mixer/processors/track.py | track.py | py | 5,050 | python | en | code | 0 | github-code | 36 |
22163777058 | #!/usr/bin/env python
import csv
import gzip
import os
import re
import sys
from pyproj import Transformer
# Assessor CSV rows can contain very large fields; lift the default limit.
csv.field_size_limit(sys.maxsize)
# Captures the combined-installment dollar amount from a scraped tax page.
# Raw string so \s, \$, \d, \. are regex escapes, not (invalid) string
# escape sequences (a SyntaxWarning on modern Python).
AMOUNT_REGEX = re.compile(r'Both Installment[\s\S]+?\$([\d,\.]+)')
# California Zone 3 (EPSG:2227, state plane) -> WGS84 lat/lon
# https://epsg.io/2227
transformer = Transformer.from_crs(2227, 4326)
def get_val(row, key):
    """Return the numeric value stored under ``key`` in ``row``.

    Surrounding whitespace is stripped; a blank field is treated as 0.0 so
    downstream arithmetic never sees ``''``.

    Args:
        row: mapping of column name -> raw string value (e.g. a csv.DictReader row).
        key: column to read.

    Returns:
        float: parsed value, or 0.0 for a blank field.
    """
    val = row[key].strip()
    if not val:
        # Blank cell -> zero; return a float so the type is consistent.
        return 0.0
    return float(val)
# Walk every parcel row, project its coordinates, pull the scraped tax page,
# and emit one output row per parcel with a parsable tax amount.
with open('/home/ian/Downloads/Santa_Cruz_Assessor_Parcels.csv') as f_in, \
        open('./parse_output.csv', 'w') as f_out:
    reader = csv.DictReader(f_in)
    fieldnames = ['address', 'apn', 'longitude', 'latitude', 'tax', 'county']
    # NOTE(review): no writeheader() call -- output is headerless; confirm
    # downstream consumers expect that before adding one.
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    count = 0
    for row in reader:
        count += 1
        if count % 1000 == 0:
            print(count, '...')
        apn = row['APN']
        address = row['SITEADD']
        try:
            x_coord = float(row['XCOORD'])
            y_coord = float(row['YCOORD'])
        except (KeyError, TypeError, ValueError):
            print('-> bad coords')
            continue
        # Project state-plane (EPSG:2227) coordinates to WGS84 (lat, lon).
        centroid = transformer.transform(x_coord, y_coord)
        print(count, apn, address, centroid)
        output_path = '/home/ian/code/prop13/scrapers/santa_cruz/scrape_output/%s.html' % (apn)
        if not os.path.exists(output_path):
            print('-> no scraped file')
            continue
        try:
            # Distinct name: rebinding f_in here would shadow the assessor CSV
            # handle bound by the enclosing with-statement.
            with gzip.open(output_path, 'rt') as html_file:
                html = html_file.read()
        except (OSError, EOFError, UnicodeDecodeError):
            print('--> bad file')
            continue
        match = AMOUNT_REGEX.search(html)
        if match is None:
            # Previously this path raised AttributeError and then crashed with
            # an unbound `amount_str` inside the except handler.
            print('--> Could not parse float')
            continue
        amount_str = match.group(1).replace(',', '')
        try:
            amount = float(amount_str)
        except ValueError:
            print('--> Could not parse float', amount_str)
            continue
        print('--> Paid', amount)
        writer.writerow({
            'address': address,
            'apn': apn,
            'latitude': centroid[0],
            'longitude': centroid[1],
            'tax': amount,
            'county': 'SCZ',
        })
| typpo/ca-property-tax | scrapers/santa_cruz/parse.py | parse.py | py | 2,041 | python | en | code | 89 | github-code | 36 |
15514519322 | import json
import os
import shutil
import sys
import tempfile
import unittest
from compare_perf_tests import LogParser
from compare_perf_tests import PerformanceTestResult
from compare_perf_tests import ReportFormatter
from compare_perf_tests import ResultComparison
from compare_perf_tests import TestComparator
from compare_perf_tests import main
from compare_perf_tests import parse_args
from test_utils import captured_output
class TestPerformanceTestResult(unittest.TestCase):
    """Unit tests for parsing PerformanceTestResult from the legacy CSV,
    quantile, and JSON formats, and for merging results across runs."""
    def test_init(self):
        # Legacy CSV format without memory column.
        header = "#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN"
        log_line = "1,AngryPhonebook,20,10664,12933,11035,576,10884"
        r = PerformanceTestResult.fromOldFormat(header, log_line)
        self.assertEqual(r.test_num, 1)
        self.assertEqual(r.name, "AngryPhonebook")
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (20, 10664, 12933, 11035, 576, 10884),
        )
        self.assertEqual(r.samples, [])
        # Same format with the optional MAX_RSS memory column.
        header = "#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN,MAX_RSS"
        log_line = "1,AngryPhonebook,1,12045,12045,12045,0,12045,10510336"
        r = PerformanceTestResult.fromOldFormat(header, log_line)
        self.assertEqual(r.max_rss, 10510336)
    def test_init_quantiles(self):
        # 3 quantile columns (min/median/max) reconstruct all 3 samples.
        header = "#,TEST,SAMPLES,MIN(μs),MEDIAN(μs),MAX(μs)"
        log = "1,Ackermann,3,54383,54512,54601"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(r.test_num, 1)
        self.assertEqual(r.name, "Ackermann")
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value),
            (3, 54383, 54512, 54601)
        )
        self.assertAlmostEqual(r.mean, 54498.67, places=2)
        self.assertAlmostEqual(r.sd, 109.61, places=2)
        self.assertEqual(r.samples, [54383, 54512, 54601])
        header = "#,TEST,SAMPLES,MIN(μs),MEDIAN(μs),MAX(μs),MAX_RSS(B)"
        log = "1,Ackermann,3,54529,54760,55807,266240"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual((len(r.samples), r.max_rss), (3, 266240))
        # 4-quantile (quartile) format.
        header = "#,TEST,SAMPLES,MIN(μs),Q1(μs),Q2(μs),Q3(μs),MAX(μs)"
        log = "1,Ackermann,5,54570,54593,54644,57212,58304"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value),
            (5, 54570, 54644, 58304)
        )
        self.assertEqual((r.q1, r.q3), (54581.5, 57758))
        self.assertEqual(len(r.samples), 5)
        header = "#,TEST,SAMPLES,MIN(μs),Q1(μs),Q2(μs),Q3(μs),MAX(μs),MAX_RSS(B)"
        log = "1,Ackermann,5,54686,54731,54774,55030,63466,270336"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(r.num_samples, 5)
        self.assertEqual(len(r.samples), 5)
        self.assertEqual(r.max_rss, 270336)
    def test_init_delta_quantiles(self):
        # 2-quantile from 2 samples in repeated min, when delta encoded,
        # the difference is 0, which is omitted -- only separator remains
        header = "#,TEST,SAMPLES,MIN(μs),𝚫MEDIAN,𝚫MAX"
        log = "202,DropWhileArray,2,265,,22"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.median, r.max_value),
                         (2, 265, 276, 287))
        self.assertEqual(len(r.samples), 2)
        self.assertEqual(r.num_samples, 2)
    def test_init_oversampled_quantiles(self):
        """When num_samples is < quantile + 1, some of the measurements are
        repeated in the report summary. Samples should contain only true
        values, discarding the repeated artifacts from quantile estimation.
        The test string is slightly massaged output of the following R script:
        subsample <- function(x, q) {
            quantile(1:x, probs=((0:(q-1))/(q-1)), type=1)}
        tbl <- function(s) t(sapply(1:s, function(x) {
            qs <- subsample(x, s); c(qs[1], diff(qs)) }))
        sapply(c(3, 5, 11, 21), tbl)
        TODO: Delete this test when we delete quantile support from the
        benchmark harness. Reconstructing samples from quantiles as this code is
        trying to do is not really statistically sound, which is why we're going
        to delete most of this in favor of an architecture where the
        lowest-level benchmarking logic reports samples, we store and pass
        raw sample data around as much as possible, and summary statistics are
        only computed as necessary for actual reporting (and then discarded,
        since we can recompute anything we need if we always have the raw
        samples available).
        """
        def validatePTR(deq): # construct from delta encoded quantiles string
            deq = deq.split(",")
            num_samples = deq.count("1")
            r = PerformanceTestResult(
                ["0", "B", str(num_samples)] + deq, quantiles=True, delta=True
            )
            self.assertEqual(len(r.samples), num_samples)
            # NOTE(review): compares a list to a range object, which is always
            # False on Python 3 -- benign only because this helper is never
            # actually invoked (see the lazy map() below).
            self.assertEqual(r.samples, range(1, num_samples + 1))
        delta_encoded_quantiles = """
            1,,
            1,,1
            1,,,,
            1,,,1,
            1,,1,1,
            1,,1,1,1
            1,,,,,,,,,,
            1,,,,,,1,,,,
            1,,,,1,,,1,,,
            1,,,1,,,1,,1,,
            1,,,1,,1,,1,,1,
            1,,1,,1,,1,1,,1,
            1,,1,1,,1,1,,1,1,
            1,,1,1,1,,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1
            1,,,,,,,,,,,,,,,,,,,,
            1,,,,,,,,,,,1,,,,,,,,,
            1,,,,,,,1,,,,,,,1,,,,,,
            1,,,,,,1,,,,,1,,,,,1,,,,
            1,,,,,1,,,,1,,,,1,,,,1,,,
            1,,,,1,,,1,,,,1,,,1,,,1,,,
            1,,,1,,,1,,,1,,,1,,,1,,,1,,
            1,,,1,,,1,,1,,,1,,1,,,1,,1,,
            1,,,1,,1,,1,,1,,,1,,1,,1,,1,,
            1,,,1,,1,,1,,1,,1,,1,,1,,1,,1,
            1,,1,,1,,1,,1,,1,1,,1,,1,,1,,1,
            1,,1,,1,,1,1,,1,,1,1,,1,,1,1,,1,
            1,,1,,1,1,,1,1,,1,1,,1,1,,1,1,,1,
            1,,1,1,,1,1,,1,1,,1,1,1,,1,1,,1,1,
            1,,1,1,,1,1,1,,1,1,1,,1,1,1,,1,1,1,
            1,,1,1,1,,1,1,1,1,,1,1,1,1,,1,1,1,1,
            1,,1,1,1,1,1,,1,1,1,1,1,1,,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,,1,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"""
        # NOTE(review): map() is lazy on Python 3, so validatePTR is never
        # invoked here and none of its assertions run; wrap in list(...) or
        # use a for-loop if these checks are meant to execute.
        map(validatePTR, delta_encoded_quantiles.split("\n")[1:])
    def test_init_meta(self):
        # Meta columns (PAGES, ICS, YIELD) without memory.
        header = (
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),"
            + "MEDIAN(μs),PAGES,ICS,YIELD"
        )
        log = "1,Ackermann,200,715,1281,726,47,715,7,29,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.test_num, r.name), (1, "Ackermann"))
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (200, 715, 1281, 726, 47, 715),
        )
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (7, 29, 15))
        # Meta columns together with MAX_RSS.
        header = (
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "MAX_RSS(B),PAGES,ICS,YIELD"
        )
        log = "1,Ackermann,200,715,1951,734,97,715,36864,9,50,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (200, 715, 1951, 734, 97, 715),
        )
        self.assertEqual(
            (r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (9, 50, 15, 36864),
        )
        header = "#,TEST,SAMPLES,MIN(μs),MAX(μs),PAGES,ICS,YIELD"
        log = "1,Ackermann,200,715,3548,8,31,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.max_value), (200, 715, 3548))
        self.assertEqual(r.samples, [])
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (8, 31, 15))
        header = "#,TEST,SAMPLES,MIN(μs),MAX(μs),MAX_RSS(B),PAGES,ICS,YIELD"
        log = "1,Ackermann,200,715,1259,32768,8,28,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.max_value), (200, 715, 1259))
        self.assertEqual(r.samples, [])
        self.assertEqual(r.max_rss, 32768)
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (8, 28, 15))
    def test_merge(self):
        # JSON-format results: merging accumulates samples and recomputes stats.
        tests = [
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12045]}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616],"max_rss":10502144}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12270],"max_rss":10498048}"""
        ]
        # NOTE(review): the comprehension variable shadows the imported json
        # module (harmless within this method, but rename if extended).
        results = [PerformanceTestResult(json) for json in tests]
        def as_tuple(r):
            return (
                r.num_samples,
                r.min_value,
                r.max_value,
                round(r.mean, 2),
                round(r.sd, 2),
                r.median,
                r.max_rss,
            )
        r = results[0]
        self.assertEqual(as_tuple(r), (1, 12045, 12045, 12045, 0, 12045, None))
        r.merge(results[1])
        self.assertEqual(
            as_tuple(r),
            (2, 12045, 12325, 12185, 197.99, 12185, 10510336),
        )
        r.merge(results[2])
        self.assertEqual(
            as_tuple(r),
            (3, 11616, 12325, 11995.33, 357.1, 12045, 10502144),
        )
        r.merge(results[3])
        self.assertEqual(
            as_tuple(r),
            (4, 11616, 12325, 12064, 322.29, 12157.5, 10498048),
        )
    def test_legacy_merge(self):
        # Legacy-format results carry no raw samples, so merging loses SD/median.
        header = """#,TEST,NUM_SAMPLES,MIN,MAX,MEAN,SD,MEDIAN, MAX_RSS"""
        tests = [
            """1,AngryPhonebook,8,12045,12045,12045,0,12045""",
            """1,AngryPhonebook,8,12325,12325,12325,0,12325,10510336""",
            """1,AngryPhonebook,8,11616,11616,11616,0,11616,10502144""",
            """1,AngryPhonebook,8,12270,12270,12270,0,12270,10498048"""
        ]
        results = [PerformanceTestResult.fromOldFormat(header, row) for row in tests]
        def as_tuple(r):
            return (
                r.num_samples,
                r.min_value,
                r.max_value,
                round(r.mean, 2),
                round(r.sd, 2) if r.sd is not None else None,
                r.median,
                r.max_rss,
            )
        r = results[0]
        self.assertEqual(as_tuple(r), (8, 12045, 12045, 12045, 0, 12045, None))
        r.merge(results[1])
        self.assertEqual(
            as_tuple(r), # Note: SD, Median are lost
            (16, 12045, 12325, 12185, None, None, 10510336),
        )
        r.merge(results[2])
        self.assertEqual(
            as_tuple(r),
            (24, 11616, 12325, 11995.33, None, None, 10502144),
        )
        r.merge(results[3])
        self.assertEqual(
            as_tuple(r),
            (32, 11616, 12325, 12064, None, None, 10498048),
        )
class TestResultComparison(unittest.TestCase):
    """Tests for ResultComparison: ratio/delta math and the dubious-result
    flag, including the degenerate all-zero-measurement case."""
    def setUp(self):
        # r0/r01 model a test whose body was optimized away (zero samples).
        self.r0 = PerformanceTestResult(
            """{"number":101,"name":"GlobalClass",
            "samples":[0,0,0,0,0],"max_rss":10185728}"""
        )
        self.r01 = PerformanceTestResult(
            """{"number":101,"name":"GlobalClass",
            "samples":[20,20,20],"max_rss":10185728}"""
        )
        # r1/r2/r3 are the same benchmark with differing sample sets.
        self.r1 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}"""
        )
        self.r2 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616],"max_rss":10502144}"""
        )
        self.r3 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616,12326],"max_rss":10502144}"""
        )
    def test_init(self):
        rc = ResultComparison(self.r1, self.r2)
        self.assertEqual(rc.name, "AngryPhonebook")
        self.assertAlmostEqual(rc.ratio, 12325.0 / 11616.0)
        self.assertAlmostEqual(rc.delta, (((11616.0 / 12325.0) - 1) * 100), places=3)
        # handle test results that sometimes change to zero, when compiler
        # optimizes out the body of the incorrectly written test
        rc = ResultComparison(self.r0, self.r0)
        self.assertEqual(rc.name, "GlobalClass")
        self.assertAlmostEqual(rc.ratio, 1)
        self.assertAlmostEqual(rc.delta, 0, places=3)
        rc = ResultComparison(self.r0, self.r01)
        self.assertAlmostEqual(rc.ratio, 0, places=3)
        self.assertAlmostEqual(rc.delta, 2000000, places=3)
        rc = ResultComparison(self.r01, self.r0)
        self.assertAlmostEqual(rc.ratio, 20001)
        self.assertAlmostEqual(rc.delta, -99.995, places=3)
        # disallow comparison of different test results
        self.assertRaises(AssertionError, ResultComparison, self.r0, self.r1)
    def test_values_is_dubious(self):
        # A comparison is dubious when the min/max ranges overlap.
        self.assertFalse(ResultComparison(self.r1, self.r2).is_dubious)
        # new.min < old.min < new.max
        self.assertTrue(ResultComparison(self.r1, self.r3).is_dubious)
        # other way around: old.min < new.min < old.max
        self.assertTrue(ResultComparison(self.r3, self.r1).is_dubious)
class FileSystemIntegration(unittest.TestCase):
    """Base class giving tests a throwaway directory plus a helper to drop
    newline-terminated files into it."""
    def setUp(self):
        # Fresh scratch directory for every test case.
        self.test_dir = tempfile.mkdtemp()
    def tearDown(self):
        # Discard the scratch directory and everything written into it.
        shutil.rmtree(self.test_dir)
    def write_temp_file(self, file_name, data):
        """Write each item of `data` as its own line under the scratch
        directory and return the resulting file path."""
        path = os.path.join(self.test_dir, file_name)
        with open(path, "w") as out:
            out.writelines(line + '\n' for line in data)
        return path
class OldAndNewLog(unittest.TestCase):
    """Shared fixture: two JSON-format benchmark logs ("old" and "new") and
    the parsed result dictionaries derived from them at class-creation time."""
    old_log_content = [
        """{"number":1,"name":"AngryPhonebook","""
        + """"samples":[10458,12714,11000],"max_rss":10204365}""",
        """{"number":2,"name":"AnyHashableWithAClass","""
        + """"samples":[247027,319065,259056,259056],"max_rss":10250445}""",
        """{"number":3,"name":"Array2D","""
        + """"samples":[335831,400221,346622,346622],"max_rss":28297216}""",
        """{"number":4,"name":"ArrayAppend","""
        + """"samples":[23641,29000,24990,24990],"max_rss":11149926}""",
        """{"number":34,"name":"BitCount","samples":[3,4,4,4],"max_rss":10192896}""",
        """{"number":35,"name":"ByteSwap","samples":[4,6,4,4],"max_rss":10185933}"""
    ]
    new_log_content = [
        """{"number":265,"name":"TwoSum","samples":[5006,5679,5111,5111]}""",
        """{"number":35,"name":"ByteSwap","samples":[0,0,0,0,0]}""",
        """{"number":34,"name":"BitCount","samples":[9,9,9,9]}""",
        """{"number":4,"name":"ArrayAppend","samples":[20000,29000,24990,24990]}""",
        """{"number":3,"name":"Array2D","samples":[335831,400221,346622,346622]}""",
        """{"number":1,"name":"AngryPhonebook","samples":[10458,12714,11000,11000]}"""
    ]
    # Class-body helper (deliberately no `self`): consumed by the map() calls
    # below while the class body executes.
    def makeResult(json_text):
        return PerformanceTestResult(json.loads(json_text))
    # Name -> result lookup tables built once at class-creation time.
    old_results = dict(
        [
            (r.name, r) for r in map(makeResult, old_log_content)
        ]
    )
    new_results = dict(
        [
            (r.name, r) for r in map(makeResult, new_log_content)
        ]
    )
    def assert_report_contains(self, texts, report):
        # Guard: a bare string would otherwise be iterated character by character.
        assert not isinstance(texts, str)
        for text in texts:
            self.assertIn(text, report)
class TestLogParser(unittest.TestCase):
    """Tests for LogParser across the CSV, tab-delimited, console, quantile,
    delta-quantile and meta-column log formats, plus merging on re-parse."""
    def test_parse_results_csv(self):
        """Ignores unknown lines, extracts data from supported formats."""
        log = """#,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us)
7,Array.append.Array.Int?,20,10,10,10,0,10
21,Bridging.NSArray.as!.Array.NSString,20,11,11,11,0,11
42,Flatten.Array.Tuple4.lazy.for-in.Reserve,20,3,4,4,0,4
Total performance tests executed: 1
"""
        parser = LogParser()
        results = parser.parse_results(log.splitlines())
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        self.assertEqual(results[0].name, "Array.append.Array.Int?")
        self.assertEqual(results[1].name, "Bridging.NSArray.as!.Array.NSString")
        self.assertEqual(results[2].name, "Flatten.Array.Tuple4.lazy.for-in.Reserve")
    def test_parse_results_tab_delimited(self):
        # Tab-separated variant of the legacy format.
        log = "34\tBitCount\t20\t3\t4\t4\t0\t4"
        parser = LogParser()
        results = parser.parse_results(log.splitlines())
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        self.assertEqual(results[0].name, "BitCount")
    def test_parse_results_formatted_text(self):
        """Parse format that Benchmark_Driver prints to console"""
        log = """
# TEST SAMPLES MIN(μs) MAX(μs) MEAN(μs) SD(μs) MEDIAN(μs) MAX_RSS(B)
3 Array2D 20 2060 2188 2099 0 2099 20915200
Total performance tests executed: 1
"""
        parser = LogParser()
        results = parser.parse_results(log.splitlines()[1:]) # without 1st \n
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        r = results[0]
        self.assertEqual(r.name, "Array2D")
        self.assertEqual(r.max_rss, 20915200)
    def test_parse_quantiles(self):
        """Gathers samples from reported quantiles. Handles optional memory."""
        r = LogParser.results_from_string(
            """#,TEST,SAMPLES,QMIN(μs),MEDIAN(μs),MAX(μs)
1,Ackermann,3,54383,54512,54601"""
        )["Ackermann"]
        self.assertEqual(r.samples, [54383, 54512, 54601])
        r = LogParser.results_from_string(
            """#,TEST,SAMPLES,QMIN(μs),MEDIAN(μs),MAX(μs),MAX_RSS(B)
1,Ackermann,3,54529,54760,55807,266240"""
        )["Ackermann"]
        self.assertEqual(r.samples, [54529, 54760, 55807])
        self.assertEqual(r.max_rss, 266240)
    def test_parse_delta_quantiles(self):
        # Delta-encoded quantiles: empty fields mean "difference of 0".
        r = LogParser.results_from_string( # 2-quantile aka. median
            "#,TEST,SAMPLES,QMIN(μs),𝚫MEDIAN,𝚫MAX\n0,B,1,101,,"
        )["B"]
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value, len(r.samples)),
            (1, 101, 101, 101, 1),
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),𝚫MEDIAN,𝚫MAX\n0,B,2,101,,1"
        )["B"]
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value, len(r.samples)),
            (2, 101, 101.5, 102, 2),
        )
        r = LogParser.results_from_string( # 20-quantiles aka. ventiles
            "#,TEST,SAMPLES,QMIN(μs),𝚫V1,𝚫V2,𝚫V3,𝚫V4,𝚫V5,𝚫V6,𝚫V7,𝚫V8,"
            + "𝚫V9,𝚫VA,𝚫VB,𝚫VC,𝚫VD,𝚫VE,𝚫VF,𝚫VG,𝚫VH,𝚫VI,𝚫VJ,𝚫MAX\n"
            + "202,DropWhileArray,200,214,,,,,,,,,,,,1,,,,,,2,16,464"
        )["DropWhileArray"]
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, len(r.samples)),
            (200, 214, 697, 0),
        )
    def test_parse_meta(self):
        # PAGES/ICS/YIELD meta columns in all four header permutations.
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "PAGES,ICS,YIELD\n"
            + "0,B,1,2,2,2,0,2,7,29,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count), (2, 7, 29, 15)
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "MAX_RSS(B),PAGES,ICS,YIELD\n"
            + "0,B,1,3,3,3,0,3,36864,9,50,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (3, 9, 50, 15, 36864),
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),MAX(μs),PAGES,ICS,YIELD\n" + "0,B,1,4,4,8,31,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count), (4, 8, 31, 15)
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),MAX(μs),MAX_RSS(B),PAGES,ICS,YIELD\n"
            + "0,B,1,5,5,32768,8,28,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (5, 8, 28, 15, 32768),
        )
    def test_results_from_merge(self):
        """Parsing concatenated log merges same PerformanceTestResults"""
        concatenated_logs = """#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN
4,ArrayAppend,20,23641,29000,24990,0,24990
4,ArrayAppend,1,20000,20000,20000,0,20000"""
        results = LogParser.results_from_string(concatenated_logs)
        self.assertEqual(list(results.keys()), ["ArrayAppend"])
        result = results["ArrayAppend"]
        self.assertTrue(isinstance(result, PerformanceTestResult))
        self.assertEqual(result.min_value, 20000)
        self.assertEqual(result.max_value, 29000)
class TestTestComparator(OldAndNewLog):
    """Tests that TestComparator buckets results into unchanged/increased/
    decreased/added/removed, in both comparison directions and with a
    custom delta threshold."""
    def test_init(self):
        def names(tests):
            return [t.name for t in tests]
        tc = TestComparator(self.old_results, self.new_results, 0.05)
        self.assertEqual(names(tc.unchanged), ["AngryPhonebook", "Array2D"])
        # NOTE(review): assertion disabled -- re-enable or delete once the
        # expected `increased` bucket is settled.
        # self.assertEqual(names(tc.increased), ["ByteSwap", "ArrayAppend"])
        self.assertEqual(names(tc.decreased), ["BitCount"])
        self.assertEqual(names(tc.added), ["TwoSum"])
        self.assertEqual(names(tc.removed), ["AnyHashableWithAClass"])
        # other way around
        tc = TestComparator(self.new_results, self.old_results, 0.05)
        self.assertEqual(names(tc.unchanged), ["AngryPhonebook", "Array2D"])
        self.assertEqual(names(tc.increased), ["BitCount"])
        self.assertEqual(names(tc.decreased), ["ByteSwap", "ArrayAppend"])
        self.assertEqual(names(tc.added), ["AnyHashableWithAClass"])
        self.assertEqual(names(tc.removed), ["TwoSum"])
        # delta_threshold determines the sorting into change groups;
        # report only change above 100% (ByteSwap's runtime went to 0):
        tc = TestComparator(self.old_results, self.new_results, 1)
        self.assertEqual(
            names(tc.unchanged),
            ["AngryPhonebook", "Array2D", "ArrayAppend", "BitCount"],
        )
        self.assertEqual(names(tc.increased), ["ByteSwap"])
        self.assertEqual(tc.decreased, [])
class TestReportFormatter(OldAndNewLog):
    """Golden-string tests for ReportFormatter's markdown, git and html
    output: column layout, headers, speedup emphasis, sectioning, the
    changes-only filter, and the single-table variant."""
    def setUp(self):
        super(TestReportFormatter, self).setUp()
        self.tc = TestComparator(self.old_results, self.new_results, 0.05)
        self.rf = ReportFormatter(self.tc, changes_only=False)
        # Render all three formats once; individual tests assert substrings.
        self.markdown = self.rf.markdown()
        self.git = self.rf.git()
        self.html = self.rf.html()
    def assert_markdown_contains(self, texts):
        self.assert_report_contains(texts, self.markdown)
    def assert_git_contains(self, texts):
        self.assert_report_contains(texts, self.git)
    def assert_html_contains(self, texts):
        self.assert_report_contains(texts, self.html)
    def test_values(self):
        # Row values for a plain result: (name, min, max, mean, max_rss).
        self.assertEqual(
            ReportFormatter.values(
                PerformanceTestResult(
                    """{"number":1,"name":"AngryPhonebook",
                    "samples":[10664,12933,11035,10884]}"""
                )
            ),
            ("AngryPhonebook", "10664", "12933", "11379", "—"),
        )
        self.assertEqual(
            ReportFormatter.values(
                PerformanceTestResult(
                    """{"number":1,"name":"AngryPhonebook",
                    "samples":[12045],"max_rss":10510336}"""
                )
            ),
            ("AngryPhonebook", "12045", "12045", "12045", "10510336"),
        )
        # Row values for a comparison: (name, old, new, delta, ratio).
        r1 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}"""
        )
        r2 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616],"max_rss":10510336}"""
        )
        self.assertEqual(
            ReportFormatter.values(ResultComparison(r1, r2)),
            ("AngryPhonebook", "12325", "11616", "-5.8%", "1.06x"),
        )
        self.assertEqual(
            ReportFormatter.values(ResultComparison(r2, r1)),
            ("AngryPhonebook", "11616", "12325", "+6.1%", "0.94x"),
        )
        r1 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}"""
        )
        r2 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616,12326],"max_rss":10510336}"""
        )
        self.assertEqual(
            ReportFormatter.values(ResultComparison(r1, r2))[4],
            "1.06x (?)", # is_dubious
        )
    def test_justified_columns(self):
        """Table columns are all formated with same width, defined by the
        longest value.
        """
        self.assert_markdown_contains(
            [
                "AnyHashableWithAClass | 247027 | 319065 | 271051 | 10250445",
                "Array2D | 335831 | 335831 | +0.0% | 1.00x",
            ]
        )
        self.assert_git_contains(
            [
                "AnyHashableWithAClass 247027 319065 271051 10250445",
                "Array2D 335831 335831 +0.0% 1.00x",
            ]
        )
    def test_column_headers(self):
        """Report contains table headers for ResultComparisons and changed
        PerformanceTestResults.
        """
        performance_test_result = self.tc.added[0]
        self.assertEqual(
            ReportFormatter.header_for(performance_test_result),
            ("TEST", "MIN", "MAX", "MEAN", "MAX_RSS"),
        )
        comparison_result = self.tc.increased[0]
        self.assertEqual(
            ReportFormatter.header_for(comparison_result),
            ("TEST", "OLD", "NEW", "DELTA", "RATIO"),
        )
        self.assert_markdown_contains(
            [
                "TEST | OLD | NEW | DELTA | RATIO",
                ":--- | ---: | ---: | ---: | ---: ",
                "TEST | MIN | MAX | MEAN | MAX_RSS",
            ]
        )
        self.assert_git_contains(
            [
                "TEST OLD NEW DELTA RATIO",
                "TEST MIN MAX MEAN MAX_RSS",
            ]
        )
        self.assert_html_contains(
            [
                """
                <th align='left'>OLD</th>
                <th align='left'>NEW</th>
                <th align='left'>DELTA</th>
                <th align='left'>RATIO</th>""",
                """
                <th align='left'>MIN</th>
                <th align='left'>MAX</th>
                <th align='left'>MEAN</th>
                <th align='left'>MAX_RSS</th>""",
            ]
        )
    def test_emphasize_speedup(self):
        """Emphasize speedup values for regressions and improvements"""
        # tests in No Changes don't have emphasized speedup
        self.assert_markdown_contains(
            [
                "BitCount | 3 | 9 | +199.9% | **0.33x**",
                "ByteSwap | 4 | 0 | -100.0% | **4001.00x**",
                "AngryPhonebook | 10458 | 10458 | +0.0% | 1.00x ",
                "ArrayAppend | 23641 | 20000 | -15.4% | **1.18x (?)**",
            ]
        )
        self.assert_git_contains(
            [
                "BitCount 3 9 +199.9% **0.33x**",
                "ByteSwap 4 0 -100.0% **4001.00x**",
                "AngryPhonebook 10458 10458 +0.0% 1.00x",
                "ArrayAppend 23641 20000 -15.4% **1.18x (?)**",
            ]
        )
        self.assert_html_contains(
            [
                """
                <tr>
                <td align='left'>BitCount</td>
                <td align='left'>3</td>
                <td align='left'>9</td>
                <td align='left'>+199.9%</td>
                <td align='left'><font color='red'>0.33x</font></td>
                </tr>""",
                """
                <tr>
                <td align='left'>ByteSwap</td>
                <td align='left'>4</td>
                <td align='left'>0</td>
                <td align='left'>-100.0%</td>
                <td align='left'><font color='green'>4001.00x</font></td>
                </tr>""",
                """
                <tr>
                <td align='left'>AngryPhonebook</td>
                <td align='left'>10458</td>
                <td align='left'>10458</td>
                <td align='left'>+0.0%</td>
                <td align='left'><font color='black'>1.00x</font></td>
                </tr>""",
            ]
        )
    def test_sections(self):
        """Report is divided into sections with summaries."""
        self.assert_markdown_contains(
            [
                """<details open>
<summary>Regression (1)</summary>""",
                """<details >
<summary>Improvement (2)</summary>""",
                """<details >
<summary>No Changes (2)</summary>""",
                """<details open>
<summary>Added (1)</summary>""",
                """<details open>
<summary>Removed (1)</summary>""",
            ]
        )
        self.assert_git_contains(
            [
                "Regression (1): \n",
                "Improvement (2): \n",
                "No Changes (2): \n",
                "Added (1): \n",
                "Removed (1): \n",
            ]
        )
        self.assert_html_contains(
            [
                "<th align='left'>Regression (1)</th>",
                "<th align='left'>Improvement (2)</th>",
                "<th align='left'>No Changes (2)</th>",
                "<th align='left'>Added (1)</th>",
                "<th align='left'>Removed (1)</th>",
            ]
        )
    def test_report_only_changes(self):
        """Leave out tests without significant change."""
        rf = ReportFormatter(self.tc, changes_only=True)
        markdown, git, html = rf.markdown(), rf.git(), rf.html()
        self.assertNotIn("No Changes", markdown)
        self.assertNotIn("AngryPhonebook", markdown)
        self.assertNotIn("No Changes", git)
        self.assertNotIn("AngryPhonebook", git)
        self.assertNotIn("No Changes", html)
        self.assertNotIn("AngryPhonebook", html)
    def test_single_table_report(self):
        """Single table report has inline headers and no elaborate sections."""
        self.tc.removed = [] # test handling empty section
        rf = ReportFormatter(self.tc, changes_only=True, single_table=True)
        markdown = rf.markdown()
        self.assertNotIn("<details", markdown) # no sections
        self.assertNotIn("\n\n", markdown) # table must not be broken
        self.assertNotIn("Removed", markdown)
        self.assert_report_contains(
            [
                "\n**Regression** ",
                "| **OLD**",
                "| **NEW**",
                "| **DELTA**",
                "| **RATIO**",
                "\n**Added** ",
                "| **MIN**",
                "| **MAX**",
                "| **MEAN**",
                "| **MAX_RSS**",
            ],
            markdown,
        )
        # Single delimiter row:
        self.assertIn("\n:---", markdown) # first column is left aligned
        self.assertEqual(markdown.count("| ---:"), 4) # other, right aligned
        # Separator before every inline header (new section):
        self.assertEqual(markdown.count(" | | | | "), 2)
        git = rf.git()
        self.assertNotIn("): \n", git) # no sections
        self.assertNotIn("REMOVED", git)
        self.assert_report_contains(
            [
                "\nREGRESSION ",
                " OLD ",
                " NEW ",
                " DELTA ",
                " RATIO ",
                "\n\nADDED ",
                " MIN ",
                " MAX ",
                " MEAN ",
                " MAX_RSS ",
            ],
            git,
        )
        # Separator before every inline header (new section):
        self.assertEqual(git.count("\n\n"), 2)
class Test_parse_args(unittest.TestCase):
    """Tests for the command-line argument parser: required inputs, --format
    choices, --delta-threshold parsing, --output and --changes-only."""
    # Minimal valid argument list reused by every test.
    required = ["--old-file", "old.log", "--new-file", "new.log"]
    def test_required_input_arguments(self):
        with captured_output() as (_, err):
            self.assertRaises(SystemExit, parse_args, [])
        self.assertIn("usage: compare_perf_tests.py", err.getvalue())
        args = parse_args(self.required)
        self.assertEqual(args.old_file, "old.log")
        self.assertEqual(args.new_file, "new.log")
    def test_format_argument(self):
        # Default is markdown; only the three listed choices are accepted.
        self.assertEqual(parse_args(self.required).format, "markdown")
        self.assertEqual(
            parse_args(self.required + ["--format", "markdown"]).format, "markdown"
        )
        self.assertEqual(parse_args(self.required + ["--format", "git"]).format, "git")
        self.assertEqual(
            parse_args(self.required + ["--format", "html"]).format, "html"
        )
        with captured_output() as (_, err):
            self.assertRaises(
                SystemExit, parse_args, self.required + ["--format", "bogus"]
            )
        self.assertIn(
            "error: argument --format: invalid choice: 'bogus' "
            "(choose from 'markdown', 'git', 'html')",
            err.getvalue(),
        )
    def test_delta_threshold_argument(self):
        # default value
        args = parse_args(self.required)
        self.assertEqual(args.delta_threshold, 0.05)
        # float parsing
        args = parse_args(self.required + ["--delta-threshold", "0.1"])
        self.assertEqual(args.delta_threshold, 0.1)
        args = parse_args(self.required + ["--delta-threshold", "1"])
        self.assertEqual(args.delta_threshold, 1.0)
        args = parse_args(self.required + ["--delta-threshold", ".2"])
        self.assertEqual(args.delta_threshold, 0.2)
        with captured_output() as (_, err):
            self.assertRaises(
                SystemExit, parse_args, self.required + ["--delta-threshold", "2,2"]
            )
        self.assertIn(
            " error: argument --delta-threshold: invalid float " "value: '2,2'",
            err.getvalue(),
        )
    def test_output_argument(self):
        self.assertEqual(parse_args(self.required).output, None)
        self.assertEqual(
            parse_args(self.required + ["--output", "report.log"]).output, "report.log"
        )
    def test_changes_only_argument(self):
        self.assertFalse(parse_args(self.required).changes_only)
        self.assertTrue(parse_args(self.required + ["--changes-only"]).changes_only)
class Test_compare_perf_tests_main(OldAndNewLog, FileSystemIntegration):
    """Integration test that invokes the whole comparison script."""
    # Fragments that must appear in each report flavor.
    markdown = [
        "<summary>Regression (1)</summary>",
        "TEST | OLD | NEW | DELTA | RATIO",
        "BitCount | 3 | 9 | +199.9% | **0.33x**",
    ]
    git = [
        "Regression (1):",
        "TEST OLD NEW DELTA RATIO",
        "BitCount 3 9 +199.9% **0.33x**",
    ]
    html = ["<html>", "<td align='left'>BitCount</td>"]
    def setUp(self):
        """Write the fixture logs to the temp dir provided by the mixin."""
        super(Test_compare_perf_tests_main, self).setUp()
        self.old_log = self.write_temp_file("old.log", self.old_log_content)
        self.new_log = self.write_temp_file("new.log", self.new_log_content)
    def execute_main_with_format(self, report_format, test_output=False):
        """Run main() with the given --format; optionally also use --output.

        Returns (stdout_text, file_text_or_None).
        """
        report_file = self.test_dir + "report.log"
        args = [
            "compare_perf_tests.py",
            "--old-file",
            self.old_log,
            "--new-file",
            self.new_log,
            "--format",
            report_format,
        ]
        sys.argv = args if not test_output else args + ["--output", report_file]
        with captured_output() as (out, _):
            main()
        report_out = out.getvalue()
        if test_output:
            with open(report_file, "r") as f:
                report = f.read()
            # because print adds newline, add one here, too:
            report_file = str(report + "\n")
        else:
            report_file = None
        return report_out, report_file
    def test_markdown(self):
        """Writes Markdown formatted report to stdout"""
        report_out, _ = self.execute_main_with_format("markdown")
        self.assert_report_contains(self.markdown, report_out)
    def test_markdown_output(self):
        """Writes Markdown formatted report to stdout and `--output` file."""
        report_out, report_file = self.execute_main_with_format(
            "markdown", test_output=True
        )
        self.assertEqual(report_out, report_file)
        self.assert_report_contains(self.markdown, report_file)
    def test_git(self):
        """Writes Git formatted report to stdout."""
        report_out, _ = self.execute_main_with_format("git")
        self.assert_report_contains(self.git, report_out)
    def test_git_output(self):
        """Writes Git formatted report to stdout and `--output` file."""
        report_out, report_file = self.execute_main_with_format("git", test_output=True)
        self.assertEqual(report_out, report_file)
        self.assert_report_contains(self.git, report_file)
    def test_html(self):
        """Writes HTML formatted report to stdout."""
        report_out, _ = self.execute_main_with_format("html")
        self.assert_report_contains(self.html, report_out)
    def test_html_output(self):
        """Writes HTML formatted report to stdout and `--output` file."""
        report_out, report_file = self.execute_main_with_format(
            "html", test_output=True
        )
        self.assertEqual(report_out, report_file)
        self.assert_report_contains(self.html, report_file)
# Allow running the test suite directly: python test_compare_perf_tests.py
if __name__ == "__main__":
    unittest.main()
| apple/swift | benchmark/scripts/test_compare_perf_tests.py | test_compare_perf_tests.py | py | 38,114 | python | en | code | 64,554 | github-code | 36 |
def check(want, ten):
    """Return 1 when *ten* stocks at least the wanted quantity of every item, else 0."""
    for item, needed in want.items():
        if item not in ten or ten[item] < needed:
            return 0
    return 1
# 그러면 discount에 있는 값이 100000 까지 존재할 수 있는데 이걸 10개씩 미루면서 딕셔너리 만들면..
#
def solution(want, number, discount):
    """Count the 10-day discount windows that cover every wanted quantity.

    *want*/*number* are parallel lists (item name / required count) and
    *discount* lists the discounted item for each day.  A window of 10
    consecutive days qualifies when it contains at least number[i]
    occurrences of want[i] for every i.  Assumes len(discount) >= 10.

    Improvement over the original: uses a sliding-window
    collections.Counter instead of hand-rolled dict bookkeeping and no
    longer depends on the sibling check() helper.
    """
    from collections import Counter

    wanted = dict(zip(want, number))
    window = Counter(discount[:10])

    def _covered():
        # 1 when every wanted item appears often enough in the current window.
        return 1 if all(window.get(item, 0) >= qty
                        for item, qty in wanted.items()) else 0

    answer = _covered()
    for day in range(10, len(discount)):
        window[discount[day - 10]] -= 1  # day leaving the window
        window[discount[day]] += 1       # day entering the window
        answer += _covered()
    return answer
41551247368 | from importlib.resources import path
from kubernetes import client as kclient
from kubernetes import config
# Running inside the cluster: credentials come from the pod's service account.
config.load_incluster_config()
v1 = kclient.CoreV1Api()
# Trainer Pod deletes itself
try:
    api_response = v1.delete_namespaced_pod(
        name='trainer', namespace='mlbuffet')
except Exception as e:
    # Broad catch is deliberate: self-deletion is best-effort and must not crash.
    # NOTE(review): the message names connect_delete_namespaced_pod_proxy but the
    # call above is delete_namespaced_pod — confirm which API was intended.
    print("Exception when calling CoreV1Api->connect_delete_namespaced_pod_proxy: %s\n" % e)
| zylklab/mlbuffet | modules/trainer/apoptosis.py | apoptosis.py | py | 408 | python | en | code | 6 | github-code | 36 |
22783063408 | #
# @lc app=leetcode id=347 lang=python3
#
# [347] Top K Frequent Elements
#
# https://leetcode.com/problems/top-k-frequent-elements/description/
#
# algorithms
# Medium (62.25%)
# Likes: 4564
# Dislikes: 260
# Total Accepted: 550.4K
# Total Submissions: 881.4K
# Testcase Example: '[1,1,1,2,2,3]\n2'
#
# Given a non-empty array of integers, return the k most frequent elements.
#
# Example 1:
#
#
# Input: nums = [1,1,1,2,2,3], k = 2
# Output: [1,2]
#
#
#
# Example 2:
#
#
# Input: nums = [1], k = 1
# Output: [1]
#
#
# Note:
#
#
# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
# Your algorithm's time complexity must be better than O(n log n), where n is
# the array's size.
# It's guaranteed that the answer is unique, in other words the set of the top
# k frequent elements is unique.
# You can return the answer in any order.
#
#
#
# @lc code=start
from heapq import heappush, heappop, heappushpop
from collections import Counter
class Solution:
    """LeetCode 347: track the k most frequent values with a size-k min-heap."""

    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent elements of *nums* (any order)."""
        if not nums:
            return []
        heap = []
        for value, count in Counter(nums).items():
            if len(heap) < k:
                heappush(heap, (count, value))
            elif count > heap[0][0]:
                # New pair beats the current minimum: push it and evict the min.
                heappushpop(heap, (count, value))
        ordered = []
        while heap:
            ordered.append(heappop(heap)[1])
        return ordered
class Solution:
    """LeetCode 347 via a bounded min-heap of (frequency, value) pairs."""

    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent elements of *nums* (any order)."""
        if not nums:
            return []
        freq = Counter(nums)
        heap = []
        for num in freq:
            if len(heap) < k:
                heappush(heap, (freq[num], num))
            elif freq[num] > heap[0][0]:
                # BUG FIX: the original called heapreplace(), which is never
                # imported at the top of this file and raised NameError at
                # runtime.  heappushpop() *is* imported and is equivalent here:
                # the guard guarantees the pushed pair exceeds the heap minimum,
                # so push-then-pop evicts the old minimum exactly as
                # pop-then-push would.
                heappushpop(heap, (freq[num], num))
        # size of heap will be at most k
        res = [pair[1] for pair in heap]
        return res
# @lc code=end
# Time: O(nlog(k)) where n represents the number of unique number, worst case is n
# Space: O(n)
| Zhenye-Na/leetcode | python/347.top-k-frequent-elements.py | 347.top-k-frequent-elements.py | py | 2,170 | python | en | code | 17 | github-code | 36 |
73744186345 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Reorder Meaning by gloss and rework its meaning-set fields.

    Renames Meaning.elicitation to meaningSetIx and adds the integer
    meaningSetMember column with a default of 0.
    """
    dependencies = [
        ('lexicon', '0112_auto_20160929_1340'),
    ]
    operations = [
        # Default queryset ordering for the Meaning model.
        migrations.AlterModelOptions(
            name='meaning',
            options={'ordering': ['gloss']},
        ),
        migrations.RenameField(
            model_name='meaning',
            old_name='elicitation',
            new_name='meaningSetIx',
        ),
        migrations.AddField(
            model_name='meaning',
            name='meaningSetMember',
            field=models.IntegerField(default=0),
        ),
    ]
| lingdb/CoBL-public | ielex/lexicon/migrations/0113_auto_20161004_1315.py | 0113_auto_20161004_1315.py | py | 682 | python | en | code | 3 | github-code | 36 |
19340358372 | import json
import logging
import math
from django.db import IntegrityError
from django.db.models import F
from datetime import datetime, timedelta
from django.template.loader import render_to_string
from anodyne import settings
from api.models import Reading, Station, StationInfo, StationParameter, \
Exceedance, SMSAlert
from api.utils import send_mail, send_sms
log = logging.getLogger('vepolink')
class ToDatabase:
    """Persists an uploaded station reading and raises exceedance alerts.

    Constructed with keyword data parsed from an upload
    (expects at least: basename, prefix, readings).
    """
    def __init__(self, **kwargs):
        # Raw upload payload; consumed by insert().
        self.kwargs = kwargs
    def send_alert(self, exceedances):
        """E-mail and SMS every exceedance record to the station's contacts."""
        log.info('Sending Alert')
        log.info(exceedances)
        try:
            for exceedance in exceedances:
                station = exceedance.get('station')
                parameter = exceedance.get('parameter')
                param = StationParameter.objects.get(
                    station=station,
                    allowed=True,
                    parameter__name=parameter
                )
                # Template/SMS substitution values for this exceedance.
                context = {
                    'param': parameter,
                    'value': '%s %s against Pres. Stand. %s %s' % (
                        exceedance.get('value'), param.parameter.unit,
                        param.maximum, parameter),
                    'category': station.industry.type,
                    'industry': '%s, %s, %s' % (
                        station.industry, station.industry.city,
                        station.industry.state),
                    'timestamp': exceedance.get('timestamp').strftime(
                        '%a, %d-%b-%Y %H:%M'),
                    'alert_type': station.monitoring_type,
                    'location': param.monitoring_id
                }
                # Multiple recipients are stored semicolon-separated.
                mail_receipients = station.user_email.split(';')
                html_content = render_to_string(
                    'alerts-mail/exceedance.html', context)
                send_mail(subject='Exceedance Alert',
                          recipient_list=mail_receipients,
                          cc=['info@anodyne.in'],
                          html_message=html_content,
                          message='',
                          from_email=settings.EMAIL_HOST_USER
                          )
                phone_receipients = station.user_ph
                # %n is the SMS gateway's newline token.
                sms_context = "SMS ALERT FROM VEPOLINK%nALERT: {alert_type}%nIndustry Name:{industry}%nCATEGORY:{category}%nLOCATION:{location}%nEXCEEDING PARAMETER:{param}%nVALUE:{value}%n{timestamp}%nAvg Value for last 15 Min%nRespond at customercare@anodyne.in".format(**context)
                log.info('Initiating Exceedance SMS')
                send_sms(numbers=phone_receipients, content=sms_context)
        except:
            # Bare except: alerting is best-effort; failures are only logged.
            log.exception('Failing to Send Mail alert')
    def check_exceedance(self, station, reading):
        """Compare *reading* against the station's allowed parameter limits.

        Records every exceedance in the Exceedance table and triggers
        send_alert() when any limit is crossed.
        """
        log.info('Checking exceedance %s' % station)
        # Rename model fields for the .values() projection below.
        q = {
            'param': F('parameter__name'),
            'min': F('minimum'),
            'max': F('maximum')
        }
        params = StationParameter.objects.filter(
            station=station,
            allowed=True
        ).values(**q)
        exceedances_rec = []
        for meta in params:
            exceedances = {}
            param = meta.get('param')
            pmax = float(meta.get('max', 0))
            pmin = float(meta.get('min', 0))
            # Unconfigured limits (equal or zero max) are skipped.
            if pmin == pmax or pmax == 0:
                continue
            else:
                current_val = float(reading.get(param, 0))
                if current_val > pmax:
                    exceedances.update({
                        'parameter': param,
                        'value': current_val,
                    })
                # NOTE(review): `pmin > current_val > pmax` can never be true
                # when pmin < pmax, so this pH band check looks dead — was
                # `current_val < pmin or current_val > pmax` intended?
                if param.lower() == 'ph' and pmin > current_val > pmax:
                    exceedances.update({
                        'parameter': param,
                        'value': current_val,
                    })
            if exceedances:
                log.info('Exceedances %s' % exceedances)
                exceedances.update({'timestamp': reading.get('timestamp'),
                                    'station': station})
                exceedances_rec.append(exceedances)
        if exceedances_rec:
            try:
                Exceedance.objects.bulk_create(
                    [Exceedance(**q) for q in exceedances_rec])
                log.info('Exceedance observed %s' % station)
            except IntegrityError:
                # Duplicate exceedance rows are ignored.
                pass
            self.send_alert(exceedances_rec)
    def _clean_reading(self, reading):
        """Normalize a raw reading dict.

        Keeps the timestamp (key lowercased) and every value parseable as a
        finite float, rounded to 2 decimals.  Returns None when fewer than
        two entries survive (i.e. no actual measurements).
        """
        if reading:
            clean_reading = {}
            for k, v in reading.items():
                if k.lower() == 'timestamp':
                    k = k.lower()
                    clean_reading[k] = v
                else:
                    try:
                        value = float(v)
                        if not math.isnan(value):
                            clean_reading[k] = float('{0:.2f}'.format(value))
                    except ValueError:
                        # Non-numeric values are silently dropped.
                        pass
            if len(clean_reading.keys()) > 1:
                return clean_reading
    def insert(self):
        """Store the cleaned reading and update station bookkeeping.

        Returns {'db': {'success': bool, 'msg': str}} describing the outcome.
        """
        basename = self.kwargs.get('basename')
        response = {
            'success': False,
            'msg': ''
        }
        # response is shared by reference, so later mutations are visible here.
        db_status = {
            'db': response
        }
        log.info('Adding to database:%s' % self.kwargs)
        try:
            readings = self._clean_reading(self.kwargs.get('readings'))
            if readings:
                station = Station.objects.get(prefix=self.kwargs.get('prefix'))
                self.check_exceedance(station, readings)
                Reading.objects.create(
                    station=station,
                    reading=readings
                )
                station.status = 'Live'
                station.save()
                sinfo, created = StationInfo.objects.get_or_create(
                    station=station)
                # NOTE(review): get_or_create returns (obj, bool); `created` is
                # a bool, so `sinfo if sinfo else created` is suspicious —
                # confirm the intent.
                obj = sinfo if sinfo else created
                obj.last_seen = readings.get('timestamp')
                obj.last_upload_info = json.dumps(response)
                # Serialize the timestamp before storing the reading as JSON.
                readings['timestamp'] = readings.get('timestamp').strftime(
                    '%Y-%m-%d %H:%M:%S ')
                obj.readings = json.dumps(readings)
                obj.save()
                log.info('Added to Reading successfully')
                response['success'] = True
                response['msg'] = "%s: Added Readings" % basename
            else:
                response['success'] = False
                response['msg'] = "%s: No Readings Found" % basename
        except IntegrityError:
            # Same reading already stored for this station/timestamp.
            response['msg'] = "%s: Reading exists." % basename
            return db_status
        except Exception as err:
            response['success'] = False
            response['msg'] = "%s: Failed to readings to databse %s" % (
                basename, err
            )
            log.exception('DB ERROR')
        return db_status
| anodyneweb/aw_backend | anodyne/anodyne/connectors/to_database.py | to_database.py | py | 6,957 | python | en | code | 0 | github-code | 36 |
42026376598 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import HomeView, ArchivesView, AboutView, PhotoView, MusicView, ArticleDetailView, CategoryView, TagListView, TagsView, CategoryListView, search
# Blog URL routes: regex-based url() mappings to class-based views.
urlpatterns = [
    url(r'^$', HomeView.as_view(), name='home'),
    url(r'^archives/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$', ArchivesView.as_view(), name='archives'),
    url(r'^category_list/$', CategoryListView.as_view(), name='category_list'), # category list page
    url(r'^category/(?P<category_id>\d+)/$', CategoryView.as_view(), name='category'),
    url(r'^tag_list/$', TagListView.as_view(), name='tag_list'), # tag list page
    url(r'^tag/(?P<tag_id>\d+)/$', TagsView.as_view(), name='tag'), # articles under one tag
    url(r'^about/', AboutView.as_view(), name='about'),
    url(r'^photo/', PhotoView.as_view(), name='photo'),
    url(r'^music/', MusicView.as_view(), name='music'),
    url(r'^article_detail/(?P<article_id>\d+)/$', ArticleDetailView.as_view(), name='article_detail'),
    url(r'^search/$',search, name="search"),
]
| JustBreaking/myblog | apps/myblog/urls.py | urls.py | py | 1,101 | python | en | code | 0 | github-code | 36 |
43911017589 | import datetime
import zadanie2lista6
import zadanie22lista6
# Input file to encrypt (note the doubled ".txt" in the name).
plik = 'plik_do_szyfrowania.txt.txt'
# NOTE(review): this file handle is never closed; consider a with-block.
openplik = open(plik,"r").read()
# Caesar shift; the prompt asks for 1-10 but the value is not validated.
a = int(input("Podaj liczbe od 1-10: "))
date_today = datetime.date.today()
month = date_today.month
year = date_today.year
day = date_today.day
# Output name: plik_zaszyfrowany_<shift><year>-<month>-<day>.txt
# (shift and year are concatenated with no separator between them).
g = ['plik_zaszyfrowany','_',a,year,'-',month,'-',day,'.txt']
all_strings = list(map(str, g))
result = ''.join(all_strings)
# Same naming scheme for the decrypted output file.
s = ['plik_deszyfrowany','_',a,year,'-',month,'-',day,'.txt']
all_strings = list(map(str, s))
wynik2 = ''.join(all_strings)
# Encrypt with the Caesar cipher from exercise 2 of list 6.
zaszyfrowane = zadanie2lista6.Szyfr_Cezara(openplik,a)
print(zaszyfrowane)
with open(result,'w', encoding="utf-8") as file:
    file.write(zaszyfrowane)
#ZADANIE2 (exercise 2): decrypt the ciphertext again.
# NOTE(review): szyfr2 is opened but never read or closed; the decryption
# below works on the in-memory `zaszyfrowane` instead.
szyfr2 = open(result,'r')
deszyfrowanie = zadanie22lista6.Szyfr_Cezara2(zaszyfrowane,a)
print(deszyfrowanie)
with open(wynik2,'w', encoding="utf-8") as file:
    file.write(deszyfrowanie)
| AWyszynska/JSP2022 | lista8/zadanie1i2.py | zadanie1i2.py | py | 898 | python | pl | code | 0 | github-code | 36 |
72498628583 | import os
import sys
import zipfile
import urllib.request
import filecmp
import shutil
import errno
import typing
import orjson
VERSIONS_JSON = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
RELEASE_TYPES = typing.Literal["release", "snapshot"]
def fetch_json(url: str):
    """Download *url* and decode the response body as JSON."""
    body = urllib.request.urlopen(url).read()
    return orjson.loads(body)
def get_urls(type: RELEASE_TYPES, number: int) -> list[str]:
    """Return manifest URLs for the newest `number + 1` versions of *type*.

    One extra manifest is collected because diffing N versions needs N+1
    snapshots.  Note: the parameter `type` shadows the builtin.
    """
    global VERSIONS_JSON  # read-only access; the global statement is not required
    urls = {}
    for item in fetch_json(VERSIONS_JSON)["versions"]:
        if len(urls) < (number + 1) and item["type"] == type:
            urls[item["id"]] = item["url"]
    return list(urls.values())
def save_temp(urls: list[str]) -> list[str]:
    """Download and extract each version's client jar under temp/.

    *urls* are version-manifest URLs; returns the list of version ids,
    in the same order.  Fixes two issues in the original: the manifest
    was fetched from the network twice per version, and the ZipFile
    handle leaked if extraction raised.
    """
    names = []
    if not os.path.exists("temp"):
        os.mkdir("temp")
    for url in urls:
        meta = fetch_json(url)  # single fetch per manifest
        name = meta["id"]
        names.append(name)
        os.mkdir(f"temp/{name}")
        with open(f"temp/{name}.zip", "wb") as f:
            f.write(urllib.request.urlopen(meta["downloads"]["client"]["url"]).read())
        # Context manager closes the archive even if extractall() fails.
        with zipfile.ZipFile(f"temp/{name}.zip", "r") as zip_ref:
            zip_ref.extractall(f"temp/{name}")
    return names
def diff_folders(new: str, old: str, type: RELEASE_TYPES, delete_folder: bool = False):
    """Diff the texture trees of two extracted versions and copy the results.

    Textures only present in *new* are saved as "added", differing ones as
    "changed".  The function first calls itself with the versions swapped
    (delete_folder=True) so textures missing from *new* are recorded as
    "deleted" under *old*'s output folder.
    """
    added = []
    changed = []
    deleted = []
    if not delete_folder:
        # Reverse pass: walking old against new finds the deleted textures.
        diff_folders(old, new, type, delete_folder=True)
    for root, _, files in os.walk(f"temp/{new}"):
        for name in files:
            src = os.path.join(root, name)
            # Only files under the textures subtree are compared.
            if f"temp/{new}/assets/minecraft/textures/" in src:
                # NOTE(review): replace(new, old, 1) rewrites the first
                # occurrence of the version id in the path — fragile if the
                # id also appears elsewhere in the path.
                dest = src.replace(new, old, 1)
                if not delete_folder:
                    if not os.path.exists(dest):
                        added.append(src)
                    elif not filecmp.cmp(src, dest):
                        changed.append(src)
                elif not os.path.exists(dest):
                    deleted.append(src)
    for item in added:
        save_diff(new, f"../{type.capitalize()}s/{new}/added", item)
    for item in changed:
        save_diff(new, f"../{type.capitalize()}s/{new}/changed", item)
    for item in deleted:
        save_diff(new, f"../{type.capitalize()}s/{old}/deleted", item)
def save_diff(base_folder: str, new_folder: str, item: str) -> None:
    """Copy texture file *item* into *new_folder*, mirroring its relative path.

    The destination keeps everything after ".../assets/minecraft/textures/"
    in *item*'s path; missing destination directories are created on demand.
    """
    dest = item.replace(f"{base_folder}/assets/minecraft/textures/", f"{new_folder}/")
    # exist_ok=True replaces the original's race-prone "copy, catch ENOENT,
    # mkdir, retry" dance (which also uselessly rebound the exception var).
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    shutil.copy(item, dest)
def main():
    """CLI entry point: diff the N latest versions of a release type.

    Usage: compare.py <release|snapshot> <N>
    """
    release_type = sys.argv[1]
    number = int(sys.argv[2])
    if release_type not in {"release", "snapshot"}:
        print("Invalid release type")
        return
    if typing.TYPE_CHECKING:
        # Static-analysis-only narrowing; no runtime effect.
        release_type = typing.cast(RELEASE_TYPES, release_type)
    print("Getting files...")
    urls = get_urls(release_type, number)
    folders = save_temp(urls)
    print("Comparing files...")
    # folders holds number + 1 versions, newest first: diff adjacent pairs.
    for x in range(number):
        diff_folders(folders[x], folders[x + 1], release_type)
if __name__ == "__main__":
    main()
| AstreaTSS/mc-texture-changes | compare.py | compare.py | py | 3,236 | python | en | code | 1 | github-code | 36 |
71001076263 | #!/usr/bin/env python
#
# This script downloads a game from OGS and produces a .game file
# that can be used by our test estimator.
#
import requests
import sys
def fetch_game(game_id):
    """Fetch an OGS game's scoring state.

    Returns (board, removal, player_to_move), or None when the HTTP request
    fails.  Board cells are remapped from the OGS encoding (0=open, 1=black,
    2=white) to 0 / 1 / -1.
    """
    res = requests.get('https://online-go.com/termination-api/game/%d/state' % game_id)
    if res.status_code != 200:
        sys.stderr.write('Unable to fetch game\n')
        return None
    data = res.json()
    removal = data['removal']
    board = data['board']
    board = [
        [-1 if x == 2 else x for x in row]
        for row in board
    ]
    last_move = data['last_move']
    player_to_move = 0
    # BUG FIX: the original tested `last_move['y'] == -1`, i.e. it only derived
    # the side to move when there was *no* last move — and then indexed the
    # board at [-1][...] (wrapping to the last row).  A real last move is
    # signalled by y != -1; the player to move is the opposite colour of the
    # stone just played.
    if last_move['y'] != -1:
        player_to_move = -board[last_move['y']][last_move['x']]
    if player_to_move == 0:
        player_to_move = 1  # default: black moves first
    return board, removal, player_to_move
def print_game(output, board, removal, player_to_move):
    """Write the .game text format: header, board grid, blank line, removal grid."""
    def grid_lines(rows):
        # Each cell rendered right-aligned in two characters, space-separated.
        return [' '.join('%2d' % cell for cell in row) + '\n' for row in rows]

    output.write('# 1=black -1=white 0=open\n')
    output.write('height %d\n' % len(board))
    output.write('width %d\n' % len(board[0]))
    output.write('player_to_move %d\n' % player_to_move)
    output.writelines(grid_lines(board))
    output.write('\n')
    output.writelines(grid_lines(removal))
if __name__ == "__main__":
    # Expect exactly one positive integer argument: the OGS game id.
    if len(sys.argv) != 2 or int(sys.argv[1]) <= 0:
        sys.stderr.write("Usage: ./fetch_ogs_game.py <game-id>\n")
    else:
        game_id = int(sys.argv[1])
        filename = '%d.game' % game_id
        with open(filename, 'w') as output:
            # NOTE(review): fetch_game returns None on HTTP failure, which
            # would make this unpacking raise TypeError — confirm intended.
            board, removal, player_to_move = fetch_game(game_id)
            print_game(output, board, removal, player_to_move)
        print('Wrote %s. You will want to modify the stone removal map to fix any errors that happened in scoring.' % filename)
| online-go/score-estimator | tools/fetch_ogs_game.py | fetch_ogs_game.py | py | 1,827 | python | en | code | 51 | github-code | 36 |
28780254481 | """
Python Crash Course, Third Edition https://ehmatthes.github.io/pcc_3e/
My notes: https://github.com/egalli64/pythonesque/pcc3
Chapter 15 - Generating Data - Plotting a Simple Line Graph - Plotting a Series of Points with scatter()
"""
import matplotlib.pyplot as plt
# NOTE(review): the plain 'seaborn' style name was removed in matplotlib 3.8;
# newer releases expect e.g. 'seaborn-v0_8'.
plt.style.use('seaborn')
fig, ax = plt.subplots()
# a few scattered points: y = x**2
xs = [1, 2, 3, 4, 5]
ys = [1, 4, 9, 16, 25]
ax.scatter(xs, ys, s=100)  # s = marker area in points**2
ax.set_title("Square Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Square of Value", fontsize=14)
ax.tick_params(labelsize=14)
plt.show()
| egalli64/pythonesque | pcc3/ch15/e1f_scatter_points.py | e1f_scatter_points.py | py | 587 | python | en | code | 17 | github-code | 36 |
11032901468 | import sys
# Bottom-up implementation of the classic rod-cut problem
def bottom_up_rod(p, n):
    """Bottom-up dynamic program for the classic rod-cutting problem.

    *p* maps each piece length to its price; returns the maximum revenue
    obtainable from a rod of length *n* (0 for n == 0).
    """
    best = [0] * (n + 1)
    for length in range(1, n + 1):
        # Best revenue = max over the length of the first piece cut off.
        best[length] = max(p[first] + best[length - first]
                           for first in range(1, length + 1))
    return best[n]
# CLRS-style sample price table: price of a rod piece for each length 1..10.
prices = {1:1, 2:5, 3:8, 4:9, 5:10, 6:17, 7:17, 8:20, 9:24, 10:30}
print("Max rod revenue for length 4 is %d" % bottom_up_rod(prices, 4))
| tonydelanuez/python-ds-algos | probs/bottom-up-rod.py | bottom-up-rod.py | py | 433 | python | en | code | 0 | github-code | 36 |
70376331945 | import XInput
from pynput import keyboard
from pygame import mixer
mixer.init()
import time
class XinputHandler(XInput.EventHandler):
    """Routes XInput gamepad button presses into the shared KeyMan state."""
    def __init__(self, keyMan):
        # Listen on all four controller slots.
        super().__init__(0, 1, 2, 3)
        self.keyMan = keyMan
    def process_button_event(self, event):
        if event.type == XInput.EVENT_BUTTON_PRESSED:
            # Presumably d-pad and face buttons -> note '1' — TODO confirm ids.
            if event.button_id in [1,2,4,8, 4096, 8192, 16384, 32768]:
                self.keyMan.ckey = '1'
            # Presumably shoulder buttons -> note '2' — TODO confirm ids.
            if event.button_id in [256, 512]:
                self.keyMan.ckey = '2'
class KeyMan:
    """Collects the most recent keyboard/gamepad input as a one-character code.

    ckey is '1' (j/f or mapped pad buttons), '2' (k/d or shoulders),
    'q' (quit key), '0' after reset(), and '' before any input.
    """
    def __init__(self):
        self.ckey = ''
        # The keyboard listener and the gamepad thread both run in the
        # background and write ckey asynchronously.
        keyboard.Listener(on_press=self.press).start()
        handler = XinputHandler(self)
        thread = XInput.GamepadThread()
        thread.add_event_handler(handler)
    def reset(self):
        """Mark the slot as consumed; '0' means "no input this beat"."""
        self.ckey = '0'
    def press(self, key: keyboard.HotKey):
        try:
            if key.char in 'jf':
                self.ckey = '1'
            elif key.char in 'kd':
                self.ckey = '2'
            elif key.char in 'q':
                self.ckey = 'q'
        except AttributeError:
            # Special keys (shift, arrows, ...) have no .char — ignore them.
            pass
class Waiter:
    """Paces a loop against absolute wall-clock targets to avoid drift."""
    def __init__(self):
        # Set by init(); every wait() advances it by exactly `sec`.
        self.lastTime = None
    def init(self):
        """Anchor the schedule at the current time."""
        self.lastTime = time.time()
    def wait(self, sec):
        """Sleep until `sec` seconds past the previous target time."""
        target = self.lastTime + sec
        remaining = target - time.time()
        if remaining > 0:
            time.sleep(remaining)
        self.lastTime = target
# Shared input state and the two metronome/note sounds.
key = KeyMan()
beep1 = mixer.Sound('beep1')
beep2 = mixer.Sound('beep2')
#
# Editor
#
def editor(beats, secPerBeat, length, soundSpan, lag):
    """Record a chart by sampling key.ckey once per beat.

    beats: beats per measure; secPerBeat: seconds per beat;
    length: number of measures; soundSpan: play the metronome every
    soundSpan beats; lag: shifts the sampling point to compensate
    input latency.  Returns a list of one string per measure (one
    character per beat), or None when lag is out of range.
    """
    if secPerBeat/2-lag < 0 or secPerBeat/2+lag < 0:
        print('lag値が大きすぎます')  # "lag value is too large"
        return
    sheet = []
    input('(press enter key to start)')
    key.reset()
    waiter = Waiter()
    waiter.init()
    for _ in range(length):
        line = ''
        beep1.play()  # accented sound on the first beat of the measure
        waiter.wait(secPerBeat/2+lag)
        line += key.ckey  # sample mid-beat, shifted by lag
        key.reset()
        waiter.wait(secPerBeat/2-lag)
        for i in range(beats-1):
            if (i+1) % soundSpan == 0:
                beep2.play()
            waiter.wait(secPerBeat/2+lag)
            line += key.ckey
            key.reset()
            waiter.wait(secPerBeat/2-lag)
        print(line)
        sheet.append(line)
    return sheet
def editor_ex(bpm: int, base: int, beats: int, length: int, soundSpan: int=1, lag=0):
    """Convenience wrapper around editor() taking musical parameters.

    base : beats per measure (time-signature basis)
    beats : subdivisions recorded per measure
    length : number of measures
    soundSpan : play the metronome sound every soundSpan subdivisions
    lag : input-latency compensation in seconds
    """
    secPerMeasure = (60/bpm) * base
    secPerBeat = secPerMeasure / beats
    return editor(beats, secPerBeat, length, soundSpan, lag)
| tsoushi/SimpleRealtimeTJAEditor | taiko_nothread.py | taiko_nothread.py | py | 2,738 | python | en | code | 0 | github-code | 36 |
72426420905 | from rest_framework.test import APITestCase
from restapi.models import Companies, Countries
class FilterTest(APITestCase):
    """Checks the ?year= filter on the /restapi/companies/ endpoint."""
    @classmethod
    def setUpTestData(cls):
        # Ten companies founded 1950..1959, all in one throwaway country.
        companies = 10
        Countries.objects.create(name="c", continent="c", population=1, capital="c", surface=1)
        country = Countries.objects.get(name="c")
        for c_id in range(companies):
            Companies.objects.create(name=f"c {c_id}", year_founded=1950 + c_id, number_of_employees=10, country=country,
                                     activity="a")
    def test_correct_result(self):
        # Per the expected counts, the filter returns companies founded in
        # or after the given year (10 from 1950, 5 from 1955, 0 from 1960).
        response = self.client.get("/restapi/companies/?year=1950")
        self.assertEqual(len(response.data), 10)
        response1 = self.client.get("/restapi/companies/?year=1955")
        self.assertEqual(len(response1.data), 5)
        response2 = self.client.get("/restapi/companies/?year=1960")
        self.assertEqual(len(response2.data), 0)
| UBB-SDI-23/lab-5x-andrei-crisan27 | backend-project/tests/test_filter.py | test_filter.py | py | 939 | python | en | code | 0 | github-code | 36 |
7004144868 | # define a function that take list of words as argument and
# return list with reverse of every element in that list
# example :
# ['abc','xyz','tuv'] ---> ['cba', 'zyx', 'vut']
def reverse_item(l):
    """Return a new list containing each string of *l* reversed."""
    return [word[::-1] for word in l]
l = ['abc','xyz','tuv']
print(reverse_item(l)) | salmansaifi04/python | chapter5(list)/14_exercise_03.py | 14_exercise_03.py | py | 330 | python | en | code | 0 | github-code | 36 |
17811935337 | import numpy as np
import logo
import words
print('\n')
print(logo.h_logo)
# Pick the secret word; it is printed because this is a sample/debug game.
random_word = np.random.choice(words.words).lower()
print('\n')
print("Randomly chosen word for sample game: ", random_word)
# One underscore placeholder per letter of the secret word.
display = []
for i in range(len(random_word)):
    display += '_'
game = True
life = 5
while game == True:
    print('\n')
    guess = input("Enter the guess letter: ")
    position = 0
    # Reveal every position where the guessed letter matches.
    for letter in random_word:
        if letter == guess:
            print('\n')
            print("The guess was right")
            display[position] = guess
            print(display)
        position += 1
    if guess not in random_word:
        life -= 1
        print(f"The guess was wrong, your current life: {life}/5")
    # Win once no placeholder remains; lose when all lives are spent.
    if '_' not in display:
        print('\n')
        print("*** You Won ***")
        game = False
    if life == 0:
        print('\n')
        print("*** You Lose ***")
        game = False
| SachinSaj/Python-Course-Projects | Hangman/hangman.py | hangman.py | py | 928 | python | en | code | 0 | github-code | 36 |
1137077324 | import json
# things we need for NLP
import nltk
from nltk.stem.lancaster import LancasterStemmer
nltk.download('punkt')
stemmer = LancasterStemmer()
# things we need for Tensorflow
import numpy as np
import tflearn
import tensorflow as tf
import random
import pickle
class ModelBuilder(object):
    """Builds and trains a bag-of-words intent classifier with tflearn (TF1)."""
    def __init__(self):
        # Intents file format: {"intents": [{"tag": ..., "patterns": [...]}]}
        with open('intents.json') as json_data:
            self.intents = json.load(json_data)
        self.words = []       # stemmed vocabulary
        self.classes = []     # intent tags
        self.documents = []   # (tokenized pattern, tag) pairs
        self.ignore_words = [
            'what', 'are', 'is', 'the', 'why',
            'does', 'how', 'in', 'on', '?', 'my',
            'I'
        ]
    def parse_intents_doc(self):
        """Tokenize every pattern, building words/classes/documents."""
        # loop through each sentence in our intents patterns
        for intent in self.intents['intents']:
            for pattern in intent['patterns']:
                # tokenize each word in the sentence
                w = nltk.word_tokenize(pattern)
                # add to our words list
                self.words.extend(w)
                # add to documents in our corpus
                self.documents.append((w, intent['tag']))
                # add to our classes list
                if intent['tag'] not in self.classes:
                    self.classes.append(intent['tag'])
        # stem and lower each word and remove duplicates
        self.words = [stemmer.stem(w.lower()) for w in self.words if w not in self.ignore_words]
        self.words = sorted(list(set(self.words)))
        # remove duplicates
        self.classes = sorted(list(set(self.classes)))
    def build_training_data(self):
        """Return (train_x, train_y): bag-of-words vectors and one-hot tags."""
        # create our training data
        training = []
        output = []
        # create an empty array for our output
        output_empty = [0] * len(self.classes)
        # training set, bag of words for each sentence
        for doc in self.documents:
            # initialize our bag of words
            bag = []
            # list of tokenized words for the pattern
            pattern_words = doc[0]
            # stem each word
            pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
            # create our bag of words array
            for w in self.words:
                if w in pattern_words:
                    bag.append(1)
                else:
                    bag.append(0)
            # output is a '0' for each tag and '1' for current tag
            output_row = list(output_empty)
            output_row[self.classes.index(doc[1])] = 1
            training.append([bag, output_row])
        # shuffle our features and turn into np.array
        random.shuffle(training)
        training = np.array(training)
        # create train and test lists
        train_x = list(training[:,0])
        train_y = list(training[:,1])
        return train_x, train_y
    def train_neural_network(self, train_x, train_y):
        """Train a 2x8-unit softmax network, save the model and data pickle."""
        # reset underlying graph data
        tf.reset_default_graph()
        # Build neural network
        net = tflearn.input_data(shape=[None, len(train_x[0])])
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
        net = tflearn.regression(net)
        # Define model and setup tensorboard
        model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
        # Start training (apply gradient descent algorithm)
        model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
        model.save('model.tflearn')
        # save all of our data structures
        # NOTE(review): the file handle from open(...) is never closed;
        # consider a with-block.
        pickle.dump({
            'words': self.words,
            'classes': self.classes,
            'train_x': train_x,
            'train_y': train_y
        },
            open('training_data', 'wb')
        )
# Build the training data from intents.json, then train and save the model.
if __name__ == '__main__':
    model_builder = ModelBuilder()
    model_builder.parse_intents_doc()
    train_x, train_y = model_builder.build_training_data()
    model_builder.train_neural_network(train_x, train_y)
| nlokare/chatbot | chat_model.py | chat_model.py | py | 4,023 | python | en | code | 0 | github-code | 36 |
22430840352 | # Import SQLite3
import sqlite3
# Create/open a database connection called "cars" (file created on demand).
conn = sqlite3.connect("cars.db")
try:
    # Create the cursor to execute commands
    cursor = conn.cursor()
    # IF NOT EXISTS makes the script idempotent: the original raised
    # sqlite3.OperationalError ("table inventory already exists") on re-run.
    cursor.execute("""CREATE TABLE IF NOT EXISTS inventory
                   (Make TEXT, Model TEXT, Quantity INT)
                   """)
    conn.commit()
finally:
    # Close the connection even if table creation fails.
    conn.close()
| JackM15/sql | car_sql.py | car_sql.py | py | 438 | python | en | code | 0 | github-code | 36 |
74430232105 | from django.urls import path
from . import views
# Product app routes: listing/search, CRUD, wishlist, categories, comments.
urlpatterns = [
    # Listing and search
    path(
        '',
        views.all_products,
        name='products'),
    path(
        'ranked/',
        views.products_ranking,
        name='products_ranking'),
    path(
        '<int:product_id>/',
        views.product_detail,
        name='product_detail'),
    path(
        'add/',
        views.ProductCreateView.as_view(),
        name='add_product'),
    path(
        'search/',
        views.search_page,
        name='search_page'),
    path(
        'htmx-search/',
        views.htmx_search_products,
        name='htmx_search'),
    path(
        'edit/<int:pk>/',
        views.ProductUpdateView.as_view(),
        name='edit_product'),
    path(
        'delete/<int:product_id>/',
        views.delete_product,
        name='delete_product'),
    # Wishlist
    path(
        'add_to_wishlist/<int:id>/',
        views.add_to_wishlist,
        name='add_to_wishlist'),
    path(
        'delete_wishlist/<int:id>/',
        views.delete_wishlist_item,
        name='delete_wishlist_item'),
    # Categories
    path(
        'category/<category>/',
        views.category_products,
        name='product_category_view'),
    path(
        'add_category/',
        views.add_product_category,
        name='add_product_category'),
    path(
        'edit_category/<int:id>/',
        views.edit_product_category,
        name='edit_product_category'),
    path(
        'delete_category/<int:id>/',
        views.delete_product_category,
        name='delete_product_category'),
    # Comments
    path(
        'delete_comment/<int:id>/',
        views.delete_comment,
        name='delete_comment'),
]
| neil314159/portfolio-project-5 | products/urls.py | urls.py | py | 1,650 | python | en | code | 0 | github-code | 36 |
8400219894 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 24 13:08:39 2022
@author: luisdsaco
(C) 2017-2022 Luis Díaz Saco
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
"""
from sacagents import AgentStoppedError, Agent, SpyAgent, CounterAgent
from sacagents import __version__ as sac_ver, __author__ as sac_auth
def test_message(ag):
    """Print the status of agent ``ag`` and the current global agent count.

    Uses the agent's own ``print_locked`` (presumably serialises output from
    concurrent agent threads -- confirm in sacagents) instead of ``print``.
    :param ag: an Agent (or subclass) instance to report on
    """
    ag.print_locked('The current status of created agent', ag.ID(), 'is',
                    ag.status())
    ag.print_locked("The total number of agents is", Agent.total_num_agents())
if __name__ == "__main__":
    # Smoke-test script for the sacagents package: exercises agent creation,
    # cloning, message passing, threaded/delayed operations, bad commands,
    # and the AgentStoppedError lifecycle.
    print("Using Sacagents ", sac_ver, ": (C) 2022 ", sac_auth)
    # Testing creation and cloning
    ag1 = Agent(0)
    test_message(ag1)
    ag2 = Agent(5)
    test_message(ag2)
    ag3 = ag1.clone()
    test_message(ag3)
    ag4 = SpyAgent('English')
    test_message(ag4)
    ag5 = CounterAgent()
    test_message(ag5)
    # Testing direct execution: run then stop each agent, joining any that
    # are still alive afterwards.
    aglist = [ag1,ag2,ag3,ag4]
    for ag in aglist:
        ag.send_message('Run')
    for ag in aglist:
        ag.send_message('Stop')
    for ag in aglist:
        if ag.is_alive():
            ag.join()
    # Testing threaded operations
    for i in range(5):
        ag5.print_locked("Send message",i)
        ag5.send_message('Thread')
    ag5.send_message('Stop')
    if ag5.is_alive():
        ag5.join()
    # Testing delayed operations
    # German agents only confess after 10 attempts
    # There is programmed a delayed confession
    ag4 = SpyAgent('German')
    for i in range(10):
        ag4.send_message('Run')
    test_message(ag4)
    ag4.send_message('Timer')
    ag4.send_message('Stop')
    ag4.join()
    # Testing erroneous commands ('Err' is not a known message)
    ag5 = SpyAgent('Spanish')
    test_message(ag5)
    ag5.send_message('Err')
    ag5.send_message('Stop')
    ag5.join()
    # Testing invalid data and exception handling: once stopped, an agent
    # must refuse further messages by raising AgentStoppedError.
    ag3 = SpyAgent('French')
    test_message(ag3)
    ag3.send_message('Run')
    ag3.send_message('Stop')
    ag3.join()
    ag3.add_confession('French','Je suis un espion américain')
    try:
        ag3.send_message('Run')
        ag3.send_message('Stop')
        ag3.join()
    except AgentStoppedError:
        print('Cannot receive messages again')
    # Testing the modification of the status
    ag3 = SpyAgent('French')
    ag3.add_confession('French','Je suis un espion américain')
    try:
        ag3.send_message('Run')
        ag3.send_message('Stop')
        ag3.join()
    except AgentStoppedError:
        print('Cannot execute it again')
    print("End of program before")
    # Delayed messages will appear after the last print command
| luisdsaco/sacagents | sample.py | sample.py | py | 3,298 | python | en | code | 0 | github-code | 36 |
class Solution(object):
    # Time: O(N^2)  Space: O(1) auxiliary (output list excluded)
    def ThreeSum(self, nums, target):
        """
        Return all unique triplets [a, b, c] from nums with a + b + c == target.

        Sorts the input in place, anchors one element and runs a two-pointer
        scan over the rest. Duplicate anchors and duplicate middle elements
        are skipped, so each triplet is reported exactly once (the original
        emitted repeats such as [-1, 0, 1] twice for the classic sample).

        :type nums: List[int]
        :type target: int
        :rtype: List[List[int]]
        """
        nums.sort()
        res = []
        for i in range(len(nums) - 2):
            # Same anchor value would only reproduce triplets already found.
            if i > 0 and nums[i] == nums[i - 1]:
                continue
            l, r = i + 1, len(nums) - 1
            while l < r:
                total = nums[i] + nums[l] + nums[r]
                if total == target:
                    res.append([nums[i], nums[l], nums[r]])
                    l += 1
                    # Step past equal middle values to avoid duplicate triplets.
                    while l < r and nums[l] == nums[l - 1]:
                        l += 1
                elif total > target:
                    r -= 1
                else:
                    l += 1
        return res
# Demo: print the triplets of the classic sample input that sum to zero.
nums = [-1,0,1,2,-1,-4]
target = 0
s = Solution()
print(s.ThreeSum(nums, target))
| ZhengLiangliang1996/Leetcode_ML_Daily | Array/3Sum.py | 3Sum.py | py | 739 | python | en | code | 1 | github-code | 36 |
10793744334 | import copy
def polygonal(s, n):
    """Return the n-th s-gonal number.

    Uses the closed form P(s, n) = (s - 2) * n * (n - 1) / 2 + n, which
    reduces to the triangle/square/pentagonal/hexagonal/heptagonal/octagonal
    formulas for s = 3..8 used by the original if/elif chain, and also
    generalises correctly to any s >= 3 (the original silently returned
    None outside 3..8).

    :param s: number of sides of the polygon (s >= 3)
    :param n: 1-based index of the polygonal number
    :return: the n-th s-gonal number as an int
    """
    return (s - 2) * n * (n - 1) // 2 + n
return n * (3 * n - 2)
# Collect, for every shape s = 3..8, the s-gonal numbers with exactly 4 digits.
polygonals = {}
for s in range(3, 9):
    polygonals[s] = []
    n = 1
    while True:
        p = polygonal(s, n)
        if p >= 10000:
            break
        if p >= 1000:
            polygonals[s].append(p)
        n += 1
# Recursive function to find the cyclic set
def find_cyclic_set(numbers, polygonal_types, current_set):
    """Depth-first search for an ordered chain of six polygonal numbers.

    Each chosen number must begin with the last two digits of its
    predecessor and use a polygonal type (key of ``numbers``) not yet
    consumed from ``polygonal_types``; a completed chain of six is accepted
    only if it also closes back onto its first member.

    :param numbers: mapping from polygonal type to its candidate numbers
    :param polygonal_types: types still available at this depth
    :param current_set: chain built so far
    :return: the cyclic chain as a list, or None if none exists
    """
    if len(current_set) == 6:
        # Chain complete: accept only if cyclic (first two digits of the
        # first number equal the last two digits of the last number).
        if current_set[0] // 100 == current_set[-1] % 100:
            return current_set
        return None
    for shape in polygonal_types:
        for candidate in numbers[shape]:
            links = not current_set or current_set[-1] % 100 == candidate // 100
            if not links:
                continue
            remaining = polygonal_types.copy()
            remaining.remove(shape)
            found = find_cyclic_set(numbers, remaining, current_set + [candidate])
            if found is not None:
                return found
    return None
# Find the cyclic set and print the sum of its members (Project Euler 61).
# NOTE(review): if no cyclic set exists, find_cyclic_set returns None and
# sum() below raises TypeError -- confirm that is acceptable here.
cyclic_set = find_cyclic_set(polygonals, list(range(3, 9)), [])
# Print the sum of the cyclic set
print(sum(cyclic_set))
| WilliamLP/solvegpt | project_euler/gpt-4/61.py | 61.py | py | 1,597 | python | en | code | 0 | github-code | 36 |
39350644450 | import copy
import numpy as np
import cv2
import time
import random
import argparse
def draw_obstacles(canvas,clr=5,unknown=False, map_flag=1):
    """
    Paint the obstacle space into ``canvas`` pixel by pixel.

    Static (known) obstacles are written as blue pixels [255, 0, 0] using
    half-plane / circle inequalities inflated by ``clr``; when ``unknown``
    is True, additional "unknown" obstacles are written as green pixels
    [0, 255, 0] (these are NOT inflated by ``clr``).

    :param canvas: HxWx3 image (H=250, W=400 as hard-coded in the tests below)
    :param clr: clearance in pixels added around every known obstacle
    :param unknown: also draw the dynamic/unknown obstacles when True
    :param map_flag: 1 or 2, selects which obstacle layout to draw
    :return: the same canvas, modified in place
    """
    # Uncomment to use the cv2 functions to create the obstacle space
    # cv2.circle(canvas, (300,65),45,(255,0,0),-1)
    # cv2.fillPoly(canvas, pts = [np.array([[115,40],[36,65],[105,150],[80,70]])], color=(255,0,0)) #Arrow
    # cv2.fillPoly(canvas, pts = [np.array([[200,110],[235,130],[235,170],[200,190],[165,170],[165,130]])], color=(255,0,0)) #Hexagon
    height,width,_ = canvas.shape
    for i in range(width):
        for j in range(height):
            if map_flag == 1:
                # Map 01: border walls, a circle, a hexagon and an arrow shape.
                if(i<=clr) or (i>=(400-clr)) or (j<=clr) or (j>=(250-clr)):
                    canvas[j][i] = [255,0,0]
                if ((i-300)**2+(j-65)**2-((40+clr)**2))<=0:
                    canvas[j][i] = [255,0,0]
                if (j+(0.57*i)-213.53)>=clr and (j-(0.57*i)+5.04+clr)>=0 and (i-235-clr)<=0 and (j+(0.57*i)-305.04-clr)<=0 and (j-(0.57*i)-76.465-clr)<=0 and (i-155-clr)>=0:
                    canvas[j][i] = [255,0,0]
                if ((j+(0.316*i)-66.1483-clr)>=0 and (j+(0.857*i)-140.156-clr)<=0 and (j-(0.114*i)-55.909-clr)<=0) or ((j-(1.23*i)-23.576-clr)<=0 and (j-(3.2*i)+197.763+clr)>=0 and (j-(0.114*i)-55.909-clr)>=0):
                    canvas[j][i] = [255,0,0]
            elif map_flag == 2:
                # Map 02: border walls plus five axis-aligned rectangles.
                if(i<=clr) or (i>=(400-clr)) or (j<=clr) or (j>=(250-clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=clr) and (j<=63)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=103-clr) and (j<=147+clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=187-clr) and (j<=(250-clr))):
                    canvas[j][i] = [255,0,0]
                if ((i>=251-clr) and (i<=281+clr) and (j>=42-clr) and (j<=105+clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=251-clr) and (i<=281+clr) and (j>=145-clr) and (j<=208+clr)):
                    canvas[j][i] = [255,0,0]
    if(unknown):
        # Second pass: circular obstacles the planner does not know about.
        for i in range(width):
            for j in range(height):
                if map_flag == 1:
                    if ((i-110)**2+(j-210)**2-((35)**2))<=0:
                        canvas[j][i] = [0,255,0]
                elif map_flag == 2:
                    if ((i-70)**2+(j-190)**2-((35)**2))<=0:
                        canvas[j][i] = [0,255,0]
                    if ((i-200)**2+(j-140)**2-((20)**2))<=0:
                        canvas[j][i] = [0,255,0]
    return canvas
def is_obstacle(next_width, next_height, canvas, unknown=False):
    """Return True when the (width, height) position lies inside an obstacle.

    Known obstacles are stored in channel 0 of the map; when ``unknown``
    is True, channel 1 (dynamic/unknown obstacles) is checked as well.
    Coordinates are rounded to the nearest pixel before indexing.
    """
    row = int(round(next_height))
    col = int(round(next_width))
    pixel = canvas[row][col]
    if unknown:
        return bool(pixel[1] == 255 or pixel[0] == 255)
    return bool(pixel[0] == 255)
def cost_to_goal(node, final):
    """Euclidean distance from ``node`` to the goal ``final``.

    :param node: current node as an (x, y) pair
    :param final: goal node as an (x, y) pair
    """
    dx = node[0] - final[0]
    dy = node[1] - final[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def compute_distance(node1, node2):
    """Euclidean length of the edge between ``node1`` and ``node2``.

    :param node1: first node as an (x, y) pair
    :param node2: second node as an (x, y) pair
    """
    return np.sqrt((node1[0] - node2[0]) ** 2 + (node1[1] - node2[1]) ** 2)
def nearest(sample, TV):
    """Return the tree vertex closest (Euclidean) to ``sample``.

    Ties are resolved in favour of the vertex that appears first in ``TV``.

    :param sample: query node as an (x, y) pair
    :param TV: non-empty list of tree vertices
    """
    return min(
        TV,
        key=lambda vertex: np.sqrt(
            (sample[0] - vertex[0]) ** 2 + (sample[1] - vertex[1]) ** 2
        ),
    )
def neighbor_nodes(sample, TV, d=10):
    """Return every vertex of ``TV`` strictly closer than ``d`` to ``sample``.

    :param sample: query node as an (x, y) pair
    :param TV: list of tree vertices
    :param d: Euclidean distance threshold (exclusive)
    """
    return [
        vertex
        for vertex in TV
        if np.sqrt((sample[0] - vertex[0]) ** 2 + (sample[1] - vertex[1]) ** 2) < d
    ]
def collision_free(X_nearest,X_rand,canvas,unknown=False): #Replace X_rand with X_new for steer function
    """
    Return True when the straight segment X_nearest -> X_rand avoids obstacles.

    The segment is rasterised one pixel per step along its longer-varying
    axis (columns when the x coordinates differ, rows for a vertical
    segment) and each sample is tested with ``is_obstacle``.
    Assumes both endpoints have integer coordinates -- ``range`` is driven
    directly by them (TODO confirm callers never pass floats).

    :param X_nearest: segment start as an (x, y) pair
    :param X_rand: segment end as an (x, y) pair
    :param canvas: obstacle map
    :param unknown: also treat the unknown (green) obstacles as blocking
    """
    if X_rand[0] != X_nearest[0]:
        # Non-vertical segment: walk left-to-right and interpolate y.
        x1 = min(X_nearest[0],X_rand[0])
        if(X_nearest[0] == x1):
            for w in range(X_nearest[0],X_rand[0]+1):
                h = ((X_rand[1] - X_nearest[1])/(X_rand[0] - X_nearest[0]))*(w - X_nearest[0]) + X_nearest[1]
                if(is_obstacle(int(w),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
        else:
            for w in range(X_rand[0],X_nearest[0]+1):
                h = ((X_nearest[1] - X_rand[1])/(X_nearest[0] - X_rand[0]))*(w - X_rand[0]) + X_rand[1]
                if(is_obstacle(int(w),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
    else:
        # Vertical segment: x is constant, walk bottom-to-top in rows.
        y1 = min(X_nearest[1],X_rand[1])
        if(y1 == X_nearest[1]):
            for h in range(X_nearest[1],X_rand[1]+1):
                if(is_obstacle(int(X_nearest[0]),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
        else:
            for h in range(X_rand[1],X_nearest[1]+1):
                if(is_obstacle(int(X_rand[0]),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
    return True
def rewire(node1,node2,node_dict,canvas,final_state):
    """
    Try to make ``node1`` the parent of ``node2`` if that lowers its cost.

    If the segment node1 -> node2 is collision free and routing through
    node1 gives a lower cost-to-goal than node2's current cost, the entry
    for node2 in ``node_dict`` is overwritten as [parent, new_cost] and the
    new parent is returned; otherwise node2's existing parent
    (``node_dict[tuple(node2)][0]``, which must already exist) is returned.

    :param node1: candidate parent node
    :param node2: node being rewired (must already be in node_dict)
    :param node_dict: mapping node-tuple -> [parent, cost]
    :param canvas: obstacle map used for the collision test
    :param final_state: goal node (tree root) used for cost-to-goal
    :return: the (possibly unchanged) parent of node2
    """
    # print("In rewire")
    parent = []
    if collision_free(node1,node2, canvas) is True:
        if (compute_distance(node1, node2) + cost_to_goal(node1, final_state)) < cost_to_goal(node2, final_state):
            node_dict[tuple(node2)] = [node1, compute_distance(node1, node2) + cost_to_goal(node1, final_state)]
            parent = node1.copy()
    # An empty ``parent`` means no rewiring happened; fall back to the
    # parent currently recorded for node2.
    if len(parent) != 0:
        return parent
    else:
        return node_dict[tuple(node2)][0]
def mod_rrt_star(initial_state,final_state,canvas):
    """
    Grow a modified RRT* tree rooted at the GOAL until it reaches the start.

    Uniformly samples pixels of ``canvas``, connects each collision-free
    sample to its nearest tree vertex, then rewires against neighbours
    within radius 10. Terminates once a sample lands within 5 pixels of
    ``initial_state``.

    :param initial_state: start node the tree must reach
    :param final_state: goal node (tree root)
    :param canvas: obstacle map
    :return: (TE, TV, X_rand) -- the parent-edge dict keyed by node tuple,
             the vertex list, and the final sample near the start
    """
    TV = []
    TE = {}
    TV.append(final_state)
    node_dict = {}
    node_dict[tuple(final_state)] = final_state
    while True:
        width_rand = random.randint(0,canvas.shape[1]-1)
        height_rand = random.randint(0,canvas.shape[0]-1)
        X_rand = [width_rand,height_rand]
        # print("Random sample: ", X_rand)
        X_nearest = nearest(X_rand,TV)
        #Steer function to be implemented later for non-holonomic constraints.
        #X_new <- Steer(X_rand, X_nearest)
        #Here X_rand is X_new
        if(collision_free(X_nearest, X_rand, canvas) is False):
            continue
        X_parent = X_nearest.copy()
        node_dict[tuple(X_rand)] = [X_parent, cost_to_goal(X_nearest,final_state) + compute_distance(X_nearest,X_rand)]
        X_neighbors = neighbor_nodes(X_rand, TV, 10)
        # First rewiring pass: pick the cheapest neighbour as X_rand's parent.
        for n in X_neighbors:
            X_parent = rewire(n, X_rand, node_dict, canvas, final_state)
        TV.append(X_rand)
        TE[tuple(X_rand)] = X_parent.copy()
        # print("X_parent", X_parent)
        # Second pass: let X_rand adopt any neighbour it can serve cheaper.
        for n in X_neighbors:
            X_parent_temp = rewire(X_rand, n, node_dict, canvas, final_state)
            if X_parent_temp == X_rand:
                # print("Before Pop", n)
                TE.pop(tuple(n))
                TE[tuple(n)] = X_rand.copy()
        if compute_distance(X_rand,initial_state) < 5:
            print("RRT* Converged!")
            return TE, TV, X_rand
def backtrack(initial_state, final_state, edges, canvas):
    """Follow parent links in ``edges`` from the start node to the goal.

    :param initial_state: node the search terminated at (start of the path)
    :param final_state: goal node (root of the tree)
    :param edges: mapping from node tuple to that node's parent node
    :param canvas: unused here; kept for interface compatibility
    :return: list of nodes from ``initial_state`` to ``final_state``
    """
    path = []
    current = initial_state.copy()
    parent = edges[tuple(current)]
    while tuple(parent) != tuple(final_state):
        path.append(current)
        current = parent.copy()
        parent = edges[tuple(current)]
    path.append(current)
    path.append(final_state)
    print("Back Tracking Done!")
    return path
def path_sampling(path):
    """
    Densify ``path`` to one integer point per pixel column (or row).

    Every consecutive pair of waypoints is linearly interpolated: for a
    non-vertical segment one sample per x step, for a vertical segment one
    per y step. Endpoints of adjacent segments are both emitted, so shared
    waypoints appear twice in the result.
    NOTE(review): the two prints below are debug leftovers that fire on
    every vertical segment -- confirm they are still wanted.

    :param path: list of (x, y) waypoints with integer coordinates
    :return: densified list of [int(x), int(y)] samples
    """
    sampled_path = []
    for i in range(0,len(path)-1):
        X_rand = path[i]
        X_nearest = path[i+1]
        if X_rand[0] != X_nearest[0]:
            x1 = min(X_nearest[0],X_rand[0])
            if(X_nearest[0] == x1):
                for w in range(X_nearest[0],X_rand[0]+1):
                    h = ((X_rand[1] - X_nearest[1])/(X_rand[0] - X_nearest[0]))*(w - X_nearest[0]) + X_nearest[1]
                    sampled_path.append([int(w),int(h)])
            else:
                for w in range(X_rand[0],X_nearest[0]+1):
                    h = ((X_nearest[1] - X_rand[1])/(X_nearest[0] - X_rand[0]))*(w - X_rand[0]) + X_rand[1]
                    sampled_path.append([int(w),int(h)])
        else:
            print("vertical line", X_nearest[1], X_rand[1])
            y1 = min(X_nearest[1],X_rand[1])
            print("y1 ", y1)
            if(y1 == X_nearest[1]):
                for h in range(X_nearest[1],X_rand[1]+1):
                    sampled_path.append([int(X_nearest[0]),int(h)])
            else:
                for h in range(X_rand[1],X_nearest[1]+1):
                    sampled_path.append([int(X_rand[0]),int(h)])
    return sampled_path
def path_smoothening(sampled_path, final_state, canvas, unknown = False):
    """
    Greedily shortcut ``sampled_path`` by connecting each kept point to the
    farthest later point reachable along a collision-free straight segment.

    Starting from the first sample, the farthest candidate is tried first
    and the search backs off toward nearer points until ``collision_free``
    accepts one; the process repeats until the last sample is reached.
    Assumes ``sampled_path`` is non-empty.

    :param sampled_path: densified path (list of [x, y] samples)
    :param final_state: goal node (unused in the logic below)
    :param canvas: obstacle map used for the collision tests
    :param unknown: also treat unknown (green) obstacles as blocking
    :return: the shortcut path, a subsequence of ``sampled_path``
    """
    shortest_path = []
    # if len(sampled_path) > 0:
    shortest_path.append(sampled_path[0])
    print("Length of Sampled Path: ",len(sampled_path))
    while (tuple(shortest_path[-1]) != tuple(sampled_path[-1])):
        # print(sampled_path.index(shortest_path[-1]))
        for i in range(sampled_path.index(shortest_path[-1]),len(sampled_path)):
            if collision_free(shortest_path[-1],sampled_path[len(sampled_path)-1-i+sampled_path.index(shortest_path[-1])], canvas, unknown):
                shortest_path.append(sampled_path[len(sampled_path)-1-i+sampled_path.index(shortest_path[-1])])
                break
    # print(shortest_path)
    return shortest_path
def path_replanning(path, dynamic_map, edges, vertices, initial, final):
    """
    Patch ``path`` around obstacles that only exist in ``dynamic_map``.

    For each path point that is blocked (or whose incoming segment is
    blocked) in the dynamic map, candidate replacement vertices are taken
    from the original tree within radius 50 of the previous point, filtered
    to those reachable collision-free, and the Pareto-dominant one (closest
    to the previous point and cheapest to the goal) is substituted in.

    :param path: densified path to repair
    :param dynamic_map: map that includes the unknown (green) obstacles
    :param edges: tree edges (unused in the logic below)
    :param vertices: all RRT* tree vertices, used as replacement candidates
    :param initial: start node (unused in the logic below)
    :param final: goal node, used for the cost-to-goal criterion
    :return: the repaired path (same length as ``path``)
    """
    replanned_path = path.copy()
    print("in path replanning")
    for i in range(1,len(path)):
        node = replanned_path[i]
        X_next = node.copy()
        if is_obstacle(node[0],node[1],dynamic_map,True) or (not collision_free(replanned_path[i-1],X_next,dynamic_map, unknown=True)):
            X_curr = replanned_path[i-1].copy()
            X_candi = []
            X_near = neighbor_nodes(X_curr, vertices, d=50)
            for near in X_near:
                if collision_free(X_curr,near,dynamic_map, unknown=True):
                    if near not in replanned_path:
                        X_candi.append(near)
            X_next = pareto(X_candi,X_curr,final)
            # X_next = X_candi[0]
            if(X_next is not None):
                # print("Nearby node found!")
                # X_curr = X_next.copy()
                # print("Previous: ",replanned_path[i])
                replanned_path[i] = X_next.copy()
                # print("Updated: ",replanned_path[i])
            else:
                print("Not Enough Samples found nearby and hence the path goes through the obstacle")
    # print("Replanned Path: ", replanned_path)
    return replanned_path
def pareto(X_candi,initial,final):
    """
    Pick the Pareto-dominant candidate under two objectives:
    distance from ``initial`` and cost-to-goal toward ``final``.

    For each candidate, ID counts how many others dominate it and OD how
    many it dominates. Among the non-dominated candidates (ID == 0) the one
    dominating the most others is returned.
    NOTE(review): the "NO PARETO!" and "No Candidate Nodes" branches fall
    through and implicitly return None, which path_replanning relies on --
    confirm that is intentional before changing.

    :param X_candi: candidate nodes as (x, y) pairs
    :param initial: reference node for the distance objective
    :param final: goal node for the cost-to-goal objective
    :return: the dominant candidate, or None when there is none
    """
    paretos = []
    for candidates in X_candi:
        paretos.append([compute_distance(candidates,initial), cost_to_goal(candidates,final)])
    if(len(paretos) != 0):
        pareto_dict = {}
        for i in range(0,len(paretos)):
            dominant_node = paretos[i].copy()
            ID = 0
            OD = 0
            for j in range(0,len(paretos)):
                if(tuple(paretos[i]) == tuple(paretos[j])):
                    continue
                elif ((paretos[j][0]<=dominant_node[0] and paretos[j][1]<=dominant_node[1]) and (paretos[j][0]<dominant_node[0] or paretos[j][1]<dominant_node[1])):
                    ID += 1
                elif (((paretos[j][0]>=dominant_node[0] and paretos[j][1]>=dominant_node[1]) and (paretos[j][0]>dominant_node[0] or paretos[j][1]>dominant_node[1]))):
                    OD += 1
            pareto_dict[tuple(dominant_node)] = [ID, OD]
        pareto_keys = list(pareto_dict.keys())
        pareto_IDs = []
        pareto_ODs = []
        for p_key in pareto_keys:
            pareto_IDs.append(pareto_dict[tuple(p_key)][0])
            pareto_ODs.append(pareto_dict[tuple(p_key)][1])
        zero_ID_index = list(np.where(np.array(pareto_IDs)==0))[0]
        # print("Zero ID Index Type: ",type(zero_ID_index), zero_ID_index)
        if(len(zero_ID_index)>1):
            # Several non-dominated candidates: keep the one with max OD.
            zero_ID_keys = []
            for i in zero_ID_index:
                zero_ID_keys.append(pareto_keys[i])
            zero_ID_max_OD = []
            for key in zero_ID_keys:
                zero_ID_max_OD.append(pareto_dict[tuple(key)][1])
            max_OD = np.max(zero_ID_max_OD)
            max_OD_key = zero_ID_keys[zero_ID_max_OD.index(max_OD)]
            # print(max_OD_key)
            return X_candi[paretos.index(list(max_OD_key))]
        elif(len(zero_ID_index)==1):
            return X_candi[paretos.index(list(pareto_keys[zero_ID_index[0]]))]
        else:
            print("NO PARETO!")
    else:
        print("No Candidate Nodes")
if __name__ == '__main__':
    # End-to-end demo: build the RRT* tree on the known map, extract and
    # smooth a path, then replan it against a map with unknown obstacles.
    parser = argparse.ArgumentParser()
    parser.add_argument('--map1', action='store_true',
                        help="Loads Map 01")
    parser.add_argument('--map2', action='store_true',
                        help="Loads Map 02")
    args = parser.parse_args()
    #Gives the time at which the program has started
    canvas = np.ones((250,400,3),dtype="uint8") #Creating a blank canvas
    # NOTE(review): if neither --map1 nor --map2 is passed, ``flag`` is
    # never assigned and draw_obstacles below raises NameError -- consider
    # defaulting to one of the maps.
    if args.map1:
        flag = 1
    elif args.map2:
        flag = 2
    canvas = draw_obstacles(canvas,clr=5,unknown=False,map_flag=flag) #Draw the obstacles in the canvas, default point robot with 5 units of clearance
    initial_state,final_state = [10,10], [350,150] #Take the start and goal node from the user
    #Changing the cartesian coordinates to image coordinates:
    initial_state[1] = canvas.shape[0]-1 - initial_state[1]
    final_state[1] = canvas.shape[0]-1 - final_state[1]
    #Write a condition to check if the initial state and final state are in the obstacle space and exit from program and ask to rerun with valid start and goal positions
    if(canvas[initial_state[1]][initial_state[0]][0]==255 or canvas[final_state[1]][final_state[0]][0]==255):
        print("Given Start or Goal Node is in the Obstacle Region. Please re-run with Valid Coordinates")
        exit()
    start_time = time.time()
    #Start Node and Goal Node
    cv2.circle(canvas,(int(initial_state[0]),int(initial_state[1])),5,(0,255,0),-1)
    cv2.circle(canvas,(int(final_state[0]),int(final_state[1])),5,(0,0,255),-1)
    #Generate the graph from the Modified RRT* Algorithm
    edges, vertices, s_node = mod_rrt_star(initial_state,final_state,canvas) #Compute the path using A Star Algorithm
    tree_canvas = canvas.copy()
    #Draw the edges
    for key in edges:
        # cv2.line(canvas, tuple(key), tuple(edges[key]), (255, 128, 223), 2)
        cv2.line(tree_canvas, tuple(key), tuple(edges[key]), (0, 0, 255), 3)
        cv2.circle(tree_canvas,(key[0], key[1]), 1, (0,255,255), -1)
        cv2.circle(tree_canvas,(edges[key][0],edges[key][1]), 1, (0,255,0), -1)
    # cv2.imshow("Modified RRT* Tree Expansion", canvas)
    #Generate a dynamic map (same layout plus the unknown green obstacles)
    dynamic_map = np.ones((250,400,3),dtype="uint8") #Creating a blank canvas
    dynamic_map = draw_obstacles(dynamic_map,clr=5,unknown=True, map_flag=flag) #Draw the obstacles in the canvas, default point robot with 5 units of clearance
    cv2.imshow("Known Map with Unknown Obstacles", dynamic_map)
    #Backtack the path to reach from start node to goal node
    path = backtrack(s_node, final_state, edges, canvas)
    rrt_path_canvas = tree_canvas.copy()
    for i in range(1,len(path)):
        cv2.line(rrt_path_canvas, tuple(path[i-1]), tuple(path[i]), (0, 255, 0), 3)
    # cv2.imshow("Modified RRT* Path", rrt_path_canvas)
    #Sample and Smoothen the path from the list returned from the backtracking function.
    sampled_path = path_sampling(path)
    smoothened_path = path_smoothening(sampled_path.copy(),final_state,canvas,unknown = False)
    smooth_rrt_path_canvas = rrt_path_canvas.copy()
    for i in range(1,len(smoothened_path)):
        cv2.line(smooth_rrt_path_canvas, tuple(smoothened_path[i-1]), tuple(smoothened_path[i]), (255, 255, 255), 3)
    # cv2.imshow("Smoothened Modified RRT* Path", smooth_rrt_path_canvas)
    #Resample the smoothened path
    sampled_path = path_sampling(smoothened_path)
    #Replan the path from the dynamic obstacles
    replanned_path = path_replanning(sampled_path, dynamic_map, edges, vertices,s_node,final_state)
    replanned_path_canvas = dynamic_map.copy()
    for i in range(0,len(replanned_path)):
        cv2.circle(replanned_path_canvas,tuple(replanned_path[i]), 2, (0,145,145), -1)
    # Split the result at the first point where replanning diverged: keep
    # the unchanged prefix in prev_path and the patched tail in n_path.
    n_path = []
    prev_path = []
    for i in range(0,len(replanned_path)):
        if(tuple(sampled_path[i]) == tuple(replanned_path[i])):
            prev_path.append(sampled_path[i])
            continue
        else:
            # print("N Path Append")
            n_path.append(sampled_path[i-2])
            n_path.append(sampled_path[i-1])
            for j in range(i,len(replanned_path)):
                n_path.append(replanned_path[j])
            break
    # cv2.imshow("Replanned Modified RRT* Path", replanned_path_canvas)
    # replanned_sampled = path_sampling(replanned_path)
    # print("New path ",n_path)
    new_replanned_path = path_smoothening(n_path.copy(), final_state, dynamic_map, unknown=True)
    smooth_replanned_path_canvas = replanned_path_canvas.copy()
    for i in range(1,len(sampled_path)):
        cv2.line(dynamic_map, tuple(sampled_path[i-1]), tuple(sampled_path[i]), (0, 137, 255), 3)
    # print(replanned_path)
    for i in range(1,len(prev_path)):
        cv2.line(smooth_replanned_path_canvas, tuple(prev_path[i-1]), tuple(prev_path[i]), (255, 128, 223), 2)
    for i in range(1,len(new_replanned_path)):
        cv2.line(smooth_replanned_path_canvas, tuple(new_replanned_path[i-1]), tuple(new_replanned_path[i]), (255, 128, 223), 2)
        # cv2.circle(dynamic_map,tuple(replanned_path[i]), 3, (255,255,255), -1)
    # cv2.imshow("Smoothened Replanned Modified RRT* Path",dynamic_map)
    end_time = time.time() #Time taken to run the whole algorithm to find the optimal path
    cv2.imshow("Known Map with Initial & Final Nodes", canvas)
    cv2.imshow("Modified RRT* Tree Expansion", tree_canvas)
    # cv2.imshow("Known Map with Unknown Obstacles", dynamic_map)
    cv2.imshow("Modified RRT* Path", rrt_path_canvas)
    cv2.imshow("Smoothened Modified RRT* Path", smooth_rrt_path_canvas)
    # cv2.imshow("Replanned Pareto Dominant Nodes", replanned_path_canvas)
    cv2.imshow("Smoothened Replanned Modified RRT* Path", smooth_replanned_path_canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# print("Code Execution Time: ",end_time-start_time) #Prints the total execution time | okritvik/MOD-RRT-Star-Implementation-Point-Robot | mod_rrt_star.py | mod_rrt_star.py | py | 22,275 | python | en | code | 4 | github-code | 36 |
71326379303 | import numpy as np
import sympy as sp
import math
from matplotlib import pyplot as plt
#BEGIN EXERCISE 1
def left_endpoint_sum(f, a, b, n):
    """Left-endpoint Riemann sum of ``f`` over [a, b] with ``n`` equal subintervals."""
    width = (b - a) / n
    return width * sum(f(a + k * width) for k in range(n))
def right_endpoint_sum(f, a, b, n):
    """Right-endpoint Riemann sum of ``f`` over [a, b] with ``n`` equal subintervals."""
    width = (b - a) / n
    return width * sum(f(a + k * width) for k in range(1, n + 1))
#END EXERCISE 1
#BEGIN EXERCISE 2
# Left/right Riemann sums of e**(-x**2) over [0, 0.5] with 5, 10 and 100
# subintervals, each pair plotted as step functions against the true curve.
x = sp.symbols('x')
f = lambda x : math.e**(-1*x**2)
LRS5 = left_endpoint_sum(f, 0, 0.5, 5) #Left Riemann Sum with 5 intervals
RRS5 = right_endpoint_sum(f, 0, 0.5, 5) #Right Riemann Sum with 5 intervals
LRS10 = left_endpoint_sum(f, 0, 0.5, 10)
RRS10 = right_endpoint_sum(f, 0, 0.5, 10)
LRS100 = left_endpoint_sum(f, 0, 0.5, 100)
RRS100 = right_endpoint_sum(f, 0, 0.5, 100)
xfunc = np.linspace(0,0.5,1001)
x5 = np.linspace(0,0.5,6)
x10 = np.linspace(0,0.5,11)
x100 = np.linspace(0,0.5,101)
yfunc = f(xfunc)
y5 = f(x5)
y10 = f(x10)
y100 = f(x100)
# where="post" visualises the left sum, where="pre" the right sum.
plt.step(x5, y5, 'b', where="post", label="Left Riemann Sum = " + str(LRS5))
plt.step(x5, y5, 'm', where="pre", label="Right Riemann Sum = " + str(RRS5))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 5 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x10, y10, 'b', where="post", label="Left Riemann Sum = " + str(LRS10))
plt.step(x10, y10, 'm', where="pre", label="Right Riemann Sum = " + str(RRS10))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 10 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x100, y100, 'b', where="post", label="Left Riemann Sum = " + str(LRS100))
plt.step(x100, y100, 'm', where="pre", label="Right Riemann Sum = " + str(RRS100))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 100 divisions")
plt.legend(fontsize=9)
plt.show()
#END EXERCISE 2
#BEGIN EXERCISE 3
def midpoint_sum(f, a, b, n):
    """Midpoint Riemann sum of ``f`` over [a, b] with ``n`` equal subintervals.

    Fixes the original ``for i in range(0.5, n + 0.5, 1)``, which raises
    TypeError because ``range`` only accepts integer arguments; the midpoint
    of subinterval k is a + (k + 0.5) * d for k = 0..n-1.
    """
    d = (b - a) / n
    total = 0
    for k in range(n):
        total += f(a + (k + 0.5) * d)
    return d * total
def trapezoid_sum(f, a, b, n):
    """Trapezoidal-rule approximation of the integral of ``f`` over [a, b].

    Endpoint samples get weight 1, interior samples weight 2, and the
    weighted total is scaled by half the subinterval width.
    """
    d = (b - a) / n
    total = f(a)
    for k in range(1, n):
        total += 2 * f(a + k * d)
    total += f(b)
    return 0.5 * d * total
#END EXERCISE 3
#BEGIN EXERCISE 4
# Midpoint and trapezoidal sums of e**(-x**2) over [0, 0.5] with 5, 10 and
# 100 subintervals, plotted against the true curve.
# NOTE(review): confirm midpoint_sum runs with these arguments -- as written
# above it drives range() with float endpoints.
x = sp.symbols('x')
f = lambda x : math.e**(-1*x**2)
MPS5 = midpoint_sum(f, 0, 0.5, 5) #Midpoint Sum with 5 intervals
TPS5 = trapezoid_sum(f, 0, 0.5, 5) #Trapezoidal Sum with 5 intervals
MPS10 = midpoint_sum(f, 0, 0.5, 10)
TPS10 = trapezoid_sum(f, 0, 0.5, 10)
MPS100 = midpoint_sum(f, 0, 0.5, 100)
TPS100 = trapezoid_sum(f, 0, 0.5, 100)
xfunc = np.linspace(0,0.5,1001)
x5 = np.linspace(0,0.5,6)
x10 = np.linspace(0,0.5,11)
x100 = np.linspace(0,0.5,101)
yfunc = f(xfunc)
y5 = f(x5)
y10 = f(x10)
y100 = f(x100)
plt.step(x5, y5, 'y', where="mid", label="Midpoint Sum = " + str(MPS5))
plt.plot(x5, y5, 'r', label="Trapezoidal Sum = " + str(TPS5))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 5 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x10, y10, 'y', where="mid", label="Midpoint Sum = " + str(MPS10))
plt.plot(x10, y10, 'r', label="Trapezoidal Sum = " + str(TPS10))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 10 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x100, y100, 'y', where="mid", label="Midpoint Sum = " + str(MPS100))
plt.plot(x100, y100, 'r', label="Trapezoidal Sum = " + str(TPS100))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 100 divisions")
plt.legend(fontsize=9)
plt.show()
#END EXERCISE 4
#BEGIN EXERCISE 5
def simpsons_rule_sum(f, a, b, n):
    """Simpson's-rule approximation of the integral of ``f`` over [a, b].

    ``n`` (the number of subintervals) must be even; endpoint samples get
    weight 1, even-index interior samples weight 2, odd-index samples 4.
    """
    assert n % 2 == 0, "n must be even"
    d = (b - a) / n
    total = f(a)
    for i in range(1, n):
        weight = 2 if i % 2 == 0 else 4
        total += weight * f(a + i * d)
    total += f(b)
    return (1 / 3) * d * total
#END EXERCISE 5
#BEGIN EXERCISE 6
# Simpson's-rule estimates of the integral of e**(-x**2) over [0, 0.5].
x = sp.symbols('x')
f = lambda x : math.e**(-1*x**2)
SRS10 = simpsons_rule_sum(f, 0, 0.5, 10) #Simpsons Rule Sum with 10 intervals
SRS100 = simpsons_rule_sum(f, 0, 0.5, 100)
print(SRS10)
print(SRS100)
#END EXERCISE 6
# Gather every labelled approximation computed above and sort them by value
# so the methods can be compared side by side.
Approximations = []
Approximations += [("LRS5",LRS5), ("LRS10",LRS10), ("LRS100",LRS100)]
Approximations += [("RRS5",RRS5), ("RRS10",RRS10), ("RRS100",RRS100)]
Approximations += [("MPS5",MPS5), ("MPS10",MPS10), ("MPS100",MPS100)]
Approximations += [("TPS5",TPS5), ("TPS10",TPS10), ("TPS100",TPS100)]
Approximations += [("SRS10",SRS10), ("SRS100",SRS100)]
Approximations.sort(key = lambda y : y[1])
print(Approximations)
#BEGIN EXERCISE 7
# Error bounds for the [0, 0.5] estimates. fppmag is |f''(x)| for
# f(x) = e**(-x**2), i.e. |(4x**2 - 2) e**(-x**2)|; the plot is used to see
# that its maximum on [0, 0.5] occurs at x = 0, so K = fppmag(0).
x = sp.symbols('x')
f = lambda x : math.e**(-1*x**2)
fppmag = lambda x : abs(4*x**2 * math.e**(-1*x**2) - 2*math.e**(-1*x**2))
xtest = np.linspace(0,0.5,101)
ytest = fppmag(xtest)
plt.plot(xtest,ytest)
plt.show()
K = fppmag(0)
def midpoint_errorf(k, a, b, n):
    """Midpoint-rule error bound K(b-a)^3 / (24 n^2)."""
    width = b - a
    return k * width ** 3 / (24 * n ** 2)
def trapezoid_errorf(k, a, b, n):
    """Trapezoidal-rule error bound K(b-a)^3 / (12 n^2)."""
    width = b - a
    return k * width ** 3 / (12 * n ** 2)
def simpsons_errorf(k, a, b, n):
    """Simpson's-rule error bound K(b-a)^5 / (180 n^4)."""
    width = b - a
    return k * width ** 5 / (180 * n ** 4)
# Bounds for the n = 10 estimates, and the resulting intervals that must
# contain the true value of the integral.
T10 = trapezoid_errorf(K,0,0.5,10)
M10 = midpoint_errorf(K,0,0.5,10)
S10 = simpsons_errorf(K,0,0.5,10)
Trange = (TPS10 - T10, TPS10 + T10)
Mrange = (MPS10 - M10, MPS10 + M10)
Srange = (SRS10 - S10, SRS10 + S10)
print(Trange) #T10
print(Mrange) #M10
print(Srange) #S10
#END EXERCISE 7
#BEGIN EXERCISE 8
# Approximate the integral of 1/x over [1, 5] (exact value: ln 5) with every
# rule, bound the error of each, and compare against the true error.
x = sp.symbols('x')
f = lambda x : 1/x
L10 = left_endpoint_sum(f,1,5,10)
R10 = right_endpoint_sum(f,1,5,10)  # fixed: originally called left_endpoint_sum twice
T10 = trapezoid_sum(f,1,5,10)
M10 = midpoint_sum(f,1,5,10)
S10 = simpsons_rule_sum(f,1,5,10)
# |f''(x)| = |2/x**3|, used as the K in the error bounds below.
fppmag = lambda x : abs(2/(x**3))
xtest = np.linspace(1,5,101)
ytest = fppmag(xtest)  # fixed: was assigned to 'yest', so a stale ytest from Exercise 7 got plotted
plt.plot(xtest,ytest)
plt.show()
K = fppmag(1)  # |f''| is decreasing on [1, 5], so its maximum is at x = 1
LE = L10-R10
RE = L10-R10
TE = trapezoid_errorf(K,1,5,10)
ME = midpoint_errorf(K,1,5,10)
SE = simpsons_errorf(K,1,5,10)
Lrange = (L10-LE,L10+LE)
Rrange = (R10-RE,R10+RE)
Trange = (T10-TE,T10+TE)
Mrange = (M10-ME,M10+ME)
Srange = (S10-SE,S10+SE)
print(Lrange)
print(Rrange)
print(Trange)
print(Mrange)
print(Srange)
# True (signed) errors against the exact integral ln(5).
LET = L10 - math.log(5)
RET = R10 - math.log(5)
TET = T10 - math.log(5)
MET = M10 - math.log(5)
SET = S10 - math.log(5)
print(LET) #True lefthand error
print(RET) #True righthand error
print(TET) #True trapezoidal error
print(MET) #True midpoint error
print(SET) #True simpsons error
#END EXERCISE 8
| Drew-Morris/Real-Analysis-PY | Integration/Integration-Vim.py | Integration-Vim.py | py | 6,124 | python | en | code | 0 | github-code | 36 |
37372176191 | import pyconll
from configuration import (
UD_PATH,
UD_STDCH_PATH, UD_CANTO_PATH,
UD_STDCH_CONLLU, UD_CANTO_CONLLU
)
import os
# Fail fast with download instructions when either UD test file is missing.
if not os.path.exists(UD_CANTO_CONLLU):
    raise IOError("You need to download yue_hk-ud-test.conllu from\n"
                  " https://github.com/UniversalDependencies/UD_Cantonese-HK\n"
                  " and place it under %s!\n" % UD_CANTO_PATH)
if not os.path.exists(UD_STDCH_CONLLU):
    raise IOError("You need to download zh_hk-ud-test.conllu from\n"
                  " https://github.com/UniversalDependencies/UD_Chinese-HK\n"
                  " and place it under %s!\n" % UD_STDCH_PATH)
def read_UD(file_path):
    """Parse a CoNLL-U file with pyconll and return the loaded corpus object."""
    return pyconll.load_from_file(file_path)
def read_UD_sentences(file_path, tokenize=True):
    """Load a CoNLL-U file and return its sentences.

    Each sentence is a list of token forms when ``tokenize`` is True,
    otherwise the forms concatenated into a single string.
    """
    sentences = []
    for sentence in read_UD(file_path):
        forms = [token.form for token in sentence]
        sentences.append(forms if tokenize else ''.join(forms))
    return sentences
def save_UD_sentences_for_TER(tokenize=False, stdch_file="UD_stdch.txt", canto_file="UD_canto.txt"):
    """Write the parallel UD test sentences into two text files for TER scoring.

    Each line ends with '(<sentence>)' built from the standard-Chinese side,
    apparently the per-segment ID convention expected by tercom -- confirm.
    Note: read_UD_sentences is called with its default tokenize=True here;
    this function's ``tokenize`` flag only controls whether tokens are
    space-separated or concatenated in the output.

    :param tokenize: space-separate tokens in the output when True
    :param stdch_file: output file name for the standard-Chinese side
    :param canto_file: output file name for the Cantonese side
    """
    stdch_sen = read_UD_sentences(UD_STDCH_CONLLU)
    canto_sen = read_UD_sentences(UD_CANTO_CONLLU)
    with open(os.path.join(UD_PATH, stdch_file), "w") as stdch_f:
        with open(os.path.join(UD_PATH, canto_file), "w") as canto_f:
            for stdch, canto in zip(stdch_sen, canto_sen):
                if tokenize:
                    stdch_f.write(' '.join(stdch) + ' ' + '(%s)' % ''.join(stdch) + '\n')
                    canto_f.write(' '.join(canto) + ' ' + '(%s)' % ''.join(stdch) + '\n')
                else:
                    stdch_f.write(''.join(stdch) + ' ' + '(%s)' % ''.join(stdch) + '\n')
                    canto_f.write(''.join(canto) + ' ' + '(%s)' % ''.join(stdch) + '\n')
    print("%s | %s saved. " % (stdch_file, canto_file))
def read_data(tokenize=False):
    """Return parallel (standard-Chinese, Cantonese) sentence pairs.

    Sentences come from the two UD test files; ``tokenize`` is forwarded to
    read_UD_sentences and controls whether each side is a token list or a
    concatenated string.
    """
    stdch_sen = read_UD_sentences(UD_STDCH_CONLLU, tokenize=tokenize)
    canto_sen = read_UD_sentences(UD_CANTO_CONLLU, tokenize=tokenize)
    return list(zip(stdch_sen, canto_sen))
if __name__ == '__main__':
    # Dump the parallel sentences to text files; score them with tercom
    # afterwards (see the example command below).
    save_UD_sentences_for_TER()
    '''
    Example command line for TER evaluation
    $ cd ./code/v2
    $ java -jar tercom-0.7.25/tercom.7.25.jar -r ../../data/dl-UD_Cantonese-Chinese/UD_canto.txt -h ../../data/dl-UD_Cantonese-Chinese/UD_stdch.txt -n ../../data/tmp_v2/TER.r_canto_h_stdch
    '''
| kiking0501/Cantonese-Chinese-Translation | code/v2/dao_UD.py | dao_UD.py | py | 2,441 | python | en | code | 6 | github-code | 36 |
14416840611 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 从数据库里面导出一定时间前的还持有份额的人员和产品数据,每个产品是一个Excel
import pymysql
from openpyxl import *
import os
# Scan the first column (product code) of every row: rows with the same code
# go into one Excel workbook, and a new workbook is started for each product.
# ``fields`` holds the column headers, ``data`` the result rows, ``path`` the
# output directory (current working directory when empty).
def SaveData2Excel(fields, data, path = ''):
    """Split ``data`` by product code and save one holder-register .xlsx per product.

    Assumes ``data`` is non-empty and sorted so rows of a product are
    contiguous; row[0] is the product code and row[1] the product name.
    :param fields: cursor description; f[0] of each entry is the column name
    :param data: result rows from the holders query
    :param path: output directory; defaults to the current working directory
    """
    if path == '':
        path = os.getcwd()
    wb = Workbook()
    ws = wb.active
    fundcode = data[0][0]
    fundname = data[0][1]
    # First row of the sheet holds the column names.
    ws.append([f[0] for f in fields])
    # Copy the rows into the workbook, rolling over to a fresh workbook
    # whenever the product code changes.
    for row in data:
        # Different product: save the finished workbook, then start a new one.
        if row[0] != fundcode:
            wb.save(path + os.sep + fundname + "_持有人名册.xlsx")
            print("{0}结束导出".format(fundname))
            wb = Workbook()
            ws = wb.active
            fundcode = row[0]
            fundname = row[1]
            ws.append([f[0] for f in fields])
        ws.append(row)
    # Save the last (still open) product's workbook.
    wb.save(path + os.sep + fundname + "_持有人名册.xlsx")
    wb = None
# Cut-off date: export holdings as of (and before) this date.
end_date = "2018-1-1"
# Output directory for the generated Excel files.
path = "C:\\Users\\gaos\\Documents\\PyqtProject\\output"
# Database connection.
connection = pymysql.connect(host='192.168.40.98', port=3306,\
                            user='selling_query',password='123456',db='private_data',charset='utf8')
cursor = connection.cursor()
# Export query: for every (product, user) pair, take the latest holders
# record up to {0} (the cut-off date) and keep only positive balances of
# active (status = 0) products of company 1, ordered by product then user.
sql_base = "SELECT \
    p.`code` as '产品代码', \
    p.`name` AS '产品名', \
    u.`name` AS '用户名', \
    concat(" + '"' + "'"+ '"' + ",u.certificate_no) as '认证号码', \
    h.current_share as '2017年末份额' \
FROM \
    holders h, \
    users u, \
    products p, \
    ( \
        SELECT \
            h1.product_id, \
            h1.user_id, \
            max(h1.data_log_id) log_id \
        FROM \
            holders h1, \
            users u, \
            products p \
        WHERE \
            TO_DAYS(h1.hold_at_str) <= TO_DAYS('{0}') \
            AND h1.product_id = p.id \
            AND h1.user_id = u.id \
        GROUP BY \
            h1.product_id, \
            h1.user_id \
    ) t \
WHERE \
    1 = 1 \
    AND t.log_id = h.data_log_id \
    AND t.product_id = h.product_id \
    and t.user_id = h.user_id \
    and h.current_share > 0 \
    AND h.product_id = p.id \
    AND p.`status` = 0 \
    AND p.company_id = 1 \
    and h.user_id = u.id \
ORDER BY \
    h.product_id, \
    h.user_id;"
# Fetch the data and export it to one Excel file per product.
sql = sql_base.format(end_date)
cursor.execute(sql)
result = cursor.fetchall()
if len(result) > 0:
    fields = cursor.description
    SaveData2Excel(fields, result, path)
# Close the database handles.
cursor.close()
connection.close() | matthew59gs/Projects | python/market/export_fund_share2.py | export_fund_share2.py | py | 2,544 | python | en | code | 0 | github-code | 36 |
28031460700 | import pickle
import numpy as np
import sklearn.base
from matplotlib.figure import figaspect
from sklearn.linear_model import LogisticRegression
from dataclasses import dataclass
from sklearn.preprocessing import StandardScaler
from . import network
from tqdm import tqdm
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import warnings
def _cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x) # * 180 / np.pi
return rho, phi
def _pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
@dataclass
class HopfieldSimulation:
    # Trajectory of one noisy Hopfield run: the network itself, the visited
    # states (one row per iteration) and the energy recorded at each step.
    hopnet: network.Hopfield
    states: np.ndarray
    energies: np.ndarray

    def save(self, filename):
        """
        Serialize this HopfieldSimulation to ``filename`` with pickle.
        :param filename: destination path
        """
        with open(filename, 'wb') as out_file:
            pickle.dump(self, out_file)
@dataclass
class HopfiledEmbedding:
    # Low-dimensional embedding of a Hopfield network's state space, plus its
    # attractor states and a classifier that maps embedded coordinates to
    # attractor basins.  (The "Hopfiled" typo is kept: the name is part of the
    # public, pickled interface.)
    hopnet: network.Hopfield                      # underlying Hopfield network
    embedding_model: sklearn.base.BaseEstimator   # fitted projection (e.g. PCA)
    attractors: dict                              # label -> attractor state vector
    attractor_model: sklearn.base.BaseEstimator   # embedded coords -> attractor label
    attractor_model_dim: int                      # embedding dims the classifier uses
    state_sample: np.ndarray                      # sampled simulated states
    attractor_sample: np.ndarray                  # attractor label per sampled state

    def save(self, filename):
        """
        Serialize this HopfieldEmbedding to ``filename`` with pickle.
        :param filename: destination path
        """
        with open(filename, 'wb') as f:
            pickle.dump(self, f)

    def plot(self,
             activations=None,
             plot_type='scatter',
             legend=True,
             density_bins=1000,
             density_sigma=20,
             ax=None,
             regimes_fill_kwargs=dict(),
             regimes_contour_kwargs=dict(),
             attractor_plot_type='scatter',
             attractor_kwargs=dict(),
             legend_kwargs=None,
             **kwargs):
        """
        Plot the attractor regimes and the data in the embedding space.
        :param activations: activations to plot (rows are observations; they are
            standardized and projected with ``embedding_model`` before plotting)
        :param plot_type: one of 'scatter', 'line', 'stream', 'hist2d'
            (not implemented), 'density', 'contour', 'contourf'
        :param regimes_fill_kwargs: kwargs for attractor regimes fill
        :param regimes_contour_kwargs: kwargs for attractor regimes contour lines
        :param attractor_plot_type: 'scatter' or 'glassbrain'
        :param ax: matplotlib axis (with polar projection); created if None
        :param kwargs: kwargs for the data plot call
        :return: the polar matplotlib axis
        """
        # Merge caller overrides into per-plot-type defaults.
        default_regimes_fill_kwargs = dict(alpha=.2, cmap='tab10')
        default_regimes_fill_kwargs.update(regimes_fill_kwargs)
        default_regimes_contour_kwargs = dict(colors="gray", linewidths=0.5)
        default_regimes_contour_kwargs.update(regimes_contour_kwargs)
        if attractor_plot_type == 'glassbrain':
            default_attractor_kwargs = dict(display_mode='x', colorbar=False)
        elif attractor_plot_type == 'scatter':
            default_attractor_kwargs = dict()
        else:
            raise ValueError("Unknown attractor plot type.")
        default_attractor_kwargs.update(attractor_kwargs)
        if plot_type == 'scatter':
            default_kwargs = dict(alpha=1.0, s=10, linewidths=0, c='black')
        elif plot_type == 'line':
            default_kwargs = dict()
        elif plot_type == 'stream':
            default_kwargs = dict(linewidth=5, color='gray', density=1.2, bins=10)
            default_kwargs.update(kwargs)
        elif plot_type == 'hist2d':
            default_kwargs = dict(bins=100, cmap='gray_r')
        elif plot_type == 'density':
            default_kwargs = dict(cmap='gray_r')
        elif plot_type == 'contour':
            default_kwargs = dict(alpha=1, linewidths=0.1)
        elif plot_type == 'contourf':
            default_kwargs = dict(levels=20, antialiased=True, cmap='Greens')
        else:
            raise ValueError("Unknown type.")
        default_kwargs.update(kwargs)
        if plot_type == 'stream':
            # These two keys are consumed here, not by streamplot itself.
            stream_linewidth = default_kwargs.pop('linewidth')
            stream_bins = default_kwargs.pop('bins')
        if ax is None:
            fig = plt.gcf()
            # Cartesian-based plot types need a hidden non-polar twin axis
            # (ax_cart) underneath the polar one.
            if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
                ax_cart = fig.add_axes([0,0,1,1], polar=False, frameon=False)
            ax = fig.add_axes([0,0,1,1], polar=True, frameon=False)
            w, h = figaspect(1)
            ax.figure.set_size_inches(w, h)
        else:
            ax.set_aspect('equal')
            ax.patch.set_alpha(0)
            if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
                ax_cart = ax.figure.add_axes(ax.get_position(), polar=False, frameon=False, zorder=-1)
        # todo: test if ax is specified
        # Radial limit: at least 1, extended to cover every embedded attractor.
        max_r = 1
        for l, attractor in self.attractors.items():
            att = StandardScaler().fit_transform(np.array(attractor).reshape(1, -1).T).T
            att_cart = self.embedding_model.transform(att)[:, :2]
            r, th = _cart2pol(att_cart[:, 0], att_cart[:, 1])
            max_r = max(max_r, r.squeeze())
        if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
            ax_cart.set_xlim([-1.1 * max_r, 1.1 * max_r])
            ax_cart.set_ylim([-1.1 * max_r, 1.1 * max_r])
            ax_cart.set_xticks([])
            ax_cart.set_yticks([])
            ax_cart.grid(False)
        ax.set_ylim([0, 1.1 * max_r])
        # plot actual data
        if activations is not None:
            # transform activations to embedding space
            activations = StandardScaler().fit_transform(activations.T).T
            embedded = self.embedding_model.transform(activations)
            r, th = _cart2pol(embedded[:, 0], embedded[:, 1])
            if plot_type == 'scatter':
                plot = ax.scatter(th, r, **default_kwargs)
                # produce a legend with a cross-section of sizes from the scatter
                if legend_kwargs is not None:
                    handles, labels = plot.legend_elements(prop="colors")
                    legend = ax.legend(handles, labels, **legend_kwargs)
            elif plot_type == 'line':
                plot = ax.plot(th, r, **default_kwargs)
            elif plot_type == 'stream':
                # Bin the successive-state displacement vectors into a regular
                # grid, then draw a streamplot whose line width scales with
                # the mean local speed.
                directions = embedded[1:, :] - embedded[:-1, :]
                from scipy.stats import binned_statistic_2d
                dir_x, x_edges, y_edges, _ = binned_statistic_2d(embedded[:-1, 1], embedded[:-1, 0], directions[:, 0],
                                                                 statistic=np.mean,
                                                                 bins=[np.linspace(-max_r*1.1, max_r*1.1, stream_bins),
                                                                       np.linspace(-max_r*1.1, max_r*1.1, stream_bins)])
                dir_y, x_edges, y_edges, _ = binned_statistic_2d(embedded[:-1, 1], embedded[:-1, 0], directions[:, 1],
                                                                 statistic=np.mean,
                                                                 bins=[np.linspace(-max_r * 1.1, max_r * 1.1,
                                                                                   stream_bins),
                                                                       np.linspace(-max_r * 1.1, max_r * 1.1,
                                                                                   stream_bins)])
                x, y = np.meshgrid((x_edges[1:] + x_edges[:-1]) / 2,
                                   (y_edges[1:] + y_edges[:-1]) / 2)
                speed = np.sqrt(dir_x ** 2 + dir_y ** 2)
                ax_cart.streamplot(x, y, dir_x, dir_y,
                                   linewidth= stream_linewidth * speed / speed[~ np.isnan(speed)].max(),
                                   **default_kwargs)
            elif plot_type == 'hist2d':
                raise NotImplementedError("Not implemented yet.")
                #plot = ax.hist2d(th, r, **default_kwargs)
            elif plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
                # 2D histogram smoothed with a Gaussian kernel; near-zero
                # density cells are blanked out (NaN) so they stay unpainted.
                H, x_edges, y_edges = np.histogram2d(embedded[:, 1], embedded[:, 0],
                                                     bins=density_bins,
                                                     density=True,
                                                     range=[[-max_r*1.2, max_r*1.2], [-max_r*1.2, max_r*1.2]])
                from scipy.ndimage import gaussian_filter
                H = gaussian_filter(H, sigma=density_sigma, mode='wrap')
                H[H<0.0001] = np.nan
                x, y = np.meshgrid(x_edges,
                                   y_edges) # rectangular plot of polar data
                # calculate midpoints of bins
                y = (y[: -1, :-1] + y[1:, 1:]) / 2
                x = (x[: -1, :-1] + x[1:, 1:]) / 2
                rad, theta = _cart2pol(x, y)
                #theta = theta % (np.pi * 2)
                # fill
                if plot_type == 'density':
                    with warnings.catch_warnings():
                        warnings.simplefilter("ignore")
                        ax_cart.pcolormesh(x, y, H, **default_kwargs)
                elif plot_type == 'contour':
                    ax_cart.contour(x, y, H, **default_kwargs)
                elif plot_type == 'contourf':
                    ax_cart.contourf(x, y, H, **default_kwargs)
            else:
                raise ValueError("Unknown type.")
        # plot attractor regimes
        ax.set_prop_cycle(None)
        def predict_label(x, y):
            # Classify each (x, y) grid point into an attractor basin.
            return self.attractor_model.predict(np.array([x, y]).T.reshape(-1, self.attractor_model_dim))
        x, y = np.meshgrid(np.linspace(-max_r * 1.2, max_r * 1.2, 500),
                           np.linspace(-max_r * 1.2, max_r * 1.2, 500)) # rectangular plot of polar data
        pred = predict_label(x, y).reshape(x.shape)
        rad, theta = _cart2pol(x, y)
        # fill
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            ax.pcolormesh(theta, rad, pred.T, **default_regimes_fill_kwargs)
        # contour
        ax.contour(theta, rad, pred.T, **default_regimes_contour_kwargs)
        # plot attractor states
        ax.set_prop_cycle(None)
        for l, attractor in self.attractors.items():
            att = StandardScaler().fit_transform(np.array(attractor).reshape(1, -1).T).T
            att_cart = self.embedding_model.transform(att)[:, :2]
            r, th = _cart2pol(att_cart[:, 0], att_cart[:, 1])
            if attractor_plot_type == 'scatter':
                ax.scatter(th, r, **default_attractor_kwargs)
            elif attractor_plot_type == 'glassbrain':
                # Convert the polar data point to figure coordinates and draw
                # a small inset brain plot centered on it.
                trans = ax.transData.transform((th, r))
                trans = ax.figure.transFigure.inverted().transform(trans).flatten()
                network.State(attractor).plot(figure=ax.figure,
                                              axes=(trans[0] - 0.05, trans[1] - 0.05, 0.1, 0.1),
                                              **default_attractor_kwargs)
            else:
                raise ValueError("Unknown attractor_type.")
        # Cosmetic polar-axis styling.
        ax.set_xticks([0, 0.5 * np.pi, np.pi, 1.5 * np.pi], ["", "", "", ""])
        ax.set_yticks(np.arange(0, np.round(max_r) + 1, 2))
        ax.set_rlabel_position(0)
        ax.tick_params(axis='y', colors='gray')
        ax.spines['polar'].set_visible(False)
        ax.xaxis.grid(True, linewidth=1, color='black')
        ax.yaxis.grid(True, linewidth=0.5, color='lightgrey')
        #ax.set_axisbelow('line')
        return ax
def simulate_activations(connectome, noise_coef=1, num_iter=1000, init_state=None, signal=None,
                         progress=True, random_state=None, **kwargs):
    """
    Simulate activations of a Hopfield network with a given connectome.
    Factory function for HopfieldSimulation dataclass.

    Each iteration takes one deterministic Hopfield update step and then adds
    Gaussian noise (mean ``signal``, std ``noise_coef``) to the new state.
    :param signal: non-null signal to be added to the noise in each iteration (list of length connectome.shape[0])
    :param connectome: a 2D numpy array
    :param noise_coef: noise coefficient (std of the per-step Gaussian noise)
    :param num_iter: number of iterations
    :param init_state: initial state; a standard-normal state is drawn if None
    :param progress: show a tqdm progress bar
    :param random_state: random state
    :param kwargs: additional arguments to network.Hopfield
    :return: HopfieldSimulation object
    """
    if not isinstance(connectome, np.ndarray) or connectome.ndim != 2 or connectome.shape[0] != connectome.shape[1]:
        raise ValueError("Connectome must be a 2D quadratic numpy array!")
    if signal is None:
        signal = np.zeros(connectome.shape[0])
    random = np.random.default_rng(random_state)
    default_kwargs = {
        "scale": True,
        "threshold": 0,
        "beta": 0.05
    }
    default_kwargs.update(kwargs)
    hopnet = network.Hopfield(connectome, **default_kwargs)
    # Row i holds the state entering iteration i; one extra row is allocated
    # for the final noisy state, which is dropped on return (states[:-1]).
    states = np.zeros((num_iter + 1, hopnet.num_neuron))
    energies = np.zeros(num_iter)
    if init_state is None:
        states[0] = network.State(random.normal(0, 1, hopnet.num_neuron))
    else:
        states[0] = network.State(init_state)
    for i in tqdm(range(num_iter), disable=not progress):
        # NOTE(review): assumes hopnet.update returns (state, n_iter, energy)
        # with energy being a sequence — energy[-1] takes its last value.
        new_state, n_iter, energy = hopnet.update(states[i], num_iter=1)
        energies[i] = energy[-1]
        # add noise
        states[i + 1] = np.array(new_state) + random.normal(signal, noise_coef, hopnet.num_neuron)
    return HopfieldSimulation(hopnet=hopnet, states=states[:-1], energies=energies)
def load_simulation(filename):
    """Deserialize a pickled HopfieldSimulation written by its ``save`` method.

    :param filename: path of the pickle file
    :return: the restored HopfieldSimulation object
    """
    with open(filename, 'rb') as infile:
        payload = pickle.load(infile)
    return payload
def create_embeddings(simulation, attractor_sample=1000, num_hopfield_iter=100000, attractor_model_dim=2,
                      random_state=None, progress=True, **kwargs):
    """
    Construct a new Hopfield embeddings of a connectome from a HopfieldSimulation object.
    :param attractor_sample: maximum number of sampled states used to locate
        attractors and fit the attractor classifier (a count, not a ratio —
        see the ``min(...)`` below)
    :param simulation: HopfieldSimulation object
    :param num_hopfield_iter: max Hopfield steps allowed for convergence
    :param attractor_model_dim: number of leading PCs fed to the classifier
    :param kwargs: additional arguments to the embedding model (sklearn.decomposition.PCA)
    :return: HopfiledEmbedding object
    """
    # PCA on simulated hopfield states
    pca = PCA(**kwargs)
    states = StandardScaler().fit_transform(simulation.states.T).T
    embedded = pca.fit_transform(states)
    # calculate attractor states for a subsample
    random = np.random.default_rng(random_state)
    attractors = dict()
    attractor_labels = np.zeros(min(int(simulation.states.shape[0]), attractor_sample), dtype=int)
    sample = random.choice(simulation.states.shape[0], min(int(simulation.states.shape[0]), attractor_sample),
                           replace=False)
    for i, s in tqdm(enumerate(sample), total=len(sample), disable=not progress):
        # Relax each sampled state until it settles into an attractor.
        att, n_iter, energy = simulation.hopnet.update(simulation.states[s], num_iter=num_hopfield_iter)
        if n_iter == num_hopfield_iter:
            # NOTE(review): RuntimeWarning is *raised* here (aborting the
            # whole call), not issued via warnings.warn — confirm intended.
            print(n_iter, '!!')
            raise RuntimeWarning("Convergence error!")
        # Attractors are deduplicated by rounding to 6 decimals; each new one
        # gets the next integer label.
        if tuple(np.round(att, 6)) not in attractors.keys():
            attractors[tuple(np.round(att, 6))] = len(attractors)
            attractor_labels[i] = len(attractors) - 1
        else:
            attractor_labels[i] = attractors[tuple(np.round(att, 6))]
    # invert dictionary: label -> attractor state vector
    attractors = {v: np.array(k) for k, v in attractors.items()}
    # Fit a Multinomial Logistic Regression model that predicts the attractors on the first two PCs
    attractor_model = LogisticRegression(multi_class="multinomial")
    attractor_model.fit(embedded[sample, :attractor_model_dim], attractor_labels)
    return HopfiledEmbedding(hopnet=simulation.hopnet, embedding_model=pca,
                             attractors=attractors,
                             attractor_model=attractor_model,
                             attractor_model_dim=attractor_model_dim,
                             state_sample=simulation.states[sample],
                             attractor_sample=attractor_labels)
def load_embedding(filename):
    """Deserialize a pickled HopfieldEmbedding written by its ``save`` method.

    :param filename: path of the pickle file to load
    :return: the restored HopfieldEmbedding object
    """
    with open(filename, 'rb') as infile:
        payload = pickle.load(infile)
    return payload
| pni-lab/connattractor | connattractor/analysis.py | analysis.py | py | 16,176 | python | en | code | 2 | github-code | 36 |
7405277670 | import numpy as np
import math
import re
import feedparser as fp
def loadDataSet():
    """Return the toy posting corpus and its labels (1 = abusive, 0 = normal)."""
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classVec = [0, 1, 0, 1, 0, 1]  # 1: abusive post, 0: normal post
    return postingList, classVec
def createVocabList(dataSet):
    """Collect every distinct token appearing in any document of ``dataSet``."""
    vocab = set()
    for document in dataSet:
        vocab.update(document)
    return list(vocab)
def setOfWords2Vec(vocabList, inputSet):  # naive-Bayes set-of-words model
    """Build a binary presence vector over ``vocabList`` for one document.

    :param vocabList: ordered vocabulary
    :param inputSet: tokens of the document to vectorize
    :return: list of 0/1 flags, one slot per vocabulary word
    """
    # Precompute word -> position once so each lookup is O(1) instead of the
    # original O(V) vocabList.index scan per token.
    index = {word: i for i, word in enumerate(vocabList)}
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in index:
            returnVec[index[word]] = 1
        else:
            print("the word: %s is not in my Vocabulary!"%word)
    return returnVec
def trainNB0(trainMatrix, trainCategory):
    """Train a naive-Bayes model from a document-word count/presence matrix.

    :param trainMatrix: array-like, one row per document, one column per word
    :param trainCategory: array-like of 0/1 labels (1 = abusive class)
    :return: (log P(w|class 0), log P(w|class 1), P(class 1))
    """
    numTrainDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    # Prior probability of the abusive class.
    pAbusive = sum(trainCategory)/float(numTrainDocs)
    # Laplace smoothing: counts start at 1 and denominators at 2 so an unseen
    # word never yields a zero probability (and log(0) later).
    p0Num = np.ones(numWords)
    p1Num = np.ones(numWords)
    p0Denom = 2.0
    p1Denom = 2.0
    for i in range(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    # Work in log space to avoid underflow when many per-word probabilities
    # are combined; np.log vectorizes the original per-element math.log loop.
    p1Vect = np.log(p1Num / p1Denom)
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 if the word vector is more likely abusive, else 0."""
    # Unnormalized log posterior of each class: per-word log likelihoods
    # (weighted by the word vector) plus the log prior.
    log_p1 = sum(vec2Classify * p1Vec) + math.log(pClass1)
    log_p0 = sum(vec2Classify * p0Vec) + math.log(1.0 - pClass1)
    return 1 if log_p1 > log_p0 else 0
def testingNB():
    """Train on the toy corpus and print classifications for two sentences."""
    posts, labels = loadDataSet()
    vocab = createVocabList(posts)
    trainMat = [setOfWords2Vec(vocab, doc) for doc in posts]
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(labels))
    for testEntry in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        thisDoc = np.array(setOfWords2Vec(vocab, testEntry))
        print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
def bagOfWords2VecMN(vocabList, inputSet):  # naive-Bayes bag-of-words model
    """Build a per-word occurrence-count vector over ``vocabList``.

    Unlike the set-of-words model, repeated words increment their slot.
    Out-of-vocabulary words are silently ignored (original behavior).
    """
    # Precompute word -> position once: O(1) lookups instead of the original
    # O(V) vocabList.index scan per token.
    index = {word: i for i, word in enumerate(vocabList)}
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in index:
            returnVec[index[word]] += 1
    return returnVec
def textParse(bigString):
    r"""Tokenize raw text: split on runs of non-word characters and keep
    lowercased tokens longer than two characters.

    Bug fix: the original pattern r'\W*' can match the empty string, which on
    Python 3.7+ makes re.split cut between every character; r'\W+' expresses
    the intended "one or more separator characters" split.
    """
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
def spamTest():
    """Train/test the spam classifier on the 25 spam + 25 ham emails under
    email/, holding out 10 random emails as the test set, and print the
    error rate (and each misclassified document).
    """
    docList = []
    classList = []
    fullText = []
    for i in range(1, 26):
        # spam/<i>.txt are labelled 1, ham/<i>.txt are labelled 0.
        # Context managers close the files (the original leaked handles).
        with open('email/spam/%d.txt' %i) as f:
            wordList = textParse(f.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        with open('email/ham/%d.txt' %i) as f:
            wordList = textParse(f.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # Bug fix: range() is immutable on Python 3, so the original
    # "trainingSet = range(50)" made del(trainingSet[randIndex]) raise
    # TypeError; materialize the index list first.
    trainingSet = list(range(50))
    testSet = []
    for i in range(10):
        # Draw 10 distinct random hold-out indices.
        randIndex = int(np.random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error",docList[docIndex])
    print('the error rate is: ',float(errorCount) / len(testSet))
# Simple interactive driver: pick one exercise of the chapter by number.
select = int(input("请输入你要选择的操作:"))
if select == 1:
    # Show two set-of-words vectors over the toy corpus.
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    print(setOfWords2Vec(myVocabList, listPosts[0]))
    print(setOfWords2Vec(myVocabList, listPosts[3]))
elif select == 2:
    # Train naive Bayes on the toy corpus and print its parameters.
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    trainMat = []
    for postinDoc in listPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(trainMat, listClasses)
    print(pAb)
    print(p0V)
    print(p1V)
elif select == 3:
    # End-to-end smoke test on two hand-picked sentences.
    testingNB()
elif select == 4:
    # Demonstrate regex splitting on one ham email.
    emailText = open('email/ham/6.txt').read()
    regEx = re.compile('\\W*')
    print(regEx.split(emailText))
elif select == 5:
    # Cross-validated spam classification on the email corpus.
    spamTest()
elif select == 6:
    # Parse an RSS feed (requires network access).
    ny = fp.parse('http://newyork.craigslist.org/stp/index.rss')
    print(len(ny['entries']))
| GuoBayern/MachineLearning | bayes.py | bayes.py | py | 5,884 | python | en | code | 0 | github-code | 36 |
70553069544 | import os, datetime, time
import torch
import torch.optim as optim
import numpy as np
import math
import cv2
import tqdm
import config
import constants
from utils.trainer_utils import (
AverageMeter,
get_HHMMSS_from_second,
save_checkpoint,
save_all_img,
save_joints3d_img,
save_mesh,
save_templates_info,
train_only_3task_network,
train_hmr_using_3task,
train_hmr_using_joints,
train_texture_net,
train_hmr_using_adv_loss,
)
from utils.imutils import uncrop
from lib.utils.eval_utils import (
batch_compute_similarity_transform_torch,
)
from lib.utils.geometry import batch_rodrigues
from lib.models.smpl import SMPL, SMPL_MODEL_DIR, H36M_TO_J14, SMPL_MEAN_PARAMS
# import soft_renderer as sr
# from soft_renderer.mesh import Mesh
# from soft_renderer.renderer import SoftRenderer
# import soft_renderer.cuda.load_textures as load_textures_cuda
from lib.models.smpl import get_smpl_faces
class Trainer():
    def __init__(
        self,
        model_name,
        HMR,
        context_encoder_net,
        discriminator,
        jigsaw_puzzle_net,
        rotation_net,
        texture_net,
        img_renderer,
        seg_renderer,
        texture_discriminator,
        train_dataloader,
        test_dataloader,
        test_dataloader_h36m,
        test_dataloader_3dpw,
        test_dataloader_lsp,
        loss_fn_BCE,
        loss_fn_CE,
        loss_fn_MSE,
        loss_fn_keypoints,
        loss_fn_mask,
        HMR_optimizer_all,
        HMR_scheduler_all,
        discriminator_optimizer,
        context_encoder_optimizer,
        jigsaw_puzzle_optimizer,
        rotation_optimizer,
        texture_net_optimizer,
        texture_discriminator_optimizer,
        device,
        num_epoch,
        args
    ):
        """Wire together the networks, data loaders, losses and optimizers of
        the HMR training pipeline; also preload SMPL body models and the
        H36M joint regressor from data/ onto ``device``.
        """
        self.model_name = model_name
        self.args = args
        # model
        self.HMR = HMR
        self.context_encoder_net = context_encoder_net
        self.jigsaw_puzzle_net = jigsaw_puzzle_net
        self.rotation_net = rotation_net
        self.discriminator = discriminator
        self.texture_net = texture_net
        self.img_renderer = img_renderer
        self.seg_renderer = seg_renderer
        self.texture_discriminator = texture_discriminator
        # device
        self.device = device
        # dataloader (optional test loaders: attribute only set when provided)
        self.train_dataloader = train_dataloader
        if test_dataloader:
            self.test_dataloader = test_dataloader
        if test_dataloader_h36m:
            self.test_dataloader_h36m = test_dataloader_h36m
        if test_dataloader_3dpw:
            self.test_dataloader_3dpw = test_dataloader_3dpw
        if test_dataloader_lsp:
            self.test_dataloader_lsp = test_dataloader_lsp
        # loss
        self.loss_fn_BCE = loss_fn_BCE
        self.loss_fn_CE = loss_fn_CE
        self.loss_fn_MSE = loss_fn_MSE
        self.loss_fn_keypoints = loss_fn_keypoints
        self.loss_fn_mask = loss_fn_mask
        # optimizer
        self.HMR_optimizer_all = HMR_optimizer_all
        self.HMR_scheduler_all = HMR_scheduler_all
        self.discriminator_optimizer = discriminator_optimizer
        self.context_encoder_optimizer = context_encoder_optimizer
        self.jigsaw_puzzle_optimizer = jigsaw_puzzle_optimizer
        self.rotation_optimizer = rotation_optimizer
        self.texture_net_optimizer = texture_net_optimizer
        self.texture_discriminator_optimizer = texture_discriminator_optimizer
        # misc variables
        self.num_epoch = num_epoch
        self.freq_print = args.freq_print
        self.num_patch = 4
        self.tex_size = args.tex_size
        today = datetime.datetime.now()
        self.today = datetime.datetime.strftime(today, "%y%m%d_%H%M%S")
        # Output dir defaults to a timestamp when --output_dir is not given.
        self.output_dir = self.args.output_dir if self.args.output_dir else self.today
        # smpl and J_regressor
        self.smpl_neutral = SMPL("data/vibe_data",
                                 create_transl=False).to(self.device)
        self.smpl_male = SMPL("data/vibe_data",
                              gender='male',
                              create_transl=False).to(self.device)
        self.smpl_female = SMPL("data/vibe_data",
                                gender='female',
                                create_transl=False).to(self.device)
        self.J_regressor = np.load("data/vibe_data/J_regressor_h36m.npy")
        self.J_regressor_torch = torch.from_numpy(self.J_regressor).float()
        parts_texture = np.load("data/vertex_texture.npy")
        self.parts_texture = torch.from_numpy(parts_texture).to(self.device).float()
        self.cube_parts = torch.FloatTensor(np.load("data/cube_parts.npy")).to(self.device)
    def train(self):
        """Run one training epoch over ``self.train_dataloader``.

        Stage 1 (epoch < args.first_stage_nEpoch): train only the three
        self-supervised auxiliary nets (context encoder + discriminator,
        jigsaw, rotation) and the texture net, with HMR frozen.
        Stage 2: train HMR with the 3-task losses, texture/segmentation
        losses, adversarial loss, and (unless args.self_supervised) 2D/3D
        joint losses.

        NOTE(review): relies on self.epoch, self.epoch_start,
        self.total_start and self.train_templates being set by the caller
        before this method runs — they are not initialized in __init__.
        """
        print("===================Train===================\nEpoch {} Start".format(self.epoch+1))
        # Template used for the periodic progress print-out below.
        train_template = \
            'Epoch: {}/{} | Batch_idx: {}/{} | ' \
            'loss_DC: {losses_DC.val:.4f} ({losses_DC.avg:.4f}) | loss_CE: {losses_CE.val:.4f} ({losses_CE.avg:.4f}) | ' \
            'loss_JP: {losses_JP.val:.4f} ({losses_JP.avg:.4f}) | acc_JP: {acces_JP.val:.4f} ({acces_JP.avg:.4f}) | ' \
            'loss_ROT: {losses_ROT.val:.4f} ({losses_ROT.avg:.4f}) | acc_ROT: {acces_ROT.val:.4f} ({acces_ROT.avg:.4f}) | ' \
            'loss_Texture: {losses_texture_ori_img.val:.4f} ({losses_texture_ori_img.avg:.4f}) | ' \
            'loss_Seg: {losses_seg.val:.4f} ({losses_seg.avg:.4f}) | ' \
            'loss_Texture_Total: {losses_texture_total.val:.4f} ({losses_texture_total.avg:.4f}) | ' \
            'loss_disc_e: {losses_disc_e.val:.4f} ({losses_disc_e.avg:.4f}) | ' \
            'loss_disc_d: {losses_disc.val:.4f} ({losses_disc.avg:.4f}) | ' \
            'loss_disc_real: {losses_disc_real.val:.4f} ({losses_disc_real.avg:.4f}) | ' \
            'loss_disc_fake: {losses_disc_fake.val:.4f} ({losses_disc_fake.avg:.4f}) | ' \
            'loss_HMR_3task: {losses_HMR_3task.val:.4f} ({losses_HMR_3task.avg:.4f}) | ' \
            'loss_HMR_joints3d: {losses_HMR_joints3d.val:.4f} ({losses_HMR_joints3d.avg:.4f}) | ' \
            'loss_joints: {losses_joints.val:.4f} ({losses_joints.avg:.4f}) | ' \
            'MPJPE: {train_MPJPE.val:.4f} ({train_MPJPE.avg:.4f}) | ' \
            'PA_MPJPE: {train_PA_MPJPE.val:.4f} ({train_PA_MPJPE.avg:.4f}) | ' \
            'loss_total: {losses_total.val:.4f} ({losses_total.avg:.4f}) | ' \
            'Batch duration: {duration}'
        batch_start = time.time()
        # Running averages for every tracked loss/metric of this epoch.
        self.losses_DC = AverageMeter() # Discriminator Loss
        self.losses_CE = AverageMeter() # Context Encoder Loss
        self.losses_JP = AverageMeter() # Jigsaw Puzzle Loss
        self.acces_JP = AverageMeter() # Jigsaw Puzzle Accuracy
        self.losses_ROT = AverageMeter() # Rotation Loss
        self.acces_ROT = AverageMeter() # Rotation Accuracy
        self.losses_texture_ori_img = AverageMeter() # Texture Loss
        self.losses_seg = AverageMeter() # Segmentation Loss
        self.losses_texture_total = AverageMeter() # Texture Loss + Segmentation Loss
        self.losses_HMR_3task = AverageMeter() # Discriminator Loss + Context Encoder Loss + Roation Loss
        self.losses_disc_e = AverageMeter() # Encoder Discriminator Loss for rendering img
        self.losses_disc = AverageMeter() # Real + Fake Discriminator Loss
        self.losses_disc_real = AverageMeter() # Discriminator Loss for Real
        self.losses_disc_fake = AverageMeter() # Discriminator Loss for Fake
        self.losses_total = AverageMeter() # Total Sum Loss
        self.losses_joints = AverageMeter() # 2d + 3d joints loss
        self.losses_HMR_joints3d = AverageMeter() # 3D joints loss
        self.train_MPJPE = AverageMeter() # MPJPE
        self.train_PA_MPJPE = AverageMeter() # PA_MPJPE
        len_train_dataloader = len(self.train_dataloader)
        for batch_idx, item in tqdm.tqdm(enumerate(self.train_dataloader), desc='Train {}/{}'.format(self.epoch+1, self.num_epoch), total=len(self.train_dataloader)):
            # Move the batch tensors to the training device.
            img = item['img'].to(self.device)
            black_img = item['black_img'].to(self.device)
            context_encoder_input = item['context_encoder_input'].to(self.device)
            center_crop_img = item['center_crop_img'].to(self.device)
            jigsaw_input = item['jigsaw_input'].to(self.device)
            rotation_img = item['rotation_input'].to(self.device)
            jigsaw_order = item['jigsaw_order'].to(self.device)
            rotation_idx = item['rotation_idx'].to(self.device)
            joints3d = item['pose_3d'].to(self.device)
            has_joints3d = item['has_pose_3d'].to(self.device)
            joints2d = item['keypoints'].to(self.device)
            batch_size = img.shape[0]
            gt_mask = item['gt_mask'].to(self.device)
            has_mask = item['has_mask'].to(self.device)
            # Real/fake label tensors for the discriminators.
            self.zeros = torch.zeros([batch_size, 1]).to(self.device)
            self.ones = torch.ones([batch_size, 1]).to(self.device)
            faces = get_smpl_faces().astype(np.int32)
            faces = torch.from_numpy(faces).to(self.device)
            faces = faces.expand((batch_size, -1, -1))
            joint_mapper_gt = constants.J24_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.J24_TO_J14
            if self.epoch < self.args.first_stage_nEpoch: # Epoch 0~9
                # Training 3 Task net
                # Discriminator, Context Encoder, Jigsaw Puzzle, Rotation net
                self.HMR.eval()
                self.context_encoder_net.train()
                self.discriminator.train()
                self.jigsaw_puzzle_net.train()
                self.rotation_net.train()
                output_ce_224 = \
                    train_only_3task_network(
                        self.HMR,
                        self.context_encoder_net,
                        self.discriminator,
                        self.jigsaw_puzzle_net,
                        self.rotation_net,
                        self.loss_fn_BCE,
                        self.loss_fn_MSE,
                        self.loss_fn_CE,
                        self.losses_CE,
                        self.losses_DC,
                        self.acces_JP,
                        self.losses_JP,
                        self.acces_ROT,
                        self.losses_ROT,
                        self.discriminator_optimizer,
                        self.context_encoder_optimizer,
                        self.jigsaw_puzzle_optimizer,
                        self.rotation_optimizer,
                        img,
                        context_encoder_input,
                        center_crop_img,
                        jigsaw_input,
                        jigsaw_order,
                        rotation_img,
                        rotation_idx,
                        self.num_patch,
                        self.ones,
                        self.zeros,
                        batch_size,
                    )
                # Training Texture Net
                self.texture_net.train()
                output_train_texture_net = \
                    train_texture_net(
                        self.HMR,
                        self.texture_net,
                        self.img_renderer,
                        self.loss_fn_MSE,
                        self.loss_fn_mask,
                        self.losses_texture_ori_img,
                        self.losses_seg,
                        self.losses_texture_total,
                        self.texture_net_optimizer,
                        img,
                        black_img,
                        batch_size,
                        self.args,
                        gt_mask,
                        has_mask,
                        train_first_stage=True
                    )
                # Unpack visualization tensors for the periodic image dump.
                mask = output_train_texture_net[0]
                detach_images = output_train_texture_net[1]
                rendering = output_train_texture_net[2]
                vertices = output_train_texture_net[3]
            # train hmr & texture net using 3task, rendering, segmentation and gan loss (or joints)
            else: # Epoch 10~19
                self.HMR.eval()
                self.context_encoder_net.eval()
                self.discriminator.eval()
                self.jigsaw_puzzle_net.eval()
                self.rotation_net.eval()
                self.texture_net.train()
                self.texture_discriminator.train()
                # Training Mesh network (HMR) using 3 Task Loss
                loss_HMR, output_ce_224 = \
                    train_hmr_using_3task(
                        self.HMR,
                        self.context_encoder_net,
                        self.discriminator,
                        self.jigsaw_puzzle_net,
                        self.rotation_net,
                        self.loss_fn_BCE,
                        self.loss_fn_MSE,
                        self.loss_fn_CE,
                        self.losses_CE,
                        self.acces_JP,
                        self.losses_JP,
                        self.acces_ROT,
                        self.losses_ROT,
                        self.losses_HMR_3task,
                        img,
                        context_encoder_input,
                        center_crop_img,
                        jigsaw_input,
                        jigsaw_order,
                        rotation_img,
                        rotation_idx,
                        self.num_patch,
                        self.ones,
                        self.zeros,
                        batch_size,
                        self.args
                    )
                loss_all = loss_HMR
                # Training Texture net
                output_train_texture_net = \
                    train_texture_net(
                        self.HMR,
                        self.texture_net,
                        self.img_renderer,
                        self.loss_fn_MSE,
                        self.loss_fn_mask,
                        self.losses_texture_ori_img,
                        self.losses_seg,
                        self.losses_texture_total,
                        self.texture_net_optimizer,
                        img,
                        black_img,
                        batch_size,
                        self.args,
                        gt_mask,
                        has_mask,
                        train_first_stage=False
                    )
                texture_loss = output_train_texture_net[0]
                loss_all += texture_loss
                mask = output_train_texture_net[1]
                detach_images = output_train_texture_net[2]
                rendering = output_train_texture_net[3]
                vertices = output_train_texture_net[4]
                # Training HMR using adversarial loss
                e_disc_loss, d_disc_loss, rendering_bg = \
                    train_hmr_using_adv_loss(
                        self.HMR,
                        self.texture_discriminator,
                        self.texture_net,
                        self.img_renderer,
                        self.losses_disc_e,
                        self.losses_disc,
                        self.losses_disc_real,
                        self.losses_disc_fake,
                        img,
                        batch_size,
                    )
                loss_all += e_disc_loss
                # Discriminator is stepped separately from the generator side.
                self.texture_discriminator_optimizer.zero_grad()
                d_disc_loss.backward()
                self.texture_discriminator_optimizer.step()
                if not self.args.self_supervised:
                    # Training Mesh network (HMR) using joints
                    joints_loss, mpjpe, pa_mpjpe, num_data = train_hmr_using_joints(
                        self.HMR,
                        self.loss_fn_keypoints,
                        self.losses_HMR_joints3d,
                        img,
                        joints2d,
                        joints3d,
                        has_joints3d,
                        joint_mapper_gt,
                        batch_size,
                        self.device,
                        self.args,
                    )
                    loss_all += joints_loss
                    self.losses_joints.update(joints_loss.item(), num_data)
                    self.train_MPJPE.update(mpjpe.item(), num_data)
                    self.train_PA_MPJPE.update(pa_mpjpe.item(), num_data)
                # Single backward/step over the accumulated second-stage loss.
                self.HMR_optimizer_all.zero_grad()
                self.losses_total.update(loss_all.item(), batch_size)
                loss_all.backward()
                self.HMR_optimizer_all.step()
            # Periodically dump the intermediate images (and meshes in stage 2)
            # of up to 10 samples for visual inspection.
            if (batch_idx==0 or (batch_idx+1)%self.args.freq_print==0):
                for i in range(10 if batch_size > 10 else batch_size):
                    img_dict = dict()
                    ### original img ###
                    img_dict["orig_img.jpg"] = img[i]
                    ### context encoder input img ###
                    img_dict["ce_input_img.jpg"] = context_encoder_input[i].clone().detach()
                    ### center crop img for CE ###
                    img_dict["center_crop_img.jpg"] = center_crop_img[i].clone().detach()
                    ### output img of CE ###
                    img_dict["reconst_img.jpg"] = output_ce_224[i].clone().detach()
                    ### jigsaw input img ###
                    img_dict["jigsaw_input_img.jpg"] = jigsaw_input[i].clone().detach()
                    ### ratation input img ###
                    img_dict["rotation_input_img.jpg"] = rotation_img[i].clone().detach()
                    ### texture img ###
                    img_dict["rendering.jpg"] = rendering[i].clone().detach()
                    ### segmentation img ###
                    img_dict["mask.jpg"] = mask[i].clone().detach()
                    ### detach img ###
                    img_dict["detach.jpg"] = detach_images[i].clone().detach()
                    ### Segmentation gt ###
                    img_dict["seg_gt.jpg"] = gt_mask[i].clone().detach()
                    if self.epoch >= self.args.first_stage_nEpoch:
                        ### rendering background ###
                        img_dict["rendering_bg.jpg"] = rendering_bg[i].clone().detach()
                    save_all_img(img_dict, self.output_dir,
                                 self.epoch+1, i+batch_idx)
                    ### save mesh ###
                    if self.epoch >= self.args.first_stage_nEpoch:
                        _faces = faces[i].clone().detach()
                        _vertices = vertices[i].clone().detach()
                        save_mesh(
                            _vertices, _faces,
                            self.output_dir,
                            self.epoch+1,
                            i,
                        )
            ### print train info while running in batch loop ###
            if (batch_idx+1) % self.freq_print == 0 or (batch_idx+1) == len_train_dataloader:
                train_template_filled = train_template.format(
                    self.epoch+1, self.num_epoch,
                    batch_idx+1, len(self.train_dataloader),
                    losses_DC=self.losses_DC,
                    losses_CE=self.losses_CE,
                    losses_JP=self.losses_JP,
                    acces_JP=self.acces_JP,
                    losses_ROT=self.losses_ROT,
                    acces_ROT=self.acces_ROT,
                    losses_texture_ori_img=self.losses_texture_ori_img,
                    losses_seg=self.losses_seg,
                    losses_texture_total=self.losses_texture_total,
                    losses_disc_e=self.losses_disc_e,
                    losses_disc=self.losses_disc,
                    losses_disc_real=self.losses_disc_real,
                    losses_disc_fake=self.losses_disc_fake,
                    losses_HMR_3task=self.losses_HMR_3task,
                    losses_HMR_joints3d=self.losses_HMR_joints3d,
                    losses_joints=self.losses_joints,
                    losses_total=self.losses_total,
                    train_MPJPE=self.train_MPJPE,
                    train_PA_MPJPE=self.train_PA_MPJPE,
                    duration=get_HHMMSS_from_second(seconds=(time.time()-batch_start))
                )
                print(train_template_filled)
                self.train_templates.append(train_template_filled)
                if (batch_idx+1) == len_train_dataloader:
                    self.train_templates.append("======================================================================")
            batch_start = time.time()
        ### save train info when one epoch is completed ###
        save_templates_info(self.train_templates, self.output_dir, "train_templates.txt")
        print("Train Time: {train_time}, Total Time: {total_time}".format(
            train_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
            total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))))
### Evaluate ###
    def evaluate(self, test_dataloader, test_dataset_name, is_save_pth=True):
        """Evaluate self.HMR on one test set and optionally checkpoint.

        test_dataset_name selects the protocol:
          - "h36m" / "3dpw": report MPJPE and PA-MPJPE (mm) on 3D joints.
          - "lsp": report FG-BG and 6-part segmentation accuracy/F1 against
            the UPi-S1h annotations.
        is_save_pth controls whether best/latest checkpoints are written.
        """
        # Joint index maps from the H36M-17 / SMPL-24 joint sets down to the
        # evaluation subset (17 joints for mpi-inf-3dhp, otherwise 14).
        joint_mapper_h36m = constants.H36M_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.H36M_TO_J14
        joint_mapper_gt = constants.J24_TO_J17 if self.args.test_dataset == 'mpi-inf-3dhp' else constants.J24_TO_J14
        test_start = time.time()
        self.HMR.eval()
        # Per-batch progress templates (only used in eval-only mode, train == 0).
        if self.args.train == 0:
            if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                test_template_batch = \
                    '===================Test===================\n' \
                    'Batch: {}/{} | ' \
                    'Mpjpe: {mpjpe_average_meter.avg:.2f} ' \
                    'Rec_error: {pa_mpjpe_average_meter.avg:.2f} ' \
                    '\n=========================================='
            elif test_dataset_name == "lsp":
                test_template_batch = \
                    '===================Test===================\n' \
                    'Batch: {}/{} | ' \
                    'Part acc: {part_Acc_average_meter:.2f} ' \
                    'Part F1: {part_F1_average_meter:.2f} ' \
                    'FG-BG Acc: {Acc_average_meter:.2f} ' \
                    'FG-BG F1: {F1_average_meter:.2f} ' \
                    '\n=========================================='
        # End-of-evaluation summary templates.
        if test_dataset_name == "h36m":
            test_template = \
                '===================Test===================\n' \
                'Test Data: {} | ' \
                'Epoch: {}/{} | ' \
                'Mpjpe: {mpjpe_average_meter.avg:.2f} | ' \
                'Rec_error: {pa_mpjpe_average_meter.avg:.2f} | ' \
                'loss: {losses.avg:.5f} | ' \
                'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
                'Total Time: {total_time}' \
                '\n=========================================='
        elif test_dataset_name == "3dpw":
            test_template = \
                '===================Test===================\n' \
                'Test Data: {} | ' \
                'Epoch: {}/{} | ' \
                'Mpjpe: {mpjpe_average_meter.avg:.2f} | ' \
                'Rec_error: {pa_mpjpe_average_meter.avg:.2f} | ' \
                'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
                'Total Time: {total_time}' \
                '\n=========================================='
        elif test_dataset_name == "lsp":
            test_template = \
                '===================Test===================\n' \
                'Test Data: {} | ' \
                'Epoch: {}/{} | ' \
                'Part acc: {part_Acc_average_meter:.2f} | ' \
                'Part F1: {part_F1_average_meter:.2f} | ' \
                'FG-BG Acc: {Acc_average_meter:.2f} | ' \
                'FG-BG F1: {F1_average_meter:.2f} | ' \
                'Test Time: {test_time} | Epoch Time: {epoch_time} | ' \
                'Total Time: {total_time}' \
                '\n=========================================='
        # Running metric accumulators for this evaluation pass.
        self.mpjpe_average_meter = AverageMeter()
        self.pa_mpjpe_average_meter = AverageMeter()
        self.losses = AverageMeter()
        self.part_Acc_average_meter = 0
        self.part_F1_average_meter = 0
        self.Acc_average_meter = 0
        self.F1_average_meter = 0
        # Best-so-far values used further down to decide whether to save a
        # "best" checkpoint (only meaningful while training).
        if self.args.train != 0:
            if test_dataset_name == "h36m":
                current_mpjpe = self.current_mpjpe_h36m
            elif test_dataset_name == "3dpw":
                current_mpjpe = self.current_mpjpe_3dpw
            elif test_dataset_name == "lsp":
                current_acc = self.current_acc_lsp
        batch_num = len(test_dataloader)
        # Pixel/confusion-matrix accumulators for the LSP segmentation protocol
        # (2 classes for FG-BG, 7 = 6 body parts + background for parts).
        accuracy = 0.
        parts_accuracy = 0.
        pixel_count = 0
        parts_pixel_count = 0
        tp = np.zeros((2,1))
        fp = np.zeros((2,1))
        fn = np.zeros((2,1))
        parts_tp = np.zeros((7,1))
        parts_fp = np.zeros((7,1))
        parts_fn = np.zeros((7,1))
        with torch.no_grad():
            for batch_idx, item in tqdm.tqdm(enumerate(test_dataloader), desc='{} Eval'.format(test_dataset_name), total=len(test_dataloader)):
                # Validation for early stopping
                # NOTE(review): for "h36m" this block runs a forward pass only
                # to compute the validation loss, and the next block repeats
                # the same forward pass for the metrics -- confirm whether the
                # duplicate inference is intentional.
                if test_dataset_name == "h36m":
                    img = item["img"]
                    img = img.to(self.device)
                    batch_size = img.shape[0]
                    output = self.HMR(img, J_regressor=self.J_regressor)
                    output = output[-1]
                    pred_j3ds = output['kp_3d']
                    # Center predictions at the pelvis (midpoint of hips 2/3).
                    pred_pelvis = (pred_j3ds[:,[2],:] + pred_j3ds[:,[3],:]) / 2.0
                    pred_j3ds -= pred_pelvis
                    target_j3ds = item["pose_3d"]
                    target_j3ds = target_j3ds[:, joint_mapper_gt, :-1]
                    target_j3ds = target_j3ds.float().to(self.device)
                    target_pelvis = (target_j3ds[:,[2],:] + target_j3ds[:,[3],:]) / 2.0
                    target_j3ds -= target_pelvis
                    loss = self.loss_fn_MSE(pred_j3ds, target_j3ds)
                    self.losses.update(loss.item(), batch_size)
                if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                    img = item["img"]
                    img = img.to(self.device)
                    batch_size = img.shape[0]
                    output = self.HMR(img, J_regressor=self.J_regressor)
                    output = output[-1]
                    pred_j3ds = output['kp_3d']
                    pred_pelvis = (pred_j3ds[:,[2],:] + pred_j3ds[:,[3],:]) / 2.0
                    pred_j3ds -= pred_pelvis
                    pred_vertices = output["verts"]
                    faces = get_smpl_faces().astype(np.int32)
                    faces = torch.from_numpy(faces).to(self.device)
                    faces = faces.expand((batch_size, -1, -1))
                    J_regressor_batch = self.J_regressor_torch[None, :].expand(pred_vertices.shape[0], -1, -1).to(self.device)
                    if test_dataset_name == 'h36m':
                        # H36M ships ground-truth 3D joints directly.
                        target_j3ds = item["pose_3d"]
                        target_j3ds = target_j3ds[:, joint_mapper_gt, :-1]
                    else:
                        # 3DPW: regress GT joints from gendered SMPL meshes.
                        gt_pose = item["pose"].to(self.device)
                        gt_betas = item["betas"].to(self.device)
                        gender = item["gender"].to(self.device)
                        gt_vertices = self.smpl_male(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                        gt_vertices_female = self.smpl_female(global_orient=gt_pose[:,:3], body_pose=gt_pose[:,3:], betas=gt_betas).vertices
                        gt_vertices[gender==1, :, :] = gt_vertices_female[gender==1, :, :]
                        target_j3ds = torch.matmul(J_regressor_batch, gt_vertices)
                        gt_pelvis = target_j3ds[:, [0],:].clone()
                        target_j3ds = target_j3ds[:, joint_mapper_h36m, :]
                        target_j3ds = target_j3ds - gt_pelvis
                    target_j3ds = target_j3ds.float().to(self.device)
                    target_pelvis = (target_j3ds[:,[2],:] + target_j3ds[:,[3],:]) / 2.0
                    target_j3ds -= target_pelvis
                    # MPJPE, then PA-MPJPE after Procrustes alignment.
                    errors = torch.sqrt(((pred_j3ds - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
                    S1_hat = batch_compute_similarity_transform_torch(pred_j3ds, target_j3ds)
                    errors_pa = torch.sqrt(((S1_hat - target_j3ds) ** 2).sum(dim=-1)).mean(dim=-1).cpu().numpy()
                    m2mm = 1000
                    mpjpe = np.mean(errors) * m2mm
                    pa_mpjpe = np.mean(errors_pa) * m2mm
                    self.mpjpe_average_meter.update(mpjpe, batch_size)
                    self.pa_mpjpe_average_meter.update(pa_mpjpe, batch_size)
                elif test_dataset_name == "lsp":
                    annot_path = config.DATASET_FOLDERS['upi-s1h']
                    img = item["img"]
                    img = img.to(self.device)
                    batch_size = img.shape[0]
                    orig_shape = item["orig_shape"].cpu().numpy()
                    scale = item["scale"].cpu().numpy()
                    center = item["center"].cpu().numpy()
                    output = self.HMR(img, J_regressor=self.J_regressor)
                    output = output[-1]
                    pred_vertices = output["verts"]
                    cam = output['theta'][:, :3]
                    faces = get_smpl_faces().astype(np.int32)
                    faces = torch.from_numpy(faces).to(self.device)
                    faces = faces.expand((batch_size, -1, -1))
                    # Render predicted mesh into FG-BG mask and part labels.
                    mask, parts = self.seg_renderer(pred_vertices, cam)
                    save_gt_parts = []
                    save_gt_seg = []
                    for i in range(batch_size):
                        # After rendering, convert imate back to original resolution
                        pred_mask = uncrop(mask[i].cpu().numpy(), center[i], scale[i], orig_shape[i]) > 0
                        # Load gt mask
                        gt_mask = cv2.imread(os.path.join(annot_path, item['maskname'][i]), 0) > 0
                        if batch_idx == 0:
                            save_gt_seg.append(gt_mask)
                        # Evaluation consistent with the original UP-3D code
                        accuracy += (gt_mask == pred_mask).sum()
                        pixel_count += np.prod(np.array(gt_mask.shape))
                        for c in range(2):
                            cgt = gt_mask == c
                            cpred = pred_mask == c
                            tp[c] += (cgt & cpred).sum()
                            fp[c] += (~cgt & cpred).sum()
                            fn[c] += (cgt & ~cpred).sum()
                    f1 = 2 * tp / (2 * tp + fp + fn)
                    for i in range(batch_size):
                        pred_parts = uncrop(parts[i].cpu().numpy().astype(np.uint8), center[i], scale[i], orig_shape[i])
                        # Load gt part segmentation
                        gt_parts = cv2.imread(os.path.join(annot_path, item['partname'][i]), 0)
                        if batch_idx == 0:
                            save_gt_parts.append(gt_parts)
                        # Evaluation consistent with the original UP-3D code
                        # 6 parts + background
                        for c in range(7):
                            cgt = gt_parts == c
                            cpred = pred_parts == c
                            cpred[gt_parts == 255] = 0
                            parts_tp[c] += (cgt & cpred).sum()
                            parts_fp[c] += (~cgt & cpred).sum()
                            parts_fn[c] += (cgt & ~cpred).sum()
                        gt_parts[gt_parts == 255] = 0
                        pred_parts[pred_parts == 255] = 0
                        # NOTE(review): parts_f1 is recomputed on every sample;
                        # only the value after the last sample is ever reported.
                        parts_f1 = 2 * parts_tp / (2 * parts_tp + parts_fp + parts_fn)
                        parts_accuracy += (gt_parts == pred_parts).sum()
                        parts_pixel_count += np.prod(np.array(gt_parts.shape))
                    self.part_Acc_average_meter = (parts_accuracy / parts_pixel_count) * 100
                    self.part_F1_average_meter = parts_f1[[0,1,2,3,4,5,6]].mean()
                    self.Acc_average_meter = (accuracy / pixel_count) * 100
                    self.F1_average_meter = f1.mean()
                # Save qualitative results for (up to) the first 10 samples of
                # the first batch only.
                if batch_idx == 0:
                    for i in range(10 if batch_size > 10 else batch_size):
                        img_dict = dict()
                        img_dict["orig_img.jpg"] = img[i]
                        if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                            save_joints3d_img(
                                target_j3ds[i],
                                pred_j3ds[i],
                                self.output_dir,
                                self.epoch+1,
                                test_dataset=test_dataset_name,
                                test_idx=i
                            )
                            save_mesh(
                                pred_vertices[i],
                                faces[i],
                                self.output_dir,
                                self.epoch+1,
                                test_dataset=test_dataset_name,
                                test_idx=i
                            )
                        elif test_dataset_name == "lsp":
                            img_dict["mask.jpg"] = mask[i].cpu().numpy()
                            img_dict["parts.jpg"] = parts[i].cpu().numpy()
                            img_dict["gt_parts.jpg"] = save_gt_parts[i]
                            img_dict["gt_seg.jpg"] = save_gt_seg[i]
                        save_all_img(img_dict, self.output_dir, self.epoch+1, test_dataset=test_dataset_name, test_idx=i)
                # Periodic progress printout in eval-only mode.
                if self.args.train == 0 and (batch_idx+1) % self.args.freq_print_test == 0:
                    if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                        test_template_batch_filled = test_template_batch.format(
                            batch_idx+1,
                            batch_num,
                            mpjpe_average_meter=self.mpjpe_average_meter,
                            pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
                        )
                    elif test_dataset_name == "lsp":
                        test_template_batch_filled = test_template_batch.format(
                            batch_idx+1,
                            batch_num,
                            part_Acc_average_meter=self.part_Acc_average_meter,
                            part_F1_average_meter=self.part_F1_average_meter,
                            Acc_average_meter=self.Acc_average_meter,
                            F1_average_meter=self.F1_average_meter,
                        )
                    print(test_template_batch_filled)
        # Fill the end-of-evaluation summary.  str.format ignores unused
        # keyword arguments, so passing losses= for 3dpw is harmless.
        # NOTE(review): the "elif test_dataset_name == '3dpw'" branch below is
        # unreachable -- the first condition already covers "3dpw".
        if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
            test_template_filled = test_template.format(
                test_dataset_name,
                self.epoch+1,
                self.num_epoch,
                mpjpe_average_meter=self.mpjpe_average_meter,
                pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
                losses=self.losses,
                test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
                epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
                total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
            )
        elif test_dataset_name == "3dpw":
            test_template_filled = test_template.format(
                test_dataset_name,
                self.epoch+1,
                self.num_epoch,
                mpjpe_average_meter=self.mpjpe_average_meter,
                pa_mpjpe_average_meter=self.pa_mpjpe_average_meter,
                test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
                epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
                total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
            )
        elif test_dataset_name == "lsp":
            test_template_filled = test_template.format(
                test_dataset_name,
                self.epoch+1,
                self.num_epoch,
                part_Acc_average_meter=self.part_Acc_average_meter,
                part_F1_average_meter=self.part_F1_average_meter,
                Acc_average_meter=self.Acc_average_meter,
                F1_average_meter=self.F1_average_meter,
                test_time=get_HHMMSS_from_second(seconds=(time.time()-test_start)),
                epoch_time=get_HHMMSS_from_second(seconds=(time.time()-self.epoch_start)),
                total_time=get_HHMMSS_from_second(seconds=(time.time()-self.total_start))
            )
        print(test_template_filled)
        # save test templates info txt file
        if self.args.train != 0:
            if test_dataset_name == "h36m":
                self.test_templates_h36m.append(test_template_filled)
                templates_filename = "test_templates_h36m.txt"
                save_templates_info(self.test_templates_h36m, self.output_dir, templates_filename)
            elif test_dataset_name == "3dpw":
                self.test_templates_3dpw.append(test_template_filled)
                templates_filename = "test_templates_3dpw.txt"
                save_templates_info(self.test_templates_3dpw, self.output_dir, templates_filename)
            elif test_dataset_name == "lsp":
                self.test_templates_lsp.append(test_template_filled)
                templates_filename = "test_templates_lsp.txt"
                save_templates_info(self.test_templates_lsp, self.output_dir, templates_filename)
        else:
            self.test_templates.append(test_template_filled)
            templates_filename = "test_templates.txt"
            save_templates_info(self.test_templates, self.output_dir, templates_filename)
        # save pth file
        if is_save_pth:
            # "best" checkpoints only apply to the second (HMR) stage.
            if self.epoch >= self.args.first_stage_nEpoch:
                if test_dataset_name == "h36m" or test_dataset_name == "3dpw":
                    # Save best pth
                    if self.mpjpe_average_meter.avg < current_mpjpe:
                        print("MPJPE changes from {:.4f} to {:.4f}".format(current_mpjpe, self.mpjpe_average_meter.avg))
                        self.save_checkpoint_all(test_dataset_name, best=True)
                    else:
                        print("MPJPE doesn't change {:.4f}".format(current_mpjpe))
                elif test_dataset_name == "lsp":
                    # Save best pth
                    if self.Acc_average_meter > current_acc:
                        print("ACC changes from {:.4f} to {:.4f}".format(current_acc, self.Acc_average_meter))
                        self.save_checkpoint_all(test_dataset_name, best=True)
                    else:
                        print("ACC doesn't change {:.4f}".format(current_acc))
            # Save lastest pth
            if self.args.save_pth_all_epoch:
                self.save_checkpoint_all(test_dataset_name, save_all_epoch=True)
            else:
                self.save_checkpoint_all(test_dataset_name)
    def save_checkpoint_all(self, test_dataset_name, best=False, save_all_epoch=False):
        """
        Save checkpoints for the current training stage.

        First stage (epoch < first_stage_nEpoch): saves the pretext-task
        networks (context encoder, discriminator, jigsaw, rotation) and the
        texture net.  Second stage: saves HMR and the texture discriminator.

        best=True uses "best_<net>_<dataset>.pth" names and records the new
        best metric; save_all_epoch=True appends the epoch number instead.
        Note: str.format ignores surplus positional arguments, so the
        two-argument .format(...) calls also work with the one-slot
        "{}.pth" template.
        """
        filename = "best_{}_{}.pth" if best else "{}.pth"
        if save_all_epoch and not best:
            filename = "{}_{}_epoch"+str(self.epoch+1)+".pth"
        ### 3task network save pth, texture net save pth ###
        if self.epoch < self.args.first_stage_nEpoch:
            save_checkpoint({
                "state_dict": self.context_encoder_net.state_dict(),
                "loss": self.losses_CE.avg,
                "optimizer": self.context_encoder_optimizer.state_dict()
            },
                self.output_dir,
                filename.format("context_encoder_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.discriminator.state_dict(),
                "loss": self.losses_DC.avg,
                "optimizer": self.discriminator_optimizer.state_dict()
            },
                self.output_dir,
                filename.format("discriminator", test_dataset_name))
            save_checkpoint({
                "state_dict": self.jigsaw_puzzle_net.state_dict(),
                "accuracy": self.acces_JP.avg,
                "optimizer": self.jigsaw_puzzle_optimizer.state_dict()
            },
                self.output_dir,
                filename.format("jigsaw_puzzle_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.rotation_net.state_dict(),
                "accuracy": self.acces_ROT.avg,
                "optimizer": self.rotation_optimizer.state_dict()
            },
                self.output_dir,
                filename.format("rotation_net", test_dataset_name))
            save_checkpoint({
                "state_dict": self.texture_net.state_dict(),
                "loss": self.losses_texture_ori_img.avg,
                "optimizer": self.texture_net_optimizer.state_dict(),
            },
                self.output_dir,
                filename.format("texture_net", test_dataset_name))
        ### hmr save pth ###
        else:
            save_checkpoint({
                "state_dict": self.HMR.state_dict(),
                "loss_joints3d": self.losses_HMR_joints3d.avg,
                "loss_3task": self.losses_HMR_3task.avg,
                "optimizer_joints": self.HMR_optimizer_all.state_dict(),
            },
                self.output_dir,
                filename.format("hmr", test_dataset_name))
            save_checkpoint({
                "state_dict": self.texture_discriminator.state_dict(),
                "loss": self.losses_disc.avg,
                "optimizer": self.texture_discriminator_optimizer.state_dict()
            },
                self.output_dir,
                filename.format("texture_discriminator", test_dataset_name))
        # Record the metric that made this checkpoint the best so far.
        if best:
            if test_dataset_name == "h36m":
                self.current_mpjpe_h36m = self.mpjpe_average_meter.avg
            elif test_dataset_name == "3dpw":
                self.current_mpjpe_3dpw = self.mpjpe_average_meter.avg
            elif test_dataset_name == "lsp":
                self.current_acc_lsp = self.Acc_average_meter
    def fit(self):
        """Top-level entry point.

        args.train != 0: run the two-stage training loop for num_epoch
        epochs, evaluating every freq_eval epochs and at the end.
        args.train == 0: run a single evaluation pass on the configured
        test dataset without saving checkpoints.
        """
        if self.args.train != 0:
            # Best-so-far metrics consumed by evaluate()/save_checkpoint_all().
            self.current_mpjpe_h36m = math.inf
            self.current_mpjpe_3dpw = math.inf
            self.current_acc_lsp = 0
            self.train_loss = math.inf
            self.total_start = time.time()
            self.train_templates = list()
            self.test_templates_h36m = list()
            self.test_templates_3dpw = list()
            self.test_templates_lsp = list()
            for epoch in range(self.num_epoch):
                self.epoch = epoch
                self.epoch_start = time.time()
                # Optional pre-training evaluation; epoch is temporarily set
                # to -1 so saved artifacts are labeled "epoch 0".
                if epoch == 0 and self.args.first_eval:
                    self.epoch = -1
                    self.evaluate(self.test_dataloader_h36m, test_dataset_name="h36m", is_save_pth=False)
                    # self.evaluate(self.test_dataloader_3dpw, test_dataset_name="3dpw", is_save_pth=False)
                    # self.evaluate(self.test_dataloader_lsp, test_dataset_name="lsp", is_save_pth=False)
                    self.epoch = epoch
                print("HMR_optimizer_joints lr:", self.HMR_scheduler_all.get_lr())
                # Transition from stage 1 to stage 2: warm-start the texture
                # discriminator from the stage-1 discriminator checkpoint.
                if self.epoch == self.args.first_stage_nEpoch:
                    texture_discriminator_checkpoint = torch.load(os.path.join("results", self.args.output_dir, "save_pth", "discriminator.pth"), map_location=self.device)
                    self.texture_discriminator.load_state_dict(texture_discriminator_checkpoint["state_dict"])
                self.train()
                # The LR scheduler only applies to the second (HMR) stage.
                if self.epoch >= self.args.first_stage_nEpoch:
                    self.HMR_scheduler_all.step()
                if (epoch+1)%self.args.freq_eval == 0 or (epoch+1) == self.num_epoch:
                    self.evaluate(self.test_dataloader_h36m, test_dataset_name="h36m")
                    # self.evaluate(self.test_dataloader_3dpw, test_dataset_name="3dpw")
                    # self.evaluate(self.test_dataloader_lsp, test_dataset_name="lsp")
        else:
            self.epoch = 0
            self.num_epoch = 1
            self.total_start = time.time()
            self.epoch_start = time.time()
            self.test_templates = list()
            self.evaluate(self.test_dataloader, test_dataset_name=self.args.test_dataset.replace("-p2", ""), is_save_pth=False)
17170052915 | import logging
from flask import Flask, request
from picstitch import load_review_stars, load_amazon_prime, load_fonts, \
PicStitch
from gcloud import storage
import boto
import io
import time
import os
# # ---- Logging prefs -----
log_format = "[%(asctime)s] [%(process)d] [%(levelname)-1s] %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=date_format)
application = Flask(__name__)
def get_s3():
    """Connect to S3 in us-east-1 and return the chat-images bucket."""
    connection = boto.s3.connect_to_region('us-east-1')
    return connection.get_bucket('if-kip-chat-images')
def upload_to_s3(image, s3_bucket=None):
    """Upload *image*'s rendered PNG to S3 and return its public URL.

    The bucket is connected lazily on first use: the original default
    argument ``s3_bucket=get_s3()`` was evaluated at import time, opening
    an S3 connection even when this function was never called.
    """
    if s3_bucket is None:
        s3_bucket = get_s3()
    tmp_img = io.BytesIO()
    image.created_image.save(tmp_img, 'PNG', quality=90)
    k = s3_bucket.new_key(image.uniq_fn)
    k.set_contents_from_string(tmp_img.getvalue(), headers={
        "Content-Type": "image/png"})
    s3_base = 'https://s3.amazonaws.com/' + image.bucket_name + '/'
    img_url = s3_base + image.uniq_fn
    return img_url
def get_gcloud():
    """Build a Google Cloud Storage client from the bundled service-account
    key and return the chat-images bucket."""
    key_file = os.path.join(
        os.path.dirname(__file__),
        'gcloud_key',
        'gcloud-picstitch.json'
    )
    client = storage.Client(project='kip_styles')
    client = client.from_service_account_json(key_file)
    return client.get_bucket('if-kip-chat-images')
def upload_to_gcloud(image, gcloud_bucket=None):
    """Upload *image*'s rendered PNG to Google Cloud Storage and return its
    public URL.

    The bucket default is resolved lazily: the original default argument
    ``gcloud_bucket=get_gcloud()`` opened a second GCS connection at import
    time on top of the module-level ``gcloud_bucket`` that callers already
    pass in explicitly.
    """
    if gcloud_bucket is None:
        gcloud_bucket = get_gcloud()
    start = time.time()
    tmp_img = io.BytesIO()
    image.created_image.save(tmp_img, 'PNG', quality=90)
    saved = time.time()
    object_upload = gcloud_bucket.blob(
        os.path.join(image.origin, image.uniq_fn))
    blobbed = time.time()
    object_upload.upload_from_string(
        tmp_img.getvalue(), content_type='image/png')
    uploaded = time.time()
    # Break slow uploads down by phase for debugging.
    if time.time() - start > 1:
        logging.info(
            'slow upload. save: %.2fs, blob create: %.2fs, string upload %2fs',
            saved - start, blobbed - saved, uploaded - blobbed)
    # public_url is a property func that appears to just be a string-format
    # call. Probably no value in instrumenting.
    return object_upload.public_url
@application.route('/', methods=['GET', 'POST'])
def main():
    """Create a PicStitch image from the JSON request body, upload it to
    Google Cloud Storage, and return the public URL as the response body."""
    t1 = time.time()
    img_req = request.json
    logging.info('received req to make image')
    pic = PicStitch(img_req=img_req,
                    # bucket=s3_bucket,
                    # gcloud_bucket=gcloud_bucket,
                    amazon_prime_image=amazon_images,
                    review_stars_images=review_star_images,
                    font_dict=font_dict)
    t2 = time.time()
    gc_url = upload_to_gcloud(pic, gcloud_bucket)
    t3 = time.time()
    # t1..t3 split total latency into image creation vs upload.
    logging.info('request complete. make: %.2fs, upload: %.2fs, total: %.2fs to %s',
                 t2 - t1, t3 - t2, t3 - t1, gc_url)
    return gc_url
@application.route('/health')
def kubernetes_heath_check():
    """Liveness/readiness probe endpoint for Kubernetes.

    NOTE(review): the function name says "heath" (typo), but only the
    /health route matters to Kubernetes.
    """
    return 'health'
# load connections to gcloud and aws
# (shared per-process resources, created once at import)
gcloud_bucket = get_gcloud()
review_star_images = load_review_stars()
amazon_images = load_amazon_prime()
font_dict = load_fonts()

if __name__ == '__main__':
    port_num = 5000
    # run app
    logging.info('__not_threaded__')
    logging.info('running app on port ' + str(port_num))
    application.run(host='0.0.0.0', port=port_num, debug=True)
| Interface-Foundry/IF-root | src/image_processing/server.py | server.py | py | 3,767 | python | en | code | 1 | github-code | 36 |
74131826985 | import pytest
from framework.base_case import BaseCase
from framework.my_requests import MyRequests
from tests.assertions import Assertions
from tests.data_list_for_test import DataForCommon
id_req = '123-abc-321'
name = 'Jack'
surname = 'Lee'
age = 50
method = 'select'
filter_phone = '1234567890'
class TestCommon(BaseCase):
    """Contract checks: malformed or incomplete request bodies must fail."""

    def test_request_empty(self):
        """An empty JSON body is rejected."""
        resp = MyRequests.any_method(data='{}')
        Assertions.check_failure_status_in_response(response=resp)

    @pytest.mark.parametrize('data', DataForCommon.data_list_type_method)
    def test_check_type_methods(self, data):
        """Bodies whose 'method' field has a wrong type are rejected."""
        resp = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=resp)

    @pytest.mark.parametrize('data', DataForCommon.data_list_without_field_method)
    def test_without_field_method(self, data):
        """Bodies missing the 'method' field are rejected."""
        resp = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=resp)

    @pytest.mark.parametrize('data', DataForCommon.data_list_method_is_none)
    def test_field_method_none(self, data):
        """Bodies where 'method' is null are rejected."""
        resp = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=resp)
| Bozmanok/qa-test | tests/test_common.py | test_common.py | py | 1,229 | python | en | code | 0 | github-code | 36 |
5114570440 | # settings.py
import json, os, yaml
appName = "logunittest"
cmdsUt = ["pipenv", "run", "python", "-m", "unittest"]
# cmds_pt = ["pipenv", "run", "pytest", "--capture=sys"]
cmdsPt = ["pipenv", "run", "pytest", "--capture=sys", "-v", "-s"]
packageDir = os.path.dirname(__file__)
projectDir = os.path.dirname(packageDir)
actionsDir = os.path.join(packageDir, "actions")
actionsImportPath = f"{actionsDir.replace(projectDir, '')}".replace(os.sep, ".")[1:]
testDir = os.path.join(packageDir, "test")
testDataDir = os.path.join(testDir, "data")
# pipenv creates the text environment in the .tox folder (see tox.ini envlist)
venvsDir = os.path.join(packageDir, ".tox")
configDefault = "tox.ini"
envRegex = r"\d+\.\d+\.*\d*" # example 3.7.4
defaultLogDir = os.path.normpath(os.path.expanduser("~/.testlogs"))
standAloneDir = "standalone"
# pick one or more of the following: Logs that excede the threshold will be deleted
# set to None if a particular thresh should not apply
# NOTE: if count is set to None, no deletions or wanrings will appear due to large log count
logPreserveThreshold = {"days": 20, "count": 20}
# if verbosity is set to 1, then warnings will be issued for values threshold * warningTolerance
warningTolerance = 3
global verbose
verbose = 1
logStart = f"{' test stats start ':=^76}"
def get_testlogsdir(*args, application=None, **kwargs):
    """
    Return the testlogs directory for *application*; when no application is
    given, return the standalone folder under the default log directory.
    Note: an application represents a collection of pgList as defined in
    application.yml
    """
    # Use the module-level standAloneDir constant instead of a duplicated
    # "standalone" literal so the folder name is defined in one place.
    sub_dir = application if application is not None else standAloneDir
    return os.path.join(defaultLogDir, sub_dir)
def clean_params(params, *args, **kwargs):
    """Expand path aliases in params['pgList'] and return the mutated dict."""
    params["pgList"] = [unalias_path(raw) for raw in params["pgList"]]
    return params
# git_sync source is used in ut.py
# NOTE(review): the name below looks like an accidental duplication of
# "gitSyncCmd" -- confirm against ut.py before renaming.
gitSyncCmdgitSyncCmd = ["powershell.exe", "~/python_venvs/prc/git_sync.ps1"]
hookTypes = ["fileModification"]
def unalias_path(workPath: str) -> str:
    """
    Expand path aliases (``%USERPROFILE%``, ``~``, ``.``, ``..``) and return
    an absolute, normalized path.
    """
    workPath = workPath.replace(r"%USERPROFILE%", "~")
    workPath = workPath.replace("~", os.path.expanduser("~"))
    if workPath.startswith(".."):
        # "../x" is taken relative to the parent of the current directory.
        workPath = os.path.join(os.path.dirname(os.getcwd()), workPath[3:]).replace("/", os.sep)
    elif workPath.startswith("."):
        # "./x" is taken relative to the current directory.
        workPath = os.path.join(os.getcwd(), workPath[2:]).replace("/", os.sep)
    return os.path.normpath(os.path.abspath(workPath))
| lmielke/logunittest | logunittest/settings.py | settings.py | py | 2,631 | python | en | code | 0 | github-code | 36 |
21527384857 | # -*- coding: utf-8 -*-
"""Document directory_store here."""
import codecs
import logging
import os
import platform
from six import string_types
from six.moves.urllib import parse as urllib
from oaiharvest.record import Record
class DirectoryRecordStore(object):
    """Stores harvested OAI records as XML files under a base directory.

    Filenames are ``<identifier>.<metadataPrefix>.xml`` with unsafe
    characters percent-encoded.  When ``createSubDirs`` is a string, every
    occurrence of that string in the filename is replaced with the platform
    path separator so records are spread over sub-directories.
    """

    def __init__(self, directory, createSubDirs=False):
        self.directory = directory          # base output directory
        self.createSubDirs = createSubDirs  # False, True, or a separator string
        self.logger = logging.getLogger(__name__).getChild(self.__class__.__name__)

    # Annotations are string literals so the class can be defined even when
    # oaiharvest.record is not importable; they are equivalent for type
    # checkers (PEP 484 forward references).
    def write(self, record: "Record", metadataPrefix: str):
        """Write the record's metadata XML to its computed file path."""
        fp = self._get_output_filepath(record.header, metadataPrefix)
        self._ensure_dir_exists(fp)
        self.logger.debug("Writing to file {0}".format(fp))
        with codecs.open(fp, "w", encoding="utf-8") as fh:
            fh.write(record.metadata)

    def delete(self, record: "Record", metadataPrefix: str):
        """Delete the record's file; missing files are silently tolerated."""
        fp = self._get_output_filepath(record.header, metadataPrefix)
        try:
            os.remove(fp)
        except OSError:
            # File probably doesn't exist in the destination directory.
            # No further action needed, but leave a useful trace (the
            # original logged an empty debug message here).
            self.logger.debug("Could not remove {0}".format(fp))

    def _get_output_filepath(self, header, metadataPrefix):
        """Return the percent-encoded output path for *header*."""
        filename = "{0}.{1}.xml".format(header.identifier(), metadataPrefix)
        protected = []
        if platform.system() != "Windows":
            # ":" is legal in POSIX filenames, so keep identifiers readable.
            protected.append(":")
        if self.createSubDirs:
            # six.string_types is unnecessary here: the annotated signatures
            # above already require Python 3.
            if isinstance(self.createSubDirs, str):
                # Replace specified character with platform path separator
                filename = filename.replace(self.createSubDirs, os.path.sep)
            # Do not escape path separators, so that sub-directories
            # can be created
            protected.append(os.path.sep)
        filename = urllib.quote(filename, "".join(protected))
        fp = os.path.join(self.directory, filename)
        return fp

    def _ensure_dir_exists(self, fp):
        """Create the parent directory (and sub-directories) of *fp*."""
        if not os.path.isdir(os.path.dirname(fp)):
            # Missing base directory or sub-directory
            self.logger.debug("Creating target directory {0}".format(self.directory))
            os.makedirs(os.path.dirname(fp))
| bloomonkey/oai-harvest | oaiharvest/stores/directory_store.py | directory_store.py | py | 2,189 | python | en | code | 62 | github-code | 36 |
20603617401 | from selenium import webdriver
page_type = -1 # set default
driver = webdriver.Firefox(executable_path="./geckodriver")
driver.fullscreen_window()
driver.implicitly_wait(30)
# for signal
import signal
'''
driver.window_handles[0] : Happy face (default)
driver.window_handles[1] : Map
driver.window_handles[2] : Sad face
'''
def signal_SIGUSR1_handler(signum, frame):
    """SIGUSR1: switch to the first tab (happy face, the default page)."""
    print("Signal switching by signum", signum)
    global driver
    driver.switch_to.window(window_name=driver.window_handles[0])
def signal_SIGUSR2_handler(signum, frame):
    """SIGUSR2: switch to the second tab (map page)."""
    print("Signal switching by signum", signum)
    global driver
    driver.switch_to.window(window_name=driver.window_handles[1])
def signal_SIGUSR3_handler(signum, frame):
    """Switch to the third tab (sad face).

    NOTE(review): despite the name, this handler is registered on SIGINFO
    at module level, which is macOS/BSD-only -- confirm before running on
    Linux.
    """
    print("Signal switching by signum", signum)
    global driver
    driver.switch_to.window(window_name=driver.window_handles[2])
signal.signal(signal.SIGUSR1, signal_SIGUSR1_handler) # mac : kill -30 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -30
signal.signal(signal.SIGUSR2, signal_SIGUSR2_handler) # mac : kill -31 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -31
signal.signal(signal.SIGINFO, signal_SIGUSR3_handler) # mac : kill -29 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -29
while True:
    # default
    # page_type == -1: first pass -- open the three pages in separate tabs
    # (indices 0..2 of driver.window_handles), then land on tab 0.
    if page_type == -1:
        driver.get("http://localhost:8080/")
        driver.execute_script("window.open('');")
        driver.switch_to.window(window_name=driver.window_handles[1])
        driver.get("http://localhost:8080/sample2")
        driver.execute_script("window.open('');")
        driver.switch_to.window(window_name=driver.window_handles[2])
        driver.get("http://localhost:8080/sample3")

        # set default page
        driver.switch_to.window(window_name=driver.window_handles[0])
    elif page_type == 0:
        print("self switch to page number 0")
        driver.switch_to.window(window_name=driver.window_handles[0])
    elif page_type == 1:
        print("self switch to page number 1")
        driver.switch_to.window(window_name=driver.window_handles[1])
    elif page_type == 2:
        print("self switch to page number 2")
        driver.switch_to.window(window_name=driver.window_handles[2])
    else:
        # Any other number exits the loop.
        print("^^ㅗ")
        break
    # Read the next tab index from stdin (blocks; signals can also switch
    # tabs asynchronously via the handlers registered above).
    page_type = int(input())
driver.quit()
| INYEONGKIM/tony-and-naeyo | ref/display-switching/selenium-ver/firefoxOpener.py | firefoxOpener.py | py | 2,350 | python | en | code | 0 | github-code | 36 |
42097295687 | # Date Printer
# Converting date from one format to another
# Anatoli Penev
# 11.01.2018
def main():
    """Prompt for a date string (mm/dd/yyyy) and print it in long form."""
    date = input('Enter a date in the form mm/dd/yyyy: ')
    print_date(date)
def print_date(date):
    """Split *date* (mm/dd/yyyy) and print it as 'Month day year'.

    Re-prompts (via main()) when the month number is unknown or when the
    string does not contain exactly three '/'-separated fields -- the
    original let a malformed string like "1/2" crash with an uncaught
    ValueError from tuple unpacking.
    """
    try:
        month, day, year = date.split("/")
        print("The date is: {} {} {}".format(get_month_name(month), day, year))
    except ValueError:
        # Wrong number of fields, e.g. "1/2" or "01-02-2020".
        print("Date must be in the form mm/dd/yyyy!")
        main()
    except KeyError as error:
        print("Month {} doesn't exist!".format(error))
        main()
def get_month_name(month):
    """Map a month number given as a string ("1".."12") to its English name.

    Raises KeyError for anything outside that range; callers rely on the
    exception type.
    """
    names = ("January", "February", "March", "April", "May", "June",
             "July", "August", "September", "October", "November", "December")
    return {str(i): name for i, name in enumerate(names, start=1)}[month]
main()
| tolipenev/pythonassignments | date_print.py | date_print.py | py | 837 | python | en | code | 0 | github-code | 36 |
# Clock problem: count the seconds from 00:00:00 through N:59:59 whose
# digit string contains at least one '3'.
import sys
input = sys.stdin.readline

n = int(input())
count = 0
for hour in range(n + 1):
    for minute in range(60):
        for second in range(60):
            if "3" in f"{hour}{minute}{second}":
                count += 1
print(count)
| yesjuhee/study-ps | 2023_study/02_implementation/2.py | 2.py | py | 241 | python | en | code | 0 | github-code | 36 |
31835774368 | import math
d = listFontVariations("MutatorMathTest")
print(list(d.keys()))
for fontName in installedFonts():
variations = listFontVariations(fontName)
if variations:
print(fontName)
for axis_name, dimensions in variations.items():
print (axis_name, dimensions)
print ()
weightMin = listFontVariations('MutatorMathTest')['wght']['minValue']
weightMax = listFontVariations('MutatorMathTest')['wght']['maxValue']
widthMin = listFontVariations('MutatorMathTest')['wdth']['minValue']
widthMax = listFontVariations('MutatorMathTest')['wdth']['maxValue']
steps = 100
txt = '→FISHERIES'
def ip(a, b, f):
    """Linearly interpolate between a and b by factor f (0 -> a, 1 -> b)."""
    span = b - a
    return a + f * span
# One frame per step; the two variation axes are driven by cos/sin so they
# animate a full cycle 90 degrees out of phase.
# NOTE(review): pi/cos/sin are used unqualified -- presumably injected into
# the DrawBot script namespace; confirm (plain Python would need math.*).
for i in range(steps):
    angle = 2 * pi * (i / steps)
    # a1/a2 swing in [0, 1] and select a point on each axis range.
    a1 = .5+cos(angle)*.5
    a2 = .5+sin(angle)*.5
    newPage(1200, 250)
    fill(1)
    rect(0,0,width(),height())
    fill(0)
    font("MutatorMathTest")
    fontSize(200)
    weightValue = ip(weightMin, weightMax, a1)
    widthValue = ip(widthMin, widthMax, a2)
    fontVariations(wght=weightValue, wdth=widthValue)
    text(txt, (20, 50))
    # Caption the current axis values in a fixed-width font.
    font("Menlo-Regular")
    fontSize(10)
    text('MutatorSans weight: %3.3f, width: %3.3f' % (weightValue, widthValue), (10, 10))
saveImage('mutatorSans.mp4')
saveImage('mutatorSans.gif')
#saveImage('mutatorSans_frames.pdf') | LettError/mutatorSans | drawbot/animateMutatorSans.py | animateMutatorSans.py | py | 1,320 | python | en | code | 112 | github-code | 36 |
40403373260 | from dash import Dash, html, dcc
import plotly.express as px
import pandas as pd
import numpy as np
import statsmodels as sm
from scipy.stats import ttest_1samp
from statsmodels.stats.power import TTestPower
import plotly.express as px
import plotly.offline as pyo
import plotly.io as pio
from jupyter_dash import JupyterDash
from dash import Dash, dcc, html, Input, Output
from dash.dependencies import State
import chart_studio.plotly as py
app = Dash(__name__)
# Four numeric inputs, a submit button, and an output area for the plot.
# NOTE(review): value='Initial Value' is a string placeholder on
# type="number" inputs -- presumably ignored by the browser; confirm the
# intended defaults.
app.layout = html.Div([
    html.H1("P-value Simulation"),
    # html.Div([
    html.H4('# of Sims:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='nSims',
        value='Initial Value',
        type = "number",
    ),
    # ]),
    # html.Div([
    html.H4('Sample Mean:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='sample-mean',
        value='Initial Value',
        type = "number",
    ),
    # ]),
    # html.Div([
    html.H4('Sample Size:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='sample-size',
        value='Initial Value',
        type = "number",
    ),
    # ])
    html.H4('Std. Dev:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='std-dev',
        value='Initial Value',
        type = "number",
    ),
    html.Br(),
    html.Button('Submit', id='submit_val'),
    html.Div(id='container-button-basic',
             children='Enter all parameters and click submit'),
    html.Hr(),
    html.Label('Output'),
    html.Div(id='output-submit')
])
@app.callback(
    Output('output-submit', 'children'),
    [Input('submit_val', 'n_clicks')],
    [State('nSims', 'value'),
     State('sample-mean', 'value'),
     State('sample-size', 'value'),
     State('std-dev', 'value')]
)
def simulations_output(clicked, nSims, sample_mean, sample_size, std_dev):
    """Simulate `nSims` one-sample t-tests against popmean=100 and render
    the distribution of their p-values as a histogram.

    Each simulation draws `sample_size` points from N(sample_mean, std_dev)
    and runs a one-sample t-test.  Under H0 (sample_mean == 100) the
    p-values are uniform, which the dashed red line marks per bin.

    Returns a dcc.Graph for the 'output-submit' div, or None until the
    submit button has been clicked at least once.
    """
    if not clicked:
        return None
    # NOTE(review): the numeric inputs can still be None/strings before the
    # user edits them -- confirm upstream validation is acceptable.
    np.random.seed(1)  # fixed seed so repeated submits give identical output
    p_values = []
    # was range(1, nSims): off-by-one that ran only nSims-1 simulations
    for _ in range(nSims):
        sample = np.random.normal(loc=sample_mean, scale=std_dev, size=sample_size)
        _t_stat, p_value = ttest_1samp(sample, popmean=100)
        p_values.append(p_value)

    hist_df = pd.DataFrame({"p_values": p_values})
    bars = 20
    fig = px.histogram(hist_df, x="p_values")
    # 20 fixed-width bins spanning the whole [0, 1] p-value range.
    fig.update_traces(xbins=dict(
        start=0.0,
        end=1.0,
        size=0.05
    ))
    fig.update_layout(yaxis_range=[0, nSims], yaxis_title="Frequency of p-values",
                      margin=dict(l=5, r=5, t=5, b=5))
    # Expected per-bin count when p-values are uniform under H0.
    fig.add_hline(y=nSims/bars, line_width=3, line_dash="dash", line_color="red")
    # Embed the figure in the page; fig.show() opened a separate browser
    # window and returned None to the output div, so nothing was rendered.
    return dcc.Graph(figure=fig)
if __name__ == '__main__':
app.run_server(debug=True)
| ashton77/statistical-simulations | simulation_app.py | simulation_app.py | py | 3,106 | python | en | code | 0 | github-code | 36 |
38971244001 | from django.db import models
from datetime import datetime
from multiselectfield import MultiSelectField
from realtors.models import Realtor
from areaprops.models import Area
# Create your models here
# Choices for amenities: (stored value, human-readable label) pairs for
# the Listing.amenities multi-select field.
amenities_choices = (
    ('security','security'),
    ('gymnasium','gymnasium'),
    ('waste disposal','waste disposal'),
    # Label fixed: was 'reserved_parking', inconsistent with the stored value
    # and with the display text of every other entry.
    ('reserved parking','reserved parking'),
    ('lift','lift'),
    ('club house','club house'),
    ('shopping center','shopping center'),
    ('rain water harvesting','rain water harvesting'),
    ('water plant','water plant'),
    ('landscape garden','landscape garden'),
    ('kids play area','kids play area'),
    ('cctv','cctv'),
    ('cycle track','cycle track')
)
# Type of property: (stored value, display label) choices for
# Listing.property_type.  Values double as labels.
type_of_property = (
    ("1/2/3 BHK APARTMENT","1/2/3 BHK APARTMENT"),
    ("1/2 BHK APARTMENT","1/2 BHK APARTMENT"),
    ("1 BHK APARTMENT","1 BHK APARTMENT"),
    ("2 BHK APARTMENT","2 BHK APARTMENT"),
    ("3 BHK APARTMENT","3 BHK APARTMENT"),
    ("3 BHK DUPLEX","3 BHK DUPLEX"),
    ("2 BHK DUPLEX","2 BHK DUPLEX"),
    ("VILLA","VILLA"),
    ("BUNGALOW","BUNGALOW"),
    ("PLOT","PLOT"),
    ("PENTHOUSE","PENTHOUSE")
)
# Create your models here.
class Listing(models.Model):
    """A real-estate project listing published by a realtor."""

    # Ownership / identification
    realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
    title = models.CharField(max_length=200)
    builder = models.CharField(max_length=200)
    rera_id = models.CharField(max_length=200)
    project_id = models.CharField(max_length=200)
    # Location
    address = models.CharField(max_length=200)
    area = models.ForeignKey(Area, on_delete=models.DO_NOTHING)
    city = models.CharField(max_length=30,default='bhopal')
    state = models.CharField(max_length=30,default='MP')
    zipcode = models.CharField(max_length=20)
    # Details
    description = models.TextField(blank=True)
    amenities = MultiSelectField(choices=amenities_choices)
    # Price and carpet-area ranges (a project spans multiple unit sizes)
    price_start = models.IntegerField()
    price_end = models.IntegerField()
    area_start = models.IntegerField()
    area_end = models.IntegerField()
    property_type = models.CharField(max_length=30,choices=type_of_property)
    # NOTE(review): field name misspells "possession"; renaming needs a migration
    possesion = models.CharField(max_length=20)
    # Photos: one required main image plus up to six optional ones
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
    photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    # Publication state
    is_published = models.BooleanField(default=True)
    list_date = models.DateTimeField(default=datetime.now, blank=True)

    def __str__(self):
        return self.title
72907254504 | #Zadanie: Utwórz metodę, która pobierze liczbę i wypisze każdy znak w osobnej
#linii zaczynając od ostatniej cyfry (np. dla liczby 123 będą to trzy
#linie z 3, 2 i 1).
def showReverseWord():
    """Read a number from the user and print its characters one per line,
    starting from the last digit (e.g. 123 prints 3, 2, 1)."""
    user_input = input("Podaj liczbę: ")
    # Walk the string from the end; each character gets its own line.
    for digit in reversed(user_input):
        print(digit)

# Run the interactive routine.
showReverseWord()
74516968424 | #!/usr/bin/env python
import score
from ctypes import *
class PenMLScore(score.Score):
    # Penalized maximum-likelihood network score whose per-variable family
    # score is computed by an external C function loaded through ctypes.
    def __init__(self, data, scoref,
                 do_cache=True, do_storage=True, cachefile=None):
        self.scoref = scoref
        # Declare the C signature:
        #   double scoref(void *data, int variable, int nof_parents, int *parents)
        self.scoref.restype = c_double
        self.scoref.argtypes = [c_void_p, c_int, c_int, POINTER(c_int)]
        score.Score.__init__(self,data,do_cache, do_storage, cachefile)

    def score_ss_var(self, bn, v):
        # score.cparents yields the parent set of v in a ctypes-compatible
        # (count, int-array) form, forwarded straight to the C scorer.
        nof_parents, parents = score.cparents(bn,v)
        return self.scoref(self.data.dt, v, nof_parents, parents)
| tomisilander/bn | bn/learn/pen_ml_score.py | pen_ml_score.py | py | 562 | python | en | code | 1 | github-code | 36 |
8097925766 | import vk_api
from secret import secret
from vk_api.utils import get_random_id
class Vk:
    """Thin wrapper around the VK messages API (vk_api library)."""

    def __init__(self):
        # Authorize with the community token kept in the secret module.
        vk_session = vk_api.VkApi(token=secret.vk_token2)
        self.vk = vk_session.get_api()

    def send_group_message(self, cht_id, msg):
        # Send `msg` to the group chat `cht_id`; random_id deduplicates sends.
        # NOTE(review): key/server/ts look like long-poll parameters, not
        # messages.send arguments -- confirm they are actually required here.
        self.vk.messages.send(
            key=secret.key,
            server=secret.server,
            ts="1",
            chat_id=cht_id,
            message=msg,
            random_id=get_random_id())
32322430246 | import random
import sys
MAX_SIZE = sys.maxsize
MIN_SIZE = 0
# Call program as python3 create_test_inputs.py $NUM_PAIRS
def get_random_pair(min_size, max_size):
    """Return a tuple of two independent uniform random ints, each drawn
    from the inclusive range [min_size, max_size]."""
    return (random.randint(min_size, max_size),
            random.randint(min_size, max_size))
def write_pairs(pairs):
    """Write one "x y" line per pair to a file named testinput-<count>,
    where <count> is the number of pairs."""
    lines = [f'{x} {y}\n' for x, y in pairs]
    with open(f'testinput-{len(pairs)}', "w") as out:
        out.writelines(lines)
def generate_random_pairs(num_pairs):
    """Generate num_pairs random pairs over the full range and persist
    them via write_pairs."""
    pairs = [get_random_pair(MIN_SIZE, MAX_SIZE) for _ in range(num_pairs)]
    write_pairs(pairs)
if __name__ == '__main__':
    # Guard so importing this module does not immediately read sys.argv
    # and write a file.  CLI: python3 create_test_inputs.py NUM_PAIRS
    random.seed()
    num_pairs = int(sys.argv[1])
    generate_random_pairs(num_pairs)
| jemisonf/closest_pair_of_points | create_test_inputs.py | create_test_inputs.py | py | 668 | python | en | code | 2 | github-code | 36 |
15131224068 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # Annotation quoted: TreeNode exists only as a commented-out template in
    # this file, so an unquoted annotation raised NameError at import time.
    def getMinimumDifference(self, root: 'TreeNode') -> int:
        """Return the minimum absolute difference between the values of any
        two nodes in a binary search tree.

        An inorder traversal of a BST yields its values in ascending order,
        so the answer is the smallest gap between adjacent traversal values.
        """
        # Collected inorder (ascending) values.
        self.stack = []
        self.inorder(root)
        # Initialise with |min - max|: never smaller than any adjacent gap.
        min_abs = abs(self.stack[0] - self.stack[-1])
        for i in range(1, len(self.stack)):
            min_abs = min(min_abs, abs(self.stack[i] - self.stack[i - 1]))
        return min_abs

    def inorder(self, node) -> None:
        """Append node values to self.stack in inorder (left, root, right)."""
        if node.left:
            self.inorder(node.left)
        self.stack.append(node.val)
        if node.right:
            self.inorder(node.right)
| EnteLee/practice_algorithm | leetcode/530_minimum_absolute_difference_in_bst/minimum_absolute_difference_in_bst_LJS.py | minimum_absolute_difference_in_bst_LJS.py | py | 798 | python | en | code | 0 | github-code | 36 |
21577690614 | import yaml
import torch
import torch.nn as nn
from . import layers
class Model(nn.Module):
    """A graph-structured network assembled from a YAML layer specification.

    Each entry of the config's ``layers`` list has the form
    ``[sources, layer_name, args, kwargs, num]``:

    * ``sources`` -- int or list of ints naming this layer's inputs.
      Negative values are offsets relative to the current layer
      (``-1`` = previous layer); non-negative values select one of the
      model's external inputs.
    * ``layer_name`` -- a class name looked up in the ``layers`` module.
    * ``args`` / ``kwargs`` -- constructor arguments for that class.
    * ``num`` -- how many times to apply the layer sequentially (int > 0).

    Layers whose output no other layer consumes become the model outputs.
    """

    def __init__(self, yaml_file):
        super(Model, self).__init__()
        with open(yaml_file, 'r') as file:
            # Read the file once: the original re-read the exhausted stream
            # in the fallback branch, which always parsed an empty string.
            text = file.read()
            try:
                model_cfg = yaml.load(text, Loader=yaml.FullLoader)
            except TypeError:
                # Old PyYAML versions do not accept the Loader keyword.
                model_cfg = yaml.load(text)

        self.layers = []           # instantiated layer modules, in config order
        self.sources = []          # per layer: resolved input indices
        self.nums = []             # per layer: sequential repeat count
        self.input_indexs = set()  # layers fed directly by an external input

        layers_config = model_cfg['layers']
        for n, line in enumerate(layers_config):
            sources, layer_name, args, kwargs, num = line
            if isinstance(sources, int):
                sources = [sources]
            if not isinstance(num, int) or num <= 0:
                assert False, "layer's num must be int and > 0"

            self.layers.append(
                eval(f"layers.{layer_name}")(*args, **kwargs)
            )

            indexs = []
            for source in sources:
                if source < 0:
                    # Offset relative to the current layer (-1 = previous).
                    index = len(self.sources) + source
                    assert index >= 0, "找不到输入层"
                    indexs.append(index)
                else:
                    # External input i is encoded as -(i+1) so it cannot
                    # collide with non-negative layer indices.
                    # NOTE(review): this records the *layer* index n, so the
                    # input-count assertion in forward() may undercount when
                    # one layer consumes several external inputs -- confirm.
                    self.input_indexs.add(n)
                    indexs.append(-(source + 1))
            self.sources.append(indexs)
            self.nums.append(num)

        # Output layers: those whose index is never used as a source.
        all_indexs = set()
        index_been_used = set()
        for i, indexs in enumerate(self.sources):
            all_indexs.add(i)
            for index in indexs:
                index_been_used.add(index)
        self.output_indexs = all_indexs - index_been_used

        self.layers = nn.Sequential(*self.layers)

    def get_layer_output(self, index, forward_dict):
        """Return the output of layer `index`, computing and caching it in
        `forward_dict` (which forward() pre-seeds with the external inputs
        under negative keys)."""
        if index in forward_dict.keys():
            return forward_dict[index]
        # Recursively evaluate every source of this layer first.
        source_outputs = []
        for source_index in self.sources[index]:
            source_outputs.append(self.get_layer_output(source_index, forward_dict))
        output = self.layers[index](*source_outputs)
        # Apply the same module num-1 further times, feeding its own output
        # back in (wrapped in a list so it unpacks as positional arguments).
        for i in range(self.nums[index] - 1):
            if not isinstance(output, list):
                output = [output]
            output = self.layers[index](*output)
        forward_dict[index] = output
        return output

    def forward(self, *inputs, **kwargs):
        """Run the layer graph on the external inputs and return the outputs
        of all terminal layers (unwrapped when there is exactly one)."""
        assert len(inputs) == len(self.input_indexs), ""
        forward_dict = {}
        # Seed external inputs under negative keys: input i -> key -(i+1).
        for i, input in enumerate(inputs):
            forward_dict[-(i + 1)] = input
        outputs = [self.get_layer_output(output_index, forward_dict) for output_index in self.output_indexs]
        if len(outputs) == 1:
            return outputs[0]
        return outputs
| IMath123/imath | Model/__init__.py | __init__.py | py | 2,826 | python | en | code | 0 | github-code | 36 |
19452353827 | # Two sum O(N) O(N)
# Check 1497. Check If Array Pairs Are Divisible by k
# The difference in 1497 is once used need to delete it from lookup table
class Solution:
    # Annotation quoted: `List` is never imported in this file, so an
    # unquoted annotation raised NameError when the class was defined.
    def numPairsDivisibleBy60(self, time: 'List[int]') -> int:
        """Count index pairs (i, j) with i < j whose durations sum to a
        multiple of 60.  O(n) time, O(1) extra space (60 remainders).

        For each song, the number of valid partners seen so far is the count
        of earlier songs whose remainder complements this one mod 60.
        """
        remainder_counts = collections.defaultdict(int)
        pairs = 0
        # Loop variable renamed: the original shadowed the `time` parameter.
        for t in time:
            pairs += remainder_counts[-t % 60]   # -t % 60 == (60 - t % 60) % 60
            remainder_counts[t % 60] += 1
        return pairs
| whocaresustc/Leetcode-Summary | 1010. Pairs of Songs With Total Durations Divisible by 60.py | 1010. Pairs of Songs With Total Durations Divisible by 60.py | py | 439 | python | en | code | 0 | github-code | 36 |
22889044030 | import sys
from PyQt5 import QtCore, QtWidgets, uic
import mysql.connector as mc
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QMessageBox
from FrmMatakuliah import WindowMatakuliah
qtcreator_file = "dashboard_admin.ui" # Enter file here.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtcreator_file)
class WindowDashboardAdmin(QtWidgets.QMainWindow, Ui_MainWindow):
    """Admin dashboard window; widgets are defined in dashboard_admin.ui."""

    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)

        # Event Setup: wire menu actions to their handlers.
        self.actionExit.triggered.connect(self.app_exit)
        self.actionMatakuliah_2.triggered.connect(self.app_matakuliah)

    def app_exit(self):
        # Menu handler: terminate the whole application.
        sys.exit()

    def app_matakuliah(self):
        # Open the course (matakuliah) window application-modally, blocking
        # input to the dashboard until it is closed.
        # NOTE(review): relies on the module-level `winmatakuliah` created at
        # startup -- confirm it exists before this handler can fire.
        winmatakuliah.setWindowModality(QtCore.Qt.ApplicationModal)
        winmatakuliah.show()

    def messagebox(self, title, message):
        # Convenience helper: show a blocking message box with an OK button.
        mess = QMessageBox()
        mess.setWindowTitle(title)
        mess.setText(message)
        mess.setStandardButtons(QMessageBox.Ok)
        mess.exec_()
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
window = WindowDashboardAdmin()
winmatakuliah = WindowMatakuliah()
window.showFullScreen()
sys.exit(app.exec_())
else:
app = QtWidgets.QApplication(sys.argv)
window = WindowDashboardAdmin()
winmatakuliah = WindowMatakuliah() | freddywicaksono/python_login_multiuser | DashboardAdmin.py | DashboardAdmin.py | py | 1,432 | python | en | code | 2 | github-code | 36 |
32998879166 |
def exec_op(instructions):
    """Yield the value of register X during every CPU cycle.

    Each instruction is a pre-split token list: ['noop'] takes one cycle,
    ['addx', V] takes two cycles and then adds V to X (which starts at 1).
    """
    x = 1
    for op, *operands in instructions:
        yield x
        if op == 'addx':
            # addx occupies a second cycle before the register updates.
            yield x
            x += int(operands[0])
def main() -> None:
    """
    Day Ten Advent of Code problem: signal strengths (Part A) and CRT
    rendering (Part B) from the puzzle input.
    :return: None
    """
    # 'with' guarantees the file is closed even if parsing fails
    # (the original paired open()/close() manually).
    with open('./input/dayTen.txt', 'r') as file:
        instructions = file.read().splitlines()

    signals = list(exec_op(inst.split() for inst in instructions))

    print("Part A:")
    # Sum of signal strengths sampled at the six probe cycles (1-indexed).
    print(sum(signals[i - 1] * i for i in [20, 60, 100, 140, 180, 220]))

    print('Part B:')
    # CRT: a pixel lights when the 3-wide sprite centred on X overlaps the
    # beam's column; render 6 rows of 40 columns.
    for row in range(6):
        print(''.join('.#'[abs(signals[row * 40 + col] - col) <= 1] for col in range(40)))
if __name__ == '__main__':
main()
| smenon18/AdventOfCode2022 | day_ten.py | day_ten.py | py | 701 | python | en | code | 0 | github-code | 36 |
30239035937 | """
This module provides functions for justifying Unicode text in a monospaced
display such as a terminal.
We used to have our own implementation here, but now we mostly rely on
the 'wcwidth' library.
"""
from unicodedata import normalize
from wcwidth import wcswidth, wcwidth
from ftfy.fixes import remove_terminal_escapes
def character_width(char: str) -> int:
    r"""
    Determine the width that a character is likely to be displayed as in
    a monospaced terminal. The width for a printable character will
    always be 0, 1, or 2.

    Nonprintable or control characters will return -1, a convention that comes
    from wcwidth.

    >>> character_width('車')
    2
    >>> character_width('A')
    1
    >>> character_width('\N{ZERO WIDTH JOINER}')
    0
    >>> character_width('\n')
    -1
    """
    # Delegate to the wcwidth library, which implements the Unicode
    # terminal-cell width tables (POSIX wcwidth semantics).
    return wcwidth(char)
def monospaced_width(text: str) -> int:
    r"""
    Return the number of character cells this string is likely to occupy in a
    monospaced, modern, Unicode-aware terminal emulator — its "display width".

    Useful for formatting text containing non-spacing characters or CJK
    characters that occupy two cells. Returns -1 if the string contains a
    non-printable or control character.

    >>> monospaced_width('ちゃぶ台返し')
    12
    >>> len('ちゃぶ台返し')
    6
    >>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
    12
    >>> monospaced_width('example\x80')
    -1

    A more complex example: The Korean word 'ibnida' can be written with 3
    pre-composed characters or 7 jamo. Either way, it *looks* the same and
    takes up 6 character cells.

    >>> monospaced_width('입니다')
    6
    >>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
    6

    The word "blue" with terminal escapes to make it blue still takes up only
    4 characters, when shown as intended.

    >>> monospaced_width('\x1b[34mblue\x1b[m')
    4
    """
    # NFC-normalize first so decomposed sequences (such as Hangul jamo) are
    # measured the same as their pre-composed forms, and strip terminal
    # escapes, which occupy no cells when rendered as intended.
    cleaned = remove_terminal_escapes(normalize("NFC", text))
    return wcswidth(cleaned)
def display_ljust(text, width, fillchar=" "):
    """
    Pad `text` on the right with `fillchar` until its display width, in a
    monospaced terminal, is at least `width` character cells. `fillchar`
    must occupy exactly one cell.

    "Left" refers to the beginning of the string (as in "left parenthesis"),
    which may actually render on the right in an RTL context.

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_ljust(line, 20, '▒'))
    Table flip▒▒▒▒▒▒▒▒▒▒
    (╯°□°)╯︵ ┻━┻▒▒▒▒▒▒▒
    ちゃぶ台返し▒▒▒▒▒▒▒▒

    This example, and the similar ones that follow, should come out justified
    correctly when viewed in a monospaced terminal; a Web browser will
    probably not render them aligned.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    cells = monospaced_width(text)
    # Width is undefined for non-printable content: return the text unpadded.
    if cells == -1:
        return text
    return text + fillchar * max(0, width - cells)
def display_rjust(text, width, fillchar=" "):
    """
    Pad `text` on the left with `fillchar` until its display width, in a
    monospaced terminal, is at least `width` character cells. `fillchar`
    must occupy exactly one cell.

    "Right" refers to the end of the string (as in "right parenthesis"),
    which may actually render on the left in an RTL context.

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_rjust(line, 20, '▒'))
    ▒▒▒▒▒▒▒▒▒▒Table flip
    ▒▒▒▒▒▒▒(╯°□°)╯︵ ┻━┻
    ▒▒▒▒▒▒▒▒ちゃぶ台返し
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    cells = monospaced_width(text)
    # Width is undefined for non-printable content: return the text unpadded.
    if cells == -1:
        return text
    return fillchar * max(0, width - cells) + text
def display_center(text, width, fillchar=" "):
    """
    Center `text` by padding both sides with `fillchar` until its display
    width, in a monospaced terminal, is at least `width` character cells.
    `fillchar` must occupy exactly one cell; when the padding is odd, the
    extra cell goes on the right.

    >>> lines = ['Table flip', '(╯°□°)╯︵ ┻━┻', 'ちゃぶ台返し']
    >>> for line in lines:
    ...     print(display_center(line, 20, '▒'))
    ▒▒▒▒▒Table flip▒▒▒▒▒
    ▒▒▒(╯°□°)╯︵ ┻━┻▒▒▒▒
    ▒▒▒▒ちゃぶ台返し▒▒▒▒
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")

    cells = monospaced_width(text)
    # Width is undefined for non-printable content: return the text unpadded.
    if cells == -1:
        return text
    pad = max(0, width - cells)
    left_pad = pad // 2
    return fillchar * left_pad + text + fillchar * (pad - left_pad)
| rspeer/python-ftfy | ftfy/formatting.py | formatting.py | py | 5,798 | python | en | code | 3,623 | github-code | 36 |
37635874200 | # Given an integer array with no duplicates. A maximum tree building on this array is defined as follow:
# The root is the maximum number in the array.
# The left subtree is the maximum tree constructed from left part subarray divided by the maximum number.
# The right subtree is the maximum tree constructed from right part subarray divided by the maximum number.
# Construct the maximum tree by the given array and output the root node of this tree.
# Example 1:
# Input: [3,2,1,6,0,5]
# Output: return the tree root node representing the following tree:
# 6
# / \
# 3 5
# \ /
# 2 0
# \
# 1
# Note:
# The size of the given array will be in the range [1,1000].
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class SegTreeNode:
    """Node of a segment tree answering range-maximum queries.

    Each node covers the inclusive index range [start, end] and stores the
    maximum value of that range together with the index where it occurs.
    """

    def __init__(self, max_val=None, max_ind=None, start=None, end=None, left=None, right=None):
        self.max_val = max_val
        self.max_ind = max_ind
        self.start = start
        self.end = end
        self.left = left
        self.right = right

    def query(self, start, end):
        """Return (max value, its index) over [start, end] ∩ [self.start, self.end]."""
        # Node fully inside the query range: the answer is precomputed.
        if start <= self.start and end >= self.end:
            return self.max_val, self.max_ind
        # No overlap at all: neutral element for max.
        if end < self.start or start > self.end:
            return float("-inf"), -1
        # Partial overlap: combine both children (ties go to the right child).
        left_val, left_ind = self.left.query(start, end)
        right_val, right_ind = self.right.query(start, end)
        return (left_val, left_ind) if left_val > right_val else (right_val, right_ind)
class MaxRangeSegTree:
    """Segment tree over an array supporting O(log n) range-maximum queries."""

    def __init__(self, arr):
        self.tree = self.construct_tree(arr, 0, len(arr) - 1)

    def query(self, start, end):
        """Return (max value, its index) over arr[start..end], inclusive."""
        return self.tree.query(start, end)

    def construct_tree(self, arr, begin, end):
        """Recursively build the subtree covering arr[begin..end]."""
        if begin == end:
            # Leaf: a single element is its own maximum.
            return SegTreeNode(arr[begin], begin, begin, end)
        mid = (begin + end) // 2
        node = SegTreeNode(start=begin, end=end)
        node.left = self.construct_tree(arr, begin, mid)
        node.right = self.construct_tree(arr, mid + 1, end)
        # Pull the larger child's maximum up; ties go to the right child,
        # matching SegTreeNode.query's tie-breaking.
        if node.left.max_val > node.right.max_val:
            node.max_val, node.max_ind = node.left.max_val, node.left.max_ind
        else:
            node.max_val, node.max_ind = node.right.max_val, node.right.max_ind
        return node
class Solution:
    def constructMaximumBinaryTree(self, nums):
        """Build the maximum binary tree of nums, using a segment tree so
        each range-maximum lookup costs O(log n)."""
        self.max_seg_tree = MaxRangeSegTree(nums)
        return self.helper(nums, 0, len(nums) - 1)

    def helper(self, nums, begin, end):
        """Recursively build the subtree over nums[begin..end]; an empty
        range yields None."""
        if begin > end:
            return None
        root_val, root_ind = self.max_seg_tree.query(begin, end)
        node = TreeNode(root_val)
        node.left = self.helper(nums, begin, root_ind - 1)
        node.right = self.helper(nums, root_ind + 1, end)
        return node
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.