| seq_id | text | repo_name | sub_path | file_name | file_ext | file_size_in_byte | program_lang | lang | doc_type | stars | dataset | pt |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20896859782 | from __future__ import division
from random import shuffle
from maze import *
def move_forward(maze): # proceed forward choosing random directions until there is no path available
new_maze = maze
directions = [UP, RIGHT, DOWN, LEFT]
creation_ended = False
while not creation_ended:
shuffle(directions)
is_able_to_move_forward = False
for direction in directions:
new_x, new_y = new_maze.stack[-1]
new_x = new_x + direction[0]
new_y = new_y + direction[1]
if new_maze.is_inside_board(new_x, new_y):
if new_maze.is_not_active(new_x, new_y):
new_maze.stack.append((new_x, new_y))
new_maze.walls[new_x][new_y] = 0
new_maze.board[new_x][new_y].is_active = True
is_able_to_move_forward = True
break
if not is_able_to_move_forward:
if new_maze.all_board_cells_active():
creation_ended = True
else:
new_maze = move_backward(new_maze)
return new_maze
def move_backward(maze): # when no path is available, move backward using stack until there is a possible path
new_maze = maze
is_able_to_move_forward = False
stack_index = -2
while not is_able_to_move_forward:
x, y = new_maze.stack[stack_index]
if new_maze.has_not_active_neighbour(x, y):
is_able_to_move_forward = True
new_maze.stack.append((x, y))
stack_index = stack_index - 1
return new_maze
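# Hedged usage sketch (assumes the maze module exposes a Maze class with the
# stack/walls/board attributes used above; names are illustrative):
#   maze = Maze(width, height)
#   maze.stack.append((0, 0))            # seed the walk at a starting cell
#   maze.board[0][0].is_active = True
#   maze = move_forward(maze)            # carve until every cell is active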
| alextlinden/maze | maze_creation.py | maze_creation.py | py | 1,595 | python | en | code | 0 | github-code | 36 |
37708151811 | import pandas as pd
import numpy as np
import xarray as xr
from pandarallel import pandarallel
import time
import credentials
import tc_functions as fun
import plotting_functions as tcplt
storm_data = pd.read_csv('data/filtered_storm_list_keep-leading-5.csv')
storm_data["DATETIME"] = pd.to_datetime(storm_data["DATETIME"])
def int_circulation_storm(id, storm_data, r, normalize, plt_folder, data_folder, upper = False, plot = False):
storm = storm_data[storm_data['ID'].str.match(id)]
storm = storm.reset_index(drop = True)
int_circ = []
for index, datapoint in storm.iterrows():
year = datapoint["DATETIME"].year
month = datapoint["DATETIME"].month
day = datapoint["DATETIME"].day
hour = datapoint["DATETIME"].hour
gfs_data = fun.gfs_access(year, month, day, hour,
credentials.RDA_USER, credentials.RDA_PASSWORD)
print("Doing #" + str(index) + "/" + str(storm.shape[0]-1))
# Use upper level winds or shear?
if upper:
vws = fun.wind_stamp(datapoint['LAT'], datapoint['LON'], 800, 200, gfs_data,
vortex_rm = False, vortex_rm_rad = 650)
else:
vws = fun.shear_stamp(datapoint['LAT'], datapoint['LON'], 800, gfs_data,
vortex_rm = True, vortex_rm_rad = 650)
ic = fun.integrated_circulation(vws, r, normalize)
        int_circ.append(ic)  # accumulate per-timestep circulation (currently unused)
if plot:
tcplt.two_shade_map(vws, ic,
shading = np.arange(-2.,2.,.05),
ticks = np.arange(-2.,2.,0.5),
savefile = plt_folder + id + "_" + str(index) + ".png",
legend_title = "Integrated Circulation")
np.save(data_folder + id + "_" + str(index) + ".npy", ic)
plt_folder = "/glade/work/galenv/int_circ_figs_kl5/"
data_folder = "/glade/work/galenv/int_circ_data_kl5/"
radius = 150
normalize_option = "log"
#plt_folder = "data/test/"
#data_folder = "data/test/"
unique_storms = pd.Series(np.unique(storm_data['ID']))
print("Getting GFS data warmup...")
gfs_data = fun.gfs_access(2016, 12, 12, 0, credentials.RDA_USER, credentials.RDA_PASSWORD)
print("GFS data has been gotten! On to the parallel stuff")
time.sleep(3)
print("Setting up parallel env.")
pandarallel.initialize()
print("Parallel env set up... starting parallel computations.")
unique_storms.parallel_apply(int_circulation_storm,
args = (storm_data, radius, normalize_option, plt_folder, data_folder, True, False))
print("All done!")
#unique_storms.iloc[3:7].parallel_apply(int_circulation_storm,
#                       args = (storm_data, radius, normalize_option, plt_folder, data_folder, False, False))
| galenvincent/tc-wind-shear | integrated_circulation.py | integrated_circulation.py | py | 2,941 | python | en | code | 1 | github-code | 36 |
25404280105 | # -*- coding: utf-8 -*-
import torch
from model import Model
from utils import load_img,mkdir
import os
import argparse
import cv2
import time
from glob import glob
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--data', default=r'/data/Disk_B/MSCOCO2014/train2014', type=str, help='')
parser.add_argument('--load_pt', default=True, type=bool, help='')
parser.add_argument('--weights_path', default='./weights/epoch323_fusion.pt', type=str, help='')
parser.add_argument('--lr', default= 1e-3, type=float, help='')
parser.add_argument('--devices', default="0", type=str, help='')
parser.add_argument('--device', default="cuda", type=str, help='')
parser.add_argument('--batch_size', default=32, type=int, help='')
parser.add_argument('--epochs', default=1000, type=int, help='')
parser.add_argument('--multiGPU', default=False, type=bool, help='')
parser.add_argument('--GPUs', default=[0, 1], type=list, help='')
return parser.parse_args()
def getimg(imgir_path):
img = load_img(imgir_path)
with torch.no_grad():
model.setdata(img)
s_time = time.time()
model.forward(isTest=True)
e_time = time.time() - s_time
print(e_time)
# model.saveimgfuse(imgir_path)
return model.getimg()
def sm(x,y):
ex = torch.exp(x)
ey = torch.exp(y)
s = ex+ey
    return x * ex / s + y * ey / s
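# Hedged note: sm() is a softmax-weighted blend of two tensors. For scalars
# x = 0, y = 1 the weights are e^0/(e^0+e^1) ~= 0.269 and e^1/(e^0+e^1) ~= 0.731,
# so sm(torch.tensor(0.), torch.tensor(1.)) ~= tensor(0.7311).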
if __name__ == "__main__":
save_path = "result"
test_ir = './Test_ir/'
test_vi = './Test_vi/'
# test_ir = './road/ir/'
# test_vi = './road/vi/'
img_list_ir = glob(test_ir + '*')
img_num = len(img_list_ir)
imgtype = '.bmp'
args = parse_args()
os.chdir(r'./')
os.environ["CUDA_VISIBLE_DEVICES"] = args.devices
model = Model(args).to(args.device)
model.eval()
for i in range(1,img_num+1):
imgir_path = test_ir+str(i)+imgtype
imgvi_path = test_vi+str(i)+imgtype
        vi_g1, vi_g2, vi_g3, vi_s = getimg(imgvi_path)
        ir_g1, ir_g2, ir_g3, ir_s = getimg(imgir_path)
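        # Four fusion variants: element-wise max vs. plain sum of the three
        # detail layers (g1..g3), each combined with an average vs. softmax
        # blend (sm) of the base layers (s).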
fused_1 = torch.max(vi_g1, ir_g1) + torch.max(vi_g2, ir_g2) + torch.max(vi_g3, ir_g3) + (vi_s + ir_s) / 2
fused_2 = torch.max(vi_g1, ir_g1) + torch.max(vi_g2, ir_g2) + torch.max(vi_g3, ir_g3) + sm(vi_s, ir_s)
fused_3 = vi_g1 + ir_g1 + vi_g2 + ir_g2 + vi_g3 + ir_g3 + (vi_s + ir_s) / 2
fused_4 = vi_g1 + ir_g1 + vi_g2 + ir_g2 + vi_g3 + ir_g3 + sm(vi_s, ir_s)
fused_1 = fused_1.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
fused_2 = fused_2.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
fused_3 = fused_3.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
fused_4 = fused_4.squeeze(0).squeeze(0).detach().cpu().numpy() * 255
save_path_1 = os.path.join(save_path, 'fuse1')
mkdir(save_path_1)
save_name_1 = os.path.join(save_path_1, '{}.bmp'.format(i))
cv2.imwrite(save_name_1, fused_1)
save_path_2 = os.path.join(save_path, 'fuse2')
mkdir(save_path_2)
save_name_2 = os.path.join(save_path_2, '{}.bmp'.format(i))
cv2.imwrite(save_name_2, fused_2)
save_path_3 = os.path.join(save_path, 'fuse3')
mkdir(save_path_3)
save_name_3 = os.path.join(save_path_3, '{}.bmp'.format(i))
cv2.imwrite(save_name_3, fused_3)
save_path_4 = os.path.join(save_path, 'fuse4')
mkdir(save_path_4)
save_name_4 = os.path.join(save_path_4, '{}.bmp'.format(i))
cv2.imwrite(save_name_4, fused_4)
print("pic:[%d] %s" % (i, save_name_1))
| thfylsty/ImageFusion_DeepDecFusion | fuseimg.py | fuseimg.py | py | 3,582 | python | en | code | 5 | github-code | 36 |
7704126003 | # -*- encoding: utf-8 -*-
import networkx as nx
from tools import const
# If no label_type is given, "default" is used
def load_data(graph_name: str, label_type: str) -> (nx.Graph, dict):
if const.System == "Windows":
        edge_path = const.WindowsRootPath + r"\data\graph\{}.edgelist".format(graph_name)
        if label_type == "default":
            label_path = const.WindowsRootPath + r"\data\label\{}.label".format(graph_name)
        else:
            label_path = const.WindowsRootPath + r"\data\label\{}_{}.label".format(graph_name, label_type)
elif const.System == "Linux":
edge_path = const.LinuxRootPath + "/data/graph/{}.edgelist".format(graph_name)
if label_type == "default":
label_path = const.LinuxRootPath + "/data/label/{}.label".format(graph_name)
else:
label_path = const.LinuxRootPath + "/data/label/{}_{}.label".format(graph_name, label_type)
else:
raise EnvironmentError("only support Windows and Linux")
label_dict = read_label(label_path)
graph = nx.read_edgelist(path=edge_path, create_using=nx.Graph, nodetype=str,
edgetype=float, data=[('weight', float)])
return graph, label_dict
def load_data_from_distance(graph_name, label_name, metric, hop, scale, multi="no", directed=False):
"""
Loda graph data by dataset name.
:param graph_name: graph name, e.g. mkarate
:param label_name: label name, e.g. mkarate_origin
:param directed: bool, if True, return directed graph.
:return: graph, node labels, number of node classes.
"""
if multi == "yes":
edge_path = "../distance/{}/HSD_multi_{}_hop{}.edgelist".format(
graph_name, metric, hop)
else:
edge_path = "../distance/{}/HSD_{}_scale{}_hop{}.edgelist".format(
graph_name, metric, scale, hop)
label_path = f"../data/label/{graph_name}.label"
    label_dict = read_label(label_path)
    n_class = len(set(label_dict.values()))
if directed:
graph = nx.read_edgelist(path=edge_path, create_using=nx.DiGraph,
edgetype=float, data=[('weight', float)])
else:
graph = nx.read_edgelist(path=edge_path, create_using=nx.Graph,
edgetype=float, data=[('weight', float)])
return graph, label_dict, n_class
def read_label(path) -> dict:
"""
read graph node labels.
format:
str(node): int(label)
"""
label_dict = dict()
try:
with open(path, mode="r", encoding="utf-8") as fin:
while True:
line = fin.readline().strip()
if not line:
break
node, label = line.split(" ")
label_dict[node] = int(label)
    except FileNotFoundError:
        print(f"Label file does not exist, path: {path}")
    return label_dict
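# Hedged usage sketch -- a label file is one "node label" pair per line, e.g.
#   1 0
#   2 1
# so read_label("../data/label/mkarate.label") would return {'1': 0, '2': 1}.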
| Sngunfei/HSD | tools/dataloader.py | dataloader.py | py | 2,898 | python | en | code | 3 | github-code | 36 |
8429575958 | import math
import sys
filename = sys.argv[1]
file = open("{}".format(filename), "r")
read = file.read().split(" ")
fileRead = []
charLength = 0
count = 0
comparisonValue = int(read[1])
finalArray = []
for names in range(2, len(read)):
strippedInput = read[names].strip('\n').replace('\n', " ")
fileRead.append(strippedInput)
charLength += len(strippedInput)
for names in fileRead:
x = names.split(" ")
for y in x:
finalArray.append(y)
avgLength = math.ceil(charLength / len(fileRead))  # average length of the summed tokens
for i in finalArray:
    if abs(len(i) - avgLength) <= 2:
count += 1
if count == comparisonValue:
sys.stdout.write("Yes")
else:
sys.stdout.write("No")
| hassaanarif/White-Board-Interview-Quesitons | Arbisoft_Test_1.py | Arbisoft_Test_1.py | py | 857 | python | en | code | 1 | github-code | 36 |
21769549582 | # -- fgw/simulations.py --
# Author: Jake Cray
# GitHub: crayjake/fgw-python
''' '''
# imports
from fgw.structures import DataStruct
import numpy as np
from math import sqrt, sin, pi
from .interfaces import SimulationInterface
from .schemes import CrankNicolson
# system without Boussinesq approximation -> deep atmosphere
class Deep(SimulationInterface):
def __init__(
self,
width: int, # ( km )
depth: int, # ( km )
horizontalResolution: int, # number of grid points
verticalResolution: int, # number of grid points
time: float, # time to simulate ( s )
dt: float, # timesteps
latitude: float, # angle from equator, used in calculating coriolis
spongePercentage: float, # percentage of width to damp
damping: float, # damping strength
S0: float, # maximum forcing (heating)
N: float, # buoyancy frequency ( /s )
h: float, # scale height ( km )
modes: np.ndarray, # list of modes
# specific variables
heatingScaleWidth: float, # scale width of heating
initialData: DataStruct = None, # initial data
):
super().__init__(
width,
depth,
horizontalResolution,
verticalResolution,
time,
dt,
latitude,
spongePercentage,
damping,
S0,
N,
h,
modes,
initialData,
)
self.heatingScaleWidth = heatingScaleWidth
# --- abstract methods ---
# horizontal form of the heating
def HeatingHorizontal( self, x ):
return ( 1 / ( np.cosh( x / self.heatingScaleWidth * 1000 ) ) ) ** 2
# S_j mode decomposition of heating
def HeatingDecomposition( self, mode ):
D_t = 10000
rho_s = 1
A = sqrt( 2 / ( rho_s * ( self.N ** 2 ) * self.depth ) )
if ( ( mode * D_t / self.depth ) - 1 ) == 0:
S = A * ( rho_s / 2 ) * D_t
else:
S_A = sin( pi * ( ( mode * D_t / self.depth ) - 1) ) / ( ( mode * D_t / self.depth ) - 1 )
S_B = sin( pi * ( ( mode * D_t / self.depth ) + 1) ) / ( ( mode * D_t / self.depth ) + 1)
S = A * ( rho_s / 2 ) * ( D_t / np.pi ) * ( S_A - S_B )
return S
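    # Hedged note: with A = sqrt(2 / (rho_s * N^2 * H)) and H = self.depth,
    # the branch above evaluates
    #   S_j = A * (rho_s/2) * (D_t/pi) * [ sin(pi*(j*D_t/H - 1))/(j*D_t/H - 1)
    #                                      - sin(pi*(j*D_t/H + 1))/(j*D_t/H + 1) ],
    # taking the limiting value A * (rho_s/2) * D_t at the removable
    # singularity j*D_t/H = 1.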
# rho_0 (z)
def InitialDensity( self, z ):
rho_s = 1
return rho_s * np.exp( -z / self.h )
# phi (z)
def VerticalDependence( self, z ):
pass
# set the simulation step as the default CN scheme
def SimulationStep( self, data: DataStruct ) -> DataStruct:
return CrankNicolson( self.dt, data )
# converts data to 2D
def Convert( self, data: list ) -> list:
pass
| crayjake/fgw-python | fgw/simulations.py | simulations.py | py | 3,185 | python | en | code | 0 | github-code | 36 |
8444672238 | import functools
import warnings
import numpy
import cupy
import cupyx.scipy.fft
def _wraps_polyroutine(func):
def _get_coeffs(x):
if isinstance(x, cupy.poly1d):
return x._coeffs
if cupy.isscalar(x):
return cupy.atleast_1d(x)
if isinstance(x, cupy.ndarray):
x = cupy.atleast_1d(x)
if x.ndim == 1:
return x
raise ValueError('Multidimensional inputs are not supported')
raise TypeError('Unsupported type')
def wrapper(*args):
coeffs = [_get_coeffs(x) for x in args]
out = func(*coeffs)
if all(not isinstance(x, cupy.poly1d) for x in args):
return out
if isinstance(out, cupy.ndarray):
return cupy.poly1d(out)
if isinstance(out, tuple):
return tuple([cupy.poly1d(x) for x in out])
assert False # Never reach
return functools.update_wrapper(wrapper, func)
def poly(seq_of_zeros):
"""Computes the coefficients of a polynomial with the given roots sequence.
Args:
seq_of_zeros (cupy.ndarray): a sequence of polynomial roots.
Returns:
cupy.ndarray: polynomial coefficients from highest to lowest degree.
.. warning::
This function doesn't support general 2d square arrays currently.
Only complex Hermitian and real symmetric 2d arrays are allowed.
.. seealso:: :func:`numpy.poly`
"""
x = seq_of_zeros
if x.ndim == 2 and x.shape[0] == x.shape[1] and x.shape[0] != 0:
if cupy.array_equal(x, x.conj().T):
x = cupy.linalg.eigvalsh(x)
else:
raise NotImplementedError('Only complex Hermitian and real '
'symmetric 2d arrays are supported '
'currently')
elif x.ndim == 1:
x = x.astype(cupy.mintypecode(x.dtype.char), copy=False)
else:
raise ValueError('Input must be 1d or non-empty square 2d array.')
if x.size == 0:
return 1.0
size = 2 ** (x.size - 1).bit_length()
a = cupy.zeros((size, 2), x.dtype)
a[:, 0].fill(1)
cupy.negative(x, out=a[:x.size, 1])
while size > 1:
size = size // 2
a = cupy._math.misc._fft_convolve(a[:size], a[size:], 'full')
return a[0, :x.size + 1]
@_wraps_polyroutine
def polyadd(a1, a2):
"""Computes the sum of two polynomials.
Args:
a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.
Returns:
cupy.ndarray or cupy.poly1d: The sum of the inputs.
.. seealso:: :func:`numpy.polyadd`
"""
if a1.size < a2.size:
a1, a2 = a2, a1
out = cupy.pad(a2, (a1.size - a2.size, 0))
out = out.astype(cupy.result_type(a1, a2), copy=False)
out += a1
return out
@_wraps_polyroutine
def polysub(a1, a2):
"""Computes the difference of two polynomials.
Args:
a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.
Returns:
cupy.ndarray or cupy.poly1d: The difference of the inputs.
.. seealso:: :func:`numpy.polysub`
"""
if a1.shape[0] <= a2.shape[0]:
out = cupy.pad(a1, (a2.shape[0] - a1.shape[0], 0))
out = out.astype(cupy.result_type(a1, a2), copy=False)
out -= a2
else:
out = cupy.pad(a2, (a1.shape[0] - a2.shape[0], 0))
out = out.astype(cupy.result_type(a1, a2), copy=False)
        out = a1 - out
return out
@_wraps_polyroutine
def polymul(a1, a2):
"""Computes the product of two polynomials.
Args:
a1 (scalar, cupy.ndarray or cupy.poly1d): first input polynomial.
a2 (scalar, cupy.ndarray or cupy.poly1d): second input polynomial.
Returns:
cupy.ndarray or cupy.poly1d: The product of the inputs.
.. seealso:: :func:`numpy.polymul`
"""
a1 = cupy.trim_zeros(a1, trim='f')
a2 = cupy.trim_zeros(a2, trim='f')
if a1.size == 0:
a1 = cupy.array([0.], a1.dtype)
if a2.size == 0:
a2 = cupy.array([0.], a2.dtype)
return cupy.convolve(a1, a2)
def _polypow_direct(x, n):
if n == 0:
return 1
if n == 1:
return x
if n % 2 == 0:
return _polypow(cupy.convolve(x, x), n // 2)
return cupy.convolve(x, _polypow(cupy.convolve(x, x), (n - 1) // 2))
def _polypow(x, n):
if n == 0:
return 1
if n == 1:
return x
method = cupy._math.misc._choose_conv_method(x, x, 'full')
if method == 'direct':
return _polypow_direct(x, n)
elif method == 'fft':
if x.dtype.kind == 'c':
fft, ifft = cupy.fft.fft, cupy.fft.ifft
else:
fft, ifft = cupy.fft.rfft, cupy.fft.irfft
out_size = (x.size - 1) * n + 1
size = cupyx.scipy.fft.next_fast_len(out_size)
fx = fft(x, size)
fy = cupy.power(fx, n, fx)
y = ifft(fy, size)
return y[:out_size]
else:
assert False
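# Hedged note: _polypow uses exponentiation by squaring, so x**5 costs three
# convolutions (x*x, squaring that, then one final multiply) instead of four;
# the FFT path instead raises the transform to the n-th power pointwise.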
def _polyfit_typecast(x):
if x.dtype.kind == 'c':
return x.astype(numpy.complex128, copy=False)
return x.astype(numpy.float64, copy=False)
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""Returns the least squares fit of polynomial of degree deg
to the data y sampled at x.
Args:
x (cupy.ndarray): x-coordinates of the sample points of shape (M,).
y (cupy.ndarray): y-coordinates of the sample points of shape
(M,) or (M, K).
deg (int): degree of the fitting polynomial.
rcond (float, optional): relative condition number of the fit.
The default value is ``len(x) * eps``.
full (bool, optional): indicator of the return value nature.
When False (default), only the coefficients are returned.
When True, diagnostic information is also returned.
w (cupy.ndarray, optional): weights applied to the y-coordinates
of the sample points of shape (M,).
cov (bool or str, optional): if given, returns the coefficients
along with the covariance matrix.
Returns:
cupy.ndarray or tuple:
p (cupy.ndarray of shape (deg + 1,) or (deg + 1, K)):
Polynomial coefficients from highest to lowest degree
residuals, rank, singular_values, rcond \
(cupy.ndarray, int, cupy.ndarray, float):
Present only if ``full=True``.
Sum of squared residuals of the least-squares fit,
rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of ``rcond``.
V (cupy.ndarray of shape (M, M) or (M, M, K)):
Present only if ``full=False`` and ``cov=True``.
The covariance matrix of the polynomial coefficient estimates.
.. warning::
numpy.RankWarning: The rank of the coefficient matrix in the
least-squares fit is deficient. It is raised if ``full=False``.
.. seealso:: :func:`numpy.polyfit`
"""
if x.dtype.char == 'e' and y.dtype.kind == 'b':
raise NotImplementedError('float16 x and bool y are not'
' currently supported')
if y.dtype == numpy.float16:
raise TypeError('float16 y are not supported')
x = _polyfit_typecast(x)
y = _polyfit_typecast(y)
deg = int(deg)
if deg < 0:
raise ValueError('expected deg >= 0')
if x.ndim != 1:
raise TypeError('expected 1D vector for x')
if x.size == 0:
raise TypeError('expected non-empty vector for x')
if y.ndim < 1 or y.ndim > 2:
raise TypeError('expected 1D or 2D array for y')
if x.size != y.shape[0]:
raise TypeError('expected x and y to have same length')
lhs = cupy.polynomial.polynomial.polyvander(x, deg)[:, ::-1]
rhs = y
if w is not None:
w = _polyfit_typecast(w)
if w.ndim != 1:
raise TypeError('expected a 1-d array for weights')
if w.size != x.size:
raise TypeError('expected w and y to have the same length')
lhs *= w[:, None]
if rhs.ndim == 2:
w = w[:, None]
rhs *= w
if rcond is None:
rcond = x.size * cupy.finfo(x.dtype).eps
scale = cupy.sqrt((cupy.square(lhs)).sum(axis=0))
lhs /= scale
c, resids, rank, s = cupy.linalg.lstsq(lhs, rhs, rcond)
if y.ndim > 1:
scale = scale.reshape(-1, 1)
c /= scale
order = deg + 1
if rank != order and not full:
msg = 'Polyfit may be poorly conditioned'
warnings.warn(msg, numpy.RankWarning, stacklevel=4)
if full:
if resids.dtype.kind == 'c':
resids = cupy.absolute(resids)
return c, resids, rank, s, rcond
if cov:
base = cupy.linalg.inv(cupy.dot(lhs.T, lhs))
base /= cupy.outer(scale, scale)
if cov == 'unscaled':
factor = 1
elif x.size > order:
factor = resids / (x.size - order)
else:
raise ValueError('the number of data points must exceed order'
' to scale the covariance matrix')
if y.ndim != 1:
base = base[..., None]
return c, base * factor
return c
def polyval(p, x):
"""Evaluates a polynomial at specific values.
Args:
p (cupy.ndarray or cupy.poly1d): input polynomial.
x (scalar, cupy.ndarray): values at which the polynomial
is evaluated.
Returns:
cupy.ndarray or cupy.poly1d: polynomial evaluated at x.
.. warning::
This function doesn't currently support poly1d values to evaluate.
.. seealso:: :func:`numpy.polyval`
"""
if isinstance(p, cupy.poly1d):
p = p.coeffs
if not isinstance(p, cupy.ndarray) or p.ndim == 0:
raise TypeError('p must be 1d ndarray or poly1d object')
if p.ndim > 1:
raise ValueError('p must be 1d array')
if isinstance(x, cupy.poly1d):
# TODO(asi1024): Needs performance improvement.
dtype = numpy.result_type(x.coeffs, 1)
res = cupy.poly1d(cupy.array([0], dtype=dtype))
prod = cupy.poly1d(cupy.array([1], dtype=dtype))
for c in p[::-1]:
res = res + prod * c
prod = prod * x
return res
dtype = numpy.result_type(p.dtype.type(0), x)
p = p.astype(dtype, copy=False)
if p.size == 0:
return cupy.zeros(x.shape, dtype)
if dtype == numpy.bool_:
return p.any() * x + p[-1]
if not cupy.isscalar(x):
x = cupy.asarray(x, dtype=dtype)[..., None]
x = x ** cupy.arange(p.size, dtype=dtype)
return (p[::-1] * x).sum(axis=-1, dtype=dtype)
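# Hedged usage sketch: evaluating p(x) = 3x^2 + 2x + 1 (coefficients ordered
# from highest to lowest degree) at x = [0, 1, 2]:
#   >>> cupy.polyval(cupy.array([3., 2., 1.]), cupy.array([0., 1., 2.]))
#   array([ 1.,  6., 17.])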
def roots(p):
"""Computes the roots of a polynomial with given coefficients.
Args:
p (cupy.ndarray or cupy.poly1d): polynomial coefficients.
Returns:
cupy.ndarray: polynomial roots.
.. warning::
This function doesn't support currently polynomial coefficients
whose companion matrices are general 2d square arrays. Only those
with complex Hermitian or real symmetric 2d arrays are allowed.
The current `cupy.roots` doesn't guarantee the order of results.
.. seealso:: :func:`numpy.roots`
"""
if isinstance(p, cupy.poly1d):
p = p.coeffs
if p.dtype.kind == 'b':
raise NotImplementedError('boolean inputs are not supported')
if p.ndim == 0:
raise TypeError('0-dimensional input is not allowed')
if p.size < 2:
return cupy.array([])
[p] = cupy.polynomial.polyutils.as_series([p[::-1]])
if p.size < 2:
return cupy.array([])
if p.size == 2:
out = (-p[0] / p[1])[None]
if p[0] == 0:
out = out.real.astype(numpy.float64)
return out
cmatrix = cupy.polynomial.polynomial.polycompanion(p)
# TODO(Dahlia-Chehata): Support after cupy.linalg.eigvals is supported
if cupy.array_equal(cmatrix, cmatrix.conj().T):
out = cupy.linalg.eigvalsh(cmatrix)
else:
raise NotImplementedError('Only complex Hermitian and real '
'symmetric 2d arrays are supported '
'currently')
return out.astype(p.dtype)
| cupy/cupy | cupy/lib/_routines_poly.py | _routines_poly.py | py | 12,381 | python | en | code | 7,341 | github-code | 36 |
12486698422 | import contextlib
import os
import subprocess
import tempfile
from pathlib import Path
from pprint import pprint
from shutil import copyfile
from time import monotonic, sleep
from typing import Dict
from unittest import mock
import requests
from docker_tests.command_utils import run_command
from docker_tests.constants import SOURCE_ROOT
from docker_tests.docker_tests_utils import docker_image
AIRFLOW_WWW_USER_USERNAME = os.environ.get("_AIRFLOW_WWW_USER_USERNAME", "airflow")
AIRFLOW_WWW_USER_PASSWORD = os.environ.get("_AIRFLOW_WWW_USER_PASSWORD", "airflow")
DAG_ID = "example_bash_operator"
DAG_RUN_ID = "test_dag_run_id"
def api_request(method: str, path: str, base_url: str = "http://localhost:8080/api/v1", **kwargs) -> Dict:
response = requests.request(
method=method,
url=f"{base_url}/{path}",
auth=(AIRFLOW_WWW_USER_USERNAME, AIRFLOW_WWW_USER_PASSWORD),
headers={"Content-Type": "application/json"},
**kwargs,
)
response.raise_for_status()
return response.json()
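# Hedged note: api_request("GET", "dags") is roughly equivalent to
#   curl -u airflow:airflow -H "Content-Type: application/json" \
#        http://localhost:8080/api/v1/dags
# and raises for any non-2xx status before returning the parsed JSON body.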
@contextlib.contextmanager
def tmp_chdir(path):
current_cwd = os.getcwd()
try:
os.chdir(path)
yield current_cwd
finally:
os.chdir(current_cwd)
def wait_for_container(container_id: str, timeout: int = 300):
container_name = (
subprocess.check_output(["docker", "inspect", container_id, "--format", '{{ .Name }}'])
.decode()
.strip()
)
print(f"Waiting for container: {container_name} [{container_id}]")
waiting_done = False
start_time = monotonic()
while not waiting_done:
container_state = (
subprocess.check_output(["docker", "inspect", container_id, "--format", '{{ .State.Status }}'])
.decode()
.strip()
)
if container_state in ("running", 'restarting'):
health_status = (
subprocess.check_output(
[
"docker",
"inspect",
container_id,
"--format",
"{{ if .State.Health }}{{ .State.Health.Status }}{{ else }}no-check{{ end }}",
]
)
.decode()
.strip()
)
print(f"{container_name}: container_state={container_state}, health_status={health_status}")
if health_status == "healthy" or health_status == "no-check":
waiting_done = True
else:
print(f"{container_name}: container_state={container_state}")
waiting_done = True
if timeout != 0 and monotonic() - start_time > timeout:
raise Exception(f"Timeout. The operation takes longer than the maximum waiting time ({timeout}s)")
sleep(1)
def wait_for_terminal_dag_state(dag_id, dag_run_id):
# Wait 30 seconds
for _ in range(30):
dag_state = api_request("GET", f"dags/{dag_id}/dagRuns/{dag_run_id}").get("state")
print(f"Waiting for DAG Run: dag_state={dag_state}")
sleep(1)
if dag_state in ("success", "failed"):
break
def test_trigger_dag_and_wait_for_result():
compose_file_path = SOURCE_ROOT / "docs" / "apache-airflow" / "start" / "docker-compose.yaml"
with tempfile.TemporaryDirectory() as tmp_dir, tmp_chdir(tmp_dir), mock.patch.dict(
'os.environ', AIRFLOW_IMAGE_NAME=docker_image
):
copyfile(str(compose_file_path), f"{tmp_dir}/docker-compose.yaml")
os.mkdir(f"{tmp_dir}/dags")
os.mkdir(f"{tmp_dir}/logs")
os.mkdir(f"{tmp_dir}/plugins")
(Path(tmp_dir) / ".env").write_text(f"AIRFLOW_UID={subprocess.check_output(['id', '-u']).decode()}\n")
print(".emv=", (Path(tmp_dir) / ".env").read_text())
copyfile(
str(SOURCE_ROOT / "airflow" / "example_dags" / "example_bash_operator.py"),
f"{tmp_dir}/dags/example_bash_operator.py",
)
run_command(["docker-compose", "config"])
run_command(["docker-compose", "down", "--volumes", "--remove-orphans"])
try:
run_command(["docker-compose", "up", "-d"])
# The --wait condition was released in docker-compose v2.1.1, but we want to support
# docker-compose v1 yet.
# See:
# https://github.com/docker/compose/releases/tag/v2.1.1
# https://github.com/docker/compose/pull/8777
for container_id in (
subprocess.check_output(["docker-compose", 'ps', '-q']).decode().strip().splitlines()
):
wait_for_container(container_id)
api_request("PATCH", path=f"dags/{DAG_ID}", json={"is_paused": False})
api_request("POST", path=f"dags/{DAG_ID}/dagRuns", json={"dag_run_id": DAG_RUN_ID})
try:
wait_for_terminal_dag_state(dag_id=DAG_ID, dag_run_id=DAG_RUN_ID)
dag_state = api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}").get("state")
assert dag_state == "success"
except Exception:
print(f"HTTP: GET dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}")
pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}"))
print(f"HTTP: GET dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances")
pprint(api_request("GET", f"dags/{DAG_ID}/dagRuns/{DAG_RUN_ID}/taskInstances"))
raise
except Exception:
run_command(["docker", "ps"])
run_command(["docker-compose", "logs"])
raise
finally:
run_command(["docker-compose", "down", "--volumes"])
| drivendataorg/snowcast-showdown | 4th Place/images/base/docker_tests/test_docker_compose_quick_start.py | test_docker_compose_quick_start.py | py | 5,681 | python | en | code | 12 | github-code | 36 |
71121334825 | from threading import Thread
import webbrowser, http.server, socketserver
import time
port_number = 8000
server = None
def startServer(port):
Handler = http.server.SimpleHTTPRequestHandler
global server
server = socketserver.TCPServer(("", port), Handler)
print("Start server at port", port)
server.serve_forever()
def start(port):
thread = Thread(target=startServer, args=[port])
thread.start()
startTime = int(time.time())
while not server:
        if int(time.time()) > startTime + 60:
            print("Time out")
            break
        time.sleep(0.1)  # avoid a busy-wait while the server thread starts
return server
def stop():
if server:
server.shutdown()
def openUrl():
url = "http://localhost:" + str(port_number)
webbrowser.open(url)
print(url + " is opened in browser")
if __name__ == "__main__":
start(port_number)
openUrl()
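# Hedged usage sketch: another script can drive the server lifecycle, e.g.
#   import LocalServer
#   LocalServer.start(8000)   # serve the current directory in a thread
#   LocalServer.openUrl()
#   LocalServer.stop()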
| ldlchina/Sample-of-WebGL-with-STL-loader | LocalServer.py | LocalServer.py | py | 862 | python | en | code | 17 | github-code | 36 |
33662814296 | # Here is a quick Python solution to Problem 1 on the Mixing Addition Problem Set
import numpy as np # convenient for doing math
P_1 = 10. # Initial pressure Oxygen (atm)
V_o = 1. # Initial Volume Oxygen (L)
T = 298. # Temperature (K)
P_2 = 1. # Initial Pressure Nitrogen (atm)
V_n = 1000. # Initial Volume Nitrogen (L)
R_n = 0.0820573 # Gas Constant (L-atm / mol-K)
R = 8.314 # Gas Constant (J/mol-K)
# Calulate moles of Oxygen (Ideal Gas Law)
n_o = P_1*V_o / (R_n*T)
print('The moles of O_2 are')
print(n_o)
# Calculate moles of Nitrogen (Ideal Gas Law)
n_n = P_2*V_n / (R_n*T)
print('The moles of N_2 are')
print(n_n)
# The system is closed therefore
n_tot = n_o + n_n
print('There are %s total moles in the system' % n_tot)
# The system is composed of gases therefore
V_tot = V_o + V_n
# The system is ideal, so the partial pressures are determined without interactions
P_o = n_o*R_n*T / V_tot
P_n = n_n*R_n*T / V_tot
print('The partial pressure of oxygen is %s and nitrogen is %s' % (P_o, P_n))
# Gibbs Free Energy Equation
G = n_o * R * T * np.log(P_o/P_1) + n_n * R * T * np.log(P_n/P_2)
# The system is ideal therefore Enthalpy = 0 and
S = -G / T
print('The change in the Gibbs free energy is %s' % G)
print('The change in the entropy is %s' % S)
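# Hedged note: for isothermal mixing of ideal gases the quantity computed
# above is the Gibbs free energy of mixing,
#   dG_mix = R*T * sum_i n_i * ln(P_i_final / P_i_initial),
# which is negative (spontaneous); since dH = 0 for ideal gases,
#   dS_mix = -dG_mix / T.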
| swflynn/Teaching_UCI | Chem131_C_2017/Sample_Problems/mixing/mixing.py | mixing.py | py | 1,346 | python | en | code | 0 | github-code | 36 |
74779834984 | from unittest import TestCase
from random import randint
from Common.common import rand_permutate
from collections import namedtuple
from .selection_in_linear_time import select, rand_select
from .problem_9_3 import select_variant
class TestSelection(TestCase):
def test_selection(self):
case_class = namedtuple('case_class', 'array i key expected_res')
for select_method in (rand_select, select, select_variant,):
cases = (
case_class(array=[1], i=0, key=None, expected_res=1),
case_class(array=[3, 2, 1], i=0, key=None, expected_res=1),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=4, key=None, expected_res=5),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=2, key=None, expected_res=3),
case_class(array=[1, 3, 5, 4, 2, 7, 6], i=6, key=lambda x: -x, expected_res=1),
case_class(array=[8, 3, 2, 4, 6, 9, 7, 5, 1], i=0, key=None, expected_res=1),
case_class(array=[16, 196, 64, 121, 144, 9, 36, 0, 49, 100, 4, 81, 169, 1, 25], i=4, key=None,
expected_res=16),
case_class(array=[1, 16, 4, 9, 49, 100, 25, 36, 81, 64, 0], i=0, key=None, expected_res=0),
)
for case in cases:
# print(case.array, case.i)
self.assertEqual(case.expected_res, select_method(case.array, case.i, case.key))
for length in range(1, 100):
i = randint(0, length - 1)
array = [x * x for x in range(0, length)]
rand_permutate(array)
case = case_class(array=array, i=i, key=None, expected_res=i * i)
# print(case.array, case.i)
                self.assertEqual(case.expected_res, select_method(case.array, case.i, case.key))
| GarfieldJiang/CLRS | P2_Sorting/OrderStatistics/test_selection.py | test_selection.py | py | 1,825 | python | en | code | 0 | github-code | 36 |
25867245413 | #test
import os
import yaml
from ..grid import Grid
import numpy as np
def load_test_data(yml_file_path):
"""Given a file path of the yaml file, return the data in the file."""
with open(yml_file_path, 'r') as f:
s = f.read()
        test_data = yaml.load(s, Loader=yaml.SafeLoader)
return test_data
def test_vertices():
#yml_files = ['grid1.yml']
yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
test_data = load_test_data(yml_file_path)
ans = test_data.pop('ans')
ans_vertice = ans['vertice']
grid = Grid.from_vtk_file('example.vtk')
v = grid.vertices[1]
    assert (v == ans_vertice).all()
def test_elements():
yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
test_data = load_test_data(yml_file_path)
ans = test_data.pop('ans')
ans_element = ans['element']
grid = Grid.from_vtk_file('example.vtk')
e = grid.elements[1]
    assert (e == ans_element).all()
def test_corners():
yml_file_path = os.path.join(os.path.dirname(__file__), 'fixture', 'grid_1.yml')
test_data = load_test_data(yml_file_path)
ans = test_data.pop('ans')
ans_corner = ans['corner']
grid = Grid.from_vtk_file('example.vtk')
c = grid.get_corners(1)
    assert (c == ans_corner).all()
# if __name__ == '__main__':
# test_vertices()
# # test_wrong()
| uceclz0/mesh_generation | mesh_generation/test/grid_test.py | grid_test.py | py | 1,354 | python | en | code | 0 | github-code | 36 |
41246308973 | #!/usr/bin/env python3
from http.server import SimpleHTTPRequestHandler
from socketserver import TCPServer
class CORSPermissiveHTTPRequestHandler(SimpleHTTPRequestHandler):
def end_headers(self):
self.send_header('Access-Control-Allow-Origin', '*')
super().end_headers()
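# Hedged note: the "Access-Control-Allow-Origin: *" header added above lets a
# page loaded from another origin (e.g. a Pyodide app) fetch files from this
# server; without it the browser blocks the cross-origin request.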
if __name__ == "__main__":
with TCPServer(("127.0.0.1", 8000), CORSPermissiveHTTPRequestHandler) as httpd:
print("Serving...")
httpd.serve_forever()
| habitatofmatt/keyboard-reducer | tools/pyodide-serve.py | pyodide-serve.py | py | 465 | python | en | code | 0 | github-code | 36 |
10770360159 | #! /usr/bin/python3
import sys
import logging
import cliff.app
import cliff.commandmanager
from commands.workflows import Workflows
from commands.sysconfig import SysConfig
from commands.daemons import Coordinator
from commands.daemons import Provisioner
from commands.generator import Generator
from commands.reports import Reports
from commands.status import Status
class PancancerApp(cliff.app.App):
log = logging.getLogger(__name__)
def __init__(self):
commandMgr = cliff.commandmanager.CommandManager('pancancer.app')
super(PancancerApp, self).__init__(
description='Pancancer CLI',
version='1.0',
command_manager=commandMgr,
)
commands = {
'workflows': Workflows,
'generator': Generator,
'reports': Reports,
'provisioner': Provisioner,
'coordinator': Coordinator,
'status': Status,
'sysconfig': SysConfig
}
for k, v in commands.items():
commandMgr.add_command(k, v)
def initialize_app(self, argv):
self.log.debug('initialize_app')
def prepare_to_run_command(self, cmd):
self.log.debug('prepare_to_run_command %s', cmd.__class__.__name__)
def clean_up(self, cmd, result, err):
self.log.debug('clean_up %s', cmd.__class__.__name__)
if err:
self.log.debug('got an error: %s', err)
def main(argv=sys.argv[1:]):
app = PancancerApp()
return app.run(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
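# Hedged usage sketch: with the entry point above,
#   pancancer workflows --help
# dispatches through cliff's CommandManager to the Workflows command class.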
| ICGC-TCGA-PanCancer/cli | scripts/pancancer.py | pancancer.py | py | 1,578 | python | en | code | 8 | github-code | 36 |
72522500264 | import logging
from .models import ChangeLogTracker
logger = logging.getLogger(__name__)
class ChangeLoggerMiddleware(object):
def process_request(self, request):
try:
ChangeLogTracker.thread.request = request
except Exception as e:
logger.error(e)
| kdmukai/changelogger | changelogger/middleware.py | middleware.py | py | 299 | python | en | code | 0 | github-code | 36 |
25022706363 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, argparse, json
from common.utils import use_progressbar, count_file
from smali_opcode import HCG_FILE_NAME
def merge_hash_dict(d1, d2):
'''merge d1 into d2'''
# 1. iterate through all keys of d1
# 2. check if the key is also in d2
# 2.1 yes, place the key with the maximum value in d2
# 2.2 no, add the key-value pair into d2
for node in d1:
if node in d2:
d2[node] = max(d1[node], d2[node])
else:
d2[node] = d1[node]
return d2
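# Hedged example: merge_hash_dict({'a': 2}, {'a': 1, 'b': 3}) returns
# {'a': 2, 'b': 3} -- colliding hashes keep the larger occurrence count.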
def get_hash(hcgpath):
'''get hash values from a hcg.json file'''
# 1. load the hcg into a dictionary
# 2. iterate through all the nodes of the hcg
# 3. make hash values keys of another dictionary
# while the values is its occurrence
# Load hcg
f = open(hcgpath, 'r')
hcg = json.load(f)
f.close()
# Iterate through all nodes
hash_dict = dict()
for node in hcg:
if hcg[node]['nhash'] not in hash_dict:
hash_dict[hcg[node]['nhash']] = 1
else:
hash_dict[hcg[node]['nhash']] += 1
return hash_dict
def count(directory):
'''count all the indivisual hash values'''
# 1. iterate through all the hcg.json files
# 2. get hash values from a hcg.json file
# 3. merge the hash values into one file
# progressbar
file_count = count_file(directory, HCG_FILE_NAME)
pbar = use_progressbar('Calculating maximum occurrence', file_count)
pbar.start()
progress = 0
hash_dict = dict()
for parent, dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename == HCG_FILE_NAME:
# if filename == 'hcg.json':
hash_dict = merge_hash_dict(get_hash(os.path.join(parent, filename)), hash_dict)
# progressbar
progress += 1
pbar.update(progress)
# progressbar
pbar.finish()
return hash_dict
def has_hash_and_occurrence(hash_dict, hash_value, occurrence):
if hash_value in hash_dict:
if hash_dict[hash_value] == occurrence:
return True
return False
def find(directory):
'''find the file with the specific occurrence of given hash value'''
# 1. iterate through all the hcg.json files
# 2. get hash values from a hcg.json file
# 3. compare the hash values with the given ones
hash_value = '0100000000000000000'
occurrence = 2302
for parent, dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename == 'directed_hcg.json':
hash_dict = get_hash(os.path.join(parent, filename))
if has_hash_and_occurrence(hash_dict, hash_value, occurrence):
print(os.path.join(parent, filename))
def save_to_file(hash_dict, directory):
# Dump hash_dict to json file
f = open(os.path.join(directory, 'directed_hash_occurrence.json'), 'w')
# f = open(os.path.join(directory, 'hash_occurrence.json'), 'w')
json.dump(hash_dict, f)
f.close()
print('[SC]All hash values stored in /%s/hash_occurrence.json' % directory)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-d', '--directory', help='directory of the apk')
parser.add_argument('-m', '--mode', help='0 = count hash; 1 = find hash')
args = parser.parse_args()
if args.directory and args.mode:
if args.mode == '0':
hash_dict = count(args.directory)
save_to_file(hash_dict, args.directory)
elif args.mode == '1':
find(args.directory)
else:
parser.print_help()
else:
parser.print_help()
if __name__ == '__main__':
    main()
| chushu10/StructuralClassification | count_hash.py | count_hash.py | py | 3,759 | python | en | code | 1 | github-code | 36 |
21541507358 | from random import randint
def playGame(MaxRangeChances):
secretNumber = randint(1, MaxRangeChances[0])
print(f"\nI've chosen a secret number between 1 and {MaxRangeChances[0]}.")
for numberOfGuesses in range(MaxRangeChances[1], 0, -1):
print(f"You have {numberOfGuesses} guess(es).")
guess = int(input("What is the number? "))
if guess == secretNumber:
return
else:
print("That was wrong.\n")
return secretNumber
level = int(input("""Choose a difficulty level:
1. Easy
2. Medium
3. Hard\n"""))
while level not in [1, 2, 3]:
level = int(input("invalid choice. Try again: "))
play = [[10, 6], [20, 4], [50, 3]]
rightGuess = playGame(play[level - 1])
if rightGuess is None:
print("You got it right!")
else:
print("Game over.")
print("The secret number is", rightGuess)
| kevinoyovota/Python-Task-3 | guessing_game.py | guessing_game.py | py | 888 | python | en | code | 0 | github-code | 36 |
19524023566 | from django.contrib import messages
from django.shortcuts import render,redirect
from students.models import student_homework,StudentFee,Attendance
from Accounts.models import UserAuthentication,Teacher
from datetime import datetime as dt
from pytz import timezone
# Create your views here.
def parent_home(request):
if request.method == "GET":
tutions = Teacher.objects.all()
return render(request,'parent_home.html',{'tutions':tutions})
else:
messages.warning(request,'Bad Request on parent home...please try again')
return redirect('/accounts/login/')
def show_students_data(request):
if request.method == "GET":
student_key = request.GET.get('parent_contact','')
if student_key == '':
messages.error(request,'Parent Mobile Number is required...got empty')
return redirect('/parents/home/')
else:
if student_homework.objects.filter(username = student_key).exists():
student_data = student_homework.objects.get(username = student_key)
att = []
fee = []
teacher = Teacher.objects.get(teacher = student_data.teacher)
if Attendance.objects.filter(email = student_data).exists():
att = Attendance.objects.filter(email = student_data)
if StudentFee.objects.filter(email = student_key).exists():
fee = StudentFee.objects.filter(email = student_key)
td = dt.now(timezone('Asia/Kolkata')).date()
if teacher.notice_expiry == None:
notice = False
elif td >= teacher.notice_expiry:
notice = False
else:
notice = True
return render(request,'parent_student_data.html',{
'student':student_data,
'att':att,
'fee':fee,
'teacher' : teacher,
'notice' : notice
})
else:
            messages.error(request,'Your mobile number is not associated with any tuition..please check with your tuition teacher')
return redirect('/parents/home/')
else:
        messages.error(request,'Only GET requests are accepted..please try again')
return redirect('/parents/home/')
| Asif-Ali1234/tution_management | parents/views.py | views.py | py | 2,388 | python | en | code | 0 | github-code | 36 |
27288756918 | # Two players take turns adding 1 or 2 to a shared score.
# The first player to reach 20 wins.
def choose_first():
    answer = input("WILL YOU GO FIRST OR SECOND? ").strip().lower()
    return 1 if answer == "first" else 2
def take_turn(player):
    move = input("Player %d, add 1 or 2: " % player)
    while move not in ("1", "2"):
        move = input("Invalid choice. Add 1 or 2: ")
    return int(move)
score = 0
player = choose_first()
while score < 20:
    score += take_turn(player)
    print("Score is now", score)
    if score >= 20:
        print("Player %d wins!" % player)
    else:
        player = 2 if player == 1 else 1
# Still a work in progress
| Elib22/MIN_CigshamwaByamungu | main.py | main.py | py | 618 | python | en | code | 0 | github-code | 36 |
10528069074 | from PIL import Image
from torchvision import transforms
from JointCompose import JointCompose, IMG_ONLY_TRANSFORM, MASK_ONLY_TRANSFORM, RANDOM_JOINT_TRANSFORM_WITH_BORDERS, BORDER_ONLY_TRANSFORM, JOINT_TRANSFORM_WITH_BORDERS
import numpy as np
from scipy.ndimage.interpolation import map_coordinates
from scipy.ndimage.filters import gaussian_filter
from skimage.segmentation import mark_boundaries, find_boundaries
from skimage.exposure import adjust_gamma, rescale_intensity, equalize_hist, equalize_adapthist
from skimage.util import img_as_float
from skimage.segmentation import quickshift, felzenszwalb, slic
import random
from skimage.color import rgb2lab, rgb2grey
from skimage import filters
import cv2
IMG_SIZE = 256
class TransformSpec:
def __init__(self, transform, transform_type, prob = None):
self.transform = transform
self.transform_type = transform_type
self.prob = prob
class Flip(object):
"""flips the given PIL Image horizontally or vertically.
param type: 0 for horizontal flip, 1 for vertical flip
"""
def __init__(self, flip_type):
self.type = flip_type
def __call__(self, img):
"""
Args:
img (PIL Image): Image to be flipped.
Returns:
PIL Image: flipped image.
"""
if self.type == 0:
return img.transpose(Image.FLIP_LEFT_RIGHT)
else:
return img.transpose(Image.FLIP_TOP_BOTTOM)
# aside: transforms are written as callable classes instead of simple functions so that parameters
# of the transform need not be passed everytime it is called. For this, we just need to implement
# __call__ method and if required, __init__ method.
class Segment(object):
def __call__(self, img):
# img is a numpy rgb image
grey_img = rgb2grey(img)
t1 = filters.threshold_minimum(grey_img)
t2 = filters.threshold_yen(grey_img)
img1 = mark_boundaries(img, (grey_img > t1), color=(1,0,0))
img1 = mark_boundaries(img1, (grey_img > t2), color=(1,0,0))
img2 = mark_boundaries(img, grey_img < 0)
img = ((img1+img2)/2)
#img = mark_boundaries(img, quickshift(img_as_float(img), kernel_size =5, max_dist = 10, ratio = 1.0))
#img = mark_boundaries(img, slic(img_as_float(img), n_segments=10))
#fimg = rgb2grey(img)
#t = filters.threshold_otsu(fimg)
#img = mark_boundaries(img, (fimg > t).astype(np.uint8), color=(1,0,0))
#img = mark_boundaries(img, (fimg - filters.threshold_niblack(fimg)< 0).astype(np.uint8), color=(1,0,0))
#img_gray = rgb2grey(img)
#img_gray = img[:, :, 1]
# morphological opening (size tuned on training data)
#circle7 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
#img_open = cv2.morphologyEx(img_gray, cv2.MORPH_OPEN, circle7)
# Otsu thresholding
#img_th = cv2.threshold(img_open, 0, 255, cv2.THRESH_OTSU)[1]
# Invert the image in case the objects of interest are in the dark side
#if (np.sum(img_th == 255) > np.sum(img_th == 0)):
# img_th = cv2.bitwise_not(img_th)
# second morphological opening (on binary image this time)
#bin_open = cv2.morphologyEx(img_th, cv2.MORPH_OPEN, circle7)
# connected components
#img = mark_boundaries(img,cv2.connectedComponents(bin_open)[1], color=(1,0,0))
return (img*255).astype(np.uint8)
class JitterBrightness(object):
def __call__(self, img):
# img is a numpy rgb image
gamma = random.random() + 0.3
return adjust_gamma(img, gamma)
class Rescale(object):
def __call__(self, img):
# img is a numpy rgb image
return equalize_adapthist(img)
class Negative(object):
def __call__(self, img):
# img is a numpy rgb image
return rescale_intensity(255-img)
class To3D(object):
# make into a 3d RGB-like array required for making it a PIL image and then a tensor
def __call__(self, mask):
h = mask.shape[0]
w = mask.shape[1]
mask_rgb = np.zeros((h,w,3))
for i in xrange(h):
for j in xrange(w):
if mask[i,j] == 1:
mask_rgb[i,j,:] = 255
return mask_rgb.astype(np.uint8)
class To1Ch(object):
def __call__(self, img, channel = 0):
return img[:,:,channel][:,:,None]
class Binarize(object):
def __call__(self, img):
img[img > 0.5] = 1.0
img[img < 1.0] = 0.0
return img
class ElasticTransform(object):
'''
    sigma: positive float for smoothing the transformation (elasticity of the transformation).
    If sigma is small the field looks like a completely random field after normalization.
    For intermediate sigma values the displacement fields look like elastic deformations, where sigma is the elasticity coefficient.
    If sigma is large, the displacements become close to affine. If sigma is very large the displacements become translations.
    alpha: scaling factor - positive float giving the intensity of the transformation. Larger alphas require larger sigmas.
    Default values are taken from the paper.
'''
def __init__(self, sigma=1.5, alpha=34.0):
'''
        :param sigma: positive float giving the elasticity of the transformation
        :param alpha: positive float giving the intensity of the transformation
'''
self.sigma = sigma
self.alpha = alpha
def __call__(self, img, mask, borders):
if len(mask.shape) == 2:
# merge the image and the mask
merged_img = np.zeros(img.shape)
merged_img[:,:,] = img[:,:,]
merged_img[:,:,0] = mask[:,:]
# apply elastic deformation on the merged image
[deformed_merged_img, deformed_borders] = self.__elastic_deformation__([merged_img, borders])
# split image and mask from the merged deformed image
# mask
deformed_mask = np.zeros(mask.shape)
deformed_mask[:,:] = deformed_merged_img[:, :, 0]
self.dichotom(deformed_mask, 0.5, 1.0)
# image
deformed_img = deformed_merged_img[:,:,:]
deformed_img[:,:,0] = img[:,:,0]
else:
[deformed_img, deformed_mask, deformed_borders] = self.__elastic_deformation__([img, mask, borders])
return deformed_img.astype(np.uint8), deformed_mask.astype(np.uint8), deformed_borders.astype(np.uint8)
'''
based on the paper 'Best Practices for Convolutional Neural Networks Applied to Visual Document Analysis' Simard et al 2003
generalized the following implementation: https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
Works on numpy images
'''
def __elastic_deformation__(self, imgs):
img = imgs[0]
# img is a numpy image
shape = img.shape
n_dim = len(shape)
convolved_displacement_fields = []
grid = []
fsize = len(img.flatten())
for i in xrange(n_dim):
if i < 2: # don't touch the channel
cdf = np.array([random.random() for j in xrange(fsize)]).reshape(shape) * 2 - 1
convolved_displacement_fields.append(
gaussian_filter(cdf, self.sigma, mode="constant", cval=0) * self.alpha)
grid.append(np.arange(shape[i]))
grid = np.meshgrid(*grid, indexing='ij')
indices = []
for i in xrange(n_dim):
if i < 2: # don't touch the channel
indices.append(np.reshape(grid[i] + convolved_displacement_fields[i], (-1, 1)))
else:
indices.append(np.reshape(grid[i], (-1, 1)))
deformed_imgs = [map_coordinates(my_img, indices, order=3).reshape(shape) for my_img in imgs]
return deformed_imgs
def dichotom(self, img, thr, v1, v0=0):
if len(img.shape) == 2:
img[img > thr] = v1
img[img < v1] = v0
else:
height, width, channel = img.shape
for i in xrange(height):
for j in xrange(width):
for k in xrange(channel):
if img[i, j, k] == thr:
img[i, j, :] = v1
break
img[img < v1] = v0
def PIL_torch_to_numpy(img):
img = np.transpose(img.numpy(), (1, 2, 0))
if img.shape[2] == 1:
img = img[:,:,0]
return img
def reverse_test_transform(img, original_size):
'''
reverse the basic mask transformation
:param img:
:param original_size: H X W X C of image
:return:
'''
# resize the tenstor to the original size
reverse_transform = transforms.Compose([transforms.ToPILImage(),
transforms.Resize(original_size[:2]), transforms.ToTensor()])
img = PIL_torch_to_numpy(reverse_transform(img))
return img
def to_binary_mask(labelled_mask, with_borders, use_borders_as_mask):
if use_borders_as_mask:
mask = find_boundaries(labelled_mask, mode='outer')
else:
mask = (labelled_mask > 0)
if with_borders:
mask[find_boundaries(labelled_mask, mode='outer')] = 0
#borders = (labelled_mask > 0).astype(np.uint8) - mask # borders of touching cells (if borders are marked)
borders = find_boundaries(labelled_mask, mode='outer')
return mask.astype(np.uint8), borders.astype(np.uint8)
# add transformations to color
transformations = {
"train_transform_elastic":JointCompose(# transformations
[
# turn mask into 3D RGB-Like for PIL and tensor transformation
TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
#Elastic deformation on the numpy images
TransformSpec(ElasticTransform(), RANDOM_JOINT_TRANSFORM_WITH_BORDERS, prob=0.8),
# Convert borders and mask to 1 channel
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
# color jittering (image only)
TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM),
TransformSpec(Negative(), IMG_ONLY_TRANSFORM, prob=0.5),
# turn into a PIL image - required to apply torch transforms (both image, mask and borders)
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
# flipping
TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.5),
#resize image (bilinear interpolation)
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
        #resize borders (nearest-neighbour interpolation)
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
# resize mask
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
        # finally turn into a torch tensor (both image and mask)
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
# ensure mask and borders are binarized
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
),
"train_transform":JointCompose(
[TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.5),
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
),
"train_transform_segment":JointCompose(
[TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.9),
TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
),
"train_transform_jitter":JointCompose(
[TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM, prob=0.9),
TransformSpec(Negative(), IMG_ONLY_TRANSFORM, prob=0.5),
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.2),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
),
"test_transform":JointCompose(
[TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
),
"toy_transform":JointCompose(
[ # turn mask into 3D RGB-Like for PIL and tensor transformation
TransformSpec(To3D(), MASK_ONLY_TRANSFORM),
TransformSpec(To3D(), BORDER_ONLY_TRANSFORM),
#Elastic deformation on the numpy images
#TransformSpec(ElasticTransform(), RANDOM_JOINT_TRANSFORM_WITH_BORDERS),
# Convert borders and mask to 1 channel
TransformSpec(To1Ch(), BORDER_ONLY_TRANSFORM),
TransformSpec(To1Ch(), MASK_ONLY_TRANSFORM),
#TransformSpec(Rescale(), IMG_ONLY_TRANSFORM),
# color jittering (image only)
TransformSpec(JitterBrightness(), IMG_ONLY_TRANSFORM),
#TransformSpec(Negative(), IMG_ONLY_TRANSFORM),
TransformSpec(Segment(), IMG_ONLY_TRANSFORM),
TransformSpec(transforms.ToPILImage(), JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Flip(1), JOINT_TRANSFORM_WITH_BORDERS, prob=0.0),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.BILINEAR),
IMG_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
BORDER_ONLY_TRANSFORM),
TransformSpec(transforms.Resize((IMG_SIZE,IMG_SIZE),interpolation=Image.NEAREST),
MASK_ONLY_TRANSFORM),
TransformSpec(transforms.ToTensor(),JOINT_TRANSFORM_WITH_BORDERS),
TransformSpec(Binarize(), BORDER_ONLY_TRANSFORM),
TransformSpec(Binarize(), MASK_ONLY_TRANSFORM)]
)
}
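# Hedged usage sketch (not from the original file): applying one of the pipelines
# above. The dict's name and the exact call signature of JointCompose are
# assumptions based on the structure of this file.
#   pipeline = transform_dict["test_transform"]
#   img_t, mask_t, border_t = pipeline(img, mask, borders)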
| yolish/kaggle-dsb18 | dsbaugment.py | dsbaugment.py | py | 16,818 | python | en | code | 0 | github-code | 36 |
33364078946 | from flask import Flask, request, jsonify, url_for, session, redirect, render_template
from flaskext.mysql import MySQL
from flask_oauth import OAuth
import logging
import time
from logging.handlers import RotatingFileHandler
# from urllib.requests import urlparse
app = Flask(__name__)
app.secret_key = 'secretkey'
# database configuration
mysql = MySQL()
app.config['MYSQL_DATABASE_USER'] = 'root'
app.config['MYSQL_DATABASE_PASSWORD'] = 'root'
app.config['MYSQL_DATABASE_DB'] = 'dbtst'
app.config['MYSQL_DATABASE_HOST'] = '172.24.0.2'
app.config['MYSQL_DATABASE_PORT'] = 3306
app.config['JSONIFY_PRETTYPRINT_REGULAR'] = True
mysql.init_app(app)
oauth = OAuth()
google = oauth.remote_app('google',
base_url='https://www.google.com/accounts/',
authorize_url='https://accounts.google.com/o/oauth2/auth',
request_token_url=None,
request_token_params={'scope': 'https://www.googleapis.com/auth/userinfo.email',
'response_type': 'code'},
access_token_url='https://accounts.google.com/o/oauth2/token',
access_token_method='POST',
access_token_params={'grant_type': 'authorization_code'},
consumer_key='482758007120-vm7lob4gqkmr8eeeq21uo5odpnt6736g.apps.googleusercontent.com',
consumer_secret='cpNDbgd5rFiw-98asLncKZUd')
@app.route('/')
def landing():
return render_template('index.html')
@app.route('/home')
def home():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman utama')
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
return render_template('home.html')
@app.route('/search')
def search():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman pencarian')
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
return render_template('search.html')
@app.route('/register')
def register():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman penambahan data')
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
return render_template('register.html')
@app.route('/hapus')
def hapus():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman penghapusan data')
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
return render_template('hapus.html')
@app.route('/baru')
def baru():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' masuk halaman pembaruan data')
access_token = session.get('access_token')
if access_token is None:
return redirect(url_for('login'))
return render_template('baru.html')
@app.route('/login')
def login():
callback=url_for('authorized', _external=True)
return google.authorize(callback=callback)
@app.route('/authorized')
@google.authorized_handler
def authorized(resp):
access_token = resp['access_token']
session['access_token'] = access_token, ''
return redirect(url_for('home'))
@app.route('/user', methods=['GET'])
def get_user():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' mencari user')
    conn = mysql.connect()  # connect to the database
    cursor = conn.cursor()  # cursor used to execute statements
carinama = request.args.get('carinama')
query = 'SELECT * FROM mahasiswa WHERE nama=%s'
    data = (carinama,)  # one-element tuple so %s is bound correctly
    cursor.execute(query, data)
    result = cursor.fetchall()  # fetch the MySQL result set as a list of tuples
result_baru = []
    # attach dictionary keys to each record
for user in result:
user_baru = {
'nim': user[0],
'nama': user[1],
'jurusan': user[2],
'angkatan': user[3]
}
result_baru.append(user_baru)
return {'hasil': result_baru}
@app.route('/create', methods=['POST'])
def insert_user():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' menambahkan data')
conn = mysql.connect()
cursor = conn.cursor()
nim = request.form['nim']
nama = request.form['nama']
jurusan = request.form['jurusan']
angkatan = request.form['angkatan']
query = 'INSERT INTO mahasiswa (nim, nama, jurusan, angkatan) VALUES (%s,%s,%s,%s)'
data = (nim, nama, jurusan, angkatan)
cursor.execute(query, data)
conn.commit()
conn.close()
return 'data berhasil ditambahkan'
@app.route('/update', methods=['PUT'])
def update_user():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' memperbarui data')
conn = mysql.connect()
cursor = conn.cursor()
nim = request.form['nim']
nama = request.form['nama']
jurusan = request.form['jurusan']
angkatan = request.form['angkatan']
query = 'UPDATE mahasiswa SET nama=%s, jurusan=%s, angkatan=%s WHERE nim=%s'
data = (nama, jurusan, angkatan, nim)
cursor.execute(query, data)
conn.commit()
conn.close()
return 'data berhasil diupdate'
@app.route('/delete', methods=['DELETE'])
def delete_user():
app.logger.error(time.strftime('%A %B, %d %Y %H:%M:%S') + ' menghapus data')
conn = mysql.connect()
cursor = conn.cursor()
query = 'DELETE FROM mahasiswa WHERE nim=%s'
    data = (request.form['carinim'],)  # bind as a one-element tuple
cursor.execute(query, data)
conn.commit()
conn.close()
return 'data berhasil didelete'
# for an update only the query changes; execute and commit are still required
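# Hedged sketch (not part of the original app): a tiny client exercising the
# CRUD endpoints above with the `requests` library. The base URL and the form
# values are illustrative assumptions.
def _example_client(base_url='http://localhost:5000'):
    import requests  # assumed to be installed; the app itself does not need it
    # create a record
    requests.post(base_url + '/create',
                  data={'nim': '123', 'nama': 'Budi', 'jurusan': 'TI', 'angkatan': '2020'})
    # read it back by name
    print(requests.get(base_url + '/user', params={'carinama': 'Budi'}).json())
    # update, then delete it
    requests.put(base_url + '/update',
                 data={'nim': '123', 'nama': 'Budi S.', 'jurusan': 'TI', 'angkatan': '2020'})
    requests.delete(base_url + '/delete', data={'carinim': '123'})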
if __name__ == "__main__":
handler = RotatingFileHandler('tst.log', maxBytes=10000, backupCount=1)
handler.setLevel(logging.INFO)
app.logger.addHandler(handler)
app.run(host='0.0.0.0') | Iann221/UASTST | api.py | api.py | py | 5,898 | python | en | code | 0 | github-code | 36 |
650884033 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from pymongo import MongoClient
class YymanhuaPipeline(object):
def process_item(self, item, spider):
        # insert the item into the database
item["_id"] = self.count
self.collection.insert(item)
self.count += 1
return item
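    # To activate this pipeline, register it in the project's settings.py, e.g.
    # ITEM_PIPELINES = {"yymanhua.pipelines.YymanhuaPipeline": 300}
    # (the dotted path is an assumption based on this repo's layout)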
def open_spider(self,spider):
self.client = MongoClient()
self.collection = self.client["pySpider"]["yymh_2"]
self.count = 1
print("数据库以连接...")
def close_spider(self, spider):
self.client.close()
print("数据库连接关闭") | jihongzhu/python- | yymanhua/yymanhua/pipelines.py | pipelines.py | py | 737 | python | en | code | 0 | github-code | 36 |
9738742376 | def move():
direction = "W" # The default is always forwards
key = input(">> ").upper()
    # Tuple membership avoids the pitfall of `key in 'WASD'`, which is a
    # substring test that also accepts "" and runs such as "WA".
    if key not in ('W', 'A', 'S', 'D'): # If the entered key is not W A S D
        print("Invalid Input\n")
        while True:
            key = input(">> ").upper()
            if key in ('W', 'A', 'S', 'D'):
                break
direction = key # Updating this var if key is valid
return direction
| Aim-Entity/gamejam-1 | algo/movement.py | movement.py | py | 384 | python | en | code | 0 | github-code | 36 |
74506416103 | from django.urls import include, path, re_path
from django.urls import reverse
from rest_framework.routers import DefaultRouter
from .views import SubscriptionViewSet, UserSubscriptionView, SubscriptionCreateView, SubscriptionCancelView, SubscriptionUpdateView, SubscriptionReactiveView
app_name = 'subscription'
router = DefaultRouter(trailing_slash=False)
router.register(r'subscription', SubscriptionViewSet)
urlpatterns = [
path('', include(router.urls)),
path('subscription/user/', UserSubscriptionView.as_view()),
path('subscription/create/', SubscriptionCreateView.as_view()),
path('subscription/cancel/', SubscriptionCancelView.as_view()),
path('subscription/reactive/', SubscriptionReactiveView.as_view()),
path('subscription/update/', SubscriptionUpdateView.as_view()),
] | jubelltols/React_DRF_MySql | DRF/src/onbici/subscription/urls.py | urls.py | py | 814 | python | en | code | 0 | github-code | 36 |
37204976551 | #!/usr/bin/python3
from __future__ import print_function
import os
import sys
import torch
import torch.backends.cudnn as cudnn
import argparse
import cv2
import numpy as np
from collections import OrderedDict
sys.path.append(os.getcwd() + '/../../src')
from config import cfg
from prior_box import PriorBox
from detection import Detect
from nms import nms
from utils import decode
from timer import Timer
from yufacedetectnet import YuFaceDetectNet
parser = argparse.ArgumentParser(description='Face and Mask Detection')
parser.add_argument('-m', '--trained_model', default='weights/yunet_final.pth',
type=str, help='Trained state_dict file path to open')
parser.add_argument('--image_file', default='t1.jpg', type=str, help='the image file to be detected')
parser.add_argument('--conf_thresh', default=0.5, type=float, help='conf_thresh')
parser.add_argument('--top_k', default=20, type=int, help='top_k')
parser.add_argument('--nms_thresh', default=0.5, type=float, help='nms_thresh')
parser.add_argument('--keep_top_k', default=20, type=int, help='keep_top_k')
parser.add_argument('--device', default='cuda:0', help='which device the program will run on. cuda:0, cuda:1, ...')
args = parser.parse_args()
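# Example invocation (paths and thresholds are illustrative):
#   python demo.py -m weights/yunet_final.pth --image_file t1.jpg --conf_thresh 0.5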
def check_keys(model, pretrained_state_dict):
ckpt_keys = set(pretrained_state_dict.keys())
model_keys = set(model.state_dict().keys())
used_pretrained_keys = model_keys & ckpt_keys
unused_pretrained_keys = ckpt_keys - model_keys
missing_keys = model_keys - ckpt_keys
print('Missing keys:{}'.format(len(missing_keys)))
print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys)))
print('Used keys:{}'.format(len(used_pretrained_keys)))
assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint'
return True
def remove_prefix(state_dict, prefix):
''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
print('remove prefix \'{}\''.format(prefix))
f = lambda x: x.split(prefix, 1)[-1] if x.startswith(prefix) else x
return {f(key): value for key, value in state_dict.items()}
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
labels = ('_background_', 'face', 'mask')
num_classes = 3
detect = Detect(num_classes, 0, args.top_k, args.conf_thresh, args.nms_thresh)
if __name__ == '__main__':
# img_dim = 320
device = torch.device(args.device)
torch.set_grad_enabled(False)
# net and model
net = YuFaceDetectNet(phase='test', size=None ) # initialize detector
net = load_model(net, args.trained_model, True)
# net.load_state_dict(torch.load(args.trained_model))
net.eval()
print('Finished loading model!')
## Print model's state_dict
#print("Model's state_dict:")
#for param_tensor in net.state_dict():
# print(param_tensor, "\t", net.state_dict()[param_tensor].size())
cudnn.benchmark = True
net = net.to(device)
_t = {'forward_pass': Timer(), 'misc': Timer()}
# testing begin
img_raw = cv2.imread(args.image_file, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
im_height, im_width, _ = img.shape
#img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
_t['forward_pass'].tic()
loc, conf = net(img) # forward pass
_t['forward_pass'].toc()
_t['misc'].tic()
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
detections = detect(loc, conf, priors)
# detections = out.data
print(detections.size())
scale = torch.Tensor([im_width, im_height, im_width, im_height])
# scale = scale.to(device)
for i in range(detections.size(1)):
j = 0
        while j < detections.size(2) and detections[0,i,j,0] >= 0.6:  # guard against running past top_k
score = detections[0,i,j,0]
label_name = labels[i]
display_txt = '%s: %.2f'%(label_name, score)
pt = (detections[0,i,j,1:]*scale).cpu().numpy()
j+=1
pts = (int(pt[0]), int(pt[1]))
pte = (int(pt[2]), int(pt[3]))
cx = int(pt[0])
cy = int(pt[1]) + 12
cv2.rectangle(img_raw, pts, pte, (0, 255, 0), 2)
cv2.putText(img_raw, label_name, (cx, cy), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
cv2.imshow('facemask', img_raw)
cv2.waitKey(0)
| tienhoangvan/libfacemaskdet | train/tasks/task1/demo.py | demo.py | py | 5,078 | python | en | code | 0 | github-code | 36 |
18657364741 | #!/usr/bin/env python
import matplotlib.pyplot as plt
import csv
x = []
y = []
with open('orderByDay.csv','r') as csvfile:
plots = csv.reader(csvfile, delimiter=',')
for row in plots:
x.append(row[0])
y.append(int(row[1]))
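# Note: weekdays are plotted in the order they appear in the CSV; sort the rows
# first if a fixed Monday-to-Sunday order is required.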
plt.plot(x,y)
plt.xlabel('Weekdays')
plt.ylabel('Number of orders')
plt.title('Order By Weekdays')
plt.show()
| gjtqiyue/Comp421-Database-Project | Deliverable_3/PlotCsv.py | PlotCsv.py | py | 360 | python | en | code | 0 | github-code | 36 |
1955434501 | import logging
import sqlite3.dbapi2 as sqlite3
import os
from ..config import CREATE_QUERY, TEST_QUERY
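# For reference, CREATE_QUERY and TEST_QUERY live in ..config; a plausible shape
# (an assumption, not this project's actual schema) would be:
#   CREATE_QUERY = "CREATE TABLE IF NOT EXISTS offers (id INTEGER PRIMARY KEY, url TEXT)"
#   TEST_QUERY = "SELECT 1 FROM offers LIMIT 1"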
def load_database(path):
DB_INSTANCE = sqlite3.connect(path)
cursor = DB_INSTANCE.cursor()
try:
cursor.execute(TEST_QUERY)
except sqlite3.OperationalError as e:
logging.error(f"Something went wrong while dealing with the database! More info: {e}")
cursor.close()
exit(e)
logging.info("Done! Database was loaded succesfully!")
cursor.close()
def create_database(path: str):
if not os.path.isfile(path):
new = True
logging.info(f"Creating new database at {path}")
else:
new = False
logging.info(f"Loading database from {path}")
DB = sqlite3.connect(path)
if new:
cursor = DB.cursor()
try:
cursor.execute(CREATE_QUERY)
DB.commit()
cursor.close()
except sqlite3.OperationalError as e:
logging.error(f"Something went wrong while creating the database! More info: {e}")
cursor.close()
exit(e)
logging.debug(f"Succesfully ran CREATE_QUERY on database at {path}, preparing to load")
load_database(path)
| nocturn9x/AmazonOffers-Manager | AmazonBot/database/dbcreator.py | dbcreator.py | py | 1,215 | python | en | code | 10 | github-code | 36 |
31014089660 |
# Imports
import PySimpleGUI as sg
import openai
import os
from openai.error import APIConnectionError, AuthenticationError
import threading
from datetime import datetime
from peewee import SqliteDatabase, Model, CharField, TextField
# Envs
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv())
openai.api_key = os.getenv('openai.api_key')
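# The .env file is expected to contain a single line such as (the key value is
# illustrative): openai.api_key=sk-your-key-here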
# Definitions
APP_TITLE="GUI para Chat GPT"
FILE="Archivo"
RESET_CONTEXT="Restablecer contexto"
SAVE_CHAT="Exportar chat"
QUIT="Salir"
OPTIONS="Opciones"
LOAD_API_KEY="Cargar API Key"
HELP="Ayuda"
ABOUT="Acerca de"
DEFAULT_THEME="DarkGrey1"
CHAT_RESULT_KEY="-chat_result-"
PROMPT_KEY="-prompt_input-"
SUBMIT_KEY="-send_prompt-"
CHAT_LISTBOX = "-select-a-chat"
DELETE_CHAT_BUTTON = "-delete-chat-button"
REGENERATE_CHAT_BUTTON = "-regenerate-chat-button"
THEMES=["Temas", ["Default", "Black", "Dark15", "DarkGrey2", "DarkGrey3", "DarkBrown1"]]
ABOUT_TEXT = "ACERCA DE "
# Define database connection
db = SqliteDatabase('my_chats.db')
# Define table
class Chat(Model):
title = CharField()
query = TextField()
response = TextField()
class Meta:
database = db
# Migrate
db.create_tables([Chat])
# Program
class Application:
def __init__(self):
        # Default theme
sg.theme(DEFAULT_THEME)
        # Chat list
self.chats = Chat.select()
        # Default system context
self.default_context = {"role": "system",
"content": "Eres un asistente muy útil."}
        # Conversation messages
self.messages = [self.default_context]
        # Build the menu-bar elements
self.menu_def = [
[FILE, [RESET_CONTEXT, SAVE_CHAT, "---", QUIT]],
[OPTIONS, [THEMES, LOAD_API_KEY]],
[HELP, [ABOUT]]
]
self.menu_bar = sg.Menu(self.menu_def)
# Left frame
self.chat_list = sg.Listbox(values=list(map(lambda c : c.title, self.chats)), size=(25, 10), expand_y=True, enable_events=True, key=CHAT_LISTBOX)
self.new_chat_button = sg.Button("Regenerar consulta", key=REGENERATE_CHAT_BUTTON)
self.delete_chat_button = sg.Button("Eliminar", key=DELETE_CHAT_BUTTON)
self.left_frame_layout = [[self.chat_list],[self.new_chat_button, self.delete_chat_button]]
self.left_frame = sg.Frame(title="Historial de conversaciones", layout=self.left_frame_layout, expand_y=True)
        # Build the right-hand panel elements
self.chat_result = sg.Multiline(size=(100, 25), key=CHAT_RESULT_KEY)
self.prompt_label = sg.Text("Sobre qué quieres hablar?:")
self.prompt = sg.Multiline(key=PROMPT_KEY, expand_x=True, size=(100, 5))
self.submit_button = sg.Button("Enviar", key=SUBMIT_KEY, enable_events=True, bind_return_key=True, expand_x=True)
self.right_frame_layout = [
[self.chat_result],
[self.prompt_label],
[self.prompt],
[self.submit_button]
]
self.right_frame = sg.Frame(title="Conversación", layout=self.right_frame_layout)
        # Create the window
self.layout = [
[self.menu_bar],
[self.left_frame, sg.VerticalSeparator(), self.right_frame]
]
self.window = sg.Window(APP_TITLE, self.layout)
    # Start a loop that handles the window's events
def start(self):
first_load = True
while True:
            # Read events
event, values = self.window.read()
if first_load:
self.refresh_chat_list()
first_load = False
            # Close the window
if event == sg.WIN_CLOSED or event == QUIT:
break
            # Send button clicked
if event in (SUBMIT_KEY, 'Return:'+PROMPT_KEY):
                # If an API key still needs to be loaded
if self.needs_api_key():
                    # Tell the user a key must be loaded
sg.popup("No se cargó ninguna API Key", "No se cargó ninguna API Key. Para conseguir una visita https://platform.openai.com\nLuego puedes cargarla a través de Opciones>Cargar API Key")
else:
                    # Get the user's query
query = values[PROMPT_KEY]
                    # Hand the query to the processing method
self.send_query(query)
                    # Clear the prompt box
self.window[PROMPT_KEY].update(value="")
            # Load API KEY
elif event == LOAD_API_KEY:
self.load_api_key()
            # Save the conversation to a file
elif event == SAVE_CHAT:
                # Ask the user where to save via a save dialog
filename = sg.tk.filedialog.asksaveasfilename(
defaultextension='txt',
filetypes=(("Archivos de texto", "*.txt"), ("Todos los archivos", "*.*")),
parent=self.window.TKroot,
title="Guardar como",
initialfile=self.chat_list.get()[0]
)
                # If a file was chosen
if filename != None and len(filename) > 0:
                    # Export the conversation
self.save_chat_to(filename)
            # About
elif event == ABOUT:
                # Show the About dialog
sg.popup(ABOUT_TEXT)
            # New chat (reset the context)
elif event == RESET_CONTEXT:
self.reset_context()
elif event == CHAT_LISTBOX:
                # If the list has at least one item
if self.chat_list.get():
                    # Get the selected item
selected_title = self.chat_list.get()[0]
                    # Load the chat
self.load_chat(selected_title)
                    # Get its index
index = self.chat_list.get_list_values().index(selected_title)
                    # Mark it as selected
self.chat_list.update(set_to_index=index)
elif event == DELETE_CHAT_BUTTON:
delete = sg.popup_yes_no("Desea eliminar la conversación seleccionada?")
if delete == "Yes":
self.delete_chat(self.chat_list.get()[0])
elif event == REGENERATE_CHAT_BUTTON:
self.regenerate_query(self.chat_list.get()[0])
        # Destroy/close the window once the loop ends
self.window.close()
    # Process a query
def send_query(self, query):
        # Create a new record
new_chat = Chat(title=query[:45]+str(datetime.now()), query=query, response="Esperando respuesta")
        # Save it to the db
new_chat.save()
        # Delegate loading/display
self.load_chat(new_chat.title)
        # Spawn a thread that waits for the server response without blocking the app
threading.Thread(target=self.push_response, args=[new_chat.title, query]).start()
def set_query_response(self, title, response):
        # Look the chat up by title
selected_chat = Chat.get(Chat.title == title)
        # Update the response
selected_chat.response = response
        # Save
selected_chat.save()
        # Load/show the result
self.load_chat(title)
def delete_chat(self, title):
selected_chat = Chat.get(Chat.title == title)
selected_chat.delete_instance()
self.refresh_chat_list()
def load_chat(self, title):
        # Look up the chat
chat_from_db = Chat.get(Chat.title == title)
        # Build the chat text
chat_text = f"Usuario: {chat_from_db.query}\n"
chat_text += f"ChatBot: {chat_from_db.response}\n"
        # Show the text in the chat result view
self.window[CHAT_RESULT_KEY].update(value=chat_text)
self.refresh_chat_list()
def refresh_chat_list(self):
        # Reload the chat list
self.chats = Chat.select()
        # Refresh the listbox with the titles
self.chat_list.update(values=[chat.title for chat in self.chats], set_to_index=len(self.chats)-1)
def regenerate_query(self, title):
        # Find the chat in the db
selected_chat = Chat.get(Chat.title == title)
        # And resend its query
self.send_query(selected_chat.query)
def load_api_key(self):
        # Ask the user for the key via a popup window
new_api_key = sg.popup_get_text(title="Cargar API Key", message="Pega aquí tu API Key:", default_text=openai.api_key)
        # Keep the new key if one was entered, otherwise keep the previous one
openai.api_key = new_api_key if new_api_key != None else openai.api_key
with open(".env", "w") as file:
file.write(f"openai.api_key={openai.api_key}")
    # New chat
def create_new_chat(self, title, content):
new_chat = {
"title": title,
"messages": content
}
self.chats.append(new_chat)
self.chat_list.update(values=[chat.title for chat in self.chats])
    # Reset the chat
def reset_context(self):
self.messages = [self.default_context]
self.window[CHAT_RESULT_KEY].update(value="")
self.window[PROMPT_KEY].update(value="")
    # Append text to the chat
def push_to_chat(self, name, text):
if len(self.chats) == 0:
self.create_new_chat(text[:20], text)
chat = self.window[CHAT_RESULT_KEY].get()
chat += "\n" if chat != "" else "" # Ahre
chat += name
chat += ": "
chat += text
chat += "\n"
self.window[CHAT_RESULT_KEY].update(value=chat)
def push_response(self, title, query):
try:
            # Add the query to the context
self.messages.append({"role": "user", "content": query})
            # Send the query
response = openai.ChatCompletion.create(
model="gpt-3.5-turbo", messages=self.messages)
            # Extract the relevant part of the response
response_content = response.choices[0].message.content
            # Add it to the context
self.messages.append({"role": "assistant", "content": response_content})
            # Associate it with the query
self.set_query_response(title, response_content)
except APIConnectionError as ace:
self.push_to_chat("Sistema", "Ocurrió un error al conectarse con el servidor. Asegurate de que tienes accesso a internet")
except AuthenticationError as authEx:
self.push_to_chat("Sistema", "Error de autenticación. Asegúrese de haber proporcionado una API KEY válida")
    def needs_api_key(self):
        # os.getenv returns None when the variable is missing, so test
        # falsiness rather than comparing against the empty string
        return not openai.api_key
def save_chat_to(self, filename):
with open(filename, "w") as file:
file.write(self.window[CHAT_RESULT_KEY].get())
app = Application()
app.start() | montexbjeliseo/gui_for_chatgpt | gui_chat_gpt_api_python.pyw | gui_chat_gpt_api_python.pyw | pyw | 11,379 | python | es | code | 0 | github-code | 36 |
72192933225 | import redis, json, uuid, time, calendar, csv, io, os, yaml, xmltodict
from bottle import response
from dicttoxml import dicttoxml
from datetime import datetime
allowed_types = {
"application/json",
"application/xml",
"application/x-yaml",
# "text/tab-separated-values",
}
# expire after five minutes
records_expiration = 300
redis_port = os.environ.get("redis_port")
redis_pass = os.environ.get("redis_pass")
r = redis.Redis(
host="localhost",
port=redis_port,
db=0,
password=redis_pass,
charset="utf-8",
decode_responses=True,
)
def delete_message(message_id, provider_id):
keys = r.keys("provider:" + provider_id + "*/uid:" + message_id + "/timestamp*")
if len(keys) == 1:
r.delete(keys[0])
return "deleted"
elif len(keys) == 0:
response.status = 404
return "No keys found"
else:
response.status = 500
return "error: more than one keys found"
def get_messages(topic, limit, type):
# Get keys related to partial search of 'Topic'
keys = r.keys("*/topic:" + topic + "/uid:*")
# Validation
    if not keys:  # redis returns an empty list, never None, when nothing matches
raise Exception(f"No keys found with {topic}")
timestamps = {}
messages = {}
n = 0
    # Loop through the keys and record each key's timestamp
for key in keys:
message = r.hgetall(key)
messages[n] = message
timestamps[n] = datetime.fromtimestamp(int(key[-10:]))
n += 1
# Sort by timestamps & retrieve their keys
sorted_timestamps = list(
dict(sorted(timestamps.items(), key=lambda x: x[1])).keys()
)
# index by custom limit (latest timestamps)
index_out = sorted_timestamps[-limit:]
# Dict comprehension -> return messages matching indexed keys
sorted_messages = {str(k): messages[k] for k in index_out}
# Transform the messages
if type == "application/json":
return sorted_messages
elif type == "application/xml":
return dicttoxml(sorted_messages)
elif type == "application/x-yaml":
return yaml.dump(sorted_messages)
# elif type == "text/tab-separated-values":
# body_dict = ""
# byte_str = body.read()
# text_obj = byte_str.decode("UTF-8")
# rd = csv.reader(io.StringIO(text_obj), delimiter="\t", quotechar='"')
# print(rd[1])
# for row in rd:
# print(row)
return
def save_message(body, type, topic, author):
if type == "application/json":
body_dict = json.load(body)
elif type == "application/xml":
body_dict = xmltodict.parse(body)
elif type == "application/x-yaml":
body_dict = yaml.safe_load(body)
elif type == "text/tab-separated-values":
body_dict = ""
message = body_dict["message"]
message_id = uuid.uuid1()
current_GMT = time.gmtime()
time_stamp = calendar.timegm(current_GMT)
key = f"provider:{author}/topic:{topic}/uid:{message_id}/timestamp:{time_stamp}"
r.hset(key, "m", message)
r.hset(key, "a", author)
r.hset(key, "id", str(message_id))
r.expire(key, records_expiration)
response.status = 201
return str(message_id)
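# Hedged usage sketch (not part of the original service); the topic/author
# values are illustrative and save_message is fed an in-memory JSON body.
def _example_roundtrip():
    import io
    message_id = save_message(io.BytesIO(b'{"message": "hello"}'),
                              "application/json", topic="demo", author="alice")
    print(get_messages("demo", limit=5, type="application/json"))
    delete_message(message_id, "alice")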
| Zamanien/SI_mandatory | esb_transform.py | esb_transform.py | py | 3,154 | python | en | code | 0 | github-code | 36 |
25775618531 | import pydriller
from pydriller.metrics.process.code_churn import CodeChurn
from pydriller.metrics.process.contributors_count import ContributorsCount
hash1 = "f858260790250880fc74ab7108073435f534d7f1"
hash2 = "319f616e572a03b984013d04d1b3a18ffd5b1190"
repo_path = "~/workfolder/dayjs"
churn_metric = CodeChurn(path_to_repo=repo_path,
from_commit=hash1,
to_commit=hash2)
files_count = churn_metric.count()
files_max = churn_metric.max()
files_avg = churn_metric.avg()
print('Total code churn for each file: {}'.format(files_count))
print('Maximum code churn for each file: {}'.format(files_max))
print('Average code churn for each file: {}'.format(files_avg))
count_metric = ContributorsCount(path_to_repo=repo_path,
from_commit=hash1,
to_commit=hash2)
count = count_metric.count()
minor = count_metric.count_minor()
print('Number of contributors per file: {}'.format(count))
print('Number of "minor" contributors per file: {}'.format(minor)) | vrjuliao/BCC | engenharia-de-software-2/ex-8-1/3.py | 3.py | py | 1,059 | python | pt | code | 0 | github-code | 36 |
14199367630 | import cv2
import pickle as pkl
import time
import xgboost as xgb
import math
import numpy as np
import mediapipe as mp
from speak import speakText
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
width = 640
height = 480
hands = mp_hands.Hands(min_detection_confidence=0.6, min_tracking_confidence=0.75)
modelRight = pkl.load(open('./models/xgboost-model-dynamic-words-16-right-tuned', 'rb'))
modelLeft = pkl.load(open('./models/xgboost-model-dynamic-words-16-left-tuned', 'rb'))
start_time = time.time()
speakWaitTime = 1
labels = {
"0" : "me",
"1" : "you",
"2" : "hello",
"3" : "from",
"4" : "good",
"5" : "how",
"6" : "university",
"7" : "welcome",
"8" : "hope",
"9" : "like",
"10" : "new",
"11" : "people",
"12" : "technology",
"13" : "use",
"14" : "voice",
"15" : "create"
}
cap = cv2.VideoCapture(0)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
rightHandFirst = False
isMultiHand = False
# Initialised to True for first frame
addRightFrame = True
addLeftFrame = True
rightKeyFrames = []
leftKeyFrames = []
rightKeyCheckPoints = []
leftKeyCheckPoints = []
rightLabel = ''
rightProb = 0
leftLabel = ''
leftProb = 0
null_12 = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
d_threshold = 0.1
connections = [
(1, 4), (5, 8), (9, 12), (13, 16), (17, 20)
]
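# Each pair above spans one digit of MediaPipe's 21-landmark hand model (thumb
# through pinky); generatePointVectors() turns each pair into a unit direction vector.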
r_c_x = 0
r_c_y = 0
l_c_x = 0
l_c_y = 0
r_p_x = 0
r_p_y = 0
l_p_x = 0
l_p_y = 0
def generatePointVectors(points, previousFrames):
vectors = []
prev_origin_x = 0
prev_origin_y = 0
dx = 0
dy = 0
if(len(previousFrames) == 0):
prev_origin_x = 0
prev_origin_y = 0
else:
prev_origin_x = previousFrames[0]
prev_origin_y = previousFrames[1]
origin_x, origin_y = points[0]
origin_x_rounded = round((origin_x), 5)
origin_y_rounded = round((origin_y), 5)
dx = origin_x_rounded - prev_origin_x
dy = origin_y_rounded - prev_origin_y
vectors.append(origin_x_rounded)
vectors.append(origin_y_rounded)
for num, connection in enumerate(connections):
x0, y0 = points[connection[0]]
x1, y1 = points[connection[1]]
x_final = x1 - x0
y_final = y1 - y0
mag = math.sqrt((x_final)**2+(y_final)**2)
x_vector = round((x_final/mag) + dx,5)
y_vector = round((y_final/mag) + dy,5)
vectors.append(x_vector)
vectors.append(y_vector)
return vectors
def generateCheckPoints(points):
checkPoints = []
palm_x, palm_y = points[0]
thumb_x, thumb_y = points[4]
index_x, index_y = points[8]
pinky_x, pinky_y = points[20]
mean_x = round((palm_x + thumb_x + index_x + pinky_x)/4, 5)
mean_y = round((palm_y + thumb_y + index_y + pinky_y)/4, 5)
checkPoints.append(mean_x)
checkPoints.append(mean_y)
return checkPoints
def checkPreviousFrame(currCheckPoints, prevCheckPoints):
current_dx = currCheckPoints[0]
current_dy = currCheckPoints[1]
prev_dx = prevCheckPoints[0]
prev_dy = prevCheckPoints[1]
dx = round(abs(current_dx - prev_dx), 5)
dy = round(abs(current_dy - prev_dy), 5)
if(dx >= d_threshold or dy >= d_threshold):
print("Thresold crossed.")
return True, current_dx, current_dy, prev_dx, prev_dy
else:
return False, current_dx, current_dy, prev_dx, prev_dy
def recalculateFrames(frames):
cycledFrames = []
cycledFrames.extend(frames)
# Current Origin
if(len(frames) > 12):
base_x = cycledFrames[0]
base_y = cycledFrames[1]
secondFrame_dx = cycledFrames[12] - base_x
secondFrame_dy = cycledFrames[13] - base_y
# New Origin
new_base_x = cycledFrames[12]
new_base_y = cycledFrames[13]
if(len(frames) > 24):
thirdFrame_dx = cycledFrames[24] - base_x
thirdFrame_dy = cycledFrames[25] - base_y
# New second frame
new_secondFrame_dx = cycledFrames[24] - new_base_x
new_secondFrame_dy = cycledFrames[25] - new_base_y
if(len(frames) > 36):
fourthFrame_dx = cycledFrames[36] - base_x
fourthFrame_dy = cycledFrames[37] - base_y
# New third frame
new_thirdFrame_dx = cycledFrames[36] - new_base_x
new_thirdFrame_dy = cycledFrames[37] - new_base_y
i = 12
while(i < 48):
        # Frames 12-23: re-base the second keyframe onto the new origin
if(i >= 14 and i < 24 and len(frames) > 12):
cycledFrames[i] = round((cycledFrames[i] - secondFrame_dx), 5)
cycledFrames[i + 1] = round((cycledFrames[i + 1] - secondFrame_dy), 5)
        # Frames 24-35: re-express the third keyframe relative to the new second frame
elif(i >= 26 and i < 36 and len(frames) > 24):
original_keyframe_dx = cycledFrames[i] - thirdFrame_dx
original_keyframe_dy = cycledFrames[i + 1] - thirdFrame_dy
cycledFrames[i] = round(original_keyframe_dx + new_secondFrame_dx, 5)
cycledFrames[i + 1] = round(original_keyframe_dy + new_secondFrame_dy, 5)
        # Frames 36-47: re-express the fourth keyframe relative to the new third frame
elif(i >= 38 and i < 48 and len(frames) > 36):
original_keyframe_dx = cycledFrames[i] - fourthFrame_dx
original_keyframe_dy = cycledFrames[i + 1] - fourthFrame_dy
cycledFrames[i] = round(original_keyframe_dx + new_thirdFrame_dx, 5)
cycledFrames[i + 1] = round(original_keyframe_dy + new_thirdFrame_dy, 5)
i = i + 2
# 0 - 11
# 12 - 23
# 24 - 35
# 36 - 47
# Cycle out
cycledFrames = cycledFrames[12:]
return cycledFrames
def preprocessData(frames):
dataToProcess = []
dataToProcess.extend(frames)
if(len(dataToProcess) != 48):
if(len(dataToProcess) == 12):
dataToProcess.extend(null_12)
dataToProcess.extend(null_12)
dataToProcess.extend(null_12)
elif(len(dataToProcess) == 24):
dataToProcess.extend(null_12)
dataToProcess.extend(null_12)
elif(len(dataToProcess) == 36):
dataToProcess.extend(null_12)
else:
print("Error in preprocessData. Length of dataToProcess: ", len(dataToProcess))
group_0 = []
group_0.extend(dataToProcess[:12])
group_0.extend(null_12)
group_0.extend(null_12)
group_0.extend(null_12)
group_1 = []
group_1.extend(dataToProcess[:24])
group_1.extend(null_12)
group_1.extend(null_12)
group_2 = []
group_2.extend(dataToProcess[:36])
group_2.extend(null_12)
group_3 = []
group_3.extend(dataToProcess[:48])
arr_0 = np.array(group_0)
arr_1 = np.array(group_1)
arr_2 = np.array(group_2)
arr_3 = np.array(group_3)
inputData_0 = xgb.DMatrix(arr_0.data)
inputData_1 = xgb.DMatrix(arr_1.data)
inputData_2 = xgb.DMatrix(arr_2.data)
inputData_3 = xgb.DMatrix(arr_3.data)
# Convert values to DMatrix format
return xgb.DMatrix(arr_0.data), xgb.DMatrix(arr_1.data), xgb.DMatrix(arr_2.data), xgb.DMatrix(arr_3.data)
def classification(inputData_0, inputData_1, inputData_2, inputData_3, model):
prob_list_0 = model.predict(inputData_0)[0]
prob_list_1 = model.predict(inputData_1)[0]
prob_list_2 = model.predict(inputData_2)[0]
prob_list_3 = model.predict(inputData_3)[0]
max_prob_0 = np.amax(prob_list_0)
max_prob_1 = np.amax(prob_list_1)
max_prob_2 = np.amax(prob_list_2)
max_prob_3 = np.amax(prob_list_3)
out_label_0 = labels["{}".format(np.argmax(prob_list_0, axis=0))]
out_label_1 = labels["{}".format(np.argmax(prob_list_1, axis=0))]
out_label_2 = labels["{}".format(np.argmax(prob_list_2, axis=0))]
out_label_3 = labels["{}".format(np.argmax(prob_list_3, axis=0))]
label = out_label_0
prob = max_prob_0
if(prob < max_prob_1 and max_prob_1 > max_prob_2 and max_prob_1 > max_prob_3):
prob = max_prob_1
label = out_label_1
elif(prob < max_prob_2 and max_prob_2 > max_prob_3 and max_prob_2 > max_prob_1):
prob = max_prob_2
label = out_label_2
elif(prob < max_prob_3 and max_prob_3 > max_prob_1 and max_prob_3 > max_prob_2):
prob = max_prob_3
label = out_label_3
return label, prob
def cleanUp(frames, model):
temp_frames = []
temp_frames.extend(frames)
temp_label = ''
temp_prob = 0
if(model is None):
temp_frames = []
return temp_frames, temp_label, temp_prob
while(len(temp_frames) != 0):
temp_frames = recalculateFrames(temp_frames)
if(len(temp_frames) != 0):
# Preprocess
set0, set1, set2, set3 = preprocessData(temp_frames)
# Classify
temp_label, temp_prob = classification(set0, set1, set2, set3, model)
temp_frames = []
return temp_frames, temp_label, temp_prob
while cap.isOpened():
success, image = cap.read()
if not success:
break
# Flip the image horizontally for a later selfie-view display, and convert the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to pass by reference.
image.flags.writeable = False
results = hands.process(image)
if(results.multi_handedness):
if(len(results.multi_handedness) == 1):
isMultiHand = False
else:
isMultiHand = True
# results.multi_handedness[0] is first detected hand
if(results.multi_handedness[0].classification[0].index == 0): # Index 0 is Left, 1 is Right
rightHandFirst = False
else:
rightHandFirst = True
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
rightHandPoints = []
leftHandPoints = []
rightVectors = []
leftVectors = []
rightCheckPoints = []
leftCheckPoints = []
for hand, hand_landmarks in enumerate(results.multi_hand_landmarks):
if(rightHandFirst): # First hand (0) is Right, Second hand (1) is Left
if(hand == 0):
for idx, landmark in enumerate(hand_landmarks.landmark):
rightHandPoints.append((landmark.x, landmark.y))
else:
for idx, landmark in enumerate(hand_landmarks.landmark):
leftHandPoints.append((landmark.x, landmark.y))
else: # First hand (0) is Left, Second hand (1) is Right
if(hand == 0):
for idx, landmark in enumerate(hand_landmarks.landmark):
leftHandPoints.append((landmark.x, landmark.y))
else:
for idx, landmark in enumerate(hand_landmarks.landmark):
rightHandPoints.append((landmark.x, landmark.y))
if(isMultiHand):
if(hand == 1):
if(len(rightHandPoints) != 0 and len(leftHandPoints) != 0):
rightVectors = generatePointVectors(rightHandPoints, rightKeyFrames)
rightCheckPoints = generateCheckPoints(rightHandPoints)
leftVectors = generatePointVectors(leftHandPoints, leftKeyFrames)
leftCheckPoints = generateCheckPoints(leftHandPoints)
if(len(rightKeyFrames) == 48):
rightKeyFrames = recalculateFrames(rightKeyFrames)
print("Right Frame Cycled:", len(rightKeyFrames))
if(len(leftKeyFrames) == 48):
leftKeyFrames = recalculateFrames(leftKeyFrames)
print("Left Frame Cycled:", len(leftKeyFrames))
if(addRightFrame == True or addLeftFrame == True):
rightKeyFrames.extend(rightVectors)
rightKeyCheckPoints.extend(rightCheckPoints)
leftKeyFrames.extend(leftVectors)
leftKeyCheckPoints.extend(leftCheckPoints)
print("Right Added: ", len(rightKeyFrames), "Left Added: ", len(leftKeyFrames))
# Preprocess
r_set0, r_set1, r_set2, r_set3 = preprocessData(rightKeyFrames)
l_set0, l_set1, l_set2, l_set3 = preprocessData(leftKeyFrames)
# Classify
rightLabel, rightProb = classification(r_set0, r_set1, r_set2, r_set3, modelRight)
leftLabel, leftProb = classification(l_set0, l_set1, l_set2, l_set3, modelLeft)
addRightFrame = False
addLeftFrame = False
else:
if(len(rightKeyCheckPoints) == 0):
rightKeyCheckPoints.extend(rightCheckPoints)
else:
addRightFrame, r_c_x, r_c_y, r_p_x, r_p_y = checkPreviousFrame(rightCheckPoints, rightKeyCheckPoints)
if(len(leftKeyCheckPoints) == 0):
leftKeyCheckPoints.extend(leftCheckPoints)
else:
addLeftFrame, l_c_x, l_c_y, l_p_x, l_p_y = checkPreviousFrame(leftCheckPoints, leftKeyCheckPoints)
if(addRightFrame == True or addLeftFrame == True):
rightKeyCheckPoints = []
leftKeyCheckPoints = []
if(len(rightKeyFrames) == 48):
rightKeyFrames = recalculateFrames(rightKeyFrames)
print("Right Frame Cycled:", len(rightKeyFrames))
if(len(leftKeyFrames) == 48):
leftKeyFrames = recalculateFrames(leftKeyFrames)
print("Left Frame Cycled:", len(leftKeyFrames))
cv2.circle(image, (int(r_c_x * width), int(r_c_y * height)), 3, (255, 0, 0), 2)
cv2.circle(image, (int(l_c_x * width), int(l_c_y * height)), 3, (255, 255, 0), 2)
cv2.circle(image, (int(r_p_x * width), int(r_p_y * height)), 3, (0, 255, 0), 2)
cv2.circle(image, (int(l_p_x * width), int(l_p_y * height)), 3, (0, 255, 255), 2)
else:
if(len(rightHandPoints) != 0):
rightVectors = generatePointVectors(rightHandPoints, rightKeyFrames)
rightCheckPoints = generateCheckPoints(rightHandPoints)
if(addRightFrame == True):
rightKeyFrames.extend(rightVectors)
rightKeyCheckPoints.extend(rightCheckPoints)
print("Right Frame Added: ", len(rightKeyFrames))
# Preprocess
r_set0, r_set1, r_set2, r_set3 = preprocessData(rightKeyFrames)
# Classify
rightLabel, rightProb = classification(r_set0, r_set1, r_set2, r_set3, modelRight)
leftLabel = ''
leftProb = 0
addRightFrame = False
else:
if(len(rightKeyCheckPoints) == 0):
rightKeyCheckPoints.extend(rightCheckPoints)
else:
addRightFrame, r_c_x, r_c_y, r_p_x, r_p_y = checkPreviousFrame(rightCheckPoints, rightKeyCheckPoints)
if(addRightFrame == True):
rightKeyCheckPoints = []
if(len(rightKeyFrames) == 48):
rightKeyFrames = recalculateFrames(rightKeyFrames)
print("Right Frame Cycled:", len(rightKeyFrames))
if(len(leftKeyFrames) != 0):
leftKeyFrames, leftLabel, leftProb = cleanUp(leftKeyFrames, None)
leftLabel = ''
if(len(leftHandPoints) != 0):
leftVectors = generatePointVectors(leftHandPoints, leftKeyFrames)
leftCheckPoints = generateCheckPoints(leftHandPoints)
if(addLeftFrame == True):
leftKeyFrames.extend(leftVectors)
leftKeyCheckPoints.extend(leftCheckPoints)
print("Left Frame Added: ", len(leftKeyFrames))
# Preprocess
l_set0, l_set1, l_set2, l_set3 = preprocessData(leftKeyFrames)
# Classify
leftLabel, leftProb = classification(l_set0, l_set1, l_set2, l_set3, modelLeft)
rightLabel = ''
rightProb = 0
addLeftFrame = False
else:
if(len(leftKeyCheckPoints) == 0):
leftKeyCheckPoints.extend(leftCheckPoints)
else:
addLeftFrame, l_c_x, l_c_y, l_p_x, l_p_y = checkPreviousFrame(leftCheckPoints, leftKeyCheckPoints)
if(addLeftFrame == True):
leftKeyCheckPoints = []
if(len(leftKeyFrames) == 48):
leftKeyFrames = recalculateFrames(leftKeyFrames)
print("Left Frame Cycled:", len(leftKeyFrames))
if(len(rightKeyFrames) != 0):
rightKeyFrames, rightLabel, rightProb = cleanUp(rightKeyFrames, None)
rightLabel = ''
cv2.circle(image, (int(r_c_x * width), int(r_c_y * height)), 3, (255, 0, 0), 2)
cv2.circle(image, (int(l_c_x * width), int(l_c_y * height)), 3, (255, 255, 0), 2)
cv2.circle(image, (int(r_p_x * width), int(r_p_y * height)), 3, (0, 255, 0), 2)
cv2.circle(image, (int(l_p_x * width), int(l_p_y * height)), 3, (0, 255, 255), 2)
mp_drawing.draw_landmarks(image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
else:
if(len(rightKeyFrames) != 0):
rightKeyFrames, rightLabel, rightProb = cleanUp(rightKeyFrames, modelRight)
rightLabel = ''
if(len(leftKeyFrames) != 0):
leftKeyFrames, leftLabel, leftProb = cleanUp(leftKeyFrames, modelLeft)
leftLabel = ''
cv2.putText(image, rightLabel, (width - 200, height - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, 1)
cv2.putText(image, str(rightProb), (10, height - 10), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2, 1)
# Calculate FPS
if (time.time() - start_time) > speakWaitTime :
# Speak
if rightLabel:
speakText(rightLabel)
start_time = time.time()
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
hands.close()
cap.release() | bahrain-uob/PoseMate | run_v2.py | run_v2.py | py | 19,687 | python | en | code | 1 | github-code | 36 |
30467293197 | class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def findFrequentTreeSum(self, root: TreeNode):
if not root:
return []
res = {}
self.dfs(root, res)
maxi = max([ele for ele in res.values()])
return [key for key, val in res.items() if val == maxi]
def dfs(self, root, res):
# base case
if not root.left and not root.right:
sum = root.val
if sum in res:
res[sum] += 1
else:
res[sum] = 1
return sum
# general case
left = self.dfs(root.left, res) if root.left else 0
right = self.dfs(root.right, res) if root.right else 0
sum = left + right + root.val
if sum in res:
res[sum] += 1
else:
res[sum] = 1
return sum
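    # Each node is visited once, so the traversal is O(n); `res` maps
    # subtree sum -> frequency, from which the caller picks the mode(s).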
if __name__ == '__main__':
P = TreeNode(5)
P.left = TreeNode(2)
# P.left.left = TreeNode(3)
# P.left.right = TreeNode(4)
# P.left.right.left = TreeNode(6)
# P.left.right.right = TreeNode(7)
# P.left.right.right.right = TreeNode(8)
P.right = TreeNode(-3)
# P.right.left = TreeNode(6)
# P.right.right = TreeNode(6)
s = Solution()
print(s.findFrequentTreeSum(P))
| dundunmao/LeetCode2019 | 508. Most Frequent Subtree Sum.py | 508. Most Frequent Subtree Sum.py | py | 1,346 | python | en | code | 0 | github-code | 36 |
18775996369 | from django.urls import path
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('forms', views.form, name='form'),
path('remove/<todo_id>', views.delete_todo, name='remove'),
path('add_todo', views.add_todo, name='new_todo'),
path('update_todo/<todo_id>', views.edit_todo, name='edit_todo'),
path('chart', views.chart_view, name='chart-view'),
path('products', views.add_product, name='add-product'),
path('management', views.add_managers, name='managers'),
path('depart', views.add_department, name='department'),
]
| Torgbui-Hiram/django_crm | website/urls.py | urls.py | py | 580 | python | en | code | 0 | github-code | 36 |
41329717796 |
import tensorflow as tf
import numpy as np
import cv2
import os
def save_image(path, image) :
extension = os.path.splitext(path)[1]
result, encoded_img = cv2.imencode(extension, image)
if result :
with open(path, "wb") as f :
encoded_img.tofile(f)
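# imencode + tofile is used instead of cv2.imwrite so that paths containing
# non-ASCII characters (e.g. Korean folder names) are written correctly.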
# Read the target name
target = input("대상을 입력하세요 : ")
# Set the image folder path
image_path = "./image/" + target + "_face"
# Iterate over the images in the folder
for i, img in enumerate(os.listdir(image_path)) :
    # Read the image
image = cv2.imdecode(np.fromfile(os.path.join(image_path, img), dtype=np.uint8), cv2.IMREAD_COLOR)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Flip the image horizontally
flipped = tf.image.flip_left_right(gray)
path = "./image/" + target + "_face/flipped" + img
save_image(path, np.array(flipped))
    # Convert the image to grayscale
    grayscaled = tf.image.rgb_to_grayscale(gray)
    path = "./image/" + target + "_face/grayscaled" + img
    save_image(path, np.array(grayscaled))  # save the grayscale result (the original saved `flipped` by mistake)
    # Vary the image saturation
for sf in range(1, 6) :
saturated = tf.image.adjust_saturation(gray, sf)
path = "./image/" + target + "_face/saturated" + str(sf) + img
save_image(path, np.array(saturated))
    # Vary the image brightness
for delta in range(1, 6) :
bright = tf.image.adjust_brightness(gray, delta * 0.1)
path = "./image/" + target + "_face/bright" + str(delta) + img
save_image(path, np.array(bright))
print(i)
| moonsung1234/SimilarityComparisonProject | increase.py | increase.py | py | 1,532 | python | en | code | 1 | github-code | 36 |
1598374603 | import unittest
from inverter import scale_number
class TestScaleNumber(unittest.TestCase):
def setUp(self):
self.field_mapping = {"name": "Current Solar Production (kilowatts)",
"format": "{:.1f}kW",
"divisor": 1000,
"color": "green"}
def test_value_is_scaled(self):
"""test that 1000 is correctly scaled to 1"""
self.value = 1000
self.scaled_number = scale_number(value=self.value,
field_info=self.field_mapping)
self.assertEqual(1, self.scaled_number)
def test_zero_value_is_handled(self):
self.value = 0
self.scaled_number = scale_number(value=self.value,
field_info=self.field_mapping)
self.assertEqual(0, self.scaled_number)
def test_negative_value_is_handled(self):
self.value = -1000
self.scaled_number = scale_number(value=self.value,
field_info=self.field_mapping)
self.assertEqual(-1, self.scaled_number)
if __name__ == '__main__':
unittest.main()
| eastc5/inverter | tests/test_inverter.py | test_inverter.py | py | 1,205 | python | en | code | 0 | github-code | 36 |
12262454581 | import sys
import contentful_management
from env import *
client = contentful_management.Client(MANAGEMENT_API_TOKEN)
space = client.spaces().find(SPACE_ID)
environment = space.environments().find('master')
content_types = environment.content_types().all()
content_type = content_types[0]
if '--test' in sys.argv:
# Test by getting entries and showing local image locations
print('Testing...')
print('Your formation index file is set to: ' + FORMATION_INDEX)
print('Your formation image file directory is set to: ' + FORMATION_IMAGE_DIR)
entries = content_type.entries().all()
sys.exit()
if '--delete-all' in sys.argv:
# Delete all entries
entries = content_type.entries().all()
for entry in entries:
if entry.is_published:
entry.unpublish()
environment.entries().delete(entry.id)
# Delete all assets
assets = environment.assets().all()
for asset in assets:
if asset.is_published:
asset.unpublish()
environment.assets().delete(asset.id)
sys.exit()
with open(FORMATION_INDEX, 'r') as formation_file:
for line in formation_file:
formation_code, formation_name = line.strip().split(None, 1)
formation_size = int(formation_code.split('-')[0])
formation_filename = formation_code + '.png'
print(formation_code, formation_name)
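        # Each index line is assumed to look like "4-1 Star": code "4-1",
        # name "Star", with the size taken from the digits before the dash.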
# Create a new upload to get the image into Contentful
upload = space.uploads().create(FORMATION_IMAGE_DIR + '/' + formation_filename)
# Associate an asset with the uploaded image
new_asset = environment.assets().create(
None,
{
'fields': {
'title': {
'en-US': formation_code + ': ' + formation_name
},
'file': {
'en-US': {
'fileName': formation_filename,
'contentType': 'image/png',
'uploadFrom': upload.to_link().to_json()
}
}
}
}
)
# Process the asset
new_asset.process()
# Wait for the asset processing to be complete
while True:
processed_asset = environment.assets().find(new_asset.id)
try:
if 'url' in processed_asset.file:
break
except:
continue
# And then publish
processed_asset.publish()
# Add an entry that references the asset
entry_attributes = {
'content_type_id': content_type.id,
'fields': {
'name': {
'en-US': formation_name
},
'code': {
'en-US': formation_code
},
'size': {
'en-US': formation_size
},
'diagram': {
'en-US': {
'sys': {
'type': 'Link',
'linkType': 'Asset',
'id': processed_asset.id
}
}
}
}
}
new_entry = environment.entries().create(
None,
entry_attributes
)
new_entry.publish()
| wildlava/skydiving-formations-react | tools/ingest_formations.py | ingest_formations.py | py | 3,444 | python | en | code | 0 | github-code | 36 |
16076363640 | import argparse
import os
import statistics
from typing import Callable
import dotenv
from tqdm import tqdm
from headhunter import get_vacancies_from_hh, fetch_areas_ids, predict_rub_salary_hh
from salary_helpers import create_table
from superjob import get_vacancies_from_sj, predict_rub_salary_sj, fetch_town_ids
PROGRAMMING_LANGUAGES = [
"TypeScript",
"Swift",
"Scala",
"Objective-C",
"Shell",
"Go",
"C",
"C#",
"C++",
"PHP",
"Ruby",
"Python",
"Java",
"JavaScript",
]
def get_vacancies_statistics(
vacancies: dict, predict_rub_salary_method: Callable
) -> dict | None:
salaries = []
for vacancy in vacancies.get("items"):
salary = predict_rub_salary_method(vacancy)
if not salary:
continue
salaries.append(salary)
average_salary = round(statistics.mean(salaries), 0) if salaries else 0
return {
"vacancies_found": vacancies.get("found"),
"vacancies_processed": len(salaries),
"average_salary": int(average_salary),
}
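# Illustration with a hypothetical payload mirroring the HH API shape:
#   get_vacancies_statistics({"found": 2, "items": [v1, v2]}, predict_rub_salary_hh)
#   -> {"vacancies_found": 2, "vacancies_processed": <=2, "average_salary": ...}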
def main():
parser = argparse.ArgumentParser(
description="Collects statistics on salaries of programming languages."
)
parser.add_argument(
"-l",
"--location",
type=str,
help="Search area. (default: Москва)",
default="Москва",
)
args = parser.parse_args()
dotenv.load_dotenv()
salary_statistics = {}
area_ids = fetch_areas_ids(args.location)
town_ids = fetch_town_ids(os.getenv("SJ_API_KEY"), args.location)
for language in tqdm(PROGRAMMING_LANGUAGES):
vacancy_name = "Программист {}".format(language)
vacancies = get_vacancies_from_hh(
text=vacancy_name,
area_ids=area_ids,
)
head_hunter = salary_statistics.setdefault("Head Hunter", {})
head_hunter[language] = (
get_vacancies_statistics(vacancies, predict_rub_salary_hh) or {}
)
vacancies = get_vacancies_from_sj(
os.getenv("SJ_API_KEY"), text=vacancy_name, town_ids=town_ids
)
super_job = salary_statistics.setdefault("Super Job", {})
super_job[language] = (
get_vacancies_statistics(vacancies, predict_rub_salary_sj) or {}
)
for platform_title, stat in salary_statistics.items():
print(create_table(stat, f"{platform_title} {args.location}"))
if __name__ == "__main__":
main()
| shadowsking/salary-statistics | main.py | main.py | py | 2,481 | python | en | code | 0 | github-code | 36 |
26250599999 | import tensorflow as tf
import math
batch_size = 128
vocabulary_size = 106321
embedding_size = 128
graph = tf.Graph()
with graph.as_default():
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
# Ops and variables pinned to the CPU because of missing GPU implementation
with tf.device('/cpu:0'):
# Look up embeddings for inputs.
embeddings = tf.Variable(
tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
embed = tf.nn.embedding_lookup(embeddings, train_inputs)
# Construct the variables for the NCE loss
nce_weights = tf.Variable(
tf.truncated_normal([vocabulary_size, embedding_size],
stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels each
# time we evaluate the loss.
loss = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights,
biases=nce_biases,
labels=train_labels,
inputs=embed,
num_sampled=10,
num_classes=vocabulary_size))
# Construct the SGD optimizer using a learning rate of 1.0.
optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)
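    # A training step would look like the following sketch; generate_batch is
    # assumed to exist elsewhere in this project:
    #   batch_inputs, batch_labels = generate_batch(batch_size)
    #   _, loss_val = sess.run([optimizer, loss],
    #                          feed_dict={train_inputs: batch_inputs,
    #                                     train_labels: batch_labels})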
saver = tf.train.Saver({embeddings.name:embeddings})
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
saver.restore(sess,'/home/dlian/data/location_prediction/gowalla/logdir/model.ckpt')
print(tf.reduce_mean(embeddings).eval())
| DefuLian/script | dl/loc_pred/train.py | train.py | py | 1,739 | python | en | code | 1 | github-code | 36 |
22703498747 | import spacy
import numpy
import os
from numpy import dot
from numpy.linalg import norm
nlp = spacy.load('en_core_web_lg')
def compare_wordlists_by_spacy_vectors(model, wordlist1, wordlist2):
vectorlist1 = model(wordlist1)
vectorlist2 = model(wordlist2) #unicode string
myData = [ ]
for wordvec1 in vectorlist1:
for wordvec2 in vectorlist2:
myData.append([wordvec1, wordvec2, wordvec1.similarity(wordvec2)])
#print(wordvec1, wordvec2)
#print(wordvec1.similarity(wordvec2))
#print("")
return myData
#wordlist1 = (u'illegals refugees immigrants humans')
#wordlist2 = (u'love hate anger disgust')
#compare_wordlists_by_spacy_vectors(nlp, wordlist1, wordlist2)
def compare_within_wordlist_by_spacy_vectors(model, wordlist1):
vectorlist1 = model(wordlist1)
vectorlist2 = model(wordlist1) #unicode string
myData = [ ]
for wordvec1 in vectorlist1:
for wordvec2 in vectorlist2:
myData.append([wordvec1, wordvec2, wordvec1.similarity(wordvec2)])
#print(wordvec1, wordvec2)
#print(wordvec1.similarity(wordvec2))
#print("")
return myData
#wordlist1 = (u'illegals refugees immigrants humans') #list of strings = input at present
#wordlist2 = (u'love hate anger disgust')
#compare_within_wordlist_by_spacy_vectors(nlp, wordlist1)
#compare_within_wordlist_by_spacy_vectors(nlp, wordlist2)
def similar_strings(model, word):
'''
using spaCy .vocab to find top 20 orthographic similar words to an input word
'''
myData = [ ]
# cosine similarity
cosine = lambda v1, v2: dot(v1, v2) / (norm(v1) * norm(v2))
# gather all known words, take only the lowercased versions
allWords = list({w for w in model.vocab if w.has_vector and w.orth_.islower() and w.lower_ != word })
# sort by similarity to word
allWords.sort(key=lambda w: cosine(w.vector, word.vector))
allWords.reverse()
print("Top 20 most similar words to" , word.orth_,":")
for word in allWords[:20]:
myData.append(word.orth_)
return myData
#hit = nlp.vocab[u'hit']
#similar_strings(nlp, hit)
| kariemoorman/iat-weat-wefat | scripts/comparison_functions/spacy_word_comparison_functions.py | spacy_word_comparison_functions.py | py | 2,179 | python | en | code | 0 | github-code | 36 |
33449376249 | import csv
with open("zadanie2.csv", newline='') as file:
reader = csv.reader(file, delimiter=',')
header = next(reader)
wiersze = []
for row in reader:
if row[1] != "":
wiersze.append(row)
wiersze.sort(key=lambda x: int(x[0]) if x[0].isdigit() else -1)
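# The sort above puts rows with a non-numeric ID first (key -1) and keeps
# numeric IDs in ascending order.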
poprzednie_id = None
for row in wiersze:
obecne_id = int(row[0])
if poprzednie_id is not None and poprzednie_id >= obecne_id:
obecne_id = poprzednie_id + 1
row[0] = str(obecne_id)
poprzednie_id = obecne_id
for row in wiersze:
row[1] = row[1].lower()
usuniete_slowa = []
for row in wiersze:
slowa = row[1].split()
nowe_slowa = []
for word in slowa:
add_word = True
for i in range(len(word)-1):
if abs(ord(word[i]) - ord(word[i+1])) == 1:
usuniete_slowa.append((row[0], word))
add_word = False
break
if add_word:
nowe_slowa.append(word)
row[1] = ' '.join(nowe_slowa)
with open('rezultat.csv', mode='w', newline='') as out_file:
writer = csv.writer(out_file, delimiter=',')
writer.writerow(header)
writer.writerows(wiersze)
print("Usunięte wyrazy:")
for word in usuniete_slowa:
print(f"{word[0]}: {word[1]}") | Kubek-3/python | csv.py | csv.py | py | 1,306 | python | pl | code | 0 | github-code | 36 |
10755048516 | #date:21-9-17
import pafy
url="https://www.youtube.com/watch?v=mkKXS0FI_L4"
video=pafy.new(url)
audiostreams = video.audiostreams
for a in audiostreams:
print(a.bitrate, a.extension, a.get_filesize())
########to download audio directly
#audiostreams[1].download()
bestaudio = video.getbestaudio()
bestaudio.bitrate
########To download best audio
#bestaudio.download()
##to see all the streams available
allstreams = video.allstreams
for s in allstreams:
print(s.mediatype, s.extension, s.quality)
| pemagrg1/YoutubeDownloader | download_audio.py | download_audio.py | py | 513 | python | en | code | 1 | github-code | 36 |
31981642211 | #!/usr/bin/python3
def search_replace(my_list, search, replace):
mylist = my_list.copy()
m = mylist.count(search)
if m == 0:
return mylist
for i in range(m):
n = mylist.index(search)
mylist[n] = replace
return mylist
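# Example: search_replace([1, 2, 1], 1, 9) -> [9, 2, 9]; the input list itself
# is left unmodified because a copy is edited.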
| osamuflair/alx-higher_level_programming | 0x04-python-more_data_structures/1-search_replace.py | 1-search_replace.py | py | 261 | python | en | code | 0 | github-code | 36 |
75156395304 | # Кузнецов Денис ИУ7-13Б
# A program that finds the maximum value above the main diagonal
# and the minimum value below the anti-diagonal
from def_fool import check_int_number, check_material_number
def find_max_min(A):
max_el = float('-inf')
min_el = float('+inf')
N = len(A[0])
M = len(A)
if N != M:
        print('The given matrix is not square')
return 0
for i in range(N):
for j in range(i + 1, N):
if A[i][j] > max_el:
max_el = A[i][j]
for i in range(1, N):
for j in range(N - 1, N - 1 - i, -1):
if A[i][j] < min_el:
min_el = A[i][j]
    print('Maximum value above the main diagonal: {:}'.format(max_el))
    print('Minimum value below the anti-diagonal: {:}'.format(min_el))
return max_el, min_el
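# Example: for the matrix [[1, 2], [3, 4]] this prints 2 as the maximum
# above the main diagonal and 4 as the minimum below the anti-diagonal.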
N = input('Enter the order of the square matrix: ')
while not check_int_number(N):
    print('Invalid order of the square matrix')
    N = input('Enter the order of the square matrix: ')
N = int(N)
matrix = []
for i in range(N):
    matrix_str = list(map(str,\
    input('Enter a matrix row consisting of {:} elements: '.format(N)).split()))
    while len(matrix_str) != N:
        print('Invalid matrix row')
        matrix_str = list(map(str,\
        input('Enter a matrix row consisting of {:} elements: '.format(N)).split()))
    for j in range(N):  # j is used so the outer row index i is not clobbered
        while not check_material_number(matrix_str[j]):
            print('Invalid input: every element must be a number')
            matrix_str[j] = input('Re-enter element {:} of the row: '.format(j+1))
        else:
            matrix_str[j] = float(matrix_str[j])
matrix.append(matrix_str)
find_max_min(matrix)
| Denis926178/Python | lab9/lab92.py | lab92.py | py | 2,470 | python | ru | code | 0 | github-code | 36 |
72775441703 | # -*- coding: utf-8 -*-
"""
Created on Sat May 9 07:43:03 2020
@author: Das
"""
# Importing Essential Libraries
import nltk
import random
from nltk.classify.scikitlearn import SklearnClassifier
from sklearn.naive_bayes import MultinomialNB, BernoulliNB
from sklearn.tree import DecisionTreeClassifier
from nltk.tokenize import word_tokenize
#reading file
positiv = open("dataset/positive.txt","r").read()
negativ = open("dataset/negative.txt","r").read()
#list for feature
all_words = []
final = []
#"j" for adjectives by this we're allowing type Adjectives only
#j is adject , r is adverb, v is verb
allwd_word = ["J"]
for p in positiv.split('\n'):
final.append( (p, "pos") ) #adding positive label to data
words = word_tokenize(p) #tokenizing
    pos = nltk.pos_tag(words)  # part-of-speech tag the tokens
for w in pos:
if w[1][0] in allwd_word:
all_words.append(w[0].lower())
for p in negativ.split('\n'):
final.append( (p, "neg") ) #adding negative label to data
words = word_tokenize(p)
pos = nltk.pos_tag(words)
for w in pos:
if w[1][0] in allwd_word:
all_words.append(w[0].lower())
all_words = nltk.FreqDist(all_words) #frequency to the words i.e. count
#limiting features for better results
word_feat = list(all_words.keys())[:5000]
def find_features(document):
words = word_tokenize(document)
features = {}
for w in word_feat:
features[w] = (w in words)
return features
featuresets = [(find_features(rev), category) for (rev, category) in final]
#shuffling features as the first half is positive and second is negative
random.shuffle(featuresets)
#dividing features to training and test set
testing_set = featuresets[10000:]  # the remaining ~664 examples for testing
training_set = featuresets[:10000]  # 10000 examples for training
#applying MultinomialNB to training and testing its accuracy
#gives accuracy = 71-72%
"""MNB_clf = SklearnClassifier(MultinomialNB())
MNB_clf.train(training_set)
print("MNB_classifier accuracy percent:", (nltk.classify.accuracy(MNB_clf, testing_set))*100)
"""
#applying BernoulliNB to training and testing its accuracy
#gives accuracy = 72-74%
BNB_clf = SklearnClassifier(BernoulliNB())
BNB_clf.train(training_set)
#print("BernoulliNB_classifier accuracy percent:", (nltk.classify.accuracy(BNB_clf, testing_set))*100)
#applying Decision Tree to training and testing its accuracy
#gives accuracy = 62-65%
"""dct_clf = SklearnClassifier(DecisionTreeClassifier())
dct_clf.train(training_set)
print("Decision Tree Classifier accuracy percent:", (nltk.classify.accuracy(dct_clf, testing_set))*100)"""
def sentiment(text):
feats = find_features(text)
v = BNB_clf.classify(feats)
return v
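# e.g. sentiment("This movie was engaging and beautifully acted") is expected
# to return "pos" (the actual label depends on the shuffled training split,
# so treat this call as illustrative).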
| DasBhai/MNCRankingRealtime | mod_sentiment.py | mod_sentiment.py | py | 2,815 | python | en | code | 0 | github-code | 36 |
6243299984 | import requests
import pprint
import matplotlib.pyplot as plt
from tabulate import tabulate
import numpy as np
print('')
print('- GLOBAL INFO -')
print('')
# CONNECT WITH THE API
url = 'https://api.coingecko.com/api/v3/coins/categories'
r = requests.get(url)
response = r.json()
# pprint.pprint(response)
# exit()
# GET TOTAL MARKET CAP
url1 = 'https://api.coingecko.com/api/v3/global'
r1 = requests.get(url1)
response1 = r1.json()
total_marketcap = round(response1['data']['total_market_cap']['usd'], 3)
total_volume = round(response1['data']['total_volume']['usd'], 3)
print('Total Crypto MarketCap: {0:12,.3f}'.format(total_marketcap))
print('Total Crypto 24h Volume: {0:12,.3f}'.format(total_volume))
bb = round(total_volume / total_marketcap * 100, 2)
print('Total Volume 24h / Total MarketCap (%): ', bb ,'%')
print('')
# GET THE NAME OF THE SECTORES
sector_l = list()
id_l = list()
marketcap_l = list()
data_for_table_l = list()
market_share_perc_l = list()
volume_24h_l = list()
volume_24h_perc_l = list()
perc_vol_marketcap_l = list()
market_cap_changes_24h_l = list()
n_l = list()
positive_marketcap_changes = list()
negative_marketcap_changes = list()
for i in range(len(response)):
v = response[i]['name']
id = response[i]['id']
n = i + 1
try:
market_cap = round(float(response[i]['market_cap']), 3)
market_cap_changes_24h = round(float(response[i]['market_cap_change_24h']), 3)
except:
market_cap = 0
market_cap_changes_24h = 0
continue
perc = round(market_cap / total_marketcap * 100, 2)
try:
volume_24h = round(float(response[i]['volume_24h']), 3)
except:
volume_24h = 0
perc_volume = round(volume_24h / total_volume * 100, 2)
if market_cap != 0:
perc_vol_marketcap = round(volume_24h / market_cap * 100, 2)
else:
perc_vol_marketcap = 0
# INSERT INTO LISTS
sector_l.append(v)
marketcap_l.append(market_cap)
market_share_perc_l.append(perc)
volume_24h_l.append(volume_24h)
volume_24h_perc_l.append(perc_volume)
perc_vol_marketcap_l.append(perc_vol_marketcap)
id_l.append(id)
market_cap_changes_24h_l.append(market_cap_changes_24h)
n_l.append(n)
if market_cap_changes_24h > 0:
positive_marketcap_changes.append(market_cap_changes_24h)
elif market_cap_changes_24h <0:
negative_marketcap_changes.append(market_cap_changes_24h)
# CALCULATIONS
if market_cap / 1000000 < 1000:
marketcap2 = str(round(market_cap / 1000000, 3)) + ' M'
else:
marketcap2 = str(round(market_cap/1000000000, 3)) + ' B'
if volume_24h / 1000000 < 1000:
volume_24h2 = str(round(volume_24h / 1000000, 3)) + ' M'
else:
volume_24h2 = str(round(volume_24h / 1000000000, 3)) + ' B'
a = (n, v, id, marketcap2, perc, volume_24h2, perc_volume, perc_vol_marketcap, market_cap_changes_24h)
data_for_table_l.append(a)
# PRINT MORE VALUES
avg_marketcap_perc = round(np.average(market_cap_changes_24h_l), 3)
print('Avg MCap 24h %: ', avg_marketcap_perc, '%')
# guard against empty lists: np.average([]) returns nan with a RuntimeWarning
positive_avg_marketcap_perc = round(np.average(positive_marketcap_changes), 3) if positive_marketcap_changes else 0.0
print('Positive sectors - Avg MCap 24h %: ', positive_avg_marketcap_perc, '%')
negative_avg_marketcap_perc = round(np.average(negative_marketcap_changes), 3) if negative_marketcap_changes else 0.0
print('Negative sectors - Avg MCap 24h %: ', negative_avg_marketcap_perc, '%')
print('')
# PRINT VALUES IN TABLE
a = input('Do you want to see the values in a table (yes/no): ')
if a == 'yes':
head = ['n',
'Sector',
'id',
'MarketCap',
'MCap-TotMCap %',
'Volume 24h',
'Vol-TotVol %',
'Vol-MCap %',
'MCap 24h %'
]
print(tabulate(data_for_table_l, headers=head, tablefmt='grid'))
# q1 = input('Do you wanna order by the MCap 24h % (yes/no): ')
# if q1 == 'yes':
# sorted_list = sorted(data_for_table_l, key=lambda x: x[8], reverse=True)
# head = ['n',
# 'Sector',
# 'id',
# 'MarketCap',
# 'MCap-TotMCap %',
# 'Volume 24h',
# 'Vol-TotVol %',
# 'Vol-MCap %',
# 'MCap 24h %'
# ]
# print(tabulate(sorted_list, headers=head, tablefmt='grid'))
#
# # CREATE GRAPH TO GET ALL INFO
#
# print('')
# b1 = input('Graphs (yes/no): ')
# if b1 == 'yes':
# b = input('Graph by market cap (yes/no): ')
# if b == 'yes':
# fig, (ax1, ax2) = plt.subplots(2, sharex=True)
# fig.suptitle ('All crypto sectores')
#
# ax1.bar(sector_l, marketcap_l)
# ax2.bar(sector_l, marketcap_l)
#
# plt.yscale('log')
# plt.xticks(fontsize=8, rotation='vertical')
# plt.ylabel('MarketCap')
#
#
# plt.tight_layout()
# plt.subplots_adjust(hspace=0.05)
# plt.show()
#
# c = input('Graph Market share (yes/no): ')
# if c =='yes':
# plt.bar(sector_l, market_share_perc_l)
# plt.title('Market share by sectores')
# plt.ylabel('%')
# plt.xticks(fontsize=8, rotation='vertical')
#
# plt.tight_layout()
# plt.show()
#
# d = input('Graph for MarketCap and Volume (yes/no): ')
# if d == 'yes':
# fig, (ax1, ax2, ax3) = plt.subplots(3, sharex=True)
# fig.suptitle('Info for all sectores')
#
# size = np.arange(len(sector_l))
#
# bar1 = ax1.bar(size, marketcap_l, 0.35, label='MarketCap')
# bar2 = ax1.bar(size + 0.25, volume_24h_l, 0.35, label='Volume 24h')
#
# ax1.legend((bar1, bar2), ('MarketCap', 'Volume 24h'))
#
# bar3 = ax2.bar(size, market_share_perc_l, 0.35, label = 'Market share %')
# bar4 = ax2.bar(size + 0.5, volume_24h_perc_l, 0.35, label='Vol/Total Volume %')
# bar5 = ax2.bar(size + 0.25, perc_vol_marketcap_l, 0.35, label = 'Vol/MarketCap %')
#
# ax2.legend((bar3, bar4, bar5), ('Market share %', 'Vol/Total Volume %', 'Vol/MarketCap %'))
#
# bar6 = ax3.bar(size, market_share_perc_l, 0.35, label = 'Market share %')
# bar7 = ax3.bar(size + 0.5, volume_24h_perc_l, 0.35, label='Vol/Total Volume %')
# bar8 = ax3.bar(size + 0.25, perc_vol_marketcap_l, 0.35, label = 'Vol/MarketCap %')
#
# ax3.legend((bar6, bar7, bar8), ('Market share %', 'Vol/Total Volume %', 'Vol/MarketCap %'), prop={'size':6})
#
# plt.yscale('log')
#
# plt.xticks(size + 0.25, sector_l, fontsize=8, rotation='vertical')
# plt.subplots_adjust(hspace=0.05)
# plt.show()
print('\n-- DATABASE INFORMATION \n')
import sqlite3
# Create a connection to the SQLite database and create a cursor object
conn = sqlite3.connect("crypto_data.db")
cursor = conn.cursor()
# Get the current date
import datetime
current_date = datetime.datetime.now().strftime("%Y_%m_%d")
# Function to check if a column exists in a table
def column_exists(cursor, table_name, column_name):
cursor.execute(f"PRAGMA table_info({table_name})")
columns = [column[1] for column in cursor.fetchall()]
return column_name in columns
# Create market_cap table if it doesn't exist
cursor.execute("""
CREATE TABLE IF NOT EXISTS market_cap (
sector_id TEXT PRIMARY KEY,
sector_name TEXT
)
""")
print("Created market_cap table if not exists")
# Create daily_volume table if it doesn't exist
cursor.execute("""
CREATE TABLE IF NOT EXISTS daily_volume (
sector_id TEXT PRIMARY KEY,
sector_name TEXT
)
""")
print("Created daily_volume table if not exists")
# Add a prefix to the current_date to avoid column names starting with a number
current_date_column = f"date_{current_date}"
# Add a column for the current date if it doesn't exist
if not column_exists(cursor, "market_cap", current_date_column):
cursor.execute(f"ALTER TABLE market_cap ADD COLUMN {current_date_column} REAL")
print(f"Added column {current_date_column} to market_cap table")
if not column_exists(cursor, "daily_volume", current_date_column):
cursor.execute(f"ALTER TABLE daily_volume ADD COLUMN {current_date_column} REAL")
print(f"Added column {current_date_column} to daily_volume table")
# Update market cap and daily volume information for each sector
for i in range(len(response)):
sector_id = response[i]['id']
sector_name = response[i]['name']
try:
market_cap = round(float(response[i]['market_cap']), 3)
except:
market_cap = None
try:
volume_24h = round(float(response[i]['volume_24h']), 3)
except:
volume_24h = None
print(f"Processing sector: {sector_name}")
# Insert or update the sector in the market_cap table
cursor.execute("""
INSERT OR IGNORE INTO market_cap (sector_id, sector_name)
VALUES (?, ?)
""", (sector_id, sector_name))
if market_cap is not None:
cursor.execute(f"""
UPDATE market_cap
SET {current_date_column} = ?
WHERE sector_id = ?
""", (market_cap, sector_id))
print(f"Updated market cap for {sector_name} on {current_date}")
# Insert or update the sector in the daily_volume table
cursor.execute("""
INSERT OR IGNORE INTO daily_volume (sector_id, sector_name)
VALUES (?, ?)
""", (sector_id, sector_name))
if volume_24h is not None:
cursor.execute(f"""
UPDATE daily_volume
SET {current_date_column} = ?
WHERE sector_id = ?
""", (volume_24h, sector_id))
print(f"Updated daily volume for {sector_name} on {current_date}")
# Commit the changes and close the connection
conn.commit()
conn.close()
print("\n Changes committed and connection closed")
| jbbaptista/Personally | Sectores_coingecko/Add_daily_info_database.py | Add_daily_info_database.py | py | 9,661 | python | en | code | 0 | github-code | 36 |
41019665619 | import os
from pathlib import Path
import structlog
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve(strict=True).parent.parent.parent
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"rest_framework",
"rest_framework_api_key",
"drf_spectacular",
"django_assets",
"django_celery_beat",
"django_celery_results",
"django_extensions",
"django_filters",
"django_admin_filters",
"crispy_forms",
"jsonify",
"import_export",
"gerrit",
"tracker",
"hotfix",
"panel",
"release_dashboard",
"build",
"buildinfo",
"release_changed",
"repoapi",
]
MIDDLEWARE = (
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django_structlog.middlewares.RequestMiddleware",
)
DJANGO_STRUCTLOG_CELERY_ENABLED = True
ROOT_URLCONF = "repoapi.urls"
LOGIN_URL = "rest_framework:login"
LOGOUT_URL = "rest_framework:logout"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": ["repoapi/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "repoapi.wsgi.application"
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = True
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"django_assets.finders.AssetsFinder",
)
STATIC_ROOT = BASE_DIR / "static_media/"
REST_FRAMEWORK = {
"PAGE_SIZE": 10,
"DEFAULT_PAGINATION_CLASS": "rest_framework"
".pagination.LimitOffsetPagination",
"DEFAULT_FILTER_BACKENDS": (
"django_filters.rest_framework.DjangoFilterBackend",
),
"DEFAULT_SCHEMA_CLASS": "drf_spectacular.openapi.AutoSchema",
}
SPECTACULAR_SETTINGS = {
"TITLE": "RepoApi",
"DESCRIPTION": "repoapi, one ring to rule them all",
"VERSION": "1.0.0",
"CONTACT": {
"email": "development@sipwise.com",
"url": "https://www.sipwise.com/",
},
"LICENSE": {
"name": "GPLv3",
"url": "https://www.gnu.org/licenses/gpl-3.0.en.html",
},
}
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {
"plain_console": {
"()": structlog.stdlib.ProcessorFormatter,
"processor": structlog.dev.ConsoleRenderer(),
},
},
"handlers": {
"console": {
"class": "logging.StreamHandler",
"formatter": "plain_console",
}
},
"loggers": {
"django_structlog": {
"handlers": ["console"],
"level": "INFO",
"propagate": False,
},
"repoapi": {
"handlers": ["console"],
"level": os.getenv("DJANGO_LOG_LEVEL", "INFO"),
},
},
}
JENKINS_TOKEN = "sipwise_jenkins_ci"
CRISPY_TEMPLATE_PACK = "bootstrap3"
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERY_ACCEPT_CONTENT = ["application/json"]
CELERY_RESULT_BACKEND = "django-db"
CELERY_BEAT_SCHEDULER = "django_celery_beat.schedulers:DatabaseScheduler"
API_KEY_CUSTOM_HEADER = "HTTP_API_KEY"
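# Django exposes request headers via request.META with an HTTP_ prefix, so
# HTTP_API_KEY corresponds to clients sending an "Api-Key: <key>" header
# (this is the header djangorestframework-api-key will look up).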
| sipwise/repoapi | repoapi/settings/common.py | common.py | py | 4,245 | python | en | code | 2 | github-code | 36 |
file = open('file.txt', 'r')
file2 = file.read()  # read the whole file once; everything below works on this string
print(file2)
print("------------------------")
for l in file2.splitlines():  # the file handle is exhausted after read(), so iterate the string
    data = l.split(' ')
    print(data)
    print("--->>>" + l)
file2 = file2.strip()
for palabra in file2.split(' '):
    if palabra.isalpha():
        print(palabra)
    else:
        print(">" + palabra + "<")
file.close()
7796422428 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : 保留最大的数.py
# @Author: smx
# @Date : 2019/8/27
# @Desc  : keep the largest possible number after deleting k digits
from collections import Counter  # used by solution() to count digit frequencies
# Issue encountered: syntax errors / illegal out-of-bounds array access --> the input was wrong: the input comes on two lines! The problem statement and the given test cases do not necessarily match!
# Issue encountered: time limit exceeded! --> replace the double for loop with a `not in` check
# Issue encountered: wrong answer! -->
# My own idea was wrong: the answer cannot come from sorting; the larger digits have to be moved to the front
# Also wrong: it is not about removing elements larger than the next element; the kept elements must be larger than the remaining ones
# 8215492
# 5
# 92 -- is correct, but I did not realize it myself!
def solution(str_n, str_k):
k = int(str_k)
count = Counter(str_n)
count = sorted(dict(count).items(), key=lambda x: x[0])
# print(count)
remove_list = []
for each in count:
# print(each)
if k >= each[1]:
remove_list.append(each[0])
k -= each[1]
else:
break
# print(remove_list)
new_string = ''
for each in str_n:
if each not in remove_list:
new_string += each
return new_string
def solution1(str_n, str_k):
if int(str_k) - 1 == len(str_n):
return max(str_n)
remove_list = sorted(str_n)[:int(str_k)]
leave_list = []
for i in range(len(str_n)):
if str_n[i] not in remove_list:
leave_list.append(str_n[i])
return ''.join(leave_list)
def solution2(str_n, k):
new_string = []
n = len(str_n)
for i in range(n):
while k > 0 and new_string and new_string[-1] < str_n[i]:
new_string.pop()
k -= 1
new_string.append(str_n[i])
return ''.join(new_string[:len(new_string) - k])
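# solution2 keeps the largest number after deleting k digits by maintaining a
# monotonically decreasing stack, e.g. solution2("3251", 1) == "351" (the test
# case in the comments below) and solution2("1924", 2) == "94".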
if __name__ == '__main__':
str_n = input()
k = int(input())
# str_n, str_k = input().strip().split(' ')
new_string = solution2(str_n, k)
print(new_string)
#
# 3251
# 1
# 351   (correct answer)
#
# 3251
# 1
# 325   (output of an earlier, wrong attempt)
| 20130353/Leetcode | target_offer/数组/保留最大的数.py | 保留最大的数.py | py | 2,015 | python | zh | code | 2 | github-code | 36 |
43751185703 | from __future__ import unicode_literals
import os
import tempfile
import unittest
from textwrap import dedent
import mock
import six
import rows
import rows.plugins.postgresql
import rows.plugins.utils
import tests.utils as utils
from rows import fields
from rows.plugins.postgresql import pgconnect
from rows.utils import Source
class PluginPostgreSQLTestCase(utils.RowsTestMixIn, unittest.TestCase):
plugin_name = "postgresql"
override_fields = {
"bool_column": fields.BoolField,
"percent_column": fields.FloatField,
}
uri = os.environ["POSTGRESQL_URI"]
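    # POSTGRESQL_URI is expected in the usual URI form, e.g.
    # postgres://user:password@localhost:5432/database (example value)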
expected_meta = {
"imported_from": "postgresql",
"source": Source(uri=uri, plugin_name=plugin_name, encoding=None),
}
def get_table_names(self):
connection = pgconnect(self.uri)
cursor = connection.cursor()
cursor.execute(rows.plugins.postgresql.SQL_TABLE_NAMES)
header = [item[0] for item in cursor.description]
result = [dict(zip(header, row))["tablename"] for row in cursor.fetchall()]
cursor.close()
connection.close()
return result
def tearDown(self):
connection = pgconnect(self.uri)
for table in self.get_table_names():
if table.startswith("rows_"):
cursor = connection.cursor()
cursor.execute("DROP TABLE " + table)
cursor.close()
connection.commit()
connection.close()
def test_imports(self):
self.assertIs(
rows.import_from_postgresql, rows.plugins.postgresql.import_from_postgresql
)
self.assertIs(
rows.export_to_postgresql, rows.plugins.postgresql.export_to_postgresql
)
@mock.patch("rows.plugins.postgresql.create_table")
def test_import_from_postgresql_uses_create_table(self, mocked_create_table):
mocked_create_table.return_value = 42
kwargs = {"encoding": "test", "some_key": 123, "other": 456}
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_1")
result = rows.import_from_postgresql(self.uri, table_name="rows_1", **kwargs)
self.assertTrue(mocked_create_table.called)
self.assertEqual(mocked_create_table.call_count, 1)
self.assertEqual(result, 42)
call = mocked_create_table.call_args
meta = call[1].pop("meta")
source = meta.pop("source")
expected_meta = self.expected_meta.copy()
expected_source = expected_meta.pop("source")
self.assertEqual(call[1], kwargs)
self.assertEqual(meta, expected_meta)
self.assertEqual(expected_source.uri, source.uri)
    @unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skipping test")
@mock.patch("rows.plugins.postgresql.create_table")
def test_import_from_postgresql_retrieve_desired_data(self, mocked_create_table):
mocked_create_table.return_value = 42
connection, table_name = rows.export_to_postgresql(
utils.table, self.uri, table_name="rows_2"
)
self.assertTrue(connection.closed)
# import using uri
table_1 = rows.import_from_postgresql(
self.uri, close_connection=True, table_name="rows_2"
)
call_args = mocked_create_table.call_args_list[0]
self.assert_create_table_data(call_args, expected_meta=self.expected_meta)
# import using connection
connection = pgconnect(self.uri)
table_2 = rows.import_from_postgresql(
connection, close_connection=False, table_name="rows_2"
)
self.assertFalse(connection.closed)
connection_type = type(connection)
connection.close()
call_args = mocked_create_table.call_args_list[1]
meta = call_args[1].pop("meta")
call_args[1]["meta"] = {}
self.assert_create_table_data(call_args, expected_meta={})
self.assertTrue(isinstance(meta["source"].fobj, connection_type))
def test_postgresql_injection(self):
with self.assertRaises(ValueError):
rows.import_from_postgresql(
self.uri, table_name=('table1","postgresql_master')
)
with self.assertRaises(ValueError):
rows.export_to_postgresql(
utils.table, self.uri, table_name='table1", "postgresql_master'
)
    @unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skipping test")
def test_export_to_postgresql_uri(self):
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_3")
table = rows.import_from_postgresql(self.uri, table_name="rows_3")
self.assert_table_equal(table, utils.table)
    @unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skipping test")
def test_export_to_postgresql_connection(self):
connection = pgconnect(self.uri)
rows.export_to_postgresql(
utils.table, connection, close_connection=True, table_name="rows_4"
)
table = rows.import_from_postgresql(self.uri, table_name="rows_4")
self.assert_table_equal(table, utils.table)
    @unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skipping test")
def test_export_to_postgresql_create_unique_table_name(self):
first_table = utils.table
second_table = utils.table + utils.table
table_names_before = self.get_table_names()
rows.export_to_postgresql(
first_table, self.uri, table_name_format="rows_{index}"
)
table_names_after = self.get_table_names()
rows.export_to_postgresql(
second_table, self.uri, table_name_format="rows_{index}"
)
table_names_final = self.get_table_names()
diff_1 = list(set(table_names_after) - set(table_names_before))
diff_2 = list(set(table_names_final) - set(table_names_after))
self.assertEqual(len(diff_1), 1)
self.assertEqual(len(diff_2), 1)
new_table_1 = diff_1[0]
new_table_2 = diff_2[0]
result_first_table = rows.import_from_postgresql(
self.uri, table_name=new_table_1
)
result_second_table = rows.import_from_postgresql(
self.uri, table_name=new_table_2
)
self.assert_table_equal(result_first_table, first_table)
self.assert_table_equal(result_second_table, second_table)
    @unittest.skipIf(six.PY2, "psycopg2 on Python2 returns binary, skipping test")
def test_export_to_postgresql_forcing_table_name_appends_rows(self):
repeat = 3
for _ in range(repeat):
rows.export_to_postgresql(utils.table, self.uri, table_name="rows_7")
expected_table = utils.table
for _ in range(repeat - 1):
expected_table += utils.table
result_table = rows.import_from_postgresql(self.uri, table_name="rows_7")
self.assertEqual(len(result_table), repeat * len(utils.table))
self.assert_table_equal(result_table, expected_table)
@mock.patch("rows.plugins.postgresql.prepare_to_export")
def test_export_to_postgresql_prepare_to_export(self, mocked_prepare_to_export):
encoding = "iso-8859-15"
kwargs = {"test": 123, "parameter": 3.14}
mocked_prepare_to_export.return_value = iter(
rows.plugins.utils.prepare_to_export(utils.table)
)
rows.export_to_postgresql(
utils.table, self.uri, encoding=encoding, table_name="rows_8", **kwargs
)
self.assertTrue(mocked_prepare_to_export.called)
self.assertEqual(mocked_prepare_to_export.call_count, 1)
call = mocked_prepare_to_export.call_args
self.assertEqual(call[0], (utils.table,))
kwargs["encoding"] = encoding
self.assertEqual(call[1], kwargs)
def test_import_from_postgresql_query_args(self):
connection, table_name = rows.export_to_postgresql(
utils.table, self.uri, close_connection=False, table_name="rows_9"
)
table = rows.import_from_postgresql(
connection,
query="SELECT * FROM rows_9 WHERE float_column > %s",
query_args=(3,),
)
for row in table:
self.assertTrue(row.float_column > 3)
def test_pgimport_force_null(self):
temp = tempfile.NamedTemporaryFile()
filename = "{}.csv".format(temp.name)
temp.close()
self.files_to_delete.append(filename)
with open(filename, mode="wb") as fobj:
fobj.write(
dedent(
"""
field1,field2
"","4"
,2
"""
)
.strip()
.encode("utf-8")
)
rows.utils.pgimport(
filename=filename,
database_uri=self.uri,
table_name="rows_force_null",
)
table = rows.import_from_postgresql(self.uri, "rows_force_null")
self.assertIs(table[0].field1, None)
self.assertEqual(table[0].field2, 4)
self.assertIs(table[1].field1, None)
self.assertEqual(table[1].field2, 2)
| turicas/rows | tests/tests_plugin_postgresql.py | tests_plugin_postgresql.py | py | 9,172 | python | en | code | 851 | github-code | 36 |
7749159191 | from flask import Blueprint, request
from flask import jsonify, render_template
from authlib.specs.rfc6749 import OAuth2Error
from authlib.flask.oauth2 import current_token
from ..models import OAuth2Client, OAuth2Token, User
from ..auth import current_user
from ..forms.auth import ConfirmForm, LoginConfirmForm
from ..services.oauth2 import authorization, scopes, require_oauth
from urlparse import parse_qs  # Python 2 module; the Python 3 equivalent lives in urllib.parse
from flask_cors import CORS
bp = Blueprint('oauth2', __name__)
CORS(bp)
@bp.route('/authorize', methods=['GET', 'POST'])
def authorize():
curr_url = '/oauth2/authorize?' + request.query_string
##app.logger.info("method oauth2/authorize")
if current_user:
##app.logger.info("confirm form")
form = ConfirmForm()
else:
##app.logger.info("login confirm form")
form = LoginConfirmForm()
if form.validate_on_submit():
##app.logger.info("submit")
if form.confirm.data:
# granted by current user
grant_user = current_user
else:
grant_user = None
##app.logger.info("calling lib function 'create_authorization_response'")
ret = authorization.create_authorization_response(grant_user)
##app.logger.info("return")
return ret
try:
##app.logger.info("not submit")
##app.logger.info("calling lib function 'validate_authorization_request'")
grant = authorization.validate_authorization_request()
except OAuth2Error as error:
# TODO: add an error page
payload = dict(error.get_body())
return jsonify(payload), error.status_code
client = OAuth2Client.get_by_client_id(request.args['client_id'])
##app.logger.info("render")
return render_template(
'account/authorize.html',
grant=grant,
scopes=scopes,
client=client,
form=form,
)
@bp.route('/token', methods=['POST'])
def issue_token():
##app.logger.info("method oauth2/token")
##app.logger.info("calling lib function 'create_token_response'")
ret = authorization.create_token_response()
##app.logger.info("return")
return ret
@bp.route('/revoke', methods=['POST'])
def revoke_token():
return authorization.create_revocation_response()
@bp.route('/revoke_bearer', methods=['POST'])
def revoke_token_bearer():
##app.logger.info("method oauth2/revoke_bearer")
##app.logger.info("query token")
token = OAuth2Token.query_token(parse_qs(request.query_string)['token'][0])
if token:
##app.logger.info("revoke")
token.revoke()
##app.logger.info("return")
return jsonify(token)
return jsonify({'error': 'Invalid token supplied'}), 401
@bp.route('/tokeninfo', methods=['GET'])
def get_token_info():
##app.logger.info("method oauth2/tokeninfo")
if 'access_token' in request.args:
##app.logger.info("query token")
token = OAuth2Token.query_token(request.args['access_token'])
if token and token.user_id:
##app.logger.info("query user")
user = User.query.get(token.user_id)
udict = user.to_dict(request.host)
udict.update(token.to_dict())
##app.logger.info("return user info")
return jsonify(udict)
return jsonify({'error': 'Invalid token supplied'}), 401
return jsonify({'error': 'Invalid parameters supplied'}), 400
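# Example request (the token value is hypothetical):
#   GET /oauth2/tokeninfo?access_token=abc123
# returns the token owner's info as JSON, or 401 for an invalid token.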
@bp.route('/emailinfo', methods=['GET'])
def get_email_info():
##app.logger.info("method oauth2/emailinfo")
if 'email' in request.args and 'access_token' in request.args:
##app.logger.info("query token")
token = OAuth2Token.query_token(request.args['access_token'])
email = request.args['email']
if token and token.user_id:
##app.logger.info("query user")
user = User.query_email(email)
if user:
udict = user.to_dict(request.host)
##app.logger.info("return user info")
return jsonify(udict)
return jsonify({'error': 'Invalid email supplied'}), 404
return jsonify({'error': 'Invalid token supplied'}), 401
return jsonify({'error': 'Invalid parameters supplied'}), 400
| itsocietysu/EACH-OAuth2.0 | website/routes/oauth2.py | oauth2.py | py | 4,226 | python | en | code | 0 | github-code | 36 |
18846832139 | def play(player):
global input_
new_y, new_x = input("Enter the coordinates: ").split()
if not(new_y.isdigit() and new_x.isdigit()):
print("You should enter numbers!")
else:
y, x = int(new_y), int(new_x)
cell_index = 3 * (y - 1) + (x - 1)
if not(x > 0 and x < 4 and y > 0 and y < 4):
print("Coordinates should be from 1 to 3!")
elif (input_ [cell_index] == "X") or (input_[cell_index] == "O"):
print("This cell is occupied! Choose another one!")
else:
input_[cell_index] = player
print_field(input_)
def empty_field():
return [" ", " ", " ", " ", " ", " ", " ", " ", " "]
def print_field(input_):
print("---------")
for i in range(3):
str_ = ""
for j in range(3):
str_ = str_ + input_[i * 3 + j] + " "
print("| " + str_ + "|")
print("---------")
def is_win_x(input_):
win_x = False
for i in range(3):
        # rows occupy indices 3*i, 3*i+1 and 3*i+2
        if (input_[3 * i] == "X" and input_[3 * i + 1] == "X" and input_[3 * i + 2] == "X"):
win_x = True
if (input_[0 + i] == "X" and input_[3 + i] == "X" and input_[6 + i] == "X"):
win_x = True
if (input_[0] == "X" and input_[4] == "X" and input_[8] == "X"):
win_x = True
if (input_[2] == "X" and input_[4] == "X" and input_[6] == "X"):
win_x = True
return win_x
def is_win_o(input_):
win_o = False
for i in range(3):
if (input_[0 + 3 * i] == "O" and input_[1 + 3 * i] == "O" and input_[2 + 3 * i] == "O"):
win_o = True
if (input_[0 + i] == "O" and input_[3 + i] == "O" and input_[6 + i] == "O"):
win_o = True
if (input_[0] == "O" and input_[4] == "O" and input_[8] == "O"):
win_o = True
if (input_[2] == "O" and input_[4] == "O" and input_[6] == "O"):
win_o = True
return win_o
input_ = empty_field()
print_field(input_)
player = "X"
while True:
xs = [x for x in input_ if x == "X"]
os = [x for x in input_ if x == "O"]
win_x = is_win_x(input_)
win_o = is_win_o(input_)
if (abs(len(xs) - len(os)) > 1) or (win_x and win_o):
print("Impossible")
break
else:
if (not win_x and not win_o) and (len(xs) + len(os)) < 9:
play(player)
else:
if win_x:
print("X wins")
elif win_o:
print("O wins")
else:
print("Draw")
break
if player == "X":
player = "O"
else:
player = "X"
| lika010/SimpleTicTacToe | Simple Tic-Tac-Toe/task/tictactoe.py | tictactoe.py | py | 2,600 | python | en | code | 0 | github-code | 36 |
30076689122 | # general imports
from pathlib import Path
import os
import re
import argparse
from time import time
import multiprocessing as mp
from functools import partial
from collections import Counter
# processing imports
import numpy as np
import pandas as pd
from tqdm import tqdm
from collections import OrderedDict
from difflib import SequenceMatcher
# pdfminer imports
from pdfminer.pdfdocument import PDFDocument
from pdfminer.pdfparser import PDFParser
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.pdfpage import PDFPage
from pdfminer.converter import PDFPageAggregator
from pdfminer.layout import LTPage, LTChar, LTAnno, LAParams, LTTextBox, LTTextLine
# local imports
import rse_watch.sententizer as sententizer
def get_list_of_pdfs_filenames(dirName):
"""
For the given path, get the List of all files in the directory tree
"""
paths = []
for path, subdirs, files in os.walk(dirName):
for name in files:
if (name.lower().endswith(".pdf")):
paths.append((Path(path + "/" + name)))
return paths
def get_companies_metadata_dict(config):
""" Read companies metadata from config and turn it into dictionnary"""
companies_metadata_dict = pd.read_csv(config.annotations_file,
sep=";",
encoding='utf-8-sig').set_index("project_denomination").T.to_dict()
return companies_metadata_dict
def clean_child_str(child_str):
child_str = ' '.join(child_str.split()).strip()
# dealing with hyphens:
# 1. Replace words separators in row by a different char than hyphen (i.e. longer hyphen)
child_str = re.sub("[A-Za-z] - [A-Za-z]", lambda x: x.group(0).replace(' - ', ' – '), child_str)
# 2. Attach the negative term to the following number, # TODO: inutile ? Enlever ?
child_str = re.sub("(- )([0-9])", r"-\2", child_str)
return child_str
class PDFPageDetailedAggregator(PDFPageAggregator):
"""
Custom class to parse pdf and keep position of parsed text lines.
"""
def __init__(self, rsrcmgr, pageno=1, laparams=None):
PDFPageAggregator.__init__(self, rsrcmgr, pageno=pageno, laparams=laparams)
self.rows = []
self.page_number = 0
self.result = ""
def receive_layout(self, ltpage):
def render(item, page_number):
if isinstance(item, LTPage) or isinstance(item, LTTextBox):
for child in item:
render(child, page_number)
elif isinstance(item, LTTextLine):
child_str = ''
for child in item:
if isinstance(child, (LTChar, LTAnno)):
child_str += child.get_text()
child_str = clean_child_str(child_str)
if child_str:
# bbox == (pagenb, x1, y1, x2, y2, text)
row = (page_number, item.bbox[0], item.bbox[1], item.bbox[2], item.bbox[3], child_str)
self.rows.append(row)
for child in item:
render(child, page_number)
return
render(ltpage, self.page_number)
self.page_number += 1
self.rows = sorted(self.rows, key=lambda x: (x[0], -x[2]))
self.result = ltpage
def get_raw_content_from_pdf(input_file, rse_range=None):
"""
Parse pdf file, within rse range of pages if needed, and return list of rows with all metadata
:param input_file: PDF filename
:param rse_range: (nb_first_page_rse:int, nb_last_page_rse:int) tuple, starting at 1
:return: list of rows with (pagenb, x1, y1, x2, y2, text) and page_nb starts at 0!
"""
assert input_file.name.endswith(".pdf")
fp = open(input_file, 'rb')
parser = PDFParser(fp)
doc = PDFDocument(parser)
rsrcmgr = PDFResourceManager()
laparams = LAParams()
device = PDFPageDetailedAggregator(rsrcmgr, laparams=laparams)
interpreter = PDFPageInterpreter(rsrcmgr, device)
if rse_range is not None and rse_range != "":
# start at zero to match real index of pages
pages_selection = range(rse_range[0] - 1, (rse_range[1] - 1) + 1)
else:
pages_selection = range(0, 10000)
first_page_nb = pages_selection[0] + 1 # to start indexation at 1
# Checked: only useful pages are actually parsed.
for nb_page_parsed, page in enumerate(PDFPage.create_pages(doc)):
if nb_page_parsed in pages_selection:
interpreter.process_page(page)
device.get_result()
return device, first_page_nb
def clean_paragraph(p):
""" Curate paragraph object before save, in particular deal with hyphen and spaces """
# Attach together words (>= 2 char to avoid things like A minus, B minus...)
# that may have been split at end of row like géographie = "géo - graphie"
# real separator have been turned into longer hyphen during parsing to avoid confusion with those.
# Accents accepted thks to https://stackoverflow.com/a/24676780/8086033
w_expr = "(?i)(?:(?![×Þß÷þø])[-'a-zÀ-ÿ]){2,}"
p["paragraph"] = re.sub("{} - {}".format(w_expr, w_expr),
lambda x: x.group(0).replace(' - ', ''),
p["paragraph"])
# reattach words that were split, like Fort-Cros = "Fort- Cros"
p["paragraph"] = re.sub("{}- {}".format(w_expr, w_expr),
lambda x: x.group(0).replace('- ', '-'),
p["paragraph"])
return p
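# Examples of what clean_paragraph repairs (from the comments above):
# "géo - graphie" -> "géographie" and "Fort- Cros" -> "Fort-Cros".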
def get_paragraphs_from_raw_content(device, idx_first_page):
    """
    From parsed data with positional information, aggregate into paragraphs using a simple rationale:
    a paragraph break is declared when the vertical gap to the next line is large relative to the
    line heights (the thresholds are hardcoded below).
    :param device: PDFPageDetailedAggregator holding the parsed rows
    :param idx_first_page: real page number of the first parsed page
    :return: dataframe with one row per detected paragraph
    """
# GROUPING BY COLUMN
column_text_dict = OrderedDict() # keep order of identification in the document.
APPROXIMATION_FACTOR = 10 # to allow for slight shifts at beg of aligned text
N_MOST_COMMON = 4 # e.g. nb max of columns of text that can be considered
LEFT_SECURITY_SHIFT = 20 # to include way more shifted text of previous column
counter = Counter()
item_holder = []
item_index = 0
it_was_last_item = False
while "There are unchecked items in device.rows":
# add the item to the list of the page
try:
(page_id, x_min, _, _, _, _) = device.rows[item_index]
        except IndexError:
            print("Wrong index {} for device.rows of len {}".format(item_index, len(device.rows)))
            print("was that the last item? : {}".format(it_was_last_item))
raise
item_holder.append(device.rows[item_index])
# increment the count of x_min
counter[(x_min // APPROXIMATION_FACTOR) * APPROXIMATION_FACTOR] += 1
# go to next item
it_was_last_item = item_index == (len(device.rows) - 1)
if not it_was_last_item:
item_index += 1
(next_page_id, _, _, _, _, _) = device.rows[item_index]
changing_page = (next_page_id > page_id)
        if it_was_last_item or changing_page:  # approximate next page; it_was_last_item is checked first because changing_page is only set when there is a next item
top_n_x_min_approx = counter.most_common(N_MOST_COMMON)
df = pd.DataFrame(top_n_x_min_approx, columns=["x_min_approx", "freq"])
df = df[df["freq"] > df["freq"].sum() * (1 / (N_MOST_COMMON + 1))].sort_values(by="x_min_approx")
x_min_approx = (df["x_min_approx"] - LEFT_SECURITY_SHIFT).values
x_min_approx = x_min_approx * (x_min_approx > 0)
left_x_min_suport = np.hstack([x_min_approx,
[10000]])
def x_grouper(x_min):
delta = left_x_min_suport - x_min
x_group = left_x_min_suport[np.argmin(delta < 0) * 1 - 1]
return x_group
# iter on x_group and add items
page_nb = idx_first_page + page_id
column_text_dict[page_nb] = {}
for item in item_holder:
(page_id, x_min, y_min, x_max, y_max, text) = item
page_nb = idx_first_page + page_id
x_group = x_grouper(x_min)
if x_group in column_text_dict[page_nb].keys():
column_text_dict[page_nb][x_group].append((y_min, y_max, text))
else:
column_text_dict[page_nb][x_group] = [(y_min, y_max, text)]
if it_was_last_item:
break
else:
# restart from zero for next page
counter = Counter()
item_holder = []
# CREATE THE PARAGRAPHS IN EACH COLUMN
# define minimal conditions to define a change of paragraph:
# Being spaced by more than the size of each line (min if different to account for titles)
pararaphs_list = []
paragraph_index = 0
for page_nb, x_groups_dict in column_text_dict.items():
for x_group_name, x_groups_data in x_groups_dict.items():
x_groups_data = sorted(x_groups_data, key=lambda x: x[0],
reverse=True) # sort vertically, higher y = before
x_groups_data_paragraphs = []
p = {"y_min": x_groups_data[0][0],
"y_max": x_groups_data[0][1],
"paragraph": x_groups_data[0][2]}
previous_height = p["y_max"] - p["y_min"]
previous_y_min = p["y_min"]
for y_min, y_max, paragraph in x_groups_data[1:]:
current_height = y_max - y_min
current_y_min = y_min
max_height = max(previous_height, current_height)
relative_var_in_height = (current_height - previous_height) / float(
current_height) # Was min before ???
relative_var_in_y_min = abs(current_y_min - previous_y_min) / float(current_height)
positive_change_in_font_size = (relative_var_in_height > 0.05)
change_in_font_size = abs(relative_var_in_height) > 0.05
different_row = (relative_var_in_y_min > 0.7)
large_gap = (relative_var_in_y_min > 1.2)
artefact_to_ignore = (len(paragraph) <= 2) # single "P" broke row parsing in auchan dpef
if not artefact_to_ignore:
if (positive_change_in_font_size and different_row) or large_gap: # always break
# break paragraph, start new one
# print("break",relative_var_in_height, relative_var_in_y_min, paragraph)
p = clean_paragraph(p)
x_groups_data_paragraphs.append(p)
p = {"y_min": y_min,
"y_max": y_max,
"paragraph": paragraph}
else:
# if change_in_font_size: # to separate titles
# paragraph = paragraph + ".\n"
# paragraph continues
p["y_min"] = y_min
p["paragraph"] = p["paragraph"] + " " + paragraph
previous_height = current_height
previous_y_min = current_y_min
# add the last paragraph of column
p = clean_paragraph(p)
x_groups_data_paragraphs.append(p)
# structure the output
for p in x_groups_data_paragraphs:
pararaphs_list.append({"paragraph_id": paragraph_index,
"page_nb": page_nb,
"x_group": x_group_name,
"y_min_paragraph": round(p["y_min"]),
"y_max_paragraph": round(p["y_max"]),
"paragraph": p["paragraph"]})
paragraph_index += 1
df_par = pd.DataFrame(data=pararaphs_list,
columns=["paragraph_id",
"page_nb",
"paragraph",
"x_group",
"y_min_paragraph",
"y_max_paragraph"])
return df_par
def parse_paragraphs_from_pdf(input_file, rse_ranges=None):
"""
From filename, parse pdf and output structured paragraphs with filter on rse_ranges uif present.
:param input_file: filename ending with ".pdf" or ".PDF".
:param rse_ranges: "(start, end)|(start, end)"
:return: df[[page_nb, page_text]] dataframe
"""
rse_ranges_list = list(map(eval, rse_ranges.split("|")))
df_paragraphs_list = []
for rse_range in rse_ranges_list:
        device, idx_first_page = get_raw_content_from_pdf(input_file, rse_range=rse_range)
        df_par = get_paragraphs_from_raw_content(device, idx_first_page)
        df_paragraphs_list.append(df_par)
df_par = pd.concat(df_paragraphs_list, axis=0, ignore_index=True)
return df_par
def compute_string_similarity(a, b):
"""Compares two strings and returns a similarity ratio between 0 and 1 """
return SequenceMatcher(None, a, b).ratio()
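# e.g. compute_string_similarity("abcd", "abce") == 0.75
# (SequenceMatcher ratio = 2 * matching_chars / total_length = 2 * 3 / 8)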
def cut_footer(df_par, p=0.8, verbose=False):
"""
Cut the paragraph with lowest y_min if other paragraphs are similar.
The similarity is measured with function compute_string_similarity
"""
len_first = len(df_par)
footers = []
deno = df_par['project_denomination'].values[0]
c = 0
while True:
c += 1
len_start = len(df_par)
y_bottom = df_par['y_min_paragraph'].min()
y_top = df_par[df_par['y_min_paragraph'] == y_bottom]['y_max_paragraph'].min()
DSmin = df_par[(df_par['y_max_paragraph'] == y_top) & (df_par['y_min_paragraph'] == y_bottom)].copy()
if len(DSmin) == 1 and c == 1:
if verbose:
print('\n', deno)
return df_par
if len(DSmin) == 1:
break
for candidate in DSmin['paragraph'].values:
DSmin['is_foot'] = DSmin['paragraph'].apply(lambda x: compute_string_similarity(str(x), candidate) > p)
count = len((DSmin[DSmin['is_foot'] == True]))
if count > 1:
footers.append((candidate, count))
index_foot = DSmin[DSmin['is_foot'] == True].index
break
else:
DSmin = DSmin.drop(DSmin.index[0])
if len(footers) == 0:
if verbose:
print('\n', deno)
return df_par
len_end = (len(df_par[~df_par.index.isin(index_foot)]))
df_par = df_par[~df_par.index.isin(index_foot)]
if len_start == len_end:
break
# Below part is for human check that the function works properly
# if verbose:
# len_last = len(df_par)
# S = sum([i for _,i in footers])
# print('\n',deno)
# print(f"Removed {len_first-len_last} lines. {len_first-len_last==S}")
# if footers!=[]:
# L = [foot+" x "+ str(count) for foot, count in footers]
# print("Footers(s) --->\n",'\n '.join(L))
return df_par
def cut_header(df_par, p=0.8, verbose=False):
"Same as function cut_footer() but for headers"
len_first = len(df_par)
headers = []
deno = df_par['project_denomination'].values[0]
c = 0
while True:
c += 1
len_start = len(df_par)
y_top = df_par['y_max_paragraph'].max()
y_bottom = df_par[df_par['y_max_paragraph'] == y_top]['y_min_paragraph'].max()
DSmax = df_par[(df_par['y_max_paragraph'] == y_top) & (df_par['y_min_paragraph'] == y_bottom)].copy()
if len(DSmax) == 1 and c == 1:
if verbose:
print('\n', deno)
return df_par
if len(DSmax) == 1:
break
for candidate in DSmax['paragraph'].values:
DSmax['is_head'] = DSmax['paragraph'].apply(lambda x: compute_string_similarity(str(x), candidate) > p)
count = len((DSmax[DSmax['is_head'] == True]))
if count > 1:
headers.append((candidate, count))
index_head = DSmax[DSmax['is_head'] == True].index
break
else:
DSmax = DSmax.drop(DSmax.index[0])
if len(headers) == 0:
if verbose:
print('\n', deno)
return df_par
len_end = (len(df_par[~df_par.index.isin(index_head)]))
df_par = df_par[~df_par.index.isin(index_head)]
if len_start == len_end:
break
# Below part is for human check that the function works properly
# if verbose:
# len_last = len(df_par)
# S = sum([i for _, i in headers])
# print('\n', deno)
# print(f"Removed {len_first - len_last} lines. {len_first - len_last == S}")
# if headers != []:
# L = [head + " x " + str(count) for head, count in headers]
# print("Header(s) --->\n", '\n '.join(L))
return df_par
def extract_company_metadata(dpef_path, companies_metadata_dict):
""" From metadata dict and a dpef file, get the relevant info."""
project_denomination = dpef_path.name.split("_")[0]
document_year = dpef_path.name.split("_")[1]
company_name = companies_metadata_dict[project_denomination]["denomination"]
company_sectors = None
try:
company_sectors = companies_metadata_dict[project_denomination]["sectors"].split(";")
except KeyError:
print("Sectors not found for company {} for year {}".format(project_denomination,
document_year))
rse_ranges = None
try:
rse_ranges = companies_metadata_dict[project_denomination]["rse_ranges_" + document_year]
except KeyError:
print("RSE ranges not found for company {} for year {}".format(project_denomination,
document_year))
return company_name, project_denomination, company_sectors, document_year, rse_ranges
def get_paragraphs_dataframe_from_pdf(dpef_path, companies_metadata_dict):
"""
Parse a pdf and return a pandas df with paragraph level parsed text.
:param dpef_path_dict_annotations: (inpout_file, dict_annotations) tuple
:return:
"""
company_name, project_denomination, company_sectors, document_year, rse_ranges \
= extract_company_metadata(dpef_path, companies_metadata_dict)
t = time()
print("Start parsing - {} [{}]".format(
project_denomination,
dpef_path.name)
)
df_par = parse_paragraphs_from_pdf(dpef_path, rse_ranges=rse_ranges)
df_par.insert(0, "project_denomination", project_denomination)
df_par.insert(1, "company_sector", ";".join(company_sectors))
df_par.insert(2, "document_year", document_year)
df_par = df_par.drop_duplicates(['paragraph'])
df_par = cut_footer(df_par, verbose=True)
df_par = cut_header(df_par, verbose=True)
print("End parsing - {} [{}] - took {} seconds".format(
project_denomination,
dpef_path.name,
        int(time() - t))  # elapsed seconds
)
return df_par
def get_sentences_dataframe_from_pdf(config, dpef_path):
""" Parse a pdf and return a pandas df with sentence level parsed text"""
companies_metadata_dict = get_companies_metadata_dict(config)
df_par = get_paragraphs_dataframe_from_pdf(dpef_path, companies_metadata_dict)
df_sent = sententizer.get_sentence_dataframe_from_paragraph_dataframe(df_par, config)
return df_sent
def get_sentences_from_all_pdfs(config):
"""
Parses all dpefs into a sentence-level format and save the resulting csv according to config.
"""
companies_metadata_dict = get_companies_metadata_dict(config)
all_dpef_paths = get_list_of_pdfs_filenames(config.dpef_dir)
all_dpef_paths = [dpef_path for dpef_path in all_dpef_paths if
dpef_path.name.split("_")[0] in companies_metadata_dict.keys()]
print(all_dpef_paths)
# PARALLELIZATION
parallel_get_sentences_dataframe_from_pdf = partial(get_sentences_dataframe_from_pdf,
config)
n_cores = mp.cpu_count() - 1 or 1
with mp.Pool(n_cores) as pool:
print("Multiprocessing with {} cores".format(n_cores))
df_sents = list(
tqdm(
pool.imap(
parallel_get_sentences_dataframe_from_pdf,
all_dpef_paths
),
total=len(all_dpef_paths)
)
)
# concat
df_sents = pd.concat(df_sents, axis=0, ignore_index=True)
# create parent folder
pickle_path = config.parsed_sent_file.parent
pickle_path.mkdir(parents=True, exist_ok=True)
# save to csv
df_sents.to_csv(config.parsed_sent_file, sep=";", index=False, encoding='utf-8-sig')
return df_sents
def run(config):
"""
Parse the pdfs into structured csv formats
: param conf: conf object with relative paths.
:param task: "parser", "sententizer" or "both" ; Whether to parse
pdfs, sententize the paragraphs, or do both.
"""
df_sents = get_sentences_from_all_pdfs(config)
print(df_sents.shape)
return df_sents
| dataforgoodfr/batch7_rse | polls/rse_model/rse_watch/pdf_parser.py | pdf_parser.py | py | 21,499 | python | en | code | 5 | github-code | 36 |
30631135881 | '''Given a number x,
determine whether the given number is Armstrong number or not.
A positive integer of n digits is called
an Armstrong number of order n (order is number of digits) if.
abcd... = pow(a,n) + pow(b,n) + pow(c,n) + pow(d,n) + .... '''
# #program1 incomplete program tryout
# #input
# num = input("Enter the armstrong number: ")
#
# def check_armstrong(num):
# for ch in num:
# ch = int(ch)
# sh = pow(ch,3)
# print(ch)
# print(sh)
#
# check_armstrong(num)
#
# digit = input("Enter the digits: ")
# def power(a, n):
# res = pow(a,n)
# print(res)
# if (a == res):
# print("Yes it is armstrong number")
# else:
# print("It is not armstrong")
# power(1,3)
#program2
# #input
# num = int(input("Enter a Armstrong number:"))
# temp = num
# sum = 0
#
# #operation
# while temp>0:
# digit = temp%10
# sum+= pow(digit, 3)
# temp//=10
# if (num==sum):
# print("{0}".format(sum))
# print("Yes, it is an armstrong number")
# else:
# print("{0}".format(sum))
# print("NO, it is not an armstrong number")
# program3
num = int(input("Enter the armstrong number: "))
order = len(str(num))  # per the definition above, each digit is raised to the number of digits
temp = num
sum = 0
while temp > 0:
    digit = temp % 10
    sum += pow(digit, order)
    temp //= 10
if(num==sum):
print("{0}".format(sum))
print("Yes, it is an Armstrong number")
else:
print("{0}".format(sum))
print("No, it is not armstrong number")
| nishanthhollar/geeksforgeeks_python_basic_programs | basic_programs/armstrongnum.py | armstrongnum.py | py | 1,496 | python | en | code | 0 | github-code | 36 |
1832479916 | def repeat(n):
a=[]
while n!=0:
ele = n%10
if ele in a:
return 0
else:
a.append(ele)
n=n//10
return 1
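# repeat() returns 1 when n has no repeated digits and 0 otherwise,
# e.g. repeat(123) == 1 and repeat(121) == 0, so `res` below counts the
# numbers in [n, m] whose digits are all distinct.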
n=int(input())
m=int(input())
res=0
for i in range(n,m+1):
res=res+repeat(i)
print(res)
| lakshman533/Python_programs | countnn.py | countnn.py | py | 282 | python | en | code | 0 | github-code | 36 |
15680118405 | import json
import logging
from .codebase import CodebaseAgent
from .gpt_agent import GPTAgent, Role
class ProgrammerAgent:
def __init__(self, codebase_repo_path, gpt_api_key):
self.codebase_agent = CodebaseAgent(codebase_repo_path)
        self.gpt_agent = GPTAgent(api_key=gpt_api_key, role=Role.PROGRAMMER, enable_memory=True)
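    # Illustrative usage sketch (the path and API key are placeholders):
    #   agent = ProgrammerAgent("/path/to/repo", gpt_api_key="sk-...")
    #   code = agent.get_code("write a function that parses the config file")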
def get_code(self, task_description):
"""Get code from a task description by querying a directory structure.
This function gathers the project info using a CodebaseAgent, then asks
a GPTAgent for the code. The function also handles JSONDecodeError and
general exceptions, logging the errors when they occur.
Args:
task_description (str): The task description to get code for.
Returns:
code_content (str): The requested code content, or 'No code found' if unavailable.
"""
try:
# Gather project info
project_structure = self.codebase_agent.get_directory_structure()
# Formulate query for GPTAgent
query = f'Given the project structure {project_structure}, {task_description}.'
response_content_str = self.gpt_agent.ask_query(query)
logging.info(f'Raw Response: {response_content_str}')
# Deserialize the response
response_content = json.loads(response_content_str)
# Retrieve code from response
code_content = response_content.get('code', 'No code found')
return code_content
except json.JSONDecodeError as e:
logging.error(f'JSON Decode Error: {e}')
logging.error(f'Failed Task Description: {task_description}')
except Exception as e:
logging.error(f'An unexpected error occurred: {e}') | csmathguy/SAGA | src/agent/programmer.py | programmer.py | py | 1,812 | python | en | code | 0 | github-code | 36 |
# This is our basic snake game.
# We will utilise python and some libraries to make it.
import turtle  # this imports the turtle to be used in our game.

t = turtle.Turtle()
# For now, just draw a simple coloured path as a starting point.
for c in ['green', 'blue', 'red', 'white']:
    t.color(c)
    t.forward(50)
    t.left(75)
| josephkb87/PythonBasics | Application And Worked Examples/Basic Personal Snake Game/main.py | main.py | py | 270 | python | en | code | 3 | github-code | 36 |
70441946023 | '''
Summary
Attempt #1
Your own answer?: No
Reference: https://leetcode.com/problems/longest-substring-without-repeating-characters/discuss/1731/A-Python-solution-85ms-O(n)
Runtime: 48 ms, faster than 96.47% of Python3 online submissions for Longest Substring Without Repeating Characters.
Memory Usage: 14.4 MB, less than 55.31% of Python3 online submissions for Longest Substring Without Repeating Characters.
Memo: I also came up with the idea of using one pointer at the head of the window and another at the iteration variable while keeping track of each value's latest index,
but somehow failed to compute the window size correctly.
'''
import sys
input = sys.stdin.readline
class Solution:
def __init__(self) -> None:
s = "abcabcbb"
print(self.lengthOfLongestSubstring(s))
def lengthOfLongestSubstring(self, s: str) -> int:
start = maxLength = 0
used = {}
for idx, val in enumerate(s):
if val in used and start <= used[val]:
start = used[val] + 1
else:
maxLength = max(maxLength, idx - start + 1)
used[val] = idx
return maxLength
Solution() | cjy13753/algo-solutions | leetcode/solution_3.py | solution_3.py | py | 1,208 | python | en | code | 0 | github-code | 36 |
26464454997 | import string
import random
def hangman():
turns=int(input("\nIn how many turns do you want to guess the string : "))
print()
length=int(input("\nhow many letters word do you wanna guess? "))
word=''.join(random.choice(string.ascii_lowercase) for x in range(length))
w1=word
#print(word)
guesses=""
previous_guesses=""
points=0
failed=0
while turns>0:
print()
x=input("Guess a letter: ")
previous_guesses+=x
if x in w1:
print()
print("status:Correct")
guesses+=x
points+=1
w1=w1.replace(x,'',1)
if len(guesses)==len(word):
break
else:
points-=1
print()
print("Status: Wrong")
failed+=1
print("no.of attempts left : {}".format(turns))
print("no.of Failure attempts: {}".format(failed))
print("previously guessed letters: {}".format(previous_guesses))
turns-=1
if turns!=0 and points>0:
print("\nyou won the game by {} points out of {}".format(points,length))
else:
print("\nsorry {} you lost the game with {} points".format(Name,points))
print("\nThe word is: {}".format(word))
x=input("\nDo you wanna play one more(y/n): ")
if x=="y":
hangman()
Name=input("Enter your Name: ")
print("\nHello!!! {} ,Let\'s play hangman".format(Name))
print(" ")
hangman()
| Naveen2224/Hangman | Hangman.py | Hangman.py | py | 1,474 | python | en | code | 1 | github-code | 36 |
25674262123 |
from odoo import models, fields, api, _
class student(models.Model):
_name = 'student.student'
student1=fields.Char(string='student1')
student2=fields.Char(string='student2')
student3=fields.Char(string='student3')
student4=fields.Char(string='student4',compute='onchange_student4',store=True)
@api.one
@api.depends('student1','student2','student3')
def onchange_student4(self):
if not self.student1 or not self.student2 or not self.student3:
return
self.student4=self.student1+self.student2+self.student3
| maaanas/student | models.py | models.py | py | 577 | python | en | code | 0 | github-code | 36 |
1133144480 | import pyfiglet
import platform
import os
import sys
import socket
import threading
from time import sleep
from queue import Queue
def banner():
banner = pyfiglet.figlet_format('PORT SCANNER')
print('\033[93m'+banner+'\033[m')
ports = Queue()
first_port = 1
last_port = 65535
for i in range(first_port, last_port+1):
ports.put(i)
def clear_screen():
if platform.system() == 'Windows':
os.system('cls')
elif platform.system() == 'Linux':
os.system('clear')
def validate_ip(ip):
    splited_ip = ip.split('.')
    if len(splited_ip) != 4:
        return False
    # every octet must be 1 to 3 digits long
    return all(ip_part.isdigit() and 1 <= len(ip_part) <= 3 for ip_part in splited_ip)
def scan(port):
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
socket.setdefaulttimeout(1)
res = sock.connect_ex((target, port))
if res == 0:
print(f'Port {port} is open!')
sock.close()
except:
pass
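# Each worker thread drains the shared port queue; 500 of them run scan()
# concurrently, which makes the 1-65535 sweep far faster than a serial scan.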
def worker():
while not ports.empty():
port = ports.get()
scan(port)
ports.task_done()
def start_workers():
threads = []
for i in range(500):
thread = threading.Thread(target=worker, daemon=True)
thread.start()
threads.append(thread)
for thread in threads:
thread.join()
def start_scanning():
clear_screen()
banner()
sleep(1)
print(f'Scanning the ports from {first_port} to {last_port}...\n')
start_workers()
banner()
try:
target = sys.argv[1]
except IndexError:
    raise ValueError('\033[91mYou need to pass an IP address as an argument! e.g. '+
                    'python portscanner.py an.ip.address.here\033[m')
print('\033[93m'+
'--------------------Welcome to the port scanner!--------------------'+
'\033[m')
print(f'\n\nThis program will scan now the ports of the address {target}')
while True:
ip_is_okay = input('Is this okay? [Y/N]')
if ip_is_okay.strip().upper() == 'Y':
break
elif ip_is_okay.strip().upper() == 'N':
while True:
new_ip = input('Enter a new IP: ')
if validate_ip(new_ip):
target = new_ip
print('\033[34mYou succefully changed the target IP adress.\033[m')
sleep(1.5)
break
else:
                print('''\033[91mERROR! The IP you entered is invalid. Please write
            a valid IP address, in the format: 000.000.000.000\033[m \n''')
sleep(2)
continue
break
else:
continue
start_scanning()
print('\n\033[93mAll the ports were scanned. Goodbye!\033[m')
| ArthurDeveloper/portscanner-python | portscanner.py | portscanner.py | py | 2,780 | python | en | code | 3 | github-code | 36 |
34683343714 | #!/home/kelsi/.virtualenvs/mtg/bin/python
# -*- coding: utf-8 -*-
from bottle import route, run, template, view
from datetime import date
import sqlite3
con = sqlite3.connect('mtg.db')
con.row_factory = sqlite3.Row
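# sqlite3.Row lets query results be accessed by column name, e.g. row['date']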
cursor = con.cursor()
@route('/hello/<name>')
@view('sample')
def hello(name):
return dict(name=name)
@route('/index')
@view('index')
def index():
return dict(date=date.today())
@route('/tournaments/<date>')
@view('tournament_index')
def tournament_index(date):
import datetime
pretty_date = datetime.date.fromtimestamp(float(date))
#pretty_date = pretty_date.strftime('%B %d, %Y')
cursor.execute('''select * from tourneys where date=?''', (date,))
return dict(date=pretty_date.strftime('%B %d, %Y'), cursor=cursor)
@route('/events/<eventid>')
@view('event_index')
def event_index(eventid):
import datetime
# Get generic tournament info
cursor.execute('''select * from tourneys where id=?''', (eventid,))
tourney_info = cursor.fetchone()
date = datetime.date.fromtimestamp(float(tourney_info['date']))
# Eventually want to show results, bracket
# For now, participants and their basic win/losses and deck
cursor.execute('''select participants.*, users.name, users.id as userid from participants left join users on participants.user_id = users.id where participants.tourney_id=? order by participants.rank''', (eventid,))
return dict(eventid=eventid, cursor=cursor, date=date.strftime('%d/%m/%y'), info=tourney_info)
@route('/users/<userid>')
@view('user_index')
def user_index(userid):
    # Want a view of users
    # With links to tourneys played in and decks used
    cursor.execute('''select * from users where id=?''', (userid,))
    return dict(userid=userid, user=cursor.fetchone(), cursor=cursor)
@route('/cards/<cardid>')
@view('card_stats')
def card_stats(cardid):
# Want a view of individual cards
# Tracking usage over time
# Want to be able to visualize multiple cards on same graph
# To compare usage...
cursor.execute('''select count(*) as decks, sum(quantity) as total from decklists where card=?''', (cardid,))
stats = cursor.fetchone()
return dict(cardid=cardid, stats=stats)
# Want a view of decks
# Need to be able to identify decks first!
# Want to be able to see deck usage over time
# Want to be able to compare to other decks over same time period
run(host='localhost', port=8000)
con.close()
| kelind/bio-mtg | mtg_bottle.py | mtg_bottle.py | py | 2,408 | python | en | code | 0 | github-code | 36 |
13245977761 | #!/usr/bin/env python
# coding: utf-8
# # Jenkins - Monitoring, Building and Running Regressions
#
# This tool was written to ease my daily job because I need to regularly check regression tests that total around 7000. Doing that manually and frequently is a real hassle.
#
# This tool will do the following:
#
# - Run jobs periodically based on cron style configurations
# - Start Jenkins build job to build the jobs
# - Run regressions test jobs
# - Monitor the jobs and re-run those jobs which fail completely or partially
# - Produce reports if requested through
# - Monitor your local Outlook email for commands to start, stop jobs and send reports
#
# Jenkins will be organized the following hierarchical way
#
# - Top View: List of Projects
# - Project View
# - Tab Views - each view represents a branch, i.e., release branch, main branch, feature branch. In the code below, this is called branch view
# - List of Jobs for each tab view
# - Job details consisting of build status, builds, etc
# - There is ONE job for building the application
# - There is ONE job for scheduling the regression job runs
#
# ## Jenkins COLOR DEFINITIONS
#
# Each Jenkins job can have varous color representing the status of the job. Here is a list of them.
#
# - RED("red",Messages._BallColor_Failed(), ColorPalette.RED)
# - RED_ANIME("red_anime",Messages._BallColor_InProgress(), ColorPalette.RED)
# - YELLOW("yellow",Messages._BallColor_Unstable(), ColorPalette.YELLOW)
# - YELLOW_ANIME("yellow_anime",Messages._BallColor_InProgress(), ColorPalette.YELLOW)
# - BLUE("blue",Messages._BallColor_Success(), ColorPalette.BLUE)
# - BLUE_ANIME("blue_anime",Messages._BallColor_InProgress(), ColorPalette.BLUE)
# - GREY("grey",Messages._BallColor_Pending(), ColorPalette.GREY)
# - GREY_ANIME("grey_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
#
# - DISABLED("disabled",Messages._BallColor_Disabled(), ColorPalette.GREY)
# - DISABLED_ANIME("disabled_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
# - ABORTED("aborted",Messages._BallColor_Aborted(), ColorPalette.GREY)
# - ABORTED_ANIME("aborted_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
# - NOTBUILT("nobuilt",Messages._BallColor_NotBuilt(), ColorPalette.GREY)
# - NOTBUILT_ANIME("nobuilt_anime",Messages._BallColor_InProgress(), ColorPalette.GREY)
#
# # How To Use It
#
# The tool will generate a YAML configuration file "jenkins.yaml". Refer to the file for more details.
#
# The jenkins.yaml will have "needChange: Yes". So the first thing to do is:
#
# - Edit jenkins.yaml file to create profiles. Each profile must have the following parameters:
# - jenkinServerUrl: "<master URL of Jenkins server>"
# - userName: "<user name used to login to Jenkins>"
# - password: "<password used to login to Jenkins>"
# - buildJob: "<i.e., .*-Build a regular expression to define the pattern of names of build jobs>"
# - schedulerJob: "<i.e., .*-Scheduler a regular expression to define the pattern of names of scheduler jobs>"
# - regressionJobFilter: "<i.e., (.*Build$|.*Scheduler$) a regular expression defining non-regression jobs>"
# - projectName: "<project name - this is bascially Jenkins top level view name>"
# - branchName: "<tab name or branch name if Jenkins regressions are grouped by branches>"
#
# All the parameters can be defined at the top level or defined at the profile level. For example
#
# ```yaml
# jenkinServerUrl: "http://jenkins.com/"
# userName: "myname"
# password: "mypassword"
#
# buildJob: ".*-Build" # The regular expression patterns, separated by comma, of build jobs
# schedulerJob: ".*-Scheduler" # The regular expression patterns, separated by comma, of scheduler jobs
# skipJob: ".*-MOD" # The regular expression patterns, separated by comma, of jobs to be skipped when rerun
#
# # The false filter for regression jobs. Any job whose name does not match the regular expression
# # is considered a regression job.
# # The patterns, separated by comma.
# regressionJobFilter: ".*Build$,.*Scheduler$"
#
# profiles:
# ReleaseA:
# projectName: "projectA"
# branchName: "Release"
#
# BranchA:
# projectName: "projectA"
# branchName: "Branch"
#
# ReleaseB:
# projectName: "projectB"
# branchName: "Release"
# regressionJobFilter: ".*Build$,.*Scheduler$,.*Others"
# ```
# - Change "needChange: Yes" to "needChange: No"
# - Run the tool as "jenkins_tool.py -p profile_name"
# - If you want to run it from IPython, you can provide the values by changing *argvIPython*. See the Main Program section for details.
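#
# A minimal command-line session might look like this (the profile name is
# illustrative; use one of your own profiles from jenkins.yaml):
#
# ```
# python jenkins_tool.py -l              # list the available profiles
# python jenkins_tool.py -p Release -f   # report failed jobs for one profile
# python jenkins_tool.py -p Release -r   # re-run failed and unstable jobs
# ```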
# In[1]:
import re
import os
import sys
import getopt
import yaml
import collections
import datetime
import bisect
import glob
import json
import jenkinsapi
import itertools
from tabulate import tabulate
from jenkinsapi.jenkins import Jenkins
from collections import abc
class FrozenJSON:
"""A read-only façade for navigating a JSON-like object
using attribute notation.
Credit: "O'Reilly Fluent Python", Luciano Ramalho
http://www.amazon.com/Fluent-Python-Luciano-Ramalho/dp/1491946008
"""
def __init__(self, mapping):
self.__data = dict(mapping)
def __getattr__(self, name):
if hasattr(self.__data, name):
return getattr(self.__data, name)
else:
return FrozenJSON.build(self.__data[name])
@classmethod
def build(cls, obj):
if isinstance(obj, abc.Mapping):
return cls(obj)
elif isinstance(obj, abc.MutableSequence):
return [cls.build(item) for item in obj]
else:
return obj
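# e.g. FrozenJSON.build({"job": {"name": "demo"}}).job.name == "demo"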
# In[2]:
class JenkinsServer(object):
"""
Class representing the Jenkins Server for Branch View
"""
actionTable = {
"red" : { "status" : "Failed"},
"red_anime" : { "status" : "InProgress"},
"yellow" : { "status" : "Unstable"},
"yellow_anime" : { "status" : "InProgress"},
"blue" : { "status" : "Success"},
"blue_anime" : { "status" : "InProgress"},
"grey" : { "status" : "Pending"},
"grey_anime" : { "status" : "InProgress"},
"disabled" : { "status" : "Disabled"},
"disabled_anime" : { "status" : "InProgress"},
"aborted" : { "status" : "Aborted"},
"aborted_anime" : { "status" : "InProgress"},
"nobuilt" : { "status" : "NotBuilt"},
"nobuilt_anime" : { "status" : "InProgress"}
}
commandActor = {
"build" : "build",
"schedule" : "schedule",
"rerun" : "runFailedUnstableJobs",
"failed" : "failedJobReport",
"report" : "jobReport"
}
def __init__(self, jkCfg, profile):
self.jkCfg = jkCfg
self.profile = profile
self._jserver = Jenkins(jkCfg.getValue(profile, "jenkinServerUrl"),
jkCfg.getValue(profile, "userName"),
jkCfg.getValue(profile, "password"))
self._projectView = self._jserver.views[self.jkCfg.getValue(self.profile, "projectName")]
self._branchView = self._projectView.views[self.jkCfg.getValue(self.profile, "branchName")]
def _testConditions(self, rexps, value):
"""
Test the value against a list of regular expressions.
Returns True if any of them matches
"""
if not rexps:
return True
tests = [ re.match(m, value) for m in rexps]
return any(tests)
@property
def jenkinsServer(self):
return self._jserver
@property
def projectView(self):
return self._projectView
@property
def branchView(self):
return self._branchView
def getJobs(self):
"""
Generator returns all types jobs
"""
jlist = self._branchView.get_data(self._branchView.python_api_url(self._branchView.baseurl))["jobs"]
for j in jlist:
job = FrozenJSON(j)
yield job
def getRegressionJobs(self, exclude=None):
"""
        Generator returning regression jobs, whose names usually do not end with "Build" or "Scheduler".
exclude is a list of conditions separated by comma. Specify it to override the value from jenkins.yaml
"""
if not exclude:
exclude = self.jkCfg.getValue(self.profile, "regressionJobFilter")
rexps = exclude.split(",")
        for j in itertools.filterfalse(lambda x: self._testConditions(rexps, x.name), self.getJobs()):
yield j
def jobDetails(self, job):
return (
job.name,
JenkinsServer.actionTable[job.color]["status"],
job.lastBuild.number if job.lastBuild is not None else "",
job.lastStableBuild.number if job.lastStableBuild is not None else "",
job.healthReport[0].description
)
def isQueuedOrRunning(self, job):
j = self._jserver.get_job(job.name)
return j.is_queued_or_running()
def isFailedOrUnstable(self, job):
return "red" in job.color or "yellow" in job.color or "notbuilt" in job.color
def isSuccessful(self, job):
return not self.isFailedOrUnstable(job)
def findJob(self, namePattern):
"""
Find the first job based on the name pattern in regular expression.
namePattern is a list of regular expressions separated by comma.
"""
return next(self.findJobs(namePattern))
def findJobs(self, namePattern):
"""
A generator
Find all the jobs based on the name pattern in regular expression.
namePattern is a list of regular expressions separated by comma.
Specify it to override the value from jenkins.yaml
"""
rexps = namePattern.split(",")
return (x for x in self.getJobs() if self._testConditions(rexps, x.name))
def getBuildJobs(self, namePattern=None):
"""
This is to get the job for the Building Job which builds the application.
de=Nonede=None
namePattern is a list of regular expressions separated by comma. By default, the build job should
have a name like ".*-Build", exclude=None, exclude=None
"""
if not namePattern:
namePattern = self.jkCfg.getValue(self.profile, "buildJob")
return self.findJobs(namePattern)
def getSchedulerJobs(self, namePattern=None):
"""
        This is to get the jobs for the Scheduler Job which schedules the regression runs.
        namePattern is a list of regular expressions separated by comma. By default, the scheduler job should
        have a name like ".*-Scheduler"
"""
if not namePattern:
namePattern = self.jkCfg.getValue(self.profile, "schedulerJob")
return self.findJobs(namePattern)
def getJobsReportShort(self, onlyFailedJobs=False):
"""
THIS IS FAST.
        Generator returning details of jobs. Each item contains the following data:
"Name", "Status", "HealthReport"
If parameter onlyFailedJobs=True is specified, only failed jobs will be reported.
Failed jobs are those with color RED (FAILED) or YELLOW (UNSTABLE)
Use the following to print a pretty-formated report:
print(tabulate(jserver.getJobsReport(), headers=["Name", "Status", "HealthReport"]))
"""
        jobs = self.getJobs()
        for job in jobs:
            healthReport = "-"
            statusValue = JenkinsServer.actionTable[job.color]["status"]
            if self.isFailedOrUnstable(job):
                j = self.branchView.get_data(self.branchView.python_api_url(job.url))
                if len(j["healthReport"]) > 0:
                    healthReport = j["healthReport"][0]["description"]
            if not onlyFailedJobs or self.isFailedOrUnstable(job):
                yield (job.name, statusValue, healthReport)
def jobReport(self):
print(tabulate(self.getJobsReportShort(), headers=["Name", "Status", "HealthReport"]))
def failedJobReport(self):
print(tabulate(self.getJobsReportShort(onlyFailedJobs=True), headers=["Name", "Status", "HealthReport"]))
def anyFailedUnstable(self, skipJob=None):
"""
True if there is any failed or unstable job
"""
rexps = None
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
if skipJob:
rexps = skipJob.split(",")
jobs = self.getJobs()
for job in jobs:
if self.isFailedOrUnstable(job):
if not self._testConditions(rexps, job.name):
return True
return False
def anyFailedUnstableNotRunningOrQueued(self, skipJob=None):
"""
True if there is any failed or unstable job which is not queued or running
"""
rexps = None
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
if skipJob:
rexps = skipJob.split(",")
jobs = self.getJobs()
for job in jobs:
if self.isFailedOrUnstable(job):
if not self._testConditions(rexps, job.name):
if not self.isQueuedOrRunning(job):
return True
return False
def getJobsSlow(self):
"""
Generator returns jobs
"""
for j, url in self._branchView.get_job_dict().items():
job = FrozenJSON(self._branchView.get_data(self._branchView.python_api_url(url)))
yield job
def getJobsReportDetailed(self, onlyFailedJobs=False):
"""
        THIS IS SLOW BECAUSE IT CHECKS THE BUILDS OF EACH JOB
        Generator returning details of jobs. Each item contains the following data:
"Name", "Status", "Last Build", "Last Stable Build", "Report"
If parameter onlyFailedJobs=True is specified, only failed jobs will be reported.
Failed jobs are those with color RED (FAILED) or YELLOW (UNSTABLE)
Use the following to print a pretty-formated report:
print(tabulate(jserver.getJobsReport(), headers=["Name", "Status", "Last Build", "Last Stable Build", "Report"]))
"""
jobs = self.getJobsSlow()
for job in jobs:
if not onlyFailedJobs:
yield self.jobDetails(job)
elif self.isFailedOrUnstable(job):
yield self.jobDetails(job)
else:
continue
def startJob(self, job):
if not self.isQueuedOrRunning(job):
jobBuild = self.jenkinsServer.get_job(job.name)
jobBuild.invoke()
def build(self, verbose=True, namePattern=None):
"""
Start the building jobs to build the applications.
verbose=True will print the status.
"""
for job in self.getBuildJobs(namePattern):
if verbose:
print("Starting building job: {}".format(job.name))
self.startJob(job)
def isBuilding(self, namePattern=None):
"""
Return True if any build job is running
"""
for job in self.getBuildJobs(namePattern):
if self.isQueuedOrRunning(job):
return True
return False
def schedule(self, verbose=True, namePattern=None):
"""
Start the scheduling jobs to run the regressions jobs.
verbose=True will print the status.
"""
for job in self.getSchedulerJobs(namePattern):
if verbose:
print("Starting schedule job: {}".format(job.name))
self.startJob(job)
def isScheduling(self, namePattern=None):
"""
Return True if any scheduling job is running
"""
for job in self.getSchedulerJobs(namePattern):
if self.isQueuedOrRunning(job):
return True
return False
def runFailedUnstableJobs(self, verbose=True, skipJob=None):
"""
Start failed or unstable jobs. Provide regular expressions to exclude any job from being started
skipJob, regular expressions separated by comma define the jobs to be skipped
"""
if not skipJob:
skipJob = self.jkCfg.getValue(self.profile, "skipJob")
rexps = skipJob.split(",")
jobs = self.getJobsReportShort(onlyFailedJobs=True)
for job in jobs:
j = collections.namedtuple("JobTemp", ("name", "status", "healthReport"))(*job)
if not self._testConditions(rexps, j.name):
if verbose:
print("Starting job: {}".format(j.name))
self.startJob(j)
def runIt(self, func):
f = getattr(self, JenkinsServer.commandActor[func], None)
if f is not None:
f()
else:
raise ValueError("ERROR: Bad function name '{} = {}'".format(func, JenkinsServer.commandActor[func]))
# In[3]:
"""
The main program
"""
jenkins_yaml = """---
# Jenkins configuraions
# If this value is Yes, this application will not run.
# So change the values below and then change needChange to "No"
needChange: Yes
#------------------------------------------------------
# Values for variables not defined at the profile level
#------------------------------------------------------
# Jenkins' master URL
jenkinServerUrl: "http://jenkins.xyz.net/jenkins/"
# User name and password to login to Jenkins master server
userName: "<username>"
password: "<password>"
buildJob: ".*-Build" # The regular expression patterns, separated by comma, of build jobs
schedulerJob: ".*-Scheduler" # The regular expression patterns, separated by comma, of scheduler jobs
skipJob: ".*-MOD" # The regular expression patterns, separated by comma, of jobs to be skipped when rerun
# The false filter for regression jobs. Any job whose name does not match the regular expression
# is considered a regression job.
# The patterns, separated by comma.
regressionJobFilter: ".*Build$,.*Scheduler$"
#------------------------------------------------------
# Values defined at the top level will be overridden by
# the values defined in profile level
#------------------------------------------------------
profiles:
Release:
projectName: "<project>" # Main Jenkins' main view, mostly one per project
branchName: "Release" # Jenkins sub-views, mostly one per mercurial branch
Branch:
projectName: "<project>" # Main Jenkins' main view, mostly one per project
branchName: "Branch" # Jenkins sub-views, mostly one per mercurial branch
..."""
info = """
==============================================================================================
A new Jenkins configuration file ./jenkins.yaml has been generated.
Before you continue, modify the file accordingly first.
Check the jenkins.yaml for details.
==============================================================================================
"""
class JKCfg(object):
commandActor = {
"list" : "listProfiles"
}
def __init__(self, jkCfg):
self._jkCfg = jkCfg
def getValue(self, profile, name):
if not profile:
return self._jkCfg.get(name, None)
pd = self._jkCfg["profiles"][profile]
defaultValue = self._jkCfg[name] if name in self._jkCfg else None
return pd.get(name, defaultValue)
def listProfiles(self, printList=True):
"""
if printList = True, the list will be printed out to standard output.
Return a list of tuples (profile name, project name, branch name)
"""
ls = []
for k in self._jkCfg["profiles"].keys():
ls.append((k, self.getValue(k, "projectName"), self.getValue(k, "branchName")))
if printList:
print(tabulate(sorted(ls), headers=["profile", "project name", "branch name"]))
return ls
def runIt(self, func):
f = getattr(self, JKCfg.commandActor[func], None)
if f is not None:
f()
else:
raise ValueError("ERROR: Bad function name '{} = {}'".format(func, JKCfg.commandActor[func]))
def runIt(jkCfg, profile, options, cfgOptions):
for cmd in cfgOptions:
jkCfg.runIt(cmd)
if profile is None:
return
for p in profile.split(","):
jserver = JenkinsServer(jkCfg, p)
for cmd in options:
jserver.runIt(cmd)
def main(profile, options, cfgOptions):
generatedNewYaml = False
if not os.path.exists("./jenkins.yaml"):
generatedNewYaml = True
with open("./jenkins.yaml", 'w', encoding='utf-8') as f:
f.write(jenkins_yaml)
with open("./jenkins.yaml", 'r') as f:
        jkCfg = JKCfg(yaml.safe_load(f))
if generatedNewYaml:
print(info)
if jkCfg.getValue(None, "needChange"):
print("It seems that you've not change the Jenkins configuration jenkins.yaml yet.\nPlease do so and try it again.")
else:
runIt(jkCfg, profile, options, cfgOptions)
# # Main Program
# In[4]:
def run_from_ipython():
try:
__IPYTHON__
return True
except NameError:
return False
def displayHelpAndExit():
print(
'''
Usage:
python jenkins_tool.py -p profile_name
Options:
-p --profile profile names separated by comma
-r --run re-run all failed and unstable jobs
-b --build build the application
-s --schedule schedule all regressions to run
-f --failed list failed jobs
-t --report list all the jobs
-l --list list all the profiles available
'''
    )
    sys.exit(1)
#argvIPython = ["-lfr", "-p", "16R1.16R1_PE.1805.1806"]
argvIPython = ["-lfr", "-p", "17R1.7.Branch"]
if __name__ == '__main__':
profile = None
options = []
cfgOptions = []
args = argvIPython if run_from_ipython() else sys.argv[1:]
try:
opts, args = getopt.getopt(args,"hbsrftlp:",["help", "build", "schedule", "rerun", "failed", "report", "list", "profile="])
except getopt.GetoptError:
displayHelpAndExit()
for opt, arg in opts:
if opt in ("-h", "--help"):
profile = None
elif opt in ("-p", "--profile"):
profile = arg
elif opt in ("-b", "--build"):
options.append("build")
elif opt in ("-s", "--schedule"):
options.append("schedule")
elif opt in ("-r", "--rerun"):
options.append("rerun")
elif opt in ("-f", "--failed"):
options.append("failed")
elif opt in ("-t", "--report"):
options.append("report")
elif opt in ("-l", "--list"):
cfgOptions.append("list")
if not profile and not cfgOptions:
displayHelpAndExit()
else:
main(profile, options, cfgOptions)
print("\nDone")
# # Test Areas - Remove the below if export it to python
# In[5]:
ipythonTest = False
# In[6]:
if ipythonTest:
with open("./jenkins.yaml", 'r') as f:
        jkCfg_ = JKCfg(yaml.safe_load(f))
jserver_ = JenkinsServer(jkCfg_, "16R1.Branch")
| wy8162/hggraph | jenkins_tools.py | jenkins_tools.py | py | 24,012 | python | en | code | 0 | github-code | 36 |
74457209702 | """Futurepedia"""
import json
import re
import time
import random
import scrapy
import os
from urllib.parse import urljoin
from zimeiti.items import ZimeitiItem
from zimeiti.public import refactoring_img, down_img, contenc_description, get_words, timetimes, execute, is_exists, \
refactoring_img1
import math
class MainSpider(scrapy.Spider):
name = "futurepedia"
# allowed_domains = ["xxx.com"]
start_urls = ["https://www.futurepedia.io/ai-tools"]
path = f'//192.168.0.15/data/SEO/images/{name}/'
s = 0
l = 0
def start_requests(self):
start_url = 'https://www.futurepedia.io/api/tags'
yield scrapy.Request(url=start_url,callback=self.lanmu)
def lanmu(self,response):
for resp in json.loads(response.text):
categoryName = resp['categoryName']
toolCount = resp['toolCount']
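            # the tools API serves 9 items per page, so ceil(toolCount / 9)
            # requests cover every tool under this tag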
page = math.ceil(toolCount/9)
for p in range(1,page+1):
url = F'https://www.futurepedia.io/api/tools?page={p}&tag={categoryName}&sort=verified'
yield scrapy.Request(url=url,callback=self.lists,meta={'ncolumn':categoryName})
    def lists(self, response):
        lists = response.xpath('//ul[@class="yl_left_ul"]/li/a')
        if lists:
            for li in lists:
                fmt = li.xpath('img/@src').get()
                fmt_url = urljoin(self.start_urls[0], fmt)
                print(fmt_url)
                de_url = li.xpath('@href').get()
                detail_url = urljoin(self.start_urls[0], de_url)
                num = is_exists({'name': self.name, 'url': detail_url})
                # yield scrapy.Request(url='http://www.41sky.com/gprj/2018-10-24/110.html', callback=self.detail, meta={'ncolumn': response.meta['ncolumn']})
                if num == 0:
                    imgUrl = down_img(fmt_url, response.url, self.path)  # call the image download helper
                    print(imgUrl)
                    yield scrapy.Request(url=detail_url, callback=self.detail, meta={'ncolumn': response.meta['ncolumn'], 'imgUrl': imgUrl})
                else:
                    print('Already exists in the database')
                    pass
        next_page = response.xpath('//div[@class="pages"]/ul/li/a[contains(text(),"下一页")]/@href').get()  # "下一页" is the "next page" link text
        if next_page:
            next_url = urljoin(response.url, next_page)
            yield scrapy.Request(url=next_url, callback=self.lists, meta={'ncolumn': response.meta['ncolumn']})
def detail(self,response):
        print('Fetching detail content')
self.s += 1
item = ZimeitiItem()
item['title'] = response.xpath('//div[@class="art_content"]/h1/text()').get()
if item['title']:
item['title'] = item['title'].strip()
content = response.xpath('//div[@class="text"]').getall()
if content:
text = "".join(content)
item['Ncontent'] = refactoring_img(text,response.url,self.path)
# item['Ncontent'] = refactoring_img(text,response.url,self.path)
# item['Ncontent'] = content
item['description'] = contenc_description(item['Ncontent'])
item['nkeywords'] = get_words(item['Ncontent'])
item['tag'] = item['nkeywords']
item['domian'] = self.name
item['webName'] = 'Futurepedia'
item['url'] = response.url
item['ncolumn'] = response.meta['ncolumn']
item['naddtime'] = str(int(time.time()))
item['imgUrl'] = response.meta['imgUrl']
# item['lmImgUrl'] = response.meta['lmImgUrl']
item['seo_title'] = response.xpath('//title/text()').get()
item['seo_keywords'] = response.xpath('//meta[@name="keywords"]/@content').get()
item['seo_description'] = response.xpath('//meta[@name="description"]/@content').get()
yield item
if __name__ == '__main__':
os.system('scrapy crawl futurepedia') | AYongmengnan/zimeiti | zimeiti/spiders/futurepedia.py | futurepedia.py | py | 3,916 | python | en | code | 0 | github-code | 36 |
32301076957 | #!/usr/bin/env python
from scapy.all import *
from subprocess import call
import time
op=1 # Op code 1 for ARP requests
victim=raw_input('Enter the target IP to hack: ') # the victim's IP to attack
victim=victim.replace(" ","")
spoof=raw_input('Enter the routers IP *SHOULD BE ON SAME ROUTER*: ') # the router's IP; should be on the same network
spoof=spoof.replace(" ","")
mac=raw_input('Enter the target MAC to hack: ') # MAC address of the victim
mac=mac.replace("-",":")
mac=mac.replace(" ","")
arp=ARP(op=op,psrc=spoof,pdst=victim,hwdst=mac)
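# resending this spoofed ARP packet keeps the victim's ARP cache poisoned,
# so frames addressed to the router's IP are delivered to the attacker's MAC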
while 1:
send(arp)
#time.sleep(2)
| ammarx/ARP-spoofing | src/mmattack.py | mmattack.py | py | 563 | python | en | code | 32 | github-code | 36 |
21315566191 | # D = len(data) ## number of docs...data is list of documents
# print('D: ', D)
import torch
import pickle
beta = torch.load('betas-40.pt', map_location=torch.device('cpu'))
beta = beta.numpy()
with open('vocab_50K.pkl', 'rb') as f:
vocab = pickle.load(f)
TC = []
num_topics = len(beta)
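# for each topic, collect its top words by beta weight as the basis for a
# topic-coherence (TC) score over a reference corpus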
for k in range(num_topics):
print('k: {}/{}'.format(k, num_topics))
top_10 = list(beta[k].argsort()[-11:][::-1])
top_words = [vocab[a] for a in top_10]
TC_k = 0
counter = 0 | gretatuckute/neuralNLP | ETM_evaluation/testBetaOrder.py | testBetaOrder.py | py | 488 | python | en | code | 1 | github-code | 36 |
16154117928 | from django.utils.translation import ugettext_lazy as _
STRONGLY_DISAGREE = "strongly disagree"
DISAGREE = "disagree"
SOMEWHAT_DISAGREE = "somewhat disagree"
NEITHER = "neither agree nor disagree"
SOMEWHAT_AGREE = "somewhat agree"
AGREE = "agree"
STRONGLY_AGREE = "strongly agree"
PARENTING_ATTITUDES_CHOICES = [
(STRONGLY_DISAGREE, STRONGLY_DISAGREE.title()),
(DISAGREE, DISAGREE.title()),
(SOMEWHAT_DISAGREE, SOMEWHAT_DISAGREE.title()),
(NEITHER, NEITHER.title()),
(SOMEWHAT_AGREE, SOMEWHAT_AGREE.title()),
(AGREE, AGREE.title()),
(STRONGLY_AGREE, STRONGLY_AGREE.title()),
]
NEVER = "never"
LESS_THAN_1 = "less than once/week"
ONCE = "once/week"
TWO_THREE = "2-3 times/week"
THREE_FOUR = "3-4 times/week"
FIVE_SIX = "5-6 times/week"
DAILY = "daily"
LITERACY_CHOICES = [
(NEVER, NEVER.title()),
(LESS_THAN_1, LESS_THAN_1.title()),
(ONCE, ONCE.title()),
(TWO_THREE, TWO_THREE.title()),
(THREE_FOUR, THREE_FOUR.title()),
(FIVE_SIX, FIVE_SIX.title()),
(DAILY, DAILY.title()),
]
| langcog/web-cdi | webcdi/cdi_forms/choices.py | choices.py | py | 1,038 | python | en | code | 7 | github-code | 36 |
17585378222 | import numpy as np
import matplotlib.pyplot as plt
from starwhale import dataset
def show_image(image) -> None:
plt.imshow(image, cmap="gray")
plt.show(block=True)
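# fetch a single row from the fer2013 dataset and rebuild its grayscale
# image from the raw bytes using the shape stored with it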
ds_name = "fer2013/version/latest"
ds = dataset(ds_name)
row = ds.fetch_one()
data = row.features
show_image(
np.frombuffer(data["image"].to_bytes(), dtype=np.uint8).reshape(data["image"].shape)
)
print(data["label"])
| star-whale/starwhale | example/datasets/fer2013/example.py | example.py | py | 399 | python | en | code | 171 | github-code | 36 |
18305200802 | #!/usr/bin/python
#
# This is a script for dumping and changing peripheral settings on an STM32
# on a running target with the help of OpenOCD. It was written for reverse
# engineering the DPS5005 but should work for any STM32 target with little
# change.
#
# No rights reserved
#
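# Example usage (assumes OpenOCD's telnet server is listening on
# localhost:4444; the addresses below are just illustrations):
#
#   ./ocd-client.py gpio                     # decode all GPIO pin settings
#   ./ocd-client.py r 0x40007400 4           # read four words starting at the DAC block
#   ./ocd-client.py w 0x40007424 0x1d601690  # write DAC_DHR12L2
#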
import socket
import sys
prompt = "> " # OpenOCD prompt
ocd_sock = False
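# Send a command to OpenOCD's telnet interface and collect printable output
# until the "> " prompt comes back (or the socket read times out).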
def ocd_exchange(str = ""):
output = ""
line = ""
got_prompt = False
if len(str) > 0:
ocd_sock.send(bytearray(str, "ascii"))
while 1:
try:
ch = ocd_sock.recv(1)
if len(ch) > 0:
if ch == b'\d':
pass
if ch == b'\n':
if "%s\n" % line != str:
output += "%s\n" % line
line = ""
elif ord(ch) >= 32 and ord(ch) <= 126:
line += ch.decode("ascii")
if line.endswith(prompt):
got_prompt = True
break
else:
break
except socket.timeout as e:
break
return output.strip()
def ocd_sync():
return ocd_exchange()
def ocd_read(address):
response = ocd_exchange("mdw 0x%08x 1\n" % (address))
parts = response.split(":")
if len(parts) != 2:
print ("Parsing error: %s" % response)
return False
temp_address = int(parts[0].strip(), 16)
value = int(parts[1].strip(), 16)
if temp_address != address:
print ("Address error: %s" % response)
return False
return value
def ocd_write(address, value):
response = ocd_exchange("mww 0x%08x 0x%08x\n" % (address, value))
known_pins = {
"PA0" : ["", "U7 "],
"PA1" : ["M2 button", ""],
"PA2" : ["SEL button", ""],
"PA3" : ["M1 button", ""],
"PA4" : ["DAC1_OUT", "TL594.2 (1IN-)"],
"PA5" : ["DAC2_OUT", "TL594.15 (2IN-)"],
"PA7" : ["ADC1_IN7", "R30-U2.7:V_OUT-B (measures Vout)"],
"PA8" : ["TFT.7", "(not used by TFT)"],
"PA14" : ["", "SWDCLK"],
"PA15" : ["", "R41-TL594.16 (2IN+)"],
"PB0" : ["ADC1_IN8", "R7/R2-R14-D4 (measures Vin)"],
"PB1" : ["ADC1_IN9", "R33-U2.1:V_OUT-A (measures Iout)"],
"PB3" : ["", "R11-R17-R25-U2.5 (V_inB+)"],
"PB4" : ["PWR button", ""],
"PB5" : ["Rotary press", ""],
"PB6" : ["", "NC?"],
"PB7" : ["TIM4_CH2", ""],
"PB8" : ["Rotary enc", ""],
"PB9" : ["Rotary enc", ""],
"PB11" : ["nPwrEnable", "R29-TFT.2 (TFT_VCC)"],
"PB12" : ["SPI2_NSS", "TFT_RESET"],
"PB13" : ["SPI2_SCK", ""],
"PB14" : ["SPI2_MISO", "TFT_A0"],
"PB15" : ["SPI2_MOSI", ""],
"PD1" : ["", "U7"],
}
ADC1_BASE = 0x40012400
AFIO_BASE = 0x40010000
DAC_BASE = 0x40007400
DMA1_BASE = 0x40020000
EXTI_BASE = 0x40010400
GPIOA_BASE = 0x40010800
GPIOB_BASE = 0x40010c00
GPIOC_BASE = 0x40011000
GPIOD_BASE = 0x40011400
GPIOE_BASE = 0x40011800
GPIOF_BASE = 0x40011C00
GPIOG_BASE = 0x40012000
RCC_BASE = 0x40021000
SPI1_BASE = 0x40013000
SPI2_BASE = 0x40003800
TIM1_BASE = 0x40012C00
TIM2_BASE = 0x40000000
TIM3_BASE = 0x40000400
TIM4_BASE = 0x40000800
TIM6_BASE = 0x40001000
TIM7_BASE = 0x40001400
TIM15_BASE = 0x40014000
TIM16_BASE = 0x40014400
TIM17_BASE = 0x40014800
# SPI1 registers
SPI_CR1 = 0x00
SPI_CR2 = 0x04
SPI_SR = 0x08
SPI_DR = 0x0C
SPI_CRCPR = 0x10
SPI_RXCRCR = 0x14
SPI_TXCRCR = 0x18
# TIM2 to TIM5 registers (checked)
TIMx_CR1 = 0x00
TIMx_CR2 = 0x04
TIMx_SMC = 0x08
TIMx_DIER = 0x0c
TIMx_SR = 0x10
TIMx_EGR = 0x14
TIMx_CCMR1 = 0x18
TIMx_CCMR2 = 0x1c
TIMx_CCER = 0x20
TIMx_CNT = 0x24
TIMx_PSC = 0x28
TIMx_ARR = 0x2c
TIMx_RCR = 0x30
TIMx_CCR1 = 0x34
TIMx_CCR2 = 0x38
TIMx_CCR3 = 0x3c
TIMx_CCR4 = 0x40
TIMx_BDTR = 0x44
TIMx_DCR = 0x48
TIMx_DMAR = 0x4c
# ADC registers (checked)
ADC_SR = 0x00
ADC_CR1 = 0x04
ADC_CR2 = 0x08
ADC_SMPR1 = 0x0C
ADC_SMPR2 = 0x10
ADC_JOFR1 = 0x14
ADC_JOFR2 = 0x18
ADC_JOFR3 = 0x1C
ADC_JOFR4 = 0x20
ADC_HTR = 0x24
ADC_LTR = 0x28
ADC_SQR1 = 0x2C
ADC_SQR2 = 0x30
ADC_SQR3 = 0x34
ADC_JSQR = 0x38
ADC_JDR1 = 0x3C
ADC_JDR2 = 0x40
ADC_JDR3 = 0x44
ADC_JDR4 = 0x48
ADC_DR = 0x4C
# DAC registers (checked)
DAC_CR = 0x00
DAC_SWTRIGR = 0x04
DAC_DHR12R1 = 0x08
DAC_DHR12L1 = 0x0C
DAC_DHR8R1 = 0x10
DAC_DHR12R2 = 0x14
DAC_DHR12L2 = 0x18
DAC_DHR8R2 = 0x1c
DAC_DHR12RD = 0x20
DAC_DHR12LD = 0x24
DAC_DHR8RD = 0x28
DAC_DOR1 = 0x2C
DAC_DOR2 = 0x30
DAC_SR = 0x34
# GPIO registers (checked)
GPIOx_CRL = 0x00
GPIOx_CRH = 0x04
GPIOx_IDR = 0x08
GPIOx_ODR = 0x0c
GPIOx_BSRR = 0x10
GPIOx_BRR = 0x14
GPIOx_LCKR = 0x18
# EXTI registers (checked)
EXTI_IMR = 0x00
EXTI_EMR = 0x04
EXTI_RTSR = 0x08
EXTI_FTSR = 0x0c
EXTI_SWIER = 0x10
EXTI_PR = 0x14
# AFIO registers (checked)
AFIO_EVCR = 0x00
AFIO_MAPR = 0x04
AFIO_EXTICR1 = 0x08
AFIO_EXTICR2 = 0x0c
AFIO_EXTICR3 = 0x10
AFIO_EXTICR4 = 0x14
AFIO_MAPR2 = 0x18
# RCC registers (checked)
RCC_CR = 0x00
RCC_CFGR = 0x04
RCC_CIR = 0x08
RCC_APB2RSTR = 0x0c
RCC_APB1RSTR = 0x10
RCC_AHBENR = 0x14
RCC_APB2ENR = 0x18
RCC_APB1ENR = 0x1c
RCC_BDCR = 0x20
RCC_CSR = 0x24
RCC_CFGR2 = 0x2c
# DMA registers (checked)
DMA_ISR = 0x00
DMA_IFCR = 0x04
DMA_CCR1 = 0x08
DMA_CNDTR1 = 0x0C
DMA_CPAR1 = 0x10
DMA_CMAR1 = 0x14
# 0x18 reserved
DMA_CCR2 = 0x1C
DMA_CNDTR2 = 0x20
DMA_CPAR2 = 0x24
DMA_CMAR2 = 0x28
# 0x2C reserved
DMA_CCR3 = 0x30
DMA_CNDTR3 = 0x34
DMA_CPAR3 = 0x38
DMA_CMAR3 = 0x3C
# 0x40 reserved
DMA_CCR4 = 0x44
DMA_CNDTR4 = 0x48
DMA_CPAR4 = 0x4C
DMA_CMAR4 = 0x50
# 0x54 reserved
DMA_CCR5 = 0x58
DMA_CNDTR5 = 0x5C
DMA_CPAR5 = 0x60
DMA_CMAR5 = 0x64
# 0x68 reserved
DMA_CCR6 = 0x6C
DMA_CNDTR6 = 0x70
DMA_CPAR6 = 0x74
DMA_CMAR6 = 0x78
# 0x7C reserved
DMA_CCR7 = 0x80
DMA_CNDTR7 = 0x84
DMA_CPAR7 = 0x88
register_map = [
[0x40023000, 0x400233FF, "CRC"],
[0x40022000, 0x400223FF, "Flash memory interface"],
[0x40021000, 0x400213FF, "Reset and clock control RCC"],
[0x40020000, 0x400203FF, "DMA1"],
[0x40014800, 0x40014BFF, "TIM17 timer"],
[0x40014400, 0x400147FF, "TIM16 timer"],
[0x40014000, 0x400143FF, "TIM15 timer"],
[0x40013800, 0x40013BFF, "USART1"],
[0x40013000, 0x400133FF, "SPI1"],
[0x40012C00, 0x40012FFF, "TIM1 timer"],
[0x40012400, 0x400127FF, "ADC1"],
[0x40011800, 0x40011BFF, "GPIO Port E"],
[0x40011400, 0x400117FF, "GPIO Port D"],
[0x40011000, 0x400113FF, "GPIO Port C"],
[0x40010C00, 0x40010FFF, "GPIO Port B"],
[0x40010800, 0x40010BFF, "GPIO Port A"],
[0x40010400, 0x400107FF, "EXTI"],
[0x40010000, 0x400103FF, "AFIO"],
[0x40007800, 0x40007BFF, "CEC"],
[0x40007400, 0x400077FF, "DAC"],
[0x40007000, 0x400073FF, "Power control PWR"],
[0x40006C00, 0x40006FFF, "Backup registers (BKP)"],
[0x40005800, 0x40005BFF, "I2C2"],
[0x40005400, 0x400057FF, "I2C1"],
[0x40004800, 0x40004BFF, "USART3"],
[0x40004400, 0x400047FF, "USART2"],
[0x40003800, 0x40003BFF, "SPI2"],
[0x40003000, 0x400033FF, "Independent watchdog (IWDG)"],
[0x40002C00, 0x40002FFF, "Window watchdog (WWDG)"],
[0x40002800, 0x40002BFF, "RTC"],
[0x40001400, 0x400017FF, "TIM7 timer"],
[0x40001000, 0x400013FF, "TIM6 timer"],
[0x40000800, 0x40000BFF, "TIM4 timer"],
[0x40000400, 0x400007FF, "TIM3 timer"],
[0x40000000, 0x400003FF, "TIM2 timer"]
]
port_addresses = [GPIOA_BASE, GPIOB_BASE, GPIOC_BASE, GPIOD_BASE, GPIOE_BASE, GPIOF_BASE, GPIOG_BASE]
CRL_CNF_IN = ["An", "Flt", "PuPd", "RFU"]
CRL_CNF_IN_ANALOG = 0
CRL_CNF_IN_FLOAT = 1
CRL_CNF_IN_PUPD = 2
CRL_CNF_IN_RFU = 3
CRL_CNF_OUT = ["PP", "OD", "AF-PP", "AF-OD"]
CRL_CNF_OUT_PP = 0
CRL_CNF_OUT_OD = 1
CRL_CNF_OUT_AF_PP = 2
CRL_CNF_OUT_AF_OD = 3
CRL_MODE = [0, 10, 2, 50]
CRL_MODE_IN = 0
CRL_MODE_OUT_10MHZ = 1
CRL_MODE_OUT_2MHZ = 2
CRL_MODE_OUT_50MHZ = 3
def print_gpio_pin(port_nbr, pin, cnf, mode, level):
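    # render the decoded pin state as equivalent libopencm3-style
    # gpio_set_mode()/gpio_set()/gpio_clear() calls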
cnf_i = ["GPIO_CNF_INPUT_ANALOG", "GPIO_CNF_INPUT_FLOAT", "GPIO_CNF_INPUT_PULL_UPDOWN", "##### RESERVED #####"]
cnf_o = ["GPIO_CNF_OUTPUT_PUSHPULL", "GPIO_CNF_OUTPUT_OPENDRAIN", "GPIO_CNF_OUTPUT_ALTFN_PUSHPULL", "GPIO_CNF_OUTPUT_ALTFN_OPENDRAIN"]
modes = ["GPIO_MODE_INPUT", "GPIO_MODE_OUTPUT_10_MHZ", "GPIO_MODE_OUTPUT_2_MHZ", "GPIO_MODE_OUTPUT_50_MHZ"]
if mode == 0:
cnfs = cnf_i
else:
cnfs = cnf_o
a = "gpio_set_mode(GPIO%c, %s, %s, GPIO%d);\n" % (ord('A')+port_nbr, modes[mode], cnfs[cnf], pin)
if mode == 0 and cnf == 2:
if level == 0:
a += "gpio_clear(GPIO%c, GPIO%d);\n" % (ord('A')+port_nbr, pin)
elif level == 1:
a += "gpio_set(GPIO%c, GPIO%d);\n" % (ord('A')+port_nbr, pin)
else:
a += "rusk!!!!!!!!"
return a
def dump_port_settings(port_nbr):
# print("Checking GPIO%c" % (ord('A') + port_nbr))
crl = ocd_read(port_addresses[port_nbr] + GPIOx_CRL)
crh = ocd_read(port_addresses[port_nbr] + GPIOx_CRH)
idr = ocd_read(port_addresses[port_nbr] + GPIOx_IDR)
odr = ocd_read(port_addresses[port_nbr] + GPIOx_ODR)
for pin in range(0, 16):
if pin < 8:
mode = (crl >> (4*pin)) & 3
cnf = (crl >> (4*pin+2)) & 3
else:
mode = (crh >> (4*(pin-8))) & 3
cnf = (crh >> (4*(pin-8)+2)) & 3
pin_s = "P%c%d" % (ord('A')+port_nbr, pin)
if mode == CRL_MODE_IN:
level = (idr >> pin) & 1
info = "%-4s I %d %-5s" % (pin_s, level, CRL_CNF_IN[cnf])
else:
level = (odr >> pin) & 1
info = "%-4s O %d %-5s (%d Mhz)" % (pin_s, level, CRL_CNF_OUT[cnf], CRL_MODE[mode])
if pin_s in known_pins:
desc = known_pins[pin_s]
else:
desc = ["", ""]
setting = print_gpio_pin(port_nbr, pin, cnf, mode, level)
print("// %-30s %-15s %s" % (info, desc[0], desc[1]))
print("%s" % (setting))
def dump_gpio_port_settings():
for port in range(0, 4):
dump_port_settings(port)
def dump_reg(name, address):
value = ocd_read(address)
print("%-8s : 0x%08x [0x%08x]" % (name, value, address))
"""
CR1 : 0x00000081 [00] ARPE=1 CEN=1
CR2 : 0x00000000 [04]
SMCR : 0x00000000 [08]
DIER : 0x00000000 [0c]
SR : 0x0000001f [10] CC4IF=1 CC3IF=1 CC2IF=1 CC1IF=1 UIF=1
EGR : 0x00000000 [14]
CCMR1 : 0x00006800 [18] OC2M=2 OC2PE=1
CCMR2 : 0x00000000 [1c]
CCER : 0x00000010 [20] CC2P=1
CNT : 0x000056ed [24] <counter>
PSC : 0x00000000 [28]
ARR : 0x00005dbf [2c] <auto reload>
CCR1 : 0x00000000 [34]
CCR2 : 0x00005dc0 [38] <TIM2 compare register 2>
CCR3 : 0x00000000 [3c]
CCR4 : 0x00000000 [40]
DCR : 0x00000000 [48]
DMAR : 0x00000081 [4c] DMA address
U ARR CCR2
1.00 0x00005dbf 0x00005dc0
"""
def dump_tim_settings(name, base_addr):
print("%s settings" % (name))
dump_reg("CR1", base_addr + TIMx_CR1)
dump_reg("CR2", base_addr + TIMx_CR2)
dump_reg("SMC", base_addr + TIMx_SMC)
dump_reg("DIER", base_addr + TIMx_DIER)
dump_reg("SR", base_addr + TIMx_SR)
dump_reg("EGR", base_addr + TIMx_EGR)
dump_reg("CCMR1", base_addr + TIMx_CCMR1)
dump_reg("CCMR2", base_addr + TIMx_CCMR2)
dump_reg("CCER", base_addr + TIMx_CCER)
dump_reg("CNT", base_addr + TIMx_CNT)
dump_reg("PSC", base_addr + TIMx_PSC)
dump_reg("ARR", base_addr + TIMx_ARR)
dump_reg("RCR", base_addr + TIMx_RCR)
dump_reg("CCR1", base_addr + TIMx_CCR1)
dump_reg("CCR2", base_addr + TIMx_CCR2)
dump_reg("CCR3", base_addr + TIMx_CCR3)
dump_reg("CCR4", base_addr + TIMx_CCR4)
dump_reg("BDTR", base_addr + TIMx_BDTR)
dump_reg("DCR", base_addr + TIMx_DCR)
dump_reg("DMAR", base_addr + TIMx_DMAR)
"""
TIM4 settings
CR1 : 0x00000081 [0x40000800]
CR2 : 0x00000000 [0x40000804]
SMC : 0x00000000 [0x40000808]
DIER : 0x00000000 [0x4000080c]
SR : 0x0000001f [0x40000810]
EGR : 0x00000000 [0x40000814]
CCMR1 : 0x00006800 [0x40000818]
CCMR2 : 0x00000000 [0x4000081c]
CCER : 0x00000010 [0x40000820]
CNT : 0x00000ebb [0x40000824]
PSC : 0x00000000 [0x40000828]
ARR : 0x00005dbf [0x4000082c]
RCR : 0x00000000 [0x40000830]
CCR1 : 0x00000000 [0x40000834]
CCR2 : 0x00001f40 [0x40000838]
CCR3 : 0x00000000 [0x4000083c]
CCR4 : 0x00000000 [0x40000840]
BDTR : 0x00000000 [0x40000844]
DCR : 0x00000000 [0x40000848]
DMAR : 0x00000081 [0x4000084c]
"""
def dump_tim4_settings():
dump_tim_settings("TIM4", TIM4_BASE) # TFT backlight intensity
def dump_dac_settings():
print("DAC settings")
dump_reg("CR", DAC_BASE + DAC_CR)
dump_reg("SWTRIGR", DAC_BASE + DAC_SWTRIGR)
dump_reg("DHR12R1", DAC_BASE + DAC_DHR12R1)
dump_reg("DHR12L1", DAC_BASE + DAC_DHR12L1)
dump_reg("DHR8R1", DAC_BASE + DAC_DHR8R1)
dump_reg("DHR12R2", DAC_BASE + DAC_DHR12R2)
dump_reg("DHR12L2", DAC_BASE + DAC_DHR12L2)
dump_reg("DHR8R2", DAC_BASE + DAC_DHR8R2)
dump_reg("DHR12RD", DAC_BASE + DAC_DHR12RD)
dump_reg("DHR12LD", DAC_BASE + DAC_DHR12LD)
dump_reg("DHR8RD", DAC_BASE + DAC_DHR8RD)
dump_reg("DOR1", DAC_BASE + DAC_DOR1)
dump_reg("DOR2", DAC_BASE + DAC_DOR2)
dump_reg("SR", DAC_BASE + DAC_SR)
"""
DAC settings
CR : 0x00030003 [0x40007400]
SWTRIGR : 0x00000000 [0x40007404]
DHR12R1 : 0x00000003 [0x40007408]
DHR12L1 : 0x00000030 [0x4000740c]
DHR8R1 : 0x00000000 [0x40007410]
DHR12R2 : 0x0000008c [0x40007414]
DHR12L2 : 0x000008c0 [0x40007418]
DHR8R2 : 0x00000008 [0x4000741c]
DHR12RD : 0x008c0003 [0x40007420]
DHR12LD : 0x08c00030 [0x40007424]
DHR8RD : 0x00000800 [0x40007428]
DOR1 : 0x00000003 [0x4000742c]
DOR2 : 0x0000008c [0x40007430]
SR : 0x00000000 [0x40007434]
"""
def dump_adc1_settings():
print("ADC1 settings")
dump_reg("SR", ADC1_BASE + ADC_SR)
dump_reg("CR1", ADC1_BASE + ADC_CR1)
dump_reg("CR2", ADC1_BASE + ADC_CR2)
dump_reg("SMPR1", ADC1_BASE + ADC_SMPR1)
dump_reg("SMPR2", ADC1_BASE + ADC_SMPR2)
dump_reg("JOFR1", ADC1_BASE + ADC_JOFR1)
dump_reg("JOFR2", ADC1_BASE + ADC_JOFR2)
dump_reg("JOFR3", ADC1_BASE + ADC_JOFR3)
dump_reg("JOFR4", ADC1_BASE + ADC_JOFR4)
dump_reg("HTR", ADC1_BASE + ADC_HTR)
dump_reg("LTR", ADC1_BASE + ADC_LTR)
dump_reg("SQR1", ADC1_BASE + ADC_SQR1)
dump_reg("SQR2", ADC1_BASE + ADC_SQR2)
dump_reg("SQR3", ADC1_BASE + ADC_SQR3)
dump_reg("JSQR", ADC1_BASE + ADC_JSQR)
dump_reg("JDR1", ADC1_BASE + ADC_JDR1)
dump_reg("JDR2", ADC1_BASE + ADC_JDR2)
dump_reg("JDR3", ADC1_BASE + ADC_JDR3)
dump_reg("JDR4", ADC1_BASE + ADC_JDR4)
dump_reg("DR", ADC1_BASE + ADC_DR)
"""
ADC1 settings
SR : 0x00000010 [0x40012400]
CR1 : 0x00000000 [0x40012404]
CR2 : 0x009e0001 [0x40012408] # TSVREF:1, EXTTRIG:1, EXTSEL:111 (SWSTART) ADON:1
SMPR1 : 0x00000000 [0x4001240c]
SMPR2 : 0x36c00000 [0x40012410]
JOFR1 : 0x00000000 [0x40012414]
JOFR2 : 0x00000000 [0x40012418]
JOFR3 : 0x00000000 [0x4001241c]
JOFR4 : 0x00000000 [0x40012420]
HTR : 0x00000fff [0x40012424]
LTR : 0x00000000 [0x40012428]
SQR1 : 0x00000000 [0x4001242c]
SQR2 : 0x00000000 [0x40012430]
SQR3 : 0x00000008 [0x40012434]
SQR4 : 0x00000000 [0x40012438]
JDR1 : 0x00000000 [0x4001243c]
JDR2 : 0x00000000 [0x40012440]
JDR3 : 0x00000000 [0x40012444]
JDR4 : 0x00000000 [0x40012448]
DR : 0x000001cc [0x4001244c]
DR: 12c 300 5.0
168 360 6.0
19f 415 7.0
1da 474 8.0
21a 538 9.0
254 596 10.0
2ce 718 12.0
Linear (tested with https://plot.ly/create/)
"""
def dump_afio_settings():
print("AFIO settings")
dump_reg("EVCR", AFIO_BASE + AFIO_EVCR)
dump_reg("MAPR", AFIO_BASE + AFIO_MAPR)
dump_reg("EXTICR1", AFIO_BASE + AFIO_EXTICR1)
dump_reg("EXTICR2", AFIO_BASE + AFIO_EXTICR2)
dump_reg("EXTICR3", AFIO_BASE + AFIO_EXTICR3)
dump_reg("EXTICR4", AFIO_BASE + AFIO_EXTICR4)
dump_reg("MAPR2", AFIO_BASE + AFIO_MAPR2)
"""
AFIO settings
EVCR : 0x00000000 [0x40010000]
MAPR : 0x02008000 [0x40010004]
EXTICR1 : 0x00000000 [0x40010008]
EXTICR2 : 0x00000011 [0x4001000c]
EXTICR3 : 0x00000011 [0x40010010]
EXTICR4 : 0x00000000 [0x40010014]
MAPR2 : 0x00000000 [0x40010018]
"""
def dump_exti_settings():
print("EXTI settings")
dump_reg("IMR", EXTI_BASE + EXTI_IMR)
dump_reg("EMR", EXTI_BASE + EXTI_EMR)
dump_reg("RTSR", EXTI_BASE + EXTI_RTSR)
dump_reg("FTSR", EXTI_BASE + EXTI_FTSR)
dump_reg("SWIER", EXTI_BASE + EXTI_SWIER)
dump_reg("PR", EXTI_BASE + EXTI_PR)
"""
EXTI settings
IMR : 0x0000033e [0x40010400]
EMR : 0x00000000 [0x40010404]
RTSR : 0x00000300 [0x40010408]
FTSR : 0x0000033e [0x4001040c]
SWIER : 0x00000000 [0x40010410]
PR : 0x00000000 [0x40010414]
"""
def dump_rcc_settings():
print("RCC settings")
dump_reg("CR", RCC_BASE + RCC_CR)
dump_reg("CFGR", RCC_BASE + RCC_CFGR)
dump_reg("CIR", RCC_BASE + RCC_CIR)
dump_reg("APB2RSTR", RCC_BASE + RCC_APB2RSTR)
dump_reg("APB1RSTR", RCC_BASE + RCC_APB1RSTR)
dump_reg("AHBENR", RCC_BASE + RCC_AHBENR)
dump_reg("APB2ENR", RCC_BASE + RCC_APB2ENR)
dump_reg("APB1ENR", RCC_BASE + RCC_APB1ENR)
dump_reg("BDCR", RCC_BASE + RCC_BDCR)
dump_reg("CSR", RCC_BASE + RCC_CSR)
dump_reg("CFGR2", RCC_BASE + RCC_CFGR2)
def dump_spi_settings(name, base_addr):
print("%s settings" % (name))
dump_reg("CR2", base_addr + SPI_CR1)
dump_reg("CR1", base_addr + SPI_CR2)
dump_reg("SR", base_addr + SPI_SR)
dump_reg("DR", base_addr + SPI_DR)
dump_reg("CRCPR", base_addr + SPI_CRCPR)
dump_reg("RXCRCR", base_addr + SPI_RXCRCR)
dump_reg("TXCRCR", base_addr + SPI_TXCRCR)
def dump_spi1_settings():
dump_spi_settings("SPI1", SPI1_BASE)
def dump_spi2_settings():
dump_spi_settings("SPI2", SPI2_BASE)
def dump_gpio_settings(name, base_addr):
print("%s settings" % (name))
dump_reg("CRL", base_addr + GPIOx_CRL)
dump_reg("CRH", base_addr + GPIOx_CRH)
dump_reg("IDR", base_addr + GPIOx_IDR)
dump_reg("ODR", base_addr + GPIOx_ODR)
dump_reg("BSRR", base_addr + GPIOx_BSRR)
dump_reg("BRR", base_addr + GPIOx_BRR)
dump_reg("LCKR", base_addr + GPIOx_LCKR)
def dump_gpioa_settings():
dump_gpio_settings("GPIOA", GPIOA_BASE)
def dump_gpiob_settings():
dump_gpio_settings("GPIOB", GPIOB_BASE)
def dump_gpioc_settings():
dump_gpio_settings("GPIOC", GPIOC_BASE)
def dump_gpiod_settings():
dump_gpio_settings("GPIOD", GPIOD_BASE)
"""
DAC settings @ 5V :
CR : 0x00030003 [00] CH 1/2 output buffer disable, CH 1/2 enable TSEL=000
SWTRIGR : 0x00000000 [04]
DHR12R1 : 0x00000169 [08] DAC channel1 12-bit right-aligned data holding register
DHR12L1 : 0x00001690 [0c] DAC channel1 12-bit left aligned data holding register
DHR8R1 : 0x00000016 [10] DAC channel1 8-bit right aligned data holding register
DHR12R2 : 0x000001d6 [14] DAC channel2 12-bit right aligned data holding register
DHR12L2 : 0x00001d60 [18] DAC channel2 12-bit left aligned data holding register
DHR8R2 : 0x0000001d [1c] DAC channel2 8-bit right-aligned data holding register
DHR12RD : 0x01d60169 [20] Dual DAC 12-bit right-aligned data holding register
DHR12LD : 0x1d601690 [24] DUAL DAC 12-bit left aligned data holding register
DHR8RD : 0x00001d16 [28] DUAL DAC 8-bit right aligned data holding register
DOR1 : 0x00000169 [2c] DAC channel1 data output register
DOR2 : 0x000001d6 [30] DAC channel2 data output register
SR : 0x00000000 [34]
# 470/V
# 0V
mww 0x40007424 0x1d600000
# 1V
mww 0x40007424 0x1d6004D0
# 2V
mww 0x40007424 0x1d600940
# 3V
mww 0x40007424 0x1d600DB0
# 4V
mww 0x40007424 0x1d601220
# 5V
mww 0x40007424 0x1d601690
# 6V
mww 0x40007424 0x1d601B00
# Full swing (Vin - 1V)
mww 0x40007424 0x1d601f40
# 7.10V @ 7.70V matning
mww 0x40007424 0x1d601fff
"""
def dump_dma_settings():
    print("DMA settings")
    dump_reg("ISR", DMA1_BASE + DMA_ISR)
    dump_reg("IFCR", DMA1_BASE + DMA_IFCR)
    dump_reg("CCR1", DMA1_BASE + DMA_CCR1)
    dump_reg("CNDTR1", DMA1_BASE + DMA_CNDTR1)
    dump_reg("CPAR1", DMA1_BASE + DMA_CPAR1)
    dump_reg("CMAR1", DMA1_BASE + DMA_CMAR1)
    dump_reg("CCR2", DMA1_BASE + DMA_CCR2)
    dump_reg("CNDTR2", DMA1_BASE + DMA_CNDTR2)
    dump_reg("CPAR2", DMA1_BASE + DMA_CPAR2)
    dump_reg("CMAR2", DMA1_BASE + DMA_CMAR2)
    dump_reg("CCR3", DMA1_BASE + DMA_CCR3)
    dump_reg("CNDTR3", DMA1_BASE + DMA_CNDTR3)
    dump_reg("CPAR3", DMA1_BASE + DMA_CPAR3)
    dump_reg("CMAR3", DMA1_BASE + DMA_CMAR3)
    dump_reg("CCR4", DMA1_BASE + DMA_CCR4)
    dump_reg("CNDTR4", DMA1_BASE + DMA_CNDTR4)
    dump_reg("CPAR4", DMA1_BASE + DMA_CPAR4)
    dump_reg("CMAR4", DMA1_BASE + DMA_CMAR4)
    dump_reg("CCR5", DMA1_BASE + DMA_CCR5)
    dump_reg("CNDTR5", DMA1_BASE + DMA_CNDTR5)
    dump_reg("CPAR5", DMA1_BASE + DMA_CPAR5)
    dump_reg("CMAR5", DMA1_BASE + DMA_CMAR5)
    dump_reg("CCR6", DMA1_BASE + DMA_CCR6)
    dump_reg("CNDTR6", DMA1_BASE + DMA_CNDTR6)
    dump_reg("CPAR6", DMA1_BASE + DMA_CPAR6)
    dump_reg("CMAR6", DMA1_BASE + DMA_CMAR6)
    dump_reg("CCR7", DMA1_BASE + DMA_CCR7)
    dump_reg("CNDTR7", DMA1_BASE + DMA_CNDTR7)
    dump_reg("CPAR7", DMA1_BASE + DMA_CPAR7)
def dump_register_map():
global register_map
for reg in register_map:
address = reg[0]
end = reg[1]
name = reg[2]
print("\n# %s (0x%08x..0x%08x)" % (name, address, end))
while address < end:
value = ocd_read(address)
print("[0x%08x] = 0x%08x" % (address, value))
address += 4
def write_mem():
if len(sys.argv) != 4:
print("%s <address> <value>" % (sys.argv[0]))
else:
address = int(sys.argv[2], 16)
value = int(sys.argv[3], 16)
ocd_write(address, value)
def read_mem():
if len(sys.argv) < 3:
print("%s <address> [<length>]" % (sys.argv[0]))
else:
length = 1
address = int(sys.argv[2], 16)
if len(sys.argv) == 4:
length = int(sys.argv[3], 16)
while length > 0:
value = ocd_read(address)
print("[0x%08x] = 0x%08x" % (address, value))
address += 4
length -= 1
def print_help():
global commands
print("Available commands:")
for cmd in commands:
print(" %s" % (cmd))
def dump_all():
global commands
blocklist = ["reg", "all", "r", "w", "help"]
for cmd in commands:
if cmd not in blocklist:
print(">>>> %s" % cmd)
commands[cmd]()
print("")
def parse_mem_dump(data):
lines = data.split('\n')
for l in lines:
parts = l.split(":")
if len(parts) != 2:
print ("Parsing error: %s" % l)
return
address = int(parts[0].strip(), 16)
parts[1] = parts[1].strip()
data = parts[1].split(" ")
        for (i, item) in enumerate(data):
            print("[0x%08x] = 0x%08x" % (address + 4 * i, int(item, 16)))
try:
ocd_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ocd_sock.connect(('localhost', 4444))
ocd_sock.settimeout(1.0)
except socket.error:
print("Failed connect to Open OCD")
sys.exit(1)
if not ocd_sync():
print("Failed to sync with Open OCD")
sys.exit(1)
commands = {
"adc1" : dump_adc1_settings,
"afio" : dump_afio_settings,
"dac" : dump_dac_settings,
"dma" : dump_dma_settings,
"exti" : dump_exti_settings,
"gpio" : dump_gpio_port_settings,
"gpioa" : dump_gpioa_settings,
"gpiob" : dump_gpiob_settings,
"gpioc" : dump_gpioc_settings,
"gpiod" : dump_gpiod_settings,
"reg" : dump_register_map,
"rcc" : dump_rcc_settings,
"spi1" : dump_spi1_settings,
"spi2" : dump_spi2_settings,
"tim4" : dump_tim4_settings,
"w" : write_mem,
"r" : read_mem,
"help" : print_help,
"all" : dump_all,
}
if len(sys.argv) >= 2:
if sys.argv[1] in commands:
commands[sys.argv[1]]()
else:
print_help();
else:
print_help();
| kanflo/opendps | ocd-client.py | ocd-client.py | py | 24,789 | python | en | code | 840 | github-code | 36 |
35018450468 | from os import path
import sys
import metavision_designer_engine as mvd_engine
from metavision_designer_engine import Controller
import metavision_designer_cv as mvd_cv
import metavision_designer_core as mvd_core
import metavision_hal as mv_hal
import cv2
from Python.Event_Processor.EventProcessor import EventProcessor
from Python.Log_Luminance import Log_Luminance, Gen_Image
from metavision_designer_core import RoiFilter
# this file is the raw input file used if we choose not to use the event camera
input_filename = "../../Movie/Log_Luminance/out_2021-07-07_13-13-28.raw" # does not work with ~/
cam = input("Do you want to use cam ? Y or N ")
if cam == "Y" or cam == "y":
from_file = False
controller = Controller()
device = mv_hal.DeviceDiscovery.open('')
# Add the device interface to the pipeline
interface = mvd_core.HalDeviceInterface(device)
controller.add_device_interface(interface)
cd_producer = mvd_core.CdProducer(interface)
else:
# input_filename = input("File path from main ")
from_file = True
# Check validity of input arguments
if not (path.exists(input_filename) and path.isfile(input_filename)):
print("Error: provided input path '{}' does not exist or is not a file.".format(input_filename))
sys.exit(1)
is_raw = input_filename.endswith('.raw')
if not is_raw:
print("Error: provided input path '{}' does not have the right extension. ".format(input_filename) +
"It has either to be a .raw or a .dat file")
sys.exit(1)
controller = mvd_engine.Controller()
device = mv_hal.DeviceDiscovery.open_raw_file(input_filename)
if not device:
print("Error: could not open file '{}'.".format(input_filename))
sys.exit(1)
# Add the device interface to the pipeline
interface = mvd_core.HalDeviceInterface(device)
controller.add_device_interface(interface)
cd_producer = mvd_core.CdProducer(interface)
# Start the streaming of events
i_events_stream = device.get_i_events_stream()
i_events_stream.start()
# Add cd_producer to the pipeline
controller.add_component(cd_producer, "CD Producer")
# Get the sensor size.
geometry = device.get_i_geometry()
width = geometry.get_width()
height = geometry.get_height()
print("Sensor size width = {} height = {}".format(width, height))
# crop (region of interest) for the event camera
roi_width = int(100)
roi_height = int(100)
x0 = int(width / 2 - roi_width / 2)
y0 = int(height / 2 - roi_height / 2)
x1 = x0 + roi_width
y1 = y0 + roi_height
roi_filter = RoiFilter(cd_producer, x0, y0, x1, y1)
controller.add_component(roi_filter)
print("ROI size width = {} height = {} Number of pixels = {}".format(roi_width, roi_height, roi_width * roi_height))
# ActivityNoiseFilter configuration
time_window_length = 1500  # duration in us; the lower the value, the stronger the filtering
cd_filtered = mvd_cv.ActivityNoiseFilter(roi_filter, time_window_length)
controller.add_component(cd_filtered, "Noise filter")
filtered_frame_gen = mvd_core.FrameGenerator(cd_filtered)
controller.add_component(filtered_frame_gen, "Filtered frame generator")
# Create Frame Generator with 20ms accumulation time
frame_gen = mvd_core.FrameGenerator(cd_filtered)
frame_gen.set_dt(20000)
controller.add_component(frame_gen, "FrameGenerator")
# We use PythonConsumer to "grab" the output of two components: cd_producer and frame_gen
# pyconsumer will callback the application each time it receives data, using the event_callback function
frame_gen_name = "FrameGen"
cd_prod_name = "CDProd"
ev_proc = EventProcessor(event_gen_name=cd_prod_name, frame_gen_name=frame_gen_name, width=width, height=height,
display_callback=False)
pyconsumer = mvd_core.PythonConsumer(ev_proc.event_callback)
pyconsumer.add_source(cd_filtered, cd_prod_name) # filtered (cd_filtered) or not filtered (cd_producer)
pyconsumer.add_source(filtered_frame_gen, frame_gen_name) # filtered (filtered_frame_gen) or not filtered (frame_gen)
controller.add_component(pyconsumer, "PythonConsumer")
controller.set_slice_duration(10000)
controller.set_batch_duration(50000)
do_sync = True if from_file else False
# Start the camera
if not from_file:
simple_device = device.get_i_device_control()
simple_device.start()
# Start the streaming of events
i_events_stream = device.get_i_events_stream()
i_events_stream.start()
#################################Parameters#################################
# we assume the image is square
divide_matrix_by = 2 # factor by which the image size is divided; starting from a 100*100 resolution we get 50*50 when dividing by 2
print("divide size width = {} height = {} Number of pixels = {}".format(int(roi_width/divide_matrix_by), int(roi_height/divide_matrix_by),
int(roi_width/divide_matrix_by) * int(roi_height/divide_matrix_by)))
# the two level matrices that make the log-luminance algorithm work
# they are kept from one batch to the next, so even if a single big batch contains all the events the algorithm still works
matrix_level_HQ = Log_Luminance.gen_matrix_PixelState(roi_width, roi_height) # high resolution
matrix_level_LQ = Log_Luminance.gen_matrix_PixelState(int(roi_width / divide_matrix_by), int(roi_height / divide_matrix_by)) # low resolution
# Make video
# If set to True, the image created for each batch is appended to an array and
# saved to disk at the end under the name given in nom_video.
# nb_second_de_video is the video duration in seconds; the video is encoded at
# 28 fps, so the recording time is the time needed to acquire
# nb_second_de_video * 28 images in total.
make_video_at_the_end = False
nb_second_de_video = 15
nom_video = 'ahahah'  # name only; the file is written as .avi when the program ends
array_img = []
while not controller.is_done():
controller.run(do_sync)
    events = ev_proc.get_event()  # array of events
events_LQ = Log_Luminance.log_luminance(events, matrix_level_HQ, matrix_level_LQ, divide_matrix_by, (width, height),
(roi_width, roi_height), treshold=1, interpolation=0)
    # This function used to fail and I could not understand WHY AAAAAAAAHHHHH: it works now, but the comment makes me smile.
img_original = ev_proc.get_cut_event_2d_arrays(x0, x1, y0, y1)
img = Gen_Image.create_image_rgb_from_log_luminance(events_LQ, int(roi_width/divide_matrix_by), int(roi_height/divide_matrix_by))
img_original = cv2.resize(img_original, (200, 200))
img = cv2.resize(img, (200, 200))
cv2.imshow("Original", img_original)
cv2.imshow("Log Luminance", img)
    # The two lines below visualize how the level matrices behave, but they consume a lot of resources.
#cv2.imshow("pixelstateHQ", cv2.resize(Gen_Image.create_image_rgb_from_pixel_state(matrix_level_HQ), (400, 400)))
#cv2.imshow("pixelstateLQ", cv2.resize(Gen_Image.create_image_rgb_from_pixel_state(matrix_level_LQ), (400, 400)))
if make_video_at_the_end:
array_img.append(img)
    cv2.waitKey(1)  # never forget this line: without it the images are not displayed
if nb_second_de_video*28 == len(array_img) and make_video_at_the_end:
break
cv2.destroyAllWindows()
if make_video_at_the_end:
Gen_Image.convert_array_of_image_in_video(array_img, nom_video)
| GuillaumeCariou/I3S_Tutorship_Internship | Python/Log_Luminance/Main.py | Main.py | py | 7,685 | python | fr | code | 0 | github-code | 36 |
20157445865 | from collections import defaultdict
from itertools import permutations
class DinnerTable:
def __init__(self, instructions) -> None:
self.ins = instructions
self.happiness_scores = {}
self.guest_list = []
def process_happiness_scores(self):
lines = [line for line in self.ins.split("\n") if line != ""]
for line in lines:
elements = line.split()
name = elements[0].strip()
            pos_neg = 1 if elements[2].strip() == "gain" else -1
score = int(elements[3].strip())
neighbour = elements[-1][:-1]
if name not in self.happiness_scores.keys():
self.happiness_scores[name] = {}
self.happiness_scores[name].update({neighbour: score * pos_neg})
self.guest_list = list(self.happiness_scores.keys())
def add_host_to_happiness_scores(self):
self.happiness_scores["Host"] = {}
self.guest_list = list(self.happiness_scores.keys())
for guest in self.guest_list:
self.happiness_scores["Host"].update({guest: 0})
self.happiness_scores[guest].update({"Host": 0})
def seating_plan_naive(self):
possible_plans = permutations(self.guest_list)
possible_plans = [p + (p[0],) for p in possible_plans]
best_plan = []
best_plan_score = 0
for plan in possible_plans:
happiness = 0
first = plan[0]
for neighbour in plan[1:]:
happiness += self.happiness_scores[first][neighbour]
happiness += self.happiness_scores[neighbour][first]
first = neighbour
if happiness > best_plan_score:
best_plan_score = happiness
best_plan = plan
print(
f"The happiest plan {best_plan} with a happiness score of {best_plan_score}."
)
return best_plan_score
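
    # A minimal optimization sketch (an addition, not part of the original
    # solution): the table is circular, so fixing one guest in place removes
    # rotational duplicates and cuts the search space by a factor of
    # len(self.guest_list); the None sentinel also covers the case where
    # every total is negative, which the 0-initialised best_plan_score
    # above would miss.
    def seating_plan_fixed_first(self):
        first = self.guest_list[0]
        best_score = None
        for rest in permutations(self.guest_list[1:]):
            plan = (first,) + rest + (first,)
            happiness = 0
            for a, b in zip(plan, plan[1:]):
                happiness += self.happiness_scores[a][b]
                happiness += self.happiness_scores[b][a]
            if best_score is None or happiness > best_score:
                best_score = happiness
        return best_score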
if __name__ == "__main__":
with open("input.txt", "r") as f:
instructions = f.read()
dt = DinnerTable(instructions)
dt.process_happiness_scores()
dt.add_host_to_happiness_scores()
dt.seating_plan_naive()
| davidcolton/adventofcode | 2015/day_13/dinner.py | dinner.py | py | 2,243 | python | en | code | 0 | github-code | 36 |
6103056667 | import os
import argparse
import utils
import json
import numpy as np
from sklearn import metrics
from tqdm import tqdm
import torch
from torch.utils.data import DataLoader
import torchvision.transforms as T
from models.dgcnn import DGCNN
from models.pointnet import PointNet, feature_transform_regularizer
import transforms as transforms
from data import PointCloudDataset
def test(args):
current_path = os.path.dirname(args.model_path)
with open(os.path.join(current_path, "settings.txt"), 'r') as f:
settings = json.load(f)
args.exp_name = settings["exp_name"]
args.cuda = torch.cuda.is_available()
if args.cuda:
print(
'Using GPU : ' + str(torch.cuda.current_device()) + ' from ' + str(torch.cuda.device_count()) + ' devices')
torch.cuda.manual_seed(settings["seed"])
else:
print('Using CPU')
device = torch.device("cuda" if args.cuda else "cpu")
numClass = settings["num_classes"]
modelType = settings["model"]
if modelType == "pointnet":
model = PointNet(numClass, emb_dims=settings["emb_dims"], dropout_rate=settings["dropout"], feature_transform=settings["transform_regularization"] > 0.0)
    elif modelType == "dgcnn":
model = DGCNN(numClass, emb_dims=settings["emb_dims"], dropout_rate=settings["dropout"], k=settings["k"])
else:
raise Exception("Not implemented")
model_state_path = os.path.join(current_path, "model_Both_loss.t7")
# load model
model = torch.load(model_state_path)
model.to(device)
model.eval()
# DataLoaders
test_transforms = T.Compose([transforms.Normalize()])
test_real_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["real"], binary_data = False)
test_synthetic_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["synthetic"], binary_data = False)
test_dataset = PointCloudDataset(dataDir = args.data_path, partition='Testing', num_points=1024, transforms = test_transforms, data_type = ["synthetic","real"], binary_data = False)
test_real_loader = DataLoader(test_real_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
test_synthetic_loader = DataLoader(test_synthetic_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
test_loader = DataLoader(test_dataset, num_workers=0, batch_size=args.batch_size, shuffle=False, drop_last=False)
data_settings = [("Real", test_real_loader), ("Synthetic", test_synthetic_loader), ("All", test_loader)]
with torch.no_grad():
for data_setting, data_loader in data_settings:
predicted_labels = []
correct_labels = []
baseDir = os.path.join(current_path, data_setting)
if not os.path.isdir(baseDir):
os.makedirs(baseDir)
incorrectPredDir = os.path.join(baseDir, "incorrect_predictions")
if not os.path.isdir(incorrectPredDir):
os.makedirs(incorrectPredDir)
for data, label in tqdm(data_loader):
data = data.to(device)
data = data.float()
labels = label.to(device).squeeze()
data = data.permute(0, 2, 1)
batch_size = data.size()[0]
if modelType == "pointnet":
output, _, _ = model(data)
else:
output = model(data)
preds = output.max(dim=1)[1]
correct_labels.append(labels.cpu().numpy())
predicted_labels.append(preds.detach().cpu().numpy())
for i in range(len(labels)):
if labels[i] != preds[i]:
temp_data = data[i].permute(1, 0)
num_csvs = len(os.listdir(incorrectPredDir))
np.savetxt(os.path.join(incorrectPredDir, "prediction{}_{}.csv".format(labels[i], num_csvs)), temp_data.cpu().numpy())
correct_labels = np.concatenate(correct_labels)
predicted_labels = np.concatenate(predicted_labels)
cm = metrics.confusion_matrix(correct_labels, predicted_labels)
print(cm)
precision, recall, f1, support = metrics.precision_recall_fscore_support(correct_labels, predicted_labels, average="weighted")
normal_accuracy = metrics.accuracy_score(correct_labels, predicted_labels)
average_accuracy = metrics.balanced_accuracy_score(correct_labels, predicted_labels)
np.savetxt(os.path.join(baseDir, "ConfusionMatrix.txt"), cm)
with open(os.path.join(baseDir, "metrics.txt"), "w") as f:
f.write("Precision: {}\n".format(precision))
f.write("Recall: {}\n".format(recall))
f.write("F1: {}\n".format(f1))
f.write("Support: {}\n".format(support))
f.write("Average: {}\n".format(normal_accuracy))
f.write("Balanced Average: {}\n".format(average_accuracy))
print(precision, recall, f1, support, normal_accuracy, average_accuracy)
if __name__ == '__main__':
# Training settings
parser = argparse.ArgumentParser(description='Settings for Point Cloud Classification Test')
parser.add_argument('--exp_name', type=str, default='', metavar='N',
help='Name of the experiment')
parser.add_argument('--model_path', type=str, default='', metavar='N',
help='Pretrained model path')
parser.add_argument('--data_path', type=str, default='', metavar='N',
help='Dataset path')
parser.add_argument('--batch_size', type=int, default=32, metavar='batch_size',
help='Size of batch. (Default: 32)')
args = parser.parse_args()
test(args) | leoriczhang/3d-pointcloud- | test_cls.py | test_cls.py | py | 6,038 | python | en | code | 0 | github-code | 36 |
41613216897 | import time
# Alternate between the two tasks using yield
def worker1():
# for i in range(n):
while True:
print("in worker1")
yield
time.sleep(1)
def worker2():
# for i in range(n):
while True:
print("in worker2")
yield
time.sleep(1)
if __name__ == '__main__':
    # Calling the generator functions produces generator objects
w1 = worker1()
w2 = worker2()
while True:
next(w1)
next(w2) | ABDM357/python_summary_knowledge_001 | Day01-15 Python基础课程/Day01-15课程与项目/06协程-网络/03-代码/01-使用yield实现协程.py | 01-使用yield实现协程.py | py | 447 | python | en | code | 0 | github-code | 36 |
30085141101 | from typing import TypedDict
from gdbdash.commands import BoolOption, StrOption
from gdbdash.modules import Module
from gdbdash.utils import FileDescriptorOrPath
DashboardOptions = TypedDict(
"DashboardOptions",
{
"text-highlight": StrOption,
"text-secondary": StrOption,
"text-divider": StrOption,
"text-divider-title": StrOption,
"divider-fill-char": StrOption,
"show-divider": BoolOption,
},
)
DashboardModulesDict = dict[FileDescriptorOrPath, list[Module]]
class Dashboard:
def on_order_changed(self) -> None: ...
| JensDll/dotfiles | unix/.config/gdbdash/gdbdash/dashboard.pyi | dashboard.pyi | pyi | 586 | python | en | code | 0 | github-code | 36 |
31551053914 | """ Crie um programa que crie uma matriz de dimensão 3x3 e preencha com valores lidos pelo teclado.
No final, mostre a matriz na tela, com a formatação correta """
lista = [[],[],[]]
for i in range(3):
for j in range(3):
        lista[i].append(int(input(f'Enter a value [{i}][{j}]: ')))
print('\nThe matrix of the entered numbers is: ')
for row in lista:
    for v in row:
        print(v, end=' ')
    print() | ClebersonGarcia05/curso-python | Mundo python 03/Exercícios/Listas/ex086.py | ex086.py | py | 440 | python | pt | code | 0 | github-code | 36 |
31826768248 | # make some tests
import os, glob
from pprint import pprint
from importlib import reload
import designspaceProblems
reload(designspaceProblems)
import designspaceProblems.problems
reload(designspaceProblems.problems)
from designspaceProblems.problems import DesignSpaceProblem, allProblems
from designspaceProblems import DesignSpaceChecker
import ufoProcessor
from fontTools.designspaceLib import SourceDescriptor, InstanceDescriptor, AxisDescriptor, DiscreteAxisDescriptor, RuleDescriptor, processRules
from ufoProcessor import DesignSpaceProcessor, getUFOVersion, getLayer
from fontParts.fontshell import RFont
import fontTools
print("fontTools source:", fontTools.__file__)
print("AxisDescriptor class:", AxisDescriptor)
def printProblems(dc):
for pr in dc.problems:
print(pr)
testedProblems = {}
def showProblems(dc):
global testedProblems
for pr in dc.problems:
key = (pr.category,pr.problem)
if not key in testedProblems:
testedProblems[key] = 0
testedProblems[key] += 1
def showUntested():
global testedProblems
# these problems can't be tested because UFOprocessor already ignores these faults
untestable = [(1,1), (1,2), (1,3), (1,4), (1,5), (1,6), (1,7),
(2, 4), (2,5), (3, 2),
(6, 0), (6, 1), (6, 2), (6, 3),
(4, 5),
]
print("\n\nTested problems")
app = allProblems()
for ap in list(app.keys()):
if ap in testedProblems:
print("✅", ap, app.get(ap))
elif ap in untestable:
print("❔", ap, app.get(ap))
else:
print("❌", ap, app.get(ap))
def makeTests():
path = os.getcwd()
errs = designspaceProblems.problems.allProblems()
# empty designspace
d1 = DesignSpaceProcessor()
tp = os.path.join(path, "empty.designspace")
d1.write(tp)
dc1 = DesignSpaceChecker(tp)
dc1.checkEverything()
showProblems(dc1)
assert (1,0) in dc1.problems # no axes defined
assert (2,0) in dc1.problems # no sources defined
assert (2,7) in dc1.problems # no source on default location
assert (3,10) in dc1.problems # no instances defined
# # malformed file
# d2 = DesignSpaceProcessor()
# tp = os.path.join(path, "malformed_file.designspace")
# d2.write(tp)
# f = open(tp, 'r')
# d2 = f.read()
# f.close()
# d2 += "garbage"*100
# f = open(tp, 'w')
# f.write(d2)
# f.close()
# dc2 = DesignSpaceChecker(tp)
# dc2.checkEverything()
# showProblems(dc2)
# assert (0,0) in dc2.problems # no axes defined
# assert (1,0) in dc2.problems # no axes defined
# assert (2,0) in dc2.problems # no sources defined
# assert (2,7) in dc2.problems # no source on default location
# assert (3,10) in dc2.problems # no instances defined
# # malformed axes
# d3 = DesignSpaceProcessor()
# tp = os.path.join(path, "malformed_axis.designspace")
# a31 = AxisDescriptor()
# a31.name = "snap"
# a31.minimum = 1000
# a31.maximum = 1000
# a31.default = 1000
# a31.tag = "1111"
# d3.addAxis(a31)
# a32 = AxisDescriptor()
# a32.name = "crackle"
# a32.minimum = 0
# a32.maximum = 1000
# a32.default = -1000
# a32.tag = "CRCK"
# d3.addAxis(a32)
# d3.write(tp)
# dc3 = DesignSpaceChecker(tp)
# dc3.checkEverything()
# showProblems(dc3)
# assert (1,9) in dc3.problems # minimum and maximum value are the same
# assert (1,10) in dc3.problems # minimum and maximum value are the same
# assert (2,0) in dc3.problems # no sources defined
# assert (2,7) in dc3.problems # no source on default location
# assert (3,10) in dc3.problems # no instances defined
# designspace with discrete axis
d4 = DesignSpaceProcessor()
tp = os.path.join(path, "discrete_axes.designspace")
# this designspace has 2 axes: one continuous, one discrete
# we'll add a default for discrete crackle=0, but not a discrete crackle=500
a41 = AxisDescriptor()
a41.name = "snap"
a41.minimum = 0
a41.maximum = 1000
a41.default = 0
a41.tag = "SNAP"
d4.addAxis(a41)
a42 = DiscreteAxisDescriptor()
a42.name = "crackle"
a42.default = 0
a42.values = [0, 500]
a42.tag = "CRCK"
print(a42.values)
d4.addAxis(a42)
s41 = SourceDescriptor()
s41.location = dict(snap=0, crackle=0)
s41.path = os.path.join(path, 'masters','geometryMaster1.ufo')
d4.addSource(s41)
s42 = SourceDescriptor()
s42.location = dict(snap=0, crackle=1000)
s42.path = os.path.join(path, 'masters','geometryMaster2.ufo')
d4.addSource(s42)
d4.write(tp)
dc4 = DesignSpaceChecker(tp)
dc4.checkEverything()
printProblems(dc4)
showProblems(dc4)
#print("checkLocationForIllegalDiscreteValues", dc4.checkLocationForIllegalDiscreteValues(dict(snap=0,crackle=1000)))
assert (2,13) in dc4.problems
#assert (1,0) in dc.problems # no axes defined
#assert (2,0) in dc.problems # no sources defined
#assert (2,7) in dc.problems # no source on default location
#assert (3,10) in dc.problems # no instances defined
# ok axis, a source, but no default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "no_default.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=500)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,7) in dc.problems # no source on default location
# # ok axis, multiple sources on default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "multiple_defaults.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=0)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,8) in dc.problems # multiple sources on default location
# assert (2,1) not in dc.problems # not: source UFO missing
# # ok axis, source without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-without-location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=(10,11))
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,10) in dc.problems # source location is anisotropic
# # ok space, no kerning in default
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "no-kerning-in-default.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1_no_kerning.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (5,1) in dc.problems # ok axis, source without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-without-location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=(10,11))
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,10) in dc.problems # source location is anisotropic
# # ok space, missing UFO
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "source-ufo-missing.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# a2 = AxisDescriptor()
# a2.name = "pop"
# a2.minimum = 0
# a2.maximum = 1000
# a2.default = 0
# a2.tag = "pop_"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# s2 = SourceDescriptor()
# s2.location = dict(snap=500)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# s2.layerName = "missing_layer"
# d.addSource(s2)
# s3 = SourceDescriptor()
# s3.location = dict(snap=1000)
# s3.path = os.path.join(path, 'masters','geometryMaster_missing.ufo')
# d.addSource(s3)
# d.write(tp)
# dc = DesignSpaceChecker(tp)
# dc.checkEverything()
# showProblems(dc)
# assert (2,1) in dc.problems # source location is anisotropic
# assert (2,3) in dc.problems # source layer missing
    # # multiple sources in same location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "multiple_sources_on_same_location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# for i in range(3):
# s2 = SourceDescriptor()
# s2.location = dict(snap=1500)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# # instance without location
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "instance_without_location.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# jd = InstanceDescriptor()
# jd.familyName = None
# jd.styleName = None
# jd.location = None
# jd.path = None
# d.addInstance(jd)
# for i in range(3):
# jd = InstanceDescriptor()
# jd.familyName = "Duped"
# jd.styleName = "Duped"
# jd.location = dict(snap=666)
# jd.path = "some/path.ufo"
# d.addInstance(jd)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# #assert (3,1) in dc.problems # instance location missing
# assert (3,4) in dc.problems # multiple instances on location*
# # mapping tests
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "axismapping.designspace")
# a1 = AxisDescriptor()
# a1.name = "ok_axis"
# a1.minimum = 200
# a1.maximum = 800
# a1.default = 200
# a1.tag = "ax01"
# a1.map = [(200,0), (500, 500), (800, 1000)] # map is ok
# d.addAxis(a1)
# a2 = AxisDescriptor()
# a2.name = "input_regression_axis"
# a2.minimum = 200
# a2.maximum = 800
# a2.default = 500
# a2.tag = "ax02"
# a2.map = [(200,100), (190, 150), (800, 200)] # input regresses ok, output ok
# d.addAxis(a2)
# a3 = AxisDescriptor()
# a3.name = "output_regression_axis"
# a3.minimum = 500
# a3.maximum = 800
# a3.default = 600
# a3.tag = "ax03"
# a3.map = [(500,0), (600, 500), (700, 700), (800, 690)] # input progresses ok, output regresses
# d.addAxis(a3)
# a4 = AxisDescriptor()
# a4.name = "mixedup_extremes_axis"
# a4.minimum = 1000
# a4.maximum = 800
# a4.default = 0
# a4.tag = "ax04"
# d.addAxis(a4)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(ok_axis=a1.default, output_regression_axis=a3.default)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(ok_axis=a1.default, output_regression_axis=a3.maximum)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# assert (1,11) in dc.problems
# assert (1,12) in dc.problems
# showProblems(dc)
# # ok axis, ok sources
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "viable.designspace")
# a1 = AxisDescriptor()
# a1.name = "snap"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "snap"
# d.addAxis(a1)
# s1 = SourceDescriptor()
# #s1.name = "master.1"
# s1.location = dict(snap=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# #s2.name = "master.2"
# s2 = SourceDescriptor()
# s2.location = dict(snap=1000)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# s3 = SourceDescriptor()
# s3.location = dict(snap=500)
# s3.path = os.path.join(path, 'masters','geometryMaster3.ufo') # bad kerning
# d.addSource(s3)
# jd = InstanceDescriptor()
# jd.familyName = "TestFamily"
# jd.styleName = "TestStyle"
# jd.location = dict(snap=500)
# jd.path = os.path.join(path, 'instances','generatedInstance.ufo')
# d.addInstance(jd)
# jd = InstanceDescriptor()
# jd.familyName = None
# jd.styleName = None
# jd.location = dict(snap=600)
# jd.path = os.path.join(path, 'instances','generatedInstance2.ufo')
# d.addInstance(jd)
# jd = InstanceDescriptor()
# jd.familyName = "Aa"
# jd.styleName = "Bb"
# jd.location = dict(snap=600)
# jd.path = None
# d.addInstance(jd)
# r1 = RuleDescriptor()
# r1.name = "rule_no_subs"
# cd1 = dict(name='lalala', minimum=100, maximum=200)
# cd2 = dict(name='snap', minimum=10000, maximum=2000)
# cd3 = dict(name='snap', minimum=10000, maximum=None) # problem 7,10
# cd4 = dict(name='snap', minimum=None, maximum=10000) # problem 7,11
# r1.conditionSets.append([cd1, cd2, cd3, cd4])
# d.addRule(r1)
# r2 = RuleDescriptor()
# r2.name = "rule_no_conditionset"
# r2.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r2)
# r3 = RuleDescriptor()
# r3.name = "rule_values_the_same"
# cd1 = dict(name='samesees_1', minimum=200, maximum=200)
# r1.conditionSets.append([cd1, cd1, cd1])
# r3.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r3)
# # data for 7, 9 rule without a name
# r4 = RuleDescriptor()
# r4.name = None
# cd1 = dict(name='samesees_2', minimum=200, maximum=200)
# r1.conditionSets.append([cd1, cd1, cd1])
# r4.subs.append(('glyphFour', 'glyphFour'))
# d.addRule(r4)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
# showProblems(dc)
# for p in dc.problems:
# print(p)
# assert not dc.hasStructuralProblems() # minimum working designspace, ready for fonts
# assert (3,6) in dc.problems # missing family name
# assert (3,7) in dc.problems # missing style name
# assert (4,1) in dc.problems # components
# assert (4,2) in dc.problems # default glyph is empty, glyphName
# assert (4,7) in dc.problems # default glyph is empty, glyphName
# assert (4,9) in dc.problems # incompatible constructions for glyph
# assert (5,0) in dc.problems # kerning: no kerning in source
# assert (5,6) in dc.problems # no kerning groups in source
# assert (6,4) in dc.problems # source font unitsPerEm value different from default unitsPerEm
# assert (7,2) in dc.problems # source and destination glyphs the same
    # assert (7,3) in dc.problems # no substitution glyphs defined
# assert (7,4) in dc.problems # no conditionset defined
# assert (7,5) in dc.problems # condition values on unknown axis
# assert (7,6) in dc.problems # condition values out of axis bounds
# #print(tp)
# for p in dc.problems:
# if p == (4,9):
# print(p)
# # badly populated designspace
# # this system does not have on-axis masters
# # but a couple of non-aligned off-axis masters.
# # Varlib will complain
# d = DesignSpaceProcessor()
# tp = os.path.join(path, "badly_populated.designspace")
# a1 = AxisDescriptor()
# a1.name = "weight"
# a1.minimum = 0
# a1.maximum = 1000
# a1.default = 0
# a1.tag = "wght"
# d.addAxis(a1)
# a2.name = "width"
# a2.minimum = -500
# a2.maximum = 500
# a2.default = 0
# a2.tag = "wdth"
# d.addAxis(a2)
# a3.name = "optical"
# a3.minimum = 0
# a3.maximum = 1000
# a3.default = 0
# a3.tag = "opsz"
# d.addAxis(a3)
# # neutral
# s1 = SourceDescriptor()
# s1.location = dict(weight=0, width=0, optical=0)
# s1.path = os.path.join(path, 'masters','geometryMaster1.ufo')
# d.addSource(s1)
# # offaxis master 1
# s2 = SourceDescriptor()
# s2.location = dict(width=-500, weight=1000, optical=0)
# s2.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s2)
# # offaxis master 2
# s3 = SourceDescriptor()
# s3.location = dict(width=0, weight=1000, optical=1000)
# s3.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s3)
# # offaxis master 2
# s4 = SourceDescriptor()
# s4.location = dict(width=500, weight=1000, optical=1000)
# s4.path = os.path.join(path, 'masters','geometryMaster2.ufo')
# d.addSource(s4)
# d.write(tp)
# dc = DesignSpaceChecker(d)
# dc.checkEverything()
#showProblems(dc)
showUntested()
def makeEdit(path, find, replace):
f = open(path, 'r')
t = f.read()
f.close()
t = t.replace(find, replace)
f = open(path, 'w')
f.write(t)
f.close()
makeTests()
| LettError/DesignspaceProblems | tests/makeTests.py | makeTests.py | py | 19,721 | python | en | code | 18 | github-code | 36 |
7939019332 | from itertools import combinations
def answer(l):
l.sort(reverse=True)
for i in range(len(l), 0, -1):
        for c in combinations(l, i):  # fixed: only the name `combinations` was imported above
if sum(c) % 3 == 0:
return int(''.join(map(str, c)))
return 0
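
# A faster sketch (an added alternative, not part of the original solution):
# a number is divisible by 3 iff its digit sum is, so instead of scanning all
# combinations it suffices to drop the fewest, smallest digits whose residues
# mod 3 cancel the total.
def answer_digit_sum(l):
    digits = sorted(l)  # ascending, so digits[0] is the smallest
    r = sum(digits) % 3
    if r:
        same = [d for d in digits if d % 3 == r]
        other = [d for d in digits if d % 3 == (3 - r) % 3]
        if same:
            digits.remove(same[0])      # drop one digit with residue r
        elif len(other) >= 2:
            digits.remove(other[0])     # or two digits with residue 3 - r
            digits.remove(other[1])
        else:
            return 0
    if not digits:
        return 0
    return int(''.join(map(str, sorted(digits, reverse=True))))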
| deepspacepirate/googlefoobar | L2-please_pass_the_coded_messages.py | L2-please_pass_the_coded_messages.py | py | 235 | python | en | code | 0 | github-code | 36 |
37350691317 | from ase import Atom, Atoms
from ase.units import Bohr
from gpaw import GPAW
from gpaw.test import equal
def test_xc_nonselfconsistentLDA(in_tmp_dir):
a = 7.5 * Bohr
n = 16
atoms = Atoms([Atom('He', (0.0, 0.0, 0.0))], cell=(a, a, a), pbc=True)
calc = GPAW(gpts=(n, n, n), nbands=1, xc='LDA')
atoms.calc = calc
e1 = atoms.get_potential_energy()
e1ref = calc.get_reference_energy()
de12 = calc.get_xc_difference({'name': 'PBE', 'stencil': 1})
calc = GPAW(gpts=(n, n, n), nbands=1, xc={'name': 'PBE', 'stencil': 1})
atoms.calc = calc
e2 = atoms.get_potential_energy()
e2ref = calc.get_reference_energy()
de21 = calc.get_xc_difference('LDA')
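    # Explanatory note (added): get_xc_difference returns the
    # non-self-consistent XC energy change, so E(LDA) + dE(LDA->PBE) should
    # match E(PBE) within a small tolerance, and vice versa -- exactly what
    # the prints and equal() checks below verify.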
print(e1ref + e1 + de12, e2ref + e2)
print(e1ref + e1, e2ref + e2 + de21)
print(de12, de21)
equal(e1ref + e1 + de12, e2ref + e2, 0.02)
equal(e1ref + e1, e2ref + e2 + de21, 0.025)
calc.write('PBE.gpw')
de21b = GPAW('PBE.gpw').get_xc_difference('LDA')
print(de21, de21b)
equal(de21, de21b, 9e-8)
energy_tolerance = 0.0007
equal(e1, -0.0961003634812, energy_tolerance) # svnversion 5252
equal(e2, -0.0790249564625, energy_tolerance) # svnversion 5252
| f-fathurrahman/ffr-learns-gpaw | my_gpaw/test/xc/test_nonselfconsistentLDA.py | test_nonselfconsistentLDA.py | py | 1,193 | python | en | code | 0 | github-code | 36 |
28521392927 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from urbansim.functions import attribute_label
from numpy import ones, array, float32
class ln_access_to_workplace_from_residences(Variable):
"""(SUM(Jobs(i) * exp(logsum_DDD(i to this_zone)), for i=zone_1...zone_n), for
the income type (DDD) of this house) / number_of_DDD_types
Although, the above fomula is really calculated in zones and passed through gridcell
to household.
"""
gc_ln_access_to_workplace_from_residences = "gc_ln_access_to_workplace_from_residences"
hh_grid_id = "grid_id"
def dependencies(self):
return [attribute_label("gridcell", self.gc_ln_access_to_workplace_from_residences),
attribute_label("household", self.hh_grid_id)
]
def compute(self, dataset_pool):
return self.get_dataset().get_2d_dataset_attribute('gc_ln_access_to_workplace_from_residences')
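
# Explanatory note (added): get_2d_dataset_attribute broadcasts the
# per-gridcell accessibility to every (household, gridcell) pair, which is why
# each row of the expected matrix in the test below is identical.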
from opus_core.tests import opus_unittest
from urbansim.variable_test_toolbox import VariableTestToolbox
from numpy import ma
class Tests(opus_unittest.OpusTestCase):
variable_name = "urbansim.household_x_gridcell.ln_access_to_workplace_from_residences"
def test_my_inputs(self):
values = VariableTestToolbox().compute_variable(self.variable_name,
{"household":{
"grid_id": array([1, 2, 3, 1])},
"gridcell":{
"gc_ln_access_to_workplace_from_residences": array([1.1, 11.1, 111.1])}},
dataset = "household_x_gridcell")
should_be = array([[1.1, 11.1, 111.1],
[1.1, 11.1, 111.1],
[1.1, 11.1, 111.1],
[1.1, 11.1, 111.1]])
self.assertEqual(ma.allclose(values, should_be, rtol=1e-4),
True, msg = "Error in " + self.variable_name)
if __name__ == "__main__":
opus_unittest.main() | psrc/urbansim | urbansim/household_x_gridcell/ln_access_to_workplace_from_residences.py | ln_access_to_workplace_from_residences.py | py | 2,137 | python | en | code | 4 | github-code | 36 |
11591514846 | def main():
f = open("input/day1.txt", "r")
    counter = 0
    prev = None  # sentinel for the first reading (0 is a valid depth)
    for x in f.readlines():
        x = int(x.rstrip())
        if prev is not None and x > prev:
            counter += 1
        prev = x
    print(counter)
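
# An equivalent one-pass sketch (an added alternative; assumes `depths` is the
# parsed list of integer readings):
#
#   depths = [int(line) for line in open("input/day1.txt")]
#   counter = sum(b > a for a, b in zip(depths, depths[1:]))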
if __name__ == "__main__":
main() | Tyv217/AdventOfCode2021 | day1a.py | day1a.py | py | 264 | python | en | code | 0 | github-code | 36 |
42736810973 | # -*- coding: utf-8 -*-
"""
Created on Sun May 2 10:11:50 2021
@author: Simon
"""
# Imports
import matplotlib.pyplot
import csv
from bs4 import BeautifulSoup
import requests
import particlemove # Associated file used to move particles
import tkinter as tk
import time
# Model variables
num_of_iterations = 200 # This no. allows all particles to land in high wind
num_of_particles = 0 # This creates the object it is updated in the GUI
wind_speed = 0 # This is updated in the GUI
# Creating lists
particles = [] #Used to hold particle data
citydata = [] #Used to hold bombsite and then particle landing position data
environment = [] #Used to hold a digital elevation model
# Start a timer to time the code
start = time.time() #Used for measuring how long it takes at the end
# Setting up GUI, this part of the code runs before main() at the bottom
# The overall strucutre and some elements of the GUI were developed based
# on tutorials under the name The New Boston found on You Tube (see references)
# This is the routine that runs when the user clicks the run button
def routine():
"""The function called by pressing Run in the GUI, collects user inputs"""
global num_of_particles # Used in other functions so made global
num_of_particles = int(my_entry.get())
print("The number of particles is", num_of_particles)
global wind_speed
wind_speed = int(scale_widget.get())
print("The wind speed is ", wind_speed)
global topography # Used to select the type of surface or basemap
topography = str(listbox_widget.get(tk.ANCHOR))
print("The model uses a", topography)
# The functions below print help instructions when selected
def particlehelp():
"""Prints some help relating to particle numbers when requested in GUI"""
print("HELP")
print("The number of particles chosen will influence the speed")
print("5000 particles will typically take about 5 seconds")
print("If you are interested in rare events where particles land far from")
print("the main landing area then you may want to use more particles")
def windspeedhelp():
"""Prints some help relating to windspeed when requested in the GUI"""
print("HELP")
print("The higher windspeed causes more turbulence")
print("Resulting in particles being more dispersed")
print("The wind direction is fixed as Easterly")
print("In strong wind (above 6) the particles are blown further East")
def surfacehelp():
"""Prints some help relating to the surface choice when requested in GUI"""
print("HELP")
print("The basic model assumes a uniform flat surface or flat plain")
print("The digital elevation model uses a contoured surface")
print("The contoured surface used slopes down in an Easterly direction")
print("This means that particles travel further using the DEM")
# Set up the GUI window and size
root = tk.Tk()
root.geometry("500x300")
root.title("Bacterial Bomb") #Add a title to the GUI window
# Add a menu with help function
# Source: https://www.youtube.com/watch?v=PSm-tq5M-Dc
menu1 = tk.Menu(root)
root.config(menu=menu1)
subMenu = tk.Menu(menu1)
menu1.add_cascade(label="Help", menu=subMenu)
subMenu.add_command(label="Particle choice", command=particlehelp)
subMenu.add_command(label="Windspeed", command=windspeedhelp)
subMenu.add_command(label= "Surface", command=surfacehelp)
# Add a button used for running the program, routine run when button clicked
button1 = tk.Button(root, text ="Run", command=routine)
button1.grid(row=6, column=0)
# Add a label above the entry box
label2 = tk.Label(root, text="Enter number of particles: 5000 recommended")
label2.grid(row=2, column=4, padx=5, pady=5)
# Add an entry box used for number of particles
my_entry = tk.Entry(root, width=15)
my_entry.grid(row=3, column=4)
# Add a label above the scale widget
label3 = tk.Label(root, text="Enter wind speed (beaufort scale)")
label3.grid(row=4, column=4)
# Add a scale widget for windspeed
# Source: dummies.com/programming/python/using-tkinter-widgets-in-python/
scale_widget = tk.Scale(root, from_=0, to=12, orient=tk.HORIZONTAL)
scale_widget.set(4)
scale_widget.grid(row=5, column=4)
# Add a label above the scale widget
label4 = tk.Label(root, text="Enter the type of surface")
label4.grid(row=7, column=4)
# Add a drop down box for choosing the type of surface
listbox_entries = ["Flat plain", "Digital elevation model"]
listbox_widget = tk.Listbox(root, height=2, width=25)
for entry in listbox_entries:
listbox_widget.insert(tk.END, entry)
listbox_widget.grid(row=8, column=4, padx=5, pady=5)
textbox = tk.Text(root,height=4, width=20, padx=5,
pady=5, font=("Helvetica",10))
textbox.insert(
tk.END,"INSTRUCTIONS\nChoose parameters\nPress Run\nClose this window\n")
textbox.grid(row=0, column=0, padx=5, pady=5)
root.mainloop()# GUI window keeps running until it is closed
# The following functions are all called in main at the bottom. They are listed
# in order that they are called.
# Scraping data from the web to identify bomb location (need to be online)
# Website address line is too long but could not make it work splitting it up
def getdata():
"""Scrapes file with the bomb site from the web and saves it as citydata"""
city = []
url ="http://www.geog.leeds.ac.uk/courses/computing/study/core-python-odl2/assessment2/wind.raster"
html_content = requests.get(url).text
soup = BeautifulSoup(html_content, "lxml")
city = soup.find_all("p")
    # Save the BeautifulSoup result set as a text file to access the individual numbers
    # Saving to a file and then reading it back is rather inefficient
    # Need to find a better way to access the data using BeautifulSoup
with open('city.txt', 'w') as testfile:
for row in city:
testfile.write(' '.join([str(a) for a in row]) + '\n')
# This code opens the text file and defines the reader, the new line separator
# in the dataset, and the format of the data with one decimal place.
# Code mainly copied from agent based model work.
    f = open('city.txt', newline='\n')
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader: #Fill the list with 2D data row by row
citydata.append(row)
del citydata[300] # Deletes a fragment of html syntax
f.close()
#print(citydata[0]) #Testing prints
#print(len(citydata))#Expecting 300 items, but gives 301
#print(citydata[300]) #Check what is the last item to make 301
#del citydata[300] #Deletes a fragment of html syntax
#print(len(citydata)) #We now have a list with data for a 300x300 frame
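    # A possible direct-parse sketch (an untested assumption about the page
    # layout: each <p> element holds one whitespace-separated row of numbers)
    # that would avoid the intermediate city.txt round trip:
    #
    #   for p in soup.find_all("p"):
    #       row = [float(v) for v in p.get_text().split()]
    #       if row:
    #           citydata.append(row)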
# This code reads an environment data file to use as a DEM basemap.
# Then, if chosen, instead of landing on flat surface particles land on a DEM.
# This code is copied from agent based model work.
def getenvironment():
"""Reads a file with a contoured surface and saves it as environment"""
    file = open('in.txt', newline='')
reader = csv.reader(file, quoting=csv.QUOTE_NONNUMERIC)
for row in reader: # Fill the list with 2D data row by row
environment.append(row)
file.close()
# Find height of land at bomb site
# Particle assumed to be released from height of building + height of land
#print(environment[150][50]) # Land is 200m at bomb site
# Plot the data for initial exploration
#matplotlib.pyplot.imshow(citydata)
#matplotlib.pyplot.axis([0,299,0,299])
#matplotlib.pyplot.show() # Appears to be data around x50 y150
# This code identifies the bomb site
def findbomb():
"""Indentifies the x and y coordinates of the bomb site"""
for i in range (len(citydata)):
for j in range (len(citydata[i])):
if citydata[i][j]>0:
#print(citydata[i][j])
global xb
global yb
xb = j # The coordinates of the bomb
yb = i
# So now we know that there is a single bombsite at location x50,y150.
# It is marked by the number 255, whilst all other cells have zero
# This function creates particles used in function below
# Assume a human stands on the building to release particles 1m above roof
# If "Flat plain" is selected particle height is 75m plus 1m = 76m
# If "DEM" is selected particle height is 200m + 76m = 276m
def createparticles():
"""Creates the number of particles specified in the GUI"""
if topography == "Digital elevation model": #Selected in GUI
z = 276 # The elevation in the DEM at the bomb site is 200m
else:
z = 76
for i in range(num_of_particles):
x = xb
y = yb
ws = wind_speed
particles.append(particlemove.Particle (x, y, z, ws, environment))
#print(particles[i]) # Used for testing
#print(particles[0].x)
# This function iterates the particles through methods in particlemove.py
def iterateparticles():
"""Iterates particles through the move methods in particlemove.py"""
for j in range(num_of_iterations):
#print("Iteration")
for i in range(num_of_particles):
#print("Particle moving")
particles[i].zmove() # Moves particles up or down
particles[i].landing() # Considers if the particle has landed
particles[i].xymove() # Moves particles x or y coordinates
#for i in range(num_of_particles):
#print(particles[i])
# Plot the data as a density map.
# Firstly record the number of particles in each cell of citydata
# Increment the citydata file for each particle landing
# Then plot this data as a density map
# Two mapping options based on Flat plain or DEM selection
def plotdata():
"""Records coordinates of each landing particle and plots a density map"""
#print("Plotting data")
for i in range(num_of_particles):
citydata[particles[i].y][particles[i].x] += 1 # Increment per particle
    citydata[yb][xb] -= 255  # Remove the bomb-site marker so only particle counts remain
#for i in range(len(citydata)):
#for j in range(len(citydata[i])):
#if citydata[i][j]>60: #Used to examine the upper range of data
#print("x ",i,"y ",j,"number ", citydata[i][j])
# If the user has chosen a Flat plain in the GUI the topography = Flat plain
if topography == "Flat plain":
#print("Flat plain")
# Vary the max in line below to see broad range or high central points
matplotlib.pyplot.imshow(citydata, vmin=0,vmax=40)
matplotlib.pyplot.colorbar(label="Particles")
matplotlib.pyplot.title("Map showing distribution of particles",
fontdict=None, loc=None, pad = None, y = None)
matplotlib.pyplot.text(45, 80, s="X marks the bomb site", fontsize=7)
matplotlib.pyplot.text(
45, 76,s="White dots mark cells where single particles landed",
fontsize=7)
matplotlib.pyplot.axis([45, 200, 100, 200])
matplotlib.pyplot.scatter(50, 150, marker="x", linewidth=3, c="w")
# Lines below add white dots where there is a single particle
for i in range(len(citydata)):
for j in range(len(citydata[i])):
if citydata[i][j] == 1:
matplotlib.pyplot.scatter(j, i, s=0.3, c="w")
matplotlib.pyplot.show()
else: #If DEM has been chosen
#print("Digital elevation model")
matplotlib.pyplot.contourf(environment)
matplotlib.pyplot.colorbar(label="Elevation")
matplotlib.pyplot.title(
"Map showing distribution of particles",
fontdict=None, loc=None, pad=None, y=None)
        # Next two lines are too long; splitting them made the plot look poor
matplotlib.pyplot.text(45, 66, s="X marks the bomb site, White dots mark cells where single particles landed",fontsize=7)
matplotlib.pyplot.text(45, 62, s="Pink dots mark cells where 2 to 15 particles landed, Red dots mark cells where more than 15 landed", fontsize=7)
matplotlib.pyplot.axis([45, 300, 80, 220])
matplotlib.pyplot.scatter(50, 150, marker="x", linewidth=3, c="w")
# Code below creates a scatter plot showing different intensities
for i in range(len(citydata)):
for j in range(len(citydata[i])):
if citydata[i][j] > 15:
matplotlib.pyplot.scatter(j, i, s=0.3, c="r")
elif citydata[i][j] > 1 <16:
matplotlib.pyplot.scatter(j, i, s=0.3, c="tab:pink")
elif citydata[i][j] == 1:
matplotlib.pyplot.scatter(j, i, s=0.3, c="w")
matplotlib.pyplot.show()
# Save the density map to a text file (Need to eliminate decimal place)
def savedata():
"""Saves the landed particle coordinates into a text file, citydata.txt"""
with open('citydata.txt', 'w') as testfile:
for row in citydata:
testfile.write(' '.join([str(a) for a in row]) + '\n')
# This is the main function that organises and calls the other functions
def main():
"""Runs the main functions and times the code"""
mainstart = time.time() #For timing the main program
getdata()
findbomb()
getenvironment()
createparticles()
iterateparticles()
plotdata()
savedata()
mainend = time.time()
time_elapsed = mainend-mainstart
print("TIMING")
print ("Time elapsed", "%.4f" % time_elapsed,"seconds")
print("End, file saved to citydata.txt")
main()
| simonhp66/bacterialbomb | bacterialbomb.py | bacterialbomb.py | py | 13,564 | python | en | code | 0 | github-code | 36 |
27895854947 | from django.db import models
from django.conf import settings
from channels.layers import get_channel_layer
from asgiref.sync import async_to_sync
import uuid
channel_layer = get_channel_layer()
class MessageManager(models.Manager):
def get_pending_messages(self, user):
pending_messages_qs = user.pending_messages.order_by('timestamp')
for message in pending_messages_qs:
message.remove_user_from_pending(user)
return pending_messages_qs
def mark_room_as_read(self, user, room):
unread_messages_qs = user.unread_messages.filter(room=room)
for message in unread_messages_qs:
message.mark_as_read(user)
return unread_messages_qs
class Message(models.Model):
author = models.ForeignKey(
settings.AUTH_USER_MODEL,
on_delete=models.CASCADE)
room = models.ForeignKey(
'Room',
related_name='messages',
on_delete=models.CASCADE)
body = models.TextField(max_length=500, default='')
timestamp = models.DateTimeField(auto_now_add=True)
pending_reception = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='pending_messages')
pending_read = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='unread_messages')
front_key = models.UUIDField(
verbose_name="frontend key",
default=uuid.uuid4,
unique=True)
objects = MessageManager()
class Meta:
verbose_name = "Mensaje"
verbose_name_plural = "Mensajes"
def __str__(self):
return self.body
def signal_to_room(self, message, data={}):
for participant in self.room.participants.all():
async_to_sync(channel_layer.group_send)(
f"group_general_user_{participant.id}", {
"type": "chat_message",
"message": message,
'data': data
})
def remove_user_from_pending(self, user):
if self.pending_reception.filter(id=user.id).exists():
self.pending_reception.remove(user)
# If there are no more pending then signal
if not self.pending_reception.exists():
self.signal_to_room('update_message', {
'message_id': self.id,
'kind': 'all_received'
})
def mark_as_read(self, user):
if self.pending_read.filter(id=user.id).exists():
self.pending_read.remove(user)
# If there are no more pending then signal
if not self.pending_read.exists():
self.signal_to_room('update_message', {
'message_id': self.id,
'kind': 'all_read'
})
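
# A minimal usage sketch (an assumption about the intended call sites, e.g. a
# Channels consumer): deliver queued messages when a user connects, and clear
# unread counts when a room is opened. Each manager method iterates per
# message so every message can emit its own "all_received"/"all_read" signal:
#
#   pending = Message.objects.get_pending_messages(user)
#   Message.objects.mark_room_as_read(user, room)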
class Room(models.Model):
class RoomKind(models.IntegerChoices):
PRIVATE = 1
GROUP = 2
group_name = models.CharField(max_length=255, blank=True, null=True)
kind = models.IntegerField(choices=RoomKind.choices)
participants = models.ManyToManyField(
settings.AUTH_USER_MODEL,
related_name='rooms')
created_at = models.DateTimeField(
verbose_name='Creation Date',
auto_now_add=True)
last_activity = models.DateTimeField(
verbose_name='Last activity date',
auto_now=True)
class Meta:
verbose_name = "Sala"
verbose_name_plural = "Salas"
def signal_to_room(self, message, data={}):
for participant in self.participants.all():
async_to_sync(channel_layer.group_send)(
f"group_general_user_{participant.id}", {
"type": "chat_message",
"message": message,
'data': data
})
| stgoddv/whatsapp-clone-django-vuejs | djchat/chat/models.py | models.py | py | 3,772 | python | en | code | 10 | github-code | 36 |
9204282514 | import sys
import os
import time
import traceback
import numpy as np
import torch
import torchvision
import torch.nn as nn
import torch.nn.init as init
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as transforms
from torch.autograd import Variable
from tqdm import tqdm
#conda install tqdm
from collections import OrderedDict
sys.path.append('../')
from SOL4Py.torch.ZTorchModel import ZTorchModel
from SOL4Py.torch.ZTorchEpochChangeNotifier import ZTorchEpochChangeNotifier
from SOL4Py.torch.ZTorchModelCheckPoint import ZTorchModelCheckPoint
##
# ZTorchSimpleModel
class ZTorchSimpleModel(ZTorchModel):
#
# Constructor
def __init__(self, image_size, n_classes, model_filename):
super(ZTorchSimpleModel, self).__init__(image_size, n_classes, model_filename)
self.n_classes = n_classes
self.image_size = image_size;
ch, h, w = image_size
print("ch:{} h:{} w:{}".format(ch, h, w))
self.model_filename = model_filename
# The following is based on AlexNet
self.features = nn.Sequential(
nn.Conv2d(ch, 64, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
#nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.Conv2d(384, h, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(h, h, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2),
)
self.n_features = 256 * 4 * 4
if h == 32:
self.n_features = h * 4 * 4 # 512
if h == 64:
self.n_features = h * 8 * 8 # 4096
if h == 96:
self.n_features = h * 9216 # 884736
if h == 128:
self.n_features = h * h * h # 2097152
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(self.n_features, self.n_features),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(self.n_features, self.n_features),
nn.ReLU(inplace=True),
nn.Linear(self.n_features, n_classes)
)
def forward(self, input):
output = self.features(input)
output = output.view(output.size(0), self.n_features)
output = self.classifier(output)
return output
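
# A minimal usage sketch (an assumption: CIFAR-10-like 3x32x32 inputs, for
# which self.n_features resolves to 32 * 4 * 4 = 512 after the three
# stride-2 poolings):
#
#   model = ZTorchSimpleModel((3, 32, 32), n_classes=10, model_filename="m.pt")
#   logits = model(torch.randn(8, 3, 32, 32))   # -> shape (8, 10)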
| sarah-antillia/SOL4Py_V4 | SOL4Py/torch/ZTorchSimpleModel.py | ZTorchSimpleModel.py | py | 2,551 | python | en | code | 0 | github-code | 36 |
21142071947 | from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.dense.linear import Linear
from torch_geometric.typing import Adj, OptPairTensor, OptTensor, Size
from torch_geometric.utils.repeat import repeat
import torch
from torch_sparse import SparseTensor
from torch import Tensor, nn
from torch.nn import Parameter
from cutlass import cutlass
import math
import numpy as np
from typing import Any
from typing import List, Tuple, Union
def uniform(size: int, value: Any):
if isinstance(value, Tensor):
bound = 1.0 / math.sqrt(size)
value.data.uniform_(-bound, bound)
else:
for v in value.parameters() if hasattr(value, 'parameters') else []:
uniform(size, v)
for v in value.buffers() if hasattr(value, 'buffers') else []:
uniform(size, v)
def constant(value: Any, fill_value: float):
if isinstance(value, Tensor):
value.data.fill_(fill_value)
else:
for v in value.parameters() if hasattr(value, 'parameters') else []:
constant(v, fill_value)
for v in value.buffers() if hasattr(value, 'buffers') else []:
constant(v, fill_value)
def zeros(value: Any):
constant(value, 0.)
basestring = (str, bytes)
def is_list_of_strings(lst):
if lst and isinstance(lst, list):
return all(isinstance(elem, basestring) for elem in lst)
else:
return False
from cutlass import *
import scipy.optimize
MCache = None
def optimizeWeights2D(weights, basis, periodicity, nmc = 32 * 1024, targetIntegral = 1, windowFn = None, verbose = False):
global MCache
M = None
numWeights = weights.shape[0] * weights.shape[1]
# print(weights.shape, numWeights)
normalizedWeights = (weights - torch.sum(weights) / weights.numel())/torch.std(weights)
if not MCache is None:
cfg, M = MCache
w,b,n,p,wfn = cfg
if not(w == weights.shape and np.all(b == basis) and n == nmc and np.all(p ==periodicity) and wfn == windowFn):
M = None
# else:
# print('no cache')
if M is None:
r = torch.sqrt(torch.rand(size=(nmc,1)).to(weights.device).type(torch.float32))
theta = torch.rand(size=(nmc,1)).to(weights.device).type(torch.float32) *2 * np.pi
x = r * torch.cos(theta)
y = r * torch.sin(theta)
u = evalBasisFunction(weights.shape[0], x.T, which = basis[0], periodic = periodicity[0])[0,:].mT
v = evalBasisFunction(weights.shape[1], y.T, which = basis[1], periodic = periodicity[1])[0,:].mT
# print('u', u.shape, u)
# print('v', v.shape, v)
window = weights.new_ones(x.shape[0]) if windowFn is None else windowFn(torch.sqrt(x**2 + y**2))[:,0]
nuv = torch.einsum('nu, nv -> nuv', u, v)
nuv = nuv * window[:,None, None]
# print('nuv', nuv.shape, nuv)
M = np.pi * torch.sum(nuv, dim = 0).flatten().detach().cpu().numpy() / nmc
# print('M', M.shape, M)
MCache = ((weights.shape, basis, nmc, periodicity, windowFn), M)
w = normalizedWeights.flatten().detach().cpu().numpy()
eps = 1e-2
if 'chebyshev' in basis or 'fourier' in basis:
res = scipy.optimize.minimize(fun = lambda x: (M.dot(x) - targetIntegral)**2, \
jac = lambda x: 2 * M * (M.dot(x) - targetIntegral), \
hess = lambda x: 2. * np.outer(M,M), x0 = w, \
method ='trust-constr', constraints = None,\
options={'disp': False, 'maxiter':100})
else:
sumConstraint = scipy.optimize.NonlinearConstraint(fun = np.sum, lb = -eps, ub = eps)
stdConstraint = scipy.optimize.NonlinearConstraint(fun = np.std, lb = 1 - eps, ub = 1 + eps)
res = scipy.optimize.minimize(fun = lambda x: (M.dot(x) - targetIntegral)**2, \
jac = lambda x: 2 * M * (M.dot(x) - targetIntegral), \
hess = lambda x: 2. * np.outer(M,M), x0 = w, \
method ='trust-constr', constraints = [sumConstraint, stdConstraint],\
options={'disp': False, 'maxiter':100})
result = torch.from_numpy(res.x.reshape(weights.shape)).type(torch.float32).to(weights.device)
if verbose:
print('result: ', res)
print('initial weights:', normalizedWeights)
print('result weights:',result)
print('initial:', M.dot(w))
print('integral:', M.dot(res.x))
print('sumConstraint:', np.sum(res.x))
print('stdConstraint:', np.std(res.x))
return result, res.constr, res.fun, M.dot(w), M.dot(res.x)
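
# Explanatory note (added): the Monte-Carlo row vector M above approximates
# the filter's integral over the unit disk,
#     integral f ~= (pi / nmc) * sum_n outer(u(x_n), v(y_n)) . w  =  M . w,
# so the optimizer rescales w until M . w hits `targetIntegral`, subject to
# zero-mean / unit-std constraints for the non-periodic bases.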
def mapToSpherical(positions):
x = positions[:,0]
y = positions[:,1]
z = positions[:,2]
r = torch.sqrt(x**2 + y**2 + z**2)
theta = torch.atan2(y, x)
phi = torch.acos(z / (r + 1e-7))
return torch.vstack((r,theta,phi)).mT
def ballToCylinder(positions):
r = torch.linalg.norm(positions, dim = 1)
xy = torch.linalg.norm(positions[:,:2], dim = 1)
absz = torch.abs(positions[:,2])
# debugPrint(r)
# debugPrint(xy)
# debugPrint(absz)
x = positions[:,0]
y = positions[:,1]
z = positions[:,2]
termA = torch.zeros_like(positions)
eps = 1e-7
xB = x * r / (xy + eps)
yB = y * r / (xy + eps)
zB = 3 / 2 * z
termB = torch.vstack((xB, yB, zB)).mT
xC = x * torch.sqrt(3 * r / (r + absz + eps))
yC = y * torch.sqrt(3 * r / (r + absz + eps))
zC = torch.sign(z) * r
termC = torch.vstack((xC, yC, zC)).mT
mapped = torch.zeros_like(positions)
maskA = r < eps
maskB = torch.logical_and(torch.logical_not(maskA), 5/4 * z**2 <= x**2 + y**2)
maskC = torch.logical_and(torch.logical_not(maskA), torch.logical_not(maskB))
mapped[maskB] = termB[maskB]
mapped[maskC] = termC[maskC]
# debugPrint(mapped)
return mapped
# debugPrint(cylinderPositions)
def cylinderToCube(positions):
x = positions[:,0]
y = positions[:,1]
z = positions[:,2]
xy = torch.linalg.norm(positions[:,:2], dim = 1)
eps = 1e-7
termA = torch.vstack((torch.zeros_like(x), torch.zeros_like(y), z)).mT
# debugPrint(termA)
xB = torch.sign(x) * xy
yB = 4. / np.pi * torch.sign(x) * xy * torch.atan(y/(x+eps))
zB = z
termB = torch.vstack((xB, yB, zB)).mT
xC = 4. / np.pi * torch.sign(y) * xy * torch.atan(x / (y + eps))
yC = torch.sign(y) * xy
zC = z
termC = torch.vstack((xC, yC, zC)).mT
maskA = torch.logical_and(torch.abs(x) < eps, torch.abs(y) < eps)
maskB = torch.logical_and(torch.logical_not(maskA), torch.abs(y) <= torch.abs(x))
maskC = torch.logical_and(torch.logical_not(maskA), torch.logical_not(maskB))
# debugPrint(torch.sum(maskA))
# debugPrint(torch.sum(maskB))
# debugPrint(torch.sum(maskC))
mapped = torch.zeros_like(positions)
mapped[maskA] = termA[maskA]
mapped[maskB] = termB[maskB]
mapped[maskC] = termC[maskC]
return mapped
def mapToSpherePreserving(positions):
cylinderPositions = ballToCylinder(positions)
cubePositions = cylinderToCube(cylinderPositions)
return cubePositions
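
# A minimal usage sketch (an assumption: input points lie inside the unit
# ball). The two-stage ball -> cylinder -> cube map is volume preserving, so
# point densities stay comparable before and after the mapping:
#
#   pts = torch.rand(128, 3) * 2 - 1
#   pts = pts[torch.linalg.norm(pts, dim=1) <= 1.0]
#   cube = mapToSpherePreserving(pts)   # coordinates now in the unit cube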
class RbfConv(MessagePassing):
def __init__(
self,
in_channels: int,
out_channels: int,
dim: int = 2,
size: Union[int, List[int]] = [4, 4],
coordinateMapping : str = 'cartesian',
rbf : Union[int, List[int]] = 'linear',
aggr: str = 'sum',
linearLayer: bool = False,
feedThrough: bool = False,
# biasOffset: bool = False,
preActivation = None,
postActivation = None,
bias = True,
# initializer = torch.nn.init.xavier_uniform_,
initializer = torch.nn.init.uniform_,
batch_size = [16,16],
windowFn = None,
normalizeWeights = False,
normalizationFactor = None,
**kwargs
):
super().__init__(aggr=aggr, **kwargs)
# self.aggr = aggr
# assert self.aggr in ['add', 'mean', 'max', None]
# self.flow = flow
# assert self.flow in ['source_to_target', 'target_to_source']
# self.node_dim = node_dim
# self.inspector = Inspector(self)
# self.inspector.inspect(self.message)
# self.inspector.inspect(self.aggregate, pop_first=True)
# self.inspector.inspect(self.message_and_aggregate, pop_first=True)
# self.inspector.inspect(self.update, pop_first=True)
self.__user_args__ = self.inspector.keys(
['message', 'aggregate', 'update']).difference(self.special_args)
self.__fused_user_args__ = self.inspector.keys(
['message_and_aggregate', 'update']).difference(self.special_args)
# Support for "fused" message passing.
self.fuse = self.inspector.implements('message_and_aggregate')
# Support for GNNExplainer.
self.__explain__ = False
self.__edge_mask__ = None
self.in_channels = in_channels
self.out_channels = out_channels
self.dim = dim
self.coordinateMapping = coordinateMapping
# print('coordinate mapping', self.coordinateMapping)
self.size = size if isinstance(size, list) else repeat(size, dim)
self.rbfs = rbf if is_list_of_strings(rbf) else [rbf] * dim
        self.periodic = [False] * dim if coordinateMapping != 'polar' else [False, True] + [False] * (dim - 2)
self.initializer = initializer
self.batchSize = batch_size
self.feedThrough = feedThrough
self.preActivation = None if preActivation is None else getattr(nn.functional, preActivation)
self.postActivation = None if postActivation is None else getattr(nn.functional, postActivation)
self.windowFn = windowFn
self.use_bias = bias
# print('Creating layer %d -> %d features'%( in_channels, out_channels))
# print('For dimensionality: %d'% dim)
# print('Parameters:')
# print('\tRBF: ', self.rbfs)
# print('\tSize: ', self.size)
# print('\tPeriodic: ', self.periodic)
if isinstance(in_channels, int):
in_channels = (in_channels, in_channels)
if self.use_bias:
self.bias = Parameter(torch.zeros(out_channels))
else:
self.register_parameter('bias', None)
self.K = torch.tensor(self.size).prod().item()
if dim == 1:
self.weight = Parameter(torch.Tensor(self.size[0], in_channels[0], out_channels))
if dim == 2:
self.weight = Parameter(torch.Tensor(self.size[0],self.size[1], in_channels[0], out_channels))
if dim == 3:
self.weight = Parameter(torch.Tensor(self.size[0],self.size[1], self.size[2], in_channels[0], out_channels))
initializer(self.weight, -0.05, 0.05)
with torch.no_grad():
            # damp higher-frequency terms of spectral bases so optimization starts
            # from a smooth kernel; iterate over the basis terms along each axis
            if self.rbfs[0] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.size[0]):
                    if len(self.rbfs) == 1:
                        self.weight[i] *= np.exp(-i)
                    if len(self.rbfs) == 2:
                        self.weight[i,:] *= np.exp(-i)
                    if len(self.rbfs) == 3:
                        self.weight[i,:,:] *= np.exp(-i)
            if len(self.rbfs) > 1 and self.rbfs[1] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.size[1]):
                    if len(self.rbfs) == 2:
                        self.weight[:,i] *= np.exp(-i)
                    if len(self.rbfs) == 3:
                        self.weight[:,i,:] *= np.exp(-i)
            if len(self.rbfs) > 2 and self.rbfs[2] in ['chebyshev', 'fourier', 'gabor']:
                for i in range(self.size[2]):
                    self.weight[:,:,i] *= np.exp(-i)
if normalizeWeights:
if len(self.rbfs) == 2:
print('Starting normalization')
for i in range(in_channels[0]):
for j in range(out_channels):
newWeights, _, _, init, final = optimizeWeights2D(weights = self.weight[:,:,i,j].detach(),\
basis = self.rbfs, periodicity = self.periodic, \
nmc = 32*1024, targetIntegral = 1/in_channels[0], \
windowFn = self.windowFn, verbose = False)
self.weight[:,:,i,j] = newWeights
print('Normalizing [%2d x %2d]: %1.4e => %1.4e (target: %1.4e)' %(i,j, init, final, 1/in_channels[0]))
# self.weight[:,:,i,j] /= in_channels[0]
print('Done with normalization\n------------------------------------------')
self.root_weight = linearLayer
if linearLayer:
self.lin = Linear(in_channels[1], out_channels, bias=self.use_bias,
weight_initializer= 'uniform')
# if biasOffset:
# self.bias = Parameter(torch.Tensor(out_channels))
# else:
# self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
# if not isinstance(self.weight, nn.UninitializedParameter):
# size = self.weight.size(0) * self.weight.size(1)
# self.initializer(self.weight)
if self.root_weight:
self.lin.reset_parameters()
zeros(self.bias)
def forward(self, x: Union[Tensor, OptPairTensor], edge_index: Adj,
edge_attr: OptTensor = None, size: Size = None) -> Tensor:
# print('x', x[0].shape, x)
# print('edge_index', edge_index.shape, edge_index)
# print('edge_attr', edge_attr.shape, edge_attr)
# print('Size', Size)
# if args.cutlad:
# out = self.propagate(edge_index, x=x, edge_attr=edge_attr, size=size)
# else:
x_i, x_j = x
edge_weights = None
if not(self.windowFn is None):
            edge_weights = self.windowFn(torch.linalg.norm(edge_attr, dim = 1))
positions = torch.hstack((edge_attr, torch.zeros(edge_attr.shape[0],1, device = edge_attr.device, dtype = edge_attr.dtype)))
if self.coordinateMapping == 'polar':
spherical = mapToSpherical(positions)
mapped = torch.vstack((spherical[:,0] * 2. - 1.,spherical[:,1] / np.pi)).mT
if self.coordinateMapping == 'cartesian':
mapped = edge_attr
if self.coordinateMapping == 'preserving':
cubePositions = mapToSpherePreserving(positions)
mapped = torch.vstack((cubePositions[:,0],cubePositions[:,1] / np.pi)).mT
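        # cutlass is the fused autograd op: it evaluates the separable basis at the
        # mapped edge coordinates and aggregates neighbor features in one pass
        # (message() below spells out the same product as a reference einsum)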
convolution = cutlass.apply
out = convolution(edge_index, x_i, x_j, mapped, edge_weights, self.weight,
x_i.shape[0], self.node_dim,
self.size , self.rbfs, self.periodic,
self.batchSize[0],self.batchSize[1])
# out = self.propagate2(edge_index, x=x, edge_attr=edge_attr, size=size)
# print('out: ', out.shape, out)
x_r = x[1]
if self.preActivation is not None:
out = self.preActivation(out)
        if x_r is not None and self.root_weight:
            lin = self.lin(x_r)
            out = out + (self.preActivation(lin) if self.preActivation is not None else lin)
if self.bias is not None:
out = out + self.bias
        if self.feedThrough:
            out = out + (self.preActivation(x_r) if self.preActivation is not None else x_r)
if self.postActivation is not None:
out = self.postActivation(out)
return out
def message(self, x_j: Tensor, edge_attr: Tensor) -> Tensor:
if self.dim == 1:
u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
return torch.einsum('nu, uio,ni -> no',u,self.weight, x_j)
if self.dim == 2:
u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
v = evalBasisFunction(self.size[1], edge_attr[:,1], which=self.rbfs[1], periodic = self.periodic[1]).T
return torch.einsum('nu, nv, uvio,ni -> no',u,v,self.weight, x_j)
        if self.dim == 3:
            u = evalBasisFunction(self.size[0], edge_attr[:,0], which=self.rbfs[0], periodic = self.periodic[0]).T
            v = evalBasisFunction(self.size[1], edge_attr[:,1], which=self.rbfs[1], periodic = self.periodic[1]).T
            w = evalBasisFunction(self.size[2], edge_attr[:,2], which=self.rbfs[2], periodic = self.periodic[2]).T
            return torch.einsum('nu, nv, nw, uvwio, ni -> no', u, v, w, self.weight, x_j)
def __repr__(self) -> str:
return (f'{self.__class__.__name__}({self.in_channels}, '
f'{self.out_channels}, dim={self.dim})')
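    # Usage sketch (names and shapes are illustrative, not from the original repo):
    #   conv = RbfConv(in_channels=16, out_channels=32, dim=2, size=[4, 4],
    #                  coordinateMapping='cartesian', rbf='linear')
    #   out = conv((x, x), edge_index, edge_attr)  # x: [N, 16], edge_attr in [-1, 1]^2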
def __check_input__(self, edge_index, size):
the_size: List[Optional[int]] = [None, None]
if isinstance(edge_index, Tensor):
assert edge_index.dtype == torch.long
assert edge_index.dim() == 2
assert edge_index.size(0) == 2
if size is not None:
the_size[0] = size[0]
the_size[1] = size[1]
return the_size
elif isinstance(edge_index, SparseTensor):
if self.flow == 'target_to_source':
raise ValueError(
('Flow direction "target_to_source" is invalid for '
'message propagation via `torch_sparse.SparseTensor`. If '
'you really want to make use of a reverse message '
'passing flow, pass in the transposed sparse tensor to '
'the message passing module, e.g., `adj_t.t()`.'))
the_size[0] = edge_index.sparse_size(1)
the_size[1] = edge_index.sparse_size(0)
return the_size
raise ValueError(
('`MessagePassing.propagate` only supports `torch.LongTensor` of '
'shape `[2, num_messages]` or `torch_sparse.SparseTensor` for '
'argument `edge_index`.'))
def __collect__(self, args, edge_index, size, kwargs):
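        # mirrors torch_geometric's MessagePassing.__collect__, where the
        # empty-default sentinel is inspect.Parameter.empty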
i, j = (1, 0) if self.flow == 'source_to_target' else (0, 1)
out = {}
for arg in args:
if arg[-2:] not in ['_i', '_j']:
out[arg] = kwargs.get(arg, Parameter.empty)
else:
dim = 0 if arg[-2:] == '_j' else 1
data = kwargs.get(arg[:-2], Parameter.empty)
if isinstance(data, (tuple, list)):
assert len(data) == 2
if isinstance(data[1 - dim], Tensor):
self.__set_size__(size, 1 - dim, data[1 - dim])
data = data[dim]
if isinstance(data, Tensor):
self.__set_size__(size, dim, data)
data = self.__lift__(data, edge_index,
j if arg[-2:] == '_j' else i)
out[arg] = data
if isinstance(edge_index, Tensor):
out['adj_t'] = None
out['edge_index'] = edge_index
out['edge_index_i'] = edge_index[i]
out['edge_index_j'] = edge_index[j]
out['ptr'] = None
elif isinstance(edge_index, SparseTensor):
out['adj_t'] = edge_index
out['edge_index'] = None
out['edge_index_i'] = edge_index.storage.row()
out['edge_index_j'] = edge_index.storage.col()
out['ptr'] = edge_index.storage.rowptr()
out['edge_weight'] = edge_index.storage.value()
out['edge_attr'] = edge_index.storage.value()
out['edge_type'] = edge_index.storage.value()
out['index'] = out['edge_index_i']
out['size'] = size
out['size_i'] = size[1] or size[0]
out['size_j'] = size[0] or size[1]
out['dim_size'] = out['size_i']
return out
def propagate2(self, edge_index: Adj, size: Size = None, **kwargs):
decomposed_layers = 1 if self.explain else self.decomposed_layers
for hook in self._propagate_forward_pre_hooks.values():
res = hook(self, (edge_index, size, kwargs))
if res is not None:
edge_index, size, kwargs = res
size = self.__check_input__(edge_index, size)
if decomposed_layers > 1:
user_args = self.__user_args__
decomp_args = {a[:-2] for a in user_args if a[-2:] == '_j'}
decomp_kwargs = {
a: kwargs[a].chunk(decomposed_layers, -1)
for a in decomp_args
}
decomp_out = []
for i in range(decomposed_layers):
# if decomposed_layers > 1:
# for arg in decomp_args:
# kwargs[arg] = decomp_kwargs[arg][i]
# coll_dict = self.__collect__(self.__user_args__, edge_index,
# size, kwargs)
# msg_kwargs = self.inspector.distribute('message', coll_dict)
# for hook in self._message_forward_pre_hooks.values():
# res = hook(self, (msg_kwargs, ))
# if res is not None:
# msg_kwargs = res[0] if isinstance(res, tuple) else res
#
# aggr_kwargs = self.inspector.distribute('aggregate', coll_dict)
convolution = cutlass.apply
inFeatures = kwargs['x'][0]
edge_weights = None
if not(self.windowFn is None):
                edge_weights = self.windowFn(torch.linalg.norm(kwargs['edge_attr'], dim = 1))
# print(torch.linalg.norm(kwargs['edge_attr'], axis = 1))
# print(edge_weights.shape)
# print(edge_weights)
# print(inFeatures.shape)
# inFeatures = inFeatures * window[:,None]
# print(inFeatures.shape)
positions = torch.hstack((kwargs['edge_attr'], torch.zeros(kwargs['edge_attr'].shape[0],1, device = kwargs['edge_attr'].device, dtype = kwargs['edge_attr'].dtype)))
if self.coordinateMapping == 'polar':
spherical = mapToSpherical(positions)
mapped = torch.vstack((spherical[:,0] * 2. - 1.,spherical[:,1] / np.pi)).mT
if self.coordinateMapping == 'cartesian':
mapped = kwargs['edge_attr']
if self.coordinateMapping == 'preserving':
cubePositions = mapToSpherePreserving(positions)
mapped = torch.vstack((cubePositions[:,0],cubePositions[:,1] / np.pi)).mT
out = convolution(edge_index, kwargs['x'][0], kwargs['x'][1], mapped, edge_weights, self.weight,
size[0], self.node_dim,
self.size , self.rbfs, self.periodic,
self.batchSize[0],self.batchSize[1])
# for hook in self._aggregate_forward_hooks.values():
# res = hook(self, (aggr_kwargs, ), out)
# if res is not None:
# out = res
# update_kwargs = self.inspector.distribute('update', coll_dict)
# out = self.update(out, **update_kwargs)
# if decomposed_layers > 1:
# decomp_out.append(out)
# if decomposed_layers > 1:
# out = torch.cat(decomp_out, dim=-1)
# for hook in self._propagate_forward_hooks.values():
# res = hook(self, (edge_index, size, kwargs), out)
# if res is not None:
# out = res
return out | wi-re/spheric_density_demo | rbfConv.py | rbfConv.py | py | 24,177 | python | en | code | 2 | github-code | 36 |
74158129382 | # test_cloud_client.py
import pytest
from datetime import datetime
from testfixtures import LogCapture
from cloud_server import initialize_server, add_database_entry
from database_definitions import Patient
import io
import os
initialize_server()
pathname = os.getcwd()
full_pathname = pathname + '/images/test_image.jpg'
def test_convert_file_to_b64_string():
    from cloud_client import convert_file_to_b64_string
    b64str = convert_file_to_b64_string(full_pathname)
    assert b64str[0:20] == "/9j/4AAQSkZJRgABAQEA"
def test_b64_to_ndarray():
from cloud_client import convert_file_to_b64_string
from cloud_client import b64_to_ndarray
b64str = convert_file_to_b64_string("test_image.jpg")
nd = b64_to_ndarray(b64str)
answer = nd[0][0:5]
expected = [[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197]]
    assert (answer == expected).all()
list1 = ['a', 'b', 'c', 'd']
list2 = [23, 98, 47, 24]
@pytest.mark.parametrize("listvar, val, expected", [(list1, 'c', 2),
(list2, 98, 1)])
def test_get_index(listvar, val, expected):
from cloud_client import get_index
answer = get_index(listvar, val)
assert answer == expected
def test_resize_image():
from cloud_client import convert_file_to_b64_string
from cloud_client import b64_to_ndarray
from cloud_client import resize_image
b64str = convert_file_to_b64_string("test_image.jpg")
nd = b64_to_ndarray(b64str)
resized_nd = resize_image(nd)
answer = resized_nd[0][0:5]
expected = [[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197]]
    assert (answer == expected).all()
def test_b64_string_to_file():
from cloud_client import convert_file_to_b64_string
from cloud_client import b64_string_to_file
import filecmp
import os
b64str = convert_file_to_b64_string("test_image.jpg")
b64_string_to_file(b64str, open("test_image_output.jpg", "wb"))
answer = filecmp.cmp("test_image.jpg",
"test_image_output.jpg")
os.remove("test_image_output.jpg")
assert answer is True
def test_process_b64():
from cloud_client import convert_file_to_b64_string
from cloud_client import b64_to_ndarray
from cloud_client import resize_image
b64str = convert_file_to_b64_string("test_image.jpg")
nd = b64_to_ndarray(b64str)
resized_nd = resize_image(nd)
answer = resized_nd[0][0:5]
expected = [[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197],
[68, 115, 197]]
    assert (answer == expected).all()
| pdijour/Patient_Monitoring_FullStack | test_cloud_client.py | test_cloud_client.py | py | 2,802 | python | en | code | 1 | github-code | 36 |
73137302504 | """factRequestForDocuments
Revision ID: 489ca98de532
Revises: d6c3af32b13e
Create Date: 2022-01-26 22:39:14.403851
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '489ca98de532'
down_revision = 'd6c3af32b13e'
branch_labels = None
depends_on = None
def upgrade():
op.create_table('factRequestForDocuments',
sa.Column('actionid', sa.Integer(), nullable=False),
sa.Column('foirequestid', sa.Integer(), nullable=False),
sa.Column('runcycleid', sa.Integer(), nullable=False),
sa.Column('actiontype', sa.Integer()),
sa.Column('description', sa.VARCHAR(length=4000)),
sa.Column('priority', sa.VARCHAR(length=3000)),
sa.Column('emailaddress', sa.VARCHAR(length=4000)),
sa.Column('createddate', sa.DateTime()),
sa.Column('actiondate', sa.DateTime()),
sa.Column('duedate', sa.DateTime()),
sa.Column('responsedate', sa.DateTime()),
sa.Column('parentactionid', sa.Integer()),
sa.Column('createdby', sa.VARCHAR(length=200)),
sa.Column('subject', sa.VARCHAR(length=150)),
sa.Column('programofficeid', sa.Integer()),
sa.Column('reqfordocstatusid', sa.Integer()),
sa.Column('completeddate', sa.DateTime()),
sa.Column('requestofficeid', sa.Integer()),
sa.Column('visiblerequestid', sa.VARCHAR(length=50)),
sa.Column('requestdescription', sa.VARCHAR(length=4000)),
sa.Column('officeid', sa.Integer()),
sa.Column('requesttypeid', sa.Integer()),
sa.Column('overduedays', sa.Integer()),
sa.Column('elapseddays', sa.Integer()),
sa.Column('passduedays', sa.Integer()),
sa.Column('rfdage', sa.Integer()),
sa.Column('remainingdays', sa.Integer()),
sa.Column('methodofdelivery', sa.VARCHAR(length=1)),
sa.ForeignKeyConstraint(['reqfordocstatusid'], ['dimRequestForDocumentsStatus.reqfordocstatusid']),
sa.PrimaryKeyConstraint('actionid', 'foirequestid', 'runcycleid')
)
def downgrade():
op.drop_table('factRequestForDocuments')
| bcgov/foi-reporting | datawarehouse/edw_migrations/versions/489ca98de532_factrequestfordocuments.py | 489ca98de532_factrequestfordocuments.py | py | 1,937 | python | en | code | 0 | github-code | 36 |
40321895686 | import os
import pandas as pd
def get_file_list(path):
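    # recursively collect (directory, file name) pairs under `path`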
file_list = []
for root, dirs, files in os.walk(path):
for file in files:
file_list.append([root, file])
return file_list
def save_to_excel(file_list, excel_file):
    df = pd.DataFrame(file_list, columns=["Folder", "File name"])
    # take the last dot-separated piece so multi-dot names keep the real extension
    df["File extension"] = df["File name"].str.split(".").str[-1]
    df.index += 1
    df.to_excel(excel_file, index_label="Row number")
try:
file_list = get_file_list(os.getcwd())
save_to_excel(file_list, "result.xlsx")
except Exception as ex:
print(ex)
| leansellerdev/testPythonRpa | main.py | main.py | py | 648 | python | ru | code | 0 | github-code | 36 |
23306322275 | import click
import csv
import itertools
from Bio import motifs
from Bio.Seq import Seq
from Bio.SeqUtils import GC
from Bio.Alphabet import generic_dna
from Bio.Alphabet.IUPAC import unambiguous_dna
def homer_parse(fstream):
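    # HOMER motif files: a '>consensus name ...' header line followed by one
    # tab-separated row of A/C/G/T scores per motif position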
def build(name, freq):
m = motifs.Motif(counts=freq)
m.name = name
return m
nct = "ACGT"
name = ""
mtx = {a: [] for a in nct}
for line in fstream:
if line.startswith('>'):
if name != '':
yield build(name, mtx)
name = line.rstrip().split()[1]
mtx = {a: [] for a in nct}
else:
score = [float(x) for x in line.rstrip().split("\t")]
for i, a in enumerate(nct):
mtx[a].append(score[i])
if name != '':
yield build(name, mtx)
def fasta_iter(istream):
name = None
sequence = ''
for line in istream:
if line.startswith('>'):
if name is not None:
yield(name, sequence)
name = line.rstrip().replace('>', '')
sequence = ''
else:
sequence += line.rstrip()
if name is not None:
yield (name, sequence)
def mutation_iter(istream):
for line in istream:
if line.startswith('#'):
continue
oligo, pos, name, ref, alt = line.rstrip().split()
if ('N' in ref) or ('N' in alt):
continue
yield (oligo, int(pos), name, ref, alt.split(','))
def progress(iter, freq=100):
count = 0
rotator = 0
label = ("|", "/", "-", "\\")
for i in iter:
yield(i)
count += 1
if count % freq == 0:
rotator = (rotator + 1) % 4
click.echo("[%s] %6d\r" % (label[rotator], count), nl=False, err=True)
@click.command()
@click.option('--motif', '-m', type=click.File('r'), required=True)
@click.option('--reference', '-r', type=click.File('r'), required=True)
@click.option('--mutation', '-M', type=click.File('r'), default='-')
@click.option('--out', '-o', type=click.File('w'), default='-')
def fit_motif(motif, reference, mutation, out):
click.echo("Loading reference sequences.", err=True)
oligos = { name: seq for (name, seq) in progress(fasta_iter(reference)) }
click.echo("Loading motifs matrix.", err=True)
motifs = { _name(m) : m for m in progress(homer_parse(motif)) }
reader = mutation_iter(mutation)
writer = csv.writer(out, delimiter='\t')
writer.writerow(['oligo', 'rsid', 'ref', 'alt', 'tf', 'ref_score', 'alt_score',
'score', 'flank_gc', 'core_gc'])
click.echo("Progessing mutations.", err=True)
for ((oligo, pos, rsid, ref, alts), tf) in progress(itertools.product(reader, motifs)):
sequence = oligos[oligo]
motif = motifs[tf]
refseq = sequence[:pos] + ref + sequence[(pos+1):]
refat, refscore = _score(motif, refseq)
refgc = GC(flank(refseq, refat, refat + len(motif)))
        refcore_gc = GC(refseq[refat:(refat+len(motif))])
for alt in alts:
altseq = sequence[:pos] + alt + sequence[(pos+1):]
altat, altscore = _score(motif, altseq)
if altscore > refscore:
flank_gc = GC(flank(altseq, altat, altat + len(motif)))
                core_gc = GC(altseq[altat:(altat+len(motif))])
else:
flank_gc = refgc
core_gc = refcore_gc
writer.writerow([oligo, rsid, ref, alt, tf, refscore, altscore,
refscore-altscore, flank_gc, core_gc])
def _score(motif, seq):
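    # scan both strands with the log-odds PSSM; a best hit on the reverse strand
    # at rv_index maps back to the forward-strand start len(seq) - len(motif) - rv_index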
seq = Seq(seq, unambiguous_dna)
pssm = motif.pssm
fw_scores = pssm.calculate(seq)
rv_scores = pssm.calculate(seq.reverse_complement())
fw_index = max(range(len(fw_scores)), key=fw_scores.__getitem__)
rv_index = max(range(len(rv_scores)), key=rv_scores.__getitem__)
if fw_scores[fw_index] > rv_scores[rv_index]:
return fw_index, fw_scores[fw_index]
else:
index = len(seq) - len(motif) - rv_index
return index, rv_scores[rv_index]
def flank(seq, start, stop, size=10):
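    # up to `size` bases on each side of [start, stop), clipped at the sequence ends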
bgupper = max([0, start-size])
endown = min([len(seq), stop+size])
return seq[bgupper:start] + seq[stop:endown]
def _name(motif):
name = motif.name.split('/')[0]
return name.upper().replace('-', '_')
if __name__ == '__main__':
fit_motif()
| ren-lab/snp-selex | pwm/score.py | score.py | py | 4,389 | python | en | code | 15 | github-code | 36 |
24179539315 | """Helper functions for iterables."""
import collections
from itertools import islice, zip_longest
from typing import Callable, Iterable, Iterator, Optional, TypeVar
T = TypeVar("T")
# Courtesy of https://docs.python.org/3/library/itertools.html
def grouper(
iterable: Iterable[T],
n: int,
*,
incomplete: str = "fill",
fillvalue: Optional[T] = None,
) -> Iterable[Iterable[T]]:
"""Collect data into non-overlapping fixed-length chunks or blocks."""
# grouper('ABCDEFG', 3, fillvalue='x') --> ABC DEF Gxx
# grouper('ABCDEFG', 3, incomplete='strict') --> ABC DEF ValueError
# grouper('ABCDEFG', 3, incomplete='ignore') --> ABC DEF
args = [iter(iterable)] * n
if incomplete == "fill":
return zip_longest(*args, fillvalue=fillvalue)
if incomplete == "strict":
return zip(*args, strict=True)
if incomplete == "ignore":
return zip(*args)
else:
raise ValueError("Expected fill, strict, or ignore")
# Also thank to https://docs.python.org/3/library/itertools.html
def sliding_window(iterable: Iterable[T], n: int) -> Iterable[tuple[T, ...]]:
"""Iterate over an iterable as a sliding window."""
# sliding_window('ABCDEFG', 4) --> ABCD BCDE CDEF DEFG
it = iter(iterable)
window = collections.deque(islice(it, n), maxlen=n)
if len(window) == n:
yield tuple(window)
for x in it:
window.append(x)
yield tuple(window)
def read_iter_until(
data: Iterator[T],
terminators: Optional[set[T]] = None,
pred: Optional[Callable[[T], bool]] = None,
) -> tuple[list[T], Optional[T]]:
"""Read an iterator until a terminator, returning the read string and the terminator."""
result: list[T] = []
for i in data:
if terminators is not None and i in terminators:
return result, i
if pred is not None and pred(i):
return result, i
result.append(i)
return result, None
| NickG123/AdventOfCode2022 | utils/iterables.py | iterables.py | py | 1,958 | python | en | code | 0 | github-code | 36 |
74955138663 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import requests
SNIPPET_TEMPLATE = """<snippet>
<content><![CDATA[
{}
]]></content>
<description>{}</description>
<scope>source.python</scope>
<tabTrigger>{}</tabTrigger>
</snippet>"""
def _request_code(target_url):
"""Request a code file."""
r = requests.get(target_url)
code = r.text.encode('utf-8')
return code
def _find_functions(code):
"""Find functions from the code."""
# find non-private functions and the first line of their doc-string
    function_pattern = r'(@.*\n)?.*def ([^_].*)\((.*)\):\n.*?"""(.*)'
# find and return all of the functions
functions = re.findall(function_pattern, code)
return functions
def main(args):
"""Create sublime text snippets automatically from code."""
CLASS_PREFIX = args['<class_prefix>']
# define the prefix used when saving a new snippet and for the snippet's tab trigger
if args['--snippet_prefix']:
SNIPPET_PREFIX = args['--snippet_prefix']
else:
SNIPPET_PREFIX = CLASS_PREFIX
# if we are pulling the code from a location on the internet (e.g. a raw file on github), get the code
if args['<target_file>'].startswith("http"):
code = _request_code(args['<target_file>'])
else:
with open(args['<target_file>'], 'r') as f:
code = f.read().encode('utf-8')
# find all of the functions in the code
functions = _find_functions(code)
# create a snippet for each function (as appropriate)
for function in functions:
# remove the newline from the end of the decorator
decorator = function[0].strip()
# handle a property decorator
if decorator == "@property":
pass
# handle functions
else:
# split up the arguments for the function
arguments = function[2].split(", ")
# remove 'self' as an argument to the function (just move on if 'self' isn't an argument)
try:
arguments.remove('self')
except ValueError as e:
pass
# create a string with the arguments to the function formatted for a sublime text snippet
argument_string = ""
count = 0
for argument in arguments:
count += 1
argument_string += "${"
argument_string += "{}:{}".format(count, argument)
argument_string += "}"
# if there are more arguments, add a trailing comma
if count < len(arguments):
argument_string += ", "
# create a name for the snippet based on the name of the function
snippet_name = SNIPPET_PREFIX + function[1].replace("_", "")
# create a description for the snippet based on the function's doc string
snippet_description = function[3].replace('"""', '')
# create snippet code (with the class prefix in front of it)
snippet_code = CLASS_PREFIX + "." + function[1] + "(" + argument_string + ")"
# create a snippet
new_snippet = SNIPPET_TEMPLATE.format(snippet_code, snippet_description, snippet_name)
# write the new snippet
with open(args['<output_directory>'] + '/{}.sublime-snippet'.format(snippet_name), 'w') as f:
f.write(new_snippet)
if __name__ == '__main__':
    # CLI wiring inferred from the docopt-style keys used in main(); assumes `docopt` is installed
    from docopt import docopt
    main(docopt("Usage: sublime_snippet_creator.py <target_file> <class_prefix> <output_directory> [--snippet_prefix=<prefix>]"))
| fhightower/sublime-snippet-creator | sublime_snippet_creator/sublime_snippet_creator.py | sublime_snippet_creator.py | py | 3,465 | python | en | code | 0 | github-code | 36 |
8746369027 | import requests
import numpy as np
from phe import paillier
import math
api_address = "http://localhost:8000"
N_FEATURES = 10
PRECISION = 2**(-16)
EXP = -8
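# EXP is the fixed-point exponent assumed for the server's encrypted replies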
TEST_DATA = [0.48555949, 0.29289251, 0.63463107, 0.41933057, 0.78672205, 0.58910837, 0.00739207, 0.31390802, 0.37037496, 0.3375726]
TEST_PRED = 0.44812144746653826
def encrypt_object(datapoint, public_key):
return [public_key.encrypt(x, precision=PRECISION).ciphertext() for x in datapoint]
def generate_stealing_data():
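    # N_FEATURES + 1 query points give exactly as many equations as the linear
    # model has unknowns (N_FEATURES weights + 1 bias)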
stealing_data = []
for _ in range(N_FEATURES + 1):
stealing_data.append(np.random.uniform(0, 1, N_FEATURES).tolist())
return stealing_data
def discover_model(data, preds):
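    # solve X @ coeffs = y exactly, with a leading column of ones for the bias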
N = len(data)
X = np.ones((N, N_FEATURES + 1))
X[:, 1:] = np.array(data)
y = np.array(preds)
coeffs = np.linalg.solve(X,y)
bias = coeffs[0]
weights = coeffs[1:]
return weights, bias
def main():
# create a session
session = requests.session()
public_key, private_key = paillier.generate_paillier_keypair()
stealing_data = generate_stealing_data()
preds = []
for _, datapoint in enumerate(stealing_data):
encrypted_list = encrypt_object(datapoint, public_key)
r = session.post(f"{api_address}/prediction", json={"pub_key_n": public_key.n , "enc_feature_vector": encrypted_list}).json()
encrypted_pred = paillier.EncryptedNumber(public_key, int(r["enc_prediction"]), EXP)
res = private_key.decrypt(encrypted_pred)
preds.append(res)
weights, bias = discover_model(stealing_data, preds)
print(f'weights:\n{weights}')
print(f'bias: {bias:.3f}')
stolen_pred = np.dot(TEST_DATA, weights) + bias
assert 2**(-16) > abs(stolen_pred - TEST_PRED), "Prediction is not correct"
print('All good!')
if __name__ == '__main__':
main() | arinaruck/isp_2022 | hw8/steal_model.py | steal_model.py | py | 1,831 | python | en | code | 0 | github-code | 36 |
16431295526 | from qtdataflow.Qt import QtCore
QObject = QtCore.QObject
Signal = QtCore.Signal
class Node(object):
"""
Logical Representation of a node.
"""
def __init__(self):
#self.schema = schema
self.node_type = 'BaseNode'
self.accepts_input = False
self.generates_output = False
self.out_conn = []
self.in_conn = []
def accept_type(self, node):
return True
def get_view(self):
"""
Which view-class to use, has to return a QGraphicsItem
"""
        raise NotImplementedError
def get_toolbar_view(self):
"""
Which view-class is used in the Toolbar, defaults to
the standard view.
"""
return self.get_view()
def new_connection_out(self, node):
"""
Called if a new connection (out) was made.
"""
pass
def new_connection_in(self, node):
pass
class MultiTerminalNode(Node):
"""
Node which can have more than one input/output Terminal.
"""
def __init__(self):
Node.__init__(self)
self.input_terminals = {}
self.output_terminals = {}
@property
def accepts_input(self):
return len(self.input_terminals) > 0
@property
def generates_output(self):
return len(self.output_terminals) > 0
class Schema(QObject):
"""
Model a Schema, which includes all Nodes and connections.
"""
node_created = Signal(Node)
node_deleted = Signal(Node)
nodes_connected = Signal(list)
nodes_disconnected = Signal(list)
def __init__(self):
super(Schema, self).__init__()
self.nodes = []
self.connections = []
def add_node(self, node):
"""
Add given Node to the Schema.
"""
if node not in self.nodes:
self.nodes.append(node)
self.node_created.emit(node)
else:
raise ValueError('Node already in Schema.')
def delete_node(self, node):
"""
Deletes given Node from the Schema, calls node_deleted event.
"""
to_delete = [(o, i) for (o, i) in self.connections
if o == node or i == node]
for o, i in to_delete:
self.disconnect_nodes(o, i)
self.nodes.remove(node)
self.node_deleted.emit(node)
    def connect_nodes(self, out_node, in_node):
        if out_node is in_node:
            raise ValueError("Node can't connect to itself")
        out_node.out_conn.append(in_node)
        in_node.in_conn.append(out_node)
        self.connections.append((out_node, in_node))
        # notify each endpoint about the side of the connection it actually gained
        out_node.new_connection_out(in_node)
        in_node.new_connection_in(out_node)
        self.nodes_connected.emit([out_node, in_node])
def disconnect_nodes(self, out_node, in_node):
if (out_node, in_node) not in self.connections:
raise ValueError("Nodes are not connected")
self.nodes_disconnected.emit([out_node, in_node])
out_node.out_conn.remove(in_node)
in_node.in_conn.remove(out_node)
self.connections.remove((out_node, in_node))
def to_disk(self, file):
import pickle
to_pickle = (self.nodes, self.connections)
return pickle.dump(to_pickle, file)
def from_disk(self, file):
import pickle
nodes, connections = pickle.load(file)
for n in nodes:
self.add_node(n)
for c in connections:
self.connect_nodes(*c)
| B-Rich/qt-dataflow | qtdataflow/model.py | model.py | py | 3,501 | python | en | code | null | github-code | 36 |
9324455473 | import os
from dotenv import load_dotenv
import discord
from discord import app_commands
from langchain.llms import OpenAI
# Load the .env file
load_dotenv()
token = os.getenv('DISCORD_TOKEN')
guild_id = os.getenv('DISCORD_GUILD')
openai_key = os.getenv('OPENAI_KEY')
class Client(discord.Client):
def __init__(self):
super().__init__(intents=discord.Intents.default())
self.synced = False
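    # sync the slash-command tree to the guild exactly once, on first ready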
async def on_ready(self):
await self.wait_until_ready()
if not self.synced:
await tree.sync(guild = discord.Object(id = guild_id))
self.synced = True
print(f"Logged in as {self.user} (ID: {self.user.id}).")
print("-----")
client = Client()
tree = app_commands.CommandTree(client)
@tree.command(name = "test", description = "testing", guild = discord.Object(id = guild_id))
async def test_command(interaction: discord.Interaction, name: str):
await interaction.response.send_message(f"Hello {name}! I was made with Discord.py!")
@tree.command(name="llm", description="Language Model", guild = discord.Object(id = guild_id))
async def llm_command(interaction: discord.Interaction, prompt: str):
llm = OpenAI(openai_api_key=openai_key, temperature=0.9)
await interaction.response.send_message(llm.predict(prompt))
if __name__ == "__main__":
client.run(token)
| conradomanclossi/Trends | bot/main.py | main.py | py | 1,334 | python | en | code | 0 | github-code | 36 |
25314544397 | import sys
input = lambda : sys.stdin.readline().rstrip()
def main(s1, s2):
l1, l2, answer = len(s1), len(s2), 0
dp = [0]*l2
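    # dp holds the previous row: dp[i2] = length of the common substring ending
    # at s1[i1-1] and s2[i2]; min(i2, 1) zeroes the lookup at i2 == 0 so the
    # dp[i2-1] access never wraps around to dp[-1]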
for i1 in range(l1):
dp_sub = [0]*l2
for i2 in range(l2):
dp_sub[i2] = (min(i2,1)*dp[i2-1] + 1)*(s1[i1]==s2[i2])
answer = max(answer, max(dp_sub))
dp = dp_sub
print(answer)
if __name__ == "__main__":
s2, s1 = sorted([input() for _ in range(2)], key=len)
main(s1, s2)
# LCS, Longest Common Substring | soohi0/Algorithm_study | 4월_4주/BOJ_공통부분문자열/BOJ_공통부분문자열_강태훈.py | BOJ_공통부분문자열_강태훈.py | py | 494 | python | en | code | 0 | github-code | 36 |
23315419953 | #!/usr/bin/env python
from setuptools import setup, find_packages
tests_require = [
'nose',
'unittest2',
]
setup(
name='quickunit',
version='0.6.0',
author='David Cramer',
author_email='dcramer@gmail.com',
description='A discovery plugin for Nose which relies on sane structure.',
url='http://github.com/dcramer/quickunit',
packages=find_packages(exclude=["tests"]),
zip_safe=False,
entry_points={
'nose.plugins.0.10': [
'quickunit = quickunit.plugin:QuickUnitPlugin'
],
'console_scripts': [
'quickunit-finder = quickunit.scripts.finder:main',
],
},
license='Apache License 2.0',
tests_require=tests_require,
extras_require={
'tests': tests_require,
},
include_package_data=True,
classifiers=[
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Operating System :: OS Independent',
'Topic :: Software Development'
],
)
| dcramer/quickunit | setup.py | setup.py | py | 1,025 | python | en | code | 34 | github-code | 36 |
42866907447 | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
def twoSumBSTs(self, root1: TreeNode, root2: TreeNode, target: int) -> bool:
if not root1 or not root2:
return False
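        # collect every value of both trees via inorder traversal, then check each
        # value of tree 1 for a complement (target - value) in tree 2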
tree1_dict = {}
tree2_dict = {}
self.inorder_traversal(root1, tree1_dict)
self.inorder_traversal(root2, tree2_dict)
#print(tree1_dict, tree2_dict)
for key in tree1_dict:
if target - key in tree2_dict:
return True
return False
def inorder_traversal(self, root, tree_dict):
if root is None:
return
self.inorder_traversal(root.left, tree_dict)
tree_dict[root.val] = 1
self.inorder_traversal(root.right, tree_dict)
| allen791210/LeetCode | 1214_Two_Sum_BSTs.py | 1214_Two_Sum_BSTs.py | py | 914 | python | en | code | 0 | github-code | 36 |
10902517011 | from django.test import TestCase
from home.models import UserProfile
class UserProfileModelTestCase(TestCase):
@classmethod
def setUpTestData(cls):
# Create test data before running the tests
UserProfile.objects.create(
name='John Doe',
email='johndoe@example.com',
bio='Lorem ipsum dolor sit amet',
profile_picture='path/to/profile_picture.jpg'
)
def test_create_user_profile(self):
# Retrieve the created user profile from the database
user_profile = UserProfile.objects.get(email='johndoe@example.com')
# Perform assertions to check if the created user profile matches the provided data
self.assertEqual(user_profile.name, 'John Doe')
self.assertEqual(user_profile.email, 'johndoe@example.com')
self.assertEqual(user_profile.bio, 'Lorem ipsum dolor sit amet')
self.assertEqual(user_profile.profile_picture,
'path/to/profile_picture.jpg')
| shubhamkhuntia/superU-Assignment | home/tests.py | tests.py | py | 1,010 | python | en | code | 0 | github-code | 36 |
32786931379 | __all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__name__ = "influxdump"
__summary__ = 'InfluxDB data backup tool'
__uri__ = "https://github.com/gams/influxdump"
__version__ = "1.0.6"
__author__ = 'Stefan Berder <stefan@measureofquality.com>'
__contact__ = 'code+influxdump@measureofquality.com'
__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2016-2020 %s" % __author__
| gams/influxdump | influxdump/__about__.py | __about__.py | py | 484 | python | en | code | 3 | github-code | 36 |
31762824166 | from collections import deque
def bfs():
dx = [0,1,0,-1]
dy = [1,0,-1,0]
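    # maze cells double as the distance field: open unvisited cells hold 1 and are
    # overwritten with their BFS distance from (0, 0) once enqueued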
while loc:
x_y = loc.popleft()
y, x = x_y[0], x_y[1]
for i in range(4):
nX = x + dx[i]
nY = y + dy[i]
if nX < 0 or nX >= m or nY < 0 or nY >= n: continue
if maze[nY][nX] != 1: continue
loc.append((nY, nX))
maze[nY][nX] = maze[y][x] + 1
if __name__ == "__main__":
n, m = map(int, input().split())
maze = [[int(i) for i in input()] for j in range(n)]
loc = deque([])
loc.append((0,0))
bfs()
print(maze[n-1][m-1])
| 4RG0S/2020-Spring-Jookgorithm | 이승민/[20.03.30]2178.py | [20.03.30]2178.py | py | 626 | python | en | code | 4 | github-code | 36 |
# GRAPH STORED AS ADJACENCY LISTS (built from an edge-list input)
from queue import Queue
MAX = 100
V = None
E = None
visited = [False for i in range(MAX)]
path = [0 for i in range(MAX)]
graph = [[] for i in range(MAX)]
dist = [-1 for i in range(MAX)]
def BFS(s):
    dist[s] = 0
    visited[s] = True
    q = Queue()
    q.put(s)
    while not q.empty():
        u = q.get()
        for v in graph[u]:
            if not visited[v]:
                visited[v] = True
                dist[v] = dist[u] + 1  # one edge farther from the source
                path[v] = u            # remember the predecessor so printPath can walk back
                q.put(v)
def printPath(s, f):
    b = []
    if f == s:
        print(s)
        return
b.append(f)
f = path[f]
if f == s:
b.append(s)
break
for i in range(len(b)-1, -1, -1):
print(b[i], end = " ")
if __name__ == '__main__':
V, E = map(int, input().split())
for i in range(E):
u, v = map(int, input().split())
graph[u].append(v)
graph[v].append(u)
s = 1
f = 5
BFS(s)
printPath(s, f)
| baocogn/self-learning | big_o_coding/Blue_13/Schoolwork/day_5_BFS.py | day_5_BFS.py | py | 985 | python | en | code | 0 | github-code | 36 |
7154136673 | # 23. Merge k Sorted Lists
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
def mergeKLists(self, lists: 'List[ListNode]') -> 'ListNode':
newhead = ListNode(None)
curr = newhead
import heapq
heap = []
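        # heap entries are (node value, source-list index); the index breaks ties,
        # since ListNode objects themselves are not comparable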
for i in range(len(lists)):
root = lists[i]
if root:
heapq.heappush(heap, (root.val, i))
lists[i] = lists[i].next
while heap:
# Pop the smallest value from the heap
popped_val, idx = heapq.heappop(heap)
curr.next = ListNode(popped_val)
curr = curr.next
root = lists[idx]
if root:
heapq.heappush(heap, (root.val, idx))
lists[idx] = lists[idx].next
return newhead.next | Fanll123/MyLeetcodeExercise | 23.py | 23.py | py | 951 | python | en | code | 0 | github-code | 36 |