seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
18903219922 | from logging import getLogger
from os.path import join
from configparser import NoOptionError
from uchicagoldrtoolsuite import log_aware
from uchicagoldrtoolsuite.core.app.abc.cliapp import CLIApp
from ..lib.writers.filesystemstagewriter import FileSystemStageWriter
from ..lib.readers.filesystemstagereader import FileSystemStageReader
from ..lib.processors.generictechnicalmetadatacreator import \
GenericTechnicalMetadataCreator
from ..lib.techmdcreators.fitscreator import FITsCreator
from ..lib.techmdcreators.apifitscreator import APIFITsCreator
__author__ = "Brian Balsamo"
__email__ = "balsamo@uchicago.edu"
__company__ = "The University of Chicago Library"
__copyright__ = "Copyright University of Chicago, 2016"
__publication__ = ""
__version__ = "0.0.1dev"
log = getLogger(__name__)
def launch():
    """Console-script entry point: construct the CLI app with the module
    metadata and hand control to its main() method."""
    metadata = {
        "__author__": __author__,
        "__email__": __email__,
        "__company__": __company__,
        "__copyright__": __copyright__,
        "__publication__": __publication__,
        "__version__": __version__,
    }
    TechnicalMetadataCreator(**metadata).main()
class TechnicalMetadataCreator(CLIApp):
    """
    Creates technical metadata (FITs) for all the material suites in a stage.
    """
    @log_aware(log)
    def main(self):
        """Parse CLI arguments, run a FITS processor (local executable or
        servlet) over every materialsuite in the requested stage, and write
        the augmented stage back to the staging environment.
        """
        # Instantiate boilerplate parser
        self.spawn_parser(description="The UChicago LDR Tool Suite utility " +
                          "creating technical metadata for materials in " +
                          "a stage.",
                          epilog="{}\n".format(self.__copyright__) +
                          "{}\n".format(self.__author__) +
                          "{}".format(self.__email__))
        # Add application specific flags/arguments
        log.debug("Adding application specific cli app arguments")
        self.parser.add_argument("stage_id", help="The id of the stage",
                                 type=str, action='store')
        self.parser.add_argument("--skip_existing", help="Skip material " +
                                 "suites which already claim to have " +
                                 "technical metadata",
                                 action='store_true',
                                 default=False)
        self.parser.add_argument("--staging_env", help="The path to your " +
                                 "staging environment",
                                 type=str,
                                 default=None)
        self.parser.add_argument("--eq_detect", help="The equality " +
                                 "metric to use on writing, check " +
                                 "LDRItemCopier for supported schemes.",
                                 type=str, action='store',
                                 default="bytes")
        self.parser.add_argument("--fits_path", help="The path to the FITS " +
                                 "executable on this system. " +
                                 "Overrides any value found in configs.",
                                 type=str, action='store',
                                 default=None)
        self.parser.add_argument("--fits_api_url", help="The url of a FITS " +
                                 "Servlet examine endpoint. " +
                                 "Overrides any value found in configs.",
                                 type=str, action='store',
                                 default=None)
        self.parser.add_argument("--use_api", help="Use a FITS Servlet " +
                                 "instead of a local FITS install.",
                                 action="store_true",
                                 default=False)
        # Parse arguments into args namespace
        args = self.parser.parse_args()
        self.process_universal_args(args)
        # App code
        # CLI flag wins; otherwise fall back to the configured staging path.
        if args.staging_env:
            staging_env = args.staging_env
        else:
            staging_env = self.conf.get("Paths", "staging_environment_path")
        staging_env = self.expand_path(staging_env)
        # Build the data transfer object handed to the techmd processors.
        # Config values are optional (NoOptionError is swallowed on purpose);
        # explicit CLI flags override whatever the config provided.
        dto = {}
        try:
            dto['fits_path'] = self.conf.get("Paths", "fits_path")
        except NoOptionError:
            pass
        try:
            dto['fits_api_url'] = self.conf.get("URLs", "fits_api_url")
        except NoOptionError:
            pass
        if args.fits_api_url is not None:
            dto['fits_api_url'] = args.fits_api_url
        if args.fits_path is not None:
            dto['fits_path'] = args.fits_path
        # Read the stage off disk.
        reader = FileSystemStageReader(staging_env, args.stage_id)
        stage = reader.read()
        log.info("Stage: " + join(staging_env, args.stage_id))
        log.info("Processing...")
        # Choose the servlet-backed or local-executable FITS processor.
        if args.use_api:
            techmd_processors = [APIFITsCreator]
        else:
            techmd_processors = [FITsCreator]
        techmd_creator = GenericTechnicalMetadataCreator(stage,
                                                         techmd_processors)
        techmd_creator.process(skip_existing=args.skip_existing,
                               data_transfer_obj=dto)
        log.info("Writing...")
        # Persist the stage, using the requested equality-detection scheme.
        writer = FileSystemStageWriter(stage, staging_env,
                                       eq_detect=args.eq_detect)
        writer.write()
        log.info("Complete")
if __name__ == "__main__":
    # NOTE(review): unlike launch(), this path constructs the app without the
    # __author__/__version__ metadata kwargs — confirm CLIApp tolerates that.
    s = TechnicalMetadataCreator()
    s.main()
| uchicago-library/uchicagoldr-toolsuite | uchicagoldrtoolsuite/bit_level/app/technicalmetadatacreator.py | technicalmetadatacreator.py | py | 5,572 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "uchicagoldrtoolsuite.core.app.abc.cliapp.CLIApp",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "configparser.NoOptionError",
"line_number": 102,
"usage_type": "name"
... |
15138075498 | """
test cli module
"""
import subprocess
from typing import List, Tuple
def capture(command: List[str]) -> Tuple[bytes, bytes, int]:
    """Run *command* in a subprocess and return (stdout, stderr, exit code)."""
    process = subprocess.Popen(
        command,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    )
    stdout_data, stderr_data = process.communicate()
    return stdout_data, stderr_data, process.returncode
def test_cli() -> None:
    """The installed ``sdwaddle`` console script should exit with status 0."""
    command = ["sdwaddle"]
    stdout, stderr, exitcode = capture(command)
    assert exitcode == 0
| entelecheia/super-duper-waddle | tests/sdwaddle/test_cli.py | test_cli.py | py | 469 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "subprocess.Popen",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
... |
15860273253 | from __future__ import division
from builtins import str
import numpy as np
import pandas as pd
import seaborn as sns
from .helpers import *
import matplotlib.pyplot as plt
import matplotlib as mpl
mpl.rcParams['pdf.fonttype'] = 42
def plot(results, subjgroup=None, subjname='Subject Group', listgroup=None,
         listname='List', subjconds=None, listconds=None, plot_type=None,
         plot_style=None, title=None, legend=True, xlim=None, ylim=None,
         save_path=None, show=True, ax=None, **kwargs):
    """
    General plot function that groups data by subject/list number and performs analysis.

    Parameters
    ----------
    results : quail.FriedEgg
        Object containing results
    subjgroup : list of strings or ints
        String/int variables indicating how to group over subjects. Must be
        the length of the number of subjects
    subjname : string
        Name of the subject grouping variable
    listgroup : list of strings or ints
        String/int variables indicating how to group over list. Must be
        the length of the number of lists
    listname : string
        Name of the list grouping variable
    subjconds : list
        List of subject hues (str) to plot
    listconds : list
        List of list hues (str) to plot
    plot_type : string
        Specifies the type of plot. If list (default), the list groupings
        (listgroup) will determine the plot grouping. If subject, the subject
        groupings (subjgroup) will determine the plot grouping. If split
        (currently just works for accuracy plots), both listgroup and
        subjgroup will determine the plot groupings
    plot_style : string
        Specifies the style of the plot. This currently works only for
        accuracy and fingerprint plots. The plot style can be bar (default for
        accuracy plot), violin (default for fingerprint plots) or swarm.
    title : string
        The title of the plot
    legend : bool
        If true (default), a legend is plotted.
    ylim : list of numbers
        A ymin/max can be specified by a list of the form [ymin, ymax]
    xlim : list of numbers
        A xmin/max can be specified by a list of the form [xmin, xmax]
    save_path : str
        Path to save out figure. Include the file extension, e.g.
        save_path='figure.pdf'
    show : bool
        Currently unused; figure display is left to the caller.
    ax : Matplotlib.Axes object or None
        A plot object to draw to. If None, a new one is created and returned.

    Returns
    ----------
    ax : matplotlib.Axes.Axis
        An axis handle for the figure
    """

    def _get_plot_func(style, default):
        # Resolve a seaborn plotting callable from a style name.
        # Bug fix: the original compared strings with `is` (e.g.
        # ``plot_style is 'bar'``), which only worked by accident of CPython
        # interning short literals; `==` is the correct comparison.
        style = style if style is not None else default
        styles = {'bar': sns.barplot, 'swarm': sns.swarmplot,
                  'violin': sns.violinplot}
        if style not in styles:
            raise ValueError(
                "plot_style must be one of: " + ", ".join(sorted(styles)))
        return styles[style]

    def plot_acc(data, plot_style, plot_type, listname, subjname, **kwargs):
        # default style: bar; default grouping: by list
        plot_func = _get_plot_func(plot_style, 'bar')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Accuracy", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Accuracy", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Accuracy", hue=listname, **kwargs)
        return ax

    def plot_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
        # default style: bar; default grouping: by list
        plot_func = _get_plot_func(plot_style, 'bar')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x=listname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", **kwargs)
        elif plot_type == 'split':
            ax = plot_func(data=data, x=subjname, y="Temporal Clustering Score", hue=listname, **kwargs)
        return ax

    def plot_fingerprint(data, plot_style, plot_type, listname, subjname, **kwargs):
        # default style: violin. (Consistency fix: the original read the
        # closed-over tidy_data here instead of its `data` argument; callers
        # always pass tidy_data, so using the argument is behavior-neutral.)
        plot_func = _get_plot_func(plot_style, 'violin')
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", **kwargs)
        return ax

    def plot_fingerprint_temporal(data, plot_style, plot_type, listname, subjname, **kwargs):
        # default style: violin; keep the feature order stable across hues
        plot_func = _get_plot_func(plot_style, 'violin')
        plot_type = plot_type if plot_type is not None else 'list'
        order = list(data['Feature'].unique())
        if plot_type == 'list':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=listname, order=order, **kwargs)
        elif plot_type == 'subject':
            ax = plot_func(data=data, x="Feature", y="Clustering Score", hue=subjname, order=order, **kwargs)
        else:
            ax = plot_func(data=data, x="Feature", y="Clustering Score", order=order, **kwargs)
        return ax

    def plot_spc(data, plot_style, plot_type, listname, subjname, **kwargs):
        # serial position curve: always a line plot
        plot_type = plot_type if plot_type is not None else 'list'
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y="Proportion Recalled", hue=listname, **kwargs)
        ax.set_xlim(0, data['Position'].max())
        return ax

    def plot_pnr(data, plot_style, plot_type, listname, subjname, position, list_length, **kwargs):
        # probability of nth recall, for the given output position
        plot_type = plot_type if plot_type is not None else 'list'
        ycol = 'Probability of Recall: Position ' + str(position)
        if plot_type == 'subject':
            ax = sns.lineplot(data=data, x="Position", y=ycol, hue=subjname, **kwargs)
        elif plot_type == 'list':
            ax = sns.lineplot(data=data, x="Position", y=ycol, hue=listname, **kwargs)
        ax.set_xlim(0, list_length - 1)
        return ax

    def plot_lagcrp(data, plot_style, plot_type, listname, subjname, **kwargs):
        # lag-CRP: plot negative and positive lags as two separate segments
        plot_type = plot_type if plot_type is not None else 'list'
        hue = subjname if plot_type == 'subject' else listname
        ax = sns.lineplot(data=data[data['Position'] < 0], x="Position",
                          y="Conditional Response Probability", hue=hue, **kwargs)
        # reuse the axis for the positive half; drop any caller-supplied ax
        if 'ax' in kwargs:
            del kwargs['ax']
        sns.lineplot(data=data[data['Position'] > 0], x="Position",
                     y="Conditional Response Probability", hue=hue, ax=ax,
                     legend=False, **kwargs)
        ax.set_xlim(-5, 5)
        return ax

    # if no grouping, set default to iterate over each list independently
    subjgroup = subjgroup if subjgroup is not None else results.data.index.levels[0].values
    listgroup = listgroup if listgroup is not None else results.data.index.levels[1].values

    if subjconds:
        # make sure it's a list
        if not isinstance(subjconds, list):
            subjconds = [subjconds]
        # slice the data down to the requested subject conditions
        idx = pd.IndexSlice
        results.data = results.data.sort_index()
        results.data = results.data.loc[idx[subjconds, :], :]
        # filter subjgroup. Bug fix: the original left this as a lazy
        # `filter` object, which can only be iterated once downstream.
        subjgroup = [s for s in subjgroup if s in subjconds]

    if listconds:
        # make sure it's a list
        if not isinstance(listconds, list):
            listconds = [listconds]
        # slice the data down to the requested list conditions
        idx = pd.IndexSlice
        results.data = results.data.sort_index()
        results.data = results.data.loc[idx[:, listconds], :]

    # convert to tidy format for plotting
    tidy_data = format2tidy(results.data, subjname, listname, subjgroup,
                            analysis=results.analysis, position=results.position)

    # route a caller-supplied axis through to the plotting helpers
    if ax is not None:
        kwargs['ax'] = ax

    # dispatch on the analysis type
    if results.analysis == 'accuracy':
        ax = plot_acc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'temporal':
        ax = plot_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'fingerprint':
        ax = plot_fingerprint(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'fingerprint_temporal':
        ax = plot_fingerprint_temporal(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'spc':
        ax = plot_spc(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    elif results.analysis == 'pfr' or results.analysis == 'pnr':
        ax = plot_pnr(tidy_data, plot_style, plot_type, listname, subjname,
                      position=results.position, list_length=results.list_length, **kwargs)
    elif results.analysis == 'lagcrp':
        ax = plot_lagcrp(tidy_data, plot_style, plot_type, listname, subjname, **kwargs)
    else:
        raise ValueError("Did not recognize analysis.")

    # add title
    if title:
        plt.title(title)
    # remove the legend on request (some plot types may not have one)
    if legend is False:
        try:
            ax.legend_.remove()
        except Exception:
            pass
    if xlim:
        plt.xlim(xlim)
    if ylim:
        plt.ylim(ylim)
    if save_path:
        # keep fonts editable in saved PDFs
        mpl.rcParams['pdf.fonttype'] = 42
        plt.savefig(save_path)
    return ax
| ContextLab/quail | quail/plot.py | plot.py | py | 10,790 | python | en | code | 18 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "seaborn.barplot",
"line_number": 93,
"usage_type": "attribute"
},
{
"api_name": "seaborn.swarmplot",
"line_number": 95,
"usage_type": "attribute"
},
{
"api_name": "... |
14860521204 | #!/usr/bin/env python
'''
对测试集数据进行测试,统计所有数据平均的RRMSE,SNR和CC值
'''
import argparse
import os
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from utility.data import EEGData
from utility.conv_tasnet_v1 import TasNet
from utility.network import ResCNN, Novel_CNN2, Novel_CNN, fcNN
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import math
import scipy.io as io
from scipy.signal import butter, lfilter
EPS = 1e-8
parser = argparse.ArgumentParser('Evaluate separation performance using Conv-TasNet')
parser.add_argument('--model_path', type=str,
help='Path to model file created by training')
parser.add_argument('--data_dir', type=list, default=['./data/mixdata/foldtrain.txt',
'./data/mixdata/foldtest.txt'],
help='directory of fold')
parser.add_argument('--use_cuda', type=int, default=1,
help='Whether use GPU')
parser.add_argument('--batch_size', default=128, type=int,
help='Batch size')
parser.add_argument('--num_workers', default=0, type=int,
help='Num_workers')
# Network architecture
parser.add_argument('--N', default=256, type=int,
help='Encode dim')
parser.add_argument('--B', default=64, type=int,
help='Feature dim')
parser.add_argument('--sr', default=512, type=int,
help='Sample rate')
parser.add_argument('--L', default=16, type=int,
help='Length of the filters in samples (16=16ms at 1kHZ)')
parser.add_argument('--X', default=6, type=int,
help='Number of convolutional blocks in each repeat')
parser.add_argument('--R', default=3, type=int,
help='Number of repeats')
parser.add_argument('--P', default=3, type=int,
help='Kernel size in convolutional blocks')
parser.add_argument('--C', default=1, type=int,
help='Number of speakers')
# 计算相关系数
def calc_corr(a, b):
    """Pearson correlation coefficient between two equal-length sequences."""
    mean_a = sum(a) / len(a)
    mean_b = sum(b) / len(b)
    # Numerator: un-normalised covariance (the 1/n factors cancel against
    # the denominator, so neither side divides by n).
    numerator = sum((x - mean_a) * (y - mean_b) for x, y in zip(a, b))
    # Denominator: product of the un-normalised standard deviations.
    denominator = math.sqrt(
        sum((x - mean_a) ** 2 for x in a) * sum((y - mean_b) ** 2 for y in b))
    return numerator / denominator
def butter_lowpass(data, cutoff, fs, order=5):
    """Apply an order-`order` Butterworth low-pass filter to `data`.

    `cutoff` is the -3 dB frequency in Hz, `fs` the sampling rate in Hz.
    Returns the filtered signal (same length as the input).
    """
    # Normalise the cutoff to the Nyquist frequency as required by butter().
    nyquist = 0.5 * fs
    b, a = butter(order, cutoff / nyquist, btype='low', analog=False)
    return lfilter(b, a, data)
def evaluate(args, snr_test=1):
    """Run the trained model over the test set restricted to one input SNR.

    Returns four per-sample lists: time-domain RRMSE, correlation
    coefficient, output SNR (dB), and RRMSE of the power spectrum. Also
    saves the denoised signals to result/result_<snr>.mat as a side effect.
    """
    # Load model
    package = torch.load(args.model_path)
    model = TasNet(args.N, args.B, args.sr, args.L, args.X, args.R, args.P, args.C)
    # model = fcNN(lenth=1024)
    # model = Novel_CNN2(len_signal=1024)
    # model = ResCNN(1024)
    model.load_state_dict(package['model'])
    # print(model)
    # for name in model.state_dict():
    #     print(name)
    # print('encoder1:',model.state_dict()['encoder1.weight'])
    model.eval()
    if args.use_cuda:
        model.cuda()
    # Load data
    f_test = np.load('../eegdenoisenet/testdata512/test_eeg.npz')
    noiseEEG_test, EEG_test, SNRs_test = f_test['noiseEEG_test'], f_test['EEG_test'], f_test['SNRs_test']
    # Select only the samples recorded at the requested SNR level.
    idx = np.where(SNRs_test == snr_test)[0]
    # Comment out the next two lines to evaluate all SNR levels at once.
    noiseEEG_test = noiseEEG_test[idx]
    EEG_test = EEG_test[idx]
    evaluate_dataset = EEGData(noiseEEG_test, EEG_test)
    evaluate_loader = DataLoader(evaluate_dataset, batch_size=args.batch_size, shuffle=False,
                                 num_workers=args.num_workers)
    with torch.no_grad():
        Snr = 1.5  # NOTE(review): unused local — confirm before removing
        RRMSEspec_total = []
        RRMSE_total = []
        CC_total = []
        SNR = []
        estimate_all = []
        for i, data in enumerate(evaluate_loader):
            # Get batch data
            eeg_mix = data['eeg_mix'].type(torch.float32)
            # emg = data['emg'].type(torch.float32)
            eeg_clean = data['eeg_clean'].type(torch.float32)
            # Low-pass (20 Hz) copy of the mixture, fed to the model
            # alongside the raw mixture as a second input branch.
            lpeeg_mix = butter_lowpass(eeg_mix, 20, 500)
            # hpeeg_mix = butter_highpass(eeg_mix,100,500)
            eeg_mix = eeg_mix.type(torch.float32)
            lpeeg_mix = torch.from_numpy(lpeeg_mix).type(torch.float32)
            # hpeeg_mix = torch.from_numpy(hpeeg_mix).type(torch.float32)
            # lpeeg_clean = data['lpeeg_clean']
            # Forward
            if args.use_cuda:
                eeg_clean = eeg_clean.cuda()
                lpeeg_mix = lpeeg_mix.cuda()
                eeg_mix = eeg_mix.cuda()
            estimate_source1, estimate_source2 = model(lpeeg_mix, eeg_mix)
            # estimate_source2 = model(eeg_mix)
            # Output SNR: clean power over residual-error power, in dB.
            e_noise = eeg_clean - estimate_source2.squeeze()
            snr = 10 * torch.log10(torch.sum(eeg_clean ** 2, dim=1) / (torch.sum(e_noise ** 2, dim=1) + EPS) + EPS)
            estimate2 = estimate_source2.cpu().numpy().squeeze()
            # emg = emg.numpy()
            eeg_clean = eeg_clean.cpu().numpy()
            estimate_all.append(estimate2)
            # eeg_mix = eeg_mix.numpy()
            for j in range(estimate2.shape[0]):
                eeg_snr = snr[j].item()
                SNR.append(eeg_snr)
                # Relative RMSE in the time domain.
                e_noise = eeg_clean[j, :] - estimate2[j, :]
                RRMSE = np.sqrt(np.sum(e_noise ** 2) / (np.sum(eeg_clean[j, :] ** 2) + EPS) + EPS)
                # TODO
                # Relative RMSE between the power spectral densities.
                Pxx_eeg, _ = mlab.psd(eeg_clean[j, :], NFFT=1024)
                Pxx_estimate, _ = mlab.psd(estimate2[j, :], NFFT=1024)
                # Pxx_eeg = Pxx_eeg[:201] # TODO
                # Pxx_estimate = Pxx_estimate[:201] # TODO
                RRMSEspec = np.sqrt(np.sum((Pxx_estimate - Pxx_eeg) ** 2) / (np.sum(Pxx_eeg ** 2) + EPS) + EPS)
                eeg_RRMSE = RRMSE
                # Correlation coefficient between clean and denoised signals.
                cc = calc_corr(eeg_clean[j, :], estimate2[j, :])
                RRMSEspec_total.append(RRMSEspec)
                RRMSE_total.append(eeg_RRMSE)
                CC_total.append(cc)
        # Stack every batch's estimates and dump them for offline inspection.
        estimate_all = np.vstack(estimate_all)
        io.savemat(os.path.join('result', f'result_{snr_test}.mat'),
                   {'estimate_EEG': estimate_all, 'eeg_mix': noiseEEG_test, 'eeg_clean': EEG_test})
        # plt.psd
    return RRMSE_total, CC_total, SNR, RRMSEspec_total
if __name__ == '__main__':
    args = parser.parse_args()
    # Hard-coded checkpoint path overrides whatever was passed on the CLI.
    args.model_path = '/mnt/DEV/han/eeg/DASTCN_grnFFT/checkpoint/EEGARNet_model/epoch89.pth.tar'
    out_dir = 'result'
    print(args)
    meanRRMSEs = []
    meanRRMSEspecs = []
    meanCCs = []
    meanSNRs = []
    # Sweep the test SNR from -7 dB to 2 dB in 1 dB steps.
    for snr in np.linspace(-7, 2, 10):
        print(snr)
        RRMSE_total, CC_total, SNR, RRMSEspec_total = evaluate(args, snr_test=snr)
        # Aggregate the per-sample metrics (rounded to 4 decimals).
        meanRRMSE = np.round(np.mean(RRMSE_total), 4)
        meanRRMSEspec = np.round(np.mean(RRMSEspec_total), 4)
        meanSNR = np.round(np.mean(SNR), 4)
        # NOTE(review): the variances below are computed but never reported.
        varRRMSE = np.round(np.var(RRMSE_total), 4)
        meanCC = np.round(np.mean(CC_total), 4)
        varCC = np.round(np.var(CC_total), 4)
        print('meanRRMSEspec:', meanRRMSE)
        print('meanRRMSEspec:', meanRRMSEspec)
        print('meanCC:', meanCC)
        print('meanSNR:', meanSNR)
        print('*' * 10)
        meanRRMSEs.append(meanRRMSE)
        meanCCs.append(meanCC)
        meanSNRs.append(meanSNR)
        meanRRMSEspecs.append(meanRRMSEspec)
    # Save the per-SNR summary table for plotting elsewhere.
    os.makedirs(out_dir, exist_ok=True)
    io.savemat(os.path.join(out_dir, 'result_perSNR.mat'), {'meanRRMSEs': meanRRMSEs,
                                                            'meanCCs': meanCCs,
                                                            'meanSNRs': meanSNRs,
                                                            'meanRRMSEspec': meanRRMSEspecs})
| BaenRH/DSATCN | code/evaluate_perSNR.py | evaluate_perSNR.py | py | 8,151 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "scipy.signal.butter",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "scipy.signal.lf... |
27271629688 | import time, random
import pygame
screen_w = 800
screen_h = 600
# Create the window
screen = pygame.display.set_mode((screen_w,screen_h))
black = (0,0, 0)
red = (255,0,0)
green = (0, 200, 0)
class Game(object):
    """Global game state: the window surface, score tracking and the speed
    progression used for scoring and difficulty."""

    def __init__(self):
        self.screen = pygame.display.set_mode((800, 600))
        self.score = 0
        self.oldScore = 0          # score of the previous run, shown on screen
        self.speed = 5             # points added per frame
        self.speedMultiplier = 1   # how many 2000-point thresholds were passed
        # Bug fix: a single Clock must be reused across frames. The original
        # created a fresh pygame.time.Clock() on every setFPS() call, so
        # tick() had no previous timestamp to compare against and the frame
        # limiter never actually slept.
        self.clock = pygame.time.Clock()

    def gameInit(self):
        """Initialise all pygame modules."""
        pygame.init()

    def gameName(self, name):
        """Set the window caption."""
        pygame.display.set_caption(name)

    def setFPS(self, fps):
        """Cap the frame rate at `fps` frames per second."""
        self.clock.tick(fps)

    def screenColor(self):
        """Clear the screen to the background colour."""
        self.screen.fill(green)

    def incSpeed(self, newSpeed):
        """Set the per-frame scoring speed."""
        self.speed = newSpeed

    def resetGame(self):
        """Archive the score and restore the initial speed after a death."""
        self.oldScore = self.score
        self.score = 0
        self.speed = 5
        self.speedMultiplier = 1
# Player Class
class Player(object):
    """The player-controlled red square."""
    # Spawn position, shared as class-level defaults.
    init_x = 150
    init_y = 405
    def __init__(self):
        self.x = Player.init_x
        self.y = Player.init_y
        self.width = 50
        self.height = 50
        self.vel = 10          # horizontal speed in pixels per frame
        self.isJump = False    # True while a jump is in progress
        self.jumpCount = 10    # countdown driving the jump parabola
    # Function to draw the players geometry, in this case just a red square
    def draw(self):
        # Red square as player
        pygame.draw.rect(screen, red, [self.x, self.y, self.width, self.height])
        # Black contour defined by hitbox
        pygame.draw.rect(screen, black, (self.x, self.y, self.width, self.height), 3)
    def jump(self):
        # Using a mathematical equation with square calculations for jumping.
        # When the player reaches a certain height, jumpCount will get a negative value
        # so that the player starts falling down.
        if self.jumpCount >= -10:
            neg = 1
            if self.jumpCount < 0:
                neg = -1
            # Parabolic step: rise while jumpCount > 0, fall once it goes
            # negative (neg flips the sign of the displacement).
            self.y -= (self.jumpCount ** 2) * 0.8 * neg
            self.jumpCount -= 1
        else:
            # Landed: end the jump and re-arm the counter for the next one.
            self.isJump = False
            self.jumpCount = 10
    def setVel(self, vel):
        # Set the horizontal movement speed.
        self.vel = vel
    # Reset position after death
    def reset(self):
        self.x = 150
        self.y = 405
        self.width = 50
        self.height = 50
# Enemy Class
class Enemy(object):
    """A black triangle that scrolls left toward the player and wraps around."""
    def __init__(self, x, y, width, height):
        self.x = x
        self.y = y
        self. width = width
        self.height = height
        self.end = screen_w    # respawn reference: the right edge of the screen
        self.vel = 10          # leftward speed in pixels per frame
    # Function to draw the geometry of the enemies, in this case just a polygon in the form of a triangle
    def draw(self):
        # Calling function move() to actually draw the movement on the screen
        self.move()
        # If the enemy moves out of the screen it will appear on the other side again, looping
        if self.x < 0:
            self.x = self.end + random.randint(0,350)
        # Pointlist is a set of coordinates for the polygon [(x1,y1),(x2,y2) and so on];
        # (self.x, self.y) is the bottom-right corner, the shape extends left/up.
        pointlist = [(self.x - self.width/2, self.y - self.width), (self.x - self.height/2, self.y - self.height), (self.x, self.y), (self.x - self.width, self.y)]
        # Draw the red contour
        pygame.draw.polygon(screen, red, pointlist, 3)
        # Draw the black triangle defined by pointlist
        pygame.draw.polygon(screen, black, pointlist, 0)
    # Function to move the enemy to the left towards the player at a certain velocity
    def move(self):
        self.x -= self.vel
    def setVel(self, newVel):
        # Set the scrolling speed.
        self.vel = newVel
    # Resets position after death; `offset` staggers multiple enemies apart.
    def reset(self, offset):
        self.x = random.randint(750,850) + offset
        self.y = 454
        self.width = 30
        self.height = 30
        self.vel = 10
# Function to call all class related functions to upload the drawings to the screen
def redrawGameWindow(player, enemyList):
    """Draw the player, every enemy and the ground strip, then refresh
    the display so the new frame becomes visible."""
    player.draw()
    for foe in enemyList:
        foe.draw()
    # Red ground strip along the bottom of the window.
    pygame.draw.rect(screen, red, [0, 456, screen_w, 200])
    # Push the completed frame to the screen.
    pygame.display.update()
# Whenever the player is touches the enemy this function is called and displayes the message DEAD on screen
def printMSG(msg, x, y, size):
    """Render `msg` in black at (x, y) using pygame's default font at `size`."""
    font = pygame.font.Font(None, size)
    rendered = font.render(msg, True, (0, 0, 0))
    # Blit the rendered text surface onto the main screen.
    screen.blit(rendered, (x, y))
# Collision calculation with enemies, when the square touches the triangles it will display message "DEAD"
def checkCollision(game, player, enemies):
    # Bounding-box overlap test between the player square and each enemy
    # triangle; on a hit, flash "DEAD", pause briefly, and reset everything.
    for enemy in enemies:
        # Horizontal overlap: enemy.x is its right edge; width extends left.
        if (player.x + player.width) >= (enemy.x - enemy.width) and player.x <= enemy.x:
            # Vertical overlap: enemy.y is its bottom edge; height extends up.
            if (player.y + player.height) >= (enemy.y - enemy.height) and player.y <= enemy.y:
                printMSG("DEAD", 355, 250, 50)
                redrawGameWindow(player, enemies)
                time.sleep(1)
                # When collision occurs the game resets
                player.reset()
                # Respawn the two enemies with different horizontal offsets.
                enemies[0].reset(100)
                enemies[1].reset(450)
                game.resetGame()
# Increases and prints score as well as the old score
def scoreUpdate(game):
    """Add the current speed to the score and draw both score counters."""
    game.score += game.speed
    printMSG("Score: " + str(game.score), 50, 50, 40)
    printMSG("Old Score: " + str(game.oldScore), 500, 50, 40)
# Function that increases the speed every 1000 score
def speedUpdate(game, enemylist):
    """Every 2000 points, bump the multiplier and speed up every enemy.

    (The original comment said "every 1000 score", but the threshold used
    here has always been 2000 * speedMultiplier.)
    """
    threshold = 2000 * game.speedMultiplier
    if game.score >= threshold:
        game.speedMultiplier += 1
        for foe in enemylist:
            foe.setVel(foe.vel + 1)
def main():
    """Set up the game objects and run the frame loop until the window closes."""
    # Game instance
    game = Game()
    game.gameInit()
    game.gameName("Running Game 2")
    # Player 1
    sq = Player()
    # Enemies 1 and 2
    ey = Enemy(random.randint(750,850),454,30,30)
    ey2 = Enemy(random.randint(1200,1400), 454, 30, 30)
    # Enemy list, if several add here
    enemyList = [ey, ey2]
    # Game condition
    running = True
    # Game loop
    while running:
        # Set screen color in RGB
        game.screenColor()
        # Continously check all events that are happening in the game
        for event in pygame.event.get():
            # Check if window is closed when the cross is pressed
            if event.type == pygame.QUIT:
                running = False
        # Variable for checking if any key is pressed
        keys = pygame.key.get_pressed()
        # Arrow key movements of player
        if keys[pygame.K_LEFT] and sq.x > 0:
            # Move player to the left with the given velocity when left key is pressed
            sq.x -= sq.vel
        if keys[pygame.K_RIGHT] and sq.x < screen_w - sq.width:
            sq.x += sq.vel
        # Jump function: space starts a jump; once airborne, advance the
        # jump parabola one step per frame until it completes.
        if not(sq.isJump):
            if keys[pygame.K_SPACE]:
                sq.isJump = True
        else:
            sq.jump()
        # Updates score every loop
        scoreUpdate(game)
        # Increases speed every 2000 score
        speedUpdate(game, enemyList)
        # Collision detection between player and enemies
        checkCollision(game, sq, enemyList)
        # Calling this function every loop to update the drawings to screen
        redrawGameWindow(sq, enemyList)
        # Frames per second
        game.setFPS(30)
if __name__ == "__main__":
    main()
    # Shut pygame down cleanly once the game loop exits.
    pygame.quit()
| eliazz95/JumpingGame | firstGame.py | firstGame.py | py | 6,406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.display.set_mode",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pyg... |
42600266577 | # Копирование найденных надежных и ненадежных аудиозаписей по Социуму за 2017-2018 на архивный диск
import openpyxl, traceback
import os, string, sys, shutil
from collections import Counter
from lib import l, fine_snils_, read_config
FIND_CATALOG = '/media/da3/asteriskBeagleAl/'
#CHANGE_ON_WINDOWS = 'Z:/'
#OUTPUT_CATALOG = 'O:/Документы/Записи/'
OUTPUT_CATALOG = '/media/da3/backup/'
TRUSTREESTR = 'Надежные.xlsx'
PROBLEMREESTR = 'Остальные.xlsx'
REESTRS = '/home/da3/Beagle/потеряшкиАудиозаписи/реестры/'
def isSNILS(snils):
    """Return True when `snils` looks like 'XXX-XXX-XXX NN' (a SNILS number
    with dashes at positions 3 and 7 and a space or underscore at 11)."""
    if snils is None:
        return False
    # Normalise whitespace the same way the spreadsheet cells are cleaned.
    cleaned = (str(snils).replace('\n', ' ')
               .replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip())
    if len(cleaned) <= 11:
        return False
    return cleaned[3] == '-' and cleaned[7] == '-' and cleaned[11] in (' ', '_')
def isAudio(audio):
    """Classify an audio path by its filename pattern.

    Returns ['длинный', name] for the DD.MM.YYYY_HH-MM-SS style,
    ['короткий', name] for the 25-digit style, or ['', audio] otherwise.
    """
    if audio is None:
        return ['', audio]
    cleaned = (str(audio).replace('\n', ' ')
               .replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip())
    # Keep only the basename and strip a trailing dot / audio extension.
    basename = cleaned.split('/')[-1]
    if basename.endswith('.'):
        basename = basename[:-1]
    if basename.endswith('.mp3') or basename.endswith('.wav'):
        basename = basename[:-4]
    if len(basename) <= 26:
        return ['', audio]
    # Long form: DD.MM.YYYY_HH-MM-SS... with '-' or '_' time separators.
    if (basename[2] == '.' and basename[5] == '.' and basename[10] == '_'
            and basename[13] in ('-', '_') and basename[16] in ('-', '_')):
        return ['длинный', basename]
    # Short form: 25 digits within the first 26 characters, '_' at index 14.
    digits = ''.join(ch for i, ch in enumerate(basename)
                     if ch in string.digits and i < 26)
    if len(digits) == 25 and basename[14] == '_':
        return ['короткий', basename]
    return ['', audio]
def isSocium(audio):
    """True when the filename encodes a 2017 or 2018 date in either of the
    two recognised naming schemes (long dotted form or short digit form)."""
    if audio is None:
        return False
    cleaned = (str(audio).replace('\n', ' ')
               .replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').strip())
    basename = cleaned.split('/')[-1]
    if len(basename) <= 26:
        return False
    # Long form DD.MM.YYYY_... — the year occupies characters 6-9.
    if (basename[2] == '.' and basename[5] == '.' and basename[10] == '_'
            and basename[13] in ('-', '_') and basename[16] in ('-', '_')
            and basename[6:10] in ('2017', '2018')):
        return True
    # Short all-digit form YYYYMMDD... — the year is the leading four chars.
    digits = ''.join(ch for i, ch in enumerate(basename)
                     if ch in string.digits and i < 26)
    if len(digits) == 25 and basename[14] == '_' and basename[:4] in ('2017', '2018'):
        return True
    return False
# Print the traceback of any error to stdout.
def full_tb_write(*args):
    """Print a traceback to stdout.

    Accepts either no arguments (use the exception currently being handled),
    a single exception instance, or the (type, value, traceback) triple.
    Any other arity is silently ignored, matching the original behaviour.
    """
    if len(args) == 1:
        tb = args[0].__traceback__
    elif len(args) == 3:
        tb = args[2]
    elif not args:
        tb = sys.exc_info()[2]
    else:
        return
    traceback.print_tb(tb, file=sys.stdout)
# Collect, per SNILS, the list of "trusted" audio paths from the registry workbook.
snilsesTrust = {}
snilsesTrustShort = {}
wb = openpyxl.load_workbook(filename=TRUSTREESTR, read_only=True)
for sheetname in wb.sheetnames:
    sheet = wb[sheetname]
    # read_only sheets can report a falsy max_row when the workbook was saved
    # without dimension info; ask the user to re-save it and skip the sheet.
    if not sheet.max_row:
        print('Файл', TRUSTREESTR, 'Excel некорректно сохранен OpenPyxl. Откройте и пересохраните его')
        continue
    for j, row in enumerate(sheet.rows):
        snils = l(row[0].value)  # first column holds the SNILS key
        snilsTrustAudios = []
        for k, cell in enumerate(row):
            # every non-empty cell after the first one is an audio path
            if k and cell.value:
                snilsTrustAudio = isAudio(cell.value)
                if snilsTrustAudio[1] not in snilsTrustAudios:
                    snilsTrustAudios.append(snilsTrustAudio[1])
                if snilsesTrust.get(snils, None):
                    if cell.value not in snilsesTrust[snils]:
                        snilsesTrust[snils].append(cell.value)
                else:
                    snilsesTrust[snils] = [cell.value]
        snilsesTrustShort[snils] = snilsTrustAudios
# Copy every trusted recording into OUTPUT_CATALOG/Выгрузки/<snils>/, retrying
# the whole SNILS on any error (e.g. a temporarily unavailable disk).
for i, snils in enumerate(snilsesTrust):
    sucess = False
    while not sucess:
        try:
            if not os.path.exists(OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils)):
                os.mkdir(OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils))
            audiofilesShort = []
            for audiofile in snilsesTrust[snils]:
                audiofileShort = isAudio(audiofile)[1]
                if os.path.exists(audiofile): #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)):
                    # Duplicate short names get a '-<n>' suffix, n being how many
                    # copies of that name were already written for this SNILS.
                    if audiofileShort in audiofilesShort:
                        if not os.path.exists(OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils) + '/' + audiofileShort +
                                              '-' + str(Counter(audiofilesShort)[audiofileShort]) + audiofile[-4:]):
                            shutil.copy(audiofile #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)
                                        , OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils) + '/' + audiofileShort +
                                        '-' + str(Counter(audiofilesShort)[audiofileShort]) + audiofile[-4:])
                        audiofilesShort.append(audiofileShort)
                    else:
                        if not os.path.exists(OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils) + '/' +
                                              audiofileShort + audiofile[-4:]):
                            shutil.copy(audiofile #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)
                                        , OUTPUT_CATALOG + 'Выгрузки/' + fine_snils_(snils) + '/' + audiofileShort +
                                        audiofile[-4:])
                            # NOTE(review): unlike the duplicate branch above, this append
                            # only runs when a copy happened, so an already-archived file
                            # is not counted by Counter — verify this is intended.
                            audiofilesShort.append(audiofileShort)
                else:
                    print('!!! Нет исходного файла', audiofile)
            sucess = True
        except Exception as e:
            full_tb_write(e)
            print('Ошибка - пробуем ещё раз')
    print('Скопировано', i, 'из', len(snilsesTrust))
print('\nТеперь Остальные\n')
# Second pass: same collect-and-copy procedure for the "remaining" registry,
# writing into OUTPUT_CATALOG/Остальные/<snils>/ instead of Выгрузки/.
snilsesProblem = {}
snilsesProblemShort = {}
wb = openpyxl.load_workbook(filename=PROBLEMREESTR, read_only=True)
for sheetname in wb.sheetnames:
    sheet = wb[sheetname]
    # see the trusted pass above: falsy max_row means the file needs re-saving
    if not sheet.max_row:
        print('Файл', PROBLEMREESTR, 'Excel некорректно сохранен OpenPyxl. Откройте и пересохраните его')
        continue
    for j, row in enumerate(sheet.rows):
        snils = l(row[0].value)  # first column holds the SNILS key
        snilsProblemAudios = []
        for k, cell in enumerate(row):
            # every non-empty cell after the first one is an audio path
            if k and cell.value:
                snilsProblemAudio = isAudio(cell.value)
                if snilsProblemAudio[1] not in snilsProblemAudios:
                    snilsProblemAudios.append(snilsProblemAudio[1])
                if snilsesProblem.get(snils, None):
                    if cell.value not in snilsesProblem[snils]:
                        snilsesProblem[snils].append(cell.value)
                else:
                    snilsesProblem[snils] = [cell.value]
        snilsesProblemShort[snils] = snilsProblemAudios
for i, snils in enumerate(snilsesProblem):
    sucess = False
    while not sucess:
        try:
            if not os.path.exists(OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils)):
                os.mkdir(OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils))
            audiofilesShort = []
            for audiofile in snilsesProblem[snils]:
                audiofileShort = isAudio(audiofile)[1]
                if os.path.exists(audiofile): #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)):
                    # Duplicate short names get a '-<n>' suffix (see trusted pass).
                    if audiofileShort in audiofilesShort:
                        if not os.path.exists(OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils) + '/' + audiofileShort +
                                              '-' + str(Counter(audiofilesShort)[audiofileShort]) + audiofile[-4:]):
                            shutil.copy(audiofile #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)
                                        , OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils) + '/' + audiofileShort +
                                        '-' + str(Counter(audiofilesShort)[audiofileShort]) + audiofile[-4:])
                        audiofilesShort.append(audiofileShort)
                    else:
                        if not os.path.exists(OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils) + '/' +
                                              audiofileShort + audiofile[-4:]):
                            shutil.copy(audiofile #.replace(FIND_CATALOG, CHANGE_ON_WINDOWS)
                                        , OUTPUT_CATALOG + 'Остальные/' + fine_snils_(snils) + '/' + audiofileShort +
                                        audiofile[-4:])
                            # NOTE(review): append only on copy — an already-archived
                            # file is not counted by Counter; verify intended.
                            audiofilesShort.append(audiofileShort)
                else:
                    print('!!! Нет исходного файла', audiofile)
            sucess = True
        except Exception as e:
            full_tb_write(e)
            print('Ошибка - пробуем ещё раз')
    print('Скопировано', i, 'из', len(snilsesProblem))
| dekarh/asocium | asociumWrite.py | asociumWrite.py | py | 9,480 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "string.digits",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "string.digits",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "sys.exc_info",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "traceback.print_tb... |
from django.contrib.auth.models import AbstractBaseUser, UserManager, \
    PermissionsMixin
from django.core import validators
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _

from accounts import constants
class User(AbstractBaseUser, PermissionsMixin):
    """
    A fully featured User model with admin-compliant permissions.

    Username, password and email are required; other fields are optional.
    """

    #: The Permission level for this user
    permission = models.CharField(max_length=40, blank=True, null=True,
                                  choices=constants.PERMISSION_CHOICES)
    username = models.CharField(
        'username', max_length=30, unique=True, help_text=_(
            'Required. 30 characters or fewer. Letters, digits and '
            '@/./+/-/_ only.'),
        validators=[
            validators.RegexValidator(r'^[\w.@+-]+$', _(
                'Enter a valid username.'), 'invalid')
        ])
    first_name = models.CharField('first name', max_length=30, blank=True)
    last_name = models.CharField('last name', max_length=30, blank=True)
    email = models.EmailField('email address', blank=True)
    is_staff = models.BooleanField(
        'staff status', default=False,
        help_text='Designates whether the user can log into this admin site.')
    is_active = models.BooleanField('active', default=True)
    date_joined = models.DateTimeField('date joined', default=timezone.now)
    # on_delete=SET_NULL requires a nullable column; without null=True Django's
    # system checks reject the field.
    company = models.ForeignKey('company.Company', on_delete=models.SET_NULL,
                                null=True, blank=True)
    objects = UserManager()
    USERNAME_FIELD = 'username'
    REQUIRED_FIELDS = ['email']

    def get_full_name(self):
        """
        Returns the first_name plus the last_name, with a space in between.
        """
        full_name = '%s %s' % (self.first_name, self.last_name)
        return full_name.strip()

    def get_short_name(self):
        "Returns the short name for the user."
        return self.first_name

    def get_content_data(self):
        """Return a dict snapshot of the public profile fields."""
        content = {
            'permission': self.permission,
            'username': self.username,
            'first_name': self.first_name,
            'last_name': self.last_name,
            'email': self.email,
            'is_staff': self.is_staff,
            'is_active': self.is_active,
            'date_joined': self.date_joined,
        }
        return content

    def __unicode__(self):
        """Human-readable label: full name, then first name, then username."""
        if self.first_name:
            if self.last_name:
                return "{0} {1}'s Profile".format(
                    self.first_name, self.last_name)
            else:
                return "{0}'s Profile".format(self.first_name)
        else:
            return "{0}'s Profile".format(self.username)

    # Python 3 uses __str__; keep __unicode__ for backward compatibility.
    __str__ = __unicode__

    def save(self, *args, **kwargs):
        super(User, self).save(*args, **kwargs)
| sublime1809/pto_tracker | accounts/models.py | models.py | py | 2,840 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.models.AbstractBaseUser",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.PermissionsMixin",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 12,
"... |
26316442971 | import pandas as pd
from sklearn.impute import KNNImputer
from imblearn.over_sampling import SMOTE
from matplotlib import pyplot as plt
# Pandas display options so describe()/columns print without truncation.
pd.set_option('display.max_columns', None)
pd.set_option('display.width', 1000)

# Load the dataset; the header row is skipped and columns named explicitly.
df = pd.read_csv('./alzheimer.csv', skiprows=1, names=(
    'Group', 'M/F', 'Age', 'EDUC', 'SES', 'MMSE', 'CDR', 'eTIV', 'nWBV', 'ASF'
))
print(df.describe())

X = df.copy()
print(X.columns)
y = df[['Group']]
del X['Group']
X = pd.get_dummies(X)  # convert m/f to dummy columns

# Impute missing SES/MMSE values from nearest neighbours.
imputer = KNNImputer()
X = pd.DataFrame(imputer.fit_transform(X), columns=X.columns)

# scaler = StandardScaler()
# X = scaler.fit_transform(X)

# Oversample the minority 'Converted' class.
smote = SMOTE()
X_smote, y_smote = smote.fit_resample(X, y)

demented = df[df['Group'] == 'Demented']
not_demented = df[df['Group'] == 'Nondemented']
converted = df[df['Group'] == 'Converted']

# One histogram per feature, split by diagnosis group.  Replaces nine
# copy-pasted subplot stanzas with a single data-driven loop; titles and
# output are identical to the original.
_HISTOGRAMS = [
    ('M/F', 'gender'), ('Age', 'age'), ('EDUC', 'education'), ('SES', 'SES'),
    ('MMSE', 'MMSE'), ('CDR', 'CDR'), ('eTIV', 'eTIV'), ('nWBV', 'nWBV'),
    ('ASF', 'ASF'),
]
plt.figure(figsize=(25, 12))
for idx, (column, label) in enumerate(_HISTOGRAMS):
    plt.subplot(331 + idx)
    plt.hist([demented[column], not_demented[column], converted[column]], rwidth=0.8)
    plt.legend(['Demented', 'Not demented', 'Converted'])
    plt.title("Frequency of demented, non-demented, and converted patients by " + label)
plt.show()
"""
Notes
methods used:
- imputing using KNNImputer for missing values in SES and MMSE
- dummy variables for the categorical M/F column
- SMOTE due to the 'Converted' target value being a significant minority and consistently miscategorized without SMOTE
- StandardScaler was tested for logistic regression with little to no improvement in results
- TODO: try binning age
Feature selection:
- RFE (for basic logistic regression)
- FFS (for basic logistic regression)
- Feature importance (in random forest module)
- Based on the results from these methods, removed from logistic and bagging model:
- EDUC
- MMSE
- For mlxtend and random forest, removed:
- M/F
- EDUC
- nWBV
- For stacked model, TBD
- RFE results:
SES
CDR
nWBV
ASF
M/F_F
- FFS results:
feature ffs
7 ASF 1.430156
5 eTIV 2.684062
0 Age 14.149083
8 M/F_F 18.656900
9 M/F_M 18.656900
6 nWBV 28.090159
1 EDUC 28.760364
2 SES 41.344708
3 MMSE 170.239290
4 CDR 496.623041
- feature importance results:
importance feature
3 0.482053 CDR
2 0.236816 MMSE
1 0.091915 SES
4 0.067490 eTIV
5 0.066786 ASF
0 0.054942 Age
Data correlations w/ positive dementia:
- Males had a higher proportion
- Age, no obvious relationship
- Loose correlation with mid-level education
- Higher SES loosely correlated (socioeconomic status)
- Lower MMSE strongly correlated (mini mental state examination)
- Higher CDR strongly correlated (clinical dementia rating)
- eTIV, no obvious correlation (estimated intracranial volume)
- Lower nWBV strongly correlated (normalized whole brain volume)
- ASF, no obvious correlation (atlas scaling factor)
""" | dlepke/4948-a1 | data_exploration.py | data_exploration.py | py | 4,701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",... |
10650897169 | import frappe
import re
import jwt
from frappe import _
from frappe.utils.data import cstr, cint, flt
from frappe.utils import getdate
from erpnext.regional.india.e_invoice.utils import (GSPConnector,raise_document_name_too_long_error,read_json,get_transaction_details,\
validate_mandatory_fields,get_doc_details,get_overseas_address_details,get_return_doc_reference,\
get_eway_bill_details,validate_totals,show_link_to_error_log,santize_einvoice_fields,safe_json_load,get_payment_details,\
validate_eligibility,update_item_taxes,get_invoice_value_details,get_party_details,update_other_charges)
from erpnext.regional.india.utils import get_gst_accounts,get_place_of_supply
import json
GST_INVOICE_NUMBER_FORMAT = re.compile(r"^[a-zA-Z0-9\-/]+$") #alphanumeric and - /
def validate_einvoice_fields(doc):
    """Guard Sales Invoice lifecycle transitions against e-invoice (IRN) rules.

    Blocks posting-date/total edits after an IRN exists, enforces the
    16-character document-name limit, requires an IRN before submit, and
    requires IRN cancellation before the document itself can be cancelled.
    """
    invoice_eligible = validate_eligibility(doc)
    if not invoice_eligible:
        return
    # Finbyz Changes Start: dont change posting date after irn generated
    if doc.irn and doc.docstatus == 1 and doc._action == 'submit':
        if str(doc.posting_date) != str(frappe.db.get_value("Sales Invoice",doc.name,"posting_date")):
            frappe.throw(_('You cannot edit the invoice after generating IRN'), title=_('Edit Not Allowed'))
    # Finbyz Changes End
    if doc.docstatus == 0 and doc._action == 'save':
        # Totals are frozen once the IRN exists (unless the e-way bill was cancelled).
        if doc.irn and not doc.eway_bill_cancelled and doc.grand_total != frappe.db.get_value("Sales Invoice",doc.name,"grand_total"):# Finbyz Changes:
            frappe.throw(_('You cannot edit the invoice after generating IRN'), title=_('Edit Not Allowed'))
        if len(doc.name) > 16 and doc.authority == 'Authorized':# Finbyz Changes
            raise_document_name_too_long_error()
    elif doc.docstatus == 1 and doc._action == 'submit' and not doc.irn and doc.irn_cancelled == 0: # finbyz
        frappe.throw(_('You must generate IRN before submitting the document.'), title=_('Missing IRN'))
    elif doc.irn and doc.docstatus == 2 and doc._action == 'cancel' and not doc.irn_cancelled:
        frappe.throw(_('You must cancel IRN before cancelling the document.'), title=_('Cancel Not Allowed'))
def make_einvoice(invoice):
    """Assemble the e-invoice JSON payload for a Sales Invoice.

    Gathers transaction/doc/party/item/value sections, fills the
    'einv_template' schema with them, sanitizes the result and validates
    totals before returning the final dict.
    """
    validate_mandatory_fields(invoice)
    schema = read_json('einv_template')
    transaction_details = get_transaction_details(invoice)
    item_list = get_item_list(invoice)
    doc_details = get_doc_details(invoice)
    invoice_value_details = get_invoice_value_details(invoice)
    seller_details = get_party_details(invoice.company_address)
    if invoice.gst_category == 'Overseas':
        buyer_details = get_overseas_address_details(invoice.customer_address)
    else:
        buyer_details = get_party_details(invoice.customer_address)
    # Place of supply: the state code before the '-' separator, falling back to
    # the first two characters of the billing GSTIN.
    place_of_supply = get_place_of_supply(invoice, invoice.doctype)
    if place_of_supply:
        place_of_supply = place_of_supply.split('-')[0]
    else:
        place_of_supply = invoice.billing_address_gstin[:2]
    buyer_details.update(dict(place_of_supply=place_of_supply))
    seller_details.update(dict(legal_name=invoice.company))
    buyer_details.update(dict(legal_name=invoice.billing_address_title or invoice.customer_name or invoice.customer)) # finbyz change add billing address title
    # Optional sections default to empty dicts so schema.format() always works.
    shipping_details = payment_details = prev_doc_details = eway_bill_details = frappe._dict({})
    if invoice.shipping_address_name and invoice.customer_address != invoice.shipping_address_name:
        if invoice.gst_category == 'Overseas':
            shipping_details = get_overseas_address_details(invoice.shipping_address_name)
        else:
            shipping_details = get_party_details(invoice.shipping_address_name, is_shipping_address=True)
    if invoice.is_pos and invoice.base_paid_amount:
        payment_details = get_payment_details(invoice)
    if invoice.is_return and invoice.return_against:
        prev_doc_details = get_return_doc_reference(invoice)
    if invoice.transporter and flt(invoice.distance) and not invoice.is_return:
        eway_bill_details = get_eway_bill_details(invoice)
    # not yet implemented
    dispatch_details = period_details = export_details = frappe._dict({})
    einvoice = schema.format(
        transaction_details=transaction_details, doc_details=doc_details, dispatch_details=dispatch_details,
        seller_details=seller_details, buyer_details=buyer_details, shipping_details=shipping_details,
        item_list=item_list, invoice_value_details=invoice_value_details, payment_details=payment_details,
        period_details=period_details, prev_doc_details=prev_doc_details,
        export_details=export_details, eway_bill_details=eway_bill_details
    )
    try:
        einvoice = safe_json_load(einvoice)
        einvoice = santize_einvoice_fields(einvoice)
    except Exception:
        # On parse/sanitize failure, surface a link to the error log instead of crashing.
        show_link_to_error_log(invoice, einvoice)
    validate_totals(einvoice)
    return einvoice
def get_item_list(invoice):
    """Render each invoice item through the e-invoice item template.

    Returns a comma-joined string of the formatted item fragments, ready to be
    embedded into the einv_template 'item_list' placeholder.
    """
    item_list = []
    for d in invoice.items:
        einvoice_item_schema = read_json('einv_item_template')
        item = frappe._dict({})
        item.update(d.as_dict())
        item.sr_no = d.idx
        # json.dumps + [1:-1] escapes special characters without the surrounding quotes.
        item.description = json.dumps(d.item_group or d.item_name)[1:-1] # finbyz change add item group
        item.qty = abs(item.qty)
        if invoice.apply_discount_on == 'Net Total' and invoice.discount_amount:
            item.discount_amount = abs(item.base_amount - item.base_net_amount)
        else:
            item.discount_amount = 0
        item.unit_rate = abs((abs(item.taxable_value) - item.discount_amount)/ item.qty)
        item.gross_amount = abs(item.taxable_value) + item.discount_amount
        item.taxable_value = abs(item.taxable_value)
        item.batch_expiry_date = frappe.db.get_value('Batch', d.batch_no, 'expiry_date') if d.batch_no else None
        # NOTE(review): format_date is not imported in this module — this line
        # will raise NameError when a batch expiry date exists; verify imports.
        item.batch_expiry_date = format_date(item.batch_expiry_date, 'dd/mm/yyyy') if item.batch_expiry_date else None
        #finbyz Changes
        if frappe.db.get_value('Item', d.item_code, 'is_stock_item') or frappe.db.get_value('Item', d.item_code, 'is_not_service_item'):
            item.is_service_item = 'N'
        else:
            item.is_service_item = 'Y'
        # NOTE(review): the line below looks like 'item.serial_no = ""' fused
        # into a comment during editing — the assignment is effectively disabled.
        #finbyz changes enditem.serial_no = ""
        item = update_item_taxes(invoice, item)
        item.total_value = abs(
            item.taxable_value + item.igst_amount + item.sgst_amount +
            item.cgst_amount + item.cess_amount + item.cess_nadv_amount + item.other_charges
        )
        einv_item = einvoice_item_schema.format(item=item)
        item_list.append(einv_item)
    return ', '.join(item_list)
# india utils.py
def validate_document_name(doc, method=None):
    """Validate GST invoice number requirements.

    Applies only to Indian companies and documents posted on/after 2021-04-01:
    the name must be at most 16 characters (for 'Authorized' authority) and
    contain only alphanumerics, dash and slash.
    """
    country = frappe.get_cached_value("Company", doc.company, "country")
    # Date was chosen as start of next FY to avoid irritating current users.
    if country != "India" or getdate(doc.posting_date) < getdate("2021-04-01"):
        return
    if len(doc.name) > 16 and doc.authority == 'Authorized': #finbyz
        frappe.throw(_("Maximum length of document number should be 16 characters as per GST rules. Please change the naming series."))
    # GST_INVOICE_NUMBER_FORMAT is the module-level compiled regex ^[a-zA-Z0-9\-/]+$
    if not GST_INVOICE_NUMBER_FORMAT.match(doc.name):
        frappe.throw(_("Document name should only contain alphanumeric values, dash(-) and slash(/) characters as per GST rules. Please change the naming series."))
| venku31/ceramic | ceramic/e_invoice_ceramic.py | e_invoice_ceramic.py | py | 6,890 | python | en | code | null | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "erpnext.regional.india.e_invoice.utils.validate_eligibility",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "frappe.db.get_value",
"line_number": 25,
"usage_type": "call"
},... |
74788585384 | import operator
from itertools import tee, starmap, groupby
from typing import Literal
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ... — yield overlapping pairs lazily."""
    iterator = iter(iterable)
    try:
        previous = next(iterator)
    except StopIteration:
        return
    for current in iterator:
        yield (previous, current)
        previous = current
def has_six_digits(i: int):
    """True when i lies in the six-digit range [100000, 999999]."""
    return not (i < 100_000 or i > 999_999)
def is_nondecreasing_sequence(i: int):
    """True when the decimal digits of i never decrease left to right."""
    digits = str(i)
    return all(left <= right for left, right in zip(digits, digits[1:]))
def one_pair_is_not_triplet(i: int):
    """True when some run of equal adjacent digits has length exactly two."""
    return any(len(list(group)) == 2 for _, group in groupby(str(i)))
def has_pair(i: int):
    """True when at least two adjacent decimal digits of i are equal."""
    digits = str(i)
    return any(a == b for a, b in zip(digits, digits[1:]))
def moar_numbers(start_inclusive, stop_inclusive, part: Literal['a', 'b'] = 'a'):
    """Yield candidate numbers in [start_inclusive, stop_inclusive].

    A candidate has six digits, non-decreasing digits and at least one pair of
    equal adjacent digits.  For part 'b' the number must additionally contain a
    run of exactly two equal digits (a pair that is not part of a longer run).

    Args:
        start_inclusive: first number to test.
        stop_inclusive: last number to test (inclusive).
        part: puzzle part, 'a' or 'b'.  The original default was the Literal
            type object itself (not a string), which only behaved like 'a' by
            accident of comparing unequal to 'b'; the default is now 'a'.
    """
    check_extra = part == 'b'
    for candidate in range(start_inclusive, stop_inclusive + 1):
        if not (has_six_digits(candidate) and
                is_nondecreasing_sequence(candidate) and
                has_pair(candidate)):
            continue
        if check_extra and not one_pair_is_not_triplet(candidate):
            continue
        yield candidate
| el-hult/adventofcode2019 | day04/day4_lib.py | day4_lib.py | py | 1,183 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.tee",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "itertools.starmap",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "operator.le",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "itertools.groupby",
... |
6654095261 | from flask import Flask, render_template, request, redirect, session
from datetime import datetime
import random
app = Flask(__name__)
app.secret_key = 'JSaafE54!@#$%$#%^&*()_+'
@app.route('/')
def index():
    # Create the per-player session state on first visit.
    if 'num_azar' not in session:
        session['num_azar'] = random.randint(1,100)  # secret number to guess
        session['resultado'] = ""                    # last comparison hint
        session['count'] = 0                         # number of guesses so far
        print(session['num_azar'])
    # NOTE(review): this elif is always true when the branch above did not run.
    elif 'num_azar' in session:
        if session['count'] > 0 and session['count']<= 4:
            print("la session ya existe")
        # More than 4 guesses without a correct answer ends the game.
        elif session['count'] > 4 and session['resultado'] != 'igual':
            session['resultado'] = "perdiste"
            print(session['num_azar'])
    return render_template("index.html")
@app.route('/guess', methods=['POST'])
def guess():
    """Compare the submitted number against the secret and store the hint."""
    attempt = int(request.form['numero'])
    print(attempt)
    secret = session['num_azar']
    if secret == attempt:
        session['resultado'] = "igual"
        print('paso por igual')
    elif secret > attempt:
        session['resultado'] = "mayor"
        print('paso por mayor')
    else:
        session['resultado'] = "menor"
        print('paso por menor')
    session['count'] += 1
    return redirect('/')
@app.route('/play_again', methods=['POST'])
def play_again():
    """Discard the current game state so a fresh round starts on '/'.

    Uses pop(key, None) so that hitting this route with a stale or missing
    session no longer raises KeyError (the original pop(key) did).
    """
    session.pop('num_azar', None)
    session.pop('resultado', None)
    session.pop('count', None)
    return redirect('/')
@app.route('/ranking', methods=['POST'])
def ranking():
    """Record the player's name and attempt count, then render the ranking.

    The existence guard now runs before any read: the original printed
    session['ranking'] first, which raised KeyError on the first finished game.
    """
    if 'ranking' not in session:
        session['ranking'] = []
    session['ranking'] = [
        dict(nombre=request.form['nombre'], intentos=session['count'])]
    print(request.form['nombre'])
    print(session['ranking'])
    return render_template("ranking.html", ranking=session['ranking'])
if __name__=="__main__":
app.run(debug=True) | cpinot/CodingDojo | python/flask/fundamentals/numeros_juegos_genial/server.py | server.py | py | 2,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "flask.session",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "random.randint",
"line_num... |
5232344019 | from openpyxl import load_workbook
wb = load_workbook("sample.xlsx")
ws = wb.active
# Before: number | English | math
# After:  number | (Korean) | English | math
ws.move_range("B1:C11", rows=0, cols=1) # move 0 rows down, 1 column right
ws["B1"].value = "국어" # write 'Korean (language)' into the freed B1 cell
# ws.move_range("C1:C11", rows=5, cols=-1) # would overwrite existing data while moving
wb.save("sample_korean.xlsx")
{
"api_name": "openpyxl.load_workbook",
"line_number": 2,
"usage_type": "call"
}
] |
28128335578 | import os
import v2_draw_dynamic as main_app
import logging
import sys
def cmd(cmdstr):
    """Print a shell command and execute it via os.system."""
    print(cmdstr)
    os.system(cmdstr)
def main():
    '''Run the dynamic log drawer forever, restarting it on any error.

    Disabled CLI-argument check kept for reference:
    if len(sys.argv) < 2:
        logging.error("please input msg log file")
        return
    '''
    # NOTE(review): the log path is hard-coded, and the retry loop has no
    # delay — a persistent failure busy-loops; consider sleeping between tries.
    while True:
        try:
            main_app.draw_dynamic(r"D:\Programs\bin3(EC2)_R\log\010011112222\msg.log")
        except Exception as e:
            print("catch exception",e)
            logging.error(str(e))
main() | HZRelaper2020/show_log | v2_draw_dynamic_script.py | v2_draw_dynamic_script.py | py | 497 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.system",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "v2_draw_dynamic.draw_dynamic",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 21,
"usage_type": "call"
}
] |
11071680416 | #!/usr/bin/env python
# coding: utf-8
# -- GongChen'xi
#
# 20220112
# In[1]:
import baostock as bs
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os, sys
# In[2]:
def fetch_info(stock_num, info, start_date, end_date):
    """Download daily K-line fields for one stock via baostock.

    Args:
        stock_num: baostock security code.
        info: comma-separated field list, e.g. 'date, close'.
        start_date, end_date: 'YYYY-MM-DD' range bounds.

    Returns:
        list: one row (list of strings) per trading day, fields in *info* order.
    """
    bs.login()
    rs = bs.query_history_k_data_plus(stock_num, info,
                                      start_date = start_date, end_date = end_date,
                                      frequency="d", adjustflag="3")
    data_list = []
    # NOTE: '&' is bitwise, not short-circuit — rs.next() runs even when
    # error_code != '0'; a failed response simply yields an empty list.
    while (rs.error_code == '0') & rs.next():
        data_list.append(rs.get_row_data())
    bs.logout()
    return data_list
# In[3]:
# make dictionary of RSI6, RSI24
def get_rsi(stock_num, start_date, end_date):
    """Build {date: RSI6} and {date: RSI24} dicts for one stock.

    RSI here is the fraction of absolute percentage movement that was upward
    over the last 6 (resp. 24) trading days — a value in [0, 1], not the
    conventional 0-100 scale.  The first 23 rows are skipped so a full
    24-day window is always available.
    """
    rsi6 = {}
    rsi24 = {}
    change = fetch_info(stock_num, 'date, pctChg', start_date, end_date)
    for i in range(len(change)):
        if i >= 23:
            date = change[i][0]
            # RSI6
            try:
                denominator = 0
                numerator = 0
                for j in range(6):
                    denominator += abs(float(change[i-j][1]))
                    if float(change[i-j][1]) > 0:
                        numerator += abs(float(change[i-j][1]))
                rsi6[date] = numerator/denominator
            # NOTE(review): the bare except also hides parse errors, not just
            # division by zero on flat (all-zero-change) windows.
            except:
                rsi6[date] = 0.0
            # RSI24
            try:
                denominator = 0
                numerator = 0
                for j in range(24):
                    denominator += abs(float(change[i-j][1]))
                    if float(change[i-j][1]) > 0:
                        numerator += abs(float(change[i-j][1]))
                rsi24[date] = numerator/denominator
            except:
                rsi24[date] = 0.0
    return rsi6, rsi24
# In[4]:
# make dictionary of close price
def get_price(stock_num, start_date, end_date):
    """Return {trading date -> closing price (float)} for the stock in range."""
    rows = fetch_info(stock_num, 'date, close', start_date, end_date)
    return {row[0]: float(row[1]) for row in rows}
# In[5]:
# make list of trading dates
def get_trading_dates(stock_num, start_date, end_date):
    """Return the list of trading-date strings for the stock in the range."""
    rows = fetch_info(stock_num, 'date', start_date, end_date)
    return [row[0] for row in rows]
# In[6]:
def change_rate(buy_date, date, close_price):
    """Return the fractional price change between buy_date and date."""
    bought = close_price[buy_date]
    current = close_price[date]
    return (current - bought) / bought
def simulation_start_date(trading_date_list):
    """Return the first date with 24 days of history available (index 23)."""
    return trading_date_list[23]
def next_date(date, trading_date_list):
    """Return the trading date following *date*, or None when not found.

    Scans the whole list (so duplicates resolve to the successor of the
    last occurrence); raises IndexError when *date* is the final element,
    matching the original behaviour.
    """
    following = None
    for position, current in enumerate(trading_date_list):
        if current == date:
            following = trading_date_list[position + 1]
    return following
# In[ ]:
def initialize(stock_num, start_date, end_date):
    """Fetch close prices and trading dates, and pick the simulation start date."""
    prices = get_price(stock_num, start_date, end_date)
    dates = get_trading_dates(stock_num, start_date, end_date)
    start = simulation_start_date(dates)
    return prices, dates, start
# In[ ]:
# analysis
def analysis(record, close_price, show):
    """Score a trade record against the close-price series.

    Args:
        record: iterable of (date, action) pairs where action is 'buy'/'sell'.
        close_price: dict mapping date -> close price (insertion-ordered).
        show: when truthy, print the yields and draw the price/trade chart.

    Returns:
        tuple: (average per-trade yield, accumulated yield).
    """
    close = []
    date = []
    for i in close_price:
        close.append(close_price[i])
        date.append(i)
    t = []
    for i in range(len(close)):
        t.append(i)
    # Split the record into buy/sell coordinates for the scatter overlay.
    buy_price = []
    buy_t = []
    sell_price = []
    sell_t = []
    for i in record:
        for j in range(len(date)):
            if date[j] == i[0]:
                if i[1] == 'buy':
                    buy_price.append(close_price[date[j]])
                    buy_t.append(j)
                if i[1] == 'sell':
                    sell_price.append(close_price[date[j]])
                    sell_t.append(j)
    trade_list = []
    for i in record:
        for j in range(len(date)):
            if date[j] == i[0]:
                trade_list.append([i[0], i[1], close_price[date[j]]])
    trade = pd.DataFrame(trade_list, columns = ['date', 'action', 'close'])
    #print(trade, end = '\n\n')
    profit = []
    # Drop a trailing unmatched trade so buys and sells pair up 1:1.
    if len(trade_list) % 2 == 1:
        trade_list.pop()
    for i in range(int(len(trade_list)/2)):
        profit.append((trade_list[2*i+1][2] - trade_list[2*i][2])/trade_list[2*i][2])
    profit = np.array(profit)
    average = profit.mean()
    # NOTE(review): with an empty trade record this indexing raises IndexError
    # (and mean() of an empty array is nan); callers should guard against it.
    accumulated = ((profit + 1).cumprod() - 1)[-1]
    if show:
        print('On this pattern of trading,')
        print('Average yield: ', average)
        print('Accumulated yield: ', accumulated)
        plt.plot(t, close)
        plt.scatter(buy_t, buy_price, color = 'red')
        plt.scatter(sell_t, sell_price, color = 'green')
    return average, accumulated
| Chenxi-Gong/TradingPatternSimulation | simulation.py | simulation.py | py | 4,698 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "baostock.login",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "baostock.query_history_k_data_plus",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "baostock.logout",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pan... |
34338800542 | # https://leetcode.com/problems/n-ary-tree-level-order-traversal/
from typing import List
# Definition for a Node.
class Node:
    def __init__(self, val=None, children=None):
        self.val = val
        self.children = children


class Solution:
    def levelOrder(self, root: 'Node') -> List[List[int]]:
        """Return the node values of an N-ary tree grouped by depth (BFS)."""
        if not root:
            return []
        levels = []
        frontier = [root]
        while frontier:
            levels.append([node.val for node in frontier])
            frontier = [child
                        for node in frontier
                        for child in (node.children or [])]
        return levels
| 0x0400/LeetCode | p429.py | p429.py | py | 714 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 12,
"usage_type": "name"
}
] |
15948222705 | import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def get_data_splits(X, Y, train_size=0.8):
    """Split the dataset into stratified train, validation and test sets.

    Args:
        X (pd.DataFrame): feature matrix of shape (num_samples, num_features)
        Y (pd.DataFrame): labels of shape (num_samples,)
        train_size (float, optional): training fraction in (0, 1). Defaults to 0.8.

    Returns:
        tuple: X_train, Y_train, X_val, Y_val, X_test, Y_test
    """
    X_train, X_rest, Y_train, Y_rest = train_test_split(
        X, Y, train_size=train_size, stratify=Y)
    # The held-out remainder is split 50/50 into validation and test sets.
    X_val, X_test, Y_val, Y_test = train_test_split(
        X_rest, Y_rest, train_size=0.5, stratify=Y_rest)
    return X_train, Y_train, X_val, Y_val, X_test, Y_test
def rename_columns(df):
    """Rename feature columns into cat_*/num_* style, in place.

    Columns "0","1","2","3","13" get a 'cat_' prefix; columns "4".."12" are
    converted to numeric dtype and get a 'num_' prefix; everything else keeps
    its name.  The (mutated) DataFrame is returned.
    """
    categorical = {"0", "1", "2", "3", "13"}
    numerical = {"4", "5", "6", "7", "8", "9", "10", "11", "12"}
    mapping = {}
    for name in df.columns.astype(str).values:
        if name in numerical:
            df[name] = pd.to_numeric(df[name])
            mapping[name] = "num_" + name
        elif name in categorical:
            mapping[name] = "cat_" + name
        else:  # pragma: no cover, other data
            mapping[name] = name
    df.rename(columns=mapping, inplace=True)
    return df
def preprocess(X_df, Y_df=None, label_col="18", enc=None):
    """One-hot encode categorical features and relabel targets.

    When Y_df is given, normal samples get label 1 and anomalies label -1.

    Args:
        X_df (pd.DataFrame): features with renamed (cat_/num_) columns.
        Y_df (pd.DataFrame, optional): labels for samples in X_df. Defaults to None.
        label_col (str, optional): name of the label column in Y_df. Defaults to "18".
        enc (sklearn.preprocessing.OneHotEncoder, optional): fitted encoder; a new
            one is fit to the data when None. Defaults to None.

    Returns:
        tuple: df_new, Y_df, enc — encoded features, labels, and the encoder.
    """
    X_df = X_df.reset_index(drop=True)
    if Y_df is not None:
        Y_df = Y_df.reset_index(drop=True)
    cat_mask = ["cat_" in i for i in X_df.columns]
    if not enc:
        enc = OneHotEncoder(handle_unknown="ignore")
        enc.fit(X_df.loc[:, cat_mask])
    num_cat_features = enc.transform(X_df.loc[:, cat_mask]).toarray()
    df_catnum = pd.DataFrame(num_cat_features)
    df_catnum = df_catnum.add_prefix("catnum_")
    df_new = pd.concat([X_df, df_catnum], axis=1)
    if Y_df is not None:
        filter_clear = Y_df[label_col] == 1
        filter_infected = Y_df[label_col] < 0
        # Use .loc instead of chained indexing (Y_df[col][mask] = v): the
        # chained form writes through a temporary and is unreliable under
        # pandas SettingWithCopy / copy-on-write semantics.
        Y_df.loc[filter_clear, label_col] = 1
        Y_df.loc[filter_infected, label_col] = -1
    return df_new, Y_df, enc
def get_preprocessed_train(X_df, Y_df, label_col="18"):
"""This function prepares the dataset for training. Returns only normal samples.
Args:
X_df (pd.DataFrame): A DataFrame containing features for samples of shape (num_samples, num_features)
Y_df (pd.DataFrame): A DataFrame containing labels for data samples of shape (n_samples,)
label_col (str, optional): Name of the label column in Y_df. Defaults to "18"
Returns:
tuple: X_train_num, Y_train_clear, numerical_cols, ohe_enc: features and labels of normal samples together with names of the columns for training and the OneHotEncoder used in preprocessing
"""
count_norm = X_df[Y_df[label_col] == 1].shape[0]
count_anomaly = X_df[Y_df[label_col] != 1].shape[0]
print("normal:", count_norm, "anomalies:", count_anomaly)
X_df = rename_columns(X_df)
X_df, Y_df, ohe_enc = preprocess(X_df, Y_df)
# select numerical features
numerical_cols = X_df.columns.to_numpy()[["num_" in i for i in X_df.columns]]
X_train_clear = X_df[Y_df[label_col] == 1]
Y_train_clear = Y_df[Y_df[label_col] == 1]
X_train_num = X_train_clear[numerical_cols]
return X_train_num, Y_train_clear, numerical_cols, ohe_enc
def get_test(X_df, enc, Y_df=None):
"""This function works similar to get_preprocessed_train, but prepares the test dataset for evaluation or inference purposes.
Args:
(pd.DataFrame): A DataFrame containing features for samples of shape (num_samples, num_features)
enc (sklearn.preprocessing.OneHotEncoder): A fitted OneHotEncoder for transforming features
Y_df (pd.DataFrame, optional): A DataFrame containing labels for data samples of shape (n_samples,), is none for inference purposes. Defaults to None.
Returns:
tuple: X_df, Y_df, preprocessed features and labels
"""
X_df = rename_columns(X_df)
X_df, Y_df, _ = preprocess(X_df, Y_df, enc=enc)
numerical_cols = X_df.columns.to_numpy()[["num_" in i for i in X_df.columns]]
X_df = X_df[numerical_cols]
return X_df, Y_df
| rahbararman/AnoShiftIDS | IDSAnoShift/data.py | data.py | py | 5,408 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 36,
"usage_type": "c... |
42026376598 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from .views import HomeView, ArchivesView, AboutView, PhotoView, MusicView, ArticleDetailView, CategoryView, TagListView, TagsView, CategoryListView, search
urlpatterns = [
url(r'^$', HomeView.as_view(), name='home'),
url(r'^archives/(?P<year>[0-9]{4})/(?P<month>[0-9]{1,2})/$', ArchivesView.as_view(), name='archives'),
url(r'^category_list/$', CategoryListView.as_view(), name='category_list'), #分类列表页
url(r'^category/(?P<category_id>\d+)/$', CategoryView.as_view(), name='category'),
url(r'^tag_list/$', TagListView.as_view(), name='tag_list'), #标签列表页
url(r'^tag/(?P<tag_id>\d+)/$', TagsView.as_view(), name='tag'), #所属标签的文章列表
url(r'^about/', AboutView.as_view(), name='about'),
url(r'^photo/', PhotoView.as_view(), name='photo'),
url(r'^music/', MusicView.as_view(), name='music'),
url(r'^article_detail/(?P<article_id>\d+)/$', ArticleDetailView.as_view(), name='article_detail'),
url(r'^search/$',search, name="search"),
]
| JustBreaking/myblog | apps/myblog/urls.py | urls.py | py | 1,101 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.HomeView.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.HomeView",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.conf.u... |
27515854785 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 23 17:32:15 2020
geometry based sieving
"""
import os
import pickle
from skimage import measure
import pandas as pd
import numpy as np
import nibabel as nib
from skimage import measure
from sklearn import neighbors
# PROPERTIES = ['area', 'extent', 'filled_area', 'inertia_tensor', 'major_axis_length', 'minor_axis_length'] # 3d compatible
PROPERTIES = ['area', 'eccentricity', 'extent',
'inertia_tensor', 'major_axis_length', 'minor_axis_length',
'moments_hu', 'perimeter', 'solidity']
def read_nifti(img_path):
"""
Wrapper for nibabel to read in NIfTI scans.
Parameters
----------
img_path : string
The path to the .nii scan
Returns
-------
A numpy array representing the scan
"""
raw = nib.load(img_path)
img = raw.get_fdata()
return img
def label_2d(im):
labeled = im.copy()
vals_seen = []
adder = 0
for i in range(labeled.shape[2]):
sli = im[:,:,i]
labeled_slice = measure.label(sli)
labeled_slice = labeled_slice + (adder * (~np.isclose(labeled_slice, 0)))# we are labeling every slice individually, but we don't want to reuse labels between slices
labeled[:,:,i] = labeled_slice
the_max = labeled_slice.max()
if the_max > 0:
adder = the_max
vals_seen.extend(np.unique(labeled_slice))
vals_seen.remove(0)
'''
print(f'Vals_seen: {vals_seen}')
print(f'Adder: {adder}')
print(f'Unique in this slice: {np.unique(labeled_slice)}')
print('\n')
'''
if len(vals_seen) > len(np.unique(vals_seen)):
raise Exception(f'Labels are not unique after slice {i}')
return labeled.astype(int)
def generate_properties(im, props=PROPERTIES):
"""
Generates geometric properties for shapes in the binary input image
Parameters
----------
im : TYPE
DESCRIPTION.
props : TYPE, optional
DESCRIPTION
Returns
-------
X_train : TYPE
DESCRIPTION.
"""
X_train = pd.DataFrame()
labeled = label_2d(im)
for i in range(labeled.shape[2]):
sli = labeled[:,:,i]
try:
X_train = X_train.append(pd.DataFrame(measure.regionprops_table(sli, properties=props)))
except IndexError:
pass # happens when the slice has no regions in it
return X_train
def standardize_data(data, params):
standard_data = data.copy()
for col, mean, stddev in zip(data.columns, params[0], params[1]):
standard_data[col] = (standard_data[col] - mean) / stddev
return standard_data
def train_and_save(training_data, outloc):
"""
Trains a LOF algorithm for the purposes of novelty detection and pickles it
Standardizes the data first (transforms each column by subtracting the mean
and then dividing by the stddev)
Parameters
----------
training_data : TYPE
a pandas DataFrame of the training data.
out_loc : TYPE
name of the pickled object to save, which is a tuple with length 2, where
the first entry is the model. The second is a list of lists, where the first
list is the list of means used to transform the data and the second is the list
of the stddevs used to transform the data
Returns
-------
a tuple with length 2, where
the first entry is the model. The second is a list of lists, where the first
list is the list of means used to transform the data and the second is the list
of the stddevs used to transform the data
"""
standard_data = training_data.copy()
means = []
stddevs = []
for col in training_data.columns:
mean = training_data[col].mean()
stddev = training_data[col].std()
means.append(mean)
stddevs.append(stddev)
standard_data = standardize_data(training_data, (means, stddevs))
lof = neighbors.LocalOutlierFactor(novelty=True)
lof.fit(standard_data)
out_obj = (lof, (means, stddevs))
pickle.dump(out_obj, open(outloc, "wb" ))
return out_obj
def load_default_model():
script_folder = os.path.dirname(os.path.realpath(__file__))
repo_folder = os.path.dirname(script_folder)
model_loc = os.path.join(repo_folder, 'bin', 'gbs_models', 'gbs_default.pkl')
lof, params = pickle.load(open(model_loc, 'rb'))
return lof, params
def sieve_image(im, model_and_params=None, props=None):
if model_and_params is None:
model_and_params = load_default_model()
if props is None:
props=PROPERTIES
model = model_and_params[0]
params = model_and_params[1]
labeled = label_2d(im)
#observations = pd.DataFrame(measure.regionprops_table(labeled, properties=props))
props_with_label = props
props_with_label.append('label')
observations = generate_properties(labeled, props=props_with_label)
labels_only = pd.DataFrame(observations['label'])
observations_drop = observations.drop(columns='label')
standard_observations = standardize_data(observations_drop, params)
predictions = model.predict(standard_observations)
labels_only['prediction'] = predictions
to_zero = [row['label'] for i, row in labels_only.iterrows() if row['prediction'] == -1]
mask = np.isin(labeled, to_zero)
new_im = im.copy()
new_im[mask] = 0
return new_im.astype(int)
| rsjones94/neurosegment | neurosegment/gbs.py | gbs.py | py | 5,678 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "nibabel.load",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "skimage.measure.label",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "skimage.measure",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "numpy.isclose",
... |
35000621242 | import os
import torch
import pandas as pd
import torchaudio
import cv2
import torchaudio.transforms as T
from torch.utils.data import Dataset
import numpy as np
from .utils_dataset import get_transform
class BatvisionV2Dataset(Dataset):
def __init__(self, cfg, annotation_file, location_blacklist=None):
self.cfg = cfg
self.root_dir = cfg.dataset.dataset_dir
self.audio_format = cfg.dataset.audio_format
location_list = os.listdir(self.root_dir)
if location_blacklist:
location_list = [location for location in location_list if location not in location_blacklist]
location_csv_paths = [os.path.join(self.root_dir, location, annotation_file) for location in location_list]
self.instances = []
for location_csv in location_csv_paths:
self.instances.append(pd.read_csv(location_csv))
self.instances = pd.concat(self.instances)
def __len__(self):
return len(self.instances)
def __getitem__(self, idx):
# Access instance
instance = self.instances.iloc[idx]
# Load path
depth_path = os.path.join(self.root_dir,instance['depth path'],instance['depth file name'])
audio_path = os.path.join(self.root_dir,instance['audio path'],instance['audio file name'])
## Depth
# Load depth map
depth = np.load(depth_path).astype(np.float32)
depth = depth / 1000 # to go from mm to m
if self.cfg.dataset.max_depth:
depth[depth > self.cfg.dataset.max_depth] = self.cfg.dataset.max_depth
# Transform
depth_transform = get_transform(self.cfg, convert = True, depth_norm = self.cfg.dataset.depth_norm)
gt_depth = depth_transform(depth)
## Audio
# Load audio binaural waveform
waveform, sr = torchaudio.load(audio_path)
# STFT parameters for full length audio
win_length = 200
n_fft = 400
hop_length = 100
# Cut audio to fit max depth
if self.cfg.dataset.max_depth:
cut = int((2*self.cfg.dataset.max_depth / 340) * sr)
waveform = waveform[:,:cut]
# Update STFT parameters
win_length = 64
n_fft = 512
hop_length=64//4
# Process sound
if 'spectrogram' in self.audio_format:
if 'mel' in self.audio_format:
spec = self._get_melspectrogram(waveform, n_fft = n_fft, power = 1.0, win_length = win_length)
else:
spec = self._get_spectrogram(waveform, n_fft = n_fft, power = 1.0, win_length = win_length, hop_length = hop_length)
spec_transform = get_transform(self.cfg, convert = False) # convert False because already a tensor
audio2return = spec_transform(spec)
elif 'waveform' in self.audio_format:
audio2return = waveform
return audio2return, gt_depth
# audio transformation: spectrogram
def _get_spectrogram(self, waveform, n_fft = 400, power = 1.0, win_length = 400, hop_length=100):
spectrogram = T.Spectrogram(
n_fft=n_fft,
win_length=win_length,
power=power,
hop_length=hop_length,
)
#db = T.AmplitudeToDB(stype = 'magnitude')
return spectrogram(waveform)
# audio transformation: mel spectrogram
def _get_melspectrogram(self, waveform, n_fft = 400, power = 1.0, win_length = 400, f_min = 20.0, f_max = 20000.0):
melspectrogram = T.MelSpectrogram(sample_rate = 44100,
n_fft=n_fft,
win_length=win_length,
power=power,
f_min = f_min,
f_max = f_max,
n_mels = 32,
)
return melspectrogram(waveform)
| AmandineBtto/Batvision-Dataset | UNetSoundOnly/dataloader/BatvisionV2_Dataset.py | BatvisionV2_Dataset.py | py | 3,853 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "os.listdir",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_... |
28780254481 | """
Python Crash Course, Third Edition https://ehmatthes.github.io/pcc_3e/
My notes: https://github.com/egalli64/pythonesque/pcc3
Chapter 15 - Generating Data - Plotting a Simple Line Graph - Plotting a Series of Points with scatter()
"""
import matplotlib.pyplot as plt
plt.style.use('seaborn')
fig, ax = plt.subplots()
# a few scattered points
xs = [1, 2, 3, 4, 5]
ys = [1, 4, 9, 16, 25]
ax.scatter(xs, ys, s=100)
ax.set_title("Square Numbers", fontsize=24)
ax.set_xlabel("Value", fontsize=14)
ax.set_ylabel("Square of Value", fontsize=14)
ax.tick_params(labelsize=14)
plt.show()
| egalli64/pythonesque | pcc3/ch15/e1f_scatter_points.py | e1f_scatter_points.py | py | 587 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name"... |
38033757546 | import dataclasses
import pickle
import re
from collections.abc import Hashable
from datetime import datetime
from pathlib import Path
from typing import Callable, ClassVar, Dict, FrozenSet, List, Optional, Set, Union
import pytest
from typing_extensions import Literal
import pydantic
from pydantic import BaseModel, Extra, ValidationError, validator
def test_simple():
@pydantic.dataclasses.dataclass
class MyDataclass:
a: int
b: float
d = MyDataclass('1', '2.5')
assert d.a == 1
assert d.b == 2.5
d = MyDataclass(b=10, a=20)
assert d.a == 20
assert d.b == 10
def test_model_name():
@pydantic.dataclasses.dataclass
class MyDataClass:
model_name: str
d = MyDataClass('foo')
assert d.model_name == 'foo'
d = MyDataClass(model_name='foo')
assert d.model_name == 'foo'
def test_value_error():
@pydantic.dataclasses.dataclass
class MyDataclass:
a: int
b: int
with pytest.raises(ValidationError) as exc_info:
MyDataclass(1, 'wrong')
assert exc_info.value.errors() == [
{'loc': ('b',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
def test_frozen():
@pydantic.dataclasses.dataclass(frozen=True)
class MyDataclass:
a: int
d = MyDataclass(1)
assert d.a == 1
with pytest.raises(AttributeError):
d.a = 7
def test_validate_assignment():
class Config:
validate_assignment = True
@pydantic.dataclasses.dataclass(config=Config)
class MyDataclass:
a: int
d = MyDataclass(1)
assert d.a == 1
d.a = '7'
assert d.a == 7
def test_validate_assignment_error():
@pydantic.dataclasses.dataclass(config=dict(validate_assignment=True))
class MyDataclass:
a: int
d = MyDataclass(1)
with pytest.raises(ValidationError) as exc_info:
d.a = 'xxx'
assert exc_info.value.errors() == [
{'loc': ('a',), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
def test_not_validate_assignment():
@pydantic.dataclasses.dataclass
class MyDataclass:
a: int
d = MyDataclass(1)
assert d.a == 1
d.a = '7'
assert d.a == '7'
def test_validate_assignment_value_change():
class Config:
validate_assignment = True
@pydantic.dataclasses.dataclass(config=Config, frozen=False)
class MyDataclass:
a: int
@validator('a')
def double_a(cls, v):
return v * 2
d = MyDataclass(2)
assert d.a == 4
d.a = 3
assert d.a == 6
def test_validate_assignment_extra():
class Config:
validate_assignment = True
@pydantic.dataclasses.dataclass(config=Config, frozen=False)
class MyDataclass:
a: int
d = MyDataclass(1)
assert d.a == 1
d.extra_field = 1.23
assert d.extra_field == 1.23
d.extra_field = 'bye'
assert d.extra_field == 'bye'
def test_post_init():
post_init_called = False
@pydantic.dataclasses.dataclass
class MyDataclass:
a: int
def __post_init__(self):
nonlocal post_init_called
post_init_called = True
d = MyDataclass('1')
assert d.a == 1
assert post_init_called
def test_post_init_validation():
@dataclasses.dataclass
class DC:
a: int
def __post_init__(self):
self.a *= 2
def __post_init_post_parse__(self):
self.a += 1
PydanticDC = pydantic.dataclasses.dataclass(DC)
assert DC(a='2').a == '22'
assert PydanticDC(a='2').a == 23
def test_post_init_inheritance_chain():
parent_post_init_called = False
post_init_called = False
@pydantic.dataclasses.dataclass
class ParentDataclass:
a: int
def __post_init__(self):
nonlocal parent_post_init_called
parent_post_init_called = True
@pydantic.dataclasses.dataclass
class MyDataclass(ParentDataclass):
b: int
def __post_init__(self):
super().__post_init__()
nonlocal post_init_called
post_init_called = True
d = MyDataclass(a=1, b=2)
assert d.a == 1
assert d.b == 2
assert parent_post_init_called
assert post_init_called
def test_post_init_post_parse():
post_init_post_parse_called = False
@pydantic.dataclasses.dataclass
class MyDataclass:
a: int
def __post_init_post_parse__(self):
nonlocal post_init_post_parse_called
post_init_post_parse_called = True
d = MyDataclass('1')
assert d.a == 1
assert post_init_post_parse_called
def test_post_init_post_parse_types():
@pydantic.dataclasses.dataclass
class CustomType:
b: int
@pydantic.dataclasses.dataclass
class MyDataclass:
a: CustomType
def __post_init__(self):
assert type(self.a) == dict
def __post_init_post_parse__(self):
assert type(self.a) == CustomType
d = MyDataclass(**{'a': {'b': 1}})
assert d.a.b == 1
def test_post_init_assignment():
from dataclasses import field
# Based on: https://docs.python.org/3/library/dataclasses.html#post-init-processing
@pydantic.dataclasses.dataclass
class C:
a: float
b: float
c: float = field(init=False)
def __post_init__(self):
self.c = self.a + self.b
c = C(0.1, 0.2)
assert c.a == 0.1
assert c.b == 0.2
assert c.c == 0.30000000000000004
def test_inheritance():
@pydantic.dataclasses.dataclass
class A:
a: str = None
@pydantic.dataclasses.dataclass
class B(A):
b: int = None
b = B(a='a', b=12)
assert b.a == 'a'
assert b.b == 12
with pytest.raises(ValidationError):
B(a='a', b='b')
def test_validate_long_string_error():
class Config:
max_anystr_length = 3
@pydantic.dataclasses.dataclass(config=Config)
class MyDataclass:
a: str
with pytest.raises(ValidationError) as exc_info:
MyDataclass('xxxx')
assert exc_info.value.errors() == [
{
'loc': ('a',),
'msg': 'ensure this value has at most 3 characters',
'type': 'value_error.any_str.max_length',
'ctx': {'limit_value': 3},
}
]
def test_validate_assigment_long_string_error():
class Config:
max_anystr_length = 3
validate_assignment = True
@pydantic.dataclasses.dataclass(config=Config)
class MyDataclass:
a: str
d = MyDataclass('xxx')
with pytest.raises(ValidationError) as exc_info:
d.a = 'xxxx'
assert issubclass(MyDataclass.__pydantic_model__.__config__, BaseModel.Config)
assert exc_info.value.errors() == [
{
'loc': ('a',),
'msg': 'ensure this value has at most 3 characters',
'type': 'value_error.any_str.max_length',
'ctx': {'limit_value': 3},
}
]
def test_no_validate_assigment_long_string_error():
class Config:
max_anystr_length = 3
validate_assignment = False
@pydantic.dataclasses.dataclass(config=Config)
class MyDataclass:
a: str
d = MyDataclass('xxx')
d.a = 'xxxx'
assert d.a == 'xxxx'
def test_nested_dataclass():
@pydantic.dataclasses.dataclass
class Nested:
number: int
@pydantic.dataclasses.dataclass
class Outer:
n: Nested
navbar = Outer(n=Nested(number='1'))
assert isinstance(navbar.n, Nested)
assert navbar.n.number == 1
navbar = Outer(n=('2',))
assert isinstance(navbar.n, Nested)
assert navbar.n.number == 2
navbar = Outer(n={'number': '3'})
assert isinstance(navbar.n, Nested)
assert navbar.n.number == 3
with pytest.raises(ValidationError) as exc_info:
Outer(n='not nested')
assert exc_info.value.errors() == [
{
'loc': ('n',),
'msg': 'instance of Nested, tuple or dict expected',
'type': 'type_error.dataclass',
'ctx': {'class_name': 'Nested'},
}
]
with pytest.raises(ValidationError) as exc_info:
Outer(n=('x',))
assert exc_info.value.errors() == [
{'loc': ('n', 'number'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
def test_arbitrary_types_allowed():
class Button:
def __init__(self, href: str):
self.href = href
class Config:
arbitrary_types_allowed = True
@pydantic.dataclasses.dataclass(config=Config)
class Navbar:
button: Button
btn = Button(href='a')
navbar = Navbar(button=btn)
assert navbar.button.href == 'a'
with pytest.raises(ValidationError) as exc_info:
Navbar(button=('b',))
assert exc_info.value.errors() == [
{
'loc': ('button',),
'msg': 'instance of Button expected',
'type': 'type_error.arbitrary_type',
'ctx': {'expected_arbitrary_type': 'Button'},
}
]
def test_nested_dataclass_model():
@pydantic.dataclasses.dataclass
class Nested:
number: int
class Outer(BaseModel):
n: Nested
navbar = Outer(n=Nested(number='1'))
assert navbar.n.number == 1
def test_fields():
@pydantic.dataclasses.dataclass
class User:
id: int
name: str = 'John Doe'
signup_ts: datetime = None
user = User(id=123)
fields = user.__pydantic_model__.__fields__
assert fields['id'].required is True
assert fields['id'].default is None
assert fields['name'].required is False
assert fields['name'].default == 'John Doe'
assert fields['signup_ts'].required is False
assert fields['signup_ts'].default is None
def test_default_factory_field():
@pydantic.dataclasses.dataclass
class User:
id: int
aliases: Dict[str, str] = dataclasses.field(default_factory=lambda: {'John': 'Joey'})
user = User(id=123)
fields = user.__pydantic_model__.__fields__
assert fields['id'].required is True
assert fields['id'].default is None
assert fields['aliases'].required is False
assert fields['aliases'].default_factory() == {'John': 'Joey'}
def test_default_factory_singleton_field():
class MySingleton:
pass
class MyConfig:
arbitrary_types_allowed = True
MY_SINGLETON = MySingleton()
@pydantic.dataclasses.dataclass(config=MyConfig)
class Foo:
singleton: MySingleton = dataclasses.field(default_factory=lambda: MY_SINGLETON)
# Returning a singleton from a default_factory is supported
assert Foo().singleton is Foo().singleton
def test_schema():
@pydantic.dataclasses.dataclass
class User:
id: int
name: str = 'John Doe'
aliases: Dict[str, str] = dataclasses.field(default_factory=lambda: {'John': 'Joey'})
signup_ts: datetime = None
age: Optional[int] = dataclasses.field(
default=None, metadata=dict(title='The age of the user', description='do not lie!')
)
height: Optional[int] = pydantic.Field(None, title='The height in cm', ge=50, le=300)
user = User(id=123)
assert user.__pydantic_model__.schema() == {
'title': 'User',
'type': 'object',
'properties': {
'id': {'title': 'Id', 'type': 'integer'},
'name': {'title': 'Name', 'default': 'John Doe', 'type': 'string'},
'aliases': {
'title': 'Aliases',
'type': 'object',
'additionalProperties': {'type': 'string'},
},
'signup_ts': {'title': 'Signup Ts', 'type': 'string', 'format': 'date-time'},
'age': {
'title': 'The age of the user',
'description': 'do not lie!',
'type': 'integer',
},
'height': {
'title': 'The height in cm',
'minimum': 50,
'maximum': 300,
'type': 'integer',
},
},
'required': ['id'],
}
def test_nested_schema():
@pydantic.dataclasses.dataclass
class Nested:
number: int
@pydantic.dataclasses.dataclass
class Outer:
n: Nested
assert Outer.__pydantic_model__.schema() == {
'title': 'Outer',
'type': 'object',
'properties': {'n': {'$ref': '#/definitions/Nested'}},
'required': ['n'],
'definitions': {
'Nested': {
'title': 'Nested',
'type': 'object',
'properties': {'number': {'title': 'Number', 'type': 'integer'}},
'required': ['number'],
}
},
}
def test_initvar():
InitVar = dataclasses.InitVar
@pydantic.dataclasses.dataclass
class TestInitVar:
x: int
y: InitVar
tiv = TestInitVar(1, 2)
assert tiv.x == 1
with pytest.raises(AttributeError):
tiv.y
def test_derived_field_from_initvar():
InitVar = dataclasses.InitVar
@pydantic.dataclasses.dataclass
class DerivedWithInitVar:
plusone: int = dataclasses.field(init=False)
number: InitVar[int]
def __post_init__(self, number):
self.plusone = number + 1
derived = DerivedWithInitVar(1)
assert derived.plusone == 2
with pytest.raises(TypeError):
DerivedWithInitVar('Not A Number')
def test_initvars_post_init():
@pydantic.dataclasses.dataclass
class PathDataPostInit:
path: Path
base_path: dataclasses.InitVar[Optional[Path]] = None
def __post_init__(self, base_path):
if base_path is not None:
self.path = base_path / self.path
path_data = PathDataPostInit('world')
assert 'path' in path_data.__dict__
assert 'base_path' not in path_data.__dict__
assert path_data.path == Path('world')
with pytest.raises(TypeError) as exc_info:
PathDataPostInit('world', base_path='/hello')
assert str(exc_info.value) == "unsupported operand type(s) for /: 'str' and 'str'"
def test_initvars_post_init_post_parse():
@pydantic.dataclasses.dataclass
class PathDataPostInitPostParse:
path: Path
base_path: dataclasses.InitVar[Optional[Path]] = None
def __post_init_post_parse__(self, base_path):
if base_path is not None:
self.path = base_path / self.path
path_data = PathDataPostInitPostParse('world')
assert 'path' in path_data.__dict__
assert 'base_path' not in path_data.__dict__
assert path_data.path == Path('world')
assert PathDataPostInitPostParse('world', base_path='/hello').path == Path('/hello/world')
def test_classvar():
@pydantic.dataclasses.dataclass
class TestClassVar:
klassvar: ClassVar = "I'm a Class variable"
x: int
tcv = TestClassVar(2)
assert tcv.klassvar == "I'm a Class variable"
def test_frozenset_field():
@pydantic.dataclasses.dataclass
class TestFrozenSet:
set: FrozenSet[int]
test_set = frozenset({1, 2, 3})
object_under_test = TestFrozenSet(set=test_set)
assert object_under_test.set == test_set
def test_inheritance_post_init():
post_init_called = False
@pydantic.dataclasses.dataclass
class Base:
a: int
def __post_init__(self):
nonlocal post_init_called
post_init_called = True
@pydantic.dataclasses.dataclass
class Child(Base):
b: int
Child(a=1, b=2)
assert post_init_called
def test_hashable_required():
@pydantic.dataclasses.dataclass
class MyDataclass:
v: Hashable
MyDataclass(v=None)
with pytest.raises(ValidationError) as exc_info:
MyDataclass(v=[])
assert exc_info.value.errors() == [
{'loc': ('v',), 'msg': 'value is not a valid hashable', 'type': 'type_error.hashable'}
]
with pytest.raises(TypeError) as exc_info:
MyDataclass()
assert "__init__() missing 1 required positional argument: 'v'" in str(exc_info.value)
@pytest.mark.parametrize('default', [1, None, ...])
def test_hashable_optional(default):
@pydantic.dataclasses.dataclass
class MyDataclass:
v: Hashable = default
MyDataclass()
MyDataclass(v=None)
def test_override_builtin_dataclass():
@dataclasses.dataclass
class File:
hash: str
name: Optional[str]
size: int
content: Optional[bytes] = None
ValidFile = pydantic.dataclasses.dataclass(File)
file = File(hash='xxx', name=b'whatever.txt', size='456')
valid_file = ValidFile(hash='xxx', name=b'whatever.txt', size='456')
assert file.name == b'whatever.txt'
assert file.size == '456'
assert valid_file.name == 'whatever.txt'
assert valid_file.size == 456
assert isinstance(valid_file, File)
assert isinstance(valid_file, ValidFile)
with pytest.raises(ValidationError) as e:
ValidFile(hash=[1], name='name', size=3)
assert e.value.errors() == [{'loc': ('hash',), 'msg': 'str type expected', 'type': 'type_error.str'}]
def test_override_builtin_dataclass_2():
@dataclasses.dataclass
class Meta:
modified_date: Optional[datetime]
seen_count: int
Meta(modified_date='not-validated', seen_count=0)
@pydantic.dataclasses.dataclass
@dataclasses.dataclass
class File(Meta):
filename: str
Meta(modified_date='still-not-validated', seen_count=0)
f = File(filename=b'thefilename', modified_date='2020-01-01T00:00', seen_count='7')
assert f.filename == 'thefilename'
assert f.modified_date == datetime(2020, 1, 1, 0, 0)
assert f.seen_count == 7
def test_override_builtin_dataclass_nested():
@dataclasses.dataclass
class Meta:
modified_date: Optional[datetime]
seen_count: int
@dataclasses.dataclass
class File:
filename: str
meta: Meta
class Foo(BaseModel):
file: File
FileChecked = pydantic.dataclasses.dataclass(File)
f = FileChecked(filename=b'thefilename', meta=Meta(modified_date='2020-01-01T00:00', seen_count='7'))
assert f.filename == 'thefilename'
assert f.meta.modified_date == datetime(2020, 1, 1, 0, 0)
assert f.meta.seen_count == 7
with pytest.raises(ValidationError) as e:
FileChecked(filename=b'thefilename', meta=Meta(modified_date='2020-01-01T00:00', seen_count=['7']))
assert e.value.errors() == [
{'loc': ('meta', 'seen_count'), 'msg': 'value is not a valid integer', 'type': 'type_error.integer'}
]
foo = Foo.parse_obj(
{
'file': {
'filename': b'thefilename',
'meta': {'modified_date': '2020-01-01T00:00', 'seen_count': '7'},
},
}
)
assert foo.file.filename == 'thefilename'
assert foo.file.meta.modified_date == datetime(2020, 1, 1, 0, 0)
assert foo.file.meta.seen_count == 7
def test_override_builtin_dataclass_nested_schema():
@dataclasses.dataclass
class Meta:
modified_date: Optional[datetime]
seen_count: int
@dataclasses.dataclass
class File:
filename: str
meta: Meta
FileChecked = pydantic.dataclasses.dataclass(File)
assert FileChecked.__pydantic_model__.schema() == {
'definitions': {
'Meta': {
'properties': {
'modified_date': {'format': 'date-time', 'title': 'Modified ' 'Date', 'type': 'string'},
'seen_count': {'title': 'Seen Count', 'type': 'integer'},
},
'required': ['modified_date', 'seen_count'],
'title': 'Meta',
'type': 'object',
}
},
'properties': {
'filename': {'title': 'Filename', 'type': 'string'},
'meta': {'$ref': '#/definitions/Meta'},
},
'required': ['filename', 'meta'],
'title': 'File',
'type': 'object',
}
def test_inherit_builtin_dataclass():
@dataclasses.dataclass
class Z:
z: int
@dataclasses.dataclass
class Y(Z):
y: int
@pydantic.dataclasses.dataclass
class X(Y):
x: int
pika = X(x='2', y='4', z='3')
assert pika.x == 2
assert pika.y == 4
assert pika.z == 3
def test_dataclass_arbitrary():
class ArbitraryType:
def __init__(self):
...
@dataclasses.dataclass
class Test:
foo: ArbitraryType
bar: List[ArbitraryType]
class TestModel(BaseModel):
a: ArbitraryType
b: Test
class Config:
arbitrary_types_allowed = True
TestModel(a=ArbitraryType(), b=(ArbitraryType(), [ArbitraryType()]))
# Regression tests for stdlib/pydantic dataclass interoperability (pydantic v1).
def test_forward_stdlib_dataclass_params():
    # A frozen stdlib dataclass used as a field type must stay frozen after validation.
    @dataclasses.dataclass(frozen=True)
    class Item:
        name: str
    class Example(BaseModel):
        item: Item
        other: str
        class Config:
            arbitrary_types_allowed = True
    e = Example(item=Item(name='pika'), other='bulbi')
    e.other = 'bulbi2'
    with pytest.raises(dataclasses.FrozenInstanceError):
        e.item.name = 'pika2'
def test_pydantic_callable_field():
    """pydantic callable fields behaviour should be the same as stdlib dataclass"""
    def foo(arg1, arg2):
        return arg1, arg2
    def bar(x: int, y: float, z: str) -> bool:
        return str(x + y) == z
    class PydanticModel(BaseModel):
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    @pydantic.dataclasses.dataclass
    class PydanticDataclass:
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    @dataclasses.dataclass
    class StdlibDataclass:
        required_callable: Callable
        required_callable_2: Callable[[int, float, str], bool]
        default_callable: Callable = foo
        default_callable_2: Callable[[int, float, str], bool] = bar
    pyd_m = PydanticModel(required_callable=foo, required_callable_2=bar)
    pyd_dc = PydanticDataclass(required_callable=foo, required_callable_2=bar)
    std_dc = StdlibDataclass(required_callable=foo, required_callable_2=bar)
    # Callables must be passed through by identity, never copied or wrapped.
    assert (
        pyd_m.required_callable
        is pyd_m.default_callable
        is pyd_dc.required_callable
        is pyd_dc.default_callable
        is std_dc.required_callable
        is std_dc.default_callable
    )
    assert (
        pyd_m.required_callable_2
        is pyd_m.default_callable_2
        is pyd_dc.required_callable_2
        is pyd_dc.default_callable_2
        is std_dc.required_callable_2
        is std_dc.default_callable_2
    )
def test_pickle_overriden_builtin_dataclass(create_module):
    # A pickle round-trip must preserve the pydantic wrapping (and thus
    # validate_assignment behaviour) of a stdlib dataclass field.
    module = create_module(
        # language=Python
        """\
        import dataclasses
        import pydantic
        @dataclasses.dataclass
        class BuiltInDataclassForPickle:
            value: int
        class ModelForPickle(pydantic.BaseModel):
            # pickle can only work with top level classes as it imports them
            dataclass: BuiltInDataclassForPickle
            class Config:
                validate_assignment = True
        """
    )
    obj = module.ModelForPickle(dataclass=module.BuiltInDataclassForPickle(value=5))
    pickled_obj = pickle.dumps(obj)
    restored_obj = pickle.loads(pickled_obj)
    assert restored_obj.dataclass.value == 5
    assert restored_obj == obj
    # ensure the restored dataclass is still a pydantic dataclass
    with pytest.raises(ValidationError, match='value\n +value is not a valid integer'):
        restored_obj.dataclass.value = 'value of a wrong type'
def test_config_field_info_create_model():
    # Config.fields metadata must apply equally to BaseModel and to a dataclass
    # created with config=...; both schemas should carry the description.
    # works
    class A1(BaseModel):
        a: str
        class Config:
            fields = {'a': {'description': 'descr'}}
    assert A1.schema()['properties'] == {'a': {'title': 'A', 'description': 'descr', 'type': 'string'}}
    @pydantic.dataclasses.dataclass(config=A1.Config)
    class A2:
        a: str
    assert A2.__pydantic_model__.schema()['properties'] == {
        'a': {'title': 'A', 'description': 'descr', 'type': 'string'}
    }
def gen_2162_dataclasses():
    # Yields (inner, wrapper) pairs covering every stdlib/pydantic nesting combination.
    @dataclasses.dataclass(frozen=True)
    class StdLibFoo:
        a: str
        b: int
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticFoo:
        a: str
        b: int
    @dataclasses.dataclass(frozen=True)
    class StdLibBar:
        c: StdLibFoo
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticBar:
        c: PydanticFoo
    @dataclasses.dataclass(frozen=True)
    class StdLibBaz:
        c: PydanticFoo
    @pydantic.dataclasses.dataclass(frozen=True)
    class PydanticBaz:
        c: StdLibFoo
    foo = StdLibFoo(a='Foo', b=1)
    yield foo, StdLibBar(c=foo)
    foo = PydanticFoo(a='Foo', b=1)
    yield foo, PydanticBar(c=foo)
    foo = PydanticFoo(a='Foo', b=1)
    yield foo, StdLibBaz(c=foo)
    foo = StdLibFoo(a='Foo', b=1)
    yield foo, PydanticBaz(c=foo)
@pytest.mark.parametrize('foo,bar', gen_2162_dataclasses())
def test_issue_2162(foo, bar):
    # asdict/astuple/equality must treat nested stdlib and pydantic dataclasses alike.
    assert dataclasses.asdict(foo) == dataclasses.asdict(bar.c)
    assert dataclasses.astuple(foo) == dataclasses.astuple(bar.c)
    assert foo == bar.c
def test_issue_2383():
    # A custom __hash__ on a stdlib dataclass must survive validation into a model field.
    @dataclasses.dataclass
    class A:
        s: str
        def __hash__(self):
            return 123
    class B(pydantic.BaseModel):
        a: A
    a = A('')
    b = B(a=a)
    assert hash(a) == 123
    assert hash(b.a) == 123
def test_issue_2398():
    # order=True comparisons must keep working between raw and validated instances.
    @dataclasses.dataclass(order=True)
    class DC:
        num: int = 42
    class Model(pydantic.BaseModel):
        dc: DC
    real_dc = DC()
    model = Model(dc=real_dc)
    # This works as expected.
    assert real_dc <= real_dc
    assert model.dc <= model.dc
    assert real_dc <= model.dc
def test_issue_2424():
    """default_factory fields must work through dataclass inheritance, both for
    stdlib dataclasses and pydantic dataclasses deriving from a stdlib base."""
    @dataclasses.dataclass
    class Base:
        x: str
    @dataclasses.dataclass
    class Thing(Base):
        y: str = dataclasses.field(default_factory=str)
    assert Thing(x='hi').y == ''
    @pydantic.dataclasses.dataclass
    class ValidatedThing(Base):
        y: str = dataclasses.field(default_factory=str)
    # Fixed copy-paste defect: the Thing assertion was duplicated here; only the
    # validated class still needs checking.
    assert ValidatedThing(x='hi').y == ''
def test_issue_2541():
    # Nested frozen stdlib dataclasses: values are coerced on parse_obj but the
    # resulting instances stay immutable.
    @dataclasses.dataclass(frozen=True)
    class Infos:
        id: int
    @dataclasses.dataclass(frozen=True)
    class Item:
        name: str
        infos: Infos
    class Example(BaseModel):
        item: Item
    e = Example.parse_obj({'item': {'name': 123, 'infos': {'id': '1'}}})
    assert e.item.name == '123'
    assert e.item.infos.id == 1
    with pytest.raises(dataclasses.FrozenInstanceError):
        e.item.infos.id = 2
def test_issue_2555():
    # Schema generation must not fail on dataclass inheritance chains.
    @dataclasses.dataclass
    class Span:
        first: int
        last: int
    @dataclasses.dataclass
    class LabeledSpan(Span):
        label: str
    @dataclasses.dataclass
    class BinaryRelation:
        subject: LabeledSpan
        object: LabeledSpan
        label: str
    @dataclasses.dataclass
    class Sentence:
        relations: BinaryRelation
    class M(pydantic.BaseModel):
        s: Sentence
    assert M.schema()
def test_issue_2594():
    # A field typed as an empty stdlib dataclass must accept an empty dict.
    @dataclasses.dataclass
    class Empty:
        pass
    @pydantic.dataclasses.dataclass
    class M:
        e: Empty
    assert isinstance(M(e={}).e, Empty)
def test_schema_description_unset():
    # Without a class docstring, no 'description' key should appear in the schema.
    @pydantic.dataclasses.dataclass
    class A:
        x: int
    assert 'description' not in A.__pydantic_model__.schema()
    @pydantic.dataclasses.dataclass
    @dataclasses.dataclass
    class B:
        x: int
    assert 'description' not in B.__pydantic_model__.schema()
def test_schema_description_set():
    """A class docstring must surface as the JSON schema 'description', for both a
    plain pydantic dataclass and a pydantic-wrapped stdlib dataclass."""
    @pydantic.dataclasses.dataclass
    class A:
        """my description"""
        x: int
    assert A.__pydantic_model__.schema()['description'] == 'my description'
    @pydantic.dataclasses.dataclass
    @dataclasses.dataclass
    class B:
        """my description"""
        x: int
    # Bug fix: the original re-asserted A's schema here, so B was never checked.
    assert B.__pydantic_model__.schema()['description'] == 'my description'
def test_issue_3011():
    # An instance of a subclass must validate where the base dataclass is annotated
    # (requires arbitrary_types_allowed).
    @dataclasses.dataclass
    class A:
        thing_a: str
    class B(A):
        thing_b: str
    class Config:
        arbitrary_types_allowed = True
    @pydantic.dataclasses.dataclass(config=Config)
    class C:
        thing: A
    b = B('Thing A')
    c = C(thing=b)
    assert c.thing.thing_a == 'Thing A'
def test_issue_3162():
    # A dataclass used by two fields must be emitted once under 'definitions'
    # and referenced via $ref from both properties.
    @dataclasses.dataclass
    class User:
        id: int
        name: str
    class Users(BaseModel):
        user: User
        other_user: User
    assert Users.schema() == {
        'title': 'Users',
        'type': 'object',
        'properties': {'user': {'$ref': '#/definitions/User'}, 'other_user': {'$ref': '#/definitions/User'}},
        'required': ['user', 'other_user'],
        'definitions': {
            'User': {
                'title': 'User',
                'type': 'object',
                'properties': {'id': {'title': 'Id', 'type': 'integer'}, 'name': {'title': 'Name', 'type': 'string'}},
                'required': ['id', 'name'],
            }
        },
    }
def test_discrimated_union_basemodel_instance_value():
    # Discriminated unions declared through dataclasses.field(metadata=...) must
    # accept already-built instances and emit the discriminator mapping in the schema.
    @pydantic.dataclasses.dataclass
    class A:
        l: Literal['a']
    @pydantic.dataclasses.dataclass
    class B:
        l: Literal['b']
    @pydantic.dataclasses.dataclass
    class Top:
        sub: Union[A, B] = dataclasses.field(metadata=dict(discriminator='l'))
    t = Top(sub=A(l='a'))
    assert isinstance(t, Top)
    assert Top.__pydantic_model__.schema() == {
        'title': 'Top',
        'type': 'object',
        'properties': {
            'sub': {
                'title': 'Sub',
                'discriminator': {'propertyName': 'l', 'mapping': {'a': '#/definitions/A', 'b': '#/definitions/B'}},
                'anyOf': [{'$ref': '#/definitions/A'}, {'$ref': '#/definitions/B'}],
            }
        },
        'required': ['sub'],
        'definitions': {
            'A': {
                'title': 'A',
                'type': 'object',
                'properties': {'l': {'title': 'L', 'enum': ['a'], 'type': 'string'}},
                'required': ['l'],
            },
            'B': {
                'title': 'B',
                'type': 'object',
                'properties': {'l': {'title': 'L', 'enum': ['b'], 'type': 'string'}},
                'required': ['l'],
            },
        },
    }
def test_post_init_after_validation():
    # With post_init_call='after_validation', __post_init__ must observe the
    # coerced value (a real set after JSON round-trip), not the raw input.
    @dataclasses.dataclass
    class SetWrapper:
        set: Set[int]
        def __post_init__(self):
            assert isinstance(
                self.set, set
            ), f"self.set should be a set but it's {self.set!r} of type {type(self.set).__name__}"
    class Model(pydantic.BaseModel, post_init_call='after_validation'):
        set_wrapper: SetWrapper
    model = Model(set_wrapper=SetWrapper({1, 2, 3}))
    json_text = model.json()
    assert Model.parse_raw(json_text) == model
def test_keeps_custom_properties():
    # A custom __new__ must still run when the class is wrapped as a stdlib or
    # pydantic dataclass.
    class StandardClass:
        """Class which modifies instance creation."""
        a: str
        def __new__(cls, *args, **kwargs):
            instance = super().__new__(cls)
            instance._special_property = 1
            return instance
    StandardLibDataclass = dataclasses.dataclass(StandardClass)
    PydanticDataclass = pydantic.dataclasses.dataclass(StandardClass)
    clases_to_test = [StandardLibDataclass, PydanticDataclass]
    test_string = 'string'
    for cls in clases_to_test:
        instance = cls(a=test_string)
        assert instance._special_property == 1
        assert instance.a == test_string
def test_ignore_extra():
    # Extra.ignore drops unknown keys from __dict__.
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Foo:
        x: int
    foo = Foo(**{'x': '1', 'y': '2'})
    assert foo.__dict__ == {'x': 1, '__pydantic_initialised__': True}
def test_ignore_extra_subclass():
    # Extra.ignore still applies through dataclass inheritance.
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Foo:
        x: int
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.ignore))
    class Bar(Foo):
        y: int
    bar = Bar(**{'x': '1', 'y': '2', 'z': '3'})
    assert bar.__dict__ == {'x': 1, 'y': 2, '__pydantic_initialised__': True}
def test_allow_extra():
    # Extra.allow keeps unknown keys unvalidated (note '2' stays a str).
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foo:
        x: int
    foo = Foo(**{'x': '1', 'y': '2'})
    assert foo.__dict__ == {'x': 1, 'y': '2', '__pydantic_initialised__': True}
def test_allow_extra_subclass():
    # Extra.allow with inheritance: declared fields validate, extras pass through.
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foo:
        x: int
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Bar(Foo):
        y: int
    bar = Bar(**{'x': '1', 'y': '2', 'z': '3'})
    assert bar.__dict__ == {'x': 1, 'y': 2, 'z': '3', '__pydantic_initialised__': True}
def test_forbid_extra():
    # Extra.forbid surfaces as the stdlib-style TypeError for unexpected kwargs.
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.forbid))
    class Foo:
        x: int
    with pytest.raises(TypeError, match=re.escape("__init__() got an unexpected keyword argument 'y'")):
        Foo(**{'x': '1', 'y': '2'})
def test_post_init_allow_extra():
    # __post_init__ runs with extras already attached when Extra.allow is set.
    @pydantic.dataclasses.dataclass(config=dict(extra=Extra.allow))
    class Foobar:
        a: int
        b: str
        def __post_init__(self):
            self.a *= 2
    assert Foobar(a=1, b='a', c=4).__dict__ == {'a': 2, 'b': 'a', 'c': 4, '__pydantic_initialised__': True}
def test_self_reference_dataclass():
    # A self-referencing string annotation must resolve back to the dataclass itself.
    @pydantic.dataclasses.dataclass
    class MyDataclass:
        self_reference: 'MyDataclass'
    assert MyDataclass.__pydantic_model__.__fields__['self_reference'].type_ is MyDataclass
| merlinepedra25/PYDANTIC | tests/test_dataclasses.py | test_dataclasses.py | py | 34,040 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pydantic.dataclasses",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "pydantic.dataclasses",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "pydantic.dataclasses",
"line_number": 42,
"usage_type": "attribute"
},
{
"api... |
31508455076 | import asyncio
import json
from aiogram import Bot, Dispatcher, executor, types
from aiogram.utils.markdown import hbold, hunderline, hcode, hlink
from aiogram.dispatcher.filters import Text
from config import token
from test import morph
from main import check_news_update
bot = Bot(token=token, parse_mode=types.ParseMode.HTML)
dp = Dispatcher(bot)
def get_json(ner, date):
    """Filter cached news items by named entities and an optional date.

    :param ner: iterable of entity strings; every one must appear among the
        entities extracted from the article description.
    :param date: date constraint produced by morph(); empty/falsy means no
        date filter. NOTE(review): the original mixes len(date) with attribute
        access (date.day/.month/.year) -- confirm what morph() actually returns.
    :return: list of [article_data, article_full_desc] pairs that match.
    """
    matching = list()
    with open("news_dict.json", "r", encoding="utf-8") as read_file:
        data = json.load(read_file)
    day = None
    month = None
    year = None
    for key in data.keys():
        entities, _ = morph(str(data[key]['article_full_desc']))
        _, article_dates = morph(str(data[key]['article_data']))
        keep = True
        for entity in ner:
            if entity not in entities:
                keep = False
        if len(date) != 0:
            day = date.day
            month = date.month
            year = date.year
        for d in article_dates:
            if day is not None and day != d.day:
                keep = False
            # Bug fix: month and year were both compared against d.day,
            # so month/year filters could never match correctly.
            if month is not None and month != d.month:
                keep = False
            if year is not None and year != d.year:
                keep = False
        if keep:
            matching.append([data[key]['article_data'], data[key]['article_full_desc']])
    # Debug prints ('gf' and the per-iteration dump) removed.
    return matching
@dp.message_handler(content_types=types.ContentTypes.TEXT)
async def process_text_message(message: types.Message):
    """Answer with every news item matching the message, or a fallback notice.

    Bug fix: the original used ``for/else`` -- the else branch runs whenever the
    loop finishes without ``break``, so the "no news" reply was sent even after
    results had been delivered. Now it is sent only when nothing matched.
    """
    entities, dates = morph(message.text)
    found = get_json(entities, dates)
    if found:
        for news in found:
            await message.answer(str(news[0]) + "\n" + str(news[1]))
    else:
        await message.answer("Нет таких новостей")
    # Preserved trailing acknowledgement from the original handler.
    await message.answer("1")
# @dp.message_handler(Text(equals="Сводка"))
# async def get_news_summary(message: types.Message):
# with open("news_dict.json",encoding="utf-8") as file:
# news_dict = json.load(file)
#
# for k, v in sorted(news_dict.items())[-5:]:
# text = prediction(message.text)
if __name__ == '__main__':
executor.start_polling(dp)
| KondratevProgi/news | tg_bot.py | tg_bot.py | py | 2,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.Bot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.token",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "aiogram.types.ParseMode",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "aiogram.types",... |
45556966628 | """Calcula el precio de la energía diario a partir de los precio horarios"""
import pathlib
from utils import read_format_hourly_prices, resample_hourly_prices
base_path = pathlib.Path.cwd()
cleansed_path = base_path.joinpath("data_lake/cleansed")
business_path = base_path.joinpath("data_lake/business")
def compute_daily_prices(
    source_path=cleansed_path,
    target_path=business_path,
    source_filename="precios-horarios.csv",
    target_namefile="precios-diarios.csv",
):
    """Compute the average daily prices.

    Reads data_lake/cleansed/precios-horarios.csv and computes the average
    daily price (over the 24 hours of each day). The resulting
    data_lake/business/precios-diarios.csv has columns:

    * fecha: date in YYYY-MM-DD format
    * precio: average daily electricity price on the national exchange
    """
    # Load + normalize the hourly series, resample to daily frequency
    # (presumably a mean, per the helper's contract -- defined in utils),
    # and persist into the business layer of the data lake.
    df_hourly_prices = read_format_hourly_prices(source_path, filename=source_filename)
    df_daily_prices = resample_hourly_prices(df_hourly_prices, freq="D")
    df_daily_prices.to_csv(target_path.joinpath(target_namefile))
if __name__ == "__main__":
import doctest
compute_daily_prices()
doctest.testmod()
| productos-de-datos/proyecto-albetancurqu42 | src/data/compute_daily_prices.py | compute_daily_prices.py | py | 1,216 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pathlib.Path.cwd",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "utils.read_format_hourly_prices",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "util... |
24788364259 | import collections
import heapq
class Solution:
    """LeetCode 743: time for a signal from node k to reach all n nodes (Dijkstra)."""

    def networkDelayTime(self, times: list[list[int]], n: int, k: int) -> int:
        # Adjacency list: node -> [(edge weight, neighbor), ...]
        adjacency = collections.defaultdict(list)
        for src, dst, cost in times:
            adjacency[src].append((cost, dst))
        # Min-heap of (distance travelled so far, node), seeded at the source.
        pending = [(0, k)]
        shortest = {}
        while pending:
            travelled, current = heapq.heappop(pending)
            if current in shortest:
                continue  # already settled with a shorter (or equal) distance
            shortest[current] = travelled
            for step, neighbor in adjacency[current]:
                heapq.heappush(pending, (travelled + step, neighbor))
            if len(shortest) == n:
                # Every node reached; the answer is the farthest settled distance.
                return max(shortest.values())
        return -1  # some node is unreachable from k
temp = Solution()
print(temp.networkDelayTime([[2,1,1],[2,3,1],[3,4,1]], 4, 2))
| inhyeokJeon/AALGGO | Python/LeetCode/shortest_path/743_network_delay_time.py | 743_network_delay_time.py | py | 772 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "heapq.hea... |
71119081063 | def minigame():
    """Run the side-scrolling zombie-dodge minigame (pygame); loops until quit.

    The player steers an avatar with WASD/arrow keys, dodging zombies that spawn
    on a timer; surviving until score reaches 20 shows the win screen.
    """
    import pygame
    import sys
    import pictures
    import random
    status = 'alive'
    # Possible vertical spawn positions for zombies (pixels from the top).
    zombie_size = [50,100,150,200,250,300]
    obstacle_list = []
    bg_pos = 0
    move= 0
    side = 0
    score = 0
    game_screen = pygame.display.set_mode((608,342)) #creates a screen 1024 pixels wide and 576 pixels long
    clock = pygame.time.Clock() # creates clock object, this will be used later to control the fps
    def move_bg():
        # Draw two copies of the background side by side for seamless scrolling.
        game_screen.blit(background,(bg_pos,0))
        game_screen.blit(background,(bg_pos+608,0))
    def new_obstacle():
        # Spawn a zombie just off the right edge at a random height.
        # NOTE(review): the local name shadows this function's own name.
        z_size = random.choice(zombie_size)
        new_obstacle = obstacle.get_rect(center =(800,z_size))
        return new_obstacle
    def move_obstacle(obstacle):
        # Shift every active zombie one pixel left per frame.
        for obstacle in obstacle_list:
            obstacle.centerx = obstacle.centerx -1
    def obstacle_screen(obstacle):
        # Blit the zombie sprite at each active rect.
        for obstacles in obstacle_list:
            game_screen.blit(obstacle, obstacles)
    def death (obstacle):
        # Return 'dead' if the avatar overlaps any zombie, else 'alive'.
        # NOTE(review): this local 'status' shadows the outer variable; it works
        # only because the result is returned and reassigned by the caller.
        status = 'alive'
        for obstacle in obstacle_list:
            if avatar_bound.colliderect(obstacle):
                status = 'dead'
        return status
    pygame.init() #initialize pygame module
    #uploading all the necessary images
    background = pygame.image.load('pictures/bg.jpg').convert()
    avatar = pygame.image.load('pictures/avatar.png').convert_alpha()
    obstacle = pygame.image.load('pictures/zombie.png').convert_alpha()
    gameover = pygame.image.load('pictures/gameover.jpg').convert()
    gamewin = pygame.image.load('pictures/win.webp').convert()
    #transforming and scaling images
    avatar = pygame.transform.scale(avatar, (58, 62))
    obstacle = pygame.transform.scale(obstacle, (60, 80))
    gameover= pygame.transform.scale(gameover,(608,342))
    gamewin= pygame.transform.scale(gamewin,(608,342))
    avatar_bound = avatar.get_rect(center =(50,150)) # get rectangle around surface this will help us check for collisions
    # Custom timer event that triggers a new zombie spawn every 2200 ms.
    add_obstacle = pygame.USEREVENT
    pygame.time.set_timer(add_obstacle, 2200) #every 1500 millseconds event
    while True:
        # write code to exit out of module (add score parameters
        for event in pygame.event.get():
            if event.type == pygame.QUIT: #or score reaches 10
                pygame.quit()
                sys.exit()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_UP or event.key == ord('w'):
                    move =0
                    move = move - 3
                if event.key == pygame.K_DOWN or event.key == ord('s'):
                    move =0
                    move = move + 3
                if event.key == pygame.K_LEFT or event.key == ord('a'):
                    side = 0
                    side = side - 3
                if event.key == pygame.K_RIGHT or event.key == ord('d'):
                    side = 0
                    side = side + 3
            if event.type == pygame.KEYUP:
                # Stop moving as soon as any key is released.
                side = 0
                move = 0
            if event.type == add_obstacle :
                obstacle_list.append(new_obstacle())
        # Scroll the background and wrap it once a full copy has passed.
        bg_pos = bg_pos-1
        game_screen.blit(background,(bg_pos,0)) # block transfer of bg image, upper left corner at (0,0)
        move_bg()
        if bg_pos <= -608:
            bg_pos = 0
        if status == 'alive' and score<= 20:
            move_obstacle(obstacle_list)
            obstacle_screen(obstacle)
            game_screen.blit(avatar, avatar_bound)
            # controlling movements of avatar
            avatar_bound.centery = avatar_bound.centery + move
            avatar_bound.centerx = avatar_bound.centerx + side
            status = death(obstacle_list)
            score = score + 0.01
        elif status == 'dead':
            game_screen.blit(gameover,(-20,0))
        elif score >= 20:
            game_screen.blit(gamewin,(0,0))
        pygame.display.update() # updating the display screen
        clock.tick(100) #updates 100 times in a second
{
"api_name": "pygame.display.set_mode",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pygame.time.Clock",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pygame.... |
9108378468 | from setuptools import setup, find_packages
from os import path
from io import open
# Resolve paths relative to this setup.py so builds work from any CWD.
here = path.abspath(path.dirname(__file__))

reqs = []
with open(path.join(here, "README.md"), encoding="utf-8") as f:
    long_description = f.read()
with open(path.join(here, "requirements.txt"), encoding="utf-8") as f:
    # Robustness fix: skip blank lines so install_requires never receives
    # empty entries (the old list comprehension kept them as '' strings).
    reqs = [line.strip() for line in f if line.strip()]
# Package metadata; long_description and reqs are read from README.md and
# requirements.txt at the top of this file.
setup(
    name="decimaljs",
    version="1.0.4",
    description="An arbitrary-precision Decimal type for JavaScript to Python",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/kirankotari/decimaljs",
    author="Kiran Kumar Kotari",
    author_email="kirankotari@live.com",
    install_requires=reqs,
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Intended Audience :: Developers",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3.5",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
    ],
    keywords="decimal.js decimal decimaljs",
    # Ship everything under the project root except the tests package.
    packages=find_packages(where=".", exclude=["tests"]),
    include_package_data=True,
)
| kirankotari/decimaljs | setup.py | setup.py | py | 1,323 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "io.open",
"line_number": 8,
... |
74267310505 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""linebot.models.actions module."""
from abc import ABCMeta
from future.utils import with_metaclass
from .base import Base
def get_action(action):
    """Deserialize one action dict into its concrete Action subclass.

    Dispatches on the dict's ``type`` key through
    ``Base.get_or_new_from_json_dict_with_types``; the helper's return value for
    an unmatched type is passed through unchanged.
    """
    action_obj = Base.get_or_new_from_json_dict_with_types(
        action, {
            'postback': PostbackAction,
            'message': MessageAction,
            'uri': URIAction,
            'datetimepicker': DatetimePickerAction,
            'camera': CameraAction,
            'cameraRoll': CameraRollAction,
            'location': LocationAction,
            'richmenuswitch': RichMenuSwitchAction,
        }
    )
    return action_obj
def get_actions(actions):
    """Deserialize a list of action dicts, dropping entries that convert to falsy."""
    if not actions:
        return []
    converted = (get_action(action) for action in actions)
    return [action_obj for action_obj in converted if action_obj]
class Action(with_metaclass(ABCMeta, Base)):
    """Abstract base class of Action."""
    def __init__(self, **kwargs):
        """__init__ method.

        :param kwargs:
        """
        super(Action, self).__init__(**kwargs)
        # Concrete subclasses overwrite this with their API action-type string.
        self.type = None
class PostbackAction(Action):
    """PostbackAction.

    https://developers.line.me/en/docs/messaging-api/reference/#postback-action

    When a control associated with this action is tapped,
    a postback event is returned via webhook with the specified string in the data property.
    """
    def __init__(
        self,
        label=None,
        data=None,
        display_text=None,
        text=None,
        input_option=None,
        fill_in_text=None,
        **kwargs
    ):
        """__init__ method.

        :param str label: Label for the action.
        :param str data: String returned via webhook
            in the postback.data property of the postback event.
        :param str display_text: Text displayed in the chat as a message sent by
            the user when the action is performed.
        :param str text: Deprecated. Text displayed in the chat as a message sent by
            the user when the action is performed. Returned from the server through a webhook.
        :param str input_option: Display option after the action is performed
            (see the LINE Messaging API postback action reference).
        :param str fill_in_text: Text to pre-fill in the input field
            (see the LINE Messaging API postback action reference).
        :param kwargs:
        """
        super(PostbackAction, self).__init__(**kwargs)
        self.type = 'postback'
        self.label = label
        self.data = data
        self.display_text = display_text
        self.text = text
        self.input_option = input_option
        self.fill_in_text = fill_in_text
class MessageAction(Action):
    """MessageAction.

    https://developers.line.me/en/docs/messaging-api/reference/#message-action

    When a control associated with this action is tapped,
    the string in the text property is sent as a message from the user.
    """
    def __init__(self, label=None, text=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action.
        :param str text: Text sent when the action is performed.
        :param kwargs:
        """
        super(MessageAction, self).__init__(**kwargs)
        self.type = 'message'
        self.label = label
        self.text = text
class URIAction(Action):
    """URIAction.

    https://developers.line.me/en/docs/messaging-api/reference/#uri-action

    When a control associated with this action is tapped,
    the URI specified in the uri property is opened.
    """
    def __init__(self, label=None, uri=None, alt_uri=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
            Max: 20 characters
        :param str uri: URI opened when the action is performed.
        :param alt_uri: URI opened when the desktop app.
        :type alt_uri: T <= :py:class:`linebot.models.actions.AltUri`
        :param kwargs:
        """
        super(URIAction, self).__init__(**kwargs)
        self.type = 'uri'
        self.label = label
        self.uri = uri
        # Accept either an AltUri instance or a plain dict from parsed JSON.
        self.alt_uri = self.get_or_new_from_json_dict(alt_uri, AltUri)
class AltUri(with_metaclass(ABCMeta, Base)):
    """AltUri.

    https://github.com/line/line-bot-sdk-python/issues/155

    Alternative URI opened by the desktop app (see URIAction.alt_uri).
    """
    def __init__(self, desktop=None, **kwargs):
        """__init__ method.

        :param str desktop: URI opened on LINE for macOS and Windows
            when the action is performed.
            If the altUri.desktop property is set,
            the uri property is ignored on LINE for macOS and Windows.
        :param kwargs:
        """
        super(AltUri, self).__init__(**kwargs)
        self.desktop = desktop
class DatetimePickerAction(Action):
    """DatetimePickerAction.

    https://developers.line.me/en/docs/messaging-api/reference/#datetime-picker-action

    When a control associated with this action is tapped,
    a postback event is returned via webhook with the date and time
    selected by the user from the date and time selection dialog.
    The datetime picker action does not support time zones.
    """
    def __init__(self, label=None, data=None, mode=None,
                 initial=None, max=None, min=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
        :param str data: String returned via webhook
            in the postback.data property of the postback event
        :param str mode: Action mode
            date: Pick date
            time: Pick time
            datetime: Pick date and time
        :param str initial: Initial value of date or time
        :param str max: Largest date or time value that can be selected.
            Must be greater than the min value.
        :param str min: Smallest date or time value that can be selected.
            Must be less than the max value.
        :param kwargs:
        """
        super(DatetimePickerAction, self).__init__(**kwargs)
        self.type = 'datetimepicker'
        self.label = label
        self.data = data
        self.mode = mode
        self.initial = initial
        # Parameter names max/min mirror the API fields and deliberately shadow builtins.
        self.max = max
        self.min = min
class CameraAction(Action):
    """CameraAction.

    https://developers.line.me/en/reference/messaging-api/#camera-action

    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the camera screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
        :param kwargs:
        """
        super(CameraAction, self).__init__(**kwargs)
        self.type = 'camera'
        self.label = label
class CameraRollAction(Action):
    """CameraRollAction.

    https://developers.line.me/en/reference/messaging-api/#camera-roll-action

    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the camera roll screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
        :param kwargs:
        """
        super(CameraRollAction, self).__init__(**kwargs)
        self.type = 'cameraRoll'
        self.label = label
class LocationAction(Action):
    """LocationAction.

    https://developers.line.me/en/reference/messaging-api/#location-action

    This action can be configured only with quick reply buttons.
    When a button associated with this action is tapped,
    the location screen in the LINE app is opened.
    """
    def __init__(self, label=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
        :param kwargs:
        """
        super(LocationAction, self).__init__(**kwargs)
        self.type = 'location'
        self.label = label
class RichMenuSwitchAction(Action):
    """RichMenuSwitchAction.

    https://developers.line.biz/en/reference/messaging-api/#richmenu-switch-action

    This action can be configured only with rich menus.
    It can't be used for Flex Messages or quick replies.
    When you tap a rich menu associated with this action,
    you can switch between rich menus,
    and a postback event including the rich menu alias ID selected
    by the user is returned via a webhook.
    """
    def __init__(self, label=None, rich_menu_alias_id=None, data=None, **kwargs):
        """__init__ method.

        :param str label: Label for the action
        :param str rich_menu_alias_id: Rich menu alias ID to switch to.
        :param str data: String returned by the postback.data property
            of the postback event via a webhook
        :param kwargs:
        """
        super(RichMenuSwitchAction, self).__init__(**kwargs)
        self.type = 'richmenuswitch'
        self.label = label
        self.rich_menu_alias_id = rich_menu_alias_id
        self.data = data
| line/line-bot-sdk-python | linebot/models/actions.py | actions.py | py | 9,405 | python | en | code | 1,739 | github-code | 36 | [
{
"api_name": "base.Base.get_or_new_from_json_dict_with_types",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "base.Base",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "future.utils.with_metaclass",
"line_number": 54,
"usage_type": "call"
},
{
... |
13038148522 | import fresh_tomatoes
import media
import requests
import json
import config
youtube_suffix = config.youtube_key
youtube_prefix = 'https://www.youtube.com/watch?v='
# Movie list -- Here you can add and subtract movies as your tastes change
movie_list = ["There Will Be Blood", "The Life Aquatic", "Unforgiven",
"Gladiator", "About Time", "The 'Burbs"]
movies = []
def get_info(video):
    """Fetch a trailer URL and movie metadata, appending a media.Movie to `movies`.

    Queries the YouTube Data API for the first matching trailer, then the Open
    Movie Database (OMDB) for the movie's details.
    """
    # Bug fix: the query string used to fuse the title and "trailer" with no
    # separator (e.g. "Gladiatortrailer"); build structured params instead and
    # let requests handle URL encoding.
    youtube = requests.get(
        'https://www.googleapis.com/youtube/v3/search',
        params={
            'part': 'snippet',
            'q': video + ' trailer',
            'maxResults': 1,
            'key': youtube_suffix,
        },
        timeout=20,
    )
    youtube_dict = json.loads(youtube.text)
    video_id = youtube_dict['items'][0]['id']['videoId']
    video_url = youtube_prefix + video_id
    # OMDB lookup by exact title.
    result = requests.get('http://www.omdbapi.com/?t=' + video + '&y=&plot='
                          'short&r=json', timeout=20)
    resp_dict = json.loads(result.text)
    movies.append(media.Movie(
        video_url,              # trailer
        resp_dict["Title"],
        resp_dict["Poster"],
        resp_dict["Released"],
        resp_dict["Rated"],
        resp_dict["Runtime"],
        resp_dict["Genre"],
        resp_dict["Director"],
        resp_dict["Plot"],
        resp_dict["Actors"],
    ))
# Create movie instances and add them to the movies list
for movie in movie_list:
get_info(movie)
fresh_tomatoes.open_movies_page(movies)
| aaronbjohnson/movie-trailer-website | entertainment_center.py | entertainment_center.py | py | 1,862 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.youtube_key",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "requests.get",
"li... |
23420997070 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Renames Comanda.tiempo_estimado_elaboracion to tiempo_estimado_procesamiento
    # and refreshes verbose names / FK targets on related Comanda fields.
    dependencies = [
        ('ventas', '0075_auto_20161016_1522'),
    ]
    operations = [
        migrations.RenameField(
            model_name='comanda',
            old_name='tiempo_estimado_elaboracion',
            new_name='tiempo_estimado_procesamiento',
        ),
        migrations.AlterField(
            model_name='comanda',
            name='area_solicitante',
            field=models.ForeignKey(related_name='area_solicitante_comanda', verbose_name=b'Area Solicitante', to='bar.Sector'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='fecha_hora_pedido_comanda',
            field=models.DateTimeField(verbose_name=b'Fecha/hora Comanda'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='numero_pedido',
            field=models.ForeignKey(verbose_name=b'Numero de Pedido', to='ventas.Pedido'),
        ),
        migrations.AlterField(
            model_name='comanda',
            name='producto_a_entregar',
            field=models.ForeignKey(verbose_name=b'Producto Solicitado', to='stock.ProductoVenta'),
        ),
    ]
| pmmrpy/SIGB | ventas/migrations/0076_auto_20161016_1554.py | 0076_auto_20161016_1554.py | py | 1,288 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.RenameField",
"line_number": 14,
"usage_type": "call"
},
... |
37677063476 | #!/usr/bin/env python3
import os
import sys
import urllib.request
from flask import (
Flask,
flash,
jsonify,
make_response,
redirect,
render_template,
request,
)
from werkzeug.utils import secure_filename
from scripts import predict_model
from scripts import mongodb
from scripts import train_model
app = Flask(__name__)
app.config["JSONIFY_PRETTYPRINT_REGULAR"] = True
ENVIRONMENT_WINDOWS = True
@app.route("/")
def index():
    # Simple liveness endpoint.
    return "Application Alive"
@app.route("/api/v1.0/users", methods=["GET"])
def get_users():
    # List known users as JSON, backed by the mongodb helper module.
    return jsonify({"Users": mongodb.get_list_users()})
@app.route("/api/v1.0/experiments/<user>", methods=["GET"])
def get_experiments(user):
    """List a user's experiments (the subdirectories of data/<user>/)."""
    user = user.lower()
    file_dir_data = os.listdir("data/")
    if user not in file_dir_data:
        return make_response(jsonify({"error": "User Not Found"}), 404)
    else:
        file_dir_user = os.listdir("data/{}".format(user))
        return jsonify({"user": user, "experiments": file_dir_user})
@app.route("/api/v1.0/train/<user>/<experiment_name>", methods=["GET"])
def train_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
file_dir_data = os.listdir("data/")
if user not in file_dir_data:
return make_response(jsonify({"error": "User Not Found"}), 404)
file_dir_user = os.listdir("data/{}".format(user))
if experiment_name not in file_dir_user:
return make_response(jsonify({"error": "Experiment Not Found"}), 404)
model_def = "data/{}/{}/model_definition.yaml".format(user, experiment_name)
data_csv = "data/{}/{}/train_data.csv".format(user, experiment_name)
log_file = "data/{}/{}/training.log".format(user, experiment_name)
if ENVIRONMENT_WINDOWS:
output_dir = "data\{}\{}".format(user, experiment_name)
else:
output_dir = "data/{}/{}".format(user, experiment_name)
res = train_model.train_model(
model_def, output_dir, data_csv, experiment_name, log_file
)
if res != True:
return jsonify(
{
"user": user,
"response": res,
"model_definition": model_def,
"data_csv": data_csv,
"log_file": log_file,
"output_dir": output_dir,
}
)
return jsonify({"user": user, "response": "Training in Progress"})
@app.route("/api/v1.0/predict/<user>/<experiment_name>", methods=["POST"])
def predict_experiment(user, experiment_name):
file = request.files["file"]
if file.filename != "":
filename = secure_filename(file.filename)
if "predict" not in os.listdir("data/{}/{}".format(user, experiment_name)):
os.mkdir("data/{}/{}/predict/".format(user, experiment_name))
file.save("data/{}/{}/predict/{}".format(user, experiment_name, filename))
return jsonify(
{"result": {"dog": 0.85, "cat": 0.15}, "exp": experiment_name, "user": user}
)
return jsonify({"response": "Error"})
@app.route("/api/v1.0/register/<user>", methods=["GET"])
def register_user(user):
user = user.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
return make_response(jsonify({"error": "User Already Exists"}), 420)
else:
os.mkdir("data/{}".format(user))
return jsonify({"user": user, "response": "User Successfully Created"})
@app.route("/api/v1.0/register/<user>/<experiment_name>", methods=["GET"])
def register_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
dir_list_experiments = os.listdir("data/{}/".format(user))
if experiment_name in dir_list_experiments:
return make_response(jsonify({"error": "Experiment Already Exists"}), 420)
else:
os.mkdir("data/{}/{}".format(user, experiment_name))
return jsonify(
{
"user": user,
"experiment": experiment_name,
"response": "Experiment Successfully Created",
}
)
else:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
@app.route("/api/v1.0/remove/<user>", methods=["GET"])
def remove_user(user):
user = user.lower()
dir_list_users = os.listdir("data/")
if user not in dir_list_users:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
else:
dir_list_experiments = os.listdir("data/{}".format(user))
if len(dir_list_experiments) == 0:
os.rmdir("data/{}".format(user))
return jsonify({"user": user, "response": "User Successfully Removed"})
else:
return jsonify(
{
"user": user,
"response": "User Experiments still exist",
"experiments": dir_list_experiments,
}
)
@app.route("/api/v1.0/remove/<user>/<experiment_name>", methods=["GET"])
def remove_experiment(user, experiment_name):
user = user.lower()
experiment_name = experiment_name.lower()
dir_list_users = os.listdir("data/")
if user in dir_list_users:
dir_list_experiments = os.listdir("data/{}/".format(user))
if experiment_name in dir_list_experiments:
os.rmdir("data/{}/{}".format(user, experiment_name))
return jsonify(
{
"user": user,
"experiment": experiment_name,
"response": "Experiment Successfully Removed",
}
)
else:
return make_response(jsonify({"error": "Experiment Does Not Exist"}), 420)
else:
return make_response(jsonify({"error": "User Does Not Exist"}), 420)
@app.errorhandler(404)
def not_found(error):
    """Render unknown routes as a JSON 404 instead of Flask's HTML page."""
    payload = jsonify({"error": "Request Not Found"})
    return make_response(payload, 404)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port="5000")
| projectasteria/PlaceholderAPI | app.py | app.py | py | 6,137 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scripts.mongodb.get_list_users",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "scripts.mong... |
71578942183 | import vtk
def main():
    """Deform an elevation-colored sphere with vtkDeformPointSet by moving
    one control point of an enclosing octahedral control mesh."""
    colors = vtk.vtkNamedColors()

    # Set the background color.
    colors.SetColor("bkg", [0.2, 0.3, 0.4, 1.0])

    # Create a sphere to deform
    sphere = vtk.vtkSphereSource()
    sphere.SetThetaResolution(51)
    sphere.SetPhiResolution(17)
    sphere.Update()
    bounds = sphere.GetOutput().GetBounds()

    # Color the sphere by height (z) across its bounds.
    # NOTE: the original code first called SetLowPoint(0, 0, -0.5) /
    # SetHighPoint(0, 0, 0.5) and immediately overwrote both; those dead
    # calls (and the stray semicolons) have been removed.
    ele = vtk.vtkElevationFilter()
    ele.SetInputConnection(sphere.GetOutputPort())
    ele.SetLowPoint((bounds[1] + bounds[0]) / 2.0,
                    (bounds[3] + bounds[2]) / 2.0,
                    -bounds[5])
    ele.SetHighPoint((bounds[1] + bounds[0]) / 2.0,
                     (bounds[3] + bounds[2]) / 2.0,
                     bounds[5])
    ele.Update()

    # Create a control mesh: six points just outside the sphere's bounds,
    # one beyond each axis-aligned face.
    pts = vtk.vtkPoints()
    pts.SetNumberOfPoints(6)
    pts.SetPoint(0,
                 bounds[0] - 0.1 * (bounds[1] - bounds[0]),
                 (bounds[3] + bounds[2]) / 2.0,
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(1,
                 bounds[1] + 0.1 * (bounds[1] - bounds[0]),
                 (bounds[3] + bounds[2]) / 2.0,
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(2,
                 (bounds[1] + bounds[0]) / 2.0,
                 bounds[2] - 0.1 * (bounds[3] - bounds[2]),
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(3,
                 (bounds[1] + bounds[0]) / 2.0,
                 bounds[3] + 0.1 * (bounds[3] - bounds[2]),
                 (bounds[5] + bounds[4]) / 2.0)
    pts.SetPoint(4,
                 (bounds[1] + bounds[0]) / 2.0,
                 (bounds[3] + bounds[2]) / 2.0,
                 bounds[4] - 0.1 * (bounds[5] - bounds[4]))
    pts.SetPoint(5,
                 (bounds[1] + bounds[0]) / 2.0,
                 (bounds[3] + bounds[2]) / 2.0,
                 bounds[5] + 0.1 * (bounds[5] - bounds[4]))

    # The eight triangular faces of the octahedron over those six points.
    tris = vtk.vtkCellArray()
    cells = [[2, 0, 4], [1, 2, 4], [3, 1, 4], [0, 3, 4], [0, 2, 5], [2, 1, 5], [1, 3, 5], [3, 0, 5]]
    for cell in cells:
        tris.InsertNextCell(3)
        for c in cell:
            tris.InsertCellPoint(c)
    pd = vtk.vtkPolyData()
    pd.SetPoints(pts)
    pd.SetPolys(tris)

    # Show the control mesh as a black wireframe.
    meshMapper = vtk.vtkPolyDataMapper()
    meshMapper.SetInputData(pd)
    meshActor = vtk.vtkActor()
    meshActor.SetMapper(meshMapper)
    meshActor.GetProperty().SetRepresentationToWireframe()
    meshActor.GetProperty().SetColor(colors.GetColor3d("Black"))

    # Deform the colored sphere with the control mesh.
    deform = vtk.vtkDeformPointSet()
    deform.SetInputData(ele.GetOutput())
    deform.SetControlMeshData(pd)
    deform.Update()

    # Displace the +z control point outward; Modified() marks the points
    # dirty so the deformation updates on the next render.
    controlPoint = pts.GetPoint(5)
    pts.SetPoint(5, controlPoint[0],
                 controlPoint[1],
                 bounds[5] + .8 * (bounds[5] - bounds[4]))
    pts.Modified()

    polyMapper = vtk.vtkPolyDataMapper()
    polyMapper.SetInputConnection(deform.GetOutputPort())
    polyActor = vtk.vtkActor()
    polyActor.SetMapper(polyMapper)

    renderer = vtk.vtkRenderer()
    renWin = vtk.vtkRenderWindow()
    renWin.AddRenderer(renderer)
    iren = vtk.vtkRenderWindowInteractor()
    iren.SetRenderWindow(renWin)

    renderer.AddActor(polyActor)
    renderer.AddActor(meshActor)
    renderer.GetActiveCamera().SetPosition(1, 1, 1)
    renderer.ResetCamera()
    renderer.SetBackground(colors.GetColor3d("bkg"))
    renWin.SetSize(300, 300)
    renWin.Render()
    iren.Start()
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Meshes/DeformPointSet.py | DeformPointSet.py | py | 3,526 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "vtk.vtkNamedColors",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "vtk.vtkSphereSource",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "vtk.vtkElevationFilter",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "vtk.vtkP... |
71578941543 | #!/usr/bin/env python
import os.path
import vtk
def get_program_parameters():
    """Parse the optional input file name and target reduction from argv."""
    import argparse
    epilogue = '''
This is an example using vtkDecimatePro to decimate input polydata, if provided, or a sphere otherwise.
'''
    arg_parser = argparse.ArgumentParser(description='Decimate polydata.',
                                         epilog=epilogue)
    arg_parser.add_argument('filename', nargs='?', default=None,
                            help='Optional input filename e.g Torso.vtp.')
    arg_parser.add_argument('reduction', nargs='?', type=float, default=.9,
                            help='Sets the decimation target reduction, (default is 0.9).')
    namespace = arg_parser.parse_args()
    return namespace.filename, namespace.reduction
def main():
    """Decimate an input polydata (or a default sphere) and show the
    original and decimated meshes side by side with a shared camera."""
    filePath, reduction = get_program_parameters()
    # Define colors
    colors = vtk.vtkNamedColors()
    backFaceColor = colors.GetColor3d("gold")
    inputActorColor = colors.GetColor3d("flesh")
    decimatedActorColor = colors.GetColor3d("flesh")
    colors.SetColor('leftBkg', [0.6, 0.5, 0.4, 1.0])
    colors.SetColor('rightBkg', [0.4, 0.5, 0.6, 1.0])
    # Fall back to a generated sphere when no readable file was given.
    if filePath and os.path.isfile(filePath):
        inputPolyData = ReadPolyData(filePath)
        if not inputPolyData:
            inputPolyData = GetSpherePD()
    else:
        inputPolyData = GetSpherePD()
    print("Before decimation")
    print(f"There are {inputPolyData.GetNumberOfPoints()} points.")
    print(f"There are {inputPolyData.GetNumberOfPolys()} polygons.")
    # Run the decimation, preserving mesh topology.
    decimate = vtk.vtkDecimatePro()
    decimate.SetInputData(inputPolyData)
    decimate.SetTargetReduction(reduction)
    decimate.PreserveTopologyOn()
    decimate.Update()
    # ShallowCopy detaches the result from the pipeline for the report below.
    decimated = vtk.vtkPolyData()
    decimated.ShallowCopy(decimate.GetOutput())
    print("After decimation")
    print(f"There are {decimated.GetNumberOfPoints()} points.")
    print(f"There are {decimated.GetNumberOfPolys()} polygons.")
    print(f"Reduction: {(inputPolyData.GetNumberOfPolys() - decimated.GetNumberOfPolys()) / inputPolyData.GetNumberOfPolys()}")
    inputMapper = vtk.vtkPolyDataMapper()
    inputMapper.SetInputData(inputPolyData)
    # Gold back faces make holes/flipped normals visible on both actors.
    backFace = vtk.vtkProperty()
    backFace.SetColor(backFaceColor)
    inputActor = vtk.vtkActor()
    inputActor.SetMapper(inputMapper)
    inputActor.GetProperty().SetInterpolationToFlat()
    inputActor.GetProperty().SetColor(inputActorColor)
    inputActor.SetBackfaceProperty(backFace)
    decimatedMapper = vtk.vtkPolyDataMapper()
    decimatedMapper.SetInputData(decimated)
    decimatedActor = vtk.vtkActor()
    decimatedActor.SetMapper(decimatedMapper)
    decimatedActor.GetProperty().SetColor(decimatedActorColor)
    decimatedActor.GetProperty().SetInterpolationToFlat()
    decimatedActor.SetBackfaceProperty(backFace)
    # There will be one render window
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetSize(600, 300)
    # And one interactor
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(renderWindow)
    # Define viewport ranges
    # (xmin, ymin, xmax, ymax)
    leftViewport = [0.0, 0.0, 0.5, 1.0]
    rightViewport = [0.5, 0.0, 1.0, 1.0]
    # Setup both renderers
    leftRenderer = vtk.vtkRenderer()
    renderWindow.AddRenderer(leftRenderer)
    leftRenderer.SetViewport(leftViewport)
    leftRenderer.SetBackground((colors.GetColor3d('leftBkg')))
    rightRenderer = vtk.vtkRenderer()
    renderWindow.AddRenderer(rightRenderer)
    rightRenderer.SetViewport(rightViewport)
    rightRenderer.SetBackground((colors.GetColor3d('rightBkg')))
    # Original mesh on the left, decimated mesh on the right.
    leftRenderer.AddActor(inputActor)
    rightRenderer.AddActor(decimatedActor)
    # Shared camera looking down the -y axis
    camera = vtk.vtkCamera()
    camera.SetPosition (0, -1, 0)
    camera.SetFocalPoint (0, 0, 0)
    camera.SetViewUp (0, 0, 1)
    camera.Elevation(30)
    camera.Azimuth(30)
    leftRenderer.SetActiveCamera(camera)
    rightRenderer.SetActiveCamera(camera)
    leftRenderer.ResetCamera()
    leftRenderer.ResetCameraClippingRange()
    renderWindow.Render()
    renderWindow.SetWindowName('Decimation')
    interactor.Start()
def ReadPolyData(file_name):
    """Read a polydata file, selecting the reader from the file extension.

    Supported extensions: .ply, .vtp, .obj, .stl, .vtk, .g.
    Returns the vtkPolyData, or None for an unrecognized extension.
    """
    import os
    path, extension = os.path.splitext(file_name)
    extension = extension.lower()
    if extension == ".ply":
        reader = vtk.vtkPLYReader()
    elif extension == ".vtp":
        # Fixed: was vtk.vtkXMLpoly_dataReader, which does not exist and
        # raised AttributeError.
        reader = vtk.vtkXMLPolyDataReader()
    elif extension == ".obj":
        reader = vtk.vtkOBJReader()
    elif extension == ".stl":
        reader = vtk.vtkSTLReader()
    elif extension == ".vtk":
        # Fixed: was vtk.vtkpoly_dataReader, which does not exist either.
        reader = vtk.vtkPolyDataReader()
    elif extension == ".g":
        reader = vtk.vtkBYUReader()
    else:
        # Return a None if the extension is unknown.
        return None
    # vtkBYUReader takes its path through a different setter.
    if extension == ".g":
        reader.SetGeometryFileName(file_name)
    else:
        reader.SetFileName(file_name)
    reader.Update()
    return reader.GetOutput()
def GetSpherePD():
    """Build and return the vtkPolyData of a default-resolution sphere."""
    sphere_source = vtk.vtkSphereSource()
    sphere_source.SetThetaResolution(30)
    sphere_source.SetPhiResolution(15)
    sphere_source.Update()
    return sphere_source.GetOutput()
if __name__ == '__main__':
main()
| lorensen/VTKExamples | src/Python/Meshes/Decimation.py | Decimation.py | py | 5,683 | python | en | code | 319 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "vtk.vtkNamedColors",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.path.isfile",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.pat... |
70943557223 | from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('test_rdd').getOrCreate()
sc = spark.sparkContext
class TestRDD():
    """Smoke tests for core PySpark RDD operations.

    Relies on the module-level `spark` session and `sc` SparkContext;
    methods are invoked manually from __main__ rather than by a runner.
    """
    # Creations
    def test_create_from_dataframe(self):
        """An RDD derived from a DataFrame yields its Rows."""
        df = spark.range(10).toDF('id')
        rdd = df.rdd
        rows = rdd.collect()
        assert len(rows) == 10
        assert rows[9]['id'] == 9
    def test_create_from_collection(self):
        """parallelize() splits a list across partitions (glom shows the split)."""
        data = [1, 2, 3, 4]
        rdd = sc.parallelize(data, 2)
        list_1 = rdd.collect()
        assert list_1 == [1, 2, 3, 4]
        list_2 = rdd.glom().collect()
        assert list_2 == [[1, 2], [3, 4]]
    def test_create_from_file(self):
        # TODO: not implemented yet.
        pass
    # Transformations
    def test_map(self):
        """map() applies a function element-wise."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.map(lambda word: (word, word[0], len(word)))
        list_1 = rdd_1.collect()
        assert list_1[0] == ('The', 'T', 3)
    def test_filter(self):
        """filter() keeps the elements matching a predicate."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.map(lambda word: (word, word[0], len(word)))
        rdd_2 = rdd_1.filter(lambda record: record[2] == 5)
        list_2 = rdd_2.collect()
        assert list_2 == [('quick', 'q', 5), ('brown', 'b', 5), ('jumps', 'j', 5)]
    def test_sortBy(self):
        """sortBy() orders elements by a key function."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.sortBy(lambda word: len(word))
        list_1 = rdd_1.take(5)
        assert list_1 == ['The', 'fox', 'the', 'dog', 'over']
    # Partition Transformations
    def test_mapPartitions(self):
        """mapPartitions() transforms each partition's iterator as a whole."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        rdd_1 = rdd.mapPartitions(lambda part: [word[::-1] for word in part])
        list_1 = rdd_1.collect()
        assert list_1 == ['ehT', 'kciuq', 'nworb', 'xof', 'spmuj', 'revo', 'eht', 'yzal', 'god']
    def test_foreachPartition(self):
        """foreachPartition() runs a side-effecting function per partition."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        def func(partition):
            for word in partition:
                print(word[::-1])
        rdd.foreachPartition(func)
    # Actions
    def test_count(self):
        """count() returns the number of elements."""
        data = range(1, 5)
        rdd = sc.parallelize(data)
        cnt = rdd.count()
        assert cnt == 4
    def test_reduce(self):
        """reduce() folds the elements with a binary function."""
        data = range(1, 5)
        rdd = sc.parallelize(data)
        product = rdd.reduce(lambda x, y: x * y)
        assert product == 24
    # Pair RDDs
    def test_keyBy_and_mapValues(self):
        """keyBy()/mapValues() build and transform (key, value) pairs."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        pair_rdd = sc.parallelize(words).keyBy(lambda word: word.lower()[0])
        rdd_1 = pair_rdd.mapValues(lambda word: word.upper())
        list_1 = rdd_1.take(3)
        assert list_1 == [('t', 'THE'), ('q', 'QUICK'), ('b', 'BROWN')]
        list_2 = rdd_1.keys().collect()
        assert list_2 == ['t', 'q', 'b', 'f', 'j', 'o', 't', 'l', 'd']
        list_3 = rdd_1.values().collect()
        assert list_3[0] == 'THE'
    def test_countByKey(self):
        """countByKey() tallies occurrences per key on the driver."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        pair_rdd = sc.parallelize(words).map(lambda word: (word.lower()[0], word.upper()))
        d = pair_rdd.countByKey()
        assert list(d.items()) == [('t', 2), ('q', 1), ('b', 1), ('f', 1), ('j', 1), ('o', 1), ('l', 1), ('d', 1)]
    def test_reduceByKey(self):
        """reduceByKey() merges the values sharing a key."""
        pair_rdd = sc.parallelize([('a', 1), ('b', 2), ('c', 3), ('b', 2), ('a', 1)])
        rdd_1 = pair_rdd.reduceByKey(lambda x, y: x*y)
        list_1 = rdd_1.collect()
        assert list_1 == [('a', 1), ('b', 4), ('c', 3)]
    # Broadcast Variable
    def test_BV(self):
        """Broadcast variables ship a read-only lookup table to executors."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        bv_data = {'e': 5, 'j': 10, 'o': 15, 't': 20, 'y': 25}
        bv = sc.broadcast(bv_data)
        bv_value = bv.value
        rdd_1 = rdd.map(lambda word: bv_value.get(word.lower()[0], -1))
        list_1 = rdd_1.collect()
        assert list_1 == [20, -1, -1, -1, 10, 15, 20, -1, -1]
    # Accumulator
    def test_accumulator(self):
        """Accumulators aggregate executor-side counts back on the driver."""
        words = 'The quick brown fox jumps over the lazy dog'.split(' ')
        rdd = sc.parallelize(words, 2)
        first_acc = sc.accumulator(value=0)
        def func(word):
            if len(word) == 3:
                first_acc.add(1)
        rdd.foreach(func)
        assert first_acc.value == 4
if __name__ == '__main__':
test = TestRDD()
# Call a method here
test.test_accumulator()
spark.stop() | bablookr/big-data-experiments | pyspark-experiments/test/test_rdd.py | test_rdd.py | py | 4,725 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession.builder.appName",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 3,
"usage_type": "... |
3082899682 | import json
from hsfs import util
import humps
class TrainingDatasetSplit:
    """One named split (train/validation/test) of a training dataset.

    A split is either percentage-based (RANDOM_SPLIT) or bounded by a
    time window (TIME_SERIES_SPLIT).
    """

    TIME_SERIES_SPLIT = "TIME_SERIES_SPLIT"
    RANDOM_SPLIT = "RANDOM_SPLIT"
    TRAIN = "train"
    VALIDATION = "validation"
    TEST = "test"

    def __init__(
        self,
        name,
        split_type,
        percentage=None,
        start_time=None,
        end_time=None,
        **kwargs
    ):
        # Extra keyword arguments from REST payloads are accepted and ignored.
        self._name = name
        self._percentage = percentage
        self._split_type = split_type
        self._start_time = start_time
        self._end_time = end_time

    @property
    def name(self):
        """Name of the split, e.g. "train"."""
        return self._name

    @name.setter
    def name(self, name):
        self._name = name

    @property
    def percentage(self):
        """Fraction of the data in this split (random splits only)."""
        return self._percentage

    @percentage.setter
    def percentage(self, percentage):
        self._percentage = percentage

    @property
    def split_type(self):
        """Either RANDOM_SPLIT or TIME_SERIES_SPLIT."""
        return self._split_type

    @split_type.setter
    def split_type(self, split_type):
        self._split_type = split_type

    @property
    def start_time(self):
        """Window start (time-series splits only)."""
        return self._start_time

    @start_time.setter
    def start_time(self, start_time):
        self._start_time = start_time

    @property
    def end_time(self):
        """Window end (time-series splits only)."""
        return self._end_time

    @end_time.setter
    def end_time(self, end_time):
        self._end_time = end_time

    def json(self):
        """Serialize to a JSON string with the feature-store encoder."""
        return json.dumps(self, cls=util.FeatureStoreEncoder)

    def to_dict(self):
        """Camel-cased dict representation for the REST API."""
        return {
            "name": self._name,
            "percentage": self._percentage,
            "splitType": self._split_type,
            "startTime": self._start_time,
            "endTime": self._end_time,
        }

    @classmethod
    def from_response_json(cls, json_dict):
        """Build a split from a (camelCased) REST response payload."""
        decamelized = humps.decamelize(json_dict)
        return cls(
            name=decamelized["name"],
            split_type=decamelized.get("split_type", cls.RANDOM_SPLIT),
            percentage=decamelized.get("percentage"),
            start_time=decamelized.get("start_time"),
            end_time=decamelized.get("end_time"),
        )
| logicalclocks/feature-store-api | python/hsfs/training_dataset_split.py | training_dataset_split.py | py | 2,210 | python | en | code | 50 | github-code | 36 | [
{
"api_name": "json.dumps",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "hsfs.util.FeatureStoreEncoder",
"line_number": 69,
"usage_type": "attribute"
},
{
"api_name": "hsfs.util",
"line_number": 69,
"usage_type": "name"
},
{
"api_name": "humps.decamel... |
72489930665 | import numpy as np
import itertools
import argparse
cards = ['A', 'K', 'Q', 'J', '10', '9', '8', '7', '6', '5', '4', '3', '2']
suits = ['S', 'H', 'C', 'D']
class Card:
    """A single playing card: a rank string (val) and a suit letter."""

    def __init__(self, val, suit):
        self.val = val
        self.suit = suit

    def __str__(self):
        return ' '.join((self.val, self.suit))
class Hand:
    """A two-card starting hand plus a tally of the final hand types it
    made across simulated deals."""

    def __init__(self, card1, card2):
        self.card1 = card1
        self.card2 = card2
        # hand rank -> [occurrence count, display name]
        self.hand_counts = {
            9 : [0, 'Straight Flush'],
            8 : [0, 'Four of a Kind'],
            7 : [0, 'Full House'],
            6 : [0, 'Flush'],
            5 : [0, 'Straight'],
            4 : [0, 'Three of a Kind'],
            3 : [0, 'Two Pair'],
            2 : [0, 'Pair'],
            1 : [0, 'High Card'],
        }

    def count(self, n):
        """Record one occurrence of the hand type with rank *n* (1-9)."""
        self.hand_counts[n][0] += 1

    def __str__(self):
        """Summarize the tally as percentages of all recorded hands."""
        res = f'{self.card1} {self.card2} makes:\n'
        pct = np.asarray([_[0] for _ in self.hand_counts.values()], dtype=float)
        total = pct.sum()
        # Guard against division by zero (and the resulting NaNs) when no
        # hands have been tallied yet: report 0.0% across the board.
        if total > 0:
            pct = pct/total*100
        for i, hand in enumerate(self.hand_counts.values()):
            res += f'{hand[1]}: {round(pct[i], 4)}%\n'
        return res
value = {c:13-i for i, c in enumerate(cards)}
def high_card(cards):
    """Return the numeric rank of the highest card among *cards*."""
    return max(value[c.val] for c in cards)
def same_suit(cards):
    """True when every card in *cards* shares one suit."""
    suits_seen = {c.suit for c in cards}
    return len(suits_seen) == 1
def is_straight(cards):
    """True when the 5 cards form a straight.

    Fix: the original ace-handling branch required the *smallest* sorted
    value to be 13 (ace) while the second-smallest was 1, which is
    impossible after an ascending sort — so A-2-3-4-5 (the "wheel") was
    never recognized. It is special-cased here. (Note: like the original,
    ranking still uses high_card(), so a wheel is scored by its ace.)
    """
    vals = sorted(value[c.val] for c in cards)
    # Rank values: 2 -> 1, 3 -> 2, ..., 5 -> 4, A -> 13.
    if vals == [1, 2, 3, 4, 13]:
        return True
    return all(b == a + 1 for a, b in zip(vals, vals[1:]))
def is_4_of_a_kind(cards):
    """Exactly two distinct ranks, the larger group of four."""
    _, counts = get_same_cards(cards)
    return bool(len(counts) == 2 and counts.max() == 4)

def is_full_house(cards):
    """Exactly two distinct ranks, the larger group of three."""
    _, counts = get_same_cards(cards)
    return bool(len(counts) == 2 and counts.max() == 3)

def is_trio(cards):
    """Three distinct ranks, one appearing three times."""
    _, counts = get_same_cards(cards)
    return bool(len(counts) == 3 and counts.max() == 3)

def is_2_pair(cards):
    """Three distinct ranks, none more than twice: two pairs plus a kicker."""
    _, counts = get_same_cards(cards)
    return bool(len(counts) == 3 and counts.max() == 2)

def is_pair(cards):
    """Four distinct ranks: exactly one pair."""
    _, counts = get_same_cards(cards)
    return bool(len(counts) == 4)
def get_same_cards(cards):
    """Return (unique rank values, per-rank counts) for *cards*."""
    rank_vals = np.asarray([value[c.val] for c in cards])
    return np.unique(rank_vals, return_counts=True)
def get_val(vals, counts, c):
    """Ranks whose multiplicity equals *c*, sorted high-to-low."""
    matching = vals[counts == c]
    return sorted(matching, reverse=True)
def hand(cards):
    """Classify a 5-card hand as (rank, tiebreaker ranks).

    Rank runs 9 (straight flush) down to 1 (high card); the tuples
    compare correctly with max() / <.
    """
    hc = high_card(cards)
    vals, counts = get_same_cards(cards)
    flush = same_suit(cards)
    straight = is_straight(cards)
    if flush and straight:
        return (9, [hc])
    if is_4_of_a_kind(cards):
        return (8, get_val(vals, counts, 4))
    if is_full_house(cards):
        return (7, get_val(vals, counts, 3) + get_val(vals, counts, 2))
    if flush:
        return (6, [hc])
    if straight:
        return (5, [hc])
    if is_trio(cards):
        return (4, get_val(vals, counts, 3) + get_val(vals, counts, 1))
    if is_2_pair(cards):
        return (3, get_val(vals, counts, 2) + get_val(vals, counts, 1))
    if is_pair(cards):
        return (2, get_val(vals, counts, 2) + get_val(vals, counts, 1))
    return (1, get_val(vals, counts, 1))
def get_best_hand(all_cards):
    """Best 5-card hand obtainable from *all_cards* (7 at showdown)."""
    return max(hand(combo) for combo in itertools.combinations(all_cards, 5))
def draw_cards(Deck, n):
    """Remove and return the top *n* cards of *Deck* (mutates the deck)."""
    drawn = [Deck.pop(0) for _ in range(n)]
    return drawn
def comp_two_hands(hand1, hand2, Deck):
    """Deal a 5-card board from *Deck* and compare two hole-card hands.

    Returns (-1, best1, best2) when hand1 wins, (1, ...) when hand2
    wins, and (0, ...) on a tie.
    """
    board = draw_cards(Deck, 5)
    best1 = get_best_hand(hand1 + board)
    best2 = get_best_hand(hand2 + board)
    if best1 > best2:
        return -1, best1, best2
    if best1 < best2:
        return 1, best1, best2
    return 0, best1, best2
# Command-line interface:
#   --hand1 "A S & K H"  (required) hole cards for hand 1
#   --hand2 "Q D & J C"  hole cards for hand 2 (used with --comp)
#   --comp               head-to-head equity of hand1 vs hand2
#   --overall_equity     equity of hand1 vs a random opponent hand
parser = argparse.ArgumentParser()
parser.add_argument('--hand1', type=str, required=True)
parser.add_argument('--hand2', type=str)
parser.add_argument('--comp', action='store_true')
parser.add_argument('--overall_equity', action='store_true')
args = parser.parse_args()
def parse_hand(hand):
    """Parse 'A S & K H' into [['A', 'S'], ['K', 'H']]."""
    return [card.split(' ') for card in hand.split(' & ')]
def create_deck(remove_cards=[]):
    """Build the 52-card deck as Card objects, excluding *remove_cards*.

    Fix: entries arrive as ['val', 'suit'] lists from parse_hand(), but
    the original compared the tuple (card, suit) against those lists —
    which never matches in Python — so known hole cards were silently
    left in the deck and could be dealt again. Entries are normalized
    to tuples here before the membership test.
    """
    excluded = {tuple(rc) for rc in remove_cards}
    Deck = []
    for card in cards:
        for suit in suits:
            if (card, suit) in excluded:
                continue
            Deck.append(Card(card, suit))
    return Deck
if args.comp:
    # Head-to-head mode: Monte-Carlo equity of hand1 vs hand2 over
    # nseeds * nsims random boards.
    hand1 = parse_hand(args.hand1)
    hand2 = parse_hand(args.hand2)
    Deck = create_deck(hand1+hand2)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    hand2 = [f(_) for _ in hand2]
    hand1_count = 0
    hand2_count = 0
    tie_count = 0
    nseeds = 10
    nsims = 200
    Hand1 = Hand(*hand1)
    Hand2 = Hand(*hand2)
    for seed in range(nseeds):
        np.random.seed(seed)
        for sim in range(nsims):
            # Shuffle a copy so the master deck is never consumed.
            deck_shuffled = np.random.permutation(Deck).tolist()
            res, h1, h2 = comp_two_hands(hand1, hand2, deck_shuffled.copy())
            Hand1.count(h1[0])
            Hand2.count(h2[0])
            if res == -1:
                hand1_count += 1
            elif res == 1:
                hand2_count += 1
            else:
                tie_count += 1
    h1_win = hand1_count/nseeds/nsims*100
    h2_win = hand2_count/nseeds/nsims*100
    tie = tie_count/nseeds/nsims*100
    print(f'Hand1: {hand1[0]}, {hand1[1]} wins {h1_win}%')
    print(f'Hand2: {hand2[0]}, {hand2[1]} wins {h2_win}%')
    print(f'Tie happens: {tie}%')
    print(Hand1)
    print(Hand2)
elif args.overall_equity:
    # Overall-equity mode: hand1 vs a freshly drawn random opponent
    # hand each simulation.
    hand1 = parse_hand(args.hand1)
    Deck = create_deck(hand1)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    nseeds = 10
    nsims = 500
    hand1_count = 0
    hand2_count = 0
    tie_count = 0
    Hand1 = Hand(*hand1)
    for seed in range(nseeds):
        np.random.seed(seed)
        for i in range(nsims):
            # Pick two distinct random deck indices for the opponent.
            rnd_card = np.random.randint(0, len(Deck))
            rnd_card2 = np.random.randint(0, len(Deck))
            while rnd_card2 == rnd_card:
                rnd_card2 = np.random.randint(0, len(Deck))
            hand2 = [Deck[rnd_card], Deck[rnd_card2]]
            Hand2 = Hand(*hand2)
            # NOTE(review): the shuffled board deck still contains the
            # opponent's two cards, so the board can duplicate them —
            # looks like a bug; confirm intended behavior.
            deck_shuffled = np.random.permutation(Deck).tolist()
            res, h1, h2 = comp_two_hands(hand1, hand2, deck_shuffled.copy())
            Hand1.count(h1[0])
            Hand2.count(h2[0])
            if res == -1:
                hand1_count += 1
            elif res == 1:
                hand2_count += 1
            else:
                tie_count += 1
    h1_win = hand1_count/nseeds/nsims*100
    h2_win = hand2_count/nseeds/nsims*100
    tie = tie_count/nseeds/nsims*100
    print(f'Hand1: {hand1[0]}, {hand1[1]} wins {h1_win}%')
    print(f'Hand2 wins {h2_win}%')
    print(f'Tie happens: {tie}%')
    print(Hand1)
    # print(Hand2)
else:
    # Default mode: distribution of final hand types hand1 makes on
    # random 5-card boards.
    hand1 = parse_hand(args.hand1)
    Deck = create_deck(hand1)
    f = lambda x: Card(x[0], x[1])
    hand1 = [f(_) for _ in hand1]
    nseeds = 10
    nsims = 500
    Hand1 = Hand(*hand1)
    for seed in range(nseeds):
        np.random.seed(seed)
        for sim in range(nsims):
            deck_shuffled = np.random.permutation(Deck).tolist()
            table = draw_cards(deck_shuffled, 5)
            h1 = get_best_hand(table+hand1)
            Hand1.count(h1[0])
    print(Hand1)
| arpit-1110/Poker | poker_odds.py | poker_odds.py | py | 7,581 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.asarray",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 95,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
... |
6798346511 | import celery
import logging
import requests
from django.conf import settings
from ..models import Job
from ..helper import data_job_for_applicant
JOB_URL_CREATE = 'api/admin/'
JOB_URL_DETAIL = 'api/admin/{}/'
logger = logging.getLogger('celery-task')
class ApplicantJobMixin:
    """Shared plumbing for the applicant-job tasks: jobs-service URL
    construction and Job lookup."""

    host = settings.EXOLEVER_HOST + settings.SERVICE_JOBS_HOST
    headers = {'USERNAME': settings.AUTH_SECRET_KEY}

    def get_url(self, uuid=None):
        """List/create endpoint when *uuid* is None, detail endpoint otherwise."""
        if uuid is None:
            return self.host + JOB_URL_CREATE
        return self.host + JOB_URL_DETAIL.format(str(uuid))

    def get_job(self, *args, **kwargs):
        """Fetch the Job for kwargs['job_id']; log and abort when missing."""
        try:
            return Job.objects.get(id=kwargs.get('job_id'))
        except Job.DoesNotExist:
            logger.error('Job does not exist')
            raise Exception()
class ApplicantJobCreate(ApplicantJobMixin, celery.Task):
    name = 'ApplicantJobCreate'

    def run(self, *args, **kwargs):
        """Create the job in the remote jobs service and store the UUID
        it assigns back on the local Job."""
        if settings.POPULATOR_MODE:
            return
        job = self.get_job(*args, **kwargs)
        url = self.get_url()
        payload = data_job_for_applicant(job.applicant)
        try:
            response = requests.post(url, json=payload, headers=self.headers)
            assert response.status_code == requests.codes.created
        except AssertionError:
            # Unexpected status code: log and retry with backoff.
            logger.error('Exception: {}-{}'.format(response.content, url))
            self.retry(countdown=120, max_retries=20)
        else:
            job.uuid = response.json().get('uuid')
            job.save()
class ApplicantJobUpdate(ApplicantJobMixin, celery.Task):
    name = 'ApplicantJobUpdate'

    def run(self, *args, **kwargs):
        """Push the current applicant data to the remote job record."""
        if settings.POPULATOR_MODE:
            return
        job = self.get_job(*args, **kwargs)
        url = self.get_url(uuid=str(job.uuid))
        payload = data_job_for_applicant(job.applicant)
        try:
            response = requests.put(url, json=payload, headers=self.headers)
            assert response.status_code == requests.codes.ok
        except AssertionError:
            # Unexpected status code: log and retry with backoff.
            logger.error('Exception: {}-{}'.format(response.content, url))
            self.retry(countdown=120, max_retries=20)
class ApplicantJobDelete(ApplicantJobMixin, celery.Task):
    name = 'ApplicantJobDelete'

    def run(self, *args, **kwargs):
        """Delete the remote job record identified by kwargs['job_uuid']."""
        uuid = kwargs.get('job_uuid', None)
        if settings.POPULATOR_MODE or uuid is None:
            return
        url = self.get_url(uuid=uuid)
        try:
            response = requests.delete(url, headers=self.headers)
            assert response.status_code == requests.codes.no_content
        except AssertionError:
            # Unexpected status code: log and retry with backoff.
            logger.error('Exception: {}-{}'.format(response.content, url))
            self.retry(countdown=120, max_retries=20)
| tomasgarzon/exo-services | service-exo-opportunities/jobs/tasks/applicant.py | applicant.py | py | 2,899 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.EXOLEVER_HOST",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 18,
"usage_type": "name"
},
{
"ap... |
16645056447 | import re
import pandas as pd
pd.set_option('display.max_rows', None)
from pathlib import Path
file_path = Path('texts/wf_anthology.txt')
# Read in the text file as a string
with open(file_path, 'r') as f:
    text = f.read()
# Pattern: '##' section title, blank line, author name, blank line, intro.
pattern = r'^##(.+?)\n\n(.+?)\n\n((?:.|\n)*?)\n\n'
# Rebuild the anthology one section at a time.
new_text = ''
for match in re.finditer(pattern, text, re.MULTILINE | re.DOTALL):
    # extract the relevant information
    title = match.group(1)
    author = match.group(2)
    intro = match.group(3)
    # Strip a leading repeat of the author's name from the intro.
    # Fix: re.escape() prevents regex metacharacters in the name
    # (e.g. '.', '(') from corrupting or breaking the pattern.
    new_intro = re.sub(r'^' + re.escape(author) + r'(?:.|\n)*?\n\n', '', intro)
    # concatenate the modified section to the new text
    new_text += '##' + title + '\n\n' + author + '\n\n' + new_intro
with open('wf_anthology_manually_tidied_v1.txt', 'w') as outfile:
    # Fix: ''.join(new_text) on a str was a pointless character-wise no-op.
    outfile.write(new_text)
'''
# Split the text by the '##' delimiter and extract the text content
blocks = []
for block in re.split(r'(?m)^##', text)[1:]:
# Extract the story name and author name
match = re.match(r'(.*?)\n\n(.*?)\n\n', block, re.DOTALL)
if match:
story_name = match.group(1)
author_name = match.group(2).replace('\n', '')
else:
story_name = ''
author_name = ''
# Extract only the text content
text_content = re.sub(r'(.*?)\n\n(.*?)\n', '', block, re.DOTALL).strip().replace('\n', '')
# Add the block to the list of dictionaries
blocks.append({'story_name': story_name, 'author_name': author_name, 'text': text_content})
# Create a pandas DataFrame from the list of dictionaries
df = pd.DataFrame(blocks)
print(df)
'''
| kspicer80/weird_fiction_experiments | author_story_similarity.py | author_story_similarity.py | py | 1,727 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.finditer",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_num... |
20360966046 | from tkinter import *
from tkinter import ttk
from tkinter.filedialog import askdirectory
from tkinter.filedialog import askopenfilename
from PIL import Image
# =================================================================================
def convertion():
    """Convert the image chosen in t1 and save it into the directory from
    t2 under the name from t3 with the extension selected in `types`.

    Fix: the original read every widget into u_path/d_path/name/p_type,
    ignored those locals, and re-read the widgets; the dead locals are
    removed and the values read once are actually used.
    """
    source_path = t1.get()
    dest_dir = t2.get()
    save_name = t3.get()
    extension = types.get()
    # Pillow infers the output format from the file extension.
    image = Image.open(source_path)
    image.save(rf"{dest_dir}\{save_name}{extension}")
def clear():
    """Blank out every entry field, including the type combobox."""
    for field in (t1, t2, t3, types):
        field.delete(0, 'end')
def brsfnc():
    """Pick a destination folder and show it in t2."""
    location = askdirectory()
    # Deleting from an already-empty entry is a no-op, so no branch needed.
    t2.delete(0, 'end')
    t2.insert(0, location)
def brspic():
    """Ask for a source image file and place its path in the t1 entry."""
    f_loc = askopenfilename()
    # delete-then-insert handles both the empty and non-empty cases.
    t1.delete(0, 'end')
    t1.insert(0, f_loc)
# =================================================================================
# ---- Build the main window -------------------------------------------------
win = Tk()
win.title("Image Converter")
# NOTE(review): hard-coded icon path; Tk raises TclError if it is missing.
win.iconbitmap(r"C:\Users\Public\Pictures\Sample Pictures\Treetog-Junior-Monitor-desktop.ico")
# Fixed, non-resizable 900x500 window.
win.geometry("900x500")
win.maxsize(900,500)
win.minsize(900,500)
win['bg']="#83a2f2"
heading = Label(win,text="Image Converter",font=("verdana",35,"bold"),bg="#83a2f2",fg="gold")
heading.place(x=150,y=10)
# Source image path row (l1 is None: grid() returns None, label kept by Tk).
l1 = Label(win,text="Enter The Image Path",font=("verdana",15,"bold")).grid(row=0,column=0,padx=20,pady=120)
t1 = Entry(win,width=25,borderwidth=5,font=(("verdana",15,"bold")))
t1.grid(row=0,column=1,pady=120)
brs = Button(win,text="Browse File",font=("verdan",8,"bold"),borderwidth=5,width=10,command=brspic)
brs.place(x=660,y=120)
# Destination folder row.
l2 = Label(win,text="Enter Saving Path",font=("verdana",15,"bold")).place(x=20,y=200)
t2 = Entry(win,width=25,borderwidth=5,font=(("verdana",15,"bold")))
t2.place(x=293,y=200)
brsbtn = Button(win,text="Browse Folder",font=("verdan",8,"bold"),borderwidth=5,width=14,command=brsfnc)
brsbtn.place(x=660,y=200)
# Output file-name row.
l3 = Label(win,text="Enter Saving Name",font=("verdana",15,"bold")).place(x=20,y=280)
t3 = Entry(win,width=20,borderwidth=5,font=(("verdana",10,"bold")))
t3.place(x=293,y=280)
combo = ttk.Label(win, text="Enter image type",font=("verdana",14,"bold"))
combo.place(x=20,y=360)
types = ttk.Combobox(win,width = 27,font=("verdana",12,"bold"))
# Fixed: '.jgp' was a typo for '.jpg' (Pillow cannot infer a format
# from the '.jgp' extension, so saving would fail).
types['values']=('.jpg','.png','.ico','.jpeg','.gif')
types.place(x=293,y=360)
types.current()
b1 = Button(win,text="Convert",font=("verdan",12,"bold"),borderwidth=5,width=12,command=convertion)
b1.place(x=730,y=370)
# Renamed from b1: the original reassigned b1 for a second, different button.
b2 = Button(win,text="Clear",font=("verdan",12,"bold"),borderwidth=5,width=12,command=clear)
b2.place(x=730,y=440)
win.mainloop() | sagnik403/Image-Converter-Tkinter | main.py | main.py | py | 2,752 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tkinter.file... |
30714122215 | import math
from re import L
import cv2
import mediapipe as mp
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from moviepy.editor import VideoFileClip
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# Hard-coded input/output locations for this subject and trial.
subjectpath = '/Volumes/Transcend/data/2020醒吾華橋 科技部/DT006-f/'
videopath = subjectpath + 'DT006 t1 前測 側.MP4'
folderpath = 'DT006_t1_S_1_1/'
txtpath = subjectpath + folderpath + 'log.txt'
cap = cv2.VideoCapture(videopath)
clip = VideoFileClip(videopath)
# Property ids 3 / 4 are CAP_PROP_FRAME_WIDTH / CAP_PROP_FRAME_HEIGHT.
width =cap.get(3)
height =cap.get(4)
TotalFrame = cap.get(cv2.CAP_PROP_FRAME_COUNT)
# Per-frame records: frame index and left/right ankle x-positions (pixels).
framelist = []
RAnklePosX = []
LAnklePosX = []
# Run MediaPipe Pose on every frame and record horizontal ankle positions.
# NOTE(review): when cap.read() fails the loop "continue"s instead of
# breaking, so termination relies entirely on the frame-count check at
# the bottom -- confirm this ever fires if the last read fails.
with mp_pose.Pose(
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5) as pose:
  while(True):
    success, image = cap.read()
    if not success:
      continue
    # Mark read-only for a (small) performance win during inference.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    results = pose.process(image)
    # Draw the pose annotation on the image.
    image.flags.writeable = False
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    frame = cap.get(cv2.CAP_PROP_POS_FRAMES)
    framelist.append(frame)
    # NOTE(review): firstX is recomputed on *every* frame, so the
    # right-edge branch below can only trigger on frame 1 -- this looks
    # like it was meant to persist the frame-1 value; verify intent.
    if int(frame) == 1 and results.pose_landmarks:
      firstX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width # x measured from the right edge (right edge = 0)
    else:
      firstX = 5000 # sentinel: default to measuring from the left edge
    if firstX < 2500 and results.pose_landmarks:
      LankleX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width # x measured from the right edge (right edge = 0)
      RankleX = width - results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ANKLE].x * width
    elif results.pose_landmarks:
      LankleX = results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_ANKLE].x * width # x measured from the left edge (left edge = 0)
      RankleX = results.pose_landmarks.landmark[mp_pose.PoseLandmark.RIGHT_ANKLE].x * width
    else:
      # No landmarks detected this frame: record 0 as a placeholder.
      LankleX = 0
      RankleX = 0
    LAnklePosX.append(LankleX)
    RAnklePosX.append(RankleX)
    # Stop once the last frame of the video has been processed.
    if cv2.waitKey(5) & int(frame) == int(TotalFrame):
      break
# Smooth both trajectories with an Amp-frame sliding mean; the first
# Amp frames have no complete window and are dropped from the output.
Amp = 30 #Arithmetic mean parameter
LAm_X = []
RAm_X = []
Lcal_X = 0
Rcal_X = 0
# Seed the running sums with the first Amp samples.
for i in range(Amp):
    Lcal_X = Lcal_X + LAnklePosX[i]
    Rcal_X = Rcal_X + RAnklePosX[i]
# Each appended value is the mean of frames [i-Amp, i-1]; the window is
# advanced *after* appending (drop the oldest sample, add the newest).
for i in range(Amp, len(LAnklePosX)):
    LAm_X.append(Lcal_X / Amp)
    RAm_X.append(Rcal_X / Amp)
    Lcal_X = Lcal_X - LAnklePosX[i-Amp]
    Lcal_X = Lcal_X + LAnklePosX[i]
    Rcal_X = Rcal_X - RAnklePosX[i-Amp]
    Rcal_X = Rcal_X + RAnklePosX[i]
Am_framelist = framelist[Amp:]
# Plot and save the smoothed ankle trajectories.
plt.figure()
plt.plot(Am_framelist, RAm_X)
plt.plot(Am_framelist, LAm_X)
plt.savefig(subjectpath + folderpath + 'ankle_trajectory.png')
# First差分 -> frame-to-frame displacement (velocity proxy) per ankle.
Ldx_list = [0]
Rdx_list = [0]
for i in range(1,len(Am_framelist)):
    Rdx = RAm_X[i]-RAm_X[i-1]
    Ldx = LAm_X[i]-LAm_X[i-1]
    Rdx_list.append(Rdx)
    Ldx_list.append(Ldx)
plt.figure()
plt.plot(Am_framelist, Rdx_list)
plt.plot(Am_framelist, Ldx_list)
plt.savefig(subjectpath + folderpath + 'delta_ankle.png')
# Ankle peak detection: find frames where each ankle's displacement is a
# strict maximum over a +/-85-frame window (and the smoothed position is
# past 1000 px), i.e. candidate slice points.
Rslice_frame = []
Lslice_frame = []
label = []
# Rmaxframe = 1
# Rmax = 0
# for i in range(1, 85):
#     if Rdx_list[i] > Rmax:
#         Rmax = Rdx_list[i]
#         Rmaxframe = i
# Rslice_frame.append(Am_framelist[Rmaxframe])
# Lmaxframe = 1
# Lmax = 0
# for i in range(1, 85):
#     if Ldx_list[i] > Lmax:
#         Lmax = Ldx_list[i]
#         Lmaxframe = i
# Lslice_frame.append(Am_framelist[Lmaxframe])
for i in range(85,len(Am_framelist)-85):
    Rhighest = 1
    Lhighest = 1
    # Reject i unless Rdx_list[i] strictly dominates all 84 neighbors
    # on each side and the position threshold is met.
    for j in range(1,85):
        if RAm_X[i] <= 1000 or Rdx_list[i] <= Rdx_list[i+j] or Rdx_list[i] <= Rdx_list[i-j]:
            Rhighest = 0
            break
    if Rhighest == 1:
        Rslice_frame.append(Am_framelist[i])
    for k in range(1,85):
        if LAm_X[i] <= 1000 or Ldx_list[i] <= Ldx_list[i+k] or Ldx_list[i] <= Ldx_list[i-k]:
            Lhighest = 0
            break
    if Lhighest == 1:
        Lslice_frame.append(Am_framelist[i])
# The trailing 85 frames cannot host a full window, so just take the
# largest displacement in that tail as one final slice point per side.
Rmaxframe = len(Am_framelist)-85
Rmax = 0
for i in range(len(Am_framelist)-85, len(Am_framelist)):
    if Rdx_list[i] > Rmax:
        Rmax = Rdx_list[i]
        Rmaxframe = i
Rslice_frame.append(Am_framelist[Rmaxframe])
Lmaxframe = len(Am_framelist)-85
Lmax = 0
for i in range(len(Am_framelist)-85, len(Am_framelist)):
    if Ldx_list[i] > Lmax:
        Lmax = Ldx_list[i]
        Lmaxframe = i
Lslice_frame.append(Am_framelist[Lmaxframe])
print("Slice frame calculated by the right ankle:", Rslice_frame)
print("Slice frame calculated by the left ankle::", Lslice_frame)
# Merge the two sorted lists. If a left and a right frame are within
# +/-100 frames of each other, keep only the earlier one (standard
# two-pointer merge with de-duplication; label records which side won).
Kslice_frame = []
i=0
j=0
while i < len(Lslice_frame) and j < len(Rslice_frame):
    if abs(Lslice_frame[i] - Rslice_frame[j]) <= 100:
        # Near-coincident pair: take the earlier frame, consume both.
        if Lslice_frame[i] <= Rslice_frame[j]:
            Kslice_frame.append(Lslice_frame[i])
            label.append("L")
        else:
            Kslice_frame.append(Rslice_frame[j])
            label.append("R")
        i += 1
        j += 1
    elif Lslice_frame[i] < Rslice_frame[j]:
        Kslice_frame.append(Lslice_frame[i])
        label.append("L")
        i += 1
    else:
        Kslice_frame.append(Rslice_frame[j])
        label.append("R")
        j += 1
print("total slice frame:", Kslice_frame)
# Persist the three lists for later inspection.
with open(txtpath, 'w') as f:
    f.write(f"Slice frame calculated by the right ankle: {Rslice_frame}\n")
    f.write(f"Slice frame calculated by the left ankle: {Lslice_frame}\n")
    f.write(f"Total sliced frame: {Kslice_frame}\n")
# Cut an 85-frame clip around each slice point (40 frames of lead-in)
# and write it as <n>.mp4 in the trial folder.
fps=clip.fps
n=1
b=1
l=1
r=1
for i in range(len(Kslice_frame)):
    start_frame = Kslice_frame[i]- 40
    # Clamp the clip end to the video length.
    if start_frame + 85 <= TotalFrame:
        end_frame = start_frame + 85
    else:
        end_frame = TotalFrame
    start_time = start_frame / fps
    print("start",start_time)
    end_time = end_frame / fps
    print("end",end_time)
    clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'{n}.mp4')
    n+=1
    # if i <= 3:
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'B{b}.mp4')
    #     b+=1
    # elif label[i] == "L":
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'L{l}.mp4')
    #     l+=1
    # elif label[i] == "R":
    #     clip.subclip(start_time, end_time).write_videofile(subjectpath + folderpath + f'R{r}.mp4')
    #     r+=1
# r+=1 | wenxxi/LESS-video-slicing | s_ankle_slicing.py | s_ankle_slicing.py | py | 6,489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mediapipe.solutions",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "mediapipe.solutions",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_nam... |
import time
import sys  # fixed: sys.argv was used below without importing sys (NameError)
t_start_script = time.time()
print(__name__)
import matplotlib.pyplot as plt
import numpy as np
# 10M random samples used as histogram input / timing workload.
a = np.random.randn(1000 * 100* 100)
print('start_time and prepare data:', time.time() - t_start_script)
print(a.shape)
print(sys.argv)
# >>>
#print('11123',a)
if __name__ == '__main__':
    t0 = time.time()
    # plt.plot([1, 2, 3, 4, 5, 1])
    plt.hist(a, bins=20,color='red')
    plt.xlabel('haha')
    plt.ylabel('y is lalala')
    # How long the plotting calls themselves took.
    print(time.time() - t0)
    print('full time', time.time() - t_start_script)
    plt.show()
| pyminer/pyminer | pyminer/packages/applications_toolbar/apps/cftool/test1.py | test1.py | py | 542 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "time.time",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "numpy.random.randn",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_num... |
15442911590 | import argparse
from . import completion_helpers
class ArgumentParser(argparse.ArgumentParser):
    """argparse.ArgumentParser extended with helpers for the arguments
    shared by the mesos-cli sub-commands."""

    def enable_print_header(self):
        """Register the -q flag that suppresses per-task headers."""
        self.add_argument(
            '-q', action='store_true',
            help="Suppresses printing of headers when multiple tasks are "
                 "being examined"
        )

    def task_argument(self, optional=False):
        """Register the positional task-id argument.

        When ``optional`` is true the argument may be omitted.
        """
        extra = {"nargs": "?"} if optional else {}
        arg = self.add_argument(
            'task', default="", type=str,
            help="ID of the task. May match multiple tasks (or all)",
            **extra)
        arg.completer = completion_helpers.task

    def file_argument(self):
        """Register the positional sandbox-file argument(s)."""
        arg = self.add_argument(
            'file', nargs="*", default=["stdout"],
            help="Path to the file inside the task's sandbox.")
        arg.completer = completion_helpers.file

    def path_argument(self):
        """Register the optional positional path argument."""
        arg = self.add_argument(
            'path', type=str, nargs="?", default="",
            help="""Path to view.""")
        arg.completer = completion_helpers.file
| mesosphere-backup/mesos-cli | mesos/cli/parser.py | parser.py | py | 1,067 | python | en | code | 116 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "attribute"
}
] |
17585289452 | import requests
from starwhale import Link, Image, Point, dataset, Polygon, MIMEType # noqa: F401
from starwhale.utils.retry import http_retry
PATH_ROOT = "https://starwhale-examples.oss-cn-beijing.aliyuncs.com/dataset/cityscapes"
ANNO_PATH = "disparity/train"
DATA_PATH_LEFT = "leftImg8bit/train"
DATA_PATH_RIGHT = "rightImg8bit/train"
SUFFIX_MASK = "_disparity.png"
SUFFIX_DATA_LEFT = "_leftImg8bit.png"
SUFFIX_DATA_RIGHT = "_rightImg8bit.png"
@http_retry
def request_link_json(anno_link):
    """Fetch *anno_link* over HTTP and decode the body as JSON.

    The @http_retry decorator retries transient failures; the 10 second
    timeout bounds each individual attempt.
    """
    return requests.get(anno_link, timeout=10).json()
def mask_image(_name, dir_name):
    """Build a starwhale Image referring to the PNG disparity annotation
    at <PATH_ROOT>/<ANNO_PATH>/<dir_name>/<_name>.

    The pixels are not downloaded here; only an OSS Link is recorded,
    with as_mask=True so viewers render it as a mask overlay.
    """
    return Image(
        display_name=_name,
        mime_type=MIMEType.PNG,
        as_mask=True,
        link=Link(uri=f"{PATH_ROOT}/{ANNO_PATH}/{dir_name}/{_name}"),
    )
def build_ds():
    """Build and commit the 'cityscapes_disparity' starwhale dataset.

    Walks the remote tree.json listing, and for every *_disparity.png
    annotation adds one row holding Links to the left/right 8-bit
    images plus the disparity mask. Nothing is downloaded; rows only
    reference the OSS bucket.
    """
    ds = dataset("cityscapes_disparity")
    ds.info["baseline"] = 22
    ds.info["homepage"] = "https://www.cityscapes-dataset.com"
    # tree.json mirrors a `tree -J` style directory listing.
    tree = request_link_json(f"{PATH_ROOT}/{ANNO_PATH}/tree.json")
    for d in tree:
        if d["type"] != "directory":
            continue
        dir_name = d["name"]
        for f in d["contents"]:
            if f["type"] != "file":
                continue
            _name = str(f["name"])
            # Only the disparity annotations drive row creation.
            if not _name.endswith(SUFFIX_MASK):
                continue
            disparity_mask = mask_image(_name, dir_name)
            # Common stem shared by the left/right camera images.
            name = _name.replace(SUFFIX_MASK, "")
            # NOTE(review): the linked files end in .png (see the SUFFIX_*
            # constants) but are declared MIMEType.JPEG -- confirm intent.
            right_image = Image(
                display_name=name,
                link=Link(
                    uri=f"{PATH_ROOT}/{DATA_PATH_RIGHT}/{dir_name}/{name}{SUFFIX_DATA_RIGHT}"
                ),
                mime_type=MIMEType.JPEG,
            )
            left_image = Image(
                display_name=name,
                link=Link(
                    uri=f"{PATH_ROOT}/{DATA_PATH_LEFT}/{dir_name}/{name}{SUFFIX_DATA_LEFT}"
                ),
                mime_type=MIMEType.JPEG,
            )
            ds.append(
                {
                    "left_image_8bit": left_image,
                    "right_image_8bit": right_image,
                    "disparity_mask": disparity_mask,
                }
            )
    ds.commit()
    # Re-open by URI as a sanity check that the commit is loadable.
    load_ds = dataset(ds.uri)
    print(load_ds.info)
    ds.close()
if __name__ == "__main__":
build_ds()
| star-whale/starwhale | example/datasets/cityscapes/disparity/dataset.py | dataset.py | py | 2,278 | python | en | code | 171 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "starwhale.utils.retry.http_retry",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "starwhale.Image",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "starwha... |
41551247368 | from importlib.resources import path
from kubernetes import client as kclient
from kubernetes import config
# Running inside the cluster, so use the in-cluster service account.
config.load_incluster_config()
v1 = kclient.CoreV1Api()
# Trainer Pod deletes itself
try:
    api_response = v1.delete_namespaced_pod(
        name='trainer', namespace='mlbuffet')
except Exception as e:
    # Fixed: the message previously blamed the wrong API method
    # (connect_delete_namespaced_pod_proxy); we call delete_namespaced_pod.
    print("Exception when calling CoreV1Api->delete_namespaced_pod: %s\n" % e)
| zylklab/mlbuffet | modules/trainer/apoptosis.py | apoptosis.py | py | 408 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "kubernetes.config.load_incluster_config",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "kubernetes.config",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "kubernetes.client.CoreV1Api",
"line_number": 6,
"usage_type": "call"
},
{
... |
22163777058 | #!/usr/bin/env python
import csv
import gzip
import os
import re
import sys
from pyproj import Transformer
# Some assessor rows are huge; lift the csv module's field size cap.
csv.field_size_limit(sys.maxsize)
# Captures the dollar amount following "Both Installment" in the HTML.
AMOUNT_REGEX = re.compile('Both Installment[\s\S]+?\$([\d,\.]+)')
# California Zone 3
# https://epsg.io/2227
transformer = Transformer.from_crs(2227, 4326)
def get_val(row, key):
    """Return row[key] parsed as a float, or 0 when the field is blank."""
    raw = row[key].strip()
    return float(raw) if raw else 0
# Stream the assessor CSV, geolocate each parcel, and join it with the
# scraped tax page to emit one output row per parcel.
with open('/home/ian/Downloads/Santa_Cruz_Assessor_Parcels.csv') as f_in, \
        open('./parse_output.csv', 'w') as f_out:
    reader = csv.DictReader(f_in)
    fieldnames = ['address', 'apn', 'longitude', 'latitude', 'tax', 'county']
    # NOTE(review): no writer.writeheader() call, so the output has no
    # header row -- confirm downstream consumers expect that.
    writer = csv.DictWriter(f_out, fieldnames=fieldnames)
    count = 0
    for row in reader:
        count += 1
        if count % 1000 == 0:
            print(count, '...')
        apn = row['APN']
        address = row['SITEADD']
        try:
            x_coord = float(row['XCOORD'])
            y_coord = float(row['YCOORD'])
        except (KeyError, ValueError, TypeError):
            # Narrowed from a bare except: only missing/garbled coords.
            print('-> bad coords')
            continue
        # State-plane (EPSG:2227) -> WGS84 (lat, lon).
        centroid = transformer.transform(x_coord, y_coord)
        print(count, apn, address, centroid)
        output_path = '/home/ian/code/prop13/scrapers/santa_cruz/scrape_output/%s.html' % (apn)
        if not os.path.exists(output_path):
            print('-> no scraped file')
            continue
        try:
            # Renamed from f_in: the original shadowed the CSV handle.
            with gzip.open(output_path, 'rt') as f_html:
                html = f_html.read()
        except (OSError, EOFError):
            print('--> bad file')
            continue
        # Fixed: if the regex did not match, .group() raised before
        # amount_str was bound and the except printed an unbound name.
        match = AMOUNT_REGEX.search(html)
        if match is None:
            print('--> Could not find tax amount')
            continue
        amount_str = match.group(1).replace(',', '')
        try:
            amount = float(amount_str)
        except ValueError:
            print('--> Could not parse float', amount_str)
            continue
        print('--> Paid', amount)
        writer.writerow({
            'address': address,
            'apn': apn,
            'latitude': centroid[0],
            'longitude': centroid[1],
            'tax': amount,
            'county': 'SCZ',
        })
| typpo/ca-property-tax | scrapers/santa_cruz/parse.py | parse.py | py | 2,041 | python | en | code | 89 | github-code | 36 | [
{
"api_name": "csv.field_size_limit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.maxsize",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyproj.Transformer.f... |
73744186345 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema changes for the lexicon app (2016-10-04):

    * default Meaning ordering becomes ["gloss"],
    * Meaning.elicitation is renamed to meaningSetIx,
    * integer field meaningSetMember (default 0) is added.
    """

    dependencies = [
        ('lexicon', '0112_auto_20160929_1340'),
    ]

    operations = [
        # Order Meaning querysets by gloss by default.
        migrations.AlterModelOptions(
            name='meaning',
            options={'ordering': ['gloss']},
        ),
        # Pure column rename; existing data is preserved.
        migrations.RenameField(
            model_name='meaning',
            old_name='elicitation',
            new_name='meaningSetIx',
        ),
        migrations.AddField(
            model_name='meaning',
            name='meaningSetMember',
            field=models.IntegerField(default=0),
        ),
    ]
| lingdb/CoBL-public | ielex/lexicon/migrations/0113_auto_20161004_1315.py | 0113_auto_20161004_1315.py | py | 682 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterModelOptions",
"line_number": 14,
"usage_type": "call"
... |
36947928289 | from __future__ import print_function
__revision__ = "src/engine/SCons/Tool/rpmutils.py bee7caf9defd6e108fc2998a2520ddb36a967691 2019-12-17 02:07:09 bdeegan"
import platform
import subprocess
import SCons.Util
# Start of rpmrc dictionaries (Marker, don't change or remove!)
os_canon = {
'AIX' : ['AIX','5'],
'AmigaOS' : ['AmigaOS','5'],
'BSD_OS' : ['bsdi','12'],
'CYGWIN32_95' : ['cygwin32','15'],
'CYGWIN32_NT' : ['cygwin32','14'],
'Darwin' : ['darwin','21'],
'FreeBSD' : ['FreeBSD','8'],
'HP-UX' : ['hpux10','6'],
'IRIX' : ['Irix','2'],
'IRIX64' : ['Irix64','10'],
'Linux' : ['Linux','1'],
'Linux/390' : ['OS/390','20'],
'Linux/ESA' : ['VM/ESA','20'],
'MacOSX' : ['macosx','21'],
'MiNT' : ['FreeMiNT','17'],
'NEXTSTEP' : ['NextStep','11'],
'OS/390' : ['OS/390','18'],
'OSF1' : ['osf1','7'],
'SCO_SV' : ['SCO_SV3.2v5.0.2','9'],
'SunOS4' : ['SunOS','4'],
'SunOS5' : ['solaris','3'],
'UNIX_SV' : ['MP_RAS','16'],
'VM/ESA' : ['VM/ESA','19'],
'machten' : ['machten','13'],
'osf3.2' : ['osf1','7'],
'osf4.0' : ['osf1','7'],
}
buildarch_compat = {
'alpha' : ['noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64'],
'ia64' : ['noarch'],
'm68k' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'ppc' : ['noarch','fat'],
'ppc32dy4' : ['noarch'],
'ppc64' : ['noarch','fat'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['noarch'],
'ppc8560' : ['noarch'],
'ppciseries' : ['noarch'],
'ppcpseries' : ['noarch'],
's390' : ['noarch'],
's390x' : ['noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9v'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['noarch'],
'sun4d' : ['noarch'],
'sun4m' : ['noarch'],
'sun4u' : ['noarch'],
'x86_64' : ['noarch'],
}
os_compat = {
'BSD_OS' : ['bsdi'],
'Darwin' : ['MacOSX'],
'FreeMiNT' : ['mint','MiNT','TOS'],
'IRIX64' : ['IRIX'],
'MiNT' : ['FreeMiNT','mint','TOS'],
'TOS' : ['FreeMiNT','MiNT','mint'],
'bsdi4.0' : ['bsdi'],
'hpux10.00' : ['hpux9.07'],
'hpux10.01' : ['hpux10.00'],
'hpux10.10' : ['hpux10.01'],
'hpux10.20' : ['hpux10.10'],
'hpux10.30' : ['hpux10.20'],
'hpux11.00' : ['hpux10.30'],
'hpux9.05' : ['hpux9.04'],
'hpux9.07' : ['hpux9.05'],
'mint' : ['FreeMiNT','MiNT','TOS'],
'ncr-sysv4.3' : ['ncr-sysv4.2'],
'osf4.0' : ['osf3.2','osf1'],
'solaris2.4' : ['solaris2.3'],
'solaris2.5' : ['solaris2.3','solaris2.4'],
'solaris2.6' : ['solaris2.3','solaris2.4','solaris2.5'],
'solaris2.7' : ['solaris2.3','solaris2.4','solaris2.5','solaris2.6'],
}
arch_compat = {
'alpha' : ['axp','noarch'],
'alphaev5' : ['alpha'],
'alphaev56' : ['alphaev5'],
'alphaev6' : ['alphapca56'],
'alphaev67' : ['alphaev6'],
'alphapca56' : ['alphaev56'],
'amd64' : ['x86_64','athlon','noarch'],
'armv3l' : ['noarch'],
'armv4b' : ['noarch'],
'armv4l' : ['armv3l'],
'armv4tl' : ['armv4l'],
'armv5tejl' : ['armv5tel'],
'armv5tel' : ['armv4tl'],
'armv6l' : ['armv5tejl'],
'armv7l' : ['armv6l'],
'atariclone' : ['m68kmint','noarch'],
'atarist' : ['m68kmint','noarch'],
'atariste' : ['m68kmint','noarch'],
'ataritt' : ['m68kmint','noarch'],
'athlon' : ['i686'],
'falcon' : ['m68kmint','noarch'],
'geode' : ['i586'],
'hades' : ['m68kmint','noarch'],
'hppa1.0' : ['parisc'],
'hppa1.1' : ['hppa1.0'],
'hppa1.2' : ['hppa1.1'],
'hppa2.0' : ['hppa1.2'],
'i370' : ['noarch'],
'i386' : ['noarch','fat'],
'i486' : ['i386'],
'i586' : ['i486'],
'i686' : ['i586'],
'ia32e' : ['x86_64','athlon','noarch'],
'ia64' : ['noarch'],
'milan' : ['m68kmint','noarch'],
'mips' : ['noarch'],
'mipsel' : ['noarch'],
'osfmach3_i386' : ['i486'],
'osfmach3_i486' : ['i486','osfmach3_i386'],
'osfmach3_i586' : ['i586','osfmach3_i486'],
'osfmach3_i686' : ['i686','osfmach3_i586'],
'osfmach3_ppc' : ['ppc'],
'parisc' : ['noarch'],
'pentium3' : ['i686'],
'pentium4' : ['pentium3'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc' : ['rs6000'],
'ppc32dy4' : ['ppc'],
'ppc64' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
'rs6000' : ['noarch','fat'],
's390' : ['noarch'],
's390x' : ['s390','noarch'],
'sh3' : ['noarch'],
'sh4' : ['noarch'],
'sh4a' : ['sh4'],
'sparc' : ['noarch'],
'sparc64' : ['sparcv9'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparcv8'],
'sparcv9v' : ['sparcv9'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['amd64','athlon','noarch'],
}
buildarchtranslate = {
'alphaev5' : ['alpha'],
'alphaev56' : ['alpha'],
'alphaev6' : ['alpha'],
'alphaev67' : ['alpha'],
'alphapca56' : ['alpha'],
'amd64' : ['x86_64'],
'armv3l' : ['armv3l'],
'armv4b' : ['armv4b'],
'armv4l' : ['armv4l'],
'armv4tl' : ['armv4tl'],
'armv5tejl' : ['armv5tejl'],
'armv5tel' : ['armv5tel'],
'armv6l' : ['armv6l'],
'armv7l' : ['armv7l'],
'atariclone' : ['m68kmint'],
'atarist' : ['m68kmint'],
'atariste' : ['m68kmint'],
'ataritt' : ['m68kmint'],
'athlon' : ['i386'],
'falcon' : ['m68kmint'],
'geode' : ['i386'],
'hades' : ['m68kmint'],
'i386' : ['i386'],
'i486' : ['i386'],
'i586' : ['i386'],
'i686' : ['i386'],
'ia32e' : ['x86_64'],
'ia64' : ['ia64'],
'milan' : ['m68kmint'],
'osfmach3_i386' : ['i386'],
'osfmach3_i486' : ['i386'],
'osfmach3_i586' : ['i386'],
'osfmach3_i686' : ['i386'],
'osfmach3_ppc' : ['ppc'],
'pentium3' : ['i386'],
'pentium4' : ['i386'],
'powerpc' : ['ppc'],
'powerppc' : ['ppc'],
'ppc32dy4' : ['ppc'],
'ppc64iseries' : ['ppc64'],
'ppc64pseries' : ['ppc64'],
'ppc8260' : ['ppc'],
'ppc8560' : ['ppc'],
'ppciseries' : ['ppc'],
'ppcpseries' : ['ppc'],
's390' : ['s390'],
's390x' : ['s390x'],
'sh3' : ['sh3'],
'sh4' : ['sh4'],
'sh4a' : ['sh4'],
'sparc64v' : ['sparc64'],
'sparcv8' : ['sparc'],
'sparcv9' : ['sparc'],
'sparcv9v' : ['sparc'],
'sun4c' : ['sparc'],
'sun4d' : ['sparc'],
'sun4m' : ['sparc'],
'sun4u' : ['sparc64'],
'x86_64' : ['x86_64'],
}
optflags = {
'alpha' : ['-O2','-g','-mieee'],
'alphaev5' : ['-O2','-g','-mieee','-mtune=ev5'],
'alphaev56' : ['-O2','-g','-mieee','-mtune=ev56'],
'alphaev6' : ['-O2','-g','-mieee','-mtune=ev6'],
'alphaev67' : ['-O2','-g','-mieee','-mtune=ev67'],
'alphapca56' : ['-O2','-g','-mieee','-mtune=pca56'],
'amd64' : ['-O2','-g'],
'armv3l' : ['-O2','-g','-march=armv3'],
'armv4b' : ['-O2','-g','-march=armv4'],
'armv4l' : ['-O2','-g','-march=armv4'],
'armv4tl' : ['-O2','-g','-march=armv4t'],
'armv5tejl' : ['-O2','-g','-march=armv5te'],
'armv5tel' : ['-O2','-g','-march=armv5te'],
'armv6l' : ['-O2','-g','-march=armv6'],
'armv7l' : ['-O2','-g','-march=armv7'],
'atariclone' : ['-O2','-g','-fomit-frame-pointer'],
'atarist' : ['-O2','-g','-fomit-frame-pointer'],
'atariste' : ['-O2','-g','-fomit-frame-pointer'],
'ataritt' : ['-O2','-g','-fomit-frame-pointer'],
'athlon' : ['-O2','-g','-march=athlon'],
'falcon' : ['-O2','-g','-fomit-frame-pointer'],
'fat' : ['-O2','-g','-arch','i386','-arch','ppc'],
'geode' : ['-Os','-g','-m32','-march=geode'],
'hades' : ['-O2','-g','-fomit-frame-pointer'],
'hppa1.0' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.1' : ['-O2','-g','-mpa-risc-1-0'],
'hppa1.2' : ['-O2','-g','-mpa-risc-1-0'],
'hppa2.0' : ['-O2','-g','-mpa-risc-1-0'],
'i386' : ['-O2','-g','-march=i386','-mtune=i686'],
'i486' : ['-O2','-g','-march=i486'],
'i586' : ['-O2','-g','-march=i586'],
'i686' : ['-O2','-g','-march=i686'],
'ia32e' : ['-O2','-g'],
'ia64' : ['-O2','-g'],
'm68k' : ['-O2','-g','-fomit-frame-pointer'],
'milan' : ['-O2','-g','-fomit-frame-pointer'],
'mips' : ['-O2','-g'],
'mipsel' : ['-O2','-g'],
'parisc' : ['-O2','-g','-mpa-risc-1-0'],
'pentium3' : ['-O2','-g','-march=pentium3'],
'pentium4' : ['-O2','-g','-march=pentium4'],
'ppc' : ['-O2','-g','-fsigned-char'],
'ppc32dy4' : ['-O2','-g','-fsigned-char'],
'ppc64' : ['-O2','-g','-fsigned-char'],
'ppc8260' : ['-O2','-g','-fsigned-char'],
'ppc8560' : ['-O2','-g','-fsigned-char'],
'ppciseries' : ['-O2','-g','-fsigned-char'],
'ppcpseries' : ['-O2','-g','-fsigned-char'],
's390' : ['-O2','-g'],
's390x' : ['-O2','-g'],
'sh3' : ['-O2','-g'],
'sh4' : ['-O2','-g','-mieee'],
'sh4a' : ['-O2','-g','-mieee'],
'sparc' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparc64' : ['-O2','-g','-m64','-mtune=ultrasparc'],
'sparc64v' : ['-O2','-g','-m64','-mtune=niagara'],
'sparcv8' : ['-O2','-g','-m32','-mtune=ultrasparc','-mv8'],
'sparcv9' : ['-O2','-g','-m32','-mtune=ultrasparc'],
'sparcv9v' : ['-O2','-g','-m32','-mtune=niagara'],
'x86_64' : ['-O2','-g'],
}
arch_canon = {
'IP' : ['sgi','7'],
'alpha' : ['alpha','2'],
'alphaev5' : ['alphaev5','2'],
'alphaev56' : ['alphaev56','2'],
'alphaev6' : ['alphaev6','2'],
'alphaev67' : ['alphaev67','2'],
'alphapca56' : ['alphapca56','2'],
'amd64' : ['amd64','1'],
'armv3l' : ['armv3l','12'],
'armv4b' : ['armv4b','12'],
'armv4l' : ['armv4l','12'],
'armv5tejl' : ['armv5tejl','12'],
'armv5tel' : ['armv5tel','12'],
'armv6l' : ['armv6l','12'],
'armv7l' : ['armv7l','12'],
'atariclone' : ['m68kmint','13'],
'atarist' : ['m68kmint','13'],
'atariste' : ['m68kmint','13'],
'ataritt' : ['m68kmint','13'],
'athlon' : ['athlon','1'],
'falcon' : ['m68kmint','13'],
'geode' : ['geode','1'],
'hades' : ['m68kmint','13'],
'i370' : ['i370','14'],
'i386' : ['i386','1'],
'i486' : ['i486','1'],
'i586' : ['i586','1'],
'i686' : ['i686','1'],
'ia32e' : ['ia32e','1'],
'ia64' : ['ia64','9'],
'm68k' : ['m68k','6'],
'm68kmint' : ['m68kmint','13'],
'milan' : ['m68kmint','13'],
'mips' : ['mips','4'],
'mipsel' : ['mipsel','11'],
'pentium3' : ['pentium3','1'],
'pentium4' : ['pentium4','1'],
'ppc' : ['ppc','5'],
'ppc32dy4' : ['ppc32dy4','5'],
'ppc64' : ['ppc64','16'],
'ppc64iseries' : ['ppc64iseries','16'],
'ppc64pseries' : ['ppc64pseries','16'],
'ppc8260' : ['ppc8260','5'],
'ppc8560' : ['ppc8560','5'],
'ppciseries' : ['ppciseries','5'],
'ppcpseries' : ['ppcpseries','5'],
'rs6000' : ['rs6000','8'],
's390' : ['s390','14'],
's390x' : ['s390x','15'],
'sh' : ['sh','17'],
'sh3' : ['sh3','17'],
'sh4' : ['sh4','17'],
'sh4a' : ['sh4a','17'],
'sparc' : ['sparc','3'],
'sparc64' : ['sparc64','2'],
'sparc64v' : ['sparc64v','2'],
'sparcv8' : ['sparcv8','3'],
'sparcv9' : ['sparcv9','3'],
'sparcv9v' : ['sparcv9v','3'],
'sun4' : ['sparc','3'],
'sun4c' : ['sparc','3'],
'sun4d' : ['sparc','3'],
'sun4m' : ['sparc','3'],
'sun4u' : ['sparc64','2'],
'x86_64' : ['x86_64','1'],
'xtensa' : ['xtensa','18'],
}
# End of rpmrc dictionaries (Marker, don't change or remove!)
def defaultMachine(use_rpm_default=True):
    """ Return the canonicalized machine name.

    With use_rpm_default=True, ask the local ``rpm`` binary for its
    default target cpu (%_target_cpu); if that fails for any reason
    (rpm missing, non-zero exit, ...) retry via platform.machine().
    The raw name is then mapped through the arch_canon table above.
    """
    if use_rpm_default:
        try:
            # This should be the most reliable way to get the default arch
            rmachine = subprocess.check_output(['rpm', '--eval=%_target_cpu'], shell=False).rstrip()
            # check_output returns bytes; normalize to str for the lookup.
            rmachine = SCons.Util.to_str(rmachine)
        except Exception as e:
            # Something went wrong, try again by looking up platform.machine()
            return defaultMachine(False)
    else:
        rmachine = platform.machine()
    # Try to lookup the string in the canon table
    if rmachine in arch_canon:
        rmachine = arch_canon[rmachine][0]
    return rmachine
def defaultSystem():
    """Return the canonicalized system name.

    platform.system() output is mapped through the os_canon table when a
    canonical spelling is known; otherwise it is returned unchanged.
    """
    raw = platform.system()
    entry = os_canon.get(raw)
    return entry[0] if entry is not None else raw
def defaultNames():
    """ Return the canonicalized machine and system name as a
    (machine, system) tuple. """
    return defaultMachine(), defaultSystem()
def updateRpmDicts(rpmrc, pyfile):
    """ Read the given rpmrc file with RPM definitions and update the
        info dictionaries in the file pyfile with it.

        The arguments will usually be 'rpmrc.in' from a recent RPM source
        tree, and 'rpmutils.py' referring to this script itself.
        See also usage() below.

        Failures (unreadable input, unwritable output, malformed data)
        are now reported instead of silently swallowed.
    """
    try:
        # Read old rpmutils.py file
        with open(pyfile,"r") as f:
            oldpy = f.readlines()
        # Read current rpmrc.in file
        with open(rpmrc,"r") as f:
            rpm = f.readlines()
        # Parse for data
        data = {}
        # Allowed section names that get parsed
        sections = ['optflags',
                    'arch_canon',
                    'os_canon',
                    'buildarchtranslate',
                    'arch_compat',
                    'os_compat',
                    'buildarch_compat']
        for l in rpm:
            # Colons act as token separators in rpmrc lines.
            l = l.rstrip('\n').replace(':',' ')
            # Skip comments
            if l.lstrip().startswith('#'):
                continue
            tokens = l.strip().split()
            if len(tokens):
                key = tokens[0]
                if key in sections:
                    # First token selects the section, second is the entry
                    # name, the rest is the entry's value list.
                    if tokens[0] not in data:
                        data[key] = {}
                    data[key][tokens[1]] = tokens[2:]
        # Write new rpmutils.py file
        with open(pyfile,"w") as out:
            pm = 0
            for l in oldpy:
                if pm:
                    # Inside the generated region: drop stale lines until
                    # the end marker, which is copied through.
                    if l.startswith('# End of rpmrc dictionaries'):
                        pm = 0
                        out.write(l)
                else:
                    out.write(l)
                    if l.startswith('# Start of rpmrc dictionaries'):
                        pm = 1
                        # Write data sections to single dictionaries
                        for key, entries in data.items():
                            out.write("%s = {\n" % key)
                            for arch in sorted(entries.keys()):
                                out.write("  '%s' : ['%s'],\n" % (arch, "','".join(entries[arch])))
                            out.write("}\n\n")
    except Exception as exc:
        # Was a bare "except: pass" that hid every failure; at least tell
        # the caller what went wrong.
        print("updateRpmDicts: could not update %s from %s: %s" % (pyfile, rpmrc, exc))
def usage():
    # Print a one-line reminder of the expected command-line arguments.
    print("rpmutils.py rpmrc.in rpmutils.py")
def main():
    """Command-line entry point: updateRpmDicts(rpmrc.in, rpmutils.py)."""
    # sys is only needed when run as a script, hence the local import.
    import sys
    if len(sys.argv) < 3:
        usage()
        sys.exit(0)
    updateRpmDicts(sys.argv[1], sys.argv[2])
if __name__ == "__main__":
main()
| mongodb/mongo | src/third_party/scons-3.1.2/scons-local-3.1.2/SCons/Tool/rpmutils.py | rpmutils.py | py | 15,575 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "subprocess.check_output",
"line_number": 412,
"usage_type": "call"
},
{
"api_name": "SCons.Util.Util.to_str",
"line_number": 413,
"usage_type": "call"
},
{
"api_name": "SCons.Util.Util",
"line_number": 413,
"usage_type": "attribute"
},
{
"api_name":... |
72170366505 | import pandas as pd
import numpy as np
import joblib
import pickle
import warnings
import os
from data.make_dataset import preprocess_train_df
warnings.filterwarnings("ignore")
def make_categorical_dataset(processed_dfs, proteins_df):
    """
    Convert each updrs dataframe into a categorical dataset.

    Severity cut points per target:
        updrs_1: <=10 mild, 11-21 moderate, >=22 severe
        updrs_2: <=12 mild, 13-29 moderate, >=30 severe
        updrs_3: <=32 mild, 33-58 moderate, >=59 severe
        updrs_4: <=4 mild, 5-12 moderate, >=13 severe

    Also adds aggregate feature-count columns (``num_prot_pep``,
    ``num_prot``, ``num_pept``) to every frame. The input frames are
    modified in place.

    Args:
        processed_dfs: dict mapping "updrs_N" to a dataframe with one
            row per visit_month and all protein/peptide columns
        proteins_df: dataframe providing the UniProt protein identifiers

    Returns:
        dict mapping "updrs_N" to the augmented dataframe with the
        "updrs_N_cat" severity column added
    """
    protein_cols = list(proteins_df["UniProt"].unique())

    # Non-feature bookkeeping columns shared by every updrs frame.
    meta_cols = [
        "visit_id",
        "patient_id",
        "visit_month",
        "updrs_1",
        "updrs_2",
        "updrs_3",
        "updrs_4",
        "kfold",
    ]

    # Peptide columns are whatever remains once proteins and metadata are
    # excluded (derived from the updrs_1 frame; all frames share layout).
    peptide_cols = [
        c
        for c in processed_dfs["updrs_1"].columns
        if c not in protein_cols and c not in meta_cols
    ]
    feature_cols = protein_cols + peptide_cols

    # (mild upper bound, moderate upper bound) per target score.
    cutoffs = {
        "updrs_1": (10, 21),
        "updrs_2": (12, 29),
        "updrs_3": (32, 58),
        "updrs_4": (4, 12),
    }

    categorical_dfs = {}
    for target in ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]:
        frame = processed_dfs[target]
        # Aggregate feature-count columns.
        frame["num_prot_pep"] = frame[feature_cols].sum(axis=1)
        frame["num_prot"] = frame[protein_cols].sum(axis=1)
        frame["num_pept"] = frame[peptide_cols].sum(axis=1)
        # Bucket the raw score into mild / moderate / severe.
        mild_max, moderate_max = cutoffs[target]
        frame[target + "_cat"] = np.where(
            frame[target] <= mild_max,
            "mild",
            np.where(frame[target] <= moderate_max, "moderate", "severe"),
        )
        categorical_dfs[target] = frame
    return categorical_dfs
def add_med_data(clin_df, updrs_df):
    """
    Merge medication-state dummy columns into the updrs dataframe.

    The upd23b_clinical_state_on_medication column is one-hot encoded
    (missing values become "Unknown", the first level is dropped as the
    baseline) and joined onto ``updrs_df`` by visit_id. ``clin_df`` is
    modified in place by the fillna step.

    Args:
        clin_df: dataframe with visit_id and
            upd23b_clinical_state_on_medication columns
        updrs_df: dataframe with protein, peptide, visit_id, visit_month
            and patient_id columns

    Returns:
        updrs_df merged with the medication dummy columns
    """
    med_col = "upd23b_clinical_state_on_medication"
    # Missing medication state is treated as its own "Unknown" level.
    clin_df[med_col] = clin_df[med_col].fillna("Unknown")
    # One-hot encode; drop_first removes the baseline ("Off") level.
    dummies = pd.get_dummies(clin_df, columns=[med_col], drop_first=True)
    dummies = dummies[
        [
            "visit_id",
            med_col + "_On",
            med_col + "_Unknown",
        ]
    ]
    # Inner-join the dummy columns onto the updrs rows by visit.
    return pd.merge(updrs_df, dummies, on="visit_id")
def predict_updrs1(df):
    """Predict the updrs_1_cat column using the saved CatBoost Classifier.

    Args:
        df: dataframe containing the updrs_1 feature columns plus the
            bookkeeping columns dropped below

    Returns:
        df: the same dataframe with the updrs_1_cat_preds column added

    Raises:
        AttributeError: if the loaded object has no ``predict_proba``.
    """
    # Load the saved model
    model_path = os.path.join(
        "..", "models", "catboost_updrs_1_model_hyperopt_smote.sav"
    )
    model = joblib.load(model_path)

    # Drop identifiers/targets so only feature columns reach the model.
    X = df.drop(columns=["updrs_1_cat", "kfold", "visit_id", "patient_id", "updrs_1"])

    try:
        preds = model.predict_proba(X)[:, 1]
    except AttributeError as e:
        # Bug fix: the original swallowed this error and then used the
        # undefined `preds`, raising a confusing NameError. Re-raise so
        # the real failure surfaces.
        print(f"Error: {e}")
        raise

    # Threshold of 0.46 (tuned for this model) converts probability to label.
    df["updrs_1_cat_preds"] = np.where(preds >= 0.46, 1, 0)
    return df
def predict_updrs2(df):
    """Predict the updrs_2_cat column using the saved CatBoost Classifier.

    Args:
        df: dataframe containing the updrs_2 feature columns plus the
            bookkeeping columns dropped below

    Returns:
        df: the same dataframe with the updrs_2_cat_preds column added

    Raises:
        AttributeError: if the loaded object has no ``predict_proba``.
    """
    model_path = os.path.join(
        "..", "models", "catboost_updrs_2_model_hyperopt_smote_meds.sav"
    )
    model = joblib.load(model_path)

    # Drop identifiers/targets so only feature columns reach the model.
    X = df.drop(columns=["updrs_2_cat", "kfold", "visit_id", "patient_id", "updrs_2"])

    try:
        preds = model.predict_proba(X)[:, 1]
    except AttributeError as e:
        # Bug fix: the original swallowed this error and then used the
        # undefined `preds`, raising a confusing NameError. Re-raise.
        print(f"Error: {e}")
        raise

    # Threshold of 0.22 (tuned for this model) converts probability to label.
    df["updrs_2_cat_preds"] = np.where(preds >= 0.22, 1, 0)
    return df
def predict_updrs3(df):
    """Predict the updrs_3_cat column using the saved LightGBM Classifier.

    Args:
        df: dataframe containing the updrs_3 feature columns plus the
            bookkeeping columns dropped below

    Returns:
        df: the same dataframe with the updrs_3_cat_preds column added

    Raises:
        AttributeError: if the loaded object has no ``predict_proba``.
    """
    # Load the saved model
    filename = os.path.join(
        "..", "models", "lgboost_updrs_3_model_hyperopt_smote_meds.sav"
    )
    model = joblib.load(filename)

    # Drop identifiers/targets so only feature columns reach the model.
    X = df.drop(columns=["updrs_3_cat", "kfold", "visit_id", "patient_id", "updrs_3"])

    try:
        # verbose=-100 silences LightGBM's prediction logging.
        preds = model.predict_proba(X, verbose=-100)[:, 1]
    except AttributeError as e:
        # Bug fix: the original swallowed this error and then used the
        # undefined `preds`, raising a confusing NameError. Re-raise.
        print(f"Error: {e}")
        raise

    # Threshold of 0.28 (tuned for this model) converts probability to label.
    df["updrs_3_cat_preds"] = np.where(preds >= 0.28, 1, 0)
    return df
if __name__ == "__main__":
    # read in the data
    # Paths are relative to the src/ directory this script is run from.
    train_clin_path = os.path.join("..", "data", "raw", "train_clinical_data.csv")
    train_prot_path = os.path.join("..", "data", "raw", "train_proteins.csv")
    train_pep_path = os.path.join("..", "data", "raw", "train_peptides.csv")
    train_clin_df = pd.read_csv(train_clin_path)
    train_prot_df = pd.read_csv(train_prot_path)
    train_pep_df = pd.read_csv(train_pep_path)
    # Build one wide frame per updrs target without writing intermediates.
    proc_dfs = preprocess_train_df(
        train_clin_df, train_prot_df, train_pep_df, save_data=False
    )
    # convert to only 12 month data since that was what was used for training
    for updrs in ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]:
        temp_df = proc_dfs[updrs]
        proc_dfs[updrs] = temp_df[temp_df["visit_month"] <= 12]
    cat_dfs = make_categorical_dataset(proc_dfs, train_prot_df)
    # Only the updrs_2 and updrs_3 models were trained with medication data.
    cat_dfs["updrs_2"] = add_med_data(train_clin_df, cat_dfs["updrs_2"])
    cat_dfs["updrs_3"] = add_med_data(train_clin_df, cat_dfs["updrs_3"])
    pred_updrs1_df = predict_updrs1(cat_dfs["updrs_1"])
    pred_updrs2_df = predict_updrs2(cat_dfs["updrs_2"])
    pred_updrs3_df = predict_updrs3(cat_dfs["updrs_3"])
    # combine prediction columns into one dataframe
    updrs_preds = pd.merge(
        pred_updrs1_df,
        pred_updrs2_df[["visit_id", "updrs_2_cat", "updrs_2_cat_preds"]],
        on="visit_id",
    )
    # Left join: keep every updrs_1/2 row even if updrs_3 has no prediction.
    updrs_preds = pd.merge(
        updrs_preds,
        pred_updrs3_df[["visit_id", "updrs_3_cat", "updrs_3_cat_preds"]],
        on="visit_id",
        how="left",
    )
    # save the dataframe as a csv
    file_path = os.path.join("..", "data", "predictions", "full_updrs_preds.csv")
    updrs_preds.to_csv(file_path, index=False)
| dagartga/Boosted-Models-for-Parkinsons-Prediction | src/pred_pipeline.py | pred_pipeline.py | py | 9,293 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"lin... |
22783063408 | #
# @lc app=leetcode id=347 lang=python3
#
# [347] Top K Frequent Elements
#
# https://leetcode.com/problems/top-k-frequent-elements/description/
#
# algorithms
# Medium (62.25%)
# Likes: 4564
# Dislikes: 260
# Total Accepted: 550.4K
# Total Submissions: 881.4K
# Testcase Example: '[1,1,1,2,2,3]\n2'
#
# Given a non-empty array of integers, return the k most frequent elements.
#
# Example 1:
#
#
# Input: nums = [1,1,1,2,2,3], k = 2
# Output: [1,2]
#
#
#
# Example 2:
#
#
# Input: nums = [1], k = 1
# Output: [1]
#
#
# Note:
#
#
# You may assume k is always valid, 1 ≤ k ≤ number of unique elements.
# Your algorithm's time complexity must be better than O(n log n), where n is
# the array's size.
# It's guaranteed that the answer is unique, in other words the set of the top
# k frequent elements is unique.
# You can return the answer in any order.
#
#
#
# @lc code=start
from heapq import heappush, heappop, heappushpop
from collections import Counter
class Solution:
    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent values using a size-k min-heap."""
        if not nums:
            return []
        counts = Counter(nums)
        heap = []
        # Keep the heap at size k; the root is the least frequent kept so far.
        for value, count in counts.items():
            entry = (count, value)
            if len(heap) < k:
                heappush(heap, entry)
            elif count > heap[0][0]:
                heappushpop(heap, entry)
        # Drain the heap (ascending frequency order).
        result = []
        while heap:
            result.append(heappop(heap)[1])
        return result
class Solution:
    def topKFrequent(self, nums: List[int], k: int) -> List[int]:
        """Return the k most frequent elements (heap kept at size k).

        Bug fix: the original called ``heapreplace``, which is never
        imported in this module, so this branch raised NameError at
        runtime. ``heappushpop`` (already imported at the top of the
        file) is equivalent here because the pushed item is known to be
        larger than the heap root.
        """
        if not nums or len(nums) == 0:
            return []
        freq = Counter(nums)
        heap = []
        for num in freq:
            if len(heap) < k:
                heappush(heap, (freq[num], num))
            else:
                if freq[num] > heap[0][0]:
                    heappushpop(heap, (freq[num], num))
        # size of heap will be k
        res = [pair[1] for pair in heap]
        return res
# @lc code=end
# Time: O(nlog(k)) where n represents the number of unique number, worst case is n
# Space: O(n)
| Zhenye-Na/leetcode | python/347.top-k-frequent-elements.py | 347.top-k-frequent-elements.py | py | 2,170 | python | en | code | 17 | github-code | 36 | [
{
"api_name": "collections.Counter",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "heapq.heappushpop",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
... |
import datetime
import zadanie2lista6
import zadanie22lista6
# Read the plaintext to be encrypted.
plik = 'plik_do_szyfrowania.txt.txt'
openplik = open(plik,"r").read()
# Caesar-cipher shift entered by the user (not validated against the 1-10 range).
a = int(input("Podaj liczbe od 1-10: "))
# Today's date is embedded in the output file names.
date_today = datetime.date.today()
month = date_today.month
year = date_today.year
day = date_today.day
# Build "plik_zaszyfrowany_<shift><year>-<month>-<day>.txt".
g = ['plik_zaszyfrowany','_',a,year,'-',month,'-',day,'.txt']
all_strings = list(map(str, g))
result = ''.join(all_strings)
# Build the matching name for the decrypted output file.
s = ['plik_deszyfrowany','_',a,year,'-',month,'-',day,'.txt']
all_strings = list(map(str, s))
wynik2 = ''.join(all_strings)
# Encrypt and write the ciphertext file.
zaszyfrowane = zadanie2lista6.Szyfr_Cezara(openplik,a)
print(zaszyfrowane)
with open(result,'w', encoding="utf-8") as file:
    file.write(zaszyfrowane)
# TASK 2: decrypt and write the plaintext back out.
# NOTE(review): this handle is never used or closed (decryption below works
# on the in-memory `zaszyfrowane` string) — consider removing it.
szyfr2 = open(result,'r')
deszyfrowanie = zadanie22lista6.Szyfr_Cezara2(zaszyfrowane,a)
print(deszyfrowanie)
with open(wynik2,'w', encoding="utf-8") as file:
    file.write(deszyfrowanie)
| AWyszynska/JSP2022 | lista8/zadanie1i2.py | zadanie1i2.py | py | 898 | python | pl | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "zadanie2lista6.Szyfr_Cezara",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "zada... |
74105621545 | from django.shortcuts import render
# se importan los modelos
from .models import Author, Genre, Book, BookInstance
# se crea la funcion index
def index (request) :
    """Render the catalog home page with counts of books, copies and authors."""
    # total number of books
    num_books = Book.objects.all().count()
    # total number of book copies (instances)
    num_inctances = BookInstance.objects.all().count()
    # total number of authors
    num_authors = Author.objects.all().count()
    # Count available copies: status__exact = "a" matches instances whose
    # status field is exactly "a" (available).
    disponivles = BookInstance.objects.filter (status__exact = "a").count()
    # Render index.html with the counts passed through the template context.
    return render (
        request,
        "index.html",
        context = {
            "num_books": num_books,
            "num_inctances": num_inctances,
            "num_authors": num_authors,
            "disponivles": disponivles,
        }
    )
| MallicTesla/Mis_primeros_pasos | Programacion/002 ejemplos/002 - 13 django catalogo/catalog/views.py | views.py | py | 1,201 | python | es | code | 1 | github-code | 36 | [
{
"api_name": "models.Book.objects.all",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "models.Book.objects",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "models.Book",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "models.B... |
3642645644 | #!/usr/bin/python
import json
import sys
# Command-line arguments: a TREC-style run file, a document-entity mapping
# file, and a JSON query file.
out_file = sys.argv[1]
document_entities_file = sys.argv[2]
query_file = sys.argv[3]
# Collect the top-10 ranked documents for every query in the run file.
# Expected run-file format: query _ document rank _ _ (whitespace-separated).
query_docs = {}
docs = set()
with open(out_file) as f:
    for line in f:
        query, _, document, rank, _, _ = line.split()
        rank = int(rank)
        if rank > 10:
            continue
        if query not in query_docs:
            query_docs[query] = []
        query_docs[query].append(document)
        docs.add(document)
# Map each retrieved document to its entities, skipping documents that were
# not in any query's top 10.
doc_entities = {}
with open(document_entities_file) as f:
    for line in f:
        document, entity, _ = line.split()
        if document not in docs:
            continue
        if document not in doc_entities:
            doc_entities[document] = []
        doc_entities[document].append(entity)
# Load query id -> text from the JSON query file ({"queries": [{"title", "text"}]}).
queries = {}
j = json.load(open(query_file))
for q in j['queries']:
    queries[q['title']] = q['text']
# Print up to the first 5 entities of each top document as TSV:
# query<TAB>"query text"<TAB>entity
for query in query_docs:
    for doc in query_docs[query]:
        i = 0
        if doc in doc_entities:
            for entity in doc_entities[doc]:
                i += 1
                if i <= 5:
                    print('\t'.join([query, '"{}"'.format(queries[query]), entity]))
| gtsherman/entities-experiments | src/query_entities.py | query_entities.py | py | 992 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number":... |
36968708533 | from collections import namedtuple
import os
# from unittest.mock import patch
import datetime
import random
import pytest
import hug
from bson.objectid import ObjectId
from pymongo import MongoClient
from pymongo.uri_parser import parse_uri
from helpers import clean_url, clean_email, hash_password
from db import DB
from middlewares import HostEnvMiddleware, MongoMiddleware
"""
API endpoints test
"""
# Mongo connection string for the throwaway test database.
TEST_MONGO_URL = os.environ.get('MONGODB_URI_TEST')
# Fixture users inserted before each test; api_key doubles as the auth header.
USERS = (
    {'email': 'testuser1@email.com', 'api_key': 'apikey1'},
    {'email': 'testuser2@email.com', 'api_key': 'apikey2'},
)
def create_fixtures():
    """
    Creating user fixtures for tests: each USERS entry plus one short URL.
    """
    remove_fixtures()
    with MongoClient(TEST_MONGO_URL) as conn:
        parsed = parse_uri(TEST_MONGO_URL)
        db = conn[parsed['database']]
        # adding user and one url for each user
        # NOTE(review): collection.insert() is deprecated in pymongo 3.x in
        # favor of insert_one() — confirm the pinned pymongo version.
        for i, user in enumerate(USERS):
            user_id = db.users.insert(user)
            db.urls.insert({
                'code': 'user{}'.format(i),
                'short_url': 'http://ef.me/user{}'.format(i),
                'long_url': 'http://user{}.com'.format(i),
                'url_access': [],
                'created_at': datetime.datetime.now(),
                'created_by': user_id
            })
def remove_fixtures():
    """
    Removing fixtures: delete fixture users and everything they created.
    """
    with MongoClient(TEST_MONGO_URL) as conn:
        parsed = parse_uri(TEST_MONGO_URL)
        db = conn[parsed['database']]
        emails = [i['email'] for i in USERS]
        # testuser3 is created by test_create_user, so clean it up here too.
        emails.append('testuser3@email.com')
        query = {'email': {'$in': emails}}
        user_ids = [i['_id'] for i in list(db.users.find(query, {'_id': 1}))]
        # removing urls
        db.urls.remove({'created_by': {'$in': user_ids}}, {'multi': True})
        # removing users
        db.users.remove({'_id': {'$in': user_ids}}, {'multi': True})
def setup():
    """
    Creating initial fixtures for tests.
    """
    # Point the app at the test database and canonical host before it is imported.
    os.environ['MONGODB_URI'] = TEST_MONGO_URL
    os.environ['HOST'] = 'http://ef.me'
    create_fixtures()
def teardown():
    """
    Clear fixtures for tests and reset the environment.
    """
    os.environ['MONGODB_URI'] = ''
    os.environ['HOST'] = ''
    remove_fixtures()
def test_short_url():
    """
    test /api/short endpoint: auth, validation, creation and duplicates.
    """
    setup()
    # Imported after setup() so the app picks up the test env vars.
    import api
    # bad request without long_url query param
    request_url = '/api/short'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers)
    assert response.data['error'] == 'long_url GET param missing'
    # bad request without authentication header
    response = hug.test.get(api, request_url)
    assert response.status == '401 Unauthorized'
    # bad request with inexistent authentication header
    headers = {'X-Api-Key': 'not-exists'}
    response = hug.test.get(api, request_url, headers=headers)
    assert response.status == '401 Unauthorized'
    # bad request with invalid url
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            long_url=(1, 2, 3))
    assert response.data['error'] == 'long_url is not a valid URL'
    # bad request with long code
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            long_url='www.google.com',
                            code='lllllllllllllllongggggggggg')
    assert response.data['error'] == 'Code param must have a max length of 9'
    # good request with code generating short_url
    request_url = '/api/short'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            long_url='www.google.com', code='abcd')
    assert response.data['short_url'] == 'http://ef.me/abcd'
    # good request for same long url will raise a 409 error
    request_url = '/api/short'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            long_url='www.google.com', code='abcd')
    assert response.data['error'] == 'long_url already exists'
    # good request without generating short_url
    request_url = '/api/short'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            long_url='www.google.com/123')
    assert 'short_url' in response.data
    teardown()
def test_expand_url():
    """
    /api/expand endpoint tests: validation, missing URL and the happy path.
    """
    setup()
    # Imported after setup() so the app picks up the test env vars.
    import api
    # bad request with missing short_url
    request_url = '/api/expand'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers)
    assert response.data['error'] == 'short_url GET param missing'
    # bad request with a not valid url
    request_url = '/api/expand'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            short_url=(1, 2, 3))
    assert response.data['error'] == 'short_url is not a valid URL'
    # bad request with a inexistent url
    request_url = '/api/expand'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            short_url='http://ef.me/noex')
    assert response.data['error'] == 'short_url does not exist'
    # valid request (uses the fixture URL created for user 0)
    request_url = '/api/expand'
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, request_url, headers=headers,
                            short_url='http://ef.me/user0')
    assert response.data['short_url'] == 'http://ef.me/user0'
    assert response.data['long_url'] == 'http://user0.com'
    teardown()
def test_go_to_url():
    """
    testing /s/:code endpoint: 404 for unknown codes, 301 redirect otherwise.
    """
    setup()
    import api
    # test not found
    response = hug.test.get(api, '/s/123')
    assert response.status == '404 Not Found'
    # test 301 response
    response = hug.test.get(api, '/s/user1')
    assert response.status == '301 Moved Permanently'
    teardown()
def test_create_user():
    """
    testing /api/user endpoint: payload validation, duplicates, creation.
    """
    setup()
    import api
    # bad request with no payload
    response = hug.test.post(api, '/api/user')
    assert response.data['error'] == 'Missing email on body request'
    # bad request with bad email payload
    payload = {'email': (1, 2, 3)}
    response = hug.test.post(api, '/api/user', payload)
    assert response.data['error'] == 'Email not valid'
    # bad request with already added user (fixture user)
    payload = {'email': 'testuser1@email.com'}
    response = hug.test.post(api, '/api/user', payload)
    assert response.data['error'] == 'User already exists'
    # good request with valid payload; testuser3 is removed by teardown()
    payload = {'email': 'testuser3@email.com'}
    response = hug.test.post(api, '/api/user', payload)
    assert response.status == '200 OK'
    assert 'api_key' in response.data
    teardown()
def test_get_user_urls():
    """
    testing /api/urls endpoint: auth, access counting and pagination.
    """
    setup()
    import api
    # bad request without auth
    response = hug.test.get(api, '/api/urls')
    assert response.status == '401 Unauthorized'
    # get all urls from user1 (one fixture URL)
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, '/api/urls', headers=headers).data
    assert len(response) == 1
    assert response[0]['short_url'] == 'http://ef.me/user0'
    assert response[0]['long_url'] == 'http://user0.com'
    assert response[0]['code'] == 'user0'
    assert response[0]['total_accesses'] == 0
    # add one more access to url on user0 and check the results
    hug.test.get(api, '/s/user0')
    response = hug.test.get(api, '/api/urls', headers=headers).data
    assert len(response) == 1
    assert response[0]['total_accesses'] == 1
    # test pagination
    # adding more urls for user0 and retrieve it (page size is 5)
    # NOTE(review): random codes could theoretically collide across runs.
    for i in range(10):
        code = random.randint(4, 99999)
        resp = hug.test.get(api, '/api/short', headers=headers,
                            long_url='http://{}.com'.format(code))
        assert resp.status == '201 Created'
    response = hug.test.get(api, '/api/urls', headers=headers).data
    assert len(response) == 5
    # get page 2
    response = hug.test.get(api, '/api/urls', headers=headers, page=2).data
    assert len(response) == 5
    # get page 3. Should have 1 url only
    response = hug.test.get(api, '/api/urls', headers=headers, page=3).data
    assert len(response) == 1
    teardown()
def test_get_user_url():
    """
    test /api/urls/{code} endpoint: auth, ownership and detail payload.
    """
    setup()
    import api
    # bad request without auth
    response = hug.test.get(api, '/api/urls/123')
    assert response.status == '401 Unauthorized'
    # good request with user url
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, '/api/urls/user0', headers=headers)
    assert response.data['short_url'] == 'http://ef.me/user0'
    assert response.data['long_url'] == 'http://user0.com'
    assert response.data['total_accesses'] == 0
    # get url from other user returns 404 (ownership is enforced)
    headers = {'X-Api-Key': 'apikey1'}
    response = hug.test.get(api, '/api/urls/user1', headers=headers)
    assert response.data['error'] == 'URL does not exist'
    teardown()
"""
Helpers test
"""
def test_clean_url():
    """
    testing clean_url helper: rejects non-strings/empty, normalizes scheme
    and trailing slash.
    """
    bad = 123
    bad2 = ''
    good = 'http://google.com'
    without_scheme = 'google.com'
    with_trailing_slash = 'google.com/'
    with pytest.raises(ValueError):
        clean_url(bad)
    with pytest.raises(ValueError):
        clean_url(bad2)
    assert clean_url(good) == good
    # Missing scheme gets http:// prepended; trailing slash is stripped.
    assert clean_url(without_scheme) == good
    assert clean_url(with_trailing_slash) == good
def test_clean_email():
    """
    testing clean_email helper: rejects malformed values, passes valid ones.
    """
    bad = '123'
    bad2 = 123
    bad3 = '<<@>>'
    good = 'test@email.com'
    with pytest.raises(ValueError):
        clean_email(bad)
    with pytest.raises(ValueError):
        clean_email(bad2)
    with pytest.raises(ValueError):
        clean_email(bad3)
    assert clean_email(good) == good
def test_hash_password():
    # Known-answer test pinning the email+salt hashing scheme.
    expected = 'd0088c5e26b377da76477cda8d7d2f2e5a3723176eb2a1ddf6c4719d567c3bfe7141f1998a1e3a3cbec86c96740d7d25bc954e2970d4974b66193a9ea210a8af'
    assert hash_password('test@email.com', 'salt123') == expected
"""
Middleware test
"""
def test_env_middleware():
    """HostEnvMiddleware copies HOST into the request context and rejects
    over-long hosts."""
    os.environ['HOST'] = 'http://bit.ly'
    # Minimal stand-in for a falcon/hug request object with a context dict.
    fake_request = namedtuple('Request', 'context')
    fake_response = {}
    req = fake_request(context={})
    e = HostEnvMiddleware()
    e.process_request(req, fake_response)
    assert req.context['host'] == 'http://bit.ly'
    # An over-long host value must be rejected.
    os.environ['HOST'] = 'biggggggggggggghosttttttttttttttt.com'
    e = HostEnvMiddleware()
    req = fake_request(context={})
    with pytest.raises(Exception):
        e.process_request(req, fake_response)
    os.environ['HOST'] = ''
def test_mongo_middleware():
    """MongoMiddleware opens a DB handle per request and clears it on response."""
    os.environ['MONGODB_URI'] = TEST_MONGO_URL
    parsed = parse_uri(TEST_MONGO_URL)
    fake_request = namedtuple('Request', 'context')
    fake_response = {}
    req = fake_request(context={})
    m = MongoMiddleware()
    m.process_request(req, fake_response)
    assert isinstance(req.context['db'], DB)
    assert req.context['db'].database == parsed['database']
    # process_response must release the handle.
    m.process_response(req, {}, {})
    assert req.context['db'] is None
    os.environ['MONGODB_URI'] = ''
"""
DB test
"""
def test_sanitize_query():
    """DB.sanitize_query rejects non-dicts and converts _id to ObjectId."""
    bad = ''
    good = {}
    good2 = {'_id': '58d0211ea1711d51401aee4c'}
    assert DB.sanitize_query(bad) is False
    assert DB.sanitize_query(good) == {}
    assert DB.sanitize_query(good2) == {'_id': ObjectId('58d0211ea1711d51401aee4c')}
| ellisonleao/ef-url-shortener | test_api.py | test_api.py | py | 11,725 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pymongo.uri_parser... |
# Import SQLite3
import sqlite3
# Create (or open) the database file "cars.db" in the working directory.
conn = sqlite3.connect("cars.db")
# Create the cursor to execute commands
cursor = conn.cursor()
# Create an "inventory" table with Make, Model and Quantity columns.
# DDL executes outside the implicit transaction, so no commit is needed;
# note this script fails on a second run because the table already exists.
cursor.execute("""CREATE TABLE inventory
(Make TEXT, Model TEXT, Quantity INT)
""")
# close the connection
conn.close()
| JackM15/sql | car_sql.py | car_sql.py | py | 438 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
}
] |
8711592711 | import cx_Oracle
class modulo():
    """CRUD helpers for rows of the Oracle `secciones` table.

    NOTE(review): the methods below are defined without `self`, so they
    only work when called on the class itself (e.g. modulo.crearModulo());
    signatures are kept unchanged for backward compatibility.
    """
    # Class-level defaults (only codigoSeccion is set by __init__).
    codigoSeccion = 0
    ramo1 = ""
    ramo2 = ""
    ramo3 = ""
    ramo4 = ""

    def __init__(self, codSec):
        self.codigoSeccion = codSec

    def crearModulo():
        """Insert a new section row from console input."""
        # Bug fix: cursor/conexion are pre-initialized so the finally block
        # no longer raises NameError when connect() itself fails.
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            codigoSeccion = input("Indique codigo de seccion a crear: ")
            ramo1 = int(input("Indique ramo 1: "))
            ramo2 = int(input("Indique ramo 2: "))
            ramo3 = int(input("Indique ramo 3: "))
            ramo4 = int(input("Indique ramo 4: "))
            cursor.execute(''' insert into secciones (codigosSeccion,ramo1,ramo2,ramo3,ramo4)
            values (:cs,:r1,:r2,:r3,:r4)''', cs=codigoSeccion, r1=ramo1, r2=ramo2, r3=ramo3, r4=ramo4)
            conexion.commit()
            print("Seccion creada con exito!! ")
        except Exception:
            print("Error al crear modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def editarModulo():
        """Update the four ramo columns of an existing section."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            codigoSeccion = input("Indique codigo de seccion a editar: ")
            ramo1 = int(input("Indique nuevo ramo 1: "))
            ramo2 = int(input("Indique nuevo ramo 2: "))
            ramo3 = int(input("Indique nuevo ramo 3: "))
            ramo4 = int(input("Indique nuevo ramo 4: "))
            cursor.execute(''' update secciones set ramo1=:r1, ramo2=:r2,ramo3=:r3,ramo4=:r4
            where codigosSeccion=:cod''', cod=codigoSeccion, r1=ramo1, r2=ramo2, r3=ramo3, r4=ramo4)
            conexion.commit()
            print("Modulo editado correctamente!!")
        except Exception:
            print("Error al editar modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def eliminarModulo():
        """Delete a section row chosen from console input."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            idS = input("Indique seccion que desea eliminar: ")
            cursor.execute(''' delete from secciones where codigosSeccion=:id ''', id=idS)
            conexion.commit()
            print("Modulo eliminado correctamente!! ")
        except Exception:
            print("Error al eliminar modulo!!")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()

    def mostrarModulos():
        """Print every row of the secciones table."""
        conexion = None
        cursor = None
        try:
            conexion = cx_Oracle.connect(
                user='escuela',
                password='1234',
                dsn='localhost:1521/xe'
            )
            cursor = conexion.cursor()
            cursor.execute(''' select * from secciones ''')
            res = cursor.fetchall()
            for row in res:
                print("\n|Sección:",row[0], "|Ramos:", row[1],"-",row[2],"-",row[3],"-",row[4])
        except Exception:
            print("Error al mostrar modulos!! ")
        finally:
            if cursor is not None:
                cursor.close()
            if conexion is not None:
                conexion.close()
{
"api_name": "cx_Oracle.connect",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cx_Oracle.connect",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "cx_Oracle.connect",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "cx_Oracle.conne... |
72426420905 | from rest_framework.test import APITestCase
from restapi.models import Companies, Countries
class FilterTest(APITestCase):
    """Checks the ?year= filter on /restapi/companies/ (founded >= year)."""
    @classmethod
    def setUpTestData(cls):
        # One shared country plus ten companies founded 1950..1959.
        companies = 10
        Countries.objects.create(name="c", continent="c", population=1, capital="c", surface=1)
        country = Countries.objects.get(name="c")
        for c_id in range(companies):
            Companies.objects.create(name=f"c {c_id}", year_founded=1950 + c_id, number_of_employees=10, country=country,
                                     activity="a")
    def test_correct_result(self):
        # 1950 matches all 10; 1955 the last 5; 1960 none.
        response = self.client.get("/restapi/companies/?year=1950")
        self.assertEqual(len(response.data), 10)
        response1 = self.client.get("/restapi/companies/?year=1955")
        self.assertEqual(len(response1.data), 5)
        response2 = self.client.get("/restapi/companies/?year=1960")
        self.assertEqual(len(response2.data), 0)
| UBB-SDI-23/lab-5x-andrei-crisan27 | backend-project/tests/test_filter.py | test_filter.py | py | 939 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.test.APITestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "restapi.models.Countries.objects.create",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "restapi.models.Countries.objects",
"line_number": 10,
"usage_type"... |
74430232105 | from django.urls import path
from . import views
# URL routes for the products app.
urlpatterns = [
    # Listing, ranking and detail pages.
    path(
        '',
        views.all_products,
        name='products'),
    path(
        'ranked/',
        views.products_ranking,
        name='products_ranking'),
    path(
        '<int:product_id>/',
        views.product_detail,
        name='product_detail'),
    # Product CRUD (class-based create/update, function-based delete).
    path(
        'add/',
        views.ProductCreateView.as_view(),
        name='add_product'),
    path(
        'search/',
        views.search_page,
        name='search_page'),
    # Partial-page search endpoint consumed by htmx.
    path(
        'htmx-search/',
        views.htmx_search_products,
        name='htmx_search'),
    path(
        'edit/<int:pk>/',
        views.ProductUpdateView.as_view(),
        name='edit_product'),
    path(
        'delete/<int:product_id>/',
        views.delete_product,
        name='delete_product'),
    # Wishlist management.
    path(
        'add_to_wishlist/<int:id>/',
        views.add_to_wishlist,
        name='add_to_wishlist'),
    path(
        'delete_wishlist/<int:id>/',
        views.delete_wishlist_item,
        name='delete_wishlist_item'),
    # Category browsing and CRUD.
    path(
        'category/<category>/',
        views.category_products,
        name='product_category_view'),
    path(
        'add_category/',
        views.add_product_category,
        name='add_product_category'),
    path(
        'edit_category/<int:id>/',
        views.edit_product_category,
        name='edit_product_category'),
    path(
        'delete_category/<int:id>/',
        views.delete_product_category,
        name='delete_product_category'),
    # Comment moderation.
    path(
        'delete_comment/<int:id>/',
        views.delete_comment,
        name='delete_comment'),
]
| neil314159/portfolio-project-5 | products/urls.py | urls.py | py | 1,650 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
15009189438 | import pathlib
from typing import Optional
import essentia.standard as es
import numpy as np
import pyrubberband as pyrb
from madmom.features.downbeats import DBNDownBeatTrackingProcessor, RNNDownBeatProcessor
from mixer.logger import logger
SAMPLE_RATE = 44100 # Sample rate fixed for essentia
class TrackProcessor:
SAMPLE_RATE = SAMPLE_RATE
    def __init__(self, file_path: str, name: Optional[str] = None) -> None:
        """
        Parameters
        ----------
        file_path : str
            absolute or relative location of track audio file
        name : Optional[str]
            name to give to track if not present in file path
        """
        self._file_path = pathlib.Path(file_path)
        # Fall back to the file stem (name without extension) when no
        # explicit name is given.
        if name is None:
            self._name = self._file_path.stem
        else:
            self._name = name
        # Audio samples, tempo and downbeat times are computed lazily;
        # empty array / None mean "not yet calculated".
        self._audio = np.array([])
        self._bpm = None
        self._downbeats = np.array([])
def __str__(self):
return self._name
@property
def audio(self) -> np.ndarray:
return self._audio
@property
def downbeats(self) -> np.ndarray:
return self._downbeats
@property
def bpm(self) -> Optional[float]:
return self._bpm
@bpm.setter
def bpm(self, bpm: float) -> np.ndarray:
"""
Time stretch audio file to increase BPM to target
Parameters
----------
bpm : float
intended BPM of audio
Returns
-------
np.ndarray
time-stretched audio
"""
if self._bpm is None:
self.calculate_bpm()
assert self._bpm is not None
stretch_factor = bpm / self._bpm
self._audio = pyrb.time_stretch(
self._audio, SAMPLE_RATE, stretch_factor
).astype(np.float32)
self.calculate_bpm()
logger.info(f"Tempo for {self} set to {round(self._bpm, 2)}")
return self._audio
def load(self, path: Optional[str] = None) -> np.ndarray:
"""
Load an audio file from a given path.
Parameters
----------
path : Optional[str]
local path to audio file
if None, file_path attribute value used
Returns
-------
np.ndarray
mono representation of audio file
"""
if path is None:
path = str(self._file_path.resolve())
loader = es.MonoLoader(filename=path, sampleRate=SAMPLE_RATE)
self._audio = loader()
logger.info(f"Loaded audio for {self}")
return self._audio
def crop(self, offset: int, length: int) -> None:
"""
Crop track using number of downbeats.
Parameters
----------
offset : int
number of downbeats into original audio to crop from
length : int
number of downbeats that new audio will contain
"""
if self.downbeats.size == 0:
self.calculate_downbeats()
start_sample = int(self._downbeats[offset] * SAMPLE_RATE)
end_sample = int(self._downbeats[offset + length] * SAMPLE_RATE)
self._audio = self._audio[start_sample : end_sample + 1]
logger.info(
f"Cropped {self} audio between downbeats {offset} and {offset + length}"
)
def calculate_bpm(self) -> float:
"""
Determine BPM for audio using essentia
Returns
-------
bpm : float
tempo of audio file
"""
rhythm_extractor = es.RhythmExtractor2013(method="degara")
self._bpm, _, _, _, _ = rhythm_extractor(self._audio)
assert self._bpm is not None
logger.info(f"Calculated tempo for {self} at {round(self._bpm, 2)}")
return self._bpm
def calculate_downbeats(self) -> None:
"""
Use madmom downbeat tracking to estimate downbeat time points for audio file.
"""
proc = DBNDownBeatTrackingProcessor(beats_per_bar=[3, 4], fps=100)
act = RNNDownBeatProcessor()(self._audio)
proc_res = proc(act)
self._downbeats = proc_res[proc_res[:, 1] == 1, 0].astype(np.float32)
logger.info(f"Calculated downbeats for {self}")
class TrackGroupProcessor:
    """Holds a group of TrackProcessor instances and maintains their mean BPM."""
    def __init__(self) -> None:
        self._tracks: list[TrackProcessor] = []
        self._bpm: Optional[float] = None
    @property
    def bpm(self) -> Optional[float]:
        """Mean BPM of the group, or None if no track BPM is known yet."""
        return self._bpm
    @property
    def tracks(self) -> list[TrackProcessor]:
        return self._tracks
    def add_track(self, track: TrackProcessor) -> None:
        """
        Add a track to the track group.
        Parameters
        ----------
        track : TrackProcessor
            track to be added
        """
        self._tracks.append(track)
        self.calculate_bpm()
    def calculate_bpm(self):
        """
        Calculate average bpm of current tracks in group.

        Tracks whose BPM has not been computed yet (``track.bpm is None``)
        are skipped; if no track has a known BPM the group BPM stays None.
        """
        # The original summed every track.bpm directly, which raised
        # TypeError for tracks with an uncomputed (None) BPM and
        # ZeroDivisionError for an empty group.
        track_bpms = [track.bpm for track in self._tracks
                      if track.bpm is not None]
        if not track_bpms:
            self._bpm = None
            return
        self._bpm = sum(track_bpms) / len(track_bpms)
| joekitsmith/mixer | mixer/processors/track.py | track.py | py | 5,050 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numb... |
72498628583 | import os
import sys
import zipfile
import urllib.request
import filecmp
import shutil
import errno
import typing
import orjson
VERSIONS_JSON = "https://launchermeta.mojang.com/mc/game/version_manifest.json"
RELEASE_TYPES = typing.Literal["release", "snapshot"]
def fetch_json(url: str):
    """Download *url* and decode its body as JSON."""
    raw = urllib.request.urlopen(url).read()
    return orjson.loads(raw)
def get_urls(type: RELEASE_TYPES, number: int) -> list[str]:
    """Return manifest URLs for the newest ``number + 1`` versions of *type*.

    One extra version is collected so the caller can diff ``number``
    consecutive version pairs.
    """
    # NOTE(review): `global` is unnecessary here — VERSIONS_JSON is only read.
    global VERSIONS_JSON
    # Keyed by version id; insertion order (newest first) is preserved.
    urls = {}
    for item in fetch_json(VERSIONS_JSON)["versions"]:
        if len(urls) < (number + 1) and item["type"] == type:
            urls[item["id"]] = item["url"]
    return list(urls.values())
def save_temp(urls: list[str]) -> list[str]:
    """
    Download and extract the client jar of each version into temp/<id>/.

    Parameters
    ----------
    urls : list[str]
        version-manifest URLs as returned by get_urls

    Returns
    -------
    list[str]
        version ids, one per url, in the same order
    """
    names = []
    # makedirs(exist_ok=True) avoids the exists-then-mkdir race.
    os.makedirs("temp", exist_ok=True)
    for url in urls:
        # Fetch the manifest once per url (it was previously downloaded
        # twice: once for the id and once for the client download url).
        manifest = fetch_json(url)
        name = manifest["id"]
        names.append(name)
        os.mkdir(f"temp/{name}")
        with open(f"temp/{name}.zip", "wb") as f:
            f.write(
                urllib.request.urlopen(
                    manifest["downloads"]["client"]["url"]
                ).read()
            )
        # Context manager guarantees the archive handle is closed even if
        # extraction fails.
        with zipfile.ZipFile(f"temp/{name}.zip", "r") as zip_ref:
            zip_ref.extractall(f"temp/{name}")
    return names
def diff_folders(new: str, old: str, type: RELEASE_TYPES, delete_folder: bool = False):
    """Compare the minecraft texture trees of two extracted versions.

    Files present only in *new* are "added", files differing from *old*
    are "changed". On the first (non-recursive) call the function invokes
    itself with the versions swapped and delete_folder=True, so that files
    present only in *old* are collected as "deleted".
    """
    added = []
    changed = []
    deleted = []
    if not delete_folder:
        # Reverse pass: walking old-vs-new finds files removed in `new`.
        diff_folders(old, new, type, delete_folder=True)
    for root, _, files in os.walk(f"temp/{new}"):
        for name in files:
            src = os.path.join(root, name)
            # Only texture files are compared.
            if f"temp/{new}/assets/minecraft/textures/" in src:
                # NOTE(review): replace(new, old, 1) assumes the version id
                # first appears in the "temp/<id>" segment of the path.
                dest = src.replace(new, old, 1)
                if not delete_folder:
                    if not os.path.exists(dest):
                        added.append(src)
                    elif not filecmp.cmp(src, dest):
                        changed.append(src)
                elif not os.path.exists(dest):
                    deleted.append(src)
    for item in added:
        save_diff(new, f"../{type.capitalize()}s/{new}/added", item)
    for item in changed:
        save_diff(new, f"../{type.capitalize()}s/{new}/changed", item)
    for item in deleted:
        # Deletions are attributed to the *older* version's folder.
        save_diff(new, f"../{type.capitalize()}s/{old}/deleted", item)
def save_diff(base_folder: str, new_folder: str, item: str):
    """
    Copy a texture into the output tree, creating directories as needed.

    Parameters
    ----------
    base_folder : str
        version id whose texture root prefix is stripped from *item*
    new_folder : str
        destination root (e.g. "../Releases/<version>/added")
    item : str
        source file path under <base_folder>/assets/minecraft/textures/
    """
    dest = item.replace(f"{base_folder}/assets/minecraft/textures/", f"{new_folder}/")
    # Create the destination directory up front instead of retrying after
    # an ENOENT from shutil.copy (the old retry path also leaked `e`).
    os.makedirs(os.path.dirname(dest), exist_ok=True)
    shutil.copy(item, dest)
def main():
    """CLI entry point: ``compare.py <release|snapshot> <count>``."""
    release_type = sys.argv[1]
    number = int(sys.argv[2])
    if release_type not in {"release", "snapshot"}:
        print("Invalid release type")
        return
    if typing.TYPE_CHECKING:
        # Runtime no-op; narrows release_type for the type checker only.
        release_type = typing.cast(RELEASE_TYPES, release_type)
    print("Getting files...")
    urls = get_urls(release_type, number)
    folders = save_temp(urls)
    print("Comparing files...")
    # Diff each consecutive pair; save_temp returned number + 1 versions.
    for x in range(number):
        diff_folders(folders[x], folders[x + 1], release_type)
if __name__ == "__main__":
    main()
| AstreaTSS/mc-texture-changes | compare.py | compare.py | py | 3,236 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "typing.Literal",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "urllib.request.request.urlopen",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 16,
"usage_type": "attribute"
},
{
"ap... |
70376331945 | import XInput
from pynput import keyboard
from pygame import mixer
mixer.init()
import time
class XinputHandler(XInput.EventHandler):
    """Maps gamepad button presses onto the shared KeyMan state.

    Button ids are XINPUT_GAMEPAD bitmasks: D-pad and face buttons are
    mapped to hit '1', the shoulder buttons to hit '2'.
    """
    def __init__(self, keyMan):
        # Listen on all four controller slots.
        super().__init__(0, 1, 2, 3)
        self.keyMan = keyMan
    def process_button_event(self, event):
        if event.type == XInput.EVENT_BUTTON_PRESSED:
            # D-pad (0x1-0x8) and A/B/X/Y (0x1000-0x8000) -> hit '1'
            if event.button_id in [1,2,4,8, 4096, 8192, 16384, 32768]:
                self.keyMan.ckey = '1'
            # Left/right shoulder (0x100, 0x200) -> hit '2'
            if event.button_id in [256, 512]:
                self.keyMan.ckey = '2'
class KeyMan:
    """Aggregates the most recent 'hit' from keyboard and gamepad into `ckey`.

    Listeners run on background threads and overwrite `ckey` asynchronously;
    the editor loop samples it and calls reset() between timing windows.
    """
    def __init__(self):
        self.ckey = ''  # latest input: '1', '2', 'q', '0' (cleared) or '' (initial)
        keyboard.Listener(on_press=self.press).start()
        handler = XinputHandler(self)
        thread = XInput.GamepadThread()
        thread.add_event_handler(handler)
    def reset(self):
        # '0' marks "no input in this window".
        self.ckey = '0'
    def press(self, key: keyboard.HotKey):
        """Keyboard callback: j/f -> '1', k/d -> '2', q -> quit marker."""
        try:
            if key.char in 'jf':
                self.ckey = '1'
            elif key.char in 'kd':
                self.ckey = '2'
            elif key.char in 'q':
                self.ckey = 'q'
        except AttributeError:
            # Special keys (Shift, arrows, ...) have no .char attribute.
            pass
class Waiter:
    """Paces a loop against an absolute schedule so timing drift does not
    accumulate across iterations."""
    def __init__(self):
        self.lastTime = None

    def init(self):
        """Anchor the schedule at the current wall-clock time."""
        self.lastTime = time.time()

    def wait(self, sec):
        """Sleep until *sec* seconds after the previous deadline, then
        advance the deadline (even if we are already late)."""
        deadline = self.lastTime + sec
        remaining = deadline - time.time()
        if remaining > 0:
            time.sleep(deadline - time.time())
        self.lastTime = deadline
# Module-level singletons shared by the editor loop below.
key = KeyMan()
beep1 = mixer.Sound('beep1')  # measure-start beep
beep2 = mixer.Sound('beep2')  # sub-beat metronome beep
#
# Editor
#
def editor(beats, secPerBeat, length, soundSpan, lag):
    """Realtime note-chart recorder.

    Plays a metronome for `length` measures of `beats` slots each and
    samples the global KeyMan once per slot, `lag` seconds after the
    nominal beat center (input-latency compensation). Returns the sheet
    as a list of strings, one per measure, or None if `lag` is too large.
    """
    if secPerBeat/2-lag < 0 or secPerBeat/2+lag < 0:
        # Message is Japanese for "lag value is too large".
        print('lag値が大きすぎます')
        return
    sheet = []
    input('(press enter key to start)')
    key.reset()
    waiter = Waiter()
    waiter.init()
    for _ in range(length):
        line = ''
        # First slot of the measure always beeps (beep1).
        beep1.play()
        waiter.wait(secPerBeat/2+lag)
        line += key.ckey
        key.reset()
        waiter.wait(secPerBeat/2-lag)
        for i in range(beats-1):
            # Remaining slots beep (beep2) only every soundSpan-th beat.
            if (i+1) % soundSpan == 0:
                beep2.play()
            waiter.wait(secPerBeat/2+lag)
            line += key.ckey
            key.reset()
            waiter.wait(secPerBeat/2-lag)
        print(line)
        sheet.append(line)
    return sheet
def editor_ex(bpm: int, base: int, beats: int, length: int, soundSpan: int=1, lag=0):
    # base : number of reference beats per measure
    # beats : number of input slots per measure
    # length : number of measures
    # soundSpan : how often the metronome beep plays
    # lag : input-latency compensation in seconds
    secPerMeasure = (60/bpm) * base
    secPerBeat = secPerMeasure / beats
    return editor(beats, secPerBeat, length, soundSpan, lag)
| tsoushi/SimpleRealtimeTJAEditor | taiko_nothread.py | taiko_nothread.py | py | 2,738 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.mixer.init",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.mixer",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "XInput.EventHandler",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "XInput.EVENT_BUT... |
13422218557 | import colors
##################################################################
#This is the module used for testing correctness. It performs #
#safety, liveliness and fairness test on the list of values sent #
#from the monitor. #
##################################################################
def testCorrectness(rows,istoken):
    """Run the safety, liveliness and fairness checks on a monitor trace
    and print a colour-coded verdict for each.

    rows: sequence of (time, requested, entered_cs, released) entries
          where absent fields carry the string sentinel 'None'
    istoken: True for token-based algorithms (skips the release-sync check)
    """
    color = colors.bcolors()
    if safetyTest(rows,istoken) == True:
        print (color.OKGREEN+'Safety Test Passed!!!')
    else:
        print (color.FAIL+'Safety Test Failed!!!')
    if livelinessTest(rows) == True:
        print (color.OKGREEN + 'Liveliness Test Passed!!!')
    else:
        print (color.WARNING + 'Liveliness Test Failed!!!')
    if fairnessTest(rows) == True:
        print (color.OKGREEN + 'Fairness Test Passed!!!')
    else:
        print (color.WARNING + 'Fairness Test Failed!!!')
    # Reset terminal colour.
    print (color.ENDC)
def safetyTest(rows,istoken):
    """ This function tests the safety property of the algorithm.
    It does that in 2 steps: 1) CSSafe Test, 2) ReleaseSyncTest
    1) CSSafe Test: This test ensures that at any time 'T' only one process uses CS.
    2) ReleaseSync Test: This test ensures that only the process which executed CS, is releasing a resource.
    """
    csTest = isCSSafe(rows)
    if istoken:
        # NOTE(review): token-based runs skip the release-sync check —
        # presumably because token passing has no explicit release message.
        return csTest
    releaseTest = isReleaseSync(rows)
    return csTest and releaseTest
def isCSSafe(rows):
    """Return True iff no two processes occupy the CS at the same timestamp."""
    occupants = {}  # timestamp -> process seen in the CS at that time
    safe = True
    for row in rows:
        cs_proc = row[2]
        if cs_proc == 'None':
            continue
        timestamp = row[0]
        if timestamp not in occupants:
            occupants[timestamp] = cs_proc
        else:
            print ('!!!!!!!!!!!!!!!!'+str(cs_proc) + ' and ' + str(occupants[timestamp]) + 'are in the CS at the same time T=' + str(timestamp))
            safe = False
    print ("Is CS safe: " + str(safe))
    return safe
def isReleaseSync(rows):
    """Return True iff every CS release comes from the process currently
    holding the CS, and no process enters while it is still held."""
    occupant = 'None'
    for row in rows:
        entered, released = row[2], row[3]
        if entered != 'None':
            if occupant != 'None':
                # Someone entered while the CS was still occupied.
                return False
            occupant = entered
        if released != 'None':
            if released != occupant:
                # Release by a process that does not hold the CS.
                return False
            occupant = 'None'
    print ("Release is sync")
    return True
def livelinessTest(rows):
    """ This function checks if every process that requests for CS, eventually gets served"""
    # firstEntry   : True until any process has entered the CS
    # requestCount : outstanding (not-yet-served) CS requests
    # processInCS  : process currently holding the CS, or 'None'
    # release      : the previous row released the CS
    firstEntry = True
    requestCount = 0
    processInCS = 'None'
    release = False
    for row in rows:
        if row[1] != 'None':
            # A request is acceptable before the first CS entry or while
            # the CS is occupied; it then joins the waiting count.
            if firstEntry or processInCS != 'None':
                requestCount += 1
            elif release:
                # NOTE(review): a request arriving right after a release is
                # tolerated but NOT counted — confirm this is intentional.
                pass
            else:
                print ("Process " + str(row[1]) + " is unneccessarily waiting for CS at time " + str(row[0]))
                return False
        if row[2] != 'None':
            firstEntry = False
            processInCS = row[2]
        if row[3] == processInCS:
            # Matching release frees the CS; skip the idle check this row.
            processInCS = 'None'
            release = True
            continue
        # CS idle while requests are pending (after the first entry) means
        # some requester is being starved.
        if (processInCS == 'None' and requestCount != 0 and not firstEntry):
            return False
        release = False
    return True
def fairnessTest(rows):
    """ This function tests if the processes are being served in a fair way. The one who is waiting for long time
    must be given priority over others(FIFO)

    Returns False when a process enters the CS out of FIFO order, or when
    it enters with no outstanding request at all.
    """
    queue = []
    color = colors.bcolors()
    for row in rows:
        if row[1] != 'None':
            queue.append(row[1])
            continue
        if row[2] != 'None':
            # Entering out of FIFO order — or with an empty queue — violates
            # fairness. (The empty-queue case previously crashed with
            # IndexError on queue[0].)
            if not queue or queue[0] != row[2]:
                print (color.WARNING + "Process " + str(row[2]) + "jumped ahead of the queue. Fairness violated at " + str(row[0]) )
                return False
            queue.remove(row[2])
    return True
| NishanthMuruganandam/AsynchronousSystems | Correctness_Verif_Performance_Measure_DistAlgos/correctnessTester.py | correctnessTester.py | py | 3,322 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "colors.bcolors",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "colors.bcolors",
"line_number": 120,
"usage_type": "call"
}
] |
19340358372 | import json
import logging
import math
from django.db import IntegrityError
from django.db.models import F
from datetime import datetime, timedelta
from django.template.loader import render_to_string
from anodyne import settings
from api.models import Reading, Station, StationInfo, StationParameter, \
Exceedance, SMSAlert
from api.utils import send_mail, send_sms
log = logging.getLogger('vepolink')
class ToDatabase:
    """
    Persists a parsed station reading, records parameter exceedances and
    dispatches e-mail/SMS alerts for any limits that were breached.
    """
    def __init__(self, **kwargs):
        # Expected keys: 'prefix' (station prefix), 'readings' (parameter
        # name -> value plus 'timestamp') and 'basename' (source file name).
        self.kwargs = kwargs

    def send_alert(self, exceedances):
        """E-mail and SMS every exceedance record to the station contacts."""
        log.info('Sending Alert')
        log.info(exceedances)
        try:
            for exceedance in exceedances:
                station = exceedance.get('station')
                parameter = exceedance.get('parameter')
                param = StationParameter.objects.get(
                    station=station,
                    allowed=True,
                    parameter__name=parameter
                )
                context = {
                    'param': parameter,
                    'value': '%s %s against Pres. Stand. %s %s' % (
                        exceedance.get('value'), param.parameter.unit,
                        param.maximum, parameter),
                    'category': station.industry.type,
                    'industry': '%s, %s, %s' % (
                        station.industry, station.industry.city,
                        station.industry.state),
                    'timestamp': exceedance.get('timestamp').strftime(
                        '%a, %d-%b-%Y %H:%M'),
                    'alert_type': station.monitoring_type,
                    'location': param.monitoring_id
                }
                mail_receipients = station.user_email.split(';')
                html_content = render_to_string(
                    'alerts-mail/exceedance.html', context)
                send_mail(subject='Exceedance Alert',
                          recipient_list=mail_receipients,
                          cc=['info@anodyne.in'],
                          html_message=html_content,
                          message='',
                          from_email=settings.EMAIL_HOST_USER
                          )
                phone_receipients = station.user_ph
                # '%n' appears to be the SMS gateway's newline token; keep verbatim.
                sms_context = "SMS ALERT FROM VEPOLINK%nALERT: {alert_type}%nIndustry Name:{industry}%nCATEGORY:{category}%nLOCATION:{location}%nEXCEEDING PARAMETER:{param}%nVALUE:{value}%n{timestamp}%nAvg Value for last 15 Min%nRespond at customercare@anodyne.in".format(**context)
                log.info('Initiating Exceedance SMS')
                send_sms(numbers=phone_receipients, content=sms_context)
        except Exception:
            # Was a bare `except:`; keep the log-and-continue behaviour but
            # let SystemExit/KeyboardInterrupt propagate.
            log.exception('Failing to Send Mail alert')

    def check_exceedance(self, station, reading):
        """Record (and alert on) every allowed parameter outside its limits."""
        log.info('Checking exceedance %s' % station)
        q = {
            'param': F('parameter__name'),
            'min': F('minimum'),
            'max': F('maximum')
        }
        params = StationParameter.objects.filter(
            station=station,
            allowed=True
        ).values(**q)
        exceedances_rec = []
        for meta in params:
            exceedances = {}
            param = meta.get('param')
            pmax = float(meta.get('max', 0))
            pmin = float(meta.get('min', 0))
            if pmin == pmax or pmax == 0:
                # Limits not configured for this parameter.
                continue
            current_val = float(reading.get(param, 0))
            if current_val > pmax:
                exceedances.update({
                    'parameter': param,
                    'value': current_val,
                })
            # pH is also out of range when it drops below the minimum.
            # (The original condition `pmin > current_val > pmax` could
            # never be true when pmin < pmax, so low-pH breaches were
            # silently missed.)
            if param.lower() == 'ph' and current_val < pmin:
                exceedances.update({
                    'parameter': param,
                    'value': current_val,
                })
            if exceedances:
                log.info('Exceedances %s' % exceedances)
                exceedances.update({'timestamp': reading.get('timestamp'),
                                    'station': station})
                exceedances_rec.append(exceedances)
        if exceedances_rec:
            try:
                Exceedance.objects.bulk_create(
                    [Exceedance(**q) for q in exceedances_rec])
                log.info('Exceedance observed %s' % station)
            except IntegrityError:
                # Already recorded for this timestamp; still send alerts.
                pass
            self.send_alert(exceedances_rec)

    def _clean_reading(self, reading):
        """Return *reading* with numeric values rounded to 2 decimals.

        Non-numeric and NaN values are dropped (the timestamp is kept with
        a lower-cased key); returns None when nothing but the timestamp
        survives.
        """
        if reading:
            clean_reading = {}
            for k, v in reading.items():
                if k.lower() == 'timestamp':
                    k = k.lower()
                    clean_reading[k] = v
                else:
                    try:
                        value = float(v)
                        if not math.isnan(value):
                            clean_reading[k] = float('{0:.2f}'.format(value))
                    except ValueError:
                        pass
            if len(clean_reading.keys()) > 1:
                return clean_reading

    def insert(self):
        """Validate, store and post-process the reading.

        Returns a {'db': {'success': bool, 'msg': str}} status dict.
        """
        basename = self.kwargs.get('basename')
        response = {
            'success': False,
            'msg': ''
        }
        db_status = {
            'db': response
        }
        log.info('Adding to database:%s' % self.kwargs)
        try:
            readings = self._clean_reading(self.kwargs.get('readings'))
            if readings:
                station = Station.objects.get(prefix=self.kwargs.get('prefix'))
                self.check_exceedance(station, readings)
                Reading.objects.create(
                    station=station,
                    reading=readings
                )
                station.status = 'Live'
                station.save()
                sinfo, created = StationInfo.objects.get_or_create(
                    station=station)
                obj = sinfo if sinfo else created
                obj.last_seen = readings.get('timestamp')
                obj.last_upload_info = json.dumps(response)
                readings['timestamp'] = readings.get('timestamp').strftime(
                    '%Y-%m-%d %H:%M:%S ')
                obj.readings = json.dumps(readings)
                obj.save()
                log.info('Added to Reading successfully')
                response['success'] = True
                response['msg'] = "%s: Added Readings" % basename
            else:
                response['success'] = False
                response['msg'] = "%s: No Readings Found" % basename
        except IntegrityError:
            response['msg'] = "%s: Reading exists." % basename
            return db_status
        except Exception as err:
            response['success'] = False
            response['msg'] = "%s: Failed to readings to databse %s" % (
                basename, err
            )
            log.exception('DB ERROR')
            return db_status
        # The success and no-readings paths previously fell through and
        # returned None while the error paths returned the status dict;
        # return it consistently.
        return db_status
| anodyneweb/aw_backend | anodyne/anodyne/connectors/to_database.py | to_database.py | py | 6,957 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "api.models.StationParameter.objects.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "api.models.StationParameter.objects",
"line_number": 29,
"usage_type": "attribu... |
38043839602 | import xlrd #读取excel
import xlwt #写入excel
from datetime import date,datetime
def read_excel(name):
    """
    Convert ../data/<name>.xlsx (first sheet) into ../data/<name>.csv.

    The first row is written verbatim as the header; every other cell is
    coerced to int before being written (cells are assumed numeric).
    """
    workbook = xlrd.open_workbook('../data/' + name + '.xlsx')
    # First (and only) sheet; sheet indices start at 0.
    sheet = workbook.sheet_by_index(0)
    # `with` guarantees the CSV handle is flushed and closed — the original
    # opened the file and never closed it.
    with open('../data/' + name + '.csv', 'w+') as f:
        for k in range(sheet.nrows):
            rows = sheet.row_values(k)
            if k == 0:
                # Header row: keep cell text as-is.
                cells = [str(cell) for cell in rows]
            else:
                cells = [str(int(cell)) for cell in rows]
            print(','.join(cells), file=f)
if __name__ == "__main__":
    # Convert every monitored road's spreadsheet to CSV.
    for road in ('airport', 'lihua', 'zhenning', 'jianshe4', 'jianshe3',
                 'jianshe2', 'jianshe1'):
        read_excel(road)
{
"api_name": "xlrd.open_workbook",
"line_number": 7,
"usage_type": "call"
}
] |
74131826985 | import pytest
from framework.base_case import BaseCase
from framework.my_requests import MyRequests
from tests.assertions import Assertions
from tests.data_list_for_test import DataForCommon
# Shared fixture values for the request tests.
# NOTE(review): none of these constants are referenced by the tests visible
# in this file — confirm they are used elsewhere before removing them.
id_req = '123-abc-321'
name = 'Jack'
surname = 'Lee'
age = 50
method = 'select'
filter_phone = '1234567890'
class TestCommon(BaseCase):
    """Negative tests: the API must return a failure status for malformed
    request payloads."""
    def test_request_empty(self):
        # An empty JSON object must be rejected.
        response = MyRequests.any_method(data='{}')
        Assertions.check_failure_status_in_response(response=response)
    @pytest.mark.parametrize('data', DataForCommon.data_list_type_method)
    def test_check_type_methods(self, data):
        # 'method' field with a wrong value type must be rejected.
        response = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=response)
    @pytest.mark.parametrize('data', DataForCommon.data_list_without_field_method)
    def test_without_field_method(self, data):
        # Payloads missing the 'method' field must be rejected.
        response = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=response)
    @pytest.mark.parametrize('data', DataForCommon.data_list_method_is_none)
    def test_field_method_none(self, data):
        # 'method': null must be rejected.
        response = MyRequests.any_method(data=data)
        Assertions.check_failure_status_in_response(response=response)
| Bozmanok/qa-test | tests/test_common.py | test_common.py | py | 1,229 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "framework.base_case.BaseCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "framework.my_requests.MyRequests.any_method",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "framework.my_requests.MyRequests",
"line_number": 17,
"usage_typ... |
20603617401 | from selenium import webdriver
# Kiosk-style display switcher: keeps three browser tabs open and switches
# between them either via POSIX signals or via numbers typed on stdin.
page_type = -1 # set default
driver = webdriver.Firefox(executable_path="./geckodriver")
driver.fullscreen_window()
driver.implicitly_wait(30)
# for signal
import signal
'''
driver.window_handles[0] : Happy face (default)
driver.window_handles[1] : Map
driver.window_handles[2] : Sad face
'''
def signal_SIGUSR1_handler(signum, frame):
    """Switch to tab 0 (happy face)."""
    print("Signal switching by signum", signum)
    # NOTE(review): `global` is unnecessary here — driver is only read.
    global driver
    driver.switch_to.window(window_name=driver.window_handles[0])
def signal_SIGUSR2_handler(signum, frame):
    """Switch to tab 1 (map)."""
    print("Signal switching by signum", signum)
    global driver
    driver.switch_to.window(window_name=driver.window_handles[1])
def signal_SIGUSR3_handler(signum, frame):
    """Switch to tab 2 (sad face)."""
    print("Signal switching by signum", signum)
    global driver
    driver.switch_to.window(window_name=driver.window_handles[2])
signal.signal(signal.SIGUSR1, signal_SIGUSR1_handler) # mac : kill -30 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -30
signal.signal(signal.SIGUSR2, signal_SIGUSR2_handler) # mac : kill -31 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -31
# NOTE(review): SIGINFO exists only on BSD/macOS — this line raises
# AttributeError on Linux; confirm the deployment target.
signal.signal(signal.SIGINFO, signal_SIGUSR3_handler) # mac : kill -29 {pid}
# ps | grep chromeOpener | awk 'NR<2{print $1}' | xargs kill -29
while True:
    # default
    if page_type == -1:
        # First pass: open the three tabs and land on the default one.
        driver.get("http://localhost:8080/")
        driver.execute_script("window.open('');")
        driver.switch_to.window(window_name=driver.window_handles[1])
        driver.get("http://localhost:8080/sample2")
        driver.execute_script("window.open('');")
        driver.switch_to.window(window_name=driver.window_handles[2])
        driver.get("http://localhost:8080/sample3")
        # set default page
        driver.switch_to.window(window_name=driver.window_handles[0])
    elif page_type == 0:
        print("self switch to page number 0")
        driver.switch_to.window(window_name=driver.window_handles[0])
    elif page_type == 1:
        print("self switch to page number 1")
        driver.switch_to.window(window_name=driver.window_handles[1])
    elif page_type == 2:
        print("self switch to page number 2")
        driver.switch_to.window(window_name=driver.window_handles[2])
    else:
        # Any other number exits the loop.
        print("^^ㅗ")
        break
    # Block until the operator types the next tab number on stdin.
    page_type = int(input())
driver.quit()
| INYEONGKIM/tony-and-naeyo | ref/display-switching/selenium-ver/firefoxOpener.py | firefoxOpener.py | py | 2,350 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.Firefox",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "signal.signal",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "signal.SIGU... |
21527384857 | # -*- coding: utf-8 -*-
"""Document directory_store here."""
import codecs
import logging
import os
import platform
from six import string_types
from six.moves.urllib import parse as urllib
from oaiharvest.record import Record
class DirectoryRecordStore(object):
    """Stores harvested OAI records as XML files in a directory.

    Filenames are built from the record identifier and metadata prefix,
    percent-encoded so they are filesystem-safe. With createSubDirs set to
    a separator character, that character in identifiers maps to nested
    sub-directories.
    """
    def __init__(self, directory, createSubDirs=False):
        # directory: target root; createSubDirs: False, True, or a
        # character to translate into path separators.
        self.directory = directory
        self.createSubDirs = createSubDirs
        self.logger = logging.getLogger(__name__).getChild(self.__class__.__name__)
    def write(self, record: Record, metadataPrefix: str):
        """Write the record's metadata XML to its computed file path."""
        fp = self._get_output_filepath(record.header, metadataPrefix)
        self._ensure_dir_exists(fp)
        self.logger.debug("Writing to file {0}".format(fp))
        with codecs.open(fp, "w", encoding="utf-8") as fh:
            fh.write(record.metadata)
    def delete(self, record: Record, metadataPrefix: str):
        """Remove the record's file; missing files are silently ignored."""
        fp = self._get_output_filepath(record.header, metadataPrefix)
        try:
            os.remove(fp)
        except OSError:
            # File probably doesn't exist in destination directory
            # No further action needed
            # NOTE(review): empty debug message — probably meant to log fp.
            self.logger.debug("")
            pass
    def _get_output_filepath(self, header, metadataPrefix):
        """Build "<directory>/<quoted identifier>.<prefix>.xml"."""
        filename = "{0}.{1}.xml".format(header.identifier(), metadataPrefix)
        # Characters listed here are exempt from percent-encoding.
        protected = []
        if platform.system() != "Windows":
            # ':' is only illegal in Windows filenames.
            protected.append(":")
        if self.createSubDirs:
            if isinstance(self.createSubDirs, string_types):
                # Replace specified character with platform path separator
                filename = filename.replace(self.createSubDirs, os.path.sep)
                # Do not escape path separators, so that sub-directories
                # can be created
                protected.append(os.path.sep)
        filename = urllib.quote(filename, "".join(protected))
        fp = os.path.join(self.directory, filename)
        return fp
    def _ensure_dir_exists(self, fp):
        """Create the parent directory of *fp* (and ancestors) if missing."""
        if not os.path.isdir(os.path.dirname(fp)):
            # Missing base directory or sub-directory
            self.logger.debug("Creating target directory {0}".format(self.directory))
            os.makedirs(os.path.dirname(fp))
| bloomonkey/oai-harvest | oaiharvest/stores/directory_store.py | directory_store.py | py | 2,189 | python | en | code | 62 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "oaiharvest.record.Record",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "codecs.open",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "oaiharvest.rec... |
17170052915 | import logging
from flask import Flask, request
from picstitch import load_review_stars, load_amazon_prime, load_fonts, \
PicStitch
from gcloud import storage
import boto
import io
import time
import os
# # ---- Logging prefs -----
log_format = "[%(asctime)s] [%(process)d] [%(levelname)-1s] %(message)s"
date_format = "%Y-%m-%d %H:%M:%S"
logging.basicConfig(level=logging.INFO,
format=log_format,
datefmt=date_format)
application = Flask(__name__)
def get_s3():
    """Connect to AWS S3 (us-east-1) and return the chat-images bucket.

    Note: this opens a network connection on every call.
    """
    # aws stuff
    s3_region = 'us-east-1'
    s3_bucket_name = 'if-kip-chat-images'
    conn = boto.s3.connect_to_region(s3_region)
    s3_bucket = conn.get_bucket(s3_bucket_name)
    return s3_bucket
def upload_to_s3(image, s3_bucket=None):
    """
    Upload the rendered image to S3 and return its public URL.

    Parameters
    ----------
    image : object
        must expose ``created_image`` (PIL image), ``uniq_fn`` and
        ``bucket_name``
    s3_bucket : boto S3 bucket, optional
        destination bucket; connected lazily when omitted

    Returns
    -------
    str
        public https URL of the uploaded object
    """
    # The old signature used `s3_bucket=get_s3()`, which opened an S3
    # connection at module import time even if this function was never
    # called; a None default defers the connection to first use.
    if s3_bucket is None:
        s3_bucket = get_s3()
    tmp_img = io.BytesIO()
    image.created_image.save(tmp_img, 'PNG', quality=90)
    k = s3_bucket.new_key(image.uniq_fn)
    k.set_contents_from_string(tmp_img.getvalue(), headers={
        "Content-Type": "image/png"})
    s3_base = 'https://s3.amazonaws.com/' + image.bucket_name + '/'
    img_url = s3_base + image.uniq_fn
    return img_url
def get_gcloud():
    """Authenticate against Google Cloud Storage via the bundled service
    account key and return the chat-images bucket.

    Note: this opens a network connection on every call.
    """
    # gcloud stuff
    gcloud_config = {
        'proj_name': 'kip_styles',
        'key': 'gcloud-picstitch.json',
        'bucket': 'if-kip-chat-images'
    }
    # Key file lives next to this module under gcloud_key/.
    gcloud_key_file = os.path.join(
        os.path.dirname(__file__),
        'gcloud_key',
        gcloud_config['key']
    )
    gcloud_client = storage.Client(project=gcloud_config['proj_name'])
    gcloud_client = gcloud_client.from_service_account_json(gcloud_key_file)
    gcloud_bucket = gcloud_client.get_bucket(gcloud_config['bucket'])
    # gcloud_bucket.make_public(future=True)
    return gcloud_bucket
def upload_to_gcloud(image, gcloud_bucket=None):
    """
    Upload the rendered image to Google Cloud Storage and return its URL.

    Parameters
    ----------
    image : object
        must expose ``created_image`` (PIL image), ``origin`` and ``uniq_fn``
    gcloud_bucket : gcloud storage bucket, optional
        destination bucket; connected lazily when omitted

    Returns
    -------
    str
        public URL of the uploaded blob
    """
    # The old signature used `gcloud_bucket=get_gcloud()`, which opened a
    # second gcloud connection at module import time; defer to first use.
    if gcloud_bucket is None:
        gcloud_bucket = get_gcloud()
    start = time.time()
    tmp_img = io.BytesIO()
    image.created_image.save(tmp_img, 'PNG', quality=90)
    saved = time.time()
    object_upload = gcloud_bucket.blob(
        os.path.join(image.origin, image.uniq_fn))
    blobbed = time.time()
    object_upload.upload_from_string(
        tmp_img.getvalue(), content_type='image/png')
    uploaded = time.time()
    # Only log the per-stage timing breakdown for slow (>1s) uploads.
    if time.time() - start > 1:
        logging.info(
            'slow upload. save: %.2fs, blob create: %.2fs, string upload %2fs',
            saved - start, blobbed - saved, uploaded - blobbed)
    # public_url is a property func that appears to just be a string-format
    # call. Probably no value in instrumenting.
    return object_upload.public_url
@application.route('/', methods=['GET', 'POST'])
def main():
    """Render a stitched image from the JSON request body, upload it to
    Google Cloud Storage and return the resulting public URL as the
    response body. Make/upload timings are logged per request.
    """
    t1 = time.time()
    img_req = request.json
    logging.info('received req to make image')
    pic = PicStitch(img_req=img_req,
                    # bucket=s3_bucket,
                    # gcloud_bucket=gcloud_bucket,
                    amazon_prime_image=amazon_images,
                    review_stars_images=review_star_images,
                    font_dict=font_dict)
    t2 = time.time()
    # Uses the module-level bucket created at import time.
    gc_url = upload_to_gcloud(pic, gcloud_bucket)
    t3 = time.time()
    logging.info('request complete. make: %.2fs, upload: %.2fs, total: %.2fs to %s',
                 t2 - t1, t3 - t2, t3 - t1, gc_url)
    return gc_url
@application.route('/health')
def kubernetes_heath_check():
    """Liveness probe endpoint; always returns 200 with a static body."""
    return 'health'
# load connections to gcloud and aws
# These module-level singletons are shared by every request handler.
gcloud_bucket = get_gcloud()
review_star_images = load_review_stars()
amazon_images = load_amazon_prime()
font_dict = load_fonts()
if __name__ == '__main__':
    port_num = 5000
    # run app
    logging.info('__not_threaded__')
    logging.info('running app on port ' + str(port_num))
    # Flask development server; production runs under gunicorn (see the
    # [%(process)d] log format above) — debug=True is dev-only.
    application.run(host='0.0.0.0', port=port_num, debug=True)
| Interface-Foundry/IF-root | src/image_processing/server.py | server.py | py | 3,767 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.Flask",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "boto.s3.connect_to_... |
40403373260 | from dash import Dash, html, dcc
import plotly.express as px
import pandas as pd
import numpy as np
import statsmodels as sm
from scipy.stats import ttest_1samp
from statsmodels.stats.power import TTestPower
import plotly.express as px
import plotly.offline as pyo
import plotly.io as pio
from jupyter_dash import JupyterDash
from dash import Dash, dcc, html, Input, Output
from dash.dependencies import State
import chart_studio.plotly as py
app = Dash(__name__)
# Static layout: a heading, four labelled numeric inputs, a submit button
# and a div that the callback below fills with the result.
app.layout = html.Div([
    html.H1("P-value Simulation"),
    # html.Div([
    html.H4('# of Sims:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='nSims',
        value='Initial Value',
        type = "number",
    ),
    # ]),
    # html.Div([
    html.H4('Sample Mean:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='sample-mean',
        value='Initial Value',
        type = "number",
    ),
    # ]),
    # html.Div([
    html.H4('Sample Size:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='sample-size',
        value='Initial Value',
        type = "number",
    ),
    # ])
    html.H4('Std. Dev:', style={'display':'inline-block','margin-left':20,'margin-right':5}),
    dcc.Input(
        id='std-dev',
        value='Initial Value',
        type = "number",
    ),
    html.Br(),
    html.Button('Submit', id='submit_val'),
    html.Div(id='container-button-basic',
             children='Enter all parameters and click submit'),
    html.Hr(),
    html.Label('Output'),
    html.Div(id='output-submit')
])
@app.callback(
    Output('output-submit', 'children'),
    [Input('submit_val', 'n_clicks'),
     ],
    [State('nSims', 'value'),
     State('sample-mean', 'value'),
     State('sample-size', 'value'),
     State('std-dev', 'value')
     ]
)
def simulations_output(clicked, nSims, sample_mean, sample_size, std_dev):
    """Run nSims one-sample t-tests (H0: mu=100) on normal samples and
    render a histogram of the resulting p-values.

    The dashed red line marks the expected per-bin count under a uniform
    p-value distribution (i.e. when H0 is true).
    """
    if clicked:
        p_value_list = []
        np.random.seed(1)  # fixed seed: identical inputs give identical plots
        # range(nSims) runs exactly nSims simulations; the original
        # range(1, nSims) silently ran one fewer.
        for i in range(nSims):
            x = np.random.normal(loc=sample_mean, scale=std_dev, size=sample_size)
            t_stat, p_value = ttest_1samp(x, popmean=100)
            p_value_list.append(p_value)
        hist_df = pd.DataFrame({"p_values": p_value_list})
        bars = 20  # matches the 0.05-wide bins below
        fig = px.histogram(hist_df, x="p_values")
        fig.update_traces(xbins=dict(  # bins used for histogram
            start=0.0,
            end=1.0,
            size=0.05
        ))
        fig.update_layout(yaxis_range=[0, nSims], yaxis_title="Frequency of p-values", margin=dict(l=5, r=5, t=5, b=5))
        fig.add_hline(y=nSims/bars, line_width=3, line_dash="dash", line_color="red")
        # Return a Dash component: the original returned fig.show(), which
        # opens the figure outside the app and hands None to the layout.
        return dcc.Graph(figure=fig)
if __name__ == '__main__':
    # Launch the Dash development server (not for production use).
    app.run_server(debug=True)
| ashton77/statistical-simulations | simulation_app.py | simulation_app.py | py | 3,106 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dash.Dash",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dash.html.Div",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dash.html",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "dash.html.H1",
"line_number": 1... |
42242738770 | #!/usr/bin/env python
import numpy
import scipy.integrate
from pylab import *
datafile="../../../Mathematica/calculated_vals.tsv"  # reference values for comparison
# Columns of data.txt: separation tag, integration variable x, e, integrand f.
tag,x,e,f = numpy.loadtxt("data.txt",unpack=True)
tags=numpy.unique(tag)
flimit = numpy.zeros(len(tags))  # one integrated value per separation tag
for i in range(0,len(tags)):
    itag=tags[i]
    inds = numpy.where(tag == itag)
    xplot=x[inds]
    yplot=-f[inds]*31e-15  # flip sign and rescale (31e-15: unit factor — TODO confirm)
    # Sort by x so both the plot and the trapezoid rule see a monotone grid.
    isort=numpy.argsort(xplot)
    xplot = xplot[isort]
    yplot = yplot[isort]
    plot(xplot,yplot)
    # NOTE(review): scipy.integrate.trapz signature is trapz(y, x); here the
    # abscissa is passed first and the negated ordinate second — confirm the
    # intended argument order.
    flimit[i] = scipy.integrate.trapz(xplot,-yplot)
yscale('log')
xscale('log')
savefig('integrands.png')
clf()
# Reference curves computed in Mathematica (distance plus four force models).
dist,fpfa,fnaive,fright,ftemp=numpy.loadtxt(datafile,unpack=True)
dist=dist*1e6  # rescale distance by 1e6 (presumably metres -> micrometres; confirm)
plot(tags,flimit)
plot(dist,fpfa)
plot(dist,fright)
plot(dist,ftemp)
xscale('log')
yscale('log')
show()
| charlesblakemore/opt_lev_analysis | casimir/scuffCode/Comparison/byXi/plot_integrand.py | plot_integrand.py | py | 773 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.loadtxt",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number"... |
7405277670 | import numpy as np
import math
import re
import feedparser as fp
def loadDataSet():
    """Return a toy corpus of tokenized posts and their class labels.

    Labels: 1 = abusive post, 0 = normal post.
    """
    postingList = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive, 0 = normal
    return postingList, classVec
def createVocabList(dataSet):
    """Build the deduplicated vocabulary over every document in dataSet."""
    vocab = set()
    for doc in dataSet:
        vocab.update(doc)  # fold each document's new words into the set
    return list(vocab)
def setOfWords2Vec(vocabList, inputSet):
    """Set-of-words model: 0/1 presence vector aligned with vocabList."""
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        try:
            returnVec[vocabList.index(word)] = 1
        except ValueError:  # word not present in the vocabulary
            print("the word: %s is not in my Vocabulary!"%word)
    return returnVec
def trainNB0(trainMatrix, trainCategory):
    """Train naive Bayes with Laplace smoothing.

    trainMatrix holds word-count vectors; trainCategory the 0/1 labels.
    Returns (log P(word|class 0), log P(word|class 1), P(class 1)).
    """
    numDocs = len(trainMatrix)
    numWords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numDocs)  # prior of class 1
    # Laplace smoothing: counts start at 1, denominators at 2.
    p0Num = np.ones(numWords)
    p1Num = np.ones(numWords)
    p0Denom = 2.0
    p1Denom = 2.0
    for vec, label in zip(trainMatrix, trainCategory):
        if label == 1:
            p1Num += vec
            p1Denom += sum(vec)
        else:
            p0Num += vec
            p0Denom += sum(vec)
    # Log-probabilities avoid underflow when many terms are multiplied.
    p1Vect = [math.log(v) for v in p1Num / p1Denom]
    p0Vect = [math.log(v) for v in p0Num / p0Denom]
    return p0Vect, p1Vect, pAbusive
def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    """Return 1 (abusive) or 0 (normal) by comparing log posteriors."""
    logp1 = sum(vec2Classify * p1Vec) + math.log(pClass1)
    logp0 = sum(vec2Classify * p0Vec) + math.log(1.0 - pClass1)
    return 1 if logp1 > logp0 else 0
def testingNB():
    """Smoke-test the classifier on two hand-picked posts from the toy corpus."""
    listOPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listOPosts)
    trainMat = [setOfWords2Vec(myVocabList, doc) for doc in listOPosts]
    p0V, p1V, pAb = trainNB0(np.array(trainMat), np.array(listClasses))
    for testEntry in (['love', 'my', 'dalmation'], ['stupid', 'garbage']):
        thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
        print(testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb))
def bagOfWords2VecMN(vocabList, inputSet):
    """Bag-of-words model: per-word occurrence counts aligned with vocabList."""
    counts = [0] * len(vocabList)
    for word in inputSet:
        if word not in vocabList:
            continue  # out-of-vocabulary words are ignored silently
        counts[vocabList.index(word)] += 1
    return counts
def textParse(bigString):
    """Tokenize a raw string into lowercase words longer than 2 characters.

    Bug fix: the original split on r'\W*', a pattern that matches the empty
    string, which makes re.split cut between every character (and is rejected
    or warned about on modern Python). r'\W+' — runs of non-word characters —
    is the intended separator.
    """
    listOfTokens = re.split(r'\W+', bigString)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]  # drop short tokens
def spamTest():
    """Train/test naive Bayes spam filtering on the email/ corpus.

    Loads 25 spam and 25 ham messages, holds out 10 random messages as a
    test set, trains on the remainder and prints the error rate.

    Bug fix: trainingSet was ``range(50)``; ``del`` on a range object raises
    TypeError on Python 3, so it must be materialized as a list first.
    """
    docList = []
    classList = []
    fullText = []
    for i in range(1, 26):
        # 'with' closes each message file (the original leaked the handles).
        with open('email/spam/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)  # label 1 = spam
        with open('email/ham/%d.txt' % i) as fh:
            wordList = textParse(fh.read())
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)  # label 0 = ham
    vocabList = createVocabList(docList)
    trainingSet = list(range(50))  # bug fix: list(), so del works below
    testSet = []
    for i in range(10):  # hold out 10 random messages for testing
        # uniform(x, y) draws a real in [x, y); truncation gives a valid index
        randIndex = int(np.random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB0(np.array(trainMat), np.array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWords2VecMN(vocabList, docList[docIndex])
        if classifyNB(np.array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
            print("classification error", docList[docIndex])
    print('the error rate is: ', float(errorCount) / len(testSet))
# Interactive menu: 1 = show word vectors, 2 = show trained probabilities,
# 3 = classify two demo posts, 4 = raw regex tokenization demo,
# 5 = spam-filter experiment, 6 = fetch an RSS feed via feedparser.
select = int(input("请输入你要选择的操作:"))
if select == 1:
    # Build the vocabulary and print 0/1 presence vectors for two posts.
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    print(setOfWords2Vec(myVocabList, listPosts[0]))
    print(setOfWords2Vec(myVocabList, listPosts[3]))
elif select == 2:
    # Train on the toy corpus and print the prior and log-probability vectors.
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    trainMat = []
    for postinDoc in listPosts:
        trainMat.append(setOfWords2Vec(myVocabList, postinDoc))
    p0V, p1V, pAb = trainNB0(trainMat, listClasses)
    print(pAb)
    print(p0V)
    print(p1V)
elif select == 3:
    testingNB()
elif select == 4:
    # NOTE(review): '\\W*' can match the empty string, so this split cuts
    # between every character on modern Python — '\\W+' is likely intended.
    emailText = open('email/ham/6.txt').read()
    regEx = re.compile('\\W*')
    print(regEx.split(emailText))
elif select == 5:
    spamTest()
elif select == 6:
    # Fetch a Craigslist RSS feed and report how many entries it contains.
    ny = fp.parse('http://newyork.craigslist.org/stp/index.rss')
    print(len(ny['entries']))
| GuoBayern/MachineLearning | bayes.py | bayes.py | py | 5,884 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 45,
... |
71326379303 | import numpy as np
import sympy as sp
import math
from matplotlib import pyplot as plt
#BEGIN EXERCISE 1
def left_endpoint_sum(f, a, b, n):
    """Left Riemann sum of f over [a, b] using n equal subintervals."""
    width = (b - a) / n
    # Sample f at the left endpoint of each subinterval: a, a+w, ..., a+(n-1)w.
    return width * sum(f(a + k * width) for k in range(n))
def right_endpoint_sum(f, a, b, n):
    """Right Riemann sum of f over [a, b] using n equal subintervals."""
    width = (b - a) / n
    # Sample f at the right endpoint of each subinterval: a+w, ..., a+n*w.
    return width * sum(f(a + k * width) for k in range(1, n + 1))
#END EXERCISE 1
#BEGIN EXERCISE 2
# Compare left vs right Riemann sums of f(x) = e^(-x^2) on [0, 0.5]
# for 5, 10 and 100 subdivisions, overlaying step plots on the curve.
x = sp.symbols('x')  # NOTE(review): unused below — f is a plain lambda, not symbolic
f = lambda x : math.e**(-1*x**2)
LRS5 = left_endpoint_sum(f, 0, 0.5, 5) #Left Riemann Sum with 5 intervals
RRS5 = right_endpoint_sum(f, 0, 0.5, 5) #Right Riemann Sum with 5 intervals
LRS10 = left_endpoint_sum(f, 0, 0.5, 10)
RRS10 = right_endpoint_sum(f, 0, 0.5, 10)
LRS100 = left_endpoint_sum(f, 0, 0.5, 100)
RRS100 = right_endpoint_sum(f, 0, 0.5, 100)
# Dense grid for the smooth curve; coarse grids matching each partition.
xfunc = np.linspace(0,0.5,1001)
x5 = np.linspace(0,0.5,6)
x10 = np.linspace(0,0.5,11)
x100 = np.linspace(0,0.5,101)
yfunc = f(xfunc)
y5 = f(x5)
y10 = f(x10)
y100 = f(x100)
# where="post" holds each value to the right (left sum); "pre" to the left (right sum).
plt.step(x5, y5, 'b', where="post", label="Left Riemann Sum = " + str(LRS5))
plt.step(x5, y5, 'm', where="pre", label="Right Riemann Sum = " + str(RRS5))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 5 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x10, y10, 'b', where="post", label="Left Riemann Sum = " + str(LRS10))
plt.step(x10, y10, 'm', where="pre", label="Right Riemann Sum = " + str(RRS10))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 10 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x100, y100, 'b', where="post", label="Left Riemann Sum = " + str(LRS100))
plt.step(x100, y100, 'm', where="pre", label="Right Riemann Sum = " + str(RRS100))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 100 divisions")
plt.legend(fontsize=9)
plt.show()
#END EXERCISE 2
#BEGIN EXERCISE 3
def midpoint_sum(f, a, b, n):
    """Midpoint Riemann sum of f over [a, b] using n equal subintervals.

    Bug fix: the original iterated ``range(0.5, n + 0.5, 1)``, but range()
    only accepts integers and raises TypeError. Sampling f at the midpoints
    a + (k + 0.5) * d for integer k = 0..n-1 gives the intended sum.
    """
    d = (b - a) / n
    total = 0
    for k in range(n):
        total += f(a + (k + 0.5) * d)  # midpoint of the k-th subinterval
    return d * total
def trapezoid_sum(f, a, b, n):
    """Trapezoidal-rule approximation of the integral of f over [a, b]."""
    d = (b - a) / n
    # Endpoints weighted once, interior points twice, then halved.
    acc = f(a) + f(b)
    for k in range(1, n):
        acc += 2 * f(a + k * d)
    return 0.5 * d * acc
#END EXERCISE 3
#BEGIN EXERCISE 4
# Compare midpoint vs trapezoidal sums of f(x) = e^(-x^2) on [0, 0.5].
# NOTE(review): midpoint_sum as originally written passes floats to range(),
# which raises TypeError — this section needs the integer-loop fix to run.
x = sp.symbols('x')  # NOTE(review): unused — f below is a plain lambda
f = lambda x : math.e**(-1*x**2)
MPS5 = midpoint_sum(f, 0, 0.5, 5) #Midpoint Sum with 5 intervals
TPS5 = trapezoid_sum(f, 0, 0.5, 5) #Trapezoidal Sum with 5 intervals
MPS10 = midpoint_sum(f, 0, 0.5, 10)
TPS10 = trapezoid_sum(f, 0, 0.5, 10)
MPS100 = midpoint_sum(f, 0, 0.5, 100)
TPS100 = trapezoid_sum(f, 0, 0.5, 100)
# Dense grid for the smooth curve; coarse grids matching each partition.
xfunc = np.linspace(0,0.5,1001)
x5 = np.linspace(0,0.5,6)
x10 = np.linspace(0,0.5,11)
x100 = np.linspace(0,0.5,101)
yfunc = f(xfunc)
y5 = f(x5)
y10 = f(x10)
y100 = f(x100)
# where="mid" centers each step on its sample point (midpoint picture).
plt.step(x5, y5, 'y', where="mid", label="Midpoint Sum = " + str(MPS5))
plt.plot(x5, y5, 'r', label="Trapezoidal Sum = " + str(TPS5))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 5 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x10, y10, 'y', where="mid", label="Midpoint Sum = " + str(MPS10))
plt.plot(x10, y10, 'r', label="Trapezoidal Sum = " + str(TPS10))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 10 divisions")
plt.legend(fontsize=9)
plt.show()
plt.step(x100, y100, 'y', where="mid", label="Midpoint Sum = " + str(MPS100))
plt.plot(x100, y100, 'r', label="Trapezoidal Sum = " + str(TPS100))
plt.plot(xfunc,yfunc, 'k')
plt.title("f(x) = e**(-x**2) with 100 divisions")
plt.legend(fontsize=9)
plt.show()
#END EXERCISE 4
#BEGIN EXERCISE 5
def simpsons_rule_sum(f, a, b, n):
    """Composite Simpson's rule over [a, b] with n (even) subintervals."""
    assert n % 2 == 0, "n must be even"
    d = (b - a) / n
    # Endpoints weighted 1; odd interior points weighted 4, even ones 2.
    acc = f(a) + f(b)
    for k in range(1, n):
        acc += (4 if k % 2 else 2) * f(a + k * d)
    return (1/3) * d * acc
#END EXERCISE 5
#BEGIN EXERCISE 6
# Simpson's rule approximations of the same integral, then rank every
# approximation computed so far by value.
x = sp.symbols('x')  # NOTE(review): unused — f below is a plain lambda
f = lambda x : math.e**(-1*x**2)
SRS10 = simpsons_rule_sum(f, 0, 0.5, 10) #Simpsons Rule Sum with 10 intervals
SRS100 = simpsons_rule_sum(f, 0, 0.5, 100)
print(SRS10)
print(SRS100)
#END EXERCISE 6
# Collect (name, value) pairs from exercises 2, 4 and 6 and sort by value.
Approximations = []
Approximations += [("LRS5",LRS5), ("LRS10",LRS10), ("LRS100",LRS100)]
Approximations += [("RRS5",RRS5), ("RRS10",RRS10), ("RRS100",RRS100)]
Approximations += [("MPS5",MPS5), ("MPS10",MPS10), ("MPS100",MPS100)]
Approximations += [("TPS5",TPS5), ("TPS10",TPS10), ("TPS100",TPS100)]
Approximations += [("SRS10",SRS10), ("SRS100",SRS100)]
Approximations.sort(key = lambda y : y[1])  # ascending by approximation value
print(Approximations)
#BEGIN EXERCISE 7
# Error bounds: plot |f''| on [0, 0.5], take K = max|f''| (attained at 0 here),
# then bracket the n=10 midpoint/trapezoid/Simpson approximations.
x = sp.symbols('x')  # NOTE(review): unused — derivatives below are hand-coded
f = lambda x : math.e**(-1*x**2)
# |f''(x)| for f(x) = e^(-x^2): |4x^2 e^(-x^2) - 2 e^(-x^2)|
fppmag = lambda x : abs(4*x**2 * math.e**(-1*x**2) - 2*math.e**(-1*x**2))
xtest = np.linspace(0,0.5,101)
ytest = fppmag(xtest)
plt.plot(xtest,ytest)
plt.show()
K = fppmag(0)  # max of |f''| on [0, 0.5] (visible from the plot above)
def midpoint_errorf(k,a,b,n):
    """Midpoint-rule error bound: K(b-a)^3 / (24 n^2)."""
    return (k*(b-a)**3) / (24*n**2)
def trapezoid_errorf(k,a,b,n):
    """Trapezoidal-rule error bound: K(b-a)^3 / (12 n^2)."""
    return (k*(b-a)**3) / (12*n**2)
def simpsons_errorf(k,a,b,n):
    """Simpson's-rule error bound: K(b-a)^5 / (180 n^4).

    NOTE(review): the Simpson bound uses K = max|f''''|, not max|f''|;
    the K computed above is the second-derivative bound.
    """
    return (k*(b-a)**5) / (180*n**4)
T10 = trapezoid_errorf(K,0,0.5,10)
M10 = midpoint_errorf(K,0,0.5,10)
S10 = simpsons_errorf(K,0,0.5,10)
# Intervals guaranteed to contain the true integral (approximation +/- bound).
Trange = (TPS10 - T10, TPS10 + T10)
Mrange = (MPS10 - M10, MPS10 + M10)
Srange = (SRS10 - S10, SRS10 + S10)
print(Trange) #T10
print(Mrange) #M10
print(Srange) #S10
#END EXERCISE 7
#BEGIN EXERCISE 8
# All five rules on f(x) = 1/x over [1, 5]; compare estimated error ranges
# against the true errors (the exact integral is ln 5).
x = sp.symbols('x')  # NOTE(review): unused — f below is a plain lambda
f = lambda x : 1/x
L10 = left_endpoint_sum(f,1,5,10)
# Bug fix: R10 originally called left_endpoint_sum again (copy-paste), which
# made L10 == R10 and every endpoint error estimate collapse to zero.
R10 = right_endpoint_sum(f,1,5,10)
T10 = trapezoid_sum(f,1,5,10)
M10 = midpoint_sum(f,1,5,10)
S10 = simpsons_rule_sum(f,1,5,10)
# |f''(x)| for f(x) = 1/x is 2/x^3; it is maximal at x = 1 on [1, 5].
fppmag = lambda x : abs(2/(x**3))
xtest = np.linspace(1,5,101)
# Bug fix: this was assigned to 'yest', so the plot below silently reused the
# stale 'ytest' array left over from Exercise 7.
ytest = fppmag(xtest)
plt.plot(xtest,ytest)
plt.show()
K = fppmag(1)
# Endpoint-rule error estimated via the left/right spread.
LE = L10-R10
RE = L10-R10
TE = trapezoid_errorf(K,1,5,10)
ME = midpoint_errorf(K,1,5,10)
SE = simpsons_errorf(K,1,5,10)
Lrange = (L10-LE,L10+LE)
Rrange = (R10-RE,R10+RE)
Trange = (T10-TE,T10+TE)
Mrange = (M10-ME,M10+ME)
Srange = (S10-SE,S10+SE)
print(Lrange)
print(Rrange)
print(Trange)
print(Mrange)
print(Srange)
# True errors against the exact value: integral of 1/x over [1, 5] = ln 5.
LET = L10 - math.log(5)
RET = R10 - math.log(5)
TET = T10 - math.log(5)
MET = M10 - math.log(5)
SET = S10 - math.log(5)
print(LET) #True lefthand error
print(RET) #True righthand error
print(TET) #True trapezoidal error
print(MET) #True midpoint error
print(SET) #True simpsons error
#END EXERCISE 8
| Drew-Morris/Real-Analysis-PY | Integration/Integration-Vim.py | Integration-Vim.py | py | 6,124 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sympy.symbols",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.e",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.linspace",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_n... |
21577690614 | import yaml
import torch
import torch.nn as nn
from . import layers
class Model(nn.Module):
    """Network assembled from a YAML layer specification.

    Each entry of the YAML ``layers`` list is
    ``[sources, layer_name, args, kwargs, num]`` where ``sources`` are the
    input layer indices (negative = relative reference to an earlier layer;
    non-negative = external model input) and ``num`` repeats the layer that
    many times at forward time.
    """
    def __init__(self, yaml_file):
        super(Model, self).__init__()
        # Bug fix: the fallback branch used to call file.read() a second
        # time, which returns '' because the handle was already consumed.
        # Read once, then retry parsing with the legacy signature.
        with open(yaml_file, 'r') as file:
            text = file.read()
        try:
            model_cfg = yaml.load(text, Loader=yaml.FullLoader)
        except (TypeError, AttributeError):
            # Old PyYAML versions have no FullLoader / Loader kwarg.
            model_cfg = yaml.load(text)
        self.layers = []
        self.sources = []       # per layer: resolved input indices
        self.nums = []          # per layer: repetition count
        self.input_indexs = set()  # layers fed by external model inputs
        layers_config = model_cfg['layers']
        for n, line in enumerate(layers_config):
            sources, layer_name, args, kwargs, num = line
            if isinstance(sources, int):
                sources = [sources]
            if not isinstance(num, int) or num <= 0:
                assert False, "layer's num must be int and > 0"
            # NOTE(review): eval() trusts layer_name from the config file —
            # only load YAML files from trusted sources.
            self.layers.append(
                eval(f"layers.{layer_name}")(*args, **kwargs)
            )
            indexs = []
            for source in sources:
                if source < 0:
                    # Relative reference to a layer already declared.
                    index = len(self.sources) + source
                    assert index >= 0, "找不到输入层"
                    indexs.append(index)
                else:
                    # Non-negative source k maps to external input slot -(k+1),
                    # matching the keys seeded in forward().
                    self.input_indexs.add(n)
                    indexs.append(-(source + 1))
            self.sources.append(indexs)
            self.nums.append(num)
        # Output layers are those whose result is never consumed by another layer.
        all_indexs = set()
        index_been_used = set()
        for i, indexs in enumerate(self.sources):
            all_indexs.add(i)
            for index in indexs:
                index_been_used.add(index)
        self.output_indexs = all_indexs - index_been_used
        self.layers = nn.Sequential(*self.layers)
    def get_layer_output(self, index, forward_dict):
        """Recursively compute (and memoize in forward_dict) layer ``index``'s output."""
        if index in forward_dict.keys():
            return forward_dict[index]
        else:
            source_outputs = []
            for source_index in self.sources[index]:
                source_outputs.append(self.get_layer_output(source_index, forward_dict))
            output = self.layers[index](*source_outputs)
            # Apply the same layer num-1 more times, feeding its own output back.
            for i in range(self.nums[index] - 1):
                if not isinstance(output, list):
                    output = [output]
                output = self.layers[index](*output)
            forward_dict[index] = output
            return output
    def forward(self, *inputs, **kwargs):
        """Run the graph; returns a single output or a list when several layers are terminal."""
        assert len(inputs) == len(self.input_indexs), ""
        forward_dict = {}
        # External input i is stored under key -(i+1); see __init__.
        for i, input in enumerate(inputs):
            forward_dict[-(i + 1)] = input
        outputs = [self.get_layer_output(output_index, forward_dict) for output_index in self.output_indexs]
        if len(outputs) == 1:
            return outputs[0]
        return outputs
| IMath123/imath | Model/__init__.py | __init__.py | py | 2,826 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "yaml.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "yaml.FullLoader",
"line_num... |
30239035937 | """
This module provides functions for justifying Unicode text in a monospaced
display such as a terminal.
We used to have our own implementation here, but now we mostly rely on
the 'wcwidth' library.
"""
from unicodedata import normalize
from wcwidth import wcswidth, wcwidth
from ftfy.fixes import remove_terminal_escapes
def character_width(char: str) -> int:
    r"""
    Determine the width that a character is likely to be displayed as in
    a monospaced terminal. The width for a printable character will
    always be 0, 1, or 2.
    Nonprintable or control characters will return -1, a convention that comes
    from wcwidth.
    >>> character_width('車')
    2
    >>> character_width('A')
    1
    >>> character_width('\N{ZERO WIDTH JOINER}')
    0
    >>> character_width('\n')
    -1
    """
    # Thin wrapper: the width lookup is delegated entirely to wcwidth.
    return wcwidth(char)
def monospaced_width(text: str) -> int:
    r"""
    Return the number of character cells that this string is likely to occupy
    when displayed in a monospaced, modern, Unicode-aware terminal emulator.
    We refer to this as the "display width" of the string.
    This can be useful for formatting text that may contain non-spacing
    characters, or CJK characters that take up two character cells.
    Returns -1 if the string contains a non-printable or control character.
    >>> monospaced_width('ちゃぶ台返し')
    12
    >>> len('ちゃぶ台返し')
    6
    >>> monospaced_width('owl\N{SOFT HYPHEN}flavored')
    12
    >>> monospaced_width('example\x80')
    -1
    A more complex example: The Korean word 'ibnida' can be written with 3
    pre-composed characters or 7 jamo. Either way, it *looks* the same and
    takes up 6 character cells.
    >>> monospaced_width('입니다')
    6
    >>> monospaced_width('\u110b\u1175\u11b8\u1102\u1175\u1103\u1161')
    6
    The word "blue" with terminal escapes to make it blue still takes up only
    4 characters, when shown as intended.
    >>> monospaced_width('\x1b[34mblue\x1b[m')
    4
    """
    # NFC-normalize the text first, so that we don't need special cases for
    # Hangul jamo.
    #
    # Remove terminal escapes before calculating width, because if they are
    # displayed as intended, they will have zero width.
    return wcswidth(remove_terminal_escapes(normalize("NFC", text)))
def display_ljust(text, width, fillchar=" "):
    """
    Left-justify `text` within `width` terminal cells, padding the end with
    `fillchar`, which must occupy exactly one cell.

    "Left" means toward the beginning of the string (which may render on the
    right in an RTL context). If `text` contains a control or non-printable
    character its display width is undefined, so it is returned unpadded.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")
    cells = monospaced_width(text)
    if cells == -1:
        # Control characters make the width undefined; skip padding entirely.
        return text
    return text + fillchar * max(0, width - cells)
def display_rjust(text, width, fillchar=" "):
    """
    Right-justify `text` within `width` terminal cells, padding the start
    with `fillchar`, which must occupy exactly one cell.

    "Right" means toward the end of the string (which may render on the left
    in an RTL context). If `text` contains a control or non-printable
    character its display width is undefined, so it is returned unpadded.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")
    cells = monospaced_width(text)
    if cells == -1:
        return text
    return fillchar * max(0, width - cells) + text
def display_center(text, width, fillchar=" "):
    """
    Center `text` within `width` terminal cells, padding both sides with
    `fillchar`, which must occupy exactly one cell. When the padding is odd,
    the extra cell goes on the right. Text containing control characters is
    returned unpadded.
    """
    if character_width(fillchar) != 1:
        raise ValueError("The padding character must have display width 1")
    cells = monospaced_width(text)
    if cells == -1:
        return text
    pad = max(0, width - cells)
    left = pad // 2
    return fillchar * left + text + fillchar * (pad - left)
| rspeer/python-ftfy | ftfy/formatting.py | formatting.py | py | 5,798 | python | en | code | 3,623 | github-code | 36 | [
{
"api_name": "wcwidth.wcwidth",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "wcwidth.wcswidth",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "ftfy.fixes.remove_terminal_escapes",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "u... |
22889044030 | import sys
from PyQt5 import QtCore, QtWidgets, uic
import mysql.connector as mc
from PyQt5.QtWidgets import QTableWidgetItem
from PyQt5.QtWidgets import QMessageBox
from FrmMatakuliah import WindowMatakuliah
qtcreator_file = "dashboard_admin.ui" # Enter file here.
Ui_MainWindow, QtBaseClass = uic.loadUiType(qtcreator_file)
class WindowDashboardAdmin(QtWidgets.QMainWindow, Ui_MainWindow):
    """Admin dashboard main window: loads the .ui layout and wires menu actions."""
    def __init__(self):
        QtWidgets.QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        # Event Setup: connect menu actions to their handlers.
        self.actionExit.triggered.connect(self.app_exit)
        self.actionMatakuliah_2.triggered.connect(self.app_matakuliah)
    def app_exit(self):
        """Terminate the whole application."""
        sys.exit()
    def app_matakuliah(self):
        """Show the course (matakuliah) window as an application-modal dialog.

        `winmatakuliah` is a module-level global created at import/launch time.
        """
        winmatakuliah.setWindowModality(QtCore.Qt.ApplicationModal)
        winmatakuliah.show()
    def messagebox(self, title, message):
        """Show a simple OK-only message box with the given title and text."""
        mess = QMessageBox()
        mess.setWindowTitle(title)
        mess.setText(message)
        mess.setStandardButtons(QMessageBox.Ok)
        mess.exec_()
if __name__ == "__main__":
    # Run standalone: build the widgets, show fullscreen, start the event loop.
    app = QtWidgets.QApplication(sys.argv)
    window = WindowDashboardAdmin()
    winmatakuliah = WindowMatakuliah()
    window.showFullScreen()
    sys.exit(app.exec_())
else:
    # Imported as a module: create the same globals (used by app_matakuliah)
    # but never start the event loop or show the main window.
    # NOTE(review): constructing a second QApplication here will fail if the
    # importer already created one — confirm this import-time side effect.
    app = QtWidgets.QApplication(sys.argv)
    window = WindowDashboardAdmin()
    winmatakuliah = WindowMatakuliah()
{
"api_name": "PyQt5.uic.loadUiType",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.Q... |
42263303655 | from django import template
from all_products.queryutil import ShirtQuery
register = template.Library()
@register.filter
def shirt_price(shirt):
    """Template filter: price of the first in-stock size of *shirt*.

    Returns None implicitly when every size is out of stock.
    """
    query = ShirtQuery(shirt)
    for size in query.sizes:
        if query.get_stock(size) > 0:
            return query.get_price(size)
{
"api_name": "django.template.Library",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "all_products.queryutil.ShirtQuery",
"line_number": 9,
"usage_type": "call"
}
] |
32520478741 | # -*- coding: utf-8 -*-
"""
Created on Mon Dec 24 13:13:32 2018
@author: Administrator
"""
import wget, time
import os
# 网络地址
DATA_URL = 'http://164.52.0.183:8000/file/findTrace/2018-12-24.txt'
# DATA_URL = '/home/xxx/book/data.tar.gz'
out_fname = '2018-12-24.txt'
def download(DATA_URL):
    """Download today's trace file if it is not already present, then create
    the month-day output directory and return its name (e.g. 'Dec-24').

    Fixes relative to the original:
    - wget.download() was called unconditionally *before* the existence
      check, so the file was re-downloaded on every run even when present.
    - time.ctime() pads single-digit days with an extra space, so
      date.split(' ')[2] could be ''; splitting on whitespace runs via
      date.split() handles both cases.
    """
    out_fname = '2018-12-24.txt'
    parts = time.ctime().split()  # e.g. ['Mon', 'Dec', '24', '13:13:32', '2018']
    path = str(parts[1] + '-' + parts[2])
    if not os.path.exists('./' + out_fname):
        wget.download(DATA_URL, out=out_fname)
    else:
        print("today's data has been download")
    mkdir(path)
    return path
def mkdir(path):
    """Create directory *path* if it does not already exist.

    Returns True when the directory was created, False when it already
    existed (a notice is printed either way).
    """
    if os.path.exists(path):
        # Directory already there: report and leave it untouched.
        print(path + ' 目录已存在')
        return False
    os.makedirs(path)
    print(path + ' 创建成功')
    return True
# 提取压缩包
#tar = tarfile.open(out_fname)
#tar.extractall()
#tar.close()
# 删除下载文件
#os.remove(out_fname)
# 调用函数
path = download(DATA_URL)
# Parse the downloaded trace: blocks of per-channel sample lines, each block
# introduced by an "ID:..." header and followed later by an "FPS=..." line
# that applies retroactively to the samples accumulated since the header.
file = open("./" + out_fname)
lines = file.readlines()
output = {}   # channel key ("<id>_high" / "<id>_low") -> list of sample rows
temp = ""     # key of the channel currently being filled
cnt = 0       # samples collected since the last header/FPS line
for line in lines:
    line=line.strip('\n')
    if line.startswith("FPS"):
        # Retro-annotate: append the FPS value to the last cnt sample rows.
        fps_split = line.split("=")
        #print(fps_split)
        fps_temp = fps_split[1]
        for i in range(1,cnt+1):
            output[temp][-i] += " "+fps_temp
        cnt = 0
    elif line.startswith("ID:dokidoki/mlinkm/"):
        # High-bitrate (1200) channel header.
        Channel_ID_1200 = line[19:]
        # NOTE(review): output keys carry a "_high"/"_low" suffix, so this
        # bare-id membership test is always False and both branches behave
        # the same — confirm the intended check.
        if Channel_ID_1200 in output:
            temp = Channel_ID_1200 + "_high"
        else:
            output[Channel_ID_1200 + "_high"] = []
            temp = Channel_ID_1200 + "_high"
        cnt = 0
    elif line.startswith("ID:EXT-ENC-0/dokidoki/mlinkm/"):
        # Low-bitrate (500) channel header.
        Channel_ID_500 = line[29:]
        # NOTE(review): this tests Channel_ID_1200 instead of the low-rate
        # id/key — looks like a copy-paste slip; verify.
        if Channel_ID_1200 in output:
            temp = Channel_ID_500 + "_low"
        else:
            output[Channel_ID_500 + "_low"] = []
            temp = Channel_ID_500 + "_low"
        cnt = 0
    else:
        # Ordinary sample row: append under the current channel key.
        output[temp].append(line)
        cnt += 1
# Write one CSV per channel into the dated directory (spaces become commas).
for key,value in output.items():
    f_file = open("./" + path + "/" + str(key) + ".csv","w")
    for idx in range(len(value)):
        data = value[idx].replace(" ",",")
        data += "\n"
        f_file.write(data)
#print(output)
#print(Channel_ID_500)
| Y1ran/Pensieve-A3C-Streaming-Adaptive-Bitrate-Model | final/download_data.py | download_data.py | py | 2,603 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "time.ctime",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "wget.download",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
29432294183 | from pymongo import MongoClient
client = MongoClient('localhost', 27017)
database = client.mflix
# Aggregation: explode each movie's cast, count appearances per actor,
# then sort actors by appearance count, most frequent first.
pipline = [
{'$unwind':'$cast'},
{'$group':
{
'_id':'$cast',
'count':{'$sum':1}
}},
{
'$sort':{'count':-1}
}]
actors = database.movies.aggregate(pipline)
for actor in actors:
    print(actor)
| RezaeiShervin/MaktabSharif89 | Shervin_Rezaei_HW18_MaktabSharif89/Shervin_Rezaei_HW18_MaktabSharif89(7).py | Shervin_Rezaei_HW18_MaktabSharif89(7).py | py | 355 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 3,
"usage_type": "call"
}
] |
39350644450 | import copy
import numpy as np
import cv2
import time
import random
import argparse
def draw_obstacles(canvas,clr=5,unknown=False, map_flag=1):
    """
    @brief: This function goes through each node in the canvas image and checks for the
            obstacle space using the half plane equations.
            If the node is in obstacle space, the color is changed to blue
            ([255,0,0]); unknown obstacles are painted green ([0,255,0]).
    :param canvas: Canvas Image (height x width x 3)
    :param clr: Clearance (in pixels) inflated around every obstacle and border
    :param unknown: When True, also paint the map's unknown obstacles
    :param map_flag: Selects which of the two hard-coded maps to draw (1 or 2)
    """
    # Uncomment to use the cv2 functions to create the obstacle space
    # cv2.circle(canvas, (300,65),45,(255,0,0),-1)
    # cv2.fillPoly(canvas, pts = [np.array([[115,40],[36,65],[105,150],[80,70]])], color=(255,0,0)) #Arrow
    # cv2.fillPoly(canvas, pts = [np.array([[200,110],[235,130],[235,170],[200,190],[165,170],[165,130]])], color=(255,0,0)) #Hexagon
    height,width,_ = canvas.shape
    for i in range(width):
        for j in range(height):
            if map_flag == 1:
                # Map 01
                # Border walls (400x250 canvas), inflated by clearance.
                if(i<=clr) or (i>=(400-clr)) or (j<=clr) or (j>=(250-clr)):
                    canvas[j][i] = [255,0,0]
                # Circle centered at (300, 65), radius 40 + clearance.
                if ((i-300)**2+(j-65)**2-((40+clr)**2))<=0:
                    canvas[j][i] = [255,0,0]
                # Hexagon via six half-plane constraints (cv2 comment above shows vertices).
                if (j+(0.57*i)-213.53)>=clr and (j-(0.57*i)+5.04+clr)>=0 and (i-235-clr)<=0 and (j+(0.57*i)-305.04-clr)<=0 and (j-(0.57*i)-76.465-clr)<=0 and (i-155-clr)>=0:
                    canvas[j][i] = [255,0,0]
                # Arrow polygon: union of two half-plane triangles.
                if ((j+(0.316*i)-66.1483-clr)>=0 and (j+(0.857*i)-140.156-clr)<=0 and (j-(0.114*i)-55.909-clr)<=0) or ((j-(1.23*i)-23.576-clr)<=0 and (j-(3.2*i)+197.763+clr)>=0 and (j-(0.114*i)-55.909-clr)>=0):
                    canvas[j][i] = [255,0,0]
            elif map_flag == 2:
                # Map 02: border plus five axis-aligned rectangular pillars.
                if(i<=clr) or (i>=(400-clr)) or (j<=clr) or (j>=(250-clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=clr) and (j<=63)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=103-clr) and (j<=147+clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=118-clr) and (i<=148+clr) and (j>=187-clr) and (j<=(250-clr))):
                    canvas[j][i] = [255,0,0]
                if ((i>=251-clr) and (i<=281+clr) and (j>=42-clr) and (j<=105+clr)):
                    canvas[j][i] = [255,0,0]
                if ((i>=251-clr) and (i<=281+clr) and (j>=145-clr) and (j<=208+clr)):
                    canvas[j][i] = [255,0,0]
    if(unknown):
        # Unknown obstacles (green): circles not inflated by clearance.
        for i in range(width):
            for j in range(height):
                if map_flag == 1:
                    if ((i-110)**2+(j-210)**2-((35)**2))<=0:
                        canvas[j][i] = [0,255,0]
                elif map_flag == 2:
                    if ((i-70)**2+(j-190)**2-((35)**2))<=0:
                        canvas[j][i] = [0,255,0]
                    if ((i-200)**2+(j-140)**2-((20)**2))<=0:
                        canvas[j][i] = [0,255,0]
    return canvas
def is_obstacle(next_width, next_height, canvas, unknown=False):
    """
    @brief: Check whether the (rounded) pixel at (next_width, next_height)
            lies inside an obstacle. Channel 0 == 255 (blue) marks known
            obstacles; with unknown=True, channel 1 == 255 (green) unknown
            obstacles are treated as blocked as well.
    :param canvas: Canvas Image
    :param unknown: Unknown Map flag
    """
    row = int(round(next_height))
    col = int(round(next_width))
    pixel = canvas[row][col]
    if unknown:
        return bool(pixel[1] == 255 or pixel[0] == 255)
    return bool(pixel[0] == 255)
def cost_to_goal(node, final):
    """
    @brief: Euclidean distance between the current node and the goal node.
    :param node: present node
    :param final: final node (Goal Node)
    """
    dx = node[0] - final[0]
    dy = node[1] - final[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def compute_distance(node1, node2):
    """
    @brief: Euclidean length of the edge between the two given nodes.
    :param node1: First Node
    :param node2: Second Node
    """
    dx = node1[0] - node2[0]
    dy = node1[1] - node2[1]
    return np.sqrt(dx ** 2 + dy ** 2)
def nearest(sample, TV):
    """
    @brief: Return the previously generated tree vertex closest to *sample*
            (ties broken by earliest position in TV, matching min()).
    :param sample: sampled node
    :param TV: Tree Vertices
    """
    return min(TV, key=lambda vertex: compute_distance(sample, vertex))
def neighbor_nodes(sample, TV, d=10):
    """
    @brief: Return every tree vertex strictly within distance *d* of *sample*.
    :param sample: Node around which neighbours are collected
    :param TV: Tree vertices
    :param d: Distance threshold (pixels)
    """
    return [vertex for vertex in TV if compute_distance(sample, vertex) < d]
def collision_free(X_nearest,X_rand,canvas,unknown=False): #Replace X_rand with X_new for steer function
    """
    @brief: This function samples the edge and checks for the validity of the edge by checking the obstacle space.
            The edge is walked one pixel at a time along the x axis (or the y
            axis when the edge is vertical), interpolating the other
            coordinate linearly. Returns True when no sample hits an obstacle.
            NOTE(review): for steep edges this samples at most one point per
            x column, so thin obstacles between columns could be missed.
    :param X_nearest: Nearest Node
    :param X_rand: Random node
    :param canvas: Map
    :unknown: Flag for unknown map
    """
    if X_rand[0] != X_nearest[0]:
        # Non-vertical edge: iterate integer x from the left endpoint to the right.
        x1 = min(X_nearest[0],X_rand[0])
        if(X_nearest[0] == x1):
            for w in range(X_nearest[0],X_rand[0]+1):
                # Linear interpolation of y along the segment.
                h = ((X_rand[1] - X_nearest[1])/(X_rand[0] - X_nearest[0]))*(w - X_nearest[0]) + X_nearest[1]
                if(is_obstacle(int(w),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
        else:
            for w in range(X_rand[0],X_nearest[0]+1):
                h = ((X_nearest[1] - X_rand[1])/(X_nearest[0] - X_rand[0]))*(w - X_rand[0]) + X_rand[1]
                if(is_obstacle(int(w),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
    else:
        # Vertical edge: x is constant, iterate integer y between endpoints.
        y1 = min(X_nearest[1],X_rand[1])
        if(y1 == X_nearest[1]):
            for h in range(X_nearest[1],X_rand[1]+1):
                if(is_obstacle(int(X_nearest[0]),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
        else:
            for h in range(X_rand[1],X_nearest[1]+1):
                if(is_obstacle(int(X_rand[0]),int(h),canvas,unknown)):
                    # print("Collision!")
                    return False
    return True
def rewire(node1,node2,node_dict,canvas,final_state):
    """
    @brief: This function rewires the edge between the nodes by checking the edge length and the cost to goal.
            If routing node2 through node1 is collision-free and cheaper than
            node2's current cost, node1 becomes node2's parent in node_dict
            and is returned; otherwise node2's existing parent is returned.
            NOTE(review): the comparison uses the straight-line cost_to_goal
            of node1 rather than its accumulated tree cost (node_dict[...][1])
            — confirm this is the intended RRT* cost model.
    :param node1: Node 1 (candidate parent)
    :param node2: Node 2 (node being rewired)
    :param node_dict: Dictionary containing the Parent nodes and costs
    :param canvas: Map
    :param final_state: Goal Node
    """
    # print("In rewire")
    parent = []  # stays empty unless the rewiring actually happens
    if collision_free(node1,node2, canvas) is True:
        if (compute_distance(node1, node2) + cost_to_goal(node1, final_state)) < cost_to_goal(node2, final_state):
            # Cheaper route found: record node1 as parent with the new cost.
            node_dict[tuple(node2)] = [node1, compute_distance(node1, node2) + cost_to_goal(node1, final_state)]
            parent = node1.copy()
    if len(parent) != 0:
        return parent
    else:
        # No rewiring: keep node2's current parent.
        return node_dict[tuple(node2)][0]
def mod_rrt_star(initial_state,final_state,canvas):
    """
    @brief: This function generates the random tree for the given obstacle map.
            The tree is rooted at the goal and grown toward the start; TE maps
            each child vertex (as a tuple) to its parent vertex, and the loop
            stops once a sampled vertex lands within 5 pixels of the start.
    :param initial_state: Start Node
    :param final_state: final node (Goal Node)
    :param canvas: Map
    :return: (TE edges child->parent, TV vertex list, last sampled vertex)
    """
    TV = []          # tree vertices
    TE = {}          # tree edges: tuple(child) -> parent node
    TV.append(final_state)
    node_dict = {}   # tuple(node) -> [parent, cost] bookkeeping for rewire()
    node_dict[tuple(final_state)] = final_state
    while True:
        # Sample a uniformly random pixel inside the canvas.
        width_rand = random.randint(0,canvas.shape[1]-1)
        height_rand = random.randint(0,canvas.shape[0]-1)
        X_rand = [width_rand,height_rand]
        # print("Random sample: ", X_rand)
        X_nearest = nearest(X_rand,TV)
        #Steer function to be implemented later for non-holonomic constraints.
        #X_new <- Steer(X_rand, X_nearest)
        #Here X_rand is X_new
        if(collision_free(X_nearest, X_rand, canvas) is False):
            continue  # discard samples whose connecting edge is blocked
        X_parent = X_nearest.copy()
        node_dict[tuple(X_rand)] = [X_parent, cost_to_goal(X_nearest,final_state) + compute_distance(X_nearest,X_rand)]
        X_neighbors = neighbor_nodes(X_rand, TV, 10)
        # Pass 1: try each neighbour as a cheaper parent for the new vertex.
        for n in X_neighbors:
            X_parent = rewire(n, X_rand, node_dict, canvas, final_state)
        TV.append(X_rand)
        TE[tuple(X_rand)] = X_parent.copy()
        # print("X_parent", X_parent)
        # Pass 2: try the new vertex as a cheaper parent for each neighbour.
        for n in X_neighbors:
            X_parent_temp = rewire(X_rand, n, node_dict, canvas, final_state)
            if X_parent_temp == X_rand:
                # print("Before Pop", n)
                TE.pop(tuple(n))
                TE[tuple(n)] = X_rand.copy()
        if compute_distance(X_rand,initial_state) < 5:
            print("RRT* Converged!")
            return TE, TV, X_rand
def backtrack(initial_state, final_state, edges, canvas):
    """
    Follow the tree edges from the start node until the goal node is reached.

    :param initial_state: start node [x, y]
    :param final_state: final node (Goal Node)
    :param edges: child -> parent mapping produced by the tree generation
    :param canvas: Map (kept for interface compatibility; unused here)
    :return: list of nodes from the start node to the goal node
    """
    path = []
    current = initial_state.copy()
    goal_key = tuple(final_state)
    while True:
        successor = edges[tuple(current)]
        path.append(current)
        if tuple(successor) == goal_key:
            path.append(final_state)
            print("Back Tracking Done!")
            break
        current = successor.copy()
    return path
def path_sampling(path):
    """
    Densify a piecewise-linear path into unit-spaced integer points.

    Each consecutive pair of waypoints is expanded into every integer
    coordinate along the straight segment between them: sampled along x for
    non-vertical segments, along y for vertical ones.

    Fixes: removed leftover debug print statements ("vertical line", "y1")
    and merged the two symmetric per-direction branches.

    :param path: list of [x, y] waypoints from the start node to the goal node
    :return: list of [x, y] integer points covering every segment (empty for
             paths with fewer than two waypoints)
    """
    sampled_path = []
    for i in range(0, len(path) - 1):
        X_rand = path[i]
        X_nearest = path[i + 1]
        if X_rand[0] != X_nearest[0]:
            # Non-vertical segment: order endpoints by x and evaluate the
            # line equation at each integer x (same maths as the original
            # two symmetric branches).
            (x0, y0), (x1, y1) = sorted(
                ((X_rand[0], X_rand[1]), (X_nearest[0], X_nearest[1]))
            )
            slope = (y1 - y0) / (x1 - x0)
            for w in range(x0, x1 + 1):
                sampled_path.append([int(w), int(slope * (w - x0) + y0)])
        else:
            # Vertical segment: walk the shared column between the y values.
            y_low, y_high = sorted((X_rand[1], X_nearest[1]))
            for h in range(y_low, y_high + 1):
                sampled_path.append([int(X_rand[0]), int(h)])
    return sampled_path
def path_smoothening(sampled_path, final_state, canvas, unknown=False):
    """
    @brief: This function smoothenes the path by connecting the start nodes with the most feasible node starting
            from the goal node
    :param sampled_path: Sampled Path (must be non-empty; see note below)
    :param final_state: final node (Goal Node) -- not referenced in this body
    :param canvas: Map
    :param unknown: Flag for dynamic map ( unknown obstacles )
    :return: subsequence of sampled_path whose consecutive nodes are joined
             by collision-free straight edges
    """
    shortest_path = []
    # NOTE(review): raises IndexError when sampled_path is empty -- the
    # guard below was left commented out; confirm callers never pass [].
    # if len(sampled_path) > 0:
    shortest_path.append(sampled_path[0])
    print("Length of Sampled Path: ", len(sampled_path))
    # Greedy shortcutting: from the last accepted node, scan candidates
    # starting at the far end of the path (index len-1-i+start walks
    # backwards as i grows) and accept the farthest collision-free one.
    while (tuple(shortest_path[-1]) != tuple(sampled_path[-1])):
        # print(sampled_path.index(shortest_path[-1]))
        for i in range(sampled_path.index(shortest_path[-1]), len(sampled_path)):
            if collision_free(shortest_path[-1], sampled_path[len(sampled_path)-1-i+sampled_path.index(shortest_path[-1])], canvas, unknown):
                shortest_path.append(sampled_path[len(sampled_path)-1-i+sampled_path.index(shortest_path[-1])])
                break
    # print(shortest_path)
    return shortest_path
def path_replanning(path, dynamic_map, edges, vertices, initial, final):
    """
    @brief: This function replans the path based on the dynamic obstacles present in the map.
    :param path: actual path to be followed
    :param dynamic_map: Dynamic map
    :param edges: Edges (unused in this body; kept for interface parity)
    :param vertices: Vertices of the tree
    :param initial: starting node (unused in this body)
    :param final: Goal Node
    :return: copy of *path* in which waypoints blocked by unknown obstacles
             are replaced by nearby collision-free tree vertices
    """
    replanned_path = path.copy()
    print("in path replanning")
    for i in range(1, len(path)):
        node = replanned_path[i]
        X_next = node.copy()
        # Replan this waypoint when it lies on an unknown obstacle, or when
        # the edge from the previous waypoint crosses one.
        if is_obstacle(node[0], node[1], dynamic_map, True) or (not collision_free(replanned_path[i-1], X_next, dynamic_map, unknown=True)):
            X_curr = replanned_path[i-1].copy()
            X_candi = []
            # Candidate replacements: tree vertices within radius 50 that
            # are reachable collision-free and not already on the path.
            X_near = neighbor_nodes(X_curr, vertices, d=50)
            for near in X_near:
                if collision_free(X_curr, near, dynamic_map, unknown=True):
                    if near not in replanned_path:
                        X_candi.append(near)
            # Pick the pareto-dominant candidate (None when there is none).
            X_next = pareto(X_candi, X_curr, final)
            # X_next = X_candi[0]
            if (X_next is not None):
                # print("Nearby node found!")
                # print("Previous: ", replanned_path[i])
                replanned_path[i] = X_next.copy()
                # print("Updated: ", replanned_path[i])
            else:
                # Best effort: keep the original (blocked) waypoint.
                print("Not Enough Samples found nearby and hence the path goes through the obstacle")
    # print("Replanned Path: ", replanned_path)
    return replanned_path
def pareto(X_candi, initial, final):
    """
    @brief: This function returns the most dominant node by using the pareto dominance theory
    :param X_candi: Candidate Nodes
    :param initial: Initial Node
    :param final: Final Node (Goal Node)
    :return: the selected candidate node, or None (implicit) when there are
             no candidates or no non-dominated point -- callers check for None
    """
    # Objective vector per candidate: (distance from the current node,
    # distance-to-goal); lower is better in both objectives.
    paretos = []
    for candidates in X_candi:
        paretos.append([compute_distance(candidates, initial), cost_to_goal(candidates, final)])
    if (len(paretos) != 0):
        pareto_dict = {}
        for i in range(0, len(paretos)):
            dominant_node = paretos[i].copy()
            ID = 0  # count of other points that dominate this one
            OD = 0  # count of other points this one dominates
            for j in range(0, len(paretos)):
                if (tuple(paretos[i]) == tuple(paretos[j])):
                    continue
                elif ((paretos[j][0] <= dominant_node[0] and paretos[j][1] <= dominant_node[1]) and (paretos[j][0] < dominant_node[0] or paretos[j][1] < dominant_node[1])):
                    ID += 1
                elif (((paretos[j][0] >= dominant_node[0] and paretos[j][1] >= dominant_node[1]) and (paretos[j][0] > dominant_node[0] or paretos[j][1] > dominant_node[1]))):
                    OD += 1
            pareto_dict[tuple(dominant_node)] = [ID, OD]
        pareto_keys = list(pareto_dict.keys())
        pareto_IDs = []
        pareto_ODs = []  # NOTE(review): collected but never read below
        for p_key in pareto_keys:
            pareto_IDs.append(pareto_dict[tuple(p_key)][0])
            pareto_ODs.append(pareto_dict[tuple(p_key)][1])
        # Indices of the non-dominated points (ID == 0).
        zero_ID_index = list(np.where(np.array(pareto_IDs) == 0))[0]
        # print("Zero ID Index Type: ", type(zero_ID_index), zero_ID_index)
        if (len(zero_ID_index) > 1):
            # Several non-dominated points: tie-break by the one dominating
            # the most other points (largest OD).
            zero_ID_keys = []
            for i in zero_ID_index:
                zero_ID_keys.append(pareto_keys[i])
            zero_ID_max_OD = []
            for key in zero_ID_keys:
                zero_ID_max_OD.append(pareto_dict[tuple(key)][1])
            max_OD = np.max(zero_ID_max_OD)
            max_OD_key = zero_ID_keys[zero_ID_max_OD.index(max_OD)]
            # print(max_OD_key)
            return X_candi[paretos.index(list(max_OD_key))]
        elif (len(zero_ID_index) == 1):
            return X_candi[paretos.index(list(pareto_keys[zero_ID_index[0]]))]
        else:
            print("NO PARETO!")
    else:
        print("No Candidate Nodes")
if __name__ == '__main__':
    # Command-line selection of which predefined map to load.
    parser = argparse.ArgumentParser()
    parser.add_argument('--map1', action='store_true',
                        help="Loads Map 01")
    parser.add_argument('--map2', action='store_true',
                        help="Loads Map 02")
    args = parser.parse_args()
    canvas = np.ones((250, 400, 3), dtype="uint8")  # Creating a blank canvas
    # Fix: default to Map 01 so running without --map1/--map2 no longer hits
    # a NameError when `flag` is used below (it was previously assigned only
    # inside the two branches).
    flag = 1
    if args.map1:
        flag = 1
    elif args.map2:
        flag = 2
    # Draw the obstacles in the canvas, default point robot with 5 units of clearance
    canvas = draw_obstacles(canvas, clr=5, unknown=False, map_flag=flag)
    initial_state, final_state = [10, 10], [350, 150]  # Start and goal nodes
    # Changing the cartesian coordinates to image coordinates:
    initial_state[1] = canvas.shape[0]-1 - initial_state[1]
    final_state[1] = canvas.shape[0]-1 - final_state[1]
    # Exit when the start or goal lies in the obstacle space (obstacle pixels
    # carry 255 in the first channel).
    if (canvas[initial_state[1]][initial_state[0]][0] == 255 or canvas[final_state[1]][final_state[0]][0] == 255):
        print("Given Start or Goal Node is in the Obstacle Region. Please re-run with Valid Coordinates")
        exit()
    start_time = time.time()
    # Mark the start (green) and goal (red) nodes.
    cv2.circle(canvas, (int(initial_state[0]), int(initial_state[1])), 5, (0, 255, 0), -1)
    cv2.circle(canvas, (int(final_state[0]), int(final_state[1])), 5, (0, 0, 255), -1)
    # Generate the graph from the Modified RRT* Algorithm
    edges, vertices, s_node = mod_rrt_star(initial_state, final_state, canvas)
    tree_canvas = canvas.copy()
    # Draw the tree edges and their endpoints.
    for key in edges:
        cv2.line(tree_canvas, tuple(key), tuple(edges[key]), (0, 0, 255), 3)
        cv2.circle(tree_canvas, (key[0], key[1]), 1, (0, 255, 255), -1)
        cv2.circle(tree_canvas, (edges[key][0], edges[key][1]), 1, (0, 255, 0), -1)
    # Generate a dynamic map containing the unknown obstacles.
    dynamic_map = np.ones((250, 400, 3), dtype="uint8")
    dynamic_map = draw_obstacles(dynamic_map, clr=5, unknown=True, map_flag=flag)
    cv2.imshow("Known Map with Unknown Obstacles", dynamic_map)
    # Backtrack the path to reach from start node to goal node
    path = backtrack(s_node, final_state, edges, canvas)
    rrt_path_canvas = tree_canvas.copy()
    for i in range(1, len(path)):
        cv2.line(rrt_path_canvas, tuple(path[i-1]), tuple(path[i]), (0, 255, 0), 3)
    # Sample and smoothen the path from the list returned from the backtracking function.
    sampled_path = path_sampling(path)
    smoothened_path = path_smoothening(sampled_path.copy(), final_state, canvas, unknown=False)
    smooth_rrt_path_canvas = rrt_path_canvas.copy()
    for i in range(1, len(smoothened_path)):
        cv2.line(smooth_rrt_path_canvas, tuple(smoothened_path[i-1]), tuple(smoothened_path[i]), (255, 255, 255), 3)
    # Resample the smoothened path
    sampled_path = path_sampling(smoothened_path)
    # Replan the path around the dynamic obstacles
    replanned_path = path_replanning(sampled_path, dynamic_map, edges, vertices, s_node, final_state)
    replanned_path_canvas = dynamic_map.copy()
    for i in range(0, len(replanned_path)):
        cv2.circle(replanned_path_canvas, tuple(replanned_path[i]), 2, (0, 145, 145), -1)
    # Split the replanned path into the prefix that matches the original
    # sampled path (prev_path) and the tail that diverges around new
    # obstacles (n_path, seeded with two points of overlap for continuity).
    # NOTE(review): sampled_path[i-2]/[i-1] wrap around when the divergence
    # starts at i < 2 -- confirm the first two waypoints are never replanned.
    n_path = []
    prev_path = []
    for i in range(0, len(replanned_path)):
        if (tuple(sampled_path[i]) == tuple(replanned_path[i])):
            prev_path.append(sampled_path[i])
            continue
        else:
            n_path.append(sampled_path[i-2])
            n_path.append(sampled_path[i-1])
            for j in range(i, len(replanned_path)):
                n_path.append(replanned_path[j])
            break
    # Fix: when the replanned path is identical to the sampled path, n_path
    # stays empty and path_smoothening([]) would raise IndexError.
    if n_path:
        new_replanned_path = path_smoothening(n_path.copy(), final_state, dynamic_map, unknown=True)
    else:
        new_replanned_path = []
    smooth_replanned_path_canvas = replanned_path_canvas.copy()
    for i in range(1, len(sampled_path)):
        cv2.line(dynamic_map, tuple(sampled_path[i-1]), tuple(sampled_path[i]), (0, 137, 255), 3)
    for i in range(1, len(prev_path)):
        cv2.line(smooth_replanned_path_canvas, tuple(prev_path[i-1]), tuple(prev_path[i]), (255, 128, 223), 2)
    for i in range(1, len(new_replanned_path)):
        cv2.line(smooth_replanned_path_canvas, tuple(new_replanned_path[i-1]), tuple(new_replanned_path[i]), (255, 128, 223), 2)
    end_time = time.time()  # Time taken to run the whole algorithm to find the optimal path
    cv2.imshow("Known Map with Initial & Final Nodes", canvas)
    cv2.imshow("Modified RRT* Tree Expansion", tree_canvas)
    cv2.imshow("Modified RRT* Path", rrt_path_canvas)
    cv2.imshow("Smoothened Modified RRT* Path", smooth_rrt_path_canvas)
    cv2.imshow("Smoothened Replanned Modified RRT* Path", smooth_replanned_path_canvas)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# print("Code Execution Time: ",end_time-start_time) #Prints the total execution time | okritvik/MOD-RRT-Star-Implementation-Point-Robot | mod_rrt_star.py | mod_rrt_star.py | py | 22,275 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 110,
"usage_type": "call"
},
{
"api_name": "numpy.power",
"line_number":... |
24324519488 | from pathlib import Path
from typing import IO
def sentencepiece_load(file):
    """Load and return a SentencePiece model stored at *file*."""
    from sentencepiece import SentencePieceProcessor
    processor = SentencePieceProcessor()
    model_path = str(file)
    processor.Load(model_path)
    return processor
# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA
def http_get_temp(url: str, temp_file: IO) -> None:
    """Stream the body of *url* into *temp_file* and return the response
    headers, showing a progress bar when tqdm is available."""
    import requests
    import warnings
    from urllib3.exceptions import InsecureRequestWarning
    # temporary fix for dealing with this SSL certificate issue:
    # https://github.com/bheinzerling/bpemb/issues/63
    with warnings.catch_warnings():
        warnings.simplefilter("ignore", InsecureRequestWarning)
        response = requests.get(url, stream=True, verify=False)
    response.raise_for_status()
    size_header = response.headers.get('Content-Length')
    expected_total = None if size_header is None else int(size_header)
    try:
        from tqdm import tqdm
        progress_bar = tqdm(unit="B", total=expected_total)
    except ImportError:
        progress_bar = None
    for piece in response.iter_content(chunk_size=1024):
        # Keep-alive chunks arrive as empty bytes; skip them.
        if not piece:
            continue
        if progress_bar is not None:
            progress_bar.update(len(piece))
        temp_file.write(piece)
    if progress_bar is not None:
        progress_bar.close()
    return response.headers
# source: https://github.com/allenai/allennlp/blob/master/allennlp/common/file_utils.py#L147 # NOQA
def http_get(url: str, outfile: Path, ignore_tardir: bool = False) -> Path:
    """Download *url* to *outfile*.

    Responses with Content-Type application/x-gzip are treated as tar
    archives that must contain exactly one member, which is extracted into
    outfile's parent directory; any other response body is copied verbatim.

    :param url: address to download
    :param outfile: destination path; parent directories are created
    :param ignore_tardir: drop the directory part of the tar member name so
        the file lands directly in outfile.parent
    :return: outfile
    :raises NotImplementedError: when the archive holds more than one member
    :raises AssertionError: when the extracted path differs from outfile
    """
    import tempfile
    import shutil
    with tempfile.NamedTemporaryFile() as temp_file:
        headers = http_get_temp(url, temp_file)
        # we are copying the file before closing it, flush to avoid truncation
        temp_file.flush()
        # shutil.copyfileobj() starts at current position, so go to the start
        temp_file.seek(0)
        outfile.parent.mkdir(exist_ok=True, parents=True)
        if headers.get("Content-Type") == "application/x-gzip":
            import tarfile
            tf = tarfile.open(fileobj=temp_file)
            members = tf.getmembers()
            if len(members) != 1:
                raise NotImplementedError("TODO: extract multiple files")
            member = members[0]
            if ignore_tardir:
                # Keep only the basename so directories inside the archive
                # are ignored.
                member.name = Path(member.name).name
            # NOTE(review): extracting a downloaded archive member without
            # sanitising member.name trusts the server for the target path
            # -- confirm the download source is trusted.
            tf.extract(member, str(outfile.parent))
            extracted_file = outfile.parent / member.name
            assert extracted_file == outfile, "{} != {}".format(
                extracted_file, outfile)
        else:
            with open(str(outfile), 'wb') as out:
                shutil.copyfileobj(temp_file, out)
    return outfile
def load_word2vec_file(word2vec_file, add_pad=False, pad="<pad>"):
    """Load a word2vec file in either text or bin format, optionally
    appending a padding token."""
    from gensim.models import KeyedVectors
    path_str = str(word2vec_file)
    is_binary = path_str.endswith(".bin")
    vectors = KeyedVectors.load_word2vec_format(path_str, binary=is_binary)
    if add_pad:
        if pad in vectors:
            raise ValueError("Attempted to add <pad>, but already present")
        vectors = add_embeddings(vectors, pad) or vectors
    return vectors
def add_embeddings(keyed_vectors, *words, init=None):
    """Append one vector per word to *keyed_vectors* and return the new
    vocabulary size.  Vectors are produced by *init* (defaults to zeros)."""
    import numpy as np
    make_vectors = np.zeros if init is None else init
    new_vectors = make_vectors((len(words), keyed_vectors.vectors.shape[1]))
    keyed_vectors.add_vectors(words, new_vectors)
    return keyed_vectors.vectors.shape[0]
| bheinzerling/bpemb | bpemb/util.py | util.py | py | 3,501 | python | en | code | 1,146 | github-code | 36 | [
{
"api_name": "sentencepiece.SentencePieceProcessor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "typing.IO",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "warnings.catch_warnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": ... |
15514519322 | import json
import os
import shutil
import sys
import tempfile
import unittest
from compare_perf_tests import LogParser
from compare_perf_tests import PerformanceTestResult
from compare_perf_tests import ReportFormatter
from compare_perf_tests import ResultComparison
from compare_perf_tests import TestComparator
from compare_perf_tests import main
from compare_perf_tests import parse_args
from test_utils import captured_output
class TestPerformanceTestResult(unittest.TestCase):
    """Tests construction and merging of PerformanceTestResult.

    Covers the legacy CSV format, the quantile formats (plain and
    delta-encoded), metadata columns, and merging of repeated runs.
    """

    def test_init(self):
        # Legacy CSV format without the optional MAX_RSS column.
        header = "#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN"
        log_line = "1,AngryPhonebook,20,10664,12933,11035,576,10884"
        r = PerformanceTestResult.fromOldFormat(header, log_line)
        self.assertEqual(r.test_num, 1)
        self.assertEqual(r.name, "AngryPhonebook")
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (20, 10664, 12933, 11035, 576, 10884),
        )
        # Summary-only format: no individual samples are reconstructed.
        self.assertEqual(r.samples, [])

        # Same format with MAX_RSS appended.
        header = "#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN,MAX_RSS"
        log_line = "1,AngryPhonebook,1,12045,12045,12045,0,12045,10510336"
        r = PerformanceTestResult.fromOldFormat(header, log_line)
        self.assertEqual(r.max_rss, 10510336)

    def test_init_quantiles(self):
        # 2-quantile (min, median, max) format.
        header = "#,TEST,SAMPLES,MIN(μs),MEDIAN(μs),MAX(μs)"
        log = "1,Ackermann,3,54383,54512,54601"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(r.test_num, 1)
        self.assertEqual(r.name, "Ackermann")
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value),
            (3, 54383, 54512, 54601)
        )
        # Mean/SD are derived from the recovered samples.
        self.assertAlmostEqual(r.mean, 54498.67, places=2)
        self.assertAlmostEqual(r.sd, 109.61, places=2)
        self.assertEqual(r.samples, [54383, 54512, 54601])

        header = "#,TEST,SAMPLES,MIN(μs),MEDIAN(μs),MAX(μs),MAX_RSS(B)"
        log = "1,Ackermann,3,54529,54760,55807,266240"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual((len(r.samples), r.max_rss), (3, 266240))

        # 4-quantile (quartiles) format.
        header = "#,TEST,SAMPLES,MIN(μs),Q1(μs),Q2(μs),Q3(μs),MAX(μs)"
        log = "1,Ackermann,5,54570,54593,54644,57212,58304"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value),
            (5, 54570, 54644, 58304)
        )
        self.assertEqual((r.q1, r.q3), (54581.5, 57758))
        self.assertEqual(len(r.samples), 5)

        header = "#,TEST,SAMPLES,MIN(μs),Q1(μs),Q2(μs),Q3(μs),MAX(μs),MAX_RSS(B)"
        log = "1,Ackermann,5,54686,54731,54774,55030,63466,270336"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual(r.num_samples, 5)
        self.assertEqual(len(r.samples), 5)
        self.assertEqual(r.max_rss, 270336)

    def test_init_delta_quantiles(self):
        # 2-quantile from 2 samples in repeated min, when delta encoded,
        # the difference is 0, which is omitted -- only separator remains
        header = "#,TEST,SAMPLES,MIN(μs),𝚫MEDIAN,𝚫MAX"
        log = "202,DropWhileArray,2,265,,22"
        r = PerformanceTestResult.fromQuantileFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.median, r.max_value),
                         (2, 265, 276, 287))
        self.assertEqual(len(r.samples), 2)
        self.assertEqual(r.num_samples, 2)

    def test_init_oversampled_quantiles(self):
        """When num_samples is < quantile + 1, some of the measurements are
        repeated in the report summary. Samples should contain only true
        values, discarding the repeated artifacts from quantile estimation.

        The test string is slightly massaged output of the following R script:
        subsample <- function(x, q) {
        quantile(1:x, probs=((0:(q-1))/(q-1)), type=1)}
        tbl <- function(s) t(sapply(1:s, function(x) {
        qs <- subsample(x, s); c(qs[1], diff(qs)) }))
        sapply(c(3, 5, 11, 21), tbl)

        TODO: Delete this test when we delete quantile support from the
        benchmark harness. Reconstructing samples from quantiles as this code is
        trying to do is not really statistically sound, which is why we're going
        to delete most of this in favor of an architecture where the
        lowest-level benchmarking logic reports samples, we store and pass
        raw sample data around as much as possible, and summary statistics are
        only computed as necessary for actual reporting (and then discarded,
        since we can recompute anything we need if we always have the raw
        samples available).
        """

        def validatePTR(deq):  # construct from delta encoded quantiles string
            deq = deq.split(",")
            num_samples = deq.count("1")
            r = PerformanceTestResult(
                ["0", "B", str(num_samples)] + deq, quantiles=True, delta=True
            )
            self.assertEqual(len(r.samples), num_samples)
            self.assertEqual(r.samples, range(1, num_samples + 1))

        delta_encoded_quantiles = """
            1,,
            1,,1
            1,,,,
            1,,,1,
            1,,1,1,
            1,,1,1,1
            1,,,,,,,,,,
            1,,,,,,1,,,,
            1,,,,1,,,1,,,
            1,,,1,,,1,,1,,
            1,,,1,,1,,1,,1,
            1,,1,,1,,1,1,,1,
            1,,1,1,,1,1,,1,1,
            1,,1,1,1,,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1
            1,,,,,,,,,,,,,,,,,,,,
            1,,,,,,,,,,,1,,,,,,,,,
            1,,,,,,,1,,,,,,,1,,,,,,
            1,,,,,,1,,,,,1,,,,,1,,,,
            1,,,,,1,,,,1,,,,1,,,,1,,,
            1,,,,1,,,1,,,,1,,,1,,,1,,,
            1,,,1,,,1,,,1,,,1,,,1,,,1,,
            1,,,1,,,1,,1,,,1,,1,,,1,,1,,
            1,,,1,,1,,1,,1,,,1,,1,,1,,1,,
            1,,,1,,1,,1,,1,,1,,1,,1,,1,,1,
            1,,1,,1,,1,,1,,1,1,,1,,1,,1,,1,
            1,,1,,1,,1,1,,1,,1,1,,1,,1,1,,1,
            1,,1,,1,1,,1,1,,1,1,,1,1,,1,1,,1,
            1,,1,1,,1,1,,1,1,,1,1,1,,1,1,,1,1,
            1,,1,1,,1,1,1,,1,1,1,,1,1,1,,1,1,1,
            1,,1,1,1,,1,1,1,1,,1,1,1,1,,1,1,1,1,
            1,,1,1,1,1,1,,1,1,1,1,1,1,,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,,1,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
            1,,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1"""
        # NOTE(review): map() is lazy in Python 3, so validatePTR is never
        # actually invoked by the line below -- confirm whether these checks
        # should run (converting to a for-loop would execute them).
        map(validatePTR, delta_encoded_quantiles.split("\n")[1:])

    def test_init_meta(self):
        # Metadata columns (PAGES, ICS, YIELD) without MAX_RSS.
        header = (
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),"
            + "MEDIAN(μs),PAGES,ICS,YIELD"
        )
        log = "1,Ackermann,200,715,1281,726,47,715,7,29,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.test_num, r.name), (1, "Ackermann"))
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (200, 715, 1281, 726, 47, 715),
        )
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (7, 29, 15))

        # Metadata columns together with MAX_RSS.
        header = (
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "MAX_RSS(B),PAGES,ICS,YIELD"
        )
        log = "1,Ackermann,200,715,1951,734,97,715,36864,9,50,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual(
            (r.num_samples, r.min_value, r.max_value, r.mean, r.sd, r.median),
            (200, 715, 1951, 734, 97, 715),
        )
        self.assertEqual(
            (r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (9, 50, 15, 36864),
        )

        # Reduced (min/max only) headers with metadata.
        header = "#,TEST,SAMPLES,MIN(μs),MAX(μs),PAGES,ICS,YIELD"
        log = "1,Ackermann,200,715,3548,8,31,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.max_value), (200, 715, 3548))
        self.assertEqual(r.samples, [])
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (8, 31, 15))

        header = "#,TEST,SAMPLES,MIN(μs),MAX(μs),MAX_RSS(B),PAGES,ICS,YIELD"
        log = "1,Ackermann,200,715,1259,32768,8,28,15"
        r = PerformanceTestResult.fromOldFormat(header, log)
        self.assertEqual((r.num_samples, r.min_value, r.max_value), (200, 715, 1259))
        self.assertEqual(r.samples, [])
        self.assertEqual(r.max_rss, 32768)
        self.assertEqual((r.mem_pages, r.involuntary_cs, r.yield_count), (8, 28, 15))

    def test_merge(self):
        # Four single-sample JSON results for the same benchmark.
        # NOTE(review): the comprehension variable below shadows the json
        # module inside its scope.
        tests = [
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12045]}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616],"max_rss":10502144}""",
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12270],"max_rss":10498048}"""
        ]
        results = [PerformanceTestResult(json) for json in tests]

        def as_tuple(r):
            # Summary snapshot used to observe how merging updates statistics.
            return (
                r.num_samples,
                r.min_value,
                r.max_value,
                round(r.mean, 2),
                round(r.sd, 2),
                r.median,
                r.max_rss,
            )

        r = results[0]
        self.assertEqual(as_tuple(r), (1, 12045, 12045, 12045, 0, 12045, None))
        r.merge(results[1])
        self.assertEqual(
            as_tuple(r),
            (2, 12045, 12325, 12185, 197.99, 12185, 10510336),
        )
        r.merge(results[2])
        self.assertEqual(
            as_tuple(r),
            (3, 11616, 12325, 11995.33, 357.1, 12045, 10502144),
        )
        r.merge(results[3])
        self.assertEqual(
            as_tuple(r),
            (4, 11616, 12325, 12064, 322.29, 12157.5, 10498048),
        )

    def test_legacy_merge(self):
        # Merging summary-only (no raw samples) legacy results.
        header = """#,TEST,NUM_SAMPLES,MIN,MAX,MEAN,SD,MEDIAN, MAX_RSS"""
        tests = [
            """1,AngryPhonebook,8,12045,12045,12045,0,12045""",
            """1,AngryPhonebook,8,12325,12325,12325,0,12325,10510336""",
            """1,AngryPhonebook,8,11616,11616,11616,0,11616,10502144""",
            """1,AngryPhonebook,8,12270,12270,12270,0,12270,10498048"""
        ]
        results = [PerformanceTestResult.fromOldFormat(header, row) for row in tests]

        def as_tuple(r):
            return (
                r.num_samples,
                r.min_value,
                r.max_value,
                round(r.mean, 2),
                round(r.sd, 2) if r.sd is not None else None,
                r.median,
                r.max_rss,
            )

        r = results[0]
        self.assertEqual(as_tuple(r), (8, 12045, 12045, 12045, 0, 12045, None))
        r.merge(results[1])
        self.assertEqual(
            as_tuple(r),  # Note: SD, Median are lost
            (16, 12045, 12325, 12185, None, None, 10510336),
        )
        r.merge(results[2])
        self.assertEqual(
            as_tuple(r),
            (24, 11616, 12325, 11995.33, None, None, 10502144),
        )
        r.merge(results[3])
        self.assertEqual(
            as_tuple(r),
            (32, 11616, 12325, 12064, None, None, 10498048),
        )
class TestResultComparison(unittest.TestCase):
    """Tests for ResultComparison of two PerformanceTestResults."""

    def setUp(self):
        # r0/r01 share the name "GlobalClass" (one all-zero run, one
        # non-zero); r1/r2/r3 are three runs of "AngryPhonebook".
        self.r0 = PerformanceTestResult(
            """{"number":101,"name":"GlobalClass",
            "samples":[0,0,0,0,0],"max_rss":10185728}"""
        )
        self.r01 = PerformanceTestResult(
            """{"number":101,"name":"GlobalClass",
            "samples":[20,20,20],"max_rss":10185728}"""
        )
        self.r1 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[12325],"max_rss":10510336}"""
        )
        self.r2 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616],"max_rss":10502144}"""
        )
        self.r3 = PerformanceTestResult(
            """{"number":1,"name":"AngryPhonebook",
            "samples":[11616,12326],"max_rss":10502144}"""
        )

    def test_init(self):
        rc = ResultComparison(self.r1, self.r2)
        self.assertEqual(rc.name, "AngryPhonebook")
        self.assertAlmostEqual(rc.ratio, 12325.0 / 11616.0)
        self.assertAlmostEqual(rc.delta, (((11616.0 / 12325.0) - 1) * 100), places=3)
        # handle test results that sometimes change to zero, when compiler
        # optimizes out the body of the incorrectly written test
        rc = ResultComparison(self.r0, self.r0)
        self.assertEqual(rc.name, "GlobalClass")
        self.assertAlmostEqual(rc.ratio, 1)
        self.assertAlmostEqual(rc.delta, 0, places=3)
        rc = ResultComparison(self.r0, self.r01)
        self.assertAlmostEqual(rc.ratio, 0, places=3)
        self.assertAlmostEqual(rc.delta, 2000000, places=3)
        rc = ResultComparison(self.r01, self.r0)
        self.assertAlmostEqual(rc.ratio, 20001)
        self.assertAlmostEqual(rc.delta, -99.995, places=3)
        # disallow comparison of different test results
        self.assertRaises(AssertionError, ResultComparison, self.r0, self.r1)

    def test_values_is_dubious(self):
        # A comparison is dubious when the two sample ranges overlap.
        self.assertFalse(ResultComparison(self.r1, self.r2).is_dubious)
        # new.min < old.min < new.max
        self.assertTrue(ResultComparison(self.r1, self.r3).is_dubious)
        # other way around: old.min < new.min < old.max
        self.assertTrue(ResultComparison(self.r3, self.r1).is_dubious)
class FileSystemIntegration(unittest.TestCase):
    """Base class giving each test its own scratch directory on disk."""

    def setUp(self):
        """Create a fresh temporary directory for the test."""
        self.test_dir = tempfile.mkdtemp()

    def tearDown(self):
        """Delete the temporary directory and everything in it."""
        shutil.rmtree(self.test_dir)

    def write_temp_file(self, file_name, data):
        """Write *data* (an iterable of lines; a newline is appended to
        each) into the scratch directory and return the resulting path."""
        temp_file_name = os.path.join(self.test_dir, file_name)
        with open(temp_file_name, "w") as f:
            f.writelines(line + '\n' for line in data)
        return temp_file_name
class OldAndNewLog(unittest.TestCase):
    """Shared fixture: 'old' and 'new' benchmark runs parsed into
    name -> PerformanceTestResult dictionaries."""

    # Raw JSON result lines as emitted by the benchmark harness.
    old_log_content = [
        """{"number":1,"name":"AngryPhonebook","""
        + """"samples":[10458,12714,11000],"max_rss":10204365}""",
        """{"number":2,"name":"AnyHashableWithAClass","""
        + """"samples":[247027,319065,259056,259056],"max_rss":10250445}""",
        """{"number":3,"name":"Array2D","""
        + """"samples":[335831,400221,346622,346622],"max_rss":28297216}""",
        """{"number":4,"name":"ArrayAppend","""
        + """"samples":[23641,29000,24990,24990],"max_rss":11149926}""",
        """{"number":34,"name":"BitCount","samples":[3,4,4,4],"max_rss":10192896}""",
        """{"number":35,"name":"ByteSwap","samples":[4,6,4,4],"max_rss":10185933}"""
    ]

    new_log_content = [
        """{"number":265,"name":"TwoSum","samples":[5006,5679,5111,5111]}""",
        """{"number":35,"name":"ByteSwap","samples":[0,0,0,0,0]}""",
        """{"number":34,"name":"BitCount","samples":[9,9,9,9]}""",
        """{"number":4,"name":"ArrayAppend","samples":[20000,29000,24990,24990]}""",
        """{"number":3,"name":"Array2D","samples":[335831,400221,346622,346622]}""",
        """{"number":1,"name":"AngryPhonebook","samples":[10458,12714,11000,11000]}"""
    ]

    def makeResult(json_text):
        # Helper used only while the class body executes; deliberately has
        # no `self` because it is called as a plain function below.
        return PerformanceTestResult(json.loads(json_text))

    # Built at class-definition time.  The map(...) expression (a
    # comprehension's outermost iterable) is evaluated in class scope, so
    # makeResult and the *_log_content lists are visible here.
    old_results = dict(
        [
            (r.name, r) for r in map(makeResult, old_log_content)
        ]
    )
    new_results = dict(
        [
            (r.name, r) for r in map(makeResult, new_log_content)
        ]
    )

    def assert_report_contains(self, texts, report):
        """Assert that every string in *texts* occurs in *report*."""
        # Guard against accidentally passing a single string, which would
        # be iterated character by character.
        assert not isinstance(texts, str)
        for text in texts:
            self.assertIn(text, report)
class TestLogParser(unittest.TestCase):
    """Tests for LogParser across the benchmark harness output formats."""

    def test_parse_results_csv(self):
        """Ignores unknown lines, extracts data from supported formats."""
        log = """#,TEST,SAMPLES,MIN(us),MAX(us),MEAN(us),SD(us),MEDIAN(us)
7,Array.append.Array.Int?,20,10,10,10,0,10
21,Bridging.NSArray.as!.Array.NSString,20,11,11,11,0,11
42,Flatten.Array.Tuple4.lazy.for-in.Reserve,20,3,4,4,0,4

Total performance tests executed: 1
"""
        parser = LogParser()
        results = parser.parse_results(log.splitlines())
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        self.assertEqual(results[0].name, "Array.append.Array.Int?")
        self.assertEqual(results[1].name, "Bridging.NSArray.as!.Array.NSString")
        self.assertEqual(results[2].name, "Flatten.Array.Tuple4.lazy.for-in.Reserve")

    def test_parse_results_tab_delimited(self):
        # Tab-separated variant of the legacy format.
        log = "34\tBitCount\t20\t3\t4\t4\t0\t4"
        parser = LogParser()
        results = parser.parse_results(log.splitlines())
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        self.assertEqual(results[0].name, "BitCount")

    def test_parse_results_formatted_text(self):
        """Parse format that Benchmark_Driver prints to console"""
        log = """
# TEST SAMPLES MIN(μs) MAX(μs) MEAN(μs) SD(μs) MEDIAN(μs) MAX_RSS(B)
3 Array2D 20 2060 2188 2099 0 2099 20915200

Total performance tests executed: 1
"""
        parser = LogParser()
        results = parser.parse_results(log.splitlines()[1:])  # without 1st \n
        self.assertTrue(isinstance(results[0], PerformanceTestResult))
        r = results[0]
        self.assertEqual(r.name, "Array2D")
        self.assertEqual(r.max_rss, 20915200)

    def test_parse_quantiles(self):
        """Gathers samples from reported quantiles. Handles optional memory."""
        r = LogParser.results_from_string(
            """#,TEST,SAMPLES,QMIN(μs),MEDIAN(μs),MAX(μs)
1,Ackermann,3,54383,54512,54601"""
        )["Ackermann"]
        self.assertEqual(r.samples, [54383, 54512, 54601])
        r = LogParser.results_from_string(
            """#,TEST,SAMPLES,QMIN(μs),MEDIAN(μs),MAX(μs),MAX_RSS(B)
1,Ackermann,3,54529,54760,55807,266240"""
        )["Ackermann"]
        self.assertEqual(r.samples, [54529, 54760, 55807])
        self.assertEqual(r.max_rss, 266240)

    def test_parse_delta_quantiles(self):
        # Delta-encoded quantiles: omitted deltas mean "no change".
        r = LogParser.results_from_string(  # 2-quantile aka. median
            "#,TEST,SAMPLES,QMIN(μs),𝚫MEDIAN,𝚫MAX\n0,B,1,101,,"
        )["B"]
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value, len(r.samples)),
            (1, 101, 101, 101, 1),
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),𝚫MEDIAN,𝚫MAX\n0,B,2,101,,1"
        )["B"]
        self.assertEqual(
            (r.num_samples, r.min_value, r.median, r.max_value, len(r.samples)),
            (2, 101, 101.5, 102, 2),
        )
        r = LogParser.results_from_string(  # 20-quantiles aka. ventiles
            "#,TEST,SAMPLES,QMIN(μs),𝚫V1,𝚫V2,𝚫V3,𝚫V4,𝚫V5,𝚫V6,𝚫V7,𝚫V8,"
            + "𝚫V9,𝚫VA,𝚫VB,𝚫VC,𝚫VD,𝚫VE,𝚫VF,𝚫VG,𝚫VH,𝚫VI,𝚫VJ,𝚫MAX\n"
            + "202,DropWhileArray,200,214,,,,,,,,,,,,1,,,,,,2,16,464"
        )["DropWhileArray"]
        self.assertEqual(
            # Oversampled ventiles: individual samples are not reconstructed.
            (r.num_samples, r.min_value, r.max_value, len(r.samples)),
            (200, 214, 697, 0),
        )

    def test_parse_meta(self):
        # PAGES/ICS/YIELD metadata columns, with and without MAX_RSS, in
        # both the full-statistics and quantile headers.
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "PAGES,ICS,YIELD\n"
            + "0,B,1,2,2,2,0,2,7,29,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count), (2, 7, 29, 15)
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),"
            + "MAX_RSS(B),PAGES,ICS,YIELD\n"
            + "0,B,1,3,3,3,0,3,36864,9,50,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (3, 9, 50, 15, 36864),
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),MAX(μs),PAGES,ICS,YIELD\n" + "0,B,1,4,4,8,31,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count), (4, 8, 31, 15)
        )
        r = LogParser.results_from_string(
            "#,TEST,SAMPLES,QMIN(μs),MAX(μs),MAX_RSS(B),PAGES,ICS,YIELD\n"
            + "0,B,1,5,5,32768,8,28,15"
        )["B"]
        self.assertEqual(
            (r.min_value, r.mem_pages, r.involuntary_cs, r.yield_count, r.max_rss),
            (5, 8, 28, 15, 32768),
        )

    def test_results_from_merge(self):
        """Parsing concatenated log merges same PerformanceTestResults"""
        concatenated_logs = """#,TEST,SAMPLES,MIN,MAX,MEAN,SD,MEDIAN
4,ArrayAppend,20,23641,29000,24990,0,24990
4,ArrayAppend,1,20000,20000,20000,0,20000"""
        results = LogParser.results_from_string(concatenated_logs)
        self.assertEqual(list(results.keys()), ["ArrayAppend"])
        result = results["ArrayAppend"]
        self.assertTrue(isinstance(result, PerformanceTestResult))
        # Merged extremes span both runs.
        self.assertEqual(result.min_value, 20000)
        self.assertEqual(result.max_value, 29000)
class TestTestComparator(OldAndNewLog):
def test_init(self):
def names(tests):
return [t.name for t in tests]
tc = TestComparator(self.old_results, self.new_results, 0.05)
self.assertEqual(names(tc.unchanged), ["AngryPhonebook", "Array2D"])
# self.assertEqual(names(tc.increased), ["ByteSwap", "ArrayAppend"])
self.assertEqual(names(tc.decreased), ["BitCount"])
self.assertEqual(names(tc.added), ["TwoSum"])
self.assertEqual(names(tc.removed), ["AnyHashableWithAClass"])
# other way around
tc = TestComparator(self.new_results, self.old_results, 0.05)
self.assertEqual(names(tc.unchanged), ["AngryPhonebook", "Array2D"])
self.assertEqual(names(tc.increased), ["BitCount"])
self.assertEqual(names(tc.decreased), ["ByteSwap", "ArrayAppend"])
self.assertEqual(names(tc.added), ["AnyHashableWithAClass"])
self.assertEqual(names(tc.removed), ["TwoSum"])
# delta_threshold determines the sorting into change groups;
# report only change above 100% (ByteSwap's runtime went to 0):
tc = TestComparator(self.old_results, self.new_results, 1)
self.assertEqual(
names(tc.unchanged),
["AngryPhonebook", "Array2D", "ArrayAppend", "BitCount"],
)
self.assertEqual(names(tc.increased), ["ByteSwap"])
self.assertEqual(tc.decreased, [])
class TestReportFormatter(OldAndNewLog):
def setUp(self):
super(TestReportFormatter, self).setUp()
self.tc = TestComparator(self.old_results, self.new_results, 0.05)
self.rf = ReportFormatter(self.tc, changes_only=False)
self.markdown = self.rf.markdown()
self.git = self.rf.git()
self.html = self.rf.html()
def assert_markdown_contains(self, texts):
self.assert_report_contains(texts, self.markdown)
def assert_git_contains(self, texts):
self.assert_report_contains(texts, self.git)
def assert_html_contains(self, texts):
self.assert_report_contains(texts, self.html)
def test_values(self):
self.assertEqual(
ReportFormatter.values(
PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[10664,12933,11035,10884]}"""
)
),
("AngryPhonebook", "10664", "12933", "11379", "—"),
)
self.assertEqual(
ReportFormatter.values(
PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[12045],"max_rss":10510336}"""
)
),
("AngryPhonebook", "12045", "12045", "12045", "10510336"),
)
r1 = PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[12325],"max_rss":10510336}"""
)
r2 = PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[11616],"max_rss":10510336}"""
)
self.assertEqual(
ReportFormatter.values(ResultComparison(r1, r2)),
("AngryPhonebook", "12325", "11616", "-5.8%", "1.06x"),
)
self.assertEqual(
ReportFormatter.values(ResultComparison(r2, r1)),
("AngryPhonebook", "11616", "12325", "+6.1%", "0.94x"),
)
r1 = PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[12325],"max_rss":10510336}"""
)
r2 = PerformanceTestResult(
"""{"number":1,"name":"AngryPhonebook",
"samples":[11616,12326],"max_rss":10510336}"""
)
self.assertEqual(
ReportFormatter.values(ResultComparison(r1, r2))[4],
"1.06x (?)", # is_dubious
)
def test_justified_columns(self):
"""Table columns are all formated with same width, defined by the
longest value.
"""
self.assert_markdown_contains(
[
"AnyHashableWithAClass | 247027 | 319065 | 271051 | 10250445",
"Array2D | 335831 | 335831 | +0.0% | 1.00x",
]
)
self.assert_git_contains(
[
"AnyHashableWithAClass 247027 319065 271051 10250445",
"Array2D 335831 335831 +0.0% 1.00x",
]
)
def test_column_headers(self):
"""Report contains table headers for ResultComparisons and changed
PerformanceTestResults.
"""
performance_test_result = self.tc.added[0]
self.assertEqual(
ReportFormatter.header_for(performance_test_result),
("TEST", "MIN", "MAX", "MEAN", "MAX_RSS"),
)
comparison_result = self.tc.increased[0]
self.assertEqual(
ReportFormatter.header_for(comparison_result),
("TEST", "OLD", "NEW", "DELTA", "RATIO"),
)
self.assert_markdown_contains(
[
"TEST | OLD | NEW | DELTA | RATIO",
":--- | ---: | ---: | ---: | ---: ",
"TEST | MIN | MAX | MEAN | MAX_RSS",
]
)
self.assert_git_contains(
[
"TEST OLD NEW DELTA RATIO",
"TEST MIN MAX MEAN MAX_RSS",
]
)
self.assert_html_contains(
[
"""
<th align='left'>OLD</th>
<th align='left'>NEW</th>
<th align='left'>DELTA</th>
<th align='left'>RATIO</th>""",
"""
<th align='left'>MIN</th>
<th align='left'>MAX</th>
<th align='left'>MEAN</th>
<th align='left'>MAX_RSS</th>""",
]
)
def test_emphasize_speedup(self):
"""Emphasize speedup values for regressions and improvements"""
# tests in No Changes don't have emphasized speedup
self.assert_markdown_contains(
[
"BitCount | 3 | 9 | +199.9% | **0.33x**",
"ByteSwap | 4 | 0 | -100.0% | **4001.00x**",
"AngryPhonebook | 10458 | 10458 | +0.0% | 1.00x ",
"ArrayAppend | 23641 | 20000 | -15.4% | **1.18x (?)**",
]
)
self.assert_git_contains(
[
"BitCount 3 9 +199.9% **0.33x**",
"ByteSwap 4 0 -100.0% **4001.00x**",
"AngryPhonebook 10458 10458 +0.0% 1.00x",
"ArrayAppend 23641 20000 -15.4% **1.18x (?)**",
]
)
self.assert_html_contains(
[
"""
<tr>
<td align='left'>BitCount</td>
<td align='left'>3</td>
<td align='left'>9</td>
<td align='left'>+199.9%</td>
<td align='left'><font color='red'>0.33x</font></td>
</tr>""",
"""
<tr>
<td align='left'>ByteSwap</td>
<td align='left'>4</td>
<td align='left'>0</td>
<td align='left'>-100.0%</td>
<td align='left'><font color='green'>4001.00x</font></td>
</tr>""",
"""
<tr>
<td align='left'>AngryPhonebook</td>
<td align='left'>10458</td>
<td align='left'>10458</td>
<td align='left'>+0.0%</td>
<td align='left'><font color='black'>1.00x</font></td>
</tr>""",
]
)
def test_sections(self):
"""Report is divided into sections with summaries."""
self.assert_markdown_contains(
[
"""<details open>
<summary>Regression (1)</summary>""",
"""<details >
<summary>Improvement (2)</summary>""",
"""<details >
<summary>No Changes (2)</summary>""",
"""<details open>
<summary>Added (1)</summary>""",
"""<details open>
<summary>Removed (1)</summary>""",
]
)
self.assert_git_contains(
[
"Regression (1): \n",
"Improvement (2): \n",
"No Changes (2): \n",
"Added (1): \n",
"Removed (1): \n",
]
)
self.assert_html_contains(
[
"<th align='left'>Regression (1)</th>",
"<th align='left'>Improvement (2)</th>",
"<th align='left'>No Changes (2)</th>",
"<th align='left'>Added (1)</th>",
"<th align='left'>Removed (1)</th>",
]
)
def test_report_only_changes(self):
"""Leave out tests without significant change."""
rf = ReportFormatter(self.tc, changes_only=True)
markdown, git, html = rf.markdown(), rf.git(), rf.html()
self.assertNotIn("No Changes", markdown)
self.assertNotIn("AngryPhonebook", markdown)
self.assertNotIn("No Changes", git)
self.assertNotIn("AngryPhonebook", git)
self.assertNotIn("No Changes", html)
self.assertNotIn("AngryPhonebook", html)
def test_single_table_report(self):
"""Single table report has inline headers and no elaborate sections."""
self.tc.removed = [] # test handling empty section
rf = ReportFormatter(self.tc, changes_only=True, single_table=True)
markdown = rf.markdown()
self.assertNotIn("<details", markdown) # no sections
self.assertNotIn("\n\n", markdown) # table must not be broken
self.assertNotIn("Removed", markdown)
self.assert_report_contains(
[
"\n**Regression** ",
"| **OLD**",
"| **NEW**",
"| **DELTA**",
"| **RATIO**",
"\n**Added** ",
"| **MIN**",
"| **MAX**",
"| **MEAN**",
"| **MAX_RSS**",
],
markdown,
)
# Single delimiter row:
self.assertIn("\n:---", markdown) # first column is left aligned
self.assertEqual(markdown.count("| ---:"), 4) # other, right aligned
# Separator before every inline header (new section):
self.assertEqual(markdown.count(" | | | | "), 2)
git = rf.git()
self.assertNotIn("): \n", git) # no sections
self.assertNotIn("REMOVED", git)
self.assert_report_contains(
[
"\nREGRESSION ",
" OLD ",
" NEW ",
" DELTA ",
" RATIO ",
"\n\nADDED ",
" MIN ",
" MAX ",
" MEAN ",
" MAX_RSS ",
],
git,
)
# Separator before every inline header (new section):
self.assertEqual(git.count("\n\n"), 2)
class Test_parse_args(unittest.TestCase):
required = ["--old-file", "old.log", "--new-file", "new.log"]
def test_required_input_arguments(self):
with captured_output() as (_, err):
self.assertRaises(SystemExit, parse_args, [])
self.assertIn("usage: compare_perf_tests.py", err.getvalue())
args = parse_args(self.required)
self.assertEqual(args.old_file, "old.log")
self.assertEqual(args.new_file, "new.log")
def test_format_argument(self):
self.assertEqual(parse_args(self.required).format, "markdown")
self.assertEqual(
parse_args(self.required + ["--format", "markdown"]).format, "markdown"
)
self.assertEqual(parse_args(self.required + ["--format", "git"]).format, "git")
self.assertEqual(
parse_args(self.required + ["--format", "html"]).format, "html"
)
with captured_output() as (_, err):
self.assertRaises(
SystemExit, parse_args, self.required + ["--format", "bogus"]
)
self.assertIn(
"error: argument --format: invalid choice: 'bogus' "
"(choose from 'markdown', 'git', 'html')",
err.getvalue(),
)
def test_delta_threshold_argument(self):
# default value
args = parse_args(self.required)
self.assertEqual(args.delta_threshold, 0.05)
# float parsing
args = parse_args(self.required + ["--delta-threshold", "0.1"])
self.assertEqual(args.delta_threshold, 0.1)
args = parse_args(self.required + ["--delta-threshold", "1"])
self.assertEqual(args.delta_threshold, 1.0)
args = parse_args(self.required + ["--delta-threshold", ".2"])
self.assertEqual(args.delta_threshold, 0.2)
with captured_output() as (_, err):
self.assertRaises(
SystemExit, parse_args, self.required + ["--delta-threshold", "2,2"]
)
self.assertIn(
" error: argument --delta-threshold: invalid float " "value: '2,2'",
err.getvalue(),
)
def test_output_argument(self):
self.assertEqual(parse_args(self.required).output, None)
self.assertEqual(
parse_args(self.required + ["--output", "report.log"]).output, "report.log"
)
def test_changes_only_argument(self):
self.assertFalse(parse_args(self.required).changes_only)
self.assertTrue(parse_args(self.required + ["--changes-only"]).changes_only)
class Test_compare_perf_tests_main(OldAndNewLog, FileSystemIntegration):
"""Integration test that invokes the whole comparison script."""
markdown = [
"<summary>Regression (1)</summary>",
"TEST | OLD | NEW | DELTA | RATIO",
"BitCount | 3 | 9 | +199.9% | **0.33x**",
]
git = [
"Regression (1):",
"TEST OLD NEW DELTA RATIO",
"BitCount 3 9 +199.9% **0.33x**",
]
html = ["<html>", "<td align='left'>BitCount</td>"]
def setUp(self):
super(Test_compare_perf_tests_main, self).setUp()
self.old_log = self.write_temp_file("old.log", self.old_log_content)
self.new_log = self.write_temp_file("new.log", self.new_log_content)
def execute_main_with_format(self, report_format, test_output=False):
report_file = self.test_dir + "report.log"
args = [
"compare_perf_tests.py",
"--old-file",
self.old_log,
"--new-file",
self.new_log,
"--format",
report_format,
]
sys.argv = args if not test_output else args + ["--output", report_file]
with captured_output() as (out, _):
main()
report_out = out.getvalue()
if test_output:
with open(report_file, "r") as f:
report = f.read()
# because print adds newline, add one here, too:
report_file = str(report + "\n")
else:
report_file = None
return report_out, report_file
def test_markdown(self):
"""Writes Markdown formatted report to stdout"""
report_out, _ = self.execute_main_with_format("markdown")
self.assert_report_contains(self.markdown, report_out)
def test_markdown_output(self):
"""Writes Markdown formatted report to stdout and `--output` file."""
report_out, report_file = self.execute_main_with_format(
"markdown", test_output=True
)
self.assertEqual(report_out, report_file)
self.assert_report_contains(self.markdown, report_file)
def test_git(self):
"""Writes Git formatted report to stdout."""
report_out, _ = self.execute_main_with_format("git")
self.assert_report_contains(self.git, report_out)
def test_git_output(self):
"""Writes Git formatted report to stdout and `--output` file."""
report_out, report_file = self.execute_main_with_format("git", test_output=True)
self.assertEqual(report_out, report_file)
self.assert_report_contains(self.git, report_file)
def test_html(self):
"""Writes HTML formatted report to stdout."""
report_out, _ = self.execute_main_with_format("html")
self.assert_report_contains(self.html, report_out)
def test_html_output(self):
"""Writes HTML formatted report to stdout and `--output` file."""
report_out, report_file = self.execute_main_with_format(
"html", test_output=True
)
self.assertEqual(report_out, report_file)
self.assert_report_contains(self.html, report_file)
if __name__ == "__main__":
unittest.main()
| apple/swift | benchmark/scripts/test_compare_perf_tests.py | test_compare_perf_tests.py | py | 38,114 | python | en | code | 64,554 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "compare_perf_tests.PerformanceTestResult.fromOldFormat",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "compare_perf_tests.PerformanceTestResult",
"line_number": 23,
... |
40243466051 | import streamlit as st
import cv2
import time
import os
import tempfile
import matplotlib.pyplot as plt
from src.utils.streamlit import factors
from src.utils.onnx_process import load_model, load_label_map, video_predict
from src.utils.video_process import video_stitch
from src.utils.streamlit import save_uploaded_file
MODEL_PATH = "./results/models/onnx_dive/model.onnx"
LABEL_PATH = "./results/models/onnx_dive/label_map.pbtxt"
MODEL_INPUT_SIZE = (640, 640) # width, height
NUM_CLASSES = 5
CONF_THRESHOLD = 0.2
NMS_THRESHOLD = 0.1
##STEP 1 Load Model
with st.spinner(text="Loading Model ... Please be patient!"):
session, input_name, output_name = load_model(MODEL_PATH)
##STEP 2 Upload Video
st.write("# Upload diving video:\n")
with st.expander("How to Use YOEO"):
st.write("............")
# create temp dir for storing video and outputs
temp_dir = tempfile.TemporaryDirectory()
temp_path = temp_dir.name
video_file = st.file_uploader(
"Choose a File", accept_multiple_files=False, type=["mp4", "mov"]
)
if video_file is not None:
file_details = {"FileName": video_file.name, "FileType": video_file.type}
st.write(file_details)
video_path = save_uploaded_file(video_file, temp_path)
st.write(video_path)
# get fps for optimization slider max value
fps = round(cv2.VideoCapture(video_path).get(cv2.CAP_PROP_FPS))
factors_fps = list(factors(fps))
# user options
marine_options = st.multiselect(
"What flora & fauna do you prefer",
["Fish", "Coral", "Turtle", "Shark", "Manta Ray"],
["Fish", "Coral", "Turtle", "Shark", "Manta Ray"],
help="Select the flora & fauna you want to be included in the final video",
)
label_map = load_label_map(LABEL_PATH)
new_label_map = {}
for key, val in label_map.items():
new_label_map[val["name"].lower().replace('"', "")] = key - 1
marine_options = [new_label_map[x.lower()] for x in marine_options]
# user advanced options
with st.expander("Advanced Options"):
st.write("###### Leave as default if unsure!")
opt_val = st.select_slider(
"Optimization", options=factors_fps, value=max(factors_fps)
) # num of frames per sec to do inferencing
strict_val = st.slider(
"Trimming Strictness", min_value=0, value=fps
) # number of frames prior to keep if current frame is to be kept
sharpen = st.checkbox("Sharpen Video")
color_grade = st.checkbox("Color Grade Video")
yt_link = st.text_input("Enter a Youtube Audio Link")
# start inferencing
trim_bt = st.button("Start Auto-Trimming!")
st.write(trim_bt)
if trim_bt:
with st.spinner(text="YOEO working its magic: IN PROGRESS ..."):
(
frame_predictions,
bbox_class_score,
orig_frames,
origi_shape,
fps,
) = video_predict(
video_path,
"frames",
session,
input_name,
output_name,
LABEL_PATH,
MODEL_INPUT_SIZE,
NUM_CLASSES,
CONF_THRESHOLD,
NMS_THRESHOLD,
opt_val,
)
bbox_video_path = os.path.join(temp_path, "orig_video")
video_stitch(
frame_predictions,
bbox_video_path,
video_file.name.replace(".mp4", ""),
origi_shape,
fps,
)
# recode video using ffmpeg
video_bbox_filename = os.path.join(bbox_video_path, video_file.name)
video_bbox_recode_filename = video_bbox_filename.replace(".mp4", "_recoded.mp4")
os.system(
"ffmpeg -i {} -vcodec libx264 {}".format(
os.path.join(bbox_video_path, video_file.name),
video_bbox_recode_filename,
)
)
tab_od, tab_trim, tab_beauty = st.tabs(
[
"YOEO's Object Detection Results",
"Your Trimmed Video",
"Beautiful Photos Captured By You",
]
)
with tab_od:
st.write(video_bbox_filename)
# st.write(os.listdir(os.path.join(RESULTS_PATH, latest_folder)))
st.write(video_bbox_recode_filename)
st.subheader("YOEO's Object Detection Results:")
st.video(video_bbox_recode_filename)
st.subheader("Flora & Fauna Detected: ")
col1, col2, col3 = st.columns(3)
col1.metric("# Species Detected", "2")
col2.metric("Turtle", "1")
col3.metric("Fish", "23")
with tab_trim:
st.subheader("YOEO's Trimmed Video:")
with tab_beauty:
st.subheader("YOEO's Beautiful Photos:")
with st.expander("About YOEO"):
st.write(
"YOEO (You Only Edit Once) is an object detection model and web application created by data scientists and AI practitioners who are diving enthusiasts!"
)
st.write("The Model is trained on ...")
##STEP 3
# st.write("# 3. YOEO working its magic: ")
# st.write("-> to insert model inference and stich algo in progress bar")
# my_bar = st.progress(0)
# for percent_complete in range(100):
# time.sleep(0.1)
# my_bar.progress(percent_complete + 1)
##STEP 4
# st.write("# 4. Objects of interest detected and trimmed video output: ")
# col1, col2, col3 = st.columns(3)
# col1.metric("# Species Detected", "2")
# col2.metric("Turtle", "1")
# col3.metric("Fish", "23")
# st.video(vid_file)
| teyang-lau/you-only-edit-once | streamlit_app_onnx.py | streamlit_app_onnx.py | py | 5,602 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "streamlit.spinner",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "src.utils.onnx_process.load_model",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "streamlit.write",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "s... |
33899004274 | import json
from copy import deepcopy
import numpy as np
import pandas as pd
from CWiPy import settings
from CWiPy.MembershipFunction import MembershipFunction
from CWiPy.Modifier import dict_modifiers
def get_synonyms(word):
"""
Args:
word:
Returns:
list of objects containing term and similarity from -100 to 100
Raises:
IOException: when not found, you should load words first
"""
word = word.replace('-', '_')
data_file = \
f"{settings.BASE_DIR}/{settings.STATIC_DIR}/thesaurus/{word}.json"
result = []
with open(data_file) as f:
thesaurus_data = json.load(f)
# print(thesaurus_data['data']['definitionData']['definitions'])
for entry in thesaurus_data["data"]["definitionData"]["definitions"]:
for synonym in entry["synonyms"]:
result.append({
'term': synonym['term'],
'similarity': int(synonym['similarity']),
})
f.close()
return result
def get_modifiers_synonyms(limit=100):
"""
Args:
limit: similarity limit
Returns:
dict of synonym modifiers: {synonym: modifier}
"""
result = {}
for modifier in dict_modifiers().keys():
for synonym in get_synonyms(modifier):
if synonym['similarity'] < limit:
continue
term = synonym['term']
if term not in result:
result[term] = set()
result[term].add(modifier)
return result
class SyntaxException(BaseException):
pass
class FuzzyQuery:
def __init__(self, fuzzy_query, fields, limit=None, alpha_cut=None,
modifiers_included=None, round_values=None):
"""
Args:
fuzzy_query: fuzzy query string
fields: dict of querying numerical fields: {field_name, {membership_function_name: membership_function}}
limit: similarity limit for synonyms
alpha_cut: alpha cut applied for range filtering
modifiers_included: are modifiers included in query
round_values: round returning query values
Raises:
SyntaxException: on syntax error
"""
if limit is None:
limit = 100
if alpha_cut is None:
alpha_cut = 0.5
if modifiers_included is None:
modifiers_included = True
if round_values is None:
round_values = False
self.fuzzy_query = fuzzy_query
self.fields = fields
self.limit = limit
self.alpha_cut = alpha_cut
self.round_values = round_values
self.modifiers_included = modifiers_included
def extract_crisp_parameters(self):
"""
Converts fuzzy_query to crisp query parameters.
Fuzzy expression structure:
[composite modifier] [summarizer] [field] [connector]
[composite modifier] [summarizer] [field] [connector]
[composite modifier] [summarizer] [field] [connector] ...
[composite modifier] [summarizer] [field]
example fuzzy_query: middle age and very high salary
[connector] = {and, or, but}
Returns:
dict[field, [lower bound, upper bound, connector]]
"""
EOQ_TOKEN = "~~~END_TOKEN~~~"
if self.fuzzy_query == "":
raise SyntaxException("Empty query")
tokens = list(
filter(lambda x: len(x) > 0, self.fuzzy_query.split(' ')))
tokens.append(EOQ_TOKEN)
modifiers_synonyms = get_modifiers_synonyms(self.limit)
modifiers = dict_modifiers()
connectors = ["and", "or", "", "but", EOQ_TOKEN]
connector_sql = {
"and": "and",
"or": "or",
"but": "and",
EOQ_TOKEN: "",
}
expression = []
result = []
for token in tokens:
if token in connectors:
token = connector_sql[token]
if self.modifiers_included and len(expression) < 2:
raise SyntaxException(
f"Empty or incorrect expression {expression}")
original_expression = expression
expression.reverse()
if expression[0] not in self.fields.keys():
raise SyntaxException(
f"Unknown field {expression[0]} in expression "
f"{original_expression}")
field = expression.pop(0)
mf_name = expression[0]
if mf_name not in self.fields[field].keys():
raise SyntaxException(
f"Unknown membership function {mf_name} in expression "
f"{original_expression}")
mf: MembershipFunction = deepcopy(self.fields[field][mf_name])
expression.pop(0)
while len(expression) > 0:
if expression[0] not in modifiers and expression[0] \
not in modifiers_synonyms:
raise SyntaxException(
f"Unknown modifier {expression[0]} in expression "
f"{original_expression}")
if expression[0] in modifiers.keys():
mf.set_modifier(modifiers[expression[0]](mf.modifier))
else:
mf.set_modifier(
modifiers_synonyms[expression[0]][0](mf.modifier))
expression.pop(0)
l, r = mf.extract_range(self.alpha_cut)
result.append([field, l, r, token])
else:
expression.append(token)
return result
def to_sql(self):
"""
Returns:
Constructed SQL where clause
"""
crisp_query = ""
params = self.extract_crisp_parameters()
for (field, l, r, token) in params:
if self.round_values:
l, r = int(l), int(r)
crisp_query += f" {l} <= {field} and {field} <= {r} {token} "
return crisp_query
def matching(self, df: pd.DataFrame) -> pd.Series:
"""
Args:
df: Querying pandas dataframe
Returns:
Series matching fuzzy query
"""
params = self.extract_crisp_parameters()
result_series = pd.Series(np.ones(len(df), dtype=bool))
connector = ""
for (field, left, right, next_connector) in params:
if self.round_values:
left, right = int(left), int(right)
matching_series = (left <= df[field]) & (df[field] <= right)
if connector == "":
result_series = matching_series
elif connector == "or":
result_series = result_series | matching_series
else: # and
result_series = result_series & matching_series
connector = next_connector
return result_series
| akali/fuzzy | CWiPy/Syntax.py | Syntax.py | py | 7,092 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "CWiPy.settings.BASE_DIR",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "CWiPy.settings",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "CWiPy.settings.STATIC_DIR",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_na... |
932987117 | from django.db.models.signals import post_save
from django.dispatch import receiver
from .models import Piece
@receiver(post_save, sender=Piece)
def save_base64_thumbnail(**kwargs):
update_fields = kwargs["update_fields"]
# Without this, the signal will be called in an infinite loop.
if update_fields is not None and "image_b64_thumbnail" in update_fields:
return
piece = kwargs["instance"]
b64thumb = piece.generate_base64_data_thumbnail()
piece.image_b64_thumbnail = b64thumb
piece.save(update_fields=["image_b64_thumbnail"])
| ChrisCrossCrash/chriskumm.com_django | art/signals.py | signals.py | py | 569 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.dispatch.receiver",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.db.models.signals.post_save",
"line_number": 6,
"usage_type": "argument"
},
{
"api_name": "models.Piece",
"line_number": 6,
"usage_type": "name"
}
] |
28031460700 | import pickle
import numpy as np
import sklearn.base
from matplotlib.figure import figaspect
from sklearn.linear_model import LogisticRegression
from dataclasses import dataclass
from sklearn.preprocessing import StandardScaler
from . import network
from tqdm import tqdm
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
import warnings
def _cart2pol(x, y):
rho = np.sqrt(x ** 2 + y ** 2)
phi = np.arctan2(y, x) # * 180 / np.pi
return rho, phi
def _pol2cart(rho, phi):
x = rho * np.cos(phi)
y = rho * np.sin(phi)
return x, y
@dataclass
class HopfieldSimulation:
hopnet: network.Hopfield
states: np.ndarray
energies: np.ndarray
def save(self, filename):
"""
Save HopfieldSimulation object with pickle.
:param filename:
"""
with open(filename, 'wb') as f:
pickle.dump(self, f)
@dataclass
class HopfiledEmbedding:
hopnet: network.Hopfield
embedding_model: sklearn.base.BaseEstimator
attractors: dict
attractor_model: sklearn.base.BaseEstimator
attractor_model_dim: int
state_sample: np.ndarray
attractor_sample: np.ndarray
def save(self, filename):
"""
Save HopfieldEmbedding with pickle.
:param filename:
"""
with open(filename, 'wb') as f:
pickle.dump(self, f)
def plot(self,
activations=None,
plot_type='scatter',
legend=True,
density_bins=1000,
density_sigma=20,
ax=None,
regimes_fill_kwargs=dict(),
regimes_contour_kwargs=dict(),
attractor_plot_type='scatter',
attractor_kwargs=dict(),
legend_kwargs=None,
**kwargs):
"""
Plot the attractor regimes and the data in the embedding space.
:param activations: activations to plot
:param regimes_fill_kwargs: kwargs for attractor regimes fill
:param regimes_line_kwargs: kwargs for attractor regimes line
:param ax: matplotlib axis (with polar projection)
:param kwargs: kwargs for embedding model plot
"""
default_regimes_fill_kwargs = dict(alpha=.2, cmap='tab10')
default_regimes_fill_kwargs.update(regimes_fill_kwargs)
default_regimes_contour_kwargs = dict(colors="gray", linewidths=0.5)
default_regimes_contour_kwargs.update(regimes_contour_kwargs)
if attractor_plot_type == 'glassbrain':
default_attractor_kwargs = dict(display_mode='x', colorbar=False)
elif attractor_plot_type == 'scatter':
default_attractor_kwargs = dict()
else:
raise ValueError("Unknown attractor plot type.")
default_attractor_kwargs.update(attractor_kwargs)
if plot_type == 'scatter':
default_kwargs = dict(alpha=1.0, s=10, linewidths=0, c='black')
elif plot_type == 'line':
default_kwargs = dict()
elif plot_type == 'stream':
default_kwargs = dict(linewidth=5, color='gray', density=1.2, bins=10)
default_kwargs.update(kwargs)
elif plot_type == 'hist2d':
default_kwargs = dict(bins=100, cmap='gray_r')
elif plot_type == 'density':
default_kwargs = dict(cmap='gray_r')
elif plot_type == 'contour':
default_kwargs = dict(alpha=1, linewidths=0.1)
elif plot_type == 'contourf':
default_kwargs = dict(levels=20, antialiased=True, cmap='Greens')
else:
raise ValueError("Unknown type.")
default_kwargs.update(kwargs)
if plot_type == 'stream':
stream_linewidth = default_kwargs.pop('linewidth')
stream_bins = default_kwargs.pop('bins')
if ax is None:
fig = plt.gcf()
if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
ax_cart = fig.add_axes([0,0,1,1], polar=False, frameon=False)
ax = fig.add_axes([0,0,1,1], polar=True, frameon=False)
w, h = figaspect(1)
ax.figure.set_size_inches(w, h)
else:
ax.set_aspect('equal')
ax.patch.set_alpha(0)
if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
ax_cart = ax.figure.add_axes(ax.get_position(), polar=False, frameon=False, zorder=-1)
# todo: test if ax is specified
max_r = 1
for l, attractor in self.attractors.items():
att = StandardScaler().fit_transform(np.array(attractor).reshape(1, -1).T).T
att_cart = self.embedding_model.transform(att)[:, :2]
r, th = _cart2pol(att_cart[:, 0], att_cart[:, 1])
max_r = max(max_r, r.squeeze())
if plot_type == 'stream' or plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
ax_cart.set_xlim([-1.1 * max_r, 1.1 * max_r])
ax_cart.set_ylim([-1.1 * max_r, 1.1 * max_r])
ax_cart.set_xticks([])
ax_cart.set_yticks([])
ax_cart.grid(False)
ax.set_ylim([0, 1.1 * max_r])
# plot actual data
if activations is not None:
# transform activations to embedding space
activations = StandardScaler().fit_transform(activations.T).T
embedded = self.embedding_model.transform(activations)
r, th = _cart2pol(embedded[:, 0], embedded[:, 1])
if plot_type == 'scatter':
plot = ax.scatter(th, r, **default_kwargs)
# produce a legend with a cross-section of sizes from the scatter
if legend_kwargs is not None:
handles, labels = plot.legend_elements(prop="colors")
legend = ax.legend(handles, labels, **legend_kwargs)
elif plot_type == 'line':
plot = ax.plot(th, r, **default_kwargs)
elif plot_type == 'stream':
directions = embedded[1:, :] - embedded[:-1, :]
from scipy.stats import binned_statistic_2d
dir_x, x_edges, y_edges, _ = binned_statistic_2d(embedded[:-1, 1], embedded[:-1, 0], directions[:, 0],
statistic=np.mean,
bins=[np.linspace(-max_r*1.1, max_r*1.1, stream_bins),
np.linspace(-max_r*1.1, max_r*1.1, stream_bins)])
dir_y, x_edges, y_edges, _ = binned_statistic_2d(embedded[:-1, 1], embedded[:-1, 0], directions[:, 1],
statistic=np.mean,
bins=[np.linspace(-max_r * 1.1, max_r * 1.1,
stream_bins),
np.linspace(-max_r * 1.1, max_r * 1.1,
stream_bins)])
x, y = np.meshgrid((x_edges[1:] + x_edges[:-1]) / 2,
(y_edges[1:] + y_edges[:-1]) / 2)
speed = np.sqrt(dir_x ** 2 + dir_y ** 2)
ax_cart.streamplot(x, y, dir_x, dir_y,
linewidth= stream_linewidth * speed / speed[~ np.isnan(speed)].max(),
**default_kwargs)
elif plot_type == 'hist2d':
raise NotImplementedError("Not implemented yet.")
#plot = ax.hist2d(th, r, **default_kwargs)
elif plot_type == 'density' or plot_type == 'contour' or plot_type == 'contourf':
H, x_edges, y_edges = np.histogram2d(embedded[:, 1], embedded[:, 0],
bins=density_bins,
density=True,
range=[[-max_r*1.2, max_r*1.2], [-max_r*1.2, max_r*1.2]])
from scipy.ndimage import gaussian_filter
H = gaussian_filter(H, sigma=density_sigma, mode='wrap')
H[H<0.0001] = np.nan
x, y = np.meshgrid(x_edges,
y_edges) # rectangular plot of polar data
# calculate midpoints of bins
y = (y[: -1, :-1] + y[1:, 1:]) / 2
x = (x[: -1, :-1] + x[1:, 1:]) / 2
rad, theta = _cart2pol(x, y)
#theta = theta % (np.pi * 2)
# fill
if plot_type == 'density':
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax_cart.pcolormesh(x, y, H, **default_kwargs)
elif plot_type == 'contour':
ax_cart.contour(x, y, H, **default_kwargs)
elif plot_type == 'contourf':
ax_cart.contourf(x, y, H, **default_kwargs)
else:
raise ValueError("Unknown type.")
# plot attractor regimes
ax.set_prop_cycle(None)
def predict_label(x, y):
return self.attractor_model.predict(np.array([x, y]).T.reshape(-1, self.attractor_model_dim))
x, y = np.meshgrid(np.linspace(-max_r * 1.2, max_r * 1.2, 500),
np.linspace(-max_r * 1.2, max_r * 1.2, 500)) # rectangular plot of polar data
pred = predict_label(x, y).reshape(x.shape)
rad, theta = _cart2pol(x, y)
# fill
with warnings.catch_warnings():
warnings.simplefilter("ignore")
ax.pcolormesh(theta, rad, pred.T, **default_regimes_fill_kwargs)
# contour
ax.contour(theta, rad, pred.T, **default_regimes_contour_kwargs)
# plot attractor states
ax.set_prop_cycle(None)
for l, attractor in self.attractors.items():
att = StandardScaler().fit_transform(np.array(attractor).reshape(1, -1).T).T
att_cart = self.embedding_model.transform(att)[:, :2]
r, th = _cart2pol(att_cart[:, 0], att_cart[:, 1])
if attractor_plot_type == 'scatter':
ax.scatter(th, r, **default_attractor_kwargs)
elif attractor_plot_type == 'glassbrain':
trans = ax.transData.transform((th, r))
trans = ax.figure.transFigure.inverted().transform(trans).flatten()
network.State(attractor).plot(figure=ax.figure,
axes=(trans[0] - 0.05, trans[1] - 0.05, 0.1, 0.1),
**default_attractor_kwargs)
else:
raise ValueError("Unknown attractor_type.")
ax.set_xticks([0, 0.5 * np.pi, np.pi, 1.5 * np.pi], ["", "", "", ""])
ax.set_yticks(np.arange(0, np.round(max_r) + 1, 2))
ax.set_rlabel_position(0)
ax.tick_params(axis='y', colors='gray')
ax.spines['polar'].set_visible(False)
ax.xaxis.grid(True, linewidth=1, color='black')
ax.yaxis.grid(True, linewidth=0.5, color='lightgrey')
#ax.set_axisbelow('line')
return ax
def simulate_activations(connectome, noise_coef=1, num_iter=1000, init_state=None, signal=None,
                         progress=True, random_state=None, **kwargs):
    """
    Simulate activations of a Hopfield network with a given connectome.
    Factory function for HopfieldSimulation dataclass.
    :param connectome: a 2D quadratic numpy array (the weight matrix)
    :param noise_coef: standard deviation of the Gaussian noise added after each update
    :param num_iter: number of iterations
    :param init_state: initial state; a random standard-normal state if None
    :param signal: non-null signal added to the noise in each iteration
                   (list of length connectome.shape[0]); zeros if None
    :param progress: show a tqdm progress bar
    :param random_state: seed for numpy's default_rng
    :param kwargs: additional arguments to network.Hopfield
    :return: HopfieldSimulation object
    """
    if not isinstance(connectome, np.ndarray) or connectome.ndim != 2 or connectome.shape[0] != connectome.shape[1]:
        raise ValueError("Connectome must be a 2D quadratic numpy array!")
    if signal is None:
        signal = np.zeros(connectome.shape[0])
    random = np.random.default_rng(random_state)
    # Defaults for the Hopfield network; caller kwargs take precedence.
    default_kwargs = {
        "scale": True,
        "threshold": 0,
        "beta": 0.05
    }
    default_kwargs.update(kwargs)
    hopnet = network.Hopfield(connectome, **default_kwargs)
    # One extra row: the look-ahead state written by the last iteration.
    states = np.zeros((num_iter + 1, hopnet.num_neuron))
    energies = np.zeros(num_iter)
    if init_state is None:
        states[0] = network.State(random.normal(0, 1, hopnet.num_neuron))
    else:
        states[0] = network.State(init_state)
    for i in tqdm(range(num_iter), disable=not progress):
        # One deterministic Hopfield update step ...
        new_state, n_iter, energy = hopnet.update(states[i], num_iter=1)
        energies[i] = energy[-1]
        # ... followed by Gaussian noise (mean=signal, sd=noise_coef).
        states[i + 1] = np.array(new_state) + random.normal(signal, noise_coef, hopnet.num_neuron)
    # Drop the trailing look-ahead row so states and energies both have num_iter entries.
    return HopfieldSimulation(hopnet=hopnet, states=states[:-1], energies=energies)
def load_simulation(filename):
    """Deserialize a pickled HopfieldSimulation dataclass.

    :param filename: path to the pickle file
    :return: HopfieldSimulation object
    """
    with open(filename, 'rb') as handle:
        return pickle.load(handle)
def create_embeddings(simulation, attractor_sample=1000, num_hopfield_iter=100000, attractor_model_dim=2,
                      random_state=None, progress=True, **kwargs):
    """
    Construct a new Hopfield embeddings of a connectome from a HopfieldSimulation object.
    :param simulation: HopfieldSimulation object
    :param attractor_sample: maximum number of states sampled for attractor model training
    :param num_hopfield_iter: update budget when relaxing a sampled state to its attractor
    :param attractor_model_dim: number of leading PCs the attractor classifier is fit on
    :param random_state: seed for numpy's default_rng used for sampling
    :param progress: show a tqdm progress bar
    :param kwargs: additional arguments to the embedding model (sklearn.decomposition.PCA)
    :return: HopfiledEmbedding object (sic -- the class name is spelled this way upstream)
    """
    # PCA on simulated hopfield states; each state vector is standardized
    # (columns of states.T are time points, so scaling runs over neurons).
    pca = PCA(**kwargs)
    states = StandardScaler().fit_transform(simulation.states.T).T
    embedded = pca.fit_transform(states)
    # calculate attractor states for a subsample
    random = np.random.default_rng(random_state)
    attractors = dict()
    attractor_labels = np.zeros(min(int(simulation.states.shape[0]), attractor_sample), dtype=int)
    sample = random.choice(simulation.states.shape[0], min(int(simulation.states.shape[0]), attractor_sample),
                           replace=False)
    for i, s in tqdm(enumerate(sample), total=len(sample), disable=not progress):
        # Relax the sampled state until convergence (or the iteration budget runs out).
        att, n_iter, energy = simulation.hopnet.update(simulation.states[s], num_iter=num_hopfield_iter)
        if n_iter == num_hopfield_iter:
            # NOTE(review): leftover debug print; also `raise RuntimeWarning`
            # raises (not warns) -- confirm a hard failure is intended here.
            print(n_iter, '!!')
            raise RuntimeWarning("Convergence error!")
        # Attractors are deduplicated by rounding to 6 decimals.
        if tuple(np.round(att, 6)) not in attractors.keys():
            attractors[tuple(np.round(att, 6))] = len(attractors)
            attractor_labels[i] = len(attractors) - 1
        else:
            attractor_labels[i] = attractors[tuple(np.round(att, 6))]
    # invert dictionary
    attractors = {v: np.array(k) for k, v in attractors.items()}
    # Fit a Multinomial Logistic Regression that predicts the attractor label
    # from the first `attractor_model_dim` PCs.
    attractor_model = LogisticRegression(multi_class="multinomial")
    attractor_model.fit(embedded[sample, :attractor_model_dim], attractor_labels)
    return HopfiledEmbedding(hopnet=simulation.hopnet, embedding_model=pca,
                             attractors=attractors,
                             attractor_model=attractor_model,
                             attractor_model_dim=attractor_model_dim,
                             state_sample=simulation.states[sample],
                             attractor_sample=attractor_labels)
def load_embedding(filename):
    """Load a previously pickled HopfieldEmbedding dataclass.

    :param filename: file to load
    :return: HopfieldEmbedding object
    """
    with open(filename, mode='rb') as pickled:
        embedding = pickle.load(pickled)
    return embedding
| pni-lab/connattractor | connattractor/analysis.py | analysis.py | py | 16,176 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.arctan2",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.cos",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
"line_number": 25,... |
2180953342 | from flask import Flask, jsonify, request
import datetime
import fetchNavigationData
app = Flask(__name__)
# Allow non-ASCII characters (e.g. Japanese station names) in JSON responses.
app.config['JSON_AS_ASCII'] = False
@app.route('/api', methods=['GET'])
def index():
    """Suggest a meeting station between two travellers.

    Query params ``first`` and ``second`` are the two starting stations.
    Fetches the station list in both directions, computes the absolute
    arrival-time difference at every shared stop, and returns a station
    name plus both routes.
    """
    first = request.args.get('first', '')
    second = request.args.get('second', '')
    json1 = fetchNavigationData.fetch_station_list(first, second)
    json2 = fetchNavigationData.fetch_station_list(second, first)
    # Align the opposite direction with json1's stop order.
    json2.reverse()
    diffs = {}
    # Loop variable renamed from `index`, which shadowed this view function.
    for idx in range(len(json1)):
        if idx < len(json1) and idx < len(json2):
            time1 = datetime.datetime.strptime(json2[idx]["time"], '%H:%M')
            time2 = datetime.datetime.strptime(json1[idx]["time"], '%H:%M')
            # Absolute arrival-time difference (seconds) at this stop.
            diffs[idx] = abs(int((time2 - time1).total_seconds()))
    # Bug fix: `result` was previously unbound (UnboundLocalError) when the
    # two routes share no stops; default to None so the endpoint still responds.
    result = None
    for k, v in sorted(diffs.items(), key=lambda x: x[1]):
        result = json1[k]["name"]
        if len(json2) < len(json1):
            result = json2[k]["name"]
    # Restore json2's original order for the response payload.
    json2.reverse()
    return jsonify({
        'result': result,
        'way': [json1, json2]
    })
if __name__ == "__main__":
    # Development entry point: listen on all interfaces, standard HTTP port.
    app.run(host='0.0.0.0', port=80)
| 5ym/smaen | back/module/app.py | app.py | py | 1,208 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
4292641099 | from django.contrib.auth.models import User
from django.test import TestCase
from note.forms import NoteAddForm, NoteEditForm
from note.models import Note
class NoteFormsTestCase(TestCase):
    """Unit tests for NoteAddForm and NoteEditForm validation and saving."""

    def setUp(self):
        # Arrange: a user and valid form data shared by all tests.
        self.user = User.objects.create_user(username='test_user', password='test_pass')
        self.note_data = {
            'title': 'Test Note',
            'content': 'Test content',
        }

    def test_note_add_form_valid(self):
        # Act
        form = NoteAddForm(data=self.note_data)
        # Assert
        self.assertTrue(form.is_valid())

    def test_note_add_form_invalid(self):
        # Arrange: an empty title should make the form invalid.
        form_data = self.note_data.copy()
        form_data['title'] = ''
        # Act
        form = NoteAddForm(data=form_data)
        # Assert
        self.assertFalse(form.is_valid())

    def test_note_add_form_save(self):
        # Act
        form = NoteAddForm(data=self.note_data)
        # Assert
        self.assertTrue(form.is_valid())
        # Act: attach the author manually before saving.
        note = form.save(commit=False)
        note.author = self.user
        note.save()
        # Assert: the note is saved correctly
        self.assertEqual(Note.objects.count(), 1)
        saved_note = Note.objects.first()
        self.assertEqual(saved_note.title, self.note_data['title'])
        self.assertEqual(saved_note.content, self.note_data['content'])
        self.assertEqual(saved_note.author, self.user)

    def test_note_edit_form_valid(self):
        # Arrange: an existing note to edit.
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': 'Updated Title',
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertTrue(form.is_valid())

    def test_note_edit_form_invalid(self):
        # Arrange: an existing note and invalid (empty-title) data.
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': '',  # Empty title
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertFalse(form.is_valid())

    def test_note_edit_form_save(self):
        # Arrange
        note = Note.objects.create(title='Initial Title', content='Initial content', author=self.user)
        form_data = {
            'title': 'Updated Title',
            'content': 'Updated content',
        }
        # Act
        form = NoteEditForm(data=form_data, instance=note)
        # Assert
        self.assertTrue(form.is_valid())
        # Act
        updated_note = form.save()
        # Assert: note is updated correctly
        self.assertEqual(updated_note.title, form_data['title'])
        self.assertEqual(updated_note.content, form_data['content'])
| mehdirahman88/django_notes | note/tests/test_forms.py | test_forms.py | py | 2,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 11,
"usa... |
71001076263 | #!/usr/bin/env python
#
# This script downloads a game from OGS and produces a .game file
# that can be used by our test estimator.
#
import requests
import sys
def fetch_game(game_id):
    """Fetch the current state of an OGS game.

    :param game_id: numeric online-go.com game id
    :return: (board, removal, player_to_move) or None on HTTP failure.
             Board cells use 1=black, -1=white, 0=empty.
    """
    res = requests.get('https://online-go.com/termination-api/game/%d/state' % game_id)
    if res.status_code != 200:
        sys.stderr.write('Unable to fetch game\n')
        return None
    data = res.json()
    removal = data['removal']
    board = data['board']
    # The API encodes white as 2; convert to this tool's -1 convention.
    board = [
        [-1 if x == 2 else x for x in row]
        for x_row in [None] for row in board
    ]
    last_move = data['last_move']
    player_to_move = 0
    # Bug fix: the original tested `== -1` (i.e. a pass) and then indexed the
    # board with y == -1, reading the wrong row. Only when a real last move
    # exists can the player to move be derived as the opposite color of the
    # stone just played.
    if last_move['y'] != -1:
        player_to_move = -board[last_move['y']][last_move['x']]
    if player_to_move == 0:
        player_to_move = 1
    return board, removal, player_to_move
def print_game(output, board, removal, player_to_move):
    """Write a .game file: header, board grid, blank line, removal grid."""
    height = len(board)
    width = len(board[0])
    output.write('# 1=black -1=white 0=open\n')
    output.write('height %d\n' % height)
    output.write('width %d\n' % width)
    output.write('player_to_move %d\n' % player_to_move)
    for row in board:
        output.write(' '.join('%2d' % cell for cell in row) + '\n')
    output.write('\n')
    for row in removal:
        output.write(' '.join('%2d' % cell for cell in row) + '\n')
if __name__ == "__main__":
    # Expect exactly one positive integer argument: the OGS game id.
    if len(sys.argv) != 2 or int(sys.argv[1]) <= 0:
        sys.stderr.write("Usage: ./fetch_ogs_game.py <game-id>\n")
    else:
        game_id = int(sys.argv[1])
        filename = '%d.game' % game_id
        with open(filename, 'w') as output:
            # NOTE(review): fetch_game may return None on HTTP failure;
            # the unpacking below would then raise TypeError.
            board, removal, player_to_move = fetch_game(game_id)
            print_game(output, board, removal, player_to_move)
        print('Wrote %s. You will want to modify the stone removal map to fix any errors that happened in scoring.' % filename)
| online-go/score-estimator | tools/fetch_ogs_game.py | fetch_ogs_game.py | py | 1,827 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stderr.write",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_nu... |
25759812026 | #!/usr/bin/env python
import os
import json
from twitter import Api
# Custom import
from datetime import datetime
from datetime import date
import time
import re
import sys
def loadConfig(config_secret):
    """Load Twitter API credentials from a JSON file into module globals.

    The consumer key/secret and access token come from an app registered
    at http://apps.twitter.com.

    :param config_secret: path to the JSON credentials file
    """
    global CONSUMER_KEY
    global CONSUMER_SECRET
    global ACCESS_TOKEN
    global ACCESS_TOKEN_SECRET
    with open(config_secret, 'r') as cred:
        credentials = json.load(cred)
    CONSUMER_KEY = credentials['consumer_key']
    CONSUMER_SECRET = credentials['consumer_secret']
    ACCESS_TOKEN = credentials['access_token']
    ACCESS_TOKEN_SECRET = credentials['access_token_secret']
# Users to watch for should be a list. This will be joined by Twitter and the
# data returned will be for any tweet mentioning:
# @twitter *OR* @twitterapi *OR* @support.
#USERS = ['@twitter', '@twitterapi', '@support']
# Bounding boxes are [west_lon, south_lat, east_lon, north_lat] strings for
# Twitter's streaming `locations` filter.
LOCATIONS = ['-6.38','49.87','1.77','55.81']
UK = ['-5.95459','49.979488','-0.109863','58.12432'] # United Kingdom
US = ['-123.960279', '33.080519', '-60.996094', '45.336702'] # US
AU = ['105.785815', '-44.513723', '154.301442', '-12.449423'] # Australia
NZ = ['164.772949', '-47.15984', '179.626465', '-33.94336'] # New Zealand
SEA = ['90.825760', '-11.836210', '153.766943', '21.217420'] # South East Asian
AF = ['-25.195408', '-35.880958', '32.812407', '31.960635'] # African
# Rotation order of regions, and how many days to stream each before switching.
COUNTRIES = ['UK', 'US', 'AU', 'NZ', 'SEA', 'AF']
DAY_CYCLE = 2
def getLocation(country_code):
    """Map a country code to its (bounding box, rotation index) pair.

    Unknown codes fall back to the UK bounding box at index 0.
    """
    lookup = {
        'UK': (UK, 0),
        'US': (US, 1),
        'AU': (AU, 2),
        'NZ': (NZ, 3),
        'SEA': (SEA, 4),
        'AF': (AF, 5),
    }
    return lookup.get(country_code, (UK, 0))
def write_to_file(filename, text, append=True):
    """Write ``str(text)`` plus a trailing newline to *filename*.

    :param append: append to the file (default) instead of truncating it
    """
    with open(filename, 'a' if append else 'w') as out:
        out.write(str(text) + '\n')
def make_sure_path_exists(path):
    """Create *path* (and any parents) if missing; silently ignore failures."""
    try:
        os.makedirs(path)
    except OSError:
        # Best effort: already exists, permissions, etc. -- never raise.
        pass
def normalize_tweet_text(tweet_text):
    """Normalize raw tweet text for CSV output.

    Replaces commas/newlines/tabs with spaces, strips http(s) links,
    collapses repeated spaces, and drops characters that cannot be
    encoded as UTF-8.

    :param tweet_text: the raw tweet text (str)
    :return: the cleaned text as a str
    """
    ## Remove comma, linefeed, and tab
    tweet_text = re.sub('[,\n\t]', ' ', tweet_text)
    ## Remove http link from tweet_text
    tweet_text = re.sub('http?([-a-zA-Z0-9@:%_\+.~#?&//=])*', ' ', tweet_text)
    ## Remove multiple spaces
    tweet_text = re.sub(' +', ' ', tweet_text)
    ## Bug fix: the original did str(bytes) after encode(), which on Python 3
    ## yields a literal "b'...'" wrapper. Round-trip through UTF-8 instead,
    ## dropping unencodable characters while keeping the value a text string.
    tweet_text = tweet_text.encode('utf-8', 'ignore').decode('utf-8', 'ignore')
    return tweet_text
def extract_line(directory, today, line):
    """Parse one raw tweet (a JSON line) and append geotagged English tweets
    to a per-country CSV summary file.

    :param directory: output directory prefix (expects trailing separator)
    :param today: current date, used only for naming the error log
    :param line: raw JSON string for a single tweet
    """
    line = line.strip()
    line = line.replace('\n', '\\n')
    if line == '':
        return
    line = json.loads(line, strict=False)
    try:
        try:
            lang = line['lang'] # String
            # English only
            if lang != 'en':
                return
        except:
            # Tweets without a 'lang' field fall through and are kept.
            pass
        # Extract line information
        try:
            geo = line['geo'] # String
        except Exception as ex:
            #print('Geo Exception %s' % ex)
            return
        #geo = line['geo'] # Object
        timestamp_ms = line['timestamp_ms'] # Long Integer
        user = line['user'] # Object
        #entities = line['entities'] # Object
        tweet_id = line['id'] # Integer
        tweet_text = line['text'] # String
        retweet_count = line['retweet_count']
        place = line['place']
        # Defaults when the tweet carries no 'place' object.
        ccode = 'NA'
        cname = 'default'
        if place is not None:
            ccode = place['country_code']
            cname = place['country']
        # Extract user information
        user_id = user['id'] # Integer
        utc_offset = user['utc_offset'] # Integer
        if utc_offset is None:
            utc_offset = ''
        else :
            utc_offset = str(utc_offset).strip()
        #friends_count = user['friends_count'] # Integer
        #followers_count = user['followers_count'] # Integer
        #statuses_count = user['statuses_count'] # Integer
        # Extract entities information
        #hashtags = entities['hashtags'] # Array of String
        #user_mentions = entities['user_mentions'] # Dictionary
        # Extract user_mentions information
        #for user_mention in user_mentions:
        #    mentioned_id = user_mention['id']
        #    print(str(mentioned_id)+'\n')
        # Print for testing
        #print(str(geo))
        #print(str(timestamp_ms))
        #print(str(user_id))
        #print(str(entities))
        #print(str(tweet_id))
        # For each geotagged tweets
        if geo is not None:
            #print(str(geo))
            try:
                coordinates = geo['coordinates'] # Array of Float
                gps = []
                for var in coordinates:
                    gps.append(str(var))
            except Exception as ex:
                print('Coordinate Exception {}'.format(ex))
                return
            #print(gps[0])
            #print(gps[1])
            # Normalize text
            tweet_text = normalize_tweet_text(tweet_text)
            # Append one CSV row per tweet to the per-country summary file.
            f_summary = 'summary_{0}_{1}.csv'.format(ccode, cname)
            csv_output = '{0},{1},{2},{3},{4},{5},{6}'.format(tweet_id, user_id, timestamp_ms, gps[0], gps[1], tweet_text, utc_offset)
            if csv_output != '':
                write_to_file(directory + f_summary, csv_output)
            #time.sleep(1)
    except Exception as ex:
        # Best-effort logging: any unexpected tweet structure goes to the error log.
        f_error = '{0}/error_{1}.txt'.format(directory, today)
        make_sure_path_exists(directory)
        with open(f_error, 'a') as fw:
            fw.write('[{0}] Extract Exception {1}\n'.format(str(datetime.now()),ex))
            fw.write('[{0}] {1}\n'.format(str(datetime.now()),line))
##########################
# Main function
##########################
def main():
    """CLI entry point.

    Usage:
      stream.py <directory> <country_code>  -- stream geo-filtered tweets into log files,
                                               rotating countries every DAY_CYCLE days
      stream.py <directory>                 -- post-process existing log files into CSVs
    """
    arglen = len(sys.argv)
    USING_TWITTER = False
    if arglen == 3:
        directory = sys.argv[1]
        country_code = sys.argv[2]
        LOCATIONS, selected = getLocation(country_code)
        USING_TWITTER = True
    elif arglen == 2:
        directory = sys.argv[1]
    else :
        print('Please give two inputs: directory name and country code {US, UK, AU, NZ, SEA, AF}')
        return
    if directory != '':
        directory = directory + '/'
    if USING_TWITTER:
        loadConfig('config_secret.json')
        # Since we're going to be using a streaming endpoint, there is no need to worry
        # about rate limits.
        api = Api(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
    # api.GetStreamFilter will return a generator that yields one status
    # message (i.e., Tweet) at a time as a JSON dictionary.
    try:
        today = date.today()
        if USING_TWITTER:
            count_day = 0
            counter = 0
            count_thousands = 0
            print(country_code)
            print(today)
            str_out = ''
            while(True):
                for line in api.GetStreamFilter(locations=LOCATIONS):
                    # warning: "limit"
                    try:
                        if date.today() != today :
                            # Change day: report totals and reset counters.
                            today = date.today()
                            try:
                                print('[{0}] Processed {1:,} tweets'.format(str(datetime.now()), count_thousands*1000 + counter))
                                print('--- End of the day ---')
                            except:
                                pass
                            counter = 0
                            count_thousands = 0
                            count_day += 1
                            print(today)
                            # Write remaining data into file
                            if str_out != '':
                                write_to_file(f_complete, str_out)
                                str_out = ''
                            if count_day == DAY_CYCLE:
                                count_day = 0
                                # Rotate to the next country's bounding box and
                                # restart the stream (break re-enters the while).
                                selected = (selected + 1 ) % len(COUNTRIES)
                                country_code = COUNTRIES[selected]
                                LOCATIONS, selected = getLocation(country_code)
                                print(country_code)
                                break
                        # Buffer the raw tweet JSON; flushed every 25 tweets.
                        f_complete = '{0}/logs/log_{1}_{2}.txt'.format(directory, country_code, today)
                        #print json.dumps(line)
                        str_out = '{0}{1}\n'.format(str_out, json.dumps(line))
                        # Counter
                        counter = counter + 1
                        if counter % 25 == 0:
                            if str_out != '':
                                write_to_file(f_complete, str_out)
                                str_out = ''
                        if counter % 1000 == 0 and counter > 0:
                            counter = 0
                            count_thousands = count_thousands + 1
                            print('[{0}] Processed {1},000 tweets'.format(str(datetime.now()),count_thousands))
                    except Exception as ex:
                        f_error = '{0}/logs/error_{1}.txt'.format(directory, str(today))
                        with open(f_error, 'a') as fw:
                            fw.write('[{0}] Line Exception {1}\n'.format(str(datetime.now()),ex))
                            fw.write('[{0}] {1}\n'.format(str(datetime.now()),line))
        else:
            # Post-processing mode: walk the directory and convert every
            # previously streamed log_* file into CSV rows via extract_line.
            for subdir, dirs, files in os.walk(directory):
                for file in files:
                    if file.startswith('log'):
                        print('[{0}] Processing file : {1}'.format(str(datetime.now()), file))
                        with open(directory + file, 'r') as fin:
                            for line in fin:
                                try:
                                    extract_line(directory, today, line)
                                except:
                                    pass
                        pass
        print('Program finished ')
    except Exception as ex:
        f_error = '{0}/logs/error_{1}.txt'.format(directory, str(today))
        make_sure_path_exists(directory + '/logs')
        write_to_file(f_error, '[{0}] Outer Exception {1}\n'.format(str(datetime.now()),ex))
##########################
# End of Main
##########################
if __name__ == '__main__':
    # Stream tweets (two args) or post-process existing logs (one arg); see main().
    main()
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 80,
"us... |
1137077324 | import json
# things we need for NLP
import nltk
from nltk.stem.lancaster import LancasterStemmer
# Fetch the Punkt tokenizer models at import time (no-op if already present).
nltk.download('punkt')
stemmer = LancasterStemmer()
# things we need for Tensorflow
import numpy as np
import tflearn
import tensorflow as tf
import random
import pickle
class ModelBuilder(object):
    """Builds a bag-of-words training set from intents.json and trains a
    small tflearn feed-forward classifier over the intent tags."""

    def __init__(self):
        # Raw intents document: {"intents": [{"tag": ..., "patterns": [...]}, ...]}
        with open('intents.json') as json_data:
            self.intents = json.load(json_data)
        self.words = []      # stemmed vocabulary (sorted, unique)
        self.classes = []    # intent tags (sorted, unique)
        self.documents = []  # (token list, tag) pairs
        # Tokens excluded from the vocabulary (simple stop-word list).
        self.ignore_words = [
            'what', 'are', 'is', 'the', 'why',
            'does', 'how', 'in', 'on', '?', 'my',
            'I'
        ]

    def parse_intents_doc(self):
        """Tokenize every pattern, collecting vocabulary, tags and documents."""
        for intent in self.intents['intents']:
            for pattern in intent['patterns']:
                # tokenize each word in the sentence
                tokens = nltk.word_tokenize(pattern)
                self.words.extend(tokens)
                self.documents.append((tokens, intent['tag']))
                if intent['tag'] not in self.classes:
                    self.classes.append(intent['tag'])
        # Stem + lowercase, drop stop words, then dedupe and sort.
        # (Stop words are matched against the raw token, before stemming.)
        self.words = sorted(set(stemmer.stem(w.lower())
                                for w in self.words if w not in self.ignore_words))
        self.classes = sorted(set(self.classes))

    def build_training_data(self):
        """Return (train_x, train_y): bag-of-words vectors and one-hot tags."""
        training = []
        output_empty = [0] * len(self.classes)
        for pattern_words, tag in self.documents:
            stems = [stemmer.stem(word.lower()) for word in pattern_words]
            # 1/0 bag-of-words over the full vocabulary.
            bag = [1 if w in stems else 0 for w in self.words]
            # One-hot encode the intent tag.
            output_row = list(output_empty)
            output_row[self.classes.index(tag)] = 1
            training.append([bag, output_row])
        # Shuffle so batches are not grouped by intent.
        random.shuffle(training)
        training = np.array(training)
        train_x = list(training[:, 0])
        train_y = list(training[:, 1])
        return train_x, train_y

    def train_neural_network(self, train_x, train_y):
        """Train an input->8->8->softmax network and persist model + data."""
        # reset underlying graph data
        tf.reset_default_graph()
        net = tflearn.input_data(shape=[None, len(train_x[0])])
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, len(train_y[0]), activation='softmax')
        net = tflearn.regression(net)
        # Define model and setup tensorboard
        model = tflearn.DNN(net, tensorboard_dir='tflearn_logs')
        # Start training (apply gradient descent algorithm)
        model.fit(train_x, train_y, n_epoch=1000, batch_size=8, show_metric=True)
        model.save('model.tflearn')
        # Bug fix: the original passed a bare open() into pickle.dump and
        # leaked the file handle; use a context manager instead.
        with open('training_data', 'wb') as out:
            pickle.dump({
                'words': self.words,
                'classes': self.classes,
                'train_x': train_x,
                'train_y': train_y
            }, out)
if __name__ == '__main__':
    # Full pipeline: parse intents, build the training set, train and save.
    model_builder = ModelBuilder()
    model_builder.parse_intents_doc()
    train_x, train_y = model_builder.build_training_data()
    model_builder.train_neural_network(train_x, train_y)
| nlokare/chatbot | chat_model.py | chat_model.py | py | 4,023 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.download",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.stem.lancaster.LancasterStemmer",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "nltk.word_... |
33989498564 | from django.contrib.auth.models import User
from django.shortcuts import render
from profile_app.models import UserProfileInfo
from video_app.models import Video
from comment_app.models import Comment
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
# Create your views here.
@login_required
def write_comment(request, video_id):
    """Persist a comment on a video for the logged-in user (POST only)."""
    if request.method != 'POST':
        # Anything but POST gets an error payload.
        return JsonResponse({'error': 'Non POST method not allowed'})
    userprofileinfo = UserProfileInfo.objects.get(user=request.user)
    video = Video.objects.get(video_id=video_id)
    comment = Comment(
        text=request.POST.get('text'),
        userprofileinfo=userprofileinfo,
        video=video,
    )
    comment.save()
    return JsonResponse({
        'result': 'success',
        'id': comment.id,
        'text': comment.text,
        'userprofileinfo': comment.userprofileinfo.user.username,
        'date': comment.date,
        'video': comment.video.title,
    })
| NathanA15/music-video | music_project/comment_app/views.py | views.py | py | 1,008 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "profile_app.models.UserProfileInfo.objects.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "profile_app.models.UserProfileInfo.objects",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "profile_app.models.UserProfileInfo",
"line_num... |
14416840611 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# 从数据库里面导出一定时间前的还持有份额的人员和产品数据,每个产品是一个Excel
import pymysql
from openpyxl import *
import os
#检索每一行的第一个产品代码字段,如果是同一个代码,要保存在一个Excel里,不同产品的数据,用不同的Excel保存
#fields是列名,data是数据集,path是保存的路径,如果空,则保存在当前目录下
def SaveData2Excel(fields, data, path = ''):
    """Split *data* by fund code (column 0) and save one Excel workbook per fund.

    :param fields: cursor description; each entry's first element is the column name
    :param data: rows ordered by fund code (column 0); column 1 is the fund name
    :param path: output directory; defaults to the current working directory
    """
    # Bug fix: guard against empty input (the original raised IndexError on data[0]).
    if not data:
        return
    if path == '':
        path = os.getcwd()
    header = [f[0] for f in fields]
    wb = Workbook()
    ws = wb.active
    fundcode = data[0][0]
    fundname = data[0][1]
    # Row 1 holds the column names.
    ws.append(header)
    # Copy the rows into the workbook, starting a new workbook whenever
    # the fund code changes (rows for one fund go into one Excel file).
    for row in data:
        if row[0] != fundcode:
            wb.save(path + os.sep + fundname + "_持有人名册.xlsx")
            print("{0}结束导出".format(fundname))
            wb = Workbook()
            ws = wb.active
            fundcode = row[0]
            fundname = row[1]
            ws.append(header)
        ws.append(row)
    wb.save(path + os.sep + fundname + "_持有人名册.xlsx")
# Date cutoff: export holders that still have shares as of this date.
end_date = "2018-1-1"
# Output directory for the generated Excel files.
path = "C:\\Users\\gaos\\Documents\\PyqtProject\\output"
# Database connection.
# NOTE(review): credentials are hard-coded here; consider an external config.
connection = pymysql.connect(host='192.168.40.98', port=3306,\
            user='selling_query',password='123456',db='private_data',charset='utf8')
cursor = connection.cursor()
# Export query: for every (product, user), pick the latest holders record on
# or before end_date, keeping only positive remaining shares of active
# products of company 1.
sql_base = "SELECT \
p.`code` as '产品代码', \
p.`name` AS '产品名', \
u.`name` AS '用户名', \
concat(" + '"' + "'"+ '"' + ",u.certificate_no) as '认证号码', \
h.current_share as '2017年末份额' \
FROM \
holders h, \
users u, \
products p, \
( \
SELECT \
h1.product_id, \
h1.user_id, \
max(h1.data_log_id) log_id \
FROM \
holders h1, \
users u, \
products p \
WHERE \
TO_DAYS(h1.hold_at_str) <= TO_DAYS('{0}') \
AND h1.product_id = p.id \
AND h1.user_id = u.id \
GROUP BY \
h1.product_id, \
h1.user_id \
) t \
WHERE \
1 = 1 \
AND t.log_id = h.data_log_id \
AND t.product_id = h.product_id \
and t.user_id = h.user_id \
and h.current_share > 0 \
AND h.product_id = p.id \
AND p.`status` = 0 \
AND p.company_id = 1 \
and h.user_id = u.id \
ORDER BY \
h.product_id, \
h.user_id;"
# Fetch all matching rows from the database.
sql = sql_base.format(end_date)
cursor.execute(sql)
result = cursor.fetchall()
if len(result) > 0:
    fields = cursor.description
    SaveData2Excel(fields, result, path)
# Close the database connection.
cursor.close()
connection.close()
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pymysql.connect",
"line_number"... |
10513613017 | from django.test import SimpleTestCase
from website.forms import CreateUserForm, SignUpForm, FeedbackForm, PatientForm, DocumentationP, EventForm, MessageForm, RequestForm
from website.models import Patient, SignUp, Feedback, Documentation, Event, Messages, Requests
class TestForms(SimpleTestCase):
    """Smoke tests that construct each form with sample data.

    NOTE(review): these tests only instantiate the forms -- every `form`
    local is unused and nothing is asserted, so the tests can never fail.
    The data dicts also pass 'model'/'fields' keys rather than the forms'
    actual field names; confirm this is the intended behavior.
    """

    def test_create_user_form(self):
        form = CreateUserForm(data={
            'model': ['Patient'],
            'fields': ['dina', 'balua']
        })

    def test_sign_up_form(self):
        form = SignUpForm(data={
            'model': ['SignUp'],
            'fields': ['lior', 'inbar', 16, 'man', 'dinab@gmail', +972855555555, 'Canada', 'write']
        })

    def test_feedback_form(self):
        form = FeedbackForm(data={
            'model': ['Feedback'],
            'fields': ['dina', 'balua', 'message']
        })

    def test_patient_form(self):
        form = PatientForm(data={
            'model': ['Patient'],
            'fields': ['dan']
        })

    def test_documentation_form(self):
        form = DocumentationP(data={
            'model': ['Documentation'],
            'fields': ['inbar', 'balua', 'message', 'meeting', 'diagnosis']
        })

    def test_event_form(self):
        form = EventForm(data={
            'model': ['Event'],
            'fields': ['avihai', 27/7/92]
        })

    def test_message_form(self):
        form = MessageForm(data={
            'model': ['Messages'],
            'fields': ['vika', 18/3/98]
        })

    def test_request_form(self):
        form = RequestForm(data={
            'model': ['Requests'],
            'fields': ['lior', 27/10/1994]
        })
| liorco15/HealthTourism | test_forms.py | test_forms.py | py | 1,646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.test.SimpleTestCase",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "website.forms.CreateUserForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "website.forms.SignUpForm",
"line_number": 14,
"usage_type": "call"
},
{
"ap... |
74470473064 | """
Project Tasks that can be invoked using using the program "invoke" or "inv"
"""
import os
from invoke import task
# disable the check for unused-arguments to ignore unused ctx parameter in tasks
# pylint: disable=unused-argument
# Keyword arguments passed to every ctx.run() call. Windows has no PTY support.
IS_WINDOWS = os.name == "nt"
if IS_WINDOWS:
    # setting 'shell' is a work around for issue #345 of invoke
    RUN_ARGS = {"pty": False, "shell": r"C:\Windows\System32\cmd.exe"}
else:
    RUN_ARGS = {"pty": True}
def get_files():
    """Return the space-separated list of paths the analysis tools run on."""
    targets = (
        "dploy",
        "tests",
        "tasks.py",
    )
    return " ".join(targets)
@task
def setup(ctx):
    """Install the Python requirements from requirements.txt."""
    cmd = "python -m pip install -r requirements.txt"
    ctx.run(cmd, **RUN_ARGS)
@task
def clean(ctx):
    """Interactively remove untracked files via git."""
    cmd = "git clean --interactive"
    ctx.run(cmd, **RUN_ARGS)
@task
def lint(ctx):
    """Run pylint and flake8 over the project sources."""
    files = get_files()
    for tool in ("pylint --output-format=parseable", "flake8"):
        ctx.run("python -m {cmd} {files}".format(cmd=tool, files=files), **RUN_ARGS)
@task
def reformat_check(ctx):
    """Verify formatting with black (check-only, no files modified)."""
    ctx.run("python -m {cmd} {files}".format(cmd="black --check", files=get_files()),
            **RUN_ARGS)
@task
def reformat(ctx):
    """Reformat the project sources with black."""
    ctx.run("python -m {cmd} {files}".format(cmd="black", files=get_files()),
            **RUN_ARGS)
@task
def metrics(ctx):
    """Report radon code metrics (cyclomatic complexity, maintainability)."""
    files = get_files()
    for metric in ("cc", "mi"):
        ctx.run("radon {metric} --min B {files}".format(metric=metric, files=files),
                **RUN_ARGS)
@task()
def test(ctx):
    """Run the test suite with coverage reporting."""
    # py.test (rather than pytest) keeps compatibility with Python 3.3.
    ctx.run("py.test --cov-report term-missing --cov=dploy --color=no", **RUN_ARGS)
# pylint: disable=redefined-builtin
@task(test, lint, reformat_check)
def all(default=True):
    """Run the main check suite: tests, lint and the formatting check."""
@task(clean)
def build(ctx):
    """Create a single-file executable named 'dploy' with pyinstaller."""
    entry_point = os.path.join("dploy", "__main__.py")
    ctx.run("pyinstaller -n dploy --onefile " + entry_point, **RUN_ARGS)
| arecarn/dploy | tasks.py | tasks.py | py | 2,421 | python | en | code | 68 | github-code | 36 | [
{
"api_name": "os.name",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "invoke.task",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "invoke.task",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "invoke.task",
"line_number":... |
18760528871 | import numpy as np
import itertools
import cv2
def draw_epipolar_lines(img_left, img_right):
    """Stack two images side by side and paint evenly spaced horizontal
    lines across the pair, cycling through a fixed palette, so epipolar
    alignment of a rectified stereo pair can be inspected visually.

    Returns the combined annotated image.
    """
    palette = [(255, 0, 0), (0, 0, 255), (0, 255, 0),
               (255, 255, 0), (255, 255, 255), (0, 255, 255)]
    color_cycle = itertools.cycle(palette)
    rows = np.shape(img_left)[0]
    divisions = 40.0
    step = int(np.floor(rows / divisions))
    stop = int(divisions * step)
    combined = np.hstack([img_left, img_right])
    for row in range(0, stop - 1, step):
        combined[row, :, :] = next(color_cycle)
    return combined
def rectify_images(left_img, right_img, left_K, right_K, transl_RL_R, rot_RL, crop_parameter):
    """
    Stereo-rectify a left/right image pair so epipolar lines become horizontal.

    Args:
        left_img, right_img: input images (row-major arrays).
        left_K, right_K: 3x3 camera intrinsic matrices.
        transl_RL_R: translation between the two cameras.
        rot_RL: rotation between the two cameras.
        crop_parameter: forwarded to cv2.stereoRectify as `alpha`
            (0 = crop to valid pixels only, 1 = keep all source pixels).

    Returns:
        Tuple (left_img_remap, right_img_remap) of rectified images.
    """
    # OpenCV expects (width, height), hence the reversed shape slice.
    left_img_size = left_img.shape[0:2][::-1]
    right_img_size = right_img.shape[0:2][::-1]
    # assumes the inputs are already undistorted -- TODO confirm
    distCoeffs = None
    R1,R2,P1,P2,Q,_,_ = cv2.stereoRectify(left_K, distCoeffs, right_K, distCoeffs, left_img_size, rot_RL, transl_RL_R, alpha=crop_parameter)
    left_maps = cv2.initUndistortRectifyMap(left_K, distCoeffs, R1, P1, left_img_size, cv2.CV_16SC2)
    right_maps = cv2.initUndistortRectifyMap(right_K, distCoeffs, R2, P2, right_img_size, cv2.CV_16SC2)
    left_img_remap = cv2.remap(left_img, left_maps[0], left_maps[1], cv2.INTER_LANCZOS4)
    right_img_remap = cv2.remap(right_img, right_maps[0], right_maps[1], cv2.INTER_LANCZOS4)
    return left_img_remap, right_img_remap
def filter_images(bright_img, no_light_img, treshold=0):
    """
    Return a copy of ``bright_img`` with every pixel zeroed where the lit
    image is not brighter than the ambient image by at least ``treshold``.

    Args:
        bright_img: image captured with the light source on.
        no_light_img: image captured with the light source off.
        treshold: minimum brightness margin; pixels below
            ``no_light_img + treshold`` are suppressed.

    Returns:
        The filtered image; the input arrays are left unmodified.
    """
    mask = bright_img < (no_light_img + treshold)
    # Copy first: the original code aliased the input and silently
    # mutated the caller's array in place.
    filtered_img = bright_img.copy()
    filtered_img[mask] = 0
    return filtered_img
def nothing(x):
    # No-op callback: cv2.createTrackbar requires a callable argument.
    pass
def stereo_SGBM_tuner(img1, img2):
    """
    Interactively tune SGBM disparity parameters with OpenCV trackbars.

    Displays the computed disparity map in a window; moving the trackbars
    changes the minimum disparity and the number of disparities (both are
    scaled by 16, since SGBM requires the disparity count to be a multiple
    of 16). Press Esc to close the window and return.
    """
    win_name = 'window'
    cv2.namedWindow(win_name)
    cv2.createTrackbar("disparity_min", win_name, 20, 10, nothing)
    cv2.createTrackbar("disparity_num", win_name, 20,50, nothing)
    block_size = 5
    while(1):
        min_disp = cv2.getTrackbarPos("disparity_min", win_name) * 16
        num_disp = cv2.getTrackbarPos("disparity_num", win_name) * 16
        print(num_disp)
        # `is 0` compared object identity rather than value (unreliable for
        # ints, and a SyntaxWarning on modern Python); use ==.
        assert(num_disp % 16 == 0)
        stereo_SGBM = cv2.StereoSGBM_create(min_disp, num_disp, block_size)
        disp = stereo_SGBM.compute(img2, img1)
        cv2.imshow(win_name,disp)
        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            cv2.destroyAllWindows()
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
pass
| olaals/multivision-depr | multivision/oa_stereo_utils.py | oa_stereo_utils.py | py | 2,439 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.shape",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "itertools.cycle",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.hstack",
"line_number... |
17134241020 | from pyspark.sql import SparkSession
spark = SparkSession.builder.getOrCreate()
spark.conf.set('spark.sql.parquet.compression.codec', 'snappy')
spark.conf.set('hive.exec.dynamic.partition.mode', 'nonstrict')
spark.conf.set('spark.streaming.stopGracefullyOnShutdown', 'true')
spark.conf.set('hive.exec.max.dynamic.partitions', '3000')
spark.conf.set('hive.support.concurrency', 'true')
from pyspark.sql import functions as f
from pyspark.sql import types as t
# variables globales
class Modelacion_02_feat():
    """
    Feature-extraction step for the indirect-income model: joins contact
    data and the customer "recorrido" table onto the training population
    for a given reference month.
    """
    def __init__(self):
        self.str1='First Class'
    def export_table(self,TRAIN_POB_CAP,VAR_MES):
        """
        Read the source tables for month ``VAR_MES`` and return the
        training population enriched with demographic and recorrido
        features (delegates the join to ``feat_cap``).
        """
        # Read the source tables from the server
        datos_contacto = spark.read.table("cd_baz_bdclientes.cd_cte_datos_contacto_master") \
            .select(
                f.col('id_master'),
                f.col('lentidad'),
                f.col('genero'),
                f.col('fecha_nacimiento'),
                f.col('cposta').alias('cod_postal')) \
            .withColumn('entidad', f.when(f.trim(f.col('lentidad')).isin('VERACRUZ', 'VERACRUZ DE IGNACIO DE LA LLAVE'), 'VERACRUZ') \
                .otherwise(f.trim(f.col('lentidad')))) \
            .drop(f.col('lentidad'))
        recorrido = spark.read.table("cd_baz_bdclientes.cd_con_cte_recorrido") \
            .select(
                f.col('id_master'),
                f.col('num_periodo_mes').alias('per_ref'),
                f.col('cod_perfil_trx'),
                f.col('saldo'),
                f.col('potencial'),
                f.col('recorrido')) \
            .filter(f.col('per_ref') == str(VAR_MES)) \
            .orderBy(f.col('id_master'))
        # Table-extraction sequence
        TT_train_feat_ren_ind = self.feat_cap(recorrido,datos_contacto,VAR_MES,TRAIN_POB_CAP)
        respond = TT_train_feat_ren_ind
        return respond
    # Step 1: information extraction for the potentials model
    def feat_cap(self,recorrido,datos_contacto,VAR_MES,TRAIN_POB_CAP):
        """
        Left-join demographics (gender, age, location) and recorrido
        metrics (saldo, potencial, recorrido) onto the training
        population, filling missing values with 'VACIO'/0 sentinels.
        """
        _sdm = \
            datos_contacto.alias('A').withColumn('genero', f.when(f.trim(f.col('genero')).isin('N', 'E'), 'X') \
                .otherwise(f.col('genero'))) \
            .withColumn('var_mes', f.to_date(f.lit(str(VAR_MES)+'01'), 'yyyyMMdd')) \
            .withColumn('edad', f.round(f.months_between(f.col('var_mes'), f.col('fecha_nacimiento')) / 12, 0).cast(t.IntegerType())) \
            .select(
                f.col('id_master'),
                f.col('edad'),
                f.col('var_mes'),
                f.col('genero'),
                f.col('cod_postal'),
                f.col('entidad')) \
            .orderBy('id_master')
        TT_train_feat_ren_ind = \
            TRAIN_POB_CAP.alias('A').join(_sdm.alias('B'), f.col('A.id_master') == f.col('B.id_master'), 'left') \
            .join(recorrido.alias('D'), f.col('A.id_master') == f.col('D.id_master'), 'left') \
            .select(
                f.col('A.id_master'),
                f.col('A.per_ref'),
                f.col('A.mto_ing_mes'),
                f.coalesce(f.col('B.genero'), f.lit('VACIO')).alias('genero'),
                f.coalesce(f.col('B.edad'), f.lit(0)).alias('edad'),  # expected to be over 18 -- TODO confirm
                f.coalesce(f.col('B.entidad'), f.lit('VACIO')).alias('entidad'),
                f.coalesce(f.col('B.cod_postal'), f.lit(0)).alias('cod_postal'),
                f.coalesce(f.col('D.saldo'), f.lit(0)).alias('saldo'),
                f.coalesce(f.col('D.potencial'), f.lit(0)).alias('potencial'),
                f.coalesce(f.col('D.recorrido'), f.lit(0)).alias('recorrido')) \
            .orderBy('id_master')
        del datos_contacto
        del recorrido
        del _sdm
        return TT_train_feat_ren_ind
| ConMota/app_renta_indirecta_GS | Class_02_feat.py | Class_02_feat.py | py | 3,937 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.sql.SparkSession.builder.getOrCreate",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "pyspark.sql.SparkSession",
"line_number": 2,
"usage_type... |
36570276493 | import datetime
import urllib
import urllib.parse
from mpcomp import http_core
try:
import simplejson
from simplejson.decoder import JSONDecodeError
except ImportError:
JSONDecodeError = None
try:
# Try to import from django, should work on App Engine
from django.utils import simplejson
except ImportError:
# Should work for Python2.6 and higher.
import json as simplejson
__author__ = "j.s@google.com (Jeff Scudder)"
PROGRAMMATIC_AUTH_LABEL = "GoogleLogin auth="
AUTHSUB_AUTH_LABEL = "AuthSub token="
OAUTH2_AUTH_LABEL = "Bearer "
# This dict provides the AuthSub and OAuth scopes for all services by service
# name. The service name (key) is used in ClientLogin requests.
AUTH_SCOPES = {
"cl": ( # Google Calendar API
"https://www.google.com/calendar/feeds/",
"http://www.google.com/calendar/feeds/",
),
"gbase": ( # Google Base API
"http://base.google.com/base/feeds/",
"http://www.google.com/base/feeds/",
),
"blogger": ("http://www.blogger.com/feeds/",), # Blogger API
"codesearch": ( # Google Code Search API
"http://www.google.com/codesearch/feeds/",
),
"cp": ( # Contacts API
"https://www.google.com/m8/feeds/",
"http://www.google.com/m8/feeds/",
),
"finance": ("http://finance.google.com/finance/feeds/",), # Google Finance API
"health": ("https://www.google.com/health/feeds/",), # Google Health API
"writely": ( # Documents List API
"https://docs.google.com/feeds/",
"https://spreadsheets.google.com/feeds/",
"https://docs.googleusercontent.com/",
),
"lh2": ("http://picasaweb.google.com/data/",), # Picasa Web Albums API
"apps": ( # Google Apps Domain Info & Management APIs
"https://apps-apis.google.com/a/feeds/user/",
"https://apps-apis.google.com/a/feeds/policies/",
"https://apps-apis.google.com/a/feeds/alias/",
"https://apps-apis.google.com/a/feeds/groups/",
"https://apps-apis.google.com/a/feeds/compliance/audit/",
"https://apps-apis.google.com/a/feeds/migration/",
"https://apps-apis.google.com/a/feeds/emailsettings/2.0/",
),
"weaver": ("https://www.google.com/h9/feeds/",), # Health H9 Sandbox
"wise": ("https://spreadsheets.google.com/feeds/",), # Spreadsheets Data API
"sitemaps": ( # Google Webmaster Tools API
"https://www.google.com/webmasters/tools/feeds/",
),
"youtube": ( # YouTube API
"http://gdata.youtube.com/feeds/api/",
"http://uploads.gdata.youtube.com/feeds/api",
"http://gdata.youtube.com/action/GetUploadToken",
),
"books": ("http://www.google.com/books/feeds/",), # Google Books API
"analytics": ("https://www.google.com/analytics/feeds/",), # Google Analytics API
"jotspot": ( # Google Sites API
"http://sites.google.com/feeds/",
"https://sites.google.com/feeds/",
),
# "local": ("http://maps.google.com/maps/feeds/",), # Google Maps Data API
"code": ("http://code.google.com/feeds/issues",), # Project Hosting Data API
}
class Error(Exception):
    """Base class for all exceptions raised by this module."""
    pass
class UnsupportedTokenType(Error):
    """Raised when a token cannot be converted to or from its blob form."""

    pass
class OAuth2AccessTokenError(Error):
    """Raised when an OAuth2 error occurs.

    Attributes:
        error_message: description of the OAuth2 failure.
    """

    def __init__(self, error_message):
        self.error_message = error_message
class OAuth2RevokeError(Error):
    """Raised when an OAuth2 token revocation was unsuccessful."""

    def __init__(self, http_response, response_body=None):
        """Sets the HTTP information in the error.

        Args:
            http_response: The response from the server, contains error information.
            response_body: string (optional) specified if the response has already
                been read from the http_response object.
        """
        body = response_body or http_response.read()

        self.status = http_response.status
        self.reason = http_response.reason
        self.body = body
        self.headers = http_core.get_headers(http_response)

        self.error_msg = "Invalid response %s." % self.status
        try:
            json_from_body = simplejson.loads(body)
            if isinstance(json_from_body, dict):
                self.error_msg = json_from_body.get("error", self.error_msg)
        except ValueError:
            # JSONDecodeError subclasses ValueError in both json and
            # simplejson, so catching ValueError alone is sufficient.
            # The original `except (ValueError, JSONDecodeError)` raised a
            # TypeError when the module-level import fallback had set
            # JSONDecodeError = None.
            pass

    def __str__(self):
        return "OAuth2RevokeError(status=%i, error=%s)" % (self.status, self.error_msg)
REQUEST_TOKEN = 1
AUTHORIZED_REQUEST_TOKEN = 2
ACCESS_TOKEN = 3
class OAuth2Token(object):
    """Token object for OAuth 2.0 as described on
    <http://code.google.com/apis/accounts/docs/OAuth2.html>.

    Token can be applied to a gdata.client.GDClient object using the authorize()
    method, which then signs each request from that object with the OAuth 2.0
    access token.

    This class supports 3 flows of OAuth 2.0:

      Client-side web flow: call generate_authorize_url with `response_type='token''
      and the registered `redirect_uri'.

      Server-side web flow: call generate_authorize_url with the registered
      `redirect_url'.

      Native applications flow: call generate_authorize_url as it is. You will have
      to ask the user to go to the generated url and pass in the authorization
      code to your application.
    """

    def __init__(
        self,
        client_id,
        client_secret,
        scope,
        user_agent,
        auth_uri="https://accounts.google.com/o/oauth2/auth",
        token_uri="https://accounts.google.com/o/oauth2/token",
        access_token=None,
        refresh_token=None,
        revoke_uri="https://accounts.google.com/o/oauth2/revoke",
    ):
        """Create an instance of OAuth2Token

        Args:
            client_id: string, client identifier.
            client_secret: string client secret.
            scope: string, scope of the credentials being requested.
            user_agent: string, HTTP User-Agent to provide for this application.
            auth_uri: string, URI for authorization endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can be used.
            token_uri: string, URI for token endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can be used.
            revoke_uri: string, URI for revoke endpoint. For convenience
                defaults to Google's endpoints but any OAuth 2.0 provider can be used.
            access_token: string, access token.
            refresh_token: string, refresh token.
        """
        self.client_id = client_id
        self.client_secret = client_secret
        self.scope = scope
        self.user_agent = user_agent
        self.auth_uri = auth_uri
        self.token_uri = token_uri
        self.revoke_uri = revoke_uri
        self.access_token = access_token
        self.refresh_token = refresh_token
        # True if the credentials have been revoked or expired and can't be
        # refreshed.
        self._invalid = False

    @property
    def invalid(self):
        """True if the credentials are invalid, such as being revoked."""
        # getattr with a default keeps this safe on instances restored
        # from serialized state that predates the _invalid attribute.
        return getattr(self, "_invalid", False)

    def _refresh(self, request):
        """Refresh the access_token using the refresh_token.

        On a 200 response the new tokens are extracted; any other status
        marks these credentials invalid.

        Args:
            request: The atom.http_core.HttpRequest which contains all of the
                information needed to send a request to the remote server.

        Returns:
            The HTTP response of the refresh request.
        """
        body = urllib.parse.urlencode(
            {
                "grant_type": "refresh_token",
                "client_id": self.client_id,
                "client_secret": self.client_secret,
                "refresh_token": self.refresh_token,
            }
        )
        headers = {
            "user-agent": self.user_agent,
        }

        http_request = http_core.HttpRequest(
            uri=self.token_uri, method="POST", headers=headers
        )
        http_request.add_body_part(body, mime_type="application/x-www-form-urlencoded")
        response = request(http_request)
        body = response.read()
        if response.status == 200:
            self._extract_tokens(body)
        else:
            self._invalid = True
        return response

    def _extract_tokens(self, body):
        # Parse the token endpoint's JSON response, keeping the existing
        # refresh_token when the server does not return a new one.
        d = simplejson.loads(body)
        self.access_token = d["access_token"]
        self.refresh_token = d.get("refresh_token", self.refresh_token)
        if "expires_in" in d:
            # NOTE(review): expiry is computed with naive local time
            # (datetime.now() without a timezone) -- confirm callers agree.
            self.token_expiry = (
                datetime.timedelta(seconds=int(d["expires_in"]))
                + datetime.datetime.now()
            )
        else:
            self.token_expiry = None

    def authorize(self, client):
        """Authorize a gdata.client.GDClient instance with these credentials.

        Wraps the client's http request method so that a 401 response
        triggers a token refresh and a single retry of the request.

        Args:
            client: An instance of gdata.client.GDClient
                or something that acts like it.

        Returns:
            A modified instance of client that was passed in.

        Example:
            >>> c = gdata.client.GDClient(source='user-agent')
            >>> c = token.authorize(c)
        """
        client.auth_token = self
        request_orig = client.http_client.request

        def new_request(http_request):
            response = request_orig(http_request)
            if response.status == 401:
                # Access token likely expired: refresh, then retry once
                # with the new token applied to the original request.
                refresh_response = self._refresh(request_orig)
                if self._invalid:
                    return refresh_response
                self.modify_request(http_request)
                return request_orig(http_request)
            return response

        client.http_client.request = new_request
        return client

    def modify_request(self, http_request):
        """Sets the Authorization header in the HTTP request using the token.

        Returns:
            The same HTTP request object which was passed in.
        """
        http_request.headers["Authorization"] = "%s%s" % (
            OAUTH2_AUTH_LABEL,
            self.access_token,
        )
        return http_request

    # Legacy alias kept for callers using the old camel-case name.
    ModifyRequest = modify_request
def _make_credentials_property(name):
"""Helper method which generates properties.
Used to access and set values on credentials property as if they were native
attributes on the current object.
Args:
name: A string corresponding to the attribute being accessed on the
credentials attribute of the object which will own the property.
Returns:
An instance of `property` which is a proxy for the `name` attribute on the
credentials attribute of the object.
"""
def get_credentials_value(self):
return getattr(self.credentials, name)
def set_credentials_value(self, value):
setattr(self.credentials, name, value)
return property(get_credentials_value, set_credentials_value)
| MicroPyramid/opensource-job-portal | mpcomp/gauth.py | gauth.py | py | 10,945 | python | en | code | 336 | github-code | 36 | [
{
"api_name": "simplejson.decoder.JSONDecodeError",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "mpcomp.http_core.get_headers",
"line_number": 120,
"usage_type": "call"
},
{
"api_name": "mpcomp.http_core",
"line_number": 120,
"usage_type": "name"
},
{
... |
33040671101 | import io
from typing import List, Set, Tuple
from clvm import KEYWORD_FROM_ATOM, KEYWORD_TO_ATOM, SExp
from clvm import run_program as default_run_program
from clvm.casts import int_from_bytes
from clvm.EvalError import EvalError
from clvm.operators import OP_REWRITE, OPERATOR_LOOKUP
from clvm.serialize import sexp_from_stream, sexp_to_stream
from clvm_rs import STRICT_MODE, deserialize_and_run_program2, serialized_length
from clvm_tools.curry import curry, uncurry
from chia.types.blockchain_format.sized_bytes import bytes32
from chia.util.hash import std_hash
from .tree_hash import sha256_treehash
def run_program(
    program,
    args,
    max_cost,
    operator_lookup=OPERATOR_LOOKUP,
    pre_eval_f=None,
):
    """Evaluate *program* with *args*, capping execution cost at *max_cost*.

    Thin adapter over clvm's default run_program that reorders the
    arguments (clvm expects operator_lookup before max_cost).
    """
    return default_run_program(
        program, args, operator_lookup, max_cost, pre_eval_f=pre_eval_f
    )
INFINITE_COST = 0x7FFFFFFFFFFFFFFF
class Program(SExp):
    """
    A thin wrapper around s-expression data intended to be invoked with "eval".
    """

    @classmethod
    def parse(cls, f) -> "Program":
        # Deserialize one s-expression from the stream `f`.
        return sexp_from_stream(f, cls.to)

    def stream(self, f):
        # Serialize this program onto the stream `f`.
        sexp_to_stream(self, f)

    @classmethod
    def from_bytes(cls, blob: bytes) -> "Program":
        """Deserialize a program from `blob`; the blob must contain exactly
        one serialized s-expression (trailing bytes are rejected)."""
        f = io.BytesIO(blob)
        result = cls.parse(f)  # type: ignore # noqa
        assert f.read() == b""
        return result

    def to_serialized_program(self) -> "SerializedProgram":
        # Round-trip through the serialized form.
        return SerializedProgram.from_bytes(bytes(self))

    def __bytes__(self) -> bytes:
        f = io.BytesIO()
        self.stream(f)  # type: ignore # noqa
        return f.getvalue()

    def __str__(self) -> str:
        # Hex of the serialized form.
        return bytes(self).hex()

    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        return sha256_treehash(self, set(args))

    def run_with_cost(self, max_cost: int, args) -> Tuple[int, "Program"]:
        """Run this program against `args`, returning (cost, result)."""
        prog_args = Program.to(args)
        cost, r = run_program(self, prog_args, max_cost)
        return cost, Program.to(r)

    def run(self, args) -> "Program":
        """Run with an effectively unlimited cost budget; return the result only."""
        cost, r = self.run_with_cost(INFINITE_COST, args)
        return r

    def curry(self, *args) -> "Program":
        """Return a new program with `args` curried into this one."""
        cost, r = curry(self, list(args))
        return Program.to(r)

    def uncurry(self) -> Tuple["Program", "Program"]:
        """Split into (inner program, curried args); a non-curried program
        yields itself with nil args."""
        r = uncurry(self)
        if r is None:
            return self, self.to(0)
        return r

    def as_int(self) -> int:
        # Interpret this atom's bytes as a signed big-endian integer.
        return int_from_bytes(self.as_atom())

    def as_atom_list(self) -> List[bytes]:
        """
        Pretend `self` is a list of atoms. Return the corresponding
        python list of atoms.

        At each step, we always assume a node to be an atom or a pair.
        If the assumption is wrong, we exit early. This way we never fail
        and always return SOMETHING.
        """
        items = []
        obj = self
        while True:
            pair = obj.pair
            if pair is None:
                break
            atom = pair[0].atom
            if atom is None:
                break
            items.append(atom)
            obj = pair[1]
        return items

    def __deepcopy__(self, memo):
        # Deep copy via a serialize/deserialize round trip.
        return type(self).from_bytes(bytes(self))

    # Expose the clvm EvalError on the class for callers' convenience.
    EvalError = EvalError
def _tree_hash(node: SExp, precalculated: Set[bytes32]) -> bytes32:
    """
    Recursively compute the sha256 tree hash of `node`.

    Atoms found in `precalculated` are presumed to already BE hashes and
    are returned as-is; other atoms hash with a \\x01 prefix, pairs with
    a \\x02 prefix over their children's hashes.
    """
    if not node.listp():
        atom = node.as_atom()
        if atom in precalculated:
            return bytes32(atom)
        return bytes32(std_hash(b"\1" + atom))
    left = _tree_hash(node.first(), precalculated)
    right = _tree_hash(node.rest(), precalculated)
    return bytes32(std_hash(b"\2" + left + right))
def _serialize(node) -> bytes:
    """Serialize `node` to clvm wire bytes, avoiding a pointless
    deserialize/re-serialize round trip for already-serialized programs."""
    # isinstance (rather than an exact type() comparison) also accepts
    # subclasses of SerializedProgram and is the idiomatic check.
    if isinstance(node, SerializedProgram):
        return bytes(node)
    return SExp.to(node).as_bin()
class SerializedProgram:
    """
    An opaque representation of a clvm program. It has a more limited interface than a full SExp
    """

    # Raw serialized clvm bytes; the program is never parsed unless needed.
    _buf: bytes = b""

    @classmethod
    def parse(cls, f) -> "SerializedProgram":
        # Measure the length of the next serialized program without
        # parsing it, then slurp exactly that many bytes.
        length = serialized_length(f.getvalue()[f.tell() :])
        return SerializedProgram.from_bytes(f.read(length))

    def stream(self, f):
        f.write(self._buf)

    @classmethod
    def from_bytes(cls, blob: bytes) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(blob)
        return ret

    @classmethod
    def from_program(cls, p: Program) -> "SerializedProgram":
        ret = SerializedProgram()
        ret._buf = bytes(p)
        return ret

    def to_program(self) -> Program:
        # Parse the raw bytes into a full Program when tree access is needed.
        return Program.from_bytes(self._buf)

    def uncurry(self) -> Tuple["Program", "Program"]:
        return self.to_program().uncurry()

    def __bytes__(self) -> bytes:
        return self._buf

    def __str__(self) -> str:
        return bytes(self).hex()

    def __repr__(self):
        return "%s(%s)" % (self.__class__.__name__, str(self))

    def __eq__(self, other) -> bool:
        # Equality is byte-equality of the serialized forms.
        if not isinstance(other, SerializedProgram):
            return False
        return self._buf == other._buf

    def __ne__(self, other) -> bool:
        if not isinstance(other, SerializedProgram):
            return True
        return self._buf != other._buf

    def get_tree_hash(self, *args: List[bytes32]) -> bytes32:
        """
        Any values in `args` that appear in the tree
        are presumed to have been hashed already.
        """
        tmp = sexp_from_stream(io.BytesIO(self._buf), SExp.to)
        return _tree_hash(tmp, set(args))

    def run_safe_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        # Strict mode: unknown opcodes raise instead of being no-ops.
        return self._run(max_cost, STRICT_MODE, *args)

    def run_with_cost(self, max_cost: int, *args) -> Tuple[int, Program]:
        return self._run(max_cost, 0, *args)

    def _run(self, max_cost: int, flags, *args) -> Tuple[int, Program]:
        """Run this program via the native clvm_rs runner; returns (cost, result)."""
        # when multiple arguments are passed, concatenate them into a serialized
        # buffer. Some arguments may already be in serialized form (e.g.
        # SerializedProgram) so we don't want to de-serialize those just to
        # serialize them back again. This is handled by _serialize()
        serialized_args = b""
        if len(args) > 1:
            # when we have more than one argument, serialize them into a list
            # (\xff begins each cons cell, \x80 is the nil terminator)
            for a in args:
                serialized_args += b"\xff"
                serialized_args += _serialize(a)
            serialized_args += b"\x80"
        else:
            serialized_args += _serialize(args[0])

        # TODO: move this ugly magic into `clvm` "dialects"
        # Map native opcode values to "op_<name>" strings, skipping the
        # quote/apply/"." pseudo-keywords that the runner handles itself.
        native_opcode_names_by_opcode = dict(
            ("op_%s" % OP_REWRITE.get(k, k), op) for op, k in KEYWORD_FROM_ATOM.items() if k not in "qa."
        )

        cost, ret = deserialize_and_run_program2(
            self._buf,
            serialized_args,
            KEYWORD_TO_ATOM["q"][0],
            KEYWORD_TO_ATOM["a"][0],
            native_opcode_names_by_opcode,
            max_cost,
            flags,
        )
        return cost, Program.to(ret)
NIL = Program.from_bytes(b"\x80")
| snight1983/chia-rosechain | chia/types/blockchain_format/program.py | program.py | py | 7,273 | python | en | code | 369 | github-code | 36 | [
{
"api_name": "clvm.operators.OPERATOR_LOOKUP",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "clvm.run_program",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "clvm.SExp",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "clvm.serial... |
32281752161 | import identity_server.logic.session.login_session.logged_in_state as lst
import identity_server.logic.session.login_session.waiting_for_permission as wfp
from mongodb.Application import Application
from mongodb.ApplicationAccount import ApplicationAccount
from django.http.response import HttpResponse
import identity_server.logic.session.login_session.login_session_context as ctx
from typing import Type, Union
from django.http.request import HttpRequest
import identity_server.logic.session.session as ssn
class InitialLoginState(ssn.SessionState):
    """
    State for a login session that has not been started yet.

    Validates the incoming request and renders the login page when the
    request is valid; `route` short-circuits to the logged-in state when
    the client application was already authorized by this user.
    """

    def required_request_params(self):
        # Request parameters this state requires to be present.
        return [
            'callback_url',
            'client_id'
        ]

    def route(self, request: HttpRequest) -> Union[Type[ssn.SessionState], None]:
        """Skip straight to LoggedIn when the user already authorized this
        client; otherwise defer to the default routing."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        data = self._get_request_data(request)
        client_id = data['client_id']
        app = self.get_authorized_app(client_id)
        if app:
            # Cache the previously granted permissions for this client.
            self.session_context.authorized_clients[client_id] = app[0].permissions
            return lst.LoggedIn
        return super().route(request)

    def process_request(self, request: HttpRequest, **kwargs) -> HttpResponse:
        """Store the request's client/scope info on the session, move to the
        waiting-for-permissions state, and render the login page."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        data = self._get_request_data(request)
        client_id = data['client_id']
        scope, app_name = self._get_app_info(client_id)
        is_logged_in = self.is_user_logged_in()
        self.session_context.assign(
            {'scope': scope, 'callback_url': data['callback_url'], 'client_id': client_id, 'app': app_name})
        self.set_session_state(wfp.WaitingForPermissions)
        return self.render_html(request, 'login_page.html', context={'scope': scope, 'app': app_name, 'clientId': client_id, 'is_logged_in': is_logged_in})

    def is_user_logged_in(self):
        # A non-empty user_id on the session context means a user is logged in.
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        return self.session_context.user_id != ''

    def get_authorized_app(self, client_id):
        """Return the queryset of ApplicationAccount rows linking the current
        user to `client_id`, or None when no user is logged in."""
        assert isinstance(self.session_context, ctx.LoginSessionContext)
        user_id = self.session_context.user_id
        if user_id:
            authorized_app = ApplicationAccount.objects.filter(
                worker_id=user_id, client_id=client_id)
            return authorized_app

    def _get_app_info(self, client_id) -> tuple:
        """
        Makes request to the database to get actual application name associated with given client id
        """
        # Returns (permissions, name); the previous `-> Application`
        # annotation did not match the tuple this actually returns.
        app = Application.objects.filter(client_id=client_id).first()
        return app.permissions, app.name
| aI-lab-glider/oauth2-server-implementation | identity_server/logic/session/login_session/initial_login_state.py | initial_login_state.py | py | 2,766 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "identity_server.logic.session.session.SessionState",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "identity_server.logic.session.session",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.http.request.HttpRequest",
"line_number"... |
38971244001 | from django.db import models
from datetime import datetime
from multiselectfield import MultiSelectField
from realtors.models import Realtor
from areaprops.models import Area
# Create your models here
# Choices for amenities
# (stored_value, display_value) pairs for the Listing.amenities multi-select.
amenities_choices = (
    ('security','security'),
    ('gymnasium','gymnasium'),
    ('waste disposal','waste disposal'),
    # NOTE(review): the display value below uses an underscore unlike its
    # siblings -- looks unintentional, but the pair is stored in existing
    # rows, so confirm/migrate before changing it.
    ('reserved parking','reserved_parking'),
    ('lift','lift'),
    ('club house','club house'),
    ('shopping center','shopping center'),
    ('rain water harvesting','rain water harvesting'),
    ('water plant','water plant'),
    ('landscape garden','landscape garden'),
    ('kids play area','kids play area'),
    ('cctv','cctv'),
    ('cycle track','cycle track')
)

# Type of property
# (stored_value, display_value) pairs for Listing.property_type.
type_of_property = (
    ("1/2/3 BHK APARTMENT","1/2/3 BHK APARTMENT"),
    ("1/2 BHK APARTMENT","1/2 BHK APARTMENT"),
    ("1 BHK APARTMENT","1 BHK APARTMENT"),
    ("2 BHK APARTMENT","2 BHK APARTMENT"),
    ("3 BHK APARTMENT","3 BHK APARTMENT"),
    ("3 BHK DUPLEX","3 BHK DUPLEX"),
    ("2 BHK DUPLEX","2 BHK DUPLEX"),
    ("VILLA","VILLA"),
    ("BUNGALOW","BUNGALOW"),
    ("PLOT","PLOT"),
    ("PENTHOUSE","PENTHOUSE")
)
# Create your models here.
class Listing(models.Model):
    """
    A real-estate project listing published by a realtor.

    Prices and carpet areas are stored as (start, end) ranges since a
    project typically spans multiple unit configurations.
    """
    realtor = models.ForeignKey(Realtor, on_delete=models.DO_NOTHING)
    title = models.CharField(max_length=200)
    builder = models.CharField(max_length=200)
    rera_id = models.CharField(max_length=200)
    project_id = models.CharField(max_length=200)
    address = models.CharField(max_length=200)
    area = models.ForeignKey(Area, on_delete=models.DO_NOTHING)
    city = models.CharField(max_length=30,default='bhopal')
    state = models.CharField(max_length=30,default='MP')
    zipcode = models.CharField(max_length=20)
    description = models.TextField(blank=True)
    amenities = MultiSelectField(choices=amenities_choices)
    price_start = models.IntegerField()
    price_end = models.IntegerField()
    area_start = models.IntegerField()
    area_end = models.IntegerField()
    property_type = models.CharField(max_length=30,choices=type_of_property)
    possesion = models.CharField(max_length=20)
    photo_main = models.ImageField(upload_to='photos/%Y/%m/%d/')
    photo_1 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_2 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_3 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_4 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_5 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    photo_6 = models.ImageField(upload_to='photos/%Y/%m/%d/', blank=True)
    is_published = models.BooleanField(default=True)
    # NOTE(review): naive local time; consider timezone.now -- left as-is to
    # preserve existing behavior.
    list_date = models.DateTimeField(default=datetime.now, blank=True)

    def __str__(self):
        # The source line here had dataset metadata fused onto it; the
        # intended (and restored) statement is the plain title return.
        return self.title
{
"api_name": "django.db.models.Model",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "django.db.models.ForeignKey",
"line_number": 44,
"usage_type": "call"
},
{
"api_name... |
8721596981 | from scipy.misc import comb
def exp(p, n):
    """
    Return sum_{k=0}^{n} C(n, k) * p^k * (1-p)^(n-k), i.e. the total
    probability mass of a Binomial(n, p) distribution (mathematically 1.0
    for any valid p; this script uses it as a sanity check).
    """
    # math.comb gives exact integer coefficients; scipy.misc.comb was
    # removed from modern SciPy (it moved to scipy.special.comb).
    from math import comb
    total = 0.0
    for k in range(n+1):
        total += comb(n, k) * p**k * (1-p) ** (n-k)
    return total
def main():
    """Print the summed binomial probability mass for a grid of p and n."""
    for prob in [0.3, 0.75, 0.8, 1.0, 0.0, 0.5]:
        for trials in range(1, 20):
            print('Checking n=%d, p=%f' % (trials, prob))
            print('Result: %f' % (exp(prob, trials)))
if __name__ == '__main__':
main()
| JelteF/statistics | 2/lab2_2_d.py | lab2_2_d.py | py | 395 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.misc.comb",
"line_number": 7,
"usage_type": "call"
}
] |
10905562253 | from pydantic import BaseModel
class SourceURL(BaseModel):
    '''Source URL schema: the long-form URL submitted for shortening.'''
    # The original URL to be shortened.
    source_url: str

    class Config:
        # Pydantic v1 flag allowing construction from ORM model instances
        # (renamed `from_attributes` in Pydantic v2).
        orm_mode = True
class URLInfo(SourceURL):
    '''URL Information schema: source URL plus its shortened form.'''
    # The key identifying the shortened URL.
    short_url_key: str
    # The full shortened URL.
    short_url: str
| ScottyZA/backendend-challenge | url_shortener/schemas.py | schemas.py | py | 255 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 4,
"usage_type": "name"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.