seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
20543243144 | import jax
import numpy as np
from jax import lax
from jax import numpy as jnp
def gaussian(x, sigma):
    """Evaluate the zero-mean normal density with standard deviation ``sigma`` at ``x``."""
    normalization = sigma * np.sqrt(2 * np.pi)
    return np.exp(-(x**2) / (2 * sigma**2)) / normalization
def make_gaussian_kernel(n, sigma, dx=0.001):
    """Build a normalized 1-D Gaussian kernel with ``n`` cells.

    The Gaussian is sampled on a fine grid of spacing ``dx`` and integrated
    over each of the ``n`` unit-width cells, so each entry is the cell-averaged
    (not point-sampled) Gaussian weight.

    Args:
        n: Odd number of kernel cells (odd so there is a central cell).
        sigma: Standard deviation of the Gaussian, in cell units.
        dx: Sub-cell sampling step for the numerical integration.

    Returns:
        1-D array of length ``n`` summing to 1.
    """
    assert n % 2 == 1  # Make sure n is odd
    # Compute gaussian on a symmetric grid
    x = np.arange((-n + dx) / 2, n / 2, dx)
    y = gaussian(x, sigma)
    # Integrate the gaussian over each cell.
    y = y.reshape((n, -1))
    # np.trapz was renamed to np.trapezoid in NumPy 2.0 (and later removed);
    # support both so the code runs on either major version.
    trapezoid = getattr(np, "trapezoid", None) or np.trapz
    yint = trapezoid(y, dx=dx, axis=-1)
    # Make sure the kernel integrates to 1. It would anyway if n >> sigma.
    yint /= np.sum(yint)
    return yint
def compute_radial_distance_grid(rmax, ndim):
    """Return an ``ndim``-dimensional grid of Euclidean distances to its center.

    The grid has ``2 * rmax + 1`` bins per axis; each entry is the distance
    from that bin's midpoint to the central bin's midpoint.
    """
    size = 2 * rmax + 1
    # Midpoint offsets along a single axis.
    offsets = np.arange(-rmax, rmax + 1)
    assert offsets.size == size
    sq_offsets = offsets**2
    # Broadcast the squared 1-D offsets along each axis and sum them to get
    # the squared Euclidean distance at every grid point.
    shapes = (
        tuple(size if axis == d else 1 for axis in range(ndim))
        for d in range(ndim)
    )
    total_sq = sum(sq_offsets.reshape(shape) for shape in shapes)
    return np.sqrt(total_sq)
def make_spherical_top_hat(rmax, ndim, normalize=True):
    """Return an ``ndim``-D spherical top-hat: 1 within radius ``rmax``, 0 outside.

    If ``normalize`` is true the entries are rescaled to sum to 1.
    """
    distances = compute_radial_distance_grid(rmax, ndim)
    # Boolean mask of in-sphere bins, cast back to the float grid dtype.
    hat = (distances <= rmax).astype(distances.dtype)
    if normalize:
        hat /= hat.sum()
    return hat
def conv3d(grid, kernel, padding="SAME"):
    """Convolve a 3-D array with a 3-D kernel using lax.conv_general_dilated."""
    assert grid.ndim == 3
    assert kernel.ndim == 3
    # lax convolutions expect leading "batch" and "feature" axes; add dummies.
    batched = grid[jnp.newaxis, jnp.newaxis]
    filters = kernel[jnp.newaxis, jnp.newaxis]
    # Unit strides in all three spatial dimensions.
    result = lax.conv_general_dilated(
        batched, filters, window_strides=(1, 1, 1), padding=padding)
    # Drop the size-1 axes again.
    return jnp.squeeze(result)
def conv3d_separable(grid, kernels, padding="SAME"):
    """Apply one 1-D kernel per spatial axis via successive 3-D convolutions."""
    ndim = grid.ndim
    assert len(kernels) == ndim
    # Convolve once per dimension, embedding each 1-D kernel in a 3-D array
    # aligned with the corresponding spatial axis.
    for axis, kernel in enumerate(kernels):
        length, = kernel.shape
        kernel_shape = [1] * ndim
        kernel_shape[axis] = length
        grid = conv3d(grid, kernel.reshape(kernel_shape), padding)
    return jnp.squeeze(grid)
| cshallue/recon-cnn | recon/smoothing.py | smoothing.py | py | 2,316 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.exp",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 15,... |
32933105911 | import numpy as np
from PIL import Image
# Build a 512x512 RGB gradient image: red fades into green across the left
# half, then green fades into blue across the right half.
rainbow = np.zeros((512, 512, 3), 'uint8')  # was (521, 512, 3): height typo fixed
for i in range(0, 256):
    rainbow[:, i, 0] = 255 - i   # red:   255 -> 0
    rainbow[:, i, 1] = 0 + i     # green: 0 -> 255
for i in range(256, 512):
    # The original wrote 255-i and 0+i here, which leaves the uint8 range
    # (negative green, blue > 255); rescale i into 0..255 instead.
    rainbow[:, i, 1] = 511 - i   # green: 255 -> 0
    rainbow[:, i, 2] = i - 256   # blue:  0 -> 255
# Wrap the NumPy array as a PIL image so it can be written to disk below.
image = Image.fromarray(rainbow)
image.save('rainbow.jpg') | hieumewmew/MultimediaCommunicationExam | bai5/rainbow.py | rainbow.py | py | 299 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 13,
"usage_type": "name"
}
] |
14102761586 | import platform
import yaml
import pkg_resources
import re
import logging
log = logging.getLogger(__name__)
def convert_conda_yaml_to_requirement(conda_array):
    '''
    Convert the conda.yaml syntax to requirements.txt syntax :
    for now :
        - select "dependencies" key
        - transform = into ==
        - add pip packages dependencies to the list of other dependencies
    Additionally remove python requirement (not supported by pkg_resources.require)
    Also need to remove pip -e "install"
    '''
    deps = conda_array["dependencies"]
    # Plain strings are conda requirements; the optional {"pip": [...]} dict
    # holds pip requirements.
    dep_array = [v for v in deps if type(v) == str]
    pip_sections = [v for v in deps if type(v) == dict and "pip" in v.keys()]
    # Tolerate a conda.yaml without a pip section (the original indexed [0]
    # unconditionally and crashed with an IndexError in that case).
    pip_require = pip_sections[0]["pip"] if pip_sections else []
    # remove " -e " (editable) install type :
    pip_require = [v for v in pip_require if (re.match(r"^ *-e ", v) == None)]
    # conda uses a single '=' where pip uses '=='; add the extra '=' then fix
    # up the over-replacements around <, > and existing '=='.
    dep_array_conv = [x.replace('=', '==') for x in dep_array]
    dep_array_conv = [x.replace(r'>==', '>=').replace('<==', '<=').replace('===', '==')
                      for x in dep_array_conv]
    # put back pip requirement in place (assumes it belongs at the end)
    dep_array_conv = dep_array_conv + pip_require
    # remove python version check
    dep_array_conv = [x for x in dep_array_conv if re.match('^python[<.>,=]=', x) == None]
    return dep_array_conv
def conda_python_version_requirement(conda_array):
    '''
    Return the python version required if present in the conda.yaml
    Otherwise return None
    '''
    # Only plain-string entries can be conda version requirements.
    string_deps = (v for v in conda_array["dependencies"] if type(v) == str)
    # Keep entries of the form "python<constraint>".
    python_reqs = [d for d in string_deps if re.match('^python[<.>,=]', d) is not None]
    if not python_reqs:
        return None
    # Only the first occurrence is considered; strip the package name.
    return python_reqs[0].replace('python', '')
def check_python(requirement, value) :
'''
Check if a Python version abide by a Python version requirement
WARNING :
this can only check 1 condition, can not check multiple conditions
separated by ,
'''
condition = re.findall('[<,>,=]=*', requirement)[0]
condition = condition.replace('=','==')
condition = condition.replace('<==','<=').replace('>==','>=').replace('===','==')
version_req = re.findall('[0-9.]+', requirement)[0]
len_version = len(version_req.split('.'))
value = ".".join(value.split('.')[0:len_version])
value = pkg_resources.parse_version(value)
version_req = pkg_resources.parse_version(version_req)
test = eval("value "+condition+" version_req")
return test
def check_environment(filename='conda.yaml'):
    '''
    Check that the current conda environment abide by the filename (conda.yaml)
    and raise an error if not.
    A good place to put the function is in the file ./src/{project_name}/pipeline.py
    at the beginning of the create_pipelines function
    '''
    with open(filename) as stream:
        values = yaml.safe_load(stream)
    pkg_req = convert_conda_yaml_to_requirement(values)
    # Raises a pkg_resources error if any installed package conflicts.
    pkg_resources.require(pkg_req)
    python_req = conda_python_version_requirement(values)
    if python_req is not None:
        python_ver = platform.python_version()
        if not check_python(python_req, python_ver):
            raise Exception(f"python version {python_ver} is not compatible "
                            f"with conda.yaml python requirement {python_req}")
    # Fixed: the original f-string interpolated nothing ("... of (unknown)");
    # report the file that was actually checked.
    log.info(f"Conda environment matches the requirements of {filename}")
# Allow running this module directly to validate the current environment.
if __name__ == "__main__" :
    check_environment()
| nasa/ML-airport-data-services | data_services/conda_environment_test.py | conda_environment_test.py | py | 3,612 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 49,
... |
25417748938 | #!/usr/bin/env python
import soundfile as sf
import math
class LoopableSample():
    """Accumulates audio samples and writes a seamlessly loopable,
    cross-faded version of them to disk."""

    def __init__(self):
        # Flat list of audio samples accumulated so far.
        self.data = []

    def addBuffer(self, buffer):
        # Append every sample of the buffer to the accumulated data.
        for d in buffer:
            self.data.append(d)

    def fromFile(self, file):
        # Load samples from an audio file, dropping the first 2 * 4410 frames
        # (0.2 s at 44.1 kHz -- presumably to skip an attack transient; confirm).
        print("loading %s" % file)
        (data, ignore) = sf.read(file, dtype="float32")
        self.addBuffer(data[2 * 4410:])
        return self

    def length(self):
        # Usable length: everything except the last 5 * 4410 frames
        # (0.5 s at 44.1 kHz).
        return len(self.data) - (5 * 4410)

    def create(self, outFile):
        """Write a loopable rendering of the sample to outFile (44.1 kHz).

        The second half of the usable data is played first, then a linear
        cross-fade (75% of the half length) blends back into the first half,
        so the end of the output matches its beginning without a click.
        """
        l = self.length()
        halfWay = math.floor(l / 2)
        xFade = math.floor(0.75 * halfWay)
        out = []
        for s in range(l - xFade):
            p = s + xFade - halfWay
            if s < (halfWay - xFade):
                # Before the cross-fade region: copy from the second half.
                out.append(self.data[s + halfWay])
            elif s >= halfWay:
                # After the cross-fade region: copy from the first half.
                out.append(self.data[p])
            else:
                # Cross-fade region: linear blend between the two halves.
                f = 1.0 * p / xFade
                out.append((f * self.data[p]) + ((1.0 - f) * self.data[s + halfWay]))
        sf.write(outFile, out, 44100)
| andrewbooker/samplescaper | capture/LoopableSample.py | LoopableSample.py | py | 1,055 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "soundfile.read",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "math.floor",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "soundfile.write",
"line_numb... |
26175621630 | from distutils.core import setup, Extension
from Cython.Build import cythonize
# Header/include search paths for the extension build: the project's own
# headers plus its vendored third-party dependencies.
include_dirs_list = [
    "../include",
    "../Thirdparty/libccd/src", #libccd
    "../Thirdparty/libccd/build/src", #libccd
    "../Thirdparty/yaml-cpp/include", # yaml-cpp
    "../Thirdparty/boost_1_7_0", # boost
    "../Thirdparty/eigen", # eigen
    "../Thirdparty/googletest/googletest/include", # gtest
    "../Thirdparty/octomap/octomap/include", # octomap ?
    "../Thirdparty/fcl/build/include", # fcl
    "../Thirdparty/fcl/include", # fcl
]
# Build the "pympl" Cython extension: compile pympl.pyx as C++11 and link it
# against the pre-built libmpl in ../build.
setup(ext_modules = cythonize(Extension(
    "pympl", # the extension name
    sources=["pympl.pyx"], # the Cython source and additional C++ source files
    language="c++", # generate and compile C++ code
    include_dirs=include_dirs_list,
    library_dirs=["../build"],
    libraries=["mpl"],
    extra_compile_args=["-std=c++11"]
)))
{
"api_name": "distutils.core.setup",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Cython.Build.cythonize",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "distutils.core.Extension",
"line_number": 21,
"usage_type": "call"
}
] |
17498728037 | import os.path
import pandas
import numpy as np
def opt_report(reportPath, snrTh=0.9, debug=False, plotError=True):
    """Summarize a co-registration optimization report CSV.

    For each optimization loop the per-GCP displacements (dxPix, dyPix) are
    averaged; the mean displacement magnitude and the combined RMSE are
    computed per loop. Optionally plots both error curves and saves the
    figure as "CoregistrationError.png" next to the report file.

    Returns:
        (loop index with minimum mean error, total number of loops,
         minimum mean error in pixels).
    """
    df = pandas.read_csv(reportPath)
    # The report is assumed ordered, so the last "nbLoop" is the loop count.
    totalNbLoop = list(df["nbLoop"])[-1]
    # print(totalNbLoop)
    loopList = []
    rmseList = []
    avgErrorList = []
    for loop_ in range(totalNbLoop + 1):
        if debug:
            print("------ Loop:{} -------".format(loop_))
        # Collect the measurements belonging to this loop.
        itemList = []
        dxPixList = []
        dyPixList = []
        snrList = []
        # NOTE(review): dxList/dyList are never used below -- candidates for removal.
        dxList = []
        dyList = []
        for item, dxPix_, dyPix_, snr_ in zip(list(df["nbLoop"]), list(df["dxPix"]), list(df["dyPix"]),
                                              list(df["SNR"])):
            if item == loop_:
                itemList.append(item)
                dxPixList.append(dxPix_)
                dyPixList.append(dyPix_)
                snrList.append(snr_)
        # SNR == 0 entries are treated as failed (NaN-like) matches.
        nanList = [item_ for item_ in snrList if item_ == 0]
        snrThList = [item_ for item_ in snrList if item_ > snrTh]
        # Mean and standard deviation of the displacements, ignoring NaNs.
        dxPixAvg = np.nanmean(np.asarray(dxPixList))
        dyPixAvg = np.nanmean(np.asarray(dyPixList))
        dxPixRMSE = np.nanstd(np.asarray(dxPixList))
        dyPixRMSE = np.nanstd(np.asarray(dyPixList))
        # Combine x/y into a single displacement magnitude and RMSE.
        xyErrorAvg = np.sqrt(dxPixAvg ** 2 + dyPixAvg ** 2)
        xyRMSE = np.sqrt(dxPixRMSE ** 2 + dyPixRMSE ** 2)
        if debug:
            print("#GCPs:{} --> #NaNs:{} ; #snrTh >{}:{}".format(len(itemList), len(nanList), snrTh, len(snrThList)))
            print("dxPixAvg:{} , xRMSE:{}".format("{0:.4f}".format(dxPixAvg),
                                                  "{0:.2f}".format(dxPixRMSE)))
            print("dyPixAvg:{} , yRMSE:{}".format("{0:.4f}".format(dyPixAvg),
                                                  "{0:.2f}".format(dyPixRMSE)))
            print("xyErrorAvg:{} , xyRMSE:{}".format("{0:.4f}".format(xyErrorAvg),
                                                     "{0:.2f}".format(xyRMSE)))
        loopList.append(loop_)
        rmseList.append(xyRMSE)
        avgErrorList.append(xyErrorAvg)
    # Pick the loop with the smallest mean displacement error.
    indexMin = np.argmin(avgErrorList)
    # if debug:
    print("Loop of Min Error:{} --> RMSE:{:.3f} , avgErr:{:.3f}".format(loopList[indexMin], np.min(rmseList),
                                                                        np.min(avgErrorList)))
    if plotError:
        # Imported lazily so headless runs without matplotlib still work.
        import matplotlib.pyplot as plt
        from matplotlib.ticker import (AutoMinorLocator)
        fig, ax = plt.subplots()
        ax.plot(loopList, rmseList, c="r", linestyle="--", marker="o", label="RMSE [pix]")
        ax.plot(loopList, avgErrorList, c="g", linestyle="-", marker="o", label="meanErr [pix]")
        ax.grid()
        ax.legend()
        ax.xaxis.set_minor_locator(AutoMinorLocator())
        ax.yaxis.set_minor_locator(AutoMinorLocator())
        ax.tick_params(which='both', width=2, direction="in")
        ax.set_xlabel('#iterations')
        ax.set_ylabel("Error [pix]")
        # plt.show()
        # Save the figure next to the input report.
        fig.savefig(os.path.join(os.path.dirname(reportPath), "CoregistrationError.png"), dpi=400)
    return loopList[indexMin], totalNbLoop, np.min(avgErrorList)
def parse_opt_report(opt_report_path):
    """Parse a co-registration optimization report CSV.

    For every optimization loop, computes the mean displacement error
    magnitude sqrt(mean_dx^2 + mean_dy^2) and the combined RMSE
    sqrt(std_dx^2 + std_dy^2) in pixels, ignoring NaN measurements.

    Args:
        opt_report_path: Path to a CSV with at least the columns
            "nbLoop", "dxPix" and "dyPix".

    Returns:
        (rmse, avg_error, loop_min_err): per-loop RMSE list, per-loop mean
        error list, and the loop index with the smallest mean error.
    """
    df = pandas.read_csv(opt_report_path)
    # The report is assumed ordered, so the last "nbLoop" is the loop count.
    nb_loops = list(df["nbLoop"])[-1]
    rmse = []
    avg_error = []
    for loop_ in range(nb_loops + 1):
        # Select this loop's rows with a vectorized mask instead of the
        # original O(loops x rows) Python re-scan (which also collected
        # unused SNR/item lists).
        mask = df["nbLoop"] == loop_
        dx = np.asarray(df.loc[mask, "dxPix"])
        dy = np.asarray(df.loc[mask, "dyPix"])
        xyErrorAvg = np.sqrt(np.nanmean(dx) ** 2 + np.nanmean(dy) ** 2)
        xyRMSE = np.sqrt(np.nanstd(dx) ** 2 + np.nanstd(dy) ** 2)
        rmse.append(xyRMSE)
        avg_error.append(xyErrorAvg)
    # Loop indices are 0..nb_loops, so the argmin position is the loop number.
    loop_min_err = int(np.argmin(avg_error))
    return rmse, avg_error, loop_min_err
| SaifAati/Geospatial-COSICorr3D | geoCosiCorr3D/geoTiePoints/misc.py | misc.py | py | 4,538 | python | en | code | 37 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.nanmean",
"line_... |
26613373403 | from sklearn.linear_model import LogisticRegression
import pandas as pd
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sn
import matplotlib.pyplot as plt
# Load the wine dataset and shuffle it reproducibly (fixed random_state).
data = pd.read_csv('./Data/wine.csv')
data = data.sample(frac=1, random_state=42).reset_index(drop=True)
ndata = data.shape[0]
ncolumn = data.shape[1]
# 70/30 train/test split by row position (valid because rows were shuffled).
train_rate = 0.7
ntrain = int(ndata * train_rate)
train_index = range(ntrain)
test_index = range(ntrain, ndata)
train, test = data.iloc[train_index,], data.iloc[test_index,]
# Last column is treated as the binary class label; the rest are features.
train_x, train_y = train.iloc[:,:-1], train.iloc[:,-1]
test_x, test_y = test.iloc[:,:-1], test.iloc[:,-1]
# Fit a logistic regression classifier on the training split.
log = LogisticRegression()
log.fit(train_x,train_y)
# --- Evaluation on the training data ---
estimates = log.predict(train_x)
C=confusion_matrix(train_y,estimates)
TN, FP, FN, TP = C.ravel()
Accuracy= accuracy_score(train_y,estimates)
Precision=float(TP/(TP+FP))
Recall=float(TP/(TP+FN))
Specificity=float(TN/(TN+FP))
F1measure=float(2*Precision*Recall/(Precision+Recall))
# NOTE(review): G-mean is usually defined as sqrt(Recall * Specificity);
# here it is computed as sqrt(Precision * Recall) -- confirm intent.
Gmean=float(np.sqrt(Precision*Recall))
print("This solution is computed using train data")
print(C)
print("Accuracy using train data is: %.3f"%(Accuracy))
print("Precision : %.3f, Recall : %.3f, Specificity : %.3f, F1measure : %.3f, G-mean : %.3f" %(Precision, Recall, Specificity, F1measure, Gmean))
print("Type 1 error : %.3f, Type 2 error : %.3f\n"%(1-Specificity, 1-Recall))
# --- Evaluation on the held-out test data (same metrics as above) ---
estimates2 = log.predict(test_x)
C2=confusion_matrix(test_y,estimates2)
TN2, FP2, FN2, TP2 = C2.ravel()
Accuracy2 = accuracy_score(test_y, estimates2)
Precision2 = float(TP2 / (TP2 + FP2))
Recall2 = float(TP2 / (TP2 + FN2))
Specificity2 = float(TN2 / (TN2 + FP2))
F1measure2 = float(2 * Precision2 * Recall2 / (Precision2 + Recall2))
Gmean2 = float(np.sqrt(Precision2 * Recall2))
print("This solution is computed using test data")
print(C2)
print("Accuracy using test data is: %.3f" % (Accuracy2))
print("Precision : %.3f, Recall : %.3f, Specificity : %.3f, F1measure : %.3f, G-mean : %.3f" % (
    Precision2, Recall2, Specificity2, F1measure2, Gmean2))
print("Type 1 error : %.3f, Type 2 error : %.3f\n" % (1 - Specificity2, 1 - Recall2))
# Render both confusion matrices as labeled heatmaps, stacked vertically.
df_cm = pd.DataFrame(C, ['Actual N','Actual P'],['Predicted N','Predicted P'])
df_cm2 = pd.DataFrame(C2, ['Actual N','Actual P'],['Predicted N','Predicted P'])
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.set(title='Confusion Matrix of Train Data')
ax2 = fig.add_subplot(212)
ax2.set(title='Confusion Matrix of Test Data')
sn.heatmap(df_cm, annot=True, fmt='d', ax=ax1, annot_kws={"size": 16})
sn.heatmap(df_cm2, annot=True, fmt='d', ax=ax2, annot_kws={"size": 16})
plt.tight_layout()
plt.show() | larocaroja/Advanced-Programming | Logistic Regression.py | Logistic Regression.py | py | 2,652 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.confusion_matrix",
"line_number": 28,
"usage_type": "call"
},
... |
26740758351 | #!/usr/bin/env python
import glob, os, sys, subprocess, shutil, string, argparse
# Command-line interface: the PBS submission script passes the plotting
# options (--optstr), the full list of plottable variables (--varlist) and
# where to ship the results (--outputpath).
parser = argparse.ArgumentParser(description="Wrapper script for MakePlots_HTopMultilep.py. This gets called on the PBS worker node via the PBS script generated by submit-PBS-ARRAY-MakePlots_HTopMultilep.py. The variable to be plotted gets retrieved via the PBS_ARRAYID index.")
parser.add_argument("--optstr", dest="optstr", action="store", type=str)
parser.add_argument("--varlist", dest="varlist", action="store", type=str, nargs="+")
parser.add_argument("--outputpath", dest="outputpath", action="store", type=str)
args = parser.parse_args()
if __name__ == '__main__':
    # Read varlist from argparse.
    # It will automagically re-create a python list from the multiple arguments of the input --varlist option.
    varlist = args.varlist
    # Get the var from the PBS_ARRAYID (each array job plots one variable).
    pbs_array_idx = int(os.getenv('PBS_ARRAYID'))
    var = varlist[pbs_array_idx]
    print("Current job index PBS_ARRAYID={0}, var={1}".format(pbs_array_idx,var))
    # OK, execute plotting script for this var!
    # NB: it's crucial to make this call when running on the worker node, otherwise
    # python will not be able to find modules in Plotter/
    os.chdir(os.path.abspath(os.path.curdir)+"/HTopMultilepAnalysis/PlotUtils")
    plotscript = os.path.abspath(os.path.curdir) + "/Plotter/MakePlots_HTopMultilep.py"
    optlist = args.optstr.split(' ')
    cmdlist = ['python',plotscript] + optlist + ['--submitPBSVar',var]
    cmd = " ".join( "{0}".format(c) for c in cmdlist )
    print("Executng command:\n{0}".format(cmd))
    # NOTE(review): the command runs through the shell; optstr is assumed to
    # come from the trusted submission script, not untrusted user input.
    subprocess.call( cmd, shell = True )
    # Now move the output to the target directory
    outputpath = args.outputpath
    if not outputpath[-1] == '/':
        outputpath += '/'
    # Get all subdirs in current location whose name starts with "OutputPlots_", rsync them to output directory, and remove the local copy
    job_outdirs = [ dir for dir in os.listdir(".") if "OutputPlots_" in dir and os.path.isdir(dir) ]
    for dir in job_outdirs:
        thisdir = dir
        if thisdir[-1] == '/':
            thisdir = thisdir[:-1]
        subprocess.call( ['rsync','-azP',thisdir,outputpath] )
        shutil.rmtree(thisdir)
| mmilesi/HTopMultilepAnalysis | PlotUtils/Scripts/wrapper-MakePlots_HTopMultilep-PBS.py | wrapper-MakePlots_HTopMultilep-PBS.py | py | 2,258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.chdir",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line... |
74352481703 | # -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
GUFY - Copyright (c) 2019, Fabian Balzer
Distributed under the terms of the GNU General Public License v3.0.
The full license is in the file LICENSE.txt, distributed with this software.
-------------------------------------------------------------------------------
@author: Fabian Balzer (fabian.balzer@studium.uni-hamburg.de)
A module for the progress bar updates and threading, including the heads of the
evaluation functions.
The module is structured as follows:
- The ProgressDialog class for creating a progress window when plotting
- The Worker classes for carrying out the plotting process
- Function for evaluating single file data
- Function for evaluating time series data
"""
import numpy as np
import traceback
import PyQt5.QtWidgets as QW
import PyQt5.QtCore as QC
import PyQt5.QtGui as QG
import simgui_modules.plots as splot # used in eval-commands
import simgui_modules.utils as sut
from simgui_modules.additionalWidgets import GUILogger
# %% The ProgressDialog class for creating a progress window when plotting
class ProgressDialog(QW.QDialog):
"""A dialog that pops up when a plot is being made. Shows a progressbar
and a status concerning plotting, and contains a button to stop the plot
process. Automatically closes upon finishing.
Parameters:
Param_Dict: For the plot parameters.
parent: QObject: preferably the main window.
all_: bool: unimplemented function to plot everything
"""
finished = QC.pyqtSignal(bool) # signal to indicate success
def __init__(self, Param_Dict, parent, mode, Request_Dict=None, slider=None):
super().__init__(parent=parent)
self.setModal(True)
self.setWindowFlags( # This will stop the close button from appearing
QC.Qt.Window |
QC.Qt.CustomizeWindowHint |
QC.Qt.WindowTitleHint |
QC.Qt.WindowMinimizeButtonHint |
QC.Qt.WindowStaysOnTopHint
)
self.Request_Dict = Request_Dict
self.mode = mode
self.initUi()
self.determineProgressLength(Param_Dict)
self.setWindowTitle("Plotting in progress...")
if mode == "Test":
self.parent().progressWorker = TestPlotWorker()
elif mode == "PlotSingle":
self.parent().progressWorker = PlotSingleWorker(Param_Dict) # Create worker for plotting
elif mode == "PlotAll":
self.parent().progressWorker = PlotMultipleWorker(Param_Dict,
Request_Dict,
slider)
else:
self.parent().progressWorker = TestPlotWorker()
self.parent().thread = QC.QThread() # Create thread for worker. For safety reasons leave a reference on the main window.
self.signalsConnection()
self.parent().progressWorker.moveToThread(self.parent().thread)
self.parent().thread.start() # Start the thread
self.resize(400, self.height())
self.setWindowIcon(QG.QIcon('simgui_registry/CoverIcon.png'))
self.show()
def initUi(self):
"""Initiates the visual elements, including a progress bar and a cancel
button"""
self.progressBar = QW.QProgressBar()
self.infoLabel = QW.QLabel()
self.cancelButton = QW.QPushButton("Cancel")
buttonBox = QW.QWidget()
buttonBoxLayout = QW.QHBoxLayout(buttonBox)
buttonBoxLayout.addStretch(1)
buttonBoxLayout.addWidget(self.cancelButton)
buttonBoxLayout.setContentsMargins(0, 0, 0, 0)
layout = QW.QVBoxLayout(self)
layout.addWidget(self.progressBar)
layout.addWidget(self.infoLabel)
if self.mode == "PlotAll":
self.multipleProgressBar = QW.QProgressBar()
self.multipleProgressBar.setRange(0, self.Request_Dict["PlotNumber"])
self.multipleProgressBar.setValue(0)
layout.addWidget(self.multipleProgressBar)
self.multiInfoLabel = QW.QLabel(f"Currently working on plot 1/{self.Request_Dict['PlotNumber']}...")
layout.addWidget(self.multiInfoLabel)
layout.addStretch(1)
layout.addWidget(buttonBox)
def determineProgressLength(self, Param_Dict):
"""Calculate the number of checkpoint steps for the current plot
settings and format the slider accordingly."""
self.progressBar.setRange(0, 0)
# startplot, modifications, annotations, startplot,
length = 6 # _setupPlots, finish
if Param_Dict["PlotMode"] == "Profile":
length += sut.calculateProfileAdditions(Param_Dict)
self.progressBar.setRange(0, length)
self.progressBar.setValue(0)
def updateProgress(self, message):
"""Updates the progressbar by one step and sets the text of the
infoLabel to message."""
value = self.progressBar.value()
self.progressBar.setValue(value + 1)
self.infoLabel.setText(f"{message}...")
def updateMultiProgress(self):
oldValue = self.multipleProgressBar.value()
self.multipleProgressBar.setValue(oldValue + 1)
self.progressBar.setValue(0)
text = f"{oldValue+2}/{self.Request_Dict['PlotNumber']}"
self.multiInfoLabel.setText(f"Currently working on plot {text}...")
GUILogger.debug(text)
def signalsConnection(self):
"""Connect the cancelButton"""
self.parent().progressWorker.progressUpdate.connect(self.updateProgress)
self.parent().progressWorker.finished.connect(lambda: self.close())
if self.mode == "PlotAll":
self.parent().progressWorker.multiProgress.connect(self.updateMultiProgress)
self.cancelButton.clicked.connect(self.stopProcess)
self.parent().thread.started.connect(self.parent().progressWorker.plot)
def keyPressEvent(self, event):
if event.key() == QC.Qt.Key_Escape:
self.stopProcess()
def closeEvent(self, event):
self.parent().thread.quit()
super().closeEvent(event)
def stopProcess(self):
self.infoLabel.setText(f"Plotting interrupted. Please wait until the current step is finished...")
self.cancelButton.setDisabled(True)
plotWindow = self.parent().Param_Dict["CurrentPlotWindow"]
plotWindow.restoreSettings.setDisabled(True)
plotWindow.writeFileButton.setDisabled(True)
plotWindow.externalWindowButton.setDisabled(True)
self.parent().progressWorker._isRunning = False
# %% The Worker classes for carrying out the plotting process
class WorkerBase(QC.QObject):
"""A base class for objects to be used during threading"""
finished = QC.pyqtSignal(bool)
progressUpdate = QC.pyqtSignal(str)
def __init__(self):
super().__init__()
self._isRunning = True
self.oldMessage = "Starting up"
class PlotSingleWorker(WorkerBase):
"""A worker to carry out a single plot"""
def __init__(self, Param_Dict):
super().__init__()
self.Param_Dict = Param_Dict
@QC.pyqtSlot() # This is necessary to make the threading work.
def plot(self):
try:
evaluateSingle(self.Param_Dict, self)
except sut.WorkingException as e:
GUILogger.error(str(e.args[0]))
except Exception as e:
traceback.print_exc() # This will print the complete traceback including links to the lines
GUILogger.exception("A yt-internal exception occured:<br><b><font color"
f'="DarkRed">{type(e).__name__}:</font><br>'
f"{e}</b>")
GUILogger.log(29, "I've printed the traceback for you.")
self._isRunning = False
self.finished.emit(self._isRunning)
class PlotMultipleWorker(WorkerBase):
"""A worker to carry out multiple consecutive plots"""
finished = QC.pyqtSignal(bool, str)
multiProgress = QC.pyqtSignal()
def __init__(self, Param_Dict, Request_Dict, slider):
super().__init__()
self.Param_Dict = Param_Dict
self.Request_Dict = Request_Dict
self.slider = slider
@QC.pyqtSlot() # This is necessary to make the threading work.
def plot(self):
try:
evaluateMultiple(self.Param_Dict, self.Request_Dict, self.slider, self)
except sut.WorkingException as e:
GUILogger.error(str(e.args[0]))
except Exception as e:
traceback.print_exc() # This will print the complete traceback including links to the lines
GUILogger.exception("A yt-internal exception occured:<br><b><font color"
f'="DarkRed">{type(e).__name__}:</font><br>'
f"{e}</b>")
GUILogger.log(29, "I've printed the traceback for you.")
self._isRunning = False
self.finished.emit(self._isRunning, self.Request_Dict["Directory"])
class TestPlotWorker(WorkerBase):
@QC.pyqtSlot() # Override this
def plot(self):
import time
for i in range(100):
if self._isRunning:
time.sleep(0.02)
self.progressUpdate.emit(str(i))
if self._isRunning:
self.success = True
self.finished.emit()
# %% Function for evaluating single file data
def evaluateSingle(Param_Dict, worker):
"""Handles the different cases needed for evaluation of a Data or
DataSetSeries object.
Parameters:
Param_Dict: For the information to be plotted
worker: Worker object the evaluation is initiated from
"""
mode = Param_Dict["PlotMode"]
sut.emitStatus(worker, f"Creating the initial {mode.lower()} plot")
GUILogger.log(29, f"Producing the requested {mode.lower()} plot...")
# For lineplotting we need to remember the grid unit
Param_Dict["oldGridUnit"] = Param_Dict["GridUnit"]
# Convenient way to choose the right function:
eval(f"splot.{mode}Plot(Param_Dict, worker)")
sut.emitStatus(worker, "Finishing")
# %% Function for evaluating time series data
def evaluateMultiple(Param_Dict, Request_Dict, slider, worker):
"""Evaluate the series according to the settings given from the
plotDialog. If the makeMovie-attribute from the dialog is True, ask for a
directory, create a folder and save the figures there."""
mode = Param_Dict["PlotMode"]
directory = Request_Dict["Directory"]
onlyEvery = Request_Dict["OnlyEvery"]
plotnum = Request_Dict["PlotNumber"]
GUILogger.log(29, f"Producing the requested {mode.lower()} plots...")
sut.emitStatus(worker, f"Creating the initial {mode.lower()} plot")
# For lineplotting we need to remember the grid unit
Param_Dict["oldGridUnit"] = Param_Dict["GridUnit"]
i = 0
for j in range(Request_Dict["Length"]):
if i % onlyEvery == 0:
# The following will set the plotWindow and dataset to the one we want
Param_Dict["SignalHandler"].getSliderInput(value=j, seriesEval=True)
# Convenient way to choose the right plot function
eval(f"splot.{mode}Plot(Param_Dict, worker)")
GUILogger.info(f"Progress: {int(i/onlyEvery+1)}/{plotnum} {mode.lower()} plots done.")
if Request_Dict["MakeMovie"]:
saveName = f"{directory}/{mode}plot_{i+1}"
Param_Dict["CurrentPlotWindow"].saveFigure(saveName)
sut.emitMultiStatus(worker, i, plotnum)
i += 1
slider.setValue(j)
| Fabian-Balzer/GUFY | GUFY/simgui_modules/threading.py | threading.py | py | 12,049 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "... |
6951210977 | import argparse
from algorithms.utils import timedcall
@timedcall
def count_inversions(array):
    """Return the number of inversions in `array` (timed by the decorator)."""
    return _count_inversions(array)[1]
def _count_inversions(array):
    """Return (sorted array, inversion count) computed via merge sort."""
    if len(array) < 2:
        # Base case: a 0/1-element list has no inversions.
        return array, 0
    half = len(array) // 2
    left_sorted, inv_left = _count_inversions(array[:half])
    right_sorted, inv_right = _count_inversions(array[half:])
    # Cross inversions between the two halves are counted while merging.
    merged, inv_cross = merge(left_sorted, right_sorted)
    return merged, inv_left + inv_right + inv_cross
def merge(left, right):
    """Merge two sorted lists, counting cross inversions (left[i] > right[j])."""
    merged = []
    inversions = 0
    i = j = 0
    while i < len(left) and j < len(right):
        if left[i] > right[j]:
            # right[j] precedes every remaining element of `left`.
            inversions += len(left) - i
            merged.append(right[j])
            j += 1
        else:
            merged.append(left[i])
            i += 1
    # Append whichever tail is left over (at most one is non-empty).
    merged.extend(left[i:])
    merged.extend(right[j:])
    return merged, inversions
@timedcall
def count_inversions_naive(array):
    """Count inversions by checking every pair -- O(n^2) reference version."""
    total = 0
    for j, value in enumerate(array):
        total += sum(1 for i in range(j) if array[i] > value)
    return total
def read_data(filepath):
    """Read one integer per line from `filepath` and return them as a list."""
    with open(filepath, 'r') as fp:
        return [int(line) for line in fp.read().splitlines()]
def parse_args():
    """Parse command-line options: --in_file, the path to the input data."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--in_file', default='data/data.txt')
    return parser.parse_args()
def main():
    """Read the input file and print its inversion count."""
    args = parse_args()
    data = read_data(args.in_file)
    inversions = count_inversions(data)
    print(inversions)


if __name__ == '__main__':
    main()
| dfridman1/algorithms-coursera | algorithms/divide_and_conquer/week2/inversions.py | inversions.py | py | 1,642 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "algorithms.utils.timedcall",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "algorithms.utils.timedcall",
"line_number": 42,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 58,
"usage_type": "call"
}
] |
38817077412 | import requests
import random
from dotenv import load_dotenv
from PIL import ImageTk, Image
from io import BytesIO
import tkinter as tk
import os
class FetchAPI():
    """Fetch random landscape photos from the Pexels search API and display
    them one by one in Tkinter windows."""
    query: str
    quantity: int
    img_width: int
    img_height: int

    # Load the Pexels API key from a .env file once, at class-definition time.
    load_dotenv()
    api_key = os.getenv('PEXELS_API_KEY')

    def __init__(self, query: str, quantity: int) -> None:
        self.img_width = 1280
        self.img_height = 720
        self.query = query
        self.quantity = quantity

    # Getter Method
    def get_query(self):
        return self.query

    def get_quantity(self):
        return self.quantity

    # Setter Method
    def set_query(self, newQuery):
        self.query = newQuery

    def set_quantity(self, newQuantity):
        # Bug fix: this previously assigned to self.query, silently leaving
        # the quantity unchanged and clobbering the query.
        self.quantity = newQuantity

    @staticmethod
    def randomNumber() -> int:
        """Return a random results-page number between 1 and 100."""
        return random.randint(1, 100)

    def fetchAPI(self):
        """Query the Pexels search API and return the decoded JSON response."""
        url = f'https://api.pexels.com/v1/search?query={self.get_query()}&per_page={self.get_quantity()}&page={self.randomNumber()}&orientation=landscape'
        headers = {'Authorization': self.api_key}
        response = requests.get(url, headers=headers)
        return response.json()

    def DisplayPhotos(self) -> None:
        """Open one topmost Tk window per photo in the API response, showing
        the medium-size image; blocks in mainloop for each window."""
        data = self.fetchAPI()
        for photo in data['photos']:
            photo_link = photo['src']['medium']
            response = requests.get(photo_link)
            image = Image.open(BytesIO(response.content))
            root = tk.Tk()
            root.wm_attributes("-topmost", 1)
            tk_image = ImageTk.PhotoImage(image)
            label = tk.Label(root, image=tk_image, text=self.get_query())
            label.pack()
            root.mainloop()
        return None
| yethuhlaing/Car-Rental | src/fetchAPI.py | fetchAPI.py | py | 2,071 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_... |
10495557667 | from dateutil import rrule
import datetime
def months_calculte(begin, end):
    """Count months inclusively between two 'YYYY-MM' strings via dateutil's rrule."""
    start = datetime.datetime.strptime(begin + '-01', '%Y-%m-%d')
    stop = datetime.datetime.strptime(end + '-01', '%Y-%m-%d')
    # One MONTHLY occurrence per month in [start, stop], both ends included.
    return rrule.rrule(rrule.MONTHLY, dtstart=start, until=stop).count()
def days_calculte(begin, end):
    """Return the inclusive number of days between two 'YYYY-MM-DD' strings."""
    y1, m1, d1 = (int(part) for part in begin.split('-'))
    y2, m2, d2 = (int(part) for part in end.split('-'))
    span = datetime.date(y2, m2, d2) - datetime.date(y1, m1, d1)
    # +1 makes the range inclusive of both endpoints.
    return span.days + 1
def years_calculte(begin, end):
    """Return the inclusive count of years between two 'YYYY' strings."""
    return int(end) - int(begin) + 1
def dateRange(begin, end):
    """Return the sorted list of consecutive date strings from begin to end.

    The granularity is inferred from the string length: 4 -> years,
    7 -> 'YYYY-MM' months, otherwise 'YYYY-MM-DD' days. Both ends inclusive.
    """
    if len(begin) == 4:
        first = int(begin)
        return sorted(str(first + offset) for offset in range(int(end) - first + 1))
    fmt = "%Y-%m" if len(begin) == 7 else "%Y-%m-%d"
    seen = set()
    current = begin[:]
    cursor = datetime.datetime.strptime(begin, fmt)
    # Walk forward one day at a time; the set collapses duplicate month labels.
    while current <= end:
        seen.add(current)
        cursor = cursor + datetime.timedelta(1)
        current = cursor.strftime(fmt)
    return sorted(seen)
def date_parmas_check(params):
    """Validate a stats query's time parameters.

    Requires 'time_kind' plus 'start_time'/'end_time', and checks that the
    time strings have the length expected for that kind (year/month/day).
    Returns (ok, message).
    """
    kind = params.get('time_kind')
    if not kind:
        return False, '请表明要查寻的时间格式!'
    start, end = params.get('start_time'), params.get('end_time')
    if not start or not end:
        return False, '缺少时间范围!'
    expected_len = {'month': 7, 'year': 4, 'day': 10}
    error_msg = {
        'month': '按月统计时间范围有误!',
        'year': '按年统计时间范围有误!',
        'day': '按日统计时间范围有误!',
    }
    if kind in expected_len and (len(start) != expected_len[kind] or len(end) != expected_len[kind]):
        return False, error_msg[kind]
    return True, 'success'
def date_up(begin, several):
    """Advance a date string so that [begin, result] spans `several` units.

    The unit matches the format of `begin`: 'YYYY' -> years (returns an int),
    'YYYY-MM' -> months, 'YYYY-MM-DD' -> days. several == 0 returns `begin`
    unchanged; otherwise the span includes `begin` itself, so the offset
    applied is `several - 1`.
    """
    if several == 0:
        return begin
    several = several - 1
    if len(begin) == 4:
        # Year granularity (note: returns int, matching the original contract).
        return int(begin) + several
    elif len(begin) == 7:
        parts = begin.split('-')
        total_months = int(parts[1]) + several
        # BUG FIX: the old `m % 12` produced month 0 whenever total_months was
        # a multiple of 12 greater than 12 (e.g. '2020-12' + 13 -> '2022-0').
        # The 1-based month arithmetic below maps 12 -> December correctly.
        y = int(parts[0]) + (total_months - 1) // 12
        m = (total_months - 1) % 12 + 1
        return str(y) + '-' + str(m)
    else:
        parts = begin.split('-')
        start = datetime.date(int(parts[0]), int(parts[1]), int(parts[2]))
        result_date = start + datetime.timedelta(days=several)
        return result_date.strftime('%Y-%m-%d')
def mix_min_check(start, end):
    """Return True when start <= end, comparing dash-separated date strings numerically."""
    start_value = int(start.replace('-', ''))
    end_value = int(end.replace('-', ''))
    return start_value <= end_value
| rantengfei/python-utility | compute_time.py | compute_time.py | py | 3,098 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.strptime",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 8,
"usage_type": "call"
},
{
"api_nam... |
from pymongo import MongoClient
import time

# Connect to the local MongoDB instance and open the tweets collection of the
# 'sahamyab' database.
client = MongoClient('localhost', 27017)
db = client['sahamyab']
series_collection = db['tweets']

start_time = time.time()
# Bulk-tag every tweet carrying one of these Persian stock-symbol hashtags with
# gov=True -- presumably marking government-affiliated tickers; confirm intent.
series_collection.update_many(
    {'hashtags':{'$in': ['فولاد', 'شستا', 'شبندر'] }},
    {'$set':{'gov': True }})
end_time = time.time()

# Report how long the bulk update took, in seconds.
delta_time = end_time - start_time
print(delta_time)
| masoudrahimi39/Big-Data-Hands-On-Projects | NoSQL Databases (Cassandra, MongoDB, Neo4j, Elasticsearch)/MongoDB/1000 twiits/game3_2.py | game3_2.py | py | 397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
}
] |
16810461794 | import json
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.decorators import login_required
from django.core import serializers
from django.core.files.uploadhandler import FileUploadHandler
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response, redirect
from django.template import RequestContext
from django.views.decorators.csrf import csrf_exempt
from outfit.forms import UserForm, ClothesForm
from outfit.models import Clothes, User
def register(request):
    """Create a new account from the submitted form, or show an empty sign-up form.

    On a valid POST the user is saved and authenticated, then the page is
    re-rendered; invalid POSTs re-render the bound form with its errors.
    """
    if request.method == 'POST':
        registration_form = UserForm(request.POST)
        if registration_form.is_valid():
            saved_user = registration_form.save()
            # allows users to be redirected to home page after register
            messages.info(request, "Thanks for registering.")
            authenticated_user = authenticate(
                username=request.POST['username'],
                password=request.POST['password1'])
    else:
        registration_form = UserForm()
    return render(request, 'registration/register.html', {
        'form': registration_form,
    })
def login_redirect(request):
    """Send the user to the view matching their gender flag after login."""
    destination = 'profile' if request.user.gender == 'M' else 'girly'
    return redirect(destination)
def profile(request):
    """Wardrobe page: list all clothes grouped by type and handle new uploads.

    On a valid POST the uploaded item is attached to the current user and the
    browser is redirected back to /profile; otherwise the (possibly bound)
    form is re-rendered alongside the grouped clothing querysets.
    """
    form = ClothesForm()
    if request.method == 'POST':
        form = ClothesForm(request.POST, request.FILES)
        if form.is_valid():
            item = form.save(commit=False)
            item.client = request.user
            item.save()
            return HttpResponseRedirect('/profile')
    context = {
        'form': form,
        'big': Clothes.objects.all(),
        'clothes_tops': Clothes.objects.filter(type='T'),
        'bottoms': Clothes.objects.filter(type='B'),
        'accessories': Clothes.objects.filter(type='A'),
        'shoes': Clothes.objects.filter(type='S'),
        'headwear': Clothes.objects.filter(type='H'),
    }
    return render_to_response('profile.html', RequestContext(request, context))
def girly(request):
    """Wardrobe page rendered with the girly.html template.

    Mirrors `profile`: lists all clothes grouped by type and handles new
    clothing uploads for the logged-in user.
    """
    big = Clothes.objects.all()
    # NOTE(review): removed the dead `useall = User.all()` line. Its result was
    # never used, and a Django model exposes `.objects.all()` rather than
    # `.all()`, so the call raised AttributeError on every request -- confirm
    # `User` has no custom `all()` classmethod before relying on this page.
    if request.method == 'POST':
        form = ClothesForm(request.POST, request.FILES)
        if form.is_valid():
            clothes = form.save(commit=False)
            clothes.client = request.user
            clothes.save()
            # FileUploadHandler(request.FILES['image'])
            return HttpResponseRedirect('/profile')
    else:
        form = ClothesForm()
    clothes_tops = Clothes.objects.filter(type='T')
    clothes_bottoms = Clothes.objects.filter(type='B')
    clothes_accessories = Clothes.objects.filter(type='A')
    clothes_shoes = Clothes.objects.filter(type='S')
    clothes_headwear = Clothes.objects.filter(type='H')
    return render_to_response('girly.html', RequestContext(request,
        {'form': form, 'big': big, 'clothes_tops': clothes_tops, 'bottoms': clothes_bottoms,
         'accessories': clothes_accessories, 'shoes': clothes_shoes, 'headwear': clothes_headwear,}))
| SeanKapus/Fashion | outfit/views.py | views.py | py | 3,292 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "outfit.forms.UserForm",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.info",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages",
"line_number": 22,
"usage_type": "name"
},
{
"api_na... |
7813076216 | from datetime import datetime
from typing import Dict, List
import pytest
import sqlalchemy as sa
from httpx import AsyncClient
from sqlalchemy.ext.asyncio import AsyncSession
from aspen.api.views.tests.data.auth0_mock_responses import DEFAULT_AUTH0_USER
from aspen.auth.auth0_management import Auth0Client
from aspen.database.models import User
from aspen.test_infra.models.usergroup import group_factory, userrole_factory
# Apply the asyncio marker to every test coroutine in this module so pytest
# runs them under an event loop.
pytestmark = pytest.mark.asyncio
async def test_users_me(http_client: AsyncClient, async_session: AsyncSession) -> None:
    """GET /v2/users/me returns the authenticated user's profile and group roles."""
    group = group_factory()
    user = await userrole_factory(async_session, group)
    async_session.add(group)
    await async_session.commit()
    response = await http_client.get(
        "/v2/users/me", headers={"user_id": user.auth0_user_id}
    )
    assert response.status_code == 200
    expected = {
        "id": 1,
        "name": "test",
        "acknowledged_policy_version": None,
        "agreed_to_tos": True,
        "groups": [
            {"id": group.id, "name": group.name, "roles": ["member"]},
        ],
        "gisaid_submitter_id": None,
    }
    resp_data = response.json()
    # Compare field by field so extra response keys do not fail the test.
    for key in expected:
        assert resp_data[key] == expected[key]
    # split_id / analytics_id are generated values; only their length is stable.
    assert len(resp_data["split_id"]) == 20
    assert len(resp_data["analytics_id"]) == 20
async def test_users_view_put_pass(
    auth0_apiclient: Auth0Client,
    http_client: AsyncClient,
    async_session: AsyncSession,
):
    """PUT /v2/users/me persists each updatable field to the database."""
    group = group_factory()
    user = await userrole_factory(async_session, group, agreed_to_tos=False)
    async_session.add(group)
    await async_session.commit()
    new_name = "Alice Alison"
    auth0_apiclient.update_user.return_value = DEFAULT_AUTH0_USER.copy().update(  # type: ignore
        name=new_name
    )
    headers = {"user_id": user.auth0_user_id}
    requests: List[Dict] = [
        {"agreed_to_tos": True, "acknowledged_policy_version": "2022-06-22"},
        {"agreed_to_tos": False},
        {"acknowledged_policy_version": "2020-07-22"},
        {"name": new_name},
        {"gisaid_submitter_id": "alice_phd"},
    ]
    for req in requests:
        res = await http_client.put("/v2/users/me", headers=headers, json=req)
        assert res.status_code == 200
        # start a new transaction so we read committed state, not the session cache
        await async_session.close()
        async_session.begin()
        updated_user = (
            (
                await async_session.execute(
                    sa.select(User).filter(User.auth0_user_id == user.auth0_user_id)  # type: ignore
                )
            )
            .scalars()
            .one()
        )
        if "agreed_to_tos" in req:
            assert updated_user.agreed_to_tos == req["agreed_to_tos"]
        # BUG FIX: the key was misspelled "acknowledged_policy_verison", so this
        # assertion was dead code and never verified the persisted date.
        if "acknowledged_policy_version" in req:
            assert (
                updated_user.acknowledged_policy_version
                == datetime.strptime(
                    req["acknowledged_policy_version"], "%Y-%m-%d"
                ).date()
            )
        if "name" in req:
            assert updated_user.name == req["name"]
        if "gisaid_submitter_id" in req:
            assert updated_user.gisaid_submitter_id == req["gisaid_submitter_id"]
async def test_usergroup_view_put_fail(
    http_client: AsyncClient, async_session: AsyncSession
):
    """PUT /v2/users/me rejects payloads with invalid field types or formats."""
    group = group_factory()
    user = await userrole_factory(async_session, group, agreed_to_tos=False)
    async_session.add(group)
    await async_session.commit()
    headers = {"user_id": user.auth0_user_id}
    bad_payloads = [
        {"agreed_to_tos": 11, "acknowledged_policy_version": "2022-06-22"},
        {"agreed_to_tos": True, "acknowledged_policy_version": "hello"},
    ]
    for payload in bad_payloads:
        response = await http_client.put("/v2/users/me", headers=headers, json=payload)
        assert response.status_code == 422
| chanzuckerberg/czgenepi | src/backend/aspen/api/views/tests/test_users.py | test_users.py | py | 3,879 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "pytest.mark",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "httpx.AsyncClient",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.ext.asyncio.AsyncSession",
"line_number": 18,
"usage_type": "name"
},
{
"api_name":... |
14248952393 | from collections import deque
#올바른 괄호열인지 판단 하는 함수
def isCorrect(p):
lst = [] #문자열의 문자를 하나하나 담을 lst
for i in range(len(p)):
if p[i] == '(': # 만약 열린 괄호이면 리스트에 넣는다.
lst.append(p[i])
elif p[i] == ')': # 만약 닫힌 괄호라면
if len(lst) == 0: # 닫힌 괄호인데 lst가 빈 상태라면
return False # 올바른 문자열이 아니다.
lst.pop() # lst에 있는 열린 괄호를 하나 뽑는다.
if len(lst): # lst에 열린 괄호가 남은 상태라면 올바른 문자열이 아니다.
return False
return True
def solution(p): #p는 균형잡힌 문자(열린 괄호와 닫힌 괄호가 같다.)
#종료 조건 -> 빈 문자열이라면 빈 문자열 반환
if p == "" or isCorrect(p): # 또는 문자열이 처음부터 올바른 문자열이라면 그대로 반환
return p
length = len(p) # 문자열의 길이를 담는다.
u = ""
v = ""
q = deque([p[0]]) # 문자열의 첫 번째 괄호를 큐에 넣는다.
idx = 0 # 문자열의 첫 번째 괄호를 큐에 넣고 index는 0으로 초기화(p에서 인덱스 0을 가리킴)
# 균형잡힌 문자열이 끝난 지점을 idx로 찾아나가는 작업
while q:
if q[-1] == p[idx + 1]: #만약 큐에 있는 괄호와 같은 괄호라면
q.append(p[idx + 1]) #큐에 집어넣는다.
idx += 1 # 현재 index를 다음 index로 업데이트
else: # 큐에 있는 괄호와 다른 괄호라면
q.pop() # 큐에 있는 괄호 하나를 제거
idx += 1 # index 업데이트
#인덱스를 기준으로 u와 v를 나누기
u = p[:idx + 1] # u는 더 이상 분리할 수 없는 균형잡힌 문자열을 담고 있다.
v = p[idx + 1:] # 나머지 열은 v에 담기
#만약 u가 '올바른 괄호 문자열'이라면 -> 즉 u의 시작 문자열이 열린 괄호라면(u가 균형잡힌 문자열이므로) v에 대하여 1단계부터 다시 수행
if u[0] == '(':
return u + solution(v)
else: #그렇지 않다면(u가 올바른 괄호가 아니라면)
answer = ""
p = solution(v)
#u의 앞 뒤 문자 제거하고 괄호 뒤집기
u = list(u)
u[0] = ""
u[-1] = ""
u = ''.join(u) #u를 다시 문자열로 변경
if u != "":
for i in range(len(u)):
if u[i] == '(':
u = list(u)
u[i] = ')'
u = ''.join(u)
else:
u = list(u)
u[i] = '('
u = ''.join(u)
answer += "(" + p + ")" + u
return answer | vmfaldwntjd/Algorithm | Programmers/DFS,BFS/괄호 변환/Programmers.py | Programmers.py | py | 2,869 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 24,
"usage_type": "call"
}
] |
17567691479 | from inpladesys.datatypes import Segment, Segmentation
from typing import List
import numpy as np
from inpladesys.datatypes.dataset import Dataset
from collections import Counter
from sklearn.model_selection import train_test_split
import time
import scipy.stats as st
def generate_segmentation(preprocessed_documents: List[List[tuple]], documents_features: List[np.ndarray],
                          document_label_lists, documents, task=None) -> List[Segmentation]:
    """Build one Segmentation per document from token spans and author labels.

    Each preprocessed token is a tuple whose [1]/[2] entries are its character
    start/end offsets; labels[k] is the author of token k. For task 'a'
    (plagiarism detection) the labels are post-processed so the majority
    author becomes label 0.
    """
    assert len(documents_features) == len(preprocessed_documents)
    segmentations = []
    for doc_index, doc_features in enumerate(documents_features):
        tokens = preprocessed_documents[doc_index]
        assert doc_features.shape[0] == len(tokens)
        labels = document_label_lists[doc_index]
        segments = [
            Segment(offset=token[1],
                    length=token[2] - token[1],
                    author=labels[pos])
            for pos, token in enumerate(tokens)
        ]
        segmentations.append(Segmentation(author_count=max(labels) + 1,
                                          segments=segments,
                                          max_repairable_error=60,
                                          document_length=len(documents[doc_index])))
    if task == 'a':
        for segmentation in segmentations:
            fix_segmentation_labels_for_plagiarism_detection(segmentation)
    return segmentations
def fix_segmentation_labels_for_plagiarism_detection(segmentation, plagiarism_majority=False):
    """Relabel a two-author segmentation so label 0 covers the most characters.

    By convention label 0 is the original author (the majority of the text).
    If `plagiarism_majority` is True the swap condition is inverted instead.
    The segmentation is modified in place.
    """
    assert segmentation.author_count == 2
    original_segments = segmentation.by_author[0]
    plagiarized_segments = segmentation.by_author[1]
    original_total = sum(seg.length for seg in original_segments)
    plagiarized_total = sum(seg.length for seg in plagiarized_segments)
    needs_swap = original_total < plagiarized_total
    if plagiarism_majority:
        needs_swap = not needs_swap
    if needs_swap:
        for seg in segmentation:
            seg.author = 1 - seg.author
        segmentation.by_author[0] = plagiarized_segments
        segmentation.by_author[1] = original_segments
def custom_train_test_split(preprocessed_documents: List[List[tuple]], documents_features: List[np.ndarray],
                            dataset: Dataset, train_size, random_state):
    """Split all per-document structures consistently into train/test parts.

    Document indices are split once with sklearn's train_test_split, then the
    same index partition is applied to tokens, features, author counts and the
    Dataset itself.
    """
    doc_indices = list(range(len(preprocessed_documents)))
    train_idx, test_idx = train_test_split(doc_indices, train_size=train_size, random_state=random_state)

    def select(items, indices):
        # One-line helper: gather items at the given document indices.
        return [items[i] for i in indices]

    prep_docs_train = select(preprocessed_documents, train_idx)
    prep_docs_test = select(preprocessed_documents, test_idx)
    doc_features_train = select(documents_features, train_idx)
    doc_features_test = select(documents_features, test_idx)
    author_counts_train = [dataset.segmentations[i].author_count for i in train_idx]
    author_counts_test = [dataset.segmentations[i].author_count for i in test_idx]
    dataset_train = Dataset(select(dataset.documents, train_idx),
                            select(dataset.segmentations, train_idx))
    dataset_test = Dataset(select(dataset.documents, test_idx),
                           select(dataset.segmentations, test_idx))
    return (prep_docs_train, prep_docs_test,
            doc_features_train, doc_features_test,
            author_counts_train, author_counts_test,
            dataset_train, dataset_test)
def find_cluster_for_noisy_samples(predicted_labels, context_size=10):
    """Reassign noise labels (-1, as produced by e.g. DBSCAN) in place.

    Each noisy sample takes the most frequent non-noise label found within a
    window of +/- `context_size` positions around it; if a window (or the whole
    sequence) contains only noise, those entries become label 0.
    Expects a numpy array: the all-noise path uses broadcast slice assignment.

    Returns the number of samples that were labelled -1 on entry.
    (Removed the dead `start = time.time()` timing code, whose result was
    only used by a commented-out print.)
    """
    total = len(predicted_labels)
    counts = Counter(predicted_labels)
    noisy = counts[-1]
    unclustered_label = 0
    if -1 in counts.keys():
        if len(counts.most_common()) == 1:
            # Every sample is noise: collapse them all into one cluster.
            predicted_labels[:] = unclustered_label
        else:
            for i in range(total):
                if predicted_labels[i] == -1:
                    left = i - context_size if i - context_size >= 0 else 0
                    right = i + context_size if i + context_size < total else total
                    window = Counter(predicted_labels[left:right])
                    if -1 in window.keys():
                        if len(window.most_common()) == 1:
                            # The whole window is noise.
                            predicted_labels[left:right] = unclustered_label
                        else:
                            # Most frequent real (non-noise) label in the window.
                            for label, _ in window.most_common():
                                if label != -1:
                                    predicted_labels[i] = label
                                    break
    return noisy
def perform_confidence_interval_test(samples: List, c_interval=0.95, p_normal_threshold=0.05):
    """Test `samples` for normality and report the t-based CI of the mean.

    Uses scipy's D'Agostino-Pearson omnibus test; if normality is rejected
    (p < p_normal_threshold) a message is printed and None is returned.
    Otherwise prints and returns (mean, standard_error, interval).
    (Previously the function always returned None; returning the computed
    values as well is backward compatible for callers ignoring the result.)
    """
    n = len(samples)
    # https://docs.scipy.org/doc/scipy-0.19.0/reference/generated/scipy.stats.normaltest.html
    z, p_val = st.normaltest(samples, nan_policy='raise')
    if p_val < p_normal_threshold:
        print('A given sample is not from normal distribution: '
              'p_val = {} < threshold = {}'.format(p_val, p_normal_threshold))
        print('The confidence intervals cannot be calculated.')
        return None
    sem = st.sem(samples)
    mean = np.mean(samples)
    # https://stackoverflow.com/questions/15033511/compute-a-confidence-interval-from-sample-data/34474255#34474255
    interval = st.t.interval(c_interval, n - 1, loc=mean, scale=sem)
    print('Mean:', mean)
    print('Standard error:', sem)
    print('{}% confidence interval: {}\n'.format(c_interval * 100, interval))
    return mean, sem, interval
| Coolcumber/inpladesys | software/inpladesys/models/misc/misc.py | misc.py | py | 5,944 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "inpladesys.datatypes.Segment",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "inpladesy... |
18316576813 | import functools
import inspect
import types
from typing import Dict, List, Optional, Type, Union
import pytest
import servo.utilities.inspect
class OneClass:
    """Fixture: base class exposing three no-op instance methods."""
    def one(self) -> None:
        ...
    def two(self) -> None:
        ...
    def three(self) -> None:
        ...
class TwoClass(OneClass):
    """Fixture: adds two methods on top of OneClass's three."""
    def four(self) -> None:
        ...
    def five(self) -> None:
        ...
class ThreeClass(TwoClass):
    """Fixture: third level of the inheritance chain, adds one method."""
    def six(self) -> None:
        ...
@pytest.mark.parametrize(
    "cls, stop_at_parent, method_names",
    [
        # Without stop_at_parent, only methods defined on the class itself.
        (OneClass, None, ["one", "two", "three"]),
        (TwoClass, None, ["four", "five"]),
        # With stop_at_parent, inherited methods up to that ancestor are
        # included, ancestors' methods listed first.
        (TwoClass, OneClass, ["one", "two", "three", "four", "five"]),
        (ThreeClass, OneClass, ["one", "two", "three", "four", "five", "six"]),
        (ThreeClass, TwoClass, ["four", "five", "six"]),
    ],
)
def test_get_instance_methods(cls, stop_at_parent, method_names) -> None:
    """get_instance_methods returns the expected method names in order."""
    methods = servo.utilities.inspect.get_instance_methods(
        cls, stop_at_parent=stop_at_parent
    )
    assert list(methods.keys()) == method_names
def test_get_instance_methods_invalid_parent() -> None:
    """A stop_at_parent class outside the MRO raises a descriptive TypeError."""
    with pytest.raises(TypeError) as excinfo:
        servo.utilities.inspect.get_instance_methods(OneClass, stop_at_parent=int)
    expected_message = (
        "invalid parent type \"<class 'int'>\": not found in inheritance hierarchy"
    )
    assert str(excinfo.value) == expected_message
def test_get_instance_methods_returns_bound_methods_if_possible() -> None:
    """When given an instance, every returned entry is a bound method."""
    methods = servo.utilities.inspect.get_instance_methods(
        ThreeClass(), stop_at_parent=OneClass
    )
    assert list(methods.keys()) == ["one", "two", "three", "four", "five", "six"]
    assert all(inspect.ismethod(method) for method in methods.values())
def test_get_instance_methods_returns_finds_dynamic_instance_methods() -> None:
    """Methods attached to an instance at runtime are also discovered."""
    def seven() -> None:
        ...

    instance = ThreeClass()
    instance.seven = types.MethodType(seven, instance)
    methods = servo.utilities.inspect.get_instance_methods(
        instance, stop_at_parent=OneClass
    )
    expected_names = ["one", "two", "three", "four", "five", "six", "seven"]
    assert list(methods.keys()) == expected_names
    assert all(inspect.ismethod(method) for method in methods.values())
def test_get_instance_methods_returns_ignores_attributes() -> None:
    """Plain (non-callable) class attributes are not reported as methods."""
    class FourClass(ThreeClass):
        ignore_me: str = "ignore_me"

    instance = FourClass()
    methods = servo.utilities.inspect.get_instance_methods(
        instance, stop_at_parent=OneClass
    )
    assert list(methods.keys()) == ["one", "two", "three", "four", "five", "six"]
    assert all(inspect.ismethod(method) for method in methods.values())
def test_resolution_none() -> None:
    """A literal `None` annotation and the string "None" resolve identically.

    NOTE(review): this definition is shadowed by an identical
    `test_resolution_none` immediately below, so pytest only collects the
    later one -- rename or delete one of the two.
    """
    def test_type() -> None:
        ...

    def test_str() -> "None":
        ...

    res_type, res_str = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
    )
    assert res_type == res_str
def test_resolution_none() -> None:
    """A literal `None` annotation and the string "None" resolve identically.

    NOTE(review): byte-identical duplicate of the `test_resolution_none`
    defined just above; this later definition shadows the earlier one, so
    only one of the two runs -- deduplicate.
    """
    def test_type() -> None:
        ...

    def test_str() -> "None":
        ...

    res_type, res_str = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
    )
    assert res_type == res_str
def test_aliased_types() -> None:
    """Every spelling of Duration resolves to the same concrete type.

    Covers the fully-qualified path, the module alias, the bare name, and the
    string (forward-reference) form of each, resolved against this module's
    globals/locals.
    """
    import servo
    import servo.types
    from servo import types
    from servo.types import Duration

    def test_type_path() -> servo.types.Duration:
        ...

    def test_type_abbr() -> types.Duration:
        ...

    def test_type() -> Duration:
        ...

    def test_str_path() -> "servo.types.Duration":
        ...

    def test_str_abbr() -> "types.Duration":
        ...

    def test_str() -> "Duration":
        ...

    resolved = servo.utilities.inspect.resolve_type_annotations(
        inspect.Signature.from_callable(test_type_path).return_annotation,
        inspect.Signature.from_callable(test_type_abbr).return_annotation,
        inspect.Signature.from_callable(test_type).return_annotation,
        inspect.Signature.from_callable(test_str_path).return_annotation,
        inspect.Signature.from_callable(test_str_abbr).return_annotation,
        inspect.Signature.from_callable(test_str).return_annotation,
        globalns=globals(),
        localns=locals(),
    )
    # All six annotations collapse to the single Duration type.
    assert set(resolved) == {Duration}
# TODO: Compare compound return types, generic, skipping arguments...
# None, None.__class__, 'None'
# Optional[str], Dict[str, int], Dict[str, List[float]]
# omit argument, extra argument, argument with wrong type
# @pytest.mark.parametrize(
# "reference_callable"
# )
import typing
from typing import Any
def test_equal_callable_descriptors() -> None:
    """Descriptor comparison rejects incompatible returns, accepts compatible ones.

    `Dict` vs `Dict[str, Any]` must raise with a descriptive message, while
    `Dict[str, Any]` vs `Dict[str, int]` is accepted (Any is compatible with a
    concrete value type).
    """
    import servo
    import servo.types

    def test_one() -> typing.Dict:
        ...

    def test_two() -> typing.Dict[str, Any]:
        ...

    def test_three() -> typing.Dict[str, int]:
        ...

    def test_four() -> typing.Dict[float, str]:
        ...

    sig1 = inspect.Signature.from_callable(test_one)
    sig2 = inspect.Signature.from_callable(test_two)
    # Bare Dict vs parameterized Dict[str, Any] must be rejected.
    with pytest.raises(TypeError) as e:
        servo.utilities.inspect.assert_equal_callable_descriptors(
            servo.utilities.inspect.CallableDescriptor(
                signature=sig1, globalns=globals(), localns=locals()
            ),
            servo.utilities.inspect.CallableDescriptor(
                signature=sig2, globalns=globals(), localns=locals()
            ),
        )
    assert (
        str(e.value)
        == 'invalid callable "() -> Dict": incompatible return type annotation "typing.Dict[str, typing.Any]" in callable signature "() -> Dict[str, Any]", expected "typing.Dict"'
    )
    # Dict[str, Any] vs Dict[str, int] is accepted without raising.
    servo.utilities.inspect.assert_equal_callable_descriptors(
        servo.utilities.inspect.CallableDescriptor(
            signature=inspect.Signature.from_callable(test_two),
            globalns=globals(),
            localns=locals(),
        ),
        servo.utilities.inspect.CallableDescriptor(
            signature=inspect.Signature.from_callable(test_three),
            globalns=globals(),
            localns=locals(),
        ),
    )
# before_handler_signature = inspect.Signature.from_callable(__before_handler)
# servo.utilities.inspect.assert_equal_callable_descriptors(
# servo.utilities.inspect.CallableDescriptor(signature=before_handler_signature, module=event.module, globalns=event_globalns, localns=None),
# servo.utilities.inspect.CallableDescriptor(signature=handler_signature, module=handler_module, globalns=handler_globalns, localns=handler_localns),
# name=name,
# )
# servo.utilities.inspect.assert_equal_callable_descriptors()
# ...
# Shorthand alias exercised by the special-form cases in the table below.
MaybeNumeric = Optional[Union[float, int]]
@pytest.mark.parametrize(
    "types_, error_message",
    [
        # Success cases
        ([dict, dict], None),
        ([str, str], None),
        ([None, None], None),
        ([List[str], List[str]], None),
        ([Dict[str, int], Dict[str, int]], None),
        ([dict[str, int], Dict[str, int]], None),
        ([Any, str], None),
        ([Any, List[str]], None),
        ([List[Any], List[str]], None),
        ([Dict[str, Any], Dict[str, int]], None),
        # Subclassing
        ([OneClass, TwoClass], None),
        ([List[OneClass], List[TwoClass]], None),
        ([Dict[str, OneClass], Dict[str, TwoClass]], None),
        # Special forms
        ([MaybeNumeric, MaybeNumeric], None),
        ([MaybeNumeric, Optional[Union[int, float]]], None),
        # ---
        # Failure cases
        (
            [dict, int],
            "Incompatible type annotations: expected <class 'dict'>, but found <class 'int'>",
        ),
        (
            [Dict[str, int], dict],
            "Incompatible type annotations: expected typing.Dict[str, int], but found <class 'dict'>",
        ),
        (
            [List[str], List[int]],
            "Incompatible type annotations: expected typing.List[str], but found <class 'str'>",
        ),
        (
            [MaybeNumeric, float],
            "Incompatible type annotations: expected typing.Union[float, int, NoneType], but found <class 'float'>",
        ),
        (
            [dict, Dict[str, Any]],
            "Incompatible type annotations: expected <class 'dict'>, but found typing.Dict[str, typing.Any]",
        ),
        (
            [TwoClass, MaybeNumeric],
            "Incompatible type annotations: expected <class 'inspect_test.TwoClass'>, but found typing.Union[float, int, NoneType]",
        ),
        (
            [TwoClass, OneClass],
            "Incompatible type annotations: expected <class 'inspect_test.OneClass'>, but found <class 'inspect_test.OneClass'>",
        ),
    ],
)
def test_assert_equal_types(types_: List[Type], error_message: Optional[str]) -> None:
    """assert_equal_types accepts compatible pairs and raises with the exact
    message for incompatible ones (error_message is None for success cases)."""
    if error_message:
        with pytest.raises(TypeError) as e:
            servo.utilities.inspect.assert_equal_types(*types_)
        assert str(e.value) == error_message
    else:
        servo.utilities.inspect.assert_equal_types(*types_)
| opsani/servox | tests/utilities/inspect_test.py | inspect_test.py | py | 9,377 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "servo.utilities.inspect.utilities.inspect.get_instance_methods",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "servo.utilities.inspect.utilities",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "servo.utilities.inspect",
"line_number"... |
34222159351 | from django.shortcuts import render, redirect
from .models import Aricle
from .forms import ArticleForm
def new(request):
    """Create a blog article from a submitted form, or show an empty form on GET.

    Valid POSTs save the article and redirect to its detail page; invalid
    POSTs re-render the bound form with its validation errors.
    """
    if request.method == 'POST':
        article_form = ArticleForm(request.POST)
        if article_form.is_valid():
            created = article_form.save()
            return redirect('blog:detail', created.id)
    elif request.method == 'GET':
        article_form = ArticleForm()
    return render(request, 'blog/form_new.html', {
        'article_form': article_form,
    })
| kimhyunso/exampleCode | django/MTV/blog/new_views.py | new_views.py | py | 542 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "forms.ArticleForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "forms.ArticleForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.s... |
9766193824 | import sys
import librosa
import numpy as np
#import soundfile as sf
import functools
import torch
#from torch.nn.functional import cosine_similarity
#import essentia.standard as es
def logme(f):
    """Decorator that prints a banner with the wrapped callable's name on each call."""
    @functools.wraps(f)
    def banner_and_call(*args, **kwargs):
        print('\n-----------------\n')
        print(' MODEL: {}'.format(f.__name__.upper()))
        print('\n-----------------\n')
        return f(*args, **kwargs)
    return banner_and_call
class ProgressBar:
    """In-place text progress bar written to stdout.

    valmax -- total number of steps (coerced to at least 1)
    maxbar -- width of the bar in characters (capped at 200)
    title  -- label rendered to the left of the bar
    """

    def __init__(self, valmax, maxbar, title):
        self.valmax = valmax if valmax != 0 else 1
        self.maxbar = maxbar if maxbar <= 200 else 200
        self.title = title
        print('')

    def update(self, val, avg_loss=0):
        """Redraw the bar for step `val`; append the running loss when given."""
        # Clamp to the configured maximum so the bar never overflows.
        if val > self.valmax:
            val = self.valmax
        percent = round((float(val) / float(self.valmax)) * 100)
        scale = 100.0 / float(self.maxbar)
        filled = int(percent / scale)
        if avg_loss:
            out = '\r %20s [%s%s] %3d / %3d loss: %.5f' % (
                self.title,
                '=' * filled, ' ' * (self.maxbar - filled),
                val,
                self.valmax,
                avg_loss,
            )
        else:
            out = '\r %20s [%s%s] %3d / %3d ' % (self.title, '=' * filled, ' ' * (self.maxbar - filled), val, self.valmax)
        sys.stdout.write(out)
        sys.stdout.flush()
def pad(l, sr):
    """Zero-pad signal `l` to 10 seconds at rate `sr` and add a tiny noise floor.

    Raises ValueError (via broadcasting) when the signal is longer than 10 s.
    """
    padded = np.zeros(10 * sr, dtype='float32')
    padded[:l.size] = l
    # Uniform noise in [0, 5e-4) keeps later log-spectrogram values finite.
    noise = 5 * 1e-4 * np.random.rand(padded.size).astype('float32')
    return padded + noise
def compute_spectrogram(filename, sr=22000, n_mels=96):
    """Load an audio file, zero-pad it to 10 s, and return its log-mel spectrogram.

    Returns an (n_mels, frames) array of natural-log magnitude mel bands.
    """
    # zero pad and compute log mel spec
    try:
        audio, sr = librosa.load(filename, sr=sr, res_type='kaiser_fast')
    except:
        # NOTE(review): fallback path calls `sf` (soundfile), but that import
        # is commented out at the top of this file, so this branch raises
        # NameError -- confirm and restore the import.
        audio, o_sr = sf.read(filename)
        audio = librosa.core.resample(audio, o_sr, sr)
    try:
        x = pad(audio, sr)
    except ValueError:
        # Audio longer than 10 s cannot be padded into the fixed buffer;
        # use it as-is.
        x = audio
    audio_rep = librosa.feature.melspectrogram(y=x, sr=sr, hop_length=512, n_fft=1024, n_mels=n_mels, power=1.)
    audio_rep = np.log(audio_rep + np.finfo(np.float32).eps)
    return audio_rep
def return_spectrogram_max_nrg_frame(spectrogram):
    """Return the single 96-column window of `spectrogram` with the highest total energy."""
    windows = librosa.util.frame(np.asfortranarray(spectrogram), frame_length=96, hop_length=12)
    energies = np.sum(np.sum(windows, axis=0), axis=0)
    return windows[:, :, np.argmax(energies)]
def return_spectrogram_3_max_nrg_frames(spectrogram):
    """Return the three 96-column windows with the highest total energy, energy-sorted."""
    windows = librosa.util.frame(np.asfortranarray(spectrogram), frame_length=96, hop_length=12)
    energies = np.sum(np.sum(windows, axis=0), axis=0)
    top_three = (-energies).argsort()[:3]
    return windows[:, :, top_three]
def spectrogram_to_audio(filename, y, sr=22000):
    """Invert a log-mel spectrogram `y` back to a waveform and write it to `filename`.

    Undoes the `np.log` applied by `compute_spectrogram` before inversion.
    """
    y = np.exp(y)
    x = librosa.feature.inverse.mel_to_audio(y, sr=sr, n_fft=1024, hop_length=512, power=1.)
    # NOTE(review): `librosa.output.write_wav` was removed in librosa 0.8 --
    # confirm the pinned librosa version or switch to `soundfile.write`.
    librosa.output.write_wav(filename, x, sr)
def extract_spectrogram(filename, sr=16000, n_mels=48):
    """Compute a 48-band mel spectrogram (dB, transposed) for the centered ~29.1 s of a file.

    NOTE(review): depends on the essentia-based helpers below (`cut_audio`,
    `melspectrogram`), whose `essentia.standard` import is commented out at
    the top of this file -- this path raises NameError until it is restored.
    NOTE(review): the `n_mels` parameter is unused; the band count is
    hard-coded to 48 in `numberBands` and in the result key.
    """
    audio = cut_audio(filename, sampleRate=sr, segment_duration=29.1)
    frames = melspectrogram(audio, sampleRate=sr, frameSize=512, hopSize=256, numberBands=[48],
                            warpingFormula='slaneyMel', window='hann', normalize='unit_tri')
    return frames['mel_48_db'].T
def melspectrogram(audio, sampleRate=44100, frameSize=2048, hopSize=1024,
                   window='blackmanharris62', zeroPadding=0, center=True,
                   numberBands=[128, 96, 48, 32, 24, 16, 8],
                   lowFrequencyBound=0, highFrequencyBound=None,
                   weighting='linear', warpingFormula='slaneyMel', normalize='unit_tri'):
    """Compute mel-band spectrograms of `audio` at several band resolutions.

    For each band count n in `numberBands`, three per-frame variants are
    accumulated in an essentia Pool: 'mel_<n>_db' (dB), 'mel_<n>_log1+10kx'
    (per the key name, log10(1 + 10000*x) -- confirm against UnaryOperator
    semantics) and 'mel_<n>' (raw power bands).

    NOTE(review): relies on `es` (essentia.standard) and `essentia`, whose
    imports are commented out / absent at the top of this file -- this
    function raises NameError until they are restored.
    NOTE(review): `numberBands` uses a mutable default list; harmless since it
    is never mutated here, but a tuple would be safer.
    """
    if highFrequencyBound is None:
        highFrequencyBound = sampleRate/2
    windowing = es.Windowing(type=window, normalized=False, zeroPadding=zeroPadding)
    spectrum = es.Spectrum()
    # One MelBands extractor per requested band resolution.
    melbands = {}
    for nBands in numberBands:
        melbands[nBands] = es.MelBands(numberBands=nBands,
                                       sampleRate=sampleRate,
                                       lowFrequencyBound=lowFrequencyBound,
                                       highFrequencyBound=highFrequencyBound,
                                       inputSize=(frameSize+zeroPadding)//2+1,
                                       weighting=weighting,
                                       normalize=normalize,
                                       warpingFormula=warpingFormula,
                                       type='power')
    # Post-processing operators: scale/shift before log10, and dB conversion.
    norm10k = es.UnaryOperator(type='identity', shift=1, scale=10000)
    log10 = es.UnaryOperator(type='log10')
    amp2db = es.UnaryOperator(type='lin2db', scale=2)
    results = essentia.Pool()
    for frame in es.FrameGenerator(audio, frameSize=frameSize, hopSize=hopSize,
                                   startFromZero=not center):
        spectrumFrame = spectrum(windowing(frame))
        for nBands in numberBands:
            melFrame = melbands[nBands](spectrumFrame)
            results.add('mel_' + str(nBands)+'_db', amp2db(melFrame))
            results.add('mel_' + str(nBands)+'_log1+10kx', log10(norm10k(melFrame)))
            results.add('mel_' + str(nBands), melFrame)
    return results
def cut_audio(filename, sampleRate=44100, segment_duration=None):
    """Load mono audio; optionally return a centered excerpt of `segment_duration` seconds.

    Raises ValueError when the requested excerpt is longer than the audio.
    """
    audio = es.MonoLoader(filename=filename, sampleRate=sampleRate)()
    if not segment_duration:
        # No (or zero) duration requested: return the full signal.
        return audio[0:len(audio)]
    n_samples = round(segment_duration * sampleRate)
    start = (len(audio) - n_samples) // 2
    end = start + n_samples
    if start < 0 or end > len(audio):
        raise ValueError('Segment duration is larger than the input audio duration')
    return audio[start:end]
def kullback_leibler(y_hat, y):
"""Generalized Kullback Leibler divergence.
:param y_hat: The predicted distribution.
:type y_hat: torch.Tensor
:param y: The true distribution.
:type y: torch.Tensor
:return: The generalized Kullback Leibler divergence\
between predicted and true distributions.
:rtype: torch.Tensor
"""
return (y * (y.add(1e-5).log() - y_hat.add(1e-5).log()) + (y_hat - y)).sum(dim=-1).mean()
def embeddings_to_cosine_similarity_matrix(z):
"""Converts a a tensor of n embeddings to an (n, n) tensor of similarities.
"""
cosine_similarity = torch.matmul(z, z.t())
embedding_norms = torch.norm(z, p=2, dim=1)
embedding_norms_mat = embedding_norms.unsqueeze(0)*embedding_norms.unsqueeze(1)
cosine_similarity = cosine_similarity / (embedding_norms_mat)
return cosine_similarity
def contrastive_loss(z_audio, z_tag, t=1):
"""Computes contrastive loss following the paper:
A Simple Framework for Contrastive Learning of Visual Representations
https://arxiv.org/pdf/2002.05709v1.pdf
TODO: make it robust to NaN (with low values of t it happens).
e.g Cast to double float for exp calculation.
"""
z = torch.cat((z_audio, z_tag), dim=0)
s = embeddings_to_cosine_similarity_matrix(z)
N = int(s.shape[0]/2)
s = torch.exp(s/t)
try:
s = s * (1 - torch.eye(len(s), len(s)).cuda())
# s[range(len(s)), range(len(s))] = torch.zeros((len(s),)).cuda()
except AssertionError:
s = s * (1 - torch.eye(len(s), len(s)))
denom = s.sum(dim=-1)
num = torch.cat((s[:N,N:].diag(), s[N:,:N].diag()), dim=0)
return torch.log((num / denom) + 1e-5).neg().mean()
| andrebola/contrastive-mir-learning | utils.py | utils.py | py | 7,635 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "functools.wraps",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "sys.stdout.flush",
... |
23702793306 | # This is just a sample program to show you how to do
# basic image operations using python and the Pillow library.
#
# By Eriya Terada, based on earlier code by Stefan Lee,
# lightly modified by David Crandall, 2020
# Import the Image and ImageFilter classes from PIL (Pillow)
from PIL import Image, ImageFilter, ImageDraw, ImageFont
import random
import numpy as np
import sys
# Step 3 Convert image to gray scale
def grayscale_pad(image, padding_size):
im = Image.open(image).convert("L")
im_width = im.width
im_height = im.height
new_width = (2 * padding_size) + im_width
new_height = (2 * padding_size) + im_height
# Create a new blank grayscale image with padding
gray_im = Image.new("L", (new_width, new_height), color=255)
# Loop over the new image with padding
for x in range(new_width):
for y in range(new_height):
# fill in areas that are not padding
if x > padding_size and x < new_width - padding_size:
if y > padding_size and y < new_height - padding_size:
# convert the original image to grayscale
l_value = im.getpixel((x - padding_size, y - padding_size))
gray_im.putpixel((x, y), l_value)
# Save the image
gray_im.save("gray.png")
return gray_im
# Step 4 Convolution with separable kernel
def convolve(image, hx, hy):
im_width = image.width
im_height = image.height
hx_len = len(hx)
hy_len = len(hy)
image=np.array(image).astype(np.uint8)
new_image = np.zeros(image.shape)
vertimage = np.zeros(image.shape)
# convolve vertically
for x in range(im_height-hy_len+1):
for y in range(im_width):
row_sum=0
col_sum=0
for v in range(hy_len):
row_sum+=image[x+v][y]*hy[v]
vertimage[x][y]=row_sum
# convolve horizontally
img = Image.fromarray(np.uint8(vertimage * 255))
for x in range(im_height):
for y in range(im_width-hx_len+1):
row_sum=0
col_sum=0
for h in range(hx_len):
col_sum+=vertimage[x][y+h]*hx[h]
new_image[x][y]=col_sum
img = Image.fromarray(np.uint8(new_image * 255))
# img.show()
return img
# Canny edge detection
def sobel_edge_detection(gray_img):
gray_img=np.array(gray_img).astype(np.uint8)
# Sobels filter
v = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
h = np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]])
print(gray_img.shape)
im_height, im_width = gray_img.shape
new_image_h = np.zeros(gray_img.shape)
new_image_v = np.zeros(gray_img.shape)
new_image = np.zeros(gray_img.shape)
for i in range(0, im_height-3+1):
for j in range(0, im_width-3+1):
horizontalGrad=0
verticalGrad=0
for x in range(h.shape[0]):
for y in range(h.shape[1]):
horizontalGrad+=h[x][y]*gray_img[i+x,j+y]
new_image_h[i, j] = abs(horizontalGrad)
for x in range(v.shape[0]):
for y in range(v.shape[1]):
verticalGrad+=v[x][y]*gray_img[i+x,j+y]
new_image_v[i, j] = abs(verticalGrad)
# Edge Magnitude
edge_mag = np.sqrt(pow(horizontalGrad, 2.0) + pow(verticalGrad, 2.0))
new_image[i, j] = edge_mag
img = Image.fromarray(np.uint8(new_image * 255))
img.show()
# Create binary edge map
new_image[new_image!= 0.0]=1
new_image[new_image== 0.0]=0
print(new_image.shape)
return new_image
def get_region_colors(im, t_height, t_width, coordinate):
# coordinate is the x,y value of where the region starts in the image
# region_colors is the same size as the template
region_colors = []
for i in range(coordinate[0], coordinate[0]+t_height):
row = []
for j in range(coordinate[1], coordinate[1]+t_width):
row.append(im.getpixel((j, i)))
region_colors.append(row)
return region_colors
def compareImages(region, template):
# takes 2 matrices with the color values
# region and template are the same size
t_height = len(template)
t_width = len(template[0])
total_score = 0
for i in range(t_height):
for j in range(t_width):
region_pixel = region[i][j]
t_pixel = template[i][j]
# changed similarity function to use 255 instead of 1 since grayscale values are from 0-255
pixel_similarity = (region_pixel * t_pixel) + (255-region_pixel) * (255-t_pixel)
total_score += pixel_similarity
return total_score
'''
Function to calculate hamming distance i.e. step 5 in the assignment
'''
def hammingDist(im, t_im, combine, color, text_file_list, symbol_type, p, dist):
im_width = im.width
im_height = im.height
t_width = t_im.width
t_height = t_im.height
# get the template and it's score to compare with image regions later on
t_region = get_region_colors(t_im, t_height, t_width, (0,0))
perfect_score = compareImages(t_region, t_region)
#t_found = Image.new("L", (im_width, im_height), color=255)
combine = combine.copy().convert("RGB")
d = {}
# loop through the image
for i in range(im_height-t_height):
for j in range(im_width-t_width):
# get image region
im_region = get_region_colors(im, t_height, t_width, (i, j))
# score the region
region_score = compareImages(im_region, t_region)
# compare the image region score to the template score
if region_score >= (0.87 * perfect_score):
max_val = region_score
it_val = (i,j)
for y in range(3):
for z in range(3):
if (i-y,j-z) in d:
if d[(i-y,j-z)] >= region_score:
max_val = region_score
it_val = (i-y,j-z)
else:
del d[(i-y,j-z)]
elif (i-y,j+z) in d:
if d[(i-y,j+z)] >= region_score:
max_val = region_score
it_val = (i-y,j+z)
else:
del d[(i-y,j+z)]
d[it_val] = max_val
for k,v in d.items():
i,j = k
region_score = v
draw = ImageDraw.Draw(combine)
top_left = (j,i)
bottom_right = (j + t_width, i + t_height)
#draw.rectangle(((100, 100), (200, 200)), (0, 255, 0))
draw.rectangle((top_left, bottom_right), fill=None, outline = color,width=2)
pitch = '_'
if symbol_type == 'filled_note':
for q in range(int(dist/2)):
if q+i in p:
pitch = p[q+i]
elif i-q in p:
pitch = p[i-q]
font = ImageFont.truetype("/usr/share/fonts/dejavu/DejaVuLGCSansMono.ttf")
# font = ImageFont.truetype("/usr/share/fonts/msttcorefonts/arial.ttf") load_default()
draw.text((j-10, i-2),pitch,(255,0,0),font=font)
text_file_list.append([j, i, t_height, t_width, symbol_type, pitch, float(round((region_score/perfect_score*100), 2))])
# combine.save("step5.png")
return combine, text_file_list
# Step 6: Template matching using convolution
def template_matching(image, template):
m=template.shape[0]
n=template.shape[1]
F=np.zeros((image.shape))
D=np.zeros((image.shape))
# X=np.array(image)
#
# X[X==0]=np.inf
# X[X==1]=0
# Find the coordinates of edges
v,w=np.where(image!=0)
loc=np.stack((v,w),axis=1)
# Find coordinates of whole image
v1,w1=np.where(image==0)
loc1=np.stack((v1,w1),axis=1)
loc2=np.vstack((loc,loc1))
# Calculate D matrix which stores the distance of each pixel from its nearest edge pixel
temp=np.zeros(loc.shape[0])
for i in range(loc2.shape[0]):
temp=np.sqrt((loc2[i][0]-loc[:,0])**2+(loc2[i][1]-loc[:,1])**2)
D[loc2[i][0],loc2[i][1]]=np.min(temp)
img = Image.open(im_name)
draw = ImageDraw.Draw(img)
sum=0
for k in range(0,m):
for l in range(0,n):
sum+=(template[k][l])*(template[k][l])
score=sum
max_D=np.max(D)
# Calculate template scoring
for i in range(0,image.shape[0]-m+1):
for j in range(0,image.shape[1]-n+1):
sum=0
for k in range(0,m):
for l in range(0,n):
sum+=((template[k][l])*((max_D-D[i+k][j+l])/max_D))
F[i][j]=sum
if sum>=0.95*score:
draw.rectangle(((j,i), (j+n,i+m)), fill=None,outline="red")
img.save("output-6.png")
def hough_line(edge):
thetas = np.arange(0, 180, 1)
cos = np.cos(np.deg2rad(theta))
sin = np.sin(np.deg2rad(theta))
rho_range = round(math.sqrt(edge.shape[0]*2 + edge.shape[1]*2))
accumulator = np.zeros((2 * rho_range, len(theta)), dtype=np.uint8)
edge_pixels = np.where(edge == 1)
coordinates = list(zip(edge_pixels[0], edge_pixels[1]))
for p in range(len(coordinates)):
for theta in range(len(theta)):
rho = int(round(coordinates[p][1] * cos[theta] + coordinates[p][0] * sin[theta]))
accumulator[rho, t] += 1
#print(np.max(accumulator))
return accumulator
def hough(image):
# im = image.load()
# im_h, im_w = image.size
# th_val, r_val = 500, 1200
# hough_im = Image.new("L", (th_val, r_val), 255)
# him = hough_im.load()
# rho = {}
# rmax = hypot(im_h, im_w)
# dr = rmax / int(r_val/2)
# dth = pi / th_val
# for x in range(im_h):
# for y in range(im_w):
# if im[x, y] != 255:
# for m in range(th_val):
# th = dth * m
# r = x*cos(th) + y*sin(th)
# n = int(r_val/2) + int(r/dr+0.5)
# him[m, n] -= 1
dist = 0
img = image.convert('L') #conversion to gray scale
bw = img.point(lambda x: 0 if x<128 else 255, '1')
img_bin = np.array(bw).astype(np.uint8)
x, y = img_bin.shape
d = {}
for i in range(0,x):
d[i] = 0
for j in range(y):
if img_bin[i][j]==0:
d[i] +=1
l = [k for k,v in d.items() if v > y/2]
for i in range(0,len(l)-1):
if l[i]+1 != l[i+1]:
if dist == 0:
dist = l[i+1]-l[i]
elif dist == l[i+1]-l[i]:
break
lines = [l[0]]
p = l[0]
for i in range(1,len(l)):
if l[i] - p > dist*2:
lines.append(l[i])
p = l[i]
return dist, lines
def rescale(template,dist):
temp = Image.open(template).convert("L")
factor = dist/temp.height
temp = temp.resize((int(temp.width * factor), int(temp.height * factor)))
return temp
def pitch(lines,dist):
p = {}
j = 1
for i in lines:
if j%2 ==0:
p[i-dist*1.5] = 'D'
p[i-dist] = 'C'
p[i-dist*0.5] = 'B'
p[i] = 'A'
p[i+dist*0.5] = 'G'
p[i+dist] = 'F'
p[i+dist*1.5] = 'E'
p[i+dist*2] = 'D'
p[i+dist*2.5] = 'C'
p[i+dist*3] = 'B'
p[i+dist*3.5] = 'G'
p[i+dist*4] = 'F'
p[i+dist*4.5] = 'E'
else:
p[i-dist*0.5] = 'G'
p[i] = 'F'
p[i+dist*0.5] = 'E'
p[i+dist] = 'D'
p[i+dist*1.5] = 'C'
p[i+dist*2] = 'B'
p[i+dist*2.5] = 'A'
p[i+dist*3] = 'G'
p[i+dist*3.5] = 'F'
p[i+dist*4] = 'E'
p[i+dist*4.5] = 'D'
p[i+dist*5] = 'B'
j += 1
return p
if __name__ == '__main__':
music_file = sys.argv[1]
im_name = "../test-images/" + music_file
template1 = "../test-images/template1.png"
template2 = "../test-images/template2.png"
template3 = "../test-images/template3.png"
template4 = "../test-images/template4.png"
template5 = "../test-images/template5.png"
image = Image.open(im_name)
# finding the scale of the template
dist, lines = hough(image)
temp1 = rescale(template1,dist)
temp2 = rescale(template2,dist*3)
temp3 = rescale(template3,dist*2.5)
temp4 = rescale(template4,dist*3)
temp5 = rescale(template5,dist*8)
gray_im = image.convert("L")
# temp1 = Image.open(template1).convert("L")
# temp2 = Image.open(template2).convert("L")
# temp3 = Image.open(template3).convert("L")
# hx=[1,2,1]
# hy=[1,2,1]
# image=convolve(gray_im, hx, hy)
# edge1=sobel_edge_detection(gray_im)
# edge2=sobel_edge_detection(temp1)
# template_matching(edge1,edge2)
result_list = []
l =[]
p = pitch(lines,dist)
result1, result_list = hammingDist(gray_im, temp1, gray_im, "red", result_list, "filled_note", p, dist)
result2, result_list = hammingDist(gray_im, temp2, result1, "green", result_list, "eighth_rest", p, dist)
result3, result_list = hammingDist(gray_im, temp3, result2, "blue", result_list, "quarter_rest", p, dist)
result4, l = hammingDist(gray_im, temp4, result3, "yellow", l, "quarter_rest", p, dist)
result5, l = hammingDist(gray_im, temp5, result4, "pink", l, "quarter_rest", p, dist)
text_list = result_list
np.savetxt("detected.txt", text_list, fmt="%s") # Saving the results in a txt file
result5.save("detected.png")
| dhruvabhavsar/Optical-Music-Recognition | python-sample/omr.py | omr.py | py | 13,907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PIL.Image.new",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number":... |
31553967278 | from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
import json
import string
import re
ps = PorterStemmer()
punctuation = list(string.punctuation)
stop = stopwords.words('english') + punctuation + ['rt', '#rt', '#follow', 'via', 'donald', 'trump', '…', "trump's",
'new']
emoticons_str = r"""
(?:
[:=;] # Eyes
[oO\-]? # Nose (optional)
[D\)\]\(\]/\\OpP] # Mouth
)"""
regex_str = [
emoticons_str,
r'<[^>]+>', # HTML tags
r'(?:@[\w_]+)', # @-mentions
r"(?:\#+[\w_]+[\w\'_\-]*[\w_]+)", # hash-tags
r'http[s]?://(?:[a-z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-f][0-9a-f]))+', # URLs
r'(?:(?:\d+,?)+(?:\.?\d+)?)', # numbers
r"(?:[a-z][a-z'\-_]+[a-z])", # words with - and '
r'(?:[\w_]+)', # other words
r'(?:\S)' # anything else
]
tokens_re = re.compile(r'('+'|'.join(regex_str)+')', re.VERBOSE | re.IGNORECASE)
emoticon_re = re.compile(r'^'+emoticons_str+'$', re.VERBOSE | re.IGNORECASE)
def tokenize(s):
return tokens_re.findall(s)
def preprocess(s, lowercase=True): #what does lowercase=True do?
tokens = tokenize(s)
if lowercase:
tokens = [token if emoticon_re.search(token) else ps.stem(token.lower()) for token in tokens]
return tokens
def normalize_text():
with open('Tweets.json', 'r') as f:
for line in f:
try:
tweet = json.loads(line) # load it as Python dict
tokens = preprocess(tweet['text'])
print([w for w in tokens if not w in stop])
except BaseException as e:
continue
normalize_text()
| henrydambanemuya/socialsensing | ConflictSensingApp/TextNormalizer.py | TextNormalizer.py | py | 1,705 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.stem.PorterStemmer",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 11,
"usage_type": "call"
},
{
"api_na... |
19041324588 | # Author: Trevor Sherrard
# Since: Feb. 21, 2022
# Purpose: This file contains functionallity needed to run inference on a single image
import cv2
import numpy as np
import tensorflow as tf
import keras
# declare file paths
model_file_loc = "../../models/saved_unet_model.h5"
test_image_loc = "../../dataset/semantic_drone_dataset/original_images/000.jpg"
# declare goal image sizes
img_height = 800
img_width = 1200
def preprocess_image(img_file):
def func(img_file):
img_file = img_file.decode()
img = cv2.imread(img_file, cv2.IMREAD_COLOR)
img = cv2.resize(img, (img_width, img_height))
img = img / 255.0
img = img.astype(np.float32)
return img
image = tf.convert_to_tensor(tf.numpy_function(func, [img_file], [tf.float32]))
image = tf.reshape(image, (img_height, img_width, 3))
return image
def load_image_as_dataset(img_file):
dataset = tf.data.Dataset.from_tensor_slices(img_file)
dataset = dataset.map(preprocess_image)
dataset = dataset.batch(1)
return dataset
def run_inference(image_loc):
# load image
dataset = load_image_as_dataset([image_loc])
# load model
model = keras.models.load_model(model_file_loc)
# run inference
pred = model.predict(dataset)
# extract results
predictions = np.argmax(pred, axis=3)
single_channel_pred = predictions[0]
single_channel_pred = single_channel_pred.astype("uint8")
# show mono mask image
cv2.imshow("test", single_channel_pred)
cv2.waitKey(0)
if(__name__ == "__main__"):
run_inference(test_image_loc)
| Post-Obstruction-Assessment-Capstone/Drone-Road-Segmentation | utils/deep_learning/single_image_inference.py | single_image_inference.py | py | 1,599 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_COLOR",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "cv2.resize",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"line... |
20510556949 | from __future__ import print_function
import numpy as np
import ad3.factor_graph as fg
import time
def test_random_instance(n):
costs = np.random.rand(n)
budget = np.sum(costs) * np.random.rand()
scores = np.random.randn(n)
tic = time.clock()
x = solve_lp_knapsack_ad3(scores, costs, budget)
toc = time.clock()
print('ad3: {:.2f}'.format(toc - tic))
try:
tic = time.clock()
x_gold = solve_lp_knapsack_lpsolve(scores, costs, budget)
toc = time.clock()
print('lpsolve: {:.2f}'.format(toc - tic))
res = x - x_gold
assert np.linalg.norm(res) < 1e-6
except ImportError:
print('lpsolve not available')
def solve_lp_knapsack_ad3(scores, costs, budget):
factor_graph = fg.PFactorGraph()
binary_variables = []
for i in range(len(scores)):
binary_variable = factor_graph.create_binary_variable()
binary_variable.set_log_potential(scores[i])
binary_variables.append(binary_variable)
factor_graph.create_factor_knapsack(binary_variables, costs=costs,
budget=budget)
# Run AD3.
_, posteriors, _, _ = factor_graph.solve()
return posteriors
def solve_lp_knapsack_gurobi(scores, costs, budget):
from gurobipy import Model, LinExpr, GRB
n = len(scores)
# Create a new model.
m = Model("lp_knapsack")
# Create variables.
for i in range(n):
m.addVar(lb=0.0, ub=1.0)
m.update()
vars = m.getVars()
# Set objective.
obj = LinExpr()
for i in range(n):
obj += scores[i] * vars[i]
m.setObjective(obj, GRB.MAXIMIZE)
# Add constraint.
expr = LinExpr()
for i in range(n):
expr += costs[i] * vars[i]
m.addConstr(expr, GRB.LESS_EQUAL, budget)
# Optimize.
m.optimize()
assert m.status == GRB.OPTIMAL
x = np.zeros(n)
for i in range(n):
x[i] = vars[i].x
return x
def solve_lp_knapsack_lpsolve(scores, costs, budget):
import lpsolve55 as lps
relax = True
n = len(scores)
lp = lps.lpsolve('make_lp', 0, n)
# Set verbosity level. 3 = only warnings and errors.
lps.lpsolve('set_verbose', lp, 3)
lps.lpsolve('set_obj_fn', lp, -scores)
lps.lpsolve('add_constraint', lp, costs, lps.LE, budget)
lps.lpsolve('set_lowbo', lp, np.zeros(n))
lps.lpsolve('set_upbo', lp, np.ones(n))
if not relax:
lps.lpsolve('set_int', lp, [True] * n)
else:
lps.lpsolve('set_int', lp, [False] * n)
# Solve the ILP, and call the debugger if something went wrong.
ret = lps.lpsolve('solve', lp)
assert ret == 0
# Retrieve solution and return
x, _ = lps.lpsolve('get_variables', lp)
x = np.array(x)
return x
if __name__ == "__main__":
n = 100
test_random_instance(n)
| andre-martins/AD3 | examples/python/example_knapsack.py | example_knapsack.py | py | 2,835 | python | en | code | 68 | github-code | 36 | [
{
"api_name": "numpy.random.rand",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.random.rand",
"l... |
20436716291 | # pxy7896@foxmail.com
# 2020/8/1
__doc__ = """
获取中公教育每日一练内容;获取国务院政府工作报告。
"""
import requests
from bs4 import BeautifulSoup
import os
# 服务器反爬虫机制会判断客户端请求头中的User-Agent是否来源于真实浏览器,所以,我们使用Requests经常会指定UA伪装成浏览器发起请求
headers = {'user-agent': 'Mozilla/5.0'}
# 写文件
def writedoc(raw_ss, i, ii):
# 打开文件
# 编码为utf-8
start = raw_ss.find("模拟试题")
end = raw_ss.find("免责声明")
ss = raw_ss[start+5: end-5]
with open("result\\第" + str(ii) + "页.txt", 'a', encoding='utf-8') as f:
# 写文件
f.write(ss + "\n\n")
#print("问题" + str(i) + "文件写入完成" + "\n")
# 根据详细页面url获取目标字符串
def geturl(url):
# 请求详细页面
r = requests.get(url, headers=headers)
# 改编码
r.encoding = "GB2312"
soup = BeautifulSoup(r.text, "html.parser")
# 找出类名为 info-zi mb15 下的所有p标签
#ans = soup.find_all(["p", ".info-zi mb15"])
ans = soup.find_all(["p", ".offcn_shocont"])
# 用来储存最后需要写入文件的字符串
mlist = ""
for tag in ans:
# 获取p标签下的string内容,并进行目标字符串拼接
mlist = mlist + str(tag.string)
# 返回目标字符串
return mlist
# 获取目标网址第几页
def getalldoc(ii):
#string_ans_li = []
if ii == 1:
testurl = "http://www.offcn.com/mianshi/mryl/"
else:
# 字符串拼接成目标网址
testurl = "http://www.offcn.com/mianshi/mryl/" + str(ii) + ".html"
# 使用request去get目标网址
res = requests.get(testurl, headers=headers)
# 更改网页编码--------不改会乱码
res.encoding = "GB2312"
# 创建一个BeautifulSoup对象
soup = BeautifulSoup(res.text, "html.parser")
# 找出目标网址中所有的small标签
# 函数返回的是一个list
ans = soup.find_all("a")
# 用于标识问题
cnt = 1
# 先创建目录
# 如果需要分页爬取,那么路径只要写到对应就好了
#mkdir("result\\第" + str(ii) + "页\\")
for tag in ans:
# 获取a标签下的href网址
#string_ans = str(tag.a.get("href"))
string_ans = str(tag.get("href"))
if string_ans.find("/mianshi/2020/") == -1 and string_ans.find("/mianshi/2019/") == -1 and string_ans.find("/mianshi/2020/") == -1:
continue
#string_ans_li.append(string_ans)
# 请求详细页面
# 返回我们需要的字符串数据
string_write = geturl(string_ans)
# 写文件到磁盘
writedoc(string_write, cnt, ii)
cnt = cnt + 1
#print("第", ii, "页写入完成")
#return string_ans_li
"""
def mkdir(path):
# 去除首位空格
path = path.strip()
# 去除尾部 \ 符号
path = path.rstrip("\\")
# 判断路径是否存在
# 存在 True
# 不存在 False
isExists = os.path.exists(path)
# 判断结果
if not isExists:
# 如果不存在则创建目录
# 创建目录操作函数
os.makedirs(path)
return True
else:
# 如果目录存在则不创建,并提示目录已存在
return False
"""
def getall():
for i in range(1, 10, 1):
getalldoc(i)
#print(ss)
print(str(i) + " end!")
#break
def get_gov(testurl, file):
res = requests.get(testurl, headers=headers)
# 更改网页编码--------不改会乱码
res.encoding = "utf-8"
# 创建一个BeautifulSoup对象
soup = BeautifulSoup(res.text, "html.parser")
ans = soup.find_all([["p","h5"], "conlun2_box_text"])
# 用来储存最后需要写入文件的字符串
mlist = ""
for tag in ans:
# 获取p标签下的string内容,并进行目标字符串拼接
s = str(tag.string)
if s == 'None': continue
mlist = mlist + s + "\n"
# 返回目标字符串
with open(file, "a+") as file:
file.write(mlist)
if __name__ == "__main__":
#getall()
get_gov("http://www.gov.cn/guowuyuan/zfgzbg.htm","gov-2020.txt")
get_gov("http://www.gov.cn/guowuyuan/2019zfgzbg.htm","gov-2019.txt")
| pxy7896/PlayWithPython3 | 获取某网站每日一练.py | 获取某网站每日一练.py | py | 4,447 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"... |
8385928022 | from django.db.models import Model, Q, OuterRef, Max, Count
from django.conf import settings
from django.core import mail
from django.http import HttpResponse
from django.template import Context, Template, loader
from django.utils.translation import gettext_lazy as _
from django.contrib import admin
import os, glob
from pathlib import Path
import pyexcel
import markdown
from markdown_link_attr_modifier import LinkAttrModifierExtension
from urllib.parse import quote
def get_current_site(request):
from .models import Site
try: return Site.objects.get_current(request)
except Site.DoesNotExist: pass
return None
def user_programs(queryset, path, request, or_cond=None):
if request.user.is_superuser:
if not request.user.site: return queryset
cond = Q(**{path+'sites': request.user.site})
return queryset.filter(cond | or_cond if or_cond else cond)
cond = Q(**{path+'user': request.user})
return queryset.filter(cond | or_cond if or_cond else cond)
def create_model(name, fields, app_label='formative', module='',
program=None, meta=None, base_class=Model):
class Meta:
pass
setattr(Meta, 'app_label', app_label)
if meta is not None:
for key, value in meta.__dict__.items():
if key[:2] == '__' or key == 'abstract': continue
setattr(Meta, key, value)
setattr(Meta, 'db_table', name)
if not module: module = app_label
attrs = {'__module__': module, 'Meta': Meta}
attrs.update(dict(fields))
# Create the class, which automatically triggers ModelBase processing
model = type(name, (base_class,), attrs)
if program: model._meta.program_slug = program
return model
def remove_p(text):
s = text.strip()
if s[-3-1:] == '</p>':
i = s.rindex('<p>')
return s[i+3:-3-1]
return text
def send_email(template, to, subject, context={}, connection=None):
new_context = { 'settings': settings }
new_context.update(context)
context = Context(new_context)
if type(template) != Template: context = new_context # wtf, Django
sub = ' '.join(subject.render(context).splitlines()).rstrip()
message = template.render(context)
email = mail.EmailMessage(sub, message, settings.CONTACT_EMAIL, [to],
connection=connection)
return email.send()
class TabularExport:
def __init__(self, form, queryset, **kwargs):
self.args, self.fields, self.collections = kwargs, [], {}
names = []
for name in self.args:
if not self.args[name]: continue
if name.startswith('block_'): names.append(name[len('block_'):])
elif name.startswith('collection_') and self.args[name] != 'no':
cname = name[len('collection_'):]
self.collections[cname] = [0, []]
if self.args[name] == 'combine':
self.collections[cname][0] = -1
blocks = { 'block_'+b.name: b
for b in form.submission_blocks().filter(name__in=names) }
self.items = {}
if self.collections:
item_model = form.item_model
# item_model's _submission rel doesn't recognize original queryset
qs = form.model.objects.filter(pk__in=queryset) # but this works
sub_items = item_model.objects.filter(_submission__in=qs)
items_qs = sub_items.filter(_collection__in=self.collections)
# TODO order should be by block_rank, cf Submission._collections()
for item in items_qs.order_by('_collection', '_block', '_rank'):
app = self.items.setdefault(item._submission_id, {})
app_col = app.setdefault(item._collection, [])
app_col.append(item)
for c in self.collections:
if self.collections[c][0] < 0: continue
lengths = [ len(app[c])
for app in self.items.values() if c in app ]
self.collections[c][0] = lengths and max(lengths) or 0
for name in self.args:
if name.startswith('block_'):
if blocks[name].block_type() == 'stock':
for n in blocks[name].stock.widget_names():
self.fields.append(blocks[name].stock.field_name(n))
else: self.fields.append(blocks[name].name)
elif name.startswith('cfield_'):
cname, field = name[len('cfield_'):].split('.')
if cname not in self.collections: continue
self.collections[cname][1].append(field)
def header_row(self):
ret = ['email']
for name in self.fields:
if name.startswith('_'): ret.append(name[1:])
else: ret.append(name)
for collection, (n, fields) in self.collections.items():
if not n: continue
cfields = []
for field in fields:
if field == '_file': cfields.append(collection + '_file')
else: cfields.append(collection + '_' + field)
if n < 0: ret += cfields
else: ret += cfields * n
return ret
def data_row(self, submission, sub_items):
row = [submission._email]
for name in self.fields:
val = getattr(submission, name)
if val is None: out = ''
else: out = str(val)
row.append(out)
def item_val(item, field):
if field == '_file' and item._file:
return 'https://' + settings.DJANGO_SERVER + item._file.url
val = getattr(item, field)
if val is None: return ''
return str(val)
for collection, (n, fields) in self.collections.items():
col_items = sub_items.setdefault(collection, [])
if n < 0:
for field in fields:
vals = [ item_val(item, field) for item in col_items ]
sep = ' ' if field == '_file' else ', '
out = sep.join(vals)
row.append(out)
else:
for item in col_items:
for field in fields: row.append(item_val(item, field))
row.extend([''] * (n-len(col_items)) * len(fields))
return row
def data_rows(self, queryset):
ret = []
for submission in queryset:
sub_items = self.items.setdefault(submission._id, {})
row = self.data_row(submission, sub_items)
ret.append(row)
return ret
def data(self, queryset):
ret = [self.header_row()]
ret += self.data_rows(queryset)
return ret
def csv_response(self, filename, queryset):
data = self.data(queryset)
stream = pyexcel.save_as(array=data, dest_file_type='csv')
response = HttpResponse(stream, content_type='text/csv')
disp = f"attachment; filename*=UTF-8''" + quote(filename)
response['Content-Disposition'] = disp
return response
def submission_link(s, form, rest=''):
server = settings.DJANGO_SERVER
if ':' in server or server.endswith('.local'): proto = 'http'
else: proto = 'https'
if s._valid > 1 and not rest:
if s._valid == form.num_pages(): rest = f'page-{form.num_pages()}'
else: rest = f'page-{s._valid + 1}'
return f'{proto}://{server}/{form.program.slug}/{form.slug}/{s._id}/{rest}'
def get_file_extension(name):
return Path(name).suffix[1:].lower()
def thumbnail_path(path, ext=None):
idx = path.rindex('.')
return path[:idx] + '_tn' + (ext and '.'+ext or path[idx:])
def subtitle_path(path, lang):
idx = path.rindex('.')
return path[:idx] + '_s_' + lang + '.vtt'
def delete_submission_files(files_recs):
for rec in files_recs:
submission_dir = os.path.join(settings.MEDIA_ROOT, str(rec.submission))
if not os.path.isdir(submission_dir): continue
for filename in os.listdir(submission_dir):
os.remove(os.path.join(submission_dir, filename))
os.rmdir(submission_dir)
def delete_file(file):
if os.path.isfile(file.path): os.remove(file.path)
thumb = thumbnail_path(file.path)
if os.path.isfile(thumb): os.remove(thumb)
for path in glob.glob(subtitle_path(file.path, '*')):
os.remove(path)
def human_readable_filesize(size, decimal_places=2):
for unit in ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB']:
if size < 1024 or unit == 'PB': break
size /= 1024
return f"{size:.{decimal_places}f} {unit}"
def any_name_field(**kwargs):
Qs = [ Q(**{ namen + (k != '_' and k or ''): v for k, v in kwargs.items() })
for namen in ('name1', 'name2', 'name3') ]
return Qs[0] | Qs[1] | Qs[2]
def get_tooltips():
return {
'previoustip': _('Previous Page'),
# 'sortabletip': _('Drag to reorder'),
# 'uploadtip': _('Replace File'),
}
class MarkdownFormatter(markdown.Markdown):
def __init__(self):
super().__init__(extensions=[
LinkAttrModifierExtension(new_tab='external_only')
])
def convert(self, text):
self.reset() # in our context this seems to be always needed
return super().convert(text)
| johncronan/formative | formative/utils.py | utils.py | py | 9,508 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "models.Site.objects.get_current",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "models.Site.objects",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "models.Site",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "... |
36750215907 | from flask import (g, abort, get_flashed_messages, request, flash, redirect,
url_for)
from sqlalchemy.sql import functions
from buddyup.app import app
from buddyup.database import (Course, Visit, User, BuddyInvitation,
Location, Major, Event, Language, db)
from buddyup.templating import render_template
from buddyup.util import form_get, check_empty
from functools import wraps
def admin_required(f):
    """Decorator restricting a view to the configured admin account.

    Aborts with HTTP 403 unless a user is logged in and their user name
    matches the ADMIN_USER application setting.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        admin_name = app.config.get("ADMIN_USER", u"")
        # Guard clause: reject anonymous users and non-admin accounts.
        if not (g.user and g.user.user_name == admin_name):
            abort(403)
        return f(*args, **kwargs)
    return wrapper
@app.route("/admin")
@admin_required
def admin_dashboard():
    """Render the admin dashboard with site-wide usage statistics and
    the full lists of courses, majors, locations and languages.

    Fix: the original issued ``Event.query.count()`` twice (once for
    ``group_count`` and once for ``total_groups``), costing an extra
    database round trip for the same number; it is now fetched once.
    """
    event_count = Event.query.count()
    variables = {
        'group_count': event_count,
        'total_groups': event_count,
        'unique_visits': Visit.query.count(),
        # Sum of per-visitor request counters across all Visit rows.
        'total_visits': db.session.query(
            functions.sum(Visit.requests)).scalar(),
        'total_invites': BuddyInvitation.query.count(),
        # Maybe only count users who have logged in?
        'total_users': User.query.count(),
        'courses': Course.query.order_by(Course.name).all(),
        'majors': Major.query.order_by(Major.name).all(),
        'locations': Location.query.order_by(Location.name).all(),
        'languages': Language.query.order_by(Language.name).all(),
    }
    return render_template('admin/dashboard.html', **variables)
@app.route("/admin/course/add", methods=['POST'])
@admin_required
def admin_add_course():
name = form_get('name')
check_empty(name, "Course Name")
instructor = form_get('instructor')
check_empty(instructor, "Professor Name")
if not get_flashed_messages():
course = Course(name=name, instructor=instructor)
db.session.add(course)
db.session.commit()
flash("Added Course " + name)
return redirect(url_for('admin_dashboard'))
#return render_template('admin/dashboard.html', **get_stats())
@app.route("/admin/course/delete", methods=['POST'])
@admin_required
def admin_delete_course():
course_ids = map(int, request.form.getlist('courses'))
for course_id in course_ids:
Course.query.filter_by(id=course_id).delete()
db.session.commit()
flash('Course deleted')
return redirect(url_for('admin_dashboard'))
@app.route("/admin/location/add", methods=['POST'])
@admin_required
def admin_add_location():
name = form_get('location')
check_empty(name, "Location Name")
if not get_flashed_messages():
loc = Location(name=name)
db.session.add(loc)
db.session.commit()
flash("Added Course " + name)
return redirect(url_for('admin_dashboard'))
@app.route("/admin/location/delete", methods=['POST'])
@admin_required
def admin_delete_location():
location_ids = map(int, request.form.getlist('location'))
for location_id in location_ids:
Location.query.filter_by(id=location_id).delete()
db.session.commit()
flash('Location deleted')
return redirect(url_for('admin_dashboard'))
@app.route("/admin/major/add", methods=['POST'])
@admin_required
def admin_add_major():
name = form_get('major')
check_empty(name, "Major Name")
if not get_flashed_messages():
major = Major(name=name)
db.session.add(major)
db.session.commit()
flash("Added Course " + name)
return redirect(url_for('admin_dashboard'))
@app.route("/admin/major/delete", methods=['POST'])
@admin_required
def admin_delete_major():
major_ids = map(int, request.form.getlist('majors'))
for major_id in major_ids:
Major.query.filter_by(id=major_id).delete()
db.session.commit()
flash('Majors deleted')
return redirect(url_for('admin_dashboard'))
@app.route("/admin/language/add", methods=['POST'])
@admin_required
def admin_add_language():
name = form_get('language')
check_empty(name, "Language Name")
if not get_flashed_messages():
language = Language(name=name)
db.session.add(language)
db.session.commit()
flash("Added Language " + name)
return redirect(url_for('admin_dashboard'))
@app.route("/admin/language/delete", methods=['POST'])
@admin_required
def admin_delete_language():
language_ids = map(int, request.form.getlist('languages'))
for language_id in language_ids:
Language.query.filter_by(id=language_id).delete()
db.session.commit()
flash('Languages deleted')
return redirect(url_for('admin_dashboard'))
@app.route("/admin/users")
@admin_required
def admin_user_management():
users = User.query.all()
return render_template('admin/userManagement.html', users=users)
@app.route("/admin/forums")
@admin_required
def admin_forum_management():
pass
@app.route("/admin/stats")
@admin_required
def admin_stats():
variables = {}
variables['group_count'] = Event.query.count()
variables['unique_visits'] = Visit.query.count()
# This requires something with func.sum. Not sure what.
variables['total_visits'] = Visit.query.sum(Visit.requests)
variables['total_groups'] = Event.query.count()
variables['total_invites'] = BuddyInvitation.query.count()
# Maybe only count users who have logged in?
variables['total_users'] = User.query.filter(User.activated == True).count()
render_template('admin_stats.html', **variables)
| thangatran/Buddy-Up | buddyup/pages/admin.py | admin.py | py | 5,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.g.user",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.g",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "buddyup.app.app.config.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "buddyup.app.app.... |
23856814731 | from collections import deque
GENERATOR = 0
MICROCHIP = 1
floors = [[] for _ in range(4)]
elev = 0
elems = dict()
def is_safe(arrangement):
floors, _ = arrangement
for floor in floors:
chips = set()
hasg = False
for e in floor:
if e & 1 == MICROCHIP:
chips.add(e >> 1)
for e in floor:
if e & 1 == GENERATOR:
if e >> 1 in chips:
chips.remove(e >> 1)
hasg = True
if len(chips) > 0 and hasg:
return False
return True
def moves(arrangement):
res = []
floors, elev = arrangement
nelevs = []
if elev > 0:
nelevs.append(elev-1)
if elev < 3:
nelevs.append(elev+1)
for nelev in nelevs:
ne = len(floors[elev])
for i in range(ne):
for j in range(i, ne):
cand = [list(x) for x in floors]
cand[nelev].append(floors[elev][i])
cand[elev][i] = None
if j != i:
cand[nelev].append(floors[elev][j])
cand[elev][j] = None
cand[elev].remove(None)
cand[elev].remove(None)
for k, _ in enumerate(cand):
cand[k].sort()
cand[k] = tuple(cand[k])
narr = (tuple(cand), nelev)
if is_safe(narr):
res.append(narr)
return res
def append(lst, e):
e0, e1 = e
if not e0 in elems:
elems[e0] = len(elems)
e0 = elems[e0]
if e1 == 'generator':
e1 = GENERATOR
else:
e1 = MICROCHIP
lst.append(2*e0+e1)
with open('day11/input.txt') as h:
for i, line in enumerate(h):
line = line.strip('.\n')
words = line.split()
for j, word in enumerate(words):
word = word.strip(',')
if word == 'generator':
append(floors[i], (words[j-1], word))
elif word == 'microchip':
append(floors[i], (words[j-1][:-11], word))
if i == 0:
append(floors[i], ('elerium', 'generator'))
append(floors[i], ('elerium', 'microchip'))
append(floors[i], ('dilithium', 'generator'))
append(floors[i], ('dilithium', 'microchip'))
floors[i].sort()
floors[i] = tuple(floors[i])
floors = tuple(floors)
initial = (floors, elev)
final = [[list(x) for x in floors], 3]
for i in range(3):
final[0][3].extend(final[0][i])
final[0][i] = []
for i in range(4):
final[0][i].sort()
final[0][i] = tuple(final[0][i])
final[0] = tuple(final[0])
final = tuple(final)
qfront = deque([(initial, 0)])
qback = deque([(final, 0)])
sfront, sback = {initial: 0}, {final: 0}
dfront, dback = 0, 0
cont = True
while cont:
while len(qfront) > 0 and qfront[0][1] == dfront:
arr, _ = qfront.popleft()
for narr in moves(arr):
if narr in sback:
print(dfront + sback[narr] + 1)
cont = False
break
if narr in sfront:
continue
sfront[narr] = dfront + 1
qfront.append((narr, dfront + 1))
if not cont:
break
if not cont:
break
dfront += 1
while len(qback) > 0 and qback[0][1] == dback:
arr, _ = qback.popleft()
for narr in moves(arr):
if narr in sfront:
print(sfront[narr] + dback + 1)
cont = False
break
if narr in sback:
continue
sback[narr] = dback + 1
qback.append((narr, dback + 1))
if not cont:
break
dback += 1
| mahiuchun/adventofcode-2016 | day11/part2.py | part2.py | py | 3,739 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 96,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 97,
"usage_type": "call"
}
] |
41703057518 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('portfolio', '0006_auto_20160109_0000'),
]
operations = [
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('content', models.TextField()),
('date_ts', models.DateField()),
],
options={
},
bases=(models.Model,),
),
]
| zachswift615/zachswift | portfolio/migrations/0007_blog.py | 0007_blog.py | py | 701 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 14,
"usage_type": "call"
},
... |
10401144210 | #!/usr/bin/env python
"""
Testtool om een lokale HTTP server te starten die verbinding maakt
met dvs-daemon. Niet geschikt voor productie! Gebruik daar WSGI voor.
"""
import bottle
import argparse
import dvs_http_interface
import logging
# Initialiseer argparse
parser = argparse.ArgumentParser(description='DVS HTTP interface test tool')
parser.add_argument('-s', '--server', action='store', default='127.0.0.1', help='DVS server (standaard 127.0.0.1)')
parser.add_argument('-p', '--port', action='store', default='8120', help='DVS poort (standaard 8120)')
args = parser.parse_args()
dvs_http_interface.dvs_client_server = "tcp://%s:%s" % (args.server, args.port)
# Stel logger in:
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)
logger.info("Server: %s", dvs_http_interface.dvs_client_server)
bottle.debug(True)
bottle.run(host='localhost', port=8080, reloader=True) | PaulWagener/rdt-infoplus-dvs | dvs-http.py | dvs-http.py | py | 906 | python | nl | code | null | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dvs_http_interface.dvs_client_server",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "logging.basicConfig",
"line_number": 23,
"usage_type": "call"
},
{
... |
27511260267 | # -*- coding: utf-8 -*-
"""
Created on Mo 12 Sept 2 13:15:51 2022
@author: FKAM
"""
import pandas as pd
import streamlit as st
import plotly.express as px
import plotly.graph_objs as go
#import altair as alt
#from bokeh.plotting import figure
def list_ext(uploads, radio3):
list_ = []
header_default = ["date [YYYYMMDD]",
"time [HHMMSS]",
"X [m]",
"Y [m]",
"Z [m]",
"Drain nr. [-]",
"Job nr. [-]",
"Base unit [-]",
"Operator [-]",
"Stitcher type [-]",
"Stitcher length [m]",
"Stitcher ballast [ton]",
"Drain type [-]",
"Anchoring [-]",
"Pattern type [0=square/1=triang.]",
"Pattern distance [m]",
"Pattern heading [deg]",
"Pattern X-position [m]",
"Pattern Y-position [m]",
"Prescribed depth [m]",
"Max. depth [m]",
"Pull back [m]",
"Cum. drain length [m]",
"Duration [s]",
"Max. force [kN]",
"Stitcher angle [deg]",
"ok",
"new roll",
"canceled",
"Log interval [m]",
"Data nr. [-]",
"Force [kN]"]
df_default = pd.DataFrame(columns=header_default)
for file_ in uploads:
for headerline in file_:
headerline = str(headerline)
if '#date' in headerline:
break
headerline = headerline[:-3]
headerlist = headerline.replace("b'#", "").split(',')
if 'Remarks' in headerlist:
headerlist.remove('Remarks')
headerlist.remove('')
for index, item in enumerate(headerlist):
if ' [ok' in item:
headerlist[index] = 'ok'
if 'canceled]' in item:
headerlist[index] = 'canceled'
df = pd.read_csv(file_, index_col=False, header=None)
nums = list(range(len(headerlist)))
headerdict = dict(zip(nums, headerlist))
df = df.rename(columns=headerdict)
df = df.rename(columns={' Drain nr. [-]' : 'Drain nr. [-]'})
force_1_loc = df.columns.get_loc('Force [kN]')
df_force = df.iloc[:, force_1_loc+1:-1]
for col in range(len(df_force.columns)):
df_force = df_force.rename(columns={df_force.columns[col] : f'Force_{col+2}'})
if radio3 == 'Default columns (recommended)':
if not header_default == headerlist:
df = pd.concat([df_default, df])
for col in df.columns:
if col not in header_default:
df = df.drop([col], axis=1)
elif radio3 == 'Columns from file':
for col in df.columns:
if type(col) == int:
df = df.drop([col], axis=1)
df = pd.concat([df, df_force], axis=1)
#####
list_.append(df)
### Sort list_ on df with most columns ##
a = max([x.shape[1] for x in list_])
indexa = [x.shape[1] for x in list_].index(a)
longest = list_[indexa]
del list_[indexa]
list_.insert(0, longest)
return list_, headerlist
def convert(list_, headerlist, wp_calc_method, fixed_nr):
frame = pd.concat(list_, axis=0, ignore_index=True)
## Rename columns ##
nums = list(range(len(headerlist)))
headerdict = dict(zip(nums, headerlist))
frame = frame.rename(columns=headerdict)
frame = frame.sort_values(['Base unit [-]', 'date [YYYYMMDD]', 'time [HHMMSS]'])
## Add date and time columns ##
#date_text = frame['date [YYYYMMDD]']
frame['date [YYYYMMDD]'] = pd.to_datetime(frame['date [YYYYMMDD]'], format='%Y%m%d').dt.date
frame['time [HHMMSS]'] = frame['time [HHMMSS]'].astype(int)
for pvd in frame.index:
if len(str(frame.loc[pvd, 'time [HHMMSS]'])) < 6:
frame.loc[pvd, 'time [HHMMSS]'] = (6 - len(str(frame.loc[pvd, 'time [HHMMSS]']))) * '0' + str(frame.loc[pvd, 'time [HHMMSS]'])
time_text = frame['time [HHMMSS]'].copy()
frame['time [HHMMSS]'] = pd.to_datetime(frame['time [HHMMSS]'], format='%H%M%S').dt.time
## Cable tension + wp thickness ##
if wp_calc_method == 'No':
wp_frame = 0
else:
wp_thickness = [100]*len(frame)
for pvd in range(len(frame)):
keys = list(frame)
force1 = keys.index('Force [kN]')
force_df = frame.iloc[:, force1:]
force_pvd = force_df.loc[pvd,:].values.tolist()
force_pvd = [i for i in force_pvd if i != 0] #remove zeros
force_pvd = force_pvd[2:-3] #remove first 2 and last 2 values
if len(force_pvd) > 0:
cable_tension = min(force_pvd)
if wp_calc_method == 'Lowest force plus fixed number':
cutoff = cable_tension + fixed_nr
elif wp_calc_method == 'Manual choice':
cutoff = fixed_nr
else:
cutoff = 0
cable_tension_index = force_pvd.index(cable_tension)
force_pvd = force_pvd[:cable_tension_index]
wp = (sum(i > cutoff for i in force_pvd) + 2) * frame['Log interval [m]'][pvd]
wp_thickness[pvd] = wp
wp_frame = frame[['X [m]', 'Y [m]']]
wp_frame['wp [m]'] = wp_thickness
wp_frame['csx'] = [528374]*len(frame)
wp_frame['csy'] = [507360]*len(frame)
tofloat = ['Z [m]',
'Drain nr. [-]',
'Max. depth [m]',
'Max. force [kN]',
'Prescribed depth [m]',
'Stitcher angle [deg]']
for col in tofloat:
if col in frame.columns:
frame[col] = frame[col].astype(float)
else:
continue
return frame, time_text
def show_delay(frame_filtered, delta, start_time, end_time, date, base_unit):
time_text = frame_filtered['time_text']
time_text = pd.concat([start_time, time_text, end_time])
time_text = list(pd.to_datetime(time_text, format='%H%M%S'))
start = time_text[:-1].copy()
end = time_text[1:].copy()
fig, ax = plt.subplots(figsize=[18,3], facecolor='white')
periods = []
for pvd in range(len(start)):
periods.append((start[pvd], end[pvd] - start[pvd]))
periods_op = [tup for tup in periods if tup[1] <= np.timedelta64(int(delta), 's')]
periods_delay = [tup for tup in periods if tup[1] > np.timedelta64(int(delta), 's')]
ax.broken_barh(
periods_delay,
(0.1, 0.2),
color='#FF6861',
#edgecolor="black"
)
ax.broken_barh(
periods_op,
(-0.1, 0.2),
color='green',
# edgecolor="black"
)
ax.set_yticks([0, 0.2])
ax.set_yticklabels(['Operational', 'Delay'])
ax.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M'))
fig.suptitle(f'{date} - {base_unit}', fontsize=20)
ax.grid(linestyle="--")
fig.autofmt_xdate()
st.write(fig)
total_op = total_delay = datetime.timedelta()
for pvd in periods_op:
total_op += pvd[1]
for pvd in periods_delay:
total_delay += pvd[1]
st.write('Operational time: ', str((datetime.datetime.min + total_op).time()))
st.write('Delay time: ', str((datetime.datetime.min + total_delay).time()))
st.write('Efficiency: ', str(round(100 * total_op.total_seconds() / (total_op.total_seconds() + total_delay.total_seconds()))), '%')
fn = f'{date} - {base_unit}.png'
img = io.BytesIO()
plt.savefig(img, format='png')
st.download_button(
label='Download as image',
data=img,
file_name=fn,
mime='image/png'
)
def show_preview(frame):
scale = ["date [YYYYMMDD]",
"time [HHMMSS]",
"Z [m]",
"Drain nr. [-]",
"Base unit [-]",
"Operator [-]",
"Stitcher type [-]",
"Prescribed depth [m]",
"Max. depth [m]",
"Max. force [kN]",
"Stitcher angle [deg]"]
choose_scale = st.selectbox('Choose plot parameter:',
scale,
help='Choose from the list what you want to plot in the figure below', index=8)
frame.columns[10] == choose_scale
if choose_scale in frame.columns:
fig = px.scatter(data_frame = frame,
x=frame['X [m]'],
y=frame['Y [m]'],
color=choose_scale,
color_continuous_scale='turbo')
fig.update_yaxes(scaleanchor='x', scaleratio=1)
st.write(fig)
else:
st.write(f'{choose_scale} not found')
# from streamlit_plotly_events import plotly_events
# clickedPoint = plotly_events(fig, key="line")
# st.write(f"Clicked Point: {clickedPoint}")
def show_wp(wp_frame, cs):
# st.write('**Working platform thickness:**')
# #fig1 = go.Figure()
# fig1 = px.scatter(data_frame = wp_frame,
# x=wp_frame['X [m]'],
# y=wp_frame['Y [m]'],
# color='wp [m]',
# color_continuous_scale='turbo',
# range_color=[0,5])
# fig1.update_yaxes(scaleanchor='x', scaleratio=1)
# st.write(fig1)
st.write('**Working platform thickness:**')
fig1 = go.Figure()
fig1.add_trace(go.Scatter(x=wp_frame['X [m]'],
y=wp_frame['Y [m]'],
mode='markers',
name='PVD points',
marker_color=wp_frame['wp [m]']))
x = [507360, 507460]
y = [cs, cs]
fig1.add_trace(go.Scatter(x=x, y=y,
mode='lines',
name='Cross section'))
fig1.update_yaxes(scaleanchor='x', scaleratio=1)
st.write(fig1)
#st.write(fig1)
# fig3 = go.Figure(data=fig1.data + fig2.data)
# st.write(fig3)
| KempiG/Master | PVD_funcs.py | PVD_funcs.py | py | 11,030 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 83,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"l... |
74436148265 | # osandvold
# 5 Jul 2022
# Adapted script from checkData_UPENN.m script from Heiner (Philips)
# for validating and reading log data from DMS of CT Benchtop
import numpy as np
import pandas as pd
import glob
import os.path
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from matplotlib.widgets import Button, RadioButtons, CheckButtons
# Script adapted from Heiner to read DMS data (.dat) which is a binary file
# Each projection contains a header 64 entries (unit16) and detector data
# of size (nrows * (ncols+8)) = 16 * (672+8)
# Example: acquiring 4000 views will mean you have 4000 headers and
# 4000x the detector data
# Function to read .dat file and return dcm (dms data)
def read_dms_dat_nrows(dat_fname):
dcm = {}
nCols = 672
tailer = 8 # the tailer has 8 bytes
header = 64
# read and open binary .dat file
file_id = open(dat_fname, 'rb')
nRows = list(file_id.read(1))[0]
print(f'Number of detected rows: {nRows}')
# read binary file
cirs_data = np.fromfile(dat_fname, dtype='uint16')
bytes_per_view = ((nCols + tailer) * nRows + header)
nViews = int(np.floor(len(cirs_data) / bytes_per_view))
print(f'Number of detected views: {nViews}')
headers = np.zeros((nViews, 64), dtype=float)
# loop to collect the header information
for counter in range(nViews):
headers[counter, :] = cirs_data[counter*bytes_per_view + np.arange(64)]
# TODO: use projData mainly
projData = np.zeros((nCols, nRows, nViews))
projDataHz = np.zeros((nCols, nRows, nViews))
projDataRaw = np.zeros((nCols, nRows, nViews))
# loop to collect the projection data
for v in range(nViews):
for c in range(nRows):
projData_ind = header + v*bytes_per_view + (nCols+tailer)*c + np.arange(nCols)
projData[:,c,v] = np.exp(-1 * cirs_data[projData_ind]
/ 2048 * np.log(2))
projDataHz_ind = header + v*bytes_per_view + (nCols+tailer)*c + np.arange(nCols)
projDataHz[:,c,v] = 8e6 * (2 ** (-cirs_data[projDataHz_ind] / 2048))
# projDataRaw_ind = header + v*bytes_per_view + (nCols+tailer)*c + np.arange(nCols)
# projDataRaw[:,c,v] = cirs_data[projDataRaw_ind]
# create output structure/dictionary
dcm['FloatProjectionData'] = projData
dcm['FloatProjectionDataHz'] = projDataHz
# dcm['FloatProjectionDataRaw'] = projDataRaw
dcm['IntegrationPeriod'] = headers[:,3]*0.125
dcm['kV'] = headers[:, 50]
dcm['mA'] = headers[:, 49]
dcm['gridV1'] = headers[:, 42]
dcm['gridV2'] = headers[:, 43]
dcm['LeftDMSTemperature'] = headers[:,5] * 1/16
dcm['RightDMSTemperature'] = headers[:,6] * 1/16
dcm['RotationAngle'] = headers[:, 18] * 360/13920
dcm['IPcounter'] = headers[:,17]
return dcm
# Read the ; delimited csv log file and save as a table
# Header:
# Timestamp[us]; Grid_1[V]; Grid_2[V]; Anode_Voltage[V]; Cathode_Voltage[V];
# High_Voltage[V]; Analog_In_2[mV]; Analog_In_3[mV]; HighVoltage[0,1];
# HighGrid[0,1]; Resend[0,1]; X_Act_S[0,1]; TH_Rising[0,1]; TH_Falling[0,1];
# Phase[3Bit]; IP_Counter[16Bit]; Phantom_Index[0,1]; Phantom_AP0[0,1];
# Phantom_AP90[0,1]; DMS_AP0[0,1]; DMS_AP90[0,1]; DMS_Even_Rea[0,1]
def read_log(log_fname):
log_data = pd.read_csv(log_fname, sep=';')
return log_data
# Send in a directory path with the .dat and .csv file
# Output:
# - Will display the DMS data
# - Will display the wave form of the data
# - Will print a PNG to the same data directory
def check_data(data_path):
paths = check_contents(data_path)
# Exit if the csv and dat file are not in the same directory
if len(paths) == 0:
print('Must have only one dat and one log file in the same directory')
return 0
logdata = read_log(paths[0])
dcm = read_dms_dat_nrows(paths[1])
row = 40 # set in the original script
# where to grab data
indStart = round(0.4/(1e-6)) # 400000
indEnd = len(logdata.loc[:,'Timestamp[us]'])-100
# compute correct time axis
timeLog = np.arange(1, len(logdata.loc[:,'Timestamp[us]'])) * 1.0e-6
dd = np.mean(np.mean(np.mean(dcm['FloatProjectionData'][300:400,:,:])))
KV = np.transpose(round(logdata.loc[:,'High_Voltage[V]'][indStart:indEnd]/1000))
PD = np.transpose(-logdata.loc[:,'Analog_In_2[mV]'][indStart:indEnd])
time = timeLog[indStart:indEnd] - timeLog[indStart]
IPsignal = logdata.loc[:,'DMS_Even_Rea[0,1]'][indStart:indEnd]
nx, ny, nz = dcm['FloatProjectionData'].shape
print(f'Shape of projection data: ({nx}, {ny}, {nz})')
# plot the data from the dms
plt.figure()
plt.imshow(np.transpose(np.mean(dcm['FloatProjectionData'], 2)), vmax=0.05)
# plot the signals
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((2, 4), (0, 0))
# plt.hist(dcm['IntegrationPeriod'], 100) # bins=100
low_ip = dcm['IntegrationPeriod'][1:-1:2]
high_ip = dcm['IntegrationPeriod'][2:-1:2]
bins = np.linspace(200,600,100)
plt.hist(low_ip, bins, alpha=0.5, label='low')
plt.hist(high_ip, bins, alpha=0.5, label='high')
plt.xlabel('IP [us]')
plt.ylabel('frequency')
plt.legend()
plt.title('IP')
# TODO: print the mode (IP) of each peak on the graph
# plot the projection for a single detector at row 40, col 341
plt.subplot2grid((2, 4), (0, 1))
ys = dcm['FloatProjectionData'][341, row, :].squeeze()
plt.plot(ys)
plt.xlim([150, 200]) # views ranges from 1-nViews
plt.title('Profile of projection at (40,341)')
plt.xlabel('view')
plt.ylabel('DMS signal for single pixel')
plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
plt.subplot2grid((2, 4), (0, 2), colspan=2)
plt.imshow(np.transpose(dcm['FloatProjectionData'][:, row, 1:-1:2].squeeze()),
vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
plt.title('dms low')
plt.xlabel('columns')
plt.ylabel('views')
plt.xlim([1, 672])
plt.ylim([nz/2-99, nz/2])
# Will see either switching or no switching
plt.subplot2grid((2, 4), (1, 0))
plt.plot(time, KV)
plt.plot(time, np.transpose(IPsignal*max(KV)))
plt.title('Genrator kVp')
plt.xlabel('time')
plt.ylabel('voltage [kVp]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.ylim([60, 150])
plt.grid()
# refrence diode detector
plt.subplot2grid((2, 4), (1, 1))
plt.plot(time, PD)
plt.title('photodiode signal')
plt.xlabel('time')
plt.ylabel('voltage [mV]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.grid()
plt.subplot2grid((2, 4), (1, 2), colspan=2)
plt.imshow(np.transpose(dcm['FloatProjectionData'][:,row, 2:-1:2].squeeze()),
vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
plt.title('dms high')
plt.xlabel('columns')
plt.ylabel('views')
plt.xlim([1, 672])
plt.ylim([nz/2-99, nz/2])
plt.tight_layout()
# Save the png figure to the same data_path
# plt.savefig()
# display profiles
plt.figure(figsize=(11.5, 6))
plt.subplot(2,2,1)
plt.plot(dcm['FloatProjectionDataHz'][342,row,1:-1:2].squeeze())
plt.xlim([90, 672])
plt.title('profiles low (40, 342)')
plt.subplot(2,2,2)
plt.plot(dcm['FloatProjectionDataHz'][342,row,2:-1:2].squeeze())
plt.xlim([90, 672])
plt.title('profiles high (40, 342)')
plt.subplot(2,2,3)
plt.plot(dcm['FloatProjectionDataHz'][600,row,1:-1:2].squeeze())
plt.xlim([90, 672])
plt.title('profiles low (40,600)')
plt.subplot(2,2,4)
plt.plot(dcm['FloatProjectionDataHz'][600,row,2:-1:2].squeeze())
plt.xlim([90, 672])
plt.title('profiles high (40,600)')
# plt.savefig()
# show the histograms
plt.figure()
plt.subplot(1,2,1)
data = dcm['FloatProjectionData'][342,row,101:-1:2]
plt.hist(data[:],1000)
plt.title('histogram low data')
plt.subplot(1,2,2)
data = dcm['FloatProjectionData'][342,row,102:-1:2]
plt.hist(data[:],1000)
plt.title('histogram high data')
return 0
# TODO: add foldername as an input path, or use a defult ./results path
def display_main_figure(paths, foldername):
# read the data
logdata = read_log(paths[0])
dcm = read_dms_dat_nrows(paths[1])
row = 40 # set in the original script
# where to grab data
indStart = round(0.4/(1e-6)) # 400000
indEnd = len(logdata.loc[:,'Timestamp[us]'])-100
# compute correct time axis
timeLog = np.arange(1, len(logdata.loc[:,'Timestamp[us]'])) * 1.0e-6
dd = np.mean(np.mean(np.mean(dcm['FloatProjectionData'][300:400,:,:])))
KV = np.transpose(round(logdata.loc[:,'High_Voltage[V]'][indStart:indEnd]/1000))
PD = np.transpose(-logdata.loc[:,'Analog_In_2[mV]'][indStart:indEnd])
time = timeLog[indStart:indEnd] - timeLog[indStart]
IPsignal = logdata.loc[:,'DMS_Even_Rea[0,1]'][indStart:indEnd]
nx, ny, nz = dcm['FloatProjectionData'].shape
# plot the signals
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((2, 4), (0, 0))
# plt.hist(dcm['IntegrationPeriod'], 100) # bins=100
low_ip = dcm['IntegrationPeriod'][1:-1:2]
high_ip = dcm['IntegrationPeriod'][2:-1:2]
bins = np.linspace(200,600,100)
plt.hist(low_ip, bins, alpha=0.5, label='low')
plt.hist(high_ip, bins, alpha=0.5, label='high')
plt.xlabel('IP [us]')
plt.ylabel('frequency')
plt.legend()
plt.title('IP')
# plot the projection for a single detector at row 40, col 341
plt.subplot2grid((2, 4), (0, 1))
ys = dcm['FloatProjectionData'][341, row, :].squeeze()
plt.plot(ys)
plt.xlim([150, 200]) # views ranges from 1-nViews
plt.title('Profile of projection at (40,341)')
plt.xlabel('view')
plt.ylabel('DMS signal for single pixel')
plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
plt.subplot2grid((2, 4), (0, 2), colspan=2)
plt.imshow(np.transpose(dcm['FloatProjectionData'][:, row, 1:-1:2].squeeze()),
vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
plt.title('dms low')
plt.xlabel('columns')
plt.ylabel('views')
plt.xlim([1, 672])
plt.ylim([nz/2-99, nz/2])
# Will see either switching or no switching
plt.subplot2grid((2, 4), (1, 0))
plt.plot(time, KV)
plt.plot(time, np.transpose(IPsignal*max(KV)))
plt.title('Genrator kVp')
plt.xlabel('time')
plt.ylabel('voltage [kVp]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.ylim([60, 150])
plt.grid()
# refrence diode detector
plt.subplot2grid((2, 4), (1, 1))
plt.plot(time, PD)
plt.title('photodiode signal')
plt.xlabel('time')
plt.ylabel('voltage [mV]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.grid()
plt.subplot2grid((2, 4), (1, 2), colspan=2)
plt.imshow(np.transpose(dcm['FloatProjectionData'][:,row, 2:-1:2].squeeze()),
vmin=0, vmax= 0.5, aspect='auto', cmap='gray')
plt.title('dms high')
plt.xlabel('columns')
plt.ylabel('views')
plt.xlim([1, 672])
plt.ylim([nz/2-99, nz/2])
plt.tight_layout()
# TODO:
# plt.savefig(foldername) UNCOMMENT THIS AT THE END
#button for IP
ip_button = plt.axes([0.17, 0.535, 0.04, 0.02]) #xposition,yposition,width,height
enlarge_button = Button(ip_button, "Enlarge", color="white", hovercolor="green")
def enlarge(val):
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((1, 1), (0, 0))
# plt.hist(dcm['IntegrationPeriod'], 100) # bins=100
low_ip = dcm['IntegrationPeriod'][1:-1:2]
high_ip = dcm['IntegrationPeriod'][2:-1:2]
bins = np.linspace(200,600,100)
plt.hist(low_ip, bins, alpha=0.5, label='low')
plt.hist(high_ip, bins, alpha=0.5, label='high')
plt.xlabel('IP [us]')
plt.ylabel('frequency')
plt.legend()
plt.title("IP")
plt.show()
enlarge_button.on_clicked(enlarge)
#button for Generator kVp
kvp_button = plt.axes([0.17, 0.045, 0.04, 0.02]) #xposition,yposition,width,height
enlarge_button2 = Button(kvp_button, "Enlarge", color="white", hovercolor="green")
def enlarge2(val):
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((1, 1), (0, 0))
plt.plot(time, KV)
plt.plot(time, np.transpose(IPsignal*max(KV)))
plt.title('Genrator kVp')
plt.xlabel('time')
plt.ylabel('voltage [kVp]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.ylim([60, 150])
plt.grid()
plt.title("Generator kVp")
plt.show()
enlarge_button2.on_clicked(enlarge2)
#button for profile of projection
proj_button = plt.axes([0.425, 0.535, 0.04, 0.02]) #xposition,yposition,width,height
enlarge_button3 = Button(proj_button, "Enlarge", color="white", hovercolor="green")
def enlarge3(val):
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((0, 0), (0, 0))
ys = dcm['FloatProjectionData'][341, row, :].squeeze()
plt.plot(ys)
plt.xlim([150, 200]) # views ranges from 1-nViews
plt.xlabel('view')
plt.ylabel('DMS signal for single pixel')
plt.ylim([np.mean(ys)-3*np.std(ys), np.mean(ys)+3*np.std(ys)])
plt.title("Profile of projection")
plt.show()
enlarge_button3.on_clicked(enlarge3)
#button for photodiode signal
photodi_button = plt.axes([0.425, 0.045, 0.04, 0.02]) #xposition,yposition,width,height
enlarge_button4 = Button(photodi_button, "Enlarge", color="white", hovercolor="green")
def enlarge4(val):
plt.figure(figsize=(11.5, 6))
plt.subplot2grid((1, 1), (0, 0))
plt.plot(time, PD)
plt.xlabel('time')
plt.ylabel('voltage [mV]')
plt.xlim([0.1, 0.102]) # time ranges from time[0] to time[-1]
plt.grid()
plt.title("photodiode signal")
plt.show()
enlarge_button4.on_clicked(enlarge4)
plt.show()
#def enlarge2(val):
#plt.figure()
#plt.title("Generator kVp")
#plt.show()
#enlarge_button2.on_clicked(enlarge2)
#def enlarge3(val):
#plt.figure()
#plt.title("Profile of projection")
#plt.show()
#enlarge_button3.on_clicked(enlarge3)
#def enlarge4(val):
#plt.figure()
#plt.title("photodiode signal")
#plt.show()
#enlarge_button4.on_clicked(enlarge5)
# Checks the directory path has one .dat and one .csv file
# Returns the path to those files in an array if true
# or an empty array if false
def check_contents(data_path):
log_files = glob.glob(os.path.join(data_path, '*.csv'))
dat_files = glob.glob(os.path.join(data_path, '*.dat'))
if len(log_files) == 1 and len(dat_files) == 1:
return log_files + dat_files
else:
print(f'Directory {data_path} contains: ')
print(f'- Log files: {log_files}')
print(f'- Dat files: {dat_files}')
return []
# check_data('E:/CT_BENCH/2022-06-24_17_34_15-Edgar-140kv_100mAs')
# check_data('E:/CT_BENCH/data/2022_07_15/smaller_col/')
# plt.show()
# data = read_log('E:/CT_BENCH/2022-07-13/2022_07_13_UPENN_140kVp_80kVp_1150V_705V_330mA_285ms_thresh_94kV_tkeep_40-test_photodiode_13 Jul 2022_14_39_17_converted_.csv')
# print(data.columns)
# print(data.loc[102900:300000, 'Analog_In_3[mV]'])
| chan-andrew/LACTI | check_data.py | check_data.py | py | 14,721 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.fromfile",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.floor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_numbe... |
7111352063 | import urllib
import urllib2
from django import template
from django.conf import settings
from django.template.defaultfilters import truncatewords
from django.utils.html import strip_tags
from django.utils.safestring import mark_safe
from utils.acm_auth import get_ip
register = template.Library()
def fix_trunc(text):
""" Removes the space that truncatewords adds to strings before the
ellipses.
"""
return "%s..." % text[:-4]
@register.filter
def get_meta(obj):
""" Returns the meta name of the object. """
return obj._meta.verbose_name
@register.filter
def get_title(article, chars):
""" Return the title, and truncate the letters if chars is not None. """
return article.get_title()[:chars]
@register.simple_tag(takes_context=True)
def get_video_url(context, video):
    """ This filter takes an article object, and an IP address to return an
    embedable video URL for videos from the DL.
    """
    # Forward the DL (ColdFusion) session identifiers plus the client IP as
    # query parameters on the video URL.
    request = context['request']
    session = request.session
    # Use '&' when the URL already carries a query string, '?' otherwise.
    video_url = "%(video)s%(joiner)s%(query)s" % {
        'video': video,
        'joiner': '&' if '?' in video else '?',
        'query': urllib.urlencode({
            'CFID': session[settings.ACM_SESSION_VARS['CFID']],
            'CFTOKEN': session[settings.ACM_SESSION_VARS['CFTOKEN']],
            'ip': get_ip(request),
            'websvc': 1,
        }),
    }
    # Python 2 urllib2: fetch the embeddable markup with a custom User-Agent.
    opener = urllib2.build_opener()
    opener.addheaders = [('User-agent', settings.ACM_USER_AGENT)]
    return opener.open(video_url).read().strip()
@register.simple_tag(takes_context=True)
def get_article_body(context, article):
    """ Gets the body of the DL article using the user's IP address. """
    request = context['request']
    ip = get_ip(request)
    body = article.get_body(ip=ip)
    # mark_safe: the body is rendered as trusted HTML from the DL service.
    return mark_safe(body)
@register.simple_tag(takes_context=True)
def get_article_abstract(context, article, words):
    """ Gets the abstract of the article using the user's IP address. """
    abstract = article.get_abstract()
    # Fall back to the full body when the abstract is empty or the site-wide
    # placeholder text.
    if abstract in ["", settings.BLANK_ARTICLE_TEXT]:
        ip = get_ip(context['request'])
        abstract = article.get_body(ip=ip)
    # Strip markup before truncating so HTML tags do not count as words.
    return truncatewords(strip_tags(abstract), words)
| mnadifi/cie | source/apps/articles/templatetags.py | templatetags.py | py | 2,209 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.template.Library",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "urllib.urlencode",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "django.conf.... |
17013425141 | import datetime
from lambda_function import handler
from components import line_bot_api
from utils import utils_database
from linebot.models import (
JoinEvent,
MemberJoinedEvent,
MemberLeftEvent,
TextSendMessage
)
@handler.add(JoinEvent)
def handle_join(event):
    """Record the group the bot just joined; warn if it lacks permission."""
    group_id = event.source.group_id
    group_summary = line_bot_api.get_group_summary(group_id)
    event_info = {
        "group_id": group_summary.group_id,
        "group_name": group_summary.group_name,
        "datetime": datetime.datetime.now().strftime("%Y-%m-%dT%H:%M:%S")
    }
    utils_database.insert_joined_group_info(event_info)
    # Groups must be whitelisted before the bot may collect their messages;
    # otherwise reply with a notice asking the admin to grant permission.
    if not utils_database.check_is_allowed_collect_event_event_info_group(event.source.group_id):
        msg = "該群組尚未開通收納訊息功能,請向管理員申請權限,以便收納通報訊息"
        message = TextSendMessage(text=msg)
        line_bot_api.reply_message(event.reply_token, message)
        return
@handler.add(MemberJoinedEvent)
def handle_member_joined(event):
    """Store the profile of a newly joined group member and greet them."""
    current_dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    group_id = event.source.group_id
    summary = line_bot_api.get_group_summary(group_id)
    group_name = summary.group_name
    # NOTE(review): only the first member of the join event is recorded; a
    # batch join would drop the others — confirm this is intended.
    user_id = event.joined.members[0].user_id
    profile = line_bot_api.get_group_member_profile(group_id, user_id)
    display_name = profile.display_name
    picture_url = profile.picture_url
    event_info = {
        "datetime": current_dt,
        "group_id": group_id,
        "group_name": group_name,
        "user_id": user_id,
        "display_name": display_name,
        "picture_url": picture_url
    }
    try:
        utils_database.insert_user_info_when_join_group(event_info)
    except Exception as e:
        # Best-effort insert: log and continue so the greeting is still sent.
        print(e)
    msg = f'嗨,{ display_name }\n歡迎加入【防汛護水志工第六大隊颱風豪雨事件通報】,麻煩您輸入您的志工編號,方便老六紀錄您的通報結果哦!本群組會收納所有您提供的通報訊息與照片,敬請避免在本群組聊天、傳送問候圖,感謝您的配合與諒解,也謝謝您熱心協助!'
    line_bot_api.reply_message(event.reply_token, TextSendMessage(text=msg))
@handler.add(MemberLeftEvent)
def handle_member_left(event):
    """Mark a member as having left the group in the database."""
    current_dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    group_id = event.source.group_id
    # NOTE(review): reads the private _members list of the left event —
    # relies on linebot SDK internals; confirm no public accessor exists.
    user_id = event.left._members[0]["userId"]
    event_info = {
        "datetime": current_dt,
        "group_id": group_id,
        "user_id": user_id
    }
    status = utils_database.update_user_info_when_left_group(event_info)
    return status
| jialiang8931/WRA06-Volunteer-LineBot | src/components/handler_event_group.py | handler_event_group.py | py | 2,658 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "components.line_bot_api.get_group_summary",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "components.line_bot_api",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
... |
73387258343 | from datetime import datetime
from __init__ import db
from flask_login import UserMixin
from sqlalchemy.sql import func
class OrganizerEvent(db.Model):
    # Association table linking an organizer (User) to an Event they manage.
    id = db.Column(db.Integer, primary_key=True)
    organizer_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'), nullable=False)
class User(db.Model,UserMixin):
    # Application account; UserMixin supplies the flask-login interface.
    id = db.Column(db.Integer, primary_key=True)
    firstname = db.Column(db.String(50), nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    # NOTE(review): presumably stores a password *hash* (255 chars) —
    # confirm callers never write plaintext here.
    password = db.Column(db.String(255), nullable=False)
    is_organizer = db.Column(db.Boolean, default=False)
    # One-to-many: tickets held by this attendee (Ticket.attendee backref).
    tickets = db.relationship('Ticket', backref='attendee', lazy=True)
class Event(db.Model):
    # A bookable event; date defaults to the database-side current timestamp.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(100), nullable=False)
    description = db.Column(db.String(500), nullable=False)
    venue = db.Column(db.String(100), nullable=False)
    date = db.Column(db.DateTime(timezone=True), default=func.now())
    capacity = db.Column(db.Integer, nullable=False)
    # One-to-many: tickets issued for this event (Ticket.event backref).
    tickets = db.relationship('Ticket', backref='event', lazy=True)
class Ticket(db.Model):
    # A single admission ticket; barcode is unique across all tickets.
    id = db.Column(db.Integer, primary_key=True)
    barcode = db.Column(db.String(25), nullable=False, unique=True)
    attendee_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)
    event_id = db.Column(db.Integer, db.ForeignKey('event.id'), nullable=False)
| ntoghrul/Evento | models.py | models.py | py | 1,496 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "__init__.db.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "__init__.db",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "__init__.db.Column",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "__init__.db",
... |
26703340293 | import speech_recognition as sr
import wave
import sys
import os
import uuid
# Convert the raw PCM file named on the command line to WAV, then send it to
# Google's speech-recognition service and print the transcript.
pcmfn = sys.argv[1]
# Random scratch filename in the working directory (note: no .wav extension).
wavefn = os.path.join(str(uuid.uuid4().hex))
with open(pcmfn, 'rb') as pcm:
    pcmdata = pcm.read()
with wave.open(wavefn, 'wb') as wavfile: #convert pcm to wav
    # 2 channels, 2 bytes/sample, 48 kHz — assumes the PCM was captured with
    # these parameters; TODO confirm against the recording side.
    wavfile.setparams((2, 2, 48000, 0, 'NONE', 'NONE'))
    wavfile.writeframes(pcmdata)
try:
    r = sr.Recognizer()
    with sr.AudioFile(wavefn) as source:
        audio = r.record(source)
except:
    # NOTE(review): if this path is taken, `r`/`audio` stay undefined and the
    # recognize_google call below raises NameError — consider exiting here.
    print('SR failed')
os.remove(wavefn)
try:
    print(r.recognize_google(audio))
except:
    print('!Unrecognizable')
| nfsmith/DiscordStenographer | transcribePCM.py | transcribePCM.py | py | 586 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "uuid.uuid4",
"line_number": ... |
28482960968 | import pandas as pd
import numpy as np
import os, sys
import warnings
import matplotlib.pyplot as plt
import gmplot
from sklearn.cluster import DBSCAN
import random
import json
def remove_invalid_coord(df): #[-90; 90]
    """Drop rows whose coordinates are unset (zero marks missing fixes).

    NOTE(review): the intent is lat != 0 AND lon != 0, but `&` binds tighter
    than `!=` in query strings — confirm pandas evaluates this as intended,
    or add parentheses: '(lat != 0) & (lon != 0)'.
    """
    #return df.query('lat >= -90 & lat <= 90').query('lon >= -90 & lat <= 90')
    return df.query('lat != 0 & lon != 0')
def read_data(day='monday', city='chicago', types='crimes'):
    """Load one day's incident CSV into a DataFrame indexed by datetime.

    Expects rows of ``datetime,lat,lon,type`` under data/<day>/.
    NOTE(review): the file handle is never closed — consider a `with` block.
    """
    data_file = open('data/{0}/{1}_2018_{2}.csv'.format(day, types, city), 'r')
    crime_list = []
    for line in data_file:
        line = line.strip().split(',')
        item = {}
        # The timestamp is parsed three times to pull out datetime/month/hour.
        item['datetime'] = pd.to_datetime(str(line[0]), format='%Y/%m/%d %H:%M')
        item['month'] = pd.to_datetime(str(line[0]), format='%Y/%m/%d %H:%M').month
        item['hour'] = pd.to_datetime(str(line[0]), format='%Y/%m/%d %H:%M').hour
        item['lat'] = float(line[1])
        item['lon'] = float(line[2])
        item['type'] = line[3].strip()
        item['export'] = 0  # dummy column; later groupby().count() uses it
        crime_list.append(item)
    df = pd.DataFrame(crime_list)
    df.set_index('datetime', inplace=True)
    return remove_invalid_coord(df)
def read_all_data(city='chicago', types='crimes'):
    """Concatenate all seven weekday files for *city*/*types* into one frame.

    Rows are stacked in weekday order (Sunday first), exactly as the old
    pairwise-concat loop produced. Collecting the frames first and calling
    pd.concat once avoids the quadratic cost of repeated pairwise concats
    and removes the list/DataFrame sentinel juggling.
    """
    days = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday',
            'friday', 'saturday']
    frames = [read_data(day, city=city, types=types) for day in days]
    return pd.concat(frames)
def see_density():
    """Count crimes per (month, type) across all weekdays and export to CSV."""
    # Read all weekday files into a single frame.
    df = read_all_data()
    #print(df.head())
    df_month_type = df.groupby(['month', 'type']).count()
    #print(min(df_month_type['export']))
    #print(max(df_month_type['export']))
    crimes = df.groupby('type').all().index
    # (Disabled) per-crime monthly line plots saved under density/:
    # for c in crimes:
    #     df_crime = df.query("type == '%s'" % c)
    #     filtered = df_crime.groupby(['month']).count()
    #     plt.figure()
    #     months = ['', 'Jan.', 'Feb.', 'Mar.', 'Apr.', 'May', 'Jun.',
    #               'Jul.', 'Aug.', 'Sep.', 'Oct.', 'Nov.', 'Dec.']
    #     filtered['export'].plot(legend=None, title=c, style='.:')
    #     plt.xlabel('Months')
    #     plt.ylabel('Quantity of Crimes')
    #     plt.xticks(range(13), months, rotation=50)
    #     plt.yticks(range(0, 7000, 500), [x for x in range(0, 7000, 500)])
    #     if not os.path.exists('density'):
    #         os.makedirs('density')
    #     plt.savefig('density/'+ c + '.pdf', bbox_inches="tight", format='pdf')
    #     plt.clf()
    # Export per-(month, type) counts; 'export' carries the row counts.
    # NOTE(review): output name says 'austin' but read_all_data defaults to
    # chicago — confirm the filename is intended.
    df.groupby(['month', 'type']).count()['export'].to_csv('density_austin.csv')
###############################################################################################################
###############################################################################################################
###############################################################################################################
def colors(n):
ret = []
for i in range(n):
r = int(random.random() * 256)
g = int(random.random() * 256)
b = int(random.random() * 256)
r = int(r) % 256
g = int(g) % 256
b = int(b) % 256
ret.append('#{:02X}{:02X}{:02X}'.format(r,g,b))
return ret
def plot_heat(clusters, day, city, types):
    """Render the clustered points as a Google Maps heatmap HTML file."""
    plt.clf()
    # Center the map on the first point in the frame, zoom level 11.
    gmap = gmplot.GoogleMapPlotter(clusters.iloc[0]['lat'], clusters.iloc[0]['lon'], 11)
    lats, longs = [], []
    for indx, cluster in clusters.iterrows():
        lats.append(float(cluster['lat']))
        longs.append(float(cluster['lon']))
    gmap.heatmap(lats, longs)
    # Ensure the output directory exists before writing the HTML.
    if not os.path.exists('plottest'):
        os.makedirs('plottest')
    gmap.draw('plottest/{0}_{1}_{2}.html'.format(city, types, day))
def see_distribution():
    """DBSCAN-cluster all Chicago crime coordinates and heat-map the clusters."""
    city='chicago'
    types='crimes'
    # (Disabled) per-day clustering and heatmaps:
    # for day in ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']:
    #     df = read_data(day, city, types)
    #     df = df.drop(['type', 'hour', 'month', 'export'], axis=1)
    #     clustering = DBSCAN(eps=0.001, min_samples=3).fit_predict(df)
    #     df['cluster'] = clustering
    #     plot_heat(df.query('cluster != -1'), day, city, types)
    df = read_all_data(city, types)
    # Keep only the lat/lon columns for clustering.
    df = df.drop(['type', 'hour', 'month', 'export'], axis=1)
    # eps is in coordinate degrees; DBSCAN labels noise points as -1.
    clustering = DBSCAN(eps=0.001, min_samples=3).fit_predict(df)
    df['cluster'] = clustering
    plot_heat(df.query('cluster != -1'), 'all', city, types)
###############################################################################################################
###############################################################################################################
###############################################################################################################
def format_clusters(data):
    """Group non-noise rows into per-cluster lists of (lat, lon) tuples.

    Rows with cluster == -1 (noise) are dropped. A new list is opened each
    time the cluster id increases past the previous one.
    """
    grouped = [[]]
    prev_id = 0
    for _, entry in data.query('cluster > -1').iterrows():
        if entry['cluster'] > prev_id:
            grouped.append([])
            prev_id = entry['cluster']
        grouped[-1].append((entry['lat'], entry['lon']))
    return grouped
def get_coords(cluster):
    """Split a list of (lat, lon) pairs into parallel lat and lon lists."""
    lat = [point[0] for point in cluster]
    lon = [point[1] for point in cluster]
    return lat, lon
def plot_dots(clusters, day, city, types, each):
    """Scatter-plot each cluster in its own random color on a Google map."""
    plt.clf()
    # Skip plotting entirely when there are no clusters or the first is empty.
    if len(clusters) > 0 and len(clusters[0]) > 0:
        # Center on the first point of the first cluster, zoom level 11.
        gmap = gmplot.GoogleMapPlotter(float(clusters[0][0][0]), float(clusters[0][0][1]), 11)
        color_list = colors(len(clusters))
        indx = 0
        for cluster in clusters:
            lat, lon = get_coords(cluster)
            gmap.scatter(lat, lon, color_list[indx], edge_width=5, marker=False)
            indx += 1
            #break
        if not os.path.exists('plottest'):
            os.makedirs('plottest')
        gmap.draw('plottest/{0}_{1}_{2}_{3}_dots.html'.format(city, types, day, each))
def load_clusters(day):
    """Load the precomputed cluster JSON from clusters/<day>.json next to this file."""
    with open(str(os.path.dirname(os.path.abspath(__file__)))+"/clusters/" + str(day) + '.json', "r") as file:
        return json.load(file)
def see_maps():
    """Plot the saved Austin crash clusters for Monday as dot maps."""
    city='austin'
    types='crashes'
    day='monday'
    # JSON layout: {"<types>_2018_<city>": {"January": {"unkown": {...}}}}
    # ('unkown' is the literal key used by the producer of these files).
    clusters = load_clusters(day)['{0}_2018_{1}'.format(types, city)]['January']['unkown']
    for each in clusters:
        plot_dots(clusters[each], day, city, types, each)
see_distribution()
#see_maps()
| lucaslzl/ponche | timewindow/lookdata.py | lookdata.py | py | 5,789 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.to_datetime",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "pandas.DataF... |
22354796740 | from django.shortcuts import render
from remarcable_app.models import SearchHistory
from remarcable_app.query_functions import (
delete_old_searches,
pull_all_products,
pull_all_tagged_products,
pull_all_categories,
pull_all_tags,
products_to_array,
search_products,
tags_to_dictionary,
filter_by_tag,
filter_by_category,
strip_search_results
)
# this view defines the home landing page
def home(request):
    """
    we want to pull all of the tables of data we want to use first, so that they can be manipulated by filters.
    NOTE: This may not scale well with a large database, however for this case.. It may even be slower to join an entire table,
    then filter in one line of code each time we need specific data VS. pulling everything once and continually
    filtering that down like shown here...
    """
    product_table = pull_all_products()
    tag_product_table = pull_all_tagged_products()
    categories = pull_all_categories()
    just_tags = pull_all_tags()
    if request.method == "POST":
        # pull the currently selected category and tag values from the html radio button
        category_filter = request.POST.get('category')
        tag_filter = request.POST.get('tag')
        # since we have two different filter functions, we must call each one and update the product_table
        product_table = filter_by_category(product_table, category_filter, categories)
        product_table = filter_by_tag(product_table, tag_filter,just_tags)
    else:
        # GET request: no filters applied; 'None' sentinels keep the radios unset.
        category_filter = 'None'
        tag_filter = 'None'
    # utilize helper functions to parse our final sorted/filtered tables into usuable data for the front end
    product_data = products_to_array(product_table)
    tag_data = tags_to_dictionary(tag_product_table)
    return render(request,'home.html',
        {
            'product_data': product_data,
            'tag_data':tag_data,
            'categories':categories,
            'tags':just_tags,
            'category_filter':category_filter,
            'tag_filter': tag_filter
        })
# this view defines the search results page
def search_results(request):
    """
    we want to pull all of the tables of data we want to use first, so that they can be manipulated by filters.
    """
    product_table = pull_all_products()
    tag_product_table = pull_all_tagged_products()
    categories = pull_all_categories()
    just_tags = pull_all_tags()
    search_list = []
    final_products = []
    category_filter = 'None'
    tag_filter = 'None'
    """
    pull the last search term so that if search_results page is refreshed without submitting a new search,
    the search results are still shown and filters can be applied.
    """
    raw_search = str(SearchHistory.objects.last())
    # check if the POST method is from search bar, otherwise it must be from the filters
    if request.method == "POST" and request.POST.get('text_input') is not None:
        # pull the raw text from tax string from the search bar
        raw_search = request.POST.get('text_input')
        # create a new search_name object and send it to the database
        latest_search = SearchHistory.objects.create(search_name=raw_search)
        """
        in order to keep the SearchHistory database from getting too large, we will check to see if it is larger
        than 15 entries. If so, call the delete_old_searches function and delete the 10 oldest searches.
        """
        if len(SearchHistory.objects.all().values_list()) > 15:
            delete_old_searches()
        # strip the raw seach string of all white space and store remaining words in an array of strings
        search_list = strip_search_results(raw_search)
        # check to make sure the array is not empty
        if len(search_list) > 0:
            # utilize the search_products function to search entire database and return a list of matching product_ids
            final_products = search_products(search_list,product_table,tag_product_table)
            # filter the displayed product_table based on the matching product_ids found above
            product_table = product_table.filter(id__in = final_products)
    else:
        #if no new search is posted.. it must mean filters have been applied
        # strip the raw seach (last search result) string of all white space and store remaining words in an array of strings
        search_list = strip_search_results(raw_search)
        # check to make sure the array is not empty
        if len(search_list) > 0:
            # utilize the search_products function to search entire database and return a list of matching product_ids
            final_products = search_products(search_list,product_table,tag_product_table)
            # filter the displayed product_table based on the matching product_ids found above
            product_table = product_table.filter(id__in = final_products)
        # pull the currently selected category and tag values from the html radio button
        category_filter = request.POST.get('category')
        tag_filter = request.POST.get('tag')
        # since we have two different filter functions, we must call each one and update the product_table
        product_table = filter_by_category(product_table, category_filter, categories)
        product_table = filter_by_tag(product_table, tag_filter,just_tags)
    # utilize helper functions to parse our final sorted/filtered tables into usuable data for the front end
    product_data = products_to_array(product_table)
    tag_data = tags_to_dictionary(tag_product_table)
    return render(request, 'search.html',
        {
            'product_data': product_data,
            'tag_data':tag_data,
            'raw_search':raw_search,
            'categories':categories,
            'tags':just_tags,
            'category_filter':category_filter,
            'tag_filter': tag_filter
        })
{
"api_name": "remarcable_app.query_functions.pull_all_products",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "remarcable_app.query_functions.pull_all_tagged_products",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "remarcable_app.query_functions.pull_all_... |
23063800044 | from src.pipeline.predict_pipeline import camera
from src.utils import emotion_average
import spotipy
from spotipy.oauth2 import SpotifyClientCredentials
from src.utils import normalize
from src.utils import string
from src.exception import CustomException
import sys
import pandas as pd
def recommender(emotion,preference):
    """Pick a Spotify track id from a chart playlist to match *emotion*.

    emotion: 1 selects a track classified as positive/energetic, anything
    else selects a calmer one. preference: "1" uses one chart playlist,
    "2" another.
    """
    try:
        # SECURITY(review): client credentials are hard-coded — move them to
        # environment variables / configuration before shipping.
        sp = spotipy.Spotify(auth_manager=SpotifyClientCredentials(client_id="12496220faa84eb39d6fdd22d53f3599",
                                                                   client_secret="bc1f341b8551410c98f12d749c49fd33"))
        if preference=="1":
            playlist_link ="https://open.spotify.com/playlist/37i9dQZEVXbLZ52XmnySJg"
        elif preference=="2":
            playlist_link = "https://open.spotify.com/playlist/37i9dQZEVXbMDoHDwVN2tF"
        # Extract the playlist URI from the share link.
        playlist_URI = playlist_link.split("/")[-1].split("?")[0]
        results = sp.playlist(playlist_URI, fields='tracks,next')
        tracks=results['tracks']
        audio_features_list = []
        # Page through the playlist collecting audio features per track.
        while tracks:
            for item in tracks['items']:
                track = item['track']
                # Get the audio features for the track
                audio_features = sp.audio_features(track['id'])[0]
                # Add the audio features to the list
                audio_features_list.append(audio_features)
            # Get the next page of tracks (if there is one)
            tracks = sp.next(tracks)
        # Convert the list of audio features to a Pandas DataFrame
        b = pd.DataFrame(audio_features_list)
        # Normalize the mood-related features before combining them.
        b['valence']=normalize(b['valence'])
        b['energy']=normalize(b['energy'])
        b['tempo']=normalize(b['tempo'])
        # Combined score: positive values are treated as upbeat tracks.
        b['emotional_state']=(b['tempo']+b['valence'])/2
        emotions=[]
        for val in b['emotional_state']:
            if val>0:
                emotions.append(1)
            else:
                emotions.append(0)
        b['emotion']=emotions
        # Sample one candidate from each emotional bucket.
        extract1=b[b['emotion']==1]
        extract2=b[b['emotion']==0]
        random_row1 = extract1.sample(n=1)
        random_row2 = extract2.sample(n=1)
        track1 = sp.track(string(random_row1))
        track2 = sp.track(string(random_row2))
        if emotion==1:
            return(track1['id'])
        else :
            return(track2['id'])
    except Exception as e:
        raise CustomException(e,sys)
| AnshulDubey1/Music-Recommendation | src/pipeline/song_predictor.py | song_predictor.py | py | 2,474 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "spotipy.Spotify",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "spotipy.oauth2.SpotifyClientCredentials",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 38,
"usage_type": "call"
},
{
"api_name... |
16411706948 | import os
import shutil
import numpy as np
import cv2
import random
import copy
from keras.models import Sequential
from keras.layers.core import Dense, Flatten, Dropout
import tensorflow as tf
def qpixmap_to_array(qtpixmap):
    # Convert a QPixmap into a numpy array of shape (H, padded_W, 3),
    # dropping the alpha channel.
    img = qtpixmap.toImage()
    # Row stride may exceed the visible width; derive the padded width from
    # bytesPerLine so the flat buffer reshapes cleanly.
    temp_shape = (img.height(), img.bytesPerLine() * 8 // img.depth())
    temp_shape += (4,)
    ptr = img.bits()
    ptr.setsize(img.byteCount())
    result = np.array(ptr, dtype=np.uint8).reshape(temp_shape)
    # Keep only the first three channels (drop alpha).
    result = result[..., :3]
    return result
def img_to_candy(img):
    # Convert the image to grayscale, then extract Canny edge contours
    # (thresholds 100/200).
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_candy = cv2.Canny(img_gray, 100, 200)
    return img_candy
def create_model(input_shape, output_dim, hidden_layer: dict):
    """Build a Keras Sequential classifier from a hidden-layer description.

    hidden_layer may contain "convolutional_layer" (not implemented yet) and
    "fully_connected_layer", a list of dense-layer widths.
    """
    convolutional_layer = hidden_layer.get("convolutional_layer")
    fully_connected_layer = hidden_layer.get("fully_connected_layer")
    # Create the model: flatten the input, then stack dense layers.
    model = Sequential()
    model.add(Flatten(input_shape=input_shape))
    if convolutional_layer is not None:
        # TODO: convolutional layers are not implemented yet.
        pass
    if fully_connected_layer is not None:
        # Fully connected layers: one Dense + Dropout pair per list entry.
        for index, item in enumerate(fully_connected_layer):
            model.add(Dense(item, activation='relu'))
            model.add(Dropout(0.2))
    model.add(Dense(output_dim, activation='softmax'))
    return model
def model_mutation():
    # Model mutation (planned) — degrees of change from mild to drastic:
    # 1. exact copy, retrain with a lower learning rate
    # 2. exact copy, retrain
    # 3. same layer count, structure reshuffled, retrain
    # 4. structure shrunk by 5%, retrain
    # 5. structure grown by 5%, retrain
    pass
def arr_mutation_rearrange(arr_old: list):
    """Return a randomly reordered copy, e.g. [1, 2, 3] -> [2, 1, 3]."""
    shuffled = copy.deepcopy(arr_old)
    random.shuffle(shuffled)
    return shuffled
def arr_mutation_merge(arr_old: list):
    """Merge two random entries into one, e.g. [1, 2, 3] -> [3, 3] or [1, 5]."""
    merged = copy.deepcopy(arr_old)
    size = len(merged)
    if size <= 1:
        # Nothing to merge with a single entry.
        return merged
    keep, drop = random.sample(range(0, size), 2)
    merged[keep] = merged[keep] + merged[drop]
    del merged[drop]
    return merged
def arr_mutation_split(arr_old: list):
    """Split one random entry > 1 into two parts, e.g. [3, 4] -> [1, 2, 4]."""
    grown = copy.deepcopy(arr_old)
    # Only entries larger than 1 can be split into two positive parts.
    splittable = [i for i, val in enumerate(grown) if val > 1]
    if not splittable:
        return grown
    pos = random.sample(splittable, 1)[0]
    whole = grown[pos]
    left = random.randint(1, whole - 1)
    right = whole - left
    del grown[pos]
    grown.insert(pos, right)
    grown.insert(pos, left)
    return grown
def arr_mutation_increase(arr_old: list):
    """Grow one random entry by ~5% (at least 1)."""
    grown = copy.deepcopy(arr_old)
    pos = random.randint(0, len(grown) - 1)
    # int() truncation can yield 0 for small values; step is at least 1.
    step = int(grown[pos] * 0.05) or 1
    grown[pos] = grown[pos] + step
    return grown
def arr_mutation_decrease(arr_old: list):
    """Shrink one random entry by ~5% (at least 1), flooring the result at 1."""
    shrunk = copy.deepcopy(arr_old)
    pos = random.randint(0, len(shrunk) - 1)
    # int() truncation can yield 0 for small values; step is at least 1.
    step = int(shrunk[pos] * 0.05) or 1
    shrunk[pos] = shrunk[pos] - step
    if shrunk[pos] <= 0:
        shrunk[pos] = 1
    return shrunk
def hidden_layer_mutation(hidden_layer: dict):
    """Produce the six mutation variants of a hidden-layer description.

    Returns the original plus rearranged / merged / split / grown / shrunk
    fully-connected layouts; the convolutional part is passed through as-is.
    """
    convolutional_layer = hidden_layer.get("convolutional_layer")
    fully_connected_layer = hidden_layer.get("fully_connected_layer")
    return [
        {
            "mutation_type": "origin",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": copy.deepcopy(fully_connected_layer)
        },
        {
            "mutation_type": "mutations_rearrange",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": arr_mutation_rearrange(fully_connected_layer)
        },
        {
            "mutation_type": "mutations_merge",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": arr_mutation_merge(fully_connected_layer)
        },
        {
            "mutation_type": "mutations_split",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": arr_mutation_split(fully_connected_layer)
        },
        {
            "mutation_type": "mutations_increase",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": arr_mutation_increase(fully_connected_layer)
        },
        {
            "mutation_type": "mutations_decrease",
            "convolutional_layer": convolutional_layer,
            "fully_connected_layer": arr_mutation_decrease(fully_connected_layer)
        }
    ]
def model_save(model, model_path):
    # Save the model in TensorFlow SavedModel format, creating the
    # destination directory first if needed.
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    tf.saved_model.save(model, model_path)
def model_load(model_path):
    # Load a TensorFlow SavedModel; aborts the whole process if the path
    # does not exist.
    if not os.path.exists(model_path):
        print(f"[{model_path}] is not exists ,exit")
        exit(-1)
    model = tf.saved_model.load(model_path)
    return model
def create_folder(folder_path):
    """Create *folder_path* (including parents) if it does not already exist."""
    # exist_ok makes this race-free; the old exists()-then-makedirs pattern
    # could still raise if the directory appeared in between. (The previous
    # comment here — "delete model" — was a copy-paste from remove_folder.)
    os.makedirs(folder_path, exist_ok=True)
def remove_folder(folder_path):
    """Recursively delete *folder_path*; silently do nothing if it is absent."""
    if not os.path.exists(folder_path):
        return
    shutil.rmtree(folder_path)
if __name__ == '__main__':
    # Quick manual smoke test of the merge/split mutations.
    a = [100, 101, 102, 103, 104, 105, 106, 107, 108]
    a = [1, 2, 3]  # NOTE: overwrites the line above; only [1, 2, 3] is used
    print(a)
    b = arr_mutation_merge(a)
    c = arr_mutation_split(a)
    print(b)
    print(c)
| zhangxinzhou/game_explorer | game01_dino/new_test/game_utils.py | game_utils.py | py | 5,865 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"l... |
8444405928 | import cupy
import cupyx.scipy.fft
from cupy import _core
from cupy._core import _routines_math as _math
from cupy._core import fusion
from cupy.lib import stride_tricks
import numpy
# Reduction kernel computing a dot product: elementwise multiply, sum-reduce.
_dot_kernel = _core.ReductionKernel(
    'T x1, T x2',
    'T y',
    'x1 * x2',
    'a + b',
    'y = a',
    '0',
    'dot_product'
)
def _choose_conv_method(in1, in2, mode):
if in1.ndim != 1 or in2.ndim != 1:
raise NotImplementedError('Only 1d inputs are supported currently')
if in1.dtype.kind in 'bui' or in2.dtype.kind in 'bui':
return 'direct'
if _fftconv_faster(in1, in2, mode):
return 'fft'
return 'direct'
def _fftconv_faster(x, h, mode):
    """
    .. seealso:: :func: `scipy.signal._signaltools._fftconv_faster`
    """
    # TODO(Dahlia-Chehata): replace with GPU-based constants.
    # Until real GPU cost constants exist, always prefer the FFT path.
    return True
def convolve(a, v, mode='full'):
    """Returns the discrete, linear convolution of two one-dimensional sequences.

    Args:
        a (cupy.ndarray): first 1-dimensional input.
        v (cupy.ndarray): second 1-dimensional input.
        mode (str, optional): `valid`, `same`, `full`

    Returns:
        cupy.ndarray: Discrete, linear convolution of a and v.

    .. seealso:: :func:`numpy.convolve`

    """  # NOQA
    if a.size == 0:
        raise ValueError('a cannot be empty')
    if v.size == 0:
        raise ValueError('v cannot be empty')
    if v.ndim > 1:
        raise ValueError('v cannot be multidimensional array')
    # Keep the longer operand first, mirroring numpy.convolve semantics.
    if v.size > a.size:
        a, v = v, a
    a = a.ravel()
    v = v.ravel()
    # Heuristically dispatch between the direct and FFT implementations.
    method = _choose_conv_method(a, v, mode)
    if method == 'direct':
        out = _dot_convolve(a, v, mode)
    elif method == 'fft':
        out = _fft_convolve(a, v, mode)
    else:
        raise ValueError('Unsupported method')
    return out
def _fft_convolve(a1, a2, mode):
    """FFT-based 1-d convolution of ``a1`` and ``a2`` along the last axis."""
    offset = 0
    # Ensure a1 is the longer input; offset compensates 'same'-mode centering
    # when the shorter (kernel) length is even.
    if a1.shape[-1] < a2.shape[-1]:
        a1, a2 = a2, a1
        offset = 1 - a2.shape[-1] % 2

    # if either of them is complex, the dtype after multiplication will also be
    if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
        fft, ifft = cupy.fft.fft, cupy.fft.ifft
    else:
        # Real inputs: rfft/irfft do roughly half the work.
        fft, ifft = cupy.fft.rfft, cupy.fft.irfft

    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.shape[-1], a2.shape[-1]
    # Pad to a fast (smooth-factor) transform length.
    out_size = cupyx.scipy.fft.next_fast_len(n1 + n2 - 1)
    fa1 = fft(a1, out_size)
    fa2 = fft(a2, out_size)
    out = ifft(fa1 * fa2, out_size)

    # Slice the padded result down to the requested mode's window.
    if mode == 'full':
        start, end = 0, n1 + n2 - 1
    elif mode == 'same':
        start = (n2 - 1) // 2 + offset
        end = start + n1
    elif mode == 'valid':
        start, end = n2 - 1, n1
    else:
        raise ValueError(
            'acceptable mode flags are `valid`, `same`, or `full`.')

    out = out[..., start:end]

    # Integer inputs: round since the FFT path works in floating point.
    if dtype.kind in 'iu':
        out = cupy.around(out)

    return out.astype(dtype, copy=False)
def _dot_convolve(a1, a2, mode):
    """Direct 1-d convolution via a strided view and a dot-product kernel."""
    offset = 0
    # Ensure a1 is the longer input; offset compensates 'same'-mode centering
    # when the shorter (kernel) length is even.
    if a1.size < a2.size:
        a1, a2 = a2, a1
        offset = 1 - a2.size % 2

    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.size, a2.size
    a1 = a1.astype(dtype, copy=False)
    a2 = a2.astype(dtype, copy=False)

    # Pad a1 so every output position sees a full window of length n2.
    if mode == 'full':
        out_size = n1 + n2 - 1
        a1 = cupy.pad(a1, n2 - 1)
    elif mode == 'same':
        out_size = n1
        pad_size = (n2 - 1) // 2 + offset
        a1 = cupy.pad(a1, (n2 - 1 - pad_size, pad_size))
    elif mode == 'valid':
        out_size = n1 - n2 + 1

    # Zero-copy sliding windows over a1; each row is one output position.
    stride = a1.strides[0]
    a1 = stride_tricks.as_strided(a1, (out_size, n2), (stride, stride))
    # Convolution = dot product against the reversed kernel per window.
    output = _dot_kernel(a1, a2[::-1], axis=1)
    return output
def clip(a, a_min, a_max, out=None):
    """Clips the values of an array to a given interval.

    This is equivalent to ``maximum(minimum(a, a_max), a_min)``, while this
    function is more efficient.

    Args:
        a (cupy.ndarray): The source array.
        a_min (scalar, cupy.ndarray or None): The left side of the interval.
            When it is ``None``, it is ignored.
        a_max (scalar, cupy.ndarray or None): The right side of the interval.
            When it is ``None``, it is ignored.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: Clipped array.

    .. seealso:: :func:`numpy.clip`

    Notes
    -----
    When `a_min` is greater than `a_max`, `clip` returns an
    array in which all values are equal to `a_max`.

    """
    # Inside cupy.fuse, defer to the fusion machinery instead of executing.
    if fusion._is_fusing():
        return fusion._call_ufunc(_math.clip,
                                  a, a_min, a_max, out=out)

    # TODO(okuta): check type
    return a.clip(a_min, a_max, out=out)
# sqrt_fixed is deprecated.
# numpy.sqrt is fixed in numpy 1.11.2.
sqrt = sqrt_fixed = _core.sqrt
cbrt = _core.create_ufunc(
'cupy_cbrt',
('e->e', 'f->f', 'd->d'),
'out0 = cbrt(in0)',
doc='''Elementwise cube root function.
.. seealso:: :data:`numpy.cbrt`
''')
square = _core.create_ufunc(
'cupy_square',
('b->b', 'B->B', 'h->h', 'H->H', 'i->i', 'I->I', 'l->l', 'L->L', 'q->q',
'Q->Q', 'e->e', 'f->f', 'd->d', 'F->F', 'D->D'),
'out0 = in0 * in0',
doc='''Elementwise square function.
.. seealso:: :data:`numpy.square`
''')
absolute = _core.absolute
fabs = _core.create_ufunc(
'cupy_fabs',
('e->e', 'f->f', 'd->d'),
'out0 = abs(in0)',
doc='''Calculates absolute values element-wise.
Only real values are handled.
.. seealso:: :data:`numpy.fabs`
''')
_unsigned_sign = 'out0 = in0 > 0'
_complex_sign = '''
if (in0.real() == 0) {
out0 = (in0.imag() > 0) - (in0.imag() < 0);
} else {
out0 = (in0.real() > 0) - (in0.real() < 0);
}
'''
sign = _core.create_ufunc(
'cupy_sign',
('b->b', ('B->B', _unsigned_sign), 'h->h', ('H->H', _unsigned_sign),
'i->i', ('I->I', _unsigned_sign), 'l->l', ('L->L', _unsigned_sign),
'q->q', ('Q->Q', _unsigned_sign), 'e->e', 'f->f', 'd->d',
('F->F', _complex_sign), ('D->D', _complex_sign)),
'out0 = (in0 > 0) - (in0 < 0)',
doc='''Elementwise sign function.
It returns -1, 0, or 1 depending on the sign of the input.
.. seealso:: :data:`numpy.sign`
''')
heaviside = _core.create_ufunc(
'cupy_heaviside',
('ee->e', 'ff->f', 'dd->d'),
'''
if (isnan(in0)) {
out0 = in0;
} else if (in0 == 0) {
out0 = in1;
} else {
out0 = (in0 > 0);
}
''',
doc='''Compute the Heaviside step function.
.. seealso:: :data:`numpy.heaviside`
'''
)
_float_preamble = '''
#ifndef NAN
#define NAN __int_as_float(0x7fffffff)
#endif
'''
_float_maximum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(max(in0, in1))')
maximum = _core.create_ufunc(
'cupy_maximum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_maximum),
('ff->f', _float_maximum),
('dd->d', _float_maximum),
('FF->F', _float_maximum),
('DD->D', _float_maximum)),
'out0 = max(in0, in1)',
preamble=_float_preamble,
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.maximum`
''',
cutensor_op=('OP_MAX', 1, 1), scatter_op='max')
_float_minimum = ('out0 = (isnan(in0) | isnan(in1)) ? out0_type(NAN) : '
'out0_type(min(in0, in1))')
minimum = _core.create_ufunc(
'cupy_minimum',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', _float_minimum),
('ff->f', _float_minimum),
('dd->d', _float_minimum),
('FF->F', _float_minimum),
('DD->D', _float_minimum)),
'out0 = min(in0, in1)',
preamble=_float_preamble,
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the NaN.
.. seealso:: :data:`numpy.minimum`
''',
cutensor_op=('OP_MIN', 1, 1), scatter_op='min')
fmax = _core.create_ufunc(
'cupy_fmax',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmax(in0, in1)'),
('ff->f', 'out0 = fmax(in0, in1)'),
('dd->d', 'out0 = fmax(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = max(in0, in1)',
doc='''Takes the maximum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmax`
''')
fmin = _core.create_ufunc(
'cupy_fmin',
('??->?', 'bb->b', 'BB->B', 'hh->h', 'HH->H', 'ii->i', 'II->I', 'll->l',
'LL->L', 'qq->q', 'QQ->Q',
('ee->e', 'out0 = fmin(in0, in1)'),
('ff->f', 'out0 = fmin(in0, in1)'),
('dd->d', 'out0 = fmin(in0, in1)'),
'FF->F', 'DD->D'),
'out0 = min(in0, in1)',
doc='''Takes the minimum of two arrays elementwise.
If NaN appears, it returns the other operand.
.. seealso:: :data:`numpy.fmin`
''')
_nan_to_num_preamble = '''
template <class T>
__device__ T nan_to_num(T x, T nan, T posinf, T neginf) {
if (isnan(x))
return nan;
if (isinf(x))
return x > 0 ? posinf : neginf;
return x;
}
template <class T>
__device__ complex<T> nan_to_num(complex<T> x, T nan, T posinf, T neginf) {
T re = nan_to_num(x.real(), nan, posinf, neginf);
T im = nan_to_num(x.imag(), nan, posinf, neginf);
return complex<T>(re, im);
}
'''
_nan_to_num = _core.create_ufunc(
'cupy_nan_to_num_',
('????->?', 'bbbb->b', 'BBBB->B', 'hhhh->h', 'HHHH->H',
'iiii->i', 'IIII->I', 'llll->l', 'LLLL->L', 'qqqq->q', 'QQQQ->Q',
('eeee->e',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('ffff->f',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('dddd->d',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Ffff->F',
'out0 = nan_to_num(in0, in1, in2, in3)'),
('Dddd->D',
'out0 = nan_to_num(in0, in1, in2, in3)')),
'out0 = in0',
preamble=_nan_to_num_preamble,
doc='''Elementwise nan_to_num function.
.. seealso:: :func:`numpy.nan_to_num`
''')
def _check_nan_inf(x, dtype, neg=None):
    """Coerce a user-supplied nan/posinf/neginf replacement to ``dtype``.

    Returns a 0-d cupy array holding the replacement value.  ``neg``
    selects the dtype's smallest (True) or largest (False) finite value
    when ``x`` is None — the NumPy defaults for neginf/posinf.
    """
    if dtype.char in 'FD':
        # For complex output the replacement values are real scalars.
        dtype = cupy.dtype(dtype.char.lower())
    if dtype.char not in 'efd':
        # Integer/boolean output cannot represent nan/inf; the kernel
        # passes these dtypes through, so any placeholder works here.
        x = 0
    elif x is None and neg is not None:
        # NumPy default: largest (posinf) / smallest (neginf) finite value.
        x = cupy.finfo(dtype).min if neg else cupy.finfo(dtype).max
    elif cupy.isnan(x):
        x = cupy.nan
    elif cupy.isinf(x):
        # Normalize any infinite input to +-inf with the matching sign.
        x = cupy.inf * (-1)**(x < 0)
    return cupy.asanyarray(x, dtype)
def nan_to_num(x, copy=True, nan=0.0, posinf=None, neginf=None):
    """Replace NaN with zero and infinity with large finite numbers (default
    behaviour) or with the numbers defined by the user using the `nan`,
    `posinf` and/or `neginf` keywords.

    .. seealso:: :func:`numpy.nan_to_num`
    """
    if not isinstance(x, cupy.ndarray):
        # Scalar input: wrap it in a 0-d array so the kernel can run.
        out = cupy.full((), x)
    else:
        # copy=False rewrites ``x`` in place.
        out = cupy.empty_like(x) if copy else x
    dtype = out.dtype
    # Normalize the three replacement values to the output dtype.
    nan = _check_nan_inf(nan, dtype)
    posinf = _check_nan_inf(posinf, dtype, False)
    neginf = _check_nan_inf(neginf, dtype, True)
    return _nan_to_num(x, nan, posinf, neginf, out=out)
def real_if_close(a, tol=100):
    """If input is complex with all imaginary parts close to zero, return real
    parts.

    "Close to zero" is defined as `tol` * (machine epsilon of the type for
    `a`).

    .. warning::

        This function may synchronize the device.

    .. seealso:: :func:`numpy.real_if_close`
    """
    if not issubclass(a.dtype.type, cupy.complexfloating):
        # Already real: nothing to do.
        return a
    if tol > 1:
        # tol > 1 is interpreted as a multiple of the machine epsilon;
        # smaller values are used verbatim as an absolute tolerance.
        f = numpy.finfo(a.dtype.type)
        tol = f.eps * tol
    if cupy.all(cupy.absolute(a.imag) < tol):  # may synchronize the device
        a = a.real
    return a
@cupy._util.memoize(for_each_device=True)
def _get_interp_kernel(is_complex):
    """Compile (and memoize per device) the elementwise kernel for interp.

    ``is_complex`` selects the typedefs used for the slope arithmetic:
    complex outputs interpolate along real x-coordinates, so the "real"
    scalar type is double while the values stay complex.
    """
    in_params = 'raw V x, raw U idx, '
    in_params += 'raw W fx, raw Y fy, U len, raw Y left, raw Y right'
    out_params = 'Z y'  # output dtype follows NumPy's
    if is_complex:
        preamble = 'typedef double real_t;\n'
    else:
        preamble = 'typedef Z real_t;\n'
    preamble += 'typedef Z value_t;\n'
    preamble += cupy._sorting.search._preamble  # for _isnan
    # idx comes from searchsorted(side='right'), hence the "- 1" below.
    code = r'''
        U x_idx = idx[i] - 1;

        if ( _isnan<V>(x[i]) ) { y = x[i]; }
        else if (x_idx < 0) { y = left[0]; }
        else if (x[i] == fx[len - 1]) {
            // searchsorted cannot handle both of the boundary points,
            // so we must detect and correct ourselves...
            y = fy[len - 1];
        }
        else if (x_idx >= len - 1) { y = right[0]; }
        else {
            const Z slope = (value_t)(fy[x_idx+1] - fy[x_idx]) / \
                ((real_t)fx[x_idx+1] - (real_t)fx[x_idx]);
            Z out = slope * ((real_t)x[i] - (real_t)fx[x_idx]) \
                + (value_t)fy[x_idx];
            if (_isnan<Z>(out)) {
                out = slope * ((real_t)x[i] - (real_t)fx[x_idx+1]) \
                    + (value_t)fy[x_idx+1];
                if (_isnan<Z>(out) && (fy[x_idx] == fy[x_idx+1])) {
                    out = fy[x_idx];
                }
            }
            y = out;
        }
        '''
    return cupy.ElementwiseKernel(
        in_params, out_params, code, 'cupy_interp', preamble=preamble)
def interp(x, xp, fp, left=None, right=None, period=None):
    """ One-dimensional linear interpolation.

    Args:
        x (cupy.ndarray): a 1D array of points on which the interpolation
            is performed.
        xp (cupy.ndarray): a 1D array of points on which the function values
            (``fp``) are known.
        fp (cupy.ndarray): a 1D array containing the function values at the
            the points ``xp``.
        left (float or complex): value to return if ``x < xp[0]``. Default is
            ``fp[0]``.
        right (float or complex): value to return if ``x > xp[-1]``. Default is
            ``fp[-1]``.
        period (None or float): a period for the x-coordinates. Parameters
            ``left`` and ``right`` are ignored if ``period`` is specified.
            Default is ``None``.

    Returns:
        cupy.ndarray: The interpolated values, same shape as ``x``.

    .. note::
        This function may synchronize if ``left`` or ``right`` is not already
        on the device.

    .. seealso:: :func:`numpy.interp`
    """
    if xp.ndim != 1 or fp.ndim != 1:
        raise ValueError('xp and fp must be 1D arrays')
    if xp.size != fp.size:
        raise ValueError('fp and xp are not of the same length')
    if xp.size == 0:
        raise ValueError('array of sample points is empty')
    if not x.flags.c_contiguous:
        raise NotImplementedError('Non-C-contiguous x is currently not '
                                  'supported')
    # The kernel computes slopes in float64, so x must be safely castable.
    x_dtype = cupy.common_type(x, xp)
    if not cupy.can_cast(x_dtype, cupy.float64):
        raise TypeError('Cannot cast array data from'
                        ' {} to {} according to the rule \'safe\''
                        .format(x_dtype, cupy.float64))
    if period is not None:
        # The handling of "period" below is modified from NumPy's
        if period == 0:
            raise ValueError("period must be a non-zero value")
        period = abs(period)
        # left/right are ignored in periodic mode (the domain wraps).
        left = None
        right = None
        x = x.astype(cupy.float64)
        xp = xp.astype(cupy.float64)
        # normalizing periodic boundaries
        x %= period
        xp %= period
        asort_xp = cupy.argsort(xp)
        xp = xp[asort_xp]
        fp = fp[asort_xp]
        # Wrap one sample around each end so interpolation crosses the seam.
        xp = cupy.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
        fp = cupy.concatenate((fp[-1:], fp, fp[0:1]))
        assert xp.flags.c_contiguous
        assert fp.flags.c_contiguous
    # NumPy always returns float64 or complex128, so we upcast all values
    # on the fly in the kernel
    out_dtype = 'D' if fp.dtype.kind == 'c' else 'd'
    output = cupy.empty(x.shape, dtype=out_dtype)
    idx = cupy.searchsorted(xp, x, side='right')
    # Fill values for out-of-range x; may synchronize when copying scalars.
    left = fp[0] if left is None else cupy.array(left, fp.dtype)
    right = fp[-1] if right is None else cupy.array(right, fp.dtype)
    kern = _get_interp_kernel(out_dtype == 'D')
    kern(x, idx, xp, fp, xp.size, left, right, output)
    return output
| cupy/cupy | cupy/_math/misc.py | misc.py | py | 16,182 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "cupy._core.ReductionKernel",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cupy._core",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "cupy.fft",
"line_number": 89,
"usage_type": "attribute"
},
{
"api_name": "cupy.fft",
"l... |
16968857057 | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from operator import __or__ as OR
from functools import reduce
import six
from django.conf import settings
try:
from django.utils.encoding import force_unicode as force_text
except ImportError:
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from django.contrib.admin import helpers
from django.contrib.admin.utils import model_ngettext
from celery import chain
from edw.admin.entity.forms import EntitiesUpdateTermsAdminForm
def update_terms(modeladmin, request, queryset, task, template=None):
    """
    ENG: Update terms for multiple entities
    RUS: Обновляет термины для нескольких объектов
    """
    # Entities are handed to the celery ``task`` in chunks of this size.
    CHUNK_SIZE = getattr(settings, 'EDW_UPDATE_TERMS_ACTION_CHUNK_SIZE', 100)
    opts = modeladmin.model._meta
    app_label = opts.app_label
    if request.POST.get('post'):
        # Second step: the confirmation form was submitted.
        form = EntitiesUpdateTermsAdminForm(request.POST)
        if form.is_valid():
            to_set = [x.id for x in form.cleaned_data['to_set']]
            to_unset = [x.id for x in form.cleaned_data['to_unset']]
            n = queryset.count()
            if n and (to_set or to_unset):
                i = 0
                tasks = []
                while i < n:
                    chunk = queryset[i:i + CHUNK_SIZE]
                    for obj in chunk:
                        # Record an admin log entry per affected object.
                        obj_display = force_text(obj)
                        modeladmin.log_change(request, obj, obj_display)
                    # One celery signature per chunk; joined with ``|`` below.
                    tasks.append(task.si([x.id for x in chunk], to_set, to_unset))
                    i += CHUNK_SIZE
                # reduce(OR, tasks) links the chunk signatures sequentially.
                chain(reduce(OR, tasks)).apply_async()
                modeladmin.message_user(request, _("Successfully proceed %(count)d %(items)s.") % {
                    "count": n, "items": model_ngettext(modeladmin.opts, n)
                })
            # Return None to display the change list page again.
            return None
    else:
        # First step: render an empty confirmation form.
        form = EntitiesUpdateTermsAdminForm()
    if len(queryset) == 1:
        objects_name = force_text(opts.verbose_name)
    else:
        objects_name = force_text(opts.verbose_name_plural)
    title = _("Update terms for multiple entities")
    context = {
        "title": title,
        'form': form,
        "objects_name": objects_name,
        'queryset': queryset,
        "opts": opts,
        "app_label": app_label,
        'action_checkbox_name': helpers.ACTION_CHECKBOX_NAME,
        'media': modeladmin.media,
        'action': 'update_terms',
    }
    # Display the confirmation page
    kwargs = {} if six.PY3 else {'current_app': modeladmin.admin_site.name}
    return TemplateResponse(request, template if template is not None else "edw/admin/base_actions/update_terms.html",
                            context, **kwargs)


update_terms.short_description = _("Modify terms for selected %(verbose_name_plural)s")
| infolabs/django-edw | backend/edw/admin/base_actions/update_terms.py | update_terms.py | py | 3,011 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "django.conf.settings",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "edw.admin.entity.forms.EntitiesUpdateTermsAdminForm",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "django.utils.encoding.force_text",
"line_number": 46,
"usage... |
19608346992 | # PROBLEM:
# Given an array A of non-negative integers, return an array
# consisting of all the even elements of A, followed by all
# the odd elements of A.
# You may return any answer array that satisfies this condition.
# EXAMPLE:
# Input: [3,1,2,4]
# Output: [2,4,3,1]
# The outputs [4,2,3,1], [2,4,1,3], and [4,2,1,3] would also be accepted.
from typing import List
class Solution:
    # APPROACH: PARTITION INTO TWO LISTS
    # - Collect the even numbers and the odd numbers separately
    #   (each keeping its original relative order), then return the
    #   concatenation evens + odds.  Runs in O(n) time and O(n) space.
    def approach(self, A: List[int]) -> List[int]:
        """Return the even elements of ``A`` followed by the odd elements.

        Relative order within each parity group is preserved, which is
        one of the valid answers the problem accepts.
        """
        # Comprehensions replace the original index loop over
        # range(len(A)) — same behavior, idiomatic and clearer.
        evens = [num for num in A if num % 2 == 0]
        odds = [num for num in A if num % 2 != 0]
        # - If you want, you can sort each list as well for output
        #   clarity (the problem does not require it).
        return evens + odds
if __name__ == '__main__':
    # Quick manual check: evens should precede odds, input order preserved.
    solution = Solution()
    A = [3, 1, 2, 4, 7, 8, 9, 15]
    print(solution.approach(A))
{
"api_name": "typing.List",
"line_number": 27,
"usage_type": "name"
}
] |
6797262441 | from utils.faker_factory import faker
from ..mails import BaseMailView
class OpportunityReminderCloseMailView(BaseMailView):
    """Mail view for the "time left until opportunity closure" reminder."""
    # Template rendered as the e-mail body.
    template_name = 'mails/opportunity/opportunity_reminder_close.html'
    # Context keys that callers must always supply when sending this mail.
    mandatory_mail_args = [
        'title',
        'created_by_name',
        'duedate_timedelta',
        'duedate',
        'public_url',
    ]
    section = 'opportunities'
    # %-interpolated with the mail kwargs (see mandatory_mail_args).
    subject = '%(duedate_timedelta)s until opportunity closure'

    def get_mock_data(self, optional=True):
        # Placeholder payload used to preview/test the template; the
        # public_url is a random fake URI path.
        mock_data = {
            'title': '[Role Name] for [Project Name]',
            'created_by_name': '[SDM Name]',
            'duedate_timedelta': '3 days',
            'duedate': '[May 29, 12AM]',
            'disable_notification_url': None,
            'public_url': '/{}'.format(faker.uri_path()),
        }
        return mock_data
| tomasgarzon/exo-services | service-exo-mail/mail/mailviews/opportunity_reminder_close.py | opportunity_reminder_close.py | py | 850 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "mails.BaseMailView",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "utils.faker_factory.faker.uri_path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "utils.faker_factory.faker",
"line_number": 29,
"usage_type": "name"
}
] |
25462981448 | import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
def system_of_odes(t, y):
    """Right-hand side of the damped-oscillator system in first-order form.

    ``y`` stacks positions then velocities as a flat array of shape (2n,);
    the return value stacks their time derivatives in the same layout:
    d(pos)/dt = vel and d(vel)/dt = -0.001*vel - 3*pos.
    """
    half = len(y) // 2
    positions, velocities = y[:half], y[half:]
    accelerations = -0.001 * velocities - 3 * positions
    return np.concatenate([velocities, accelerations])
# Define the initial conditions: positions first, then velocities, matching
# the state layout expected by system_of_odes.
initial_conditions = [1, 0]  # Initial values for y1 and y2
initial_derivatives = [0, 1]  # Initial values for the derivatives dy1/dt and dy2/dt
initial_state = np.concatenate([initial_conditions, initial_derivatives])

# Define the time span for the solution
time_span = (0, 5)  # Solve from t=0 to t=5

# Solve the system of ODEs
solution = solve_ivp(system_of_odes, time_span, initial_state)
# NOTE: removed a stray breakpoint() call here — it dropped the script into
# the debugger on every run, halting non-interactive execution.

# Access the solution
t_values = solution.t  # Array of time values
n = 2
y1_values = solution.y[:n]  # Array of y1 values (positions)
y2_values = solution.y[n:]  # Array of y2 values (velocities)

# Plot the solution
plt.plot(solution.t, y1_values[0], label='y1')
plt.plot(solution.t, y2_values[0], label='y2')
plt.xlabel('Time')
plt.ylabel('Solution')
plt.title('Solution of the System of ODEs')
plt.legend()
plt.grid(True)
plt.show()
| mjanszen/Wind_turbine_aeroelasticity | src/dynamics_only_test.py | dynamics_only_test.py | py | 1,287 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.concatenate",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "scipy.integrate.solve_ivp",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "matplot... |
39303528940 | #!usr/bin/env python
# -*- coding:utf-8 -*-
"""
@author: admin
@file: main.py
@time: 2021/09/02
@desc:
"""
import time
import torch
from model import config
from model.data_process import PrepareData
from model.Transformer import make_model
from model.LabelSmoothing import LabelSmoothing
from model.opt import NoamOpt
from train_evaluate import train
from predict import predict
def main():
    """Train the Transformer translation model, then run prediction."""
    # Data preprocessing
    data = PrepareData(config.TRAIN_FILE, config.DEV_FILE)
    src_vocab = len(data.en_word_dict)
    tgt_vocab = len(data.cn_word_dict)
    # src_vocab 5493
    # tgt_vocab 2537
    print("src_vocab %d" % src_vocab)
    print("tgt_vocab %d" % tgt_vocab)

    # Initialize the model
    model = make_model(
        src_vocab,
        tgt_vocab,
        config.LAYERS,
        config.D_MODEL,
        config.D_FF,
        config.H_NUM,
        config.DROPOUT
    )

    # Training
    print(">>>>>>> start train")
    train_start = time.time()
    criterion = LabelSmoothing(tgt_vocab, padding_idx=0, smoothing=0.0)
    # NoamOpt wraps Adam with the warmup-based learning-rate schedule.
    optimizer = NoamOpt(config.D_MODEL, 1, 2000,
                        torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
    train(data, model, criterion, optimizer)
    print(f"<<<<<<< finished train, cost {time.time() - train_start:.4f} seconds")

    # Prediction
    # Load the model weights saved during training.
    model.load_state_dict(torch.load(config.SAVE_FILE))
    # Start prediction
    print(">>>>>>> start predict")
    evaluate_start = time.time()
    predict(data, model)
    print(f"<<<<<<< finished evaluate, cost {time.time() - evaluate_start:.4f} seconds")


if __name__ == '__main__':
    main()
| coinyue/Transformer | main.py | main.py | py | 1,629 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "model.data_process.PrepareData",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "model.config.TRAIN_FILE",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "model.config",
"line_number": 22,
"usage_type": "name"
},
{
"api_name... |
5547122437 | #!/local/cluster/bin/python
#biopython take three regions, invert middle, then put together
import sys
from Bio import SeqIO
# CLI args: strain name plus start/end coordinates of the region to excise.
strain = sys.argv[1]
# include 50 bp margin so as not to interrupt att site in rotated genome
largestart = int(sys.argv[2]) + 50
largeend = int(sys.argv[3]) - 50

infile = "../" + strain + ".gbk"
outfile = strain + ".noSI.gbk"

record = SeqIO.read(infile, "genbank")
# Keep everything before the region and everything after it, then join.
# NOTE(review): the first slice starts at 1, so the record's first base is
# dropped — confirm this offset is intentional.
firstpart = record[1:largestart]
endpart = record[largeend:len(record.seq)]
newrecord = firstpart + endpart

# Context manager ensures the output file is flushed and closed even if
# SeqIO.write raises; the original handle was never closed.
with open(outfile, 'w') as fw:
    SeqIO.write(newrecord, fw, "genbank")
| osuchanglab/BradyrhizobiumGenomeArchitecture | remove_monopartite.py | remove_monopartite.py | py | 547 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "Bio.SeqIO.read",
"line_num... |
4419617010 | """
Simple BBS
簡単な掲示板
要件:
1. ページ上部に大きくSimple BBSと書かれている
2. Username と Messageを入力するフォームがある
3. 送信と書かれたスイッチがある
4. 入力された文字が掲示板に表示されていく(下段に追加されていく)
5. Username に何も入力されていない状態で送信された場合は名無しさんにする
6. Message に何も入力されていない状態で送信された場合は空欄にする
"""
import os
from flask import Flask, render_template, request
app = Flask(__name__)
def _read_documents():
    """Return the stored [username, message] pairs, oldest first.

    Posts live in 'document.txt', one comma-separated line per post; an
    empty list is returned when the file does not exist yet.
    """
    documents = []
    if os.path.isfile('document.txt'):
        with open('document.txt', 'r') as file:
            line = file.readline()[:-1]  # strip the trailing newline
            while line:
                documents.append(line.split(','))
                line = file.readline()[:-1]
    return documents


@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the board (GET) or append a new post and re-render (POST).

    The file-reading code was duplicated in both branches; it is now the
    shared ``_read_documents`` helper, which also guards against a missing
    file (the original POST branch read without checking existence).
    """
    # First visit / reload: just show the existing posts.
    if request.method == 'GET':
        return render_template('BBS.html', documents=_read_documents())
    # The submit button was clicked.
    if request.method == 'POST':
        username = request.form['username']
        message = request.form['message']
        # Requirement 5: a blank username becomes the anonymous placeholder.
        if username == '':
            username = '名無しさん'
        # Append the new post (comma-separated, one post per line).
        with open('document.txt', mode='a') as file:
            file.write(f'{username},{message}\n')
        return render_template('BBS.html', documents=_read_documents())
if __name__ == '__main__':
app.run(debug=True)
| tetsuya-yamamoto-ai-learn/practice01-F | WebAP.py | WebAP.py | py | 2,101 | python | ja | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "os.path.isfile",
... |
40017524881 | from scipy.stats import zscore
from datetime import datetime as dt
import numpy as np
import pandas as pd
RAW_DIR = "raw/"
RAW_TRAIN_PATH = RAW_DIR + "raw_train_data.csv"
RAW_PREDICT_PATH = RAW_DIR + "raw_predict_data.csv"
CYCLE_AMOUNT_PATH = RAW_DIR + "cycle_amount.csv"
INPUT_DIR = "input/"
TRAIN_DATA_PATH = INPUT_DIR + "train_data.csv"
TEST_DATA_PATH = INPUT_DIR + "test_data.csv"
PREDICT_DATA_PATH = INPUT_DIR + "predict_data.csv"
TEST_PERCENTAGE = 0.1
AMOUNT_LOW_LIMIT = 80
AMOUNT_HIGH_LIMIT = 180
class WeatherDataGenerator:
    """Build a date-by-hour weather feature table (optionally labelled).

    Pipeline: split the datetime column, keep the numeric weather columns,
    drop closed hours, pivot to date x hour, append month/weekday, then
    attach and filter the cycle-amount labels when ``amount_data`` is given.
    """

    # Hours excluded from the feature table (the facility is closed).
    #CLOSED_HOURS = [ "22:00", "23:00", "0:00", "1:00", "2:00", "3:00", "4:00", "5:00" ]
    CLOSED_HOURS = [ "22:00", "23:00", "0:00", "1:00", "2:00", "3:00", "4:00", "5:00", "13:00", "14:00", "15:00", "19:00", "20:00", "21:00" ]

    def __init__(self, raw_data=None, amount_data=None):
        # Feature table under construction (becomes date x hour after pivot).
        self.weather_data = pd.DataFrame()
        self.raw_data = raw_data
        # None when generating prediction data (no labels available).
        self.amount_data = amount_data

    def generate_data(self):
        # Order matters: each step mutates self.weather_data in place.
        self.__store_split_datetime()
        self.__store_real_values()
        self.__drop_closed_hours()
        self.__pivot_date_x_hour()
        self.__store_categolized_values()
        self.__store_label_values()
        self.__drop_invalid_label_values()

    def get_data(self):
        return self.weather_data

    def __store_split_datetime(self):
        print("Splitting datetime to and hour...".replace(" to and ", " to date and "))
        # index 1, 2, 3 is used later
        self.weather_data = self.raw_data[0].apply(lambda datehour: pd.Series(datehour.split(" "), index=[0, 4]))

    def __drop_closed_hours(self):
        print("Dropping closed hours columns...")
        drop_rows = self.weather_data.loc[self.weather_data[4].isin(self.CLOSED_HOURS)]
        self.weather_data.drop(drop_rows.index, inplace=True)

    def __store_real_values(self):
        print("Storing temprature and precipiation and wind speed...")
        for j in [1, 2, 3]:
        #for j in [ 1, 3 ]: # Passing wind speed
            self.weather_data[j] = self.raw_data[j]

    def __normalize_real_values(self):
        # NOTE(review): not called from generate_data() — appears to be kept
        # for experimentation.
        print("Normalizing real values...")
        # Normalize real_value columns
        for j in [1, 2, 3]:
        #for j in [ 1, 3 ]: # Passing wind speed
            # Regression problems doesn't need to be normalized?
            self.weather_data[j] = zscore(self.weather_data[j], axis=0)

    def __pivot_date_x_hour(self):
        print("Pivoting columns date x hour...")
        # Pivot data to date x hour
        self.weather_data = self.weather_data.pivot(index=0, columns=4)

    def __store_categolized_values(self):
        print("Appending categolized values...")
        # Append oter weathers and labels after pivot
        for l in self.weather_data.index:
            # Index values are "%Y/%m/%d" date strings after the pivot.
            date = dt.strptime(l, "%Y/%m/%d")
            self.weather_data.loc[l, 5] = date.month
            self.weather_data.loc[l, 6] = date.weekday()

    def __store_label_values(self):
        # Reset indexes of self.weather_data as default interger, to match index of two dataframes
        self.weather_data.reset_index(drop=True, inplace=True)
        if self.amount_data is None:
            print("Skipping appending label values...")
        else:
            print("Appending label values...")
            self.weather_data[7] = self.amount_data[0]

    def __drop_invalid_label_values(self):
        print("Dropping invalid label values...")
        #if self.weather_data[7] is None:
        if self.amount_data is None:
            print("Skipping dropping invalid label values...")
        else:
            # NOTE(review): this drops rows whose label lies INSIDE
            # [AMOUNT_LOW_LIMIT, AMOUNT_HIGH_LIMIT] — confirm the mid-range
            # really is the band to discard, not the other way round.
            drop_rows = self.weather_data[(AMOUNT_LOW_LIMIT <= self.weather_data[7]) & (self.weather_data[7] <= AMOUNT_HIGH_LIMIT)]
            self.weather_data.drop(drop_rows.index, inplace=True)
def read_raw_data():
    """Load the raw weather CSVs and the cycle-amount labels from RAW_DIR.

    Returns (train_df, predict_df, amount_df) as headerless DataFrames.
    """
    print("Reading weather and cycle amount data...")
    # Adding 0 - 3 numbers as header names.
    raw_train_data_df = pd.read_csv(RAW_TRAIN_PATH, header=None, names=np.arange(4))
    raw_predict_data_df = pd.read_csv(RAW_PREDICT_PATH, header=None, names=np.arange(4))
    amount_data_df = pd.read_csv(CYCLE_AMOUNT_PATH, header=None)
    return raw_train_data_df, raw_predict_data_df, amount_data_df
def make_train_test_data(weather_df):
    """Randomly split ``weather_df`` into (train, test) by TEST_PERCENTAGE.

    A random TEST_PERCENTAGE fraction of the rows is held out as the test
    set; all remaining rows form the training set.
    """
    print("Make train and test data by TEST_PERCENTAGE...")
    held_out = weather_df.sample(frac=TEST_PERCENTAGE)
    remaining = weather_df.drop(index=held_out.index)
    return remaining, held_out
def raw_to_weather():
    """End-to-end pipeline: raw CSVs -> train/test/predict input CSVs."""
    print('*********************************')
    print('Generating train and test data...')
    print('*********************************')
    raw_train_data_df, raw_predict_data_df, amount_data_df = read_raw_data()
    train_data_generator = WeatherDataGenerator(raw_train_data_df, amount_data_df)
    train_data_generator.generate_data()
    train_df, test_df = make_train_test_data(train_data_generator.get_data())

    print('*********************************')
    print('Saving train and test data...')
    print('*********************************')
    train_df.to_csv(TRAIN_DATA_PATH, header=None)
    test_df.to_csv(TEST_DATA_PATH, header=None)

    print('*********************************')
    print('Generating predict data...')
    print('*********************************')
    # No amount_data here: prediction data carries no label column.
    predict_data_generator = WeatherDataGenerator(raw_predict_data_df)
    predict_data_generator.generate_data()
    predict_df = predict_data_generator.get_data()

    print('*********************************')
    print('Saving predict data...')
    print('*********************************')
    predict_df.to_csv(PREDICT_DATA_PATH, header=None)
def run():
    # Entry point used by the CLI guard below and by importers.
    raw_to_weather()


if __name__ == "__main__":
    run()
| ytorii/park-amount | wdnn/raw_to_input_csv.py | raw_to_input_csv.py | py | 5,411 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "scipy.stats.zscore",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.s... |
18082073278 | #!/usr/bin/env python
"""
Neato control program to make a robot follow a line (like a roadway) and react
to signs in its path.
"""
import rospy
from geometry_msgs.msg import Twist, PoseWithCovariance, Pose, Point, Vector3
from sensor_msgs.msg import LaserScan, Image
import math
import numpy as np
import cv2
from cv_bridge import CvBridge
import helper_functions as hp
import signal
import sys
##### GLOBAL SPEED CONSTANT #####
rotate_speed_limit = 0.3
##### GLOBAl STATE CONSTANTS #####
DRIVE = 0
STOP = 1
LOOK_BOTH_WAYS = 2
class Controller:
    """Line-following Neato controller with stop-sign reaction.

    State machine: DRIVE (follow the grey line and watch for red signs),
    STOP (pause at a sign), LOOK_BOTH_WAYS (wait until the camera view
    stabilises — SIFT features match an older frame — before driving on).
    """

    def __init__(self):
        ##### ROS INITIALIZATION #####
        rospy.init_node('caribou')
        self.pub = rospy.Publisher('cmd_vel', Twist, queue_size=10)
        self.command = Twist()
        self.threshold = 0  # TODO: CHANGE THIS NUMBER
        self.bridge = CvBridge()
        rospy.Subscriber('/camera/image_raw', Image, self.react_to_image)

        ##### IMAGE SIZE #####
        self.win_size = (640, 480)
        self.win_height_cropped = 480*0.9

        ##### SET STATE #####
        self.state = DRIVE

        ##### INITIALIZE WINDOWS #####
        cv2.namedWindow('set_bounds')
        cv2.namedWindow('bw_window_cropped')
        cv2.namedWindow('Output')

        ##### INITIALIZE SIFT #####
        self.sift = cv2.SIFT()
        self.bf = cv2.BFMatcher()
        # Rolling buffer of recent SIFT descriptor sets (see react_to_image).
        self.past_descriptors = []

        ##### SIGN REACTION BEHAVIOR #####
        # pause_duration: how long to stay stopped at a sign;
        # ignore_stop_sign_threshold: cool-down before reacting to another.
        self.pause_duration = rospy.Duration(3)
        self.ignore_stop_sign_threshold = self.pause_duration + rospy.Duration(3)
        self.last_stop_sign = rospy.Time.now() - self.ignore_stop_sign_threshold

        ##### COLOR PARAMETERS (hand-tweaked) #####
        # Persisted grey-line and red-sign bounds, one value per line.
        # NOTE(review): eval() on file contents — assumes a trusted local file.
        settings_file = open('settings.txt', 'r')
        self.grey_lb = int(settings_file.readline())
        self.grey_ub = int(settings_file.readline())
        self.red_lb = eval(settings_file.readline())
        self.red_ub = eval(settings_file.readline())
        settings_file.close()

        ##### CALIBRATION SLIDERS #####
        cv2.createTrackbar('grey l', 'set_bounds', self.grey_lb, 255,
                           self.set_grey_lower)
        cv2.createTrackbar('grey u', 'set_bounds', self.grey_ub, 255,
                           self.set_grey_upper)
        cv2.createTrackbar('B l', 'set_bounds', self.red_lb[0], 255,
                           self.set_b_l)
        cv2.createTrackbar('B u', 'set_bounds', self.red_ub[0], 255,
                           self.set_b_u)
        cv2.createTrackbar('G l', 'set_bounds', self.red_lb[1], 255,
                           self.set_g_l)
        cv2.createTrackbar('G u', 'set_bounds', self.red_ub[1], 255,
                           self.set_g_u)
        cv2.createTrackbar('R l', 'set_bounds', self.red_lb[2], 255,
                           self.set_r_l)
        cv2.createTrackbar('R u', 'set_bounds', self.red_ub[2], 255,
                           self.set_r_u)

        ##### START OFF STOPPED #####
        self.stop()
        self.send()

    def set_grey_lower(self, val):
        """ Use sliders to set GREY lower bound. """
        self.grey_lb = val

    def set_grey_upper(self, val):
        """ Use sliders to set GREY upper bound. """
        self.grey_ub = val

    def set_b_l(self, val):
        """ Use sliders to set BLUE lower bound. """
        self.red_lb[0] = val

    def set_b_u(self, val):
        """ Use sliders to set BLUE upper bound. """
        self.red_ub[0] = val

    def set_g_l(self, val):
        """ Use sliders to set GREEN lower bound. """
        self.red_lb[1] = val

    def set_g_u(self, val):
        """ Use sliders to set GREEN upper bound. """
        self.red_ub[1] = val

    def set_r_l(self, val):
        """ Use sliders to set RED lower bound. """
        self.red_lb[2] = val

    def set_r_u(self, val):
        """ Use sliders to set RED upper bound. """
        self.red_ub[2] = val

    def react_to_image(self, msg):
        """
        Process image messages from ROS and stash them in an attribute called
        cv_image for subsequent processing

        Grabs image stream from camera, called cv_image, and processes the image for
        line following and sign detection
        """
        self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        cv2.waitKey(5)
        if self.state == DRIVE:
            # Steer toward the grey line found in the cropped lower band.
            direction = hp.find_line(self.cv_image,
                                     (0, self.win_height_cropped), self.win_size,
                                     (self.grey_lb, self.grey_lb, self.grey_lb),
                                     (self.grey_ub, self.grey_ub, self.grey_ub),
                                     self.threshold)
            self.drive(direction)
            sign_test = hp.find_stop_sign(self.cv_image,
                                          tuple(self.red_lb), tuple(self.red_ub))
            # React only if the cool-down since the last sign has elapsed.
            if (sign_test and
                    (rospy.Time.now() - self.ignore_stop_sign_threshold) >
                    self.last_stop_sign):
                # After pause_duration, switch from STOP to LOOK_BOTH_WAYS.
                rospy.Timer(self.pause_duration,
                            self.look_both_ways, oneshot=True)
                self.state = STOP
        elif self.state == STOP:
            self.stop()
        elif self.state == LOOK_BOTH_WAYS:
            # Compare SIFT descriptors against a ~10-frame-old view; when
            # most features match, the scene is static and driving resumes.
            gray = cv2.cvtColor(self.cv_image, cv2.COLOR_BGR2GRAY)
            kp, des = self.sift.detectAndCompute(gray, None)
            if len(self.past_descriptors) > 10:
                previous_des = self.past_descriptors.pop(0)
                matches = self.bf.knnMatch(des, previous_des, k=2)
                # Apply ratio test
                good_count = 0
                for m, n in matches:
                    if m.distance < 0.75*n.distance:
                        good_count += 1
                if good_count > 0.6*len(previous_des):
                    self.state = DRIVE
            self.past_descriptors.append(des)
        cv2.imshow("Output", self.cv_image)
        cv2.waitKey(5)

    def look_both_ways(self, event):
        """ Callback function to set the robot's state to LOOK_BOTH_WAYS """
        self.last_stop_sign = rospy.Time.now()
        self.state = LOOK_BOTH_WAYS

    def drive(self, direction):
        """ Changes self.command in response to the direction inputed """
        if direction[1]:
            if direction[0] == 0:
                # Line is centred: go straight.
                self.command.angular.z = 0
                self.command.linear.x = .1
            else:
                # Proportional steering: offset from image centre (640/2),
                # clamped to rotate_speed_limit; slow down while turning.
                proportion = (float(direction[0]) / (640/2))
                self.command.angular.z = (min(proportion, rotate_speed_limit)
                                          if proportion > 0 else max(proportion, -rotate_speed_limit))
                self.command.linear.x = .1 * (1 - abs(proportion))
        else:
            # No line found: stop.
            self.stop()

    def stop(self):
        """ Sets self.command to stop all bot motion """
        self.command.linear.x = 0
        self.command.angular.z = 0

    def send(self):
        """ Publishes self.command to ROS """
        self.pub.publish(self.command)

    def signal_handler(self, signal, frame):
        """ Saves calibration settings to settings.txt file before closing """
        # NOTE: the ``signal`` parameter shadows the imported module inside
        # this method.
        settings_file = open('settings.txt', 'w')
        settings_file.write(str(self.grey_lb) + '\n')
        settings_file.write(str(self.grey_ub) + '\n')
        settings_file.write(str(self.red_lb) + '\n')
        settings_file.write(str(self.red_ub) + '\n')
        settings_file.close()
        print('Exiting gracefully...')
        sys.exit(0)
controller = Controller()
# Persist calibration settings on Ctrl-C before exiting.
signal.signal(signal.SIGINT, controller.signal_handler)
# Main loop: keep publishing the most recent velocity command.
while not rospy.is_shutdown():
    controller.send()
{
"api_name": "rospy.init_node",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.Twist",
"line_number": 32,
"usage_type": "argument"
},
{
"api_name": "geometry_... |
38810777586 | ''' CAS schema's for the roads '''
__name__ = "CASSchema.py"
__author__ = "COUTAND Bastien"
__date__ = "07.12.22"
from datetime import datetime
from pydantic import BaseModel, Field
class CASBase(BaseModel):
    '''
    CAS Schema: fields shared by all CAS representations.
    '''
    # Host address of the CAS server.
    cas_ip: str = Field(
        description='ip for the CAS'
    )
    # TCP port of the CAS server.
    cas_port: int = Field(
        description='port for the CAS'
    )
class CASCreate(CASBase):
    '''
    CAS schema for the creation of a CAS in the database.
    Inherits all fields from CASBase; no extra fields are needed at
    creation time.
    '''
    pass
class CASInDB(CASBase):
    '''
    CAS schema as stored in the database: adds the primary key and the
    creation timestamp to the base connection fields.
    '''
    # Primary key of the CAS row.
    id: int = Field(
        description='ID in the database of the CAS'
    )
    created_at: datetime = Field(
        # BUG FIX: the original used ``default=datetime.utcnow``, which makes
        # the *function object* the default value instead of a timestamp.
        # ``default_factory`` calls the function at instantiation time.
        default_factory=datetime.utcnow,
        description='Date of the creation for an CAS'
    )

    class Config:
        # Allow building this schema directly from ORM objects.
        orm_mode = True
{
"api_name": "pydantic.BaseModel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pydantic.Field",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pydantic.Field",
... |
4005677116 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 16:06:36 2018
@author: jose.molina
"""
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 5 15:39:40 2018
@author: jose.molina
"""
from bs4 import BeautifulSoup
from selenium import webdriver
import requests
from xml.etree import ElementTree
from time import sleep
import pandas as pd
from dateutil.relativedelta import relativedelta
import re
def getvalueofnode(node):
    """Return the given value unchanged (None stays None)."""
    return node
# Column names for the scraped restaurant data (Spanish names kept as-is
# because they become the spreadsheet headers).
dfcols = ['nombre', 'link', 'overall_rating','ranking','rango_precio','num_opiniones','ops_exc','ops_muybueno','ops_normal','ops_malo','ops_pesimo','punt_servicio','punt_comida','punt_calprecio','direccion','ubicacion','telefono']
df_xml = pd.DataFrame(columns=dfcols)

# Madrid restaurant listing page on TripAdvisor.
url = 'https://www.tripadvisor.es/Restaurants-g187514-Madrid.html#EATERY_OVERVIEW_BOX'
# NOTE(review): hard-coded Windows path to chromedriver -- machine specific.
browser = webdriver.Chrome(r'C:\Users\Jose.Molina\Downloads\WinPython\projects\tripadvisor\chromedriver.exe')
#'/home/josemolina/programs_python/geckodriver'
browser.implicitly_wait(10)
browser.get(url)
# Switch the listing to alphabetical order (element: li id="alphabetical").
alpha = browser.find_element_by_id('alphabetical')
alpha.click()
browser.implicitly_wait(10)
contador = 0
next = True  # NOTE(review): shadows the builtin `next`; never set to False below.
# Each pass through this loop scrapes one full results page.
while next == True:
    html = BeautifulSoup(browser.page_source, 'html.parser')
    # Every restaurant card on the listing carries a data-index attribute.
    table = html.find_all('div',{'data-index': re.compile(r".*")})
    for row in table:
        item = row.find('div', class_='title')
        link = item.find('a')
        link ="https://www.tripadvisor.es"+link['href']
        browser.get(link)
        #print(link['href'])
        #elemento = browser.find_element_by_xpath('//a[@href="'+link['href']+'"]')
        #elemento.click()
        browser.get(browser.current_url)
        bar_html = BeautifulSoup(browser.page_source,'html.parser')
        # --- content to scrape ---
        name = bar_html.find('h1',{'class':'heading_title'})
        rating = bar_html.find('span',{'class':'overallRating'})
        ranking = (bar_html.find('span',{'class':'header_popularity'})).find('span')
        print(ranking.text)
        precio = (bar_html.find('span',{'class':['ui_column',"is-6","price"]})).find('span')
        print(precio.text)
        # --- end of content to scrape ---
        # Columns not scraped yet are filled with their own names as placeholders.
        df_xml = df_xml.append(
            pd.Series([getvalueofnode(name.text), getvalueofnode(link), getvalueofnode(rating.text),getvalueofnode(ranking.text),getvalueofnode(precio.text),'num_opiniones','ops_exc','ops_muybueno','ops_normal','ops_malo','ops_pesimo','punt_servicio','punt_comida','punt_calprecio','direccion','ubicacion','telefono'], index=dfcols),
            ignore_index=True)
        contador += 1
        print(f'Contrato numero: {contador}')
        # Navigate back to the listing page for the next restaurant.
        browser.execute_script('window.history.go(-1)')
    #if (times == 0):
    browser.get(browser.current_url)
    # Click through to the next page of results.
    # NOTE(review): find_element raises when no "a.nav" element exists (last
    # page), so the loop ends via an exception and the to_excel call below is
    # presumably never reached -- confirm intended behavior.
    nextpage = browser.find_element_by_css_selector('a.nav').click()
    # if class = disabled :
    #     next = False
    # else:
    #
    #
    # try:
    #     nextpage = browser.find_element_by_css_selector('a.nav').click()
    ##     nextpage = browser.execute_script(" ta.restaurant_filter.paginate(this.getAttribute('data-offset'));; ta.trackEventOnPage('STANDARD_PAGINATION', 'next', '2', 0); return false;")
    #     if (nextpage):
    #         nextpage.click()
    #     else:
    #         next = False
    # except:
    #     next = False
#browser.close()
#     expediente = browser.get(link.get_attribute('href'))
#expediente.click()
df_xml.to_excel("tripadvisor_restaurantes_madrid.xlsx", index = False)
# get the id of the current window
#main_window = browser.cur | josemolinag/scraping | cosas.py | cosas.py | py | 3,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "bs4.Bea... |
16732411603 | from typing import List
class Solution:
    def findReplaceString(self, s: str, indices: List[int], sources: List[str], targets: List[str]) -> str:
        """Apply all matching replacements to s "simultaneously".

        Replacement i substitutes targets[i] for sources[i] at position
        indices[i], but only if sources[i] actually occurs there.
        Processing positions from right to left keeps the smaller indices
        valid while the string is being edited.
        """
        # IDIOM FIX: sorted() accepts any iterable, so wrapping zip() in
        # list() was unnecessary.
        for i, source, target in sorted(zip(indices, sources, targets), reverse=True):
            n = len(source)
            if s[i:i + n] == source:
                s = s[:i] + target + s[i + n:]
        return s
| wLUOw/Leetcode | 2023.08/833/Solution.py | Solution.py | py | 372 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 5,
"usage_type": "name"
}
] |
17359757102 | from typing import Optional, List
import torch
import uuid
from torch import nn
from supertransformerlib import Core
class DefaultParameterLayer(nn.Module):
    """
    An NTM extension layer designed to contain within it the default
    state for some sort of parameter, and to be manipulable to create,
    interpolate, and reset batch elements at as fine a granularity as
    the caller provides.

    It also contains a unique id (``ident``) which identifies what
    parameter it is correlated with.
    """
    def __init__(self,
                 parameter: nn.Parameter
                 ):
        """
        :param parameter: The default value for the managed parameter.
        """
        super().__init__()
        # uuid1 gives every layer instance a distinct identity string.
        self.ident = str(uuid.uuid1())
        self.default_parameter = parameter

    @torch.jit.export
    def make_batch(self,
                   batch_shape: Core.StandardShapeType
                   ):
        """
        :param batch_shape: The shape of the batch, in terms of an int, a list of ints, or a 1d tensor
        :return: A batch consisting of the broadcast defaults
        """
        broadcast_shape: List[int] = Core.standardize_shape(batch_shape, "batch_shape").tolist()
        expansion_length = len(broadcast_shape)
        # -1 entries leave the parameter's own dimensions unchanged in expand().
        broadcast_shape += [-1] * self.default_parameter.dim()
        defaults = self.default_parameter
        # Prepend one singleton dim per batch dimension, then expand to the
        # full batch shape (expand returns a view -- no copy is made).
        for _ in range(expansion_length):
            defaults = defaults.unsqueeze(0)
        tensor = defaults.expand(broadcast_shape)
        return tensor

    @torch.jit.export
    def reset_to_parameters(self,
                            reset_probability: torch.Tensor,
                            tensor: torch.Tensor) -> torch.Tensor:
        """
        A small helper method: accepts a fully expanded tensor and performs
        linear interpolation between it and the (unbroadcast) defaults using
        the reset probabilities. A value of 0 means do not reset, while 1
        means completely reset.

        :param reset_probability: A float tensor of values between 0..1. The rank of this tensor can
                                  only be greater than or equal to the rank of parameter 'tensor', and
                                  the dimensions here must match the initial dimensions of 'tensor'
        :param tensor: A data tensor which we wish to interpolate with.
        :return: An interpolated tensor between the tensor and the defaults, mediated by the reset probability
        """
        defaults = self.default_parameter
        reset_values = defaults.expand_as(tensor)
        # Append trailing singleton dims so the probabilities broadcast
        # across the parameter dimensions.
        while reset_probability.dim() < reset_values.dim():
            reset_probability = reset_probability.unsqueeze(-1)
        # Linear interpolation: 0 keeps the input, 1 fully restores defaults.
        updated_tensor = tensor * (1 - reset_probability) + reset_values * reset_probability
        return updated_tensor

    @torch.jit.export
    def force_reset_to_defaults(self,
                                reset_mask: torch.Tensor,
                                tensor: torch.Tensor)->torch.Tensor:
        """
        Forces a reset to defaults where the reset mask is marked as true.

        :param reset_mask: A mask which matches tensor's dimensions on the initial dimensions. Elements
                           marked true will be reset to defaults
        :param tensor: The tensor to reset
        :return: A tensor which has had elements replaced with the defaults where appropriate
        """
        defaults = self.default_parameter
        reset_values = defaults.expand_as(tensor)
        # Append trailing singleton dims so the mask broadcasts across the
        # parameter dimensions.
        while reset_mask.dim() < reset_values.dim():
            reset_mask = reset_mask.unsqueeze(-1)
        # Hard replacement where the mask is True, original value elsewhere.
        updated_tensor = torch.where(reset_mask, reset_values, tensor)
        return updated_tensor
def make_memory_parameter(
        memory_size: int,
        memory_width: int,
        ensemble_shape: Optional[Core.StandardShapeType] = None,
        dtype: Optional[torch.dtype] = None,
        device: Optional[torch.device] = None
        )->DefaultParameterLayer:
    """
    Build a DefaultParameterLayer holding a memory parameter of shape
    [*ensemble_shape, memory_size, memory_width], initialized with
    kaiming-uniform values, that supports resetting to its defaults.
    """
    if ensemble_shape is None:
        full_shape = [memory_size, memory_width]
    else:
        prefix: List[int] = Core.standardize_shape(ensemble_shape, "ensemble_shape").tolist()
        full_shape = prefix + [memory_size, memory_width]
    tensor = torch.zeros(full_shape, dtype=dtype, device=device)
    torch.nn.init.kaiming_uniform_(tensor)
    return DefaultParameterLayer(nn.Parameter(tensor))
def make_weights_parameter(memory_size: int,
                           num_heads: int,
                           ensemble_shape: Optional[Core.StandardShapeType] = None,
                           dtype: Optional[torch.dtype] = None,
                           device: Optional[torch.device] = None
                           ) -> DefaultParameterLayer:
    """
    Build a weights layer containing the default weights values and
    responsible for resetting the weights.

    :param memory_size: The size of the built memory
    :param num_heads: The number of heads the memory will manage
    :param ensemble_shape: The shape of the ensemble, if used
    :param dtype: The dtype
    :param device: The device.
    :return: A DefaultParameterLayer wrapping the weights parameter.
    """
    base_shape = [num_heads, memory_size]
    if ensemble_shape is not None:
        prefix: List[int] = Core.standardize_shape(ensemble_shape, "ensemble_shape").tolist()
        base_shape = prefix + base_shape
    weights = torch.zeros(base_shape, dtype=dtype, device=device)
    torch.nn.init.kaiming_uniform_(weights)
    return DefaultParameterLayer(nn.Parameter(weights))
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Parameter",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"l... |
27140115657 | # -*- coding: utf-8 -*-
#######################
# deploy.urls
#######################
"""
1. 部署服务
2. add DNS
3. nginx设置
4. config check
5. 诊断
"""
from django.urls import path
from deploy import views
urlpatterns = [
    # Health-check endpoint.
    path('health/', views.health,name="health"),
    # Kick off a deployment.
    path('start/',views.deploy,name="deploy"),
    # Configure nginx for a project on a target host in an environment.
    path('setNginx/<env>/<target>/<project>/',views.set_project_nginx,name="set_project_nginx"),
    # Add a DNS record for a project in an environment.
    path('addDNS/<env>/<project>/',views.add_project_dns,name="add_project_dns"),
    # Validate a project's configuration.
    path('configCheck/<project>/',views.project_config_check,name="project_config_check"),
    # Run diagnostics for a project.
    path('diagnose/<project>/',views.diagnose_project,name="diagnose_project"),
]
| yuzhenduan/envDeploy | deploy/urls.py | urls.py | py | 691 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "deploy.views.health",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "deploy.views",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "django.urls.pa... |
75312911145 | import matplotlib.pyplot as plt
from moisture_tracers import plotdir
from moisture_tracers.plot.figures.fig2_satellite_comparison import make_plot
def main():
    """Build the figure-4 satellite comparison for the Lagrangian grid and
    save it as a PNG under the plot directory."""
    date = "20200201"
    grid_name = "lagrangian_grid"
    # Model resolution identifiers and forecast lead times passed to make_plot.
    model_resolutions = ["km1p1", "km2p2", "km4p4"]
    leads = [30, 36, 42, 48]

    make_plot(date, grid_name, model_resolutions, leads)
    filename = "fig4_satellite_comparison_{}_{}.png".format(date, grid_name)
    plt.savefig(plotdir + filename)
if __name__ == "__main__":
    import warnings

    # Silence all warnings for a clean command-line run.
    warnings.filterwarnings("ignore")
    main()
| leosaffin/moisture_tracers | moisture_tracers/plot/figures/fig4_satellite_comparison_lagrangian_grid.py | fig4_satellite_comparison_lagrangian_grid.py | py | 560 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "moisture_tracers.plot.figures.fig2_satellite_comparison.make_plot",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.savefig",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage... |
39694098177 | from app.issue_detector import IssueDetector
from app.support_detector import SupportDetector
import pandas as pd
from pathlib import Path
import sys
from pydantic import BaseModel, Field
class SupportScoreCalculator(BaseModel):
    """Computes per-user support scores from a list of chat messages.

    A user's support score is the number of replies they posted that were
    judged to have contributed to resolving someone else's issue.
    """
    # Timestamp string used to name the output CSV files.
    timestamp: str = Field()
    issue_detector: IssueDetector = Field(default=IssueDetector())
    support_detector: SupportDetector = Field(default=SupportDetector())

    def calculate(self, messages: list):
        """Detect issues and supportive answers in *messages*, write the
        question/answer pairs and the aggregated scores to CSV, and return
        the per-user score DataFrame.

        Exits the process (status 0) when no issue is found.
        """
        # Extract the messages related to issues (questions / trouble reports).
        issues = self.issue_detector.evaluate(messages)
        if not issues:
            print("no issue exitst")
            sys.exit(0)
        records = []
        for issue_id in issues:
            # Resolve the id to the actual message object.
            issue_message = self._get_target_message(messages, issue_id)
            if not issue_message:
                print("targe issue not found")
                continue
            # Extract the messages that appear related to the issue message.
            refrences = self._get_reference_messages(messages, issue_id)
            if not refrences:
                print("no refrence message")
                continue
            # Among those, find the messages that contributed to a resolution.
            answer_ids = self.support_detector.evaluate(issue_message, refrences)
            for answer_id in answer_ids:
                # Resolve the id to the actual message object.
                answer = self._get_target_message(messages, answer_id)
                records.append({"q": issue_message, "a": answer})
        df = self._create_df(records)
        # Output the question/answer binding information as CSV.
        self._save_result(df, "qa")
        grouped_df = df.groupby("answer_user_id").agg(support_score=("answer_user_id", "size")).reset_index()
        self._save_result(grouped_df, "support_score")
        return grouped_df

    def _get_target_message(self, message_objects, message_id):
        """Return the message whose "id" equals *message_id*, or None."""
        for obj in message_objects:
            obj_id = obj["id"]
            if obj_id == message_id:
                return obj

    def _get_reference_messages(self, message_objects, issue_id):
        """Collect the reply chain rooted at *issue_id*.

        A message belongs to the chain when its referenced_message points at
        the issue itself or at any message already in the chain.
        """
        messages = []
        reference_ids = []
        for obj in message_objects:
            referenced_message = obj.get("referenced_message")
            if referenced_message:
                obj_id = obj.get("id")
                parent_id = referenced_message["id"]
                if parent_id == issue_id or parent_id in reference_ids:
                    messages.append(obj)
                    # Assumes records are ordered by creation time.
                    reference_ids.append(obj_id)
        return messages

    def _create_df(self, records):
        """Flatten {q, a} record pairs into a DataFrame holding the issue and
        answer message ids, the posting users' ids, and the newline-escaped
        message bodies."""
        rows = []
        for record in records:
            row = {
                "issue_id": record["q"]["id"],
                "issue_user_id": record["q"]["author"]["id"],
                "answer_id": record["a"]["id"],
                "answer_user_id": record["a"]["author"]["id"],
                "issue_message": record["q"]["content"].replace("\n", "\\n"),
                "answer_message": record["a"]["content"].replace("\n", "\\n"),
            }
            rows.append(row)
        df = pd.DataFrame(rows)
        return df

    def _save_result(self, df, prefix: str):
        """Write *df* to result/tmp/<prefix>_<timestamp>.csv."""
        report_dir = Path("result") / "tmp"
        # BUG FIX: to_csv does not create missing directories; without this
        # the first run fails with FileNotFoundError.
        report_dir.mkdir(parents=True, exist_ok=True)
        df.to_csv(report_dir / f"{prefix}_{self.timestamp}.csv", index=False)
| blocks-web3/empower-link | contribution-analyzer/app/support_score_calculator.py | support_score_calculator.py | py | 3,688 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "app.issue_detector.IssueDetector",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pyd... |
31280891768 | import boto3
import os
import botocore
import logging
from agief_experiment import utils
class Cloud:
    """AWS helper: launches/stops EC2 instances and ECS tasks, syncs
    experiment files to remote hosts, and uploads results to S3."""

    # EC2 instances will be launched into this subnet (in a vpc)
    subnet_id = 'subnet-0b1a206e'

    # For ECS, which cluster to use
    cluster = 'default'

    # When creating EC2 instances, the root ssh key to use
    mainkeyname = 'nextpair'

    # For compute hosts, which the security group to use
    ec2_compute_securitygroup_id = 'sg-98d574fc'

    # AZ for all EC2 instances
    availability_zone = 'ap-southeast-2a'

    # Placement group for EC2 instances
    placement_group = 'MNIST-PGroup'

    # Commented out: unique, case-sensitive client token to ensure the
    # idempotency of the request.
    # client_token = 'this_is_the_client_token_la_la_34'
    network_interface_id = 'eni - b2acd4d4'

    def __init__(self):
        pass

    def sync_experiment(self, remote):
        """
        Sync experiment from this machine to remote machine
        """
        print("\n....... Use remote-sync-experiment.sh to "
              "rsync relevant folders.")
        cmd = ("../remote/remote-sync-experiment.sh " +
               remote.host_key_user_variables())
        utils.run_bashscript_repeat(cmd, 15, 6)

    def remote_download_output(self, prefix, host_node):
        """ Download /output/prefix folder from remote storage (s3) to remote machine.
        :param host_node:
        :param prefix:
        :type host_node: RemoteNode
        """
        print("\n....... Use remote-download-output.sh to copy /output files "
              "from s3 (typically input and data files) with "
              "prefix = " + prefix + ", to remote machine.")
        cmd = ("../remote/remote-download-output.sh " + " " + prefix +
               " " + host_node.host_key_user_variables())
        utils.run_bashscript_repeat(cmd, 15, 6)

    def remote_docker_launch_compute(self, host_node):
        """
        Launch the compute node in a docker container on a remote host.
        Assumes there exists a private key for the given
        ec2 instance, at keypath
        """
        print("\n....... Launch compute node in a docker container "
              "on a remote host.")

        commands = '''
            export VARIABLES_FILE={0}
            source {0}
            cd $AGI_HOME/bin/node_coordinator
            ./run-in-docker.sh -d
        '''.format(host_node.remote_variables_file)

        return utils.remote_run(host_node, commands)

    def ecs_run_task(self, task_name):
        """ Run task 'task_name' and return the Task ARN """
        print("\n....... Running task on ecs ")
        client = boto3.client('ecs')
        response = client.run_task(
            cluster=self.cluster,
            taskDefinition=task_name,
            count=1,
            startedBy='pyScript'
        )

        # BUG FIX: `response` is a dict, so "LOG: " + response raised
        # TypeError.  Use lazy %s formatting instead.
        logging.debug("LOG: %s", response)

        length = len(response['failures'])
        if length > 0:
            logging.error("Could not initiate task on AWS.")
            logging.error("reason = " + response['failures'][0]['reason'])
            logging.error("arn = " + response['failures'][0]['arn'])
            logging.error(" ----- exiting -------")
            exit(1)

        if len(response['tasks']) <= 0:
            logging.error("could not retrieve task arn when initiating task "
                          "on AWS - something has gone wrong.")
            exit(1)

        task_arn = response['tasks'][0]['taskArn']
        return task_arn

    def ecs_stop_task(self, task_arn):
        """Stop the ECS task identified by `task_arn` on self.cluster."""
        print("\n....... Stopping task on ecs ")
        client = boto3.client('ecs')
        response = client.stop_task(
            cluster=self.cluster,
            task=task_arn,
            reason='pyScript said so!'
        )

        # BUG FIX: see ecs_run_task -- cannot concatenate str and dict.
        logging.debug("LOG: %s", response)

    def ec2_start_from_instanceid(self, instance_id):
        """
        Run the chosen instance specified by instance_id
        :return: the instance AWS public and private ip addresses
        """
        print("\n....... Starting ec2 (instance id " + instance_id + ")")
        ec2 = boto3.resource('ec2')
        instance = ec2.Instance(instance_id)
        response = instance.start()
        # BUG FIX: the start() response is a dict; concatenating it to a str
        # raised TypeError.  Let print() format it instead.
        print("LOG: Start response:", response)
        instance_id = instance.instance_id

        ips = self.ec2_wait_till_running(instance_id)
        return ips

    def ec2_start_from_ami(self, name, ami_id, min_ram):
        """
        :param name: value for the instance's Name tag
        :param ami_id: ami id
        :param min_ram: (integer), minimum ram to allocate to ec2 instance
        :return: ip addresses: public and private, and instance id
        """
        print("\n....... Launching ec2 from AMI (AMI id " + ami_id +
              ", with minimum " + str(min_ram) + "GB RAM)")

        # Pick the smallest instance type whose RAM covers min_ram;
        # minimum size is 15GB on machine, leaving 13GB for compute.
        instance_type = None
        ram_allocated = 8
        if min_ram < 6:
            instance_type = 'm4.large'   # 8
            ram_allocated = 8
        elif min_ram < 13:
            instance_type = 'r3.large'   # 15.25
            ram_allocated = 15.25
        elif min_ram < 28:
            instance_type = 'r3.xlarge'  # 30.5
            ram_allocated = 30.5
        else:
            logging.error("cannot create an ec2 instance with that much RAM")
            exit(1)

        print("\n............. RAM to be allocated: " + str(ram_allocated) +
              " GB RAM")

        ec2 = boto3.resource('ec2')
        subnet = ec2.Subnet(self.subnet_id)

        # Set the correct Logz.io token in EC2
        logzio_token = os.getenv("AGI_LOGZIO_TOKEN")
        user_data = '''
            #!/bin/sh
            echo export AGI_LOGZIO_TOKEN=%s >> /etc/environment
        ''' % (logzio_token)

        instance = subnet.create_instances(
            DryRun=False,
            ImageId=ami_id,
            MinCount=1,
            MaxCount=1,
            KeyName=self.mainkeyname,
            SecurityGroupIds=[
                self.ec2_compute_securitygroup_id,
            ],
            InstanceType=instance_type,
            Placement={
                'AvailabilityZone': self.availability_zone,
                # 'GroupName': self.placement_group,
                'Tenancy': 'default'  # | 'dedicated' | 'host',
            },
            Monitoring={
                'Enabled': False
            },
            DisableApiTermination=False,
            InstanceInitiatedShutdownBehavior='terminate',  # | 'stop'
            # ClientToken=self.client_token,
            AdditionalInfo='started by run-framework.py',
            # IamInstanceProfile={
            #     'Arn': 'string',
            #     'Name': 'string'
            # },
            EbsOptimized=False,
            UserData=user_data
        )

        instance_id = instance[0].instance_id
        logging.debug("Instance launched %s", instance_id)

        # set name
        response = ec2.create_tags(
            DryRun=False,
            Resources=[
                instance_id,
            ],
            Tags=[
                {
                    'Key': 'Name',
                    'Value': name
                },
            ]
        )
        logging.debug("Set Name tag on instanceid: %s", instance_id)
        logging.debug("Response is: %s", response)

        ips = self.ec2_wait_till_running(instance_id)
        return ips, instance_id

    def ec2_wait_till_running(self, instance_id):
        """
        Block until the instance reaches the 'running' state.
        :return: the instance AWS public and private ip addresses
        """
        ec2 = boto3.resource('ec2')
        instance = ec2.Instance(instance_id)
        print("wait_till_running for instance: ", instance)
        instance.wait_until_running()

        ip_public = instance.public_ip_address
        ip_private = instance.private_ip_address

        print("Instance is up and running ...")
        self.print_ec2_info(instance)

        return {'ip_public': ip_public, 'ip_private': ip_private}

    def ec2_stop(self, instance_id):
        """Stop (not terminate) the given EC2 instance."""
        print("\n...... Closing ec2 instance (instance id " +
              str(instance_id) + ")")
        ec2 = boto3.resource('ec2')
        instance = ec2.Instance(instance_id)
        self.print_ec2_info(instance)

        response = instance.stop()
        print("stop ec2: ", response)

    def remote_upload_runfilename_s3(self, host_node, prefix, dest_name):
        """Upload the run filename from the remote host to S3.

        Best effort: failures are logged, not raised.
        """
        # NOTE(review): unlike the sibling commands there is no " " separator
        # between dest_name and the host/key/user variables -- confirm that
        # host_key_user_variables() returns a leading space.
        cmd = ("../remote/remote-upload-runfilename.sh " + " " + prefix +
               " " + dest_name +
               host_node.host_key_user_variables())
        try:
            utils.run_bashscript_repeat(cmd, 3, 3)
        except Exception as e:
            logging.error("Remote Upload Failed for this file")
            logging.error("Exception: %s", e)

    def remote_upload_output_s3(self, host_node, prefix, no_compress,
                                csv_output):
        """Upload the /output files for `prefix` from the remote host to S3."""
        cmd = "../remote/remote-upload-output.sh " + prefix + " "
        cmd += host_node.host_key_user_variables() + " "
        cmd += str(no_compress) + " " + str(csv_output)
        utils.run_bashscript_repeat(cmd, 3, 3)

    def upload_folder_s3(self, bucket_name, key, source_folderpath):
        """Upload every file under source_folderpath to s3://bucket_name/key/.

        Keys are flattened: only the filename is appended to `key`, so files
        in nested subfolders may collide on the same key.
        """
        if not os.path.exists(source_folderpath):
            logging.warning("folder does not exist, cannot upload: " +
                            source_folderpath)
            return
        if not os.path.isdir(source_folderpath):
            logging.warning("path is not a folder, cannot upload: " +
                            source_folderpath)
            return

        for root, dirs, files in os.walk(source_folderpath):
            for f in files:
                # BUG FIX: join with the walked `root`, not the top folder,
                # so files inside subdirectories resolve to existing paths
                # (previously they were skipped with a "does not exist"
                # warning from upload_file_s3).
                filepath = os.path.join(root, f)
                filekey = os.path.join(key, f)
                self.upload_file_s3(bucket_name, filekey, filepath)

    @staticmethod
    def upload_file_s3(bucket_name, key, source_filepath):
        """Upload one file to s3://bucket_name/key, creating the bucket if it
        does not exist.  Empty or missing files are skipped with a warning."""
        try:
            if os.stat(source_filepath).st_size == 0:
                logging.warning("file is empty, cannot upload: " +
                                source_filepath)
                return
        except OSError:
            logging.warning("file does not exist, cannot upload: " +
                            source_filepath)
            return

        s3 = boto3.resource('s3')

        exists = True
        try:
            s3.meta.client.head_bucket(Bucket=bucket_name)
        except botocore.exceptions.ClientError as e:
            # If a client error is thrown, then check that it was a 404 error.
            # If it was a 404 error, then the bucket does not exist.
            error_code = int(e.response['Error']['Code'])
            if error_code == 404:
                exists = False

        if not exists:
            logging.warning("s3 bucket " + bucket_name +
                            " does not exist, creating it now.")
            s3.create_bucket(Bucket=bucket_name)

        print(" ... file = " + source_filepath + ", to bucket = " +
              bucket_name + ", key = " + key)

        # Use a context manager so the file handle is closed after the upload
        # (the original left it open).
        with open(source_filepath, 'rb') as body:
            response = s3.Object(bucket_name=bucket_name,
                                 key=key).put(Body=body)

        # BUG FIX: the original logging.debug("Response = : ", response)
        # passed `response` as a %-format argument with no placeholder.
        logging.debug("Response = %s", response)

    @staticmethod
    def print_ec2_info(instance):
        """Print the public/private IPs and id of the given EC2 instance."""
        print("Instance details.")
        print(" -- Public IP address is: ", instance.public_ip_address)
        print(" -- Private IP address is: ", instance.private_ip_address)
        print(" -- id is: ", str(instance.instance_id))
| Cerenaut/run-framework | scripts/run-framework/agief_experiment/cloud.py | cloud.py | py | 11,421 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "agief_experiment.utils.run_bashscript_repeat",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "agief_experiment.utils",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "agief_experiment.utils.run_bashscript_repeat",
"line_number": 63,
"us... |
2153986094 | from django.shortcuts import render
# Create your views here.
def func(request, num1, num2):
    """Render the calculator page with sub/mul/div results for num1 and num2.

    Division by zero is reported with a message instead of raising.
    """
    div = num1 / num2 if num2 != 0 else '계산할 수 없습니다.'
    context = {
        'num1': num1,
        'num2': num2,
        'sub': num1 - num2,
        'mul': num1 * num2,
        'div': div,
    }
    return render(request, 'calculators/calculator.html', context)
| ji-hyon/Web_study | Django/practice/part2_Django/django_2_2/project2/calculators/views.py | views.py | py | 406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
}
] |
71355007785 | import dotenv
import openai
import os
# Load the OpenAI API key from the repo-level .env file into the environment.
dotenv.load_dotenv('../.env')
openai.api_key = os.environ["OPENAI_API_KEY"]

# One-shot chat completion request.
response = openai.ChatCompletion.create(
    model="gpt-3.5-turbo",
    messages=[
        {"role": "user", "content": "tell me about gpt for social good"}
    ]
)

# Print just the assistant's reply text.
print(response["choices"][0]["message"]["content"])
{
"api_name": "dotenv.load_dotenv",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "openai.api_key",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "openai.ChatComple... |
31415174040 | from pydantic import BaseModel
import json
import requests
import Console
import config
# Scheme prefix and host (from config) for the server's internal control API.
HTTP_PREFIX = "http://"
HOST = config.server_address + "/internal"
class DownloadFileFromAgentInputType(BaseModel):
    """Request body for the /downloadFile endpoint."""
    # Agent to download from.
    ip_address: str
    # Path of the file on the agent.
    file_path: str
class ListFilesFromAgentInputType(BaseModel):
    """Request body for the /listFiles endpoint."""
    # Agent to query.
    ip_address: str
    # Directory on the agent to list.
    dir_path: str
class MonitorClipboardOnAgentInputType(BaseModel):
    """Request body for the /monitorClipboard endpoint."""
    # Agent whose clipboard is monitored.
    ip_address: str
    # How long to monitor (units unspecified here -- confirm with server API).
    duration: int
class GenerateFirstCodeForAgentInputType(BaseModel):
    """Request body for the /generateAgentCode endpoint."""
    # Agent the code is generated for.
    ip_address: str
class DisconnectAgentInputType(BaseModel):
    """Request body for the /disconnectAgent endpoint."""
    # Agent to disconnect.
    ip_address: str
def download_file_from_agent(input: DownloadFileFromAgentInputType):
    """POST a download-file task for the given agent, report the outcome on
    the console, and return the decoded JSON response."""
    payload = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX + HOST + "/downloadFile", data=payload)
    if response.status_code == 200:
        Console.console.print(f'Task for downloading file from an agent scheduled successfully with id:'
                              f' {response.json()["command_id"]}', style="success")
    else:
        Console.console.print(f"Could not schedule downloading file from agent {input.ip_address}", style="error")
        Console.console.print(response.json()['detail'], style="error")
    return response.json()
def list_files_from_agent(input: ListFilesFromAgentInputType):
    """POST a list-files task for the given agent, report the outcome on the
    console, and return the decoded JSON response."""
    payload = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX + HOST + "/listFiles", data=payload)
    if response.status_code == 200:
        Console.console.print(f'Task for listing files from an agent scheduled successfully with id:'
                              f' {response.json()["command_id"]}',
                              style="success")
    else:
        Console.console.print(f"Could not schedule listing files from agent {input.ip_address}", style="error")
        Console.console.print(response.json()['detail'], style="error")
    return response.json()
def monitor_clipboard_on_agent(input: MonitorClipboardOnAgentInputType):
    """POST a clipboard-monitoring task for the given agent, report the
    outcome on the console, and return the decoded JSON response."""
    payload = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX + HOST + "/monitorClipboard", data=payload)
    if response.status_code == 200:
        Console.console.print(f'Task for monitoring clipboard on agent scheduled successfully with id:'
                              f' {response.json()["command_id"]}',
                              style="success")
    else:
        Console.console.print(f"Could not schedule monitoring clipboard on agent {input.ip_address}", style="error")
        Console.console.print(response.json()['detail'], style="error")
    return response.json()
def generate_first_code_for_agent(input: GenerateFirstCodeForAgentInputType):
    """POST a code-generation request for the given agent.

    :return: the generated code string on success, or None on failure.
    """
    data = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX+HOST+"/generateAgentCode", data=data)
    if response.status_code == 200:
        Console.console.print('Code generated successfully', style="success")
        return response.json()['code']
    # CONSISTENCY FIX: every sibling helper in this module reports failures
    # on the console; previously this returned None silently.
    Console.console.print(f"Could not generate code for agent {input.ip_address}", style="error")
    Console.console.print(response.json()['detail'], style="error")
    return None
def disconnect_agent(input: DisconnectAgentInputType):
    """POST a disconnect task for the given agent, report the outcome on the
    console, and return the decoded JSON response."""
    payload = json.dumps(input.__dict__)
    response = requests.post(url=HTTP_PREFIX + HOST + "/disconnectAgent", data=payload)
    if response.status_code == 200:
        Console.console.print(f'Task for disconnecting agent scheduled successfully with id:'
                              f' {response.json()["command_id"]}',
                              style="success")
    else:
        Console.console.print(f"Could not schedule disconnecting agent {input.ip_address}", style="error")
        Console.console.print(response.json()['detail'], style="error")
    return response.json()
def list_agents():
    """GET the connected-agent list from the server; None on failure."""
    response = requests.get(url=HTTP_PREFIX + HOST + "/agents")
    if response.status_code != 200:
        return None
    return response.json()['agents']
| Kuba12a/CybClient | Gateways/CybServerGateway.py | CybServerGateway.py | py | 3,708 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.server_address",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pydan... |
6415147937 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 15 18:08:59 2021
@author: Chris
"""
#%% Imports
from PIL import ImageGrab
import win32gui
import numpy as np
import time
import cv2
#%% Get the ID of the Emulator window
# List to hold window information
window = []
# Name of the window to find
window_name = "Super Street Fighter II (USA) - Snes9x 1.60"
# Function to extract emulator window information
# Given a window and checks if its visible and if it matches the required name
# IF the above are true then we add the information to the window list
def winEnumHandler(hwnd, ctx, window_name=window_name):
if win32gui.IsWindowVisible(hwnd):
print(win32gui.GetWindowText(hwnd))
if win32gui.GetWindowText(hwnd) == window_name:
window.append(hwnd)
window.append(win32gui.GetWindowText(hwnd))
# Function to get the screen
# Uses the enumerate windows function from wn32gui with our handler to get the
# correct window.
win32gui.EnumWindows(winEnumHandler, None)
#%% Window streaming
# Pixelwise relative corrections for the window bounding box
screen_correction = np.array([-8,-51,8,8])
# Loop to capture the window
while True:
try:
# Get the start time
start_time = time.time()
# Get the bounding box for the window
bbox = np.array(win32gui.GetWindowRect(window[0]))
# Correct the window size
bbox = tuple(bbox - screen_correction)
# Get the screen capture
screen_grab = np.array(ImageGrab.grab(bbox))
# Prints the time it took to collect the screenshot
print(f"loop took {time.time()-start_time} seconds")
# Reset the start time for the next loop
start_time=time.time()
# Display the image in a new window
cv2.imshow("window", cv2.cvtColor(screen_grab, cv2.COLOR_BGR2RGB))
# Checks to see if window should be closed and loop stopped
if cv2.waitKey(25) & 0xFF == ord('q'):
cv2.destroyAllWindows()
break
except Exception as e:
print("error", e)
# %%
| qchrisd/StreetFighterBot | ScreenGrabProof.py | ScreenGrabProof.py | py | 2,115 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "win32gui.IsWindowVisible",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "win32gui.GetWindowText",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "win32gui.GetWindowText",
"line_number": 29,
"usage_type": "call"
},
{
"api_name":... |
20504076743 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Filename : kalao_pump_switch.sh
# @Date : 2023-01-12-14-12
# @Project: KalAO-ICS
# @AUTHOR : Janis Hagelberg
"""
kalao_pump_switch.py is part of the KalAO Instrument Control Software it is a maintenance script used to switch
the water cooling pump from on to off and opposite.
(KalAO-ICS).
"""
from kalao.plc import temperature_control
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--on', help='Turn pump ON', action='store_true')
parser.add_argument('--off', dest='on', action='store_false')
parser.set_defaults(feature=False)
args = parser.parse_args()
#if temperature_control.pump_status() == 'ON':
if args.on:
print("Switching pump ON")
temperature_control.pump_on()
else:
print("Switching pump OFF")
temperature_control.pump_off()
| janis-hag/kalao-ics | scripts/kalao_pump_switch.py | kalao_pump_switch.py | py | 907 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "kalao.plc.temperature_control.pump_on",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "kalao.plc.temperature_control",
"line_number": 27,
"usage_type": "name"
}... |
39671693100 | import discord
from discord.ext import commands
import time
import json
import os
if not os.path.isfile('config.json'):
exit
f = open('config.json')
data = json.load(f)
TOKEN = data['token']
servers = data['servers']
message = data['message']
delay = int(data['delay'])
for i in range(len(servers)):
servers[i] = int(servers[i])
bot = discord.Client(token=TOKEN)
@bot.event
async def on_guild_channel_create(channel):
if channel.guild.id in servers:
print(channel, "had just been created")
time.sleep(delay)
await channel.send(message)
bot.run(TOKEN) | skiteskopes/discord_channel_create_auto_messager | discord_bot.py | discord_bot.py | py | 593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.isfile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.load",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.Client",
"line_number... |
39627512703 | import sys
import argparse
from lettuce.bin import main as lettuce_main
from lettuce import world
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from salad.steps.everything import *
from salad.terrains.everything import *
BROWSER_CHOICES = [browser.lower()
for browser in DesiredCapabilities.__dict__.keys()
if not browser.startswith('_')]
BROWSER_CHOICES.sort()
DEFAULT_BROWSER = 'firefox'
class store_driver_and_version(argparse.Action):
drivers = BROWSER_CHOICES
def __call__(self, parser, namespace, values, option_string=None):
driver_info = values.split('-')
if driver_info[0] not in self.drivers:
args = {'driver': driver_info[0],
'choices': ', '.join(map(repr, self.drivers))}
message = 'invalid choice: %(driver)r (choose from %(choices)s)'
raise argparse.ArgumentError(self, message % args)
setattr(namespace, self.dest, driver_info[0])
if len(driver_info) > 1:
setattr(namespace, 'version', driver_info[1])
if len(driver_info) > 2:
setattr(namespace, 'platform', driver_info[2].replace('_', ' '))
def main(args=sys.argv[1:]):
parser = argparse.ArgumentParser(prog="Salad",
description=("BDD browswer-automation "
"made tasty."))
parser.add_argument('--browser', default=DEFAULT_BROWSER,
action=store_driver_and_version, metavar='BROWSER',
help=('Browser to use. Options: %s Default is %s.' %
(BROWSER_CHOICES, DEFAULT_BROWSER)))
parser.add_argument('--remote-url',
help='Selenium server url for remote browsers')
parser.add_argument('--name',
help=('Give your job a name so it '
'can be identified on saucelabs'))
parser.add_argument('--timeout',
help=("Set the saucelabs' idle-timeout for the job"))
(parsed_args, leftovers) = parser.parse_known_args()
world.drivers = [parsed_args.browser]
world.remote_url = parsed_args.remote_url
world.remote_capabilities = {}
if 'version' in parsed_args:
world.remote_capabilities['version'] = parsed_args.version
if 'platform' in parsed_args:
world.remote_capabilities['platform'] = parsed_args.platform
name = _get_current_timestamp() + " - "
if not parsed_args.name:
name += "unnamed job"
else:
name += parsed_args.name
world.remote_capabilities['name'] = name
if not parsed_args.timeout:
world.remote_capabilities['idle-timeout'] = 120
else:
world.remote_capabilities['idle-timeout'] = parsed_args.timeout
lettuce_main(args=leftovers)
def _get_current_timestamp():
from time import strftime
import datetime
return datetime.datetime.strftime(datetime.datetime.now(),
'%d.%m.%Y %H:%M')
if __name__ == '__main__':
main()
| salad/salad | salad/cli.py | cli.py | py | 3,130 | python | en | code | 122 | github-code | 36 | [
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.__dict__.keys",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.common.desired_capabilities.DesiredCapabilities.__dict__",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
23420924440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('ventas', '0048_auto_20160928_0742'),
]
operations = [
migrations.AlterField(
model_name='comanda',
name='area_encargada',
field=models.CharField(max_length=3, verbose_name=b'Area Encargada', choices=[(b'COC', b'Cocina'), (b'BAR', b'Barra')]),
),
migrations.AlterField(
model_name='comanda',
name='estado_comanda',
field=models.CharField(max_length=3, verbose_name=b'Estado Comanda', choices=[(b'PEN', b'Pendiente'), (b'PRO', b'Procesada'), (b'CAN', b'Cancelada')]),
),
migrations.AlterField(
model_name='comanda',
name='fecha_hora_pedido_comanda',
field=models.DateTimeField(verbose_name=b'Fecha/hora Pedido Comanda'),
),
migrations.AlterField(
model_name='comanda',
name='fecha_hora_procesamiento_comanda',
field=models.DateTimeField(null=True, verbose_name=b'Fecha/hora Procesamiento Comanda', blank=True),
),
migrations.AlterField(
model_name='comanda',
name='producto_a_elaborar',
field=models.ForeignKey(verbose_name=b'Producto a Elaborar', to='stock.ProductoCompuesto'),
),
migrations.AlterField(
model_name='venta',
name='apertura_caja',
field=models.ForeignKey(default=1, verbose_name=b'Apertura de Caja', to='ventas.AperturaCaja', help_text=b'Se asigna dependiendo del usuario logueado y de si posee una Apertura de Caja vigente.'),
),
migrations.AlterField(
model_name='venta',
name='numero_factura_venta',
field=models.ForeignKey(related_name='numero_factura', default=1, verbose_name=b'Numero de Factura de la Venta', to='bar.FacturaVenta', help_text=b'El Numero de Factura se asigna al momento de confirmarse la Venta.'),
),
]
| pmmrpy/SIGB | ventas/migrations/0049_auto_20160930_1114.py | 0049_auto_20160930_1114.py | py | 2,096 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
70345443943 | import numpy as np
import math
import torch
import torch.nn as nn
class Upper(nn.Module): # Upper: Mutual Information Contrastive Learning Upper Bound
'''
This class provides the Upper bound estimation to I(X,Y)
Method:
forward() : provides the estimation with input samples
loglikeli() : provides the log-likelihood of the approximation q(Y|X) with input samples
Arguments:
x_dim, y_dim : the dimensions of samples from X, Y respectively
hidden_size : the dimension of the hidden layer of the approximation network q(Y|X)
x_samples, y_samples : samples from X and Y, having shape [sample_size, x_dim/y_dim]
'''
def __init__(self, x_dim, y_dim, hidden_size):
super(Upper, self).__init__()
# p_mu outputs mean of q(Y|X)
self.p_mu = nn.Sequential(nn.Linear(x_dim, hidden_size//2),
nn.ReLU(),
nn.Linear(hidden_size//2, y_dim))
# p_logvar outputs log of variance of q(Y|X)
self.p_logvar = nn.Sequential(nn.Linear(x_dim, hidden_size//2),
nn.ReLU(),
nn.Linear(hidden_size//2, y_dim),
nn.Tanh())
def get_mu_logvar(self, x_samples):
mu = self.p_mu(x_samples)
logvar = self.p_logvar(x_samples)
return mu, logvar
def forward(self, x_samples, y_samples):
mu, logvar = self.get_mu_logvar(x_samples)
# log of conditional probability of positive sample pairs
positive = - (mu - y_samples)**2 /2./logvar.exp()
prediction_1 = mu.unsqueeze(1) # shape [nsample,1,dim]
y_samples_1 = y_samples.unsqueeze(0) # shape [1,nsample,dim]
# log of conditional probability of negative sample pairs
negative = - ((y_samples_1 - prediction_1)**2).mean(dim=1)/2./logvar.exp()
return (positive.sum(dim = -1) - negative.sum(dim = -1)).mean()
def loglikeli(self, x_samples, y_samples): # unnormalized loglikelihood
mu, logvar = self.get_mu_logvar(x_samples)
return (-(mu - y_samples)**2 /logvar.exp()-logvar).sum(dim=1).mean(dim=0)
def learning_loss(self, x_samples, y_samples):
return - self.loglikeli(x_samples, y_samples)
class Lower(nn.Module):
def __init__(self, x_dim, y_dim, hidden_size):
super(Lower, self).__init__()
self.F_func = nn.Sequential(nn.Linear(x_dim + y_dim, hidden_size),
nn.ReLU(),
nn.Linear(hidden_size, 1))
def forward(self, x_samples, y_samples):
# shuffle and concatenate
sample_size = y_samples.shape[0]
x_tile = x_samples.unsqueeze(0).repeat((sample_size, 1, 1))
y_tile = y_samples.unsqueeze(1).repeat((1, sample_size, 1))
T0 = self.F_func(torch.cat([x_samples,y_samples], dim = -1))
T1 = self.F_func(torch.cat([x_tile, y_tile], dim = -1))-1. #shape [sample_size, sample_size, 1]
lower_bound = T0.mean() - (T1.logsumexp(dim = 1) - np.log(sample_size)).exp().mean()
return lower_bound
def learning_loss(self, x_samples, y_samples):
return -self.forward(x_samples, y_samples)
| joey-wang123/Semi-meta | mi_estimators.py | mi_estimators.py | py | 3,452 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_... |
9587889867 | import os
import csv
from decimal import Decimal
from forex_python.bitcoin import BtcConverter
from forex_python.converter import CurrencyRates
from plugin import plugin, require
FILE_PATH = os.path.abspath(os.path.dirname(__file__))
@require(network=True)
@plugin('currencyconv')
class Currencyconv():
"""
Convert an amount of money from a currency to another.
-- Type currencyconv, press enter and follow the instructions!
"""
def __call__(self, jarvis, s):
currencies = self.find_currencies()
amount = jarvis.input_number('Enter an amount: ')
from_currency = self.get_currency(jarvis, 'Enter from which currency: ', currencies)
to_currency = self.get_currency(jarvis, 'Enter to which currency: ', currencies)
self.currencyconv(jarvis, amount, from_currency, to_currency)
def currencyconv(self, jarvis, amount, fr, to):
"""
currencyconv converts the given amount to another currency
using fore-python
"""
b = BtcConverter(force_decimal=True)
c = CurrencyRates(force_decimal=True)
if (to == "BTC"):
result = b.convert_to_btc(Decimal(amount), fr)
elif (fr == "BTC"):
result = b.convert_btc_to_cur(Decimal(amount), to)
else:
result = c.convert(fr, to, Decimal(amount))
outputText = str(amount) + " " + fr + \
" are equal to " + str(result) + " " + to
jarvis.say(outputText)
def find_currencies(self):
"""
find_currency creates a dict with the inputs that forex-python accepts
"""
with open(os.path.join(FILE_PATH, "../data/currencies.csv"), mode='r') as infile:
reader = csv.reader(infile)
mydict = {r.upper(): row[2] for row in reader for r in row[0:3]}
return mydict
def get_currency(self, jarvis, prompt, currencies):
"""
get_currency checks if the input the user gave is valid based
on the dictionary of find_currencies
"""
while True:
c = jarvis.input(prompt).upper()
if c in currencies:
return currencies[c]
elif c == "show help".upper():
print(', '.join(set(currencies.values())))
prompt = 'Please enter a valid country or currency: '
continue
elif c == "try again".upper():
prompt = 'Please enter a valid country or currency: '
continue
else:
prompt = 'Type -show help- to see valid currencies '\
'or -try again- to continue: '
| sukeesh/Jarvis | jarviscli/plugins/currency_conv.py | currency_conv.py | py | 2,657 | python | en | code | 2,765 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "forex_python.bitcoin.BtcCon... |
6035944019 | import matplotlib.pyplot as plt
import pdb
import numpy as np
import csv
import time
def PlotDemo1(a, b):
a1 = []
b1 = []
a1 = a
b1 = b
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(a,b)
plt.show()
def PlotDemo(a,zero):
a1 = []
b1 = []
a1 = a
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(a)
ax.plot(zero)
plt.show()
def Run(symbol):
csv_reader = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
#pdb.set_trace()
PICES1 = [row[4] for row in csv_reader]
csv_reader1 = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
TIME1 = [row[10] for row in csv_reader1]
csv_reader2 = csv.reader(open("E:/MyGit/PythonStock/获取当数据并画分时图/data/"+time.strftime('%Y%m%d')+"/"+symbol+".csv",'r', encoding='utf-8'))
yesterday = [row[3] for row in csv_reader2][2]
TIME2 = [row[4] for row in csv_reader]
PICES3 = [row[5] for row in csv_reader]
TIME3 = [row[6] for row in csv_reader]
PICES4 = [row[7] for row in csv_reader]
TIME4 = [row[8] for row in csv_reader]
PICES5 = [row[9] for row in csv_reader]
TIME5 = [row[10] for row in csv_reader]
print(PICES1)
print(TIME1)
print( yesterday)
print(TIME2)
print(PICES3)
print(TIME3)
print(PICES4)
print(TIME4)
print(PICES5)
print(TIME5)
#PlotDemo1()
#res = [x-1 for x in PICES1]
a = PICES1.remove('当前价格')
b = TIME1.remove('时间')
print(PICES1)
res = list(map(float,PICES1))
res = [x-float(yesterday) for x in res]
res = [x/float(yesterday)*100 for x in res]
zero = [0 for i in range(len(res))]
print(zero)
print(TIME1)
PlotDemo(res,zero)
if __name__ == '__main__':
Run("300736")
| 2017wxyzwxyz/PythonStock | 获取当日数据并画分时图/画出走势图.py | 画出走势图.py | py | 1,976 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "mat... |
71249335464 | """
NSynth classification using PyTorch
Authors: Japheth Adhavan, Jason St. George
Reference: Sasank Chilamkurthy <https://chsasank.github.io>
"""
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
import numpy as np
import utils
import models
import data
import visualize
import time
import os
import argparse
import logging
logging.basicConfig(level=logging.INFO)
gpu_idx = utils.get_free_gpu()
device = torch.device("cuda:{}".format(gpu_idx))
logging.info("Using device cuda:{}".format(gpu_idx))
def train_model(model, dataloaders, criterion, optimizer, scheduler, network_type, num_epochs=10):
"""
:param model:
:param criterion:
:param optimizer:
:param scheduler:
:param num_epochs:
:return:
"""
since = time.time()
best_model_wts = model.state_dict()
best_acc = 0.0
dataset_sizes = {x: len(dataloaders[x].dataset) for x in ["train", "val"]}
model_loss = {x: [0 for _ in range(num_epochs)] for x in ["train", "val"]}
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
print('-' * 10)
for phase in ['train', 'val']:
if phase == 'train':
scheduler.step()
model.train()
else:
model.eval()
running_loss = 0.0
running_corrects = 0
# iterate over data
for batch_idx, (samples, labels, targets) in enumerate(dataloaders[phase]):
inputs = samples.to(device)
labels = labels.to(device)
# zero the parameter gradients
optimizer.zero_grad()
with torch.set_grad_enabled(phase == 'train'):
outputs = model(inputs)
_, preds = torch.max(outputs, 1)
loss = criterion(outputs, labels)
# backward + optimize only if in training phase
if phase == 'train':
loss.backward()
optimizer.step()
# aggregate statistics
running_loss += loss.item() * inputs.size(0)
running_corrects += torch.sum(preds == labels.data)
if phase == 'train' and batch_idx % 50 == 0:
logging.info('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
(epoch + 1),
(batch_idx + 1) * len(samples),
dataset_sizes[phase],
100. * (batch_idx + 1) / len(dataloaders[phase]),
loss.item()))
epoch_loss = running_loss / dataset_sizes[phase]
epoch_acc = running_corrects.double() / dataset_sizes[phase]
model_loss[phase][epoch] = epoch_loss
logging.info('{} Loss: {:.4f} Acc: {:.4f}'.format(
phase.capitalize(), epoch_loss, epoch_acc))
# deep copy the model
if phase == 'val' and epoch_acc > best_acc:
best_acc = epoch_acc
best_model_wts = model.state_dict()
torch.save(best_model_wts, "./models/{}Network.pt".format(network_type))
print()
time_elapsed = time.time() - since
logging.info('Training completed in {:.0f}m {:.0f}s'.format(
time_elapsed // 60, time_elapsed % 60))
logging.info('Best overall val Acc: {:4f}'.format(best_acc))
# load best model weights
model.load_state_dict(best_model_wts)
return model, model_loss
# def test(model, dataloader):
# spreads = [[0 for y in range(10)] for x in range(10)]
# test_c = [0 for x in range(10)]
# test_t = [0 for x in range(10)]
# c_done = [False for x in range(10)]
# i_done = [False for x in range(10)]
# c_samples, i_samples = 0, 0
# y_test, y_pred = [], []
#
# correct = (preds == labels).squeeze()
# np_predicted = preds.cpu().numpy() # Get vector of int class output labels
# y_pred.extend(np_predicted)
# y_test.extend(if_label.cpu().numpy())
#
# if i_samples < 10 and c_samples < 10:
# for i in range(len(outputs)):
# label = str(labels[i]) # e.g. 'tensor(0)'
# label = int(label[7]) # 0
# test_c[label] += correct[i].item()
# test_t[label] += 1
#
# if np_predicted[i] != label:
# spreads[label][np_predicted[i]] += 1
# if i_samples < 10:
# i_samples += visualize.save_samples(inputs[i],
# np_predicted[i], label,
# i_done, False, CLASS_NAMES)
# else:
# if c_samples < 10:
# c_samples += visualize.save_samples(inputs[i], None, label,
# c_done, True, CLASS_NAMES)
def test(model, test_loader, criterion, classes):
model.eval()
test_loss = 0
correct = 0
no_of_classes = len(classes)
spread = [([0] * no_of_classes) for _ in range(no_of_classes)]
examples = [{} for _ in range(no_of_classes)]
y_test, y_pred = [], []
with torch.no_grad():
for data, labels, target in test_loader:
data, labels = data.to(device), labels.to(device)
output = model(data)
test_loss += criterion(output, labels).item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
actual = labels.view_as(pred)
is_correct = pred.equal(actual)
label_actual = int(labels) if int(labels) < 9 else 9
label_pred = int(pred) if int(pred) < 9 else 9
spread[label_actual][label_pred] += 1
correct += 1 if is_correct else 0
examples[label_actual][is_correct] = (data, label_pred)
y_pred.append(label_pred)
y_test.append(label_actual)
test_loss /= len(test_loader.dataset)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))
return y_test, y_pred, spread, examples
def main(args):
np.warnings.filterwarnings('ignore')
os.makedirs("./graphs", exist_ok=True)
os.makedirs("./models", exist_ok=True)
model = {
"Simple": models.SimpleNetwork,
"Epic" : models.EpicNetwork,
"Bonus" : models.BonusNetwork
}[args.network]().to(device)
classes = ['bass', 'brass', 'flute', 'guitar', 'keyboard',
'mallet', 'organ', 'reed', 'string', 'vocal']
if args.network == "Bonus":
classes = ['acoustic', 'electronic', 'synthetic']
model.double()
criterion = nn.CrossEntropyLoss()
optimizer_conv = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)
exp_lr_scheduler = lr_scheduler.StepLR(optimizer_conv, step_size=args.step, gamma=args.gamma)
if not args.test:
train_loader = data.get_data_loader("train", batch_size=args.batch_size,
shuffle=True, num_workers=4, network=args.network)
valid_loader = data.get_data_loader("valid", network=args.network)
dataloaders = {
"train": train_loader,
"val": valid_loader
}
logging.info('Training...')
model, model_loss = train_model(model, dataloaders,
criterion, optimizer_conv,
exp_lr_scheduler,
args.network,
num_epochs=args.epochs)
visualize.plot_loss(model_loss, "{}Network".format(args.network))
else:
logging.info('Testing...')
model.load_state_dict(torch.load("./models/{}Network.pt".format(args.network)))
test_loader = data.get_data_loader("test", network=args.network)
y_test, y_pred, spreads, examples = test(model, test_loader, criterion, classes)
visualize.plot_histograms(classes, spreads, type=args.network)
visualize.plot_confusion_matrix(y_test, y_pred, classes, type=args.network)
visualize.save_samples(examples, classes)
logging.info('Completed Successfully!')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='NSynth classifier')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test', action='store_true', default=False,
help='disables training, loads model')
parser.add_argument('--network', default='Epic', const='Epic', nargs="?", choices=['Simple', 'Epic', 'Bonus'],
help='Choose the type of network from Simple, Epic and Bonus (default: Epic)')
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
help='learning rate (default: 0.01)')
parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
help='SGD momentum (default: 0.5)')
parser.add_argument('--step', type=int, default=3, metavar='N',
help='number of epochs to decrease learn-rate (default: 3)')
parser.add_argument('--gamma', type=float, default=0.1, metavar='N',
help='factor to decrease learn-rate (default: 0.1)')
main(parser.parse_args()) | ifrit98/NSynth_Classification_CNN | src/main.py | main.py | py | 9,785 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "utils.get_free_gpu",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.device... |
32538066028 | # -*- coding: utf-8 -*-
"""
Created on Sat May 5 10:53:26 2018
@author: lenovo
"""
import numpy as np
from scipy.optimize import leastsq
def fun(p,x):
"""定义想要拟合的函数"""
k,b = p
return k*x+b
def err(p,x,y):
"""定义误差函数"""
return fun(p,x)-y
x = [1,2,3,4]
y = [6,5,7,10]
p0 = [1,1]
x1 = np.array(x)
y1 = np.array(y)
xishu = leastsq(err,p0,args=(x1,y1))
print(xishu[0]) | wilsonzyp/probability_statistics | Try_leastsq_with_scipy.py | Try_leastsq_with_scipy.py | py | 446 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.array",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.optimize.leastsq",
"line_number": 27,
"usage_type": "call"
}
] |
2962922987 | #! /usr/bin/env python
from sensor_msgs.msg import CompressedImage
from cv_bridge import CvBridge, CvBridgeError
import cv2
import rospy
import subprocess
#from PIL import Image # PIL
from __init__ import *
from rgiro_spco2_slam.srv import spco_data_image,spco_data_imageResponse
import spco2_placescnn as places365
class ImageFeatureServer():
def image_server(self, req):
if len(self.frame) == 0:
return spco_data_imageResponse(False)
#cv2.imshow("image", self.frame)
# forward pass
convert_img = places365.Image.fromarray(self.frame)#convert into PIL
input_img = places365.V(self.tf(convert_img).unsqueeze(0))
logit = self.model.forward(input_img)
h_x = places365.F.softmax(logit, 1).data.squeeze()
# save image feature
fp = open(self.DATA_FOLDER + '/img/ft' + str(req.count) + '.csv','a')
h_x_numpy = h_x.to('cpu').detach().numpy().copy()
fp.write(','.join(map(str, h_x_numpy)))
fp.write('\n')
fp.close()
rospy.loginfo("[Service] save new feature")
# save image
if self.image_save:
if req.mode == "new":
p = subprocess.Popen("mkdir " + self.DATA_FOLDER + "/image/", shell=True)
rospy.sleep(0.5)
image_name = self.DATA_FOLDER + "/image/" + str(req.count) + ".jpg"
cv2.imwrite(image_name, self.frame)
rospy.loginfo("[Service spco_data/image] save new image as %s", image_name)
# save and publish activation image
#print "h_x",h_x
probs, idx = h_x.sort(0, True)
probs = probs.numpy()
idx = idx.numpy()
# generate class activation mapping
#print('Class activation map is saved as cam.jpg')
#CAMs = places365.returnCAM(features_blobs[0], weight_softmax, [idx[0]])
# render the CAM and output
#img = cv2.imread('test.jpg')
'''
height, width, _ = self.frame.shape# img.shape
heatmap = cv2.applyColorMap(cv2.resize(CAMs[0],(width, height)), cv2.COLORMAP_JET)
result = heatmap * 0.4 + img * 0.5
image_name = self.DATA_FOLDER + "/image/" + str(req.count) + "_activation.jpg"
cv2.imwrite(image_name, result)
'''
return spco_data_imageResponse(True)
def image_callback(self, image):
try:
self.frame = CvBridge().compressed_imgmsg_to_cv2(image)
except CvBrideError as e:
print (e)
def load_network_model(self):
# load the labels
self.classes, self.labels_IO, self.labels_attribute, self.W_attribute = places365.load_labels()
# load the model
self.model = places365.load_model()
# load the transformer
self.tf = places365.returnTF() # image transformer
# get the softmax weight
self.params = list(self.model.parameters())
self.weight_softmax = self.params[-2].data.numpy()
self.weight_softmax[self.weight_softmax<0] = 0
return (True)
def __init__(self):
TRIALNAME = "test"#rospy.get_param('~trial_name')#test
IMAGE_TOPIC = '/hsrb/head_rgbd_sensor/rgb/image_rect_color/compressed' #"/hsrb/head_rgbd_sensor/rgb/image_raw"#rospy.get_param('~image_topic')#/camera/rgb/image_raw
self.image_save = True #rospy.get_param('~image_save')#true
# subscrib image
rospy.Subscriber(IMAGE_TOPIC, CompressedImage, self.image_callback, queue_size=1)
if self.load_network_model()==False:
print ("error")
self.DATA_FOLDER = datafolder + TRIALNAME
self.frame = []
s = rospy.Service('rgiro_spco2_slam/image', spco_data_image, self.image_server)
rospy.loginfo("[Service spco_data/image] Ready")
if __name__ == "__main__":
rospy.init_node('spco2_image_features',anonymous=False)
srv = ImageFeatureServer()
rospy.spin()
| Shoichi-Hasegawa0628/spco2_boo | rgiro_spco2_slam/src/spco2_image_features.py | spco2_image_features.py | py | 3,939 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "rgiro_spco2_slam.srv.spco_data_imageResponse",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "spco2_placescnn.Image.fromarray",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "spco2_placescnn.Image",
"line_number": 22,
"usage_type": "at... |
5547407739 | """
Tests for voting 31/01/2023.
"""
from scripts.vote_2023_01_31 import start_vote
from brownie import chain, accounts
from brownie.network.transaction import TransactionReceipt
from eth_abi.abi import encode_single
from utils.config import network_name
from utils.test.tx_tracing_helpers import *
from utils.test.event_validators.easy_track import (
validate_evmscript_factory_added_event,
EVMScriptFactoryAdded,
validate_evmscript_factory_removed_event,
)
from utils.easy_track import create_permissions
from utils.agent import agent_forward
from utils.voting import create_vote, bake_vote_items
eth = "0x0000000000000000000000000000000000000000"
def test_vote(
helpers,
accounts,
vote_id_from_env,
bypass_events_decoding,
unknown_person,
interface,
ldo_holder,
):
dai_token = interface.ERC20("0x6B175474E89094C44Da98b954EedeAC495271d0F")
allowed_recipients = [
accounts.at("0xaf8aE6955d07776aB690e565Ba6Fbc79B8dE3a5d", {"force": True}),
accounts.at("0x558247e365be655f9144e1a0140D793984372Ef3", {"force": True}),
accounts.at("0x53773E034d9784153471813dacAFF53dBBB78E8c", {"force": True}),
accounts.at("0xC976903918A0AF01366B31d97234C524130fc8B1", {"force": True}),
accounts.at("0x9e2b6378ee8ad2A4A95Fe481d63CAba8FB0EBBF9", {"force": True}),
accounts.at("0x82AF9d2Ea81810582657f6DC04B1d7d0D573F616", {"force": True}),
accounts.at("0x586b9b2F8010b284A0197f392156f1A7Eb5e86e9", {"force": True}),
accounts.at("0x883f91D6F3090EA26E96211423905F160A9CA01d", {"force": True}),
accounts.at("0x351806B55e93A8Bcb47Be3ACAF71584deDEaB324", {"force": True}),
accounts.at("0xf6502Ea7E9B341702609730583F2BcAB3c1dC041", {"force": True}),
accounts.at("0xDB2364dD1b1A733A690Bf6fA44d7Dd48ad6707Cd", {"force": True}),
accounts.at("0xF930EBBd05eF8b25B1797b9b2109DDC9B0d43063", {"force": True}),
accounts.at("0x6DC9657C2D90D57cADfFB64239242d06e6103E43", {"force": True}),
accounts.at("0x13C6eF8d45aFBCcF15ec0701567cC9fAD2b63CE8", {"force": True}),
]
finance = interface.Finance("0xB9E5CBB9CA5b0d659238807E84D0176930753d86")
dao_voting = interface.Voting("0x2e59A20f205bB85a89C53f1936454680651E618e")
easy_track = interface.EasyTrack("0xF0211b7660680B49De1A7E9f25C65660F0a13Fea")
referral_dai_registry = interface.AllowedRecipientRegistry("0xa295C212B44a48D07746d70d32Aa6Ca9b09Fb846")
referral_dai_topup_factory = interface.TopUpAllowedRecipients("0x009ffa22ce4388d2F5De128Ca8E6fD229A312450")
referral_dai_add_recipient_factory = interface.AddAllowedRecipient("0x8F06a7f244F6Bb4B68Cd6dB05213042bFc0d7151")
referral_dai_remove_recipient_factory = interface.RemoveAllowedRecipient("0xd8f9B72Cd97388f23814ECF429cd18815F6352c1")
referral_program_multisig = accounts.at("0xe2A682A9722354D825d1BbDF372cC86B2ea82c8C", {"force": True})
rewards_topup_factory_old = interface.IEVMScriptFactory("0x77781A93C4824d2299a38AC8bBB11eb3cd6Bc3B7")
rewards_add_factory_old = interface.IEVMScriptFactory("0x9D15032b91d01d5c1D940eb919461426AB0dD4e3")
rewards_remove_factory_old = interface.IEVMScriptFactory("0xc21e5e72Ffc223f02fC410aAedE3084a63963932")
old_factories_list = easy_track.getEVMScriptFactories()
assert len(old_factories_list) == 15
assert referral_dai_topup_factory not in old_factories_list
assert referral_dai_add_recipient_factory not in old_factories_list
assert referral_dai_remove_recipient_factory not in old_factories_list
assert rewards_topup_factory_old in old_factories_list
assert rewards_add_factory_old in old_factories_list
assert rewards_remove_factory_old in old_factories_list
##
## START VOTE
##
vote_id = vote_id_from_env or start_vote({"from": ldo_holder}, silent=True)[0]
tx: TransactionReceipt = helpers.execute_vote(
vote_id=vote_id, accounts=accounts, dao_voting=dao_voting, skip_time=3 * 60 * 60 * 24
)
updated_factories_list = easy_track.getEVMScriptFactories()
assert len(updated_factories_list) == 15
# 1. Add Referral program DAI top up EVM script factory 0x009ffa22ce4388d2F5De128Ca8E6fD229A312450 to Easy Track
assert referral_dai_topup_factory in updated_factories_list
create_and_enact_payment_motion(
easy_track,
referral_program_multisig,
referral_dai_topup_factory,
dai_token,
allowed_recipients,
[10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18, 10 * 10**18,10 * 10**18, 10 * 10**18],
unknown_person,
)
check_add_and_remove_recipient_with_voting(referral_dai_registry, helpers, ldo_holder, dao_voting)
# 2. Add Referral program DAI add recipient EVM script factory 0x8F06a7f244F6Bb4B68Cd6dB05213042bFc0d7151 to Easy Track
assert referral_dai_add_recipient_factory in updated_factories_list
create_and_enact_add_recipient_motion(
easy_track,
referral_program_multisig,
referral_dai_registry,
referral_dai_add_recipient_factory,
unknown_person,
"",
ldo_holder,
)
# 3. Add Referral program DAI remove recipient EVM script factory 0xd8f9B72Cd97388f23814ECF429cd18815F6352c1 to Easy Track
assert referral_dai_remove_recipient_factory in updated_factories_list
create_and_enact_remove_recipient_motion(
easy_track,
referral_program_multisig,
referral_dai_registry,
referral_dai_remove_recipient_factory,
unknown_person,
ldo_holder,
)
# 4. Remove reWARDS top up EVM script factory (old ver) 0x77781A93C4824d2299a38AC8bBB11eb3cd6Bc3B7 from Easy Track
assert rewards_topup_factory_old not in updated_factories_list
# 5. Remove reWARDS add recipient EVM script factory (old ver) 0x9D15032b91d01d5c1D940eb919461426AB0dD4e3 from Easy Track
assert rewards_add_factory_old not in updated_factories_list
# 6. Remove reWARDS remove recipient EVM script factory (old ver) 0xc21e5e72Ffc223f02fC410aAedE3084a63963932 from Easy Track
assert rewards_remove_factory_old not in updated_factories_list
# validate vote events
assert count_vote_items_by_events(tx, dao_voting) == 6, "Incorrect voting items count"
display_voting_events(tx)
if bypass_events_decoding or network_name() in ("goerli", "goerli-fork"):
return
evs = group_voting_events(tx)
validate_evmscript_factory_added_event(
evs[0],
EVMScriptFactoryAdded(
factory_addr=referral_dai_topup_factory,
permissions=create_permissions(finance, "newImmediatePayment")
+ create_permissions(referral_dai_registry, "updateSpentAmount")[2:],
),
)
validate_evmscript_factory_added_event(
evs[1],
EVMScriptFactoryAdded(
factory_addr=referral_dai_add_recipient_factory,
permissions=create_permissions(referral_dai_registry, "addRecipient"),
),
)
validate_evmscript_factory_added_event(
evs[2],
EVMScriptFactoryAdded(
factory_addr=referral_dai_remove_recipient_factory,
permissions=create_permissions(referral_dai_registry, "removeRecipient"),
),
)
validate_evmscript_factory_removed_event(evs[3], rewards_topup_factory_old)
validate_evmscript_factory_removed_event(evs[4], rewards_add_factory_old)
validate_evmscript_factory_removed_event(evs[5], rewards_remove_factory_old)
def _encode_calldata(signature, values):
return "0x" + encode_single(signature, values).hex()
def create_and_enact_payment_motion(
easy_track,
trusted_caller,
factory,
token,
recievers,
transfer_amounts,
stranger,
):
agent = accounts.at("0x3e40D73EB977Dc6a537aF587D48316feE66E9C8c", {"force": True})
agent_balance_before = balance_of(agent, token)
recievers_balance_before = [balance_of(reciever, token) for reciever in recievers]
motions_before = easy_track.getMotions()
recievers_addresses = [reciever.address for reciever in recievers]
calldata = _encode_calldata("(address[],uint256[])", [recievers_addresses, transfer_amounts])
tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
motions = easy_track.getMotions()
assert len(motions) == len(motions_before) + 1
chain.sleep(60 * 60 * 24 * 3)
chain.mine()
easy_track.enactMotion(
motions[-1][0],
tx.events["MotionCreated"]["_evmScriptCallData"],
{"from": stranger},
)
recievers_balance_after = [balance_of(reciever, token)for reciever in recievers]
for i in range(len(recievers)):
assert recievers_balance_after[i] == recievers_balance_before[i] + transfer_amounts[i]
agent_balance_after = balance_of(agent, token)
assert agent_balance_after == agent_balance_before - sum(transfer_amounts)
def balance_of(address, token):
if token == eth:
return address.balance()
else:
return token.balanceOf(address)
def create_and_enact_add_recipient_motion(
easy_track,
trusted_caller,
registry,
factory,
recipient,
title,
stranger,
):
recipients_count = len(registry.getAllowedRecipients())
assert not registry.isRecipientAllowed(recipient)
motions_before = easy_track.getMotions()
calldata = _encode_calldata("(address,string)", [recipient.address, title])
tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
motions = easy_track.getMotions()
assert len(motions) == len(motions_before) + 1
chain.sleep(60 * 60 * 24 * 3)
chain.mine()
easy_track.enactMotion(
motions[-1][0],
tx.events["MotionCreated"]["_evmScriptCallData"],
{"from": stranger},
)
assert len(registry.getAllowedRecipients()) == recipients_count + 1
assert registry.isRecipientAllowed(recipient)
def create_and_enact_remove_recipient_motion(
easy_track,
trusted_caller,
registry,
factory,
recipient,
stranger,
):
recipients_count = len(registry.getAllowedRecipients())
assert registry.isRecipientAllowed(recipient)
motions_before = easy_track.getMotions()
calldata = _encode_calldata("(address)", [recipient.address])
tx = easy_track.createMotion(factory, calldata, {"from": trusted_caller})
motions = easy_track.getMotions()
assert len(motions) == len(motions_before) + 1
chain.sleep(60 * 60 * 24 * 3)
chain.mine()
easy_track.enactMotion(
motions[-1][0],
tx.events["MotionCreated"]["_evmScriptCallData"],
{"from": stranger},
)
assert len(registry.getAllowedRecipients()) == recipients_count - 1
assert not registry.isRecipientAllowed(recipient)
def check_add_and_remove_recipient_with_voting(registry, helpers, ldo_holder, dao_voting):
recipient_candidate = accounts[0]
title = ""
recipients_length_before = len(registry.getAllowedRecipients())
assert not registry.isRecipientAllowed(recipient_candidate)
call_script_items = [
agent_forward(
[
(
registry.address,
registry.addRecipient.encode_input(recipient_candidate, title),
)
]
)
]
vote_desc_items = ["Add recipient"]
vote_items = bake_vote_items(vote_desc_items, call_script_items)
vote_id = create_vote(vote_items, {"from": ldo_holder})[0]
helpers.execute_vote(
vote_id=vote_id,
accounts=accounts,
dao_voting=dao_voting,
skip_time=3 * 60 * 60 * 24,
)
assert registry.isRecipientAllowed(recipient_candidate)
assert len(registry.getAllowedRecipients()) == recipients_length_before + 1, 'Wrong whitelist length'
call_script_items = [
agent_forward(
[
(
registry.address,
registry.removeRecipient.encode_input(recipient_candidate),
)
]
)
]
vote_desc_items = ["Remove recipient"]
vote_items = bake_vote_items(vote_desc_items, call_script_items)
vote_id = create_vote(vote_items, {"from": ldo_holder})[0]
helpers.execute_vote(
vote_id=vote_id,
accounts=accounts,
dao_voting=dao_voting,
skip_time=3 * 60 * 60 * 24,
)
assert not registry.isRecipientAllowed(recipient_candidate)
assert len(registry.getAllowedRecipients()) == recipients_length_before, 'Wrong whitelist length'
| lidofinance/scripts | archive/tests/test_2023_01_31.py | test_2023_01_31.py | py | 12,625 | python | en | code | 14 | github-code | 36 | [
{
"api_name": "brownie.accounts.at",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "brownie.accounts",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "brownie.accounts.at",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "brownie.acco... |
8733726032 | import pygame
pygame.init()
__screen_info = pygame.display.Info()
__height = __screen_info.current_h
# Screen constants
SCREEN_SIZE = (__height / 7 * 6, __height - 64)
ICON_SIZE = (64, 64)
LEFT_BOUND = 0
RIGHT_BOUND = SCREEN_SIZE[0] - ICON_SIZE[0]
TOP_BOUND = 0
BOTTOM_BOUND = SCREEN_SIZE[1]
ALIEN_INVASION_BOUND = SCREEN_SIZE[1] / 6 * 5 - ICON_SIZE[1]
# Player constants
INITIAL_PLAYER_COORDINATES = (SCREEN_SIZE[0] / 2 - ICON_SIZE[0] / 2,
SCREEN_SIZE[1] / 6 * 5)
PLAYER_BODY_LEFT_PAD = 6
PLAYER_BODY_TOP_PAD = 17
PLAYER_WIDTH = 52
PLAYER_HEIGHT = 31
# Num pixels the player moves left or right with each key pressed
PLAYER_SPEED = SCREEN_SIZE[0] * 9 / 800
# PlayerBullet constants
PLAYER_BULLET_SPEED = SCREEN_SIZE[1] / 100 * 3
PLAYER_BULLET_BODY_LEFT_PAD = 30
PLAYER_BULLET_BODY_TOP_PAD = 0
PLAYER_BULLET_WIDTH = 4
PLAYER_BULLET_HEIGHT = 17
# Alien constants
INITIAL_ALIEN_COORDINATES = (ICON_SIZE[0], SCREEN_SIZE[1] / 10 + ICON_SIZE[1])
ALIEN_BODY_LEFT_PAD = 2
ALIEN_BODY_TOP_PAD = 7
ALIEN_WIDTH = 60
ALIEN_HEIGHT = 48
NUM_ALIENS_PER_ROW = 8
BASE_NUM_ALIEN_ROWS = 3
MAX_NUM_ALIEN_ROWS = 5
ALIEN_HORIZONTAL_GAP = (
SCREEN_SIZE[0] - 2 * INITIAL_ALIEN_COORDINATES[0] - ICON_SIZE[0]) / (NUM_ALIENS_PER_ROW - 1)
ALIEN_VERTICAL_GAP = SCREEN_SIZE[1] / 15
BASE_ALIEN_MOVES_PER_SECOND = 0.4
# Num pixels alien traverses each time it moves
ALIEN_SPEED = SCREEN_SIZE[0] / 80
# Alien bullet constants
BASE_ALIEN_BULLET_SPEED = SCREEN_SIZE[1] / 100
BASE_ALIEN_CHANCE_TO_FIRE = 1 / 1_000
ALIEN_BULLET_BODY_LEFT_PAD = 26
ALIEN_BULLET_BODY_TOP_PAD = 36
ALIEN_BULLET_WIDTH = 12
ALIEN_BULLET_HEIGHT = 28
# UFO constants
INITIAL_UFO_COORDINATES = (0, 0)
BASE_UFO_SPEED = SCREEN_SIZE[0] / 160
UFO_WIDTH = 80
UFO_HEIGHT = 24
UFO_CHANCE_TO_APPEAR = 1 / 400
UFO_LEFT_PAD = 0
UFO_TOP_PAD = 16
UFO_WIDTH = 64
UFO_HEIGHT = 32
# Scales for the constants as the level progresses
ALIEN_LEVEL_BEATEN_MOVES_PER_SECOND_SCALE = 1.15
ALIEN_DROP_ROW_MOVES_PER_SECOND_SCALE = 1.25
ALIEN_CHANCE_TO_FIRE_SCALE = 1.065
UFO_SPEED_SCALE = 1.05
# Hud constants
FONT_SIZE = int(ICON_SIZE[0] / 1.7)
SCORE_TEXT_COORDINATES = (SCREEN_SIZE[0] / 5 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
SCORE_VALUE_COORDINATES = (
SCORE_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
LEVEL_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
LEVEL_VALUE_COORDINATES = (
LEVEL_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
NUM_LIVES_TEXT_COORDINATES = (SCREEN_SIZE[0] / 5 * 4 - FONT_SIZE * 5 / 2,
SCREEN_SIZE[1] / 100)
NUM_LIVES_VALUE_COORDINATES = (
NUM_LIVES_TEXT_COORDINATES[0], FONT_SIZE + SCREEN_SIZE[1] / 50)
NUM_POINTS_FOR_ALIEN_KILL = 10
# Game over screen constants
GAME_OVER_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * (9 / 2),
SCREEN_SIZE[1] / 2 - FONT_SIZE / 2 - FONT_SIZE * 3)
GAME_OVER_SCORE_TEXT_Y_COORDINATE = GAME_OVER_TEXT_COORDINATES[1] + FONT_SIZE * 2
HIGH_SCORE_TEXT_Y_COORDINATE = GAME_OVER_SCORE_TEXT_Y_COORDINATE + FONT_SIZE * 2
RESTART_TEXT_COORDINATES = (SCREEN_SIZE[0] / 2 - FONT_SIZE * (12 / 2),
HIGH_SCORE_TEXT_Y_COORDINATE + FONT_SIZE * 2)
# Other
NUM_LEVELS_TILL_NEW_ALIEN_ROW = 3
BASE_POINTS_PER_KILL = 10
BASE_POINTS_PER_UFO_KILL = 100
FPS = 30 # Used to maintain smooth movement
| SimonValentino/SpaceInvaders | constants.py | constants.py | py | 3,409 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.Info",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
}
] |
72911967144 | """The URL configuration of the application
When the particular URL is hit it feeds the request to the corresponding view.
"""
from django.urls import path
from django.views.generic import RedirectView
from .views import base, add_to_registry, validate_url, issue_registry, dashboard
urlpatterns = [
path('base/', base),
path('add/', add_to_registry),
path('api/validate_url', validate_url),
path('issue_registry/', issue_registry),
path('dashboard/', dashboard, name='dashboard'),
path('', RedirectView.as_view(pattern_name='dashboard', permanent=False)),
]
| architsingh15/django-radius-github | issue_tracker/urls.py | urls.py | py | 585 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.base",
"line_number": 10,
"usage_type": "argument"
},
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "views.add_to_registr... |
21213459172 | import csv
from django.db.models import Q
from django.http import HttpResponse
from data.models import (
List,
Pairing,
AOS,
BCP,
)
def raw_list(request, list_id):
list = List.objects.get(id=list_id)
return HttpResponse(list.raw_list.replace("\n", "<br>"))
def export_pairings_as_csv(request, game_type: int = AOS):
pairings = Pairing.objects.filter(
Q(event__start_date__range=["2023-07-01", "2023-12-31"])
& Q(event__rounds__in=[3, 5, 8])
& Q(event__game_type=game_type)
& Q(event__source=BCP)
).order_by("event__name", "-event__start_date", "round", "id")
response = HttpResponse(content_type="text/csv")
response["Content-Disposition"] = 'attachment; filename="pairings.csv"'
writer = csv.writer(
response,
quoting=csv.QUOTE_NONNUMERIC,
)
writer.writerow(
[
"pairing_id",
"round",
"player1_name",
"player2_name",
"player1_result",
"player2_result",
"player1_score",
"player2_score",
"event_name",
"event_date",
"event_end_date",
"event_country",
"event_online",
"season",
"player1_faction",
"player1_subfaction",
"player2_faction",
"player2_subfaction",
"player1_list_url",
"player2_list_url",
"source",
]
)
for pairing in pairings:
if pairing.event.source == BCP:
player1_name = (
f"{pairing.player1.player.source_json['firstName']} {pairing.player1.player.source_json['lastName']}"
if pairing.player1
else ""
)
player2_name = (
f"{pairing.player2.player.source_json['firstName']} {pairing.player2.player.source_json['lastName']}"
if pairing.player2
else ""
)
else:
player1_name = (
pairing.player1.player.source_json["playerName"]
if pairing.player1
else ""
)
player2_name = (
pairing.player2.player.source_json["playerName"]
if pairing.player2
else ""
)
event_country = (
pairing.event.source_json["country"] if pairing.event.source_json else ""
)
if "isOnlineEvent" in pairing.event.source_json:
event_online = pairing.event.source_json["isOnlineEvent"]
else:
event_online = False
player1_list_faction = (
pairing.player1_list.faction if pairing.player1_list else ""
)
player1_list_subfaction = (
pairing.player1_list.subfaction if pairing.player1_list else ""
)
player2_list_faction = (
pairing.player2_list.faction if pairing.player2_list else ""
)
player2_list_subfaction = (
pairing.player2_list.subfaction if pairing.player2_list else ""
)
if pairing.player1_list and len(pairing.player1_list.raw_list) > 10000:
pairing.player1_list.raw_list = "List too long"
if pairing.player2_list and len(pairing.player2_list.raw_list) > 10000:
pairing.player2_list.raw_list = "List too long"
writer.writerow(
[
pairing.id,
pairing.round,
player1_name,
player2_name,
pairing.player1_result,
pairing.player2_result,
pairing.player1_score,
pairing.player2_score,
pairing.event.name,
pairing.event.start_date,
pairing.event.end_date,
event_country,
event_online,
"2023",
player1_list_faction,
player1_list_subfaction,
player2_list_faction,
player2_list_subfaction,
pairing.player1_list.raw_list if pairing.player1_list else "",
pairing.player2_list.raw_list if pairing.player2_list else "",
"bcp" if pairing.event.source == BCP else "snl",
]
)
return response
| Puciek/aos_tools | data/views.py | views.py | py | 4,349 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "data.models.List.objects.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "data.models.List.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "data.models.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_n... |
39395995067 | from aiogram.types import (
ReplyKeyboardMarkup,
InlineKeyboardMarkup,
InlineKeyboardButton,
)
main_buttons = {
"ask": "Спросить 🤖",
}
class Keyboard:
def __init__(self):
self.main = self.make_main_buttons()
def make_main_buttons(self):
_keyboard_main = ReplyKeyboardMarkup(resize_keyboard=True)
for button_label in main_buttons.values():
_keyboard_main.add(button_label)
return _keyboard_main
@property
def translate(self):
inline_keyboard = InlineKeyboardMarkup(row_width=2)
true_button = InlineKeyboardButton(text="Да", callback_data=f"translate_1")
false_button = InlineKeyboardButton(text="Нет", callback_data=f"translate_0")
inline_keyboard.row(true_button, false_button)
return inline_keyboard
kb = Keyboard()
| Devil666face/ChatGPTosBot | bot/keyboard.py | keyboard.py | py | 856 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiogram.types.ReplyKeyboardMarkup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardMarkup",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "aiogram.types.InlineKeyboardButton",
"line_number": 25,
"usage_type"... |
70874370025 | import logging
from datetime import datetime
from typing import Generic, Text, Type, TypeVar
from uuid import UUID, uuid4
from injector import (
Injector,
UnknownProvider,
UnsatisfiedRequirement,
inject,
singleton,
)
from pydantic import BaseModel, Field
log = logging.getLogger(__name__)
class Command(BaseModel):
id: UUID = Field(default_factory=uuid4)
timestamp: datetime = Field(default_factory=datetime.utcnow)
class Config:
allow_mutation = False
def __str__(self) -> Text:
std_str = super().__str__()
return f"<Command:{self.__class__.__name__} {std_str}>"
class Event(BaseModel):
command_id: UUID
id: UUID = Field(default_factory=uuid4)
timestamp: datetime = Field(default_factory=datetime.utcnow)
class Config:
allow_mutation = False
def __str__(self) -> Text:
std_str = super().__str__()
return f"<Event:{self.__class__.__name__} {std_str}>"
TCommand = TypeVar("TCommand")
class Handler(Generic[TCommand]):
def __call__(self, command: TCommand) -> None:
raise NotImplementedError
@inject
@singleton
class CommandBus:
def __init__(self, container: Injector) -> None:
self._get = container.get
def handle(self, command: Command) -> None:
log.debug(command)
command_cls: Type[Command] = type(command)
handler = self._get(Handler[command_cls])
handler(command)
TEvent = TypeVar("TEvent")
class Listener(Generic[TEvent]):
def __call__(self, event: TEvent) -> None:
raise NotImplementedError
@inject
@singleton
class EventBus:
def __init__(self, container: Injector) -> None:
self._get = container.get
def emit(self, event: TEvent) -> None:
log.debug(event)
event_cls: Type[TEvent] = type(event)
try:
listeners = self._get(list[Listener[event_cls]])
except (UnsatisfiedRequirement, UnknownProvider):
listeners = []
for listener in listeners:
listener(event)
__all__ = ["Command", "CommandBus", "Event", "EventBus", "Handler", "Listener"]
| lzukowski/workflow | src/application/bus.py | bus.py | py | 2,137 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pydantic.BaseModel",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "uuid.UUID",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"... |
74286571624 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# PYTHON_ARGCOMPLETE_OK
# Pass --help flag for help on command-line interface
import sympy as sp
import numpy as np
from pyneqsys.symbolic import SymbolicSys
def solve(guess_a, guess_b, power, solver='scipy'):
""" Constructs a pyneqsys.symbolic.SymbolicSys instance and returns from its ``solve`` method. """
# The problem is 2 dimensional so we need 2 symbols
x = sp.symbols('x:2', real=True)
# There is a user specified parameter ``p`` in this problem:
p = sp.Symbol('p', real=True, negative=False, integer=True)
# Our system consists of 2-non-linear equations:
f = [x[0] + (x[0] - x[1])**p/2 - 1,
(x[1] - x[0])**p/2 + x[1]]
# We construct our ``SymbolicSys`` instance by passing variables, equations and parameters:
neqsys = SymbolicSys(x, f, [p]) # (this will derive the Jacobian symbolically)
# Finally we solve the system using user-specified ``solver`` choice:
return neqsys.solve([guess_a, guess_b], [power], solver=solver)
def main(guess_a=1., guess_b=0., power=3, savetxt='None', verbose=False):
"""
Example demonstrating how to solve a system of non-linear equations defined as SymPy expressions.
The example shows how a non-linear problem can be given a command-line interface which may be
preferred by end-users who are not familiar with Python.
"""
x, sol = solve(guess_a, guess_b, power) # see function definition above
assert sol.success
if savetxt != 'None':
np.savetxt(x, savetxt)
else:
if verbose:
print(sol)
else:
print(x)
if __name__ == '__main__': # are we running from the command line (or are we being imported from)?
try:
import argh
argh.dispatch_command(main, output_file=None)
except ImportError:
import sys
if len(sys.argv) > 1:
import warnings
warnings.warn("Ignoring parameters run "
"'pip install --user argh' to fix.")
main()
| bjodah/pyneqsys | examples/bi_dimensional.py | bi_dimensional.py | py | 2,055 | python | en | code | 38 | github-code | 36 | [
{
"api_name": "sympy.symbols",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sympy.Symbol",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pyneqsys.symbolic.SymbolicSys",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.savetx... |
4767724527 | import os
#os.environ['GLOG_minloglevel'] = '2'
from tqdm import trange
import caffe
import argparse
import pandas as pd
def main(solver_proto, out_dir):
caffe.set_mode_gpu()
solver = caffe.SGDSolver(solver_proto)
train_loss, test_loss = [], []
test_loss.append(solver.test_nets[0].blobs['loss'].data.copy())
for ix in trange(solver.param.max_iter, desc='overall progress'): #):
for jx in trange(solver.param.test_interval, desc='until next test'):
solver.step(1)
train_loss.append(solver.net.blobs['loss'].data.ravel()[0])
test_loss.append(solver.test_nets[0].blobs['loss'].data.ravel()[0])
if ix % 1 == 0:
solver.snapshot()
pd.DataFrame(train_loss, columns=['train_loss']).to_csv(os.path.join(out_dir, 'train_loss.csv'))
pd.DataFrame(test_loss, columns=['test_loss']).to_csv(os.path.join(out_dir, 'test_loss.csv'))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--solver', required=True, type=str)
parser.add_argument('--out_dir', default='.')
args = parser.parse_args()
main(args.solver, args.out_dir)
| alexkreimer/monocular-odometry | tools/solve.py | solve.py | py | 1,237 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "caffe.set_mode_gpu",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "caffe.SGDSolver",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tqdm.trange",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tqdm.trange",
"lin... |
42494180975 | """
WRITE ME
Tests for the R operator / L operator
For the list of op with r op defined, with or without missing test
see this file: doc/library/tensor/basic.txt
For function to automatically test your Rop implementation, look at
the docstring of the functions: check_mat_rop_lop, check_rop_lop,
check_nondiff_rop,
"""
from __future__ import absolute_import, print_function, division
import unittest
from theano.tests import unittest_tools as utt
from theano import function
import theano
from theano import tensor
import itertools
import numpy as np
from theano.gof import Op, Apply
from theano.gradient import grad_undefined
from theano.tests.unittest_tools import SkipTest
from theano.tensor.signal.pool import Pool
from theano.tensor.nnet import conv, conv2d
'''
Special Op created to test what happens when you have one op that is not
differentiable in the computational graph
'''
class BreakRop(Op):
"""
@note: Non-differentiable.
"""
__props__ = ()
def make_node(self, x):
return Apply(self, [x], [x.type()])
def perform(self, node, inp, out_):
x, = inp
out, = out_
out[0] = x
def grad(self, inp, grads):
return [grad_undefined(self, 0, inp[0])]
def R_op(self, inputs, eval_points):
return [None]
break_op = BreakRop()
class RopLop_checker(unittest.TestCase):
"""
Don't peform any test, but provide the function to test the
Rop to class that inherit from it.
"""
def setUp(self):
utt.seed_rng()
# Using vectors make things a lot simpler for generating the same
# computations using scan
self.x = tensor.vector('x')
self.v = tensor.vector('v')
self.rng = np.random.RandomState(utt.fetch_seed())
self.in_shape = (5 + self.rng.randint(3),)
self.mx = tensor.matrix('mx')
self.mv = tensor.matrix('mv')
self.mat_in_shape = (5 + self.rng.randint(3),
5 + self.rng.randint(3))
def check_nondiff_rop(self, y):
"""
If your op is not differentiable(so you can't define Rop)
test that an error is raised.
"""
raised = False
try:
tensor.Rop(y, self.x, self.v)
except ValueError:
raised = True
if not raised:
self.fail((
'Op did not raise an error even though the function'
' is not differentiable'))
def check_mat_rop_lop(self, y, out_shape):
"""
Test the Rop/Lop when input is a matrix and the output is a vector
:param y: the output variable of the op applied to self.mx
:param out_shape: Used to generate a random tensor
corresponding to the evaluation point of the Rop
(i.e. the tensor with which you multiply the
Jacobian). It should be a tuple of ints.
If the Op has more than 1 input, one of them must be mx, while
others must be shared variables / constants. We will test only
against the input self.mx, so you must call
check_mat_rop_lop/check_rop_lop for the other inputs.
We expect all inputs/outputs have dtype floatX.
If you want to test an Op with an output matrix, add a sum
after the Op you want to test.
"""
vx = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.mat_in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.mx, self.mv)
rop_f = function([self.mx, self.mv], yv, on_unused_input='ignore')
sy, _ = theano.scan(lambda i, y, x, v:
(tensor.grad(y[i], x) * v).sum(),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.mx, self.mv])
scan_f = function([self.mx, self.mv], sy, on_unused_input='ignore')
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
self.check_nondiff_rop(theano.clone(y, replace={self.mx: break_op(self.mx)}))
vv = np.asarray(self.rng.uniform(size=out_shape), theano.config.floatX)
yv = tensor.Lop(y, self.mx, self.v)
lop_f = function([self.mx, self.v], yv)
sy = tensor.grad((self.v * y).sum(), self.mx)
scan_f = function([self.mx, self.v], sy)
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
def check_rop_lop(self, y, out_shape):
"""
As check_mat_rop_lop, except the input is self.x which is a
vector. The output is still a vector.
"""
# TEST ROP
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
yv = tensor.Rop(y, self.x, self.v)
rop_f = function([self.x, self.v], yv, on_unused_input='ignore')
J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.x])
sy = tensor.dot(J, self.v)
scan_f = function([self.x, self.v], sy, on_unused_input='ignore')
v1 = rop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('ROP mismatch: %s %s' % (v1, v2))
known_fail = False
try:
self.check_nondiff_rop(theano.clone(y, replace={self.x: break_op(self.x)}))
except AssertionError:
known_fail = True
# TEST LOP
vx = np.asarray(self.rng.uniform(size=self.in_shape),
theano.config.floatX)
vv = np.asarray(self.rng.uniform(size=out_shape),
theano.config.floatX)
yv = tensor.Lop(y, self.x, self.v)
lop_f = function([self.x, self.v], yv, on_unused_input='ignore')
J, _ = theano.scan(lambda i, y, x: tensor.grad(y[i], x),
sequences=tensor.arange(y.shape[0]),
non_sequences=[y, self.x])
sy = tensor.dot(self.v, J)
scan_f = function([self.x, self.v], sy)
v1 = lop_f(vx, vv)
v2 = scan_f(vx, vv)
assert np.allclose(v1, v2), ('LOP mismatch: %s %s' % (v1, v2))
if known_fail:
raise SkipTest('Rop does not handle non-differentiable inputs '
'correctly. Bug exposed by fixing Add.grad method.')
class test_RopLop(RopLop_checker):
def test_shape(self):
self.check_nondiff_rop(self.x.shape[0])
def test_specifyshape(self):
self.check_rop_lop(tensor.specify_shape(self.x, self.in_shape),
self.in_shape)
def test_max(self):
# If we call max directly, we will return an CAReduce object
# which doesn't have R_op implemented!
# self.check_mat_rop_lop(tensor.max(self.mx, axis=[0,1])[0], ())
self.check_mat_rop_lop(tensor.max(self.mx, axis=0),
(self.mat_in_shape[1],))
self.check_mat_rop_lop(tensor.max(self.mx, axis=1),
(self.mat_in_shape[0],))
def test_argmax(self):
self.check_nondiff_rop(tensor.argmax(self.mx, axis=1))
def test_subtensor(self):
self.check_rop_lop(self.x[:4], (4,))
def test_incsubtensor1(self):
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
def test_incsubtensor2(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.inc_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
def test_setsubtensor1(self):
tv = np.asarray(self.rng.uniform(size=(3,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(self.x[:3], t)
self.check_rop_lop(out, self.in_shape)
def test_print(self):
out = theano.printing.Print('x', attrs=('shape',))(self.x)
self.check_rop_lop(out, self.in_shape)
def test_setsubtensor2(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.set_subtensor(t[:4], self.x[:4])
self.check_rop_lop(out, (10,))
def test_dimshuffle(self):
# I need the sum, because the setup expects the output to be a
# vector
self.check_rop_lop(self.x[:4].dimshuffle('x', 0).sum(axis=0),
(4,))
def test_rebroadcast(self):
# I need the sum, because the setup expects the output to be a
# vector
self.check_rop_lop(tensor.unbroadcast(
self.x[:4].dimshuffle('x', 0), 0).sum(axis=1),
(1,))
def test_downsample(self):
rng = np.random.RandomState(utt.fetch_seed())
# ws, shp
examples = (
((2,), (16,)),
((2,), (4, 16,)),
((2,), (4, 2, 16,)),
((1, 1), (4, 2, 16, 16)),
((2, 2), (4, 2, 16, 16)),
((3, 3), (4, 2, 16, 16)),
((3, 2), (4, 2, 16, 16)),
((3, 2, 2), (3, 2, 16, 16, 16)),
((2, 3, 2), (3, 2, 16, 16, 16)),
((2, 2, 3), (3, 2, 16, 16, 16)),
((2, 2, 3, 2), (3, 2, 6, 6, 6, 5)),
)
for example, ignore_border in itertools.product(examples, [True, False]):
(ws, shp) = example
vx = rng.rand(*shp)
vex = rng.rand(*shp)
x = theano.shared(vx)
ex = theano.shared(vex)
maxpool_op = Pool(ignore_border, ndim=len(ws))
a_pooled = maxpool_op(x, ws).flatten()
yv = tensor.Rop(a_pooled, x, ex)
mode = None
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
rop_f = function([], yv, on_unused_input='ignore', mode=mode)
sy, _ = theano.scan(lambda i, y, x, v:
(tensor.grad(y[i], x) * v).sum(),
sequences=tensor.arange(a_pooled.shape[0]),
non_sequences=[a_pooled, x, ex],
mode=mode)
scan_f = function([], sy, on_unused_input='ignore', mode=mode)
v1 = rop_f()
v2 = scan_f()
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_conv(self):
for conv_op in [conv.conv2d, conv2d]:
for border_mode in ['valid', 'full']:
image_shape = (2, 2, 4, 5)
filter_shape = (2, 2, 2, 3)
image_dim = len(image_shape)
filter_dim = len(filter_shape)
input = tensor.TensorType(
theano.config.floatX,
[False] * image_dim)(name='input')
filters = tensor.TensorType(
theano.config.floatX,
[False] * filter_dim)(name='filter')
ev_input = tensor.TensorType(
theano.config.floatX,
[False] * image_dim)(name='ev_input')
ev_filters = tensor.TensorType(
theano.config.floatX,
[False] * filter_dim)(name='ev_filters')
def sym_conv2d(input, filters):
return conv_op(input, filters, border_mode=border_mode)
output = sym_conv2d(input, filters).flatten()
yv = tensor.Rop(output, [input, filters], [ev_input, ev_filters])
mode = None
if theano.config.mode == "FAST_COMPILE":
mode = "FAST_RUN"
rop_f = function([input, filters, ev_input, ev_filters],
yv, on_unused_input='ignore', mode=mode)
sy, _ = theano.scan(lambda i, y, x1, x2, v1, v2:
(tensor.grad(y[i], x1) * v1).sum() +
(tensor.grad(y[i], x2) * v2).sum(),
sequences=tensor.arange(output.shape[0]),
non_sequences=[output, input, filters,
ev_input, ev_filters],
mode=mode)
scan_f = function([input, filters, ev_input, ev_filters], sy,
on_unused_input='ignore', mode=mode)
dtype = theano.config.floatX
image_data = np.random.random(image_shape).astype(dtype)
filter_data = np.random.random(filter_shape).astype(dtype)
ev_image_data = np.random.random(image_shape).astype(dtype)
ev_filter_data = np.random.random(filter_shape).astype(dtype)
v1 = rop_f(image_data, filter_data, ev_image_data, ev_filter_data)
v2 = scan_f(image_data, filter_data, ev_image_data, ev_filter_data)
assert np.allclose(v1, v2), ("Rop mismatch: %s %s" % (v1, v2))
def test_join(self):
tv = np.asarray(self.rng.uniform(size=(10,)),
theano.config.floatX)
t = theano.shared(tv)
out = tensor.join(0, self.x, t)
self.check_rop_lop(out, (self.in_shape[0] + 10,))
def test_dot(self):
insh = self.in_shape[0]
vW = np.asarray(self.rng.uniform(size=(insh, insh)),
theano.config.floatX)
W = theano.shared(vW)
self.check_rop_lop(tensor.dot(self.x, W), self.in_shape)
def test_elemwise0(self):
self.check_rop_lop((self.x + 1) ** 2, self.in_shape)
def test_elemwise1(self):
self.check_rop_lop(self.x + tensor.cast(self.x, 'int32'),
self.in_shape)
def test_reshape(self):
new_shape = tensor.constant(np.asarray([
self.mat_in_shape[0] * self.mat_in_shape[1]],
dtype='int64'))
self.check_mat_rop_lop(self.mx.reshape(new_shape),
(self.mat_in_shape[0] * self.mat_in_shape[1],))
def test_flatten(self):
self.check_mat_rop_lop(self.mx.flatten(),
(self.mat_in_shape[0] * self.mat_in_shape[1],))
def test_sum(self):
self.check_mat_rop_lop(self.mx.sum(axis=1), (self.mat_in_shape[0],))
def test_softmax(self):
# Softmax adds an extra dimnesion !
self.check_rop_lop(tensor.nnet.softmax(self.x)[0], self.in_shape[0])
def test_alloc(self):
# Alloc of the sum of x into a vector
out1d = tensor.alloc(self.x.sum(), self.in_shape[0])
self.check_rop_lop(out1d, self.in_shape[0])
# Alloc of x into a 3-D tensor, flattened
out3d = tensor.alloc(self.x, self.mat_in_shape[0], self.mat_in_shape[1], self.in_shape[0])
self.check_rop_lop(out3d.flatten(), self.mat_in_shape[0] * self.mat_in_shape[1] * self.in_shape[0])
def test_invalid_input(self):
success = False
try:
tensor.Rop(0., [tensor.matrix()], [tensor.vector()])
success = True
except ValueError:
pass
assert not success
def test_multiple_outputs(self):
    """Rop over a list/tuple of outputs must return a list/tuple of the same length."""
    m = tensor.matrix('m')
    v = tensor.vector('v')
    m_ = tensor.matrix('m_')
    v_ = tensor.vector('v_')
    mval = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
    vval = self.rng.uniform(size=(7,)).astype(theano.config.floatX)
    m_val = self.rng.uniform(size=(3, 7)).astype(theano.config.floatX)
    v_val = self.rng.uniform(size=(7,)).astype(theano.config.floatX)
    rop_out1 = tensor.Rop([m, v, m + v], [m, v], [m_, v_])
    assert isinstance(rop_out1, list)
    assert len(rop_out1) == 3
    rop_out2 = tensor.Rop((m, v, m + v), [m, v], [m_, v_])
    assert isinstance(rop_out2, tuple)
    assert len(rop_out2) == 3
    all_outs = []
    for o in rop_out1, rop_out2:
        all_outs.extend(o)
    # Compiling and running checks the Rop graphs are actually computable.
    f = theano.function([m, v, m_, v_], all_outs)
    f(mval, vval, m_val, v_val)
def test_Rop_dot_bug_18Oct2013_Jeremiah(self):
    """Regression test for a bug reported by Jeremiah Lowin on 18 Oct 2013.

    The bug occurred when, through a dot operation, there is only one
    differentiable path (i.e. there is no gradient with respect to one
    of the inputs).
    """
    x = tensor.arange(20.0).reshape([1, 20])
    v = theano.shared(np.ones([20]))
    d = tensor.dot(x, v).sum()
    # Must not raise even though x carries no gradient path.
    tensor.Rop(tensor.grad(d, v), v, v)
| Theano/Theano | theano/tests/test_rop.py | test_rop.py | py | 17,055 | python | en | code | 9,807 | github-code | 36 | [
{
"api_name": "theano.gof.Op",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "theano.gof.Apply",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "theano.gradient.grad_undefined",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "unittes... |
import pygame, sys, time, random
from pygame.locals import *
pygame.init()
mainClock = pygame.time.Clock()
# Life counters for player 1 / player 2.
lives = 3
lives2 = 3
# Window geometry.
width = 800
height = 600
windowSurface = pygame.display.set_mode((width, height), 0, 32)
pygame.display.set_caption('Star Wars!')
# Speeds (pixels per frame).
movementSpeed = 10
projectileSpeed = 30
scrollSpeed = 6
iambecomespeed = 200
# Frame counters and sprite/power-up containers.
shotFrameCounter = 0
targetFrameCounter = 0
collisionFrameCounter = 0
shots = []
shots2 = []
targets = []
lifeblocks = []
nopain = []
death = []
# Caps for simultaneous power-ups / shots on screen.
maxLives = 3
score = 0
maxTargets = 5
lifes = 4
maxShots = 3
Finvincible = 1
iambecome = 1
# Movement flags for both players.
moveLeft = False
moveLeft2 = False
moveRight = False
moveRight2 = False
# Colors (RGB).
black = (0, 0, 0)
white = (255, 255, 255)
red = (255, 0, 0)
green = (0, 255, 0)
blue = (0, 0, 255)
yellow = (255, 255, 0)
# Ship sprite size and starting rectangles (player 1 top, player 2 bottom).
x = 48
y = 48
t = 40
player = pygame.Rect(273, 20, x, t)
player2 = pygame.Rect(273, 530, x, t)
bg = pygame.Rect(0, -100, 10, 10)
shoot = False
shoot2 = False
# Image and sound assets (paths relative to the game directory).
background = pygame.image.load('Resources/Images/StarsPattern.png')
Da_Ship = pygame.image.load('Resources/Images/marrio.jpeg')
SS_Falcon = pygame.image.load('Resources/Images/SS Falcon.png').convert()
Rover = pygame.image.load('Resources/Images/World.png').convert()
The_World = pygame.image.load('Resources/Images/tuskc.png').convert()
pew = pygame.mixer.Sound('Resources/Audio/Gun+1.wav')
pew2 = pygame.mixer.Sound('Resources/Audio/Gun+Shot2.wav')
boom = pygame.mixer.Sound('Resources/Audio/Explosion+1.wav')
boom7 = pygame.mixer.Sound('Resources/Audio/boom7.wav')
# NOTE(review): pygame.mixer.music.load returns None, so `space` is always None.
space = pygame.mixer.music.load('Resources/Audio/Space Fighter Loop.mp3')
# Ship sprites scaled to the common size.
DASHIP = pygame.transform.scale(Da_Ship, (x, y))
FALCON = pygame.transform.scale(SS_Falcon, (x ,y))
ROVER = pygame.transform.scale(Rover, (x,y))
THE_WORLD = pygame.transform.scale(The_World, (x,y))
# Ship-selection state and title-screen widgets.
mcounter = 1
mouset = True
yellowrect = pygame.draw.rect(windowSurface, yellow, (400, 550, 30, 30))
greenRect = pygame.draw.rect(windowSurface, green, (250, 10, 500, 300))
titleFont = pygame.font.SysFont("none", 60)
myText = "Welcome to Space War! Here are the rules:"
text = titleFont.render(myText, True, black)
def _end_screen(winner_text):
    """Draw the winner banner plus "End Game?" / "New Game?" buttons and
    handle one batch of events.  Extracted because the two winner branches
    of end() were byte-for-byte duplicates apart from the banner text."""
    font = pygame.font.SysFont("none", 24)
    text2 = font.render(winner_text, True, white)
    windowSurface.blit(text2, (10, 10))
    thatRect = pygame.draw.rect(windowSurface, green, (50, 300, 390, 100))
    myText = "End Game?"
    thisRect = pygame.draw.rect(windowSurface, green, (50, 450, 390, 100))
    myText2 = "New Game?"
    text = titleFont.render(myText, True, black)
    textRect = text.get_rect()
    textRect.centerx = thatRect.centerx
    textRect.centery = thatRect.centery
    windowSurface.blit(text, textRect)
    text2 = titleFont.render(myText2, True, black)
    textRect2 = text.get_rect()
    textRect2.centerx = thisRect.centerx
    textRect2.centery = thisRect.centery
    windowSurface.blit(text2, textRect2)
    pygame.display.update()
    for event in pygame.event.get():
        if event.type == MOUSEBUTTONDOWN:
            # "End Game?" button: quit the program.
            if thatRect.left <= event.pos[0] <= thatRect.right and \
                    thatRect.top <= event.pos[1] <= thatRect.bottom:
                print("endgame selected!")
                pygame.quit()
                sys.exit()
            # "New Game?" button: restart from the title screen.
            if thisRect.left <= event.pos[0] <= thisRect.right and \
                    thisRect.top <= event.pos[1] <= thisRect.bottom:
                pygame.mixer.music.unpause()
                print("newgame selected")
                startgame()
                chooseship()
        if event.type == QUIT:
            print("quit selected!")
            pygame.quit()
            sys.exit()


def end(lives, lives2):
    """Game-over loop: keeps scrolling the background and shows the winner
    screen for whichever player ran out of lives.  Never returns normally;
    exits via sys.exit() or by restarting the game."""
    while True:
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        pygame.display.update()
        if lives <= 0:
            _end_screen("Player 2 WINS!")
        if lives2 <= 0:
            _end_screen("Player 1 WINS!")
        pygame.display.update()
def chooseship():
    """Ship-selection screen followed by the main two-player game loop.

    NOTE(review): indentation was lost in extraction; the nesting below was
    reconstructed from statement order and game logic — verify against the
    original source where flagged.
    """
    # ---- reset all per-round state ----
    shotFrameCounter = 0
    targetFrameCounter = 0
    collisionFrameCounter = 0
    shots = []
    shots2 = []
    targets = []
    lifeblocks = []
    nopain = []
    death = []
    maxLives = 3
    score = 0
    maxTargets = 5
    lifes = 4
    maxShots = 3
    Finvincible = 1
    iambecome = 1
    moveLeft = False
    moveLeft2 = False
    moveRight = False
    moveRight2 = False
    x = 48
    y = 54
    player = pygame.Rect(273, 20, x, y)
    player2 = pygame.Rect(273, 530, x, y)
    bg = pygame.Rect(0, -100, 10, 10)
    shoot = False
    shoot2 = False
    lives = 3
    lives2 = 3
    mcounter = 1  # 1 -> player 1 picks next, 2 -> player 2 picks next
    safe = 0      # invincibility-blink frames for player 1
    safe2 = 0     # invincibility-blink frames for player 2
    mouset = True
    # ---- ship-selection screen ----
    while mouset:
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        blueRect = pygame.draw.rect(windowSurface, blue, (200, 100, 60, 60))
        redRect = pygame.draw.rect(windowSurface, red, (200, 300, 60, 60))
        greenRect = pygame.draw.rect(windowSurface, green, (400, 100, 60, 60))
        whiteRect = pygame.draw.rect(windowSurface, white, (400, 300, 60, 60))
        firstship = "The World"
        secondship = "Rover"
        thirdship = "Inevitability"
        fourthship = "Falcon"
        daFont = pygame.font.SysFont("none", 20)
        hrship = daFont.render(firstship, True, blue)
        windowSurface.blit(hrship, (200, 170))
        rhship = daFont.render(secondship, True, red)
        windowSurface.blit(rhship, (200, 370))
        ssship = daFont.render(thirdship, True, green)
        windowSurface.blit(ssship, (400, 170))
        tship = daFont.render(fourthship, True, white)
        windowSurface.blit(tship, (400, 370))
        windowSurface.blit(THE_WORLD, blueRect)
        windowSurface.blit(ROVER, redRect)
        windowSurface.blit(DASHIP, greenRect)
        windowSurface.blit(FALCON, whiteRect)
        pygame.display.update()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            # Player 2's pick (mcounter == 2) is checked first so a single
            # click cannot select a ship for both players.
            if event.type == MOUSEBUTTONDOWN:
                if greenRect.left <= event.pos[0] <= greenRect.right and \
                        greenRect.top <= event.pos[1] <= greenRect.bottom:
                    if mcounter == 2:
                        ship2 = DASHIP
                        shipname = ("daship")
                        mouset = False
                if blueRect.left <= event.pos[0] <= blueRect.right and \
                        blueRect.top <= event.pos[1] <= blueRect.bottom:
                    if mcounter == 2:
                        ship2 = THE_WORLD
                        shipname = ("world")
                        mouset = False
                if redRect.left <= event.pos[0] <= redRect.right and \
                        redRect.top <= event.pos[1] <= redRect.bottom:
                    if mcounter == 2:
                        ship2 = ROVER
                        shipname = ("Rover")
                        mouset = False
                if whiteRect.left <= event.pos[0] <= whiteRect.right and \
                        whiteRect.top <= event.pos[1] <= whiteRect.bottom:
                    if mcounter == 2:
                        ship2 = FALCON
                        shipname = ("Falcon")
                        mouset = False
            # Player 1's pick (mcounter == 1) flips mcounter to 2.
            if event.type == MOUSEBUTTONDOWN:
                if greenRect.left <= event.pos[0] <= greenRect.right and \
                        greenRect.top <= event.pos[1] <= greenRect.bottom:
                    if mcounter == 1:
                        ship1 = DASHIP
                        shipname = ("daship")
                        mcounter = 2
                if blueRect.left <= event.pos[0] <= blueRect.right and \
                        blueRect.top <= event.pos[1] <= blueRect.bottom:
                    if mcounter == 1:
                        ship1 = THE_WORLD
                        shipname = ("mworld")
                        mcounter = 2
                if redRect.left <= event.pos[0] <= redRect.right and \
                        redRect.top <= event.pos[1] <= redRect.bottom:
                    if mcounter == 1:
                        ship1 = ROVER
                        shipname = ("Rover")
                        mcounter = 2
                if whiteRect.left <= event.pos[0] <= whiteRect.right and \
                        whiteRect.top <= event.pos[1] <= whiteRect.bottom:
                    if mcounter == 1:
                        ship1 = FALCON
                        shipname = ("Falcon")
                        mcounter = 2
    # Player 1 sits at the top of the screen, so their ship faces down.
    ship1 = pygame.transform.rotate(ship1, 180)
    great = True
    while great:
        # ---- input ----
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == KEYDOWN:
                if event.key == K_LEFT:
                    moveLeft = True
                if event.key == K_RIGHT:
                    moveRight = True
                if event.key == K_p:
                    shoot = True
                    pew2.play()
                if event.key == K_a:
                    moveLeft2 = True
                if event.key == K_d:
                    moveRight2 = True
                if event.key == K_SPACE:
                    shoot2 = True
                    pew.play()
            if event.type == KEYUP:
                if event.key == K_LEFT:
                    moveLeft = False
                if event.key == K_RIGHT:
                    moveRight = False
                if event.key == K_p:
                    shoot = False
                if event.key == K_a:
                    moveLeft2 = False
                if event.key == K_d:
                    moveRight2 = False
                if event.key == K_SPACE:
                    shoot2 = False
                if event.key == K_ESCAPE:
                    pygame.quit()
                    sys.exit()
        # ---- movement, clamped to the window ----
        if moveLeft2 == True:
            if player2.left > 0:
                player2.left -= movementSpeed
        if moveRight2 == True:
            if player2.right < width:
                player2.right += movementSpeed
        if moveLeft == True:
            if player.left > 0:
                player.left -= movementSpeed
        if moveRight == True:
            if player.right < width:
                player.right += movementSpeed
        # ---- draw frame ----
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        windowSurface.blit(ship1, player)
        windowSurface.blit(ship2, player2)
        # Drop power-ups that scrolled off the left edge.
        for target in targets[:]:
            if target.left < - 20:
                targets.remove(target)
        for life in lifeblocks[:]:
            if life.left < - 20:
                lifeblocks.remove(life)
        for invincible in nopain[:]:
            if invincible.left < - 20:
                nopain.remove(invincible)
        for dead in death[:]:
            if dead.left < -20:
                death.remove(dead)
        # ---- player 1 shots (travel downward) ----
        if shoot == True and (len(shots) < maxShots):
            shots.append(pygame.Rect(player.centerx - 3, player.centery - 3, 6, 6))
        for i in range(len(shots)):
            pygame.draw.rect(windowSurface, green, shots[i])
            shots[i].bottom += projectileSpeed
            if shots[i].colliderect(player2):
                lives2 -= 1
                shots[i].top = 600
                boom7.play()
            for target in targets[:]:
                if shots[i].colliderect(target):
                    targets.remove(target)
                    lives -= 1
                    shots[i].top = 600
                    boom.play()
            for life in lifeblocks[:]:
                if shots[i].colliderect(life):
                    lifeblocks.remove(life)
                    lives += 1
                    shots[i].top = 600
                    boom.play()
            for invincible in nopain[:]:
                if shots[i].colliderect(invincible):
                    nopain.remove(invincible)
                    if safe == 0:
                        safe = 30
                    # NOTE(review): reconstructed nesting — may belong inside `if safe == 0`.
                    maxLives -= 1
            for dead in death[:]:
                if shots[i].colliderect(dead):
                    lives2 = 1
                    shots[i].top = 600
                    boom.play()
        # Blink player 1's ship while invincible.
        if safe > 0:
            if safe % 3 == 0:
                boom.play()
                ship1.set_alpha(255)
                safe -= 1
            else:
                ship1.set_alpha(0)
        else:
            ship1.set_alpha(255)
        # ---- player 2 shots (travel upward) ----
        if shoot2 == True and (len(shots2) < maxShots):
            shots2.append(pygame.Rect(player2.centerx - 3, player2.centery - 3, 6, 6))
        for i in range(len(shots2)):
            pygame.draw.rect(windowSurface, red, shots2[i])
            shots2[i].bottom -= projectileSpeed
            if shots2[i].colliderect(player):
                lives -= 1
                boom7.play()
            for target in targets[:]:
                if shots2[i].colliderect(target):
                    targets.remove(target)
                    lives2 -= 1
                    shots2[i].bottom = 0
            for life in lifeblocks[:]:
                if shots2[i].colliderect(life):
                    lifeblocks.remove(life)
                    lives2 += 1
                    shots2[i].bottom = 0
            for invincible in nopain[:]:
                if shots2[i].colliderect(invincible):
                    invincible.left = -10
                    if safe2 == 0:
                        safe2 = 30
            for dead in death[:]:
                if shots2[i].colliderect(dead):
                    lives = 1
                    shots2[i].bottom = 0
        # Blink player 2's ship while invincible.
        if safe2 > 0:
            if safe2 % 3 == 0:
                boom.play()
                ship2.set_alpha(255)
                safe2 -= 1
            else:
                ship2.set_alpha(0)
        else:
            ship2.set_alpha(255)
        # ---- shot cleanup ----
        for shot in shots[:]:
            if shot.top > 620:
                shots.remove(shot)
        for shot in shots[:]:
            if shot.colliderect(player2):
                shot.top = 600
        for shot2 in shots2[:]:
            if shot2.bottom < 0:
                shots2.remove(shot2)
        for shot2 in shots2[:]:
            if shot2.colliderect(player):
                shot2.bottom = 0
        # ---- random power-up spawning ----
        z = random.randint(0, 23)
        if z == 4:
            if len(targets) < maxTargets:
                targets.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 13:
            if len(lifeblocks) < lifes:
                lifeblocks.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 5:
            if len(nopain) < Finvincible:
                nopain.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        if z == 1:
            if len(death) < iambecome:
                death.append(pygame.Rect(width + 20, random.randint(100, height - 100), 40, 20))
        # ---- draw and scroll power-ups ----
        for i in range(len(targets)):
            pygame.draw.rect(windowSurface, red, targets[i])
            targets[i].left -= movementSpeed
        for i in range(len(lifeblocks)):
            pygame.draw.rect(windowSurface, blue, lifeblocks[i])
            lifeblocks[i].left -= movementSpeed
        for i in range(len(nopain)):
            pygame.draw.rect(windowSurface, black, nopain[i])
            nopain[i].left -= movementSpeed
        for i in range(len(death)):
            pygame.draw.rect(windowSurface, white, death[i])
            death[i].left -= iambecomespeed
        # ---- HUD ----
        font = pygame.font.SysFont("none", 20)
        scoreText = "Lives: " + str(lives)
        text2 = font.render(scoreText, True, green)
        windowSurface.blit(text2, (10, 10))
        font = pygame.font.SysFont("none", 20)
        scoreText = "Lives: " + str(lives2)
        text3 = font.render(scoreText, True, red)
        windowSurface.blit(text3, (750, 560))
        pygame.display.update()
        mainClock.tick(60)
        # Tick down invincibility timers.
        if safe > 0:
            safe -= 1
        if safe2 > 0:
            safe2 -= 1
        if lives <= 0 or lives2 <= 0:
            end(lives, lives2)
def playmusic():
    """Start the looping background track at low volume."""
    v = .1
    pygame.mixer.music.load('Resources/Audio/Space Fighter Loop.mp3')
    pygame.mixer.music.play(-1, 0)  # -1 loops forever
    pygame.mixer.music.set_volume(v)
def startgame():
    """Title screen; after "Start game?" is clicked, plays music and shows
    the rules screen until the yellow button launches ship selection.

    NOTE(review): indentation was lost in extraction; nesting reconstructed
    from statement order — verify against the original source.
    """
    game = True
    realgame = False  # kept for compatibility; not read anywhere visible
    while game:
        windowSurface.fill(black)
        windowSurface.blit(background, bg)
        bg.left -= scrollSpeed
        if bg.left < -800:
            bg.left = 0
        greenRect = pygame.draw.rect(windowSurface, green, (200, 250, 390, 100))
        titleFont = pygame.font.SysFont("none", 90)
        myText = "Start game?"
        text = titleFont.render(myText, True, black)
        textRect = text.get_rect()
        textRect.centerx = windowSurface.get_rect().centerx
        textRect.centery = windowSurface.get_rect().centery
        windowSurface.blit(text, textRect)
        bigFont = pygame.font.SysFont("none", 100)
        Spacewar = "SPACE WAR"
        text3 = bigFont.render(Spacewar, True, red)
        windowSurface.blit(text3, (200, 100))
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                sys.exit()
            if event.type == MOUSEBUTTONDOWN:
                if greenRect.left <= event.pos[0] <= greenRect.right and \
                        greenRect.top <= event.pos[1] <= greenRect.bottom:
                    playmusic()
                    tules = True
                    # Fix: `yellowrect` is assigned later in this function,
                    # making it local; reading it in the event loop before the
                    # first drawing pass would raise UnboundLocalError.
                    # Draw it once up front so the first click is safe.
                    yellowrect = pygame.draw.rect(windowSurface, yellow, (400, 550, 30, 30))
                    # Rules screen: loop until the yellow button is pressed.
                    while tules:
                        for event in pygame.event.get():
                            if event.type == QUIT:
                                pygame.quit()
                                sys.exit()
                            if event.type == MOUSEBUTTONDOWN:
                                if yellowrect.left <= event.pos[0] <= yellowrect.right and \
                                        yellowrect.top <= event.pos[1] <= yellowrect.bottom:
                                    chooseship()
                        pygame.display.update()
                        windowSurface.fill(black)
                        windowSurface.blit(background, bg)
                        bg.left -= scrollSpeed
                        if bg.left < -800:
                            bg.left = 0
                        yellowrect = pygame.draw.rect(windowSurface, yellow, (400, 550, 30, 30))
                        greenRect = pygame.draw.rect(windowSurface, green, (30, 10, 720, 40))
                        redrect = pygame.draw.rect(windowSurface, red, (60, 150, 70, 40))
                        bluerect = pygame.draw.rect(windowSurface, blue, (60, 220, 70, 40))
                        blackrect = pygame.draw.rect(windowSurface, black, (60, 290, 70, 40))
                        whiterect = pygame.draw.rect(windowSurface, white, (60, 360, 70, 40))
                        rulered = "if you hit the red rectangle, you lose a life"
                        ruleblue = "if you hit the blue rectangle, you get a life"
                        ruleblack = "if you hit the black rectangle, you are invisible (but it itself is basically invisible) until you shoot"
                        rulewhite = " if you hit the white rectangle, the other character gets their lives reduced to one life(you can try to hit it, anyway.)"
                        titleFont = pygame.font.SysFont("none", 50)
                        myText = "Welcome to Space War! Here are the rules:"
                        text = titleFont.render(myText, True, black)
                        Start = "READY? PRESS THE YELLOW BUTTON!"
                        text3 = titleFont.render(Start, True, blue)
                        windowSurface.blit(text3, (75, 500))
                        windowSurface.blit(text, greenRect)
                        littleFont = pygame.font.SysFont("none", 20)
                        tusk = pygame.font.SysFont("none", 18)
                        text4 = littleFont.render(rulered, True, red)
                        windowSurface.blit(text4, (160, 150))
                        text5 = littleFont.render(ruleblue, True, blue)
                        windowSurface.blit(text5, (160, 220))
                        text6 = littleFont.render(ruleblack, True, white)
                        windowSurface.blit(text6, (160, 290))
                        text7 = tusk.render(rulewhite, True, white)
                        windowSurface.blit(text7, (160, 360))
                        pygame.display.update()
        pygame.display.update()
# Entry point: show the title screen (blocks until the player quits).
startgame()
| Noah04322/Assignments | End of Year.py | End of Year.py | py | 23,466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.time.Clock",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pygame.time",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.set_mode",... |
17887984325 | from PySide2.QtCore import QUrl, QObject, Slot
from PySide2.QtGui import QGuiApplication
from PySide2.QtQuick import QQuickView
class MyClass(QObject):
    """Python-side bridge object exposed to the QML context as ``con``."""

    @Slot(int, result=str)  # declare as a slot: int argument, str return value
    def returnValue(self, value):
        """Return ``value + 10`` as a string to the QML caller."""
        return str(value + 10)
if __name__ == '__main__':
    path = 'src/demo1.qml'
    app = QGuiApplication([])
    view = QQuickView()
    con = MyClass()
    # Expose the bridge object to QML under the name "con".
    context = view.rootContext()
    context.setContextProperty("con", con)
    # Quit the application when the QML engine quits.
    view.engine().quit.connect(app.quit)
    view.setSource(QUrl(path))
    view.show()
    app.exec_()
| pyminer/pyminer | pyminer/widgets/widgets/basic/quick/demo1.py | demo1.py | py | 635 | python | en | code | 77 | github-code | 36 | [
{
"api_name": "PySide2.QtCore.QObject",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "PySide2.QtCore.Slot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "PySide2.QtGui.QGuiApplication",
"line_number": 18,
"usage_type": "call"
},
{
"api_name"... |
2153945744 | from django.shortcuts import render
# Product catalog: name -> unit price (KRW).
product_price = {"라면": 980, "홈런볼": 1500, "칙촉": 2300, "식빵": 1800}


# Create your views here.
def price(request, thing, cnt):
    """Render the price page for `cnt` units of product `thing`.

    Unknown products get y_n='n' and a zero price instead of a 404.
    """
    if thing in product_price:
        y_n = 'y'
        unit_price = product_price[thing]  # renamed: avoid shadowing this view's name
    else:
        y_n = 'n'
        unit_price = 0
    context = {
        'y_n': y_n,
        'price': unit_price,
        'thing': thing,
        'cnt': cnt,
        'total': unit_price * cnt,
    }
    return render(request, 'prices/price.html', context)
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
}
] |
28536518861 | """project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import (
    include,
    url,
)
from django.contrib import admin

from classifier.views import CreateClassification
from healthcheck.views import HealthCheck
from report.views import CreateReport
from secret.views import Secret

# URL routing table for the project.
urlpatterns = [
    # Django admin site.
    url(r'^admin/', admin.site.urls),
    # djoser authentication endpoints (registration + token auth).
    url(r'^auth/', include('djoser.urls')),
    url(r'^auth/', include('djoser.urls.authtoken')),
    # Image-classification endpoint.
    url(
        r'^classify-image',
        CreateClassification.as_view(),
        name='classify-image',
    ),
    url(r'^health', HealthCheck.as_view(), name='healthcheck'),
    url(r'^report', CreateReport.as_view(), name='report'),
    url(r'^secret', Secret.as_view(), name='secret'),
]
| mechtron/coreys-image-classifier | api/project/urls.py | urls.py | py | 1,341 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 29,
"usage_type": "name"
},
{
"api_name... |
from __future__ import print_function, division

import os
import logging
import math

import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter

# True when not running on the local dev machine (detected by its home path).
IS_ON_SERVER = False if os.getcwd().startswith('/home/SENSETIME/yuanjing1') else True

# Axis name -> numpy array dimension, assuming (z, y, x) array layout.
axis_name2np_dim = {
    "x": 2,
    "y": 1,
    "z": 0,
}
# --------
# data normalization
def mean_std_norm(array):
    """Z-score normalize: subtract the mean, divide by the std (epsilon-guarded)."""
    return (array - array.mean()) / (array.std() + 1e-10)
def rescale01(arr):
    """Linearly rescale `arr` so its min maps to 0 and its max to 1."""
    return (arr - arr.min()) / (arr.max() - arr.min())
def window_rescale(arr, a_min=None, a_max=None):
    """Clip `arr` to the window [a_min, a_max], then rescale that window to [0, 1].

    Both bounds must be given; passing None to both makes np.clip raise.
    """
    arr = np.clip(arr, a_min=a_min, a_max=a_max)
    return (arr - a_min) / (a_max - a_min)
def auto_window(arr):
    """Window-rescale using the 1st and 99th percentiles as bounds (robust to outliers)."""
    return window_rescale(arr, a_min=np.percentile(arr, 1), a_max=np.percentile(arr, 99))
# --------
# pad & crop
def get_pad_border(origin_shape, target_shape):
    """Per-dimension (before, after) pad widths to grow origin_shape to target_shape.

    The extra amount is split evenly, with the odd remainder going after.
    Returns a tuple of (before, after) pairs usable with np.pad.
    """
    assert len(origin_shape) == len(target_shape), 'Dimension mismatch.'
    borders = []
    for i in range(len(origin_shape)):
        tmp = target_shape[i] - origin_shape[i]
        borders.extend((tmp // 2, tmp - tmp // 2))
    return tuple(zip(borders[::2], borders[1::2]))
def pad_zyx_constant(nda, target_shape, pad_value=0, strict=False):
    """Constant-pad `nda` up to `target_shape` (centered).

    If strict, target_shape must be >= nda.shape in every dimension;
    otherwise smaller target dimensions are clamped to the input size.
    """
    assert nda.ndim == len(target_shape), 'Dimension mismatch.'
    if strict:
        assert np.all(np.array(target_shape) >= np.array(nda.shape)), 'Target shape must be larger than input shape.'
    else:
        target_shape = np.maximum(nda.shape, target_shape)
    borders = get_pad_border(nda.shape, target_shape)
    nda = np.pad(nda, borders, mode='constant', constant_values=pad_value)
    return nda
def center_crop_zyx(nda, target_shape):
    """Crop the centered `target_shape` region out of `nda`."""
    starts = np.asarray((np.asarray(nda.shape) - np.asarray(target_shape)) // 2)
    slice_fn = tuple(map(slice, starts, np.asarray(starts) + np.asarray(target_shape)))
    return nda[slice_fn]
def constant_pad_crop(nda, target_shape, pad_value=0, strict=False):
    """Pad then center-crop so the result has exactly `target_shape`."""
    assert nda.ndim == len(target_shape), 'Dimension mismatch.'
    nda = pad_zyx_constant(nda, target_shape, pad_value, strict)
    return center_crop_zyx(nda, target_shape)
# --------
# logger
def log_init(log_dir):
    """Set up root logging (file + console at INFO) and a TensorBoard writer.

    Returns (logger, writer); log text goes to <log_dir>/log.txt, TensorBoard
    events to <log_dir>/tb.
    """
    logger = logging.getLogger()
    logger.setLevel(level=logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(lineno)d - %(module)s - %(message)s')
    # handler for txt file
    f_handler = logging.FileHandler(os.path.join(log_dir, 'log.txt'))
    f_handler.setLevel(logging.INFO)
    f_handler.setFormatter(formatter)
    # handler for screen
    c_handler = logging.StreamHandler()
    c_handler.setLevel(logging.INFO)
    c_handler.setFormatter(formatter)
    logger.addHandler(f_handler)
    logger.addHandler(c_handler)
    writer = SummaryWriter(log_dir=os.path.join(log_dir, 'tb'))
    return logger, writer
# --------
def get_bbox(np_lbl):
    """Tight bounding box of the nonzero voxels: per-dimension [min, max] (inclusive)."""
    lbl_indices = np.nonzero(np_lbl)
    bbox = np.array([[i.min(), i.max()] for i in lbl_indices])
    return bbox
def hist_match(source, template):
    """Histogram-match `source` to `template` and return the remapped array.

    Maps each source value to the template value at the same cumulative
    quantile; output has source's shape but (float) template-like intensities.
    """
    oldshape = source.shape
    source = source.ravel()
    template = template.ravel()
    # get the set of unique pixel values and their corresponding indices and counts
    s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)
    t_values, t_counts = np.unique(template, return_counts=True)
    # Empirical CDFs of both images, normalized to [0, 1].
    s_quantiles = np.cumsum(s_counts).astype(np.float64)
    s_quantiles /= s_quantiles[-1]
    t_quantiles = np.cumsum(t_counts).astype(np.float64)
    t_quantiles /= t_quantiles[-1]
    # For each source quantile, look up the matching template intensity.
    interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)
    return interp_t_values[bin_idx].reshape(oldshape)
def make_one_hot(labels, num_classes):
    r"""
    Convert int labels to one-hot encoding labels.

    :param labels: N x H x W or N x D x H x W shape torch.LongTensor
    :param num_classes: class number controlling output channel C
    :return: N x C x H x W or N x C x D x H x W one-hot LongTensor
    """
    # Insert the channel dimension, then scatter 1s at the label indices.
    labels = torch.unsqueeze(labels, dim=1)
    one_hot_shape = list(labels.size())
    one_hot_shape[1] = num_classes
    one_hot = torch.zeros(one_hot_shape).long().to(labels.device)
    return one_hot.scatter_(dim=1, index=labels, value=1)
def save_csv(cfg, file_list, file_name):
    """Write `file_list` as a one-column CSV named `file_name` under `cfg.save_dir`."""
    df = pd.DataFrame(np.array(file_list), columns=['file names'])
    df.to_csv(os.path.join(cfg.save_dir, file_name), index=False)
def train_val_test_split(data_list, train_control, val_control, data_stratify=None, random_seed=None):
    """Split `data_list` into train/val/test lists.

    `train_control` / `val_control` are either fractions (float) or absolute
    counts (int); whatever remains goes to test.  If `data_stratify` is given,
    the per-class proportions are preserved (last class absorbs the rounding
    remainder).  `random_seed` makes the shuffling reproducible.
    """
    def _split_one_group(data_list, train_num, val_num, random_seed=None):
        data_length = len(data_list)
        if random_seed is not None:
            np.random.seed(random_seed)
        ids_seq = np.random.permutation(data_length)
        return data_list[ids_seq[0:train_num]], \
               data_list[ids_seq[train_num:train_num + val_num]], \
               data_list[ids_seq[train_num + val_num:]]

    data_length = len(data_list)
    if type(data_list) != np.ndarray:
        data_list = np.array(data_list)
    # Fractions round train up (ceil) and val down (floor).
    train_num = int(math.ceil(train_control * data_length)) if isinstance(train_control, float) else train_control
    val_num = int(math.floor(val_control * data_length)) if isinstance(val_control, float) else val_control
    if data_stratify is None:
        train_list, val_list, test_list = _split_one_group(data_list, train_num, val_num, random_seed)
    else:
        if type(data_stratify) != np.ndarray:
            data_stratify = np.array(data_stratify)
        classes, classes_counts = np.unique(data_stratify, return_counts=True)
        train_ratio = train_control if isinstance(train_control, float) else train_num / data_length
        val_ratio = val_control if isinstance(val_control, float) else val_num / data_length
        train_nums = []
        val_nums = []
        for i in range(len(classes)):
            if i != len(classes) - 1:
                train_nums.append(int(math.ceil(train_ratio * classes_counts[i])))
                val_nums.append(int(math.floor(val_ratio * classes_counts[i])))
            else:
                # Last class takes whatever is left so the totals are exact.
                train_nums.append(train_num - np.asarray(train_nums).sum())
                val_nums.append(val_num - np.asarray(val_nums).sum())
        train_list = np.array([])
        val_list = np.array([])
        test_list = np.array([])
        for i, (t, v) in enumerate(zip(train_nums, val_nums)):
            tmp_train_list, tmp_val_list, tmp_test_list = \
                _split_one_group(data_list[data_stratify == classes[i]], t, v,
                                 random_seed + i * 10 if random_seed is not None else random_seed)
            train_list = np.concatenate((train_list, tmp_train_list))
            val_list = np.concatenate((val_list, tmp_val_list))
            test_list = np.concatenate((test_list, tmp_test_list))
    return train_list.tolist(), val_list.tolist(), test_list.tolist()
| eugeneyuan/test_rep | src/utils/miscs.py | miscs.py | py | 6,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.clip",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.percentile",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.all",
"line_number": ... |
23012638929 | """
Script for labeling purposes
Usage :
python label_processing.py -d [PATH TO IMAGES FOLDER] [PARAMS]
Parameters:
-d : Path to the folder where the images is stored
-l : Lower all labels in xml files.
-c : Count each label of all the xml files
-s : Find images with specific label
-lm : Create label_map.pbtxt
-ar : Remove xml files without images
RangRang - Machine Learning - 2021
"""
import os, argparse, glob
import xml.etree.ElementTree as ET
def auto_remove(path):
    """Delete orphan .xml annotation files that have no matching image.

    A stem appearing fewer than twice in the directory means the pair
    (image + xml) is incomplete.  Fix over the original: only remove the
    .xml if it actually exists — a lone image file used to raise
    FileNotFoundError when its nonexistent .xml was removed.
    """
    images = [os.path.splitext(x) for x in os.listdir(path)]
    images = [x for x, y in images]
    for x in set(images):
        if images.count(x) < 2:
            xml_path = os.path.join(path, x + '.xml')
            if os.path.isfile(xml_path):
                os.remove(xml_path)
def make_labelmap(path, export_dir):
    """Create label_map.pbtxt in `export_dir` from all <object> names in
    `path`'s xml files, numbering labels from 1 in first-seen order."""
    labels = []
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            if member[0].text not in labels:
                labels.append(member[0].text)
    with open(os.path.join(export_dir, 'label_map.pbtxt'), 'w') as w:
        for i, label in enumerate(labels):
            w.writelines('item {\n id: ' + str(i + 1) + "\n name: '" + label + "'\n}\n\n")
    print(f'[INFO] label_map.pbtxt exported to {export_dir}')
def counter(path):
    """Count each <object> label across all xml files in `path` and print the totals."""
    count = {}
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            if member[0].text in count:
                count[member[0].text] += 1
            else:
                count[member[0].text] = 1
    print('[INFO] Label Counter')
    for i, (key, value) in enumerate(count.items()):
        print(f' {i + 1}. {key} : {value}')
def search(path, indexs):
    """Print, for each comma-separated label in `indexs`, the xml files that contain it."""
    images = {}
    for xml_file in glob.glob(path + '/*.xml'):
        images[os.path.basename(xml_file)] = []
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            images[os.path.basename(xml_file)].append(member[0].text)
    print('[INFO] Label Finder')
    for label in indexs.split(','):
        print(f' {label} '.center(20, '#'))
        for img in [x for x, y in images.items() if label in y]:
            print(f' - {img}')
        print()
def lower(path):
    """Lowercase every <object> label in the xml files under `path`, rewriting in place."""
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        for member in root.findall('object'):
            member[0].text = member[0].text.lower()
        element = ET.tostring(root)
        with open(xml_file, "wb") as w:
            w.write(element)
def UA(path):
    """Update each xml's <filename> to match its own stem when they disagree.

    The extension is .jpg if a matching .jpg exists next to the xml,
    otherwise .jpeg.  (Original docstring was a copy-paste of lower()'s.)

    NOTE(review): indentation was lost in extraction; the rewrite is assumed
    to happen only when the stems differ — verify against the original source.
    """
    for xml_file in glob.glob(path + '/*.xml'):
        root = ET.parse(xml_file).getroot()
        img_file = list(os.path.splitext(os.path.basename(root.find('filename').text)))
        xml_file_ = list(os.path.splitext(os.path.basename(xml_file)))
        if img_file[0] != xml_file_[0]:
            img_file[0] = xml_file_[0]
            if os.path.isfile(os.path.join(path, img_file[0] + '.jpg')):
                img_file[1] = '.jpg'
            else:
                img_file[1] = '.jpeg'
            img_file = img_file[0] + img_file[1]
            root.find('filename').text = img_file
            print(f'[INFO] Writing {xml_file}')
            element = ET.tostring(root)
            with open(xml_file, "wb") as w:
                w.write(element)
if __name__ == '__main__':
    # Command-line dispatcher: -d is mandatory; the remaining flags each
    # trigger one of the helper functions above.
    parser = argparse.ArgumentParser(description="Labeling helper script")
    parser.add_argument("-d", "--img_dir",
                        help="Path to the folder where the images is stored.",
                        type=str)
    parser.add_argument("-l", "--to_lower",
                        help="Lower all labels in xml files.",
                        action='store_true')
    parser.add_argument("-c", "--counter",
                        help="Count each label of all the xml files",
                        action='store_true')
    parser.add_argument("-s", "--search",
                        help="Find images with specific label",
                        type=str)
    parser.add_argument("-lm", "--label_map",
                        help="Create label_map.pbtxt",
                        type=str)
    parser.add_argument("-ar", "--auto_remove",
                        help="Delete xlm files without img",
                        action='store_true')
    parser.add_argument("-ua", "--update_annotation",
                        help="Update annotation",
                        action='store_true')
    args = parser.parse_args()
    if args.img_dir is None:
        raise KeyError('Harus menyertakan -d argument atau folder dimana images disimpan')
    if args.to_lower:
        lower(args.img_dir)
    if args.counter:
        counter(args.img_dir)
    if args.search:
        search(args.img_dir, args.search)
    if args.label_map:
        make_labelmap(args.img_dir, args.label_map)
    if args.auto_remove:
        auto_remove(args.img_dir)
    if args.update_annotation:
        UA(args.img_dir)
{
"api_name": "os.path.splitext",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.remove",
"line_number... |
6789488341 | from django.contrib import auth
from django.core.exceptions import ObjectDoesNotExist
from .models import Member
class EcosystemActivityMiddleware:
EXTENSIONS_EXCLUDED = ['js', 'map', 'css']
PATHS_EXCLUDED = ['/api/graphql-jwt']
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
user = self._get_user(request)
if user.is_authenticated and \
request.method in ['POST', 'PUT', 'DELETE']:
try:
member = Member.objects.get(user=user)
member.update_activity()
except ObjectDoesNotExist:
pass
return self.get_response(request)
def _get_user(self, request):
if not hasattr(request, '_cached_user'):
request._cached_user = auth.get_user(request)
return request._cached_user
| tomasgarzon/exo-services | service-exo-core/ecosystem/middleware.py | middleware.py | py | 888 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.Member.objects.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "models.Member.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "models.Member",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "dj... |
19406242010 | #
# @lc app=leetcode id=24 lang=python3
#
# [24] Swap Nodes in Pairs
#
# @lc code=start
# Definition for singly-linked list.
from typing import Optional
class ListNode:
def __init__(self, val=0, next=None):
self.val = val
self.next = next
class Solution:
def swapPairs(self, head: Optional[ListNode]) -> Optional[ListNode]:
dummy = ListNode(0)
dummy.next = head
pre = dummy
while head and head.next:
left = head
right = head.next
pre.next = right
left.next = right.next
right.next = left
pre = left
head = pre.next
if head:
pre.next = head
return dummy.next
# @lc code=end
| Matthewow/Leetcode | vscode_extension/24.swap-nodes-in-pairs.py | 24.swap-nodes-in-pairs.py | py | 772 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Optional",
"line_number": 18,
"usage_type": "name"
}
] |
30180224626 | from flask import Flask, render_template, request, redirect
import datetime
app=Flask(__name__)
messages=[]
@app.route('/')
def index():
return render_template("index.html.jinja2", messages=messages)
@app.route('/post/add/', methods=['POST'])
def add_message():
text = request.form.get('message')
timestamp = datetime.datetime.now()
messages.append({'text': text, 'timestamp': timestamp})
return redirect('/')
if __name__ == "__main__":
app.run(debug=True) | haishengbao/website | app.py | app.py | py | 490 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.request.form.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.request... |
29277732322 | # QUESTION:
# In Hogwarts the currency is made up of galleon (G) and Sickle (s),
# and there are seven coins in general circulation:
# 1s, 5s, 10s, 25s, 50s, G1(100s), and G2(200s)
# It's possible to make G3.5 in the following way:
# 1xG2 +1xG1 + 4x10s +1x5s + 5x1s
# How many different ways can G3.5 be made using any number of coins?
# Using Dynamic Programming: Bottom Up Memoization
from typing import List
def count(coins: List[int], sum: int):
n = len(coins)
# Initiate a table to store results
# The rows represent the sum, and the columns represent the coins
# The value of table[i][j] will be the number of solutions for
# sum = i and coins[0..j]
table = [[0 for x in range(n)] for x in range(sum+1)]
# Fill the entries for 0 sum
for i in range(n):
table[0][i] = 1
# Fill rest of the table entries in bottom up manner
for i in range(1, sum+1):
for j in range(n):
coin = coins[j]
# Count of solutions which include the coin
x = table[i - coin][j] if i-coin >= 0 else 0
# Count of solutions which do not include the coin
y = table[i][j-1] if j >= 1 else 0
# total count
table[i][j] = x + y
# for i, row in enumerate(table):
# print(f"{i}: {row}")
return table[sum][n-1]
# Hogwart coins as presented in the question
coins = [1, 5, 10, 25, 50, 100, 200]
sum = 350
print(f"There are {count(coins, sum)} ways to make {sum} using the following coins: {coins}") | krissukoco/hogwarts-coins | main.py | main.py | py | 1,525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
}
] |
25114962700 | from flask import Flask
from flask_restful import Resource, Api
from flask_jwt_extended import JWTManager
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from controler import *
app = Flask(__name__)
api = Api(app)
app.config['JWT_SECRET_KEY'] = 'qwejhfloimslvuywdkkvuhssss'
jwt = JWTManager(app)
engine = create_engine('postgresql://postgres:1234@localhost/db11', echo=True)
Base.metadata.create_all(engine)
Session = sessionmaker(bind=engine)
session = Session()
if __name__ == "__main__":
api.add_resource(AddUser, '/user')
api.add_resource(Login, '/login')
api.add_resource(GetUser, '/user/<int:id>')
api.add_resource(GetMyself, '/user')
api.add_resource(UpdateUser, '/user')
api.add_resource(AddBank, '/bank')
api.add_resource(GetBank, '/bank/<int:id>')
api.add_resource(AddCredit, '/user/credit')
api.add_resource(UpdateCredit, '/user/credit/<int:credit_id>')
api.add_resource(GetCredit, '/user/credit/<int:credit_id>')
api.add_resource(AddTransaction, '/user/credit/<int:credit_id>/transaction')
api.add_resource(GetTransaction, '/user/credit/<int:credit_id>/transaction/<int:transaction_id>')
app.run(debug=True)
"""{
"username":"Vovik",
"first_name":"Vova",
"last_name":"Putin",
"phone":"09348124",
"email":"putin@gmail.com",
"password":"123"
}"""
'''
add bank
{
"all_money": 500000,
"per_cent" : 30
}
add user
{
"login": "mylogin",
"password": "my password",
"name": "myname",
"passport": "myUKRpasport",
"address": "Lviv",
"email": "user@gmail.com",
"phone_number": "88005553535"
"status": ""
}
add credit
{
"start_date": "21.01.2020",
"end_date": "21.01.2021",
"start_sum": 1000,
"current_sum": 100,
"bank_id": 1,
"user_id": 1
}
add transaction
{
"date": "17.12.2020",
"summ": 200
}
''' | VolodymyrVoloshyn02/PP | app.py | app.py | py | 1,882 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_restful.Api",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask_jwt_extended.JWTManager",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchem... |
72485218024 | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from . import views
from .apis import listings, users
from knox import views as knox_views
router = DefaultRouter()
router.register(r"users", users.UserViewSet, basename="users_api")
router.register(r"listings", listings.AuctionViewSet, basename="listings_api")
# https://stackoverflow.com/questions/63439268/how-to-use-parameters-in-drf-router
router.register(r"listings/(?P<auction_id>\d+)/bids", listings.BidViewSet, basename="bids_api")
router.register(r"listings/(?P<auction_id>\d+)/comments", listings.CommentViewSet, basename="comments_api")
urlpatterns = [
path("listings/<int:listing_id>/watch", listings.watch, name="watch_action"),
# User (Knox) Routes
path('', include(router.urls)),
#path('users/<int:pk>/update', users.UserUpdateView.as_view()),
#path('users/<int:pk>/deactivate', users.UserDeactivateView.as_view()),
path('auth-api/', include('knox.urls')),
path('logout', knox_views.LogoutView.as_view(), name="knox-logout"),
path('register', users.RegisterView.as_view()),
path('login', users.LoginView.as_view()),
] | pecodeliar/BayleeWeb | api/urls.py | urls.py | py | 1,160 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "apis.users.UserViewSet",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "apis.users",
"line_number": 8,
"usage_type": "name"
},
{
"api_name... |
484991740 |
# features - word cnt, character cnt, sentence cnt, word freq
import sys
import time
import json
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
import numpy as np
import pandas as pd
porter = PorterStemmer()
stop_words = set(stopwords.words('english'))
train_file = "/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q1/reviews_Digital_Music_5.json/Music_Review_train.json"
test_file = "/Users/aparahuja/Desktop/IITD/ML/Assignment 2/Q1/reviews_Digital_Music_5.json/Music_Review_test.json"
if len(sys.argv) > 1:
train_file = sys.argv[1]
test_file = sys.argv[2]
train_data = pd.read_json(train_file, lines=True)
test_data = pd.read_json(test_file, lines=True)
vocabulary = set()
theta = {}
phi = {}
cnt = {}
m = len(train_data)
def F1_Confusion(y_true, y_predict):
confusion = np.zeros((5, 5))
n = len(y_true)
for i in range(n):
confusion[int(y_true[i]) - 1][int(y_predict[i]) - 1] += 1
# print("Confusion Matrix: ")
# print(confusion)
print("F1 scores:")
f1_avg = 0
for i in range(5):
tp = confusion[i][i]
fn = sum([confusion[j][i] if i != j else 0 for j in range(5)])
fp = sum([confusion[i][j] if i != j else 0 for j in range(5)])
f1_score = tp/(tp+(fp+fn)/2)
f1_avg += f1_score/5
print("\tClass " + str(i+1) + " = " + "{:.5f}".format(f1_score))
print("Macro F1 score = " + "{:.5f}".format(f1_avg))
def tokenize(review):
tokens = [w.lower() for w in word_tokenize(review)]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [porter.stem(word) for word in stripped if word.isalpha()]
return [word for word in words if word not in stop_words]
def findcharClass(n):
if n < 400:
return "$A$"
if n < 800:
return "$B$"
if n < 1200:
return "$C$"
if n < 1600:
return "$D$"
return "$E$"
def findwordClass(n):
if n < 100:
return "$A$"
if n < 200:
return "$B$"
if n < 300:
return "$C$"
if n < 400:
return "$D$"
return "$E$"
def findDenseClass(n):
if n < 4:
return "$A$"
if n < 8:
return "$B$"
if n < 12:
return "$C$"
if n < 16:
return "$D$"
return "$E$"
TokenizedReviews = []
def initializeModel(fcnt):
global vocabulary, theta, phi, cnt, TokenizedReviews
vocabulary = set()
theta = {}
phi = {}
cnt = {}
TokenizedReviews = []
for index, data in train_data.iterrows():
review = data['reviewText']
label = data['overall']
theta[label] = {}
cnt[label] = 0
phi[label] = 0
words = tokenize(review)
# convert to bigrams
bigrams = [words[i] + words[i+1] for i in range(len(words) - 1)]
if fcnt == 0:
TokenizedReviews.append(words + bigrams)
for word in words + bigrams:
vocabulary.add(word)
else:
TokenizedReviews.append(words)
for word in words:
vocabulary.add(word)
if fcnt > 0:
vocabulary.add("$A$")
vocabulary.add("$B$")
vocabulary.add("$C$")
vocabulary.add("$D$")
vocabulary.add("$E$")
vocabulary.add("UNK")
file = open("vocabulary_e.txt", "w")
file.write(json.dumps(list(vocabulary)))
file.close()
for label in theta:
for word in vocabulary:
theta[label][word] = 0
def learnParameters(fcnt):
for index, data in train_data.iterrows():
review = data['reviewText']
label = data['overall']
words = TokenizedReviews[index]
phi[label] += 1
for word in words:
theta[label][word] += 1
cnt[label] += 1
if fcnt == 1:
theta[label][findcharClass(len(review))] += 1
if fcnt == 2:
theta[label][findwordClass(len(words))] += 1
# avg word length
# theta[label][findDenseClass(len(review) / len(words))] += 1
for label in theta:
for word in vocabulary:
theta[label][word] = (theta[label][word] + 1) / \
(cnt[label] + len(vocabulary) + 1)
phi[label] /= m
def predict(words, label):
ans = np.log(phi[label])
for word in words:
if word in vocabulary:
ans += np.log(theta[label][word])
else:
ans += np.log(theta[label]["UNK"])
return ans
def findModelAccuracy(input_data, fcnt, datatype):
print("Running model on " + datatype + " data.")
correct = 0
total = len(input_data)
y_true = []
y_predict = []
for index, data in input_data.iterrows():
review = data['reviewText']
words = tokenize(review)
bigrams = [words[i] + words[i+1] for i in range(len(words) - 1)]
ans_label = data['overall']
ans, logProbab = "", - sys.maxsize
for label in phi:
if fcnt == 0:
prediction = predict(words + bigrams, label)
if fcnt == 1:
prediction = predict(
words, label) + 5*np.log(theta[label][findcharClass(len(review))])
if fcnt == 2:
prediction = predict(
words, label) + 5*np.log(theta[label][findwordClass(len(words))])
# avg word length
# prediction = predict(words, label) + 5 * np.log(theta[label][findDenseClass(len(review) / len(words))])
if logProbab <= prediction:
ans = label
logProbab = prediction
if ans_label == ans:
correct += 1
y_true.append(ans_label)
y_predict.append(ans)
F1_Confusion(y_true, y_predict)
print("Model " + datatype + " accuracy:",
"{:.2f}".format(correct/total*100) + "%")
features = ['single words + bigrams',
'single words + charcter count', 'single words + word count']
fcnt = 0
for feature in features:
print("\nTesting with feature - " + feature)
st = time.time()
initializeModel(fcnt)
learnParameters(fcnt)
en = time.time()
print("Training Time = " + "{:.2f}".format(en - st) + " sec")
# findModelAccuracy(train_data, "Training")
findModelAccuracy(test_data, fcnt, "Testing")
fcnt += 1
| AparAhuja/Machine_Learning | Naive Bayes and SVM/Q1/e.py | e.py | py | 6,378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 14,
"usage_type": "name"
},
{
"... |
1742330108 | import requests
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.views.generic import ListView, CreateView, DetailView, UpdateView, DeleteView
from .models import *
# Create your class-based views here.
@login_required()
def home(request):
fridge = Fridge.objects.filter(user=request.user)
context = {
'fridge': fridge,
}
return render(request, 'fridge/home.html', context)
class FridgeDetailView(DetailView):
model = Fridge
# CREATE ITEM
class FridgeCreateView(LoginRequiredMixin, CreateView):
model = Fridge
fields = ['name', 'quantity']
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
# UPDATE ITEM
class FridgeUpdateView(LoginRequiredMixin, UserPassesTestMixin, UpdateView):
model = Fridge
# When successfully deleted, will take user back to homepage
success_url = '/fridge'
fields = ['name', 'quantity']
def form_valid(self, form):
form.instance.user = self.request.user
return super().form_valid(form)
def test_func(self):
fridge_item = self.get_object()
# Prevents others to update other people's items
if self.request.user == fridge_item.user:
return True
return False
class FridgeDeleteView(LoginRequiredMixin, UserPassesTestMixin, DeleteView):
model = Fridge
# When successfully deleted, will take user back to homepage
success_url = '/fridge'
def test_func(self):
fridge_item = self.get_object()
# Prevents others to update other people's items
if self.request.user == fridge_item.user:
return True
return False
@login_required()
def recipe(request):
url = "https://spoonacular-recipe-food-nutrition-v1.p.rapidapi.com/recipes/random"
querystring = {
'number': 1,
'veryPopular': True,
'fillIngredients': True,
'addRecipeInformation': True,
'addRecipeNutrition': True
}
headers = {
'x-rapidapi-host': "spoonacular-recipe-food-nutrition-v1.p.rapidapi.com",
'x-rapidapi-key': "f8540d734amsh0d72a908c3766d4p1be29fjsn28baee86ebe6"
}
res = requests.request("GET", url, headers=headers, params=querystring).json()['recipes']
instructions = get_instructions(res[0]['analyzedInstructions'])
ingredients = get_ingredients(res[0]['extendedIngredients'])
context = {
'title': res[0]['title'],
'instructions': instructions,
'ingredients': ingredients,
'recipe_link': res[0]['sourceUrl'],
'image_link': res[0]['image'],
}
print(res[0]['summary'])
return render(request, 'fridge/fridge_recipe.html', context)
def get_instructions(res: list) -> list:
instructions = []
for instruction in res[0]['steps']:
instructions.append(instruction['step'])
return instructions
def get_ingredients(res: list) -> list:
ingredients = []
for ingredient in res:
ingredients.append(ingredient['name'])
return ingredients
| cysong12/COMP-2800-Team-DTC-14-Cominder | Apps/fridge/views.py | views.py | py | 3,201 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.decorators.login_required",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.views.generic.DetailView",
"line_number": 20,
"usage_type": ... |
23407025114 | # -*- coding: utf-8 -*-
"""
Department to Employee is One to Many.
解决方案:
有的时候 One to Many 对应的 Many 可能数量太多, 无法作为冗余跟 One 一起储存.
这时可以以 One.id 建立 Global Index.
问题:
由于 GSI 本质是另一个 DynamoDB Table, 只不过系统帮你自动维护了. 同样的 GSI 也会根据
hash key 做 partition 分散流量. 如果 One 这边的 entity 的数量不够多. 那么会导致 GSI
的流量不均衡.
"""
import os
import random
import string
import typing
import pynamodb
from pynamodb.attributes import UnicodeAttribute
from pynamodb.connection import Connection
from pynamodb.indexes import GlobalSecondaryIndex, KeysOnlyProjection
from pynamodb.models import Model
os.environ["AWS_DEFAULT_PROFILE"] = "eq_sanhe"
connection = Connection()
class DepartmentModel(Model):
class Meta:
table_name = "one-to-many-department-2"
region = "us-east-1"
billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
department_id = UnicodeAttribute(hash_key=True)
department_name = UnicodeAttribute()
@classmethod
def _create_one(cls, department_id, department_name):
try:
cls.get(hash_key=department_id)
except Model.DoesNotExist:
cls(
department_id=department_id,
department_name=department_name,
).save()
# @classmethod
# def _find_employees(cls, department_id: str):
# employee_info_map: EmployeeInfoMap
# return [
# EmployeeModel(
# employee_id=employee_info_map.employee_id,
# employee_name=employee_info_map.employee_name,
# )
# for employee_info_map in cls.get(hash_key=department_id).employees
# ]
class DepartmentEmployeeIndex(GlobalSecondaryIndex):
class Meta:
index = "one-to-many-department-employee-index-2"
projection = KeysOnlyProjection
department_id = UnicodeAttribute(hash_key=True, null=True)
class EmployeeModel(Model):
"""
A DynamoDB User
"""
class Meta:
table_name = "one-to-many-employee-2"
region = "us-east-1"
billing_mode = pynamodb.models.PAY_PER_REQUEST_BILLING_MODE
employee_id = UnicodeAttribute(hash_key=True)
employee_name = UnicodeAttribute()
department_id = UnicodeAttribute(null=True)
department_index = DepartmentEmployeeIndex()
@classmethod
def _create_one(cls, employee_id, employee_name, department_id=None):
try:
cls.get(hash_key=employee_id)
except Model.DoesNotExist:
cls(
employee_id=employee_id,
employee_name=employee_name,
department_id=department_id,
).save()
@classmethod
def _assign_department(cls, employee_id, department_id: str):
employee: EmployeeModel = cls.get(hash_key=employee_id)
if employee.department_id == department_id:
raise ValueError
else:
employee.update(
actions=[
EmployeeModel.department_id.set(department_id)
]
)
@classmethod
def _find_department(cls, employee_id: str) -> DepartmentModel:
return DepartmentModel.get(hash_key=cls.get(hash_key=employee_id).department_id)
DepartmentModel.create_table(wait=True)
EmployeeModel.create_table(wait=True)
class BusinessQuery:
@classmethod
def find_employees_by_department(cls, department_id) -> typing.Iterable[EmployeeModel]:
return EmployeeModel.department_index.query(department_id)
# --- Create Employee
def create_department():
DepartmentModel._create_one(department_id="IT", department_name="Internet Technology")
DepartmentModel._create_one(department_id="HR", department_name="Human Resource")
# create_department()
def create_employee():
def random_name():
return "".join(random.sample(string.ascii_lowercase, 8))
department_list = ["IT", "HR"]
n_employee = 1000
with EmployeeModel.batch_write() as batch:
for i in range(1, 1 + n_employee):
employee = EmployeeModel(
employee_id=f"e-{i}",
employee_name=random_name(),
department_id=random.choice(department_list)
)
batch.save(employee)
# create_employee()
def find_employees():
counter = 0
for employee in BusinessQuery.find_employees_by_department(department_id="IT"):
counter += 1
print(employee.employee_id, employee.employee_name)
print(f"total = {counter}")
find_employees()
def delete_all_tables():
DepartmentModel.delete_table()
EmployeeModel.delete_table()
# delete_all_tables()
| MacHu-GWU/Dev-Exp-Share | docs/source/01-AWS/01-All-AWS-Services-Root/21-Database/01-DynamoDB-Root/04-Dynamodb-Data-Modeling/principal/one-to-many/strategy2.py | strategy2.py | py | 4,760 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "pynamodb.connection.Connection",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pynamodb.models.Model",
"line_number": 34,
"usage_type": "name"
},
{
"api_name": "... |
38221121523 | from itertools import permutations
def solution(k, dungeons):
answer = 0
per = list(permutations(dungeons, len(dungeons)))
for i in range(len(per)):
cnt = 0
copy_k = k
for j in range(len(per[i])):
if per[i][j][0] <= copy_k:
copy_k -= per[i][j][1]
cnt += 1
if cnt > answer:
answer = cnt
return answer
| kh-min7/Programmers | 87946(피로도).py | 87946(피로도).py | py | 411 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.permutations",
"line_number": 5,
"usage_type": "call"
}
] |
25650877857 | import sqlite3
from sqlite3 import Error
def create_connection(path):
connection = None
try:
connection = sqlite3.connect(path)
print("Connection to SQLite DB successful")
except Error as e:
print(f"The error '{e}' occurred")
return connection
def connect_to_db():
return create_connection("contact_db.sqlite")
def execute_query(connection, query):
cursor = connection.cursor()
try:
cursor.execute(query)
connection.commit()
print("Query executed successfully")
except Error as e:
print(f"The error '{e}' occurred")
def execute_read_query(connection, query):
cursor = connection.cursor()
result = None
try:
cursor.execute(query)
result = cursor.fetchall()
return result
except Error as e:
print(f"The error '{e}' occurred")
def insert_query(email, subject, message):
Insert_contact = f"INSERT INTO contacts (email, subject, message) VALUES ('{email}', '{subject}', '{message}');"
return Insert_contact
def read_query():
return "SELECT * from contacts"
if __name__ =="__main__":
connection = create_connection("contact_db.sqlite")
create_contacts_table = """
CREATE TABLE IF NOT EXISTS contacts (
id INTEGER PRIMARY KEY AUTOINCREMENT,
email TEXT NOT NULL,
subject TEXT,
message TEXT
);
"""
execute_query(connection, create_contacts_table)
# Insert_contact = """
# INSERT INTO
# contacts (email, subject, message)
# VALUES
# ('test@test.com', 'test', 'test1'),
# ('test@test.com', 'test', 'test2');
# """
# execute_query(connection, Insert_contact)
select_contacts = "SELECT * from contacts"
contacts = execute_read_query(connection, select_contacts)
for contact in contacts:
print(contact) | rezzco/portfo | shit.py | shit.py | py | 1,772 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sqlite3.Error",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "sqlite3.Error",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sqlite3.Error",
"line_n... |
3896815670 | from datetime import timedelta
from pathlib import Path
import pytest
import htmap
from htmap.utils import timeout_to_seconds, wait_for_path_to_exist
def test_returns_when_path_does_exist():
path = Path(__file__)
wait_for_path_to_exist(path)
@pytest.mark.parametrize("timeout", [0, -1])
def test_timeout_on_nonexistent_path(timeout):
path = Path("foo")
with pytest.raises(htmap.exceptions.TimeoutError):
wait_for_path_to_exist(path, timeout=timeout)
@pytest.mark.parametrize(
"timeout, expected", [(1, 1.0), (0.1, 0.1), (timedelta(seconds=2.3), 2.3), (None, None),],
)
def test_timeout_to_seconds(timeout, expected):
assert timeout_to_seconds(timeout) == expected
| htcondor/htmap | tests/unit/test_wait_for_path_to_exist.py | test_wait_for_path_to_exist.py | py | 706 | python | en | code | 29 | github-code | 36 | [
{
"api_name": "pathlib.Path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "htmap.utils.wait_for_path_to_exist",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pytest.r... |
26884869558 | import torch
from torch.utils.data import Dataset
import torch.utils.data.dataloader as dataloader
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from numpy.matlib import repmat
import math
MAX_LEN = 300
AUDIO_TYPE_ID = {'vowel-u': 0,'vowel-i': 1,'vowel-a': 2,'alphabet-a-z': 3, 'cough': 4, 'count-1-20': 5,}
# IMP_OVEC_FEAT = [16, 100, 709 , 88, 612, 484, 1390, 591, 94, 716, 499, 463, 373, 95, 1407, 86 ]#, \
# 194, 401, 1389, 380, 381, 49, 495, 319, 1, 24, 685, 465, 711, 727, 1132, 695, \
# 356, 726, 352, 10, 815, 729, 1153, 421, 332, 1327, 395, 700, 1432, 583, 1202, 754, 1306, 291]
# IMP_OVEC_FEAT = np.arange(1409,1582)
# IMP_OVEC_FEAT = np.arange(1582)
IMP_OVEC_FEAT = np.arange(10)
def uttToSpkChile(fullname):
f = fullname.split('/')[-1]#[:-1]
spk_id = f.split('_')[1]
return spk_id
class Dataset(Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, X, Y,all_files_ovec, UTT_TO_SPK):
self.X = X
self.Y = Y
self.mfc_to_ovec = self.mfcToOvec(X, all_files_ovec)
self.UTT_TO_SPK = UTT_TO_SPK
def __len__(self):
'Denotes the total number of samples'
return len(self.X)
def mfcToOvec(self, all_files_mfc, all_files_ovec):
'''return {mfcfile: ovecfile, ... }'''
mfc_file_base = list(map(lambda x: x.split('/')[-1].split('.')[0], all_files_mfc ))
ovec_file_base = list(map(lambda x: ('_').join(x.split('/')[-1].split('.')[0].split('_')[:-1]), all_files_ovec ))
# print(mfc_file_base)
# print(ovec_file_base)
res = {}
i = 0
for mfc_file in mfc_file_base:
j = 0
for ovec_file in ovec_file_base:
if mfc_file != ovec_file:
j += 1
continue
res[all_files_mfc[i]] = all_files_ovec[j]
break
i += 1
return res
def __getitem__(self, index):
x = self.X[index] # filename
y = self.Y[index] # int (0,1)
ovec_file = self.mfc_to_ovec[x]
ovec_feat = np.load(ovec_file)[IMP_OVEC_FEAT]
# print(ovec_file)
# print(np.load(ovec_file).shape)
# print(ovec_feat)
# exit()
# print(x, ovec_file)
audio_type = x.split('/')[-1].split('_')[0]
spk = self.UTT_TO_SPK[uttToSpkChile(x)]
# print(x, spk)
feat = np.load(x)
# print("FEAT: ", feat.shape)
### FOR SPEC ###
# need to do the transpose for spectrograms but not for mfccs
# feat = feat.transpose()
################
orig_len = feat.shape[0]
feat = repmat(feat, int(math.ceil(MAX_LEN/(feat.shape[0]))),1)
feat = feat[:MAX_LEN,:]
#### shuffling the cylinder ##
# pivot = np.random.randint(MAX_LEN)
# idx1 = np.arange(pivot, MAX_LEN)
# idx2 = np.arange(0, pivot)
# idx = np.concatenate((idx1, idx2))
# feat = feat[idx]
###############################
feat = feat.transpose()
return feat, int(y), AUDIO_TYPE_ID[audio_type], spk, ovec_feat
class BasicBlock(nn.Module):
def __init__(self, planes):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv1d(planes, planes, 3, 1, 1, bias=False)
self.bn1 = nn.BatchNorm1d(planes)
self.relu = nn.ReLU(inplace=True)
self.leakyrelu = nn.LeakyReLU(inplace=True)
self.conv2 = nn.Conv1d(planes, planes, 3, 1, 1, bias=False)
self.bn2 = nn.BatchNorm1d(planes)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.leakyrelu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.leakyrelu(out)
return out
class conv_model1(nn.Module):
def __init__(self, TOTAL_NUM_SPKS):
super(conv_model1, self).__init__()
self.num_filter = 128
self.encoder = nn.Conv1d(40, self.num_filter, 3, 1, bias=False, padding=1)
self.encoder1 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1)
self.encoder2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
# self.decoder = nn.Conv1d(self.num_filter, 40, 3, 1, bias=False, padding=1)
self.decoder = nn.ConvTranspose1d(self.num_filter, self.num_filter, 3,1,padding=1)
self.decoder1 = nn.ConvTranspose1d(self.num_filter, self.num_filter, 3,1,padding=1)
self.decoder2 = nn.ConvTranspose1d(self.num_filter, 40, 3,1,padding=1)
self.f2 = nn.Linear(self.num_filter, 6) # 6 classes
self.f3 = nn.Linear(self.num_filter, TOTAL_NUM_SPKS)
self.f4 = nn.Linear(self.num_filter, len(IMP_OVEC_FEAT))
self.basic1 = BasicBlock(self.num_filter)
self.basic2 = BasicBlock(self.num_filter)
self.bn = nn.BatchNorm1d(40)
def forward(self,x):
# x = self.bn(x)
enc = self.encoder(x)
enc = nn.LeakyReLU()(enc)
enc = self.encoder1(enc)
enc = self.basic1(enc)
enc = nn.LeakyReLU()(enc)
enc = self.encoder2(enc)
enc = nn.LeakyReLU()(enc)
# print("enc.shape: ", enc.shape)
dec = nn.LeakyReLU()(self.decoder(enc))
dec = nn.LeakyReLU()(self.decoder1(dec))
dec = self.basic2(dec)
dec = nn.LeakyReLU()(self.decoder2(dec))
# print("dec.shape: ", dec.shape)
enc_permute = enc.permute(0,2,1)
# print("enc_permute.shape ", enc_permute.shape)
enc_pooled = F.avg_pool1d(enc, kernel_size=(enc.shape[2])).squeeze()
out2 = nn.LeakyReLU()(self.f2(enc_pooled))
out3 = nn.LeakyReLU()(self.f3(enc_pooled))
out4 = nn.LeakyReLU()(self.f4(enc_pooled))
return dec, out2, out3, out4
class classification_model(nn.Module):
def __init__(self):
super(classification_model, self).__init__()
self.num_filter = 128
self.encoder = nn.Conv1d(40, self.num_filter, 3, 1, bias=False, padding=1)
self.encoder1 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
self.encoder2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False, padding=1, groups=self.num_filter)
self.f1 = nn.Linear(128, 2) # 2 classes
self.bn = nn.BatchNorm1d(40)
def forward(self,x):
# print(x.shape)
# x = self.bn(x)
enc = self.encoder(x)
enc = nn.LeakyReLU()(enc)
enc = self.encoder1(enc)
enc = nn.LeakyReLU()(enc)
enc = self.encoder2(enc)
enc = nn.LeakyReLU()(enc)
# enc = enc.permute(0,2,1) #b,t,f
# print("enc.shape ", enc.shape)
enc_permute = enc.permute(0,2,1)
# print("enc_permute.shape ", enc_permute.shape)
enc_pooled = F.avg_pool1d(enc, kernel_size=(enc.shape[2])).squeeze()
# print("enc_pooled ", enc_pooled.shape)
out = self.f1(enc_pooled) #b,t,2
# print(out.shape)
return enc_pooled, out
class OVEC_model(nn.Module):
def __init__(self, mode):
super(OVEC_model, self).__init__()
self.num_filter = 256
self.inp_channel = 40 #40
self.ovec_length = len(IMP_OVEC_FEAT)
self.cnn1 = nn.Conv1d(self.inp_channel, self.num_filter, kernel_size=3, stride=1, bias=False, padding=1)
self.cnn2 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False)
self.cnn3 = nn.Conv1d(self.num_filter, self.num_filter, 3, 1, bias=False)
self.f1 = nn.Linear(self.num_filter, self.ovec_length)
self.f2 = nn.Linear(self.num_filter, 2)
self.bn = nn.BatchNorm1d(self.num_filter)
self.bn1 = nn.BatchNorm1d(self.ovec_length)
self.mode = mode
def forward(self, x):
enc = self.cnn1(x)
enc = nn.LeakyReLU()(enc)
enc = self.cnn2(enc)
enc = nn.LeakyReLU()(enc)
enc = self.cnn3(enc)
enc = nn.LeakyReLU()(enc)
enc_permute = enc.permute(0,2,1)
enc_pooled = F.avg_pool1d(enc, kernel_size=(enc.shape[2])).squeeze()
# enc_pooled = self.bn(enc_pooled)
if self.mode == "ovec":
# print(enc_pooled)
out = self.f1(enc_pooled)
# print(out)
# out = self.bn1(out)
# print(out)
# exit()
if self.mode == "class":
out = self.f2(enc_pooled)
return out | KalibrateBlockchain/VFO2 | version_1/models_def.py | models_def.py | py | 8,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "numpy.load",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "numpy.matlib.repmat",
"line_nu... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.