id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
4901849 | <filename>sound_classification/confusion_matrix.py
# -*- coding: utf-8 -*-
__author__ = 'lgeorge'
import argparse
import logging
import pylab
import numpy as np
from sklearn.metrics import confusion_matrix
def displayConfusionMatrix(aConfusion_matrix, labels=None):
    """Display a (non-normalized) confusion matrix as a heatmap.

    Each row is normalized by its sum so the colors show per-class
    percentages, while the raw counts are printed inside the cells.

    :param aConfusion_matrix: confusion matrix, not normalized
        (e.g. the result of ``sklearn.metrics.confusion_matrix(Y, clf.predict(X))``)
    :param labels: labels of each class (one per row of the matrix)
    :return: the matplotlib figure

    usage: displayConfusionMatrix(confusion_matrix(Y, clf.predict(X)))
    """
    aConfusion_matrix = np.array(aConfusion_matrix)
    # Use the builtin ``float``: ``np.float`` was deprecated in NumPy 1.20
    # and removed in NumPy 1.24.
    normalized_confusion_matrix = np.array(aConfusion_matrix.copy(), dtype=float)
    for i in range(normalized_confusion_matrix.shape[0]):
        sum_line = float(np.sum(aConfusion_matrix[i, :]))
        # Guard against classes with no samples (all-zero rows), which
        # previously produced NaNs via a division by zero.
        if sum_line > 0:
            normalized_confusion_matrix[i, :] = normalized_confusion_matrix[i, :] / sum_line
    fig = pylab.figure('confusion matrix')
    ax = fig.add_subplot(111)
    colormap = pylab.cm.coolwarm  # diverging map: good contrast for the printed numbers
    cax = ax.matshow(normalized_confusion_matrix, cmap=colormap, interpolation='None')
    ax.xaxis.set_label_position('top')
    if labels is not None:
        pylab.xticks(np.arange(len(labels)), labels, rotation=90)
        pylab.yticks(np.arange(len(labels)), labels)
    cb = fig.colorbar(cax)
    cb.set_label('Percentage')
    # Print the raw (non-normalized) counts inside each non-empty cell.
    for i in range(normalized_confusion_matrix.shape[0]):
        for j in range(normalized_confusion_matrix.shape[1]):
            val = aConfusion_matrix[i, j]
            cell = "" if val == 0 else "{0:.0f}".format(val)
            pylab.text(j, i, cell, fontsize=14, va='center', ha='center')
    pylab.xlabel('Predicted')
    pylab.ylabel('Expected')
    pylab.tight_layout()
    pylab.show()
    return fig
| StarcoderdataPython |
6617177 | # Copyright (C) 2020 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
import pathlib
import tarfile
import shutil
PYMEDPHYS_BAT_NAME = "pymedphys.bat"
def main():
    """Entry point executed when PyMedPhysGUI-vX.Y.Z.exe is run.

    Checks whether the required PyMedPhys files have already been
    extracted into the current working directory; if they have not,
    provisions them first. Once the embedded Python distribution is in
    place, boots the PyMedPhys streamlit app.

    Note
    ----
    This runs under pyinstaller's bundled Python, which has no extra
    libraries installed. PyMedPhys itself lives within
    ``python-embed/Lib/site-packages`` -- a separate Python install
    from the one running this script.
    """
    working_dir = pathlib.Path(os.getcwd())
    embed_dir = working_dir / "python-embed"
    # The .bat file is extracted last, so its presence marks a completed install.
    if not (working_dir / PYMEDPHYS_BAT_NAME).exists():
        _install(working_dir, embed_dir)
    _boot_streamlit_app(embed_dir)
def _install(cwd, installation_path):
    """Extract the embedded Python environment into ``cwd``.

    Note
    ----
    ``pymedphys.bat`` is copied last on purpose: its existence is the
    marker used to decide whether a previous install completed.
    """
    bundle_dir = pathlib.Path(
        sys._MEIPASS  # pylint: disable = no-member, protected-access
    )
    data_dir = bundle_dir.joinpath("data")
    archive = data_dir.joinpath("python-embed.tar.xz")

    installation_path.mkdir()
    # NOTE(review): extractall() trusts the archive's member paths. The
    # archive here ships inside the executable, so it is not untrusted
    # input, but a path-traversal filter would harden this.
    with tarfile.open(archive) as tar:
        tar.extractall(installation_path)

    for name in ["LICENSE", PYMEDPHYS_BAT_NAME]:
        shutil.copy(data_dir.joinpath(name), cwd.joinpath(name))
def _boot_streamlit_app(python_embedded_directory):
    """Launch the PyMedPhys GUI using the embedded Python distribution.

    Parameters
    ----------
    python_embedded_directory
        Full path to the Python embedded distribution.
    """
    command = "python.exe -m pymedphys gui"
    # shell=True so the command resolves python.exe relative to cwd on Windows.
    subprocess.check_call(command, cwd=python_embedded_directory, shell=True)
# Run the install/boot sequence only when executed as a script.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
4826250 | <filename>playAI.py
from MCTS import MCTS
from connect4.Connect4Game import Connect4Game, display
from connect4.Connect4Players import HumanConnect4Player
from connect4.tensorflows.NNet import NNetWrapper as NNet
from utils import dotdict
import numpy as np
# Interactive Connect-4 match between a trained AlphaZero-style agent and a human.
if __name__ == '__main__':
    # NOTE(review): with goingFirst=True the first mover (curPlayer=1, i.e.
    # players[2]) is the *human* -- confirm the intended meaning of this flag.
    goingFirst = True
    # Directory holding the trained network checkpoint to load.
    folder = "H:\\alpha-zero-trained\\final\\h2\\mcts_visits_tanh\\default\\1\\"
    game = Connect4Game()
    nn = NNet(game)
    nn.load_checkpoint(folder, 'best.pth.tar')
    args = dotdict({'numMCTSSims': 25, 'cpuct': 1})
    mcts1 = MCTS(game, nn, args)
    # Greedy AI move: pick the action with the highest MCTS visit count (temp=0).
    AI = lambda x: np.argmax(mcts1.getActionProb(x, temp=0))
    human = HumanConnect4Player(game).play
    # players is indexed with curPlayer + 1: [player -1, unused, player +1].
    if goingFirst:
        players = [AI, None, human]
    else:
        players = [human, None, AI]
    curPlayer = 1
    board = game.getInitBoard()
    # Main game loop: alternate moves until getGameEnded reports a result.
    while game.getGameEnded(board, curPlayer) == 0:
        display(board, symbols=True)
        action = players[curPlayer + 1](game.getCanonicalForm(board, curPlayer))
        valids = game.getValidMoves(game.getCanonicalForm(board, curPlayer), 1)
        # Re-prompt until a legal column is chosen (board[0] == 0 marks open columns).
        while valids[action] == 0:
            print("Move", action, "is illegal. You can play:", [i for i, legal in enumerate(board[0] == 0) if legal])
            action = players[curPlayer + 1](game.getCanonicalForm(board, curPlayer))
        board, curPlayer = game.getNextState(board, curPlayer, action)
    display(board)
    print("Game over!", "Result ", str(game.getGameEnded(board, 1)))
| StarcoderdataPython |
78475 | <reponame>samhuairen/deepTools
import sys
import itertools
import numpy as np
import scipy.cluster.hierarchy as sch
import scipy.stats
import matplotlib as mpl
mpl.use('Agg')
mpl.rcParams['pdf.fonttype'] = 42
mpl.rcParams['svg.fonttype'] = 'none'
from deeptools import cm # noqa: F401
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.ticker
import matplotlib.mlab
import matplotlib.markers
import matplotlib.colors as pltcolors
from deeptools.utilities import toString, convertCmap
import plotly.offline as offline
import plotly.graph_objs as go
import plotly.figure_factory as ff
old_settings = np.seterr(all='ignore')
class Correlation:
"""
class to work with matrices
having sample data
to compute correlations, plot
them and make scatter plots
"""
    def __init__(self, matrix_file,
                 corr_method=None,
                 labels=None,
                 remove_outliers=False,
                 skip_zeros=False,
                 log1p=False):
        """Load a sample matrix and optionally clean it / pre-compute correlations.

        matrix_file : path to an ``.npz`` file with 'matrix' and 'labels' keys
        corr_method : 'pearson' or 'spearman'; when given, the correlation
                      matrix is computed immediately
        labels      : optional sample labels overriding those stored in the file
        remove_outliers : drop bins that are outliers in every sample
        skip_zeros  : drop bins that are all zeros / NaNs across samples
        log1p       : transform the matrix with log(1 + x)
        """
        self.load_matrix(matrix_file)
        self.skip_zeros = skip_zeros
        self.corr_method = corr_method
        self.corr_matrix = None  # correlation matrix, computed lazily
        self.column_order = None  # dendrogram leaf order, set by plot_correlation
        self.rowCenter = False  # may be toggled externally before plot_pca
        if labels is not None:
            # test that the length of labels
            # corresponds to the length of
            # samples
            # NOTE(review): the length is not actually verified here -- only
            # load_matrix asserts it, and that is for the labels stored in
            # the file, not these overriding ones.
            self.labels = labels
            self.labels = [toString(x) for x in self.labels]
        if self.matrix.shape[1] == 1:
            # There's nothing that can be done with a single sample
            sys.exit("\nPlease use a matrix with more than one sample\n")
        if skip_zeros is True:
            # remove rows containing only nans or zeros
            # that could be unmappable regions.
            self.remove_rows_of_zeros()
        if remove_outliers is True:
            # remove outliers, otherwise outliers will produce a very
            # high pearson correlation. Unnecessary for spearman correlation
            self.remove_outliers()
        if log1p is True:
            self.matrix = np.log1p(self.matrix)
        if corr_method:
            self.compute_correlation()
def load_matrix(self, matrix_file):
"""
loads a matrix file saved using the numpy
savez method. Two keys are expected:
'matrix' and 'labels'. The matrix should
contain one sample per row
"""
_ma = np.load(matrix_file)
# matrix: cols correspond to samples
self.matrix = np.asarray(_ma['matrix'].tolist())
if np.any(np.isnan(self.matrix)):
num_nam = len(np.flatnonzero(np.isnan(self.matrix.flatten())))
sys.stderr.write("*Warning*. {} NaN values were found. They will be removed along with the "
"corresponding bins in other samples for the computation "
"and plotting\n".format(num_nam))
self.matrix = np.ma.compress_rows(np.ma.masked_invalid(self.matrix))
self.labels = list(map(toString, _ma['labels']))
assert len(self.labels) == self.matrix.shape[1], "ERROR, length of labels is not equal " \
"to length of matrix samples"
@staticmethod
def get_outlier_indices(data, max_deviation=200):
"""
The method is based on the median absolute deviation. See
<NAME> and <NAME> (1993),
"Volume 16: How to Detect and Handle Outliers",
The ASQC Basic References in Quality Control:
Statistical Techniques, <NAME>, Ph.D., Editor.
returns the list, without the outliers
The max_deviation=200 is like selecting a z-score
larger than 200, just that it is based on the median
and the median absolute deviation instead of the
mean and the standard deviation.
"""
median = np.median(data)
b_value = 1.4826 # value set for a normal distribution
mad = b_value * np.median(np.abs(data))
outliers = []
if mad > 0:
deviation = abs(data - median) / mad
"""
outliers = data[deviation > max_deviation]
print "outliers removed {}".format(len(outliers))
print outliers
"""
outliers = np.flatnonzero(deviation > max_deviation)
return outliers
def remove_outliers(self, verbose=True):
"""
get the outliers *per column* using the median absolute
deviation method
Returns the filtered matrix
"""
unfiltered = len(self.matrix)
to_remove = None
for col in self.matrix.T:
outliers = self.get_outlier_indices(col)
if to_remove is None:
to_remove = set(outliers)
else:
# only set to remove those bins in which
# the outliers are present in all cases (colums)
# that's why the intersection is used
to_remove = to_remove.intersection(outliers)
if len(to_remove):
to_keep = [x for x in range(self.matrix.shape[0])
if x not in to_remove]
self.matrix = self.matrix[to_keep, :]
if verbose:
sys.stderr.write(
"total/filtered/left: "
"{}/{}/{}\n".format(unfiltered,
unfiltered - len(to_keep),
len(to_keep)))
return self.matrix
def remove_rows_of_zeros(self):
# remove rows containing all zeros or all nans
_mat = np.nan_to_num(self.matrix)
to_keep = _mat.sum(1) != 0
self.matrix = self.matrix[to_keep, :]
def save_corr_matrix(self, file_handle):
"""
saves the correlation matrix
"""
if self.column_order:
self.corr_matrix = self.corr_matrix[:, self.column_order][self.column_order]
self.labels = [self.labels[i] for i in self.column_order]
self.labels = [toString(x) for x in self.labels]
file_handle.write("\t'" + "'\t'".join(self.labels) + "'\n")
fmt = "\t".join(np.repeat('%.4f', self.corr_matrix.shape[1])) + "\n"
i = 0
for row in self.corr_matrix:
file_handle.write(
"'%s'\t" % self.labels[i] + fmt % tuple(row))
i += 1
def compute_correlation(self):
"""
computes spearman or pearson
correlation for the samples in the matrix
The matrix should contain the values of each sample per column
that's why the transpose is used.
>>> matrix = np.array([[1, 2, 3, np.nan],
... [1, 2, 3, 4],
... [6, 4, 3, 1]]).T
>>> np.savez_compressed("/tmp/test_matrix.npz", matrix=matrix, labels=['a', 'b', 'c'])
>>> c = Correlation("/tmp/test_matrix.npz", corr_method='pearson')
the results should be as in R
>>> c.compute_correlation().filled(np.nan)
array([[ 1. , 1. , -0.98198051],
[ 1. , 1. , -0.98198051],
[-0.98198051, -0.98198051, 1. ]])
>>> c.corr_method = 'spearman'
>>> c.corr_matrix = None
>>> c.compute_correlation()
array([[ 1., 1., -1.],
[ 1., 1., -1.],
[-1., -1., 1.]])
"""
if self.corr_matrix is not None:
return self.corr_matrix
num_samples = len(self.labels)
# initialize correlation matrix
if self.corr_method == 'pearson':
self.corr_matrix = np.ma.corrcoef(self.matrix.T, allow_masked=True)
else:
corr_matrix = np.zeros((num_samples, num_samples), dtype='float')
# do an all vs all correlation using the
# indices of the upper triangle
rows, cols = np.triu_indices(num_samples)
for index in range(len(rows)):
row = rows[index]
col = cols[index]
corr_matrix[row, col] = scipy.stats.spearmanr(self.matrix[:, row], self.matrix[:, col])[0]
# make the matrix symmetric
self.corr_matrix = corr_matrix + np.triu(corr_matrix, 1).T
return self.corr_matrix
    def plotly_correlation(self, corr_matrix, plot_filename, labels, plot_title='',
                           vmax=None, vmin=None, plot_numbers=True,
                           colormap='jet'):
        """plot_correlation, but using plotly.

        Renders ``corr_matrix`` as an annotated heatmap and writes it to
        ``plot_filename`` as a standalone HTML file.
        """
        # per-cell annotation text: the correlation values (or blanks)
        textElement = []
        for row in range(corr_matrix.shape[0]):
            trow = []
            for col in range(corr_matrix.shape[0]):
                if plot_numbers:
                    trow.append("{:0.2f}".format(corr_matrix[row, col]))
                else:
                    trow.append('')
            textElement.append(trow)

        # let plotly autoscale the color range unless explicit bounds are given
        zauto = True
        if vmax is not None or vmin is not None:
            zauto = False

        convertedCmap = convertCmap(colormap)  # matplotlib cmap -> plotly colorscale

        fig = ff.create_annotated_heatmap(corr_matrix, x=labels, y=labels, colorscale=convertedCmap, showscale=True, zauto=zauto, zmin=vmin, zmax=vmax, annotation_text=textElement)
        fig.layout['title'] = plot_title
        offline.plot(fig, filename=plot_filename, auto_open=False)

    def plot_correlation(self, plot_filename, plot_title='', vmax=None,
                         vmin=None, colormap='jet', image_format=None,
                         plot_numbers=False, plotWidth=11, plotHeight=9.5):
        """
        plots a correlation using a symmetric heatmap

        Rows/columns are reordered by a complete-linkage dendrogram; the
        leaf order is stored in ``self.column_order`` so that
        ``save_corr_matrix`` can write the matrix in the same order.
        When ``image_format == "plotly"``, the rendering is delegated to
        ``plotly_correlation`` (using the already-reordered matrix).
        """
        num_rows = len(self.labels)
        corr_matrix = self.compute_correlation()
        # set a font size according to figure length
        if num_rows < 6:
            font_size = 14
        elif num_rows > 40:
            font_size = 5
        else:
            font_size = int(14 - 0.25 * num_rows)
        mpl.rcParams.update({'font.size': font_size})
        # set the minimum and maximum values of the color scale
        if vmax is None:
            vmax = 1
        if vmin is None:
            vmin = 0 if corr_matrix.min() >= 0 else -1

        # Compute and plot dendrogram.
        fig = plt.figure(figsize=(plotWidth, plotHeight))
        plt.suptitle(plot_title)
        axdendro = fig.add_axes([0.02, 0.12, 0.1, 0.66])
        axdendro.set_axis_off()
        y_var = sch.linkage(corr_matrix, method='complete')
        z_var = sch.dendrogram(y_var, orientation='left',
                               link_color_func=lambda k: 'darkred')
        axdendro.set_xticks([])
        axdendro.set_yticks([])
        cmap = plt.get_cmap(colormap)

        # this line simply makes a new cmap, based on the original
        # colormap that goes from 0.0 to 0.9
        # This is done to avoid colors that
        # are too dark at the end of the range that do not offer
        # a good contrast between the correlation numbers that are
        # plotted on black.
        if plot_numbers:
            cmap = pltcolors.LinearSegmentedColormap.from_list(colormap + "clipped",
                                                               cmap(np.linspace(0, 0.9, 10)))
        cmap.set_under((0., 0., 1.))

        # Plot distance matrix, reordered by the dendrogram leaves.
        axmatrix = fig.add_axes([0.13, 0.1, 0.6, 0.7])
        index = z_var['leaves']
        corr_matrix = corr_matrix[index, :]
        corr_matrix = corr_matrix[:, index]
        if corr_matrix.shape[0] > 30:
            # when there are too many rows it is better to remove
            # the black lines surrounding the boxes in the heatmap
            edge_color = 'none'
        else:
            edge_color = 'black'

        if image_format == "plotly":
            self.plotly_correlation(corr_matrix,
                                    plot_filename,
                                    self.labels,
                                    plot_title=plot_title,
                                    vmax=vmax,
                                    vmin=vmin,
                                    colormap=colormap,
                                    plot_numbers=plot_numbers)
            return

        img_mat = axmatrix.pcolormesh(corr_matrix,
                                      edgecolors=edge_color,
                                      cmap=cmap,
                                      vmax=vmax,
                                      vmin=vmin)
        axmatrix.set_xlim(0, num_rows)
        axmatrix.set_ylim(0, num_rows)

        # y labels on the right, both axes labeled in dendrogram order
        axmatrix.yaxis.tick_right()
        axmatrix.set_yticks(np.arange(corr_matrix.shape[0]) + 0.5)
        axmatrix.set_yticklabels(np.array(self.labels).astype('str')[index])

        axmatrix.xaxis.set_tick_params(labeltop=True)
        axmatrix.xaxis.set_tick_params(labelbottom=False)
        axmatrix.set_xticks(np.arange(corr_matrix.shape[0]) + 0.5)
        axmatrix.set_xticklabels(np.array(self.labels).astype('str')[index], rotation=45, ha='left')
        axmatrix.tick_params(
            axis='x',
            which='both',
            bottom=False,
            top=False)
        axmatrix.tick_params(
            axis='y',
            which='both',
            left=False,
            right=False)

        # Plot colorbar
        axcolor = fig.add_axes([0.13, 0.065, 0.6, 0.02])
        cobar = plt.colorbar(img_mat, cax=axcolor, orientation='horizontal')
        cobar.solids.set_edgecolor("face")
        if plot_numbers:
            for row in range(num_rows):
                for col in range(num_rows):
                    axmatrix.text(row + 0.5, col + 0.5,
                                  "{:.2f}".format(corr_matrix[row, col]),
                                  ha='center', va='center')
        # remember the dendrogram order for save_corr_matrix
        self.column_order = index
        fig.savefig(plot_filename, format=image_format)
        plt.close()
    def plotly_scatter(self, plot_filename, corr_matrix, plot_title='', minXVal=None, maxXVal=None, minYVal=None, maxYVal=None):
        """Make the scatter plot of a matrix with plotly.

        Builds an n x n grid of 2D-histogram heatmaps (one per sample
        pair, upper triangle only) and writes a standalone HTML file.
        """
        n = self.matrix.shape[1]
        self.matrix = self.matrix  # no-op, kept as in the original
        fig = go.Figure()
        domainWidth = 1. / n  # fraction of the figure used by each subplot

        # diagonal sample labels, placed in paper coordinates
        annos = []
        for i in range(n):
            x = domainWidth * (i + 1)
            y = 1 - (domainWidth * i + 0.5 * domainWidth)
            anno = dict(text=self.labels[i], showarrow=False, xref='paper', yref='paper', x=x, y=y, xanchor='right', yanchor='middle')
            annos.append(anno)

        data = []
        zMin = np.inf
        zMax = -np.inf
        for x in range(n):
            xanchor = 'x{}'.format(x + 1)
            base = x * domainWidth
            domain = [base, base + domainWidth]
            if x > 0:
                base = 1 - base
            fig['layout']['xaxis{}'.format(x + 1)] = dict(domain=domain, range=[minXVal, maxXVal], anchor='free', position=base)
            for y in range(0, n):
                yanchor = 'y{}'.format(y + 1)
                # NOTE(review): the y axes are only created on the x == 1
                # pass -- confirm this is intentional rather than x == 0.
                if x == 1:
                    base = 1 - y * domainWidth
                    domain = [base - domainWidth, base]
                    fig['layout']['yaxis{}'.format(y + 1)] = dict(domain=domain, range=[minYVal, maxYVal], side='right', anchor='free', position=1.0)
                if x > y:
                    # upper triangle: 2D histogram of sample x vs sample y
                    vector1 = self.matrix[:, x]
                    vector2 = self.matrix[:, y]
                    Z, xEdges, yEdges = np.histogram2d(vector1, vector2, bins=50)
                    Z = np.log10(Z)  # log counts; empty bins become -inf
                    if np.min(Z) < zMin:
                        zMin = np.min(Z)
                    if np.max(Z) > zMax:
                        zMax = np.max(Z)
                    name = '{}={:.2f}'.format(self.corr_method, corr_matrix[x, y])
                    trace = go.Heatmap(z=Z, x=xEdges, y=yEdges, showlegend=False, xaxis=xanchor, yaxis=yanchor, name=name, showscale=False)
                    data.append(trace)

        # Fix the colorbar bounds so all panels share one color scale
        for trace in data:
            trace.update(zmin=zMin, zmax=zMax)
        data[-1]['colorbar'].update(title="log10(instances per bin)", titleside="right")
        data[-1].update(showscale=True)

        fig['data'] = data
        fig['layout'].update(title=plot_title, showlegend=False, annotations=annos)
        offline.plot(fig, filename=plot_filename, auto_open=False)

    def plot_scatter(self, plot_filename, plot_title='', image_format=None, log1p=False, xRange=None, yRange=None):
        """
        Plot the scatter plots of a matrix
        in which each row is a sample

        Draws an n x n grid of pairwise 2D histograms (upper triangle)
        with the sample names on the diagonal. Delegates to
        ``plotly_scatter`` when ``image_format == 'plotly'``.
        """
        num_samples = self.matrix.shape[1]
        corr_matrix = self.compute_correlation()
        grids = gridspec.GridSpec(num_samples, num_samples)
        grids.update(wspace=0, hspace=0)
        fig = plt.figure(figsize=(2 * num_samples, 2 * num_samples))
        plt.rcParams['font.size'] = 8.0
        plt.suptitle(plot_title)
        if log1p is True:
            self.matrix = np.log1p(self.matrix)
        min_xvalue = self.matrix.min()
        max_xvalue = self.matrix.max()
        min_yvalue = min_xvalue
        max_yvalue = max_xvalue
        if xRange is not None:
            min_xvalue = xRange[0]
            max_xvalue = xRange[1]
        if yRange is not None:
            min_yvalue = yRange[0]
            max_yvalue = yRange[1]

        # NOTE(review): the intent per the comments is "make one value odd
        # and the other even", but the second condition uses ``% 1`` (always
        # 0 for integers) -- the parity logic looks suspicious; confirm.
        if (min_xvalue % 2 == 0 and max_xvalue % 2 == 0) or \
                (min_xvalue % 1 == 0 and max_xvalue % 2 == 1):
            # make one value odd and the other even
            max_xvalue += 1
        if (min_yvalue % 2 == 0 and max_yvalue % 2 == 0) or \
                (min_yvalue % 1 == 0 and max_yvalue % 2 == 1):
            # make one value odd and the other even
            max_yvalue += 1

        # plotly output
        if image_format == 'plotly':
            self.plotly_scatter(plot_filename, corr_matrix, plot_title=plot_title, minXVal=min_xvalue, maxXVal=max_xvalue, minYVal=min_yvalue, maxYVal=max_yvalue)
            return

        rows, cols = np.triu_indices(num_samples)
        for index in range(len(rows)):
            row = rows[index]
            col = cols[index]
            if row == col:
                # add titles as
                # empty plot in the diagonal
                ax = fig.add_subplot(grids[row, col])
                ax.text(0.5, 0.5, self.labels[row],
                        verticalalignment='center',
                        horizontalalignment='center',
                        fontsize=10, fontweight='bold',
                        transform=ax.transAxes)
                ax.set_axis_off()
                continue

            ax = fig.add_subplot(grids[row, col])
            vector1 = self.matrix[:, row]
            vector2 = self.matrix[:, col]
            # correlation coefficient annotation inside the panel
            ax.text(0.2, 0.8, "{}={:.2f}".format(self.corr_method,
                                                 corr_matrix[row, col]),
                    horizontalalignment='left',
                    transform=ax.transAxes)
            ax.get_yaxis().set_tick_params(
                which='both',
                left=False,
                right=False,
                direction='out')
            ax.get_xaxis().set_tick_params(
                which='both',
                top=False,
                bottom=False,
                direction='out')
            # NOTE(review): ``tick.label`` was removed in matplotlib >= 3.8;
            # this relies on an older matplotlib API.
            for tick in ax.xaxis.get_major_ticks():
                tick.label.set_rotation('45')
            if col != num_samples - 1:
                ax.set_yticklabels([])
            else:
                # right-most column keeps its y tick labels, on the right
                ax.yaxis.tick_right()
                ax.get_yaxis().set_tick_params(
                    which='both',
                    left=False,
                    right=True,
                    direction='out')
            if col - row == 1:
                # panels adjacent to the diagonal keep their x tick labels
                ax.xaxis.tick_bottom()
                ax.get_xaxis().set_tick_params(
                    which='both',
                    top=False,
                    bottom=True,
                    direction='out')
                for tick in ax.xaxis.get_major_ticks():
                    tick.label.set_rotation('45')
            else:
                ax.set_xticklabels([])
            ax.hist2d(vector2, vector1, bins=200, cmin=0.1)
            ax.set_xlim(min_xvalue, max_xvalue)
            ax.set_ylim(min_yvalue, max_yvalue)

        plt.savefig(plot_filename, format=image_format)
        plt.close()
    def plotly_pca(self, plotFile, Wt, pvar, PCs, eigenvalues, cols, plotTitle):
        """
        A plotly version of plot_pca, that's called by it to do the actual plotting
        """
        fig = go.Figure()
        # left half: PCA scatter; right half: scree plot with a secondary
        # (overlaid) axis for the cumulative variability
        fig['layout']['xaxis1'] = {'domain': [0.0, 0.48], 'anchor': 'x1', 'title': 'PC{} ({:4.1f}% of var. explained)'.format(PCs[0], 100.0 * pvar[PCs[0] - 1])}
        fig['layout']['yaxis1'] = {'domain': [0.0, 1.0], 'anchor': 'x1', 'title': 'PC{} ({:4.1f}% of var. explained)'.format(PCs[1], 100.0 * pvar[PCs[1] - 1])}
        fig['layout']['xaxis2'] = {'domain': [0.52, 1.0], 'title': 'Principal Component'}
        fig['layout']['yaxis2'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'Eigenvalue', 'rangemode': 'tozero', 'showgrid': False}
        fig['layout']['yaxis3'] = {'domain': [0.0, 1.0], 'anchor': 'x2', 'title': 'Cumulative variability', 'rangemode': 'tozero', 'side': 'right', 'overlaying': 'y2'}
        fig['layout'].update(title=plotTitle)

        # PCA: one single-point scatter trace per sample
        if cols is not None:
            colors = itertools.cycle(cols)
        n = len(self.labels)
        data = []
        for i in range(n):
            trace = go.Scatter(x=[Wt[PCs[0] - 1, i]],
                               y=[Wt[PCs[1] - 1, i]],
                               mode='marker',
                               xaxis='x1',
                               yaxis='y1',
                               name=self.labels[i])
            trace['marker'].update(size=20)
            if cols is not None:
                trace['marker'].update(color=next(colors))
            data.append(trace)

        # Scree plot
        trace = go.Bar(showlegend=False,
                       name='Eigenvalues',
                       x=range(1, n + 1),
                       y=eigenvalues[:n],
                       xaxis='x2',
                       yaxis='y2')
        data.append(trace)

        # Cumulative variability
        trace = go.Scatter(showlegend=False,
                           x=range(1, n + 1),
                           y=pvar.cumsum()[:n],
                           mode='lines+markers',
                           name='Cumulative variability',
                           xaxis='x2',
                           yaxis='y3',
                           line={'color': 'red'},
                           marker={'symbol': 'circle-open-dot', 'color': 'black'})
        data.append(trace)

        # subplot titles, positioned in paper coordinates
        annos = []
        annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': 'PCA', 'y': 1.0, 'x': 0.25, 'font': {'size': 16}, 'showarrow': False})
        annos.append({'yanchor': 'bottom', 'xref': 'paper', 'xanchor': 'center', 'yref': 'paper', 'text': 'Scree plot', 'y': 1.0, 'x': 0.75, 'font': {'size': 16}, 'showarrow': False})

        fig['data'] = data
        fig['layout']['annotations'] = annos
        offline.plot(fig, filename=plotFile, auto_open=False)

    def plot_pca(self, plot_filename=None, PCs=[1, 2], plot_title='', image_format=None, log1p=False, plotWidth=5, plotHeight=10, cols=None, marks=None):
        """
        Plot the PCA of a matrix

        Returns the matrix of plotted values (projections ``Wt``) and
        the eigenvalues.

        NOTE(review): this method reads ``self.transpose``, ``self.ntop``
        and ``self.log2``, which are not set in ``__init__`` -- presumably
        assigned by the caller beforehand; confirm against the calling code.
        NOTE(review): the mutable default ``PCs=[1, 2]`` is shared across
        calls; it is only read here, but callers must not mutate it.
        """
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(plotWidth, plotHeight))

        # Filter: drop zero-variance rows (transposed mode) and keep only
        # the ntop most variable rows when requested
        m = self.matrix
        rvs = m.var(axis=1)
        if self.transpose:
            m = m[np.nonzero(rvs)[0], :]
            rvs = rvs[np.nonzero(rvs)[0]]
        if self.ntop > 0 and m.shape[0] > self.ntop:
            m = m[np.argpartition(rvs, -self.ntop)[-self.ntop:], :]
            rvs = rvs[np.argpartition(rvs, -self.ntop)[-self.ntop:]]

        # log2 (if requested)
        if self.log2:
            self.matrix = np.log2(self.matrix + 0.01)

        # Row center / transpose
        if self.rowCenter and not self.transpose:
            _ = self.matrix.mean(axis=1)
            self.matrix -= _[:, None]
        if self.transpose:
            m = m.T

        # Center and scale
        m2 = (m - np.mean(m, axis=0))
        m2 /= np.std(m2, axis=0, ddof=1)  # Use the unbiased std. dev.

        # SVD
        U, s, Vh = np.linalg.svd(m2, full_matrices=False, compute_uv=True)  # Is full_matrices ever needed?

        # % variance, eigenvalues
        eigenvalues = s**2
        variance = eigenvalues / float(np.max([1, m2.shape[1] - 1]))
        pvar = variance / variance.sum()

        # Weights/projections
        Wt = Vh
        if self.transpose:
            # Use the projected coordinates for the transposed matrix
            Wt = np.dot(m2, Vh.T).T

        if plot_filename is not None:
            n = n_bars = len(self.labels)
            if eigenvalues.size < n:
                n_bars = eigenvalues.size

            markers = itertools.cycle(matplotlib.markers.MarkerStyle.filled_markers)
            if cols is not None:
                colors = itertools.cycle(cols)
            else:
                colors = itertools.cycle(plt.cm.gist_rainbow(np.linspace(0, 1, n)))
            if marks is not None:
                markers = itertools.cycle(marks)

            if image_format == 'plotly':
                self.plotly_pca(plot_filename, Wt, pvar, PCs, eigenvalues, cols, plot_title)
            else:
                ax1.axhline(y=0, color="black", linestyle="dotted", zorder=1)
                ax1.axvline(x=0, color="black", linestyle="dotted", zorder=2)
                for i in range(n):
                    color = next(colors)
                    marker = next(markers)
                    if isinstance(color, np.ndarray):
                        color = pltcolors.to_hex(color, keep_alpha=True)
                    ax1.scatter(Wt[PCs[0] - 1, i], Wt[PCs[1] - 1, i],
                                marker=marker, color=color, s=150, label=self.labels[i], zorder=i + 3)
                if plot_title == '':
                    ax1.set_title('PCA')
                else:
                    ax1.set_title(plot_title)
                ax1.set_xlabel('PC{} ({:4.1f}% of var. explained)'.format(PCs[0], 100.0 * pvar[PCs[0] - 1]))
                ax1.set_ylabel('PC{} ({:4.1f}% of var. explained)'.format(PCs[1], 100.0 * pvar[PCs[1] - 1]))
                lgd = ax1.legend(scatterpoints=1, loc='center left', borderaxespad=0.5,
                                 bbox_to_anchor=(1, 0.5),
                                 prop={'size': 12}, markerscale=0.9)

                # Scree plot
                ind = np.arange(n_bars)  # the x locations for the groups
                width = 0.35  # the width of the bars
                if mpl.__version__ >= "2.0.0":
                    ax2.bar(2 * width + ind, eigenvalues[:n_bars], width * 2)
                else:
                    ax2.bar(width + ind, eigenvalues[:n_bars], width * 2)
                ax2.set_ylabel('Eigenvalue')
                ax2.set_xlabel('Principal Component')
                ax2.set_title('Scree plot')
                ax2.set_xticks(ind + width * 2)
                ax2.set_xticklabels(ind + 1)

                # secondary axis: cumulative explained variance
                ax3 = ax2.twinx()
                ax3.axhline(y=1, color="black", linestyle="dotted")
                ax3.plot(width * 2 + ind, pvar.cumsum()[:n], "r-")
                ax3.plot(width * 2 + ind, pvar.cumsum()[:n], "wo", markeredgecolor="black")
                ax3.set_ylim([0, 1.05])
                ax3.set_ylabel('Cumulative variability')

                plt.subplots_adjust(top=3.85)
                plt.tight_layout()
                plt.savefig(plot_filename, format=image_format, bbox_extra_artists=(lgd,), bbox_inches='tight')
                plt.close()

        return Wt, eigenvalues
| StarcoderdataPython |
5060135 | <filename>lib/solver.py
import pulp
import lib.entities as en
class Solver:
    """Base class for solvers operating on a supply chain.

    Subclasses override ``_initialize`` (problem set-up, invoked by the
    constructor) and ``_solve``.
    """

    def __init__(self, supply_chain):
        self._supply_chain = supply_chain
        self._initialize()

    def _initialize(self):
        """Set up the problem before solving (no-op in the base class)."""
        pass

    def _solve(self):
        """Solve the problem (no-op in the base class)."""
        pass
class PlanningSolver(Solver):
"""The solver class for supply chain planning"""
def __init__(self, supply_chain):
super(PlanningSolver, self).__init__(supply_chain)
    def _initialize(self):
        """Build the LP: variables, conservation constraints and objective.

        The problem maximizes total product output plus total inventory
        outflow, subject to flow conservation at every transition
        target. OR transitions are linearized with binary branch
        selection variables and big-M upper bounds.
        """
        self._prob = pulp.LpProblem("Supply chain planning", pulp.LpMaximize)  # creating the problem
        spch = self._supply_chain  # the supply chain
        prods = spch._products  # the products in the supply chain
        comps = spch._components  # the components in the supply chain
        trans = spch._transitions  # the transitions in the supply chain

        ### variable definition ###
        # a variable for each product, bounded by its order size
        prod_vars = dict([(p._name, pulp.LpVariable(p._name, 0, p._order_size)) for p in prods])
        # a variable for each inventory with positive stock
        inv_vars = dict([("_i_" + c._name, pulp.LpVariable("_i_" + c._name, 0, c._stock)) for c in comps if c._stock > 0])
        # a variable for each supplier which is connected to an entity
        sup_vars = dict([("_s_" + t._target, pulp.LpVariable("_s_" + t._target, 0)) for t in trans if t._tr_type == en.TransitionType.DIR])
        # a variable for each transition between two entities
        trans_var_pairs = []
        for t in trans:
            if t._sources:
                trans_var_pairs.extend([(f"_{s}_{t._target}", pulp.LpVariable(f"_{s}_{t._target}", 0)) for s in t._sources])
        trans_vars = dict(trans_var_pairs)
        # a binary variable for each branch of the OR operators
        or_var_list = dict([(t._target, [pulp.LpVariable(f"_x_{s}_{t._target}", 0, 1, pulp.LpInteger) for s in t._sources]) for t in trans if t._tr_type == en.TransitionType.OR])

        ### constraints ###
        # or variable constraints: sum of or variables must be 1
        # (exactly one OR branch is selected)
        for _, or_vars in or_var_list.items():
            self._prob += pulp.lpSum(or_vars) == 1

        # computing the upper bound of inflow into entities;
        # these act as big-M values in the OR linearization below
        upper_bounds = self._compute_upper_bounds()

        # conservation law constraints
        for t in trans:
            target = t._target
            # computing outflow
            if self.is_product(target):  # if the transition target is a product, the outflow goes all into the product
                outflow = prod_vars[target]
            else:  # otherwise the transition target is a component, so we need to find the outflow
                outgoing_trans = spch._outgoings[target]  # the outgoing transitions
                outflow = pulp.lpSum([trans_vars[f"_{target}_{ot._target}"] for ot in outgoing_trans])

            ### computing inflow and adding conservation law constraint
            # add the inventory to the inflow if its stock is positive
            inflow = inv_vars[f"_i_{target}"] if self.has_positive_stock(target) else 0
            if t._tr_type == en.TransitionType.AND:  # if transition is an AND transition
                if t._sources:
                    # every source must supply the full outflow
                    for s in t._sources:
                        self._prob += outflow == trans_vars[f"_{s}_{target}"] + inflow
            elif t._tr_type == en.TransitionType.OR:  # if transition is an OR transition
                if t._sources:
                    # big-M linearization: the equality is only enforced for
                    # the branch whose binary variable equals 1
                    for n, s in enumerate(t._sources):
                        self._prob += outflow - inflow <= trans_vars[f"_{s}_{target}"] + (1 - or_var_list[target][n]) * upper_bounds[target]
                        self._prob += outflow - inflow >= trans_vars[f"_{s}_{target}"] - (1 - or_var_list[target][n]) * upper_bounds[target]
            else:  # the transition is a direct transition from supplier to an entity
                inflow += sup_vars[f"_s_{target}"]
                self._prob += outflow == inflow

        # add leaf constraints: leaves have no incoming transitions, so their
        # outflow is balanced against the inventory alone
        for leaf in spch._leaves:
            # computing outflow
            if self.is_product(leaf):  # if the transition target is a product, the outflow goes all into the product
                outflow = prod_vars[leaf]
            else:  # otherwise the transition target is a component, so we need to find the outflow
                outgoing_trans = spch._outgoings[leaf]  # the outgoing transitions
                outflow = pulp.lpSum([trans_vars[f"_{leaf}_{ot._target}"] for ot in outgoing_trans])
            # add the inventory to the inflow if its stock is positive
            inflow = inv_vars[f"_i_{leaf}"] if self.has_positive_stock(leaf) else 0
            self._prob += outflow == inflow

        # the objective: Maximize the sum of products and the sum of inventory outflows
        self._prob += pulp.lpSum([v for _, v in prod_vars.items()]) + pulp.lpSum([inv for _, inv in inv_vars.items()])
def _compute_upper_bounds(self):
"""computes upper bound on inflow into any entity.
It is essential for linearization of OR constraints.
It returns a dictionary mapping an entity name into its upper bound flow.
"""
spch=self._supply_chain # the supply chain
prods=spch._products # the products in the supply chain
trans=spch._transitions # the transitions in the supply chain
upper_bound_dict=dict([(p._name,p._order_size) for p in prods])
# keep updating the upper bound until reaching a fixed point
done=False
while not done:
done=True
for t in trans:
if t._target not in upper_bound_dict:
ub=0
updated=True
for out_tr in spch._outgoings[t._target]:
if out_tr._target in upper_bound_dict:
ub += upper_bound_dict[out_tr._target]
else:
updated=False
break
if updated:
upper_bound_dict[t._target]=ub
done=False
else:
continue
return upper_bound_dict
def solve(self):
self._prob.solve()
def write_lp(self, filename):
self._prob.writeLP(filename)
def is_product(self, name):
return self._supply_chain._entity_dict[name].get_type() == en.EntityType.PROD
def has_positive_stock(self, name):
ent=self._supply_chain._entity_dict[name]
return ent.get_type() == en.EntityType.COMP and ent._stock > 0
def objective(self):
"""returns the optimal value"""
return pulp.value(self._prob.objective)
def __str__(self):
prob=self._prob
status=f"Status: {pulp.LpStatus[prob.status]}"
obj=f"Objective={self.objective()}"
opt_vals='\n'.join([f"{v.name}={v.varValue}" for v in prob.variables()])
return f"{status}\n\n{obj}\n\n{opt_vals}"
| StarcoderdataPython |
3576017 | #!/usr/bin/env python
# FishStateMachine:
# Implementation of the finite state machine for SoFi (soft robotic fish)
#
# Node name: finite_state_machine
# Subscribed topics:
# - fish_pose
# - target_found
# - average_heading
# - average_pitch
# - average_dist
# - target_centroid (TODO)
# Published topics:
# - pid_enable
# - heading_state
# - heading_setpoint
# - pitch_state
# - pitch_setpoint
# - dist_state
# - dist_setpoint
# - heading_cmd
# - pitch_cmd
# - thrust_cmd
# Manual Testing Notes
# The results of each published topic in this node was tested for the following inputs:
# - target_found (topic) was True
# - target_found (topic) was False
# Both of the above inputs were tested in each state (INIT, SEARCH, FOLLOW) of the state machine.
# The program changed states and published to the pid_enable and heading... topics correctly
import rospy
import roslib
#import serial # see http://pyserial.readthedocs.org/en/latest/pyserial_api.html
from time import time, sleep
import time as clock
from std_msgs.msg import String, Float64, Bool
from sensor_msgs.msg import Image
from geometry_msgs.msg import PoseStamped
import cv2
from cv_bridge import CvBridge
import numpy as np
from fishstatecontroller.msg import State, Position
class FishStateController():
    """Finite state machine for SoFi (soft robotic fish).

    States:
        INIT   -- startup delay, PID controllers disabled.
        SEARCH -- PID disabled; turn toward the side the target was last
                  seen on until it is found again.
        FOLLOW -- PID enabled; publish heading/pitch/distance states and
                  setpoints so the external PID controllers track the target.
    """

    def __init__(self, update_hz, heading_setpoint=0.0, pitch_setpoint=0.0,
                 dist_setpoint=3.0, lost_delay=10):
        """
        update_hz: the update rate of the state machine (Hz)
        heading_setpoint/pitch_setpoint/dist_setpoint: PID targets
        lost_delay: frames to tolerate a lost target before searching again
        """
        ### State information, message, and publisher ###
        self.state = None
        self.states = ("INIT", "SEARCH", "FOLLOW")
        self.state_msg = State()
        self.state_pub = rospy.Publisher('fish_state', State, queue_size=10)
        self.update_hz = update_hz
        self.pid_enable_pub = rospy.Publisher('pid_enable', Bool, queue_size=10)
        self.target_found = False
        self.fish_pose = PoseStamped()
        ### heading, pitch, and distance state/setpoint publishers ###
        self.heading_state = None
        self.heading_state_pub = rospy.Publisher('heading_state', Float64, queue_size=10)
        self.heading_setpoint = heading_setpoint
        self.heading_setpoint_pub = rospy.Publisher('heading_setpoint', Float64, queue_size=10)
        self.pitch_state = None
        self.pitch_state_pub = rospy.Publisher('pitch_state', Float64, queue_size=10)
        self.pitch_setpoint = pitch_setpoint
        self.pitch_setpoint_pub = rospy.Publisher('pitch_setpoint', Float64, queue_size=10)
        self.dist_state = None
        self.dist_state_pub = rospy.Publisher('dist_state', Float64, queue_size=10)
        self.dist_setpoint = dist_setpoint
        self.dist_setpoint_pub = rospy.Publisher('dist_setpoint', Float64, queue_size=10)
        ### raw actuator commands used while searching ###
        self.heading_cmd_pub = rospy.Publisher('heading_cmd', Float64, queue_size=10)
        self.pitch_cmd_pub = rospy.Publisher('pitch_cmd', Float64, queue_size=10)
        self.thrust_cmd_pub = rospy.Publisher('thrust_cmd', Float64, queue_size=10)
        self.search_direction = None  # 1 for search RIGHT, -1 for search LEFT
        self.LOST_DELAY = lost_delay  # in frames
        self.transitionTo("INIT")

    def run(self):
        """Main loop: step the state machine at update_hz until ROS shutdown."""
        rate = rospy.Rate(self.update_hz)
        count = self.LOST_DELAY
        while not rospy.is_shutdown():
            if self.state == "INIT":
                self.pid_enable_pub.publish(False)
                sleep(15)  # give the rest of the pipeline time to come up
                self.transitionTo("SEARCH")
            elif self.state == "SEARCH":
                if self.target_found:
                    self.transitionTo("FOLLOW")
                    self.publish_states()
                else:
                    self.pid_enable_pub.publish(False)
                    self.publish_search_cmd()  # publish hard turn toward target side
            elif self.state == "FOLLOW":
                # If the target is lost, wait LOST_DELAY frames before
                # falling back to SEARCH.
                if not self.target_found:
                    if count == 0:
                        self.transitionTo("SEARCH")
                        self.pid_enable_pub.publish(False)
                        count = self.LOST_DELAY
                        continue
                    else:
                        count -= 1
                else:
                    # Target visible again: reset the lost-frame counter.
                    count = self.LOST_DELAY
                self.publish_states()
                # Remember which side the target is on so SEARCH turns the
                # right way next time.
                direction = self.fish_pose.pose.position.y
                if direction != 0:
                    # BUGFIX: -direction/abs(direction) raised
                    # ZeroDivisionError when the target was dead ahead
                    # (y == 0); keep the previous direction in that case.
                    self.search_direction = -direction / abs(direction)  # scale to +1/-1
            rate.sleep()

    def transitionTo(self, state_name):
        """Switch to `state_name` and publish the stamped state message."""
        self.state = state_name
        self.state_msg.header.stamp = rospy.Time.now()
        self.state_msg.state = self.state
        self.state_pub.publish(self.state_msg)

    def publish_states(self):
        """Enable the PIDs and publish current states and setpoints."""
        self.pid_enable_pub.publish(True)
        # heading
        self.heading_state_pub.publish(self.heading_state)
        self.heading_setpoint_pub.publish(self.heading_setpoint)
        # pitch
        self.pitch_state_pub.publish(self.pitch_state)
        self.pitch_setpoint_pub.publish(self.pitch_setpoint)
        # distance
        self.dist_state_pub.publish(self.dist_state)
        self.dist_setpoint_pub.publish(self.dist_setpoint)

    def publish_search_cmd(self):
        """Publish a hard turn (toward the last-seen side) while in SEARCH.

        Pitch and thrust commands are always published; the heading command
        is skipped until a search direction has been established.
        """
        if self.search_direction is not None:
            self.heading_cmd_pub.publish(self.search_direction)
        self.pitch_cmd_pub.publish(0)
        self.thrust_cmd_pub.publish(-1)

    def heading_callback(self, ros_data):
        """Store the latest average heading (Float64)."""
        self.heading_state = ros_data

    def pitch_callback(self, ros_data):
        """Store the latest average pitch (Float64)."""
        self.pitch_state = ros_data

    def dist_callback(self, ros_data):
        """Store the latest average distance (Float64)."""
        self.dist_state = ros_data

    def found_callback(self, ros_data):
        """Store whether the tracker currently sees the target (Bool)."""
        self.target_found = ros_data.data

    def pose_callback(self, ros_data):
        """Store the latest fish pose (PoseStamped)."""
        self.fish_pose = ros_data
if __name__ == '__main__':
    # Start the finite state machine node and wire up its inputs.
    rospy.init_node('finite_state_machine', anonymous=True)
    update_hz = 24  # state-machine update rate (Hz)
    # The heading setpoint can be overridden via a private ROS parameter.
    heading_setpoint = rospy.get_param("~heading_setpoint", 0.0)
    state_machine = FishStateController(update_hz, heading_setpoint=heading_setpoint)
    # Perception inputs feeding the controller.
    rospy.Subscriber('average_heading', Float64, state_machine.heading_callback)
    rospy.Subscriber('average_pitch', Float64, state_machine.pitch_callback)
    rospy.Subscriber('average_dist', Float64, state_machine.dist_callback)
    rospy.Subscriber('target_found', Bool, state_machine.found_callback)
    rospy.Subscriber('fish_pose', PoseStamped, state_machine.pose_callback)
    print("Fish State Machine: Beginning at %d hz\n"%(update_hz))
    state_machine.run()  # blocks until ROS shutdown
    print("\nFish State Machine: done\n")
| StarcoderdataPython |
5129550 | import sys
__author__ = '<NAME>'
def p2(n):
    """
    Each new term in the Fibonacci sequence is generated by adding the previous
    two terms. By starting with 1 and 2, the first 10 terms will be:
    1, 2, 3, 5, 8, 13, 21, 34, 55, 89, ...
    By considering the terms in the Fibonacci sequence whose values do not
    exceed four million, find the sum of the even-valued terms.

    :param n: exclusive upper limit for the Fibonacci terms considered.
    """
    # Seed with the first two terms (1, 2); the initial result of 2 already
    # accounts for the even term 2.
    fibonacci(n, 1, 2, 2)
def fibonacci(n, first, second, result):
    """Accumulate even Fibonacci terms below `n` and print the total.

    `first`/`second` are consecutive Fibonacci terms; `result` is the sum
    of even terms collected so far.
    """
    # Iterative form of the original tail recursion.
    while second < n:
        nxt = first + second
        if nxt % 2 == 0:
            result += nxt
        first, second = second, nxt
    print("Answer of the Problem 2nd is %d" % result)
if __name__ == '__main__':
    p2(89)          # sanity check against the sample sequence in the docstring
    p2(4*1000000)   # Project Euler problem 2 input
    sys.exit(0)
| StarcoderdataPython |
6440070 | """
Event action.
"""
import random
from muddery.events.base_event_action import BaseEventAction
class EventAttack(BaseEventAction):
    """Event action that may put a character into combat with a mob."""

    key = "EVENT_ATTACK"

    def func(self, event, character):
        """Roll against the event's odds and, on success, start the combat."""
        # Several events can carry different odds, so each invocation rolls
        # independently against its own threshold.
        if random.random() <= event["odds"]:
            # Attack the configured mob at the configured level.
            character.attack_temp_target(event["mob"], event["level"], event["desc"])
| StarcoderdataPython |
1865080 | from ArchicadDG import Rect
def rect_print(rect, title):
    """Print a labelled summary of a rectangle's four edge coordinates.

    Args:
        rect: object exposing GetTop/GetLeft/GetRight/GetBottom accessors.
        title: heading printed above the coordinates.
    """
    top = rect.GetTop()
    left = rect.GetLeft()
    right = rect.GetRight()
    bottom = rect.GetBottom()
    # Parenthesised print calls keep the module importable under Python 3;
    # the original Python-2 print statements are a SyntaxError there.
    print("_____" + title + "_____")
    print("top(" + str(top) + ")")
    print("left(" + str(left) + ")")
    print("right(" + str(right) + ")")
    print("bottom(" + str(bottom) + ")")
def run_test():
    """Exercise Rect intersection: build two rectangles, print their edges,
    their intersection flag, and the intersection rectangle."""
    rct1 = Rect(1, 1, 40, 60)
    rect_print(rct1, "one")
    rct2 = Rect(20, 20, 60, 60)
    rect_print(rct2, "two")
    # Parenthesised print keeps Python-3 compatibility (the original
    # Python-2 print statement is a SyntaxError there).
    print("IsIntersecting:" + str(rct1.IsIntersecting(rct2)))
    int_rct = rct1.Intersect(rct2)
    rect_print(int_rct, "three")
| StarcoderdataPython |
9676091 | import os
import secrets
from fastapi import Depends, FastAPI, HTTPException
from fastapi.openapi.docs import get_swagger_ui_html
from fastapi.openapi.utils import get_openapi
from fastapi.security import HTTPBasic, HTTPBasicCredentials
# custom modules
from models.algebra import array
# Disable FastAPI's built-in docs endpoints; they are re-exposed below
# behind HTTP Basic authentication.
app = FastAPI(docs_url=None, redoc_url=None, openapi_url=None)

# Basic Auth
security = HTTPBasic()
def get_current_username(
    credentials: HTTPBasicCredentials = Depends(security),
) -> str:
    """Validate HTTP Basic credentials against the API_USERNAME/API_PASSWORD
    environment variables (defaulting to "username"/"password").

    Returns:
        The authenticated username.

    Raises:
        HTTPException: 401 with a WWW-Authenticate challenge on mismatch.
    """
    # compare_digest runs in constant time, which avoids leaking how many
    # leading characters of the credential were correct.
    correct_username = secrets.compare_digest(
        credentials.username, os.environ.get("API_USERNAME", "username")
    )
    correct_password = secrets.compare_digest(
        credentials.password, os.environ.get("API_PASSWORD", "password")
    )
    if not (correct_username and correct_password):
        raise HTTPException(
            status_code=401,
            # The check is against a username, not an email address.
            detail="Incorrect username or password",
            headers={"WWW-Authenticate": "Basic"},
        )
    return credentials.username
###################
# status endpoint #
###################
@app.get("/")
def status():
return {"status": "Ok"}
##################
# docs endpoints #
##################
@app.get("/docs", include_in_schema=False)
def get_documentation(_username: str = Depends(get_current_username)):
return get_swagger_ui_html(openapi_url="/openapi.json", title="docs")
@app.get("/openapi.json", include_in_schema=False)
def openapi(_username: str = Depends(get_current_username)):
return get_openapi(title="FastAPI", version="0.1.0", routes=app.routes)
###################
# other endpoints #
###################
@app.get("/array")
def get_array(_username: str = Depends(get_current_username)):
try:
result = array.get_random().tolist()
except Exception as exc:
raise HTTPException(
status_code=400, detail=f"{type(exc).__name__}: {str(exc)}"
) from exc
return {"result": result}
| StarcoderdataPython |
8033761 |
import glob
#import os
import pandas as pd
colnames=['Ticker', 'Date', 'Open', 'High', 'Low', 'Close', 'Volume']
def pivotAndInterpolate(row, index, column, reIndex, interpolater, limiter, df):
    """Pivot `df` into a date-indexed table and fill gaps by interpolation.

    Args:
        row: column name supplying the pivot values.
        index: column(s) forming the pivot index (yyyymmdd integers).
        column: column whose distinct values become the output columns.
        reIndex: target DatetimeIndex for the result.
        interpolater: interpolation method for DataFrame.interpolate.
        limiter: limit_area argument for DataFrame.interpolate.
        df: source DataFrame.

    Returns:
        The reindexed, interpolated table with remaining gaps set to 0.
    """
    table = df.pivot_table(row, index, column)
    table.index = pd.to_datetime(table.index, format='%Y%m%d')
    reindexed = table.reindex(reIndex)
    interpolated = reindexed.interpolate(method=interpolater, limit_area=limiter)
    return interpolated.fillna(0)
# Load every daily ASX price file and build per-ticker OHLCV panels.
all_files = glob.glob('C:/QM/rnd/ASX-2015-2018/ASX-2015-2018/2*.txt') # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, names=colnames, header=None, encoding='utf-8') for f in all_files)
data = pd.concat(df_from_each_file, ignore_index=True, sort=True)
data['HighLow'] = data['High']/data['Low']
index = pd.date_range('20150102','20180629')
dfOpen=pivotAndInterpolate('Open', ['Date'], 'Ticker',index, 'linear','inside', data)
# BUGFIX: the 'High' and 'Low' source columns were crossed, so dfLow held
# the highs and dfHigh the lows.
dfLow=pivotAndInterpolate('Low', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHigh=pivotAndInterpolate('High', ['Date'], 'Ticker',index, 'linear','inside',data)
dfClose=pivotAndInterpolate('Close', ['Date'], 'Ticker',index, 'linear','inside',data)
dfVolume=pivotAndInterpolate('Volume', ['Date'], 'Ticker',index, 'linear','inside',data)
dfHighLow=pivotAndInterpolate('HighLow', ['Date'], 'Ticker',index, 'linear','inside',data)
dfCloseReturns=dfClose/dfClose.shift(1) - 1 #Close to close Returns
import numpy as np
from fastai.structured import add_datepart
import matplotlib.pyplot as plt
# Build a feature frame for one ticker with calendar features.
asxTicker='VHY'
ticker=dfClose[asxTicker]
ticker=ticker.reset_index()
add_datepart(ticker, 'index')
trainSize=700
# Flag Mondays (0) and Fridays (4) as potentially special trading days.
ticker['mon_fri'] = 0
for i in range(0,len(ticker)):
    if (ticker['indexDayofweek'][i] == 0 or ticker['indexDayofweek'][i] == 4):
        ticker['mon_fri'][i] = 1
    else:
        ticker['mon_fri'][i] = 0
# Chronological train/validation split.
train = ticker[:trainSize]
valid = ticker[trainSize:]
x_train = train.drop(asxTicker, axis=1)
y_train = train[asxTicker]
x_valid = valid.drop(asxTicker, axis=1)
y_valid = valid[asxTicker]
#implement ARIMA
from pmdarima.arima import auto_arima
train = ticker[:trainSize]
valid = ticker[trainSize:]
training = train[asxTicker]
validation = valid[asxTicker]
model = auto_arima(training, start_p=1, start_q=1,max_p=3, max_q=3, m=12,start_P=0, seasonal=True,d=1, D=1, trace=True,error_action='ignore',suppress_warnings=True)
model.fit(training)
noOfPeriods=len(ticker)-trainSize
forecast = model.predict(n_periods=noOfPeriods)
forecast = pd.DataFrame(forecast,index = valid.index,columns=['Prediction'])
# Root-mean-square error of the forecast on the validation window.
rmsA=np.sqrt(np.mean(np.power((np.array(valid[asxTicker])-np.array(forecast['Prediction'])),2)))
plt.plot(train[asxTicker])
plt.plot(valid[asxTicker])
plt.plot(forecast['Prediction'])
3567259 | import json
def _build_slot_values(staffs, field):
    """Collect unique slot entries for one name field.

    De-duplicates case-insensitively (first occurrence wins, keeping its
    original casing) and skips records where the field is None.
    """
    seen = set()
    entries = []
    for staff in staffs:
        value = staff[field]
        if value is None:
            continue
        key = value.lower()
        if key in seen:
            continue
        seen.add(key)
        entries.append({"id": key, "name": {"value": value}})
    return entries


# Close the input file deterministically (the original left it open).
with open('staff.json', 'r') as infile:
    staffs = json.load(infile)['data']

# Emit one Alexa-style slot file per name field.
for field in ("firstname", "lastname", "middlename"):
    with open('slot_%s.json' % field, 'w') as outfile:
        json.dump(_build_slot_values(staffs, field), outfile,
                  sort_keys = True, indent = 4)
| StarcoderdataPython |
5177449 | # -*- coding: utf-8 -*-
"""
Created on Fri May 21 11:55:50 2021
@author: freeridingeo
"""
import os
from pathlib import Path
import numpy as np
import pandas as pd
import geopandas as gpd
from sentinelhub import BBoxSplitter, CRS
import rasterio
from rasterio.windows import Window, bounds as wind_bounds
from rasterio.warp import transform_bounds
from shapely.geometry import Polygon
def prepare_large_aoi_grid(vectorfile, t_crs, gr_sz, res=10, save=False):
    """Split a (possibly large) AOI polygon into a grid of bounding-box tiles.

    Args:
        vectorfile: the AOI file, either ESRI shapefile or GeoJSON.
        t_crs: target sentinelhub CRS used for sizing the grid.
        gr_sz: tile/grid size in pixels used to split the AOI.
        res: ground resolution in CRS units per pixel (default 10).
        save: when True, write the tile grid to "aoi_tile_grid/<name>".

    Returns:
        tuple: (patchID ndarray of tile indices, ndarray of tile BBoxes).
    """
    aoi_geo = gpd.read_file(vectorfile)
    # Reproject only when the file is NOT already in the target CRS.
    # (BUGFIX: the original condition was inverted, so data in a foreign
    # CRS was never reprojected.)
    if aoi_geo.crs == t_crs.pyproj_crs():
        aoi_reprj = aoi_geo
    else:
        aoi_reprj = aoi_geo.to_crs(crs=t_crs.pyproj_crs())
    aoi_shape = aoi_reprj.geometry.values[-1]
    data_res = res
    width_pix = int((aoi_shape.bounds[2] - aoi_shape.bounds[0]) / data_res)
    heigth_pix = int((aoi_shape.bounds[3] - aoi_shape.bounds[1]) / data_res)
    print('Dimension of the area is {} x {} pixels'
          .format(width_pix, heigth_pix))
    # Number of grid cells along each axis.
    width_grid = int(round(width_pix / gr_sz))
    heigth_grid = int(round(heigth_pix / gr_sz))
    tile_splitter = BBoxSplitter([aoi_shape], t_crs, (width_grid, heigth_grid))
    print("The area is splitted into a grid with {} by {} tiles!"
          .format(width_grid, heigth_grid))
    tiles = np.array(tile_splitter.get_bbox_list())
    # Grid coordinates of every tile (used only when saving).
    idxs_x = [info['index_x'] for info in tile_splitter.info_list]
    idxs_y = [info['index_y'] for info in tile_splitter.info_list]
    # IDs of the tiles that will be saved as EOPatches.
    patchID = np.array(range(len(tiles))).astype("int")
    geometry = [Polygon(bbox_.get_polygon()) for bbox_ in tiles[patchID]]
    # BUGFIX: the original used `while save == True:` with no break, which
    # looped forever whenever saving was requested.
    if save:
        nm = os.path.basename(vectorfile)
        tile_path = "aoi_tile_grid"
        df = pd.DataFrame({'index_x': idxs_x, 'index_y': idxs_y})
        gdf = gpd.GeoDataFrame(df, crs=t_crs.pyproj_crs(), geometry=geometry)
        gdf.to_file(os.path.join(tile_path, nm))
    return patchID, tiles
def create_eopatch_tiles_from_aoi_pixels(aoi_raster, t_crs,
                                         res=10, grid_sz = 46):
    """
    Loop through aoi pixels in the geotif to create grid cells.
    ---
    Param
        aoi_raster: geotif of aoi;
        t_crs: target CRS for the grid cell
        res: ground resolution in CRS units per pixel;
        grid_sz: grid cell size in pixels.
    Return
        tile_list: flattened ndarray of BBox grid cells, one sub-grid per
        AOI pixel (note: no patch IDs are returned).
    """
    gpd_geo = list()
    prop = list()
    tile_lists = list()
    # loop through each row and column of aoi pixel to create bounding box
    with rasterio.open(aoi_raster) as src_dst:
        for col_off in range(0, src_dst.width):
            for row_off in range(0, src_dst.height):
                # Bounds of the single pixel at (col_off, row_off),
                # reprojected to WGS84.
                bounds = wind_bounds(Window(col_off, row_off, 1, 1), src_dst.transform)
                xmin, ymin, xmax, ymax = transform_bounds(
                    *[src_dst.crs, "epsg:4326"] + list(bounds), densify_pts=21
                )
                poly = Polygon([
                    (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax), (xmin, ymin)
                ])
                gpd_geo.append(poly)
                prop.append("{}_{}".format(col_off, row_off))
    # One "col_row" id per pixel polygon, then reproject to the target CRS.
    gpd_df = gpd.GeoDataFrame(prop, crs=CRS.WGS84.pyproj_crs(), geometry=gpd_geo)
    gpd_reproj = gpd_df.rename(columns={0: "id", "geometry": "geometry"})
    gpd_reproj = gpd_reproj.to_crs(crs=t_crs.pyproj_crs())
    designed_bbox_shapes = gpd_reproj.geometry.tolist()
    for aoi_shape in designed_bbox_shapes:
        width_pix = int((aoi_shape.bounds[2] - aoi_shape.bounds[0])/res)
        heigth_pix = int((aoi_shape.bounds[3] - aoi_shape.bounds[1])/res)
        width_grid = int(round(width_pix/grid_sz))
        heigth_grid = int(round(heigth_pix/grid_sz))
        # split the tile grid by the desired grid number
        tile_splitter = BBoxSplitter([aoi_shape], t_crs, (width_grid, heigth_grid))
        tile_list = np.array(tile_splitter.get_bbox_list())
        # NOTE(review): info_list/idxs_x/idxs_y are computed but never used
        # in this function — candidates for removal.
        info_list = np.array(tile_splitter.get_info_list())
        # get the all pylogon information from the splitted AOI
        idxs_x = [info['index_x'] for info in tile_splitter.info_list]
        idxs_y = [info['index_y'] for info in tile_splitter.info_list]
        tile_lists.append(tile_list)
    # NOTE(review): np.array(...).flatten() on ragged sub-grids presumably
    # relies on object-array behaviour — confirm with real inputs.
    tile_list = np.array(tile_lists).flatten()
    return tile_list
5132228 | <reponame>Lee2532/airflow
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.postgres_operator import PostgresOperator
from datetime import datetime, timedelta
import time
from datetime import datetime
import pandas
from sqlalchemy import create_engine
import psycopg2
#test123
# Shared operator defaults for this DAG.
default_args = {
    'owner': 'solee',
    'depends_on_past': False,
    'start_date': datetime(2020, 6, 3),
    'retries': 3,
    'retry_delay': timedelta(minutes=5)
}
# Run once on trigger; no recurring schedule.
dag = DAG(dag_id='test',
          default_args=default_args,
          schedule_interval="@once",
          )
def _sleep():
    """Print the current timestamp once a second for 100 seconds.

    Placeholder task body shared by both tasks in this DAG.
    """
    for i in range(100):
        print(str(datetime.now()))
        time.sleep(1)
# Two independent placeholder tasks; both just run _sleep.
wait_this = PythonOperator(
    task_id='wait',
    python_callable=_sleep,
    dag=dag,
)
task = PythonOperator(
    task_id=f'test',
    python_callable=_sleep,
    dag=dag,
)
[wait_this, task] | StarcoderdataPython |
12858612 | import unittest
from collections import OrderedDict
from dbcut.utils import sorted_nested_dict
def test_simple_dict_is_sorted():
    """A flat dict comes back as an OrderedDict with keys in sorted order."""
    unsorted = {"c": 1, "a": 2, "b": 3}
    assert sorted_nested_dict(unsorted) == OrderedDict([("a", 2), ("b", 3), ("c", 1)])
def test_nested_iterables_are_sorted():
    """Nested lists and tuples are sorted; tuples come back as lists."""
    unsorted = {"c": [1, 3, 2], "a": 2, "b": (3, 1, 2)}
    expected = OrderedDict([
        ("a", 2),
        # The tuple is transformed into a list here. Still an iterable though.
        ("b", [1, 2, 3]),
        ("c", [1, 2, 3]),
    ])
    assert sorted_nested_dict(unsorted) == expected
def test_nested_dicts_are_sorted():
    """Sorting is applied recursively to nested dictionaries."""
    unsorted = {"c": 1, "a": {"b": 1, "a": 2}, "b": 3}
    expected = OrderedDict([
        ("a", OrderedDict([("a", 2), ("b", 1)])),
        ("b", 3),
        ("c", 1),
    ])
    assert sorted_nested_dict(unsorted) == expected
def test_non_dicts_are_untouched():
    """Non-dict inputs are returned as the very same object."""
    class Custom:
        pass

    samples = ("ravioli", ["r", "a", "v", "i", "o", "l", "i"], 42, Custom())
    for value in samples:
        assert sorted_nested_dict(value) is value
| StarcoderdataPython |
11220980 | <filename>Sorting_Algorithms/Insertion_sort_Ascending_and_descending.py<gh_stars>0
def insertionAscending(array):
    """Sort `array` in place in ascending order (insertion sort) and return it."""
    for pos in range(1, len(array)):
        current = array[pos]
        hole = pos
        # Shift larger elements one slot right until the insert point is found.
        while hole > 0 and array[hole - 1] > current:
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = current
    return array
def insertionDescending(array):
    """Sort `array` in place in descending order (insertion sort) and return it."""
    for pos in range(1, len(array)):
        current = array[pos]
        hole = pos
        # Shift smaller elements one slot right until the insert point is found.
        while hole > 0 and array[hole - 1] < current:
            array[hole] = array[hole - 1]
            hole -= 1
        array[hole] = current
    return array
def display(array):
    """Print the elements, each followed by a single space, then a newline."""
    print("".join("%s " % item for item in array))
if __name__=="__main__":
array=list(map(int,input("Array: ").split()))
display(insertionAscending(array))
display(insertionDescending(array)) | StarcoderdataPython |
6402198 | import numpy as np
# Calendar months (March-September) treated as the on-season.
ON_SEASON = [3, 4, 5, 6, 7, 8, 9]

# Explicit per-year (start, end) on-season date windows, inclusive on both
# ends (see is_on_season). Presumably baseball season spans — note they run
# past September, unlike ON_SEASON above; confirm against the data source.
ON_SEASON_2 = [
    (np.datetime64('2017-04-02'), np.datetime64('2017-11-01')),
    (np.datetime64('2018-03-29'), np.datetime64('2018-10-28')),
    (np.datetime64('2019-03-20'), np.datetime64('2019-10-30')),
    (np.datetime64('2020-07-23'), np.datetime64('2020-10-28')),
    (np.datetime64('2021-02-28'), np.datetime64('2021-10-31'))
]
LABELS = {
'award': {
'MILBORGAS': 0,
'FSLMSAS': 1,
'MWLMSAS': 2,
'SLMSAS': 3,
'TLMSAS': 4,
'ELMSAS': 5,
'MEXMSAS': 6,
'SALMSAS': 7,
'CARMSAS': 8,
'AFLRS': 9,
'CALMSAS': 10,
'FUTURES': 11,
'ALPBMSAS': 12,
'DSLMSAS': 13,
'ILMSAS': 14,
'WSCHAMP': 15,
'ALAS': 16,
'NLAS': 17,
'PCLMSAS': 18,
'ALPOW': 19,
'NLPOW': 20,
'NORMSAS': 21,
'PIOLGAS': 22,
'NYPMSAS': 23,
'BAMLART': 24,
'TEXPOWP': 25,
'CALPOWP': 26,
'MIDPOWH': 27,
'INTPOWH': 28,
'FSLPOWP': 29,
'CALPOWH': 30,
'FSLPOWH': 31,
'PCLPOWH': 32,
'INTPOWP': 33,
'CARPOWP': 34,
'PCLPOWP': 35,
'TEXPOWH': 36,
'CARPOWH': 37,
'SALPOWH': 38,
'MIDPOWP': 39,
'EASPOWH': 40,
'SALPOWP': 41,
'EASPOWP': 42,
'SOUPOWP': 43,
'SOUPOWH': 44,
'MLBPLAYOW': 45,
'FSLPSAS': 46,
'CSAS': 47,
'MLBSECOND': 48,
'MLBAFIRST': 49,
'SLPSAS': 50,
'BAAAAAS': 51,
'BAMILAS': 52,
'BASSAS': 53,
'BAHAXXAS': 54,
'BADSLAS': 55,
'BAROAS': 56,
'BAAAXAS': 57,
'TLPSAS': 58,
'BALAXXAS': 59,
'NLGG': 60,
'APPPSAS': 61,
'CALPSAS': 62,
'NWLPSAS': 63,
'SALPSAS': 64,
'NLSS': 65,
'ELPSAS': 66,
'ALGG': 67,
'PIOPSAS': 68,
'AZLPSAS': 69,
'MWLPSAS': 70,
'ALSS': 71,
'CARPSAS': 72,
'DSLPSAS': 73,
'ALPBEOSAS': 74,
'GCLPSAS': 75,
'PCLPSAS': 76,
'PWLPSAS': 77,
'ILPSAS': 78,
'NORPOWH': 79,
'APPPOWP': 80,
'NORPOWP': 81,
'NYPPOWH': 82,
'NYPPOWP': 83,
'PIOPOWH': 84,
'APPPOWH': 85,
'AFLPSAS': 86,
'PIOPOWP': 87,
'MILBGG': 88,
'WDPOY': 89,
'MLBHOF': 90,
'HRDERBY': 91,
'NLPOM': 92,
'ALPOM': 93,
'ALPITOM': 94,
'NLPITOM': 95,
'ALROM': 96,
'NLRRELMON': 97,
'NLROM': 98,
'ALRRELMON': 99
},
'statusCode': {
'A': 0,
'RM': 1,
'D60': 2,
'D10': 3,
'D7': 4,
'PL': 5,
'SU': 6,
'BRV': 7,
'FME': 8,
'RES': 9,
'DEC': 10
},
'typeCode': {
'ASG': 0,
'SFA': 1,
'SC': 2,
'OPT': 3,
'CU': 4,
'SGN': 5,
'SE': 6,
'TR': 7,
'DES': 8,
'OUT': 9,
'DFA': 10,
'NUM': 11,
'REL': 12,
'CLW': 13,
'RTN': 14,
'RET': 15
},
'positionName': {
'Pitcher': 1,
'Outfielder': 2,
'Catcher': 3,
'First Base': 4,
'Second Base': 5,
'Third Base': 6,
'Shortstop': 7,
'Pinch Hitter': 8,
'Designated Hitter': 9,
'Pinch Runner': 10
},
'positionType': {
'Infielder': 1,
'Pitcher': 2,
'Outfielder': 3,
'Hitter': 4,
'Catcher': 5,
'Runner': 6
},
'gameType': {
'R': 0,
'S': 1,
'D': 2,
'L': 3,
'E': 4,
'F': 5,
'W': 6,
'A': 6
},
'positionCode': {
'1': 0,
'2': 1,
'3': 2,
'4': 3,
'5': 4,
'6': 5,
'7': 6,
'8': 7,
'9': 8,
'10': 9,
'O': 10
},
'birthCountry': {
'USA': 0,
'Dominican Republic': 1,
'Venezuela': 2,
'Cuba': 3,
'Puerto Rico': 4,
'Mexico': 5,
'Canada': 6,
'Japan': 7,
'Colombia': 8,
'Panama': 9
}
}
CATEGORICAL_COLS = {
'player_statusCode': 12,
'primaryPositionCode': 12,
'player_transactions_typeCode': 17
}
def is_on_season(dt: np.datetime64) -> bool:
    """Return True when `dt` falls inside any ON_SEASON_2 window (inclusive)."""
    return any(start <= dt <= end for start, end in ON_SEASON_2)
| StarcoderdataPython |
11301319 | <gh_stars>10-100
# Generated by Django 1.11.2 on 2017-08-07 23:18
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds the optional free-text ``note``
    # field ("Comments from editor") to the Letter model.

    dependencies = [("letters", "0004_auto_20170708_2222")]

    operations = [
        migrations.AddField(
            model_name="letter",
            name="note",
            field=models.TextField(blank=True, verbose_name="Comments from editor"),
        )
    ]
| StarcoderdataPython |
4847374 | <filename>self_created_svm_linear_optimization_classification_prob.py
# This is a support vector machine algorithm written from scratch
# works for linear data sets
# by <NAME>
import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
class Support_Vector_Machine:
    """Linear hard-margin SVM trained by brute-force convex search.

    fit() scans w over a shrinking sequence of step sizes, keeping every
    (w, b) that satisfies the constraint yi*(xi.w + b) >= 1 for all samples,
    and picks the one with the smallest ||w||.
    """

    def __init__(self, visualization=True):
        # visualization: when True, prepare a matplotlib figure for plotting.
        self.visualization = visualization
        self.colors = {1:'r',-1:'b'}  # class label -> plot color
        if self.visualization:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1,1,1)
    #train
    def fit(self, data):
        """Train on `data`, a dict mapping class label (+1/-1) to an array
        of feature vectors. Sets self.w and self.b."""
        self.data = data
        #{ ||w||: [w, b]}
        opt_dict = {}
        # Sign combinations applied to w so all four quadrants are explored.
        transform = [ [1,1],
                    [-1,1],
                    [-1,-1],
                    [1,-1]]
        # Flatten every feature value to find the data's magnitude range.
        all_data = []
        for yi in self.data:
            for featureset in self.data[yi]:
                for feature in featureset:
                    all_data.append(feature)
        self.max_feature_value = max(all_data)
        self.min_feature_value = min(all_data)
        all_data = None
        #support vector yi(xi.w+b) = 1
        # Progressively finer search steps, scaled to the data magnitude.
        step_sizes = [self.max_feature_value * 0.1,
                      self.max_feature_value * 0.01,
                      #point of expenses
                      self.max_feature_value * 0.001,]
        #extremly expensive
        b_range_multiple = 5
        b_multiple = 5
        latest_optimum = self.max_feature_value*10
        for step in step_sizes:
            w = np.array([latest_optimum,latest_optimum])
            #we can do this because convex
            optimized = False
            while not optimized:
                # Sweep b over a coarse symmetric range for the current step.
                for b in np.arange(-1*(self.max_feature_value*b_range_multiple),
                                   self.max_feature_value*b_range_multiple,
                                   step*b_multiple):
                    for transformation in transform:
                        w_t = w*transformation
                        found_option = True
                        #weakest link in SVM fundamentally
                        #SMO attempt to solve it
                        #yi(xi.w+b) >=1
                        #####add Break() in this fun later
                        # Check the margin constraint on every sample.
                        for i in self.data:
                            for xi in self.data[i]:
                                yi = i
                                if not yi*(np.dot(w_t,xi)+b) >= 1:
                                    found_option = False
                                    ####break()
                        if found_option:
                            opt_dict[np.linalg.norm(w_t)] = [w_t,b]
                # NOTE(review): only w[0] is checked here; both components
                # shrink in lockstep since w starts equal — confirm intent.
                if w[0] < 0:
                    optimized = True
                    print('Optimized a step')
                else:
                    w = w - step
            # Smallest ||w|| among all feasible candidates wins this pass.
            norms = sorted([n for n in opt_dict])
            #||w|| : [w, b]
            opt_choice = opt_dict[norms[0]]
            self.w = opt_choice[0]
            self.b = opt_choice[1]
            # Restart the next (finer) pass near the current optimum.
            latest_optimum = opt_choice[0][0]*2
        # Report the functional margin of every training sample.
        for i in self.data:
            for xi in self.data[i]:
                yi = i
                print(xi,':', yi*(np.dot(self.w,xi)+self.b))
    def predict(self, features):
        """Classify `features` as +1/-1 by sign(x.w + b); 0 means on the
        decision boundary. Plots the point when visualization is on."""
        # sign (x.w+b)
        classification = np.sign(np.dot(features, self.w) + self.b)
        if classification != 0 and self.visualization:
            self.ax.scatter(features[0],features[1],s=200, marker='*', c=self.colors[classification])
        return classification
    def visualize(self):
        """Plot the training data, the two support-vector hyperplanes, and
        the decision boundary, then show the figure."""
        [[self.ax.scatter(x[0], x[1], s=100, c=self.colors[i]) for x in data_dict[i]] for i in data_dict]
        # hyperplane = x.w+b
        #v = x.w+b
        #psv = 1
        #nsv = -1
        #decB = 0
        def hyperplane(x,w,b,v):
            # Solve w[0]*x + w[1]*y + b = v for y.
            return (-w[0]*x-b+v) / w[1]
        data_range = (self.min_feature_value*0.9,self.max_feature_value*1.1)
        hyp_x_min = data_range[0]
        hyp_x_max = data_range[1]
        # (w.x+b)=1
        # positive support vector hyperplane
        psv1 = hyperplane(hyp_x_min,self.w,self.b,1)
        psv2 = hyperplane(hyp_x_max,self.w,self.b,1)
        self.ax.plot([hyp_x_min,hyp_x_max],[psv1,psv2], 'k')
        # (w.x+b)=-1
        # negative support vector hyperplane
        nsv1 = hyperplane(hyp_x_min,self.w,self.b,-1)
        nsv2 = hyperplane(hyp_x_max,self.w,self.b,-1)
        self.ax.plot([hyp_x_min,hyp_x_max],[nsv1,nsv2], 'k')
        # (w.x+b)=0
        # positive support vector hyperplane
        db1 = hyperplane(hyp_x_min,self.w,self.b,0)
        db2 = hyperplane(hyp_x_max,self.w,self.b,0)
        self.ax.plot([hyp_x_min,hyp_x_max],[db1,db2], 'k')
        plt.show()
#training data
# Two linearly separable clusters labelled -1 and +1.
data_dict = {-1:np.array([[1,7],
             [2,8],
             [3,8],]),
             1:np.array([[5,1],
             [6,-1],
             [7,3],])}
svm = Support_Vector_Machine()
svm.fit(data=data_dict)
# Unlabelled points to classify and overlay on the plot.
predict_us = [[0,10],
              [1,3],
              [3,4],
              [3,5],
              [5,5],
              [5,6],
              [6,-5],
              [5,8]]
for p in predict_us:
    svm.predict(p)
svm.visualize()
| StarcoderdataPython |
6628617 | <gh_stars>1-10
from django.test import TestCase
from custom.icds_reports.reports.service_delivery_dashboard_data import get_service_delivery_report_data
class TestServiceDeliveryData(TestCase):
def test_get_service_delivery_report_data_0_3(self):
get_service_delivery_report_data.clear('icds-cas', 0, 10, None, False,
{'aggregation_level': 1}, 2017, 5, 'pw_lw_children')
data = get_service_delivery_report_data(
'icds-cas',
0,
10,
None,
False,
{
'aggregation_level': 1,
},
2017,
5,
'pw_lw_children',
)
expected = {
'data': [
{
'state_name': 'All',
'district_name': 'All',
'block_name': 'All',
'supervisor_name': 'All',
'awc_name': 'All',
'num_launched_awcs': 22,
'valid_visits': 3,
'expected_visits': 379,
'gm_0_3': 222,
'children_0_3': 314,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 8,
'thr_21_days': 261,
'thr_25_days': 180,
'thr_eligible': 598,
'vhnd_conducted': 12,
'home_visits': '0.79 %',
'gm': '70.70 %',
'cbe': '4.55 %',
'thr': '43.65 %',
'cbe_sector_percent': '14.29 %',
'vhnd_sector_value': 8
},
{
'state_name': 'st1',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 10,
'valid_visits': 3,
'expected_visits': 185,
'gm_0_3': 83,
'children_0_3': 143,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 2,
'thr_21_days': 80,
'thr_25_days': 24,
'thr_eligible': 279,
'vhnd_conducted': 3,
'home_visits': '1.62 %',
'gm': '58.04 %',
'cbe': '0.00 %',
'thr': '28.67 %'
},
{
'state_name': 'st2',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 11,
'valid_visits': 0,
'expected_visits': 193,
'gm_0_3': 139,
'children_0_3': 171,
'num_awcs_conducted_cbe': 1,
'num_awcs_conducted_vhnd': 6,
'thr_21_days': 181,
'thr_25_days': 156,
'thr_eligible': 318,
'vhnd_conducted': 9,
'home_visits': '0.00 %',
'gm': '81.29 %',
'cbe': '9.09 %',
'thr': '56.92 %'
},
{
'state_name': 'st3',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st4',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st5',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st6',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 0,
'valid_visits': 0,
'expected_visits': 0,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 0,
'vhnd_conducted': 0,
'home_visits': 'Data Not Entered',
'gm': 'Data Not Entered',
'cbe': 'Data Not Entered',
'thr': 'Data Not Entered'
},
{
'state_name': 'st7',
'district_name': 'Data Not Entered',
'block_name': 'Data Not Entered',
'supervisor_name': 'Data Not Entered',
'awc_name': 'Data Not Entered',
'num_launched_awcs': 1,
'valid_visits': 0,
'expected_visits': 1,
'gm_0_3': 0,
'children_0_3': 0,
'num_awcs_conducted_cbe': 0,
'num_awcs_conducted_vhnd': 0,
'thr_21_days': 0,
'thr_25_days': 0,
'thr_eligible': 1,
'vhnd_conducted': 0,
'home_visits': '0.00 %',
'gm': 'Data Not Entered',
'cbe': '0.00 %',
'thr': '0.00 %'
}
],
'aggregationLevel': 1,
'recordsTotal': 7,
'recordsFiltered': 7
}
self.assertDictEqual(expected, data)
    def test_get_service_delivery_data_state_0_3(self):
        """Service Delivery Report, 0-3 age group, drilled into state st1.

        Aggregation level 2 (district rows) filtered to ``state_id='st1'``
        and sorted by ``district_name``; expects the 'All' roll-up row
        followed by the single district row 'd1'.
        """
        data = get_service_delivery_report_data(
            'icds-cas',
            0,
            10,
            'district_name',
            False,
            {
                'aggregation_level': 2,
                'state_id': 'st1',
            },
            2017,
            5,
            'pw_lw_children',
        )
        # Expected payload: the 'All' summary row carries two extra keys
        # (cbe_sector_percent, vhnd_sector_value) that location rows lack.
        expected = {
            'data': [
                {
                    'state_name': 'All',
                    'district_name': 'All',
                    'block_name': 'All',
                    'supervisor_name': 'All',
                    'awc_name': 'All',
                    'num_launched_awcs': 10,
                    'valid_visits': 3,
                    'expected_visits': 185,
                    'gm_0_3': 83,
                    'children_0_3': 143,
                    'num_awcs_conducted_cbe': 0,
                    'num_awcs_conducted_vhnd': 2,
                    'thr_21_days': 80,
                    'thr_25_days': 24,
                    'thr_eligible': 279,
                    'vhnd_conducted': 3,
                    'home_visits': '1.62 %',
                    'gm': '58.04 %',
                    'cbe': '0.00 %',
                    'thr': '28.67 %',
                    'cbe_sector_percent': '0.00 %',
                    'vhnd_sector_value': 2
                },
                {
                    'state_name': 'st1',
                    'district_name': 'd1',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'num_launched_awcs': 10,
                    'valid_visits': 3,
                    'expected_visits': 185,
                    'gm_0_3': 83,
                    'children_0_3': 143,
                    'num_awcs_conducted_cbe': 0,
                    'num_awcs_conducted_vhnd': 2,
                    'thr_21_days': 80,
                    'thr_25_days': 24,
                    'thr_eligible': 279,
                    'vhnd_conducted': 3,
                    'home_visits': '1.62 %',
                    'gm': '58.04 %',
                    'cbe': '0.00 %',
                    'thr': '28.67 %'
                }
            ],
            'aggregationLevel': 2,
            'recordsTotal': 1,
            'recordsFiltered': 1
        }
        self.assertDictEqual(expected, data)
    def test_get_service_delivery_data_3_6(self):
        """Service Delivery Report, 3-6 age group ('children' beneficiary
        category), at national level (aggregation level 1).

        Expects the 'All' roll-up row followed by one row per state
        (st1..st7); states with zero launched AWCs report
        'Data Not Entered' for the percentage columns.
        """
        data = get_service_delivery_report_data(
            'icds-cas',
            0,
            10,
            None,
            False,
            {
                'aggregation_level': 1,
            },
            2017,
            5,
            'children',
        )
        expected = {
            'data': [
                {
                    'num_launched_awcs': 22,
                    'state_name': 'All',
                    'district_name': 'All',
                    'block_name': 'All',
                    'supervisor_name': 'All',
                    'awc_name': 'All',
                    'lunch_21_days': 15,
                    'lunch_25_days': 0,
                    'pse_eligible': 991,
                    'pse_21_days': 66,
                    'pse_25_days': 20,
                    'gm_3_5': 473,
                    'children_3_5': 675,
                    'gm': '70.07 %',
                    'pse': '6.66 %',
                    'sn': '1.51 %'
                },
                {
                    'num_launched_awcs': 10,
                    'state_name': 'st1',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 4,
                    'lunch_25_days': 0,
                    'pse_eligible': 483,
                    'pse_21_days': 7,
                    'pse_25_days': 0,
                    'gm_3_5': 234,
                    'children_3_5': 332,
                    'gm': '70.48 %',
                    'pse': '1.45 %',
                    'sn': '0.83 %'
                },
                {
                    'num_launched_awcs': 11,
                    'state_name': 'st2',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 11,
                    'lunch_25_days': 0,
                    'pse_eligible': 507,
                    'pse_21_days': 59,
                    'pse_25_days': 20,
                    'gm_3_5': 239,
                    'children_3_5': 342,
                    'gm': '69.88 %',
                    'pse': '11.64 %',
                    'sn': '2.17 %'
                },
                {
                    'num_launched_awcs': 0,
                    'state_name': 'st3',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 0,
                    'lunch_25_days': 0,
                    'pse_eligible': 0,
                    'pse_21_days': 0,
                    'pse_25_days': 0,
                    'gm_3_5': 0,
                    'children_3_5': 0,
                    'gm': 'Data Not Entered',
                    'pse': 'Data Not Entered',
                    'sn': 'Data Not Entered'
                },
                {
                    'num_launched_awcs': 0,
                    'state_name': 'st4',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 0,
                    'lunch_25_days': 0,
                    'pse_eligible': 0,
                    'pse_21_days': 0,
                    'pse_25_days': 0,
                    'gm_3_5': 0,
                    'children_3_5': 0,
                    'gm': 'Data Not Entered',
                    'pse': 'Data Not Entered',
                    'sn': 'Data Not Entered'
                },
                {
                    'num_launched_awcs': 0,
                    'state_name': 'st5',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 0,
                    'lunch_25_days': 0,
                    'pse_eligible': 0,
                    'pse_21_days': 0,
                    'pse_25_days': 0,
                    'gm_3_5': 0,
                    'children_3_5': 0,
                    'gm': 'Data Not Entered',
                    'pse': 'Data Not Entered',
                    'sn': 'Data Not Entered'
                },
                {
                    'num_launched_awcs': 0,
                    'state_name': 'st6',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 0,
                    'lunch_25_days': 0,
                    'pse_eligible': 0,
                    'pse_21_days': 0,
                    'pse_25_days': 0,
                    'gm_3_5': 0,
                    'children_3_5': 0,
                    'gm': 'Data Not Entered',
                    'pse': 'Data Not Entered',
                    'sn': 'Data Not Entered'
                },
                {
                    'num_launched_awcs': 1,
                    'state_name': 'st7',
                    'district_name': 'Data Not Entered',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 0,
                    'lunch_25_days': 0,
                    'pse_eligible': 1,
                    'pse_21_days': 0,
                    'pse_25_days': 0,
                    'gm_3_5': 0,
                    'children_3_5': 1,
                    'gm': '0.00 %',
                    'pse': '0.00 %',
                    'sn': '0.00 %'
                }
            ],
            'aggregationLevel': 1,
            'recordsTotal': 7,
            'recordsFiltered': 7
        }
        self.assertDictEqual(expected, data)
    def test_get_service_delivery_data_state_3_6(self):
        """Service Delivery Report, 3-6 age group, drilled into state st1.

        Aggregation level 2 (district rows) filtered to ``state_id='st1'``
        and sorted by ``district_name``; expects the 'All' roll-up row
        followed by the single district row 'd1' with identical figures.
        """
        data = get_service_delivery_report_data(
            'icds-cas',
            0,
            10,
            'district_name',
            False,
            {
                'aggregation_level': 2,
                'state_id': 'st1',
            },
            2017,
            5,
            'children',
        )
        expected = {
            'data': [
                {
                    'num_launched_awcs': 10,
                    'state_name': 'All',
                    'district_name': 'All',
                    'block_name': 'All',
                    'supervisor_name': 'All',
                    'awc_name': 'All',
                    'lunch_21_days': 4,
                    'lunch_25_days': 0,
                    'pse_eligible': 483,
                    'pse_21_days': 7,
                    'pse_25_days': 0,
                    'gm_3_5': 234,
                    'children_3_5': 332,
                    'gm': '70.48 %',
                    'pse': '1.45 %',
                    'sn': '0.83 %'
                },
                {
                    'num_launched_awcs': 10,
                    'state_name': 'st1',
                    'district_name': 'd1',
                    'block_name': 'Data Not Entered',
                    'supervisor_name': 'Data Not Entered',
                    'awc_name': 'Data Not Entered',
                    'lunch_21_days': 4,
                    'lunch_25_days': 0,
                    'pse_eligible': 483,
                    'pse_21_days': 7,
                    'pse_25_days': 0,
                    'gm_3_5': 234,
                    'children_3_5': 332,
                    'gm': '70.48 %',
                    'pse': '1.45 %',
                    'sn': '0.83 %'
                }
            ],
            'aggregationLevel': 2,
            'recordsTotal': 1,
            'recordsFiltered': 1
        }
        self.assertDictEqual(expected, data)
| StarcoderdataPython |
6600413 | #copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import time
import tensorflow as tf
from tensorflow.core.protobuf.rewriter_config_pb2 import RewriterConfig
import glob
import os
import sys
from npu_bridge.estimator import npu_ops
from dllogger.logger import LOGGER
import dllogger.logger as dllg
#input_shape = [512, 512, 1] # (height, width, channel)
# User-defined model path and the frozen graph's input/output tensor names.
model_path='./unet-industrial_tf.pb'
input_tensor_name='input:0'
output_tensor_name='output:0'
class Classifier(object):
    """Frozen-graph inference wrapper for the Ascend NPU device.

    Loads the UNet-Industrial protobuf once, resolves the module-level
    input/output tensor names, and keeps a single session open for
    repeated ``do_infer`` calls.
    """

    def __init__(self):
        # Session config routing execution through the NPU optimizer with
        # offline mode, fp16 precision and remapping disabled.
        session_config = tf.ConfigProto()
        npu_optimizer = session_config.graph_options.rewrite_options.custom_optimizers.add()
        npu_optimizer.name = "NpuOptimizer"
        npu_optimizer.parameter_map["use_off_line"].b = True
        npu_optimizer.parameter_map["precision_mode"].s = tf.compat.as_bytes("force_fp16")
        session_config.graph_options.rewrite_options.remapping = RewriterConfig.OFF
        npu_optimizer.parameter_map["graph_run_mode"].i = 0

        self.graph = self.__load_model(model_path)
        self.input_tensor = self.graph.get_tensor_by_name(input_tensor_name)
        self.output_tensor = self.graph.get_tensor_by_name(output_tensor_name)
        # One long-lived session; main() is responsible for closing it.
        self.sess = tf.Session(config=session_config, graph=self.graph)

    def __load_model(self, model_file):
        """Parse the frozen GraphDef at ``model_file`` into a fresh graph."""
        graph_def = tf.GraphDef()
        with tf.gfile.GFile(model_file, "rb") as model_handle:
            graph_def.ParseFromString(model_handle.read())
        with tf.Graph().as_default() as loaded_graph:
            tf.import_graph_def(graph_def, name="")
        return loaded_graph

    def do_infer(self, batch_data):
        """Run one forward pass and return the output tensor's value."""
        return self.sess.run(self.output_tensor,
                             feed_dict={self.input_tensor: batch_data})
def DAGM2007_Dataset(data_dir, class_id=1, batch_size=1):
    """Build a tf.data pipeline over the DAGM2007 private test split.

    Reads ``test_list.csv`` (columns: image name, optional mask name,
    label), decodes each 512x512 grayscale image zero-centered to [-1, 1]
    and each defect mask scaled to [0, 1]; samples without a mask get an
    all-zero mask. Yields ``((input_image, mask_image), label)`` batches.

    :param data_dir: dataset root containing ``raw_images/private``.
    :param class_id: DAGM defect class folder to read (``Class<N>``).
    :param batch_size: batch size; incomplete final batches are dropped.
    :return: a batched, prefetching ``tf.data.Dataset``.
    """
    data_dir = os.path.join(data_dir, "raw_images/private/Class%d" % class_id)
    csv_file = os.path.join(data_dir, "test_list.csv")
    image_dir = os.path.join(data_dir, "Test")
    mask_image_dir = os.path.join(data_dir, "Test/Label")
    input_shape = mask_shape = [512, 512, 1]
    # NOTE(review): shuffle_buffer_size is never used below — the pipeline
    # does not shuffle; confirm whether shuffling was intended.
    shuffle_buffer_size = 10000
    def decode_csv(line):
        # One CSV row -> decoded (image, mask) pair plus integer label.
        input_image_name, image_mask_name, label = tf.decode_csv(
            line, record_defaults=[[""], [""], [0]], field_delim=','
        )
        def decode_image(filepath, resize_shape, normalize_data_method):
            image_content = tf.read_file(filepath)
            image = tf.image.decode_png(contents=image_content, channels=resize_shape[-1], dtype=tf.uint8)
            image = tf.image.resize_images(
                image,
                size=resize_shape[:2],
                method=tf.image.ResizeMethod.BILINEAR, # [BILINEAR, NEAREST_NEIGHBOR, BICUBIC, AREA]
                align_corners=False,
                preserve_aspect_ratio=True
            )
            image.set_shape(resize_shape)
            image = tf.cast(image, tf.float32)
            # "zero_centered" -> [-1, 1] (network input);
            # "zero_one"      -> [0, 1]  (mask target).
            if normalize_data_method == "zero_centered":
                image = tf.divide(image, 127.5) - 1
            elif normalize_data_method == "zero_one":
                image = tf.divide(image, 255.0)
            return image
        input_image = decode_image(
            filepath=tf.strings.join([image_dir, input_image_name], separator='/'),
            resize_shape=input_shape,
            normalize_data_method="zero_centered",
        )
        # Defect-free samples have an empty mask name -> all-zero mask.
        mask_image = tf.cond(
            tf.equal(image_mask_name, ""),
            true_fn=lambda: tf.zeros(mask_shape, dtype=tf.float32),
            false_fn=lambda: decode_image(
                filepath=tf.strings.join([mask_image_dir, image_mask_name], separator='/'),
                resize_shape=mask_shape,
                normalize_data_method="zero_one",
            ),
        )
        label = tf.cast(label, tf.int32)
        return (input_image, mask_image), label
    dataset = tf.data.TextLineDataset(csv_file)
    dataset = dataset.skip(1) # Skip CSV Header
    dataset = dataset.cache()
    dataset = dataset.apply(
        tf.data.experimental.map_and_batch(
            map_func=decode_csv,
            num_parallel_calls=64,
            batch_size=batch_size,
            drop_remainder=True,
        )
    )
    dataset = dataset.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
    return dataset
def iou_score_fn(y_pred, y_true, threshold, eps=1e-5):
    """Build a TF op for the batch-mean overlap score at ``threshold``.

    NOTE(review): despite the name, ``2*intersection / (|A| + |B|)`` is the
    Dice/F1 coefficient, not IoU (which would be ``intersection / union``)
    — confirm which metric is actually intended before comparing results
    against published numbers.

    :param y_pred: rank-4 numpy array of predictions (batch axis first).
    :param y_true: rank-4 numpy array of ground-truth masks, same shape.
    :param threshold: scalar used to binarize both arrays.
    :param eps: smoothing term so empty masks don't divide by zero.
    :return: scalar TF tensor (mean over the batch); caller runs it in a session.
    """
    # Binarize in numpy first ...
    y_true = y_true > threshold
    y_pred = y_pred > threshold
    y_true = y_true.astype(np.float32)
    y_pred = y_pred.astype(np.float32)
    intersection = y_true * y_pred
    # ... then reduce with TF ops over the non-batch axes (1, 2, 3).
    intersection = tf.reduce_sum(intersection, axis=(1, 2, 3))
    numerator = 2.0 * intersection + eps
    divisor = tf.reduce_sum(y_true, axis=(1, 2, 3)) + tf.reduce_sum(y_pred, axis=(1, 2, 3)) + eps
    return tf.reduce_mean(numerator / divisor)
def main():
    """Run full-dataset inference on the NPU and report per-threshold scores.

    Usage: ``python <script> <data_dir>``

    Streams the DAGM2007 class-1 test split batch-by-batch through the
    frozen UNet graph, accumulates the overlap score (see
    ``iou_score_fn``) at eight binarization thresholds, and prints each
    threshold's mean once the dataset iterator is exhausted.
    """
    filepath = sys.argv[1]
    classifier = Classifier()

    ds = DAGM2007_Dataset(data_dir=filepath, class_id=1, batch_size=2)
    # Renamed from `iter` — the original shadowed the builtin.
    iterator = ds.make_initializable_iterator()
    ds_sess = tf.Session()
    ds_sess.run(iterator.initializer)
    next_element = iterator.get_next()

    thresholds = [0.05, 0.125, 0.25, 0.5, 0.75, 0.85, 0.95, 0.99]
    # One accumulator list per threshold; batch scores are appended and
    # averaged after the epoch ends.
    IOU_THS = [[], [], [], [], [], [], [], []]
    i = 1
    while True:
        try:
            # Batch layout: ((input_image, mask_image), label).
            # Renamed from `input` — the original shadowed the builtin.
            batch = ds_sess.run(next_element)
            input_image, mask_image = batch[0]
            y_pred = classifier.do_infer(input_image)
            for j, threshold in enumerate(thresholds):
                # A fresh default graph + session per threshold keeps the
                # score ops from piling up across batches.
                tf.reset_default_graph()
                with tf.Session() as eval_sess:
                    iou_score = iou_score_fn(y_pred=y_pred, y_true=mask_image, threshold=threshold)
                    IOU_THS[j].append(eval_sess.run(iou_score))
            i += 1
            print("======batch %s finished ======" % str(i))
        except tf.errors.OutOfRangeError:
            # Dataset exhausted: report the mean score per threshold.
            print("### Total IoU_THS_0.05: ", np.mean(IOU_THS[0]))
            print("### Total IoU_THS_0.125: ", np.mean(IOU_THS[1]))
            print("### Total IoU_THS_0.25: ", np.mean(IOU_THS[2]))
            print("### Total IoU_THS_0.5: ", np.mean(IOU_THS[3]))
            print("### Total IoU_THS_0.75: ", np.mean(IOU_THS[4]))
            print("### Total IoU_THS_0.85: ", np.mean(IOU_THS[5]))
            print("### Total IoU_THS_0.95: ", np.mean(IOU_THS[6]))
            print("### Total IoU_THS_0.99: ", np.mean(IOU_THS[7]))
            break
    ds_sess.close()
    classifier.sess.close()

if __name__ == '__main__':
    main()
| StarcoderdataPython |
8106013 | <filename>Chapter 08/Chap08_Example8.68.py
class Myfather(object):
    """First parent class in the multiple-inheritance (MRO) demo."""
    def __init__(self):
        super().__init__()  # cooperative init: continue along the MRO
        print("I am a Father class constructor")
    def mydisplay_father(self):
        print("I am a Father class instance method")
class Mymother(object):
    """Second parent class in the multiple-inheritance (MRO) demo."""
    def __init__(self):
        super().__init__()  # cooperative init: continue along the MRO
        print("I am a Mother class constructor")
    def mydisplay_mother(self):
        print("I am a Mother class instance method")
class MyDaughter(Myfather,Mymother):
    """Child of both parents; MRO: MyDaughter -> Myfather -> Mymother -> object."""
    def __init__(self):
        super().__init__() # calling parent class constructor.
        print("I am a Daughter class constructor")
    def mydisplay_Daughter(self):
        print("I am a Daughter class instance method")
# Constructors run in reverse-MRO order, so this prints the Mother,
# then Father, then Daughter constructor messages.
myobj = MyDaughter()
print(MyDaughter.mro())  # show the method resolution order explicitly
3385310 | from SpecImports import *
# Spec data tables; empty in this particular spec module. Presumably keyed
# and populated by the SpecImports machinery — TODO confirm against other
# spec files before relying on the shapes.
BattleCells = {}
CogData = []
ReserveCogData = []
| StarcoderdataPython |
1884185 | <reponame>adisbladis/geostore
from unittest.mock import MagicMock, patch
from geostore.api_keys import SUCCESS_KEY
from geostore.error_response_keys import ERROR_MESSAGE_KEY
from geostore.step_function_keys import DATASET_ID_KEY, VERSION_ID_KEY
from geostore.validation_summary.task import lambda_handler
from .aws_utils import any_lambda_context
from .stac_generators import any_dataset_id, any_dataset_version_id
def should_require_dataset_id() -> None:
    """Omitting the dataset ID should yield a schema-validation error."""
    event = {VERSION_ID_KEY: any_dataset_version_id()}
    expected = {ERROR_MESSAGE_KEY: "'dataset_id' is a required property"}
    assert lambda_handler(event, any_lambda_context()) == expected
def should_require_dataset_version() -> None:
    """Omitting the version ID should yield a schema-validation error."""
    event = {DATASET_ID_KEY: any_dataset_id()}
    expected = {ERROR_MESSAGE_KEY: "'version_id' is a required property"}
    assert lambda_handler(event, any_lambda_context()) == expected
@patch("geostore.validation_summary.task.validation_results_model_with_meta")
def should_return_success_false_if_any_validation_results_are_unsuccessful(
    validation_results_model_mock: MagicMock,
) -> None:
    """One failed validation result should flip the summary to failure."""
    # Arrange: the results model reports a single unsuccessful outcome.
    results_model = validation_results_model_mock.return_value
    results_model.validation_outcome_index.count.return_value = 1

    event = {DATASET_ID_KEY: any_dataset_id(), VERSION_ID_KEY: any_dataset_version_id()}

    # Act / assert: the handler reports overall failure.
    assert lambda_handler(event, any_lambda_context()) == {SUCCESS_KEY: False}
| StarcoderdataPython |
6570732 | <filename>perspectivesx_project/django_auth_lti/backends.py
import logging
from time import time
import oauth2
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.core.exceptions import PermissionDenied
from ims_lti_py.tool_provider import DjangoToolProvider
logger = logging.getLogger(__name__)
class LTIAuthBackend(ModelBackend):

    """
    Django authentication backend for LTI launch requests.

    By default, the ``authenticate`` method creates ``User`` objects for
    usernames that don't already exist in the database. Subclasses can disable
    this behavior by setting the ``create_unknown_user`` attribute to
    ``False``.
    """

    # Create a User object if not already in the database?
    create_unknown_user = True

    # Username prefix for users without an sis source id
    unknown_user_prefix = "cuid:"

    def authenticate(self, request):
        """Authenticate an LTI launch ``request`` and return a ``User``.

        Looks up the OAuth consumer secret from
        ``settings.LTI_OAUTH_CREDENTIALS``, checks the request signature and
        timestamp, then finds (or, if ``create_unknown_user``, creates) the
        matching user and syncs name/email from the launch parameters.

        Returns ``None`` when no consumer key is present; raises
        ``PermissionDenied`` on credential/configuration failures.
        """
        logger.info("about to begin authentication process")
        request_key = request.POST.get('oauth_consumer_key', None)
        if request_key is None:
            logger.error("Request doesn't contain an oauth_consumer_key; can't continue.")
            return None
        if not settings.LTI_OAUTH_CREDENTIALS:
            logger.error("Missing LTI_OAUTH_CREDENTIALS in settings")
            raise PermissionDenied
        secret = settings.LTI_OAUTH_CREDENTIALS.get(request_key)
        if secret is None:
            logger.error("Could not get a secret for key %s" % request_key)
            raise PermissionDenied
        logger.debug('using key/secret %s/%s' % (request_key, secret))
        tool_provider = DjangoToolProvider(request_key, secret, request.POST.dict())
        # Debug dump of the launch parameters and request metadata.
        postparams = request.POST.dict()
        logger.debug('request is secure: %s' % request.is_secure())
        for key in postparams:
            logger.debug('POST %s: %s' % (key, postparams.get(key)))
        logger.debug('request abs url is %s' % request.build_absolute_uri())
        for key in request.META:
            logger.debug('META %s: %s' % (key, request.META.get(key)))
        logger.info("about to check the signature")
        try:
            request_is_valid = tool_provider.is_valid_request(request)
        except oauth2.Error:
            logger.exception(u'error attempting to validate LTI launch %s',
                             postparams)
            request_is_valid = False
        # NOTE(review): the enforcement below is commented out, so a request
        # with a known consumer key is accepted even when its OAuth signature
        # is INVALID. Confirm this is a deliberate (e.g. local-dev) choice
        # before using this backend in production.
        '''
        if not request_is_valid:
            logger.error("Invalid request: signature check failed.")
            raise PermissionDenied
        '''
        logger.info("done checking the signature")
        logger.info("about to check the timestamp: %d" % int(tool_provider.oauth_timestamp))
        # NOTE(review): the stale-timestamp rejection is also disabled below
        # (only logged) — replay-protection is effectively off.
        if time() - int(tool_provider.oauth_timestamp) > 60 * 60:
            logger.error("OAuth timestamp is too old.")
            #raise PermissionDenied
        else:
            logger.info("timestamp looks good")
        logger.info("done checking the timestamp")
        # (this is where we should check the nonce)
        # if we got this far, the user is good
        user = None
        # Retrieve username from LTI parameter or default to an overridable function return value
        username = tool_provider.lis_person_sourcedid or self.get_default_username(
            tool_provider, prefix=self.unknown_user_prefix)
        username = self.clean_username(username)  # Clean it
        email = tool_provider.lis_person_contact_email_primary
        first_name = tool_provider.lis_person_name_given
        last_name = tool_provider.lis_person_name_family
        logger.info("We have a valid username: %s" % username)
        UserModel = get_user_model()
        # Note that this could be accomplished in one try-except clause, but
        # instead we use get_or_create when creating unknown users since it has
        # built-in safeguards for multiple threads.
        if self.create_unknown_user:
            user, created = UserModel.objects.get_or_create(**{
                UserModel.USERNAME_FIELD: username,
            })
            if created:
                logger.debug('authenticate created a new user for %s' % username)
            else:
                logger.debug('authenticate found an existing user for %s' % username)
        else:
            logger.debug(
                'automatic new user creation is turned OFF! just try to find and existing record')
            try:
                user = UserModel.objects.get_by_natural_key(username)
            except UserModel.DoesNotExist:
                logger.debug('authenticate could not find user %s' % username)
                # should return some kind of error here?
                pass
        # update the user record with the launch's profile data
        if email:
            user.email = email
        if first_name:
            user.first_name = first_name
        if last_name:
            user.last_name = last_name
        user.save()
        logger.debug("updated the user record in the database")
        return user

    def clean_username(self, username):
        """Hook for subclasses to normalize usernames; identity by default."""
        return username

    def get_default_username(self, tool_provider, prefix=''):
        """
        Return a default username value from tool_provider in case offical
        LTI param lis_person_sourcedid was not present.
        """
        # Default back to the custom canvas user id, then the LTI user_id.
        uname = tool_provider.get_custom_param('canvas_user_id') or tool_provider.user_id
        return prefix + uname
| StarcoderdataPython |
3233278 | <filename>20_valid_parentheses.py<gh_stars>0
'''
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
An input string is valid if:
Open brackets must be closed by the same type of brackets.
Open brackets must be closed in the correct order.
Note that an empty string is also considered valid.
Example 1:
Input: "()"
Output: true
Example 2:
Input: "()[]{}"
Output: true
Example 3:
Input: "(]"
Output: false
Example 4:
Input: "([)]"
Output: false
Example 5:
Input: "{[]}"
Output: true
https://leetcode.com/problems/valid-parentheses/description/
'''
class Solution(object):
    def isValid(self, s):
        """
        Return True iff every bracket in ``s`` is closed by the matching
        bracket type in correct (LIFO) order; an empty string is valid.

        :type s: str
        :rtype: bool
        """
        stack = []  # renamed from `l`, which shadowed nothing but read badly
        closing_dict = {")": "(", "}": "{", "]": "["}
        opening = {"(", "{", "["}
        for char in s:
            if char in opening:
                stack.append(char)
            elif char in closing_dict:
                # Bug fix: guard against a closer with no matching opener on
                # the stack (e.g. ")" as the first character), which
                # previously raised IndexError on stack[-1].
                if not stack or closing_dict[char] != stack[-1]:
                    return False
                stack.pop()
            else:
                # Any non-bracket character makes the string invalid.
                return False
        # Valid only if every opener was matched and popped.
        return not stack
| StarcoderdataPython |
5148495 | from os import path
import subprocess
import anndata as ad
# import pandas as pd
import numpy as np
# Smoke-test script: run the built asw_batch metric component against the
# joint-embedding test resources and sanity-check its output .h5ad.
np.random.seed(42)
metric = 'asw_batch'
# metric_file = metric + '.tsv'
metric_file = metric + '.h5ad'
print(">> Running script")
# Invoke the compiled component; raises CalledProcessError on non-zero exit.
out = subprocess.check_output([
    "./" + metric,
    "--input_prediction", 'resources_test/joint_embedding/test_resource.prediction.h5ad',
    "--input_solution", 'resources_test/joint_embedding/test_resource.solution.h5ad',
    "--output", metric_file
]).decode("utf-8")
print(">> Checking whether file exists")
assert path.exists(metric_file)
# result = pd.read_table(metric_file)
result = ad.read_h5ad(metric_file)
sol = ad.read_h5ad('resources_test/joint_embedding/test_resource.solution.h5ad')
pred = ad.read_h5ad('resources_test/joint_embedding/test_resource.prediction.h5ad')
# print(">> Check that score makes sense")
# assert result.shape == (1, 4)
# assert result['metric'][0] == metric
# score = result.loc[0, 'value']
# print(score)
print(">> Check contents of result.uns")
# The metric component must copy the dataset/method ids through unchanged
# and report exactly one metric value.
assert 'dataset_id' in result.uns
assert result.uns['dataset_id'] == sol.uns['dataset_id']
assert 'method_id' in result.uns
assert result.uns['method_id'] == pred.uns['method_id']
assert 'metric_ids' in result.uns
assert result.uns['metric_ids'] == [metric]
assert 'metric_values' in result.uns
score = result.uns['metric_values'][0]
print(score)
assert 0 <= score <= 1
# NOTE(review): exact float equality pins a regression value; this will
# break on any library-version change that perturbs the score slightly.
assert score == 0.7223580315216693
print(">> All tests passed successfully")
| StarcoderdataPython |
13035 | import pandas as pd
# Global variable to set the base path to our dataset folder
base_url = '../dataset/'


def update_mailing_list_pandas(filename):
    """Count the active subscribers in a mailing-list CSV.

    Reads ``filename`` from the dataset folder (``base_url``), keeps only
    the rows whose ``active`` flag is truthy, and returns how many remain.
    (Bug fix: the original body was an unfinished exercise template and did
    not even parse — ``df = # ...`` is a syntax error.)

    :param filename: CSV file name relative to ``base_url``; the file must
        contain an ``active`` column (boolean or 0/1).
    :return: number of rows whose ``active`` flag is set.
    """
    df = pd.read_csv(base_url + filename)
    # Coerce the flag to bool so both True/False and 0/1 columns work,
    # then count the truthy rows.
    return int(df['active'].astype(bool).sum())
# Calling the function to test your code
# NOTE: expects '../dataset/mailing_list.csv' to exist relative to the
# working directory when this module is run.
print(update_mailing_list_pandas('mailing_list.csv'))
| StarcoderdataPython |
8099195 | <filename>casepro/msgs/migrations/0008_messageaction.py
# -*- coding: utf-8 -*-
from __future__ import print_function, unicode_literals
import django.contrib.postgres.fields
from django.conf import settings
from django.db import migrations, models
def migrate_messageactions(apps, schema_editor):
    """Copy every legacy cases.MessageAction row into msgs.MessageAction."""
    old_model = apps.get_model("cases", "MessageAction")
    new_model = apps.get_model("msgs", "MessageAction")

    legacy_actions = list(old_model.objects.all())
    for legacy in legacy_actions:
        new_model.objects.create(
            org=legacy.org,
            messages=legacy.messages,
            action=legacy.action,
            created_by=legacy.created_by,
            created_on=legacy.created_on,
            label=legacy.label,
        )

    if legacy_actions:
        print("Migrated %d message actions to new model in msgs app" % len(legacy_actions))
class Migration(migrations.Migration):
    """Create msgs.MessageAction and copy legacy rows from the cases app."""

    dependencies = [
        ("cases", "0020_delete_messageexport"),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ("orgs", "0015_auto_20160209_0926"),
        ("msgs", "0007_unhandled_index"),
    ]

    operations = [
        migrations.CreateModel(
            name="MessageAction",
            fields=[
                ("id", models.AutoField(verbose_name="ID", serialize=False, auto_created=True, primary_key=True)),
                # Backend message ids this action applied to (Postgres array).
                ("messages", django.contrib.postgres.fields.ArrayField(base_field=models.IntegerField(), size=None)),
                (
                    "action",
                    models.CharField(
                        max_length=1,
                        choices=[
                            ("F", "Flag"),
                            ("N", "Un-flag"),
                            ("L", "Label"),
                            ("U", "Remove Label"),
                            ("A", "Archive"),
                            ("R", "Restore"),
                        ],
                    ),
                ),
                ("created_on", models.DateTimeField(auto_now_add=True)),
                (
                    "created_by",
                    models.ForeignKey(
                        related_name="message_actions", to=settings.AUTH_USER_MODEL, on_delete=models.PROTECT
                    ),
                ),
                # Only set for label/unlabel actions.
                ("label", models.ForeignKey(to="cases.Label", null=True, on_delete=models.PROTECT)),
                (
                    "org",
                    models.ForeignKey(
                        related_name="message_actions",
                        verbose_name="Organization",
                        to="orgs.Org",
                        on_delete=models.PROTECT,
                    ),
                ),
            ],
        ),
        # Data migration: copy rows from the old cases.MessageAction model.
        migrations.RunPython(migrate_messageactions),
    ]
| StarcoderdataPython |
3474441 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import range
from keras import backend as K
from agents import DDQN
from memory import SimpleExperienceReplay, Buffer
from models import duel_atari_cnn as nn
from envs import Env
from utils import *
from collections import Counter
import gym
import numpy as np
import tensorflow as tf
import argparse
import random
def play(ql, env, buf, epsilon=0.0):
    """Run a single rendered episode with agent ``ql`` and print totals.

    ``buf`` accumulates the observation history that forms the agent's
    state; ``epsilon`` is the probability of a random action.
    """
    buf.reset()
    buf.add(env.reset())

    episode_reward = 0
    steps = 0
    actions_count = Counter()
    done = False

    while not done:
        env.render()

        chosen = ql.predict_action(buf.state, epsilon)
        frame, reward, done, _ = env.step(chosen)
        buf.add(frame)

        actions_count[chosen] += 1
        if reward != 0:
            episode_reward += reward
        steps += 1

    print("Episode Reward {}".format(episode_reward))
    print("Action summary {}".format(actions_count))
parser.add_argument('--games', type=int, default=10, help='Number of games played')
parser.add_argument('--epsilon', type=float, default=0, help='Epsilon value, probability of a random action')
parser.add_argument('--batch_size', type=int, default=32, help='Number of states to train on each step')
parser.add_argument('--gamma', type=float, default=0.99, help='Gamma for Q-Learning steps')
parser.add_argument('--height', type=int, default=80, help='Observation height after resize')
parser.add_argument('--width', type=int, default=80, help='Observation width after resize')
parser.add_argument('--history_window', type=int, default=4, help='Number of observations forming a state')
parser.add_argument('--checkpoint_dir', type=str, help='Directory TF Graph will be saved to periodically')
parser.add_argument('--name', type=str, help='Name of OpenAI environment to run, ex. (Breakout-v0, Pong-v0)')
parser.add_argument('--seed', type=int, default=0, help='Random seed')
args = parser.parse_args()
print(args)
if not args.checkpoint_dir:
parser.error('--checkpoint_dir must not be empty')
if not args.name:
parser.error('--name must not be empty')
gym_env = gym.make(args.name)
np.random.seed(args.seed)
random.seed(args.seed)
tf.set_random_seed(args.seed)
gym_env.seed(args.seed)
network_input_shape = (args.history_window, args.height, args.width)
n_actions = gym_env.action_space.n
observation_shape = gym_env.observation_space.shape
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Graph().as_default() as g, tf.Session(config=config) as sess:
sess = tf.Session(config=config)
K.set_session(sess)
main = nn(network_input_shape, n_actions)
target = nn(network_input_shape, n_actions)
main.compile(optimizer='rmsprop', loss='mse')
saver = tf.train.Saver()
load_checkpoint(saver, args.checkpoint_dir, sess)
buf = Buffer(args.history_window, (args.height, args.width))
obs_preprocess = lambda i: preprocess(i, args.height, args.width)
reward_clip = lambda r: np.clip(r, -1.0, 1.0)
env = Env(gym_env, obs_preprocess, reward_clip)
ql = DDQN(main, target, args.batch_size, n_actions, args.gamma)
print("Playing {} games ...".format(args.games))
for _ in range(args.games):
play(ql, env, buf, epsilon=args.epsilon)
| StarcoderdataPython |
6433839 | from .test_paste import * | StarcoderdataPython |
6651806 | <reponame>adityagoel28/deployy
# Generated by Django 3.1.2 on 2022-02-01 16:55
from django.db import migrations, models
class Migration(migrations.Migration):
    """Change VaccineDetails.availability to a plain IntegerField."""

    dependencies = [
        ('Home', '0003_vaccinecenterdetails_vaccinedetails'),
    ]

    operations = [
        migrations.AlterField(
            model_name='vaccinedetails',
            name='availability',
            field=models.IntegerField(),
        ),
    ]
| StarcoderdataPython |
89234 | <filename>libs/file/nxpy/core/file/__init__.py
from .file import *
| StarcoderdataPython |
341463 | <gh_stars>1-10
import multiprocessing
import os
import time
import traceback
from datetime import datetime
from multiprocessing import current_process
from multiprocessing.context import Process
from queue import Queue
from injector import inject
from pdip.base import Pdi
from pdip.configuration.models.application import ApplicationConfig
from pdip.cqrs import Dispatcher, ICommandHandler
from pdip.data import RepositoryProvider
from pdip.data.decorators import transactionhandler
from pdip.dependency.container import DependencyContainer
from pdip.logging.loggers.database import SqlLogger
from process.application.StartExecutionProcess.StartExecutionProcessCommand import StartExecutionProcessCommand
from process.application.execution.services.OperationExecution import OperationExecution
from process.domain.aps import ApSchedulerJob, ApSchedulerEvent, ApSchedulerJobEvent
from process.domain.operation import DataOperation, DataOperationJob
class StartExecutionProcessCommandHandler(ICommandHandler[StartExecutionProcessCommand]):
    """Runs a data-operation execution in a dedicated child process.

    ``handle`` spawns a child (entered via ``start_process``) so the
    execution gets a fresh pdip container, waits for the child's startup
    message on a manager queue, and logs timing around the whole cycle.
    """

    @inject
    def __init__(self,
                 dispatcher: Dispatcher,
                 logger: SqlLogger,
                 repository_provider: RepositoryProvider,
                 *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.logger = logger
        self.repository_provider = repository_provider
        self.dispatcher = dispatcher

    def handle(self, command: StartExecutionProcessCommand):
        """Spawn the execution process for ``command`` and wait for it to boot.

        :param command: carries ``DataOperationId``, ``JobId`` (APScheduler
            job id) and ``DataOperationJobExecutionId``.
        :raises Exception: when the data operation does not exist or the
            child process cannot be started/observed.
        """
        # Bug fix: define before the try block so the finally clause cannot
        # raise UnboundLocalError (masking the real exception) when we fail
        # before the Manager is created.
        manager = None
        try:
            start = time.time()
            start_datetime = datetime.now()
            application_config = DependencyContainer.Instance.get(ApplicationConfig)
            data_operation_query = DependencyContainer.Instance.get(RepositoryProvider).get(
                DataOperation).filter_by(
                Id=command.DataOperationId)
            data_operation = data_operation_query.first()
            if data_operation is None:
                raise Exception('Operation Not Found')
            self.logger.info(
                f"{command.DataOperationId}-{command.JobId}-{data_operation.Name} Execution Create started",
                job_id=command.DataOperationJobExecutionId)
            # Manager queue lets the child post its startup message back.
            manager = multiprocessing.Manager()
            process_queue = manager.Queue()
            operation_process = Process(target=self.start_process,
                                        args=(application_config.root_directory,
                                              command.DataOperationId, command.JobId,
                                              command.DataOperationJobExecutionId,
                                              process_queue))
            operation_process.start()
            while True:
                operation_process.join(timeout=1)
                if operation_process.is_alive():
                    # Child is up: wait (bounded) for its startup message.
                    result = process_queue.get(timeout=60)
                    self.logger.info(
                        f"{command.DataOperationId}-{command.JobId}-{data_operation.Name} Execution running on {operation_process.pid}. Process Message:{result}",
                        job_id=command.DataOperationJobExecutionId)
                    process_queue.task_done()
                    break
                else:
                    # Bug fix: the child already exited; previously this loop
                    # spun forever because only the "alive" branch broke.
                    break
            end_datetime = datetime.now()
            end = time.time()
            self.logger.info(
                f"{command.DataOperationId}-{command.JobId}-{data_operation.Name} Execution Create finished. Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
                job_id=command.DataOperationJobExecutionId)
        except Exception as ex:
            self.logger.exception(ex,
                                  f"{command.DataOperationId}-{command.JobId} Execution Create getting error. ",
                                  job_id=command.DataOperationJobExecutionId)
            raise
        finally:
            if manager is not None:
                manager.shutdown()

    @staticmethod
    def start_process(root_directory: str, data_operation_id: int, job_id: int,
                      data_operation_job_execution_id: int,
                      process_queue: Queue):
        """Child-process entry point: bootstrap a fresh pdip container and
        delegate to :meth:`start`."""
        pdi = Pdi(root_directory=root_directory, initialize_flask=False)
        pdi.get(StartExecutionProcessCommandHandler).start(
            data_operation_id=data_operation_id, job_id=job_id,
            data_operation_job_execution_id=data_operation_job_execution_id,
            process_queue=process_queue)

    def check_removed_job(self, ap_scheduler_job_id):
        """Delete the DataOperationJob row when its APScheduler job has a
        'removed' event (APScheduler EVENT_JOB_REMOVED == 2 ** 10)."""
        EVENT_JOB_REMOVED = 2 ** 10
        job_detail_query = self.repository_provider.query(
            ApSchedulerJob, ApSchedulerEvent, ApSchedulerJobEvent
        ) \
            .filter(ApSchedulerJobEvent.ApSchedulerJobId == ApSchedulerJob.Id) \
            .filter(ApSchedulerJobEvent.EventId == ApSchedulerEvent.Id) \
            .filter(ApSchedulerEvent.Code == EVENT_JOB_REMOVED) \
            .filter(ApSchedulerJob.Id == ap_scheduler_job_id)
        job_detail = job_detail_query.first()
        if job_detail is not None:
            data_operation_job = self.repository_provider.get(
                DataOperationJob).first(IsDeleted=0,
                                        ApSchedulerJobId=job_detail.ApSchedulerJob.Id)
            if data_operation_job is not None:
                self.repository_provider.get(DataOperationJob).delete_by_id(data_operation_job.Id)

    @transactionhandler
    def start(self, data_operation_id: int, job_id: int, data_operation_job_execution_id: int,
              process_queue: Queue):
        """Run the data operation inside the child process.

        Posts a startup message on ``process_queue`` first (the parent waits
        on it), then executes the operation, logging success or the full
        traceback on failure, and finally cleans up removed scheduler jobs.
        """
        process_queue.put(f'{os.getppid()} initialized {current_process().name}({os.getpid()}) process')
        start = time.time()
        start_datetime = datetime.now()
        self.logger.info(f"{data_operation_id}-{job_id} Data Operations Started",
                         job_id=data_operation_job_execution_id)
        try:
            DependencyContainer.Instance.get(OperationExecution).start(
                data_operation_id=data_operation_id,
                job_id=job_id,
                data_operation_job_execution_id=data_operation_job_execution_id)
            self.logger.info(
                f"{data_operation_id}-{job_id} Data Operations Finished",
                job_id=data_operation_job_execution_id)
        except Exception as ex:
            # Failures are logged (with traceback) rather than re-raised so
            # the finally-block bookkeeping still runs in the child.
            exc = traceback.format_exc() + '\n' + str(ex)
            self.logger.info(
                f"{data_operation_id}-{job_id} Data Operations Finished With Error: {exc}",
                job_id=data_operation_job_execution_id)
        finally:
            self.check_removed_job(ap_scheduler_job_id=job_id)
            end_datetime = datetime.now()
            end = time.time()
            self.logger.info(
                f"{data_operation_id}-{job_id} Start :{start_datetime} - End :{end_datetime} - ElapsedTime :{end - start}",
                job_id=data_operation_job_execution_id)
            # Release the logger (and its underlying resources) before the
            # child process exits.
            del self.logger
| StarcoderdataPython |
11285248 | from vell import spell
def test_import():
    """The spell module must expose a ``check`` attribute."""
    # hasattr() is the idiomatic attribute probe; the original peeked at
    # ``spell.__dict__.keys()``, which is equivalent for a module but indirect.
    assert hasattr(spell, 'check')
def test_check():
    """Smoke test: spell.check() must be callable with no arguments."""
    spell.check()
| StarcoderdataPython |
6611914 | # This sample tests for generic protocol variance consistency.
from typing import Protocol, TypeVar, Union
# pyright: strict
# TypeVars covering the declaration styles the checker must infer variance
# for: unconstrained, bound, value-constrained, and explicitly co-/contravariant.
_T1 = TypeVar("_T1")
_T2 = TypeVar("_T2", bound=int)
_T3 = TypeVar("_T3", bytes, str)
_T1_co = TypeVar("_T1_co", covariant=True)
_T1_contra = TypeVar("_T1_contra", contravariant=True)
# _T1/_T2/_T3 each appear in both parameter and return positions, so their
# (implicitly invariant) declarations are consistent -- no error expected.
class Protocol1(Protocol[_T1, _T2, _T3]):
    def m1(self, p0: _T1, p1: _T2, p2: _T3) -> Union[_T1, _T2]:
        ...
    def m2(self) -> _T1:
        ...
    def m3(self) -> _T2:
        ...
    def m4(self) -> _T3:
        ...
# This should generate an error because _T3 should be contravariant
# (_T3 is used only in parameter position here).
class Protocol2(Protocol[_T1, _T2, _T3]):
    def m1(self, p0: _T1, p1: _T2, p2: _T3) -> _T1:
        ...
    def m2(self) -> _T1:
        ...
    def m3(self) -> _T2:
        ...
# _T1_co is unused in any method signature, so the covariant declaration
# cannot conflict with usage -- no error expected.
class Protocol3(Protocol[_T1_co]):
    def m1(self) -> None:
        pass
# This should generate an error because _T1 should be contravariant.
class Protocol4(Protocol[_T1]):
    def m1(self, p0: _T1) -> None:
        ...
# This should generate an error because _T1_co should be contravariant.
class Protocol5(Protocol[_T1_co]):
    # This should generate an error because a covariant TypeVar
    # should not be used as a parameter type.
    def m1(self, p0: _T1_co) -> None:
        ...
# This should generate an error because _T1 should be covariant.
class Protocol6(Protocol[_T1]):
    def m1(self) -> _T1:
        ...
# This should generate an error because _T1_contra should be covariant.
class Protocol7(Protocol[_T1_contra]):
    # This should generate an error because a contravariant TypeVar
    # should not be used as a return type.
    def m1(self) -> _T1_contra:
        ...
# _T1 appears in both return (m1) and parameter (m2) position, so the
# invariant declaration is correct -- no error expected.
class Protocol8(Protocol[_T1]):
    def m1(self) -> _T1:
        ...
    def m2(self, p1: _T1) -> None:
        pass
| StarcoderdataPython |
4922722 | from django.db import models
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _
from model_utils import Choices
from model_utils.models import TimeStampedModel
from ..utils import markup
class Scrap(TimeStampedModel):
    """A short user-entered snippet whose title is written in a markup language."""
    # Choice pairs built from utils.markup's supported languages.
    MARKUP_LANGUAGE = Choices(*markup.LANGUAGES)
    raw_title = models.CharField(_('Raw scrap title'), max_length=100)
    markup_type = models.CharField(choices=MARKUP_LANGUAGE, max_length=10,
                                   default=markup.DEFAULT_LANGUAGE)
    @cached_property
    def html_title(self):
        """Render raw_title to inline HTML (computed once per instance)."""
        # Pick the renderer matching this scrap's markup_type.
        convert = markup.html_inline_renderer_for(self.markup_type)
        return convert(self.raw_title)
    def __str__(self):
        return self.raw_title
| StarcoderdataPython |
1913641 | <reponame>DanielJDufour/cambio<gh_stars>0
#-*- coding: utf-8 -*-
from unittest import main, TestCase
from cambio import add_param_to_class_instantiation
from cambio import find_all_named_parameters
from cambio import remove_class_definition
from cambio import remove_class_instantiation_parameter
from cambio import remove_comments
from cambio import remove_imports
from cambio import declare_variable
from cambio import replace_class
from cambio import replace_variable_declaration
class TestRemovingClassDeclaration(TestCase):
    """remove_class_definition must drop exactly the named class's block."""
    def test_removing_multi_line_class_declaration(self):
        old_code = '''
class A():
    prop1 = 1
    prop2 = 2
    prop3 = 3
class B():
    prop1 = 1
    prop2 = 2
    prop3 = 3
class C():
    prop1 = 1
    prop2 = 2
    prop3 = 3
'''
        new_code = remove_class_definition(old_code, "B")
        # Only class B (header + indented body) should be removed.
        expected_code = '''
class A():
    prop1 = 1
    prop2 = 2
    prop3 = 3
class C():
    prop1 = 1
    prop2 = 2
    prop3 = 3
'''
        self.assertEqual(new_code, expected_code)
class TestRemovingComments(TestCase):
    """remove_comments must strip '#' comment lines but keep code."""
    def test_removing_comments(self):
        old_code = "# here's a comment\n\nprint('hello')"
        new_code = remove_comments(old_code)
        self.assertEqual(new_code.strip(), "print('hello')")
class TestRemovingImports(TestCase):
    """remove_imports must leave code that runs without the imports."""
    def test_removing_imports(self):
        old_code = "from non_existent_package import non_existent_module\n\nfruit='apple'"
        code_without_imports = remove_imports(old_code)
        # Executing proves the unresolvable import really was removed.
        exec(code_without_imports.strip())
class TestDeclaringVariable(TestCase):
    """Tests for declare_variable (string and numeric values)."""
    # NOTE(review): assertTrue(x, msg) only checks that x is truthy -- its
    # second argument is the failure *message*, not an expected value, so the
    # comparisons below never actually run.  These were presumably meant to be
    # assertEqual(new_settings, "...").  Also the literal expected strings drop
    # a digit ('12345678' vs the '123456789' that was passed in) -- confirm the
    # intended output before tightening the assertions.
    def test_declaring_string(self):
        old_settings = "from sys import version_info\npython_version = version_info.major"
        new_settings = declare_variable(old_settings, "SECRET_KEY", '123456789')
        self.assertTrue(new_settings, "from sys import version_info\nSECRET_KEY = '12345678'\npython_version = version_info.major")
    def test_declaring_float(self):
        old_settings = "from sys import version_info\npython_version = version_info.major"
        new_settings = declare_variable(old_settings, "SECRET_NUMBER", 123456789)
        self.assertTrue(new_settings, "from sys import version_info\nSECRET_NUMBER = 12345678\npython_version = version_info.major")
class TestReplacingClass(TestCase):
    """replace_class swaps class names, optionally guarded by a predicate."""
    def test_replacing_class(self):
        old_code = "my_fruit = Apple(age=1)"
        new_code = replace_class(old_code, "Apple", "Orange")
        self.assertEqual(new_code, "my_fruit = Orange(age=1)")
    def test_conditionally_replacing_class(self):
        old_code = "fruits = [new Apple(old=False), new Apple(old=True)]"
        # Only instantiations whose text matches the predicate are replaced.
        new_code = replace_class(old_code, "Apple", "Orange", lambda data : 'old=True' in data['text'])
        self.assertEqual(new_code, "fruits = [new Apple(old=False), new Orange(old=True)]")
class TestReplacingVariableDeclaration(TestCase):
    """replace_variable_declaration rewrites a variable's assigned value."""
    def test_replacing_variable_declaration(self):
        old_code = "HTTP_ORIGIN = 'http://localhost:8000'"
        new_code = replace_variable_declaration(old_code, 'HTTP_ORIGIN', 'http://localhost:4200')
        self.assertEqual(new_code, "HTTP_ORIGIN = 'http://localhost:4200'")
class TestFindingsClassInstantiationParameters(TestCase):
    """find_all_named_parameters yields regex matches for keyword arguments."""
    def test_finding_class_instantiation_parameters(self):
        old_code = 'new Fruit(age_in_days=5, type="Apple", country="USA")'
        params = [match.group() for match in find_all_named_parameters(old_code)]
        # Matches after the first include the leading ", " separator.
        self.assertEqual(str(params), str(['age_in_days=5', ', type="Apple"', ', country="USA"']))
class TestAddingParamToClassInstantiation(TestCase):
    """add_param_to_class_instantiation appends keyword args to constructor calls."""
    def test_adding_param_to_class_instantiation_with_one_param(self):
        old_code = 'new Fruit(country_code="USA")'
        new_code = add_param_to_class_instantiation(old_code, 'Fruit', 'type', 'Apple')
        self.assertEqual(new_code, 'new Fruit(country_code="USA", type="Apple")')
    def test_adding_param_to_class_instantiation(self):
        old_code = 'new Fruit(age_in_days=5, type="Apple", country="USA")'
        new_code = add_param_to_class_instantiation(old_code, 'Fruit', 'quality', '100')
        self.assertEqual(new_code, 'new Fruit(age_in_days=5, type="Apple", country="USA", quality="100")')
    def test_adding_calculated_param_to_class_instantiation(self):
        # The value may be a callable computed from the surrounding line.
        old_code = 'new Fruit(age_in_days=5, type="Apple", country="USA")'
        new_code = add_param_to_class_instantiation(old_code, 'Fruit', 'expiration', lambda data: 10 if 'Apple' in data['line'] else 1)
        self.assertEqual(new_code, 'new Fruit(age_in_days=5, type="Apple", country="USA", expiration=10)')
    def test_conditionally_adding_param_to_class_instantiation(self):
        old_code ='''
class Breakfast:
    origin = new Egg()
class Lunch:
    origin = new Egg()
'''
        # Predicate inspects the text before the match to target one class only.
        new_code = add_param_to_class_instantiation(old_code, 'Egg', 'scrambled', True, lambda data : data['before'].rindex(' Breakfast') > data['before'].rindex('class'))
        self.assertEqual(new_code, '''
class Breakfast:
    origin = new Egg(scrambled=True)
class Lunch:
    origin = new Egg()
''')
class TestRemovingClassInstantiationParameter(TestCase):
    """Tests for remove_class_instantiation_parameter."""
    def test_removing_when_only_one_parameter(self):
        old_code = "my_car = Car(age=10)\nyour_car = Car(age=2)"
        new_code = remove_class_instantiation_parameter(old_code, 'Car', 'age')
        self.assertEqual(new_code, 'my_car = Car()\nyour_car = Car()')
    def test_removing_when_multiple_parameter(self):
        # Bug fix: this method was defined twice with identical bodies; the
        # second definition silently shadowed the first, so unittest only ever
        # collected one copy.  The duplicate has been removed.
        old_code = "my_car = Car(age=10, make='Ford')\nyour_car = Car(year=2020, age=2)"
        new_code = remove_class_instantiation_parameter(old_code, 'Car', 'age')
        self.assertEqual(new_code, "my_car = Car(make='Ford')\nyour_car = Car(year=2020)")
    def test_conditionally_removing_parameter(self):
        old_code = "bottle_1 = Wine(age=100)\nbottle_2 = Wine(age=1)"
        # removes all bottles under 10 years of age
        new_code = remove_class_instantiation_parameter(old_code, 'Wine', 'age', lambda age: age < 10)
        self.assertEqual(new_code, "bottle_1 = Wine(age=100)\nbottle_2 = Wine()")
# Allow running this test module directly: ``python test_cambio.py``.
if __name__ == '__main__':
    main()
1600788 | <reponame>karthikbhamidipati/reinforcement-learning<filename>algorithms/linear_wrapper.py
import numpy as np
class LinearWrapper:
"""
Wrapper for env to perform Linear Value function approximation
"""
def __init__(self, env):
"""
Constructor for LinearWrapper
:param env: Reinforcement learning environment
"""
self.env = env
self.n_actions = self.env.n_actions
self.n_states = self.env.n_states
self.n_features = self.n_actions * self.n_states
def encode_state(self, s):
"""
Method for encoding a state into a feature matrix.
Algorithm:
1. Initialize features of size (num_actions, num_actions * n_states)
2. For each action, calculate the flat index of the features for the state and action
3. Mark the features matrix with the action index and above calculated index as 1.0
:param s: State for which encoding should be performed
:return: features of the encoded state
"""
features = np.zeros((self.n_actions, self.n_features))
for a in range(self.n_actions):
i = np.ravel_multi_index((s, a), (self.n_states, self.n_actions))
features[a, i] = 1.0
return features
def decode_policy(self, theta):
"""
Method to decode the theta and extract the policy and value
:param theta: weight
:return: policy and value decoded
"""
policy = np.zeros(self.env.n_states, dtype=int)
value = np.zeros(self.env.n_states)
for s in range(self.n_states):
features = self.encode_state(s)
q = features.dot(theta)
policy[s] = np.argmax(q)
value[s] = np.max(q)
return policy, value
def reset(self):
"""
Method to reset the environment to the starting state encoded into feature matrix
:return: starting state encoded into a feature matrix
"""
return self.encode_state(self.env.reset())
def step(self, action):
"""
Method to call the step function of the environment and encode the next state
:param action: action to be taken from the current state
:return: encoded state, reward, done
"""
state, reward, done = self.env.step(action)
return self.encode_state(state), reward, done
def render(self, policy=None, value=None):
"""
Method to render the environment
:param policy: Policy for the environment
:param value: Value for the environment
:return: None
"""
self.env.render(policy, value)
| StarcoderdataPython |
1982986 | import sys
import click
from dataclasses import dataclass
from multiprocessing.context import AuthenticationError
from rich.console import Console
from rich.table import Table
from rich.text import Text
from pyrsched.rpc import RPCScheduler
from halo import Halo
# Rich-markup logo: magenta "P" + white "S".
PYRSCHED_LOGO = "[italic bold][#e20074]P[/#e20074][white]S[/white][/bold italic]"
@dataclass
class ContextWrapper:
    """ Wraps some objects which are accessible in each command. """
    scheduler: RPCScheduler  # RPC client connected to the scheduler server
    con: Console  # rich console used for all output
    json_output: bool  # True -> print raw data structures instead of tables
def make_job_table(job_list):
    """Build a rich Table summarising the given list of job dicts."""
    table = Table(title=f"{PYRSCHED_LOGO} Job list ({len(job_list)} total)")
    table.add_column("ID", no_wrap=True)
    table.add_column("name", style="bold #e20074")
    table.add_column("interval", justify="right")
    table.add_column("next run at")
    table.add_column("is running?")
    for job in job_list:
        table.add_row(
            job["id"],
            job["name"],
            str(job["trigger"]["interval"]),
            job["next_run_time"],
            # Green check / red cross glyphs for the running flag.
            "[bold green]✔[/bold green]" if job["is_running"] else "[bold red]х[/bold red]",
        )
    return table
@click.group()
@click.option("--json", is_flag=True, help="Output json instead of formatted text.")
@click.option("--port", type=click.INT, default=12345, help="Server port to connect to.")
@click.pass_context
def cli(ctx, json, port):
    """Command line client for the pypyr-scheduler RPC server."""
    # workaround for too wide calculated console width
    c = Console()
    c = Console(width=c.width-1)
    ctx.obj = ContextWrapper(scheduler=RPCScheduler(port=port), con=c, json_output=json,)
    try:
        ctx.obj.scheduler.connect()
    except ConnectionError:
        ctx.obj.con.print(
            "[bold][red]Could not connect to server, is it running? Exiting...[/red][/bold]"
        )
        ctx.exit()
    except AuthenticationError:
        # Raised when the multiprocessing shared secret does not match.
        ctx.obj.con.print(
            "[bold][red]Could not connect to server, wrong authentication key. Is the shared secret set to the correct value? Exiting...[/red][/bold]"
        )
        ctx.exit()
@cli.command(name="add")
@click.argument("pipeline_filename", type=click.STRING)
@click.argument("interval", type=click.INT)
@click.option("--start", is_flag=True, help="Start the job immediately after it was added to the jobstore")
@click.pass_context
def add_job_command(ctx, pipeline_filename, interval, start):
    """ Add a job to the jobstore with a given interval. This does not start the job.

    \b
    PIPELINE_FILENAME: Pipeline name without suffix (.yaml)
    INTERVAL: Execution interval in seconds (integer)
    """
    with Halo(text="Adding job...", spinner="dots", color="magenta") as spinner:
        job_id = ctx.obj.scheduler.add_job(pipeline_filename, interval)
        spinner.color = "green"
        if ctx.obj.json_output:
            spinner.stop()
            ctx.obj.con.print({"id": job_id})
        else:
            spinner.stop()
            ctx.obj.con.print(job_id, highlight=False)
    # Optionally start right away by delegating to the "start" command.
    if start:
        ctx.invoke(start_job_command, job_id=job_id)
@cli.command(name="get")
@click.argument("job_id", type=click.STRING)
@click.pass_context
def get_job_command(ctx, job_id):
    """ Show details of a single job.

    JOB_ID: ID or name of the job. Name resolution works only if the name is unambiguous.
    """
    with Halo(text="Loading job...", spinner="dots", color="magenta") as spinner:
        job = ctx.obj.scheduler.get_job(job_id)
        spinner.color = "green"
        spinner.stop()
        ctx.obj.con.print(job)
@cli.command(name="reschedule")
@click.argument("job_id", type=click.STRING)
@click.argument("interval", type=click.INT)
@click.pass_context
def reschedule_command(ctx, job_id, interval):
    """ Change the interval of a job.

    Rescheduling a job restarts a job with the new interval if it was not running.

    JOB_ID: ID or name of the job. Name resolution works only if the name is unambiguous.
    INTERVAL: Execution interval in seconds (integer)
    """
    with Halo(text="Rescheduling job...", spinner="dots", color="magenta") as spinner:
        job = ctx.obj.scheduler.reschedule_job(job_id, interval)
        spinner.color = "green"
        spinner.stop()
        ctx.obj.con.print(job)
@cli.command(name="start")
@click.argument("job_id", type=click.STRING)
@click.pass_context
def start_job_command(ctx, job_id):
    """ Start a job.

    JOB_ID: ID or name of the job. Name resolution works only if the name is unambiguous.
    """
    with Halo(text="Starting job...", spinner="dots", color="magenta") as spinner:
        job = ctx.obj.scheduler.start_job(job_id)
        if job is None:
            # The scheduler remembers the id it tried to resolve.
            spinner.color = "red"
            ctx.obj.con.print(
                f"[bold][red]Job {ctx.obj.scheduler.get_previous_job_id()} not found.[/red][/bold]",
                highlight=False,
            )
            spinner.stop()
            ctx.exit()
        spinner.color = "green"
        spinner.stop()
        ctx.obj.con.print(job)
@cli.command(name="stop")
@click.argument("job_id", type=click.STRING)
@click.pass_context
def stop_job_command(ctx, job_id):
    """ Stop a job.

    JOB_ID: ID or name of the job. Name resolution works only if the name is unambiguous.
    """
    with Halo(text="Stopping job...", spinner="dots", color="magenta") as spinner:
        job = ctx.obj.scheduler.stop_job(job_id)
        if job is None:
            spinner.color = "red"
            ctx.obj.con.print(
                f"[bold][red]Job {ctx.obj.scheduler.get_previous_job_id()} not found.[/red][/bold]",
                highlight=False,
            )
            spinner.stop()
            ctx.exit()
        spinner.color = "green"
        spinner.stop()
        ctx.obj.con.print(job)
@cli.command(name="list")
@click.pass_context
def list_command(ctx):
    """ List jobs known to the jobstore. """
    with Halo(text="Loading list...", spinner="dots", color="magenta") as spinner:
        job_list = ctx.obj.scheduler.list_jobs()
        spinner.color = "green"
        if ctx.obj.json_output:
            spinner.stop()
            ctx.obj.con.print(job_list)
        else:
            job_table = make_job_table(job_list)
            spinner.stop()
            ctx.obj.con.print(job_table)
@cli.command(name="status")
@click.pass_context
def status_command(ctx):
    """ Shows some status information. """
    with Halo(text="Loading", spinner="dots", color="magenta") as spinner:
        state = ctx.obj.scheduler.state
        spinner.color = "green"
        if ctx.obj.json_output:
            spinner.stop()
            ctx.obj.con.print(state)
        else:
            job_table = make_job_table(state["job_list"])
            updown = "[green]up[/green]" if state["is_running"] else "[red]down[/red]"
            # Map the numeric APScheduler run state to a coloured label.
            runstate = {
                0: "[red]STATE_STOPPED[/red]",
                1: "[green]STATE_RUNNING[/green]",
                2: "[yellow]STATE_PAUSED[/yellow]",
            }[state["run_state"]]
            state_text = f"Server is [bold]{updown}[/bold] with run state [bold]{runstate}[/bold]. Load: {state['cpu_load']}"
            stored_job_id = ctx.obj.scheduler.get_previous_job_id()
            last_job_id_status = Text("Job-ID used by the 'use last' token (-): '")
            last_job_id_status.append(f"{stored_job_id}", style="bold white")
            last_job_id_status.append("'")
            spinner.stop()
            ctx.obj.con.print(
                f"{PYRSCHED_LOGO}: the [bold italic #e20074]P[/bold italic #e20074]ypyr-[bold italic white]S[/bold italic white]cheduler"
            )
            ctx.obj.con.print(state_text)
            ctx.obj.con.print(job_table)
            ctx.obj.con.print(last_job_id_status)
@cli.command(name="remove")
@click.argument("job_id", type=click.STRING)
@click.pass_context
def remove_job(ctx, job_id):
    """ Remove a job.

    JOB_ID: ID or name of the job. Name resolution works only if the name is unambiguous.
    """
    # NOTE(review): sibling commands follow a ``*_command`` naming convention;
    # this one does not -- consider renaming for consistency.
    with Halo(text="Removing job...", spinner="dots", color="magenta") as spinner:
        job = ctx.obj.scheduler.remove_job(job_id)
        if job is None:
            spinner.color = "red"
            ctx.obj.con.print(
                f"[bold][red]Job {ctx.obj.scheduler.get_previous_job_id()} not found.[/red][/bold]",
                highlight=False,
            )
            spinner.stop()
            ctx.exit()
        spinner.color = "green"
        spinner.stop()
        ctx.obj.con.print(job)
if __name__ == "__main__":
    cli(prog_name="pyrsched-cli")
| StarcoderdataPython |
1950331 | #!/usr/bin/env python
# coding: utf-8
# ___
#
# <a href='http://www.pieriandata.com'> <img src='../Pierian_Data_Logo.png' /></a>
# ___
# # Pandas Data Visualization Exercise
#
# This is just a quick exercise for you to review the various plots we showed earlier. Use **df3** to replicate the following plots.
# In[1]:
import pandas as pd
import matplotlib.pyplot as plt

# Pandas data-visualization exercise: replicate a series of plots from df3.
df3 = pd.read_csv('df3')

df3.info()
df3.head()

# Scatter plot of b vs a (note the colour, point size and stretched figure).
df3.plot.scatter(x='a', y='b', c='red', s=50, figsize=(12, 3))
plt.show(block=False)

# Histogram of the 'a' column.
df3['a'].plot.hist()
plt.show(block=False)

# Switch to the 'ggplot' style sheet, then redo the histogram with more bins.
# (Remember the matplotlib style-sheet lecture.)
plt.style.use('ggplot')
plt.show(block=False)

df3['a'].plot.hist(alpha=0.5, bins=25)
plt.show(block=False)

# Boxplot comparing the a and b columns.
df3[['a', 'b']].plot.box()
plt.show(block=False)

# KDE plot of the 'd' column.
df3['d'].plot.kde()
plt.show(block=False)

# Same KDE with a thicker, dashed line (you would not normally dash a KDE).
df3['d'].plot.density(lw=5, ls='--')
plt.show(block=False)

# Area plot of all columns for rows 0..30 inclusive.
# Bug fix: DataFrame.ix was deprecated in pandas 0.20 and removed in 1.0;
# label-based .loc[0:30] selects the same inclusive row range.
df3.loc[0:30].plot.area(alpha=0.4)
plt.show(block=False)

# Bonus challenge: place the legend outside the plot area.
f = plt.figure()
df3.loc[0:30].plot.area(alpha=0.4, ax=f.gca())
plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
plt.show()
# # Great Job!
| StarcoderdataPython |
9656506 | import tensorflow as tf
import numpy as np
LR_A = 0.001          # learning rate for the actor
LR_C = 0.001          # learning rate for the critic
GAMMA = 0.9           # reward discount factor
TAU = 0.01            # soft-replacement coefficient for target networks
MEMORY_CAPACITY = 10000
BATCH_SIZE = 32


class DDPG(object):
    """Deep Deterministic Policy Gradient agent (TensorFlow 1.x graph mode).

    Holds an actor/critic pair plus slowly-updated target copies, and a
    fixed-size replay buffer implemented as a numpy ring buffer where each
    row stores one transition [s, a, r, s_].
    """

    def __init__(self, a_dim, s_dim, a_bound):
        # One row per transition: s (s_dim) + a (a_dim) + r (1) + s_ (s_dim).
        self.memory = np.zeros((MEMORY_CAPACITY, s_dim*2+a_dim+1), dtype=np.float32)
        self.pointer = 0
        self.memory_full = False
        self.sess = tf.Session()
        self.a_replace_counter, self.c_replace_counter = 0, 0
        self.a_dim, self.s_dim, self.a_bound = a_dim, s_dim, a_bound[1]
        self.S = tf.placeholder(tf.float32, [None, s_dim], 's')
        self.S_ = tf.placeholder(tf.float32, [None, s_dim], 's_')
        self.R = tf.placeholder(tf.float32, [None, 1], 'r')
        with tf.variable_scope('Actor'):
            self.a = self._build_a(self.S, scope='eval', trainable=True)
            a_ = self._build_a(self.S_, scope='target', trainable=False)
        with tf.variable_scope('Critic'):
            q = self._build_c(self.S, self.a, scope='eval', trainable=True)
            q_ = self._build_c(self.S_, a_, scope='target', trainable=False)
        self.ae_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/eval')
        self.at_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Actor/target')
        self.ce_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/eval')
        self.ct_params = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='Critic/target')
        # Soft replacement: target <- (1 - TAU) * target + TAU * eval.
        self.soft_replace = [[tf.assign(ta, (1 - TAU) * ta + TAU * ea), tf.assign(tc, (1 - TAU) * tc + TAU * ec)]
                             for ta, ea, tc, ec in zip(self.at_params, self.ae_params, self.ct_params, self.ce_params)]
        q_target = self.R + GAMMA * q_
        td_error = tf.losses.mean_squared_error(labels=q_target, predictions=q)
        self.ctrain = tf.train.AdamOptimizer(LR_C).minimize(td_error, var_list=self.ce_params)
        a_loss = - tf.reduce_mean(q)  # maximize q
        self.atrain = tf.train.AdamOptimizer(LR_A).minimize(a_loss, var_list=self.ae_params)
        self.sess.run(tf.global_variables_initializer())

    def choose_action(self, s):
        """Return the deterministic action for one unbatched state vector."""
        return self.sess.run(self.a, feed_dict={self.S: s[None, :]})[0]

    def learn(self):
        """One training step: soft-update targets, then train critic and actor."""
        self.sess.run(self.soft_replace)
        # NOTE(review): samples uniformly over the whole buffer even before it
        # is full, so early batches may contain all-zero rows; callers appear
        # expected to wait for memory_full before calling learn() -- confirm.
        indices = np.random.choice(MEMORY_CAPACITY, size=BATCH_SIZE)
        bt = self.memory[indices, :]
        bs = bt[:, :self.s_dim]
        ba = bt[:, self.s_dim:self.s_dim+self.a_dim]
        br = bt[:, -self.s_dim-1:-self.s_dim]
        bs_ = bt[:, -self.s_dim:]
        self.sess.run(self.atrain, feed_dict={self.S: bs})
        self.sess.run(self.ctrain, feed_dict={self.S: bs, self.a: ba, self.R: br, self.S_: bs_})

    def store_transition(self, s, a, r, s_):
        """Write one (s, a, r, s_) transition into the ring buffer."""
        transition = np.hstack((s, a, [r], s_))
        index = self.pointer % MEMORY_CAPACITY
        # Bug fix: the original ``self.memory[index: ] = transition`` assigned
        # the row to *every* slot from ``index`` onward via broadcasting,
        # clobbering the rest of the buffer on each store.
        self.memory[index, :] = transition
        self.pointer += 1
        # Bug fix: ``>`` flagged the buffer full one store too late; after
        # MEMORY_CAPACITY stores the pointer already equals MEMORY_CAPACITY.
        if self.pointer >= MEMORY_CAPACITY:
            self.memory_full = True

    def _build_a(self, s, scope, trainable):
        """Actor network: one hidden ReLU layer, tanh output scaled to a_bound."""
        with tf.variable_scope(scope):
            net = tf.layers.dense(s, 100, activation=tf.nn.relu, name='l1', trainable=trainable)
            a = tf.layers.dense(net, self.a_dim, activation=tf.nn.tanh, name='a', trainable=trainable)
            return tf.multiply(a, self.a_bound, name='scaled_a')

    def _build_c(self, s, a, scope, trainable):
        """Critic network: Q(s, a) with state and action merged in layer one."""
        with tf.variable_scope(scope):
            n_l1 = 100
            w1_s = tf.get_variable('w1_s', [self.s_dim, n_l1], trainable=trainable)
            w1_a = tf.get_variable('w1_a', [self.a_dim, n_l1], trainable=trainable)
            b1 = tf.get_variable('b1', [1, n_l1], trainable=trainable)
            net = tf.nn.relu(tf.matmul(s, w1_s) + tf.matmul(a, w1_a) + b1)
            return tf.layers.dense(net, 1, trainable=trainable)

    def save(self):
        """Checkpoint all graph variables to ./params/."""
        saver = tf.train.Saver()
        saver.save(self.sess, './params/', write_meta_graph=False)

    def restore(self):
        """Restore graph variables from ./params/."""
        saver = tf.train.Saver()
        saver.restore(self.sess, './params/')
8088262 | <reponame>LaurentAjdnik/pyqir
# Generated from MockLanguage.g4 by ANTLR 4.10.1
from antlr4 import *
from io import StringIO
import sys
if sys.version_info[1] > 5:
from typing import TextIO
else:
from typing.io import TextIO
def serializedATN():
    # ANTLR-generated serialized ATN data for the lexer.  Do not edit by
    # hand; regenerate from MockLanguage.g4 with ANTLR 4.10.1 instead.
    return [
        4,0,8,56,6,-1,2,0,7,0,2,1,7,1,2,2,7,2,2,3,7,3,2,4,7,4,2,5,7,5,2,
        6,7,6,2,7,7,7,1,0,1,0,1,1,1,1,1,2,1,2,1,2,1,2,1,2,1,3,1,3,1,3,1,
        4,4,4,31,8,4,11,4,12,4,32,1,5,4,5,36,8,5,11,5,12,5,37,1,5,1,5,1,
        6,1,6,1,6,1,6,5,6,46,8,6,10,6,12,6,49,9,6,1,6,1,6,1,7,1,7,1,7,1,
        7,0,0,8,1,1,3,2,5,3,7,4,9,5,11,6,13,7,15,8,1,0,3,1,0,48,57,3,0,9,
        10,13,13,32,32,2,0,10,10,13,13,58,0,1,1,0,0,0,0,3,1,0,0,0,0,5,1,
        0,0,0,0,7,1,0,0,0,0,9,1,0,0,0,0,11,1,0,0,0,0,13,1,0,0,0,0,15,1,0,
        0,0,1,17,1,0,0,0,3,19,1,0,0,0,5,21,1,0,0,0,7,26,1,0,0,0,9,30,1,0,
        0,0,11,35,1,0,0,0,13,41,1,0,0,0,15,52,1,0,0,0,17,18,5,120,0,0,18,
        2,1,0,0,0,19,20,5,104,0,0,20,4,1,0,0,0,21,22,5,99,0,0,22,23,5,110,
        0,0,23,24,5,111,0,0,24,25,5,116,0,0,25,6,1,0,0,0,26,27,5,109,0,0,
        27,28,5,122,0,0,28,8,1,0,0,0,29,31,7,0,0,0,30,29,1,0,0,0,31,32,1,
        0,0,0,32,30,1,0,0,0,32,33,1,0,0,0,33,10,1,0,0,0,34,36,7,1,0,0,35,
        34,1,0,0,0,36,37,1,0,0,0,37,35,1,0,0,0,37,38,1,0,0,0,38,39,1,0,0,
        0,39,40,6,5,0,0,40,12,1,0,0,0,41,42,5,47,0,0,42,43,5,47,0,0,43,47,
        1,0,0,0,44,46,8,2,0,0,45,44,1,0,0,0,46,49,1,0,0,0,47,45,1,0,0,0,
        47,48,1,0,0,0,48,50,1,0,0,0,49,47,1,0,0,0,50,51,6,6,0,0,51,14,1,
        0,0,0,52,53,9,0,0,0,53,54,1,0,0,0,54,55,6,7,0,0,55,16,1,0,0,0,4,
        0,32,37,47,1,0,1,0
    ]
# ANTLR-generated lexer for the MockLanguage grammar.  Do not edit by hand;
# regenerate from MockLanguage.g4 with ANTLR 4.10.1 instead.
class MockLanguageLexer(Lexer):

    atn = ATNDeserializer().deserialize(serializedATN())

    decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ]

    # Token type constants (indices into symbolicNames/literalNames).
    T__0 = 1
    T__1 = 2
    T__2 = 3
    T__3 = 4
    QubitId = 5
    Whitespace = 6
    Comment = 7
    Invalid = 8

    channelNames = [ u"DEFAULT_TOKEN_CHANNEL", u"HIDDEN" ]

    modeNames = [ "DEFAULT_MODE" ]

    literalNames = [ "<INVALID>",
            "'x'", "'h'", "'cnot'", "'mz'" ]

    symbolicNames = [ "<INVALID>",
            "QubitId", "Whitespace", "Comment", "Invalid" ]

    ruleNames = [ "T__0", "T__1", "T__2", "T__3", "QubitId", "Whitespace",
                  "Comment", "Invalid" ]

    grammarFileName = "MockLanguage.g4"

    def __init__(self, input=None, output:TextIO = sys.stdout):
        super().__init__(input, output)
        self.checkVersion("4.10.1")
        self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache())
        self._actions = None
        self._predicates = None
| StarcoderdataPython |
4841027 | # -*- encoding: utf-8 -*-
"""AMQP Specifications and Classes"""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__since__ = '2011-09-23'
__version__ = '3.0.2'
__all__ = [
'body', 'decode', 'commands', 'constants', 'encode', 'exceptions', 'frame',
'header', 'heartbeat'
]
| StarcoderdataPython |
9696896 | """base_tiler"""
__version__ = '0.1' | StarcoderdataPython |
3576802 | <reponame>roemmele/answerquest<gh_stars>10-100
import os
import argparse
import json
import subprocess
def get_answer_annotated_data(questions, answer_sents, answers, scores,
                              min_score=0.0):
    """Filter QG output by score and mark each answer span in its sentence.

    For every (question, answer_sent, answer, score) tuple whose score is at
    least ``min_score``, the answer substring is located case-insensitively
    inside the sentence and wrapped in "<ANSWER> ... </ANSWER>" tags.  When
    the full answer cannot be found, leading words are dropped one at a time
    until a suffix matches; items with no match at all are skipped.

    Returns a dict with parallel lists under 'answer_sent' and 'question'.
    """
    open_tag = "<ANSWER> "
    close_tag = " </ANSWER>"
    filtered_questions = []
    filtered_answer_sents = []
    for question, answer_sent, answer, score in zip(questions, answer_sents,
                                                    answers, scores):
        if score < min_score:
            continue
        answer_start_char = None
        while answer_start_char is None:
            try:
                answer_start_char = answer_sent.lower().index(answer.lower())
            except ValueError:  # str.index: substring not found
                if " " not in answer:  # Answer not found at all; give up.
                    break
                # Trim the leading word and retry with the shorter answer.
                answer = answer[answer.index(" ") + 1:]
        if answer_start_char is not None:
            answer_end_char = answer_start_char + len(answer)
            # Insert the opening tag, which shifts everything after it by
            # len(open_tag); the closing tag position accounts for that shift.
            annotated = (answer_sent[:answer_start_char] + open_tag
                         + answer_sent[answer_start_char:])
            annotated = (annotated[:answer_end_char + len(open_tag)]
                         + close_tag
                         + annotated[answer_end_char + len(open_tag):])
            filtered_answer_sents.append(annotated)
            filtered_questions.append(question)
    return {'answer_sent': filtered_answer_sents,
            'question': filtered_questions}
def run_proc(jar_dir, input_texts):
    """Run the Heilman & Smith rule-based QG jar over each input text.

    Requires the sense-tagging and parsing servers referenced by the config
    file to be running.  Returns four parallel lists:
    (questions, answer_sents, answers, scores).
    """
    # Switch to the directory that contains the H&S code so the jar's
    # relative model paths resolve.
    # Bug fix: the original ignored the ``jar_dir`` parameter and read the
    # global ``args.jar_dir`` instead.
    os.chdir(jar_dir)
    qg_output = {'question': [],
                 'answer_sent': [],
                 'answer': [],
                 'score': []}
    for text_idx, text in enumerate(input_texts):
        # NOTE(review): still reads the global ``args.config_file``; consider
        # threading the config path through as a parameter as well.
        proc = subprocess.Popen(["java", "-Xmx1200m",
                                 "-cp", "question-generation.jar",
                                 "edu/cmu/ark/QuestionAsker",
                                 "--verbose", "--model", "models/linear-regression-ranker-reg500.ser.gz",
                                 "--just-wh", "--max-length", "30", "--downweight-pro",
                                 "--properties", args.config_file],
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        proc_outputs = [proc_output.split("\t") for proc_output in
                        proc.communicate(text.encode())[0].decode('utf-8').strip().split("\n")]
        try:
            for question, answer_sent, answer, score in proc_outputs:
                qg_output['question'].append(question)
                qg_output['answer_sent'].append(answer_sent)
                qg_output['answer'].append(answer)
                qg_output['score'].append(float(score))
        except ValueError:
            # Possibly no questions were returned (e.g. error in parsing):
            # the row then fails tuple unpacking or float() conversion.
            continue
        print("PROCESSED TEXTS UP TO INDEX", text_idx, "\n\n")
    return (qg_output['question'],
            qg_output['answer_sent'],
            qg_output['answer'],
            qg_output['score'])
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--input_file", "-input_file",
                        help="Path to input texts (one text per line).",
                        type=str, required=True)
    parser.add_argument("--jar_dir", "-jar_dir",
                        help="Directory containing downloaded java program.",
                        type=str, required=True)
    # parser.add_argument("--start_idx", "-start_idx", type=int)
    # parser.add_argument("--end_idx", "-end_idx", type=int)
    parser.add_argument("--config_file", "-config_file",
                        help="Config file for java program",
                        type=str, required=True)
    parser.add_argument("--output_dir", "-output_dir",
                        help="Directory path of where to save Q&A output",
                        type=str, required=True)
    parser.add_argument("--min_score", "-min_score",
                        help="Filter questions below this quality score.\
                        By default, all generated questions will be saved.",
                        type=float, default=0.0)
    args = parser.parse_args()
    with open(args.input_file) as f:
        input_texts = [text.strip() for text in f]
    # Remember where we started: run_proc chdirs into the jar directory.
    orig_dir = os.path.dirname(os.path.abspath(__file__))
    questions, answer_sents, answers, scores = run_proc(args.jar_dir,
                                                        input_texts)
    os.chdir(orig_dir)
    # Annotate answer spans and drop low-scoring questions.
    annotated_qg_data = get_answer_annotated_data(questions, answer_sents, answers, scores,
                                                  min_score=args.min_score)
    if not os.path.isdir(args.output_dir):
        os.mkdir(args.output_dir)
    with open(os.path.join(args.output_dir, 'input_texts.txt'), 'w') as f:
        f.write("\n".join(annotated_qg_data['answer_sent']))
    print("Saved answer-annotated sentences to",
          os.path.join(args.output_dir, 'input_texts.txt'))
    with open(os.path.join(args.output_dir, 'questions.txt'), 'w') as f:
        f.write("\n".join(annotated_qg_data['question']))
    print("Saved questions to",
          os.path.join(args.output_dir, 'questions.txt'))
| StarcoderdataPython |
119761 | <filename>ooni/tests/test_utils.py<gh_stars>0
import os
from twisted.trial import unittest
from ooni.utils import log, generate_filename, net
class TestUtils(unittest.TestCase):
    """Unit tests for ooni.utils helpers: log encoding, result-filename
    generation, and network-address discovery."""
    def setUp(self):
        # Fixtures shared by the filename-generation tests.
        self.test_details = {
            'test_name': 'foo',
            'test_start_time': '2016-01-01 01:22:22'
        }
        self.extension = 'ext'
        self.prefix = 'prefix'
        self.basename = 'filename'
        self.filename = 'filename.txe'
    def test_log_encode(self):
        # Each case is (expected escaped form, raw log message).
        cases = [
            (r"spam\x07\x08", "spam\a\b"),
            (r"spam\x07\x08", u"spam\a\b"),
            (r"ham\u237e", u"ham" + u"\u237e"),
        ]
        for expected, raw in cases:
            self.assertEqual(log.log_encode(raw), expected)
    def test_generate_filename(self):
        self.assertEqual(generate_filename(self.test_details),
                         'foo-2016-01-01T012222Z')
    def test_generate_filename_with_extension(self):
        result = generate_filename(self.test_details,
                                   extension=self.extension)
        self.assertEqual(result, 'foo-2016-01-01T012222Z.ext')
    def test_generate_filename_with_prefix(self):
        result = generate_filename(self.test_details, prefix=self.prefix)
        self.assertEqual(result, 'prefix-foo-2016-01-01T012222Z')
    def test_generate_filename_with_extension_and_prefix(self):
        result = generate_filename(self.test_details, prefix=self.prefix,
                                   extension=self.extension)
        self.assertEqual(result, 'prefix-foo-2016-01-01T012222Z.ext')
    def test_get_addresses(self):
        self.assertIsInstance(net.getAddresses(), list)
| StarcoderdataPython |
1735329 | <filename>django/rpg/main/models.py
from django.db import models
from datetime import datetime
import re
class UserManager(models.Manager):
    """Custom manager carrying registration-form validation for User."""
    def register_validator(self, post_data):
        """Validate registration POST data.

        Args:
            post_data: mapping with 'first_name', 'last_name', 'email',
                'password' and 'confirm_password' keys.
        Returns:
            dict mapping field name -> error message; empty when valid.
        """
        errors = {}
        EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
        # Checks are aligned with their messages: 'at least 2 characters'
        # was previously enforced as len < 1 (only the empty string failed).
        if len(post_data['first_name']) < 2:
            errors['first_name'] = "First name must be at least 2 characters long"
        if len(post_data['last_name']) < 2:
            errors['last_name'] = "Last name must be at least 2 characters long"
        if not EMAIL_REGEX.match(post_data['email']):
            errors['email'] = "Invalid email"
        # 'at least 8 characters' was previously enforced as len < 7.
        if len(post_data['password']) < 8:
            errors['password'] = "Password must be at least 8 characters long"
        # NOTE(review): a mismatch overwrites the too-short message because
        # both use the 'password' key; kept for template compatibility.
        if post_data['password'] != post_data['confirm_password']:
            errors['password'] = "Passwords do not match!"
        return errors
class CharacterManager(models.Manager):
    """Custom manager carrying form validation for Character."""
    def character_validator(self, post_data):
        """Validate character-creation POST data.

        Returns:
            dict mapping field name -> error message; empty when valid.
        """
        errors = {}
        # Aligned with its message: 'at least 3 characters' was previously
        # enforced as len < 2.
        if len(post_data['name']) < 3:
            errors['name'] = "A Character Name must consist of at least 3 characters"
        # 'longer than 6 characters' means len > 6; previously enforced as
        # len < 5.
        if len(post_data['ability']) < 7:
            errors['ability'] = "You must create an ability longer than 6 characters long"
        return errors
class ItemManager(models.Manager):
    """Custom manager carrying form validation for Item."""
    def item_validator(self, post_data):
        """Validate item-creation POST data.

        NOTE(review): assumes post_data['attack'] is already numeric; raw
        request.POST values are strings -- confirm callers convert first.
        """
        errors = {}
        # Aligned with the message: exactly 100 Attack is allowed, anything
        # above 100 is rejected (previously '> 99' also rejected 100).
        if post_data['attack'] > 100:
            errors['attack'] = "You cannot exceed 100 Attack"
        return errors
class ObstacleManager(models.Manager):
    """Custom manager carrying form validation for Obstacle."""
    def obstacle_validator(self, post_data):
        """Return a dict of field errors (empty when the data is valid)."""
        found_errors = {}
        if post_data['durability'] > 10:
            found_errors['durability'] = "This Obstacle cannot be destroyed"
        return found_errors
class User(models.Model):
    """A registered player account."""
    # NOTE(review): the password is persisted as plain text and
    # confirm_password is stored as a real DB column; hashing the password
    # and dropping confirm_password would be safer but needs a migration.
    first_name = models.CharField(max_length=20)
    last_name = models.CharField(max_length=20)
    # NOTE(review): max_length=20 is tight for real e-mail addresses.
    email = models.EmailField(max_length=20)
    password = models.CharField(max_length=20)
    confirm_password = models.CharField(max_length=20)
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on save
    objects = UserManager()  # adds register_validator()
class Character(models.Model):
    """A playable character; many users can own/share a character."""
    name = models.CharField(max_length=20)
    ability = models.CharField(max_length=20)
    health = models.IntegerField()
    attack = models.IntegerField()
    # Reverse accessor: user.characters
    users = models.ManyToManyField(User, related_name="characters")
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on save
    objects = CharacterManager()  # adds character_validator()
class Item(models.Model):
    """Equippable item granting health/attack bonuses to characters."""
    name = models.CharField(max_length=20)
    effect = models.CharField(max_length=50)
    health = models.IntegerField()
    attack = models.IntegerField()
    # Reverse accessor: character.items
    characters = models.ManyToManyField(Character, related_name="items")
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on save
    objects = ItemManager()  # adds item_validator()
class Obstacle(models.Model):
    """An obstacle tied to the item able to clear it."""
    obstacle_name = models.CharField(max_length=20)
    durability = models.IntegerField()
    # Deleting the Item also deletes its obstacles (CASCADE);
    # reverse accessor: item.obstacles
    item = models.ForeignKey(Item, related_name="obstacles", on_delete = models.CASCADE)
    created_at = models.DateTimeField(auto_now_add=True)  # set on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on save
    objects = ObstacleManager()  # adds obstacle_validator()
| StarcoderdataPython |
class Pokemon():
    """A class to represent a Pokemon.

    Attributes
    ----------
    generation : str
        class-level generation tag shared by all Pokemon ('base')
    name : str
        the name of the pokemon
    level : int
        the pokemon level
    hp : int
        the current HP level of the pokemon
    energy_type : str
        the pokemon's energy type (e.g. 'water')
    moves : tuple
        (move name, damage) pair describing the pokemon's attack
    """
    generation = 'base'

    def __init__(self, name, level, start_hp, energy_type, moves):
        self.name = name
        self.level = level
        self.hp = start_hp
        self.energy_type = energy_type
        self.moves = moves

    def take_damage(self, damage_amount):
        """Reduce current HP by damage_amount (HP may go below zero)."""
        self.hp -= damage_amount

    def __str__(self):
        return f'Pokemon: {self.name} with {self.hp} HP left'
class WaterPokemon(Pokemon):
    """Water-type specialisation of Pokemon."""
    def __init__(self, name:str, level:int, start_hp:int, moves:tuple):
        super().__init__(name, level, start_hp, 'water', moves)
        # NOTE(review): this immediately overwrites the 'water' energy type
        # passed to super().__init__ -- presumably intentional (the demo
        # below prints 'post_water'), but confirm.
        self.energy_type = 'post_water'
# Demo: construct two water Pokemon and show the energy type (note that the
# WaterPokemon constructor overrides it to 'post_water').
poliwag = WaterPokemon('Poliwag', 13, 60, ('Water Gun', 30))
starmie = WaterPokemon('Starmie', 28, 90, ('Star Freeze', 30))
print(poliwag.energy_type)
# def battle(poke1, poke2):
# while poke1.hp > 0 and poke2.hp > 0:
# poke1.take_damage(poke2.moves[1])
# poke2.take_damage(poke1.moves[1])
# print(poke1)
# print(poke2)
# battle(poliwag, starmie)
def my_func(var1: int, var2:str) -> str:
    """Return the string *var2* repeated *var1* times."""
    repeated = var1 * var2
    return repeated
| StarcoderdataPython |
6441616 | <filename>gesund_projekt/calories/migrations/0003_caloriefooddetail.py
# Generated by Django 4.0.1 on 2022-03-24 10:12
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the CalorieFoodDetail table
    # (per-food nutrition facts with a foreign key to CalorieCategory).
    # Do not hand-edit applied migrations; create a follow-up migration.
    dependencies = [
        ('calories', '0002_caloriecategory_alter_calorieintake_id'),
    ]
    operations = [
        migrations.CreateModel(
            name='CalorieFoodDetail',
            fields=[
                ('id', models.AutoField(editable=False, primary_key=True, serialize=False)),
                ('food', models.TextField()),
                ('description', models.TextField()),
                ('calories', models.FloatField()),
                ('protein', models.FloatField()),
                ('fat', models.FloatField()),
                ('carb', models.FloatField()),
                ('sugar', models.FloatField()),
                ('fiber', models.FloatField()),
                ('status', models.BooleanField()),
                ('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='calories.caloriecategory')),
            ],
        ),
    ]
| StarcoderdataPython |
11341620 | from django.apps import AppConfig
class WordsConfig(AppConfig):
    """Django application configuration for the 'words' app."""
    name = 'words'
| StarcoderdataPython |
9724130 | <filename>readtwice/models/input_utils.py
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for creating or transforming model inputs."""
from typing import Dict, List, Optional, Text, Tuple, Union
import tensorflow.compat.v1 as tf
def make_block_pos_features(block_ids):
  """Creates feature with block relative positions in the original document.

  Args:
    block_ids: 1-D int tensor of per-block document IDs; 0 marks padding.

  Returns:
    1-D int32 tensor with the 1-based position of every block within its
    document (0 for padding blocks).
  """
  block_ids_expanded_0 = tf.expand_dims(block_ids, 0)
  # x[i, j] == 1 iff blocks i and j belong to the same non-padding document.
  x = tf.cast(
      tf.logical_and(
          tf.equal(tf.expand_dims(block_ids, 1), block_ids_expanded_0),
          tf.not_equal(block_ids_expanded_0, 0)), tf.int32)
  # pylint: disable=line-too-long
  # `tf.linalg.band_part(x, -1, 0)` sets to lower triangual part of matrix to 0.
  # See https://www.tensorflow.org/versions/r1.15/api_docs/python/tf/linalg/band_part
  # for more details.
  # pylint: enable=line-too-long
  # Row-wise sum of the lower triangle counts, for block i, the blocks at
  # positions <= i that share its document ID -- i.e. its 1-based position.
  return tf.reduce_sum(tf.linalg.band_part(x, -1, 0), 1)
def mask_same_entity_mentions(
    token_ids, annotation_begins,
    annotation_ends, annotation_labels,
    masked_lm_positions, masked_lm_weights,
    mask_token_id, apply_across_whole_batch):
  """Mask mentions from the same entity.

  When any token of an entity mention falls on a masked-LM position, every
  mention carrying the same entity label -- within the same example, or
  across the whole batch when `apply_across_whole_batch` is True -- is
  replaced by `mask_token_id`, so the model cannot copy the answer from a
  sibling mention. Annotations labeled 0 ([PAD]) or 1 ([UNK]) never trigger
  masking.

  Args:
    token_ids: [batch_size, block_length] int token IDs.
    annotation_begins: [batch_size, max_num_annotations] inclusive span
      starts.
    annotation_ends: [batch_size, max_num_annotations] inclusive span ends.
    annotation_labels: [batch_size, max_num_annotations] entity labels.
    masked_lm_positions: [batch_size, num_masked] positions chosen for MLM.
    masked_lm_weights: [batch_size, num_masked]; 0 marks padded positions.
    mask_token_id: ID of the mask token.
    apply_across_whole_batch: match mentions across examples when True.

  Returns:
    [batch_size, block_length] token IDs with same-entity mentions masked.
  """
  batch_size = tf.shape(token_ids)[0]
  block_length = tf.shape(token_ids)[1]
  max_num_annotations = tf.shape(annotation_labels)[1]
  # (mask_end - mask_begin)[b, a, :] is a 0/1 indicator over token positions
  # covered by annotation a of example b.
  mask_begin = tf.sequence_mask(annotation_begins, block_length, dtype=tf.int32)
  mask_end = tf.sequence_mask(annotation_ends + 1, block_length, dtype=tf.int32)
  # Ignore annotations with 0 ([PAD]) and 1 ([UNK]) labels
  is_annotation_not_pad_or_unk = tf.logical_and(
      tf.not_equal(annotation_labels, 0), tf.not_equal(annotation_labels, 1))
  # 1 iff some non-padded masked-LM position falls inside the annotation.
  # [batch_size, max_num_annotations]
  is_annotation_masked = tf.reduce_max(
      tf.cast(
          tf.logical_and(
              tf.less_equal(
                  tf.expand_dims(annotation_begins, -1),
                  tf.expand_dims(masked_lm_positions, 1)),
              tf.greater_equal(
                  tf.expand_dims(annotation_ends, -1),
                  tf.expand_dims(masked_lm_positions, 1))),
          dtype=tf.int32) *
      tf.expand_dims(tf.cast(masked_lm_weights, dtype=tf.int32), 1), -1)
  if apply_across_whole_batch:
    # Flatten annotations over the batch so mentions in different examples
    # can mask each other.
    # [batch_size * max_num_annotations, batch_size * max_num_annotations]
    are_annotations_for_the_same_entity = tf.cast(
        tf.logical_and(
            tf.equal(
                tf.reshape(annotation_labels,
                           [batch_size * max_num_annotations, 1]),
                tf.reshape(annotation_labels,
                           [1, batch_size * max_num_annotations])),
            tf.reshape(is_annotation_not_pad_or_unk,
                       [batch_size * max_num_annotations, 1])),
        dtype=tf.int32)
    # [batch_size * max_num_annotations]
    should_annotation_be_masked = tf.einsum(
        'i,ij->j',
        tf.reshape(is_annotation_masked, [batch_size * max_num_annotations]),
        are_annotations_for_the_same_entity)
    # [batch_size, max_num_annotations]
    should_annotation_be_masked = tf.reshape(should_annotation_be_masked,
                                             [batch_size, max_num_annotations])
  else:
    # Per-example matching only.
    # [batch_size, max_num_annotations, max_num_annotations]
    are_annotations_for_the_same_entity = tf.cast(
        tf.logical_and(
            tf.equal(
                tf.expand_dims(annotation_labels, -1),
                tf.expand_dims(annotation_labels, 1)),
            tf.expand_dims(is_annotation_not_pad_or_unk, -1)),
        dtype=tf.int32)
    # [batch_size, max_num_annotations]
    should_annotation_be_masked = tf.einsum(
        'bi,bij->bj', is_annotation_masked, are_annotations_for_the_same_entity)
  # Clip counts to a 0/1 indicator before expanding spans to tokens.
  should_annotation_be_masked = tf.minimum(should_annotation_be_masked, 1)
  should_token_be_masked = (
      tf.reduce_max((mask_end - mask_begin) *
                    tf.expand_dims(should_annotation_be_masked, -1), 1))
  # [batch_size, block_length]
  return (token_ids * (1 - should_token_be_masked) +
          mask_token_id * should_token_be_masked)
def make_is_span_maskable_features(num_blocks_per_example,
                                   block_length, max_num_annotations,
                                   annotation_begins,
                                   annotation_ends,
                                   annotation_labels):
  """Prepares is-token-belongs-to-an-annotation mask.

  Args:
    num_blocks_per_example: number of blocks per example.
    block_length: tokens per block.
    max_num_annotations: annotations per block.
    annotation_begins: flattened inclusive annotation start offsets.
    annotation_ends: flattened inclusive annotation end offsets.
    annotation_labels: flattened annotation labels; 0 marks padding.

  Returns:
    Pair of [num_blocks_per_example * block_length] int32 0/1 vectors:
      1) tokens covered by any non-padding annotation span;
      2) same, but excluding each span's first token.
  """
  annotation_begins = tf.reshape(annotation_begins,
                                 [num_blocks_per_example, max_num_annotations])
  annotation_ends = tf.reshape(annotation_ends,
                               [num_blocks_per_example, max_num_annotations])
  annotation_labels = tf.reshape(annotation_labels,
                                 [num_blocks_per_example, max_num_annotations])
  # Padded annotations (label 0) must not contribute to either mask.
  annotation_mask = tf.expand_dims(
      tf.cast(tf.not_equal(annotation_labels, 0), tf.int32), -1)
  mask_begin = tf.sequence_mask(annotation_begins, block_length, dtype=tf.int32)
  mask_begin_plus_one = tf.sequence_mask(
      annotation_begins + 1, block_length, dtype=tf.int32)
  mask_end = tf.sequence_mask(annotation_ends + 1, block_length, dtype=tf.int32)
  def make_mask(x):
    # Drop padded annotations, merge spans per block, clip to 0/1, and
    # flatten to one vector covering all tokens of the example.
    x = x * annotation_mask
    x = tf.reduce_sum(x, 1)
    x = tf.minimum(x, 1)
    x = tf.reshape(x, [num_blocks_per_example * block_length])
    return x
  # mask_end - mask_begin covers [begin, end]; using begin+1 drops the
  # first token of every span.
  return (make_mask(mask_end - mask_begin),
          make_mask(mask_end - mask_begin_plus_one))
def dynamic_padding_1d(tensor, length,
                       padding_token_id):
  """Pads or truncates a 1-D tensor to a specified length.

  Args:
    tensor: 1-D tensor of dynamic length.
    length: target (static) length.
    padding_token_id: value appended on the right when padding is needed.

  Returns:
    1-D tensor with static shape [length]: right-padded with
    `padding_token_id` when too short, truncated when too long.
  """
  length_to_pad = length - tf.shape(tensor)[0]
  # Paddings matrix [[0, max(length_to_pad, 0)]]: pad only on the right.
  paddings = tf.expand_dims(
      tf.concat(
          [tf.constant([0]),
           tf.expand_dims(tf.maximum(length_to_pad, 0), 0)],
          axis=0), 0)
  def pad():
    return tf.pad(
        tensor, paddings, 'CONSTANT', constant_values=padding_token_id)
  padded_tensor = tf.cond(
      length_to_pad > 0,
      true_fn=pad,
      false_fn=lambda: tensor[:length],
      strict=True)
  # Both branches yield exactly `length` elements; record the static shape.
  padded_tensor.set_shape(length)
  return padded_tensor
def get_num_examples_in_tf_records(paths):
  """Counts the TF records stored under one path or a list of paths."""
  if isinstance(paths, str):
    paths = [paths]
  return sum(
      sum(1 for _ in tf.python_io.tf_record_iterator(p)) for p in paths)
def get_block_params_from_input_file(input_file):
  """Extract the `num_blocks_per_example` and `block_length` from the record.

  Only the first record of `input_file` is inspected -- assumes all records
  in the file share the same layout.

  Raises:
    ValueError: if 'token_ids' length is not a multiple of the number of
      blocks.
  """
  first_record = next(tf.python_io.tf_record_iterator(input_file))
  first_example = tf.train.Example.FromString(first_record)
  num_blocks_per_example = len(
      first_example.features.feature['block_ids'].int64_list.value)
  max_seq_len = len(
      first_example.features.feature['token_ids'].int64_list.value)
  if max_seq_len % num_blocks_per_example != 0:
    raise ValueError('Record contain inconsistent input: '
                     'num_blocks_per_example={}, max_seq_len={}'.format(
                         num_blocks_per_example, max_seq_len))
  block_length = max_seq_len // num_blocks_per_example
  return num_blocks_per_example, block_length
def get_num_annotations_from_input_file(input_file):
  """Extract the `max_num_annotations` (per block) from the record.

  Only the first record of `input_file` is inspected -- assumes all records
  share the same layout. Exactly one of `annotation_labels` /
  `answer_annotation_labels` must be present (asserted below).
  """
  first_record = next(tf.python_io.tf_record_iterator(input_file))
  first_example = tf.train.Example.FromString(first_record)
  num_annotations_per_example = None
  # For historical reasons, the data could have either
  # `annotation_*` features or `answer_annotation_*` features.
  # We allow both options for backward compatibility.
  if 'annotation_labels' in first_example.features.feature:
    num_annotations_per_example = len(
        first_example.features.feature['annotation_labels'].int64_list.value)
  if 'answer_annotation_labels' in first_example.features.feature:
    assert num_annotations_per_example is None
    num_annotations_per_example = len(
        first_example.features.feature['answer_annotation_labels'].int64_list
        .value)
  # Currently, we force the number of entity and answer annotations to
  # be the same. That could be changed in the future rather easily.
  if 'entity_annotation_labels' in first_example.features.feature:
    assert num_annotations_per_example is not None
    assert num_annotations_per_example == len(
        first_example.features.feature['entity_annotation_labels'].int64_list
        .value)
  num_blocks_per_example = len(
      first_example.features.feature['block_ids'].int64_list.value)
  if num_annotations_per_example % num_blocks_per_example != 0:
    raise ValueError(
        'Record contain inconsistent input: '
        'num_blocks_per_example={}, num_annotations_per_example={}'.format(
            num_blocks_per_example, num_annotations_per_example))
  return num_annotations_per_example // num_blocks_per_example
def get_span_prediction_example_decode_fn(
    num_blocks_per_example,
    block_length,
    max_num_answer_annotations = None,
    max_num_entity_annotations = None,
    extra_int_features_shapes = None):
  """Returns a decode function to parse a single example into Tensors.

  Args:
    num_blocks_per_example: number of blocks packed into each example.
    block_length: number of tokens per block.
    max_num_answer_annotations: if not None, also parse answer annotation
      features (begins / ends / labels), this many per block.
    max_num_entity_annotations: if not None, also parse entity annotation
      features, this many per block.
    extra_int_features_shapes: optional dict mapping extra int64 feature
      names to their (unflattened) shapes.

  Returns:
    A function mapping a serialized tf.train.Example to a dict of int32
    Tensors, including a derived 'block_pos' feature.
  """
  max_seq_len = num_blocks_per_example * block_length
  # All features are stored flattened; they are reshaped after parsing
  # according to `reshape_features` below.
  name_to_features = {
      'token_ids': tf.FixedLenFeature([max_seq_len], tf.int64),
      'block_ids': tf.FixedLenFeature([num_blocks_per_example], tf.int64),
      'prefix_length': tf.FixedLenFeature([num_blocks_per_example], tf.int64),
  }
  if not extra_int_features_shapes:
    extra_int_features_shapes = dict()
  for feature_name, feature_shape in extra_int_features_shapes.items():
    # Extra features are parsed flat (product of their shape) and reshaped
    # later.
    total_length = 1
    for x in feature_shape:
      total_length *= x
    name_to_features[feature_name] = tf.FixedLenFeature([total_length],
                                                        tf.int64)
  if max_num_answer_annotations is not None:
    max_num_annotations_total = (
        num_blocks_per_example * max_num_answer_annotations)
    name_to_features.update({
        'answer_annotation_begins':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
        'answer_annotation_ends':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
        'answer_annotation_labels':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
    })
  if max_num_entity_annotations is not None:
    max_num_annotations_total = (
        num_blocks_per_example * max_num_entity_annotations)
    name_to_features.update({
        'entity_annotation_begins':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
        'entity_annotation_ends':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
        'entity_annotation_labels':
            tf.FixedLenFeature([max_num_annotations_total], tf.int64),
    })
  # Target per-block shapes; entries for features that were not requested
  # are simply never looked up.
  reshape_features = {
      'token_ids': [num_blocks_per_example, block_length],
      'answer_annotation_begins': [
          num_blocks_per_example, max_num_answer_annotations
      ],
      'answer_annotation_ends': [
          num_blocks_per_example, max_num_answer_annotations
      ],
      'answer_annotation_labels': [
          num_blocks_per_example, max_num_answer_annotations
      ],
      'entity_annotation_begins': [
          num_blocks_per_example, max_num_entity_annotations
      ],
      'entity_annotation_ends': [
          num_blocks_per_example, max_num_entity_annotations
      ],
      'entity_annotation_labels': [
          num_blocks_per_example, max_num_entity_annotations
      ],
  }
  reshape_features.update(extra_int_features_shapes)
  def _decode_fn(record):
    """Decodes a serialized tf.train.Example to a dictionary of Tensors.
    Arguments:
      record: A scalar string Tensor containing a serialized tf.train.Example.
    Returns:
      A dictionary of the decoded (and derived) Tensors.
    """
    example = tf.io.parse_single_example(record, name_to_features)
    for name in example.keys():
      t = example[name]
      if name in reshape_features:
        t = tf.reshape(t, reshape_features[name])
      # tf.Example only stores int64; the model works with int32.
      if t.dtype == tf.int64:
        t = tf.to_int32(t)
      example[name] = t
    # Derived feature: each block's 1-based position within its document.
    example['block_pos'] = make_block_pos_features(example['block_ids'])
    return example
  return _decode_fn
| StarcoderdataPython |
8191989 | <reponame>bisnupriyasahu/cmssw
from __future__ import print_function
# Auto generated configuration file
# with command line options: stepALCA --datatier ALCARECO --conditions auto:run2_data -s ALCA:PromptCalibProdSiStripGains --eventcontent ALCARECO -n 1000 --dasquery=file dataset=/ZeroBias/Run2016C-SiStripCalMinBias-18Apr2017-v1/ALCARECO run=276243 --no_exec
import FWCore.ParameterSet.Config as cms
import os
from Configuration.StandardSequences.Eras import eras
import Utilities.General.cmssw_das_client as das_client
###################################################################
def getFileNames_das_client():
###################################################################
    """Return files for given DAS query via das_client.

    Queries DAS for ZeroBias SiStripCalMinBias ALCARECO datasets at
    T2_CH_CERN, picks the last dataset returned, and lists its files.
    Returns an empty list when either DAS query does not report 'ok'.
    """
    files = []
    query = "dataset dataset=/ZeroBias/Run2*SiStripCalMinBias-*/ALCARECO site=T2_CH_CERN"
    jsondict = das_client.get_data(query)
    status = jsondict['status']
    if status != 'ok':
        print("DAS query status: %s"%(status))
        return files
    data = jsondict['data']
    viableDS = []
    for element in data:
        viableDS.append(element['dataset'][0]['name'])
    # The last dataset in the DAS reply is used for the file listing.
    print("Using Dataset:",viableDS[-1])
    query = "file dataset=%s site=T2_CH_CERN | grep file.name" % viableDS[-1]
    jsondict = das_client.get_data(query)
    status = jsondict['status']
    if status != 'ok':
        print("DAS query status: %s"%(status))
        return files
    mongo_query = jsondict['mongo_query']
    filters = mongo_query['filters']
    data = jsondict['data']
    # NOTE(review): redundant re-initialization -- `files` is already [].
    files = []
    for row in data:
        # Take the first value matched by the 'grep' filter for this row.
        the_file = [r for r in das_client.get_value(row, filters['grep'])][0]
        if len(the_file) > 0 and not the_file in files:
            files.append(the_file)
    return files
###################################################################
# CMSSW job: run the PromptCalibProdSiStripGains ALCA producer over a few
# events of a DAS-discovered ALCARECO file and write the calibration output.
process = cms.Process('testFromALCARECO')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
###################################################################
# Messages
###################################################################
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.categories.append("SiStripGainsPCLWorker")
process.MessageLogger.destinations = cms.untracked.vstring("cout")
process.MessageLogger.cout = cms.untracked.PSet(
    threshold = cms.untracked.string("DEBUG"),
    default = cms.untracked.PSet(limit = cms.untracked.int32(0)),
    FwkReport = cms.untracked.PSet(limit = cms.untracked.int32(-1),
                                   reportEvery = cms.untracked.int32(1000)
                                   ),
    SiStripGainsPCLWorker = cms.untracked.PSet( limit = cms.untracked.int32(-1)),
    )
process.MessageLogger.statistics.append('cout')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.AlCaRecoStreams_cff')
process.load('Configuration.StandardSequences.EndOfProcess_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
# Only a short smoke test: 10 events.
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(10)
)
# Query DAS for suitable input; skip the test gracefully when none exists.
INPUTFILES=getFileNames_das_client()
if len(INPUTFILES)==0:
    print("** WARNING: ** According to a DAS query no suitable data for test is available. Skipping test")
    os._exit(0)
myFiles = cms.untracked.vstring()
myFiles.extend([INPUTFILES[0][0].replace("\"","")])
# Input source
process.source = cms.Source("PoolSource",
                            fileNames = myFiles,
                            secondaryFileNames = cms.untracked.vstring()
)
process.options = cms.untracked.PSet()
# Additional output definition
process.ALCARECOStreamPromptCalibProdSiStripGains = cms.OutputModule("PoolOutputModule",
    SelectEvents = cms.untracked.PSet(SelectEvents = cms.vstring('pathALCARECOPromptCalibProdSiStripGains')
                                      ),
    dataset = cms.untracked.PSet(dataTier = cms.untracked.string('ALCARECO'),
                                 filterName = cms.untracked.string('PromptCalibProdSiStripGains')
                                 ),
    eventAutoFlushCompressedSize = cms.untracked.int32(5242880),
    fileName = cms.untracked.string('PromptCalibProdSiStripGains.root'),
    outputCommands = cms.untracked.vstring('drop *',
                                           'keep *_MEtoEDMConvertSiStripGains_*_*'
                                           )
)
# Other statements
process.ALCARECOEventContent.outputCommands.extend(process.OutALCARECOPromptCalibProdSiStripGains_noDrop.outputCommands)
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_data', '')
# Path and EndPath definitions
process.endjob_step = cms.EndPath(process.endOfProcess)
process.ALCARECOStreamPromptCalibProdSiStripGainsOutPath = cms.EndPath(process.ALCARECOStreamPromptCalibProdSiStripGains)
# Schedule definition
process.schedule = cms.Schedule(process.pathALCARECOPromptCalibProdSiStripGains,process.endjob_step,process.ALCARECOStreamPromptCalibProdSiStripGainsOutPath)
from PhysicsTools.PatAlgos.tools.helpers import associatePatAlgosToolsTask
associatePatAlgosToolsTask(process)
# Customisation from command line
# Add early deletion of temporary data products to reduce peak memory need
from Configuration.StandardSequences.earlyDeleteSettings_cff import customiseEarlyDelete
process = customiseEarlyDelete(process)
# End adding early deletion
| StarcoderdataPython |
11354643 | import numpy as np
import yaml
import collections
import torch
import json
from PIL import Image
import pandas as pd
from pathlib import Path
from .scene_dataset import SceneDataset
class RealSceneDataset(SceneDataset):
    """Dataset of real scenes captured with kinect/kinect2 cameras.

    Reads a `config.yaml` scene description plus per-scene image folders
    under `root/scenes/`, builds a pandas frame index of
    (scene, config, camera, view) combinations, and serves resized RGB
    images with object annotations.
    """
    def __init__(self, root, n_objects=None, resize=(320, 240)):
        self.root = Path(root)
        self.name = self.root.name
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # in PyYAML >= 5.1 -- confirm which Loader semantics are intended.
        self.config = yaml.load((self.root / 'config.yaml').read_text())
        # All objects are cubes with a fixed half-height scale.
        self.scales = dict()
        for i in range(1,13):
            self.scales[f'cube{i}'] = np.array([0.035, 0.035, 0.0175])
        # One category = one name in this dataset
        self.objects = np.unique(sum([scene['objects'] for scene in self.config['scenes']], []))
        self.categories = {i: o for i, o in enumerate(self.objects)}
        self.text_to_category = {o: i for i, o in self.categories.items()}
        self.object_categories = self.categories
        self.resize = resize
        self.frame_index = self.build_frame_index()
        # Optionally keep only frames with a given object count (a single
        # int or an iterable of ints).
        if n_objects is not None:
            if not isinstance(n_objects, collections.abc.Iterable):
                n_objects = [n_objects]
            self.frame_index = self.frame_index[self.frame_index['n_objects'].isin(n_objects)]
    def build_frame_index(self):
        """Scan `root/scenes/` and return a DataFrame with one row per
        (scene_id, config_id, cam_id, view_id) frame."""
        scenes_dir = self.root / 'scenes'
        scene_ids, config_ids, cam_ids, view_ids, n_objects = [], [], [], [], []
        # Directory names follow the '<scene_id>-<config_id>' pattern.
        for d in scenes_dir.iterdir():
            scene_id = int(d.name.split('-')[0])
            config_id = int(d.name.split('-')[1])
            # Camera/view counts are inferred from the image file names.
            n_cameras = len(list(d.glob('0-kinect*-0-rgb.png')))
            n_views = len(list(d.glob('*-kinect-0-rgb.png')))
            scene_ids += [scene_id] * n_cameras * n_views
            config_ids += [config_id] * n_cameras * n_views
            n_objects += [len(self.config['scenes'][scene_id]['objects'])] * n_cameras * n_views
            cam_ids += list(range(n_cameras)) * n_views
            view_ids += list(np.arange(n_views).repeat(n_cameras))
        frame_index = pd.DataFrame({'scene_id': scene_ids, 'config_id': config_ids,
                                    'cam_id': cam_ids, 'view_id': view_ids,
                                    'n_objects':n_objects})
        return frame_index
    def load(self, scene_id, config_id, view_id, cam_id):
        """Load one frame: returns (rgb tensor, empty segmentation tensor,
        annotation dict with categories/names/aabbs)."""
        scene_config_dir = self.root / 'scenes' / f'{scene_id}-{config_id}'
        cam = 'kinect' if cam_id == 0 else 'kinect2'
        rgb = Image.open(scene_config_dir / f'{view_id}-{cam}-0-rgb.png')
        if cam == 'kinect2':
            # Center-crop kinect2 images to 1408x1056 before resizing.
            width, height = rgb.size
            new_width, new_height = 1408, 1056
            left = (width - new_width)/2
            top = (height - new_height)/2
            right = (width + new_width)/2
            bottom = (height + new_height)/2
            rgb = rgb.crop((left, top, right, bottom))
        rgb = rgb.resize(self.resize, resample=Image.BILINEAR)
        rgb = torch.tensor(np.array(rgb))
        obs = json.loads((scene_config_dir / 'infos.json').read_text())
        # Older annotation files only carry 'names'; mirror them.
        if 'categories' not in obs:
            obs['categories'] = obs['names']
        positions = obs['positions']
        # Axis-aligned bounding boxes centered on each object position.
        scales = np.stack([self.scales[name] for name in obs['categories']])
        aabbs = tuple(zip(positions - scales / 2, positions + scales / 2))
        obs.update(aabbs=aabbs, categories_txt=obs['categories'])
        obs.update(categories=[self.text_to_category[c] for c in obs['categories_txt']])
        obs.update(names=obs['categories_txt'])
        # No real segmentation available: return an all-zero mask.
        segm = torch.zeros(rgb.shape[:2])
        return rgb, segm, obs
| StarcoderdataPython |
1833485 | <reponame>koll00/Gui_SM
# -*- coding:utf-8 -*-
#
# Copyright © 2011-2012 <NAME>
# Licensed under the terms of the MIT License
# (see SMlib/__init__.py for details)
"""
IPython v0.13+ client's widget
"""
# IPython imports
'''
try: # 1.0
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
except ImportError: # 0.13
from IPython.frontend.qt.console.rich_ipython_widget import RichIPythonWidget
'''
from IPython.qt.console.rich_ipython_widget import RichIPythonWidget
# Qt imports
from PyQt4.QtGui import QTextEdit, QKeySequence, QShortcut
from PyQt4.QtCore import SIGNAL, Qt
from SMlib.utils.qthelpers import restore_keyevent
# Local imports
from SMlib.config import CONF
from SMlib.utils import programs
from SMlib.widgets.mixins import (BaseEditMixin, InspectObjectMixin,
TracebackLinksMixin)
class IPythonControlWidget(TracebackLinksMixin, InspectObjectMixin, QTextEdit,
                           BaseEditMixin):
    """
    Subclass of QTextEdit with features from Spyder's mixins to use as the
    control widget for IPython widgets
    """
    QT_CLASS = QTextEdit
    def __init__(self, parent=None):
        QTextEdit.__init__(self, parent)
        BaseEditMixin.__init__(self)
        TracebackLinksMixin.__init__(self)
        InspectObjectMixin.__init__(self)
        self.calltips = False # To not use Spyder calltips
        self.found_results = []
    def showEvent(self, event):
        """Reimplement Qt Method"""
        self.emit(SIGNAL("visibility_changed(bool)"), True)
    def _key_question(self, text):
        """ Action for '?' and '(' """
        # Show the docstring of the object just typed (via the inspector),
        # then still insert the triggering character into the console.
        parent = self.parentWidget()
        self.current_prompt_pos = parent._prompt_pos
        if self.get_current_line_to_cursor():
            last_obj = self.get_last_obj()
            if last_obj and not last_obj.isdigit():
                self.show_docstring(last_obj)
        self.insert_text(text)
    def keyPressEvent(self, event):
        """Reimplement Qt Method - Basic keypress event handler"""
        event, text, key, ctrl, shift = restore_keyevent(event)
        # '?' and '(' open the inspector when enabled; all other keys fall
        # through to the plain QTextEdit handler.
        if key == Qt.Key_Question and not self.has_selected_text() and \
          self.set_inspector_enabled:
            self._key_question(text)
        elif key == Qt.Key_ParenLeft and not self.has_selected_text() \
          and self.set_inspector_enabled:
            self._key_question(text)
        else:
            # Let the parent widget handle the key press event
            QTextEdit.keyPressEvent(self, event)
    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonControlWidget, self).focusInEvent(event)
    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonControlWidget, self).focusOutEvent(event)
class IPythonPageControlWidget(QTextEdit, BaseEditMixin):
    """
    Subclass of QTextEdit with features from Spyder's mixins.BaseEditMixin to
    use as the paging widget for IPython widgets
    """
    QT_CLASS = QTextEdit
    def __init__(self, parent=None):
        QTextEdit.__init__(self, parent)
        BaseEditMixin.__init__(self)
        self.found_results = []
    def showEvent(self, event):
        """Reimplement Qt Method"""
        self.emit(SIGNAL("visibility_changed(bool)"), True)
    def keyPressEvent(self, event):
        """Reimplement Qt Method - Basic keypress event handler"""
        # '/' opens the find widget while the pager is visible.
        # NOTE(review): other keys are not forwarded to QTextEdit here
        # (no super call) -- presumably intentional for a read-only pager,
        # but confirm.
        event, text, key, ctrl, shift = restore_keyevent(event)
        if key == Qt.Key_Slash and self.isVisible():
            self.emit(SIGNAL("show_find_widget()"))
    def focusInEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonPageControlWidget, self).focusInEvent(event)
    def focusOutEvent(self, event):
        """Reimplement Qt method to send focus change notification"""
        self.emit(SIGNAL('focus_changed()'))
        return super(IPythonPageControlWidget, self).focusOutEvent(event)
class SMIPythonWidget(RichIPythonWidget):
"""
Spyder's IPython widget
This class has custom control and page_control widgets, additional methods
to provide missing functionality and a couple more keyboard shortcuts.
"""
def __init__(self, *args, **kw):
# To override the Qt widget used by RichIPythonWidget
self.custom_control = IPythonControlWidget
self.custom_page_control = IPythonPageControlWidget
super(SMIPythonWidget, self).__init__(*args, **kw)
self.set_background_color()
# --- Spyder variables ---
self.ipyclient = None
# --- Keyboard shortcuts ---
inspectsc = QShortcut(QKeySequence("Ctrl+I"), self,
self._control.inspect_current_object)
inspectsc.setContext(Qt.WidgetWithChildrenShortcut)
clear_consolesc = QShortcut(QKeySequence("Ctrl+L"), self,
self.clear_console)
clear_consolesc.setContext(Qt.WidgetWithChildrenShortcut)
# --- IPython variables ---
# To send an interrupt signal to the Spyder kernel
self.custom_interrupt = True
# To restart the Spyder kernel in case it dies
self.custom_restart = True
#---- Public API ----------------------------------------------------------
def set_ipyclient(self, ipyclient):
"""Bind this IPython widget to an IPython client widget
(see SMlib/plugins/ipythonconsole.py)"""
self.ipyclient = ipyclient
self.exit_requested.connect(ipyclient.exit_callback)
def show_banner(self):
"""Banner for IPython widgets with pylab message"""
from IPython.core.usage import default_gui_banner
banner = default_gui_banner
pylab_o = CONF.get('ipython_console', 'pylab', True)
autoload_pylab_o = CONF.get('ipython_console', 'pylab/autoload', True)
mpl_installed = programs.is_module_installed('matplotlib')
if mpl_installed and (pylab_o and autoload_pylab_o):
backend_o = CONF.get('ipython_console', 'pylab/backend', 0)
backends = {0: 'module://IPython.zmq.pylab.backend_inline',
1: 'Qt4Agg', 2: 'Qt4Agg', 3: 'MacOSX', 4: 'GTKAgg',
5: 'WXAgg', 6: 'TKAgg'}
pylab_013_message = """
Welcome to pylab, a matplotlib-based Python environment [backend: %s].
For more information, type 'help(pylab)'.\n""" % backends[backend_o]
pylab_1_message = """
Populating the interactive namespace from numpy and matplotlib"""
if programs.is_module_installed('IPython', '>=1.0'):
banner = banner + pylab_1_message
else:
banner = banner + pylab_013_message
sympy_o = CONF.get('ipython_console', 'symbolic_math', True)
if sympy_o:
lines = """
These commands were executed:
>>> from __future__ import division
>>> from sympy import *
>>> x, y, z, t = symbols('x y z t')
>>> k, m, n = symbols('k m n', integer=True)
>>> f, g, h = symbols('f g h', cls=Function)
"""
banner = banner + lines
return banner
def clear_console(self):
self.execute("%clear")
def write_to_stdin(self, line):
"""
Send raw characters to the IPython kernel through stdin
but only if the kernel is currently looking for raw input.
"""
if self._reading:
if programs.is_module_installed('IPython', '>=1.0'):
self.kernel_client.stdin_channel.input(line)
else:
self.kernel_manager.stdin_channel.input(line)
def set_background_color(self):
    """Apply the dark ('linux') color scheme when the light-background
    option is disabled in the configuration."""
    lightbg_o = CONF.get('ipython_console', 'light_color', True)
    if not lightbg_o:
        self.set_default_style(colors='linux')
#---- IPython private methods ---------------------------------------------
def _context_menu_make(self, pos):
    """Reimplement the IPython context menu"""
    # Build the stock menu, then let the client append its own actions.
    menu = super(SMIPythonWidget, self)._context_menu_make(pos)
    return self.ipyclient.add_actions_to_context_menu(menu)
def _banner_default(self):
    """
    Reimplement banner creation to let the user decide if he wants a
    banner or not
    """
    banner_o = CONF.get('ipython_console', 'show_banner', True)
    if banner_o:
        return self.show_banner()
    else:
        # An empty string suppresses the banner entirely.
        return ''
#---- Qt methods ----------------------------------------------------------
def focusInEvent(self, event):
    """Reimplement Qt method to send focus change notification"""
    self.emit(SIGNAL('focus_changed()'))
    return super(SMIPythonWidget, self).focusInEvent(event)
def focusOutEvent(self, event):
    """Reimplement Qt method to send focus change notification"""
    self.emit(SIGNAL('focus_changed()'))
    return super(SMIPythonWidget, self).focusOutEvent(event)
| StarcoderdataPython |
8172876 | <filename>apps/cowry_docdata/admin.py
from babel.numbers import format_currency
from django.contrib import admin
from django.core.urlresolvers import reverse
from django.utils import translation
from .models import DocDataPaymentOrder, DocDataPayment, DocDataPaymentLogEntry
class DocDataPaymentLogEntryInine(admin.TabularInline):
    # NOTE(review): the class name looks like a typo for "...Inline";
    # renaming would break the reference in DocDataPaymentOrderAdmin.inlines,
    # so it is kept as-is here.
    model = DocDataPaymentLogEntry
    can_delete = False
    # extra = 0 and max_num = 0 make this inline strictly read-only:
    # no blank forms and no new rows can be added.
    extra = 0
    max_num = 0
    fields = ('timestamp', 'level', 'message')
    readonly_fields = fields
class DocDataPaymentInline(admin.TabularInline):
    # Read-only inline listing of the payments attached to a payment order.
    model = DocDataPayment
    can_delete = False
    extra = 0
    max_num = 0
    fields = ('payment_method', 'status', 'created', 'updated')
    readonly_fields = fields
class DocDataPaymentOrderAdmin(admin.ModelAdmin):
    list_filter = ('status',)
    list_display = ('created', 'amount_override', 'status')
    raw_id_fields = ('order',)
    search_fields = ('payment_order_id', 'merchant_order_reference')
    inlines = (DocDataPaymentInline, DocDataPaymentLogEntryInine)

    def amount_override(self, obj):
        """Render the order amount as a currency string localized to the
        active language."""
        # The amount is presumably stored in cents, hence the /100 --
        # TODO confirm against the DocDataPaymentOrder model.
        language = translation.get_language().split('-')[0]
        return format_currency(obj.amount / 100, obj.currency, locale=language)
    amount_override.short_description = 'amount'

admin.site.register(DocDataPaymentOrder, DocDataPaymentOrderAdmin)
class DocDataPaymentLogEntryAdmin(admin.ModelAdmin):
    # List view.
    list_display = ('payment', 'level', 'message')
    list_filter = ('level', 'timestamp')
    search_fields = ('message',)

    def payment(self, obj):
        """Render a link to the related payment order's admin change page."""
        payment = obj.docdata_payment_order
        url = reverse('admin:%s_%s_change' % (payment._meta.app_label, payment._meta.module_name), args=[payment.id])
        # NOTE(review): str(payment) is interpolated into markup without
        # HTML escaping while allow_tags is set -- confirm the model's
        # string representation cannot contain user-controlled markup.
        return "<a href='%s'>%s</a>" % (str(url), payment)
    payment.allow_tags = True

    # Don't allow the detail view to be accessed.
    def has_change_permission(self, request, obj=None):
        if not obj:
            return True
        return False

admin.site.register(DocDataPaymentLogEntry, DocDataPaymentLogEntryAdmin)
| StarcoderdataPython |
4839915 | __author__ = '<NAME>'
import roslib; roslib.load_manifest('aidu_gui')
from PySide import QtGui, QtCore
from PySide.QtGui import QApplication
from time import sleep
from window import Window
from ros_thread import ROSThread
class Manager:
    """
    The manager for the application GUI. This internally handles all the other GUI elements that are necessary to
    display the system. It also handles shutdown events and appropriately shuts down the ROS thread whenever the user
    wants to exit the application.
    """

    def __init__(self):
        pass

    @staticmethod
    def setup():
        # Build the Qt application, the fullscreen main window and the
        # background ROS thread; everything is stored on the class itself.
        Manager.app = QApplication([])
        Manager.app.aboutToQuit.connect(Manager.exit)
        font = Manager.app.font()
        font.setPointSize(18)
        Manager.app.setFont(font)
        Manager.window = Window()
        Manager.window.showFullScreen()
        Manager.window.activateWindow()
        Manager.ros_thread = ROSThread(Manager.app)
        Manager.ros_thread.start()
        # Ctrl+Q closes the fullscreen window (no window chrome to click).
        QtGui.QShortcut(QtGui.QKeySequence(QtCore.Qt.CTRL + QtCore.Qt.Key_Q), Manager.window,
                        Manager.window.close)

    @staticmethod
    def execute():
        """
        Starts execution of the GUI. Returns the application's exit code when it is shut down.
        """
        return Manager.app.exec_()

    @staticmethod
    def exit():
        """
        Callback function for when the user exits the application.
        This will attempt to stop the ROS thread and will wait before shutting the GUI down.
        """
        Manager.ros_thread.stop()
        # Poll until the ROS thread reports completion; Ctrl+C aborts the wait.
        while not Manager.ros_thread.done:
            try:
                sleep(0.1)
            except KeyboardInterrupt:
                break
| StarcoderdataPython |
1844523 | # -*- coding: utf-8 -*-
from chainer import Chain
import chainer.functions as F
import chainer.links as L
class ImageCnn(Chain):
    """AlexNet-style convolutional image classifier (chainer).

    NOTE(review): ``output_channel``, ``filters`` and ``mid_units`` are
    accepted but never used -- all layer sizes are hard-coded below;
    confirm whether they were meant to parameterize the network.
    """

    def __init__(self, input_channel, output_channel, filters, mid_units, n_label):
        super(ImageCnn, self).__init__(
            # input_channel: 1 = grayscale, 3 = RGB, etc.
            conv1=L.Convolution2D(input_channel, 96, 11, stride=4),
            conv2=L.Convolution2D(96, 256, 5, pad=2),
            conv3=L.Convolution2D(256, 384, 3, pad=1),
            conv4=L.Convolution2D(384, 384, 3, pad=1),
            conv5=L.Convolution2D(384, 256, 3, pad=1),
            # Linear layers infer their input size on first call (None).
            fc6=L.Linear(None, 4096),
            fc7=L.Linear(None, 4096),
            fc8=L.Linear(None, n_label),
        )

    def __call__(self, x):
        """Forward pass; returns unnormalized class scores (logits)."""
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 1, stride=2)
        # Dropout on both fully-connected layers.
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)
        return h
| StarcoderdataPython |
6565302 | <gh_stars>1-10
import torch
from torchvision import transforms
import itertools
import numpy as np
from config import cfg
out_size = cfg.input_size

# Resize-crop transform: converts a tensor image to PIL, resizes it to the
# configured square input size and converts it back to a tensor.
rct = transforms.Compose([transforms.ToPILImage(),
                          transforms.Resize((out_size,out_size)),
                          transforms.ToTensor()])
def random_crop(image, crop_size, num_of_crops):
    """Return the resized full image plus ``num_of_crops`` random square crops.

    Args:
        image: CHW image tensor.
        crop_size: side length (pixels) of each crop.
        num_of_crops: number of random crops to draw.

    Returns:
        (image_out, bbox_out): image_out stacks num_of_crops + 1 resized
        images (full image first); bbox_out holds [top_left, bottom_right]
        corners for the crops only, so it has num_of_crops entries.
    """
    _,img_row,img_col = image.shape
    crops, bboxes = [], []
    crops.append(rct(image))
    for i in range(num_of_crops):
        # The top-left corner is sampled uniformly from the top-left
        # quadrant; the crop may therefore extend past the image border,
        # in which case slicing yields a smaller crop (rct resizes all
        # crops to the same output size anyway).
        # NOTE(review): the recorded bbox keeps the unclipped bottom-right
        # corner in that case -- confirm downstream IoU code expects this.
        coefficients = np.random.rand(2)
        top_left = np.array([coefficients[0]*img_row/2, coefficients[1]*img_col/2],dtype=int)
        bottom_right = top_left+crop_size
        # print("top left row:",top_left[0],"bottom right row:",bottom_right[0],
        #       "top left col:",top_left[1],"bottom right col:",bottom_right[1])
        crop = image[:,top_left[0]:bottom_right[0], top_left[1]:bottom_right[1]]
        crops.append(rct(crop))
        bboxes.append(np.array([top_left, bottom_right]))
    image_out = torch.stack(crops,dim=0)
    bbox_out = np.array(bboxes)
    return image_out, bbox_out
def create_combinations(bboxes):
    """Return all unordered index pairs (i, j), i < j, over the given boxes."""
    indices = np.arange(len(bboxes))
    return list(itertools.combinations(indices, 2))
def calc_iou(bbox1, bbox2, orig_img_size = 224):
template1 = np.zeros((orig_img_size,orig_img_size))
template2 = np.zeros((orig_img_size,orig_img_size))
template1[bbox1[0][0]:bbox1[1][0], bbox1[0][1]:bbox1[1][1]] = 1
template2[bbox2[0][0]:bbox2[1][0], bbox2[0][1]:bbox2[1][1]] = 1
iou_mask = template1+template2
_,cnts = np.unique(iou_mask, return_counts=True)
if len(cnts) == 3:
iou = cnts[2]/(cnts[1]+cnts[2])
else:
iou = 0
return torch.tensor([iou])
def calculate_ious(combinations, bboxes):
    """IoU for every index pair in ``combinations``; returns a (P, 1) tensor."""
    ious = []
    for comb in combinations:
        ious.append(calc_iou(bboxes[comb[0]], bboxes[comb[1]]))
    return torch.stack(ious, dim=0)
def calculate_ious_batch(combinations, bboxes):
    """Apply calculate_ious per batch element; returns a (B, P, 1) tensor."""
    ious = []
    for bbox_elm in bboxes:
        ious.append(calculate_ious(combinations, bbox_elm))
    return torch.stack(ious, dim=0)
def calculate_ious_for_img(bboxes, orig_img_size = 224):
    """Fraction of an ``orig_img_size`` square canvas covered by the union
    of the given boxes (each box is [[top, left], [bottom, right]]).

    Returns a 1-element tensor in [0, 1).
    NOTE(review): a canvas that is *fully* covered falls through to 0,
    because np.unique then reports a single value -- kept as-is.
    """
    canvas = np.zeros((orig_img_size, orig_img_size))
    for (top, left), (bottom, right) in bboxes:
        canvas[top:bottom, left:right] = 1
    _, counts = np.unique(canvas, return_counts=True)
    if len(counts) == 2:
        coverage = counts[1] / (counts[0] + counts[1])
    else:
        coverage = 0
    return torch.tensor([coverage])
def calculate_ious_for_img_batch(bboxes):
    """Apply calculate_ious_for_img per batch element; returns a (B, 1) tensor."""
    ious = []
    for bbox_elm in bboxes:
        ious.append(calculate_ious_for_img(bbox_elm))
    return torch.stack(ious, dim=0)
def normalize_vector(x):
    """L2-normalise every row of a 2-D tensor."""
    row_norms = x.norm(p=2, dim=1, keepdim=True)
    return x / row_norms.expand_as(x)
def calculate_cosdists(combinations, emb_vectors):
    """Cosine similarity for each embedding pair listed in ``combinations``.

    Returns a 1-D float32 tensor with one similarity per pair.
    """
    emb_vectors = normalize_vector(emb_vectors)
    # With unit-norm rows, linear(x, x) is the full cosine-similarity matrix.
    sim_mat = torch.nn.functional.linear(emb_vectors, emb_vectors)
    similarity_vector = torch.zeros(1,len(combinations)).squeeze(0)
    for cnt, comb in enumerate(combinations):
        similarity_vector[cnt] = sim_mat[comb[0], comb[1]]
    # Free the O(N^2) matrix eagerly.
    del sim_mat
    return similarity_vector
def calculate_cosdists_batch(combinations_batch, emb_vectors):
    """Apply calculate_cosdists per batch element; returns a (B, P) tensor."""
    similarity_vector_batch = []
    for i,combinations in enumerate(combinations_batch):
        similarity_vector_batch.append(calculate_cosdists(combinations,
                                                          emb_vectors[i]))
    return torch.stack(similarity_vector_batch, dim=0)
# image = torch.rand((3,512,512))
# crop_size = 512
# num_of_crops = 4
# image_out, bbox_out = random_crop(image, crop_size, num_of_crops)
#
# combinations = create_combinations(bbox_out)
#
# ious = calculate_ious(combinations, bbox_out)
#
# bbox_out_batch = np.stack((bbox_out,bbox_out), axis=0)
# ious_batch = calculate_ious_batch(combinations, bbox_out_batch)
#
# emb_vectors = torch.rand((3,512))
# cos_dists = calculate_cosdists(combinations, emb_vectors) | StarcoderdataPython |
5014494 | <gh_stars>1-10
import random
from debug import dump_func_name
import sys
import logging
import time
class noise(object):
    """Channel emulator: drops one packet per BER window and sleeps for a
    random delay (uniform around a configured average, in ms) per packet."""

    # @dump_func_name
    def __init__(self, ber, delay):
        self.total_packets_sent = 0
        self.total_errors = 0
        self.logger = logging.getLogger('myapp')
        # NOTE(review): a new FileHandler is added to the shared 'myapp'
        # logger on every instantiation, so multiple instances would
        # duplicate log lines.
        hdlr = logging.FileHandler('./noise.log')
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)
        self.set_err_rate(ber)
        self.set_ave_delay(delay)

    @dump_func_name
    def set_ave_delay(self, delay):
        # Clamp so that 2 * ave_delay (the randint upper bound used in
        # get_delay) cannot exceed sys.maxsize.
        if 2 * delay > sys.maxsize:
            self.ave_delay = sys.maxsize / 2
        else:
            self.ave_delay = delay

    # @dump_func_name
    def get_delay(self):
        # Uniform in [0, 2 * ave_delay] so the mean equals ave_delay (ms).
        self.delay = random.randint(0, 2 * self.ave_delay)
        # logger.info('%sms delay',self.delay)
        return self.delay

    # @dump_func_name
    def set_err_rate(self, ber):
        """Set the bit/packet error rate to 1/ber and pick which packet
        inside each ber-sized window will be dropped."""
        self.err_rate = ber
        # assume that rate < 1 / maxint is negligible
        if self.err_rate > sys.maxsize:
            self.err_rate = sys.maxsize
        # set error packet as random packet between 1 & ber
        self.err_pkt = random.randint(1, self.err_rate)
        print("packet", self.err_pkt, " of ", self.err_rate, " will be lost")
        # logger.info('BER: 1/',ber,'packet: ',self.err_pkt)
        # NOTE(review): 'str' shadows the builtin here and below.
        str = "BER:1/%s #%s" % (self.err_rate, self.err_pkt)
        self.logger.warning(str)

    # @dump_func_name
    def is_packet_lost(self):
        """Account one transmitted packet and return True if it is dropped.

        Also sleeps for the current random delay to emulate latency.
        """
        self.total_packets_sent += 1
        # Is_error if total_packets mod ber = error_packet
        if (self.total_packets_sent % self.err_rate == self.err_pkt):
            self.lost = True
            self.total_errors += 1
        else:
            self.lost = False
        # change error packet sequence number on every ber packets
        if (self.total_packets_sent % self.err_rate == self.err_rate - 1):
            self.set_err_rate(self.err_rate)
        self.get_delay()
        str = "# %s err: %s BER:1/%s >%s %s - delay %sms" % (self.total_packets_sent,
            self.total_errors, self.err_rate, self.err_pkt, self.lost, self.delay)
        self.logger.info(str)
        time.sleep(self.delay / 1000)
        return self.lost

    @dump_func_name
    def apply_emu(self):
        # Convenience: draw a fresh delay and account one packet.
        self.get_delay()
        self.is_packet_lost()
if __name__ == "__main__":
# lose 1 in 500
n = noise(18, 100)
for i in range(50):
f = n.is_packet_lost()
if f is True:
print(i, " is lost - ", f)
# else:
# print(i, " is a-okay")
| StarcoderdataPython |
4916453 | import cv2
import numpy as np
#load image
img = cv2.imread('homography-test.jpg', cv2.IMREAD_COLOR)
#corners of book covers (before)
frontCoverPtsBefore = np.array([[32, 48], [279, 136], [247, 430], [39, 281]], dtype="float32")
backCoverPtsBefore = np.array([[279, 136], [474, 36], [463, 316], [247, 430]], dtype="float32")
#corners of book covers (after)
frontCoverPtsAfter = np.array([[0, 0], [299, 0], [299, 599], [0, 599]], dtype="float32")
backCoverPtsAfter = np.array([[300, 0], [599, 0], [599, 599], [300, 599]], dtype="float32")
#get the transformation matrices for both covers
M_front = cv2.getPerspectiveTransform(frontCoverPtsBefore, frontCoverPtsAfter)
M_back = cv2.getPerspectiveTransform(backCoverPtsBefore, backCoverPtsAfter)
#warpPerspective both images
img_front = cv2.warpPerspective(img, M_front, (600, 600))
img_back = cv2.warpPerspective(img, M_back, (600, 600))
#copy half of the warped back cover into the warped front cover
np.copyto(img_front[:, 300:, :], img_back[:, 300:, :])
#display before and after
cv2.imshow('img', img)
cv2.imshow('img_front', img_front)
cv2.waitKey(0)
cv2.destroyAllWindows() | StarcoderdataPython |
6596845 | <reponame>OpenRTDynamics/PythonAPI_Experiments<filename>openrtdynamics2/lang/signal_interface.py<gh_stars>0
from . import lang as dy
from . import block_prototypes as block_prototypes
from .diagram_core.signal_network.signals import Signal, UndeterminedSignal, BlockOutputSignal, SimulationInputSignal
from typing import Dict, List
"""
This adds a layer around the signal-class.
It enhances the ease of use of signals by implementing operators
in-between signals e.g. it becomes possible to add, multiply, ...
signal variables among each other.
"""
class structure:
    """
    A structure to combine multiple signals

    It behaves similarly to a python hash array.
    Further, the keys are used as (part of) the variable names in the generated code.
    """
    def __init__(self, *args, **kwargs):
        # Accept either one positional dict or keyword arguments; either
        # way, the keys become the signal names.
        if len(args) == 1:
            self._entries = args[0]
        else:
            self._entries = kwargs

        # list of signals; deterministic order
        self._signals = []
        self._keys = []

        for k, s in self._entries.items():
            # Storing a signal renames it after its key.
            s.set_name( k )

            self._signals.append(s)
            self._keys.append(k)

    def __getitem__(self, key):
        return self._entries[key]

    def __setitem__(self, key, signal):
        # Adding a signal also renames it after the key.
        signal.set_name( key )

        self._entries[key] = signal
        self._signals.append(signal)
        self._keys.append(key)

    def items(self):
        """
        items function - like a hash array has
        """
        return self._entries.items()

    def to_list(self):
        """
        return an ordered list of the signals stored in this structure
        """
        return self._signals

    def replace_signals(self, signals):
        """
        replace the signals stored in this structure with the given list of signals.
        The given list shall have the same order like the list returned by to_list().
        """
        for i in range(0, len(signals)):
            s = signals[i]
            k = self._keys[i]

            self._entries[k] = s

        self._signals = signals
def convert_python_constant_val_to_const_signal(val):
    """Convert a plain python int/float into a constant signal.

    Signal objects pass through unchanged; any other type raises.
    NOTE(review): type() comparison (not isinstance) means bool values are
    rejected -- presumably intentional, verify.
    """
    if isinstance(val, SignalUserTemplate):
        # given value is already a signal
        return val

    if type(val) == int: # TODO: check for range and eventually make int64
        return dy.int32(val)

    if type(val) == float:
        return dy.float64(val)

    raise BaseException('unable to convert given source ' + str(val) + ' to a signal object.')
# internal helper
def _comparison(left, right, operator : str ):
    # Build a comparison block in the current system and wrap its
    # (boolean) output signal.
    return wrap_signal( block_prototypes.ComparisionOperator(dy.get_current_system(), left.unwrap, right.unwrap, operator).outputs[0] )
class SignalUserTemplate(object):
    """User-facing wrapper around an internal Signal.

    Adds a fluent naming/property API and operator overloads: arithmetic
    and comparison operators build new computation blocks in the current
    system and return freshly wrapped output signals.
    """

    def __init__(self, system, wrapped_signal : Signal):
        self._system = system
        self._wrapped_signal = wrapped_signal

    def __hash__(self):
        # Identity-based hash; note __eq__ below does NOT return a bool --
        # it builds a comparison signal instead.
        return id(self)

    @property
    def unwrap(self):
        """
        Get the library-internal representation of a signal (internal use only)
        """
        return self._wrapped_signal

    @property
    def name(self):
        """
        the identifier of the signal
        """
        return self._wrapped_signal.name

    @property
    def properties(self):
        """
        A hash array of properties describing the signal
        """
        return self._wrapped_signal.properties

    def set_properties(self, p):
        """
        Set the properties of the signal
        """
        self._wrapped_signal.properties = { **self._wrapped_signal.properties, **p }
        return self

    def set_datatype(self, datatype):
        # call setDatatype_nonotitication to prevent the (untested) automatic update of the datatypes
        self._wrapped_signal.setDatatype_nonotitication(datatype)
        return self

    def set_name(self, name):
        """
        Set the signals identifier. Must be a string without spaces and alphanumerical characters only.
        """
        self._wrapped_signal.set_name(name)
        return self

    # TODO: can this be removed?
    def set_name_raw(self, name):
        self._wrapped_signal.set_name_raw(name)
        return self

    def extend_name(self, name):
        """
        Extend the current signal identifier by appending characters at the end of the string
        """
        self._wrapped_signal.set_name( self._wrapped_signal.name + name )
        return self

    def set_blockname(self, name):
        """
        Set the name of the block that has the signal as one of its outputs
        """
        self._wrapped_signal.set_blockname(name)
        return self

    # ...

    #
    # operator overloads
    #
    # Each operator converts a plain python constant operand to a constant
    # signal first, then wires both signals into a new operator block.

    def __add__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='+').outputs[0] )

    def __radd__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='+').outputs[0] )

    def __sub__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='-').outputs[0] )

    def __rsub__(self, other):
        # reflected form: other - self
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ other.unwrap, self.unwrap ], operator='-').outputs[0] )

    def __mul__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='*').outputs[0] )

    def __rmul__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='*').outputs[0] )

    def __truediv__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ self.unwrap, other.unwrap ], operator='/').outputs[0] )

    def __rtruediv__(self, other):
        # reflected form: other / self
        other = convert_python_constant_val_to_const_signal(other)
        return wrap_signal( block_prototypes.Operator1( dy.get_current_system(), inputSignals=[ other.unwrap, self.unwrap ], operator='/').outputs[0] )

    # _comparison operators
    # NOTE(review): dunders like __rle__/__rgt__/__req__/__rne__ are not
    # part of python's data model and are never called implicitly (python
    # uses the mirrored operator, e.g. __ge__, instead) -- confirm intent.

    def __le__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '<=' ) )

    def __rle__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = other, right = self, operator = '<=' ) )

    def __ge__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '>=' ) )

    def __rge__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = other, right = self, operator = '>=' ) )

    def __lt__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '<' ) )

    def __rlt__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = other, right = self, operator = '<' ) )

    def __gt__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '>' ) )

    def __rgt__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = other, right = self, operator = '>' ) )

    def __eq__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '==' ) )

    def __req__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '==' ) )

    def __ne__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '!=' ) )

    def __rne__(self, other):
        other = convert_python_constant_val_to_const_signal(other)
        return ( _comparison(left = self, right = other, operator = '!=' ) )
# prev name was SignalUser, SignalUserAnonymous
class SignalUser(SignalUserTemplate):
    """An anonymous (forward-declared) signal, typically used to close
    feedback loops via the << operator."""

    def __init__(self, sim):
        SignalUserTemplate.__init__( self, system=sim, wrapped_signal=UndeterminedSignal(sim) )

    def inherit_datatype(self, from_signal : SignalUserTemplate):
        """
        The datatype of this anonymous signal shall be inherited from the given signal 'from_signal'
        """
        # self.inherit_datatype_of_signal = from_signal
        # from_signal.unwrap.inherit_datatype_to( self.unwrap )

        self.unwrap.inherit_datatype_from_signal( from_signal.unwrap )

    # only for anonymous (feedback) signals
    def __lshift__(self, other):
        # close a feedback loop by connecting the signals self and other
        self.unwrap.setequal(other.unwrap)
        return other
class BlockOutputSignalUser(SignalUserTemplate):
    """
    A signal that is the output of a block (normal case)
    """
    def __init__(self, signalToWrap : BlockOutputSignal):
        SignalUserTemplate.__init__( self, system=signalToWrap.system, wrapped_signal=signalToWrap )
class SimulationInputSignalUser(SignalUserTemplate):
    """
    A special signal that is an input to a simulation.
    """
    def __init__(self, system, datatype = None):
        # Create the underlying input signal and wrap it.
        input_signal = SimulationInputSignal(system, datatype=datatype)

        SignalUserTemplate.__init__( self, system=system, wrapped_signal=input_signal )
def unwrap( signal : SignalUserTemplate ):
    """Return the internal Signal wrapped by the given user signal."""
    return signal.unwrap
def unwrap_list( signals : List[SignalUserTemplate] ):
    """Return a new list holding the internal signal of every wrapper."""
    return [ signal.unwrap for signal in signals ]
def unwrap_hash( signals ):
    """
    Return a copy of the given hash array in which every user-facing
    wrapper is replaced by its internal signal object.
    """
    unwrapped = signals.copy()
    for key in unwrapped:
        unwrapped[key] = unwrapped[key].unwrap
    return unwrapped
def wrap_signal( signal : Signal ):
    # wraps a block output signal
    return BlockOutputSignalUser( signal )
def wrap_signal_list( signals : List[ Signal ] ):
    """Wrap every block output signal in a user-facing wrapper object."""
    return [ BlockOutputSignalUser( signal ) for signal in signals ]
| StarcoderdataPython |
4804443 | from django.urls import path
import wishlist.views as wishlist
app_name = 'wishlist'

urlpatterns = [
    path('', wishlist.view, name='view'),
    path('add/<pk>/', wishlist.wishlist_add, name='add'),
    # NOTE(review): the 'remove' route is bound to wishlist_add -- this
    # looks like a copy-paste bug; confirm a removal view exists in
    # wishlist.views and point the route at it.
    path('remove/<pk>/', wishlist.wishlist_add, name='remove'),
    path('clear/', wishlist.clear, name='clear'),
] | StarcoderdataPython |
3206522 | """Common utils for the library."""
from typing import Optional
import torch as _torch
def mask_padded_values(xs: _torch.FloatTensor, n: _torch.LongTensor,
                       mask_value: float = -float('inf'),
                       mutate: bool = False):
    """Replace entries beyond each row's list size with ``mask_value``.

    Args:
        xs: A tensor of size (batch_size, list_size, 1) containing padded
            values.
        n: A tensor of size (batch_size) containing list size of each query.
        mask_value: The value to mask with (default: -inf).
        mutate: Whether to mutate the values of xs or return a copy.
    """
    # Column index of every entry, replicated across the batch dimension.
    positions = _torch.arange(xs.shape[1], device=xs.device)
    positions = positions.reshape((1, xs.shape[1])).repeat_interleave(xs.shape[0], dim=0)
    # Per-row list size, replicated across the column dimension.
    sizes = n.reshape((n.shape[0], 1)).repeat_interleave(xs.shape[1], dim=1)
    out = xs if mutate else xs.clone()
    out[positions >= sizes] = mask_value
    return out
def tiebreak_argsort(
        x: _torch.FloatTensor,
        descending: bool = True,
        generator: Optional[_torch.Generator] = None) -> _torch.LongTensor:
    """Argsort each row of ``x``, breaking ties uniformly at random.

    Args:
        x: A 2D tensor where each row will be argsorted.
        descending: Whether to sort in descending order.
        generator: Optional RNG used for the tie-breaking permutation.

    Returns:
        A 2D tensor of the same size as x, where each row is the argsort
        of x, with ties broken randomly.
    """
    kwargs = {}
    if generator is not None:
        kwargs["generator"] = generator
    # Shuffle the columns first; argsorting the shuffled view and mapping
    # back through the permutation randomizes the order of equal values.
    perm = _torch.randperm(x.shape[1], device=x.device, **kwargs)
    return perm[_torch.argsort(x[:, perm], descending=descending)]
def rank_by_score(
        scores: _torch.FloatTensor,
        n: _torch.LongTensor,
        generator: Optional[_torch.Generator] = None) -> _torch.LongTensor:
    """Sorts scores in decreasing order.

    This method ensures that padded documents are placed last and ties are
    broken randomly.

    Args:
        scores: A tensor of size (batch_size, list_size, 1) or
            (batch_size, list_size) containing scores.
        n: A tensor of size (batch_size) containing list size of each query.
        generator: Optional RNG used for random tie-breaking.
    """
    if scores.dim() == 3:
        scores = scores.reshape((scores.shape[0], scores.shape[1]))
    # Padded entries become -inf so they sort to the end of every row.
    return tiebreak_argsort(mask_padded_values(scores, n), generator=generator)
def rank_by_plackettluce(
        scores: _torch.FloatTensor, n: _torch.LongTensor,
        generator: Optional[_torch.Generator] = None) -> _torch.LongTensor:
    """Samples a ranking from a plackett luce distribution.

    This method ensures that padded documents are placed last.

    Args:
        scores: A tensor of size (batch_size, list_size, 1) or
            (batch_size, list_size) containing scores.
        n: A tensor of size (batch_size) containing list size of each query.
        generator: Optional RNG used for sampling.
    """
    if scores.dim() == 3:
        scores = scores.reshape((scores.shape[0], scores.shape[1]))
    # Padded scores become -inf, i.e. probability zero after the softmax.
    masked_scores = mask_padded_values(scores, n)

    # This implementation uses reservoir sampling, which comes down to doing
    # Uniform(0, 1) ^ (1 / p) and then sorting by the resulting values. The
    # following implementation is a numerically stable variant that operates in
    # log-space.
    log_p = _torch.nn.LogSoftmax(dim=1)(masked_scores)
    rng_kwargs = {"generator": generator} if generator is not None else {}
    u = _torch.rand(log_p.shape, device=scores.device, **rng_kwargs)
    # Gumbel-style keys: smaller r means higher sampled rank.
    r = _torch.log(-_torch.log(u)) - log_p
    return tiebreak_argsort(r, descending=False, generator=generator)
def batch_pairs(x: _torch.Tensor) -> _torch.Tensor:
    """Build the tensor of all ordered index pairs for each batch row.

    The result p satisfies:
        p[_, i, j, 0] = x[_, i]
        p[_, i, j, 1] = x[_, j]

    Args:
        x: The input batch of dimension (batch_size, list_size) or
            (batch_size, list_size, 1).

    Returns:
        A tensor of size (batch_size, list_size, list_size, 2) containing
        all pairs.
    """
    if x.dim() == 2:
        x = x.unsqueeze(2)
    # Broadcast x along a new trailing axis: rows fixed on i, varying on j.
    first = x.repeat_interleave(x.shape[1], dim=2)
    # And along the middle axis: rows varying on i, fixed on j.
    second = x.permute(0, 2, 1).repeat_interleave(x.shape[1], dim=1)
    return _torch.stack([first, second], dim=3)
| StarcoderdataPython |
6681174 | <filename>arbitrage/private_markets/vircurex.py
from .market import Market, TradeException
import time
import requests
import hashlib
import random
from collections import OrderedDict
import config
import database
class PrivateVircurex(Market):
    """Authenticated Vircurex exchange client: signed requests, limit
    orders, open-order tracking and balance retrieval."""

    domain = "https://api.vircurex.com"

    def __init__(self):
        super().__init__()
        self.secrets = config.vircurex_secrets
        self.user = config.vircurex_user
        self.get_balances()

    def secure_request(self, command, params={}, params_nohash={}):
        """params is an ordered dictionary of parameters to pass. params_nohash is a dictionary of
        parameters that aren't part of the encoded request.

        Returns the decoded JSON response.
        (The mutable default arguments are only read, never mutated.)
        """
        secret = self.secrets[command]
        t = time.strftime("%Y-%m-%dT%H:%M:%S", time.gmtime())  # UTC time
        txid = "%s-%f" % (t, random.randint(0, 1 << 31))
        txid = hashlib.sha256(txid.encode("ascii")).hexdigest()  # unique transmission ID using random hash
        # token computation - dict order matters here!
        vp = [command] + list(params.values())
        token_input = "%s;%s;%s;%s;%s" % (secret, self.user, t, txid, ';'.join(map(str, vp)))
        token = hashlib.sha256(token_input.encode("ascii")).hexdigest()
        # Building request
        reqp = {"account": self.user, "id": txid, "token": token, "timestamp": t}
        reqp.update(params)
        reqp.update(params_nohash)
        url = "%s/api/%s.json" % (self.domain, command)
        data = requests.get(url, params=reqp)
        return data.json()

    def _buy(self, amount, price):
        """Create a buy limit order; returns the released order id.

        Raises TradeException with the API status code on failure.
        """
        params = OrderedDict((("ordertype", "BUY"), ("amount", "{:.8f}".format(amount)),
                              ("currency1", self.p_coin), ("unitprice", "{:.8f}".format(price)),
                              ("currency2", self.s_coin)))
        response = self.secure_request("create_order", params)
        if response["status"] != 0:
            raise TradeException(response["status"])
        # A created order is inactive until it is released.
        params = {"orderid": response["orderid"]}
        response = self.secure_request("release_order", params)
        if response["status"] != 0:
            raise TradeException(response["status"])
        return response["orderid"]

    def _sell(self, amount, price):
        """Create a sell limit order; returns the released order id.

        Raises TradeException with the API status code on failure.
        """
        params = OrderedDict((("ordertype", "SELL"), ("amount", "{:.8f}".format(amount)),
                              ("currency1", self.p_coin), ("unitprice", "{:.8f}".format(price)),
                              ("currency2", self.s_coin)))
        response = self.secure_request("create_order", params)
        if response["status"] != 0:
            raise TradeException(response["status"])
        params = {"orderid": response["orderid"]}
        response = self.secure_request("release_order", params)
        if response["status"] != 0:
            raise TradeException(response["status"])
        return response["orderid"]

    def update_order_status(self):
        """Reconcile self.open_orders against the exchange's open orders;
        orders that disappeared are reported to the database as completed."""
        if not self.open_orders:
            return
        response = self.secure_request('read_orders', params_nohash={'otype': 1})
        # Orders are returned as numbered keys 'order-1' .. 'order-N'.
        received_open_orders = []
        for i in range(1, response['numberorders'] + 1):
            order_name = 'order-' + str(i)
            received_open_orders.append(response[order_name])
        remaining_open_orders = []
        completed_order_ids = []
        for open_order in self.open_orders:
            found_order = [found_order for found_order in received_open_orders if
                           found_order['orderid'] == open_order['order_id']]
            if not found_order:
                # No longer open on the exchange => treated as completed.
                completed_order_ids.append(open_order['order_id'])
            else:
                remaining_open_orders.append(open_order)
        if completed_order_ids:
            self.open_orders = remaining_open_orders
            database.order_completed(self.name, completed_order_ids)

    def get_balances(self):
        """Fetch and store available balances for the configured coin pair."""
        # Bug fix: the request used to sit inside the try block, so a failed
        # request raised NameError ('res' unbound) while formatting the error
        # message; now only the response parsing is guarded.
        res = self.secure_request("get_balances")
        try:
            self.p_coin_balance = float(res["balances"][self.p_coin]["availablebalance"])
            self.s_coin_balance = float(res["balances"][self.s_coin]["availablebalance"])
        except Exception:
            raise Exception("Error getting balance: Vircurex error %s" % res.get('status'))
| StarcoderdataPython |
9759131 |
class Config:
    """Parser/serializer for a server configuration file.

    Recognized line kinds:
      * ``option value [#widget:<type>]``   -> self.options
      * ``tune <name> <value>``             -> self.tunes
      * ``add_vote "<title>" "<command>"``  -> self.votes
      * ``#command:<name> <name> ...``      -> self.available_rcon_commands
    """

    def __init__(self, path):
        self.path = path
        self.options = {}
        self.votes = []
        self.tunes = []
        self.available_rcon_commands = []
        self.rcon_commands = []

    def read(self):
        with open(self.path) as f:
            lines = f.readlines()
        # Keep non-comment lines that contain a space, plus the special
        # '#command:' comment lines.
        lines = [line.strip() for line in lines if len(line.strip()) and ((line.strip()[0] != '#' and ' ' in line.strip()) or (len(line.strip()) > 9 and line.strip()[:9] == '#command:'))]
        options = [line for line in lines if line.split(' ', 1)[0] not in ['add_vote', 'tune'] and line.split(' ', 1)[0][0] != '#']
        tunes = [line.split(' ', 1)[1] for line in lines if line.split(' ', 1)[0] == 'tune']
        votes = [line.split(' ', 1)[1] for line in lines if line.split(' ', 1)[0] == 'add_vote']
        rcon_commands = [line[9:] for line in lines if line[:9] == '#command:']
        self.options = {}
        for line in options:
            command = line.split(' ', 1)[0]
            # Optional trailing '#widget:<type>' annotation selects the UI
            # widget for this option; default is a plain text field.
            widget = line.rsplit(' ', 1)[1].split(':', 1)[1] if line.rsplit(' ', 1)[1][0] == '#' and '#widget:' in line.rsplit(' ', 1)[1] else 'text'
            line = line.split(' ', 1)[1]
            if ' ' in line and line.rsplit(' ', 1)[1][0] == '#' and '#widget:' in line.rsplit(' ', 1)[1]:
                line = line.rsplit(' ', 1)[0]
            value = line.strip('"')
            # in case of select widget save the selections to the value
            if len(widget) >= 7:
                if widget[:7] == 'select:' and len(widget[7:]):
                    selections = widget.split(':', 1)[1].split(',')
                    widget = 'select'
                    if value not in selections:
                        selections.append(value)
                    for selection in selections:
                        value += ',{0}'.format(selection)
                elif widget[:7] == 'select:':
                    # 'select:' with no choices degrades to a text widget.
                    widget = 'text'
            # Option values are capped at 1000 characters.
            self.options[command] = (value[:1000], widget)
        self.tunes = [{'command': line.rsplit()[0].strip('"'), 'value': float(line.split()[1].strip('"'))} for line in tunes]
        self.votes = [{'command': line.rsplit('" ', 1)[1].strip('"'), 'title': line.rsplit('" ', 1)[0].strip('"')} for line in votes if len(line.split('" ')) == 2]
        for line in rcon_commands:
            self.available_rcon_commands.extend([command for command in line.split() if command not in self.available_rcon_commands])

    def write(self, path=None):
        """Serialize options, tunes, votes and rcon commands back to disk.

        NOTE(review): dict.iteritems() and the encode() on the written
        strings indicate this module targets python 2; under python 3 this
        method raises AttributeError.
        """
        if not path:
            path = self.path
        with open(path, 'w') as f:
            for key, value in self.options.iteritems():
                f.write(u'{0} "{1}" #widget:{2}\n'.format(key, value[0], value[1]).encode('UTF-8'))
            for tune in self.tunes:
                f.write(u'tune {0} {1}\n'.format(tune['command'], tune['value']).encode('UTF-8'))
            for vote in self.votes:
                f.write(u'add_vote "{0}" "{1}"\n'.format(vote['title'], vote['command']).encode('UTF-8'))
            for rcon_command in self.rcon_commands:
                f.write(u'{0} {1}\n'.format(rcon_command['command'], rcon_command['value']).encode('UTF-8'))

    def add_option(self, command, value, widget='text'):
        # NOTE(review): non-int, non-str values (e.g. float) would raise on
        # .replace() below -- confirm callers only pass int/str.
        if isinstance(value, int):
            value = str(value)
        self.options[command] = (value.replace('"', r'\"'), widget)

    def add_tune(self, command, value):
        self.tunes.append({'command': command, 'value': float(value)})

    def add_vote(self, command, title):
        # Escape embedded quotes so the written line stays parseable.
        self.votes.append({'command': command.replace('"', r'\"'), 'title': title.replace('"', r'\"')})

    def add_rcon_command(self, command, value):
        self.rcon_commands.append({'command': command.replace('"', r'\"'), 'value': value.replace('"', r'\"')})
import unittest
from lambda_tools import mapper
# Shared fixture data for the mapper tests below (Enid Blyton's Famous Five).
FAMOUS_FIVE = ['Dick', 'Julian', 'George', 'Anne', 'Timmy']
class StringFieldEntity:
    """Mapping target with a single optional string field."""
    hello = mapper.StringField()
class TestStringField(unittest.TestCase):
    """Behaviour of StringField when parsing plain dicts."""

    def test_simple_mapping(self):
        parsed = mapper.parse(StringFieldEntity, {'hello': 'world'})
        self.assertIsInstance(parsed, StringFieldEntity)
        self.assertEqual('world', parsed.hello)

    def test_simple_mapping_with_default(self):
        parsed = mapper.parse(StringFieldEntity, {})
        self.assertEqual(parsed.hello, None)

    def test_simple_mapping_with_unknown_value(self):
        # An unrecognised key must be rejected, not ignored.
        with self.assertRaises(mapper.MappingError):
            mapper.parse(StringFieldEntity, {'goodbye': 'test'})

    def test_simple_mapping_with_non_dict(self):
        with self.assertRaises(mapper.MappingError):
            mapper.parse(StringFieldEntity, 'Hello world')
class RequiredStringFieldEntity:
    """Mapping target whose single string field is mandatory."""
    hello = mapper.StringField(required=True)
class TestRequiredStringField(unittest.TestCase):
    """A required field must be present in the input."""

    def test_missing_required_field(self):
        with self.assertRaises(mapper.MappingError):
            mapper.parse(RequiredStringFieldEntity, {})
class IntFieldEntity:
    """Mapping target with an int field defaulting to 100."""
    count = mapper.IntField(default=100)
class TestIntField(unittest.TestCase):
    """IntField parsing, defaults and string coercion."""

    def test_int_field(self):
        parsed = mapper.parse(IntFieldEntity, {'count': 10})
        self.assertEqual(10, parsed.count)

    def test_missing_int_field(self):
        # Absent key falls back to the declared default.
        parsed = mapper.parse(IntFieldEntity, {})
        self.assertEqual(100, parsed.count)

    def test_int_as_string(self):
        parsed = mapper.parse(IntFieldEntity, {'count': '10'})
        self.assertEqual(10, parsed.count)
class BoolFieldEntity:
    """Mapping target with a single boolean field."""
    active = mapper.BoolField()
class TestBoolField(unittest.TestCase):
    """BoolField passes both truth values through unchanged."""

    def test_true(self):
        parsed = mapper.parse(BoolFieldEntity, {'active': True})
        self.assertEqual(True, parsed.active)

    def test_false(self):
        parsed = mapper.parse(BoolFieldEntity, {'active': False})
        self.assertEqual(False, parsed.active)
class ChoiceFieldEntity:
    """Mapping target restricted to the FAMOUS_FIVE names."""
    name = mapper.ChoiceField(FAMOUS_FIVE)
class TestChoiceField(unittest.TestCase):
    """ChoiceField accepts only values from its choice list."""

    def test_valid(self):
        # Parsing a listed choice must simply succeed.
        mapper.parse(ChoiceFieldEntity, {'name': 'Julian'})

    def test_invalid(self):
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ChoiceFieldEntity, {'name': 'Jack'})

    def test_missing(self):
        parsed = mapper.parse(ChoiceFieldEntity, {})
        self.assertEqual(parsed.name, None)
class ListFieldEntity:
    """Mapping target holding a list of strings."""
    names = mapper.ListField(mapper.StringField())
class TestListField(unittest.TestCase):
    """ListField accepts lists/sets of valid items and rejects the rest."""

    def test_valid(self):
        parsed = mapper.parse(ListFieldEntity, {'names': FAMOUS_FIVE})
        self.assertListEqual(FAMOUS_FIVE, parsed.names)

    def test_set(self):
        five = set(FAMOUS_FIVE)
        parsed = mapper.parse(ListFieldEntity, {'names': five})
        self.assertListEqual(list(five), parsed.names)

    def test_empty_list(self):
        parsed = mapper.parse(ListFieldEntity, {'names': []})
        self.assertListEqual([], parsed.names)

    def test_invalid_list(self):
        # A list of ints must fail the element-level StringField check.
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ListFieldEntity, {'names': range(5)})

    def test_string(self):
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ListFieldEntity, {'names': '5'})

    def test_dict(self):
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ListFieldEntity, {'names': {}})
class DictFieldEntity:
    """Mapping target holding a dict of string values."""
    environment = mapper.DictField(mapper.StringField())
class TestDictField(unittest.TestCase):
    """DictField parsing of string-valued mappings."""

    def test_valid(self):
        parsed = mapper.parse(DictFieldEntity, {'environment': {'one': 'two'}})
        self.assertDictEqual({'one': 'two'}, parsed.environment)

    def test_invalid_dict(self):
        # Non-string values inside the dict must be rejected.
        with self.assertRaises(mapper.MappingError):
            mapper.parse(DictFieldEntity, {'environment': {'one': []}})
class ClassFieldEntity:
    """Mapping target nesting a ChoiceFieldEntity."""
    five = mapper.ClassField(ChoiceFieldEntity)
class TestClassField(unittest.TestCase):
    """ClassField parsing of nested entities.

    Bug fix: this class was previously also named ``TestChoiceField``,
    which silently redefined the earlier TestChoiceField test case above,
    so that class's tests never ran. Renamed to resolve the collision.
    """

    def test_valid(self):
        result = mapper.parse(ClassFieldEntity, {
            'five': {
                'name': 'Julian'
            }
        })
        self.assertEqual(result.five.name, 'Julian')

    def test_invalid(self):
        # An invalid nested choice must propagate as a MappingError.
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ClassFieldEntity, {
                'five': {
                    'name': 'Philip'
                }
            })
class ListClassFieldEntity:
    """Mapping target holding a list of nested ChoiceFieldEntity objects."""
    five = mapper.ListField(mapper.ClassField(ChoiceFieldEntity))
class TestListClassField(unittest.TestCase):
    """Lists of nested entities parsed through ClassField."""

    def test_valid(self):
        payload = {'five': [{'name': member} for member in FAMOUS_FIVE]}
        parsed = mapper.parse(ListClassFieldEntity, payload)
        self.assertListEqual(sorted(FAMOUS_FIVE),
                             sorted(entity.name for entity in parsed.five))

    def test_invalid(self):
        # The Secret Seven are (mostly) not valid Famous Five choices.
        secret_seven = ['Peter', 'Janet', 'Jack', 'Barbara',
                        'George', 'Pam', 'Colin']
        with self.assertRaises(mapper.MappingError):
            mapper.parse(ListClassFieldEntity, {
                'five': [{'name': member} for member in secret_seven]
            })
class ClassWithDefaultFieldEntity:
    """Nested entity whose bare value maps onto the 'name' default field."""
    five = mapper.ClassField(ChoiceFieldEntity, default_field='name')
class TestClassWithDefaultField(unittest.TestCase):
    """A bare value should populate the nested entity's default_field."""

    def test_default_field(self):
        parsed = mapper.parse(ClassWithDefaultFieldEntity, {'five': 'George'})
        self.assertEqual('George', parsed.five.name)
#!/usr/bin/env python
#
# Example of how to analyse HLT objects using FWLite and pyROOT.
#
# adapted from PhysicsTools/PatExamples/bin/PatBasicFWLiteAnalyzer.py
from __future__ import print_function
import ROOT
import sys
from DataFormats.FWLite import Events, Handle
#----------------------------------------------------------------------
# main
#----------------------------------------------------------------------
# Open the input file with FWLite and prepare a handle for the HLT filter
# objects we want to inspect.
events = Events (["input.root"])
handle = Handle ("trigger::TriggerFilterObjectWithRefs")
# declare a variable 'electrons' to get them from the trigger objects
# (a C++-side vector is needed because getObjects fills by reference)
ROOT.gROOT.ProcessLine("std::vector<reco::ElectronRef> electrons;")
# copied from DataFormats/HLTReco/interface/TriggerTypeDefs.h
TriggerElectron = +82
# note that for this collection there is only one (not two for L1Iso and L1NonIso)
label = ("hltL1NonIsoHLTNonIsoSingleElectronEt22TighterEleIdOneOEMinusOneOPFilter")
# counters accumulated over the whole input file and printed at the end
numElectronsSeen = 0
numEventsWithElectron = 0
# loop over events
for event in events:
    # use getByLabel, just like in cmsRun
    event.getByLabel (label, handle)
    # get the product and extract the electron refs of trigger type 82
    trigobjs = handle.product()
    trigobjs.getObjects(TriggerElectron, ROOT.electrons)
    print("number of electrons in this event:",len(ROOT.electrons))
    # smallest |1/E - 1/p| found among all track/supercluster pairs
    bestOneOverEminusOneOverP = None
    numElectronsSeen += len(ROOT.electrons)
    if len(ROOT.electrons) > 0:
        numEventsWithElectron += 1
    else:
        # nothing to analyse in this event
        continue
    for eleindex, electron in enumerate(ROOT.electrons):
        print("electron",eleindex)
        # see HLTrigger/Egamma/src/HLTElectronOneOEMinusOneOPFilterRegional.cc
        # how 1/E-1/p is calculated
        tracks = electron.track().product()
        superClusters = electron.superCluster().product()
        print(" number of tracks:",len(tracks))
        print(" number of superclusters:",len(superClusters))
        for track in tracks:
            momentum = track.p()
            for superCluster in superClusters:
                energy = superCluster.energy()
                # NOTE(review): raises ZeroDivisionError if energy or momentum
                # is exactly zero -- presumably never the case for HLT
                # electrons, but worth confirming.
                thisOneOverEminusOneOverP = abs(1/energy - 1/momentum)
                print(" momentum=",momentum,"energy=",energy,"E/P=",energy/momentum,"1/E-1/p=",thisOneOverEminusOneOverP)
                # NOTE(review): '== None' works here but 'is None' is the
                # idiomatic comparison.
                if bestOneOverEminusOneOverP == None or thisOneOverEminusOneOverP < bestOneOverEminusOneOverP:
                    bestOneOverEminusOneOverP = thisOneOverEminusOneOverP
            # loop over clusters
        # loop over tracks
    # loop over electron trigger objects
    print("best value:",bestOneOverEminusOneOverP)
print("total number of electrons:",numElectronsSeen)
print("events with at least one electron:",numEventsWithElectron)
# qsearch/integrations.py (from the WolfLink/qsearch repository)
# Import qiskit up front; without it nothing in this module is usable.
try:
    from qiskit import QuantumCircuit
    import qiskit
except ImportError:
    # NOTE(review): this fallback assignment is unreachable for callers --
    # the ImportError is re-raised immediately below, so module import
    # fails before anyone can observe qiskit being None. Either drop the
    # assignment or drop the raise to get a real optional-dependency guard.
    qiskit = None
    raise ImportError("Cannot import qiskit, please run pip3 install qiskit before importing qiskit code.")
import numpy as np
from .gates import *
class QiskitImportError(Exception):
    """Raised when a qiskit circuit cannot be imported into qsearch
    (e.g. classical operations or multiple quantum registers)."""
class QiskitGateConverter:
    """Translates individual qiskit instructions into qsearch gate layers.

    One converter instance accumulates state across a circuit: the quantum
    register name seen so far (only a single register is supported) and the
    numeric parameters of every converted gate, in conversion order.
    """

    def __init__(self, num_qubits):
        self.registers = []      # register names seen (at most one supported)
        self.num_qubits = num_qubits
        self.parameters = []     # qiskit gate parameters, in conversion order

    def convert(self, gate, qubits, cbits):
        """Abstraction to convert an arbitrary qiskit gate to a layer in a qsearch circuit"""
        if cbits != []:
            raise QiskitImportError("Classical operations are not supported in qsearch for now.")
        # Dispatch on the gate name to the matching convert_<name> method;
        # unsupported gates surface as an AttributeError.
        return getattr(self, f'convert_{gate.name}')(gate, qubits, cbits)

    def convert_cx(self, gate, qubits, cbits):
        """Convert a CNOT; also enforces the single-register restriction."""
        for q in qubits:
            if q.register.name not in self.registers:
                if len(self.registers) == 0:
                    self.registers.append(q.register.name)
                else:
                    raise QiskitImportError("Qsearch does not support importing circuits with multiple quantum registers.")
        pair = [q.index for q in qubits]
        assert len(pair) == 2, "CNOT between more than 2 qubits?"
        return NonadjacentCNOTGate(self.num_qubits, pair[0], pair[1])

    def _embed_one_qubit_gate(self, gate, qubits, qsearch_gate, message):
        """Shared path for all one-qubit conversions: record the gate's
        parameters and wrap *qsearch_gate* in identities so it acts on the
        correct wire of a full-width layer. (This logic was previously
        copy-pasted verbatim into every single-qubit convert_* method.)"""
        assert len(qubits) == 1, message
        identity_gate = IdentityGate()
        self.parameters.extend(gate.params)
        index = qubits[0].index
        return KroneckerGate(*[identity_gate]*index, qsearch_gate,
                             *[identity_gate]*(self.num_qubits-index-1))

    def convert_u3(self, gate, qubits, cbits):
        return self._embed_one_qubit_gate(gate, qubits, U3Gate(), "U3 on more than one qubit?")

    def convert_u2(self, gate, qubits, cbits):
        return self._embed_one_qubit_gate(gate, qubits, U2Gate(), "U2 on more than one qubit?")

    def convert_rx(self, gate, qubits, cbits):
        return self._embed_one_qubit_gate(gate, qubits, XGate(), "X on more than one qubit?")

    def convert_ry(self, gate, qubits, cbits):
        return self._embed_one_qubit_gate(gate, qubits, YGate(), "Y on more than one qubit?")

    def convert_rz(self, gate, qubits, cbits):
        return self._embed_one_qubit_gate(gate, qubits, ZGate(), "Z on more than one qubit?")
def qiskit_to_qsearch(circ, converter=None):
    """Convert qiskit code to qsearch *structure* and parameters"""
    if converter is None:
        converter = QiskitGateConverter(circ.num_qubits)
    # One qsearch layer per qiskit instruction, in circuit order.
    layers = [converter.convert(gate, qubits, cbits)
              for gate, qubits, cbits in circ.data]
    return ProductGate(*layers), np.array(converter.parameters)
# coding: utf-8
"""
Layered Insight Assessment, Compliance, Witness & Control
LI Assessment & Compliance performs static vulnerability analysis, license and package compliance. LI Witness provides deep insight and analytics into containerized applications. Control provides dynamic runtime security and analytics for containerized applications. You can find out more about the Layered Insight Suite at [http://layeredinsight.com](http://layeredinsight.com).
OpenAPI spec version: 0.10
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class PolicyApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def add_policy(self, **kwargs):
"""
Create new security policy
Creates a security policy object. ID SHOULD NOT be passed when creating a new policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_policy(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Policy policy:
:return: Policy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.add_policy_with_http_info(**kwargs)
else:
(data) = self.add_policy_with_http_info(**kwargs)
return data
def add_policy_with_http_info(self, **kwargs):
"""
Create new security policy
Creates a security policy object. ID SHOULD NOT be passed when creating a new policy.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.add_policy_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param Policy policy:
:return: Policy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method add_policy" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'policy' in params:
body_params = params['policy']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.\
select_header_content_type(['application/json'])
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Policy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_policy(self, policy_id, **kwargs):
"""
Delete policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_policy(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.delete_policy_with_http_info(policy_id, **kwargs)
else:
(data) = self.delete_policy_with_http_info(policy_id, **kwargs)
return data
def delete_policy_with_http_info(self, policy_id, **kwargs):
"""
Delete policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.delete_policy_with_http_info(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to delete (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params) or (params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `delete_policy`")
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies/{policyID}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def generate_seccomp_for_policy(self, policy_id, **kwargs):
"""
Get a Seccomp policy derivied from a LI policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.generate_seccomp_for_policy(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy (required)
:param bool li_agent: If true, the policy will include whitelisted syscalls for the LI agent.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.generate_seccomp_for_policy_with_http_info(policy_id, **kwargs)
else:
(data) = self.generate_seccomp_for_policy_with_http_info(policy_id, **kwargs)
return data
def generate_seccomp_for_policy_with_http_info(self, policy_id, **kwargs):
"""
Get a Seccomp policy derivied from a LI policy
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.generate_seccomp_for_policy_with_http_info(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy (required)
:param bool li_agent: If true, the policy will include whitelisted syscalls for the LI agent.
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id', 'li_agent']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method generate_seccomp_for_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params) or (params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `generate_seccomp_for_policy`")
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id']
query_params = []
if 'li_agent' in params:
query_params.append(('liAgent', params['li_agent']))
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies/{policyID}/Seccomp', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None,
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_containers_running_policy(self, policy_id, **kwargs):
"""
Get containers running a specific policy
Returns list of containers running the policy with the specified ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_containers_running_policy(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to find containers running (required)
:return: Containers
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_containers_running_policy_with_http_info(policy_id, **kwargs)
else:
(data) = self.get_containers_running_policy_with_http_info(policy_id, **kwargs)
return data
def get_containers_running_policy_with_http_info(self, policy_id, **kwargs):
"""
Get containers running a specific policy
Returns list of containers running the policy with the specified ID
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_containers_running_policy_with_http_info(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to find containers running (required)
:return: Containers
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_containers_running_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params) or (params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `get_containers_running_policy`")
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies/{policyID}/Containers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Containers',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policies(self, **kwargs):
"""
Get all policies
Returns a list of policies that are accessible to this user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_policies(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Policies
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_policies_with_http_info(**kwargs)
else:
(data) = self.get_policies_with_http_info(**kwargs)
return data
def get_policies_with_http_info(self, **kwargs):
"""
Get all policies
Returns a list of policies that are accessible to this user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_policies_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: Policies
If the method is called asynchronously,
returns the request thread.
"""
all_params = []
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policies" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Policies',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policy(self, policy_id, **kwargs):
"""
Get specific policy
Returns details for policy with matching ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_policy(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to get (required)
:return: Policy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_policy_with_http_info(policy_id, **kwargs)
else:
(data) = self.get_policy_with_http_info(policy_id, **kwargs)
return data
def get_policy_with_http_info(self, policy_id, **kwargs):
"""
Get specific policy
Returns details for policy with matching ID.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_policy_with_http_info(policy_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_id: hexadecimal ID of policy to get (required)
:return: Policy
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['policy_id']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_policy" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'policy_id' is set
if ('policy_id' not in params) or (params['policy_id'] is None):
raise ValueError("Missing the required parameter `policy_id` when calling `get_policy`")
collection_formats = {}
path_params = {}
if 'policy_id' in params:
path_params['policyID'] = params['policy_id']
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.\
select_header_accept(['application/json'])
# Authentication setting
auth_settings = ['ApiKey']
return self.api_client.call_api('/Policies/{policyID}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Policy',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_policy_by_name(self, policy_name, **kwargs):
"""
Get specific policy by name
Returns details for policy with matching name.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_policy_by_name(policy_name, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str policy_name: Name of policy to search database for (required)
:return: Policy
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_policy_by_name_with_http_info(policy_name, **kwargs)
else:
(data) = self.get_policy_by_name_with_http_info(policy_name, **kwargs)
return data
def get_policy_by_name_with_http_info(self, policy_name, **kwargs):
    """
    Get specific policy by name.

    Returns details for policy with matching name.  Synchronous by
    default; pass a ``callback`` callable (invoked with the response) to
    perform the request asynchronously, in which case the request thread
    is returned.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str policy_name: Name of policy to search database for (required)
    :return: Policy
    """
    all_params = ['policy_name']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Plain dict iteration replaces six.iteritems(); it behaves identically
    # on Python 2 and 3 and drops the py2-era helper.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method get_policy_by_name" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'policy_name' is set
    if ('policy_name' not in params) or (params['policy_name'] is None):
        raise ValueError("Missing the required parameter `policy_name` when calling `get_policy_by_name`")

    collection_formats = {}

    path_params = {}
    if 'policy_name' in params:
        path_params['policyName'] = params['policy_name']

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # Authentication setting
    auth_settings = ['ApiKey']

    return self.api_client.call_api('/PoliciesByName/{policyName}', 'GET',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Policy',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def suspend_policy(self, policy_id, **kwargs):
    """
    Suspend security policy.

    Suspends a policy so it won't block anything.  Synchronous by default;
    pass a ``callback`` callable (invoked with the response) to perform
    the request asynchronously, in which case the request thread is
    returned instead of the Policy.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str policy_id: hexadecimal ID of policy to suspend (required)
    :return: Policy
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.suspend_policy_with_http_info(policy_id, **kwargs)
    data = self.suspend_policy_with_http_info(policy_id, **kwargs)
    return data
def suspend_policy_with_http_info(self, policy_id, **kwargs):
    """
    Suspend security policy.

    Suspends a policy so it won't block anything.  Synchronous by
    default; pass a ``callback`` callable (invoked with the response) to
    perform the request asynchronously, in which case the request thread
    is returned.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str policy_id: hexadecimal ID of policy to suspend (required)
    :return: Policy
    """
    all_params = ['policy_id']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Plain dict iteration replaces six.iteritems(); identical behavior
    # on Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method suspend_policy" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'policy_id' is set
    if ('policy_id' not in params) or (params['policy_id'] is None):
        raise ValueError("Missing the required parameter `policy_id` when calling `suspend_policy`")

    collection_formats = {}

    path_params = {}
    if 'policy_id' in params:
        path_params['policyID'] = params['policy_id']

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}
    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # Authentication setting
    auth_settings = ['ApiKey']

    return self.api_client.call_api('/Policies/{policyID}/Suspend', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Policy',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def update_policy(self, policy_id, **kwargs):
    """
    Update security policy.

    Updates a specified security policy object.  Synchronous by default;
    pass a ``callback`` callable (invoked with the response) to perform
    the request asynchronously, in which case the request thread is
    returned instead of the Policy.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str policy_id: hexadecimal ID of policy to update (required)
    :param Policy policy: the new policy content (request body)
    :return: Policy
    """
    kwargs['_return_http_data_only'] = True
    if kwargs.get('callback'):
        return self.update_policy_with_http_info(policy_id, **kwargs)
    data = self.update_policy_with_http_info(policy_id, **kwargs)
    return data
def update_policy_with_http_info(self, policy_id, **kwargs):
    """
    Update security policy.

    Updates a specified security policy object.  Synchronous by default;
    pass a ``callback`` callable (invoked with the response) to perform
    the request asynchronously, in which case the request thread is
    returned.

    :param callback function: The callback function for asynchronous
        request. (optional)
    :param str policy_id: hexadecimal ID of policy to update (required)
    :param Policy policy: the new policy content (request body)
    :return: Policy
    """
    all_params = ['policy_id', 'policy']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    params = locals()
    # Plain dict iteration replaces six.iteritems(); identical behavior
    # on Python 2 and 3.
    for key, val in params['kwargs'].items():
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method update_policy" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'policy_id' is set
    if ('policy_id' not in params) or (params['policy_id'] is None):
        raise ValueError("Missing the required parameter `policy_id` when calling `update_policy`")

    collection_formats = {}

    path_params = {}
    if 'policy_id' in params:
        path_params['policyID'] = params['policy_id']

    query_params = []
    header_params = {}
    form_params = []
    local_var_files = {}

    body_params = None
    if 'policy' in params:
        body_params = params['policy']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = ['ApiKey']

    # NOTE: this API performs updates via POST (not PUT/PATCH), per the spec.
    return self.api_client.call_api('/Policies/{policyID}', 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Policy',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| StarcoderdataPython |
3416739 | <reponame>shaunakv1/python_pillow_circular_thumbnail
from PIL import Image, ImageOps, ImageDraw
im = Image.open('avatar.jpg')
im = im.resize((120, 120));
bigsize = (im.size[0] * 3, im.size[1] * 3)
mask = Image.new('L', bigsize, 0)
draw = ImageDraw.Draw(mask)
draw.ellipse((0, 0) + bigsize, fill=255)
mask = mask.resize(im.size, Image.ANTIALIAS)
im.putalpha(mask)
output = ImageOps.fit(im, mask.size, centering=(0.5, 0.5))
output.putalpha(mask)
output.save('output.png')
background = Image.open('back.jpg')
background.paste(im, (150, 10), im)
background.save('overlap.png')
| StarcoderdataPython |
1854590 | <filename>tests_functional/conftest.py
import pytest
from dialog_api.groups_pb2 import GROUPTYPE_GROUP, GROUPTYPE_CHANNEL
from google.protobuf import empty_pb2
from sdk_testing_framework.messaging import Messaging
from shared.data_generators import Generators
import os
import shutil
from shared.constants import DefaultValues as DV
@pytest.fixture(scope="class")
def d_user(users_gen, users_real_gen, cmdopt, request):
"""
Initializing object for pre-defining tests:
Got two args for choice needed type of users
( generate dummy users or using users with real authorization with password and login )
Quantity of needed users setiing in pytest decorator
@pytest.mark.parametrize('d_user', [(val)], indirect=True)
where val - quantity of users. !!! Note max number of users 3
"""
global users_num
if request.param == "2 users":
users_num = 2
elif request.param == "3 users":
users_num = 3
if cmdopt == 'type1':
print("\n*** START test session with %s dummy-generated users ***" % users_num)
yield Messaging(users_gen, users_num)
print("\n*** END test session ***")
elif cmdopt == 'type2':
print("\n*** START test session with %s pre-defined users ***" % users_num)
yield Messaging(users_real_gen, users_num, real_users=True)
print("\n*** END test session ***")
@pytest.fixture(autouse=True)
def any_custom_setup():
    """Emit a status line before every test for log readability."""
    print("*** SETUP COMPLITE ***")
@pytest.fixture(scope='class')
def update1(d_user):
    """Pre-condition: user1 requests the sequence-updates state."""
    seq_state = d_user.u1.updates.SeqUpdates(empty_pb2.Empty())
    yield seq_state
@pytest.fixture(scope='class')
def update2(d_user):
    """Pre-condition: user2 requests the sequence-updates state."""
    seq_state = d_user.u2.updates.SeqUpdates(empty_pb2.Empty())
    yield seq_state
@pytest.fixture(scope='class')
def update3(d_user):
    """Pre-condition: user3 requests the sequence-updates state."""
    seq_state = d_user.u3.updates.SeqUpdates(empty_pb2.Empty())
    yield seq_state
@pytest.fixture(scope='class', autouse=True)
def send_message(d_user):
    """Pre-condition: user1 sends one text message to user2."""
    sender, target = d_user.u1, d_user.outpeer1
    d_user.send(sender, target)
@pytest.fixture(scope='class')
def _channel(d_user):
    """Pre-condition: user1 creates a channel (with short name) with user2."""
    channel = d_user.create_group(
        d_user.u1, [d_user.outpeer1], GROUPTYPE_CHANNEL, with_shortname=True
    )
    yield channel
@pytest.fixture(scope='class')
def _group(d_user):
    """Pre-condition: user1 creates a group (with short name) with user2."""
    group = d_user.create_group(
        d_user.u1, [d_user.outpeer1], GROUPTYPE_GROUP, with_shortname=True
    )
    yield group
@pytest.fixture(scope="class")
def gen_txt():
""" Pre-condition: generate 5mb txt file """
yield Generators.random_txt_file()
@pytest.fixture(scope="class")
def clean_up_txt():
""" Post-condition: clean txt file """
yield
if os.path.exists(DV.txt):
os.remove(DV.txt)
else:
print("Nothing to clean")
@pytest.fixture(scope="function", autouse=True)
def clean_up():
""" Workspace: create tmp directory for file sharing&uploading tests
Remove directory after test session
"""
if not os.path.exists(DV.downloads):
os.makedirs(DV.downloads)
yield
if os.path.exists(DV.downloads):
shutil.rmtree(DV.downloads)
| StarcoderdataPython |
1860450 | <reponame>liweitianux/atoolbox<gh_stars>1-10
# -*- coding: utf-8 -*-
#
# <NAME>
# 2015/06/19
"""
Class Region for regions on the spherical surface.
Used in astronomy to select/define a certain region, e.g., DS9.
"""
import sys
class Region(object):
    """
    Basic region on the spherical surface, defined similarly to DS9 regions.

    Coordinate style: (ra, dec); unit: degree.
    ra: [0, 2*pi); dec: [-pi/2, pi/2]
    """
    # currently supported region types (similar to DS9)
    REGION_TYPES = ["circle", "ellipse", "box", "annulus", "pie", "panda"]

    def __init__(self, regtype, xc, yc,
                 radius=None, radius2=None,
                 width=None, height=None, rotation=None,
                 start=None, end=None):
        """
        :param regtype: one of REGION_TYPES (case-insensitive)
        :param xc, yc: region center
        :raises ValueError: for an unsupported region type
        The remaining parameters are type-specific and may be None.
        """
        if regtype.lower() not in self.REGION_TYPES:
            raise ValueError("only following region types supported: %s" %
                             " ".join(self.REGION_TYPES))
        self.regtype = regtype.lower()
        self.xc = xc
        self.yc = yc
        self.radius = radius
        self.radius2 = radius2
        self.width = width
        self.height = height
        self.rotation = rotation
        # Bug fix: `start` and `end` were accepted but silently discarded.
        self.start = start
        self.end = end

    def __repr__(self):
        return "Region: %s" % self.regtype

    def dump(self):
        """Return all region parameters as a plain dict."""
        return {"regtype": self.regtype,
                "xc": self.xc,
                "yc": self.yc,
                "radius": self.radius,
                "radius2": self.radius2,
                "width": self.width,
                "height": self.height,
                "rotation": self.rotation,
                # previously missing from the dump:
                "start": self.start,
                "end": self.end
                }

    def is_inside(self, point):
        """
        Whether `point` (x, y) lies inside the region (borders inclusive).

        Only implemented for "box" regions; rotation is not supported.
        """
        x = point[0]
        y = point[1]
        if self.regtype == "box":
            xmin = self.xc - self.width / 2.0
            xmax = self.xc + self.width / 2.0
            ymin = self.yc - self.height / 2.0
            ymax = self.yc + self.height / 2.0
            return xmin <= x <= xmax and ymin <= y <= ymax
        raise ValueError("region type '%s' currently not implemented" %
                         self.regtype)
| StarcoderdataPython |
3267696 | <reponame>katyakats/mlrun
import http
import deepdiff
import pytest
import requests_mock as requests_mock_package
import mlrun.api.schemas
import mlrun.api.utils.clients.opa
import mlrun.config
import mlrun.errors
@pytest.fixture()
async def api_url() -> str:
    # Point the global mlrun config at a local OPA address and hand the
    # URL to the tests.
    # NOTE(review): async fixture consumed by sync tests — presumably an
    # asyncio fixture plugin (anyio / pytest-asyncio auto mode) is enabled;
    # confirm in the project's pytest configuration.
    api_url = "http://127.0.0.1:8181"
    mlrun.mlconf.httpdb.authorization.opa.address = api_url
    return api_url
@pytest.fixture()
async def permission_query_path() -> str:
    # Configure the OPA permission-query endpoint used by the client.
    # NOTE(review): async fixture — requires an asyncio fixture plugin;
    # confirm in the project's pytest configuration.
    permission_query_path = "/v1/data/service/authz/allow"
    mlrun.mlconf.httpdb.authorization.opa.permission_query_path = permission_query_path
    return permission_query_path
@pytest.fixture()
async def opa_client(
    api_url: str, permission_query_path: str,
) -> mlrun.api.utils.clients.opa.Client:
    # Enable debug logging and switch authorization mode to OPA before
    # constructing the client under test.
    mlrun.mlconf.httpdb.authorization.opa.log_level = 10
    mlrun.mlconf.httpdb.authorization.mode = "opa"
    client = mlrun.api.utils.clients.opa.Client()
    # force running init again so the configured api url will be used
    client.__init__()
    return client
def test_query_permissions_success(
    api_url: str,
    permission_query_path: str,
    opa_client: mlrun.api.utils.clients.opa.Client,
    requests_mock: requests_mock_package.Mocker,
):
    """A positive OPA response makes query_permissions return True."""
    resource = "/projects/project-name/functions/function-name"
    action = mlrun.api.schemas.AuthorizationAction.create
    auth_info = mlrun.api.schemas.AuthInfo(
        user_id="user-id", user_group_ids=["user-group-id-1", "user-group-id-2"]
    )

    def allow_handler(request, context):
        # The outgoing body must match exactly what the client generates.
        expected_body = opa_client._generate_permission_request_body(
            resource, action.value, auth_info
        )
        diff = deepdiff.DeepDiff(expected_body, request.json(), ignore_order=True)
        assert diff == {}
        context.status_code = http.HTTPStatus.OK.value
        return {"result": True}

    requests_mock.post(f"{api_url}{permission_query_path}", json=allow_handler)
    allowed = opa_client.query_permissions(resource, action, auth_info)
    assert allowed is True
def test_query_permissions_failure(
    api_url: str,
    permission_query_path: str,
    opa_client: mlrun.api.utils.clients.opa.Client,
    requests_mock: requests_mock_package.Mocker,
):
    """A negative OPA response raises MLRunAccessDeniedError."""
    resource = "/projects/project-name/functions/function-name"
    action = mlrun.api.schemas.AuthorizationAction.create
    auth_info = mlrun.api.schemas.AuthInfo(
        user_id="user-id", user_group_ids=["user-group-id-1", "user-group-id-2"]
    )

    def deny_handler(request, context):
        # The outgoing body must match exactly what the client generates.
        expected_body = opa_client._generate_permission_request_body(
            resource, action.value, auth_info
        )
        diff = deepdiff.DeepDiff(expected_body, request.json(), ignore_order=True)
        assert diff == {}
        context.status_code = http.HTTPStatus.OK.value
        return {"result": False}

    requests_mock.post(f"{api_url}{permission_query_path}", json=deny_handler)
    with pytest.raises(
        mlrun.errors.MLRunAccessDeniedError,
        match=f"Not allowed to {action} resource {resource}",
    ):
        opa_client.query_permissions(resource, action, auth_info)
| StarcoderdataPython |
1863620 | <reponame>YuqianJiang/tl_shaping_experiments
import gym
import deepr
import cartpole_continuing
def main():
    """Train DeepR on the continuing CartPole task and save the model."""
    environment = cartpole_continuing.CartPoleContinuingEnv()
    # method_type alternatives: "shielding", "baseline"
    trained = deepr.learn(
        environment,
        network='mlp',
        method_type="shaping",
    )
    print("Saving model to cartpole_model.pkl")
    trained.save("cartpole_model.pkl")
main()
| StarcoderdataPython |
3566588 | import os
import shutil
import numpy as np
import cmph
from diskarray import DiskVarArray
from deeputil import Dummy
DUMMY_LOG = Dummy()
class VarArray(DiskVarArray):
    """Disk-backed variable-length array of (count, bytes) records.

    Each record is stored as 8 bytes holding a uint64 ``n`` followed by
    the raw bytes of ``s``; an empty record encodes ``None``.
    """

    def __init__(self, dpath, mode="r+", growby=DiskVarArray.GROWBY, log=DUMMY_LOG):
        super(VarArray, self).__init__(
            dpath, dtype=np.uint8, mode=mode, growby=growby, log=log
        )

    def __getitem__(self, idx):
        """Return (n, s) for the record at `idx`, or None for empty records."""
        data = super(VarArray, self).__getitem__(idx)
        if not len(data):
            return None
        # np.frombuffer / ndarray.tobytes replace np.fromstring /
        # ndarray.tostring, which were deprecated for years and removed
        # in NumPy 2.0; the byte-level behavior is identical.
        n = int(np.frombuffer(data[:8].tobytes(), dtype=np.uint64)[0])
        s = data[8:].tobytes()
        return n, s

    def _convert(self, v):
        """Encode None or an (n, s) pair into the on-disk uint8 layout."""
        if v is None:
            return np.array([], dtype=np.uint8)
        n, s = v
        n = np.frombuffer(np.uint64(n).tobytes(), dtype=np.uint8)
        # `s` must be bytes-like (the original np.array(list(s), ...) also
        # only worked for bytes on Python 3).
        s = np.frombuffer(s, dtype=np.uint8)
        v = np.concatenate([n, s])
        return v

    def append(self, v):
        return super(VarArray, self).append(self._convert(v))

    def extend(self, v):
        v = [self._convert(x) for x in v]
        return super(VarArray, self).extend(v)
class StaticStringIndexDict:
    """Read-mostly string -> index mapping backed by a CMPH minimal perfect hash.

    When ``keys`` is given the structure is (re)built on disk at ``path``;
    otherwise an existing structure is loaded from ``path``.
    """

    def __init__(self, path, keys=None, log=DUMMY_LOG):
        self._path = path
        self.log = log
        self._data = None
        self._mph = None
        self._mph_path = os.path.join(path, "mph")
        if keys:
            self._data, self._mph = self._storedata(path, keys)
        else:
            self._data = VarArray(path)
            self._mph = cmph.load_hash(open(self._mph_path, "rb"))

    def _storedata(self, path, keys):
        """Rebuild the on-disk structure from scratch for `keys`."""
        # Drop any stale on-disk state first.
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)

        mph = cmph.generate_hash(keys)
        mph.save(self._mph_path)

        indices = [mph(k) for k in keys]
        _max = max(indices)
        keyindices = dict((k, i) for i, k in enumerate(keys))
        data = dict(zip(indices, keys))

        # Dense slots 0.._max; holes (unused hash values) store None.
        d = VarArray(path)
        _data = []
        for i in range(_max + 1):
            k = data.get(i, None)
            v = None if k is None else (keyindices[k], k)
            _data.append(v)
        d.extend(_data)
        d.flush()
        return d, mph

    def get(self, k, default=None):
        """Return the index stored for `k`, or `default` if absent."""
        # The perfect hash can map unknown keys anywhere (including out of
        # range), so both the range and the stored key must be verified.
        try:
            entry = self._data[self._mph(k)]
        except IndexError:
            return default
        if entry is None:
            # Bug fix: a key hashing to an empty slot previously crashed
            # with TypeError on tuple unpacking.
            return default
        n, _k = entry
        if _k != k:
            # Bug fix: previously returned None, ignoring `default`.
            return default
        return n

    def __getitem__(self, k):
        n = self.get(k)
        if n is None:
            raise KeyError(k)
        return n

    def items(self):
        """Yield (key, index) pairs, skipping empty slots."""
        for i in range(len(self._data)):
            v = self._data[i]
            if v is None:
                continue
            n, k = v
            yield k, n

    def values(self):
        for k, v in self.items():
            yield v

    def keys(self):
        for k, v in self.items():
            yield k

    def flush(self):
        self._data.flush()

    def close(self):
        self._data.close()
| StarcoderdataPython |
3506182 | <reponame>kbase/IndexRunner
# -*- coding: utf-8 -*-
import json
import os
import unittest
from unittest.mock import patch
from IndexRunner.EventProducer import EventProducer
class EventProducerTest(unittest.TestCase):
    """Tests for EventProducer with the Kafka Producer mocked out."""

    @classmethod
    def setUpClass(cls):
        cls.test_dir = os.path.dirname(os.path.abspath(__file__))
        cls.mock_dir = os.path.join(cls.test_dir, 'mock_data')
        with open(cls.mock_dir + '/list_objects.json') as fh:
            cls.objects = json.load(fh)

    @patch('IndexRunner.EventProducer.Producer', autospec=True)
    def test_producer(self, mock_prod):
        producer = EventProducer({})
        producer.index_objects(self.objects)
        producer.prod.produce.assert_called()
| StarcoderdataPython |
3525500 | <filename>pip_services_runtime/data/__init__.py
# -*- coding: utf-8 -*-
"""
pip_services_runtime.data.__init__
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Data module initialization
:copyright: Digital Living Software Corp. 2015-2016, see AUTHORS for more details.
:license: MIT, see LICENSE for more details.
"""
__all__ = [ \
'IdGenerator', 'DataPage', 'FilterParams', 'PagingParams' \
]
from .IdGenerator import IdGenerator
from .DataPage import DataPage
from .FilterParams import FilterParams
from .PagingParams import PagingParams
| StarcoderdataPython |
4870146 | from typing import Iterable, Sequence
from .base import api_function
from .request import Request
__all__ = (
'Agent',
)
class Agent:
    '''
    Provides a shortcut of :func:`Admin.query()
    <ai.backend.client.admin.Admin.query>` that fetches various agent
    information.

    .. note::

      All methods in this function class require your API access key to
      have the *admin* privilege.
    '''

    session = None
    '''The client session instance that this function class is bound to.'''

    @api_function
    @classmethod
    async def list(cls,
                   status: str = 'ALIVE',
                   fields: Iterable[str] = None) -> Sequence[dict]:
        '''
        Fetches the list of agents with the given status.

        :param status: An upper-cased string constant representing agent
            status (one of ``'ALIVE'``, ``'TERMINATED'``, ``'LOST'``,
            etc.)

        :param fields: Additional per-agent query fields to fetch.
        '''
        # Default per-agent fields when the caller does not narrow them.
        if fields is None:
            fields = (
                'id',
                'addr',
                'status',
                'first_contact',
                'mem_slots',
                'cpu_slots',
                'gpu_slots',
            )
        # GraphQL query template; '$fields' is textually substituted below.
        q = 'query($status: String) {' \
            '  agents(status: $status) {' \
            '    $fields' \
            '  }' \
            '}'
        q = q.replace('$fields', ' '.join(fields))
        variables = {
            'status': status,
        }
        rqst = Request(cls.session, 'POST', '/admin/graphql')
        rqst.set_json({
            'query': q,
            'variables': variables,
        })
        async with rqst.fetch() as resp:
            data = await resp.json()
            return data['agents']

    def __init__(self, agent_id):
        # NOTE(review): agent_id is presumably the string ID reported by
        # the manager's agent listing — confirm against callers.
        self.agent_id = agent_id

    @api_function
    async def info(self, fields: Iterable[str] = None) -> dict:
        '''
        Returns the agent's information including resource capacity and usage.

        .. versionadded:: 18.12
        '''
        # Default fields mirror those used by :meth:`list`.
        if fields is None:
            fields = (
                'id',
                'addr',
                'status',
                'first_contact',
                'mem_slots',
                'cpu_slots',
                'gpu_slots',
            )
        # GraphQL query template; '$fields' is textually substituted below.
        q = 'query($agent_id: String!) {' \
            '  agent(agent_id: $agent_id) {' \
            '    $fields' \
            '  }' \
            '}'
        q = q.replace('$fields', ' '.join(fields))
        variables = {
            'agent_id': self.agent_id,
        }
        rqst = Request(self.session, 'POST', '/admin/graphql')
        rqst.set_json({
            'query': q,
            'variables': variables,
        })
        async with rqst.fetch() as resp:
            data = await resp.json()
            return data['agent']
| StarcoderdataPython |
6683760 | from datasets.base.factory_seed import BaseSeed
from datasets.types.data_split import DataSplit
class OpenImages_Seed(BaseSeed):
    """Dataset seed for Open Images V6 covering train/val/test splits."""

    def __init__(self, root_path: str = None,
                 data_split=DataSplit.Training | DataSplit.Validation | DataSplit.Testing):
        # Fall back to the configured dataset location when no path is given.
        path = root_path if root_path is not None else self.get_path_from_config('Open_Images_PATH')
        super(OpenImages_Seed, self).__init__('Open-Images-V6', path, data_split, 2)

    def construct(self, constructor):
        # Imported lazily so the heavy impl module loads only when needed.
        from .impl.OpenImages import construct_OpenImages
        construct_OpenImages(constructor, self)
| StarcoderdataPython |
5167970 | <reponame>samuk/ros2-line-follower
import rclpy
from sensor_msgs.msg import Image
def callback(msg):
    """Print the header of every received Image message."""
    header = msg.header
    print('Received {}'.format(header))
def main():
    """Spin a minimal node subscribed to /camera/image_raw until shutdown."""
    rclpy.init()
    node = rclpy.create_node('tester')
    # Keep a reference to the subscription so it is not garbage collected
    # (the no-op `global node` declaration and the bare `subscription`
    # expression statement of the original were removed).
    subscription = node.create_subscription(
        Image, '/camera/image_raw', callback, rclpy.qos.qos_profile_sensor_data)
    while rclpy.ok():
        print('waiting')
        rclpy.spin_once(node)
    # Destroy the node explicitly
    # (optional - otherwise it will be done automatically
    # when the garbage collector destroys the node object)
    node.destroy_node()
    rclpy.shutdown()
if __name__ == '__main__':
main()
| StarcoderdataPython |
1804654 | <reponame>Dimanitto/yatube
from django.test import TestCase, Client
from django.contrib.auth import get_user_model
from ..models import Group, Post
from http import HTTPStatus
User = get_user_model()
class TaskURLTests(TestCase):
    """URL availability and template tests for the posts app."""

    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.user = User.objects.create_user(username='auth')
        cls.group = Group.objects.create(
            title='Тестовая группа',
            slug='test_slug',
            description='Тестовое описание',
        )
        cls.post = Post.objects.create(
            author=cls.user,
            text='Тестовая пост',
        )
        # Public URLs mapped to the templates they must render.
        cls.urls = {
            '/': 'posts/index.html',
            f'/group/{cls.group.slug}/': 'posts/group_list.html',
            f'/profile/{cls.user.username}/': 'posts/profile.html',
            f'/posts/{cls.post.pk}/': 'posts/post_detail.html',
        }
        # URLs that require authentication (and authorship for edit).
        cls.private_urls = {
            f'/posts/{cls.post.pk}/edit/': 'posts/create_post.html',
            '/create/': 'posts/create_post.html',
        }

    def setUp(self):
        # Create an unauthenticated client.
        self.guest_client = Client()
        # Create a user.
        self.user = User.objects.create_user(username='HasNoName')
        # Create a second client.
        self.authorized_client = Client()
        # Log the user in.
        self.authorized_client.force_login(self.user)

    def test_all_urls_for_auth_user(self):
        """All pages are available to an authenticated user."""
        # Log in as the post author.
        self.authorized_client.force_login(self.post.author)
        # Merge the public and private URL dicts.
        all_urls = {**self.urls, **self.private_urls}
        for adress in all_urls:
            with self.subTest(adress=adress):
                response = self.authorized_client.get(adress)
                self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_urls_home_group_profile_post(self):
        """Public pages are available to everyone."""
        for adress in self.urls:
            with self.subTest(adress=adress):
                response = self.guest_client.get(adress)
                self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_urls_edit(self):
        """The post author can access the edit page."""
        # Log in as user `auth`: without this the check would run as
        # HasNoName, who is not the author (he has no post).
        self.authorized_client.force_login(self.post.author)
        response = self.authorized_client.get(f'/posts/{self.post.pk}/edit/')
        self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_urls_create(self):
        """An authorized client can access the post creation page."""
        response = self.authorized_client.get('/create/')
        self.assertEqual(response.status_code, HTTPStatus.OK)

    def test_urls_uses_correct_template(self):
        """Each URL renders the corresponding template."""
        # Templates for the public addresses.
        for adress, template in self.urls.items():
            with self.subTest(adress=adress):
                response = self.guest_client.get(adress)
                self.assertTemplateUsed(response, template)
        # Merge all addresses and templates to verify access for an
        # authenticated user (every page must be available).
        all_urls = {**self.urls, **self.private_urls}
        # The check runs as an authorized user without a post, which would
        # produce 302 (redirect); log in again as the author `auth`.
        self.authorized_client.force_login(self.post.author)
        for adress, template in all_urls.items():
            with self.subTest(adress=adress):
                response = self.authorized_client.get(adress)
                self.assertTemplateUsed(response, template)

    def test_page_404(self):
        """A request to a non-existent page returns a 404 error."""
        response = self.guest_client.get('/unexisting_page/')
        # The 404 page is rendered with a custom template.
        self.assertTemplateUsed(response, 'core/404.html')
        self.assertEqual(response.status_code, HTTPStatus.NOT_FOUND)

    def test_urls_anonimous_redirect(self):
        """An anonymous user is redirected on post creation/editing."""
        urls = (
            f'/posts/{self.post.pk}/edit/',
            '/create/'
        )
        for adress in urls:
            with self.subTest(adress=adress):
                response = self.guest_client.get(adress)
                self.assertEqual(response.status_code, HTTPStatus.FOUND)

    def test_auth_user_non_author_redirect(self):
        """An authorized user who is not the post author is redirected."""
        url = f'/posts/{self.post.pk}/edit/'
        response = self.authorized_client.get(url)
        self.assertEqual(response.status_code, HTTPStatus.FOUND)

    def test_urls_create_comment(self):
        """Only an authorized client may create a comment."""
        url = f'/posts/{self.post.pk}/comment/'
        response = self.authorized_client.get(url)
        # The view redirects immediately, hence the 302 check.
        self.assertEqual(response.status_code, HTTPStatus.FOUND)
        # An unauthenticated user is sent to the login page.
        response = self.guest_client.get(url)
        self.assertEqual(response.url, f'/auth/login/?next={url}')
| StarcoderdataPython |
1625995 | import sys
import asyncio
import time
import argparse
import logging
from typing import Dict, Tuple, List, Optional
from dataclasses import dataclass
from collections import OrderedDict
from hipee_messages import *
from bleak import BleakClient, BleakScanner
NOTIFY_CHARACTERISTIC_UUID = "0000FFF1-0000-1000-8000-00805F9B34FB"
WRITE_CHARACTERISTIC_UUID = "0000FFF2-0000-1000-8000-00805F9B34FB"
logger = logging.getLogger(__name__)
def parseCommand(data) -> Optional[CommandBase]:
    """Map the command byte (data[2]) to its message class and instantiate it.

    Returns None for unknown command ids.
    """
    commands = {
        0x1: HelloRequest,
        0x2: HelloResponse,
        0x3: InitialDataRequest,
        0x4: InitialDataResponse,
        0x5: SetTimeRequest,
        0x6: SetTimeResponse,
        0x30: SetConfigDataRequest,
        0x31: SetConfigDataResponse,
        0x32: SetStandardRequest,
        0x33: SetStandardResponse,
        0x34: GetLiveUpdateRequest,
        0x35: LiveUpdateResponse,
        0x44: GetBatteryStateRequest,
        0x45: GetBatteryStateResponse,
        0x46: GetConfigDataRequest,
        0x47: GetConfigDataResponse,
        0x50: SetExtConfigDataRequest,
        0x51: SetExtConfigDataResponse,
        0xFF: ErrorResponse,
    }
    # Single dict lookup instead of a membership test plus re-indexing.
    command_cls = commands.get(data[2])
    if command_cls is None:
        return None
    logger.debug(f"Creating command object {command_cls}")
    return command_cls(data)
def notification_handler(sender, data):
    """Simple notification handler which logs the data received."""
    logger.debug(f"{sender} {data.hex()}")
    command = parseCommand(data)
    if command:
        logger.info(command)
    else:
        # Bug fix: the original string was missing the f-prefix, so
        # "{data.hex()}" was logged literally instead of the payload
        # (also fixes the "Implanted" typo).
        logger.warning(f"Command type not implemented for message {data.hex()}")
async def start(address):
    """Connect to the Hipee at `address`, exercise the command set, then
    stream live updates for 150 s before unsubscribing."""
    async with BleakClient(address) as client:
        logger.info(f"Connected to {address}")
        await client.start_notify(NOTIFY_CHARACTERISTIC_UUID, notification_handler)
        await client.write_gatt_char(WRITE_CHARACTERISTIC_UUID, bytes(HelloRequest()))
        await asyncio.sleep(3)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(InitialDataRequest())
        )
        await asyncio.sleep(3.0)
        await client.write_gatt_char(WRITE_CHARACTERISTIC_UUID, bytes(SetTimeRequest()))
        await asyncio.sleep(3.0)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(SetConfigDataRequest())
        )
        await asyncio.sleep(3.0)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(GetBatteryStateRequest())
        )
        await asyncio.sleep(3.0)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(GetConfigDataRequest())
        )
        await asyncio.sleep(3.0)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(SetStandardRequest())
        )
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(SetExtConfigDataRequest())
        )
        await asyncio.sleep(10.0)
        await client.write_gatt_char(
            WRITE_CHARACTERISTIC_UUID, bytes(GetLiveUpdateRequest())
        )
        await asyncio.sleep(150.0)
        # Bug fix: the original referenced an undefined name `char_uuid`,
        # raising NameError after the 150 s live-update window.
        await client.stop_notify(NOTIFY_CHARACTERISTIC_UUID)
async def scan():
    """Discover nearby BLE devices and log each one."""
    for device in await BleakScanner.discover():
        logger.info(device)
if __name__ == "__main__":
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setFormatter(logging.Formatter("%(asctime)s %(message)s"))
logger.addHandler(ch)
parser = argparse.ArgumentParser(
description="Hippe BLE API Example",
epilog="*Make sure hipee is not connected to phone app",
)
parser.add_argument(
"mac",
metavar="UUID or MAC",
nargs="?",
type=str,
help="Hipee Bluetooth UUID/MAC",
)
parser.add_argument(
"--scan", dest="scan", action="store_const", const=True, help="scan BLE devices"
)
parser.add_argument(
"--debug",
dest="debug",
action="store_const",
const=True,
help="scan BLE devices",
)
args = parser.parse_args()
if not (args.scan or args.mac):
parser.print_help()
sys.exit(1)
if args.debug:
logger.setLevel(logging.DEBUG)
asyncio.run(scan() if args.scan else start(args.mac))
| StarcoderdataPython |
4960263 | <filename>eval_test.py
from doctalk.talk import *
def save_summary_and_keywords(document, summary_file, keyword_file):
    """Summarize *document* and write its summary and keywords to files."""
    talker = Talker(from_file=document)
    talker.save_summary(summary_file)
    talker.save_keywords(keyword_file)
def go():
    """Save summary and keywords, one line each, to files of your choice."""
    save_summary_and_keywords('examples/bfr.txt', 'summary.txt', 'keywords.txt')
| StarcoderdataPython |
9788222 | <reponame>d3rp/fissle
"""Doc string handling"""
import inspect
from collections import OrderedDict
def wrap_method_docstring(cls: object, nt):
    """
    In place mutation of the public methods' docstrings of *cls*.

    Uses the implicit Schema's fields (from *nt*, a NamedTuple instance) as
    the classes methods' signatures, i.e. helps fire to show up the
    defined arguments in Schema instead of a generic "**params" for the
    subcommands on command line.

    Returns:
        None
    """
    methods = [m.object for m in inspect.classify_class_attrs(cls)
               if m.kind == 'method' and not m.name.startswith('_')]
    # The generated argument help depends only on the schema `nt`, not on the
    # individual method — build it once instead of once per method.
    args = prepare_docstring_help(nt)
    for m in methods:
        replace_docstring(m, args)
# TODO: rename - append args to docstring
def replace_docstring(func, args):
# TODO: subcommand level args replacement with something (see TODO)
docstring = func.__doc__
docstring = (docstring if docstring is not None else '') + '\nArgs:\n' + args
func.__doc__ = docstring
def attr_map(parsed_params):
    """Mapping for the schema's fields (parameters).

    Turns {"name[: type]": "default [# description]"} into
    {"name": {"type", "default", "description"}}.
    """
    mapped_attrs = {}
    for param_type, def_desc in parsed_params.items():
        name, colon, type_part = param_type.partition(':')
        field_type = type_part.strip() if colon else None
        # TODO: this won't handle # in strings ...
        default_part, hash_mark, desc_part = def_desc.partition('#')
        if hash_mark:
            default = default_part.strip()
            description = desc_part.strip()
        else:
            default = def_desc.strip()
            description = ''
        mapped_attrs[name] = {
            'type': field_type,
            'default': default,
            'description': description,
        }
    return mapped_attrs
def parse_source_for_params(params):
    """
    Parse the source lines of the schema and split its fields.

    Returns:
        Mapping of "name[: type]" to the raw default/description text,
        or {} when any line lacks an '=' separator.
    """
    pairs = set()
    for src_line in params:
        # Only indented lines belong to the class body / field list.
        if not src_line.startswith(' '):
            continue
        pairs.add(tuple(part.strip() for part in str(src_line).split('=', 1)))
    try:
        return OrderedDict(pairs)
    except ValueError:
        # A line without '=' yields a 1-tuple, which cannot form a dict.
        return {}
def argument_help(attr_name, attr):
    """Format one ' --name (type): description (Default is ...)' help line."""
    type_label = attr['type'] if attr['type'] is not None else ""
    return ' --{} ({}): {} (Default is {})'.format(
        attr_name, type_label, attr['description'], attr['default'])
# TODO: rename - list_of_source_lines
def filter_params(N):
"""Filter source lines of the class
Returns:
fields as source lines
"""
filtered_source = []
for line in inspect.getsourcelines(N.__class__)[0][1:]:
# When parsing, post_init would bleed into the attributes without this hack
if line.strip().startswith('def '):
break
filtered_source.append(line)
return filtered_source
# TODO: rename - parse_args_for_help
def prepare_docstring_help(N):
"""Replace docstrings to include the parameters (schema)"""
# at this point, the params have not yet been populated
args = []
if hasattr(N, '__annotations__'):
for attr_name, cls in N.__annotations__.items():
filtered = filter_params(N)
parsed = parse_source_for_params(filtered)
attr = attr_map(parsed).get(attr_name)
if attr is None:
continue
args.append(argument_help(attr_name, attr))
return '\n'.join(args)
| StarcoderdataPython |
9735836 | <filename>factory/alchemy.py<gh_stars>0
# -*- coding: utf-8 -*-
# Copyright (c) 2013 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import unicode_literals
from sqlalchemy.sql.functions import max
from . import base
class SQLAlchemyModelFactory(base.Factory):
    """Factory for SQLAlchemy models."""

    ABSTRACT_FACTORY = True
    FACTORY_SESSION = None

    @classmethod
    def _setup_next_sequence(cls, *args, **kwargs):
        """Compute the next available PK, based on the 'pk' database field."""
        model = cls.FACTORY_FOR
        pk_column = getattr(model, model.__mapper__.primary_key[0].name)
        highest_pk = cls.FACTORY_SESSION.query(max(pk_column)).one()[0]
        if not isinstance(highest_pk, int):
            return 1
        return highest_pk + 1 if highest_pk else 1

    @classmethod
    def _create(cls, target_class, *args, **kwargs):
        """Create an instance of the model, and save it to the database."""
        instance = target_class(*args, **kwargs)
        cls.FACTORY_SESSION.add(instance)
        return instance
| StarcoderdataPython |
9716113 | import os
import sys
# noinspection PyUnresolvedReferences
import tests.mock_tables.dbconnector
modules_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.join(modules_path, 'src'))
from unittest import TestCase
import json
import mock
import re
import lldp_syncd
import lldp_syncd.conventions
import lldp_syncd.daemon
from swsssdk import SonicV2Connector, ConfigDBConnector
INPUT_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'subproc_outputs')
def create_dbconnector():
    """Return a SonicV2Connector with APPL_DB already connected."""
    connector = SonicV2Connector()
    connector.connect(connector.APPL_DB)
    return connector
def make_seconds(days, hours, minutes, seconds):
    """
    Convert a days/hours/minutes/seconds breakdown to total seconds.

    >>> make_seconds(0,5,9,5)
    18545
    """
    total = days * 24
    total = (total + hours) * 60
    total = (total + minutes) * 60
    return total + seconds
class TestLldpSyncDaemon(TestCase):
    """Tests for LldpSyncDaemon parsing and APPL_DB synchronisation."""

    def setUp(self):
        # Load the canned lldpctl JSON fixtures and build a daemon instance.
        with open(os.path.join(INPUT_DIR, 'lldpctl.json')) as f:
            self._json = json.load(f)
        with open(os.path.join(INPUT_DIR, 'lldpctl_mgmt_only.json')) as f:
            self._json_short = json.load(f)
        with open(os.path.join(INPUT_DIR, 'short_short.json')) as f:
            self._json_short_short = json.load(f)
        with open(os.path.join(INPUT_DIR, 'lldpctl_single_loc_mgmt_ip.json')) as f:
            self._single_loc_mgmt_ip = json.load(f)
        with open(os.path.join(INPUT_DIR, 'interface_only.json')) as f:
            self._interface_only = json.load(f)
        self.daemon = lldp_syncd.LldpSyncDaemon()

    def test_parse_json(self):
        jo = self.daemon.parse_update(self._json)
        print(json.dumps(jo, indent=3))

    def test_parse_short(self):
        jo = self.daemon.parse_update(self._json_short)
        print(json.dumps(jo, indent=3))

    def test_parse_short_short(self):
        jo = self.daemon.parse_update(self._json_short_short)
        print(json.dumps(jo, indent=3))

    def test_sync_roundtrip(self):
        """Sync a parsed update into APPL_DB and read it back for comparison."""
        parsed_update = self.daemon.parse_update(self._json)
        self.daemon.sync(parsed_update)
        db = create_dbconnector()
        keys = db.keys(db.APPL_DB)
        dump = {}
        for k in keys:
            # The test case is for LLDP neighbor information.
            # Need to filter LLDP_LOC_CHASSIS entry because the entry is
            # removed from parsed_update after executing daemon.sync().
            if k != 'LLDP_LOC_CHASSIS':
                dump[k] = db.get_all(db.APPL_DB, k)
        print(json.dumps(dump, indent=3))
        # convert dict keys to ints for easy comparison
        jo = {int(re.findall(r'\d+', k)[0]): v for k, v in parsed_update.items()}
        r_out = {int(re.findall(r'\d+', k)[0]): v for k, v in dump.items()}
        self.assertEqual(jo, r_out)
        # test enumerations
        for k, v in r_out.items():
            chassis_subtype = v['lldp_rem_chassis_id_subtype']
            chassis_id = v['lldp_rem_chassis_id']
            if int(chassis_subtype) == lldp_syncd.conventions.LldpChassisIdSubtype.macAddress:
                if re.match(r'^([0-9A-Fa-f]{2}[:-]){5}([0-9A-Fa-f]{2})$', chassis_id) is None:
                    self.fail("Non-mac returned for chassis ID")
            else:
                self.fail("Test data only contains chassis MACs")

    def test_timeparse(self):
        self.assertEquals(lldp_syncd.daemon.parse_time("0 day, 05:09:02"), make_seconds(0, 5, 9, 2))
        self.assertEquals(lldp_syncd.daemon.parse_time("2 days, 05:59:02"), make_seconds(2, 5, 59, 2))

    def parse_mgmt_ip(self, json_file):
        """Assert that the parsed management address(es) match the raw fixture."""
        parsed_update = self.daemon.parse_update(json_file)
        mgmt_ip_str = parsed_update['local-chassis'].get('lldp_loc_man_addr')
        json_chassis = json.dumps(json_file['lldp_loc_chassis']['local-chassis']['chassis'])
        chassis_dict = json.loads(json_chassis)
        # list(...) is required on Python 3, where dict.values() returns a
        # view that does not support indexing (the original `.values()[0]`
        # works only on Python 2).
        json_mgmt_ip = list(chassis_dict.values())[0]['mgmt-ip']
        if isinstance(json_mgmt_ip, list):
            for i, mgmt_ip in enumerate(mgmt_ip_str.split(',')):
                self.assertEquals(mgmt_ip, json_mgmt_ip[i])
        else:
            self.assertEquals(mgmt_ip_str, json_mgmt_ip)

    def test_multiple_mgmt_ip(self):
        self.parse_mgmt_ip(self._json)

    def test_single_mgmt_ip(self):
        self.parse_mgmt_ip(self._single_loc_mgmt_ip)

    def test_loc_chassis(self):
        parsed_update = self.daemon.parse_update(self._json)
        parsed_loc_chassis = parsed_update['local-chassis']
        self.daemon.sync(parsed_update)
        db = create_dbconnector()
        db_loc_chassis_data = db.get_all(db.APPL_DB, 'LLDP_LOC_CHASSIS')
        self.assertEquals(parsed_loc_chassis, db_loc_chassis_data)

    def test_remote_sys_capability_list(self):
        interface_list = self._interface_only['lldp'].get('interface')
        for interface in interface_list:
            (if_name, if_attributes), = interface.items()
            capability_list = self.daemon.get_sys_capability_list(if_attributes)
            self.assertNotEqual(capability_list, [])
| StarcoderdataPython |
3284895 | """Bootstrap."""
from itertools import zip_longest
from textwrap import wrap
from icecream import ic
from align_benchmark.benchmark import benchmark
from align_benchmark.benchmark import bm2
from align_benchmark.benchmark import bm3
def main():
    """Run benchmarks bm1-bm3 and print each against a naive zip_longest pairing."""
    # bm1
    res = round(benchmark(), 2) # default to bm1
    indent = " " * 16
    print(" bm1 wh ch2 ".center(40))
    # print(benchmark.bench)
    # print("zip_longest:", benchmark.lst)
    # benchmark.bench / benchmark.lst are attributes read off the function
    # object — presumably set by benchmark() itself; confirm in
    # align_benchmark.benchmark before relying on them.
    print(
        "benchmark:".ljust(16),
        "\n".join(wrap(str(benchmark.bench), subsequent_indent=indent)),
    )
    print(
        "zip_longest:".ljust(16),
        "\n".join(wrap(str(benchmark.lst), subsequent_indent=indent)),
    )
    ic(res)
    # bm2
    # The last entry of bm2 holds the final (left, right) indices; build the
    # trivial diagonal pairing to compare against.
    left, right = bm2[-1]
    fillvalue = left if left < right else right
    lst = [
        *zip_longest(
            range(left + 1),
            range(right + 1),
            fillvalue=fillvalue,
        )
    ]
    res = round(benchmark(lst, bm2), 2)
    print(" bm2 wh ch1 ".center(40))
    print("benchmark:".ljust(16), "\n".join(wrap(str(bm2), subsequent_indent=indent)))
    print("zip_longest:".ljust(16), "\n".join(wrap(str(lst), subsequent_indent=indent)))
    ic(res)
    # bm3
    # bm3 rows may contain blank cells; take the last non-blank value of each
    # column as the final indices.
    left, right = [*zip(*bm3)]
    left = [elm for elm in left if str(elm).strip()][-1]
    right = [elm for elm in right if str(elm).strip()][-1]
    try:
        int(left)
        int(right)
    except ValueError as exc:
        raise SystemExit(" Unable to continue... likely ill-formatted data.") from exc
    fillvalue = ""
    # NOTE(review): int(left)/int(right) only *checks* convertibility; if the
    # bm3 entries are strings, range(left + 1) below would raise TypeError —
    # this implicitly assumes bm3 already holds ints. Verify upstream.
    lst = [
        *zip_longest(
            range(left + 1),
            range(right + 1),
            fillvalue=fillvalue,
        )
    ]
    res = round(benchmark(lst, bm3), 2)
    print(" bm3 nyt-article ".center(40))
    print("benchmark:".ljust(16), "\n".join(wrap(str(bm3), subsequent_indent=indent)))
    print("zip_longest:".ljust(16), "\n".join(wrap(str(lst), subsequent_indent=indent)))
    ic(res)
| StarcoderdataPython |
6510090 | <gh_stars>0
from sim import Particle, Simulator
def test_evolve_sim():
    """Evolve three particles for 0.1s and check positions against knowns."""
    particles = [
        Particle(0.3, 0.5, +1),
        Particle(0.0, -0.5, -1),
        Particle(-0.1, -0.4, +3),
    ]
    Simulator(particles).evolve(0.1)

    def close(actual, expected, eps=1e-5):
        return abs(actual - expected) < eps

    expected_positions = [
        (0.210269, 0.543863),
        (-0.099334, -0.490034),
        (0.191358, -0.365227),
    ]
    for particle, (ex, ey) in zip(particles, expected_positions):
        assert close(particle.x, ex)
        assert close(particle.y, ey)
| StarcoderdataPython |
1768331 | <reponame>EdsonRomao/CursoEmVideo<gh_stars>0
"""
Faça um programa que calcule a soma entre todos os números impares que são
multiplos de três e que se encontram no intervalo de 1 até 500.
num1 = 0
for n in range(1, 501):
if n % 2 == 1:
num =+ n
if num % 3 == 0:
num1 = num1 + num
print(f'A soma de todos os número multiplos de 3, é {num1}')
"""
# Collect every odd multiple of 3 between 1 and 500, then report how many
# there are and their sum.
multiplos = [n for n in range(1, 501, 2) if n % 3 == 0]
cont = len(multiplos)
soma = sum(multiplos)
print(f'A soma dos {cont} números, é {soma}!')
| StarcoderdataPython |
377767 | <gh_stars>1-10
import os
import json
import xlwt
from urllib.parse import urlparse, parse_qs
import requests
import sys
import urllib
import time
import datetime
# Gacha banner types queried by main(): mihoyo banner type code + display name.
configs = [{"config_type":"301","config_name":"角色活动祈愿"},{"config_type":"302","config_name":"武器活动祈愿"},{"config_type":"200","config_name":"常驻祈愿"}]
def avgRank(rankArray, configName, currentCount):
    """Build a report of 5-star pulls, their average cost, and current pity."""
    escape = "\n"
    separator = "============================================"
    log = separator + escape
    total_count = 0
    for rankJSON in rankArray:
        total_count += rankJSON["count"]
        log += " " + rankJSON["name"] + " " + str(rankJSON["count"]) + " 抽" + escape
    if rankArray:
        log += " " + configName + "平均5星:" + " " + str(total_count / len(rankArray)) + " 抽" + escape
    log += " " + configName + "已累计:" + " " + str(currentCount) + " 抽未出5星" + escape
    log += separator + escape
    return log
def main() :
    """Fetch the gacha log for each banner via the sniffed mihoyo API URL,
    save one .xls workbook per banner, and print a 5-star pity summary."""
    if len(sys.argv) != 2:
        print("请放入抓包后的接口后执行,如 python3 gacha.py 'https://hk4e-api.mihoyo.com/event/gacha_info/api/getGachaLog?xxxxx'")
        return
    URL = sys.argv[1]
    parsed_url = urlparse(URL)
    params = parse_qs(parsed_url.query)
    authkey = params['authkey'][0]
    print_str = ""
    # NOTE(review): "reuqest_param" is a typo for "request_param"; kept as-is
    # in this documentation-only pass.
    reuqest_param = {'size':20,
                     'authkey':authkey,
                     'authkey_ver':1,
                     'sign_type':2,
                     'auth_appid':"webview_gacha",
                     'init_type':301,
                     'timestamp':int(time.time()),
                     'lang':"zh-cn",
                     'device_type':"mobile",
                     'game_biz':"hk4e_cn"
                     }
    # Carry over every query parameter captured from the sniffed URL.
    for key in params:
        reuqest_param[key] = params[key][0]
    for config in configs :
        workbook = xlwt.Workbook(encoding='utf-8')
        sheet = workbook.add_sheet(config["config_name"])
        titles = ["抽卡时间","名称","类别","星级"]
        for col,column in enumerate(titles):
            sheet.write(0, col, column)
        gacha_type = config['config_type']
        endId = 0        # cursor: id of the last record of the previous page
        row = 0          # next spreadsheet row to write (row 0 is the header)
        rankCount = 0    # pulls accumulated since the last 5-star
        rankArray = []   # one {"count", "name"} entry per 5-star pulled
        allData = []
        # Page through the API until an empty page is returned.
        for page in range(1, 9999):
            config_name = config['config_name']
            url = "https://hk4e-api.mihoyo.com/event/gacha_info/api/getGachaLog?"
            reuqest_param["gacha_type"] = gacha_type
            reuqest_param["page"] = page
            reuqest_param["end_id"] = endId
            url += urllib.parse.urlencode(reuqest_param)
            print("正在查询"+config_name+":"+str(page))
            request = requests.get(url)
            formatJSON = request.json()
            if formatJSON["retcode"] != 0:
                print("发生错误:"+formatJSON["message"])
                return
            if len(formatJSON["data"]["list"]) == 0:
                break
            for data in formatJSON["data"]["list"]:
                allData.append(data)
                endId=data["id"]
                sheet.write(row+1, 0, data["time"])
                sheet.write(row+1, 1, data["name"])
                sheet.write(row+1, 2, data["item_type"])
                sheet.write(row+1, 3, data["rank_type"])
                row += 1
        workbook.save("./genshin_"+config_name+".xls")
        # Records arrive newest-first; reverse to chronological order before
        # counting pulls between consecutive 5-stars.
        allData.reverse()
        currentCount = 0
        for data in allData:
            rankCount += 1
            currentCount += 1
            rank_type = data["rank_type"]
            name = data["name"]
            if rank_type == "5":
                rankJSON = {}
                rankJSON["count"] = rankCount
                rankJSON["name"] = name
                rankArray.append(rankJSON)
                currentCount = 0
                rankCount = 0
        print_str += avgRank(rankArray,config['config_name'],currentCount)+"\n"
    print(print_str)
if __name__ == "__main__":
    # Run only when executed as a script, so importing this module for reuse
    # does not trigger the network calls in main().
    main()
1851553 | # Standard imports. Add analysis specific ones.
import sys
import datetime
import json
from os import path
import matplotlib.pyplot as plt
from matplotlib.dates import DateFormatter
from matplotlib.dates import HourLocator
from matplotlib.dates import YearLocator
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from utils import loadConfig
import utils
import generator
config = loadConfig.getConfig()
import csv
def load_data():
    """Parse garmin-activities.csv into {'activities': [dict, ...]}."""
    data_file = utils.load_file('garmin-activities.csv')
    data_reader = csv.reader(data_file)
    # Column order of the Garmin export:
    # Activity Type,Date,Favorite,Title,Distance,Calories,Time,Avg HR,Max HR,
    # Avg Run Cadence,Max Run Cadence,Avg Pace,Best Pace,Elev Gain,Elev Loss,
    # Avg Stride Length,Avg Vertical Ratio,Avg Vertical Oscillation,
    # Training Stress Score,Grit,Flow,Total Strokes,Avg. Swolf,
    # Avg Stroke Rate,Bottom Time,Min Water Temp,Surface Interval,Decompression
    keys = [
        'type', 'date', 'favorite', 'title', 'distance', 'calories',
        'time', 'avg_hr', 'max_hr', 'avg_cadence', 'max_cadence',
        'avg_pace', 'best_pace', 'elev_gain', 'elev_loss', 'avg_stride_length',
        'avg_vertical_ratio', 'avg_vertical_oscillation', 'training_stress_score',
        'grit', 'flow', 'total_strokes', 'avg_swolf', 'avg_stroke_rate',
        'bottom_time', 'min_water_temp', 'surface_interval', 'decompression'
    ]
    activities = []
    header_skipped = False
    for row in data_reader:
        if not header_skipped:
            header_skipped = True
            continue
        activities.append({key: row[i] for i, key in enumerate(keys)})
    return {'activities': activities}
data = load_data()

# Graph average running pace
# Collect (date, pace) pairs for running activities only. Both values are
# raw CSV strings at this point — presumably matplotlib's date handling
# parses them; verify before changing the locators below.
x = []
y = []
for i in data['activities']:
    if i['type'] == 'running':
        x += [i['date']]
        y += [i['avg_pace']]
#plt.plot(x, y)
ax = plt.subplot()
#ax.plot(x, y)
ax.yaxis.set_major_locator(HourLocator())
# NOTE(review): '%M:%s' mixes minute (%M) with the non-standard '%s';
# seconds would be '%S' — confirm the intended pace label format.
ax.yaxis.set_major_formatter(DateFormatter('%M:%s'))
ax.xaxis_date()
ax.xaxis.set_major_locator(YearLocator())
ax.xaxis.set_major_formatter(DateFormatter('%Y-%m-%d'))
plt.plot(x, y)
# Write the figure and assemble the HTML report.
generator.check_figure_directory()
plt.savefig('html/figures/garmin_total_avg_running_pace.png', dpi=400)
plt.close()
parts = [
    ['header', ['Garmin Report']],
    ['subheader', ['Average Running Pace']],
    ['image', ['figures/garmin_total_avg_running_pace.png']]
]
generator.build_report('garmin_main', parts)
| StarcoderdataPython |
4898788 | <filename>external/unbound/testdata/pylib.tdir/pylib.lookup.py
#!/usr/bin/env python
'''
Test for unbound lookup.
BSD licensed.
'''
import unbound
# NOTE: this script uses Python 2 print statements, matching the
# Python-2-era unbound test harness it ships with.
ctx = unbound.ub_ctx()
status = ctx.config("ub.conf")
if status != 0:
    print "read config failed ", status
    exit(1)
print "config created"
# Resolve an A record; resolve() returns a (status, result) pair.
status, result = ctx.resolve("www.example.com", unbound.RR_TYPE_A, unbound.RR_CLASS_IN);
if status == 0 and result.havedata:
    print "Result: ", result.data.address_list
else:
    print "Failed ", status, " and data ", result
# Drop the context before exiting so it is cleaned up deterministically.
ctx = None
exit(0)
| StarcoderdataPython |
3366126 | # ---
# jupyter:
# jupytext:
# formats: ipynb,py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.9.1+dev
# kernelspec:
# display_name: Python [conda env:generic_expression] *
# language: python
# name: conda-env-generic_expression-py
# ---
# # Process recount2 data
# This notebook does the following:
#
# 1. Selects template experiment
# 2. Downloads subset of recount2 data, including the template experiment (subset of random experiments + 1 template experiment)
# 3. Train VAE on subset of recount2 data
# +
# %load_ext autoreload
# %load_ext rpy2.ipython
# %autoreload 2
import os
import numpy as np
import pandas as pd
from ponyo import utils, train_vae_modules
from generic_expression_patterns_modules import process
# -
# Set seeds to get reproducible VAE trained models
process.set_all_seeds()
# +
base_dir = os.path.abspath(os.path.join(os.getcwd(), "../"))
# Read in config variables
config_filename = os.path.abspath(os.path.join(base_dir, "configs", "config_test.tsv"))
params = utils.read_config(config_filename)
local_dir = params["local_dir"]
dataset_name = params["dataset_name"]
# File that contains gene ranks identified by Crow et. al.
DE_prior_filename = params["reference_gene_filename"]
# Template experiment ID
project_id = params["project_id"]
# Output file: pickled list of shared genes(generated during gene ID mapping)
shared_genes_filename = params["shared_genes_filename"]
# Output files of recount2 template experiment data
raw_template_filename = params["raw_template_filename"]
mapped_template_filename = params["mapped_template_filename"]
# Output files of recount2 compendium data
raw_compendium_filename = params["raw_compendium_filename"]
mapped_compendium_filename = params["mapped_compendium_filename"]
normalized_compendium_filename = params["normalized_compendium_filename"]
# Output file: pickled scaler (generated during compendium normalization)
scaler_filename = params["scaler_filename"]
# -
# ## Test: Downloading data
# +
# Directory where the downloaded files of template experiment will be saved into
template_download_dir = os.path.join(local_dir, "template_download")
# Make sure this directory already exists
os.makedirs(template_download_dir, exist_ok=True)
# + magic_args="-i project_id -i template_download_dir -i raw_template_filename -i base_dir" language="R"
#
# source(paste0(base_dir, '/generic_expression_patterns_modules/download_recount2_data.R'))
#
# get_recount2_template_experiment(project_id, template_download_dir, raw_template_filename)
# -
assert os.path.exists(raw_template_filename)
# ## Test: Renaming gene ids
# File mapping ensembl ids to hgnc symbols
gene_id_filename = os.path.join(
base_dir, dataset_name, "data", "metadata", "ensembl_hgnc_mapping.tsv"
)
# + magic_args="-i raw_template_filename -i gene_id_filename -i base_dir" language="R"
#
# # Get mapping between ensembl gene ids (ours) to HGNC gene symbols (published)
# # Input: raw_template_filename, output: gene_id_filename
#
# source(paste0(base_dir, '/generic_expression_patterns_modules/process_names.R'))
#
# # Note: This mapping file from ensembl ids to hgnc symbols is based on the library("biomaRt")
# # that gets updated. In order to get the most up-to-date version, you can delete the
# # ensembl_hgnc_mapping file to re-run the script that generates this mapping.
#
# if (file.exists(gene_id_filename) == FALSE) {
# get_ensembl_symbol_mapping(raw_template_filename, gene_id_filename)
# }
# -
# ## Test: processing template data
# This step will map the ensembl gene IDs in raw template data file to hgnc gene symbols, and delete certain columns (genes) and rows (samples).
#
# Output files generated in this step:
# - `shared_genes_filename`: pickled list of shared genes (created only if it doesn't exist yet)
# - `mapped_template_filename`: template data with column names mapped to hgnc gene symbols
# +
# Ensembl gene IDs missing from the biomaRt-generated mapping file,
# translated to HGNC symbols by hand.
manual_mapping = {
    "ENSG00000187510.7": "PLEKHG7",
    "ENSG00000230417.11": "LINC00595",
    "ENSG00000276085.1": "CCL3L1",
    "ENSG00000255374.3": "TAS2R45",
}
# Map template column names to HGNC symbols and restrict to the shared gene
# set; writes shared_genes_filename (if absent) and mapped_template_filename.
process.map_recount2_data(
    raw_template_filename,
    gene_id_filename,
    manual_mapping,
    DE_prior_filename,
    shared_genes_filename,
    mapped_template_filename,
)
# -
# ## Test: Processing compendium
# Map the full compendium to HGNC symbols, restrict to shared genes,
# normalize it, and persist the fitted scaler for later reuse.
process.process_raw_compendium_recount2(
    raw_compendium_filename,
    gene_id_filename,
    manual_mapping,
    DE_prior_filename,
    shared_genes_filename,
    mapped_compendium_filename,
    normalized_compendium_filename,
    scaler_filename,
)
# +
# Check number of genes is equal between the compendium and the template
compendium_data = pd.read_csv(
normalized_compendium_filename, sep="\t", index_col=0, header=0
)
template_data = pd.read_csv(mapped_template_filename, header=0, sep="\t", index_col=0)
assert compendium_data.shape[1] == template_data.shape[1]
# -
# ## Train: VAE training and reproducibility
# +
# Create VAE directories
output_dirs = [
os.path.join(base_dir, dataset_name, "models"),
os.path.join(base_dir, dataset_name, "logs"),
]
NN_architecture = params["NN_architecture"]
for each_dir in output_dirs:
new_dir = os.path.join(each_dir, NN_architecture)
os.makedirs(new_dir, exist_ok=True)
# -
# Train VAE on new compendium data
train_vae_modules.train_vae(config_filename, normalized_compendium_filename)
# Test reproducibility
# NOTE(review): expected_log is never used below — the check compares the
# final validation loss against a fixed threshold rather than this file.
expected_log = "data/test_vae_logs.tsv"
actual_log = "logs/NN_2500_30/tybalt_2layer_30latent_stats.tsv"
assert pd.read_csv(actual_log, sep="\t")["val_loss"].values[-1] < 15000, pd.read_csv(
    actual_log, sep="\t"
)["val_loss"].values[-1]
| StarcoderdataPython |
6524175 | <filename>notaso/search/urls.py
from django.urls import path
from .views import SearchView
# URL namespace used when reversing, e.g. reverse("search:search_list").
app_name = "search"
# Route the app root ("") to the class-based search view.
urlpatterns = [path("", SearchView.as_view(), name="search_list")]
| StarcoderdataPython |
8133866 | <reponame>arnov/lru-ttl<gh_stars>1-10
import unittest
from lruttl import LRUCache
from time import sleep
class TestCache(unittest.TestCase):
    """Behavioral tests for LRUCache: lookup, TTL expiry, and LRU eviction."""

    def test_basic(self):
        cache = LRUCache(10)
        cache.set('id', ['some object'], 1)
        self.assertEqual(['some object'], cache.get('id'))

    def test_ttl(self):
        cache = LRUCache(10)
        cache.set('id', ['some object'], 1)
        sleep(3)
        self.assertIsNone(cache.get('id'))

    def test_size(self):
        cache = LRUCache(10)
        for item in range(12):
            # Touch key 0 so the LRU policy keeps it warm before each insert.
            cache.get(0)
            cache.set(item, 'object {}'.format(item))
        self.assertIn(0, cache)
        self.assertNotIn(1, cache)
        self.assertIn(11, cache)
unittest.main()
| StarcoderdataPython |
4810999 | <filename>src/bioio.py
"""
A bunch of miscellaneous helpful functions copied from sonLib.
"""
import os
import random
import string
import subprocess
import sys
import tempfile
def system(cmd):
    """Run *cmd* through the shell; raise RuntimeError on a non-zero exit."""
    status = subprocess.call(cmd, shell=True, bufsize=-1,
                             stdout=sys.stdout, stderr=sys.stderr)
    if status != 0:
        raise RuntimeError("Command: %s exited with non-zero status %i" % (cmd, status))
def _getRandomAlphaNumericString(length=10):
    """Return a random string of letters and digits for temp-dir names.

    Replaces the previously undefined getRandomAlphaNumericString helper
    that was lost when this file was copied out of sonLib.
    """
    alphabet = string.ascii_letters + string.digits
    return "".join(random.choice(alphabet) for _ in range(length))


def getTempDirectory(rootDir=None):
    """
    Return a new temporary directory that must be manually deleted.

    Args:
        rootDir: parent directory for the new directory, or None to use the
            system default temp location.
    """
    if rootDir is None:
        return tempfile.mkdtemp()
    while True:
        # Build each candidate from the *original* rootDir; the old code
        # re-joined onto the mutated path, compounding components on retry.
        # NOTE: this check-then-create pattern is inherently racy (TOCTOU);
        # acceptable here because collisions only raise on mkdir below.
        tmpDir = os.path.join(rootDir, "tmp_" + _getRandomAlphaNumericString())
        if not os.path.exists(tmpDir):
            break
    os.mkdir(tmpDir)
    os.chmod(tmpDir, 0o777)  # Ensure everyone has access (was py2-only 0777).
    return tmpDir
def nameValue(name, value, valueType=str, quotes=False):
    """Render a '--name value' command-line fragment.

    Booleans become a bare flag (or nothing), None becomes an empty string,
    and other values are converted with *valueType*, optionally quoted.
    """
    if valueType == bool:
        return "--" + name if value else ""
    if value is None:
        return ""
    rendered = valueType(value)
    if quotes:
        return "--%s '%s'" % (name, rendered)
    return "--%s %s" % (name, rendered)
def popenCatch(command, stdinString=None):
    """Run *command* in a shell and return its standard output.

    Args:
        command: shell command line to execute.
        stdinString: if not None, data fed to the command's stdin.

    Returns:
        The command's captured stdout. NOTE(review): no text mode is
        requested, so on Python 3 this is bytes (this file predates py3);
        confirm what callers expect before changing.

    Raises:
        RuntimeError: if the command exits with a non-zero status.
    """
    if stdinString is not None:  # idiom fix: was `stdinString != None`
        process = subprocess.Popen(command, shell=True,
                                   stdin=subprocess.PIPE, stdout=subprocess.PIPE,
                                   stderr=sys.stderr, bufsize=-1)
        output, nothing = process.communicate(stdinString)
    else:
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE,
                                   stderr=sys.stderr, bufsize=-1)
        output, nothing = process.communicate()
    sts = process.wait()
    if sts != 0:
        raise RuntimeError("Command: %s with stdin string '%s' exited with non-zero status %i" % (command, stdinString, sts))
    return output
| StarcoderdataPython |
5185738 | <reponame>yipstar/surf_python
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.hooks.postgres_hook import PostgresHook
from airflow.operators.slack_operator import SlackAPIPostOperator
from airflow.hooks.base_hook import BaseHook
| StarcoderdataPython |
383805 | <gh_stars>1-10
#!/usr/bin/python3
import sys
# Aggregate runs per bowler/batsman pair from comma-separated "ball" records
# on stdin, then emit "bowler/batsman<TAB>runs/balls" lines.
result = {}
for line in sys.stdin:
    fields = line.strip().split(",")
    if fields[0] != "ball":
        continue
    pair = fields[6] + "/" + fields[4]  # bowler/batsman
    runs = int(fields[7], 10) + int(fields[8], 10)
    if pair in result:
        result[pair][0] += runs
        result[pair][1] += 1
    else:
        result[pair] = [runs, 1]
for pair in result:
    total, balls = result[pair]
    print("%s\t%s" % (pair, str(total) + "/" + str(balls)))
| StarcoderdataPython |
6656008 | #-*- coding: utf-8 -*-
import re
from proxy import Proxy
from basespider import BaseSpider
class KuaiDaiLiSpider(BaseSpider):
    """Spider that scrapes free proxies from www.kuaidaili.com."""

    name = 'kuaidaili'

    def __init__(self, *a, **kwargs):
        super(KuaiDaiLiSpider, self).__init__(*a, **kwargs)
        # Only page 1 of the "inha" (domestic high-anonymity) list —
        # range(1, 2); widen the range to crawl more pages.
        self.urls = ['http://www.kuaidaili.com/free/inha/%s/' % i for i in range(1, 2)]
        # Browser-like headers to avoid trivial bot blocking.
        self.headers = {
            'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
            'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'en-US,en;q=0.5',
            'Connection': 'keep-alive',
            'Host': 'www.kuaidaili.com',
            'Upgrade-Insecure-Requests': '1',
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.11; rv:50.0) Gecko/20100101 Firefox/50.0',
        }
        self.init()

    def parse_page(self, response):
        """Extract proxy rows from a listing page and register each Proxy."""
        # Each table row holds 7 <td> cells; the captures used below are
        # item[0]=ip, item[1]=port, item[2]=anonymity, item[4]=location —
        # presumably matching the site's column order; verify against the
        # live page layout before relying on the unused indices 3, 5, 6.
        pattern = re.compile(
            '<tr>\s.*?<td.*?>(.*?)</td>\s.*?<td.*?>(.*?)</td>\s.*?<td.*?>(.*?)</td>\s.*?<td.*?>('
            '.*?)</td>\s.*?<td.*?>(.*?)</td>\s.*?<td.*?>(.*?)</td>\s.*?<td.*?>(.*?)</td>\s.*?</tr>',
            re.S)
        items = re.findall(pattern, response.body)
        for item in items:
            proxy = Proxy()
            proxy.set_value(
                ip = item[0],
                port = item[1],
                country = item[4],
                anonymity = item[2],
                source = self.name,
            )
            self.add_proxy(proxy)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.