text stringlengths 957 885k |
|---|
import sys
import scipy.ndimage
import os.path
import HebbLearn as hl
import numpy as np
import matplotlib.pyplot as plt
# Optional dependency: only needed for HDF5 I/O; warn but continue if absent.
try:
    import h5py
except ImportError:
    print('h5py cannot be loaded - may cause error')

fl = hl.NonlinearGHA()
num_textures = 688

# ==> Load (or build and cache) the 512x512 grayscale texture stack.
if os.path.isfile('textures.npy'):
    print('==> Load previously saved textures data')
    textures = np.load('textures.npy')
else:
    print('==> Loading data')
    textures = np.zeros((512, 512, num_textures))
    for i in range(num_textures):
        fn = '/home/rabadi/data/textures/' + str(i) + '.jpg'
        try:
            # NOTE(review): scipy.ndimage.imread was removed in SciPy 1.2;
            # this requires an old SciPy (or a port to imageio/PIL) -- confirm.
            textures[:, :, i] = scipy.ndimage.imread(fn, flatten=True) / 255
        except Exception:
            # Image smaller than 512x512: pad missing rows/columns with
            # random noise so every slice has a uniform shape.
            print('dimensionality miss-match - fixing')
            tmp = scipy.ndimage.imread(fn, flatten=True) / 255
            if np.shape(tmp)[0] < 512:
                tmp = np.concatenate(
                    (tmp, np.random.rand(512 - np.shape(tmp)[0], np.shape(tmp)[1])),
                    axis=0)
            if np.shape(tmp)[1] < 512:
                tmp = np.concatenate(
                    (tmp, np.random.rand(512, 512 - np.shape(tmp)[1])),
                    axis=1)
            textures[:, :, i] = tmp
    np.save('textures.npy', textures)

# Matched-size stack of uniform-noise images, normalized to [0, 1].
random = np.random.rand(512, 512, np.shape(textures)[2])
random = random / np.max(random)  # make sure all normalized

print('==> mean centering data')
pop_mean = np.mean(np.concatenate((random, textures), axis=2))
random = random - pop_mean
textures = textures - pop_mean
pop_std = np.std(np.concatenate((random, textures), axis=2))
random = random / pop_std
textures = textures / pop_std

# Hyper-parameters: either all five from the command line, or defaults.
if len(sys.argv) > 1:
    filter_size = int(sys.argv[1])
    step_size = int(sys.argv[2])
    out_dimension = int(sys.argv[3])
    LR = float(sys.argv[4])
    n_samples = int(sys.argv[5])
else:
    filter_size = 512
    step_size = 512
    out_dimension = 1
    LR = 1
    n_samples = 500
nonlinearity = hl.LINEAR
# NOTE(review): this overrides the CLI/default learning rate -- confirm intended.
LR = 0

print('==> Classification performance')
# Flatten each 512x512 image into a row vector; Fortran order keeps the
# column-major pixel ordering consistent with the reconstruction code below.
tex_vex = np.reshape(textures, (512 * 512, num_textures), order='F').T
rand_vex = np.reshape(random, (512 * 512, num_textures), order='F').T
diff_mean = (np.mean(rand_vex[:n_samples, :], axis=0)
             - np.mean(tex_vex[:n_samples, :], axis=0))

# Held-out test set: 100 textures (+1) and 100 noise images (-1), shuffled.
test = np.concatenate((tex_vex[500:600, :], rand_vex[500:600, :]), axis=0)
y = np.ones((200, 1))
y[:100] = -1
shuff = np.random.permutation(200)
test = test[shuff, :]
y = y[shuff]
corr = 0

print('==> Training')
k_tex = fl.Train(textures[:, :, :n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)
k_rand = fl.Train(random[:, :, :n_samples], filter_size, step_size, out_dimension, LR, nonlinearity)

# Population (average) reconstructions for each class.
tex_pop = np.zeros((512, 512))
rand_pop = np.zeros((512, 512))
for i in range(n_samples):
    tex_pop = tex_pop + fl.ImageReconstruction(textures[:, :, i], np.reshape(k_tex, (1, 262144, 1)), filter_size, step_size, nonlinearity)
    rand_pop = rand_pop + fl.ImageReconstruction(random[:, :, i], np.reshape(k_rand, (1, 262144, 1)), filter_size, step_size, nonlinearity)
tpv = np.reshape(tex_pop, (512 * 512, 1), order='F').T / n_samples
rpv = np.reshape(rand_pop, (512 * 512, 1), order='F').T / n_samples
diff_mean = (np.mean(rand_vex[:n_samples, :], axis=0)
             - np.mean(tex_vex[:n_samples, :], axis=0))

k_tex = k_tex[:, :, 0]
k_rand = k_rand[:, :, 0]
k = np.multiply(0.5, k_tex + k_rand).T
w = np.multiply(k[:, 0], diff_mean)  # works because k is a vector

# Score each test image with the learned linear readout.
for i in range(200):
    x = test[i, :]
    kx = np.reshape(
        fl.ImageReconstruction(np.reshape(x, (512, 512), order='F'),
                               np.reshape(k, (1, 262144, 1)),
                               filter_size, step_size, nonlinearity),
        262144, order='F')
    yhat = np.sign(np.dot(w, kx.T))
    if yhat == y[i]:
        corr = corr + 1

pc = corr / 200.
print()
if pc < 0.5:
    # Sign of the linear readout is arbitrary; report accuracy above chance.
    print('flipped')
    pc = 1. - pc
print('==> Percent Correct')
print(pc)
|
<reponame>aimanahmedmoin1997/DataCamp
'''
How often do we get no-hitters?
The number of games played between each no-hitter in the modern era (1901-2015) of Major League Baseball
is stored in the array nohitter_times.
If you assume that no-hitters are described as a Poisson process, then the time between no-hitters is
Exponentially distributed. As you have seen, the Exponential distribution has a single parameter, which
we will call τ, the typical interval time. The value of the parameter τ that makes the exponential distribution
best match the data is the mean interval time (where time is in units of number of games) between no-hitters.
Compute the value of this parameter from the data. Then, use np.random.exponential() to "repeat" the history
of Major League Baseball by drawing inter-no-hitter times from an exponential distribution with the τ you found
and plot the histogram as an approximation to the PDF.
NumPy, pandas, matplotlib.pyplot, and seaborn have been imported for you as np, pd, plt, and sns, respectively.
INSTRUCTIONS
100XP
-Seed the random number generator with 42.
-Compute the mean time (in units of number of games) between no-hitters.
-Draw 100,000 samples from an Exponential distribution with the parameter you computed from the mean of the inter-no-hitter times.
-Plot the theoretical PDF using plt.hist(). Remember to use keyword arguments bins=50, normed=True, and histtype='step'. Be sure to label your axes.
-Show your plot.
'''
import numpy as np
import matplotlib.pyplot as plt

# Games played between consecutive no-hitters, modern era (1901-2015).
nohitter_times = np.array([843, 1613, 1101, 215, 684, 814, 278, 324, 161, 219, 545,
                           715, 966, 624, 29, 450, 107, 20, 91, 1325, 124, 1468,
                           104, 1309, 429, 62, 1878, 1104, 123, 251, 93, 188, 983,
                           166, 96, 702, 23, 524, 26, 299, 59, 39, 12, 2,
                           308, 1114, 813, 887, 645, 2088, 42, 2090, 11, 886, 1665,
                           1084, 2900, 2432, 750, 4021, 1070, 1765, 1322, 26, 548, 1525,
                           77, 2181, 2752, 127, 2147, 211, 41, 1575, 151, 479, 697,
                           557, 2267, 542, 392, 73, 603, 233, 255, 528, 397, 1529,
                           1023, 1194, 462, 583, 37, 943, 996, 480, 1497, 717, 224,
                           219, 1531, 498, 44, 288, 267, 600, 52, 269, 1086, 386,
                           176, 2199, 216, 54, 675, 1243, 463, 650, 171, 327, 110,
                           774, 509, 8, 197, 136, 12, 1124, 64, 380, 811, 232,
                           192, 731, 715, 226, 605, 539, 1491, 323, 240, 179, 702,
                           156, 82, 1397, 354, 778, 603, 1001, 385, 986, 203, 149,
                           576, 445, 180, 1403, 252, 675, 1351, 2983, 1568, 45, 899,
                           3260, 1025, 31, 100, 2055, 4043, 79, 238, 3931, 2351, 595,
                           110, 215, 0, 563, 206, 660, 242, 577, 179, 157, 192,
                           192, 1848, 792, 1693, 55, 388, 225, 1134, 1172, 1555, 31,
                           1582, 1044, 378, 1687, 2915, 280, 765, 2819, 511, 1521, 745,
                           2491, 580, 2072, 6450, 578, 745, 1075, 1103, 1549, 1520, 138,
                           1202, 296, 277, 351, 391, 950, 459, 62, 1056, 1128, 139,
                           420, 87, 71, 814, 603, 1349, 162, 1027, 783, 326, 101,
                           876, 381, 905, 156, 419, 239, 119, 129, 467])

# Seed random number generator for reproducibility
np.random.seed(42)

# Compute mean no-hitter time: tau (the MLE of the Exponential parameter)
tau = np.mean(nohitter_times)

# Draw 100,000 samples out of an exponential distribution with parameter tau
inter_nohitter_time = np.random.exponential(tau, 100000)

# Plot the theoretical PDF and label axes.
# BUG FIX: `normed` was removed from plt.hist in Matplotlib 3.1;
# `density=True` is the drop-in replacement.
_ = plt.hist(inter_nohitter_time,
             bins=50, density=True, histtype='step')
_ = plt.xlabel('Games between no-hitters')
_ = plt.ylabel('PDF')

# Show the plot
plt.show()
|
<filename>pysvc/unified/client.py
##############################################################################
# Copyright 2019 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
'''Unified SSH client for IBM Spectrum Virtualize Family Storage'''
# Py2/Py3 compatibility: StringIO moved to the io module in Python 3.
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO
import pkg_resources
from logging import getLogger

import pysvc.errors as ce
from pysvc.messages import UnifiedMessages
from pysvc.transports.ssh_transport import SSHTransport
from pysvc.unified.clispec import parse
from pysvc import PYSVC_DEFAULT_LOGGER
from .scp_cli_client import ScpClient
from pysvc.unified.helpers import etree
from pysvc.unified.helpers.xml_util import XMLException

__all__ = ['connect']

# Module-level logger shared by all helpers in this module.
xlog = getLogger(PYSVC_DEFAULT_LOGGER)
class IncorrectDeviceTypeError(ce.UnableToConnectException):
    '''Raise if the expected device type is not found.'''
    pass
class NoSpecificationError(ce.UnableToConnectException):
    '''Raise if no CLI specification is found for the storage array.'''
    pass
class UnifiedSSHClient(object):
    '''Unified SSH client for IBM Spectrum Virtualize Family Storage.

    It creates callable stubs for the CLI commands of storage array.
    The stub is implemented as :py:class:`.Proxy` for
    :py:class:`pysvc.pysvc.unified.clispec.CLICommand`.
    '''

    def __init__(self):
        super(UnifiedSSHClient, self).__init__()
        self.transport = None       # active SSH transport, None when closed
        self.specification = None   # parsed CLI specification
        self.flexible = False       # relaxed CLI-spec parsing mode

    def close(self):
        '''Close the connection.'''
        if self.transport:
            self.transport.disconnect()
            self.transport = None
        self.specification = None

    def send_raw_command(self, cmd, extra=None, stdin=None):
        '''Send plain string as command to storage array and execute.

        :param cmd: The command.
        :type cmd: str
        :param extra: (optional) The extra parameters.
            * timeout: (float) Response timeout for each sending
              command in seconds.
        :type extra: dict
        :return: The content from stdout and stderr of the executed command.
        :rtype: tuple
        '''
        timeout = extra.get('timeout', 0) if extra else 0
        xlog.debug("+++{0}+++".format(cmd))
        _, stdout, stderr = self.transport.send_command(
            cmd, raw=True, timeout=timeout, stdin_input=stdin)
        return stdout, stderr

    def get_device_info(self):
        '''Get the device information of storage array.

        :return: The device type and version, or None when no
                 specification has been loaded.
        :rtype: tuple
        '''
        return (self.specification.array_type,
                self.specification.array_infos) if self.specification else None

    def get_dump_element_tree(self, remote_path, timeout=None):
        """Get the element tree of the XML file at *remote_path*.

        :param remote_path: full path on the SVC,
            e.g. /dumps/iostats/Nn_stats_151240_151120_162817
        :param timeout: int, the timeout of the session
        :return: ElementTree
        :raises XMLException: if the dump file is not valid XML
        """
        raw_xml = self.get_dump(remote_path, timeout)
        try:
            return etree.parse(StringIO(raw_xml))
        except XMLException:
            err_msg = (r'The dump file context is not valid XML: {}'
                       .format(raw_xml))
            xlog.error(err_msg)
            # BUG FIX: the original `raise ex(err_msg)` called an exception
            # *instance*, which is a TypeError; construct the exception class.
            raise XMLException(err_msg)

    def get_dump(self, remote_path, timeout=None):
        """Fetch a remote file over SCP and return its content.

        :param remote_path: full path on the SVC,
            e.g. /dumps/iostats/Nn_stats_151240_151120_162817
        :param timeout: int, the timeout of the session
        :return: str, the context of the file
        """
        scp_client = ScpClient(self.transport.transport.get_transport(),
                               timeout)
        return scp_client.receive(remote_path)

    def __getattr__(self, name):
        # Resolve unknown attributes against the CLI specification and wrap
        # them in a Proxy so they can be called like ordinary methods.
        obj = getattr(self.specification, name, None)
        if obj is None:
            raise AttributeError(
                "'%s' object has no attribute '%s'" %
                (self.__class__.__name__, name))
        return Proxy(obj, self.send_raw_command)

    def __dir__(self):
        return dir(self.specification)
class Proxy(object):
    '''Proxy for a CLI command.

    Read the __doc__ attribute to get documentation instead of
    using __builtins__.help(), e.g.

    >>> print a_proxy.__doc__
    Help on ...
    '''

    def __init__(self, referent, context):
        super(Proxy, self).__init__()
        self.referent = referent   # wrapped CLI command (or sub-command)
        self.context = context     # callable used to send the raw command

    @property
    def __doc__(self):
        # Forward the wrapped command's documentation.
        return getattr(self.referent, '__doc__', None)

    def __getattr__(self, name):
        # Sub-commands are wrapped in a Proxy too, sharing the same context.
        at = getattr(self.referent, name, None)
        if at is None:
            raise AttributeError(
                "'%s' object has no attribute '%s'" %
                (self.__class__.__name__, name))
        return Proxy(at, self.context)

    def __dir__(self):
        return dir(self.referent)

    def __call__(self, **kwargs):
        '''Call the wrapped referent with given parameters and
        return the result.'''
        return self.referent(self.context, kwargs)
def yield_device_type(conn):
    """Probe the array and yield ``(device, version)`` candidates.

    Temporarily swaps in the generic ``xsf 1.0`` specification, then tries
    IFS/SoNAS-style listing commands followed by SVC/Storwize-style ones.
    The original specification is always restored on exit.
    """
    conn.specification, oldspec = parse_cli_spec(
        conn, get_cli_spec('xsf', '1.0')), conn.specification
    try:
        try:
            for clu in conn.cli.lscluster():
                device = 'ifs' if clu.get('Profile') == 'IFS' else 'sonas'
                for nd in conn.cli.lsnode(cluster=clu.get('Name')):
                    # Field capitalization differs between product releases.
                    yield device, canonical_version(
                        nd.get('Product Version', '')
                        or nd.get('Product version', '')
                        or nd.get('product version', ''))
        except GeneratorExit:
            raise
        except Exception:
            xlog.exception('No IFS or SoNAS is found, and continue.')
        try:
            for clu in conn.svcinfo.lscluster():
                if clu.get('location') == 'local':
                    for clu1 in conn.svcinfo.lscluster(cluster=clu.get('id')):
                        yield 'svc', canonical_version(
                            clu1.get('code_level', ''))
        except GeneratorExit:
            raise
        except Exception:
            xlog.exception('No SVC or Storwize is found, and continue.')
    finally:
        conn.specification = oldspec
def set_specification(conn, with_remote_clispec=True):
    """Attach a CLI specification to *conn*.

    Prefer the spec published by the array itself; otherwise fall back to
    the bundled local specs, trying each detected device type in turn.

    :raises NoSpecificationError: when no usable spec can be found.
    """
    spec = get_remote_cli_spec(conn) if with_remote_clispec else None
    if not spec:
        xlog.info(UnifiedMessages.UNIFIED_PARSE_LOCAL_START)
        for d, t in yield_device_type(conn):
            try:
                spec = parse_cli_spec(conn, get_cli_spec(d, t))
            except Exception:
                xlog.exception(
                    'No CLI specification found for "%s, %s", and continue.' %
                    (d, t))
            if spec:
                break
    if not spec:
        raise NoSpecificationError(UnifiedMessages.UNIFIED_NO_CLI_SPEC)
    conn.specification = spec
def check_device_type(conn, device_type):
    """Validate that *conn* talks to the expected *device_type*.

    A falsy *device_type* disables the check.

    :raises IncorrectDeviceTypeError: on mismatch or missing device info.
    """
    if not device_type:
        return
    info = conn.get_device_info()
    if not info or info[0] != device_type:
        raise IncorrectDeviceTypeError(
            UnifiedMessages.UNIFIED_INCORRECT_DEVICE_TYPE(device_type))
def get_remote_cli_spec(conn):
    """Try to fetch and parse the CLI spec published by the array itself.

    Returns the parsed spec, or None when the array does not provide one;
    failures are logged, never raised (callers fall back to bundled specs).
    """
    try:
        stdout, stderr = conn.send_raw_command('catxmlspec')
        if stdout:
            return parse_cli_spec(conn, StringIO(stdout.decode()))
        xlog.warning(UnifiedMessages.UNIFIED_CATXMLSPEC_FAIL(stderr))
    except Exception:
        xlog.exception(UnifiedMessages.UNIFIED_PARSE_REMOTE_FAIL)
def parse_cli_spec(conn, source):
    """Parse *source* into a CLI spec; None when it defines no commands."""
    spec = parse(source, flexible=conn.flexible)
    # make sure there is CLI command defined in CLI spec
    return spec if spec and spec.cmds else None
def device_type_alias(device, version):
    """Map product names/versions onto the bundled spec file names.

    Storwize (and the common 'storwise' misspelling) share the SVC specs;
    every 6.x release other than 6.1/6.2/6.3 reuses the 6.3 spec.
    """
    if device in ('storwize', 'storwise'):
        device = 'svc'
    if device == 'svc':
        if version.startswith('6.') and version not in ('6.1', '6.2', '6.3'):
            version = '6.3'
    return device, version
def get_cli_spec(device, version):
    """Open the bundled ``<device>-<version>.xml`` CLI spec as a stream."""
    # pylint: disable-msg=E1101
    return pkg_resources.resource_stream(
        __name__, '%s-%s.xml' % device_type_alias(device, version))
def canonical_version(data):
    """Collapse a version string to its ``major.minor`` prefix."""
    return '.'.join(data.strip().split('.')[:2])
def connect(address, **kwargs):
    '''Connect to storage array through SSH.

    :param address: The IP address or host name of storage array.
    :type address: str
    :param username: (optional) The username of login account.
    :type username: str
    :param password: (optional) The password of login account or private key.
    :type password: str
    :param port: (optional) The port of SSH service in storage array,
                 it is 22 by default.
    :type port: int
    :param privatekey: (optional) The private key of login account.
    :type privatekey: str
    :param privatekey_filename: (optional) The file name of private key.
    :type privatekey_filename: str
    :param add_hostkey: (optional) Indicates whether to add SSH host key of
                        storage array to known hosts, it is True by default.
    :type add_hostkey: bool
    :param timeout: (optional) Connection timeout in seconds,
                    it is 30 by default.
    :type timeout: int
    :param cmd_timeout: (optional) Response timeout for each sending command
                        in seconds, it is 60.0 by default.
    :type cmd_timeout: float
    :param device_type: (optional) The device type of storage array, e.g.
                        "svc", "storwize", None.
    :type device_type: str
    :param flexible: (optional) Indicates whether to enable flexible mode
                     which bypasses strict error checking, it is
                     False by default.
    :type flexible: bool
    :param with_remote_clispec: (optional) Indicates whether to read CLI
                                specification from remote storage
                                array, it is True by default.
    :type with_remote_clispec: bool
    :return: The connection object to storage array.
    :rtype: :py:class:`.UnifiedSSHClient`

    Example:

    >>> connect('ip', username='admin', privatekey_filename=r'/local/key')
    <pysvc.pysvc.unified.client.UnifiedSSHClient object at 0x...>
    '''
    g = kwargs.get
    trans = SSHTransport(
        host=address,
        user=g('username'),
        password=g('password'),
        port=g('port', 22),
        auto_add=g('add_hostkey', True),
        pkey=g('privatekey'),
        pkey_file=g('privatekey_filename'),
        timeout=g('timeout', 30),
        cmd_timeout=g('cmd_timeout', 60.0))
    conn = UnifiedSSHClient()
    try:
        trans.connect()
        conn.flexible = g('flexible', False)
        conn.transport = trans
        set_specification(conn, g('with_remote_clispec', True))
        check_device_type(conn, g('device_type'))
        return conn
    except BaseException:
        # Never leak a half-open transport, whatever went wrong.
        trans.disconnect()
        conn.close()
        raise
|
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import importlib
import time
class WidgetBase(QtCore.QObject):
    """Base class for GUI widgets that own and manage a listener thread."""

    messageBoxSignal = QtCore.pyqtSignal(str)

    def __init__(self, *args):
        super().__init__(*args)
        self.signalManager = None
        self.listener = None
        # [module name, class name] of the listener implementation to load.
        self.listener_class = ['mrigtlbridge.listener_base', 'ListenerBase']
        self.threadActive = False
        self.listenerParameter = {}

    def __del__(self):
        if self.listener:
            self.listener.terminate()

    def buildGUI(self, parent):
        """Populate *parent* with widgets; overridden by subclasses."""
        pass

    def updateGUI(self, state):
        """React to a listener state change; overridden by subclasses."""
        if state == 'listenerConnected':
            pass
        elif state == 'listenerDisconnected':
            pass

    def setSignalManager(self, sm):
        self.signalManager = sm
        self.signalManager.connectSlot('listenerConnected', self.onListenerConnected)
        self.signalManager.connectSlot('listenerDisconnected', self.onListenerDisconnected)
        self.signalManager.connectSlot('listenerTerminated', self.onListenerTerminated)
        # Add custom signals for the listener
        module = importlib.import_module(self.listener_class[0])
        class_ = getattr(module, self.listener_class[1])
        listener = class_()
        signalList = listener.customSignalList
        for name in signalList.keys():
            self.signalManager.addCustomSignal(name, signalList[name])

    def startListener(self):
        if self.signalManager is None:
            raise Exception("SignalManager is not set!")
        if not self.listener:
            try:
                module = importlib.import_module(self.listener_class[0])
                class_ = getattr(module, self.listener_class[1])
                self.listener = class_()
                self.listener.connectSlots(self.signalManager)
                self.listener.configure(self.listenerParameter)
                self.listener.start()
                # At this point, it is not clear if the connection succeeded.
            except Exception:
                print("Failed to start Listener: ")
                # BUG FIX: the original referenced the misspelled attribute
                # `self.listner` and called stop() even when the listener was
                # never created (self.listener is None -> AttributeError).
                if self.listener:
                    self.listener.stop()
                self.listener = None
                return
        else:
            dlg = QMessageBox()
            dlg.setWindowTitle("Warning")
            dlg.setText("A listener is already running. Kill it?")
            dlg.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
            dlg.setIcon(QMessageBox.Question)
            button = dlg.exec()
            if button == QMessageBox.Yes:
                # TODO: Should we call terminate() instead?
                self.listener.terminate()
                self.listener = None
            # else: do nothing and keep the existing listener.
            return

    def stopListener(self):
        if self.listener:
            self.listener.stop()
            self.listener = None
            self.updateGUI('Disconnected')
        else:
            raise Exception("No existing Listener to stop!")

    def onListenerConnected(self, className):
        module = importlib.import_module(self.listener_class[0])
        class_ = getattr(module, self.listener_class[1])
        if self.listener and class_.__name__ == className:
            self.updateGUI('listenerConnected')

    def onListenerDisconnected(self, className):
        module = importlib.import_module(self.listener_class[0])
        class_ = getattr(module, self.listener_class[1])
        if class_.__name__ == className:
            self.listener = None
            self.updateGUI('listenerDisconnected')

    def onListenerTerminated(self, className):
        module = importlib.import_module(self.listener_class[0])
        class_ = getattr(module, self.listener_class[1])
        if class_.__name__ == className:
            self.listener = None
            self.updateGUI('listenerDisconnected')
|
from collections import namedtuple
from math import sqrt
import random
try:
import Image
except ImportError:
from PIL import Image
from colormath.color_objects import LabColor, sRGBColor
from colormath.color_conversions import convert_color
from colormath.color_diff import delta_e_cie1976
import pandas as pd
import csv
# Reference CSS/HTML named colors expressed in CIE L*a*b* space, used as
# anchors for delta-E color-distance comparisons in colorz().
# NOTE(review): 'sliver' is presumably a misspelling of 'silver', kept
# because the name is used as a CSV column header -- confirm before renaming.
black=LabColor (lab_l=0.0000,lab_a=0.0000,lab_b=0.0000)
gray=LabColor (lab_l=53.5850,lab_a=-0.0003,lab_b=-0.0051)
sliver=LabColor (lab_l=77.7044,lab_a=-0.0004,lab_b=-0.0069)
white=LabColor (lab_l=100.0000,lab_a=-0.0005,lab_b=-0.0086)
maroon=LabColor (lab_l=25.5344,lab_a=48.0439,lab_b=38.0559)
red=LabColor (lab_l=53.2390,lab_a=80.0905,lab_b=67.2014)
olive=LabColor (lab_l=51.8687,lab_a=-12.9319,lab_b=56.6742)
yellow=LabColor (lab_l=97.1388,lab_a=-21.5578,lab_b=94.4773)
green=LabColor (lab_l=46.2276,lab_a=-51.6986,lab_b=49.8970)
lime=LabColor (lab_l=87.7350,lab_a=-86.1829,lab_b=83.1795)
teal=LabColor (lab_l=48.2545,lab_a=-28.8437,lab_b=-8.4814)
aqua=LabColor (lab_l=91.1140,lab_a=-48.0832,lab_b=-14.1386)
navy=LabColor (lab_l=12.9734,lab_a=47.5046,lab_b=-64.7053)
blue=LabColor (lab_l=32.2994,lab_a=79.1914,lab_b=-107.8655)
orange=LabColor (lab_l=74.9347,lab_a=23.9292,lab_b=78.9486)
purple=LabColor (lab_l=29.7843,lab_a=58.9285,lab_b=-36.4932)
fuchsia=LabColor (lab_l=60.3236,lab_a=98.2353,lab_b=-60.8350)
brown=LabColor (lab_l=37.4691,lab_a=26.4404,lab_b=40.9820)
# A pixel sample: coords = channel values, n = dimensionality, ct = count.
Point = namedtuple('Point', ('coords', 'n', 'ct'))
# A k-means cluster: member points, its center Point, and dimensionality.
Cluster = namedtuple('Cluster', ('points', 'center', 'n'))


def get_points(img):
    """Convert an image's color histogram into a list of RGB Points."""
    w, h = img.size
    return [Point(color, 3, count) for count, color in img.getcolors(w * h)]
def rtoh(rgb):
    """Format an (r, g, b) tuple as an HTML '#rrggbb' color string."""
    return '#%s' % ''.join('%02x' % p for p in rgb)


def colorz(filename, n=5):
    """Extract the *n* dominant colors of an image via k-means.

    Returns a dict keyed by '<filename><i>' whose values are
    [html_color, (r, g, b), delta-E to each reference color], plus an
    'index' row naming the columns (for the CSV export).
    """
    img = Image.open(filename)
    img.thumbnail((200, 200))
    points = get_points(img)
    clusters = kmeans(points, n, 1)
    # BUG FIX: the original kept Python-3 `map` objects, which cannot be
    # indexed (col[0]) below; materialize each center as a list of ints.
    rgbs = [list(map(int, c.center.coords)) for c in clusters]
    # Reference colors, in the same order as the header row below.
    refs = [('black', black), ('gray', gray), ('sliver', sliver),
            ('white', white), ('maroon', maroon), ('red', red),
            ('olive', olive), ('yellow', yellow), ('green', green),
            ('lime', lime), ('teal', teal), ('aqua', aqua),
            ('navy', navy), ('blue', blue), ('orange', orange),
            ('purple', purple), ('fuchsia', fuchsia), ('brown', brown)]
    # Dictionary keyed by '<filename><counter>'; the 'index' entry is the
    # header row written first by the CSV export.
    col_dict = {}
    col_dict['index'] = ['html_color', 'r,g,b'] + [name for name, _ in refs]
    counter = 1
    for col in rgbs:
        col_index = filename + str(counter)
        html = rtoh(col)
        scaled = sRGBColor(col[0] / 255.0, col[1] / 255.0, col[2] / 255.0)
        lab_n = convert_color(scaled, LabColor)
        # delta-E distance from this cluster center to every reference color.
        deltas = [delta_e_cie1976(lab_n, ref) for _, ref in refs]
        col_dict[col_index] = [html, tuple(col)] + deltas
        counter += 1
    return col_dict
def euclidean(p1, p2):
    """Euclidean distance between two Points of dimensionality p1.n."""
    return sqrt(sum((p1.coords[i] - p2.coords[i]) ** 2 for i in range(p1.n)))
def calculate_center(points, n):
    """Weighted centroid of *points* (weights = pixel counts) as a Point."""
    vals = [0.0] * n
    plen = 0
    for p in points:
        plen += p.ct
        for i in range(n):
            vals[i] += p.coords[i] * p.ct
    return Point([v / plen for v in vals], n, 1)
def kmeans(points, k, min_diff):
    """Lloyd's k-means over Points; stops when no center moves >= min_diff."""
    clusters = [Cluster([p], p, p.n) for p in random.sample(points, k)]
    while True:
        plists = [[] for _ in range(k)]
        # Assignment step: each point joins its nearest cluster center.
        for p in points:
            smallest_distance = float('Inf')
            for i in range(k):
                distance = euclidean(p, clusters[i].center)
                if distance < smallest_distance:
                    smallest_distance = distance
                    idx = i
            plists[idx].append(p)
        # Update step: recompute centers and track the largest shift.
        diff = 0
        for i in range(k):
            old = clusters[i]
            center = calculate_center(plists[i], old.n)
            new = Cluster(plists[i], center, old.n)
            clusters[i] = new
            diff = max(diff, euclidean(old.center, new.center))
        if diff < min_diff:
            break
    return clusters
def _export_colors(image_type, image_count=28):
    """Run colorz() over `<image_type>_<i>.jpg` and dump results to a CSV."""
    total = {}
    for i in range(image_count):
        img_name = image_type + "_" + str(i) + ".jpg"
        total.update(colorz(img_name))
    # BUG FIX: Python 3's csv module needs a text-mode file opened with
    # newline='' -- the original Python-2-style open(..., 'wb') raises
    # TypeError on write.  The `with` block also closes the file properly.
    with open(image_type + ".csv", 'w', newline='') as fh:
        writer = csv.writer(fh)
        for key, value in total.items():
            writer.writerow([key, value])


# One CSV per genre (the original repeated this loop four times verbatim).
for image_type in ("comedy", "action", "animation", "Horror"):
    _export_colors(image_type)
<filename>src/enums.py
from enum import auto, Enum
from typing import List
class Interval(Enum):
    """
    This enum describes the interval name
    and the number of half steps it contains.
    """

    MINOR_2ND = (1, 'm2', 'minor 2nd')
    MAJOR_2ND = (2, 'M2', 'major 2nd')
    MINOR_3RD = (3, 'm3', 'minor 3rd')
    MAJOR_3RD = (4, 'M3', 'major 3rd')
    PERFECT_4TH = (5, 'P4', '4th')
    TRITONE = (6, 'TT', 'tritone')
    PERFECT_5TH = (7, 'P5', '5th')
    MINOR_6TH = (8, 'm6', 'minor 6th')
    MAJOR_6TH = (9, 'M6', 'major 6th')
    MINOR_7TH = (10, 'm7', 'minor 7th')
    MAJOR_7TH = (11, 'M7', 'major 7th')
    OCTAVE = (12, 'P8', 'octave')

    def __init__(self, half_steps: int, short_name: str, full_name: str):
        # Unpack the value tuple into named attributes.
        self.half_steps = half_steps
        self.short_name = short_name
        self.full_name = full_name

    def is_major(self) -> bool:
        """True for the major 2nd/3rd/6th/7th."""
        # Access members through the class: member-of-member access
        # (self.MAJOR_2ND) is deprecated in newer Python versions.
        return self in (
            Interval.MAJOR_2ND,
            Interval.MAJOR_3RD,
            Interval.MAJOR_6TH,
            Interval.MAJOR_7TH,
        )

    def is_minor(self) -> bool:
        """True for the minor 2nd/3rd/6th/7th."""
        return self in (
            Interval.MINOR_2ND,
            Interval.MINOR_3RD,
            Interval.MINOR_6TH,
            Interval.MINOR_7TH,
        )

    def is_perfect(self) -> bool:
        """True for the perfect 4th, 5th and the octave."""
        return self in (
            Interval.PERFECT_4TH,
            Interval.PERFECT_5TH,
            Interval.OCTAVE,
        )

    @classmethod
    def from_half_steps(cls, half_steps: int) -> 'Interval':
        """Look up the interval spanning *half_steps* semitones (1-12)."""
        for interval in cls:
            if interval.half_steps == half_steps:
                return interval
        raise ValueError(f'Interval with {half_steps} is not valid.')
class Note(Enum):
    """
    In music, two note names can represent the same pitch.
    This concept cannot be described using python enums, since enums have
    unique values.  To work around this issue, each value is a tuple whose
    first element (an auto() counter) keeps members distinct; only the
    second value -- the actual note number -- is meaningful.
    """

    C = (auto(), 1)
    C_SHARP = (auto(), 2)
    D_FLAT = (auto(), 2)
    D = (auto(), 3)
    D_SHARP = (auto(), 4)
    E_FLAT = (auto(), 4)
    E = (auto(), 5)
    F = (auto(), 6)
    F_SHARP = (auto(), 7)
    G_FLAT = (auto(), 7)
    G = (auto(), 8)
    G_SHARP = (auto(), 9)
    A_FLAT = (auto(), 9)
    A = (auto(), 10)
    A_SHARP = (auto(), 11)
    B_FLAT = (auto(), 11)
    B = (auto(), 12)

    def __init__(self, _, number) -> None:
        # 'C_SHARP' -> 'C#', 'D_FLAT' -> 'Db'
        self.display_name = self.name.replace('_SHARP', '#').replace('_FLAT', 'b')
        self.number = number

    def is_sharp(self) -> bool:
        return '#' in self.display_name

    def is_flat(self) -> bool:
        return 'b' in self.display_name

    @classmethod
    def from_name(cls, note_name: str) -> 'Note':
        """Look up a note by display name, case-insensitively ('c#' -> C#)."""
        note_name = note_name.capitalize()
        for note in cls:
            if note_name == note.display_name:
                return note
        raise ValueError(f'Cannot find note with name: {note_name}')

    @classmethod
    def from_number(cls, note_number: int) -> List['Note']:
        """
        If a "note_number" resolves to 2 note names,
        it will always return them in this order:
        [G#, Ab], [D#, Eb], etc...
        """
        return [note for note in cls if note.number == note_number]
|
from math import sin, cos, radians
import numpy as np
from matplotlib import pyplot as plt
from shapely.geometry import Point, Polygon
def easyplt(poly1, poly2, img):
    """Debug helper: overlay two polygons on the image at path *img*."""
    poly1 = np.array(poly1)
    xs1, ys1 = poly1[:, 0], poly1[:, 1]
    xs2, ys2 = poly2[:, 0], poly2[:, 1]
    plt.figure()
    im = plt.imread(img)
    plt.imshow(im)
    plt.plot(xs1, ys1)
    plt.plot(xs2, ys2)
    plt.show()
def easyplt2(poly, im1, im2):
    """Debug helper: show *poly* over image *im1* side by side with *im2*."""
    xs, ys = poly[:, 0], poly[:, 1]
    plt.subplot(1, 2, 1)
    plt.imshow(im1)
    plt.plot(xs, ys)
    plt.subplot(1, 2, 2)
    plt.imshow(im2)
    plt.show()
def rot_gen(degree):
    """Return (cos, sin) of *degree* for building a 2D rotation matrix."""
    theta = radians(degree)  # Convert angle to radians
    return cos(theta), sin(theta)
def random_sample(arr: np.ndarray, size: int = 1) -> np.ndarray:
    """Randomly pick *size* columns of *arr* (no replacement), as rows."""
    return arr.T[np.random.choice(len(arr[0]), size=size, replace=False)]
def translation(vector, tx, ty):
    """Shift a 2-row coordinate container in place by (tx, ty).

    Returns the updated (x, y) components.
    """
    vector[0] = vector[0] + tx
    vector[1] = vector[1] + ty
    return vector[0], vector[1]
def IOU(temp_x, temp_y, segmentation, target_class=2):
    """True when no pixel at (temp_x, temp_y) belongs to *target_class*.

    NOTE(review): despite the name, this is a simple overlap test, not an
    intersection-over-union score (see compute_IOU) -- confirm callers.
    """
    overlap = segmentation[temp_x, temp_y]
    return bool(np.sum(overlap == target_class) == 0)
def get_pixel_inside(pl):
    """List the integer [x, y] pixels strictly inside shapely polygon *pl*."""
    minx, miny, maxx, maxy = pl.bounds
    minx, miny, maxx, maxy = int(minx), int(miny), int(maxx), int(maxy)
    pixels = []
    # Scan the bounding box and keep only the pixels the polygon contains.
    for x in range(minx, maxx + 1):
        for y in range(miny, maxy + 1):
            if pl.contains(Point(x, y)):
                pixels.append([x, y])
    return pixels
def affine(tx, ty, affine_mat, polys):
    """Rotate *polys* about the matrix's center, then translate by (tx, ty).

    Returns the rounded (x, y) coordinate arrays of the transformed points.
    """
    cx, cy = affine_mat[0][2], affine_mat[1][2]  # center of polygon
    polygon = np.array(polys)
    aug_polygon = np.insert(polygon, 2, values=1, axis=1)  # [x, y, 1]
    rel_polygon = aug_polygon - np.array([cx, cy, 0])
    temp = np.array(affine_mat @ rel_polygon.T)
    return np.round(translation(temp, tx, ty))
def synImage(im_array, affine_mat_used, tx_used, ty_used, poly_used,
             syn_array, plot_mode=False):
    """Copy the polygon's source pixels into *syn_array* at each placement."""
    if len(poly_used) >= 1:
        polys = np.array(poly_used)
        # The same polygon is appended once per accepted placement, so any
        # entry works; use the first.
        pl = Polygon(polys[0])
    else:
        # Nothing was generated; return the canvas untouched.
        return syn_array
    pixels = get_pixel_inside(pl)
    for j in range(len(tx_used)):
        tx_use = tx_used[j]
        ty_use = ty_used[j]
        mat = affine_mat_used[j]
        rotated_pixels_x, rotated_pixels_y = affine(tx_use, ty_use, mat, pixels)
        # NOTE(review): this nested loop writes im_array[k] to EVERY
        # destination pixel, so each destination ends up holding the last
        # k's value -- possibly intended as a paired zip(pixels, ...); the
        # original behavior is preserved here pending confirmation.
        for k in pixels:
            for m in zip(rotated_pixels_x, rotated_pixels_y):
                try:
                    syn_array[int(m[1]), int(m[0])] = im_array[k[1], k[0]]
                except IndexError:
                    # Rarely a pixel exceeds the border; ignore it.
                    pass
        if plot_mode:
            print('x:', rotated_pixels_x[0], 'y:', rotated_pixels_y[0])
            easyplt2(polys[0], im_array, syn_array)
    return syn_array
def compute_IOU(temp_x, temp_y, segmentation):
    """Fraction of the polygon's interior pixels on road surface (class 2).

    NOTE(review): raises ZeroDivisionError if the polygon contains no
    pixels -- confirm callers always pass non-degenerate polygons.
    """
    polys = Polygon(np.array((temp_x, temp_y)).T)
    in_pixels = get_pixel_inside(polys)
    score = 0
    for i in in_pixels:
        if segmentation[i[1], i[0]] == 2:
            score += 1
    return score / len(in_pixels)
def randomPolygon(polys, size, segmentation, image, plot_mode=False,
                  copy_count=2, IOU_thres=0.9):
    """Place up to *copy_count* rotated+translated copies of one object's
    polygon onto random road-surface locations of the segmentation map.

    *polys* is a single object's polygon (one of all objects in the image).
    Returns (rotated_polygon, affine_mat_used, tx_used, ty_used, poly_used)
    for the accepted placements.
    """
    degree = np.random.randint(180)
    cosang, sinang = rot_gen(degree)
    polygon = np.array(polys)
    cx, cy = np.average(polygon, axis=0)  # center of polygon
    poly_used = []
    rotated_polygon = []
    affine_mat_used = []
    tx_used = []
    ty_used = []
    num = 0
    out_loop = 0
    # out_loop bounds consecutive failed attempts so we never spin forever.
    # copy_count & IOU_thres are inputs / hyper-parameters.
    while num < copy_count and out_loop < 4:
        affine_mat = np.array([[cosang, -sinang, cx],
                               [sinang, cosang, cy],
                               [0, 0, 1]])
        loc_roadsurf = np.asarray(np.where(segmentation == 2))  # class 2: road surface
        # Position of a randomly selected road-surface pixel.
        ly, lx = random_sample(loc_roadsurf)[0]
        tx = lx - cx  # distance to translate
        ty = ly - cy
        # Location of the polygon's points after the affine transformation.
        temp_x, temp_y = affine(tx, ty, affine_mat, polys)
        if (np.max(temp_x) < size[0] and np.min(temp_x) > 0
                and np.max(temp_y) < size[1] and np.min(temp_y) > 0
                and compute_IOU(temp_x, temp_y, segmentation) > IOU_thres):
            poly_used.append(polys)
            rotated_polygon.append(np.array((temp_x, temp_y)).T)
            if plot_mode:
                easyplt(polys, rotated_polygon[-1], image)
            affine_mat_used.append(affine_mat)
            tx_used.append(tx)
            ty_used.append(ty)
            num += 1
            out_loop = 0
            continue
        out_loop += 1
    return rotated_polygon, affine_mat_used, tx_used, ty_used, poly_used
|
# game.py
#
# GameGenerator is free to use, modify, and redistribute for any purpose
# that is both educational and non-commercial, as long as this paragraph
# remains unmodified and in its entirety in a prominent place in all
# significant portions of the final code. No warranty, express or
# implied, is made regarding the merchantability, fitness for a
# particular purpose, or any other aspect of the software contained in
# this module.
import os
import sys
import struct
import gg.colors
import gg.utils
try:
import pygame
except ImportError as err:
print(gg.utils._ERR_PREFIX, err, ':_( Terminating game.', file=sys.stderr)
sys.exit(1)
class Game:
    """The entire environment for creating and playing a Flak game.

    The client (you) should set his or her own values for the game
    attributes, but they are set up by default so that everything except
    images works out of the box.

    Modifiable attributes:
    -name: the name of the game, displayed on the window title bar.
    -images_dir: the path of the directory where the images are.
    -window_icon: file name of the icon to display next to the name.
    -splash_image: the image that covers the screen at the beginning.
    -screen_width: the window width in pixels if not fullscreen.
    -aspect_ratio: the aspect ratio of the window if not fullscreen.
    -is_fullscreen: whether the window covers the entire screen.
    -font_color: the color of the text that appears on the screen.
    -screen_font_size: the point size of the info text on the screen.
    -background_color: a solid color used if no image is specified.
    -background_image: file name of the image to put as background.
    -player_image: the image file for the player object.
    -player_num_lives: number of tries the player gets before losing.
    -player_num_shots: number of shots per reload. 0 means no reloading.
    -player_speed: how far the player moves left or right in one second.
    -player_x_pos: the initial x-coordinate of the player's top left.
    -player_y_pos: the initial y-coordinate of the player's top left.
    -has_player_sprite_dir: flip the player sprite when moving?
    -missile_image: the image file for the missile fired by the player.
    -missile_speed: how fast the player missile travels.
    -is_missile_upward: does the missile move up or down? Up if true.
    -enemy_image: the image for all the enemy objects.
    -enemy_speed: how fast the enemy airplanes move.
    -enemy_count: max number of enemies on the screen at any given time.
    -enemy_top_edge: top of the boundary where enemies can spawn.
    -enemy_bottom_edge: bottom of the boundary where enemies can spawn.
    -bomb_image: the image file for the bomb dropped by the enemy.
    -bomb_speed: how fast the enemy bombs travel.
    -is_bomb_downward: does the bomb move down or up? Down if true.
    -building_image: the image file for the ground structure objects.
    -building_razed_image: optional image for buildings that are hit.
    -building_count: how many buildings to start game with. Must be > 1.
    -building_y_pos: y-coordinate of buildings; None means near bottom.
    -score_pos: the position where the score is displayed on the screen.
    -score_factor: how many points the player gets per hit.
    -score_loss_factor: points lost when a building is destroyed.
    -high_score_pos: where to display highscore; None means top-center.
    -num_lives_pos: the location of the player's remaining lives panel.
    -num_shots_pos: the location of the player's remaining shots panel.
    -thumbnails_height: the height of the lives and shots thumbnails.
    -message_high_score: message to show when the player gets highscore.
    -message_game_over: message to show when the player loses.
    -keys_move_left: list of keys that move the player left.
    -keys_move_right: list of keys that move the player right.
    -keys_shoot: list of keys that fire the missile.
    -keys_reload_ammo: list of keys that reload the ammo when out.
    -keys_pause: list of keys that pause the game.

    Client-invoked method:
    -run(): once all the modifiable attributes are set as desired, call
    this method to start the game.
    """
    def __init__(self):
        """Set default values for all the game attributes."""
        # Modifiable game attributes (documented in the class docstring)
        self.name = '<NAME>'
        self.images_dir = None
        self.window_icon = None
        self.splash_image = None
        self.screen_width = 800
        self.aspect_ratio = 1.7778  # ~16:9
        self.is_fullscreen = False
        self.font_color = gg.colors.WHITE
        self.screen_font_size = 36
        self.background_color = gg.colors.BLACK
        self.background_image = None
        self.player_image = None
        self.player_num_lives = 3
        self.player_num_shots = 10
        self.player_speed = 800  # pixels per second
        self.player_x_pos = None
        self.player_y_pos = None
        self.has_player_sprite_dir = True
        self.missile_image = None
        self.missile_speed = 2000  # pixels per second
        self.is_missile_upward = True
        self.enemy_image = None
        self.enemy_speed = 600  # pixels per second
        self.enemy_count = 5
        self.enemy_top_edge = None
        self.enemy_bottom_edge = None
        self.bomb_image = None
        self.bomb_speed = 800  # pixels per second
        self.is_bomb_downward = True
        self.building_image = None
        self.building_razed_image = None
        self.building_count = 4
        self.building_y_pos = None
        self.score_pos = (10, 10)
        self.score_factor = 1
        self.score_loss_factor = 10
        self.high_score_pos = None
        self.num_lives_pos = (10, 40)
        self.num_shots_pos = (10, 74)
        self.thumbnails_height = 24
        self.message_high_score = 'You beat the high score!'
        self.message_game_over = 'Game over'
        self.keys_move_left = [pygame.K_LEFT]
        self.keys_move_right = [pygame.K_RIGHT]
        self.keys_shoot = [pygame.K_SPACE]
        self.keys_reload_ammo = [pygame.K_LCTRL, pygame.K_RCTRL]
        self.keys_pause = [pygame.K_p, pygame.K_PAUSE]
        # Attributes you shouldn't change from your own code
        self._screen = None
        self._screen_rect = None
        self._screen_height = None
        self._background_surf = None
        self._screen_font = None
        self._is_still_playing = True       # False once the player quits for good
        self._is_main_loop_running = True
        self._is_paused = False
        self._is_pause_displayed = False    # avoids redrawing the pause banner each frame
        self._is_screen_info_shown = False  # F1 toggles the FPS/resolution overlay
        self._keyboard_state = None
        self._player = None
        self._player_thumbnails = []
        self._data_dir = 'gamedata'
        self._data_file = os.path.join(self._data_dir, 'game.dat')
        self._score = None
        self._score_text = None
        self._score_rect = None
        self._high_score = 0
        self._high_score_text = None
        self._high_score_rect = None
        self._modal_text_font = None
        self._enemy_group = None
        self._missile_group = None
        self._bomb_group = None
        self._building_group = None
        self._thumbnail_group = None
        self._missile_thumbnails = []
        self._buildings_left = self.building_count
        self._clock = None
        self.TARGET_FPS = 60
    def run(self):
        """Start the game and keep it going.

        Initialize all the pertinent game objects and then run the main
        game loop.
        """
        # Initialize the game environment
        self._init_environment()
        # Display the splash screen if one is given
        if self.splash_image is not None:
            self._display_splash_screen(self._screen)
        # Begin playing the game
        while self._is_still_playing:
            self._init_new_game()
            # The main loop
            self._run_main_loop()
            # Post-loop work: update the high score, etc.
            if self._score > self._high_score:
                self._update_high_score()
                has_high_score = True
            else:
                has_high_score = False
            # Exiting the loop with the player alive and buildings standing
            # means the player chose to quit: show no end-of-game message.
            if self._player.is_alive and self._buildings_left > 0:
                end_message = None
            else:
                if has_high_score:
                    end_message = self.message_high_score
                else:
                    end_message = self.message_game_over
            if end_message is not None:
                self._display_modal_text(end_message)
                self._prompt_play_again()
        # Here the player has exited both loops.
        # Quit pygame once we're done with it.
        pygame.quit()
    def _run_main_loop(self):
        """Run the main loop of the game.

        This is it - where the magic of the game happens.  Each iteration:
        resolve collisions, redraw the frame (unless paused), handle input,
        and cap the frame rate, carrying the elapsed time into the next
        iteration as *delta_time* (seconds).
        """
        MAX_FPS = self.TARGET_FPS
        delta_time = 0
        self._clock = pygame.time.Clock()
        # Start the loop; it ends when the player quits, dies, or loses
        # every building.
        while (self._is_main_loop_running and
               self._player.is_alive and self._buildings_left > 0):
            has_score_changed = False
            if not self._is_paused:
                # Check if the player is hit by a bomb (the bomb is consumed)
                if pygame.sprite.spritecollide(self._player,
                                               self._bomb_group, True):
                    self._player.knock_out()
                    self._thumbnail_group.remove(self._player_thumbnails.pop())
                # Check for bomb hits on the buildings
                for building in pygame.sprite.groupcollide(
                        self._building_group, self._bomb_group, False, True):
                    # Bombs can keep hitting a razed building; only the
                    # first hit costs points.
                    if not building.is_razed:
                        building.is_razed = True
                        self._buildings_left -= 1
                        self._score -= self.score_loss_factor
                        if not has_score_changed:
                            has_score_changed = True
                # Check for missile hits on the enemies
                for enemy in pygame.sprite.groupcollide(self._enemy_group,
                                                        self._missile_group,
                                                        False, True):
                    enemy.knock_out()
                    self._score += self.score_factor
                    if not has_score_changed:
                        has_score_changed = True
                # Check for missile hits on the bombs (both are consumed)
                for bomb in pygame.sprite.groupcollide(self._bomb_group,
                                                       self._missile_group,
                                                       True, True):
                    self._score += self.score_factor
                    if not has_score_changed:
                        has_score_changed = True
                # Update the frame
                self._screen.blit(self._background_surf, (0, 0))
                self._bomb_group.update(delta_time)
                bomb_rects = self._bomb_group.draw(self._screen)
                self._missile_group.update(delta_time)
                missile_rects = self._missile_group.draw(self._screen)
                self._enemy_group.update(delta_time)
                enemy_rects = self._enemy_group.draw(self._screen)
                self._building_group.update()
                building_rects = self._building_group.draw(self._screen)
                if self._player.is_alive:
                    self._screen.blit(self._player.image, self._player.rect)
                self._blit_current_score(has_score_changed)
                self._screen.blit(self._high_score_text, self.high_score_pos)
                thumbnail_rects = self._thumbnail_group.draw(self._screen)
                if self._is_screen_info_shown:
                    info_rects = self._blit_screen_info(self._clock.get_fps())
                else:
                    info_rects = ()
                # Draw the updates (the *_rects locals above would allow a
                # selective pygame.display.update; a full flip is used instead)
                pygame.display.flip()
            elif not self._is_pause_displayed:
                self._display_pause_message()
            # Handle the player's input
            self._handle_input(delta_time)
            # Make sure we don't go above the target frame rate
            delta_time = self._clock.tick(MAX_FPS) / 1000.0
    def _init_environment(self):
        """Initialize modules and values necessary to play the game."""
        pygame.init()
        pygame.mouse.set_visible(False)
        pygame.display.set_caption(self.name)
        # Block every event type, then re-allow only the ones the game uses.
        pygame.event.set_allowed(None)
        pygame.event.set_allowed([pygame.KEYDOWN, pygame.KEYUP,
                                  pygame.MOUSEBUTTONUP, pygame.QUIT])
        # Give the window a custom icon if one was specified
        if self.window_icon is not None:
            self._set_window_icon()
        # Initialize the screen
        if self.is_fullscreen:
            scr_flags = pygame.FULLSCREEN | pygame.HWSURFACE | pygame.DOUBLEBUF
            self._screen = pygame.display.set_mode((0, 0), scr_flags)
        else:
            self._set_screen_height()
            self._screen = pygame.display.set_mode((self.screen_width,
                                                    self._screen_height))
        # NOTE(review): set_alpha with an RLEACCEL flag argument is the
        # pygame 1.x calling convention -- confirm against the targeted
        # pygame version.
        self._screen.set_alpha(None, pygame.RLEACCEL)
        self._screen_rect = self._screen.get_rect()
        self._screen_font = pygame.font.Font(None, self.screen_font_size)
        self._modal_text_font = pygame.font.Font(None, 72)
        # Initialize the background
        self._background_surf = gg.utils._get_surface(
            self._screen.get_size())[0]
        self._background_surf.set_alpha(None, pygame.RLEACCEL)
        # Blit the background onto the screen
        if self.background_image is None:
            self._background_surf.fill(self.background_color)
        else:
            bg_image, bg_rect = self._fit_image_to_screen(
                self.background_image)
            self._background_surf.blit(bg_image, bg_rect)
        # Read the high score
        self._read_high_score()
    def _init_new_game(self):
        """Initialize the sprites at the beginning of the game."""
        # Create the groups
        self._enemy_group = pygame.sprite.LayeredDirty()
        self._missile_group = pygame.sprite.LayeredDirty()
        self._bomb_group = pygame.sprite.LayeredDirty()
        self._building_group = pygame.sprite.RenderUpdates()
        self._thumbnail_group = pygame.sprite.RenderUpdates()
        # Get the groups ready for drawing
        self._enemy_group.clear(self._screen, self._background_surf)
        self._missile_group.clear(self._screen, self._background_surf)
        self._bomb_group.clear(self._screen, self._background_surf)
        self._building_group.clear(self._screen, self._background_surf)
        self._thumbnail_group.clear(self._screen, self._background_surf)
        # Data to pass to the player to create missiles
        missile_data = {
            'group': self._missile_group,
            'screen_rect': self._screen_rect,
            'image_file': self.missile_image,
            'image_dir': self.images_dir,
            'is_direction_up': self.is_missile_upward,
            'speed': self.missile_speed,
        }
        # Put the player 75% of the way down the screen
        if self.player_y_pos is None:
            self.player_y_pos = self._screen_rect.height * 0.75
        self._player = gg.player.Player(missile_data, self._screen_rect,
                                        self.player_image, self.images_dir,
                                        self.player_x_pos, self.player_y_pos,
                                        self.player_speed,
                                        self.player_num_lives,
                                        self.player_num_shots,
                                        self.has_player_sprite_dir)
        # The bad guys
        if self.enemy_top_edge is None:
            self.enemy_top_edge = 0
        # Allow enemies to be only on the top half of the screen
        if self.enemy_bottom_edge is None:
            self.enemy_bottom_edge = round(self._screen_rect.height / 2)
        enemy_boundaries = (self.enemy_top_edge, self.enemy_bottom_edge)
        # Data to pass to the enemies to create bombs
        bomb_data = {
            'group': self._bomb_group,
            'screen_rect': self._screen_rect,
            'image_file': self.bomb_image,
            'image_dir': self.images_dir,
            'is_direction_up': self.is_bomb_downward,
            'speed': self.bomb_speed,
        }
        # Create these stinkin' guys
        for i in range(self.enemy_count):
            gg.enemy.Enemy(self._enemy_group, bomb_data, self._screen_rect,
                           enemy_boundaries, self.enemy_image, self.images_dir,
                           self.enemy_speed)
        # Place the buildings at regular intervals
        building_rect = gg.utils._load_image(self.building_image,
                                             self.images_dir,
                                             'the building')[1]
        building_width = building_rect.width
        building_interval = ((self._screen_rect.width - building_width
                              * self.building_count) / self.building_count)
        building_x_pos = building_interval / 2
        if self.building_y_pos is None:
            self.building_y_pos = (self._screen_rect.height
                                   - building_rect.height - 10)
        for i in range(self.building_count):
            building_pos = (building_x_pos, self.building_y_pos)
            gg.groundobject.GroundObject(self._building_group, building_pos,
                                         self.building_image,
                                         self.building_razed_image,
                                         self.images_dir)
            building_x_pos += building_interval + building_width
        # Keep track of the buildings we lose
        self._buildings_left = self.building_count
        # Reset the score
        self._score = 0
        # First call, to ensure it works properly later
        self._blit_current_score(True)
        # Position the high score
        high_score_text = ''.join(['High score: ', str(self._high_score)])
        self._high_score_text = self._screen_font.render(high_score_text, True,
                                                         self.font_color)
        self._high_score_rect = self._high_score_text.get_rect()
        if self.high_score_pos is None:
            self._high_score_rect.centerx = self._screen_rect.centerx
            self.high_score_pos = (self._high_score_rect.x, 10)
        self._high_score_rect.topleft = self.high_score_pos
        # Create the thumbnails for the number of lives
        # NOTE(review): _player_thumbnails/_missile_thumbnails are appended
        # to here but never cleared between games -- confirm stale entries
        # can't accumulate across replays.
        self._create_thumbnails(self._player_thumbnails, self.num_lives_pos,
                                self.player_image, self.player_num_lives)
        # Create the thumbnails for the number of shots if not unlimited
        if self._player.MAX_SHOTS > 0:
            self._create_thumbnails(self._missile_thumbnails,
                                    self.num_shots_pos, self.missile_image,
                                    self._player.shots_left)
def _create_thumbnails(self, thumb_list, pos, image_file, num_thumbs):
"""Create the thumbnails and add them to their container."""
for i in range(num_thumbs):
if i == 0:
thumbnail_pos = pos
else:
thumbnail_pos = (thumbnail_pos[0]
+ last_thumbnail.rect.width + 6,
thumbnail_pos[1])
last_thumbnail = gg.thumbnail.Thumbnail(self._thumbnail_group,
thumbnail_pos,
self.thumbnails_height,
image_file,
self.images_dir)
thumb_list.append(last_thumbnail)
    def _set_window_icon(self):
        """Change the default pygame icon on the game window."""
        ICON_SIZE = (32, 32)
        try:
            if self.images_dir is None:
                path = self.window_icon
                window_icon = pygame.image.load(self.window_icon)
            else:
                path = os.path.join(self.images_dir, self.window_icon)
                window_icon = pygame.image.load(path)
            try:
                # smoothscale only accepts 24/32-bit surfaces; fall back to
                # plain scale for other bit depths.
                window_icon = pygame.transform.smoothscale(window_icon,
                                                           ICON_SIZE)
            except ValueError:
                window_icon = pygame.transform.scale(window_icon, ICON_SIZE)
        except RuntimeError:
            # pygame.error subclasses RuntimeError, so failed loads land here.
            # NOTE(review): _get_square_of_doom is called unqualified --
            # presumably a module-level helper defined elsewhere in this
            # file; confirm, otherwise this handler raises NameError.
            window_icon = _get_square_of_doom()
            print(gg.utils._ERR_PREFIX, "Couldn't load the icon",
                  path, file=sys.stderr)
        pygame.display.set_icon(window_icon)
def _set_screen_height(self):
"""Set the window height based on the width and aspect ratio."""
self._screen_height = round(self.screen_width / self.aspect_ratio)
    def _fit_image_to_screen(self, file_name):
        """Load, scale, and center an image to fit the screen exactly.

        Return the image object and its corresponding rect.

        It is up to the caller to blit and update the image after
        calling this method. Once that happens, there are no black bars
        anywhere around the image, and no stretching of it in any
        direction.
        """
        screen_width, screen_height = self._screen.get_size()
        screen_aspect_ratio = screen_width / screen_height
        image = gg.utils._load_image(file_name, self.images_dir,
                                     'a full-screen image')[0]
        image_width, image_height = image.get_size()
        image_aspect_ratio = image_width / image_height
        image.set_alpha(None, pygame.RLEACCEL)
        if image_width != screen_width or image_height != screen_height:
            # Scale so the SHORTER relative dimension overflows the screen
            # (cover, not letterbox); centering then crops the overflow evenly.
            if screen_aspect_ratio == image_aspect_ratio:
                image_size = (screen_width, screen_height)
            elif screen_aspect_ratio > image_aspect_ratio:
                image_size = (screen_width,
                              round(image_height * screen_width / image_width))
            else:
                image_size = (round(image_width *
                                    screen_height / image_height),
                              screen_height)
            try:
                image = pygame.transform.smoothscale(image, image_size)
            except ValueError:
                # smoothscale rejects surfaces that aren't 24/32-bit
                image = pygame.transform.scale(image, image_size)
        image_rect = image.get_rect()
        image_rect.center = self._screen_rect.center
        return image, image_rect
    def _display_splash_screen(self, screen):
        """Display a splash screen until a key, any key, is pressed."""
        image, img_rect = self._fit_image_to_screen(self.splash_image)
        screen.blit(image, img_rect)
        pygame.display.flip()
        # Detect if the player quit or if a key was pressed and released
        is_screen_done = False
        while is_screen_done is False or not is_screen_done:
            for event in pygame.event.get():
                if self._has_quit(event):
                    self._handle_quit()
                    is_screen_done = True
                elif (event.type == pygame.KEYUP or
                      event.type == pygame.MOUSEBUTTONUP):
                    is_screen_done = True
def _display_pause_message(self):
"""Print "Pause" on top of the game screen."""
self._display_modal_text('Pause')
self._is_pause_displayed = True
    def _display_modal_text(self, modal_text):
        """Display an important modal message centered on the screen."""
        text, text_rect = gg.utils._get_rendered_text(self._modal_text_font,
                                                      modal_text,
                                                      self.font_color)
        text_rect.center = self._screen_rect.center
        # Display text shadow (same text in gray, offset 2 px down-right)
        text_shadow = gg.utils._get_rendered_text(
            self._modal_text_font, modal_text, gg.colors.MEDIUM_DARK_GRAY)[0]
        text_shadow_rect = text_rect.move(2, 2)
        self._screen.blit(text_shadow, text_shadow_rect)
        self._screen.blit(text, text_rect)
        # Update only the affected rects rather than flipping the whole screen
        pygame.display.update([text_shadow_rect, text_rect])
def _handle_input(self, delta_time):
"""React to the player's input as necessary."""
for event in pygame.event.get():
if self._has_quit(event):
self._handle_quit()
return
elif event.type == pygame.KEYDOWN:
if event.key in self.keys_shoot and not self._is_paused:
self._player.shoot()
if len(self._missile_thumbnails) > 0:
self._thumbnail_group.remove(
self._missile_thumbnails.pop())
elif event.key in self.keys_pause:
# Toggle paused state
self._is_paused = not self._is_paused
if self._is_pause_displayed:
self._is_pause_displayed = False
elif event.key == pygame.K_F1:
self._is_screen_info_shown = not self._is_screen_info_shown
elif (event.type == pygame.KEYUP and
event.key in self.keys_reload_ammo and
not self._is_paused):
# Detect ammo reload when the reload key is released
self._player.reload()
# Update the ammo thumbs if not at max and not unlimited
if (self._player.shots_left < self._player.MAX_SHOTS or
self._player.MAX_SHOTS > 0):
for i in range(len(self._missile_thumbnails)):
self._thumbnail_group.remove(
self._missile_thumbnails.pop())
self._create_thumbnails(self._missile_thumbnails,
self.num_shots_pos, self.missile_image,
self._player.shots_left)
if self._is_paused:
return
# Detect left and right movement inputs
self._keyboard_state = pygame.key.get_pressed()
player_rect = self._player.rect
self._player.is_moving_left = (
self._is_key_active(self.keys_move_left) and player_rect.left > 0)
self._player.is_moving_right = (
self._is_key_active(self.keys_move_right) and
player_rect.right < self._screen_rect.right)
# Avoid moving to both left and right at the same time :O
if self._player.is_moving_left and self._player.is_moving_right:
self._player.is_moving_left = False
self._player.is_moving_right = False
# Update the player
if self._player.is_moving_left or self._player.is_moving_right:
self._player.update(delta_time)
def _is_key_active(self, event_keys):
"""Return true if one the keys to a particular event is down."""
num_keys = len(event_keys)
for i in range(num_keys):
if self._keyboard_state[event_keys[i]]:
return True
return False
    def _blit_current_score(self, has_changed):
        """Blit the player's current score to the screen.

        The rendered surface is cached and only re-rendered when
        *has_changed* is true, so frames without scoring events skip the
        font-render cost.
        """
        if has_changed:
            score_text = ''.join(['Score: ', str(self._score)])
            self._score_text = self._screen_font.render(score_text, True,
                                                        self.font_color)
            self._score_rect = self._score_text.get_rect()
            self._score_rect.topleft = self.score_pos
        self._screen.blit(self._score_text, self.score_pos)
def _blit_screen_info(self, fps):
"""Blit the screen resolution and current FPS to the screen.
This method is not optimized for speed.
"""
left_margin = 10
bottom_offset = self._screen_rect.height - 70
fps = str(round(fps, 1))
fps_rect = self._blit_info_text(''.join(['FPS: ' + fps]),
(left_margin, bottom_offset))
bottom_offset = self._screen_rect.height - 40
screen_res = ''.join([str(self._screen_rect.width), 'x',
str(self._screen_rect.height)])
screen_res_rect = self._blit_info_text(''.join(['Screen size: ',
screen_res]),
(left_margin, bottom_offset))
return [fps_rect, screen_res_rect]
    def _blit_info_text(self, text, pos):
        """Blit *text* at *pos* and return its rect moved to that position."""
        text_surf = self._screen_font.render(text, True, self.font_color)
        self._screen.blit(text_surf, pos)
        return text_surf.get_rect().move(pos)
def _read_high_score(self):
"""Read the high score from the file.
If the file doesn't exist yet, the high score is assumed to be
0, and the file will be created later.
"""
if os.path.isfile(self._data_file):
with open(self._data_file, 'rb') as file:
file_content = file.read(4)
self._high_score = struct.unpack('I', file_content)[0]
def _update_high_score(self):
"""Write the high score to the file."""
self._high_score = self._score
# Thanks to <NAME> at Stack Overflow for this algorithm
# https://stackoverflow.com/questions/273192/how-can-i-create-a-
# directory-if-it-does-not-exist
if not os.path.exists(self._data_dir):
try:
os.makedirs(self._data_dir)
except OSError as error:
if error.errno != errno.EEXIST:
print(_ERR_PREFIX, "Couldn't create the", self._data_dir,
'folder to record the high score :(',
file=sys.stderr)
return
with open(self._data_file, 'wb') as file:
binary_score = struct.pack('I', self._high_score)
file.write(binary_score)
    def _prompt_play_again(self):
        """Wait for the player to indicate if he wants to try again.

        Blocks until Enter is pressed (play again) or a quit command is
        given; re-arms the main loop flag for the next round either way.
        """
        prompt_text = 'Press Enter to play again'
        prompt, prompt_rect = gg.utils._get_rendered_text(self._screen_font,
                                                          prompt_text,
                                                          self.font_color)
        prompt_rect.centerx = self._screen_rect.centerx
        prompt_rect.y = self._screen_rect.centery + 40
        self._screen.blit(prompt, prompt_rect)
        pygame.display.update(prompt_rect)
        # Wait for the keypress to play again
        is_waiting = True
        while is_waiting:
            for event in pygame.event.get():
                if self._has_quit(event):
                    self._handle_quit()
                    is_waiting = False
                elif (event.type == pygame.KEYDOWN and
                      (event.key == pygame.K_RETURN or
                       event.key == pygame.K_KP_ENTER)):
                    is_waiting = False
        self._is_main_loop_running = True
def _has_quit(self, event):
"""Return true if the player has given an exit command."""
# Check for the quit event, Esc key, or Alt+F4
return (event.type == pygame.QUIT or
(event.type == pygame.KEYDOWN and
(event.key == pygame.K_ESCAPE or
(event.key == pygame.K_F4 and
pygame.KMOD_ALT & pygame.key.get_mods()))))
    def _handle_quit(self):
        """Ask the player for confirmation before exiting the game."""
        with gg.polardialogbox.PolarDialogBox(self._screen, self._clock)\
                as box:
            is_sure_quit = box.get_answer('Are you sure you want to quit?')
        if is_sure_quit:
            # Stop both the outer play-again loop and the main loop.
            self._is_still_playing = False
            self._is_main_loop_running = False
            return
        # The player backed out: unpause and repaint what the dialog covered.
        if self._is_paused:
            self._is_paused = False
        if self._background_surf is not None:
            self._screen.blit(self._background_surf, (0, 0))
        pygame.display.flip()
|
# tests/test_issues/test_linkml_issue_723.py
import unittest
from dataclasses import dataclass
from enum import Enum
import rdflib
from linkml_runtime import SchemaView
from linkml_runtime.dumpers import json_dumper, yaml_dumper, rdflib_dumper
from linkml_runtime.linkml_model import PermissibleValue
from linkml_runtime.loaders import json_loader
from linkml_runtime.utils.compile_python import compile_python
from rdflib import URIRef, Graph, Literal
from linkml.generators.pydanticgen import PydanticGenerator
from linkml.generators.pythongen import PythonGenerator
from linkml.reporting.model import RDF
from tests.utils.test_environment import TestEnvironmentTestCase
from tests.test_issues.environment import env
# reported in https://github.com/linkml/linkml/issues/723
schema_str = """
id: http://example.org
name: issue-723
imports:
- https://w3id.org/linkml/types
prefixes:
x: http://example.org/
default_prefix: x
default_range: string
description: test
classes:
Person:
attributes:
status:
range: VitalStatus
roles:
range: Role
multivalued: true
enums:
VitalStatus:
permissible_values:
ALIVE:
meaning: x:Alive
DEAD:
meaning: x:Dead
Role:
permissible_values:
INVESTIGATOR:
SAMPLE_COLLECTOR:
ANALYST:
"""
EXAMPLE = rdflib.Namespace('http://example.org/')
class StatusEnumDC(Enum):
    """Plain (non-LinkML) enum used as a behavioral baseline in the tests.

    BUG FIX: DEAD was assigned the value "ALIVE", which under Enum value
    semantics makes StatusEnumDC.DEAD a mere *alias* of StatusEnumDC.ALIVE
    rather than a distinct member.
    """
    ALIVE = "ALIVE"
    DEAD = "DEAD"
@dataclass
class PersonDC:
    # Plain (non-LinkML) dataclass used as a behavioral baseline in
    # test_plain_dataclasses.
    # NOTE(review): annotated as StatusEnumDC but defaulted to None -- an
    # Optional[StatusEnumDC] annotation would describe this more precisely.
    status: StatusEnumDC = None
class Issue723ExportCase(TestEnvironmentTestCase):
env = env
def setUp(self) -> None:
gen = PythonGenerator(schema_str)
output = gen.serialize()
#print(output)
mod = compile_python(output)
self.mod = mod
self.schemaview = SchemaView(schema_str)
gen = PydanticGenerator(schema_str)
output = gen.serialize()
#print(output)
self.pydantic_mod = compile_python(output)
def test_plain_dataclasses(self):
"""
Tests the behavior of plain non-linkml enums
"""
p = PersonDC(status=StatusEnumDC.ALIVE)
self.assertEqual(p.status, StatusEnumDC.ALIVE)
self.assertEqual(p.status.value, StatusEnumDC.ALIVE.value)
self.assertEqual(p.status.value, "ALIVE")
self.assertNotEqual(p.status, "ALIVE")
self.assertEqual(type(p.status), StatusEnumDC)
self.assertEqual(type(p.status.value), str)
def test_raises(self):
mod = self.mod
with self.assertRaises(ValueError) as e:
p = mod.Person(status="FAKE")
def test_initialized_enums(self):
"""
Test the behavior of enums that are created on initialization:
.. code:: python
p = Person(status=VitalStatus.ALIVE, roles=[...])
In this case the dictionary/json/yaml serialization is compact
.. code:: python
{'status': 'ALIVE', 'roles': ['ANALYST', 'INVESTIGATOR']}
However, the user should be aware that the type of person.role
is NOT PermissibleValue, it is the enum, i.e
.. code:: python
p.status != mod.VitalStatus.ALIVE
p.status == mod.VitalStatus(mod.VitalStatus.ALIVE)
"""
mod = self.mod
p = mod.Person(status=mod.VitalStatus.ALIVE, roles=[mod.Role.ANALYST, mod.Role.INVESTIGATOR])
# Test behavior of dumpers
pd = json_dumper.to_dict(p)
#print(pd)
self.assertEqual(pd['status'], 'ALIVE')
self.assertCountEqual(pd['roles'], ['ANALYST', 'INVESTIGATOR'])
p_json = json_dumper.dumps(p)
p_roundtrip = json_loader.loads(p_json, target_class=mod.Person)
self.assertEqual(p_roundtrip, p)
#print(yaml_dumper.dumps(p))
# Current behavior: when enums are created at time of initialization,
# they are created as Enum instances, NOT permissible value instances
self.assertEqual(p.status, mod.VitalStatus(mod.VitalStatus.ALIVE))
self.assertNotEqual(p.status, mod.VitalStatus.ALIVE)
self.assertCountEqual(p.roles, [mod.Role(mod.Role.INVESTIGATOR), mod.Role(mod.Role.ANALYST)])
self.assertEqual(type(p.status), mod.VitalStatus)
self.assertNotEqual(type(p.status), PermissibleValue)
self.assertEqual(type(p.roles[0]), mod.Role)
g = rdflib_dumper.as_rdf_graph(p, schemaview=self.schemaview)
[subj] = list(g.subjects(RDF.type, EXAMPLE.Person))
#for t in g.triples((None,None,None)):
# print(t)
self.assertEqual(list(g.objects(subj, EXAMPLE.status)), [EXAMPLE.Alive])
self.assertCountEqual(list(g.objects(subj, EXAMPLE.roles)), [Literal('INVESTIGATOR'), Literal('ANALYST')])
def test_assigned_enum(self):
"""
Test the behavior of enums that are created post-initialization:
.. code:: python
p = Person()
p.status = VitalStatus.ALIVE
In this case, the dict/json/yaml is inconveniently expanded
.. code:: python
{'status': {'text': 'ALIVE'}, 'roles': [{'text': 'ANALYST'}, {'text': 'INVESTIGATOR'}]}
"""
mod = self.mod
p = mod.Person()
p.status = mod.VitalStatus.ALIVE
p.roles = [mod.Role.ANALYST, mod.Role.INVESTIGATOR]
pd = json_dumper.to_dict(p)
print(pd)
# we might expect this
#self.assertEqual(pd['status'], 'ALIVE')
self.assertCountEqual(pd['roles'], [{'text': 'ANALYST'}, {'text': 'INVESTIGATOR'}])
p_json = json_dumper.dumps(p)
# this does NOT roundtrip:
#p_roundtrip = json_loader.loads(p_json, target_class=mod.Person)
#self.assertEqual(p_roundtrip, p)
print(yaml_dumper.dumps(p))
self.assertEqual(p.status, mod.VitalStatus.ALIVE)
self.assertCountEqual(p.roles, [mod.Role.INVESTIGATOR, mod.Role.ANALYST])
self.assertEqual(type(p.status), PermissibleValue)
self.assertNotEqual(type(p.status), mod.VitalStatus)
self.assertEqual(type(p.roles[0]), PermissibleValue)
# currently fails
#g = rdflib_dumper.as_rdf_graph(p, schemaview=self.schemaview)
#for t in g.triples((None,None,None)):
# print(t)
    def test_assigned_wrapped_enums(self):
        """
        Test the behavior of enums that are created post-initialization,
        but using an additional "wrap" of the enum

        .. code:: python

            p = mod.Person()
            p.status = mod.VitalStatus(mod.VitalStatus.ALIVE)
            p.roles = [mod.Role(mod.Role.ANALYST), mod.Role(mod.Role.INVESTIGATOR)]

        Here the behavior should be identical to doing this on initialization:

        .. code:: python

            mod.Person(status=mod.VitalStatus.ALIVE, roles=[mod.Role.ANALYST, mod.Role.INVESTIGATOR])

        or using strings as shorthand

        .. code:: python

            mod.Person(status="ALIVE", roles=["ANALYST", "INVESTIGATOR"])
        """
        mod = self.mod
        # Assign wrapped enum values after construction.
        p = mod.Person()
        p.status = mod.VitalStatus(mod.VitalStatus.ALIVE)
        p.roles = [mod.Role(mod.Role.ANALYST), mod.Role(mod.Role.INVESTIGATOR)]
        # Equivalent construction via keyword args with bare permissible values...
        p2 = mod.Person(status=mod.VitalStatus.ALIVE, roles=[mod.Role.ANALYST, mod.Role.INVESTIGATOR])
        self.assertEqual(p2, p)
        # ...and via plain strings.
        p3 = mod.Person(status="ALIVE", roles=["ANALYST", "INVESTIGATOR"])
        self.assertEqual(p3, p)
        # Test behavior of dumpers: wrapped enums serialize to their text values.
        pd = json_dumper.to_dict(p)
        #print(pd)
        self.assertEqual(pd['status'], 'ALIVE')
        self.assertCountEqual(pd['roles'], ['ANALYST', 'INVESTIGATOR'])
        # JSON round-trip works for wrapped enums.
        p_json = json_dumper.dumps(p)
        p_roundtrip = json_loader.loads(p_json, target_class=mod.Person)
        self.assertEqual(p_roundtrip, p)
        # Wrapped values compare equal to other wrapped values, but NOT to the
        # bare permissible value.
        self.assertEqual(p.status, mod.VitalStatus(mod.VitalStatus.ALIVE))
        self.assertNotEqual(p.status, mod.VitalStatus.ALIVE)
        self.assertCountEqual(p.roles, [mod.Role(mod.Role.INVESTIGATOR), mod.Role(mod.Role.ANALYST)])
        # The runtime type is the enum class itself, not PermissibleValue.
        self.assertEqual(type(p.status), mod.VitalStatus)
        self.assertNotEqual(type(p.status), PermissibleValue)
        self.assertEqual(type(p.roles[0]), mod.Role)
        # RDF dumping: status maps to a URI node, roles to plain literals.
        g = rdflib_dumper.as_rdf_graph(p, schemaview=self.schemaview)
        [subj] = list(g.subjects(RDF.type, EXAMPLE.Person))
        #for t in g.triples((None,None,None)):
        #    print(t)
        self.assertEqual(list(g.objects(subj, EXAMPLE.status)), [EXAMPLE.Alive])
        self.assertCountEqual(list(g.objects(subj, EXAMPLE.roles)), [Literal('INVESTIGATOR'), Literal('ANALYST')])
    def test_pydantic(self):
        """Pydantic-generated models use real Python enums for enum slots."""
        mod = self.pydantic_mod
        # Strings are coerced to enum members on initialization.
        p = mod.Person(status="ALIVE", roles=["ANALYST", "INVESTIGATOR"])
        print(p)
        # Invalid enum values are rejected at construction time.
        with self.assertRaises(ValueError) as e:
            p = mod.Person(status="FAKE")
        # NOTE(review): post-init assignment of an invalid value is apparently
        # not validated yet, hence the commented-out check below.
        #with self.assertRaises(ValueError) as e:
        #    p = mod.Person()
        #    p.status = "FAKE"
        # Enum-member and string initialization produce equal objects.
        p2 = mod.Person(status=mod.VitalStatus.ALIVE, roles=[mod.Role.ANALYST, mod.Role.INVESTIGATOR])
        self.assertEqual(p, p2)
        # Post-initialization assignment behaves the same as init-time args.
        p3 = mod.Person()
        p3.status = mod.VitalStatus.ALIVE
        p3.roles = [mod.Role.ANALYST, mod.Role.INVESTIGATOR]
        self.assertEqual(p, p3)
        self.assertEqual(p.status, mod.VitalStatus.ALIVE)
        self.assertEqual(type(p.status), mod.VitalStatus)
        self.assertEqual(p.roles, [mod.Role.ANALYST, mod.Role.INVESTIGATOR])
        # test the "double wrap" code: wrapping an enum member in its own
        # class must be a no-op.
        p.status = mod.VitalStatus(mod.VitalStatus.ALIVE)
        self.assertEqual(p.status, mod.VitalStatus.ALIVE)
        # TODO: not implemented?
        #print(p.dict())
        #not supported yet
        #pd = json_dumper.to_dict(p)
# Allow running this test module directly (python test_file.py).
if __name__ == '__main__':
    unittest.main()
|
# @author = "avirambh"
# @email = "<EMAIL>"
import torch
import torch.nn as nn
def print_vcls(vcls, epoch):
    """Log the name and current layer loss of every VCL hook at this step."""
    for layer in vcls:
        msg = "Step {}: Name: {} Loss: {}".format(epoch, layer.name,
                                                  layer.get_layer_loss())
        print(msg)
def get_vcl_loss(model, epoch, debug):
    """Collect and reset the accumulated VCL loss for this training step.

    When ``debug`` is set, per-layer losses are printed every 5th epoch.
    """
    if debug and epoch % 5 == 0:
        print_vcls(model.vcls, epoch)
    total = VCL.get_forward_loss()
    VCL.zero_loss()
    return total
def apply_vcl(model, tmp_input, sample_size=5, eps_learn=True):
    """Attach VCL hooks to ``model`` and register their beta parameters.

    Args:
        model: the module tree to instrument.
        tmp_input: a sample input (without batch dim) used for one warm-up
            forward pass so each hook can lazily create its beta parameter.
        sample_size: slice size used by each VCL hook.
        eps_learn: whether the beta parameters are trainable.

    Returns:
        The list of attached VCL hook instances.
    """
    # Init VCL hooks
    vcls = init_vcl(model)
    # One dummy forward pass so every hook initializes its beta parameter.
    warmup = torch.tensor(tmp_input, dtype=torch.float32).unsqueeze(0)
    model.forward(warmup)
    # Register each beta with the model so optimizers pick it up.
    for idx, hook in enumerate(vcls):
        param_name = 'vcl_{}'.format(idx)
        model.register_parameter(param_name, hook.vcl_beta)
        hook.set_name(param_name)
        hook.set_sample_size(sample_size)
        hook.vcl_beta.requires_grad = eps_learn
    return vcls
def init_vcl(model, pre_activation=True):
    """Recursively attach a VCL hook to every activation module under ``model``.

    Args:
        model: root ``nn.Module`` to scan.
        pre_activation: if True, register the hook as a forward *pre*-hook
            (it sees the activation's input); otherwise as a forward hook.

    Returns:
        A single ``VCL`` instance when ``model`` itself is an activation
        module that was hooked, otherwise a (possibly empty) list of the
        ``VCL`` instances collected from the children.
    """
    vcls = []
    for child in model.children():
        # Bug fix: propagate pre_activation down the recursion; previously
        # nested modules always used the default (True), so requesting
        # post-activation hooks only affected the root module.
        cur_vcls = init_vcl(child, pre_activation)
        if type(cur_vcls) == VCL:
            vcls.append(cur_vcls)
        elif cur_vcls:
            vcls.extend(cur_vcls)
    # Detect torch activation modules (ReLU, Sigmoid, ...) by module path.
    isActivation = type(model).__module__ == 'torch.nn.modules.activation'
    # Modules may opt out by setting a truthy `no_vcl` attribute.
    isVCL = not (hasattr(model, 'no_vcl') and model.no_vcl)
    if isActivation:
        print("Adding VCL forward hook to {}: {}".format(type(model), isVCL))
        if isVCL:
            cur_vcl = VCL()
            if pre_activation:
                model.register_forward_pre_hook(cur_vcl)
            else:
                model.register_forward_hook(cur_vcl)
            # In-place activations would mutate the tensor the hook observes.
            model.inplace = False
            return cur_vcl
    return vcls
class VCL(nn.Module):
    '''
    Variance Constancy Loss (VCL) hook/layer.

    This loss is called usually before or after an activation and calculated
    per a given input: the per-activation variances of two mini-batch slices
    are compared, and the squared relative deviation is accumulated.

    Class-level state (``forward_loss``, ``step``) is shared by all instances
    so the per-layer losses can be summed once per optimization step.
    '''
    # Losses appended by every VCL instance during the current step.
    forward_loss = []
    # Number of times get_forward_loss() has been called.
    step = 0

    def __init__(self, device='cuda:0', beta_init=1.0, sample_size=5, name='vcl'):
        super(VCL, self).__init__()
        self.layer_loss = 0
        # beta is created lazily on the first forward pass, once the input
        # shape is known (see forward()).
        self.initialized = False
        self.beta_init = beta_init
        self.sample_size = sample_size
        self.device = device
        self.name = name

    @classmethod
    def get_forward_loss(cls):
        """Return the summed loss of all VCL layers and advance the step count."""
        VCL.step += 1
        return sum(VCL.forward_loss)

    @classmethod
    def zero_loss(cls):
        """Clear the accumulated per-layer losses (call once per step)."""
        VCL.forward_loss = []

    def set_name(self, name):
        self.name = name

    def set_sample_size(self, sample_size):
        self.sample_size = sample_size

    def get_layer_loss(self):
        return self.layer_loss

    def forward(self, *inp):
        # Unpack when VCL is called as a forward (pre-)hook: (module, inputs).
        if len(inp) > 1:
            inp = inp[1][0]
        # Unpack when VCL is used as a normal layer: (tensor,).
        else:
            inp = inp[0]
        # Initialization pass: create the beta parameter now that the input
        # shape (N, C, H, W) is known, and skip the loss computation.
        if not self.initialized:
            N, C, H, W = inp.shape
            tmp_tensor = torch.ones([C, H, W],
                                    device=self.device, dtype=torch.float32) * self.beta_init
            # Requires grad True by default, but must exist before the
            # optimizer is constructed.
            self.vcl_beta = nn.Parameter(tmp_tensor)
            self.initialized = True
            # Bug fix: return the input tensor. The original returned the
            # `input` *builtin* function here, corrupting the warm-up pass.
            return inp
        # Split the batch into sample_size chunks along the batch dim.
        slices = torch.split(inp, self.sample_size, dim=0)
        # Per-activation variance of the first two chunks.
        offset = 0
        var_a = slices[offset].var(dim=0).abs()
        var_b = slices[offset + 1].var(dim=0).abs()
        # VCL term: squared relative deviation of the two variances,
        # smoothed by the learnable beta.
        with torch.enable_grad():
            self.layer_loss = (1 - var_a / (var_b + self.vcl_beta)).pow(2).mean()
        VCL.forward_loss.append(self.layer_loss)
        # Guard against numerical blow-up.
        if torch.isnan(self.layer_loss):
            print("VCL exploded!")
            exit(1)
        return inp
# Repository: CodeWithSwastik/Tech-Struck
import datetime
import re
from io import BytesIO
from urllib.parse import urlencode

import discord
from cachetools import TTLCache
from discord import Color, Embed, Member
from discord.ext import commands
from jose import jwt
from reportlab.graphics import renderPM
from svglib.svglib import svg2rlg

from config.common import config
from config.oauth import github_oauth_config
from models import UserModel
class GithubNotLinkedError(commands.CommandError):
    """Raised when a command needs a linked GitHub account but none exists."""

    def __str__(self):
        message = (
            "Your github account hasn't been linked yet, "
            "please use the `linkgithub` command to do it"
        )
        return message
class Github(commands.Cog):
    """Commands for linking GitHub accounts and talking to the GitHub API."""

    # Themes supported by the github-readme-stats service; shared by the
    # stats and top-languages commands (previously duplicated inline).
    THEMES = (
        "default dark radical merko gruvbox tokyonight onedark cobalt "
        "synthwave highcontrast dracula"
    ).split(" ")

    def __init__(self, bot: commands.Bot):
        self.bot = bot
        # Matches the separators between "filename\n```...```" segments in
        # the creategist input.
        self.files_regex = re.compile(r"\s{0,}```\w{0,}\s{0,}")
        # Cache GitHub OAuth tokens per user id for 10 minutes.
        self.token_cache = TTLCache(maxsize=1000, ttl=600)

    @property
    def session(self):
        # Reuse the bot's internal aiohttp session (name-mangled attribute).
        return self.bot.http._HTTPClient__session

    async def cog_check(self, ctx: commands.Context):
        """Ensure the invoking user has a linked GitHub token.

        ``linkgithub`` itself is exempt. Raises GithubNotLinkedError for any
        other command when no token exists.
        """
        token = self.token_cache.get(ctx.author.id)
        if not token:
            user = await UserModel.get_or_none(id=ctx.author.id)
            if user is None or user.github_oauth_token is None:
                if ctx.command != self.link_github:
                    raise GithubNotLinkedError()
                # Bug fix: linkgithub may run without a stored user/token;
                # previously this path crashed with AttributeError on
                # `None.github_oauth_token`.
                return True
            token = user.github_oauth_token
            self.token_cache[ctx.author.id] = token
        ctx.gh_token = token
        return True

    @commands.command(name="linkgithub", aliases=["lngithub"])
    async def link_github(self, ctx: commands.Context):
        """DM the user a short-lived OAuth link to connect their GitHub account."""
        expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=120)
        url = "https://github.com/login/oauth/authorize?" + urlencode(
            {
                "client_id": github_oauth_config.client_id,
                "scope": "gist",
                "redirect_uri": "https://tech-struck.vercel.app/oauth/github",
                # Signed state ties the callback to this user and expires
                # together with the link.
                "state": jwt.encode(
                    {"id": ctx.author.id, "expiry": str(expiry)}, config.secret
                ),
            }
        )
        await ctx.author.send(
            embed=Embed(
                title="Connect Github",
                description=f"Click [this]({url}) to link your github account. This link invalidates in 2 minutes",
            )
        )

    @commands.command(name="creategist", aliases=["crgist"])
    async def create_gist(self, ctx: commands.Context, *, inp):
        """
        Create gists from within discord

        Example:
        filename.py
        ```
        # Codeblock with contents of filename.py
        ```
        filename2.txt
        ```
        Codeblock containing filename2.txt's contents
        ```
        """
        # Alternating [name, content, name, content, ...]; the trailing
        # element after the last codeblock is dropped.
        files_and_names = self.files_regex.split(inp)[:-1]
        # Dict comprehension to create the files 'object'
        files = {
            name: {"content": content + "\n"}
            for name, content in zip(files_and_names[0::2], files_and_names[1::2])
        }
        req = await self.github_request(ctx, "POST", "/gists", json={"files": files})
        res = await req.json()
        # TODO: Make this more verbose to the user and log errors
        await ctx.send(res.get("html_url", "Something went wrong."))

    @commands.command(name="githubsearch", aliases=["ghsearch", "ghse"])
    async def github_search(self, ctx: commands.Context, *, term: str):
        """Search GitHub repositories and show the top 5 results."""
        req = await self.github_request(
            ctx, "GET", "/search/repositories", dict(q=term, per_page=5)
        )
        data = await req.json()
        if not data["items"]:
            return await ctx.send(
                embed=Embed(
                    title=f"Searched for {term}",
                    color=Color.red(),
                    description="No results found",
                )
            )
        em = Embed(
            title=f"Searched for {term}",
            color=Color.green(),
            description="\n\n".join(
                [
                    "[{0[owner][login]}/{0[name]}]({0[html_url]})\n{0[stargazers_count]:,} :star:\u2800{0[forks_count]} \u2387\u2800\n{1}".format(
                        result, self.repo_desc_format(result)
                    )
                    for result in data["items"]
                ]
            ),
        )
        await ctx.send(embed=em)

    @commands.command(name="githubstats", aliases=["ghstats", "ghst"])
    async def github_stats(self, ctx, username="codewithswastik", theme="radical"):
        """Render a github-readme-stats card for ``username`` as a PNG."""
        theme = theme.lower()
        if theme not in self.THEMES:
            return await ctx.send(
                "Not a valid theme. List of all valid themes:- default, dark, radical, merko, gruvbox, tokyonight, onedark, cobalt, synthwave, highcontrast, dracula"
            )
        # Bug fix: build the URL once via urlencode; the old code built it
        # here and then overwrote it with a hand-formatted duplicate.
        url = "https://github-readme-stats.codestackr.vercel.app/api?" + urlencode(
            {
                "username": username,
                "show_icons": "true",
                "hide_border": "true",
                "theme": theme,
            }
        )
        file = await self.get_file_from_svg_url(url, exclude=[b"A++", b"A+"])
        await ctx.send(file=discord.File(file, filename="stats.png"))

    @commands.command(name="githublanguages", aliases=["ghlangs", "ghtoplangs"])
    async def github_top_languages(
        self, ctx, username="codewithswastik", theme="radical"
    ):
        """Render a github-readme-stats top-languages card as a PNG."""
        theme = theme.lower()
        if theme not in self.THEMES:
            return await ctx.send(
                "Not a valid theme. List of all valid themes:- default, dark, radical, merko, gruvbox, tokyonight, onedark, cobalt, synthwave, highcontrast, dracula"
            )
        url = (
            "https://github-readme-stats.codestackr.vercel.app/api/top-langs/?"
            + urlencode({"username": username, "theme": theme})
        )
        file = await self.get_file_from_svg_url(url)
        await ctx.send(file=discord.File(file, filename="langs.png"))

    async def get_file_from_svg_url(self, url, exclude=None, fmt="PNG"):
        """Fetch an SVG from ``url`` and return it rendered to ``fmt`` as BytesIO.

        ``exclude`` lists byte fragments to strip from the SVG before
        rendering (e.g. the uncentered "A+" grade badge).
        """
        res = await (await self.session.get(url)).content.read()
        # Note: exclude used to be a mutable default argument ([]).
        for fragment in (exclude or []):
            res = res.replace(fragment, b"")
        drawing = svg2rlg(BytesIO(res))
        file = BytesIO(renderPM.drawToString(drawing, fmt=fmt))
        return file

    @staticmethod
    def repo_desc_format(result):
        """Return the repo description truncated to 100 chars ('' if absent)."""
        description = result["description"]
        if not description:
            return ""
        return description if len(description) < 100 else (description[:100] + "...")

    def github_request(
        self,
        ctx: commands.Context,
        req_type: str,
        endpoint: str,
        params: dict = None,
        json: dict = None,
    ):
        """Return an awaitable aiohttp request against the GitHub API,
        authorized with the invoking user's OAuth token (set by cog_check)."""
        return self.session.request(
            req_type,
            f"https://api.github.com{endpoint}",
            params=params,
            json=json,
            headers={"Authorization": f"Bearer {ctx.gh_token}"},
        )
def setup(bot: commands.Bot):
    """Extension entry point: register the Github cog on the bot."""
    cog = Github(bot)
    bot.add_cog(cog)
|
# Copyright (c) 2011, <NAME>, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Author Ken Conley/<EMAIL>, <NAME>/<EMAIL>
import os
import traceback
from mock import Mock, patch
import rospkg.os_detect
def is_gentoo():
    """Return True when the host OS is detected as Gentoo."""
    detector = rospkg.os_detect.Gentoo()
    return detector.is_os()
def get_test_dir():
    # not used yet
    """Return the absolute path of the 'gentoo' fixture dir next to this file."""
    here = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(here, 'gentoo'))
# Requires 2.7 @unittest.skipIf(not rospkg.os_detect.Gentoo().is_os(), "not running Gentoo")
def test_portage_available():
    """portage_available() needs both /usr/bin/portageq and /usr/bin/emerge."""
    # Gentoo-only test: silently skip elsewhere (pre-dates unittest.skipIf).
    if not is_gentoo():
        print("Skipping not Gentoo")
        return
    from rosdep2.platforms.gentoo import portage_available
    # Monkey-patch os.path.exists so we can fake the presence/absence of the
    # portage binaries; the real function is restored at the end.
    original_exists = os.path.exists
    path_overrides = {}
    def mock_path(path):
        # Use the override table when present, else defer to the real check.
        if path in path_overrides:
            return path_overrides[path]
        else:
            return original_exists(path)
    m = Mock(side_effect=mock_path)
    os.path.exists = m
    #Test with portageq missing
    m.reset_mock()
    path_overrides = {}
    path_overrides['/usr/bin/portageq'] = False
    path_overrides['/usr/bin/emerge'] = True
    val = portage_available()
    assert val==False, "Portage should not be available without portageq"
    #Test with emerge missing
    m.reset_mock()
    path_overrides = {}
    path_overrides['/usr/bin/portageq'] = True
    path_overrides['/usr/bin/emerge'] = False
    val = portage_available()
    assert val==False, "Portage should not be available without emerge"
    # Test with nothing missing
    m.reset_mock()
    path_overrides = {}
    path_overrides['/usr/bin/portageq'] = True
    path_overrides['/usr/bin/emerge'] = True
    val = portage_available()
    assert val==True, "Portage should be available"
    # Restore the real os.path.exists.
    os.path.exists = original_exists
# This actually tests portage_detect_single and portage_detect
def test_portage_detect():
    """Exercise portage_detect with a mocked `portageq match` exec function."""
    # Gentoo-only test: silently skip elsewhere.
    if not is_gentoo():
        print("Skipping not Gentoo")
        return
    from rosdep2.platforms.gentoo import portage_detect
    # Empty request -> empty result.
    m = Mock()
    m.return_value = []
    val = portage_detect([], exec_fn=m)
    assert val == [], val
    # Test checking for a package that we do not have installed
    m = Mock(return_value = [])
    val = portage_detect(['tinyxml[stl]'], exec_fn=m)
    assert val == [], "Result was actually: %s" % val
    m.assert_called_with(['portageq', 'match', '/', 'tinyxml[stl]'])
    # Test checking for a package that we do have installed
    m = Mock(return_value = ['dev-libs/tinyxml-2.6.2-r1'])
    val = portage_detect(['tinyxml[stl]'], exec_fn=m)
    assert val == ['tinyxml[stl]'], "Result was actually: %s" % val
    m.assert_called_with(['portageq', 'match', '/', 'tinyxml[stl]'])
    # Test checking for two packages that we have installed
    # (note: expected result order differs from request order)
    m = Mock(side_effect = [['sys-devel/gcc-4.5.3-r2'], ['dev-libs/tinyxml-2.6.2-r1']])
    val = portage_detect(['tinyxml[stl]', 'gcc'], exec_fn=m)
    assert val == ['gcc', 'tinyxml[stl]'], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'tinyxml[stl]'])
    m.assert_any_call(['portageq', 'match', '/', 'gcc'])
    # Test checking for two missing packages
    m = Mock(side_effect = [[],[]])
    val = portage_detect(['tinyxml[stl]', 'gcc'], exec_fn=m)
    assert val == [], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'tinyxml[stl]'])
    m.assert_any_call(['portageq', 'match', '/', 'gcc'])
    # Test checking for one missing, one installed package
    m = Mock(side_effect = [['sys-devel/gcc-4.5.3-r2'], []])
    val = portage_detect(['tinyxml[stl]', 'gcc'], exec_fn=m)
    assert val == ['gcc'], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'tinyxml[stl]'])
    m.assert_any_call(['portageq', 'match', '/', 'gcc'])
    # Test checking for one installed, one missing package (reverse order)
    m = Mock(side_effect = [[], ['dev-libs/tinyxml-2.6.2-r1']])
    val = portage_detect(['tinyxml[stl]', 'gcc'], exec_fn=m)
    assert val == ['tinyxml[stl]'], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'tinyxml[stl]'])
    m.assert_any_call(['portageq', 'match', '/', 'gcc'])
    # Test duplicates (requesting the same package twice)
    #TODO what's the desired behavior here
    m = Mock(side_effect = [['dev-libs/tinyxml-2.6.2-r1'],['dev-libs/tinyxml-2.6.2-r1']])
    val = portage_detect(['tinyxml[stl]', 'tinyxml[stl]'], exec_fn=m)
    assert val == ['tinyxml[stl]','tinyxml[stl]'], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'tinyxml[stl]'])
    # and a second of the same, but any_call won't show that.
    # Test packages with multiple slots (e.g. python 2 and 3 both installed):
    # a single detected name is returned even with multiple matches.
    m = Mock(side_effect = [['dev-lang/python-2.7.2-r3','dev-lang/python-3.2.2']])
    val = portage_detect(['python'], exec_fn=m)
    assert val == ['python'], "Result was actually: %s" % val
    m.assert_any_call(['portageq', 'match', '/', 'python'])
def test_PortageInstaller():
    """Check PortageInstaller's emerge command generation (interactive or not)."""
    # Gentoo-only test: silently skip elsewhere.
    if not is_gentoo():
        print("Skipping not Gentoo")
        return
    from rosdep2.platforms.gentoo import PortageInstaller
    @patch.object(PortageInstaller, 'get_packages_to_install')
    def test(mock_method):
        installer = PortageInstaller()
        # Nothing to install -> no commands.
        mock_method.return_value = []
        assert [] == installer.get_install_command(['fake'])
        # One `sudo emerge` invocation per package to install.
        mock_method.return_value = ['a', 'b']
        expected = [['sudo', 'emerge', 'a'],
                    ['sudo', 'emerge', 'b']]
        val = installer.get_install_command(['whatever'], interactive=False)
        assert val == expected, val
        # Interactive mode adds emerge's ask flag (-a).
        expected = [['sudo', 'emerge', '-a', 'a'],
                    ['sudo', 'emerge', '-a', 'b']]
        val = installer.get_install_command(['whatever'], interactive=True)
        assert val == expected, val
    try:
        test()
    except AssertionError:
        # Print the full traceback before re-raising so the failure site
        # inside the nested helper is visible.
        traceback.print_exc()
        raise
|
# file: python-obj-system.py
#!/usr/bin/env python3
from mdpyformat import *
import pprintex
header_md("""Python object primer for Python3 / meta classes""" )
header_md("""Introduction""", nesting = 2)
print_md("""
Python is good at creating the illusion of being a simple programming language. Sometimes this illusion fails, like when you have to deal with the import/module system [my attempts to get it](https://github.com/MoserMichael/pythonimportplayground). Another area of complexity is the object system, last week I tried to understand [python enums](https://docs.python.org/3/library/enum.html), it turns that they are built on top of [meta classes](https://github.com/python/cpython/blob/2c56c97f015a7ea81719615ddcf3c745fba5b4f3/Lib/enum.py#L511), So now I have come to realize, that I really don't know much about python and its object system. The purpose of this text is to figure out, how the python object system ticks.
""")
header_md("""The Python object system""", nesting=2)
header_md("""How objects are represented""", nesting=3)
print_md("""
Lets look at a simple python class Foo with a single base class Base, and see how objects are created and represented in memory
""")
eval_and_quote("""
# The base class. All Python3 classes have the base class of type object.
# The long form is therefore
# class Base(object):
# However Pylint will tell you, that this long form is redundant
class Base:
# Class variables are shared between all instances of the class Base, and declared like this:
base_class_var = "Base"
# The object constructor/init method, Note the first 'self' argument, which refers to the object instance.
def __init__(self):
print("calling Base.__init__")
# Object variables are specific to a given instance of Base
# Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method)
self.obj_var_base = 10
# An object method - needs to access the object instance, which is passed as first 'self' argument.
def show_base(self):
print_md("obj_var_base: ", self.obj_var_base)
# A class method/static method is called without an object instance.
@staticmethod
def make_base():
return Base()
# class Foo with a base class Base
class Foo(Base):
# Class variables are shared between all instances of the class Foo, and declared like this:
class_var = 42
class_var2 = 43
# The object constructor/init method, Note the first 'self' argument, which is the object instance.
def __init__(self):
# When not calling the base class __init__ method: the base class object variables are not added to the object !!!
# The base class __init__ adds the 'obj_var_base' member to the __dict__ member of this object instance.
# By convention: you first init the base classes, before initialising the derived class.
super().__init__()
print("calling Foo.__init__")
# Object variables are specific to a given instance of Foo
# Each object has a builtin hash member: __dict__ this one lists all object members (including those added by the base class __init__ method)
# Define object variable: obj_var_a
self.obj_var_a=42
# Define object variable: obj_var_b
self.obj_var_b="name"
# An object method - needs to access the object instance, which is passed as first 'self' argument.
def show_derived(self):
print_md("obj_var_a:", self.obj_var_a, "obj_var_b:", self.obj_var_b)
# A class method/static method is called without an object instance.
@staticmethod
def make_foo():
return Foo()
# Make a new object instance of type Foo class.
foo_obj=Foo()
""")
print_md("The memory address of object foo_obj is returned by the [id built-in](https://docs.python.org/3/library/functions.html#id)")
eval_and_quote('print("id(foo_obj) : ", id(foo_obj))')
print_md("If two variables have the same object id value, then they both refer to the very same object/instance!")
print_md("""
Each user defined object has a __dict__ attribute, this is a dictionary that lists all the object instance variables.
This also includes instance members that were added by the __init__ method of the base class !!
""")
eval_and_quote("""print("foo_obj.__dict__ : ", foo_obj.__dict__)""")
print_md("""
So you see that the following is exactly the same thing:
""")
eval_and_quote("""assert id(foo_obj.obj_var_a) == id( foo_obj.__dict__['obj_var_a'] ) """)
print_md("""
Wait, but where does the __dict__ attribute come from?
The [built-in getattr](https://docs.python.org/3/library/functions.html#getattr) function can return this built-in __dict__ attribute!
Interesting: the python notation object.member_name can mean different things:
1) for built-in attributes it means a call to getattr
2) for object instances (assigned in the __init__ method of the class) it means a call to retrieve the __dict__ attribute, and then a lookup of the variable name in that dictionary.
""")
print_md( """foo_obj.__dict__ and getattr(foo_obj,'__dict__',None) is the same thing! """)
eval_and_quote("""assert id(foo_obj.__dict__) == id( getattr(foo_obj,'__dict__',None) )""")
print_md("""
The getattr builtin function has a good part, its return value can be checked for None. This can be used, in order to check if the argument is an object with a __dict__ attribute.
""")
eval_and_quote("""base_obj = object()""")
print_md("An object of built-in type ", type(base_obj), " doesn't have a __dict__ member")
eval_and_quote("""assert getattr(base_obj, '__dict__', None) is None""")
eval_and_quote("""int_obj = 42""")
print_md("An object of built-in type ", type(int_obj), " doesn't have a __dict__ member")
eval_and_quote("""assert getattr(int_obj, '__dict__', None) is None""")
print_md("""
The [dir builtin](https://docs.python.org/3/library/functions.html#dir) function does different things, depending on the argument,
for regular objects it returns a "list that contains the object’s attributes’ names, the names of its class’s attributes, and recursively of the attributes of its class’s base classes.",
all this is sorted alphabetically.
""")
eval_and_quote("""print("dir(foo_obj) : ", dir(foo_obj))""")
# doesn't have __slots__, how odd.
#print_md("foo_obj.__slots__ : ", foo_obj.__slots__)
header_md("""How classes are represented""", nesting=3)
print_md("""The built-in function [type](https://docs.python.org/3/library/functions.html#type), is returning the class of an object, when applied to a variable (to be more exact: type is a built-in class, and not a built-in function, more on that later)""")
eval_and_quote("""
# Make a new object instance of type Foo class.
foo_obj=Foo()
print("class of object foo_obj - type(foo_obj): ", type(foo_obj))
# That's the same as showing the __class__ member of the variable (in Python3)
print("foo_obj.__class__ :", foo_obj.__class__)
""")
print_md("""
The class is an object, it's purpose is to hold the static data that is shared between all object instances.
Each object has a built-in __class__ attribute, that refers to this class object.
Note that the name of the class includes the module name, __main__ if the class is defined in the file given as argument to the python interpreter.
Also note that the type built-in of type(foo_obj) is really the same as: str(foo_obj.__class__) (for Python3)
""")
print_md("""
Again, the built in attribute __class__ can also be accessed with the getattr built-in function.
""")
eval_and_quote( """
print("foo_obj.__class__ and getattr(foo_obj,'__class__',None) is the same thing!")
assert id(foo_obj.__class__) == id( getattr(foo_obj,'__class__',None) )
""")
print_md("""The __name__ and __qualname__ built-in attributes return the name of the class, without the module name """)
eval_and_quote( """
print("foo_boj.__class__.__name__ : ", foo_obj.__class__.__name__)
print("foo_boj.__class__.__qualname__ : ", foo_obj.__class__.__qualname__)""" )
print_md("""
To get the immediate base class list as declared in that particular class.
""")
eval_and_quote( """print("foo_obj.__class__.__bases__ :", foo_obj.__class__.__bases__)""")
print_md("""
The __mro__ member is a list of types that stands for 'method resoultion order', when searching for an instance method, this list is searched in order to resolve the method name.
The Python runtime creates this lists by enumerating all of its base classes recursively, in depth first traversal order. For each class it follows the base classes, from the left ot the right
This list is used to resolve a member function 'member_function' of an object, when you call it via: obj_ref.member_function()
""")
eval_and_quote( """print("foo_obj.__class__.__mro__ :", foo_obj.__class__.__mro__) """ )
print_md("Computing the method resolution order by hand")
eval_and_quote("""
# function to a class hierarchy, in depth first search order (like what you get in MRO - method resolution order)
def show_type_hierarchy(type_class):
def show_type_hierarchy_imp(type_class, nesting):
if len(type_class.__bases__) == 0:
return
prefix = "\t" * nesting
print( prefix + "type:", type_class.__name__ , "base types:", ",".join( map( lambda ty : ty.__name__, type_class.__bases__) ) )
#print( prefix + "str(", type_class.__name__ , ").__dict__ : ", type_class.__dict__ )
for base in type_class.__bases__:
show_type_hierarchy_imp(base, nesting+1)
if not inspect.isclass(type_class):
print("object ", str(type_class), " is not class")
return
print("show type hierarchy of class:")
show_type_hierarchy_imp(type_class, 0)
class LevelOneFirst:
pass
class LevelOneSecond:
pass
class LevelOneThird:
pass
class LevelTwoFirst(LevelOneFirst, LevelOneSecond):
pass
class LevelThree(LevelTwoFirst,LevelOneThird):
pass
show_type_hierarchy(LevelThree)
print("LevelThree.__mro__:", LevelThree.__mro__)
""")
eval_and_quote("""
print("*** mro in detail:")
for cls in foo_obj.__class__.__mro__:
print_md("\tclass-in-mro: ", str(cls), "id:", id(cls), "cls.__dict__: ", cls.__dict__)
print("*** eof mro in detail")
""")
print_md("""
The class object has a __dict__ too - here you will see all the class variables (for Foo these are class_var and class_var2) and class methods (defined with @staticmethod), but also the object methods (with the self parameter)
""")
eval_and_quote( """print("foo_obj.__class__.__dict__ : ", foo_obj.__class__.__dict__)""" )
# doen't have slots, how odd.
#print_md("foo_obj.__class__.__slots__ : ", foo_obj.__class__.__slots__)
print_md("""
Again, the [dir](https://docs.python.org/3/library/functions.html#dir) built-in function does different things, depending on the argument type
for a class object it returns a "list that contains the names of its attributes, and recursively of the attributes of its bases"
That means it displays both the names of static variables, and the names of the static functions, for the class and it's base classes.
Note that the names are sorted.
""")
eval_and_quote("""print("dir(foo_obj.__class__) : ", dir( foo_obj.__class__ ) )""")
print_md("""
The class object derives from built-in class type, you can check if an object is a class by checking if it is an instance of class 'type'!
""")
# check that foo_obj.__class__ is a type - it is derived from built-in class type
eval_and_quote("""
assert isinstance(foo_obj.__class__, type)
# same thing as
assert inspect.isclass(foo_obj.__class__)
# an object is not derived from class type.
assert not isinstance(foo_obj, type)
# same thng as
assert not inspect.isclass(foo_obj)
""")
print_md( """
Now there is much more: there is the inspect module that returns it all, a kind of rosetta stone of the python object model.
inspect.getmembers returns everything! You can see the source of inspect.getmembers [here](https://github.com/python/cpython/blob/3.10/Lib/inspect.py)
""")
eval_and_quote("""print("inspect.getmembers(foo_obj): ", inspect.getmembers(foo_obj))""")
print_md("""
Attention!
the type of the object is the class of the object (remember: the classes is an object, where the __dict__ member holds the class variables)
""")
eval_and_quote("""
print("type(foo_obj) : ", type(foo_obj))
# same thing in python3
print("str(foo_obj.__class__) : ", str(foo_obj.__class__) )""")
print_md("""
Let's look at both the type and identity of all these objects:
""")
eval_and_quote("""print("id(foo_obj) : ", id(foo_obj), " str(foo_obj) : ", str(foo_obj))""")
print_md("""
The following expressions refer to the same thing: the type of the object foo_obj, also known as the class of foo_obj
""")
eval_and_quote("""
print("type(foo_obj) :", type(foo_obj), " id(type(foo_obj)) :", id(type(foo_obj)), " type(foo_obj).__name__ : ", type(foo_obj).__name__ )
print("str(foo_obj.__class__) :", str(foo_obj.__class__), " id(foo_obj.__class__) :", id(foo_obj.__class__), "foo_obj.__class__.__name__ : ", foo_obj.__class__.__name__)
print("str(Foo) :", str(Foo), " id(Foo) :", id( Foo ), "Foo.__name__ :", Foo.__name__)
assert id(Foo) == id(type(foo_obj))
assert id(type(foo_obj)) == id(foo_obj.__class__)
""")
print_md("""
The Foo class members
""")
eval_and_quote("""
print("foo_obj.__class__.__dict__ :", foo_obj.__class__.__dict__)
print("Foo.__dict__ :", Foo.__dict__)
# everything accessible form the class
print("dir(foo_obj.__class__) :", dir( foo_obj.__class__))
""")
print_md("""
The following expressions refer to the same thing: the meta-type of the foo_obj.
""")
eval_and_quote("""
print("type(foo_obj.__class__.__class__):", type(foo_obj.__class__.__class__), " id( foo_obj.__class__.__class__ ) :" , id( foo_obj.__class__.__class__ ) , "foo_obj.__class__.__class__.__name__ : ", foo_obj.__class__.__class__.__name__ )
print("type(Foo) :", type(Foo), " id(type(Foo)) : ", id( type( Foo ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__)
print("type(Foo.__class__) :", type(Foo.__class__), " id(type(Foo.__class__)) : ", id( type( Foo.__class__ ) ), " Foo.__class__.__name__ :", Foo.__class__.__name__)
print("type(Foo.__class__.__class__) :", type(Foo.__class__.__class__), " id(type(Foo.__class__.__class__)) :", id( type( Foo.__class__.__class__ ) ) )
assert type(Foo) == type(Foo.__class__)
assert type(Foo.__class__) == type(Foo.__class__.__class__)
""")
print_md("""
The type of the type is the metaclass - the metaclass constructs the Class object! (the class of an object is also an object!)
""")
eval_and_quote("""
print("type( type( foo_obj ) ) :", type( type( foo_obj ) ) )
print("str( foo_obj.__class__.__class__ ) :", str(foo_obj.__class__.__class__) )
""")
# result:
eval_and_quote("""
print(" metaclass members: foo_obj.__class__.__class__.__dict__ : ", foo_obj.__class__.__class__.__dict__)
print(" everything accessible form metaclass: dir( foo_obj.__class__.__class__ ) : ", dir( foo_obj.__class__.__class__) )
""")
print_md("""
Wow, any class can tell all of its derived classes! I wonder how that works...
""")
eval_and_quote("""print("Base.__subclasses__() : ", Base.__subclasses__())""")
header_md("""Object creation""", nesting=3)
print_md("""
Objects recap:
The object instance holds the __dict__ attribute of the object instance, it's value is a dictionary that holds the object instance members.
The class is an object that is shared between all object instances, and it holds the static data (class variables, class methods)
What happens upon: foo = Foo() ?
Take the type of Foo - the metaclass of Foo, the metaclass both knows how to create an instance of the class Foo, and the object instances.
A metaclass is derived from built-in class 'type', The 'type' constructor with three argument creates a new class object. [see reference](https://docs.python.org/3/library/functions.html#type)
class_obj = Foo
The metaclass is used as a 'callable' - it has a __call__ method, and can therefore be called as if it were a function (see more about callables in the course on [decorators](https://github.com/MoserMichael/python-obj-system/blob/master/decorator.md))
Now this __call__ method creates and initialises the object instance.
The implementation of __call__ now does two steps:
- Class creation is done in the [__new__](https://docs.python.org/3/reference/datamodel.html#object.__new__) method of the metaclass. The __new__ method creates the Foo class, it is called exactly once, upon class declaration (you will see this shortly, in the section on custom meta classes)
- It uses the Foo class and calls its to create and initialise the object (call the __new__ method of the Foo class, in order to create an instance of Foo, then calls the __init__ instance method of the Foo class, on order to initialise it). This all done by the __call__ method of the metaclass.
instance_of_foo = meta_class_obj.__call__()
(actually that was a bit of a simplification...
)
""")
eval_and_quote("""
# same as: foo_obj = Foo()
foo_obj = Foo.__call__()
print("foo_obj : ", foo_obj)
print("foo_obj.__dict__ : ", foo_obj.__dict__)
""")
print_md("This is the same as:")
eval_and_quote("""
class_obj = Foo
instance_of_foo = class_obj()
print("instance_of_foo : ", instance_of_foo)
print("instance_of_foo.__dict__ : ", instance_of_foo.__dict__)
""")
header_md("""Custom metaclasses""", nesting = 2)
header_md("""Metaclasses for implementing singleton objects""", nesting = 3)
print_md("""
An object can define a different way of creating itself, it can define a custom metaclass, which will do exactly the same object creation steps described in the last section.
Let's examine a custom metaclass for creating singleton objects.
""")
eval_and_quote("""
# metaclass are always derived from the type class.
# the type class has functions to create class objects
# the type class has also a default implementation of the __call__ method, for creating object instances.
class Singleton_metaclass(type):
# invoked to create the class object instance (for holding static data)
# this function is called exactly once, in order to create the class instance!
def __new__(meta_class, name, bases, cls_dict, **kwargs):
print("Singleton_metaclass: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}')
class_instance = super().__new__(meta_class, name, bases, cls_dict)
print("Singleton_metaclass: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance))
# the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class.
class_instance.__singleton_instance__ = None
return class_instance
def __call__(cls, *args, **kwargs):
# we get here to create an object instance. the class object has already been created.
print("Singleton_metaclass: __call__ args:", *args, f'kwargs: {kwargs}')
# check if the singleton has already been created.
if cls.__singleton_instance__ is None:
# create the one an only instance object.
instance = cls.__new__(cls)
# initialise the one and only instance object
instance.__init__(*args, **kwargs)
# store the singleton instance object in the class variable __singleton_instance__
cls.__singleton_instance__ = instance
# return the singleton instance
return cls.__singleton_instance__
import math
# the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo
class SquareRootOfTwo(metaclass=Singleton_metaclass):
# the __init__ method is called exactly once, when the first instance of the singleton is created.
# the square root of two is computed exactly once.
def __init__(self):
self.value = math.sqrt(2)
print("SquareRootOfTwo.__init__ self:", self)
print("creating the objects instances...")
sqrt_root_two_a = SquareRootOfTwo()
print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value)
sqrt_root_two_b = SquareRootOfTwo()
print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_two_a) == id(sqrt_root_two_b)
""")
header_md("""Passing arguments to metaclasses""", nesting = 3)

# Fixed: this literal was opened with four quote characters (""""), which put a
# stray '"' at the start of the rendered markdown; also "Lets" -> "Let's".
print_md("""
Let's extend the previous singleton creating metaclass, so that it can pass parameters to the __init__ method of the object, these parameters are defined together with the metaclass specifier.
""")
eval_and_quote("""
# metaclass are always derived from the type class.
# The type class has functions to create class objects
# The type class has also a default implementation of the __call__ method, for creating object instances.
class Singleton_metaclass_with_args(type):
# invoked to create the class object instance (for holding static data)
# this function is called exactly once, in order to create the class instance!
def __new__(meta_class, name, bases, cls_dict, **kwargs):
print("Singleton_metaclass_with_args: __new__ meta_class:", meta_class, "name:", name, "bases:", bases, "cls_dict:", cls_dict, f'kwargs: {kwargs}')
class_instance = super().__new__(meta_class, name, bases, cls_dict)
print("Singleton_metaclass_with_args: __new__ return value: ", class_instance, "type(class_instance):", type(class_instance))
# the class class variable __singleton_instance__ will hold a reference to the one an only object instance of this class.
class_instance.__singleton_instance__ = None
# the keywords that have been specified, are passed into the class creation method __new__.
# save them as a class variable, so as to pass them to the object constructor!
class_instance.__kwargs__ = kwargs
return class_instance
def __call__(cls, *args, **kwargs):
# we get here to create an object instance. the class object has already been created.
print("Singleton_metaclass_with_args: __call__ args:", *args, f'kwargs: {kwargs}')
# check if the singleton has already been created.
if cls.__singleton_instance__ is None:
# create the one an only instance object.
instance = cls.__new__(cls)
# initialise the one and only instance object
# pass it the keyword parameters specified for the class!
instance.__init__(*args, **cls.__kwargs__)
# store the singleton instance object in the class variable __singleton_instance__
cls.__singleton_instance__ = instance
# return the singleton instance
return cls.__singleton_instance__
import math
class AnySquareRoot:
def __init__(self, arg_val):
self.value = math.sqrt(arg_val)
# the metaclass specifier tells python to use the Singleton_metaclass, for the creation of an instance of type SquareRootOfTwo
class SquareRootOfTwo(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=2):
# the init method is called with arg_num specified in the class definition (value of 2)
def __init__(self, arg_num):
super().__init__(arg_num)
class SquareRootOfThree(AnySquareRoot, metaclass=Singleton_metaclass_with_args, arg_num=3):
# the init method is called with arg_num specified in the class definition (value of 3)
def __init__(self, arg_num):
super().__init__(arg_num)
print("creating the objects instances...")
sqrt_root_two_a = SquareRootOfTwo()
print("sqrt_two_a id(sqrt_root_two_a):", id(sqrt_root_two_a), "type(sqrt_root_two_a):", type(sqrt_root_two_a), "sqrt_root_two_a.value:", sqrt_root_two_a.value)
sqrt_root_two_b = SquareRootOfTwo()
print("sqrt_two_b id(sqrt_root_two_b)", id(sqrt_root_two_b), "type(sqrt_root_two_b):", type(sqrt_root_two_b), "sqrt_root_two_b.value:", sqrt_root_two_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_two_a) == id(sqrt_root_two_b)
sqrt_root_three_a = SquareRootOfThree()
print("sqrt_three_a id(sqrt_root_three_a):", id(sqrt_root_three_a), "type(sqrt_root_three_a):", type(sqrt_root_three_a), "sqrt_root_three_a.value:", sqrt_root_three_a.value)
sqrt_root_three_b = SquareRootOfThree()
print("sqrt_three_b id(sqrt_root_three_b)", id(sqrt_root_three_b), "type(sqrt_root_three_b):", type(sqrt_root_three_b), "sqrt_root_three_b.value:", sqrt_root_three_b.value)
# all singleton objects of the same class are referring to the same object
assert id(sqrt_root_three_a) == id(sqrt_root_three_b)
""")
header_md("""Metaclasses in the Python3 standard library""", nesting=2)
print_md("""
This section lists examples of meta-classes in the python standard library. Looking at the standard library of a language is often quite useful, when learning about the intricacies of a programming language.
""")
header_md("""ABCMeta class""", nesting=3)
print_md("""The purpose of this metaclass is to define abstract base classes (also known as ABC's), as defined in [PEP 3119](https://www.python.org/dev/peps/pep-3119/), the documentation for the metaclass [ABCMeta class](https://docs.python.org/3/library/abc.html#abc.ABCMeta).
A python metaclass imposes a different behavior for builtin function [isinstance](https://docs.python.org/3/library/functions.html#isinstance) and [issubclass](https://docs.python.org/3/library/functions.html#issubclass) Only classes that are [registered](https://docs.python.org/3/library/abc.html#abc.ABCMeta.register) with the metaclass, are reported as being subclasses of the given metaclass. The referenced PEP explains, why this is needed, i didn't quite understand the explanation. Would be helpful if the reader can clarify this issue.
""")
header_md("""Enum classes""", nesting=3)
print_md("""Python has support for [enum classes](https://docs.python.org/3/library/enum.html). An enum class lists a set of integer class variables, these variables can then be accessed both by their name, and by their integer value.
An example usage: Note that the class doesn't have a constructor, everything is being taken care of by the baseclass [enum.Enum](https://docs.python.org/3/library/enum.html#enum.Enum) which is making use of a meta-class in he definition of the Enum class [here](https://docs.python.org/3/library/enum.html), this metaclass [EnumMeta source code](https://github.com/python/cpython/blob/f6648e229edf07a1e4897244d7d34989dd9ea647/Lib/enum.py#L161) then creates a behind the scene dictionary, that maps the integer values to their constant names.
The advantage is, that you get an exception, when accessing an undefined constant, or name. There are also more things there, please refer to the linked [documentation](https://docs.python.org/3/library/enum.html)
""")
eval_and_quote("""
import enum
class Rainbow(enum.Enum):
RED=1
ORANGE=2
YELLOW=3
GREEN=4
BLUE=5
INDIGO=6
VIOLET=7
color=Rainbow.GREEN
print("type(Rainbow.GREEN):", type(Rainbow.GREEN))
print("The string rep Rainbow.Green.name:", Rainbow.GREEN.name, "type(Rainbow.GREEN.name):", type(Rainbow.GREEN.name))
print("The integer rep Rainbow.GREEN.value: ", Rainbow.GREEN.value, "type(Rainbow.GREEN.value):", type(Rainbow.GREEN.value))
print("Access by name: Rainbow['GREEN']:", Rainbow['GREEN'])
print("Access by value: Rainbow(4):", Rainbow(4))
# which is the same thing
assert id(Rainbow['GREEN']) == id(Rainbow(4))
""")
# Closing section of the tutorial.
header_md("""Conclusion""", nesting=2)

print_md("""
Python meta-classes and decorators are very similar in their capabilities.
Both are tools for [metaprogramming](https://en.wikipedia.org/wiki/Metaprogramming), tools for modifying the program text, and treating and modifying code, as if it were data.
I would argue, that decorators are most often the easiest way of achieving the same goal.
However some things, like hooking the classification of classes and objects (implementing class methods [__instancecheck__ and __subclasscheck__](https://docs.python.org/3/reference/datamodel.html#customizing-instance-and-subclass-checks), can only be done with meta-classes.
I hope, that this course has given you a better understanding, of what is happening under the hood, which would be a good thing.
""")

print_md("*** eof tutorial ***")
|
# Repository: bds-ailab/logflow
# Copyright 2020 BULL SAS All rights reserved #
from collections import Counter
from torch.utils.data import Dataset
import word2vec # type: ignore
import h5py # type: ignore
from loguru import logger
import time
import pickle
import numpy as np # type: ignore
DTYPE = np.float32
from typing import Dict, List, Any
class Cardinality(Dataset):
    """A cardinality describes the number of examples per pattern.

    Each cardinality groups the patterns whose occurrence count has the same
    number of digits (i.e. the same power of ten): cardinality 7 holds the
    patterns with ~10^7 examples.  It extends the pytorch ``Dataset`` class so
    the data can be fed to a deep-learning model through a dataloader.

    Args:
        cardinality (int): the value of the cardinality.
        path_list_classes (str): path to the data (the list of patterns to learn).
        path_w2v: path to the word2vec model used to turn a pattern into a vector.
        size (int, optional): number of examples to use. Defaults to -1 (all).
        one_model (bool, optional): use one global model instead of one model
            per cardinality.
        set_cardinalities (set, optional): cardinalities learned jointly when
            ``one_model`` is used.
    """

    def __init__(self, cardinality: int, path_list_classes: str, path_w2v, size=-1, one_model=False, set_cardinalities=()):
        self.cardinality = cardinality
        self.path_list_classes = path_list_classes
        self.size = size
        self.path_model_w2v = path_w2v
        # filled in by compute_position()
        self.number_of_classes = 0
        self.set_classes_kept = ()
        self.loaded = False
        # number of previous patterns used to predict the next one
        self.size_windows = 30
        self.one_model = one_model
        self.set_cardinalities = set_cardinalities

    def compute_position(self):
        """Compute the position of each usable example in the pattern list.

        One cardinality learns to predict only the patterns with a specific
        cardinality, so we store the index of every matching pattern in the
        initial data to be able to provide it to the model while learning.
        """
        np_list_classes = np.asarray(self.list_classes)
        set_classes = np.unique(np_list_classes)
        list_classes_kept = []
        # A pattern's cardinality is the digit count of its occurrence count.
        # (Single loop replaces the original duplicated one_model/else loops.)
        for event in set_classes:
            event_cardinality = len(str(self.counter[event]))
            if self.one_model:
                keep = event_cardinality in self.set_cardinalities
            else:
                keep = event_cardinality == self.cardinality
            if keep:
                list_classes_kept.append(event)
        self.set_classes_kept = np.unique(list_classes_kept)
        ix = np.isin(np_list_classes, self.set_classes_kept)
        self.list_position = np.where(ix)[0]
        try:
            self.number_of_classes = max(self.set_classes_kept) + 1
        except ValueError:
            # max() of an empty selection: no pattern matches this cardinality.
            # (Was a bare `except:`, which also hid unrelated errors.)
            self.number_of_classes = 0
        self.loaded = True

    def load_files(self):
        """Load the pattern list (hdf5) and the pickled word2vec model."""
        logger.info("Loading file for cardinality: " + str(self.cardinality) + " " + str(self.path_list_classes))
        with h5py.File(self.path_list_classes, 'r') as file_h5py:
            if self.size != -1:
                self.list_classes = file_h5py['list_classes'][:self.size]
            else:
                self.list_classes = file_h5py['list_classes'][()]
        self.list_position = []
        with open(self.path_model_w2v, "rb") as file_model:
            dict_local = pickle.load(file_model)
            self.w2v = dict_local["word2vec"]
            self.counter = dict_local["counter_patterns"]

    def __len__(self) -> int:
        """Return the number of usable examples."""
        return len(self.list_position)

    def __getitem__(self, idx: int) -> Dict[str, Any]:
        """Return one training example for the dataloader.

        For the pattern at position ``idx`` we gather the ``size_windows``
        previous patterns (skipping occurrences of the pattern to predict)
        and embed each of them with the word2vec model.

        Example with a window of 5 and data [10, 4, 5, 3, 10, 9, 5, 3, 5]:
        the last pattern (5) is the one to predict and the selected window is
        [4, 3, 10, 9, 3] -- the logs matching pattern 5 are removed.

        Returns:
            dict: the pattern to predict ('output') and the window of
            embeddings ('input'); {'output': -1, 'input': [-1]} marks an
            example without any usable history.
        """
        index_real = self.list_position[idx]
        # assumes every w2v embedding has 20 dimensions -- TODO confirm
        list_embedding = np.zeros((self.size_windows, 20), dtype=DTYPE)
        index_add = 0
        output = self.list_classes[index_real]
        index = index_real - 1
        while index_add != self.size_windows:
            # Reached the start of the data without collecting anything:
            # the example is unusable.
            if index < 0 and np.count_nonzero(list_embedding) == 0:
                return {'output': -1, 'input': [-1]}
            if index < 0:
                # Not enough history: pad the remaining window slots.
                # NOTE(review): list_embedding[-1] is still zero at this point,
                # so this pads with zero vectors -- possibly meant to repeat
                # the last collected embedding; confirm intent.
                for i in range(self.size_windows - index_add):
                    list_embedding[index_add] = list_embedding[-1]
                    index_add += 1
                break
            # Keep only patterns different from the one to predict.
            if self.list_classes[index] != output:
                list_embedding[index_add] = self.w2v[str(self.list_classes[index])]
                index_add += 1
            index -= 1
        return {'output': output, 'input': list_embedding}
|
# File: Image Classifier Part-2/predict_helper.py
from PIL import Image
import numpy as np
import torch
from make_model import make_model
from torch import optim
from torchvision import transforms
import argparse
def get_input_args(argv=None):
    """Parse the command-line arguments for prediction.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (added, backward-compatibly, so the parser can be unit-tested).

    Returns:
        argparse.Namespace with gpu, img, checkpoint, topk and categeory.
    """
    parser = argparse.ArgumentParser()
    # NOTE(review): type=str with default=False means any supplied value (even
    # the string "False") is truthy; callers should compare against "True"
    # explicitly. Kept as-is to preserve the existing CLI contract.
    parser.add_argument('--gpu', type=str, default=False, help='Usage: --gpu True')
    parser.add_argument('--img', type=str, default='flowers/test/97/image_07719.jpg', help='path of image file')
    parser.add_argument('--checkpoint', type=str, default='checkpoint/vgg_train_test.pt', help='path of checkpoint')
    parser.add_argument('--topk', type=int, default=5, help='select top k(int) probability')
    parser.add_argument('--categeory', type=str, default='cat_to_name.json', help='path of json file')
    return parser.parse_args(argv)
def load_checkpoint(path):
    """Rebuild a model and its optimizer from a checkpoint file on disk."""
    # Map everything onto CPU regardless of where the checkpoint was saved.
    checkpoint = torch.load(path, map_location=lambda storage, loc: storage)

    dropout = checkpoint['dropout']
    hidden_units = checkpoint['hidden_units']
    architecture = checkpoint['arch']
    learning_rate = checkpoint['lr']
    epochs = checkpoint['epochs']  # read (and key-checked) even though unused here
    saved_weights = checkpoint['state_dict']
    class_to_idx = checkpoint['class_to_idx']

    model = make_model(architecture, hidden_units, dropout)
    model.class_to_idx = class_to_idx
    print("Loading ", model.name, " checkpoint\n")

    # The trained head lives in .classifier for vgg/densenet, .fc for resnet.
    if model.name == 'vgg16' or model.name == 'densenet121':
        head = model.classifier
    elif model.name == 'resnet50':
        head = model.fc
    head.load_state_dict(saved_weights)
    optimizer = optim.Adam(head.parameters(), lr=learning_rate)
    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    # To keep training after loading on a GPU, the optimizer state tensors
    # would have to be moved to CUDA first, e.g.:
    #   for state in optimizer.state.values():
    #       for k, v in state.items():
    #           if isinstance(v, torch.Tensor):
    #               state[k] = v.cuda()

    print(model.name, " loaded successfully\n")
    return model, optimizer
def process_image(image_path):
    ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
        returns an Numpy array
    '''
    pipeline = transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor()])

    tensor = pipeline(Image.open(image_path)).float()
    arr = np.array(tensor)

    # Standard ImageNet normalization constants.
    imagenet_mean = np.array([0.485, 0.456, 0.406])
    imagenet_std = np.array([0.229, 0.224, 0.225])

    # ToTensor yields (C, H, W); normalize in (H, W, C), then move channels back.
    arr = (np.transpose(arr, (1, 2, 0)) - imagenet_mean) / imagenet_std
    return np.transpose(arr, (2, 0, 1))
#############################################################################
##### Alternate process_image ##############################################
###########################################################################
# def process_image(image):
# ''' Scales, crops, and normalizes a PIL image for a PyTorch model,
# returns an Numpy array
# '''
# img_loader = transforms.Compose([
# transforms.Resize(256),
# transforms.CenterCrop(224),
# transforms.ToTensor()])
# pil_image = Image.open(image)
# pil_image = img_loader(pil_image).float()
# np_image = np.array(pil_image)
# mean = np.array([0.485, 0.456, 0.406])
# std = np.array([0.229, 0.224, 0.225])
# np_image = (np.transpose(np_image, (1, 2, 0)) - mean)/std
# np_image = np.transpose(np_image, (2, 0, 1))
# return np_image
def make_prediction(image_path, model, topk, device):
    ''' Predict the class (or classes) of an image using a trained deep learning model.
    '''
    print("Predicting ", topk, " flowers name for given image\n")

    # Preprocess, convert to a float tensor and move to the compute device.
    tensor = torch.from_numpy(process_image(image_path))
    tensor = tensor.type(torch.FloatTensor).to(device)
    # Add the batch dimension: (C, H, W) -> (1, C, H, W).
    tensor = tensor.reshape((-1, tensor.shape[0], tensor.shape[1], tensor.shape[2]))

    model.eval()
    with torch.no_grad():
        logps = model(tensor)

    # The model emits log-probabilities; exponentiate to get probabilities.
    probabilities = torch.exp(logps)
    top_p, top_class = probabilities.topk(topk, dim=1)

    # Back to host memory as numpy for post-processing.
    top_p = top_p.cpu().numpy()
    top_class = top_class.cpu().numpy()

    # Invert class_to_idx so predicted indices map back to class labels.
    idx_to_class = {idx: cls for cls, idx in model.class_to_idx.items()}
    top_class = [idx_to_class[i] for i in top_class[0, :]]
    return top_p[0], top_class
|
import sys
import os
import time
import functools
import itertools
from collections import OrderedDict
import contextlib
import queue
import threading
import subprocess
import signal
import shutil
import termios
import fcntl
import bisect
import numpy
import scipy
import scipy.signal
import pyaudio
import wave
import audioread
def datanode(gen_func):
    """Decorator: make a generator function return a DataNode when called."""
    @functools.wraps(gen_func)
    def wrapper(*args, **kwargs):
        return DataNode(gen_func(*args, **kwargs))
    return wrapper
class DataNodeStateError(Exception):
    """Raised when a DataNode is used in the wrong lifecycle state."""
class DataNode:
    """A generator wrapper with an explicit initialize/send/finalize lifecycle.

    The wrapped generator is primed on ``__enter__``, driven with ``send()``
    and closed on ``__exit__``; the generator's return value is kept in
    ``result`` once the node is finalized.
    """

    def __init__(self, generator):
        self.generator = generator
        self.initialized = False
        self.finalized = False
        self.result = None

    def send(self, value=None):
        """Send a value into the node and return the next produced datum.

        Raises
        ------
        DataNodeStateError
            If the node has not been initialized (entered) yet.
        StopIteration
            When the underlying generator is exhausted; carries ``result``.
        """
        if not self.initialized:
            raise DataNodeStateError("try to access un-initialized data node")
        if self.finalized:
            raise StopIteration(self.result)

        try:
            res = self.generator.send(value)
        except StopIteration as e:
            self.finalized = True
            self.result = e.value
            raise e
        except BaseException:
            # any other failure also finalizes the node (was a bare `except:`)
            self.finalized = True
            raise
        else:
            return res

    def join(self, value=None):
        """Drive this node from inside another generator, yielding its outputs."""
        try:
            while True:
                value = yield self.send(value)
        except StopIteration:
            return value

    def __next__(self):
        return self.send(None)

    def __iter__(self):
        return self

    def __enter__(self):
        """Prime the generator; entering an already-entered node is a no-op."""
        if self.finalized:
            raise DataNodeStateError("try to initialize finalized data node")
        if self.initialized:
            return self

        self.initialized = True
        try:
            next(self.generator)
        except StopIteration as e:
            self.finalized = True
            self.result = e.value
            return self
        else:
            return self

    def __exit__(self, type=None, value=None, traceback=None):
        if not self.initialized:
            raise DataNodeStateError("try to finalize un-initialized data node")
        if self.finalized:
            return False

        self.close()
        return False

    def close(self):
        """Close the underlying generator and mark the node finalized."""
        self.generator.close()
        self.finalized = True

    @staticmethod
    @datanode
    def from_iter(iterator):
        """Build a node that yields the items of `iterator`, ignoring input."""
        yield
        for data in iterator:
            yield data

    @staticmethod
    @datanode
    def from_func(function):
        """Build a node that maps every received datum through `function`."""
        data = yield
        while True:
            data = yield function(data)

    @staticmethod
    def wrap(node_like):
        """Coerce a DataNode, iterable or callable into a DataNode."""
        if isinstance(node_like, DataNode):
            return node_like
        elif hasattr(node_like, '__iter__'):
            return DataNode.from_iter(node_like)
        elif hasattr(node_like, '__call__'):
            return DataNode.from_func(node_like)
        else:
            raise ValueError("cannot wrap object into a DataNode")

    def exhaust(self, dt=0.0, interruptible=False):
        """Run the node to completion and return its result.

        Parameters
        ----------
        dt : float, optional
            Pause between successive sends.
        interruptible : bool, optional
            Install a temporary SIGINT handler so Ctrl-C raises
            KeyboardInterrupt.  The previous handler is restored on exit
            (the original version leaked the handler override).
        """
        stop_event = threading.Event()

        def SIGINT_handler(sig, frame):
            stop_event.set()

        prev_handler = None
        with self:
            if interruptible:
                prev_handler = signal.signal(signal.SIGINT, SIGINT_handler)
            try:
                while True:
                    if stop_event.wait(dt):
                        raise KeyboardInterrupt
                    try:
                        self.send(None)
                    except StopIteration as e:
                        return e.value
            finally:
                if prev_handler is not None:
                    signal.signal(signal.SIGINT, prev_handler)
# basic data nodes
@datanode
def delay(prepend):
    """A data node that delays its input stream.

    Parameters
    ----------
    prepend : int or DataNode
        Number of `None` values to emit first, or a node supplying the
        values emitted before the delayed input.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The delayed signal.
    """
    if isinstance(prepend, int):
        prepend = itertools.repeat(None, prepend)
    prepend = DataNode.wrap(prepend)

    with prepend:
        pending = list(prepend)

    item = yield
    while True:
        pending.append(item)
        item = yield pending.pop(0)
@datanode
def skip(node, prefeed):
    """A data node skips signals by feeding given values when initializing.

    Parameters
    ----------
    node : DataNode or node-like
        The node to advance.
    prefeed : int or DataNode
        The number of skips with prefeeding `None`, or data node of
        prefeeded values.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The advance signal.
    """
    node = DataNode.wrap(node)

    if isinstance(prefeed, int):
        prefeed = itertools.repeat(None, prefeed)
    prefeed = DataNode.wrap(prefeed)

    with prefeed:
        buffer = list(prefeed)

    with node:
        try:
            for dummy in buffer:
                # BUG FIX: the original sent `dummpy` (an undefined name),
                # raising NameError whenever the prefeed buffer was non-empty.
                node.send(dummy)
            yield from node.join((yield))
        except StopIteration:
            return
@datanode
def take(predicate):
    """A data node takes finite signals.

    Parameters
    ----------
    predicate : int or DataNode
        The number of periods to take, or a data node of predicate values.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The output signal.
    """
    if isinstance(predicate, int):
        predicate = itertools.repeat(True, predicate)
    predicate = DataNode.wrap(predicate)

    with predicate:
        item = yield
        try:
            while True:
                if not predicate.send(item):
                    return
                item = yield item
        except StopIteration:
            pass
@datanode
def pipe(*nodes):
    """A data node processing data sequentially: each stage's output feeds
    the next stage.

    Parameters
    ----------
    nodes : list of DataNode
        The data nodes to pipe.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The processed signal.
    """
    stages = [DataNode.wrap(n) for n in nodes]

    with contextlib.ExitStack() as stack:
        for stage in stages:
            stack.enter_context(stage)

        payload = yield
        while True:
            result = payload
            for stage in stages:
                try:
                    result = stage.send(result)
                except StopIteration:
                    # any exhausted stage terminates the whole pipe
                    return
            payload = yield result
@datanode
def pair(*nodes):
    """A data node processing data parallelly: the i-th element of each
    received tuple is fed to the i-th node.

    Parameters
    ----------
    nodes : list of DataNode
        The data nodes to pair.

    Receives
    --------
    data : tuple
        The input signal; its length should equal to number of nodes.

    Yields
    ------
    data : tuple
        The processed signal; its length should equal to number of nodes.
    """
    streams = [DataNode.wrap(n) for n in nodes]

    with contextlib.ExitStack() as stack:
        for stream in streams:
            stack.enter_context(stream)

        inputs = yield
        while True:
            outputs = []
            for stream, item in zip(streams, inputs):
                try:
                    outputs.append(stream.send(item))
                except StopIteration:
                    # any exhausted stream terminates the whole pair
                    return
            inputs = yield tuple(outputs)
@datanode
def chain(*nodes):
    """A data node processing data with chaining nodes: each node is driven
    to exhaustion in turn, handing its final value to the next one.

    Parameters
    ----------
    nodes : list of DataNode
        The data nodes to chain.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The processed signal.
    """
    stages = [DataNode.wrap(n) for n in nodes]

    with contextlib.ExitStack() as stack:
        for stage in stages:
            stack.enter_context(stage)

        data = yield
        for stage in stages:
            try:
                data = yield from stage.join(data)
            except StopIteration:
                return
@datanode
def branch(*nodes):
    """A data node processing data additionally: input is forwarded to a side
    pipeline for its effects, while being yielded back unchanged.

    Parameters
    ----------
    nodes : list of DataNode
        The sequence of data nodes to branch.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : any
        The input signal.
    """
    side = pipe(*nodes)

    with side:
        data = yield
        while True:
            try:
                side.send(data)
            except StopIteration:
                # side pipeline finished; stop branching
                break
            data = yield data
@datanode
def merge(*nodes):
    """A data node processing additional data: each input is paired with the
    next value produced by a side pipeline.

    Parameters
    ----------
    nodes : list of DataNode
        The sequence of data nodes to merge.

    Receives
    --------
    data : any
        The input signal.

    Yields
    ------
    data : tuple
        The input signal and additional data.
    """
    extra = pipe(*nodes)

    with extra:
        data = yield
        while True:
            try:
                data = yield (data, extra.send())
            except StopIteration:
                return
# signal analysis
@datanode
def frame(win_length, hop_length):
    """A data node to frame signal, prepend by zero.

    Parameters
    ----------
    win_length : int
        The length of framed data.
    hop_length : int
        The length of input data.

    Receives
    --------
    data : ndarray
        The input signal.

    Yields
    ------
    data : ndarray
        The framed signal.
    """
    if win_length < hop_length:
        # Window shorter than hop: just keep the tail of each input block.
        block = yield
        while True:
            block = yield numpy.copy(block[-win_length:])

    newest = yield
    window = numpy.zeros((win_length, *newest.shape[1:]), dtype=numpy.float32)
    window[-hop_length:] = newest

    while True:
        newest = yield numpy.copy(window)
        # slide the window left by hop_length and append the newest block
        window[:-hop_length] = window[hop_length:]
        window[-hop_length:] = newest
@datanode
def power_spectrum(win_length, samplerate=44100, windowing=True, weighting=True):
    """A data node maps signal `x` to power spectrum `J`.

    Without windowing and weighting, they should satisfy

        (J * df).sum(axis=0) == (x**2).mean(axis=0)

    where the time resolution `dt = 1/samplerate` and the frequency
    resolution `df = samplerate/win_length`.

    Parameters
    ----------
    win_length : int
        The length of input signal.
    samplerate : int, optional
        The sample rate of input signal, default is `44100`.
    windowing : bool or ndarray, optional
        The window function of signal, `True` for default Hann window,
        `False` for no windowing.
    weighting : bool or ndarray, optional
        The weight function of spectrum, `True` for default A-weighting,
        `False` for no weighting.

    Receives
    --------
    x : ndarray
        The input signal.

    Yields
    ------
    J : ndarray
        The power spectrum, with length `win_length//2+1`.
    """
    if isinstance(windowing, bool):
        windowing = get_Hann_window(win_length) if windowing else 1
    if isinstance(weighting, bool):
        weighting = get_A_weight(samplerate, win_length) if weighting else 1
    # BUG FIX: out-of-place multiply -- the original `weighting *= ...`
    # mutated a caller-supplied weight array in place.
    weighting = weighting * (2/win_length/samplerate)

    x = yield
    if x.ndim > 1:
        # broadcast window/weight over the channel axis for multi-channel input
        windowing = windowing[:, None] if numpy.ndim(windowing) > 0 else windowing
        weighting = weighting[:, None] if numpy.ndim(weighting) > 0 else weighting

    while True:
        x = yield weighting * numpy.abs(numpy.fft.rfft(x*windowing, axis=0))**2
@datanode
def onset_strength(df):
    """A data node maps spectrum `J` to onset strength `st` (spectral flux).

    Parameters
    ----------
    df : float
        The frequency resolution of input spectrum.

    Receives
    --------
    J : ndarray
        Input spectrum.

    Yields
    ------
    st : float
        The onset strength between previous and current input spectrum.
    """
    current = yield
    previous = numpy.zeros_like(current)
    while True:
        # only positive spectral differences contribute to the onset strength
        flux = numpy.mean(numpy.maximum(0.0, current - previous).sum(axis=0)) * df
        previous, current = current, (yield flux)
@datanode
def pick_peak(pre_max, post_max, pre_avg, post_avg, wait, delta):
    """A data node of peak detaction.

    A sample is reported as a peak when it is the maximum of its
    `pre_max`/`post_max` neighbourhood, exceeds the `pre_avg`/`post_avg`
    neighbourhood mean by at least `delta`, and at least `wait` samples have
    passed since the previous detection.  Detections are therefore delayed
    by `max(post_max, post_avg)` samples relative to the input.

    Parameters
    ----------
    pre_max : int
    post_max : int
    pre_avg : int
    post_avg : int
    wait : int
    delta : float

    Receives
    --------
    y : float
        The input signal.

    Yields
    ------
    detected : bool
        Whether the signal reaches its peak.
    """
    center = max(pre_max, pre_avg)
    delay = max(post_max, post_avg)
    buffer = numpy.zeros(center+delay+1, dtype=numpy.float32)
    # NOTE: max_buffer and avg_buffer are *views* into `buffer`; the in-place
    # shift at the bottom of the loop updates them implicitly.
    max_buffer = buffer[center-pre_max:center+post_max+1]
    avg_buffer = buffer[center-pre_avg:center+post_avg+1]
    index = -1-delay
    prev_index = -1-wait

    #                          center
    #        pre_avg           |    post_avg
    #       ___________        |   ___________
    #      /           \       v  /           \
    #     [x, x, x, x, x, x, x, x, x, x, x]
    #            \_____/     \_____/   \____ new data
    #            pre_max    post_max

    buffer[-1] = yield
    while True:
        index += 1

        strength = buffer[center]
        detected = True
        detected = detected and index > prev_index + wait
        detected = detected and strength == max_buffer.max()
        detected = detected and strength >= avg_buffer.mean() + delta

        if detected:
            prev_index = index
        # shift the whole window left by one and append the newest sample
        buffer[:-1] = buffer[1:]
        buffer[-1] = yield detected
@datanode
def chunk(node, chunk_shape=1024):
    """Make a data node be able to produce fixed width data.

    Output from `node` (arbitrary lengths) is repacked into chunks of
    exactly `chunk_shape`; when the node ends mid-chunk, the trailing
    partially-filled (zero-padded) chunk is still emitted.

    Parameters
    ----------
    node : DataNode
        The data node to chunk.
    chunk_shape : int or tuple, optional
        The shape of chunk, default is `1024`.

    Yields
    ------
    data : ndarray
        The chunked signal with shape `chunk_shape`.
    """
    node = DataNode.wrap(node)

    with node:
        try:
            yield
            chunk = numpy.zeros(chunk_shape, dtype=numpy.float32)
            index = 0   # write position inside `chunk`
            data = node.send()
            jndex = 0   # read position inside `data`

            while True:
                # copy as much as fits in the chunk / remains in the data
                length = min(chunk.shape[0]-index, data.shape[0]-jndex)
                chunk[index:index+length] = data[jndex:jndex+length]
                index += length
                jndex += length

                if index == chunk.shape[0]:
                    yield chunk
                    chunk = numpy.zeros(chunk_shape, dtype=numpy.float32)
                    index = 0

                if jndex == data.shape[0]:
                    data = node.send()
                    jndex = 0

        except StopIteration:
            # source exhausted: flush the trailing partial chunk, zero-padded
            if index > 0:
                yield chunk
@datanode
def unchunk(node, chunk_shape=1024):
    """Make a data node be able to receive data with any length.

    Incoming data of arbitrary length is repacked into chunks of exactly
    `chunk_shape` before being fed to `node`; on close, a trailing partial
    (zero-padded) chunk is flushed to the node.

    Parameters
    ----------
    node : DataNode
        The data node to unchunk.
    chunk_shape : int or tuple, optional
        The received shape of given data node, default is `1024`.

    Receives
    ------
    data : ndarray
        The unchunked signal with any length.
    """
    node = DataNode.wrap(node)

    with node:
        try:
            chunk = numpy.zeros(chunk_shape, dtype=numpy.float32)
            index = 0   # write position inside `chunk`
            data = yield
            jndex = 0   # read position inside `data`

            while True:
                length = min(chunk.shape[0]-index, data.shape[0]-jndex)
                chunk[index:index+length] = data[jndex:jndex+length]
                index += length
                jndex += length

                if index == chunk.shape[0]:
                    node.send(chunk)
                    chunk = numpy.zeros(chunk_shape, dtype=numpy.float32)
                    index = 0

                if jndex == data.shape[0]:
                    data = yield
                    jndex = 0

        except StopIteration:
            # downstream node finished: silently stop accepting data
            return

        except GeneratorExit:
            # we are being closed: flush the trailing partial chunk first
            if index > 0:
                try:
                    node.send(chunk)
                except StopIteration:
                    return
@datanode
def attach(node):
    """Mix the signal produced by `node` into the received buffers.

    Each received ndarray gets `node`'s output added onto it in place
    (element-wise `+=`); when `node` is exhausted mid-buffer, the
    partially-mixed buffer is still yielded.
    """
    with node:
        try:
            data = yield
            index = 0   # write position inside `data`
            signal = node.send()
            jndex = 0   # read position inside `signal`

            while True:
                length = min(data.shape[0]-index, signal.shape[0]-jndex)
                data[index:index+length] += signal[jndex:jndex+length]
                index += length
                jndex += length

                if index == data.shape[0]:
                    data = yield data
                    index = 0

                if jndex == signal.shape[0]:
                    signal = node.send()
                    jndex = 0

        except StopIteration:
            # node exhausted: emit whatever was mixed into the current buffer
            if index > 0:
                yield data
def rechannel(channels):
    """Return a function that remaps the channel layout of a signal.

    Parameters
    ----------
    channels : int or list
        The channel mapping: `0` mixes down to mono (1-D), a positive int
        duplicates the mono mix across that many columns, and a list picks
        explicit channel indices.

    Receives
    ------
    data : ndarray
        The original signal.

    Yields
    ------
    data : ndarray
        The rechanneled signal.
    """
    if channels == 0:
        # target mono: average multi-channel input down to one dimension
        def remap(data):
            return data if data.ndim == 1 else numpy.mean(data, axis=1)
    elif isinstance(channels, int):
        # duplicate the mono mix across `channels` identical columns
        def remap(data):
            mono = data if data.ndim == 1 else numpy.mean(data, axis=1)
            return mono[:, None][:, [0]*channels]
    else:
        # explicit channel-index mapping (mono input is widened first)
        def remap(data):
            return (data[:, None] if data.ndim == 1 else data)[:, channels]
    return remap
@datanode
def resample(ratio):
    """A data node to resample data.
    Parameters
    ----------
    ratio : float or tuple
        The resampling factor.
    Receives
    ------
    data : ndarray
        The original signal.
    Yields
    ------
    data : ndarray
        The resampled signal.
    """
    index = 0.0  # fractional output position carried across buffers
    # NOTE(review): an int ratio is treated as an (up, down) tuple and would
    # fail to unpack — callers must pass a float or a 2-tuple; confirm.
    up, down = (ratio, 1) if isinstance(ratio, float) else ratio
    data = yield
    while True:
        next_index = index + data.shape[0] * up/down
        # Emit only whole output samples; keep the fractional remainder so
        # rounding error does not accumulate across buffers.
        length = int(next_index) - int(index)
        data_ = scipy.signal.resample(data, length, axis=0)
        index = next_index % 1.0
        data = yield data_
@datanode
def tslice(node, samplerate, start=None, end=None):
    """A data node sliced by given timespan.
    Parameters
    ----------
    node : DataNode
        The data node to slice.
    samplerate : int
        The sample rate of data.
    start : float, optional
        The start time, default is no slicing.
    end : float, optional
        The end time, default is no slicing.
    Yields
    ------
    data : ndarray
        The sliced signal.
    """
    node = DataNode.wrap(node)
    index = 0  # cumulative sample count up to the end of the current buffer
    # Convert the timespan from seconds to sample counts.
    start = max(0, round(start*samplerate)) if start is not None else 0
    end = round(end*samplerate) if end is not None else end
    with node:
        for data in node:
            index += data.shape[0]
            if index <= start:
                continue  # buffer lies entirely before the slice
            if index - data.shape[0] <= start:
                # First buffer crossing `start`: trim its head.
                # NOTE(review): this bare yield emits once before any data —
                # presumably the priming step of the datanode protocol;
                # confirm against DataNode.wrap.
                yield
                data = data[start-index:]  # negative index keeps the tail
            if end is not None and index > end:
                data = data[:end-index]  # negative index trims past `end`
            yield data
            if end is not None and index > end:
                break
class IOCancelledError(Exception):
    """Raised when a file load/save operation is cancelled via its stop event."""
    pass
@datanode
def load(filename, stop_event=None):
    """A data node to load a sound file.

    Plain ``.wav`` files are read with the stdlib `wave` module; any other
    extension is decoded through `audioread`.

    Parameters
    ----------
    filename : str
        The sound file to load.
    stop_event : threading.Event, optional
        The event to cancel loading the file.

    Yields
    ------
    data : ndarray
        Chunks of the loaded signal, shaped (frames, channels), float32.

    Raises
    ------
    IOCancelledError
        If `stop_event` is set while the file is being read.
    """
    if stop_event is None:
        stop_event = threading.Event()
    if filename.endswith(".wav"):
        with wave.open(filename, 'rb') as file:
            nchannels = file.getnchannels()
            width = file.getsampwidth()
            scale = 2.0 ** (1 - 8*width)
            fmt = f'<i{width}'
            def frombuffer(data):
                return scale * numpy.frombuffer(data, fmt).astype(numpy.float32).reshape(-1, nchannels)
            remaining = file.getnframes()
            while remaining > 0:
                data = file.readframes(256)
                if not data:
                    break  # defensive: truncated file, avoid spinning forever
                # `readframes` returns width*nchannels bytes per frame; the
                # original divided by `width` alone, over-counting frames for
                # multi-channel files and stopping after 1/nchannels of them.
                remaining -= len(data) // (width * nchannels)
                yield frombuffer(data)
                if stop_event.is_set():
                    raise IOCancelledError(f"The operation of loading file {filename} has been cancelled.")
    else:
        with audioread.audio_open(filename) as file:
            width = 2
            scale = 2.0 ** (1 - 8*width)
            fmt = f'<i{width}'
            def frombuffer(data):
                return scale * numpy.frombuffer(data, fmt).astype(numpy.float32).reshape(-1, file.channels)
            for data in file:
                yield frombuffer(data)
                if stop_event.is_set():
                    raise IOCancelledError(f"The operation of loading file {filename} has been cancelled.")
@datanode
def save(filename, samplerate=44100, channels=1, width=2, stop_event=None):
    """A data node to save as .wav file.
    Parameters
    ----------
    filename : str
        The sound file to save.
    samplerate : int, optional
        The sample rate, default is `44100`.
    channels : int, optional
        The number of channels, default is `1`.
    width : int, optional
        The sample width in bytes.
    stop_event : threading.Event
        The event to cancel saving file.
    Receives
    ------
    data : ndarray
        The signal to save.
    """
    if stop_event is None:
        stop_event = threading.Event()
    with wave.open(filename, 'wb') as file:
        # Convert float samples to signed integers of `width` bytes.
        scale = 2.0 ** (8*width - 1)
        fmt = f'<i{width}'
        def tobuffer(data):
            return (data * scale).astype(fmt).tobytes()
        file.setsampwidth(width)
        file.setnchannels(channels)
        file.setframerate(samplerate)
        file.setnframes(0)  # the wave module patches the real count on close
        while True:
            # Each received buffer is appended directly to the file.
            file.writeframes(tobuffer((yield)))
            if stop_event.is_set():
                raise IOCancelledError(f"The operation of saving file (unknown) has been cancelled.")
# others
class TimedVariable:
    """A thread-safe variable whose value changes at scheduled times.

    `set` enqueues a (value, start, duration) item; `get(time)` resolves the
    value effective at `time`, falling back to the default value once an
    item's duration has elapsed.
    """

    def __init__(self, value=None, duration=numpy.inf):
        self._queue = queue.Queue()
        self._lock = threading.Lock()
        self._scheduled = []
        self._default_value = value
        self._default_duration = duration
        # The currently-effective (value, start, duration); start None means
        # "starts whenever it is first observed".
        self._item = (value, None, numpy.inf)

    def get(self, time, ret_sched=False):
        """Return the value effective at `time` (the full item if `ret_sched`)."""
        with self._lock:
            value, start, duration = self._item
            if start is None:
                start = time
            # Drain newly-queued items; items without an explicit start time
            # take effect immediately.
            while not self._queue.empty():
                queued = self._queue.get()
                if queued[1] is None:
                    queued = (queued[0], time, queued[2])
                self._scheduled.append(queued)
            self._scheduled.sort(key=lambda item: item[1])
            # Apply every scheduled item whose start time has passed.
            while self._scheduled and self._scheduled[0][1] <= time:
                value, start, duration = self._scheduled.pop(0)
            # Expire the current item once its duration has elapsed.
            if start + duration <= time:
                value, start, duration = self._default_value, None, numpy.inf
            self._item = (value, start, duration)
            return self._item if ret_sched else value

    def set(self, value, start=None, duration=None):
        """Schedule `value` to take effect at `start` for `duration` seconds."""
        self._queue.put((value, start, self._default_duration if duration is None else duration))

    def reset(self, start=None):
        """Schedule a return to the default value at `start`."""
        self._queue.put((self._default_value, start, numpy.inf))
class Scheduler(DataNode):
    """A data node schedule given data nodes dynamically.
    Receives
    --------
    data : tuple
        The input signal and the meta signal.
    Yields
    ------
    data : any
        The output signal.
    """

    def __init__(self):
        # Pending (key, node, zindex) registrations, consumed by proxy().
        self.queue = queue.Queue()
        super().__init__(self.proxy())

    def proxy(self):
        # Active nodes, keyed by their _NodeKey handle.
        nodes = OrderedDict()
        try:
            data, *meta = yield
            while True:
                # Apply queued registrations/removals before each tick.
                while not self.queue.empty():
                    key, node, zindex = self.queue.get()
                    if key in nodes:
                        nodes[key][0].__exit__()
                        del nodes[key]
                    if node is not None:
                        node.__enter__()
                        # Normalize zindex to a callable so ordering can be
                        # re-evaluated lazily on every tick (default binds the
                        # current value to avoid late-binding surprises).
                        zindex_func = zindex if hasattr(zindex, '__call__') else lambda z=zindex: z
                        nodes[key] = (node, zindex_func)
                # Feed the data through each node in z-order; sorted() takes a
                # snapshot, so deleting finished nodes inside the loop is safe.
                for key, (node, _) in sorted(nodes.items(), key=lambda item: item[1][1]()):
                    try:
                        data = node.send((data, *meta))
                    except StopIteration:
                        del nodes[key]
                data, *meta = yield data
        finally:
            # Finalize every still-registered node on shutdown.
            for node, _ in nodes.values():
                node.__exit__()

    class _NodeKey:
        # Handle returned by add_node(); also usable as a context manager
        # that removes the node on exit.
        def __init__(self, parent, node):
            self.parent = parent
            self.node = node

        def is_initialized(self):
            return self.node.initialized

        def is_finalized(self):
            return self.node.finalized

        def remove(self):
            self.parent.remove_node(self)

        def __enter__(self):
            return self

        def __exit__(self, type, value, traceback):
            self.remove()

    def add_node(self, node, zindex=(0,)):
        """Register `node` for scheduling; returns a removable key."""
        key = self._NodeKey(self, node)
        self.queue.put((key, node, zindex))
        return key

    def remove_node(self, key):
        """Unregister the node previously added under `key`."""
        self.queue.put((key, None, (0,)))
@datanode
def tick(dt, t0=0.0, shift=0.0, stop_event=None):
    """Yield the elapsed time (plus `shift`) once every `dt` seconds.

    Each send blocks until the next deadline; the node stops early when
    `stop_event` is set while waiting.
    """
    if stop_event is None:
        stop_event = threading.Event()
    ref_time = time.time()
    yield  # initial yield before the first tick
    for i in itertools.count():
        # Absolute deadlines (ref_time + t0 + i*dt) avoid cumulative drift.
        if stop_event.wait(max(0.0, ref_time+t0+i*dt - time.time())):
            break
        yield time.time()-ref_time+shift
@datanode
def timeit(node, log=print):
    """Wrap `node`, timing every `send`, and log statistics on exit.

    Reports the call count, mean±stddev per-call CPU time, the average of the
    N best/worst calls, and the fraction of wall-clock time spent inside the
    node ("efficiency").
    """
    # Prefer a per-thread CPU clock so other threads don't pollute timings.
    if hasattr(time, 'thread_time'):
        get_time = time.thread_time
    elif hasattr(time, 'clock_gettime'):
        get_time = lambda: time.clock_gettime(time.CLOCK_THREAD_CPUTIME_ID)
    else:
        get_time = time.perf_counter
    N = 10  # number of extreme samples tracked for best/worst averages
    start = 0.0
    stop = numpy.inf
    count = 0
    total = 0.0
    total2 = 0.0  # sum of squared times, for the variance
    worst = [0.0]*N
    best = [numpy.inf]*N
    with node:
        try:
            data = yield
            start = stop = time.time()
            while True:
                t0 = get_time()
                data = node.send(data)
                t = get_time() - t0
                stop = time.time()
                count += 1
                total += t
                total2 += t**2
                # Keep the N largest samples in `worst`, N smallest in `best`.
                bisect.insort(worst, t)
                worst.pop(0)
                bisect.insort_left(best, t)
                best.pop()
                data = yield data
        except StopIteration:
            return
        finally:
            stop = time.time()
            if count == 0:
                log(f"count=0")
            else:
                avg = total/count
                dev = (total2/count - avg**2)**0.5
                eff = total/(stop - start)
                if count < N:
                    log(f"count={count}, avg={avg*1000:5.3f}±{dev*1000:5.3f}ms ({eff: >6.1%})")
                else:
                    best_time = sum(best)/N
                    worst_time = sum(worst)/N
                    log(f"count={count}, avg={avg*1000:5.3f}±{dev*1000:5.3f}ms"
                        f" ({best_time*1000:5.3f}ms ~ {worst_time*1000:5.3f}ms) ({eff: >6.1%})")
@datanode
def terminal_size():
    """Yield the current terminal size on every send.

    Installs a SIGWINCH handler so the size is re-queried only when the
    terminal is actually resized; otherwise the cached size is returned.
    """
    resize_event = threading.Event()
    def SIGWINCH_handler(sig, frame):
        resize_event.set()
    resize_event.set()  # force an initial size query
    signal.signal(signal.SIGWINCH, SIGWINCH_handler)
    size = shutil.get_terminal_size()
    yield
    while True:
        if resize_event.is_set():
            resize_event.clear()
            size = shutil.get_terminal_size()
        # The original yielded only inside the `if`, so between resizes the
        # loop busy-spun and `send` never returned; always yield the latest
        # (cached) size instead.
        yield size
# async processes
def _thread_task(thread, stop_event, error):
    # Drive a worker thread as a task: one priming tick, then one tick per
    # liveness check.  On close/exit: signal `stop_event`, join the thread,
    # and re-raise any exception the worker reported through `error`.
    yield
    thread.start()
    try:
        yield
        while True:
            if not thread.is_alive():
                return
            yield
    finally:
        stop_event.set()
        if thread.is_alive():
            thread.join()
        if not error.empty():
            raise error.get()
def _stream_task(stream, error):
    # Drive a PyAudio stream as a task: each tick checks that the stream is
    # still active.  On close/exit: stop and close the stream, then re-raise
    # any exception the stream callback put into `error`.
    yield
    stream.start_stream()
    try:
        yield
        while stream.is_active():
            yield
    finally:
        stream.stop_stream()
        stream.close()
        if not error.empty():
            raise error.get()
@datanode
def subprocess_task(command):
    """Run `command` as a subprocess; each tick polls it.

    Parameters
    ----------
    command : list of str
        The command to execute.

    Returns
    -------
    returncode : int
        The exit status of the process.
    """
    yield
    proc = subprocess.Popen(command)
    try:
        yield
        while proc.poll() is None:
            yield
    finally:
        proc.kill()
        # Reap the process so `returncode` is populated even when the task
        # is cancelled before the process exits on its own.
        proc.wait()
    return proc.returncode
@datanode
def create_task(func):
    """Run `func(stop_event)` on a worker thread as a task.

    Ticking the node keeps the thread alive; closing it sets the stop event.
    Returns the function's result and re-raises any exception it raised.
    """
    result = queue.Queue()
    error = queue.Queue()
    stop_event = threading.Event()
    def worker():
        try:
            result.put(func(stop_event))
        except Exception as exc:
            error.put(exc)
    yield from _thread_task(threading.Thread(target=worker), stop_event, error)
    if result.empty():
        raise ValueError("empty result")
    return result.get()
@datanode
def interval(producer=lambda _:None, consumer=lambda _:None, dt=0.0, t0=0.0):
    """Drive `producer` on a worker thread at a fixed period `dt` (first tick
    at `t0`), feeding each produced value into `consumer`.

    Ticking the outer node keeps the worker alive; closing it stops the loop.
    """
    producer = DataNode.wrap(producer)
    consumer = DataNode.wrap(consumer)
    stop_event = threading.Event()
    error = queue.Queue()
    def run():
        try:
            ref_time = time.time()
            for i, data in enumerate(producer):
                # Sleep until the i-th absolute deadline, waking early on stop.
                delta = ref_time+t0+i*dt - time.time()
                if stop_event.wait(delta) if delta > 0 else stop_event.is_set():
                    break
                try:
                    consumer.send(data)
                except StopIteration:
                    return
        except Exception as e:
            error.put(e)
    with producer, consumer:
        thread = threading.Thread(target=run)
        yield from _thread_task(thread, stop_event, error)
@datanode
def record(manager, node, samplerate=44100, buffer_shape=1024, format='f4', device=-1):
    """A context manager of input stream processing by given node.
    Parameters
    ----------
    manager : pyaudio.PyAudio
        The PyAudio object.
    node : DataNode
        The data node to process recorded sound.
    samplerate : int, optional
        The sample rate of input signal, default is `44100`.
    buffer_shape : int or tuple, optional
        The shape of input signal, default is `1024`.
    format : str, optional
        The sample format of input signal, default is `'f4'`.
    device : int, optional
        The input device index, and `-1` for default input device.
    Yields
    ------
    input_stream : pyaudio.Stream
        The stopped input stream to record sound.
    """
    node = DataNode.wrap(node)
    # Map numpy-style sample format codes to PyAudio format constants.
    pa_format = {'f4': pyaudio.paFloat32,
                 'i4': pyaudio.paInt32,
                 'i2': pyaudio.paInt16,
                 'i1': pyaudio.paInt8,
                 'u1': pyaudio.paUInt8,
                 }[format]
    # Full-scale value for the integer formats (2**(bits-1)).
    scale = 2.0 ** (8*int(format[1]) - 1)
    # Normalizers converting raw samples to floats.
    # NOTE(review): the 'u1' midpoint of 64 looks off — unsigned 8-bit audio
    # conventionally centers at 128; confirm before relying on 'u1'.
    normalize = {'f4': (lambda d: d),
                 'i4': (lambda d: d / scale),
                 'i2': (lambda d: d / scale),
                 'i1': (lambda d: d / scale),
                 'u1': (lambda d: (d - 64) / 64),
                 }[format]
    if device == -1:
        device = None  # PyAudio interprets None as the default device
    error = queue.Queue()
    length, channels = (buffer_shape, 1) if isinstance(buffer_shape, int) else buffer_shape
    def input_callback(in_data, frame_count, time_info, status):
        # Runs on PyAudio's callback thread; exceptions are forwarded through
        # `error` and stop the stream.
        try:
            data = normalize(numpy.frombuffer(in_data, dtype=format).reshape(buffer_shape))
            node.send(data)
            return b"", pyaudio.paContinue
        except StopIteration:
            return b"", pyaudio.paComplete
        except Exception as e:
            error.put(e)
            return b"", pyaudio.paComplete
    input_stream = manager.open(format=pa_format,
                                channels=channels,
                                rate=samplerate,
                                input=True,
                                output=False,
                                input_device_index=device,
                                frames_per_buffer=length,
                                stream_callback=input_callback,
                                start=False)
    with node:
        yield from _stream_task(input_stream, error)
@datanode
def play(manager, node, samplerate=44100, buffer_shape=1024, format='f4', device=-1):
    """A context manager of output stream processing by given node.
    Parameters
    ----------
    manager : pyaudio.PyAudio
        The PyAudio object.
    node : DataNode
        The data node to process playing sound.
    samplerate : int, optional
        The sample rate of output signal, default is `44100`.
    buffer_shape : int or tuple, optional
        The length of output signal, default is `1024`.
    format : str, optional
        The sample format of output signal, default is `'f4'`.
    device : int, optional
        The output device index, and `-1` for default output device.
    Yields
    ------
    output_stream : pyaudio.Stream
        The stopped output stream to play sound.
    """
    node = DataNode.wrap(node)
    # Map numpy-style sample format codes to PyAudio format constants.
    pa_format = {'f4': pyaudio.paFloat32,
                 'i4': pyaudio.paInt32,
                 'i2': pyaudio.paInt16,
                 'i1': pyaudio.paInt8,
                 'u1': pyaudio.paUInt8,
                 }[format]
    # Full-scale value for the integer formats (2**(bits-1)).
    scale = 2.0 ** (8*int(format[1]) - 1)
    # Denormalizers converting float samples to raw output values (the
    # inverse of the mapping used by `record`).
    normalize = {'f4': (lambda d: d),
                 'i4': (lambda d: d * scale),
                 'i2': (lambda d: d * scale),
                 'i1': (lambda d: d * scale),
                 'u1': (lambda d: d * 64 + 64),
                 }[format]
    if device == -1:
        device = None  # PyAudio interprets None as the default device
    error = queue.Queue()
    length, channels = (buffer_shape, 1) if isinstance(buffer_shape, int) else buffer_shape
    def output_callback(in_data, frame_count, time_info, status):
        # Runs on PyAudio's callback thread; pulls the next buffer from the
        # node, forwarding exceptions through `error`.
        try:
            data = node.send(None)
            out_data = normalize(data).astype(format).tobytes()
            return out_data, pyaudio.paContinue
        except StopIteration:
            return b"", pyaudio.paComplete
        except Exception as e:
            error.put(e)
            return b"", pyaudio.paComplete
    output_stream = manager.open(format=pa_format,
                                 channels=channels,
                                 rate=samplerate,
                                 input=False,
                                 output=True,
                                 output_device_index=device,
                                 frames_per_buffer=length,
                                 stream_callback=output_callback,
                                 start=False)
    with node:
        yield from _stream_task(output_stream, error)
@contextlib.contextmanager
def input_ctxt(stream):
    """Put `stream` (a tty) into non-blocking, non-echoing, async-notify mode.

    Yields a threading.Event that is set (via SIGIO) whenever input becomes
    available on the stream.  Terminal attributes, file-status flags and the
    fd owner are all restored on exit, in reverse order of modification.
    """
    fd = stream.fileno()
    old_attrs = termios.tcgetattr(fd)
    new_attrs = list(old_attrs)
    # Raw-ish mode: no line buffering, no echo.
    new_attrs[3] = new_attrs[3] & ~termios.ICANON & ~termios.ECHO
    # Fix: F_GETFL *reads* the current flags; the original used F_SETFL here,
    # which cleared the flags (set them to 0) and recorded 0 as the value to
    # restore afterwards.
    old_flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    new_flags = old_flags | os.O_NONBLOCK | os.O_ASYNC
    old_owner = fcntl.fcntl(fd, fcntl.F_GETOWN)
    new_owner = os.getpid()  # deliver SIGIO to this process
    io_event = threading.Event()
    def SIGIO_handler(signal, frame):
        io_event.set()
    signal.signal(signal.SIGIO, SIGIO_handler)
    try:
        fcntl.fcntl(fd, fcntl.F_SETFL, new_flags)
        fcntl.fcntl(fd, fcntl.F_SETOWN, new_owner)
        termios.tcsetattr(fd, termios.TCSANOW, new_attrs)
        yield io_event
    finally:
        termios.tcsetattr(fd, termios.TCSADRAIN, old_attrs)
        fcntl.fcntl(fd, fcntl.F_SETOWN, old_owner)
        fcntl.fcntl(fd, fcntl.F_SETFL, old_flags)
@datanode
def input(node, stream=None):
    """Forward keyboard input to `node` as (timestamp, keystring) pairs.

    Note: shadows the builtin `input` within this module.
    """
    node = DataNode.wrap(node)
    MAX_KEY_LEN = 16  # longest escape sequence read in one go
    dt = 0.01  # polling period while waiting for SIGIO
    if stream is None:
        stream = sys.stdin
    stop_event = threading.Event()
    error = queue.Queue()
    with input_ctxt(stream) as io_event:
        def run():
            try:
                ref_time = time.time()
                while True:
                    occured = io_event.wait(dt)
                    if stop_event.is_set():
                        break
                    if not occured:
                        continue
                    io_event.clear()
                    key = stream.read(MAX_KEY_LEN)
                    if key:
                        try:
                            node.send((time.time()-ref_time, key))
                        except StopIteration:
                            return
                        # A full-length read may mean more input is pending;
                        # re-arm the event so the next iteration reads again.
                        if len(key) == MAX_KEY_LEN:
                            io_event.set()
            except Exception as e:
                error.put(e)
        with node:
            thread = threading.Thread(target=run)
            yield from _thread_task(thread, stop_event, error)
@contextlib.contextmanager
def show_ctxt(stream, hide_cursor=False, end="\n"):
    """Wrap terminal rendering: optionally hide the cursor (stdout only) for
    the duration, and always write `end` and flush on exit."""
    use_hide = hide_cursor and stream == sys.stdout
    try:
        if use_hide:
            stream.write("\x1b[?25l")  # hide cursor
        yield
    finally:
        if use_hide:
            stream.write("\x1b[?25h")  # show cursor again
        stream.write(end)
        stream.flush()
@datanode
def show(node, dt, t0=0, stream=None, hide_cursor=False, end="\n"):
    """Render views produced by `node` to `stream` at a fixed frame period.

    `node` is sent a bool telling it whether the previous frame was actually
    shown; frames that miss their deadline are dropped.
    """
    node = DataNode.wrap(node)
    if stream is None:
        stream = sys.stdout
    stop_event = threading.Event()
    error = queue.Queue()
    def run():
        try:
            ref_time = time.time()
            # stream.write("\n")
            # dropped = 0
            shown = False
            i = -1
            while True:
                try:
                    view = node.send(shown)
                except StopIteration:
                    break
                shown = False
                i += 1
                # Time remaining until this frame's absolute deadline.
                delta = ref_time+t0+i*dt - time.time()
                if delta < 0:
                    # Missed the deadline: drop this frame.
                    # dropped += 1
                    continue
                if stop_event.wait(delta):
                    break
                # stream.write(f"\x1b[A(spend:{(dt-delta)/dt:.3f}, drop:{dropped})\n")
                stream.write(view)
                stream.flush()
                shown = True
        except Exception as e:
            error.put(e)
    with show_ctxt(stream, hide_cursor, end):
        with node:
            thread = threading.Thread(target=run)
            yield from _thread_task(thread, stop_event, error)
# not data nodes
def filter(x, distr):
    """Apply the frequency-domain response `distr` to signal `x` along axis 0.

    Note: intentionally shadows the builtin `filter` within this module.
    """
    spectrum = numpy.fft.rfft(x, axis=0)
    return numpy.fft.irfft(spectrum * distr, axis=0)
def pulse(samplerate=44100, freq=1000.0, decay_time=0.01, amplitude=1.0, length=None):
    """Synthesize an exponentially decaying sine pulse.

    The envelope halves every `decay_time` seconds; `length` defaults to
    `decay_time`.  Returns a float32 ndarray of `int(length*samplerate)`
    samples.
    """
    if length is None:
        length = decay_time
    n_samples = int(length * samplerate)
    t = numpy.linspace(0, length, n_samples, endpoint=False, dtype=numpy.float32)
    envelope = amplitude * 2 ** (-t / decay_time)
    return envelope * numpy.sin(2 * numpy.pi * freq * t)
def power2db(power, scale=(1e-5, 1e6)):
    """Convert power to decibels, scaling by `scale[1]` and clipping the
    scaled value below at `scale[0]` (i.e. a -50 dB floor by default)."""
    clipped = numpy.maximum(scale[0], power * scale[1])
    return 10.0 * numpy.log10(clipped)
def get_Hann_window(win_length):
    """Return a Hann window of `win_length` samples, scaled to unit RMS gain."""
    phase = numpy.linspace(0, numpy.pi, win_length)
    window = numpy.sin(phase) ** 2
    rms_gain = (3/8) ** 0.5  # analytic RMS of a Hann window: (window**2).mean()**0.5
    return window / rms_gain
def get_half_Hann_window(win_length):
    """Return the rising half of a Hann window (0 up to 1) of `win_length` samples."""
    phase = numpy.linspace(0, numpy.pi/2, win_length)
    return numpy.sin(phase) ** 2
def get_A_weight(samplerate, win_length):
    """Return the (squared-magnitude) A-weighting curve on the rFFT bins of a
    window.

    The curve is normalized to 1 at 1000 Hz and zeroed outside 10-20000 Hz.
    """
    freqs = numpy.arange(win_length//2 + 1) * (samplerate/win_length)

    def response(f):
        # Squared magnitude of the A-weighting transfer function with the
        # standard pole frequencies f1..f4.
        f1, f2, f3, f4 = 20.6, 107.7, 737.9, 12194.0
        num = (f**4 * f4**2) ** 2
        den = (f**2 + f1**2)**2 * (f**2 + f2**2) * (f**2 + f3**2) * (f**2 + f4**2)**2
        return num / den

    # Normalize at 1 kHz (the reference value equals 10**-0.1).
    weight = response(freqs) / response(1000.0)
    weight[freqs < 10] = 0.0
    weight[freqs > 20000] = 0.0
    return weight
def load_sound(filepath, samplerate=None, channels=None, volume=0.0, start=None, end=None, chunk_length=1024, stop_event=None):
    """Load a sound file fully into memory, with optional slicing, gain,
    resampling, rechanneling and chunking.

    Returns a list of ndarray chunks (a single-element list when
    `chunk_length` is None).
    """
    # Probe the file's native sample rate first (needed for time slicing).
    with audioread.audio_open(filepath) as file:
        file_samplerate = file.samplerate
    filenode = load(filepath, stop_event)
    if start is not None or end is not None:
        filenode = tslice(filenode, file_samplerate, start, end)
    with filenode:
        sound = numpy.concatenate(tuple(filenode), axis=0)
    if volume != 0:
        sound = sound * 10**(volume/20)  # apply gain given in dB
    # resample
    if samplerate is not None and file_samplerate != samplerate:
        length = int(sound.shape[0] * samplerate/file_samplerate)
        sound = scipy.signal.resample(sound, length, axis=0)
    # rechannel
    if sound.ndim == 1:
        sound = sound[:,None]
    if isinstance(channels, int):
        if channels == 0:
            # Collapse to a 1D mono signal.
            sound = numpy.mean(sound, axis=1)
        elif channels != sound.shape[1]:
            # Downmix to mono, then duplicate across `channels` columns.
            sound = numpy.mean(sound, axis=1, keepdims=True)
            sound = sound[:, [0]*channels]
    elif isinstance(channels, list):
        sound = sound[:, channels]
    elif channels is None:
        pass
    else:
        raise ValueError(f"invalid channel map: {repr(channels)}")
    # chunk
    if chunk_length is not None:
        shape = (chunk_length, *sound.shape[1:])
        with chunk([sound], shape) as node:
            sound = list(node)
    else:
        sound = [sound]
    return sound
|
<reponame>molguin92/ganglion-biosensing
from __future__ import annotations
import logging
import threading
import time
from typing import Any, Callable, Iterator, List, Optional, Tuple
import numpy as np
from bitstring import BitArray
from bluepy.btle import DefaultDelegate, Peripheral
from ganglion_biosensing.board.board import BaseBiosensingBoard, BoardType, \
OpenBCISample
from ganglion_biosensing.util.bluetooth import decompress_signed, find_mac
from ganglion_biosensing.util.constants.ganglion import GanglionCommand, \
GanglionConstants
# TODO: implement accelerometer reading
class GanglionBoard(BaseBiosensingBoard):
    """
    Represents an OpenBCI Ganglion board, providing methods to access the
    streaming data in an asynchronous manner using queues.
    The easiest way to use this class is as a context manager, see the
    included examples for reference.
    """
    def __init__(self,
                 mac: Optional[str] = None,
                 callback: Optional[Callable[[OpenBCISample], Any]] = None):
        """
        Initialize this board, indicating the MAC address of the target board.
        If the MAC address is not provided, automatic discovery will be
        attempted, which might require root privileges.
        Note that this doesn't actually connect to the board until connect()
        is manually called (or invoked through a context manager).
        :param mac: MAC address of the board.
        :param callback: optional callable invoked with every received sample.
        """
        super().__init__()
        self._logger = logging.getLogger(self.__class__.__name__)
        self._mac_address = find_mac() if not mac else mac
        self._ganglion = None
        if callback:
            self._sample_callback = callback
        # Set while *not* streaming; cleared for the duration of a stream.
        self._shutdown_event = threading.Event()
        self._shutdown_event.set()
        self._streaming_thread = self._new_streaming_thread()

    def _new_streaming_thread(self) -> threading.Thread:
        # Thread objects are single-use, so each streaming session needs a
        # fresh one.
        return threading.Thread(target=GanglionBoard._streaming, args=(self,))

    def _streaming(self):
        # Worker-thread body: start the stream, then pump BLE notifications
        # until shutdown is requested.
        self._ganglion.send_command(GanglionCommand.STREAM_START)
        while not self._shutdown_event.is_set():
            try:
                self._ganglion.waitForNotifications(GanglionConstants.DELTA_T)
            except Exception as e:
                # '%s' placeholder added so the exception is actually rendered;
                # logger.error('msg: ', e) silently dropped the argument.
                self._logger.error('Something went wrong: %s', e)
                return

    def connect(self) -> None:
        """
        Connect to the board.
        Automatically called when this object is used as a context manager.
        """
        if self._ganglion:
            self._logger.warning('Already connected!')
            return
        self._logger.debug(f'Connecting to Ganglion with MAC address '
                           f'{self._mac_address}')
        # TODO: fix
        self._ganglion = _GanglionPeripheral(self._mac_address)

    def disconnect(self) -> None:
        """
        Disconnects from the board.
        Automatically called when this object is used as a context manager,
        at the end of the with-block
        """
        if self._ganglion:
            if not self._shutdown_event.is_set():
                self.stop_streaming()
            self._ganglion.disconnect()
            self._ganglion = None

    def start_streaming(self) -> None:
        """
        Initiates streaming of data from the board. Samples are
        asynchronously stored in self.samples queue of this object.
        """
        if not self._shutdown_event.is_set():
            self._logger.warning('Already streaming!')
        else:
            self._ganglion.setDelegate(_GanglionDelegate(self._sample_callback))
            self._shutdown_event.clear()
            self._streaming_thread.start()

    def stop_streaming(self) -> None:
        """
        Stop streaming from the board.
        """
        self._logger.debug('Stopping stream.')
        self._shutdown_event.set()
        self._streaming_thread.join()
        # reset the thread so streaming can be restarted later
        self._streaming_thread = self._new_streaming_thread()

    @property
    def is_streaming(self) -> bool:
        """True while a streaming session is active."""
        return not self._shutdown_event.is_set()

    @property
    def board_type(self) -> BoardType:
        """Identify this board as a Ganglion."""
        return BoardType.GANGLION

    def set_callback(self, callback: Callable[[OpenBCISample], Any]) -> None:
        """Set the per-sample callback; refused while a stream is active."""
        if not self._shutdown_event.is_set():
            self._logger.warning('Unable to set callback while streaming.')
        else:
            super().set_callback(callback)
class _GanglionDelegate(DefaultDelegate):
    """bluepy delegate parsing raw Ganglion BLE packets into samples.

    Packet ID 0 carries uncompressed 24-bit channel values (the running
    reference); IDs 1-200 carry two delta-compressed samples relative to the
    previous values.
    """

    def __init__(self, callback: Callable[[OpenBCISample], Any]):
        super().__init__()
        self._last_values = np.array([0, 0, 0, 0], dtype=np.int32)
        self._last_id = -1
        self._result_callback = callback
        self._sample_cnt = 0
        self._timestamps = None  # lazily-created timestamp generator
        self._logger = logging.getLogger(self.__class__.__name__)
        # Delta packets cannot be decoded until a full (uncompressed) packet
        # provides a reference value.
        self._wait_for_full_pkt = True

    @staticmethod
    def _timestamp_generator() -> Iterator[float]:
        # Synthesize evenly-spaced timestamps anchored at creation time.
        timestamp = time.time()
        while True:
            yield timestamp
            timestamp = timestamp + GanglionConstants.DELTA_T

    def handleNotification(self, cHandle, data):
        """Called when data is received. It parses the raw data from the
        Ganglion and returns an OpenBCISample object"""
        if len(data) < 1:
            self._logger.warning('A packet should at least hold one byte...')
            return
        bit_array = BitArray()
        start_byte = data[0]
        dropped, dummy_samples = self._upd_sample_count(start_byte)
        if start_byte == 0:
            # uncompressed sample
            if not self._timestamps:
                self._timestamps = _GanglionDelegate._timestamp_generator()
            self._wait_for_full_pkt = False
            for byte in data[1:13]:
                bit_array.append(f'0b{byte:08b}')
            results = []
            # and split it into 24-bit chunks here
            for sub_array in bit_array.cut(24):
                # calling '.int' interprets the value as signed 2's complement
                results.append(sub_array.int)
            self._last_values = np.array(results, dtype=np.int32)
            # store the sample
            self._result_callback(
                OpenBCISample(next(self._timestamps),
                              self._sample_cnt - 1,
                              start_byte,
                              self._last_values))
        elif 1 <= start_byte <= 200:
            if self._wait_for_full_pkt:
                # No reference value yet: emit NaN dummies instead.
                self._logger.warning('Need to wait for next full packet...')
                for dummy in dummy_samples:
                    self._result_callback(dummy)
                return
            elif dropped > 0:
                # Lost packets invalidate the delta chain; resync on the next
                # full packet.
                self._logger.error(f'Dropped {dropped} packets! '
                                   'Need to wait for next full packet...')
                for dummy in dummy_samples:
                    self._result_callback(dummy)
                self._wait_for_full_pkt = True
                return
            else:
                for byte in data[1:]:
                    bit_array.append(f'0b{byte:08b}')
                # Each delta packet encodes two successive sample deltas.
                delta_1, delta_2 = decompress_signed(start_byte, bit_array)
                tmp_value = self._last_values - delta_1
                self._last_values = tmp_value - delta_2
                self._result_callback(
                    OpenBCISample(next(self._timestamps),
                                  self._sample_cnt - 2,
                                  start_byte, tmp_value))
                self._result_callback(
                    OpenBCISample(next(self._timestamps),
                                  self._sample_cnt - 1,
                                  start_byte,
                                  self._last_values))

    def _upd_sample_count(self, num) -> Tuple[int, List[OpenBCISample]]:
        """Update the sample counter and detect dropped packets.

        Returns the number of dropped packets together with NaN-filled dummy
        samples standing in for them (plus the current packet's two slots).
        """
        dropped = 0
        dummy_samples = []
        if num not in [0, 206, 207]:
            if self._last_id == 0:
                if num >= 101:
                    dropped = num - 101
                else:
                    dropped = num - 1
            else:
                dropped = (num - self._last_id) - 1
            # Fix: the dummy samples below need timestamps, but the generator
            # used to be created only on the first *uncompressed* packet, so
            # an initial compressed packet crashed with a TypeError on
            # next(None).  Create it lazily here instead.
            if self._timestamps is None:
                self._timestamps = _GanglionDelegate._timestamp_generator()
            # generate NaN samples for the callback
            dummy_samples = []
            for i in range(dropped, -1, -1):
                # np.nan (np.NaN was removed in NumPy 2.0)
                dummy_samples.extend([
                    OpenBCISample(next(self._timestamps),
                                  self._sample_cnt,
                                  num - i,
                                  np.array([np.nan] * 4)),
                    OpenBCISample(next(self._timestamps),
                                  self._sample_cnt + 1,
                                  num - i,
                                  np.array([np.nan] * 4))
                ])
                self._sample_cnt += 2
        else:
            self._sample_cnt += 1
        self._last_id = num
        return dropped, dummy_samples
class _GanglionPeripheral(Peripheral):
    """bluepy Peripheral wrapper pre-binding the Ganglion's BLE characteristics."""

    def __init__(self, mac: str):
        # Ganglion boards use a random BLE address type.
        super().__init__(mac, 'random')
        self._logger = logging.getLogger(self.__class__.__name__)
        self._service = \
            self.getServiceByUUID(GanglionConstants.BLE_SERVICE)
        self._char_read = self._service.getCharacteristics(
            GanglionConstants.BLE_CHAR_RECEIVE)[0]
        self._char_write = \
            self._service.getCharacteristics(
                GanglionConstants.BLE_CHAR_SEND)[0]
        self._char_discon = \
            self._service.getCharacteristics(
                GanglionConstants.BLE_CHAR_DISCONNECT)[0]
        # enable notifications:
        try:
            desc_notify = \
                self._char_read.getDescriptors(
                    forUUID=GanglionConstants.NOTIF_UUID)[0]
            desc_notify.write(b'\x01')
        except Exception as e:
            # '%s' placeholder added so the exception is actually rendered;
            # the original passed `e` without a placeholder, losing it.
            self._logger.error(
                'Something went wrong while trying to enable notifications: %s',
                e)
            raise
        self._logger.debug('Connection established.')

    def send_command(self, cmd: GanglionCommand) -> None:
        """Write a command's byte value to the board's send characteristic."""
        self._char_write.write(cmd.value)

    def disconnect(self):
        """Politely ask the board to disconnect, then tear down the BLE link."""
        try:
            self._char_discon.write(b' ')
        except Exception as e:
            # exceptions here don't really matter as we're disconnecting anyway
            # although, it would be good to check WHY self.char_discon.write()
            # ALWAYS throws an exception...
            self._logger.debug(e)
        try:
            super().disconnect()
        except Exception as e:
            self._logger.debug(e)
|
<reponame>dreipol/django-green-grove<filename>django_green_grove/management/commands/backup_project.py
import logging
import os
import subprocess
import boto
from django.conf import settings
from django.core.management import BaseCommand
from django.utils.timezone import now
from ...backends import BackupStorage
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Back up the project database and bucket data to a backup S3 bucket."""
    help = 'Backs up the database and bucket data to another S3 bucket.'
    timestamp = None        # YYYYMMDDHHMMSS id of the current backup run
    temp_backup_path = ''   # local scratch directory for the DB dump

    def handle(self, *args, **options):
        """Entry point: dump the database, mirror the bucket, then clean up."""
        # Set variables
        self.timestamp = now().strftime('%Y%m%d%H%M%S')
        self.temp_backup_path = 'tmp/backups/%s' % self.timestamp
        backup_storage = BackupStorage()
        self.prepare_backup()
        self.create_pgpass()
        self.back_up_database(backup_storage=backup_storage, temp_backup_path=self.temp_backup_path)
        self.back_up_bucket()
        logger.info('backup_project_success: Successfully backed up database and bucket.')
        self.cleanup_backup()

    def prepare_backup(self):
        """Create the temporary working directory."""
        os.makedirs(self.temp_backup_path, exist_ok=True)

    def cleanup_backup(self):
        """Delete the temporary dump files and their directory."""
        if os.path.exists(self.temp_backup_path):
            for file_name in os.listdir(self.temp_backup_path):
                os.remove(os.path.join(self.temp_backup_path, file_name))
            os.rmdir(self.temp_backup_path)

    def create_pgpass(self):
        """Write ~/.pgpass so pg_dump can authenticate non-interactively.

        NOTE(review): the connection string is interpolated into shell
        commands (os.system / check_output with shell=True); this assumes the
        database settings contain no shell metacharacters — confirm, or move
        to subprocess argument lists.
        """
        file_path = '~/.pgpass'
        connection_string = '{hostname}:{port}:{database}:{username}:{password}'.format(
            hostname=settings.DATABASES['default']['HOST'],
            port='5432',
            database=settings.DATABASES['default']['NAME'],
            username=settings.DATABASES['default']['USER'],
            password=settings.DATABASES['default']['PASSWORD']
        )
        # Fix: os.path.exists does not expand '~', so the "back up an existing
        # pgpass" branch could never run; expand the path for the check only
        # (the shell commands below expand '~' themselves).
        if os.path.exists(os.path.expanduser(file_path)):
            backup_pgpass_text = '.pgpass has changed. Back it up to make sure no data is lost.'
            # Prepare the check of the contents from the current version of the pgpass file.
            grep = 'grep -q "{connection_string}" {file_path}; test $? -eq 0 && echo "\c" || ' \
                   'echo "{backup_pgpass_text}\c"'.format(connection_string=connection_string, file_path=file_path,
                                                          backup_pgpass_text=backup_pgpass_text)
            # Backup the pgpass file if there is a difference.
            if str(subprocess.check_output(grep, shell=True), 'utf-8') == backup_pgpass_text:
                print(backup_pgpass_text)
                os.system('mv {file_path} {file_path}_{timestamp}'.format(file_path=file_path,
                                                                          timestamp=self.timestamp))
        # Save the connection string to the pgpass file.
        os.system('echo "{connection_string}\c" > {file_path} && chmod 600 {file_path}'.format(
            connection_string=connection_string,
            file_path=file_path
        ))

    def back_up_database(self, backup_storage, temp_backup_path):
        """Dump the default database with pg_dump and upload the dump file."""
        logger.info('Start backing up the database.')
        file_path = '{database}_{timestamp}.dump'.format(
            database=settings.DATABASES['default']['NAME'],
            timestamp=self.timestamp
        )
        temp_file_path = '{backup_path}/{file_path}'.format(backup_path=temp_backup_path, file_path=file_path)
        # Run the `pg_dump` command.
        os.system('pg_dump -h {host} -U {user} {database} > {file_path}'.format(
            host=settings.DATABASES['default']['HOST'],
            user=settings.DATABASES['default']['USER'],
            database=settings.DATABASES['default']['NAME'],
            file_path=temp_file_path
        ))
        # Store the dump file on the backup bucket.
        with open(temp_file_path, 'rb') as database_backup_file:
            target_file_path = '{timestamp}/{path}'.format(timestamp=self.timestamp, path=file_path)
            backup_storage.save(target_file_path, database_backup_file)
        logger.info('Database dump successfully copied to the target storage backend.')

    def back_up_bucket(self):
        """Copy every non-empty key of the storage bucket to the backup bucket."""
        logger.info('Start backing up the bucket data.')
        boto_connection = boto.connect_s3(
            aws_access_key_id=settings.AWS_ACCESS_KEY_ID,
            aws_secret_access_key=settings.AWS_SECRET_ACCESS_KEY,
            host=settings.AWS_S3_HOST,
        )
        source_bucket = boto_connection.get_bucket(settings.AWS_STORAGE_BUCKET_NAME)
        destination_bucket = boto_connection.get_bucket(settings.BACKUP_BUCKET_BUCKET_NAME)
        destination_sub_directory = '{location}/{timestamp}'.format(location=settings.BACKUP_BUCKET_LOCATION,
                                                                    timestamp=self.timestamp)
        try:
            key_list = [source_key.key for source_key in source_bucket.list() if source_key.size]
        except ValueError:
            raise ValueError('The backup task was aborted because of some bucket keys with no size. Set '
                             '`DJANGO_GREEN_GROVE_EMPTY_S3_KEYS` in your settings to get a list of the keys.')
        if hasattr(settings, 'DJANGO_GREEN_GROVE_EMPTY_S3_KEYS'):
            error_message = 'Some bucket keys were ignored during the backup task because they have no size'
            try:
                empty_keys = [source_key.key for source_key in source_bucket.list() if not source_key.size]
                error_message += ': %s' % ', '.join(empty_keys)
            except Exception:
                # Best effort: fall back to the generic message (the original
                # used a bare `except:`, which also swallowed SystemExit etc.).
                error_message += '.'
            logger.error(error_message)
        for key in key_list:
            logger.info(key)
            new_key_name = '{sub_directory}/{name}'.format(sub_directory=destination_sub_directory, name=key)
            destination_bucket.copy_key(
                new_key_name=new_key_name,
                src_bucket_name=source_bucket.name,
                src_key_name=key
            )
        logger.info('Bucket data successfully copied to the target storage backend.')
|
<filename>test/test_cloud_translation_framework.py<gh_stars>10-100
import sys
import os
import pytest
import json
import functools
from nmtwizard.cloud_translation_framework import CloudTranslationFramework
from nmtwizard import serving
def _generate_numbers_file(path, max_count=12):
    # Write the integers 0..max_count-1, one per line, and return the path.
    with open(path, "w") as f:
        f.writelines(f"{i}\n" for i in range(max_count))
    return path
def _count_lines(path):
    # Count the lines of the file at `path`, reading in binary mode.
    with open(path, "rb") as f:
        return sum(1 for _ in f)
class _CopyTranslationFramework(CloudTranslationFramework):
    """Test double that 'translates' by returning the input batch unchanged."""
    def translate_batch(self, batch, source_lang, target_lang):
        return batch
def _test_framework(tmpdir, framework_class):
    # Run `framework_class` end-to-end on a small file of numbers and check
    # that the translation output has the same number of lines as the input.
    os.environ["WORKSPACE_DIR"] = str(tmpdir.join("workspace"))
    framework = framework_class()
    config = {"source": "en", "target": "fr"}
    input_path = str(tmpdir.join("input.txt"))
    output_path = str(tmpdir.join("output.txt"))
    _generate_numbers_file(input_path)
    args = [
        "-c",
        json.dumps(config),
        "trans",
        "-i",
        input_path,
        "-o",
        output_path,
    ]
    framework.run(args=args)
    assert os.path.isfile(output_path)
    assert _count_lines(input_path) == _count_lines(output_path)
def _test_real_framework(tmpdir, directory):
    # Import the `entrypoint` module of a real framework under
    # frameworks/<directory>, find its *Framework subclass by name, run the
    # generic test, then undo the sys.path / sys.modules changes so the next
    # framework's `entrypoint` imports cleanly.
    root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    sys.path.insert(0, os.path.join(root_dir, "frameworks", directory))
    import entrypoint
    class_name = None
    for symbol in dir(entrypoint):
        if symbol.endswith("Framework") and symbol != "CloudTranslationFramework":
            class_name = symbol
    _test_framework(tmpdir, getattr(entrypoint, class_name))
    sys.path.pop(0)
    del sys.modules["entrypoint"]
def test_cloud_translation_framework(tmpdir):
    # Smoke-test the base framework machinery with the identity translator.
    _test_framework(tmpdir, _CopyTranslationFramework)
def test_serve_cloud_translation_framework():
    """Check the serving path by translating through a string-reversing stub."""
    class _ReverseTranslationFramework(CloudTranslationFramework):
        def translate_batch(self, batch, source_lang, target_lang):
            # The configured language pair should be forwarded unchanged.
            assert source_lang == "en"
            assert target_lang == "fr"
            return ["".join(reversed(list(text))) for text in batch]
    framework = _ReverseTranslationFramework()
    config = {"source": "en", "target": "fr"}
    # serve() yields (process, service_info); only service_info is needed here.
    _, service_info = framework.serve(config, None)
    request = {"src": [{"text": "Hello"}]}
    result = serving.run_request(
        request, functools.partial(framework.forward_request, service_info)
    )
    # The reversing stub should have produced "Hello" reversed.
    assert result["tgt"][0][0]["text"] == "olleH"
# The tests below exercise the real provider integrations; each one is
# skipped unless the corresponding API credentials are present in the
# environment, so CI without secrets still passes.
@pytest.mark.skipif(
    "BAIDU_APPID" not in os.environ or "BAIDU_KEY" not in os.environ,
    reason="missing Baidu credentials",
)
def test_baidu_translate(tmpdir):
    _test_real_framework(tmpdir, "baidu_translate")
@pytest.mark.skipif(
    "DEEPL_CREDENTIALS" not in os.environ, reason="missing DeepL credentials"
)
def test_deepl_translate(tmpdir):
    _test_real_framework(tmpdir, "deepl_translate")
@pytest.mark.skipif(
    "GOOGLE_APPLICATION_CREDENTIALS" not in os.environ,
    reason="missing Google credentials",
)
def test_google_translate(tmpdir):
    _test_real_framework(tmpdir, "google_translate")
@pytest.mark.skipif(
    "NAVER_CLIENT_ID" not in os.environ or "NAVER_SECRET" not in os.environ,
    reason="missing Naver credentials",
)
def test_naver_translate(tmpdir):
    _test_real_framework(tmpdir, "naver_translate")
@pytest.mark.skipif(
    "SOGOU_PID" not in os.environ or "SOGOU_KEY" not in os.environ,
    reason="missing Sogou credentials",
)
def test_sogou_translate(tmpdir):
    _test_real_framework(tmpdir, "sogou_translate")
@pytest.mark.skipif(
    "TENCENT_SecretId" not in os.environ or "TENCENT_SecretKey" not in os.environ,
    reason="missing Tencent credentials",
)
def test_tencent_translate(tmpdir):
    _test_real_framework(tmpdir, "tencent_translate")
@pytest.mark.skipif(
    "YOUDAO_APPID" not in os.environ or "YOUDAO_KEY" not in os.environ,
    reason="missing Youdao credentials",
)
def test_youdao_translate(tmpdir):
    _test_real_framework(tmpdir, "youdao_translate")
|
<reponame>sphincs/pyspx<gh_stars>1-10
import pytest
import os
import random
import importlib
import struct
# SPHINCS+ parameter sets under test; each name corresponds to a
# pyspx.<paramset> extension module.
paramsets = [
    'shake256_128s',
    'shake256_128f',
    'shake256_192s',
    'shake256_192f',
    'shake256_256s',
    'shake256_256f',
    'sha256_128s',
    'sha256_128f',
    'sha256_192s',
    'sha256_192f',
    'sha256_256s',
    'sha256_256f',
    'haraka_128s',
    'haraka_128f',
    'haraka_192s',
    'haraka_192f',
    'haraka_256s',
    'haraka_256f',
]
# Expected [public key, secret key, signature] sizes in bytes,
# index-aligned with `paramsets` above.
expected_sizes = [
    [32, 64, 8080],
    [32, 64, 16976],
    [48, 96, 17064],
    [48, 96, 35664],
    [64, 128, 29792],
    [64, 128, 49216],
    [32, 64, 8080],
    [32, 64, 16976],
    [48, 96, 17064],
    [48, 96, 35664],
    [64, 128, 29792],
    [64, 128, 49216],
    [32, 64, 8080],
    [32, 64, 16976],
    [48, 96, 17064],
    [48, 96, 35664],
    [64, 128, 29792],
    [64, 128, 49216],
]
# Import each parameter set's module once so the tests can be parametrized
# over the loaded modules directly.
instances = []
for paramset in paramsets:
    instances.append(importlib.import_module('pyspx.' + paramset))
@pytest.mark.parametrize("pyspx,sizes", zip(instances, expected_sizes))
def test_sizes(pyspx, sizes):
    # Key and signature sizes are fixed per parameter set; compare against
    # the expected [pk, sk, sig] byte counts.
    assert pyspx.crypto_sign_PUBLICKEYBYTES == sizes[0]
    assert pyspx.crypto_sign_SECRETKEYBYTES == sizes[1]
    assert pyspx.crypto_sign_BYTES == sizes[2]
@pytest.mark.parametrize("pyspx", instances)
def test_keygen(pyspx):
    # An empty seed is too short; the binding reports that as MemoryError.
    seed = bytes()
    with pytest.raises(MemoryError):
        pyspx.generate_keypair(seed)
    # A correctly sized random seed must produce a key pair without error.
    seed = os.urandom(pyspx.crypto_sign_SEEDBYTES)
    publickey, secretkey = pyspx.generate_keypair(seed)
@pytest.mark.parametrize("pyspx", instances)
def test_sign_verify(pyspx):
    # Round-trip: a signature over a random message must verify with the
    # matching public key.
    seed = os.urandom(pyspx.crypto_sign_SEEDBYTES)
    publickey, secretkey = pyspx.generate_keypair(seed)
    message = os.urandom(32)
    signature = pyspx.sign(message, secretkey)
    assert pyspx.verify(message, signature, publickey)
@pytest.mark.parametrize("pyspx", instances)
def test_invalid_signature(pyspx):
    """Malformed inputs must raise, and corrupted signatures must not verify."""
    seed = os.urandom(pyspx.crypto_sign_SEEDBYTES)
    publickey, secretkey = pyspx.generate_keypair(seed)
    message = os.urandom(32)
    # incorrect sk length
    with pytest.raises(MemoryError):
        pyspx.sign(message, bytes())
    # incorrect type for message or key
    with pytest.raises(TypeError):
        pyspx.sign(42, secretkey)
    with pytest.raises(TypeError):
        pyspx.sign(message, 42)
    signature = pyspx.sign(message, secretkey)
    # flip a few random bytes in the signature
    for i in range(10):
        # randrange(len) keeps n in-bounds; randint(0, len(signature)) could
        # return len(signature), making signature[n:n+1] empty and causing
        # struct.unpack to raise struct.error.
        n = random.randrange(len(signature))
        # this is extremely convoluted to be python2/3-compatible
        byte_as_int = struct.unpack('B', signature[n:n+1])[0]
        flippedbyte = struct.pack('B', byte_as_int ^ 0xFF)
        invsig = signature[:n] + flippedbyte + signature[n+1:]
        assert not pyspx.verify(message, invsig, publickey)
    # incorrect pk length
    with pytest.raises(MemoryError):
        pyspx.verify(message, signature, bytes())
    # incorrect signature length
    with pytest.raises(MemoryError):
        pyspx.verify(message, bytes(), publickey)
    # incorrect type for message, signature or key
    with pytest.raises(TypeError):
        pyspx.verify(42, signature, publickey)
    with pytest.raises(TypeError):
        pyspx.verify(message, 42, publickey)
    with pytest.raises(TypeError):
        pyspx.verify(message, signature, 42)
@pytest.mark.parametrize("pyspx", instances)
def test_long_message(pyspx):
    # Sign/verify a 1 MiB all-zero message to exercise the streaming path.
    seed = os.urandom(pyspx.crypto_sign_SEEDBYTES)
    publickey, secretkey = pyspx.generate_keypair(seed)
    message = bytes(2**20)
    signature = pyspx.sign(message, secretkey)
    assert pyspx.verify(message, signature, publickey)
|
<filename>pilates/utils/geog.py
import geopandas as gpd
import pandas as pd
import logging
import requests
from shapely.geometry import Polygon
from tqdm import tqdm
import os
logger = logging.getLogger(__name__)
def get_taz_geoms(region, taz_id_col_in='taz1454', zone_id_col_out='zone_id'):
    """Download TAZ polygons for *region* and normalize the zone-id column.

    Currently only the 'sfbay' region is supported; any other region falls
    through and returns None, matching the original behavior.
    """
    if region == 'sfbay':
        url = (
            'https://opendata.arcgis.com/datasets/'
            '94e6e7107f0745b5b2aabd651340b739_0.geojson')
        taz_gdf = gpd.read_file(url, crs="EPSG:4326")
        taz_gdf = taz_gdf.rename(columns={taz_id_col_in: zone_id_col_out})
        # zone_id col must be str
        taz_gdf[zone_id_col_out] = taz_gdf[zone_id_col_out].astype(str)
        return taz_gdf
def get_county_block_geoms(state_fips, county_fips, zone_type='blocks', result_size=10000):
    """Download census geometries for one county from the TIGERweb API.

    Pages through the API `result_size` records at a time until the server
    reports no more records.

    Args:
        state_fips: state FIPS code used in the query.
        county_fips: county FIPS code used in the query.
        zone_type: 'blocks'/'taz' for census blocks, or 'block_groups'.
        result_size: records requested per API page.

    Returns:
        A GeoDataFrame (EPSG:4326) of the county's features with polygon
        geometries built from the API's ring coordinates.
    """
    if (zone_type == 'blocks') or (zone_type == 'taz'):  # to map blocks to taz.
        base_url = (
            'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/'
            # 'Tracts_Blocks/MapServer/12/query?where=STATE%3D{0}+and+COUNTY%3D{1}' #2020 census
            'tigerWMS_Census2010/MapServer/18/query?where=STATE%3D{0}+and+COUNTY%3D{1}'#2010 census
            '&resultRecordCount={2}&resultOffset={3}&orderBy=GEOID'
            '&outFields=GEOID%2CSTATE%2CCOUNTY%2CTRACT%2CBLKGRP%2CBLOCK%2CCENTLAT'
            '%2CCENTLON&outSR=%7B"wkid"+%3A+4326%7D&f=pjson')
    elif zone_type == 'block_groups':
        base_url = (
            'https://tigerweb.geo.census.gov/arcgis/rest/services/TIGERweb/'
            # 'Tracts_Blocks/MapServer/11/query?where=STATE%3D{0}+and+COUNTY%3D{1}' #2020 census
            'tigerWMS_Census2010/MapServer/16/query?where=STATE%3D{0}+and+COUNTY%3D{1}'#2010 census
            '&resultRecordCount={2}&resultOffset={3}&orderBy=GEOID'
            '&outFields=GEOID%2CSTATE%2CCOUNTY%2CTRACT%2CBLKGRP%2CCENTLAT'
            '%2CCENTLON&outSR=%7B"wkid"+%3A+4326%7D&f=pjson')
    blocks_remaining = True
    all_features = []
    page = 0
    while blocks_remaining:
        offset = page * result_size
        url = base_url.format(state_fips, county_fips, result_size, offset)
        result = requests.get(url)
        try:
            features = result.json()['features']
        except KeyError:
            logger.error("No features returned. Try a smaller result size.")
            # BUG FIX: previously fell through and referenced the unbound
            # `features` variable (NameError). Stop paging instead.
            break
        all_features += features
        if 'exceededTransferLimit' in result.json().keys():
            if result.json()['exceededTransferLimit']:
                page += 1
            else:
                blocks_remaining = False
        else:
            if len(features) == 0:
                blocks_remaining = False
            else:
                page += 1
    # Build one frame per feature, then concatenate once — repeated
    # pd.concat inside the loop is quadratic in the number of features.
    frames = []
    for feature in all_features:
        tmp = pd.DataFrame([feature['attributes']])
        # First ring is the exterior boundary; remaining rings are holes.
        tmp['geometry'] = Polygon(
            feature['geometry']['rings'][0],
            feature['geometry']['rings'][1:])
        frames.append(tmp)
    df = pd.concat(frames) if frames else pd.DataFrame()
    gdf = gpd.GeoDataFrame(df, crs="EPSG:4326")
    return gdf
def get_block_geoms(settings, data_dir='./tmp/'):
    """Load block geometries for the configured region, downloading from the
    Census TIGERweb API on first use and caching as a shapefile on disk.

    Args:
        settings: config dict; uses 'region', 'FIPS', and 'region_zone_type'.
        data_dir: directory for the cached "<zone_type>_<region>.shp" file.

    Returns:
        GeoDataFrame (EPSG:4326) of all counties' block geometries.
    """
    region = settings['region']
    FIPS = settings['FIPS'][region]
    state_fips = FIPS['state']
    county_codes = FIPS['counties']
    zone_type = settings['region_zone_type'][region]
    all_block_geoms = []
    file_name = zone_type + "_"+ region + ".shp"
    if os.path.exists(os.path.join(data_dir, file_name)):
        logger.info("Loading block geoms from disk!")
        blocks_gdf = gpd.read_file(os.path.join(data_dir, file_name))
    else:
        logger.info("Downloading {} geoms from Census TIGERweb API!".format(zone_type))
        # get block geoms from census tigerweb API
        for county in tqdm(
                county_codes, total=len(county_codes),
                desc='Getting block geoms for {0} counties'.format(
                    len(county_codes))):
            county_gdf = get_county_block_geoms(state_fips, county, zone_type)
            all_block_geoms.append(county_gdf)
        blocks_gdf = gpd.GeoDataFrame(
            pd.concat(all_block_geoms, ignore_index=True), crs="EPSG:4326")
        # save to disk
        logger.info(
            "Got {0} block geometries. Saving to disk.".format(
                len(all_block_geoms)))
        blocks_gdf.to_file(os.path.join(data_dir, file_name))
    return blocks_gdf
def get_taz_from_block_geoms(blocks_gdf, zones_gdf, local_crs, zone_col_name):
    """Assign each census block (GEOID) to a TAZ.

    Blocks are first matched to the zone with the largest area of overlap;
    any blocks with no overlap are then matched to the zone with the nearest
    centroid.

    Args:
        blocks_gdf: block geometries with a 'GEOID' column.
        zones_gdf: zone geometries, zone ids in its index.
        local_crs: meter-based CRS used for area/distance computations.
        zone_col_name: name of the zone-id column in the result.

    Returns:
        Series of zone ids indexed by 'GEOID'.
    """
    logger.info("Assigning blocks to TAZs!")
    # df to store GEOID to TAZ results
    block_to_taz_results = pd.DataFrame()
    # ignore empty geoms
    zones_gdf = zones_gdf[~zones_gdf['geometry'].is_empty]
    # convert to meter-based proj
    zones_gdf = zones_gdf.to_crs(local_crs)
    blocks_gdf = blocks_gdf.to_crs(local_crs)
    zones_gdf['zone_area'] = zones_gdf.geometry.area
    # assign zone ID's to blocks based on max area of intersection
    intx = gpd.overlay(blocks_gdf, zones_gdf.reset_index(), how='intersection')
    intx['intx_area'] = intx['geometry'].area
    # keep only the largest intersection per block
    intx = intx.sort_values(['GEOID', 'intx_area'], ascending=False)
    intx = intx.drop_duplicates('GEOID', keep='first')
    # add to results df
    block_to_taz_results = pd.concat((
        block_to_taz_results, intx[['GEOID', zone_col_name]]))
    # assign zone ID's to remaining blocks based on shortest
    # distance between block and zone centroids
    unassigned_mask = ~blocks_gdf['GEOID'].isin(block_to_taz_results['GEOID'])
    if any(unassigned_mask):
        # replace polygons with centroids for the distance computation
        blocks_gdf['geometry'] = blocks_gdf['geometry'].centroid
        zones_gdf['geometry'] = zones_gdf['geometry'].centroid
        all_dists = blocks_gdf.loc[unassigned_mask, 'geometry'].apply(
            lambda x: zones_gdf['geometry'].distance(x))
        # idxmin over columns yields each block's nearest zone id
        nearest = all_dists.idxmin(axis=1).reset_index()
        nearest.columns = ['blocks_idx', zone_col_name]
        nearest.set_index('blocks_idx', inplace=True)
        nearest['GEOID'] = blocks_gdf.reindex(nearest.index)['GEOID']
        block_to_taz_results = pd.concat((
            block_to_taz_results, nearest[['GEOID', zone_col_name]]))
    return block_to_taz_results.set_index('GEOID')[zone_col_name]
def map_block_to_taz(
        settings, region, zones_gdf=None, zone_id_col='zone_id',
        reference_taz_id_col='taz1454', data_dir='./tmp/'):
    """Map census blocks to TAZ ids for the configured region.

    Args:
        settings: config dict; its 'region' entry overrides the *region*
            argument (kept for interface compatibility).
        region: unused — superseded by settings['region'] (original behavior).
        zones_gdf: optional pre-loaded zone geometries; fetched if None.
        zone_id_col: name of the zone-id column in the result.
        reference_taz_id_col: source TAZ id column in the downloaded zones.
        data_dir: cache directory passed through to get_block_geoms.

    Returns:
        A series named 'zone_id' with 'GEOID' as index name
    """
    region = settings['region']
    # Removed unused locals (FIPS/state_fips/county_codes) — block geoms are
    # resolved inside get_block_geoms from the same settings.
    local_crs = settings['local_crs'][region]
    if zones_gdf is None:
        zones_gdf = get_taz_geoms(region, reference_taz_id_col, zone_id_col)
    blocks_gdf = get_block_geoms(settings, data_dir)
    blocks_gdf.crs = 'EPSG:4326'
    blocks_to_taz = get_taz_from_block_geoms(
        blocks_gdf, zones_gdf, local_crs, zone_id_col)
    return blocks_to_taz.astype(str)
def get_zone_from_points(df, zones_gdf, local_crs):
    '''
    Assigns the gdf index (zone_id) for each index in df
    Parameters:
    -----------
    - df columns names x, and y. The index is the ID of the point feature.
    - zones_gdf: GeoPandas GeoDataFrame with zone_id as index, geometry, area.
    Returns:
    -----------
    A series with df index and corresponding gdf id
    '''
    logger.info("Assigning zone IDs to {0}".format(df.index.name))
    zone_id_col = zones_gdf.index.name
    # x/y are assumed to be lon/lat in EPSG:4326 — TODO confirm with callers.
    gdf = gpd.GeoDataFrame(
        df, geometry=gpd.points_from_xy(df.x, df.y), crs="EPSG:4326")
    zones_gdf.geometry.crs = "EPSG:4326"
    # convert to meters-based local crs
    gdf = gdf.to_crs(local_crs)
    zones_gdf = zones_gdf.to_crs(local_crs)
    # Spatial join
    # NOTE(review): the `op=` keyword is deprecated in newer geopandas in
    # favor of `predicate=` — verify against the pinned geopandas version.
    intx = gpd.sjoin(
        gdf, zones_gdf.reset_index(),
        how='left', op='intersects')
    # left join must preserve one row per input point
    assert len(intx) == len(gdf)
    return intx[zone_id_col]
|
import logging
import h5py
import numpy as np
from collections import defaultdict
from minibatcher import MiniBatcher
class IAM_MiniBatcher:
    """Mini-batch provider over IAM handwriting lines stored in HDF5.

    Wraps MiniBatcher with an item getter that crops a random fixed-size
    "shingle" (rows x cols window) out of each handwritten-line image.
    """
    @staticmethod
    def shingle_item_getter(f, key, shingle_dim=(120,120)):
        '''
        Retrieve a line from an iam hdf5 file and shingle into the line
        NB: shingle_dim is in rows, cols format
        '''
        # Key format is {author:{form:data}}
        (author, form) = key
        # Extract line from HDF5 file
        original_line = f[author][form]
        # There was an option to index into form. That
        # format is deprecated. It originally had a hierarchy
        # Key format is {author:{form:{line:data}}
        # and the code was:
        # (author, form, line) = key
        # original_line = f[author][form][line]
        # Pull shingle from the line at a random offset.
        (height, width) = original_line.shape
        max_x = max(width - shingle_dim[1], 1)
        max_y = max(height - shingle_dim[0], 1)
        x_start = np.random.randint(0, max_x)
        y_start = np.random.randint(0, max_y)
        # check if the line is too small on at least one axis
        if width < shingle_dim[1]:
            x_slice = slice(0, width)
        else:
            x_slice = slice(x_start, x_start + shingle_dim[1])
        if height < shingle_dim[0]:
            y_slice = slice(0, height)
        else:
            # BUG FIX: the row slice previously used shingle_dim[1] (the
            # column count); rows must span shingle_dim[0].
            y_slice = slice(y_start, y_start + shingle_dim[0])
        slice_width = x_slice.stop - x_slice.start
        slice_height = y_slice.stop - y_slice.start
        # Create a white (255) output shingle and paste the crop onto it so
        # undersized lines are padded rather than rejected.
        output_arr = np.zeros(shingle_dim)
        output_arr.fill(255)
        output_arr[:slice_height, :slice_width] = original_line[y_slice, x_slice]
        return output_arr
    def __init__(self, fname, num_authors, num_forms_per_author, default_mode=MiniBatcher.TRAIN, shingle_dim=(120,120), batch_size=32, train_pct=.7, test_pct=.2, val_pct=.1):
        """Open the HDF5 file and build a MiniBatcher over (author, form) keys.

        Raises ValueError when fewer than num_authors authors have at least
        num_forms_per_author forms.
        """
        self.hdf5_file = fname
        fIn = h5py.File(self.hdf5_file, 'r')
        authors = []
        # Filter on number of forms per author
        for author in fIn.keys():
            if len(fIn[author]) >= num_forms_per_author:
                authors.append(author)
        if len(authors) < num_authors:
            raise ValueError("There are only %d authors with more than %d forms"%(len(authors), num_forms_per_author))
        keys = []
        # Get all the keys from our hdf5 file
        for author in authors[:num_authors]: # Limit us to num_authors
            forms = list(fIn[author])
            for form in forms[:num_forms_per_author]: # Limit us to num_form_per_author
                keys.append((author, form))
                # Original hierarchy with "use_form" option:
                # for line_name in fIn[author][form].keys():
                #     for shingle in range(fIn[author][form][line_name].shape[0]):
                #         keys.append((author,form,line_name))
        # Remove duplicates to prevent test/val contamination
        keys = list(set(keys))
        # Invert and scale pixel values into [0, 1] (ink becomes ~1).
        normalize = lambda x: 1.0 - x.astype(np.float32)/255.0
        item_getter = lambda f, key: IAM_MiniBatcher.shingle_item_getter(f, key, shingle_dim)
        self.batch_size = batch_size
        m = MiniBatcher(fIn, keys, item_getter=item_getter, normalize=normalize,
                        batch_size=self.batch_size, min_fragments=0, train_pct=train_pct, test_pct=test_pct, val_pct=val_pct)
        self.m = m
        self.default_mode = default_mode
    def get_test_batch(self, num_items=None):
        """Fetch one batch from the TEST split (defaults to self.batch_size items)."""
        if num_items is None:
            num_items = self.batch_size
        return self.get_batch(num_items, MiniBatcher.TEST)
    def get_train_batch(self, num_items=None):
        """Fetch one batch from the TRAIN split (defaults to self.batch_size items)."""
        if num_items is None:
            num_items = self.batch_size
        return self.get_batch(num_items, MiniBatcher.TRAIN)
    def get_val_batch(self, num_items=None):
        """Fetch one batch from the VAL split (defaults to self.batch_size items)."""
        if num_items is None:
            num_items = self.batch_size
        return self.get_batch(num_items, MiniBatcher.VAL)
    def get_batch(self, num_items, mode=None):
        """Fetch num_items items from the given split (default: self.default_mode)."""
        if mode is None:
            mode = self.default_mode
        self.m.set_mode(mode)
        self.m.batch_size = num_items
        return self.m.get_batch()
def main():
    """CLI smoke test: time fetching a few training batches from an IAM HDF5 file."""
    import time
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option("-f", "--file", dest="filename",
                      help="Log file to read")
    parser.add_option("--num_authors", dest='num_authors', type=int, help="Number of authors to include")
    parser.add_option("--num_forms_per_author", dest='num_forms_per_author',
                      type=int, help="Number of forms per author required")
    parser.add_option("--shingle_dim", dest='shingle_dim', help="Shingle dimensions, comma separated i.e. 120,120")
    parser.add_option("--batch_size", dest="batch_size", type=int, default=32, help="Iteration Batch Size")
    parser.add_option("--log_level", dest="log_level", type=int, default=logging.WARNING)
    (options, args) = parser.parse_args()
    logging.basicConfig(level=options.log_level)
    # tuple() so the parsed dimensions stay indexable/reusable on Python 3,
    # where map() returns a one-shot iterator.
    shingle_dim = tuple(map(int, options.shingle_dim.split(',')))
    iam_m = IAM_MiniBatcher(options.filename, options.num_authors, options.num_forms_per_author,
                            shingle_dim=shingle_dim, default_mode=MiniBatcher.TRAIN,
                            batch_size=options.batch_size)
    num_batches = 10
    start_time = time.time()
    for i in range(num_batches):
        z = iam_m.get_train_batch()
    # print() function calls work on both Python 2 and 3 (the original used
    # Python-2-only print statements, a SyntaxError under Python 3).
    print('Completed %d batches in: %s' % (num_batches, time.time() - start_time))
    print('Batch shape: %s' % (z[0].shape,))
    print('Number of unique authors in first batch: {}'.format(len(set(z[1]))))
if __name__ == "__main__":
    main()
|
# Generated by Django 2.2.3 on 2020-08-25 19:47
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import ghostwriter.rolodex.models
class Migration(migrations.Migration):
    """Auto-generated migration: updates help_text/verbose_name metadata on
    rolodex client/project models. Field definitions must not be edited by
    hand — regenerate with makemigrations instead.
    """
    dependencies = [
        ('rolodex', '0005_auto_20191122_2304'),
    ]
    operations = [
        migrations.AlterField(
            model_name='client',
            name='codename',
            field=models.CharField(blank=True, help_text='A codename for the client that might be used to discuss the client in public', max_length=255, null=True, verbose_name='Client Codename'),
        ),
        migrations.AlterField(
            model_name='client',
            name='name',
            field=models.CharField(help_text="Provide the client's full name as you want it to appear in a report", max_length=255, unique=True, verbose_name='Client Name'),
        ),
        migrations.AlterField(
            model_name='client',
            name='note',
            field=models.TextField(blank=True, help_text='Describe the client or provide some additional information', null=True, verbose_name='Client Note'),
        ),
        migrations.AlterField(
            model_name='client',
            name='short_name',
            field=models.CharField(blank=True, help_text='Provide an abbreviated name to be used in reports', max_length=255, null=True, verbose_name='Client Short Name'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='email',
            field=models.CharField(blank=True, help_text='Enter an email address for this contact', max_length=255, null=True, verbose_name='Email'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='job_title',
            field=models.CharField(blank=True, help_text="Enter the contact's job title or project role as you want it to appear in a report", max_length=255, null=True, verbose_name='Title or Role'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='name',
            field=models.CharField(help_text="Enter the contact's full name", max_length=255, null=True, verbose_name='Name'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='note',
            field=models.TextField(blank=True, help_text='Provide additional information about the contact', null=True, verbose_name='Client Note'),
        ),
        migrations.AlterField(
            model_name='clientcontact',
            name='phone',
            field=models.CharField(blank=True, help_text='Enter a phone number for this contact', max_length=50, null=True, verbose_name='Phone'),
        ),
        migrations.AlterField(
            model_name='clientnote',
            name='note',
            # NOTE(review): this help_text reads oddly ("Leave the client or
            # related projects") — likely meant "Leave a note about the
            # client..."; fix in the model and a new migration, not here.
            field=models.TextField(blank=True, help_text='Leave the client or related projects', null=True, verbose_name='Notes'),
        ),
        migrations.AlterField(
            model_name='clientnote',
            name='timestamp',
            field=models.DateField(auto_now_add=True, help_text='Creation timestamp', verbose_name='Timestamp'),
        ),
        migrations.AlterField(
            model_name='objectivestatus',
            name='objective_status',
            field=models.CharField(help_text='Enter an objective status (e.g. Active, On Hold)', max_length=255, unique=True, verbose_name='Objective Status'),
        ),
        migrations.AlterField(
            model_name='project',
            name='client',
            field=models.ForeignKey(help_text='Select the client to which this project should be attached', on_delete=django.db.models.deletion.CASCADE, to='rolodex.Client'),
        ),
        migrations.AlterField(
            model_name='project',
            name='codename',
            field=models.CharField(blank=True, help_text='A codename for the client that might be used to discuss the client in public', max_length=255, null=True, verbose_name='Project Codename'),
        ),
        migrations.AlterField(
            model_name='project',
            name='complete',
            field=models.BooleanField(default=False, help_text='Mark this project as complete', verbose_name='Completed'),
        ),
        migrations.AlterField(
            model_name='project',
            name='end_date',
            field=models.DateField(help_text='Enter the end date of this project', max_length=12, verbose_name='End Date'),
        ),
        migrations.AlterField(
            model_name='project',
            name='note',
            field=models.TextField(blank=True, help_text='Provide additional information about the project and planning', null=True, verbose_name='Notes'),
        ),
        migrations.AlterField(
            model_name='project',
            name='slack_channel',
            field=models.CharField(blank=True, help_text='Provide an Slack channel to be used for project notifications', max_length=255, null=True, verbose_name='Project Slack Channel'),
        ),
        migrations.AlterField(
            model_name='project',
            name='start_date',
            field=models.DateField(help_text='Enter the start date of this project', max_length=12, verbose_name='Start Date'),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='end_date',
            field=models.DateField(blank=True, help_text='Enter the end date of the project', null=True, verbose_name='End Date'),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='note',
            field=models.TextField(blank=True, help_text='Provide additional information about the project role and assignment', null=True, verbose_name='Notes'),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='operator',
            field=models.ForeignKey(blank=True, help_text='Select a user to assign to this project', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='role',
            field=models.ForeignKey(blank=True, help_text="Select a role that best describes the selected user's role in this project", null=True, on_delete=django.db.models.deletion.SET_NULL, to='rolodex.ProjectRole'),
        ),
        migrations.AlterField(
            model_name='projectassignment',
            name='start_date',
            field=models.DateField(blank=True, help_text='Enter the start date of the project', null=True, verbose_name='Start Date'),
        ),
        migrations.AlterField(
            model_name='projectnote',
            name='note',
            field=models.TextField(blank=True, help_text='Leave a note about the project or related client', null=True, verbose_name='Notes'),
        ),
        migrations.AlterField(
            model_name='projectnote',
            name='timestamp',
            field=models.DateField(auto_now_add=True, help_text='Creation timestamp', verbose_name='Timestamp'),
        ),
        migrations.AlterField(
            model_name='projectobjective',
            name='deadline',
            field=models.DateField(blank=True, help_text='Provide a deadline for this objective', max_length=12, null=True, verbose_name='Due Date'),
        ),
        migrations.AlterField(
            model_name='projectobjective',
            name='status',
            field=models.ForeignKey(default=ghostwriter.rolodex.models.ProjectObjective.get_status, help_text='Set the status for this objective', on_delete=django.db.models.deletion.PROTECT, to='rolodex.ObjectiveStatus'),
        ),
        migrations.AlterField(
            model_name='projectrole',
            name='project_role',
            field=models.CharField(help_text='Enter an operator role used for project assignments', max_length=255, unique=True, verbose_name='Project Role'),
        ),
        migrations.AlterField(
            model_name='projecttype',
            name='project_type',
            field=models.CharField(help_text='Enter a project type (e.g. red team, penetration test)', max_length=255, unique=True, verbose_name='Project Type'),
        ),
    ]
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator as coordinator_lib
from tensorflow.python.training import queue_runner_impl
from tensorflow_estimator.python.estimator.canned.timeseries import math_utils
from tensorflow_estimator.python.estimator.canned.timeseries.feature_keys import TrainEvalFeatures
class InputStatisticsTests(tf.test.TestCase):
  """Checks that input-statistics objects converge to the correct moments."""
  def _input_statistics_test_template(self,
                                      stat_object,
                                      num_features,
                                      dtype,
                                      warmup_iterations=0,
                                      rtol=1e-6,
                                      data_length=4):
    """Build a synthetic (times, values) series, run `warmup_iterations`
    statistic updates, and compare the computed moments against closed-form
    expectations derived from the same ranges."""
    graph = tf.Graph()
    with graph.as_default():
      data_length_range = tf.range(data_length, dtype=dtype)
      num_features_range = tf.range(num_features, dtype=dtype)
      # Times are an affine ramp; values are an outer sum of the two ranges,
      # batched with a leading dimension of 1.
      times = 2 * data_length_range[None, :] - 3
      values = (
          data_length_range[:, None] + num_features_range[None, :])[None, ...]
      features = {
          TrainEvalFeatures.TIMES: times,
          TrainEvalFeatures.VALUES: values,
      }
      statistics = stat_object.initialize_graph(features=features)
      with self.session(graph=graph) as session:
        tf.compat.v1.initializers.global_variables().run()
        coordinator = tf.train.Coordinator()
        tf.compat.v1.train.queue_runner.start_queue_runners(session, coord=coordinator)
        for _ in range(warmup_iterations):
          # A control dependency should ensure that, for queue-based statistics,
          # a use of any statistic is preceded by an update of all adaptive
          # statistics.
          self.evaluate(statistics.total_observation_count)
        self.assertAllClose(
            tf.range(num_features, dtype=dtype) +
            tf.math.reduce_mean(data_length_range)[None],
            self.evaluate(statistics.series_start_moments.mean),
            rtol=rtol)
        self.assertAllClose(
            tf.tile(
                tf.math.reduce_variance(data_length_range)[None],
                [num_features]),
            self.evaluate(statistics.series_start_moments.variance),
            rtol=rtol)
        self.assertAllClose(
            tf.math.reduce_mean(values[0], axis=0),
            self.evaluate(statistics.overall_feature_moments.mean),
            rtol=rtol)
        self.assertAllClose(
            tf.math.reduce_variance(values[0], axis=0),
            self.evaluate(statistics.overall_feature_moments.variance),
            rtol=rtol)
        # First time value is 2*0 - 3 = -3.
        self.assertAllClose(-3, self.evaluate(statistics.start_time), rtol=rtol)
        self.assertAllClose(
            data_length,
            self.evaluate(statistics.total_observation_count),
            rtol=rtol)
        coordinator.request_stop()
        coordinator.join()
  def test_queue(self):
    # Mini-batch statistics are adaptive, so allow many warmup iterations and
    # a loose tolerance.
    for dtype in [tf.dtypes.float32, tf.dtypes.float64]:
      for num_features in [1, 2, 3]:
        self._input_statistics_test_template(
            math_utils.InputStatisticsFromMiniBatch(
                num_features=num_features, dtype=dtype),
            num_features=num_features,
            dtype=dtype,
            warmup_iterations=1000,
            rtol=0.1)
# Run via TensorFlow's test runner so TF-specific flags are handled.
if __name__ == "__main__":
  tf.test.main()
|
#!/usr/bin/python3
import unittest
from freeverse import Should, Expect, It
from freeverse.expectations import ActualValue
class ExpectationTests(unittest.TestCase):
    """Shared base for expectation tests; adds a non-empty assertion helper."""
    def assertIsNotEmpty(self, sizedObject):
        # Anything supporting len() qualifies (strings, lists, messages, ...).
        self.assertGreater(len(sizedObject), 0)
# The should-style API returns its failure message directly, so these tests
# assert the exact message text.
class ShouldStyleAssertionsTests(ExpectationTests):
    """should_* methods return '' on success and a descriptive message on failure."""
    def test_basic_actual_value_should_method_takes_predicate(self):
        self.assertEqual('', ActualValue(4).should(lambda x: x == 4))
        self.assertEqual('Predicate not true of 4', ActualValue(4).should(lambda x: x == 2))
    def test_actual_value_should_be_method_asserts_equality(self):
        self.assertEqual('', ActualValue(4).should_be(4))
        self.assertEqual('4 does not equal 2', ActualValue(4).should_be(2))
    def test_actual_value_should_not_be_method_asserts_inequality(self):
        self.assertEqual('', ActualValue(4).should_not_be(2))
        self.assertEqual('4 does equal 4', ActualValue(4).should_not_be(4))
    def test_actual_value_should_equal_method_asserts_equality(self):
        self.assertEqual('', ActualValue(4).should_equal(4))
        self.assertEqual('4 does not equal 2', ActualValue(4).should_equal(2))
    def test_actual_value_should_not_equal_method_asserts_inequality(self):
        self.assertEqual('', ActualValue(4).should_not_equal(2))
        self.assertEqual('4 does equal 4', ActualValue(4).should_not_equal(4))
# The error messages should be the same as for the cases above, so only test
# that they are nonempty
class ExpectStyleAssertionsTests(ExpectationTests):
    """Expect-style API mirrors the should-style one, so failure messages are
    only checked for being non-empty here."""
    def test_basic_expect_to_method_takes_predicate(self):
        self.assertEqual('', Expect(4).to(lambda x: x == 4))
        self.assertIsNotEmpty(Expect(4).to(lambda x: x == 2))
    def test_expect_to_be_method_asserts_equality(self):
        self.assertEqual('', Expect(4).to_be(4))
        self.assertIsNotEmpty(Expect(4).to_be(2))
    def test_expect_not_to_be_method_asserts_inequality(self):
        self.assertEqual('', Expect(4).not_to_be(2))
        self.assertIsNotEmpty(Expect(4).not_to_be(4))
    def test_expect_to_equal_method_asserts_equality(self):
        self.assertEqual('', Expect(4).to_equal(4))
        self.assertIsNotEmpty(Expect(4).to_equal(2))
    def test_expect_not_to_equal_method_asserts_inequality(self):
        self.assertEqual('', Expect(4).not_to_equal(2))
        self.assertIsNotEmpty(Expect(4).not_to_equal(4))
# The error messages should be the same as for the cases above, so only test
# that they are nonempty
class ItStyleAssertionsTests(ExpectationTests):
    """It-style assertions build a deferred check that is later applied to an
    ActualValue; messages are only checked for being non-empty here."""
    def test_basic_it_should_method_takes_predicate(self):
        self.assertEqual('', It.should(lambda x: x == 4)(ActualValue(4)))
        self.assertIsNotEmpty(It.should(lambda x: x == 2)(ActualValue(4)))
    def test_it_should_be_method_asserts_equality(self):
        self.assertEqual('', It.should_be(4)(ActualValue(4)))
        self.assertIsNotEmpty(It.should_be(2)(ActualValue(4)))
    def test_it_should_not_be_method_asserts_inequality(self):
        self.assertEqual('', It.should_not_be(2)(ActualValue(4)))
        self.assertIsNotEmpty(It.should_not_be(4)(ActualValue(4)))
    def test_it_should_equal_method_asserts_equality(self):
        self.assertEqual('', It.should_equal(4)(ActualValue(4)))
        self.assertIsNotEmpty(It.should_equal(2)(ActualValue(4)))
    def test_it_should_not_equal_method_asserts_inequality(self):
        self.assertEqual('', It.should_not_equal(2)(ActualValue(4)))
        self.assertIsNotEmpty(It.should_not_equal(4)(ActualValue(4)))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
|
import sys, getopt, inspect
def parseOptions(arguments, shortOpts, longOpts):
    """Parse argv-style *arguments* with getopt.

    Args:
        arguments: list of command-line tokens (no program name).
        shortOpts: getopt short-option spec string (':' marks an argument).
        longOpts: list of getopt long-option names ('=' marks an argument).

    Returns:
        (options, command, remainder): options is the list of (flag, value)
        pairs; command is the first positional argument or None; remainder is
        the remaining positionals, or None when there is no command.

    Raises:
        getopt.GetoptError: on an unrecognized or malformed option (the
        original wrapped this in a try/except that only re-raised it).
    """
    options, remainder = getopt.getopt(
        arguments,
        shortOpts,
        longOpts
    )
    if remainder:
        command = remainder[0]
        remainder = remainder[1:]
    else:
        # No positional arguments: preserve the original (None, None) shape.
        command = None
        remainder = None
    return options, command, remainder
class DeclarativeOptions:
    """Base class: subclasses declare CLI options as attributes.

    Attribute naming convention: 'long' or 'long_s' (long form, optional
    short form separated by '_'). Each attribute is expected to expose an
    `instructions` callable (the handler) and a `description` string;
    handlers taking an argument produce options that require a value.
    """
    def __init__(self):
        # Collect declared option attributes (everything non-dunder).
        self.__opts__ = [a for a in dir(self) if not a.startswith('__')]
        self.__instructions_map__ = {}
        self.__short_opts__ = []
        self.__long_opts__ = []
        for opt in self.__opts__:
            variations = opt.split('_')
            particular_instruction = getattr(self, opt).instructions
            has_arg = self.__has_arguments__(particular_instruction)
            long_opt = variations[0]
            self.__instructions_map__['--' + long_opt] = particular_instruction
            # getopt spelling: '=' suffix marks a long option taking a value.
            self.__long_opts__.append(long_opt + '=' if has_arg else long_opt)
            if len(variations) > 1:
                short_opt = variations[1]
                self.__instructions_map__['-' + short_opt] = particular_instruction
                # getopt spelling: ':' suffix marks a short option taking a value.
                self.__short_opts__.append(short_opt + ':' if has_arg else short_opt)
        self.__short_opts__ = ''.join(self.__short_opts__)
    @staticmethod
    def __has_arguments__(fun):
        # True when the handler declares at least one positional parameter.
        try: # python3
            return len(inspect.getfullargspec(fun).args) > 0
        except: # python 2
            return len(inspect.getargspec(fun)[0]) > 0
    def __parse_options__(self, argv):
        # Delegate to the module-level getopt wrapper using the specs built above.
        return parseOptions(argv, self.__short_opts__, self.__long_opts__)
    def __handle_options__(self, options):
        # Dispatch each parsed flag to its registered handler.
        for opt, arg in options:
            instructions_method = self.__instructions_map__[opt]
            if arg: instructions_method(arg)
            else: instructions_method()
    def __documentation__(self):
        # Print one usage line per declared option: --long[=<arg>][, -s [<arg>]]: description
        for opt in self.__opts__:
            line = []
            variations = opt.split('_')
            attr = getattr(self, opt)
            has_arg = self.__has_arguments__(attr.instructions)
            long_opt = ['--', variations[0], '=<arg>'] if has_arg else ['--', variations[0]]
            if len(variations) > 1:
                short_opt = [', -', variations[1], ' <arg>'] if has_arg else [', -', variations[1]]
            else: short_opt = []
            description = [': ', attr.description]
            line += long_opt + short_opt + description
            print(''.join(line))
class DeclarativeCommands:
    """Base class that dispatches sub-commands declared as class attributes.

    Each attribute exposes ``instructions`` (callable taking the remaining
    argv) and ``description``; subclasses provide the fallbacks
    ``__default_unspecified_command__`` and ``__default_no_args__``.
    """

    def __list__(self):
        # Non-dunder attributes are the declared command names.
        return [name for name in dir(self) if not name.startswith('__')]

    def __documentation__(self):
        # One "<name>: <description>" line per declared command.
        for name in self.__list__():
            command_class = getattr(self, name)
            print('{}: {}'.format(command_class.__name__, command_class.description))

    def __run_command__(self, command, remainder):
        if not command:
            self.__default_no_args__()
        elif command in self.__list__():
            getattr(self, command).instructions(remainder)
        else:
            self.__default_unspecified_command__(command, remainder)
class DeclarativeCLI:
    """Glue object: parse options via self.Options, then dispatch the
    command via self.Commands.  Subclasses supply both nested classes."""

    def run(self, arguments):
        # Order is significant: Options is built and parses argv first,
        # Commands is built next, options are handled, then the command runs.
        self.opts = self.Options()
        parsed_options, command, remainder = self.opts.__parse_options__(arguments)
        self.cmds = self.Commands()
        self.opts.__handle_options__(parsed_options)
        self.cmds.__run_command__(command, remainder)
def header(msg):
    """Print *msg* as a cyan section header preceded by a separator rule."""
    cyan = '\033[0;36m'
    reset = '\033[0m'  # no color
    rule = '_____________________________________________________________________________'
    print(cyan + rule + reset)
    print(cyan + msg + reset)
def document(particular_doc):
    """Print a section header for *particular_doc* then its body().

    The header text is the doc's ``header`` attribute when present,
    otherwise its class name.
    """
    title = getattr(particular_doc, 'header', particular_doc.__name__)
    header(title)
    particular_doc.body()
|
<reponame>olafura/hwidgets<gh_stars>1-10
import gobject
from dbus.mainloop.glib import DBusGMainLoop
import dbus
import json
DBusGMainLoop(set_as_default=True)
import NetworkManager
from mapping import wifi_states
from PySide.QtCore import QObject, Slot, Signal
# A simple class simulating the access point information
class AP(object):
    """Plain snapshot of an access point's identifying fields.

    Used to remember Ssid/object_path after D-Bus has discarded the
    underlying access-point object (see the cache in Wifi).
    """
    # Class-level defaults for the two recorded fields.
    Ssid = ""
    object_path = ""

    def __init__(self, Ssid, object_path):
        self.Ssid = Ssid
        self.object_path = object_path
class Wifi(QObject):
    """Qt wrapper around the first NetworkManager wifi device.

    Emits ``on_wifi_status`` with a JSON-encoded list of access-point
    documents whenever access points are scanned, added or removed.
    """

    def __init__(self):
        super(Wifi, self).__init__()
        self.loop = gobject.MainLoop()
        self.bus = dbus.SystemBus()
        self.currentdev = None
        # Cache of AP snapshots keyed by D-Bus object path.  Needed because
        # dbus deletes the access-point information once the AP is removed.
        self.current_access_points = {}
        # Pick the first wifi-capable device, if any.
        for device in NetworkManager.NetworkManager.GetDevices():
            if device.DeviceType == NetworkManager.NM_DEVICE_TYPE_WIFI:
                self.currentdev = device.SpecificDevice()
                break
        # BUG FIX: these calls previously ran unconditionally and raised
        # AttributeError on machines without any wifi device.
        if self.currentdev is not None:
            self.currentdev.connect_to_signal("AccessPointAdded", self.handle_apadded)
            self.currentdev.connect_to_signal("AccessPointRemoved", self.handle_apremove)

    def getAP(self, point, active_op, available):
        """Build a status document for access point *point*.

        :param point: access point object (or cached AP snapshot)
        :param active_op: object path of the active AP (False on removal —
            by definition a removed AP cannot be the active one)
        :param available: whether the AP is currently visible
        """
        doc = {}
        doc["ssid"] = point.Ssid
        is_active = str(point.object_path) == str(active_op)
        doc["is_available"] = available
        doc["is_active"] = is_active
        if available:
            # Snapshot the AP so it can still be described after removal.
            new_ap = AP(str(point.Ssid), str(point.object_path))
            self.current_access_points[point.object_path] = new_ap
            doc["strength"] = int(point.proxy.Get(point.interface_name, "Strength", dbus_interface="org.freedesktop.DBus.Properties"))
        else:
            doc["strength"] = 0
        return doc

    @Slot(str)
    def checkAccessPoints(self, value):
        """Scan all visible access points and emit their status as JSON."""
        if self.currentdev is not None:
            try:
                active = self.currentdev.ActiveAccessPoint
                access_points = self.currentdev.GetAccessPoints()
            except dbus.exceptions.DBusException:
                return
            active_op = active.object_path
            # Collect everything into one emission so listeners get a single
            # bulk update instead of one event per access point.
            ap_docs = []
            for point in access_points:
                ap_docs.append(self.getAP(point, active_op, True))
            self.on_wifi_status.emit(json.dumps(ap_docs))

    def handle_apadded(self, ap):
        """Signal handler: a new access point appeared."""
        active = self.currentdev.ActiveAccessPoint
        active_op = active.object_path
        doc = self.getAP(ap, active_op, True)
        self.on_wifi_status.emit(json.dumps([doc]))

    def handle_apremove(self, ap):
        """Signal handler: an access point disappeared."""
        # Use the cached snapshot; dbus has already discarded the AP data.
        # NOTE(review): a KeyError is possible if the AP was never scanned —
        # confirm whether NetworkManager guarantees an Added/scan first.
        new_ap = self.current_access_points[ap.object_path]
        # A removed access point cannot be active, hence False/False.
        doc = self.getAP(new_ap, False, False)
        self.on_wifi_status.emit(json.dumps([doc]))

    # Emitted with a JSON-encoded list of access-point documents.
    on_wifi_status = Signal(str)
#checkAccessPoints()
#@Slot(str)
#def printWifi(value):
# print("printWifi")
# print(value)
#wifi = Wifi()
#wifi.on_wifi_status.connect(printWifi)
#wifi.checkAccessPoints()
#wifi.loop.run()
|
# Copyright (c) 2020 NVIDIA Corporation. All rights reserved.
# This work is licensed under the NVIDIA Source Code License - Non-commercial. Full
# text can be found in LICENSE.md
"""FCN config system.
This file specifies default config options for Fast R-CNN. You should not
change values in this file. Instead, you should write a config file (in yaml)
and use cfg_from_file(yaml_file) to load it and override the default options.
Most tools in $ROOT/tools take a --cfg option to specify an override file.
- See tools/{train,test}_net.py for example code that uses cfg_from_file()
- See experiments/cfgs/*.yml for example YAML config override files
"""
import os
import os.path as osp
import numpy as np
import math
# `pip install easydict` if you don't have it
from easydict import EasyDict as edict
# Default configuration values; override via cfg_from_file() (see module
# docstring) rather than editing these in place.
__C = edict()
# Consumers can get config by:
# from fast_rcnn_config import cfg
cfg = __C
__C.FLIP_X = False
# Input modality string fed to the network.
__C.INPUT = 'RGBD'
# Backbone network name.
__C.NETWORK = 'VGG16'
# Optional asset/config paths; empty string means "not set".
__C.RIG = ''
__C.CAD = ''
__C.POSE = ''
__C.BACKGROUND = ''
__C.USE_GPU_NMS = True
__C.MODE = 'TRAIN'
__C.ITERS = 0
# Camera intrinsics; empty tuple means "use dataset defaults" —
# NOTE(review): confirm against the loaders that consume it.
__C.INTRINSICS = ()
# Resolution used for optical-flow computation.
__C.FLOW_HEIGHT = 512
__C.FLOW_WIDTH = 640
# Anchor scales for RPN
__C.ANCHOR_SCALES = (8,16,32)
# Anchor ratios for RPN
__C.ANCHOR_RATIOS = (0.5,1,2)
__C.FEATURE_STRIDE = 16
#
# Training options
#
__C.TRAIN = edict()
__C.TRAIN.WEIGHT_DECAY = 0.0001
__C.TRAIN.SEGMENTATION = True
__C.TRAIN.ITERNUM = 4
__C.TRAIN.HEATUP = 4
__C.TRAIN.GPUNUM = 1
__C.TRAIN.CLASSES = (1,2,3)
__C.TRAIN.MAX_OBJECT_PER_IMAGE = 4
# NOTE: "PXIEL" is a long-standing typo ("PIXEL") kept for compatibility
# with existing YAML configs that override this key.
__C.TRAIN.MAX_PXIEL_PER_OBJECT = 1000
__C.TRAIN.SINGLE_FRAME = False
__C.TRAIN.TRAINABLE = True
__C.TRAIN.VERTEX_REG_2D = False
__C.TRAIN.VERTEX_REG_3D = False
__C.TRAIN.T_TRANSFORM_DEPTH = False
# Loss weights for the individual training objectives.
__C.TRAIN.LABEL_W = 1.0
__C.TRAIN.VERTEX_W = 5.0
__C.TRAIN.VERTEX_W_INSIDE = 10.0
__C.TRAIN.POSE_W = 1.0
__C.TRAIN.BOX_W = 1.0
__C.TRAIN.THRESHOLD_LABEL = 1.0
__C.TRAIN.VOTING_THRESHOLD = -1
__C.TRAIN.VISUALIZE = False
__C.TRAIN.GAN = False
__C.TRAIN.POSE_REG = False
__C.TRAIN.MATCHING = False
# synthetic training
__C.TRAIN.SYNTHESIZE = False
__C.TRAIN.SYN_ONLINE = False
__C.TRAIN.SYN_WIDTH = 640
__C.TRAIN.SYN_HEIGHT = 480
__C.TRAIN.SYNROOT = '/var/Projects/Deep_Pose/data/LOV/data_syn/'
# Fall back to the second machine-specific location when the first is absent.
if not os.path.exists(__C.TRAIN.SYNROOT):
    __C.TRAIN.SYNROOT = '/home/yuxiang/Projects/Deep_Pose/data/LOV/data_syn/'
__C.TRAIN.SYNITER = 0
__C.TRAIN.SYNNUM = 80000
__C.TRAIN.SYN_RATIO = 1
__C.TRAIN.SYN_CLASS_INDEX = 1
__C.TRAIN.SYN_TNEAR = 0.5
__C.TRAIN.SYN_TFAR = 2.0
__C.TRAIN.SYN_BACKGROUND_SPECIFIC = False
__C.TRAIN.SYN_BACKGROUND_SUBTRACT_MEAN = True
__C.TRAIN.SYN_BACKGROUND_CONSTANT_PROB = 0.1
__C.TRAIN.SYN_BACKGROUND_AFFINE = False
__C.TRAIN.SYN_SAMPLE_OBJECT = True
__C.TRAIN.SYN_SAMPLE_POSE = False
__C.TRAIN.SYN_STD_ROTATION = 15
__C.TRAIN.SYN_STD_TRANSLATION = 0.01
__C.TRAIN.SYN_CROP = False
__C.TRAIN.SYN_CROP_SIZE = 224
# domain adaptation
__C.TRAIN.ADAPT = False
__C.TRAIN.ADAPT_ROOT = ''
__C.TRAIN.ADAPT_NUM = 400
__C.TRAIN.ADAPT_RATIO = 1
__C.TRAIN.ADAPT_WEIGHT = 0.1
# learning rate
__C.TRAIN.OPTIMIZER = 'MOMENTUM'
__C.TRAIN.LEARNING_RATE = 0.0001
__C.TRAIN.MILESTONES = (100, 150, 200)
__C.TRAIN.MOMENTUM = 0.9
__C.TRAIN.BETA = 0.999
__C.TRAIN.GAMMA = 0.1
__C.TRAIN.SYMSIZE = 0
# voxel grid size
__C.TRAIN.GRID_SIZE = 256
# Scales to compute real features
__C.TRAIN.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# parameters for data augmentation
__C.TRAIN.CHROMATIC = True
__C.TRAIN.ADD_NOISE = False
# Images to use per minibatch
__C.TRAIN.IMS_PER_BATCH = 2 #deprecated
__C.TRAIN.MINIIMS_PER_IMS = 1
__C.TRAIN.NUM_STEPS = 5
__C.TRAIN.NUM_UNITS = 64
# Use horizontally-flipped images during training?
__C.TRAIN.USE_FLIPPED = True
# Iterations between snapshots
__C.TRAIN.SNAPSHOT_EPOCHS = 1
# solver.prototxt specifies the snapshot path prefix, this adds an optional
# infix to yield the path: <prefix>[_<infix>]_iters_XYZ.caffemodel
__C.TRAIN.SNAPSHOT_PREFIX = 'caffenet_fast_rcnn'
__C.TRAIN.SNAPSHOT_INFIX = ''
__C.TRAIN.DISPLAY = 20
# Whether to add ground truth boxes to the pool when sampling regions
__C.TRAIN.USE_GT = False
# Minibatch size (number of regions of interest [ROIs])
__C.TRAIN.BATCH_SIZE = 128
# Fraction of minibatch that is labeled foreground (i.e. class > 0)
__C.TRAIN.FG_FRACTION = 0.25
# Overlap threshold for a ROI to be considered foreground (if >= FG_THRESH)
__C.TRAIN.FG_THRESH = 0.5
__C.TRAIN.FG_THRESH_POSE = 0.2
# Overlap threshold for a ROI to be considered background (class = 0 if
# overlap in [LO, HI))
__C.TRAIN.BG_THRESH_HI = 0.5
__C.TRAIN.BG_THRESH_LO = 0.1
__C.TRAIN.ROBOT = ''
__C.TRAIN.BASE_LINK = 'panda_link0'
# Use RPN to detect objects
__C.TRAIN.HAS_RPN = True
# IOU >= thresh: positive example
__C.TRAIN.RPN_POSITIVE_OVERLAP = 0.7
# IOU < thresh: negative example
__C.TRAIN.RPN_NEGATIVE_OVERLAP = 0.3
# If an anchor satisfied by positive and negative conditions set to negative
__C.TRAIN.RPN_CLOBBER_POSITIVES = False
# Max number of foreground examples
__C.TRAIN.RPN_FG_FRACTION = 0.5
# Total number of examples
__C.TRAIN.RPN_BATCHSIZE = 256
# NMS threshold used on RPN proposals
__C.TRAIN.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TRAIN.RPN_PRE_NMS_TOP_N = 12000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TRAIN.RPN_POST_NMS_TOP_N = 2000
# Deprecated (outside weights)
__C.TRAIN.RPN_BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Give the positive RPN examples weight of p * 1 / {num positives}
# and give negatives a weight of (1 - p)
# Set to -1.0 to use uniform example weighting
__C.TRAIN.RPN_POSITIVE_WEIGHT = -1.0
# Normalize the targets (subtract empirical mean, divide by empirical stddev)
__C.TRAIN.BBOX_NORMALIZE_TARGETS = True
# Deprecated (inside weights)
__C.TRAIN.BBOX_INSIDE_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
# Normalize the targets using "precomputed" (or made up) means and stdevs
# (BBOX_NORMALIZE_TARGETS must also be True)
__C.TRAIN.BBOX_NORMALIZE_TARGETS_PRECOMPUTED = True
__C.TRAIN.BBOX_NORMALIZE_MEANS = (0.0, 0.0, 0.0, 0.0)
__C.TRAIN.BBOX_NORMALIZE_STDS = (0.1, 0.1, 0.2, 0.2)
__C.TRAIN.YCB_OCCLUDE = False
__C.TRAIN.OCCLUDER_NUM = 3
#
# Testing options
#
__C.TEST = edict()
__C.TEST.SEGMENTATION = True
__C.TEST.SINGLE_FRAME = False
__C.TEST.VERTEX_REG_2D = False
__C.TEST.VERTEX_REG_3D = False
__C.TEST.VISUALIZE = False
__C.TEST.RANSAC = False
__C.TEST.GAN = False
__C.TEST.POSE_REG = False
__C.TEST.POSE_REFINE = False
__C.TEST.SYNTHESIZE = False
__C.TEST.VOTING_THRESHOLD = -1
__C.TEST.IMS_PER_BATCH = 1
__C.TEST.CLASSES = (1,2,3)
__C.TEST.ROS_CAMERA = 'camera'
__C.TEST.ITERNUM = 4
__C.TEST.SYNNUM = 200
# Scales to compute real features
__C.TEST.SCALES_BASE = (0.25, 0.5, 1.0, 2.0, 3.0)
# voxel grid size
__C.TEST.GRID_SIZE = 256
# NMS threshold used on RPN proposals
__C.TEST.RPN_NMS_THRESH = 0.7
# Number of top scoring boxes to keep before apply NMS to RPN proposals
__C.TEST.RPN_PRE_NMS_TOP_N = 6000
# Number of top scoring boxes to keep after applying NMS to RPN proposals
__C.TEST.RPN_POST_NMS_TOP_N = 300
# Test using bounding-box regressors
__C.TEST.BBOX_REG = True
__C.TEST.INFIX = ''
# Overlap threshold used for non-maximum suppression (suppress boxes with
# IoU >= this threshold)
__C.TEST.NMS = 0.3
#
# Global (non-TRAIN/TEST) options
#
# Pixel mean values (BGR order) as a (1, 1, 3) array
# These are the values originally used for training VGG16
__C.PIXEL_MEANS = np.array([[[102.9801, 115.9465, 122.7717]]])
# For reproducibility
__C.RNG_SEED = 3
# A small number that's used many times
__C.EPS = 1e-14
# Root directory of project
__C.ROOT_DIR = osp.abspath(osp.join(osp.dirname(__file__), '..', '..'))
# Place outputs under an experiments directory
__C.EXP_DIR = 'default'
# Default GPU device id
__C.GPU_ID = 0
def get_output_dir(imdb, net):
    """Return the directory where experimental artifacts are placed.

    A canonical path is built as ROOT_DIR/output/EXP_DIR/<imdb name>, with
    the network name appended when *net* is not None.
    """
    base = osp.abspath(osp.join(__C.ROOT_DIR, 'output', __C.EXP_DIR, imdb.name))
    return base if net is None else osp.join(base, net)
def _merge_a_into_b(a, b):
    """Merge config dictionary a into config dictionary b, clobbering the
    options in b whenever they are also specified in a.

    Silently ignores *a* when it is not an edict.  Raises KeyError for keys
    unknown to *b* and ValueError on type mismatch (exact type identity, so
    e.g. an int override does not satisfy a float default).
    """
    if type(a) is not edict:
        return
    for k, v in a.items():
        # a must specify keys that are in b
        if k not in b:
            raise KeyError('{} is not a valid config key'.format(k))
        # the types must match, too
        if type(b[k]) is not type(v):
            raise ValueError(('Type mismatch ({} vs. {}) '
                              'for config key: {}').format(type(b[k]),
                                                           type(v), k))
        # recursively merge dicts
        if type(v) is edict:
            try:
                _merge_a_into_b(a[k], b[k])
            except:
                # Pinpoint which nested key failed, then propagate unchanged.
                print('Error under config key: {}'.format(k))
                raise
        else:
            b[k] = v
def cfg_from_file(filename):
    """Load a config file and merge it into the default options.

    :param filename: path to a YAML file of overrides.
    """
    import yaml
    with open(filename, 'r') as f:
        # safe_load: yaml.load without an explicit Loader is deprecated
        # (an error in PyYAML >= 6) and can construct arbitrary objects;
        # config files only need plain YAML.
        yaml_cfg = edict(yaml.safe_load(f))
    _merge_a_into_b(yaml_cfg, __C)
def yaml_from_file(filename):
    """Load a YAML file and return its contents as an edict (no merging).

    :param filename: path to a YAML file.
    """
    import yaml
    with open(filename, 'r') as f:
        # safe_load for the same reason as cfg_from_file: no-Loader
        # yaml.load is deprecated/removed and unsafe on untrusted input.
        yaml_cfg = edict(yaml.safe_load(f))
    return yaml_cfg
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import sys
sys.path.append("..")
from flask_restful import Resource, reqparse
from flask import g, request
from hackathon import api, RequiredFeature, Component
from hackathon.decorators import token_required, hackathon_name_required, admin_privilege_required
from hackathon.hackathon_response import not_found, bad_request, internal_server_error
__all__ = ["register_admin_routes"]
hackathon_manager = RequiredFeature("hackathon_manager")
register_manager = RequiredFeature("register_manager")
template_manager = RequiredFeature("template_manager")
azure_cert_management = RequiredFeature("azure_cert_management")
admin_manager = RequiredFeature("admin_manager")
expr_manager = RequiredFeature("expr_manager")
class AdminHackathonResource(Resource):
    """Resource for admin to create/update hackathon
    url path: /api/admin/hackathon
    """

    @hackathon_name_required
    def get(self):
        # g.hackathon is populated by the decorator before this runs.
        return g.hackathon.dic()

    @token_required
    def post(self):
        # Create a new hackathon from the JSON payload.
        args = request.get_json()
        return hackathon_manager.create_new_hackathon(args)

    @admin_privilege_required
    def put(self):
        # Update an existing hackathon from the JSON payload.
        args = request.get_json()
        return hackathon_manager.update_hackathon(args)

    def delete(self):
        # Deletion intentionally unimplemented.
        pass
class AdminHackathonListResource(Resource):
    """List the hackathons the authenticated user administers.
    url path: /api/admin/hackathon/list
    """

    @token_required
    def get(self):
        return hackathon_manager.get_permitted_hackathon_list_by_admin_user_id(g.user.id)
class HackathonCheckNameResource(Resource):
    """Check whether a hackathon name is still available.
    url path: /api/admin/hackathon/checkname
    """

    def get(self):
        # True when no hackathon with the given name exists yet.
        parse = reqparse.RequestParser()
        parse.add_argument('name', type=str, location='args', required=True)
        args = parse.parse_args()
        return hackathon_manager.get_hackathon_by_name(args['name']) is None
class AdminRegisterListResource(Resource):
    """List all registrations of the current hackathon.
    url path: /api/admin/registration/list
    """

    @admin_privilege_required
    def get(self):
        return register_manager.get_hackathon_registration()
class AdminRegisterResource(Resource):
    """CRUD operations on a single hackathon registration.
    url path: /api/admin/registration
    """

    def get(self):
        """Fetch a registration by id; 404 response when it doesn't exist."""
        parse = reqparse.RequestParser()
        parse.add_argument('id', type=int, location='args', required=True)  # register_id
        args = parse.parse_args()
        rel = register_manager.get_registration_by_id(args["id"])
        return rel.dic() if rel is not None else not_found("not found")

    @admin_privilege_required
    def post(self):
        # Create a registration for the current hackathon (from g).
        args = request.get_json()
        return register_manager.create_registration(g.hackathon, args)

    @admin_privilege_required
    def put(self):
        args = request.get_json()
        return register_manager.update_registration(args)

    @admin_privilege_required
    def delete(self):
        parse = reqparse.RequestParser()
        parse.add_argument('id', type=int, location='args', required=True)
        args = parse.parse_args()
        return register_manager.delete_registration(args)
class AdminHackathonTemplateListResource(Resource):
    """List the templates attached to the current hackathon.
    url path: /api/admin/hackathon/template/list
    """

    @hackathon_name_required
    def get(self):
        templates = template_manager.get_templates_by_hackathon_id(g.hackathon.id)
        # List comprehension instead of map(): identical on Python 2, and on
        # Python 3 a lazy map object is not JSON-serializable as a response.
        return [t.dic() for t in templates]
class AdminHackathonTemplateResource(Resource):
    """Attach or detach a template for the current hackathon.
    url path: /api/admin/hackathon/template
    """

    # create a hackathon-template relation for the hackathon
    @admin_privilege_required
    def post(self):
        args = request.get_json()
        if "template_name" not in args:
            return bad_request("template name invalid")
        return template_manager.add_template_to_hackathon(args['template_name'])

    # delete a hackathon-template relation for the hackathon
    @admin_privilege_required
    def delete(self):
        parse = reqparse.RequestParser()
        parse.add_argument('template_id', type=int, location='args', required=True)
        args = parse.parse_args()
        return template_manager.delete_template_from_hackathon(args['template_id'])
class ExperimentListResource(Resource):
    """List experiments of the current hackathon, optionally filtered.
    url path: /api/admin/experiment/list
    """

    @admin_privilege_required
    def get(self):
        parse = reqparse.RequestParser()
        # Both filters are optional query-string parameters (None when absent).
        parse.add_argument('user_name', type=str, location='args')
        parse.add_argument('status', type=int, location='args')
        args = parse.parse_args()
        return expr_manager.get_expr_list_by_hackathon_id(g.hackathon.id,
                                                          user_name=args['user_name'],
                                                          status=args['status'])
class AdminExperimentResource(Resource):
    """Admin control over experiments: start (POST) and stop (PUT).
    url path: /api/admin/experiment
    """

    @admin_privilege_required
    def post(self):
        """Start an experiment from the named template for the current admin."""
        args = request.get_json()
        if 'name' not in args:
            # Fixed duplicated word in the original message ("name name").
            return bad_request('template name invalid')
        template_name = args['name']
        user_id = g.user.id
        hackathon_name = g.hackathon.name
        return expr_manager.start_expr(hackathon_name, template_name, user_id)

    @admin_privilege_required
    def put(self):
        """Stop the experiment identified in the JSON payload."""
        args = request.get_json()
        if 'experiment_id' not in args:
            return bad_request('experiment id invalid')
        return expr_manager.stop_expr(args['experiment_id'])
class AdminAzureResource(Resource, Component):
    """Manage Azure certificates bound to the current hackathon.
    url path: /api/admin/azure
    """

    @hackathon_name_required
    def get(self):
        """Return the hackathon's certificates; 404 when none exist."""
        certificates = azure_cert_management.get_certificates(g.hackathon.name)
        if certificates is None:
            return not_found("no certificates")
        return certificates, 200

    @hackathon_name_required
    def post(self):
        """Create a certificate for the given subscription/management host."""
        args = request.get_json()
        if 'subscription_id' not in args or 'management_host' not in args:
            return bad_request("subscription_id or management_host invalid")
        subscription_id = args['subscription_id']
        management_host = args['management_host']
        try:
            azure_cert_url = azure_cert_management.create_certificate(subscription_id, management_host,
                                                                      g.hackathon.name)
            return {'azure_cert_url': azure_cert_url}, 200
        except Exception as err:
            # self.log presumably provided by the Component base — confirm.
            self.log.error(err)
            return internal_server_error('fail to create certificate due to [%s]' % err)

    @hackathon_name_required
    def delete(self):
        """Delete a certificate by id; 500 response when deletion fails."""
        args = request.get_json()
        if 'certificate_id' not in args:
            return bad_request("certificate_id invalid")
        certificate_id = args['certificate_id']
        if azure_cert_management.delete_certificate(certificate_id, g.hackathon.name):
            return {'message': 'certificate deleted'}, 200
        else:
            return internal_server_error("fail to delete certificate")
class HackathonFileResource(Resource):
    """Upload (POST) and delete hackathon files.
    url path: /api/admin/file
    """

    @admin_privilege_required
    def post(self):
        return hackathon_manager.upload_files()

    def delete(self):
        # TODO call storage api to delete file
        return True
class HackathonAdminListResource(Resource):
    """List administrators of the current hackathon.
    url path: /api/admin/hackathon/administrator/list
    """

    @hackathon_name_required
    def get(self):
        return admin_manager.get_hackathon_admins()
class HackathonAdminResource(Resource):
    """Create, update and delete hackathon administrators.
    url path: /api/admin/hackathon/administrator
    """

    @admin_privilege_required
    def post(self):
        args = request.get_json()
        return admin_manager.create_admin(args)

    @admin_privilege_required
    def put(self):
        args = request.get_json()
        return admin_manager.update_admin(args)

    @admin_privilege_required
    def delete(self):
        parse = reqparse.RequestParser()
        parse.add_argument('id', type=int, location='args', required=True)
        args = parse.parse_args()
        return admin_manager.delete_admin(args['id'])
def register_admin_routes():
    """Register all admin-site API routes on the shared `api` object.

    Called once at application start-up; routes map one-to-one onto the
    Resource classes defined above.
    """
    # hackathon api
    api.add_resource(AdminHackathonResource, "/api/admin/hackathon")
    api.add_resource(AdminHackathonListResource, "/api/admin/hackathon/list")
    api.add_resource(HackathonCheckNameResource, "/api/admin/hackathon/checkname")
    # registration APIs
    api.add_resource(AdminRegisterListResource, "/api/admin/registration/list")
    api.add_resource(AdminRegisterResource, "/api/admin/registration")
    # template APIs
    api.add_resource(AdminHackathonTemplateResource, "/api/admin/hackathon/template")
    api.add_resource(AdminHackathonTemplateListResource, "/api/admin/hackathon/template/list")
    # experiment APIs
    api.add_resource(AdminExperimentResource, "/api/admin/experiment")
    api.add_resource(ExperimentListResource, "/api/admin/experiment/list")
    # azure resources
    api.add_resource(AdminAzureResource, '/api/admin/azure')
    # file upload
    api.add_resource(HackathonFileResource, "/api/admin/file")
    # hackathon administrators
    api.add_resource(HackathonAdminListResource, "/api/admin/hackathon/administrator/list")
    api.add_resource(HackathonAdminResource, "/api/admin/hackathon/administrator")
#! /usr/bin/env python3
# main.py - Get contents of text files and output the most frequent "interesting" words to an HTML file
# Author - <NAME>
# Date - November 2020
from page import head, middle, tail
from nltk.stem import WordNetLemmatizer
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.corpus import stopwords, wordnet
from argparse import ArgumentParser
from os import path
import datetime
import webbrowser
import ntpath
import sys
import glob
import re
import nltk
# nltk.download() # uncomment to download nltk packages - only once
def get_wordnet_pos(word):
    """Map *word*'s POS tag to the WordNet constant lemmatize() accepts."""
    first_letter = nltk.pos_tag([word])[0][1][0].upper()
    pos_by_letter = {
        "J": wordnet.ADJ,
        "N": wordnet.NOUN,
        "V": wordnet.VERB,
        "R": wordnet.ADV,
    }
    # Default to NOUN for any tag outside the four mapped families.
    return pos_by_letter.get(first_letter, wordnet.NOUN)
def lemmatize(string):
    """Return the POS-aware lemma of every token in *string* as a list."""
    lemmatizer = WordNetLemmatizer()
    return [lemmatizer.lemmatize(token, get_wordnet_pos(token))
            for token in word_tokenize(string)]
def clean_string(fileContents):
    """Lower-case, strip punctuation, lemmatize and drop stop words."""
    # Collapse every run of non-word characters into a single space.
    collapsed = re.sub(r'\W+', ' ', fileContents.lower())
    return stop_words(lemmatize(collapsed))
def stop_words(words):
    """Return *words* minus NLTK English stop words and extra common words.

    Order of the surviving tokens is preserved.
    """
    excluded = set(stopwords.words('english'))
    # Domain-specific additions beyond the NLTK list; set.update replaces the
    # previous one-at-a-time add loop (duplicates like 'way'/'come' are
    # harmless in a set).
    excluded.update(['new', 'make', 'time', 'work', 'one', 'know', 'say', 'let', 'care', 'last', 'way', 'like', 'get', 'keep', 'give', 'could', 'way', 'come', 'well', 'need',
                     'see', 'go', 'take', 'must', 'many', 'also', 'u', 'want', 'come', 'think', 'today', 'every', 'even', 'told', 'two', 'long', 'tell', 'call', 'back', 'first',
                     'hard', 'day', 'end', 'look', 'saw'])
    return [word for word in words if word not in excluded]
def get_most_frequent(allWords, numResults):
    """Return up to *numResults* [word, count] pairs, most frequent first.

    Previously raised IndexError when fewer than numResults distinct words
    existed; the result is now simply truncated to what is available.
    """
    # Counter is the stdlib equivalent of nltk.FreqDist (FreqDist subclasses
    # Counter), so counting behavior is unchanged.
    from collections import Counter
    freq = Counter(allWords)
    # Sort by count in descending order (stable, so insertion order breaks ties
    # exactly as the original sorted() call did).
    interestingWords = [[w, freq[w]] for w in sorted(freq, key=freq.get, reverse=True)]
    return interestingWords[:numResults]
def read_file(file_name):
    """Return the contents of *file_name*, or '' when it cannot be read.

    Returning '' (instead of the previous implicit None) keeps downstream
    tokenization from crashing with a TypeError on unreadable files.
    """
    try:
        with open(file_name, encoding="utf8") as f:
            return f.read()
    except IOError as error:
        print("Caught IOError: {}".format(error))
        return ''
def get_all_words(files):
    """Collect the cleaned tokens from every file in *files* into one list."""
    words = []
    for file_path in files:
        contents = read_file(file_path)
        words.extend(clean_string(contents))
    return words
def get_files_sentences(files, words):
    """Map each frequent word to the [[file], [sentence]] pairs containing it.

    :param files: paths of the input text files.
    :param words: list of [word, count] pairs from get_most_frequent().
    :return: dict word -> list of [[filename], [sentence-with-<b>-tags]].
    """
    d = {}
    for currentFile in files:
        fileString = read_file(currentFile)
        sentences = sent_tokenize(fileString)
        for word in words:
            for sentence in sentences:
                # (?i) - case insensitive match on whole words only
                pattern = r'(?i)\b{}\b'.format(word[0])
                # Find word in sentence and add to dict
                if (re.findall(pattern, sentence)):
                    # Alter sentence to put <b> tags round the word.
                    # NOTE(review): this substitution has no \b anchors, so it
                    # also bolds the word when embedded inside a longer word,
                    # unlike the boundary-anchored match above — confirm intent.
                    src_str = re.compile(word[0], re.IGNORECASE)
                    sentence = src_str.sub("<b>"+word[0]+"</b>", sentence)
                    # strip the filename from the path
                    fpath, ftail = path.split(currentFile)
                    d.setdefault(word[0], []).append(
                        [[ftail], [sentence.strip()]])
    return d
def command_line():
    """Parse CLI arguments and collect the input file list.

    Returns (info, fileList, numResults, outputName) where info is an HTML
    fragment describing the run.  Exits the process when the directory is
    missing or contains no .txt files.
    """
    parser = ArgumentParser(
        description='Given a directory of .txt files, pull out the interesting words and display in HTML.')
    parser.add_argument("directory", type=str,
                        help="Name of the directory")  # Not optional
    parser.add_argument("-o", "--output", dest="output", type=str,
                        help="Name of the output file", default="interesting")  # optional
    parser.add_argument("-n", "--results", dest="results", type=int,
                        help="Number of interesting words to output", default=5)  # optional
    args = parser.parse_args()
    inputDir = args.directory
    outputName = args.output
    numResults = args.results
    if not (path.exists(inputDir)):
        print("Directory not found")
        sys.exit()
    filePath = path.join(".", inputDir, "*.txt")
    fileList = glob.glob(filePath)
    if not (fileList):
        print("Only accepts text files")
        sys.exit()
    print("Using '{}' the top {} interesting words can be found in '{}.html'.".format(
        inputDir, numResults, outputName))
    print("This will open automatically once processed.")
    # Get info to add to the output.
    # NOTE(review): "%dth" yields wrong ordinals for e.g. the 1st/2nd/3rd —
    # cosmetic only, kept as-is.
    currentDT = datetime.datetime.now()
    info = "<p>On the {} at {}, the top {} interesting words were found in the directory '{}' in these files:</p>".format(
        currentDT.strftime("%dth of %B %Y"), currentDT.strftime("%I:%M"), numResults, inputDir)
    info += "<ul>"
    for f in fileList:
        info += "<li>" + ntpath.basename(f) + "</li>"
    info += "</ul>"
    return info, fileList, numResults, outputName
def create_table(frequentWords, dResults):
    """Render one HTML table row per frequent word.

    Each row holds the word (with back-link anchor), the de-duplicated file
    names it appears in, and the de-duplicated sentences containing it.
    """
    rows = []
    for word, count in frequentWords:
        files_seen = set()
        sentences_seen = set()
        # dResults maps word -> list of [[filename], [sentence]] pairs;
        # sets collapse duplicate files/sentences, as before.
        for file_part, sentence_part in dResults.get(word):
            files_seen.add(file_part[0])
            sentences_seen.add(sentence_part[0])
        # 1st column: word (frequency) with a back-to-top link.
        cells = ['''<tr ><td ><h3><span id = "{}"></span>{} ({})</h3><p><a href="#top">Back</a></p></td>'''.format(
            word, word.capitalize(), count)]
        # 2nd column: file names.
        cells.append('''<td>''')
        for file_name in files_seen:
            cells.append('''<p>{}</p>'''.format(file_name))
        cells.append('''</td>''')
        # 3rd column: sentences.
        cells.append('''<td>''')
        for sentence in sentences_seen:
            cells.append('''<p>{}</p>'''.format(sentence))
        cells.append('''</td>''')
        cells.append('''</tr>''')
        rows.append(''.join(cells))
    return ''.join(rows)
def main():
    """Drive the pipeline: parse args, rank words, render and open the report."""
    # get user instructions
    info, fileList, numResults, outputName = command_line()
    # Get number(numResults) of most frequent results.
    frequentWords = get_most_frequent(get_all_words(fileList), numResults)
    # Add the ranked-word index to the output.
    info += "<p>The most frequent interesting words found are: </p><ol>"
    for w in frequentWords:
        info += '''<li><a href="#{}">{}({})</a></li>'''.format(
            w[0], w[0].capitalize(), w[1])
    info += "</ol>"
    # Extract locations and sentences.
    dResults = get_files_sentences(fileList, frequentWords)
    # Create table.
    table = create_table(frequentWords, dResults)
    destinationFile = outputName + ".html"
    # `with` guarantees the handle is closed even when the write fails
    # (the original open/close pair leaked the handle on error).
    with open(destinationFile, "w") as f:
        f.write(head + info + middle + table + tail)
    print("Opening generated file '{}'".format(destinationFile))
    webbrowser.open_new_tab(destinationFile)
if __name__ == "__main__":
main()
|
from contextlib import suppress
import logging
import os
from threading import Thread, current_thread
from tempfile import NamedTemporaryFile
from uuid import uuid4
import requests
import prometheus_metrics
import custom_parser
import target_worker
logger = logging.getLogger()
# Chunk size in bytes when streaming a response body to the temp file.
CHUNK = 10240
# Number of attempts _retryable makes before giving up.
MAX_RETRIES = 3
def _retryable(method: str, *args, **kwargs) -> requests.Response:
    """Issue an HTTP request through a session, retrying on failure.

    Invoke a "method" on "requests.session" with retry logic.
    :param method: "get", "post" etc.
    :param *args: Args for requests (first should be an URL, etc.)
    :param **kwargs: Kwargs for requests
    :return: Response object
    :raises: HTTPError when all requests fail
    """
    thread = current_thread()
    with requests.Session() as session:
        send = getattr(session, method)
        for attempt in range(MAX_RETRIES):
            try:
                response = send(*args, **kwargs)
                response.raise_for_status()
                return response
            except (requests.HTTPError, requests.ConnectionError) as e:
                logger.warning(
                    '%s: Request failed (attempt #%d), retrying: %s',
                    thread.name, attempt, str(e)
                )
    raise requests.HTTPError('All attempts failed')
def download_job(
        source_url: str,
        source_id: str,
        dest_url: str,
        b64_identity: str = None
) -> None:
    """Spawn a thread worker for data downloading task.

    Requests the data to be downloaded and passes it to the next service.
    :param source_url: Data source location
    :param source_id: Data identifier
    :param dest_url: Location where the collected data should be received
    :param b64_identity: Redhat Identity base64 string
    """
    # When source_id is missing, create our own
    source_id = source_id or str(uuid4())

    def worker_clustering(_clustering_info: dict) -> None:
        """Download, extract data and forward the content."""
        thread = current_thread()
        logger.debug('%s: Worker started', thread.name)
        # Fetch data
        prometheus_metrics.METRICS['gets'].inc()
        try:
            resp = _retryable('get', source_url, stream=True)
        except requests.HTTPError as exception:
            logger.error(
                '%s: Unable to fetch source data for "%s": %s',
                thread.name, source_id, exception
            )
            prometheus_metrics.METRICS['get_errors'].inc()
            return
        prometheus_metrics.METRICS['get_successes'].inc()
        # Spool the payload to a temp file so the parser can work from disk.
        try:
            with NamedTemporaryFile(delete=False) as tmp_file:
                file_name = tmp_file.name
                for chunk in filter(None, resp.iter_content(chunk_size=CHUNK)):
                    tmp_file.write(chunk)
        except IOError as exception:
            logger.error(
                '%s: Unable to create temp file for "%s": %s',
                thread.name, source_id, exception
            )
            return
        # Build the POST data object
        data = {
            'id': source_id,
            'data': custom_parser.parse(file_name),
        }
        # Pass to next service
        prometheus_metrics.METRICS['posts'].inc()
        try:
            resp = _retryable(
                'post',
                f'http://{dest_url}',
                json=data,
                headers={"x-rh-identity": b64_identity}
            )
            prometheus_metrics.METRICS['post_successes'].inc()
        except requests.HTTPError as exception:
            logger.error(
                '%s: Failed to pass data for "%s": %s',
                thread.name, source_id, exception
            )
            prometheus_metrics.METRICS['post_errors'].inc()
        # Cleanup the spool file; a failed removal is not fatal.
        with suppress(IOError):
            os.remove(file_name)
        logger.debug('%s: Done, exiting', thread.name)

    def worker_topology(topology_info: dict) -> None:
        """Download and forward the content."""
        thread = current_thread()
        logger.debug('%s: Worker started', thread.name)
        # Build the POST data object
        data = {
            'id': source_id,
            'data': {}
        }
        # One GET per entity listed in the topology queries.
        for entity in topology_info['queries'].keys():
            prometheus_metrics.METRICS['gets'].inc()
            query_string = topology_info['queries'][entity]
            try:
                resp = _retryable(
                    'get',
                    f'{topology_info["endpoint"]}/{entity}',
                    params={query_string: ''},
                    verify=False
                )
                data['data'][entity] = resp.json()
                prometheus_metrics.METRICS['get_successes'].inc()
            except requests.HTTPError as exception:
                prometheus_metrics.METRICS['get_errors'].inc()
                logger.error(
                    '%s: Unable to fetch source data for "%s": %s',
                    thread.name, source_id, exception
                )
                return
        # Pass to next service
        prometheus_metrics.METRICS['posts'].inc()
        try:
            resp = _retryable(
                'post',
                f'http://{dest_url}',
                json=data,
                headers={"x-rh-identity": b64_identity}
            )
            prometheus_metrics.METRICS['post_successes'].inc()
        except requests.HTTPError as exception:
            logger.error(
                '%s: Failed to pass data for "%s": %s',
                thread.name, source_id, exception
            )
            prometheus_metrics.METRICS['post_errors'].inc()
        logger.debug('%s: Done, exiting', thread.name)

    thread_mappings = {
        'worker_clustering': worker_clustering,
        'worker_topology': worker_topology
    }
    name = target_worker.NAME
    info = target_worker.INFO
    # BUG FIX: `Thread(target=thread_mappings[name](info))` *called* the
    # worker synchronously on the caller's thread and handed its None return
    # value to Thread.  Pass the callable and its argument instead so the
    # work actually runs on the new thread.
    thread = Thread(target=thread_mappings[name], args=(info,))
    thread.start()
|
#!/usr/bin/env python
# Copyright (C) 2017 Udacity Inc.
#
# This file is part of Robotic Arm: Pick and Place project for Udacity
# Robotics nano-degree program
#
# All Rights Reserved.
# Author: <NAME>
# Import modules
import rospy
import pcl
import numpy as np
import math
import ctypes
import struct
import sensor_msgs.point_cloud2 as pc2
import matplotlib.colors
import matplotlib.pyplot as plt
from sensor_msgs.msg import PointCloud2, PointField
from std_msgs.msg import Header
from random import randint
from rospy_message_converter import message_converter
import yaml
def random_color_gen():
    """ Generates a random color

        Args: None

        Returns:
            list: 3 elements, R, G, and B
    """
    return [randint(0, 255) for _ in range(3)]
def ros_to_pcl(ros_cloud):
    """ Converts a ROS PointCloud2 message to a pcl PointXYZRGB

        Args:
            ros_cloud (PointCloud2): ROS PointCloud2 message

        Returns:
            pcl.PointCloud_PointXYZRGB: PCL XYZRGB point cloud
    """
    cloud = pcl.PointCloud_PointXYZRGB()
    # Keep x, y, z and the packed rgb float for every valid point.
    cloud.from_list([[point[0], point[1], point[2], point[3]]
                     for point in pc2.read_points(ros_cloud, skip_nans=True)])
    return cloud
def pcl_to_ros(pcl_array):
    """ Converts a pcl PointXYZRGB to a ROS PointCloud2 message

        Args:
            pcl_array (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud

        Returns:
            PointCloud2: A ROS point cloud
    """
    ros_msg = PointCloud2()

    # Header: timestamp now, fixed "world" frame.
    ros_msg.header.stamp = rospy.Time.now()
    ros_msg.header.frame_id = "world"

    # Unorganized cloud: a single row of `size` points.
    ros_msg.height = 1
    ros_msg.width = pcl_array.size

    # Field layout: x/y/z floats at byte offsets 0/4/8, packed rgb at 16
    # (matches the 'ffffBBBBIII' record written below: the BBBB group
    # starts right after the 4 floats, i.e. at offset 16).
    ros_msg.fields.append(PointField(
        name="x",
        offset=0,
        datatype=PointField.FLOAT32, count=1))
    ros_msg.fields.append(PointField(
        name="y",
        offset=4,
        datatype=PointField.FLOAT32, count=1))
    ros_msg.fields.append(PointField(
        name="z",
        offset=8,
        datatype=PointField.FLOAT32, count=1))
    ros_msg.fields.append(PointField(
        name="rgb",
        offset=16,
        datatype=PointField.FLOAT32, count=1))

    ros_msg.is_bigendian = False
    # 32 bytes per point: 4 floats + 4 bytes + 3 uint32 padding words.
    ros_msg.point_step = 32
    ros_msg.row_step = ros_msg.point_step * ros_msg.width * ros_msg.height
    ros_msg.is_dense = False
    buffer = []

    for data in pcl_array:
        # Reinterpret the packed-float rgb channel (data[3]) as its raw
        # 32-bit integer, then extract channels from the 0x00RRGGBB layout.
        s = struct.pack('>f', data[3])
        i = struct.unpack('>l', s)[0]
        pack = ctypes.c_uint32(i).value
        r = (pack & 0x00FF0000) >> 16
        g = (pack & 0x0000FF00) >> 8
        b = (pack & 0x000000FF)
        # One 32-byte record: x, y, z, 1.0, then b/g/r/0 and zero padding.
        buffer.append(struct.pack('ffffBBBBIII', data[0], data[1], data[2], 1.0, b, g, r, 0, 0, 0, 0))

    # NOTE(review): joining the binary chunks with "".join only works on
    # Python 2 (str == bytes); Python 3 would need b"".join — confirm the
    # target runtime before porting.
    ros_msg.data = "".join(buffer)

    return ros_msg
def XYZRGB_to_XYZ(XYZRGB_cloud):
    """ Converts a PCL XYZRGB point cloud to an XYZ point cloud (removes color info)

        Args:
            XYZRGB_cloud (PointCloud_PointXYZRGB): A PCL XYZRGB point cloud

        Returns:
            PointCloud_PointXYZ: A PCL XYZ point cloud
    """
    xyz_cloud = pcl.PointCloud()
    # Drop the packed rgb channel, keeping only the coordinates.
    xyz_cloud.from_list([[point[0], point[1], point[2]]
                         for point in XYZRGB_cloud])
    return xyz_cloud
def XYZ_to_XYZRGB(XYZ_cloud, color):
    """ Converts a PCL XYZ point cloud to a PCL XYZRGB point cloud

        All returned points in the XYZRGB cloud will be the color indicated
        by the color parameter.

        Args:
            XYZ_cloud (PointCloud_XYZ): A PCL XYZ point cloud
            color (list): 3-element list of integers [0-255,0-255,0-255]

        Returns:
            PointCloud_PointXYZRGB: A PCL XYZRGB point cloud
    """
    colored_cloud = pcl.PointCloud_PointXYZRGB()
    # Every point gets the same packed-float color.
    packed_color = rgb_to_float(color)
    colored_cloud.from_list([[point[0], point[1], point[2], packed_color]
                             for point in XYZ_cloud])
    return colored_cloud
def rgb_to_float(color):
    """ Converts an RGB list to the packed float format used by PCL

        From the PCL docs:
        "Due to historical reasons (PCL was first developed as a ROS package),
         the RGB information is packed into an integer and casted to a float"

        Args:
            color (list): 3-element list of integers [0-255,0-255,0-255]

        Returns:
            float_rgb: RGB value packed as a float
    """
    # Assemble the 0x00RRGGBB integer, then reinterpret its bits as a float.
    packed = ((0xff & color[0]) << 16) | ((0xff & color[1]) << 8) | (0xff & color[2])
    return struct.unpack('f', struct.pack('i', packed))[0]
def float_to_rgb(float_rgb):
    """ Converts a packed float RGB format to an RGB list

        Args:
            float_rgb: RGB value packed as a float

        Returns:
            color (list): 3-element list of integers [0-255,0-255,0-255]
    """
    # Recover the raw 32-bit pattern of the float, then slice out channels
    # from the 0x00RRGGBB layout.
    raw = struct.pack('>f', float_rgb)
    packed = ctypes.c_uint32(struct.unpack('>l', raw)[0]).value
    return [(packed >> 16) & 0xFF, (packed >> 8) & 0xFF, packed & 0xFF]
def get_color_list(cluster_count):
    """ Returns a list of randomized colors, cached across calls

        Args:
            cluster_count (int): Number of random colors to generate

        Returns:
            (list): List containing 3-element color lists
    """
    # Lazily initialize the cache: the original raised AttributeError if the
    # caller never set get_color_list.color_list before the first call.
    if not hasattr(get_color_list, 'color_list'):
        get_color_list.color_list = []
    # Grow the cache only when more colors are requested than are cached.
    # (Replaces the Python-2-only xrange loop with a portable while loop.)
    while len(get_color_list.color_list) < cluster_count:
        get_color_list.color_list.append(random_color_gen())
    return get_color_list.color_list
# Helper function to create a yaml friendly dictionary from ROS messages
def make_yaml_dict(test_scene_num, arm_name, object_name, pick_pose, place_pose):
    """Create a yaml-friendly dictionary from ROS messages.

    Scalar messages contribute their .data payload; poses are converted to
    plain dictionaries.
    """
    to_dict = message_converter.convert_ros_message_to_dictionary
    return {
        "test_scene_num": test_scene_num.data,
        "arm_name": arm_name.data,
        "object_name": object_name.data,
        "pick_pose": to_dict(pick_pose),
        "place_pose": to_dict(place_pose),
    }
# Helper function to output to yaml file
def send_to_yaml(yaml_filename, dict_list):
    """Write the object dictionaries to a yaml file under an object_list key."""
    with open(yaml_filename, 'w') as outfile:
        yaml.dump({"object_list": dict_list}, outfile, default_flow_style=False)
#############################################################################
# Some extra helper functions needed for the perception pipeline
# Author: <NAME> - a.k.a Daru
#############################################################################
"""
Transforms one histogram to another with smaller bin size
: param hist : source histogram
: param nbins : target number of bins of the transformed histogram
"""
def hist2hist( hist, nbins ) :
    """Transform a histogram into one with fewer bins by summing source bins.

    :param hist: source histogram (1-D array of bin counts)
    :param nbins: target number of bins (must be <= len(hist))
    """
    assert ( len( hist ) >= nbins )
    # NOTE(review): the range used here is the min/max of the bin COUNTS,
    # not of the underlying data values — confirm this is intended.
    _rmin = np.min( hist )
    _rmax = np.max( hist )
    _newhist = np.zeros( nbins )
    _newedges = np.linspace( _rmin, _rmax, num = ( nbins + 1 ), endpoint = True )
    # compute bin sizes, new and old, for indexing
    _newbinsize = ( _rmax - _rmin ) / nbins
    _oldbinsize = ( _rmax - _rmin ) / len( hist )
    for i in range( nbins ) :
        # Map each new bin's edges onto source-bin index positions.
        # NOTE(review): dividing the edge value itself (rather than
        # edge - _rmin) by the old bin size is only correct when _rmin == 0;
        # for _rmin != 0 the indices look shifted — verify before reuse.
        _startIndx = int( math.floor( _newedges[i] / _oldbinsize ) )
        _stopIndx = int( math.floor( _newedges[i + 1] / _oldbinsize ) - 1 )
        _newhist[i] = hist[ _startIndx : ( _stopIndx + 1 ) ].sum()
    return _newhist
"""
Plots a histogram returned from numpy.histogram
Adapted from this post: https://stackoverflow.com/questions/5328556/histogram-matplotlib
: param hist : numpy histogram
: param rmin : min range for the values of the histogram
: param rmax : max range for the values of the histogram
: param title : optional title for the histogram
"""
def plotHistogram(hist, rmin, rmax, title='empty title'):
    """Plot a numpy histogram as a bar chart.

    Adapted from: https://stackoverflow.com/questions/5328556/histogram-matplotlib

    :param hist: numpy histogram (bin counts)
    :param rmin: min of the histogram's value range
    :param rmax: max of the histogram's value range
    :param title: optional title for the figure
    """
    edges = np.linspace(rmin, rmax, num=len(hist) + 1, endpoint=True)
    centers = (edges[:-1] + edges[1:]) / 2.0
    plt.figure()
    plt.bar(centers, hist, align='center', width=np.diff(edges))
    plt.title(title)
    # plt.xticks( edges )
"""
Normalizes a histogram to have cumsum = 1 ( percentages instead of frequencies )
: param hist : histogram to normalize
"""
def normalizeHistogram(hist):
    """Scale the histogram so its entries sum to 1 (fractions, not counts)."""
    total = np.sum(hist)
    return hist / float(total)
#############################################################################
# Some extra helper functions needed for the perception pipeline
# Author: <NAME> - a.k.a Daru
#############################################################################
"""
Converts a list of rgb values to a list of hsv values
: param rgbList : rgb list ( 0 - 255 ) to convert to hsv
"""
def rgb2hsv(rgbList):
    """Convert an RGB color with 0-255 channels to normalized HSV.

    :param rgbList: [R, G, B] with each channel in 0-255
    """
    normalized = [channel / 255.0 for channel in rgbList[:3]]
    # rgb_to_hsv expects an image-shaped array; wrap as a 1x1 "image".
    return matplotlib.colors.rgb_to_hsv([[normalized]])[0][0]
"""
Computes a normalized feature vector ...
from the histograms of the buffers in buffer_list
:param buffer_list: a list of the buffers to use for the histograms
:param nbins: number of bins to generate the histograms
"""
def _featuresFromBuffers( buffer_list, nbins, ranges ) :
# compute histograms
_hists = []
for _buffer in buffer_list :
_hist, _ = np.histogram( _buffer, bins = nbins, range = ranges )
_hists.append( _hist )
# concatenate into single feature vector
_featureVector = np.concatenate( _hists ).astype( np.float64 )
# normalize feature vector
_normalizedFeatureVector = _featureVector / np.sum( _featureVector )
return _normalizedFeatureVector
"""
Computes a feature vector from the color histograms of the given cloud
:param cloud : ros cloud with color information on it
:param using_hsv : flag to whether or not to use hsv colorspace instead
:param nbins : number of bins to use as the size of the histogram
"""
def computeColorHistograms(cloud, using_hsv=True, nbins=255):
    """Compute a normalized color-histogram feature vector for a cloud.

    :param cloud: ros cloud with color information on it
    :param using_hsv: whether to convert colors to HSV first
    :param nbins: number of bins per channel histogram
    """
    channels = ([], [], [])
    # Step through each point, unpack its color and split it per channel.
    for point in pc2.read_points(cloud, skip_nans=True):
        rgb = float_to_rgb(point[3])
        color = rgb2hsv(rgb) * 255 if using_hsv else rgb
        for values, component in zip(channels, color):
            values.append(component)
    # Feature vector over the 0-255 channel range.
    return _featuresFromBuffers(list(channels), nbins, (0., 255.))
"""
Computes a feature vector from the normals histograms of the given cloud
:param cloud : ros cloud with normals information on it
:param nbins : number of bins to use as the size of the histogram
"""
def computeNormalHistograms(normal_cloud, nbins=250):
    """Compute a normalized feature vector from surface-normal histograms.

    :param normal_cloud: ros cloud with normal_x/normal_y/normal_z fields
    :param nbins: number of bins per axis histogram
    """
    axes = ([], [], [])
    for nx, ny, nz in pc2.read_points(normal_cloud,
                                      field_names=('normal_x',
                                                   'normal_y',
                                                   'normal_z'),
                                      skip_nans=True):
        axes[0].append(nx)
        axes[1].append(ny)
        axes[2].append(nz)
    # Normal components lie in [-1, 1].
    return _featuresFromBuffers(list(axes), nbins, (-1., 1.))
|
<gh_stars>10-100
from api.Repositories.TrackRepository import TrackRepository
from api.Services.AudioFeatureAttacherService import AudioFeatureAttacherService
from api.Services.GenreLabelAttacherService import GenreLabelAttacherService
from api.Spotify.SpotifyAPI import SpotifyAPIAccess
from sklearn.metrics.pairwise import cosine_similarity
import random
import numpy as np
class TrackRecommender:
    """Recommend Spotify tracks similar to a user's most-played songs.

    Candidates are restricted to tracks sharing the same predicted genre and
    audio-feature cluster labels; ranking uses cosine similarity over the
    Spotify audio features.
    """

    # Ordered audio-feature keys used to build a track's feature vector.
    FEATURE_KEYS = ('acousticness', 'danceability', 'duration_ms', 'energy',
                    'instrumentalness', 'liveness', 'loudness', 'mode',
                    'speechiness', 'tempo', 'valence')

    def __init__(self):
        self.audio_feature_attacher = AudioFeatureAttacherService()
        self.genre_label_attacher = GenreLabelAttacherService()
        self.track_repository = TrackRepository()
        self.spotify_api = SpotifyAPIAccess()
        self.user_id = None

    def init_label_values(self, tracks):
        """Attach predicted genre and audio-feature cluster labels to tracks."""
        self.genre_label_attacher.predict_genre_label(tracks)
        self.audio_feature_attacher.predict_audio_features_label(tracks)

    def take_audio_features(self, track):
        """Return a 1-D array: the track id followed by its 11 audio features.

        Note: mixing the string id with numeric features makes numpy coerce
        every element to a string; callers must strip the id and cast back
        to float before doing any math (see recommend_tracks).
        """
        return np.array([track['id']] +
                        [track[key] for key in self.FEATURE_KEYS])

    def search_track_by_id(self, track_id, tracks):
        """Return the track dict with the given id, or None when absent."""
        for track in tracks:
            if track['id'] == track_id:
                return track
        return None

    def recommend_tracks(self, token):
        """Build and save a playlist of recommendations for the user.

        :param token: Spotify API access token
        :return: list of recommended track dicts (each gets an '_id' key)
        """
        tracks = self.spotify_api.get_most_songs_according_to_user(token)
        self.init_label_values(tracks)
        self.track_repository.save_multiple_tracks_gecici(tracks)

        recommended_songs = []
        track_uris = []
        seen_ids = []
        for track in tracks:
            genre_label = int(track['genre_labels'])
            audio_label = int(track['audio_features_label'])
            track_uris.append(track['track_uri'])

            candidates = self.track_repository.find_by_genre_and_music_labels(
                genre_label, audio_label)
            # Cap the candidate pool to keep similarity computation cheap.
            if len(candidates) > 40:
                candidates = random.sample(candidates, 40)

            # Strip the id and cast back to float: take_audio_features
            # returns an all-string array (see its docstring).
            track_features = np.delete(
                self.take_audio_features(track), 0).astype(np.float64)
            track_features = track_features.reshape(1, -1)

            cos_sim = []
            # BUG FIX: the original reused `track` as the inner loop
            # variable, shadowing the seed track.
            for candidate in candidates:
                candidate_features = self.take_audio_features(candidate)
                candidate_id = candidate_features[0]
                candidate_features = np.delete(
                    candidate_features, 0).astype(np.float64).reshape(1, -1)
                track_uris.append(f"spotify:track:{candidate_id}")
                similarity = cosine_similarity(candidate_features,
                                               track_features)
                cos_sim.append((float(similarity), candidate_id))

            # Keep the (at most) 5 most similar, de-duplicated candidates.
            cos_sim.sort(reverse=True)
            for _similarity, candidate_id in cos_sim[:5]:
                if candidate_id not in seen_ids:
                    selected_track = self.search_track_by_id(
                        candidate_id, candidates)
                    selected_track['_id'] = candidate_id
                    recommended_songs.append(selected_track)
                    seen_ids.append(candidate_id)

        self.spotify_api.create_playlist(token, list(set(track_uris)))
        return recommended_songs
|
# -*- coding: utf-8 -*-
import dataiku
import pandas as pd, numpy as np
import time
import json
import requests
from datetime import datetime
from dataiku.customrecipe import *
import dataiku_esri_content_utils
import dataiku_esri_utils
from dataiku_esri_utils import recipe_config_get_str_or_none
import common, enrichment
# Recipe configuration: ArcGIS credentials and country-selection options.
P_USERNAME = get_recipe_config()['username']
P_PASSWORD = get_recipe_config()['password']
P_TOKEN_EXPIRATION = int(get_recipe_config()["token_expiration"])
# Either a country column from an input dataset, or a fixed user list.
P_COLUMN_COUNTRY = recipe_config_get_str_or_none("country")
P_USER_COUNTRY_LIST = recipe_config_get_str_or_none("user_country_list")
# Authenticate once; app_token is reused for every API call below.
(app_token,_) = dataiku_esri_utils.get_token_from_login_password(P_USERNAME,P_PASSWORD,P_TOKEN_EXPIRATION)
def get_layer_name(country_name, app_token):
    """Fetch the ESRI standard geography levels for a country as JSON.

    :param country_name: country label understood by the ESRI API
                         (e.g. 'The Former Yugoslav Republic of Macedonia')
    :param app_token: ArcGIS authentication token
    """
    call_url = 'https://geoenrich.arcgis.com/arcgis/rest/services/World/geoenrichmentserver/Geoenrichment/StandardGeographyLevels/' + country_name
    params = {'f': 'json', 'token': app_token}
    response = requests.get(call_url, params=params)
    return response.json()
def get_datacollections(country_name, app_token):
    """Fetch the ESRI data collections available for a country as JSON.

    :param country_name: country label understood by the ESRI API
    :param app_token: ArcGIS authentication token
    """
    call_url = 'https://geoenrich.arcgis.com/arcgis/rest/services/World/geoenrichmentserver/Geoenrichment/dataCollections/' + country_name
    params = {
        'token': app_token,
        'forStorage': common.FOR_STORAGE,
        'f': 'json',
    }
    return requests.post(call_url, params=params).json()
# Open output handle
output_results = get_output_names_for_role('output')[0]
result_dataset = dataiku.Dataset(output_results)

# Get the input list of countries.
# NOTE(review): the bare except treats "no dataset bound to the role" as the
# no-input case, but it also hides genuine read errors — confirm intent.
try:
    input_name = get_input_names_for_role('input_countries')[0]
    df = dataiku.Dataset(input_name).get_dataframe()
    is_input = True
except:
    is_input = False

if is_input is True:
    if P_COLUMN_COUNTRY is not None:
        # Drop rows without a country and de-duplicate the remaining values.
        df = df[df[P_COLUMN_COUNTRY].notnull()]
        country_list = df[P_COLUMN_COUNTRY].drop_duplicates().values.tolist()
    else:
        raise ValueError("The country column parameter is required when using an input dataset")
else:
    try:
        # SECURITY: eval() executes arbitrary code from a user-supplied
        # string; json.loads or ast.literal_eval would be safer here.
        country_list = eval(P_USER_COUNTRY_LIST)
    except:
        raise ValueError("You have an issue in your country list")

df_main = pd.DataFrame()

'''
input_name = get_input_names_for_role('input_countries')[0]
if len(inputs) >0:
    input_name =inputs[0]
    df = dataiku.Dataset(input_name).get_dataframe()
    if P_COLUMN_COUNTRY is None:
        raise ValueError("The country column parameter is required when using an input dataset")
    df = df[df[P_COLUMN_COUNTRY].notnull()]
    country_list = df[P_COLUMN_COUNTRY].drop_duplicates().values.tolist()
else:
    if P_COLUMN_COUNTRY is not None:
        raise ValueError("No input countries dataset, you need to specify a fixed list of countries")
    country_list = json.loads(P_USER_COUNTRY_LIST)
df_main = pd.DataFrame()
'''

# Main work loop
for c in country_list:
    # Python 2 print statement: this recipe targets a Python 2 runtime.
    print 'Processing this country: %s' % (c)
    api_result = get_datacollections(c,app_token)
    if not "DataCollections" in api_result:
        raise ValueError("No DataCollections returned for country %s: got answer=%s"% (c, api_result))
    # Column accumulator: one row per data collection of this country.
    t = {'collection_id':[], 'country':[],'collection_hierarchie':[] ,'collection_long_description':[],'collection_keywords':[] }
    for i in range(0,len(api_result[u'DataCollections'])):
        collection_id = api_result[u'DataCollections'][i][u'dataCollectionID']
        collection_hierarchie = api_result[u'DataCollections'][i]['metadata'][u'hierarchies'] #.split(',')[0]
        t['collection_id'].append(collection_id)
        t['country'].append(c)
        t['collection_hierarchie'].append(collection_hierarchie)
        # Optional metadata fields: fall back to '' when absent.
        try:
            collection_longdescription = api_result[u'DataCollections'][i]['metadata'][u'longDescription']
            t['collection_long_description'].append(collection_longdescription)
        except:
            t['collection_long_description'].append('')
        try:
            collection_kw = api_result[u'DataCollections'][i]['metadata'][u'keywords']
            t['collection_keywords'].append(collection_kw)
        except:
            t['collection_keywords'].append('')
        '''
        collection_longdescription = api_result[u'DataCollections'][i]['metadata'].get('longDescription', None)
        collection_kw = api_result[u'DataCollections'][i]['metadata'].get('keywords', None)
        '''
    df_collections = pd.DataFrame(t)

    # Fetch the geography-level hierarchy for this country and flatten it
    # into one row per (hierarchy, level).
    data_layer = get_layer_name(c,app_token) #dataiku_esri_utils.
    df_layer=pd.DataFrame()
    try:
        hn = len(data_layer[u'geographyLevels'][0]['hierarchies'])
        for h in range(0,hn):
            layer_n = len(data_layer[u'geographyLevels'][0]['hierarchies'][h][u'levels'])
            for iln in range(0,layer_n):
                esri_dataset = data_layer[u'geographyLevels'][0]['hierarchies'][h][u'ID']
                layer_name = data_layer[u'geographyLevels'][0]['hierarchies'][h][u'levels'][iln][u'name']
                layer_id = data_layer[u'geographyLevels'][0]['hierarchies'][h][u'levels'][iln][u'id']
                layer_tmp = [c,esri_dataset,layer_id,layer_name]
                df_layer_tmp = pd.DataFrame(layer_tmp).T
                df_layer_tmp.columns= ['country','esri_dataset','layer_id', 'layer_name']
                df_layer = pd.concat((df_layer, df_layer_tmp), axis=0)
    except:
        # NOTE(review): any failure above yields a single empty placeholder
        # row for the country; this also silences malformed answers — confirm.
        df_layer_tmp=pd.DataFrame({'country':[c],'esri_dataset':[''],'layer_id':[''], 'layer_name':['']})
        df_layer = pd.concat((df_layer, df_layer_tmp), axis=0)

    # Join layers with collections on country + hierarchy id.
    df_all = pd.merge(left=df_layer,right=df_collections, how='left' , left_on=['country','esri_dataset'], right_on=['country','collection_hierarchie'])
    df_main = pd.concat((df_main, df_all), axis=0)

result_dataset.write_with_schema(df_main)
<reponame>mukundv-chrome/clusterfuzz
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for libFuzzer engine."""
# pylint: disable=unused-argument
from future import standard_library
standard_library.install_aliases()
import os
import mock
import pyfakefs.fake_filesystem_unittest as fake_fs_unittest
from bot.fuzzers.libFuzzer import engine
from bot.fuzzers.libFuzzer import launcher
from system import new_process
from tests.test_libs import helpers as test_helpers
from tests.test_libs import test_utils
# Directory holding canned libFuzzer output fixtures used by these tests.
TEST_DIR = os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'launcher_test_data')
class PrepareTest(fake_fs_unittest.TestCase):
  """Prepare() tests."""

  def setUp(self):
    # Set up fake filesystem.
    test_helpers.patch_environ(self)
    test_utils.set_up_pyfakefs(self)

    test_helpers.patch(self, [
        'bot.fuzzers.engine_common.unpack_seed_corpus_if_needed',
    ])

    # Fake fuzz target, dictionary, seed corpus and options file.
    self.fs.create_dir('/inputs')
    self.fs.create_file('/path/target')
    self.fs.create_file('/path/blah.dict')
    self.fs.create_file('/path/target_seed_corpus.zip')
    self.fs.create_file(
        '/path/target.options',
        contents=('[libfuzzer]\n'
                  'max_len=31337\n'
                  'timeout=11\n'
                  'dict=blah.dict\n'))

    os.environ['FUZZ_INPUTS_DISK'] = '/inputs'

    # Pin the strategy selection so prepare() results are deterministic.
    test_helpers.patch(self, ['bot.fuzzers.libFuzzer.launcher.pick_strategies'])

    self.mock.pick_strategies.return_value = launcher.StrategyInfo(
        fuzzing_strategies=['strategy1', 'strategy2'],
        arguments=['-arg1'],
        additional_corpus_dirs=['/new_corpus_dir'],
        extra_env={'extra_env': '1'},
        use_dataflow_tracing=False,
        is_mutations_run=True)

  def test_prepare(self):
    """Test prepare."""
    engine_impl = engine.LibFuzzerEngine()
    options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
    self.assertEqual('/corpus_dir', options.corpus_dir)
    # Arguments from target.options plus the mocked strategy argument; the
    # relative dict path is resolved against the target's directory.
    self.assertItemsEqual([
        '-max_len=31337', '-timeout=11', '-rss_limit_mb=2048', '-arg1',
        '-dict=/path/blah.dict'
    ], options.arguments)
    self.assertItemsEqual(['strategy1', 'strategy2'], options.strategies)
    self.assertItemsEqual(['/new_corpus_dir', '/corpus_dir'],
                          options.fuzz_corpus_dirs)
    self.assertDictEqual({'extra_env': '1'}, options.extra_env)
    self.assertFalse(options.use_dataflow_tracing)
    self.assertTrue(options.is_mutations_run)

    self.mock.unpack_seed_corpus_if_needed.assert_called_with(
        '/path/target', '/corpus_dir')

  def test_prepare_invalid_dict(self):
    """Test prepare with an invalid dict path."""
    # Overwrite the options file with a dict entry that does not exist on
    # the fake filesystem; the -dict argument should be dropped.
    with open('/path/target.options', 'w') as f:
      f.write('[libfuzzer]\n'
              'max_len=31337\n'
              'timeout=11\n'
              'dict=not_exist.dict\n')
    engine_impl = engine.LibFuzzerEngine()
    options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
    self.assertItemsEqual(
        ['-max_len=31337', '-timeout=11', '-rss_limit_mb=2048', '-arg1'],
        options.arguments)

  def test_prepare_auto_add_dict(self):
    """Test prepare automatically adding dict argument."""
    # No dict entry in the options file, but a <target>.dict file exists, so
    # prepare() should add the -dict argument automatically.
    with open('/path/target.options', 'w') as f:
      f.write('[libfuzzer]\n' 'max_len=31337\n' 'timeout=11\n')
    self.fs.create_file('/path/target.dict')

    engine_impl = engine.LibFuzzerEngine()
    options = engine_impl.prepare('/corpus_dir', '/path/target', '/path')
    self.assertItemsEqual([
        '-max_len=31337', '-timeout=11', '-rss_limit_mb=2048', '-arg1',
        '-dict=/path/target.dict'
    ], options.arguments)
class FuzzTest(fake_fs_unittest.TestCase):
  """Fuzz() tests."""

  def setUp(self):
    # Set up fake filesystem.
    test_helpers.patch_environ(self)
    test_utils.set_up_pyfakefs(self)

    self.fs.create_dir('/corpus')
    self.fs.create_dir('/fuzz-inputs')
    self.fs.create_dir('/fake')
    self.fs.create_file('/target')
    # Expose the real fixture directory (canned launcher output) inside the
    # fake filesystem.
    self.fs.add_real_directory(TEST_DIR)

    test_helpers.patch(self, [
        'bot.fuzzers.libfuzzer.LibFuzzerRunner.fuzz',
        'bot.fuzzers.libfuzzer.LibFuzzerRunner.merge',
        'os.getpid',
    ])

    os.environ['JOB_NAME'] = 'libfuzzer_asan_job'
    os.environ['FUZZ_INPUTS_DISK'] = '/fuzz-inputs'

    # Fixed pid so temp paths like /fuzz-inputs/temp-9001 are predictable.
    self.mock.getpid.return_value = 9001

    self.maxDiff = None  # pylint: disable=invalid-name

  def test_fuzz(self):
    """Test fuzz."""
    engine_impl = engine.LibFuzzerEngine()

    options = engine.LibFuzzerOptions(
        '/corpus',
        ['-arg=1', '-timeout=123', '-dict=blah.dict', '-max_len=9001'], [],
        ['/corpus'], {}, False, False)

    # Canned libFuzzer output containing one crash.
    with open(os.path.join(TEST_DIR, 'crash.txt')) as f:
      fuzz_output = f.read()

    def mock_fuzz(*args, **kwargs):  # pylint: disable=unused-argument
      """Mock fuzz."""
      # Simulate the fuzzer generating two new corpus units.
      self.fs.create_file('/fuzz-inputs/temp-9001/new/A')
      self.fs.create_file('/fuzz-inputs/temp-9001/new/B')
      return new_process.ProcessResult(
          command='command',
          return_code=0,
          output=fuzz_output,
          time_executed=2.0,
          timed_out=False)

    def mock_merge(*args, **kwargs):  # pylint: disable=unused-argument
      """Mock merge."""
      # Simulate the merge step keeping one of the new units.
      self.fs.create_file('/fuzz-inputs/temp-9001/merge-corpus/A')
      return new_process.ProcessResult(
          command='merge-command',
          return_code=0,
          output='merge',
          time_executed=2.0,
          timed_out=False)

    self.mock.fuzz.side_effect = mock_fuzz
    self.mock.merge.side_effect = mock_merge

    result = engine_impl.fuzz('/target', options, '/fake', 3600)
    self.assertEqual(1, len(result.crashes))
    self.assertEqual(fuzz_output, result.logs)

    crash = result.crashes[0]
    self.assertEqual('/fake/crash-1e15825e6f0b2240a5af75d84214adda1b6b5340',
                     crash.input_path)
    self.assertEqual(fuzz_output, crash.stacktrace)
    # Reproduce args must exclude fuzzing-only flags (dict, max_len).
    self.assertItemsEqual(['-arg=1', '-timeout=123'], crash.reproduce_args)
    self.assertEqual(2, crash.crash_time)

    self.mock.fuzz.assert_called_with(
        mock.ANY, ['/fuzz-inputs/temp-9001/new', '/corpus'],
        additional_args=[
            '-arg=1', '-timeout=123', '-dict=blah.dict', '-max_len=9001',
            '-artifact_prefix=/fake/'
        ],
        extra_env={},
        fuzz_timeout=1470.0)
    self.mock.merge.assert_called_with(
        mock.ANY, [
            '/fuzz-inputs/temp-9001/merge-corpus', '/fuzz-inputs/temp-9001/new',
            '/corpus'
        ],
        additional_args=['-arg=1', '-timeout=123'],
        merge_timeout=1800.0,
        tmp_dir='/fuzz-inputs/temp-9001/merge-workdir')

    # Stats parsed from the canned fuzz output plus bookkeeping values.
    self.assertDictEqual(
        {
            'actual_duration': 2,
            'average_exec_per_sec': 21,
            'bad_instrumentation': 0,
            'corpus_crash_count': 0,
            'corpus_size': 0,
            'crash_count': 1,
            'dict_used': 1,
            'edge_coverage': 1603,
            'edges_total': 398467,
            'expected_duration': 1450,
            'feature_coverage': 3572,
            'fuzzing_time_percent': 0.13793103448275862,
            'initial_edge_coverage': 1603,
            'initial_feature_coverage': 3572,
            'leak_count': 0,
            'log_lines_from_engine': 2,
            'log_lines_ignored': 67,
            'log_lines_unwanted': 0,
            'manual_dict_size': 0,
            'max_len': 9001,
            'merge_edge_coverage': 0,
            'new_edges': 0,
            'new_features': 0,
            'new_units_added': 1,
            'new_units_generated': 0,
            'number_of_executed_units': 1249,
            'oom_count': 0,
            'peak_rss_mb': 1197,
            'recommended_dict_size': 0,
            'slow_unit_count': 0,
            'slow_units_count': 0,
            'slowest_unit_time_sec': 0,
            'startup_crash_count': 0,
            # TODO(ochang): Move strategy stats to common place, rather than in
            # engine implementation.
            'strategy_corpus_mutations_ml_rnn': 0,
            'strategy_corpus_mutations_radamsa': 0,
            'strategy_corpus_subset': 0,
            'strategy_dataflow_tracing': 0,
            'strategy_fork': 0,
            'strategy_mutator_plugin': 0,
            'strategy_random_max_len': 0,
            'strategy_recommended_dict': 0,
            'strategy_selection_method': 'default',
            'strategy_value_profile': 0,
            'timeout_count': 0,
            'timeout_limit': 123,
        },
        result.stats)
|
<filename>models/AI-Model-Zoo/VAI-1.3-Model-Zoo-Code/caffe/cf_retinaface_wider_360_640_1.11G_1.3/code/test/visualTest/test.py
# -- Copyright 2019 Xilinx Inc.
# --
# -- Licensed under the Apache License, Version 2.0 (the "License");
# -- you may not use this file except in compliance with the License.
# -- You may obtain a copy of the License at
# --
# -- http://www.apache.org/licenses/LICENSE-2.0
# --
# -- Unless required by applicable law or agreed to in writing, software
# -- distributed under the License is distributed on an "AS IS" BASIS,
# -- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# -- See the License for the specific language governing permissions and
# -- limitations under the License.
#
# Apache License
#
# Version 2.0, January 2004
#
# http://www.apache.org/licenses/
#
# TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
#
# 1. Definitions.
#
# "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
#
# "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
#
# "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
#
# "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
#
# "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
#
# "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
#
# "Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
#
# "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
#
# "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
#
# "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
#
# 2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
#
# 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
#
# 4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
#
# You must give any other recipients of the Work or Derivative Works a copy of this License; and
# You must cause any modified files to carry prominent notices stating that You changed the files; and
# You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
# If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
#
# You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
# 5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
#
# 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
#
# 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
#
# 8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
#
# 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
#
# END OF TERMS AND CONDITIONS
import cv2
import sys
import numpy as np
import datetime
import os
import glob
import argparse
sys.path.insert(0,'../precisionTest/')
from postprocessing import RetinaFace
from rcnn.config import config
from rcnn.processing.bbox_transform import bbox_overlaps
from rcnn.dataset import retinaface
scales = [360,640]
count = 1
def parse_args(argv=None):
    """Parse command-line options for the RetinaFace visual test.

    Args:
        argv: Optional list of argument strings to parse. Defaults to None,
            in which case ``sys.argv[1:]`` is used — so the original
            no-argument call ``parse_args()`` behaves exactly as before.

    Returns:
        argparse.Namespace holding the parsed options.
    """
    parser = argparse.ArgumentParser(description='visualTest by retinaface detector')
    # general
    parser.add_argument('--network', help='network name', default='net3', type=str)
    parser.add_argument('--picture', help='picture path list', default='./image_list_test.txt', type=str)
    parser.add_argument('--gpu', help='GPU device to test with', default=1, type=int)
    # testing
    parser.add_argument('--prefix', help='model to test with', default='../../../float/test', type=str)
    parser.add_argument('--epoch', help='model to test with', default=0, type=int)
    parser.add_argument('--output', help='output path', default='./outputs', type=str)
    parser.add_argument('--nocrop', help='', action='store_true')
    parser.add_argument('--thresh', help='valid detection threshold', default=0.5, type=float)
    parser.add_argument('--caffe_dir', help='caffe dir', default='../../../../../../../caffe_xilinx/python/', type=str)
    return parser.parse_args(argv)
args = parse_args()
detector = RetinaFace(args.prefix, 0, args.gpu, args.network, vote=False,
                      caffe_dir=args.caffe_dir, nocrop=args.nocrop)

# Read the list of image paths to run detection on.
with open(args.picture, 'r') as pics:
    piclist = pics.readlines()

for line in piclist:
    line = line.strip()
    img = cv2.imread(line)
    print(img.shape)
    im_shape = img.shape

    # Scale so the short side matches scales[0], but clamp so the long side
    # never exceeds scales[1].
    target_size = scales[0]
    max_size = scales[1]
    im_size_min = np.min(im_shape[0:2])
    im_size_max = np.max(im_shape[0:2])
    im_scale = float(target_size) / float(im_size_min)
    if np.round(im_scale * im_size_max) > max_size:
        im_scale = float(max_size) / float(im_size_max)
    print('im_scale', im_scale)
    scales_detect = [im_scale]
    print(scales_detect)

    flip = False
    faces, landmarks = detector.detect(img, args.thresh, scales=scales_detect, do_flip=flip)

    if faces is not None:
        # BUG FIX: the original printed faces.shape *before* this None guard,
        # which would crash whenever the detector returned no result.
        print(faces.shape)
        minsquare = 100000  # tracks the smallest detected face box area
        count = 0
        ab_count = 0
        for i in range(faces.shape[0]):
            # np.int/np.float were removed in NumPy >= 1.24; use builtins.
            box = faces[i].astype(int)
            color = (0, 0, 255)
            square = (box[2] - box[0]) * (box[3] - box[1])
            if square < minsquare:
                minsquare = square
            cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), color, 2)
            count += 1
            ab_count += 1
            if landmarks is not None:
                landmark5 = landmarks[i].astype(int)
                for l in range(landmark5.shape[0]):
                    # Draw left-eye/left-mouth landmarks (0 and 3) in green,
                    # the rest in red.
                    color = (0, 0, 255)
                    if l == 0 or l == 3:
                        color = (0, 255, 0)
                    cv2.circle(img, (landmark5[l][0], landmark5[l][1]), 1, color, 2)
        print('find', count, 'faces')
        print('anchor based detector find', ab_count, 'faces')
        print('minsize', minsquare)

    # Write the (possibly annotated) image to the output folder.
    fileprefix = args.output
    os.makedirs(fileprefix, exist_ok=True)
    filename = os.path.join(fileprefix, os.path.basename(line))
    print('writing', filename)
    cv2.imwrite(filename, img)
|
<filename>app/blueprints/dynamic/generators/apo.py
import os, errno
from datetime import datetime
from ....config import Config
from shutil import which
def _locate_grace():
    """Return the name of an available Grace plotting binary, or None.

    Prefers ``gracebat`` (batch mode) over plain ``grace``.
    """
    for candidate in ("gracebat", "grace"):
        if which(candidate) is not None:
            return candidate
    return None


def _locate_gmx(double):
    """Return the GROMACS binary name to use, or None if none is installed.

    When *double* is truthy the double-precision build ``gmx_d`` is
    preferred, falling back to the single-precision ``gmx``.
    """
    if double and which("gmx_d") is not None:
        return "gmx_d"
    if which("gmx") is not None:
        return "gmx"
    return None


def _write_command(script, comando, secao=None):
    """Append one shell command to the generated command *script* file.

    If *secao* is given, a ``#secao`` marker line (followed by a blank line)
    is written first — presumably these markers let the runner split the
    script into named steps; every command is followed by a blank line.
    """
    if secao is not None:
        script.write(f'#{secao}\n\n')
    script.write(comando)
    script.write('\n\n')


def generate(
    selecao_arquivo, campo_forca, modelo_agua, tipo_caixa, distancia_caixa,
    neutralizar_sistema, double, ignore, current_user
):
    """Generate the GROMACS command script for an apo (ligand-free) run.

    Builds, under the user's upload folder, the full pipeline of shell
    commands: topology generation, box creation, solvation, (optional)
    neutralization, two-stage energy minimization, NVT/NPT equilibration,
    production MD and the analysis/plot commands.

    Args:
        selecao_arquivo: Path of the selected PDB structure file.
        campo_forca: Force field passed to ``gmx pdb2gmx -ff``.
        modelo_agua: Water model passed to ``gmx pdb2gmx -water``.
        tipo_caixa: Box type for ``gmx editconf -bt``.
        distancia_caixa: Solute-box distance for ``gmx editconf -d``.
        neutralizar_sistema: If truthy, add counter-ions via ``gmx genion``.
        double: If truthy, prefer the double-precision ``gmx_d`` binary.
        ignore: If truthy, pass ``-ignh -missing`` to ``gmx pdb2gmx``.
        current_user: User object; ``current_user.username`` selects the
            per-user upload directory.

    Returns:
        The generated command-file name, or the sentinel strings
        ``"missing_grace"`` / ``"missing_gromacs"`` if a required external
        tool is not installed.
    """
    grace = _locate_grace()
    if grace is None:
        return "missing_grace"
    gmx = _locate_gmx(double)
    if gmx is None:
        return "missing_gromacs"

    arquivo = os.path.basename(selecao_arquivo)
    # Strict split kept on purpose: a name with more than one '.' raises,
    # exactly as the original code did.
    (nome_arquivo, _extensao) = arquivo.split('.')

    # Create the working folders for this run (idempotent).
    pasta = Config.UPLOAD_FOLDER + current_user.username + '/' + nome_arquivo + '/'
    try:
        os.makedirs(pasta + 'graficos')
        os.makedirs(pasta + 'run/logs/')
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise

    # Names of the files produced along the pipeline.
    arquivo_gro = nome_arquivo + '.gro'
    arquivo_top = nome_arquivo + '.top'
    arquivo_box = nome_arquivo + '_box'
    arquivo_ionizado = nome_arquivo + '_charged'

    # NOTE(review): '|' is an invalid filename character on Windows — kept
    # unchanged because callers consume this exact name.
    CompleteFileName = "{}|{}.txt".format(datetime.now().replace(microsecond=0).isoformat(), nome_arquivo)

    with open(pasta + CompleteFileName, "w") as comandos:
        # NOTE(review): chdir is process-wide state, kept for compatibility
        # with whatever runs the generated script afterwards.
        os.chdir(pasta)

        # Topology from the PDB structure, force field and water model.
        _write_command(
            comandos,
            f"{gmx} pdb2gmx -f {arquivo} -o {arquivo_gro} -p {arquivo_top} -ff {campo_forca} -water {modelo_agua} {'-ignh -missing' if ignore else ''}",
            secao='topology')
        # Simulation box.
        _write_command(
            comandos,
            f"{gmx} editconf -f {arquivo_gro} -c -d {str(distancia_caixa)} -bt {tipo_caixa} -o")
        # Solvation of the protein.
        _write_command(
            comandos,
            f"{gmx} solvate -cp out.gro -cs -p {arquivo_top} -o {arquivo_box}",
            secao='solvate')
        # Pre-compile to check whether the system carries a net charge.
        _write_command(
            comandos,
            f"{gmx} grompp -f ions.mdp -c {arquivo_box}.gro -p {arquivo_top} -o {arquivo_ionizado} -maxwarn 2",
            secao='ions')

        if neutralizar_sistema:
            # Replace solvent molecules with counter-ions to neutralize.
            _write_command(
                comandos,
                f"echo 'SOL' | {gmx} genion -s {arquivo_ionizado}.tpr -o {arquivo_ionizado} -p {arquivo_top} -neutral")
            # Redo the pre-compilation after neutralization.
            # NOTE(review): the flattened source makes it ambiguous whether
            # this grompp step is conditional; the original comment ("caso
            # deva neutralizar o sistema") and the fact that
            # {arquivo_ionizado}.gro only exists after genion indicate it is
            # — confirm against the original repository.
            _write_command(
                comandos,
                f"{gmx} grompp -f PME_em.mdp -c {arquivo_ionizado}.gro -p {arquivo_top} -o {arquivo_ionizado} -maxwarn 2",
                secao='minimizationsteepdesc')

        # Steepest-descent energy minimization.
        arquivo_minimizado = f"{nome_arquivo}_sd_em"
        _write_command(
            comandos,
            f"{gmx} mdrun -v -s {arquivo_ionizado}.tpr -deffnm {arquivo_minimizado}")
        # Potential-energy profile of the SD minimization + PNG plot.
        _write_command(
            comandos,
            f"echo '10 0' | {gmx} energy -f {arquivo_minimizado}.edr -o {nome_arquivo}_potentialsd.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_potentialsd.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_potentialsd.png")

        # Conjugate-gradient minimization.
        _write_command(
            comandos,
            f"{gmx} grompp -f PME_cg_em.mdp -c {nome_arquivo}_sd_em.gro -p {arquivo_top} -o {nome_arquivo}_cg_em -maxwarn 2",
            secao='minimizationconjgrad')
        _write_command(
            comandos,
            f"{gmx} mdrun -v -s {nome_arquivo}_cg_em.tpr -deffnm {nome_arquivo}_cg_em")
        # BUG FIX: the original built this energy command but never wrote it
        # to the script, so the CG potential graph never had its data file.
        _write_command(
            comandos,
            f"echo '10 0' | {gmx} energy -f {nome_arquivo}_cg_em.edr -o {nome_arquivo}_potentialcg.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_potentialcg.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_potentialcg.png")

        # NVT equilibration: grompp, mdrun, temperature graph + PNG.
        _write_command(
            comandos,
            f"{gmx} grompp -f nvt.mdp -c {nome_arquivo}_cg_em.gro -r {nome_arquivo}_cg_em.gro -p {arquivo_top} -o {nome_arquivo}_nvt.tpr -maxwarn 2",
            secao='equilibrationnvt')
        _write_command(
            comandos,
            f"{gmx} mdrun -v -s {nome_arquivo}_nvt.tpr -deffnm {nome_arquivo}_nvt")
        _write_command(
            comandos,
            f"echo '16 0' | {gmx} energy -f {nome_arquivo}_nvt.edr -o {nome_arquivo}_temperature_nvt.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_temperature_nvt.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_temperature_nvt.png")

        # NPT equilibration: grompp, mdrun, temperature graph + PNG.
        _write_command(
            comandos,
            f"{gmx} grompp -f npt.mdp -c {nome_arquivo}_nvt.gro -r {nome_arquivo}_nvt.gro -p {arquivo_top} -o {nome_arquivo}_npt.tpr -maxwarn 2",
            secao='equilibrationnpt')
        _write_command(
            comandos,
            f"{gmx} mdrun -v -s {nome_arquivo}_npt.tpr -deffnm {nome_arquivo}_npt")
        _write_command(
            comandos,
            f"echo '16 0' | {gmx} energy -f {nome_arquivo}_npt.edr -o {nome_arquivo}_temperature_npt.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_temperature_npt.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_temperature_npt.png")

        # Production MD.
        _write_command(
            comandos,
            f"{gmx} grompp -f md_pr.mdp -c {nome_arquivo}_npt.gro -p {arquivo_top} -o {nome_arquivo}_pr -maxwarn 2",
            secao='productionmd')
        _write_command(
            comandos,
            f"{gmx} mdrun -v -s {nome_arquivo}_pr.tpr -deffnm {nome_arquivo}_pr")

        # Analysis: re-center/unwrap the trajectory, then graphs.
        _write_command(
            comandos,
            f"echo '1 0' | {gmx} trjconv -s {nome_arquivo}_pr.tpr -f {nome_arquivo}_pr.xtc -o {nome_arquivo}_pr_PBC.xtc -pbc mol -center",
            secao='analyzemd')
        # RMSD of the production run.
        _write_command(
            comandos,
            f"echo '4 4' | {gmx} rms -s {nome_arquivo}_pr.tpr -f {nome_arquivo}_pr_PBC.xtc -o {nome_arquivo}_rmsd_prod.xvg -tu ns")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_rmsd_prod.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_rmsd_prod.png")
        # RMSD against the crystal structure.
        _write_command(
            comandos,
            f"echo '4 4' | {gmx} rms -s {nome_arquivo}_charged.tpr -f {nome_arquivo}_pr_PBC.xtc -o {nome_arquivo}_rmsd_cris.xvg -tu ns")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_rmsd_cris.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_rmsd_cris.png")
        # Combined production + crystal RMSD plot.
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_rmsd_prod.xvg {nome_arquivo}_rmsd_cris.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_rmsd_prod_cris.png")
        # Radius of gyration.
        _write_command(
            comandos,
            f"echo '1' | {gmx} gyrate -s {nome_arquivo}_pr.tpr -f {nome_arquivo}_pr_PBC.xtc -o {nome_arquivo}_gyrate.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_gyrate.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_gyrate.png")
        # Per-residue RMSF.
        _write_command(
            comandos,
            f"echo '1' | {gmx} rmsf -s {nome_arquivo}_pr.tpr -f {nome_arquivo}_pr_PBC.xtc -o {nome_arquivo}_rmsf_residue.xvg -res")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_rmsf_residue.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_rmsf_residue.png")
        # Solvent-accessible surface area (total and per residue).
        _write_command(
            comandos,
            f"echo '1' | {gmx} sasa -s {nome_arquivo}_pr.tpr -f {nome_arquivo}_pr_PBC.xtc -o {nome_arquivo}_solvent_accessible_surface.xvg -or {nome_arquivo}_sas_residue.xvg")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_solvent_accessible_surface.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_solvent_accessible_surface.png")
        _write_command(
            comandos,
            f"{grace} -nxy {nome_arquivo}_sas_residue.xvg -hdevice PNG -hardcopy -printfile ../graficos/{nome_arquivo}_sas_residue.png")

    return CompleteFileName
import torch
import numpy as np
import resampy
from .mel_features import log_mel_spectrogram
"""
The only difference of this code from the original repository is that
it ensures the outputs contain at least a single frame
"""
def _preprocess(data, sample_rate):
    """Convert a raw waveform into VGGish log-mel spectrogram examples.

    Args:
        data: np.ndarray of shape (num_samples,) or (num_samples, channels);
            multi-channel input is averaged down to mono.
        sample_rate: Sample rate of ``data`` in Hz. Input is resampled to
            16 kHz when it differs.

    Returns:
        torch.FloatTensor of shape (num_examples, 1, 96, 64) — 0.96 s
        windows of 96 frames x 64 mel bins — with ``requires_grad=True``.
    """
    # Feature-generation constants from the VGGish reference implementation.
    # (The original also declared model hyperparameters, PCA-postprocessing
    # names and TF op names here; they were dead locals and were removed.)
    SAMPLE_RATE = 16000
    STFT_WINDOW_LENGTH_SECONDS = 0.025
    STFT_HOP_LENGTH_SECONDS = 0.010
    NUM_MEL_BINS = 64  # Frequency bands in each input mel-spectrogram patch.
    MEL_MIN_HZ = 125
    MEL_MAX_HZ = 7500
    LOG_OFFSET = 0.01  # Offset used for stabilized log of input mel-spectrogram.
    EXAMPLE_WINDOW_SECONDS = 0.96  # Each example contains 96 10ms frames
    EXAMPLE_HOP_SECONDS = 0.96  # with zero overlap.

    # Convert to mono by averaging channels.
    if len(data.shape) > 1:
        data = np.mean(data, axis=1)
    # Resample to the rate assumed by VGGish.
    resampled = data
    if sample_rate != SAMPLE_RATE:
        resampled = resampy.resample(resampled, sample_rate, SAMPLE_RATE)

    log_mel = log_mel_spectrogram(
        resampled,
        audio_sample_rate=SAMPLE_RATE,
        log_offset=LOG_OFFSET,
        window_length_secs=STFT_WINDOW_LENGTH_SECONDS,
        hop_length_secs=STFT_HOP_LENGTH_SECONDS,
        num_mel_bins=NUM_MEL_BINS,
        lower_edge_hertz=MEL_MIN_HZ,
        upper_edge_hertz=MEL_MAX_HZ)

    # Frame features into non-overlapping examples via a zero-copy view.
    features_sample_rate = 1.0 / STFT_HOP_LENGTH_SECONDS
    example_window_length = int(round(
        EXAMPLE_WINDOW_SECONDS * features_sample_rate))
    example_hop_length = int(round(
        EXAMPLE_HOP_SECONDS * features_sample_rate))
    num_samples = log_mel.shape[0]
    # The module docstring says this variant guarantees at least one frame.
    # NOTE(review): for inputs shorter than one window the floor term is
    # negative and 1 + floor(...) can still be zero, and as_strided would
    # read past log_mel's buffer for a partial window — confirm callers
    # always supply >= 0.96 s of audio.
    num_frames = 1 + int(np.floor(
        (num_samples - example_window_length) / example_hop_length))
    shape = (num_frames, example_window_length) + log_mel.shape[1:]
    strides = (log_mel.strides[0] * example_hop_length,) + log_mel.strides
    log_mel_examples = np.lib.stride_tricks.as_strided(log_mel, shape=shape, strides=strides)

    # (num_examples, 96, 64) -> (num_examples, 1, 96, 64), float32, grad-enabled.
    return torch.tensor(
        log_mel_examples, requires_grad=True)[:, None, :, :].float()
|
<filename>ctrlengine/ai/test.py
###############################
### TESTING USB ACCELERATOR ###
###############################
# import cv2
# from face_detection import face_detection
# import time
# engine = face_detection()
# cam = cv2.VideoCapture(2)
# while True:
# start = time.time()
# ret, frame = cam.read()
# key = cv2.waitKey(10)
# if key == ord('q'):
# cam.release()
# break
# start = time.time()
# engine.detect(frame)
# boxes = engine.get_bounding_boxes()
# for box in boxes:
# cv2.rectangle(frame, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255,0,0), 2)
# cv2.imshow("frame", frame)
# print("{:.2f} ms".format((time.time()-start)*1000))
###############################################################
############################
### TESTING GOOGLE CLOUD ###
############################
# from cloud_vision import cloud_vision
# import cv2
# import time
# engine = cloud_vision()
# cam = cv2.VideoCapture(2)
# ret, frame = cam.read()
# start = time.time()
# print(engine.detect_faces(frame))
# print("{:.2f} ms".format((time.time()-start)*1000))
###############################################################
###########################
### TESTING AZURE CLOUD ###
###########################
# from azure_vision import azure_vision
# import cv2
# import time
# engine = azure_vision()
# cam = cv2.VideoCapture(2)
# ret, frame = cam.read()
# start = time.time()
# print(engine.detect_faces(frame))
# print("{:.2f} ms".format((time.time()-start)*1000))
###############################################################
############################
### TESTING HAAR CASCADE ###
############################
# import cv2
# from haar_cascade import haar_cascade
# import time
# engine = haar_cascade(model=haar_cascade.FACE)
# cam = cv2.VideoCapture(2)
# while True:
# start = time.time()
# ret, frame = cam.read()
# faces = engine.detect(frame)
# k = cv2.waitKey(10)
# if k == ord('q'):
# cam.release()
# break
# for (x,y,w,h) in faces:
# cv2.rectangle(frame, (x,y), (x+w,y+h), (255,0,0), 2)
# cv2.imshow('frame', frame)
# print("{:.2f} ms".format((time.time()-start)*1000))
###############################################################
#######################
### TESTING TRACKER ###
#######################
# from tracker import tracker
# import cv2
# import time
# WINDOW_NAME = 'Frame'
# track_obj = tracker(type='mosse')
# ref = None
# cam = cv2.VideoCapture(2)
# while True:
# start = time.time()
# ret, frame = cam.read()
# if ref is not None:
# box = track_obj.update(frame)
# if box is not None:
# (x, y, w, h) = [int(v) for v in box]
# cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
# cv2.imshow(WINDOW_NAME, frame)
# key = cv2.waitKey(10)
# if key == ord("q"):
# cam.release()
# cv2.destroyAllWindows()
# break
# elif key == ord("s"):
# (x, y, w, h) = track_obj.init_from_selection(WINDOW_NAME, frame)
# ref = frame[y:y+h, x:x+w]
# cv2.imshow("Reference", ref)
# print("{:.2f} ms".format((time.time()-start)*1000))
###############################################################
|
<reponame>yangboz/maro
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
from flask import Blueprint, abort
from ...master_api_server.jwt_wrapper import check_jwt_validity
from ...master_api_server.objects import local_cluster_details, redis_controller
from ...utils.connection_tester import ConnectionTester
from ...utils.exception import ConnectionFailed
from ...utils.name_creator import NameCreator
from ...utils.params import NodeStatus, Paths
from ...utils.subprocess import Subprocess
# Flask related.
# Blueprint collecting the /v1/nodes endpoints defined below; the URL prefix
# is applied per-route rather than via Blueprint(url_prefix=...).
blueprint = Blueprint(name="nodes", import_name=__name__)
URL_PREFIX = "/v1/nodes"
# Api functions.
@blueprint.route(f"{URL_PREFIX}", methods=["GET"])
@check_jwt_validity
def list_nodes():
"""List the nodes in the cluster.
Returns:
None.
"""
name_to_node_details = redis_controller.get_name_to_node_details()
return list(name_to_node_details.values())
@blueprint.route(f"{URL_PREFIX}/<node_name>", methods=["GET"])
@check_jwt_validity
def get_node(node_name: str):
"""Get the node with node_name.
Returns:
None.
"""
node_details = redis_controller.get_node_details(node_name=node_name)
return node_details
@blueprint.route(f"{URL_PREFIX}", methods=["POST"])
@check_jwt_validity
def create_node(**kwargs):
"""Create a node.
Returns:
None.
"""
node_details = kwargs["json_dict"]
# Init runtime params.
if "name" not in node_details and "id" not in node_details:
node_name = NameCreator.create_node_name()
node_details["name"] = node_name
node_details["id"] = node_name
node_details["image_files"] = {}
node_details["containers"] = {}
node_details["state"] = {
"status": NodeStatus.PENDING
}
node_name = node_details["name"]
with redis_controller.lock(f"lock:name_to_node_details:{node_name}"):
redis_controller.set_node_details(
node_name=node_name,
node_details=node_details
)
return node_details
@blueprint.route(f"{URL_PREFIX}/<node_name>", methods=["DELETE"])
@check_jwt_validity
def delete_node(node_name: str):
"""Delete a node.
Returns:
None.
"""
# Get node_details.
node_details = redis_controller.get_node_details(node_name=node_name)
# leave the cluster
command = (
f"ssh -o StrictHostKeyChecking=no "
f"-i {Paths.MARO_LOCAL}/cluster/{local_cluster_details['name']}/master_to_node_openssh_private_key "
f"-p {node_details['ssh']['port']} "
f"{node_details['username']}@{node_details['hostname']} "
f"'python3 {Paths.MARO_LOCAL}/scripts/leave_cluster.py'"
)
Subprocess.run(command=command)
# Delete node_details at the end.
redis_controller.delete_node_details(node_name=node_name)
return node_details
@blueprint.route(f"{URL_PREFIX}/<node_name>:start", methods=["POST"])
@check_jwt_validity
def start_node(node_name: str):
"""Start a node.
Returns:
None.
"""
node_details = redis_controller.get_node_details(node_name=node_name)
# Make sure the node is able to connect
try:
ConnectionTester.retry_connection(
node_username=node_details["username"],
node_hostname=node_details["hostname"],
node_ssh_port=node_details["ssh"]["port"],
cluster_name=local_cluster_details["name"]
)
except ConnectionFailed:
abort(400)
command = (
f"ssh -o StrictHostKeyChecking=no "
f"-i {Paths.MARO_LOCAL}/cluster/{local_cluster_details['name']}/master_to_node_openssh_private_key "
f"-p {node_details['ssh']['port']} "
f"{node_details['username']}@{node_details['hostname']} "
f"'cd {Paths.MARO_SHARED}/lib/grass; python3 -m scripts.node.start_node_agent_service'"
)
_ = Subprocess.run(command=command)
command = (
f"ssh -o StrictHostKeyChecking=no "
f"-i {Paths.MARO_LOCAL}/cluster/{local_cluster_details['name']}/master_to_node_openssh_private_key "
f"-p {node_details['ssh']['port']} "
f"{node_details['username']}@{node_details['hostname']} "
f"'cd {Paths.MARO_SHARED}/lib/grass; python3 -m scripts.node.start_node_api_server_service'"
)
_ = Subprocess.run(command=command)
return {}
@blueprint.route(f"{URL_PREFIX}/<node_name>:stop", methods=["POST"])
@check_jwt_validity
def stop_node(node_name: str):
"""Stop a node.
Returns:
None.
"""
node_details = redis_controller.get_node_details(node_name=node_name)
# Make sure the node is able to connect
try:
ConnectionTester.retry_connection(
node_username=node_details["username"],
node_hostname=node_details["hostname"],
node_ssh_port=node_details["ssh"]["port"],
cluster_name=local_cluster_details["name"]
)
except ConnectionFailed:
abort(400)
command = (
f"ssh -o StrictHostKeyChecking=no "
f"-i {Paths.MARO_LOCAL}/cluster/{local_cluster_details['name']}/master_to_node_openssh_private_key "
f"-p {node_details['ssh']['port']} "
f"{node_details['username']}@{node_details['hostname']} "
f"'cd {Paths.MARO_SHARED}/lib/grass; python3 -m scripts.node.stop_node_api_server_service'"
)
_ = Subprocess.run(command=command)
command = (
f"ssh -o StrictHostKeyChecking=no "
f"-i {Paths.MARO_LOCAL}/cluster/{local_cluster_details['name']}/master_to_node_openssh_private_key "
f"-p {node_details['ssh']['port']} "
f"{node_details['username']}@{node_details['hostname']} "
f"'cd {Paths.MARO_SHARED}/lib/grass; python3 -m scripts.node.stop_node_agent_service'"
)
_ = Subprocess.run(command=command)
return {}
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.linalg import LinAlgError
from ... import opcodes as OperandDef
from ...serialize import KeyField
from ..datasource import tensor as astensor
from ..operands import TensorHasInput, TensorOperandMixin
from ..core import TensorOrder
class TensorInv(TensorHasInput, TensorOperandMixin):
    """Tensor operand computing the multiplicative inverse of a square matrix.

    Takes a single input tensor and produces an output of the same shape;
    the actual computation is expressed at tile time as an LU decomposition
    followed by two triangular solves.
    """
    # Operand type code used by the mars serialization/scheduling layer.
    _op_type_ = OperandDef.INV

    # The single input tensor (the matrix to invert).
    _input = KeyField('input')

    def __init__(self, dtype=None, sparse=False, **kw):
        # Forward dtype/sparse to the base operand as serialized fields.
        super(TensorInv, self).__init__(_dtype=dtype, _sparse=sparse, **kw)

    def __call__(self, a):
        # The inverse has the same shape as the (square) input matrix.
        a = astensor(a)
        return self.new_tensor([a], a.shape, order=TensorOrder.C_ORDER)

    @classmethod
    def tile(cls, op):
        """
        Use LU decomposition to compute inverse of matrix.
        Given a square matrix A:
        P, L, U = lu(A)
        b_eye is an identity matrix with the same shape as matrix A, then,
        (P * L * U) * A_inv = b_eye
        L * (U * A_inv) = P.T * b_eye
        use `solve_triangular` twice to compute the inverse of matrix A.
        """
        # Imported lazily to avoid circular imports between linalg modules.
        from .lu import lu
        from ..datasource import eye
        from ..base.transpose import TensorTranspose
        from .tensordot import tensordot
        from .solve_triangular import solve_triangular
        in_tensor = op.input
        is_sparse = in_tensor.is_sparse()
        # Identity matrix tiled with the same chunk layout as the input.
        b_eye = eye(in_tensor.shape[0], chunk_size=in_tensor.nsplits, sparse=is_sparse)
        b_eye.single_tiles()
        p, l, u = lu(in_tensor)
        p.single_tiles()
        # transposed p equals to inverse of p (p is a permutation matrix)
        p_transpose = TensorTranspose(
            dtype=p.dtype, sparse=p.op.sparse, axes=list(range(in_tensor.ndim))[::-1]).new_tensor([p], p.shape)
        p_transpose.single_tiles()
        # b = P.T @ b_eye: the right-hand side of the triangular solves.
        b = tensordot(p_transpose, b_eye, axes=((p_transpose.ndim - 1,), (b_eye.ndim - 2,)))
        b.single_tiles()
        # as `l` is a lower matrix, `lower=True` should be specified.
        uy = solve_triangular(l, b, lower=True, sparse=op.sparse)
        uy.single_tiles()
        a_inv = solve_triangular(u, uy, sparse=op.sparse)
        a_inv.single_tiles()
        return [a_inv]
def inv(a, sparse=None):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return the tensor `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix to be inverted.
    sparse : bool, optional
        Return sparse value or not; defaults to the sparsity of `a`.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of the matrix `a`.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    Examples
    --------
    >>> import mars.tensor as mt
    >>> a = np.array([[1., 2.], [3., 4.]])
    >>> ainv = mt.linalg.inv(a)
    >>> mt.allclose(mt.dot(a, ainv), mt.eye(2)).execute()
    True
    """
    # TODO: using some parallel algorithm for matrix inversion.
    a = astensor(a)
    if a.ndim != 2:
        raise LinAlgError('{0}-dimensional array given. '
                          'Tensor must be two-dimensional'.format(a.ndim))
    if a.shape[0] != a.shape[1]:
        raise LinAlgError('Input must be square')
    # Invert a tiny concrete matrix purely to learn what dtype numpy would
    # promote the result to (e.g. integer input -> float64 output).
    dtype_probe = np.linalg.inv(np.array([[1, 2], [2, 5]], dtype=a.dtype))
    if sparse is None:
        sparse = a.issparse()
    op = TensorInv(dtype=dtype_probe.dtype, sparse=sparse)
    return op(a)
|
<gh_stars>0
from numpy import inf, nan
from sklearn.decomposition import LatentDirichletAllocation as Op
from lale.docstrings import set_docstrings
from lale.operators import make_operator
class LatentDirichletAllocationImpl:
    """Thin lale wrapper delegating to sklearn's LatentDirichletAllocation."""

    def __init__(self, **hyperparams):
        # Keep the raw hyperparams and build the sklearn estimator eagerly.
        self._hyperparams = hyperparams
        self._wrapped_model = Op(**self._hyperparams)

    def fit(self, X, y=None):
        """Fit the wrapped model; `y` is forwarded only when provided."""
        fit_args = (X,) if y is None else (X, y)
        self._wrapped_model.fit(*fit_args)
        return self

    def transform(self, X):
        """Delegate transformation to the fitted sklearn model."""
        return self._wrapped_model.transform(X)
# JSON schema for the hyperparameters: which ones exist, their defaults,
# which an optimizer may tune ("relevantToOptimizer"), plus cross-parameter
# constraints expressed as extra entries in "allOf".
_hyperparams_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "inherited docstring for LatentDirichletAllocation Latent Dirichlet Allocation with online variational Bayes algorithm",
    "allOf": [
        {
            "type": "object",
            "required": [
                "n_components",
                "doc_topic_prior",
                "topic_word_prior",
                "learning_method",
                "learning_decay",
                "learning_offset",
                "max_iter",
                "batch_size",
                "evaluate_every",
                "total_samples",
                "perp_tol",
                "mean_change_tol",
                "max_doc_update_iter",
                "n_jobs",
                "verbose",
                "random_state",
                "n_topics",
            ],
            "relevantToOptimizer": [
                "n_components",
                "max_iter",
                "batch_size",
                "evaluate_every",
                "total_samples",
                "max_doc_update_iter",
            ],
            "additionalProperties": False,
            "properties": {
                "n_components": {
                    "type": "integer",
                    "minimumForOptimizer": 2,
                    "maximumForOptimizer": 256,
                    "distribution": "uniform",
                    "default": 10,
                    "description": "Number of topics.",
                },
                "doc_topic_prior": {
                    "anyOf": [{"type": "number"}, {"enum": [None]}],
                    "default": None,
                    "description": "Prior of document topic distribution `theta`",
                },
                "topic_word_prior": {
                    "anyOf": [{"type": "number"}, {"enum": [None]}],
                    "default": None,
                    "description": "Prior of topic word distribution `beta`",
                },
                "learning_method": {
                    "enum": ["batch", "online"],
                    "default": "batch",
                    "description": "Method used to update `_component`",
                },
                "learning_decay": {
                    "type": "number",
                    "default": 0.7,
                    "description": "It is a parameter that control learning rate in the online learning method",
                },
                "learning_offset": {
                    "type": "number",
                    "default": 10.0,
                    "description": "A (positive) parameter that downweights early iterations in online learning",
                },
                "max_iter": {
                    "type": "integer",
                    "minimumForOptimizer": 10,
                    "maximumForOptimizer": 1000,
                    "distribution": "uniform",
                    "default": 10,
                    "description": "The maximum number of iterations.",
                },
                "batch_size": {
                    "type": "integer",
                    "minimumForOptimizer": 3,
                    "maximumForOptimizer": 128,
                    "distribution": "uniform",
                    "default": 128,
                    "description": "Number of documents to use in each EM iteration",
                },
                "evaluate_every": {
                    "type": "integer",
                    "minimumForOptimizer": (-1),
                    "maximumForOptimizer": 0,
                    "distribution": "uniform",
                    "default": (-1),
                    "description": "How often to evaluate perplexity",
                },
                "total_samples": {
                    "anyOf": [
                        {"type": "integer", "forOptimizer": False},
                        {
                            "type": "number",
                            "minimumForOptimizer": 0.0,
                            "maximumForOptimizer": 1.0,
                            "distribution": "uniform",
                        },
                    ],
                    "default": 1000000.0,
                    "description": "Total number of documents",
                },
                "perp_tol": {
                    "type": "number",
                    "default": 0.1,
                    "description": "Perplexity tolerance in batch learning",
                },
                "mean_change_tol": {
                    "type": "number",
                    "default": 0.001,
                    "description": "Stopping tolerance for updating document topic distribution in E-step.",
                },
                "max_doc_update_iter": {
                    "type": "integer",
                    "minimumForOptimizer": 100,
                    "maximumForOptimizer": 101,
                    "distribution": "uniform",
                    "default": 100,
                    "description": "Max number of iterations for updating document topic distribution in the E-step.",
                },
                "n_jobs": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": 1,
                    "description": "The number of jobs to use in the E-step",
                },
                "verbose": {
                    "type": "integer",
                    "default": 0,
                    "description": "Verbosity level.",
                },
                "random_state": {
                    "anyOf": [
                        {"type": "integer"},
                        {"laleType": "numpy.random.RandomState"},
                        {"enum": [None]},
                    ],
                    "default": None,
                    "description": "If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`.",
                },
                "n_topics": {
                    "anyOf": [{"type": "integer"}, {"enum": [None]}],
                    "default": None,
                    "description": "This parameter has been renamed to n_components and will be removed in version 0.21",
                },
            },
        },
        # The remaining allOf entries encode "parameter X is only meaningful
        # in combination with Y" constraints as implications.
        {
            "description": "learning_method, only used in fit method",
            "anyOf": [
                {
                    "type": "object",
                    "properties": {"learning_method": {"enum": ["batch"]}},
                },
                {"type": "object", "properties": {"method": {"enum": ["fit"]}}},
            ],
        },
        {
            "description": "batch_size, only used in online learning",
            "anyOf": [
                {"type": "object", "properties": {"batch_size": {"enum": [128]}}},
                {"type": "object", "properties": {"learning": {"enum": ["online"]}}},
            ],
        },
        {
            "description": "evaluate_every, only used in fit method",
            "anyOf": [
                {"type": "object", "properties": {"evaluate_every": {"enum": [(-1)]}}},
                {"type": "object", "properties": {"method": {"enum": ["fit"]}}},
            ],
        },
        {
            "description": "total_samples, only used in the partial_fit method",
            "anyOf": [
                {
                    "type": "object",
                    "properties": {"total_samples": {"enum": [1000000.0]}},
                },
                {"type": "object", "properties": {"method": {"enum": ["partial_fit"]}}},
            ],
        },
        {
            "XXX TODO XXX": "Parameter: perp_tol > only used when evaluate_every is greater than 0"
        },
    ],
}
# Schema for the arguments accepted by fit().
_input_fit_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Learn model for the data X with variational Bayes method.",
    "type": "object",
    "required": ["X", "y"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "Document word matrix.",
        },
        "y": {},
    },
}
# Schema for the arguments accepted by transform().
_input_transform_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Transform data X according to the fitted model.",
    "type": "object",
    "required": ["X"],
    "properties": {
        "X": {
            "anyOf": [
                {
                    "type": "array",
                    "items": {"laleType": "Any", "XXX TODO XXX": "item type"},
                    "XXX TODO XXX": "array-like or sparse matrix, shape=(n_samples, n_features)",
                },
                {
                    "type": "array",
                    "items": {"type": "array", "items": {"type": "number"}},
                },
            ],
            "description": "Document word matrix.",
        }
    },
}
# Schema of transform()'s return value.
_output_transform_schema = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Document topic distribution for X.",
    "laleType": "Any",
    "XXX TODO XXX": "shape=(n_samples, n_components)",
}
# Aggregate schema consumed by lale's operator machinery below.
_combined_schemas = {
    "$schema": "http://json-schema.org/draft-04/schema#",
    "description": "Combined schema for expected data and hyperparameters.",
    "documentation_url": "https://scikit-learn.org/0.20/modules/generated/sklearn.decomposition.LatentDirichletAllocation#sklearn-decomposition-latentdirichletallocation",
    "import_from": "sklearn.decomposition",
    "type": "object",
    "tags": {"pre": [], "op": ["transformer"], "post": []},
    "properties": {
        "hyperparams": _hyperparams_schema,
        "input_fit": _input_fit_schema,
        "input_transform": _input_transform_schema,
        "output_transform": _output_transform_schema,
    },
}
set_docstrings(LatentDirichletAllocationImpl, _combined_schemas)
# Public lale operator wrapping the impl class above.
LatentDirichletAllocation = make_operator(
    LatentDirichletAllocationImpl, _combined_schemas
)
|
<gh_stars>1-10
import os
import io
import sys
from unittest import TestCase, main as unittest_main
from eventhandler import EventHandler
class TestEventHandler(TestCase):
    """Unit tests for eventhandler.EventHandler: construction, event
    (un)registration, callback (de)linking, firing, and string output.

    Fixes applied in review: removed redundant `f.close()` calls inside
    `with open(...)` blocks (the context manager already closes the file)
    and a leftover debugging `print(output)`.
    """
    def test_001_initialization_args(self):
        # Test init on no args
        eh = EventHandler()
        self.assertEqual(eh.count_events, 0)  # checks there is no events
        self.assertFalse(eh.verbose)  # checks verbose is false
        self.assertFalse(eh.tolerate_exceptions)  # checks no exception toleration
        self.assertIsNotNone(eh.stream_output)
    def test_002_initiaization_with_events(self):
        # Test init with args.
        eh = EventHandler('MyEvent')
        self.assertEqual(eh.count_events, 1)
        self.assertFalse(eh.verbose)  # checks verbose is false
        self.assertFalse(eh.tolerate_exceptions)  # checks no exception toleration
        self.assertIsNotNone(eh.stream_output)
    def test_003_initialization_verbose(self):
        eh = EventHandler(verbose=True)
        self.assertTrue(eh.verbose)
        eh = EventHandler(verbose=False)
        self.assertFalse(eh.verbose)
    def test_004_initialization_tolerate_execeptions(self):
        eh = EventHandler(tolerate_callbacks_exceptions=True)
        self.assertTrue(eh.tolerate_exceptions)
        eh = EventHandler(tolerate_callbacks_exceptions=False)
        self.assertFalse(eh.tolerate_exceptions)
    def test_005_initialization_file_to_verbose(self):
        # Verbose output routed to a real file should mention the instance id.
        with open('test.txt', '+w') as f:
            eh = EventHandler(stream_output=f, verbose=True)
            self.assertEqual('test.txt', eh.stream_output.name)
            instance_id = str(hex(id(eh)))
        with open('test.txt', 'r') as f:
            content = f.read()
            self.assertTrue((instance_id in content))
        os.remove('test.txt')
    def test_006_event_registration(self):
        eh = EventHandler()
        event_name = 'onMyCoolEventHappens'
        self.assertFalse(eh.is_event_registered(event_name))
        self.assertTrue(eh.register_event(event_name))
        self.assertTrue(eh.is_event_registered(event_name))
        eh = EventHandler('one', 'two', 'three', verbose=True)
        self.assertTrue(eh.is_event_registered('three'))
        # Registering a duplicate event must fail.
        self.assertFalse(eh.register_event('one'))
    def test_007_event_unregistration(self):
        eh = EventHandler()
        event_name = 'onMyCoolEventHappens'
        self.assertFalse(eh.is_event_registered(event_name))
        self.assertTrue(eh.register_event(event_name))
        self.assertTrue(eh.is_event_registered(event_name))
        eh.unregister_event(event_name)
        # Unregistering an event that was never registered must fail.
        self.assertFalse(eh.unregister_event('one'))
    def test_008_is_callable(self):
        func = lambda x: print(x)
        not_func = 'This is not callable as method'
        self.assertTrue(EventHandler.is_callable(func))
        self.assertFalse(EventHandler.is_callable(not_func))
    def test_009_bind_callbacks(self):
        event_name = 'newCoolEvent'
        eh = EventHandler(event_name)
        def callback1(*args):
            pass
        self.assertFalse(eh.is_callback_in_event(event_name, callback1))
        output = io.StringIO()
        eh = EventHandler(event_name, verbose=True, stream_output=output)
        with self.assertRaises(EventHandler.Exceptions.EventNotAllowedError) as context:
            # Impossible to link to a not registered callback, will raise error
            eh.link(callback1, 'onNotRegisteredEvent')
        self.assertTrue(eh.link(callback1, event_name))
        self.assertFalse(eh.link(callback1, event_name))
        output = str(output.getvalue())
        self.assertTrue(callback1.__name__ in output)
        self.assertTrue(event_name in output)
        self.assertTrue(eh.is_callback_in_event(event_name, callback1))
        # tries to link not callable event
        self.assertFalse(eh.link('not_callable', event_name))
    def test_010_unbind_callbacks(self):
        event_name = 'newCoolEvent'
        eh = EventHandler(event_name)
        def callback1(*args):
            pass
        self.assertTrue(eh.link(callback1, event_name))
        self.assertTrue(eh.unlink(callback1, event_name))
        # Test already unregistered event
        output = io.StringIO()
        eh = EventHandler(event_name, verbose=True, stream_output=output)
        self.assertFalse(eh.unlink(callback1, event_name))
        self.assertTrue(eh.link(callback1, event_name))
        self.assertFalse(eh.link(callback1, event_name))
        value = output.getvalue()
        self.assertTrue(callback1.__name__ in value)
        self.assertTrue(event_name in value)
        # Test try unregister not exists event
        self.assertFalse(eh.unlink(callback1, 'inexistentEventName'))
        value = output.getvalue()
        for event in eh.event_list:
            self.assertTrue(event in value)
    def test_011_fire_event(self):
        event_name = 'newCoolEvent'
        eh = EventHandler(event_name)
        def callback1(*args, **kwargs):
            # Fired positional and keyword arguments must arrive unchanged.
            self.assertEqual(args[0], 1)
            self.assertEqual(args[1], 2)
            self.assertEqual(args[2], 3)
            self.assertEqual(kwargs['extra'], 0)
        self.assertTrue(eh.link(callback1, event_name))
        self.assertTrue(eh.fire(event_name, 1, 2, 3, extra=0))
        def will_fail_callback(number1, number2, number3, extra=0):
            return number1 / extra
        self.assertTrue(eh.link(will_fail_callback, event_name))
        # Without toleration the callback's exception propagates to the caller.
        with self.assertRaises(ZeroDivisionError) as context:
            eh.fire(event_name, 1, 2, 3, extra=0)
        # Set callback fail toleration
        eh.verbose = True
        eh.tolerate_exceptions = True
        output = io.StringIO()
        eh.stream_output = output
        self.assertFalse(eh.fire(event_name, 1, 2, 3, extra=0))
        value = output.getvalue()
        self.assertTrue('WARNING' in value)
        self.assertTrue(will_fail_callback.__name__ in value)
    def test_012_string_representation(self):
        eh = EventHandler('one')
        def check__str__output():
            # __str__ must expose id, flags, and every registered event name.
            instance_id = str(hex(id(eh)))
            self.assertTrue(instance_id in eh.__str__())
            self.assertTrue(f'verbose={eh.verbose}' in eh.__str__())
            self.assertTrue(f'tolerate_exceptions={eh.tolerate_exceptions}' in eh.__str__())
            for event in eh.event_list:
                self.assertTrue(event in eh.__str__())
            # NOTE(review): this loop re-iterates event_list, not the linked
            # callbacks — presumably intended to check callback names; verify.
            for callback in eh.event_list:
                self.assertTrue(callback in eh.__str__())
        def callback1_in_one():
            pass
        def callback2_in_one():
            pass
        def callback3_in_one():
            pass
        def callback1_in_two():
            pass
        def callback2_in_two():
            pass
        def callback1_in_three():
            pass
        self.assertTrue(eh.link(callback1_in_one, 'one'))
        check__str__output()
        self.assertTrue(eh.link(callback2_in_one, 'one'))
        check__str__output()
        self.assertTrue(eh.link(callback3_in_one, 'one'))
        check__str__output()
        self.assertTrue(eh.register_event('two'))
        self.assertTrue(eh.link(callback1_in_two, 'two'))
        check__str__output()
        self.assertTrue(eh.link(callback2_in_two, 'two'))
        check__str__output()
        self.assertTrue(eh.register_event('three'))
        self.assertTrue(eh.link(callback1_in_three, 'three'))
        check__str__output()
        self.assertTrue(eh.unregister_event('three'))
        check__str__output()
        self.assertTrue(eh.unlink(callback2_in_two, 'two'))
    def test_013_test_events_tuple(self):
        eh = EventHandler('one', 'two', 'three')
        self.assertDictEqual(eh.events, {'one': [], 'two': [], 'three': []})
    def test_014_test_events_tuple(self):
        eh = EventHandler('one', 'two', 'three')
        self.assertDictEqual(eh.events, {'one': [], 'two': [], 'three': []})
        self.assertTrue(eh.clear_events())
        self.assertDictEqual(eh.events, {})
    def test_015_test__rep(self):
        eh = EventHandler('one', 'two', 'three')
        self.assertEqual(eh.__str__(), eh.__repr__())
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ORStools
A QGIS plugin
QGIS client to query openrouteservice
-------------------
begin : 2017-02-01
git sha : $Format:%H$
copyright : (C) 2017 by <NAME>
email : <EMAIL>
***************************************************************************/
This plugin provides access to the various APIs from OpenRouteService
(https://openrouteservice.org), developed and
maintained by GIScience team at University of Heidelberg, Germany. By using
this plugin you agree to the ORS terms of service
(https://openrouteservice.org/terms-of-service/).
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import os.path
from PyQt5.QtGui import QIcon
from qgis.core import (QgsWkbTypes,
QgsCoordinateReferenceSystem,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingParameterField,
QgsProcessingParameterFeatureSource,
QgsProcessingParameterEnum,
QgsProcessingParameterFeatureSink,
QgsPointXY,
)
from . import HELP_DIR
from ORStools import RESOURCE_PREFIX, __help__
from ORStools.core import client, directions_core, PROFILES, PREFERENCES
from ORStools.utils import configmanager, transform, exceptions,logger, convert
class ORSdirectionsLinesAlgo(QgsProcessingAlgorithm):
    """Algorithm class for Directions Lines.

    Queries the openrouteservice directions endpoint once per input line
    feature and writes the returned routes to a sink layer (EPSG:4326).
    """
    ALGO_NAME = 'directions_lines'
    ALGO_NAME_LIST = ALGO_NAME.split('_')
    IN_PROVIDER = "INPUT_PROVIDER"
    IN_LINES = "INPUT_LINE_LAYER"
    IN_FIELD = "INPUT_LAYER_FIELD"
    IN_PROFILE = "INPUT_PROFILE"
    IN_PREFERENCE = "INPUT_PREFERENCE"
    IN_MODE = "INPUT_MODE"
    OUT = 'OUTPUT'
    # Read once at class-definition time to populate the provider combo box;
    # processAlgorithm re-reads the config for up-to-date credentials.
    providers = configmanager.read_config()['providers']
    def initAlgorithm(self, configuration, p_str=None, Any=None, *args, **kwargs):
        """Declare the algorithm's input and output parameters."""
        providers = [provider['name'] for provider in self.providers]
        self.addParameter(
            QgsProcessingParameterEnum(
                self.IN_PROVIDER,
                "Provider",
                providers
            )
        )
        self.addParameter(
            QgsProcessingParameterFeatureSource(
                name=self.IN_LINES,
                description="Input Line layer",
                types=[QgsProcessing.TypeVectorLine],
            )
        )
        self.addParameter(
            QgsProcessingParameterField(
                name=self.IN_FIELD,
                description="Layer ID Field",
                parentLayerParameterName=self.IN_LINES,
            )
        )
        self.addParameter(
            QgsProcessingParameterEnum(
                self.IN_PROFILE,
                "Travel mode",
                PROFILES
            )
        )
        self.addParameter(
            QgsProcessingParameterEnum(
                self.IN_PREFERENCE,
                "Travel preference",
                PREFERENCES
            )
        )
        self.addParameter(
            QgsProcessingParameterFeatureSink(
                name=self.OUT,
                description="Directions",
            )
        )
    def name(self):
        return self.ALGO_NAME
    def shortHelpString(self):
        """Displays the sidebar help in the algorithm window"""
        file = os.path.join(
            HELP_DIR,
            'algorithm_directions_line.help'
        )
        with open(file) as helpf:
            msg = helpf.read()
        return msg
    def helpUrl(self):
        """will be connected to the Help button in the Algorithm window"""
        return __help__
    def displayName(self):
        return 'Generate ' + " ".join(map(lambda x: x.capitalize(), self.ALGO_NAME_LIST))
    def icon(self):
        return QIcon(RESOURCE_PREFIX + 'icon_directions.png')
    def createInstance(self):
        return ORSdirectionsLinesAlgo()
    def processAlgorithm(self, parameters, context, feedback):
        """Request directions per input line and write features to the sink."""
        # Init ORS client
        providers = configmanager.read_config()['providers']
        provider = providers[self.parameterAsEnum(parameters, self.IN_PROVIDER, context)]
        clnt = client.Client(provider)
        clnt.overQueryLimit.connect(lambda : feedback.reportError("OverQueryLimit: Retrying..."))
        profile = PROFILES[self.parameterAsEnum(
            parameters,
            self.IN_PROFILE,
            context
        )]
        preference = PREFERENCES[self.parameterAsEnum(
            parameters,
            self.IN_PREFERENCE,
            context
        )]
        # Get parameter values
        source = self.parameterAsSource(
            parameters,
            self.IN_LINES,
            context
        )
        source_field_idx = self.parameterAsEnum(
            parameters,
            self.IN_FIELD,
            context
        )
        source_field_name = self.parameterAsString(
            parameters,
            self.IN_FIELD,
            context
        )
        params = {
            'profile': profile,
            'preference': preference,
            'geometry': 'true',
            'format': 'geojson',
            'geometry_format': 'geojson',
            'instructions': 'false',
            'id': None
        }
        (sink, dest_id) = self.parameterAsSink(parameters, self.OUT, context,
                                               directions_core.get_fields(from_type=source.fields().field(source_field_name).type(),
                                                                           from_name=source_field_name,
                                                                           line=True),
                                               source.wkbType(),
                                               QgsCoordinateReferenceSystem(4326))
        count = source.featureCount()
        for num, (line, field_value) in enumerate(self.get_sorted_lines(source, source_field_name)):
            # Stop the algorithm if cancel button has been clicked
            if feedback.isCanceled():
                break
            params['coordinates'] = convert.build_coords([[point.x(), point.y()] for point in line])
            try:
                response = clnt.request(provider['endpoints'][self.ALGO_NAME_LIST[0]], params)
            except (exceptions.ApiError,
                    exceptions.InvalidKey,
                    exceptions.GenericServerError) as e:
                # BUG FIX: `line` is a list of QgsPointXY, so the original
                # `line[source_field_name]` raised a TypeError while trying to
                # report the error. Use the feature's ID value instead.
                msg = "Feature ID {} caused a {}:\n{}".format(
                    field_value,
                    e.__class__.__name__,
                    str(e))
                feedback.reportError(msg)
                logger.log(msg)
                continue
            sink.addFeature(directions_core.get_output_feature(
                response,
                profile,
                preference,
                from_value=field_value,
                line=True
            ))
            feedback.setProgress(int(100.0 / count * num))
        return {self.OUT: dest_id}
    def get_sorted_lines(self, layer, field_name):
        """
        Generator to yield geometry and ID value sorted by feature ID. Careful: feat.id() is not necessarily
        permanent
        :param layer: source input layer
        :type layer: QgsProcessingParameterFeatureSource
        :param field_name: name of ID field
        :type field_name: str
        """
        # First get coordinate transformer
        xformer = transform.transformToWGS(layer.sourceCrs())
        for feat in sorted(layer.getFeatures(), key=lambda f: f.id()):
            line = None
            field_value = feat[field_name]
            # NOTE(review): for MultiLineString only the first part is used.
            if layer.wkbType() == QgsWkbTypes.MultiLineString:
                line = [xformer.transform(QgsPointXY(point)) for point in feat.geometry().asMultiPolyline()[0]]
            elif layer.wkbType() == QgsWkbTypes.LineString:
                line = [xformer.transform(QgsPointXY(point)) for point in feat.geometry().asPolyline()]
            yield line, field_value
|
import asyncio
import datetime
import logging
import sys
from typing import List, Optional
import jwt as jwtlib # name conflict with jwt query param in /ws
import sentry_sdk
import sqlalchemy.exc
from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, WebSocket, status
from fastapi.middleware.cors import CORSMiddleware
from sentry_sdk.integrations.asgi import SentryAsgiMiddleware
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
from sqlalchemy.orm import Session
from . import auth, calc, config, crud, gamedata, metrics, models, schemas, ws
from .database import SessionLocal, engine, get_db
# Create any missing tables before serving.
models.Base.metadata.create_all(bind=engine)
# Load/refresh static game data into the database at startup.
with SessionLocal() as sess:
    gamedata.upsert_all(gamedata_dir=config.GAMEDATA_DIR, db=sess)
log = logging.getLogger(__name__)
# Passing "debug" on the command line enables verbose stdout logging.
if 'debug' in sys.argv:
    # noinspection PyArgumentList
    logging.basicConfig(stream=sys.stdout, encoding='utf-8', level=logging.DEBUG)
app = FastAPI()
# ==== Middleware ====
# CORS: fully open — this API is served to arbitrary browser origins.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Sentry error reporting, only when a DSN is configured.
if config.SENTRY_DSN is not None:
    sentry_sdk.init(dsn=config.SENTRY_DSN, environment=config.SENTRY_ENV, integrations=[SqlalchemyIntegration()])
    app.add_middleware(SentryAsgiMiddleware)
# Prometheus metrics endpoints/collectors.
metrics.register(app)
# ==== HTTP ====
@app.post("/wardInfo", status_code=202)
def ingest_wardinfo(
        wardinfo: schemas.ffxiv.HousingWardInfo,
        background: BackgroundTasks,
        sweeper: schemas.paissa.JWTSweeper = Depends(auth.required),
        db: Session = Depends(get_db)):
    """Ingest one housing ward sweep and queue it for async processing.

    Returns 202 immediately; the sweep is processed by a background task.
    Raises HTTP 400 if the sweep cannot be stored even anonymously.
    """
    log.debug("Received wardInfo:")
    log.debug(wardinfo.json())
    try:
        wardsweep = crud.ingest_wardinfo(db, wardinfo, sweeper)
    except sqlalchemy.exc.IntegrityError:
        # First attempt conflicted (presumably on the sweeper record);
        # roll back and retry without attributing the sweep to a sweeper.
        db.rollback()
        try:
            wardsweep = crud.ingest_wardinfo(db, wardinfo, None)
        except sqlalchemy.exc.IntegrityError:
            raise HTTPException(400, "Could not ingest sweep")
    # Release the session before handing off to the background task.
    db.close()
    background.add_task(ws.queue_wardsweep_for_processing, wardsweep)
    return {"message": "OK"}
@app.post("/hello")
def hello(
        data: schemas.paissa.Hello,
        sweeper: schemas.paissa.JWTSweeper = Depends(auth.required),
        db: Session = Depends(get_db)):
    """Register/update a sweeper's info; the token's CID must match the body's."""
    if sweeper.cid != data.cid:
        raise HTTPException(400, "Token CID and given CID do not match")
    log.debug("Received hello:")
    log.debug(data.json())
    crud.upsert_sweeper(db, data)
    # Record the last-seen timestamp for this sweeper.
    crud.touch_sweeper_by_id(db, sweeper.cid)
    return {"message": "OK"}
@app.get("/worlds", response_model=List[schemas.paissa.WorldSummary])
def list_worlds(db: Session = Depends(get_db)):
    """Summarize every world: per-district open-plot counts and oldest sweep time."""
    worlds = crud.get_worlds(db)
    districts = crud.get_districts(db)
    summaries = []
    for world in worlds:
        per_district = []
        for district in districts:
            plots = crud.get_latest_plots_in_district(db, world.id, district.id, use_cache=True)
            open_count = sum(not p.is_owned for p in plots)
            # Epoch fallback signals "no data yet" for an unswept district.
            if plots:
                oldest = min(p.timestamp for p in plots)
            else:
                oldest = datetime.datetime.fromtimestamp(0)
            per_district.append(schemas.paissa.DistrictSummary(
                id=district.id,
                name=district.name,
                num_open_plots=open_count,
                oldest_plot_time=oldest
            ))
        summaries.append(schemas.paissa.WorldSummary(
            id=world.id,
            name=world.name,
            districts=per_district,
            num_open_plots=sum(d.num_open_plots for d in per_district),
            oldest_plot_time=min(d.oldest_plot_time for d in per_district)
        ))
    return summaries
@app.get("/worlds/{world_id}", response_model=schemas.paissa.WorldDetail)
def get_world(world_id: int, db: Session = Depends(get_db)):
    """Return full per-district detail for one world, or HTTP 404 if unknown."""
    world = crud.get_world_by_id(db, world_id)
    districts = crud.get_districts(db)
    if world is None:
        raise HTTPException(404, "World not found")
    details = [calc.get_district_detail(db, world, district) for district in districts]
    return schemas.paissa.WorldDetail(
        id=world.id,
        name=world.name,
        districts=details,
        num_open_plots=sum(d.num_open_plots for d in details),
        oldest_plot_time=min(d.oldest_plot_time for d in details)
    )
@app.get("/worlds/{world_id}/{district_id}", response_model=schemas.paissa.DistrictDetail)
def get_district_detail(world_id: int, district_id: int, db: Session = Depends(get_db)):
    """Return detail for a single district of a world; 404 if either id is unknown."""
    world = crud.get_world_by_id(db, world_id)
    district = crud.get_district_by_id(db, district_id)
    if world is None or district is None:
        raise HTTPException(404, "World not found")
    return calc.get_district_detail(db, world, district)
# ==== WS ====
@app.on_event("startup")
async def connect_broadcast():
    """Connect the websocket broadcast manager and start the sweep processor."""
    await ws.manager.connect()
    # this never gets cancelled explicitly, it's just killed when the app dies
    asyncio.create_task(ws.process_wardsweeps())
@app.on_event("shutdown")
async def disconnect_broadcast():
    """Close every connected websocket client, then the broadcast manager."""
    for client in ws.clients:
        await client.close(status.WS_1012_SERVICE_RESTART)
    await ws.manager.disconnect()
@app.websocket("/ws")
async def plot_updates(websocket: WebSocket, jwt: Optional[str] = None, db: Session = Depends(get_db)):
    """Websocket endpoint streaming plot updates; `jwt` query param authenticates."""
    # token must be present
    if jwt is None:
        # NOTE(review): tokenless connections are currently accepted
        # anonymously instead of being rejected — see the fixme below.
        await ws.connect(db, websocket, None)  # fixme
        # await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
        return
    # and valid
    try:
        sweeper = auth.decode_token(jwt)
    except jwtlib.InvalidTokenError:
        await websocket.close(code=status.WS_1008_POLICY_VIOLATION)
        return
    await ws.connect(db, websocket, sweeper)
|
"""External function interface to NNPACK libraries."""
from __future__ import absolute_import as _abs
from .. import api as _api
from .. import intrin as _intrin
from .._ffi.function import _init_api
def config(nthreads):
    """Configure the nnpack library.
    Parameters
    ----------
    nthreads : int
        The threads number of nnpack thread pool, must be a nonnegative.
    """
    # _Config is a PackedFunc injected by _init_api at module load.
    _Config(nthreads)
def fully_connected_inference(lhs, rhs, nthreads=1):
    """Create an extern op that compute fully connected of 1D tensor lhs and
    2D tensor rhs with nnpack.

    Parameters
    ----------
    lhs : Tensor
        1D array input[input_channels] of FP32 elements
    rhs : Tensor
        2D matrix kernel[output_channels][input_channels] of FP32 elements
    nthreads : int, optional
        Thread-count hint forwarded to the nnpack runtime call.

    Returns
    -------
    C : Tensor
        1D array out[output_channels] of FP32 elements.
    """
    output_channels = rhs.shape[0]

    def _invoke(ins, outs):
        # Emit a call to the packed nnpack runtime function.
        return _intrin.call_packed(
            "tvm.contrib.nnpack.fully_connected_inference",
            ins[0], ins[1], outs[0], nthreads)

    return _api.extern((output_channels, ), [lhs, rhs], _invoke, name="C")
def fully_connected_output(lhs, rhs, nthreads=1):
    """Create an extern op that compute fully connected of 2D tensor lhs and
    2D tensor rhs with nnpack.

    Parameters
    ----------
    lhs : Tensor
        2D matrix input[batch_size][input_channels] of FP32 elements
    rhs : Tensor
        2D matrix kernel[output_channels][input_channels] of FP32 elements
    nthreads : int, optional
        Thread-count hint forwarded to the nnpack runtime call.

    Returns
    -------
    C : Tensor
        2D array out[batch_size][output_channels] of FP32 elements.
    """
    batch_size = lhs.shape[0]
    output_channels = rhs.shape[0]

    def _invoke(ins, outs):
        # Emit a call to the packed nnpack runtime function.
        return _intrin.call_packed(
            "tvm.contrib.nnpack.fully_connected_output",
            ins[0], ins[1], outs[0], nthreads)

    return _api.extern((batch_size, output_channels), [lhs, rhs], _invoke, name="C")
def convolution_inference(data, kernel, bias, padding, stride, nthreads=1):
    """Create an extern op to do inference convolution of 3D tensor data and
    4D tensor kernel and 1D tensor bias with nnpack.

    Parameters
    ----------
    data : Tensor
        data 3D tensor input[input_channels][input_height][input_width] of
        FP32 elements.
    kernel : Tensor
        kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
        [kernel_width] of FP32 elements.
    bias : Tensor
        bias 1D array bias[output_channels] of FP32 elements.
    padding : list
        padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
        which indicates the padding around the feature map.
    stride : list
        stride A 2-dim list of [stride_height, stride_width], which indicates
        the stride.

    Returns
    -------
    output : Tensor
        output 3D tensor output[output_channels][output_height][output_width]
        of FP32 elements.
    """
    assert isinstance(padding, list) and len(padding) == 4
    assert isinstance(stride, list) and len(stride) == 2
    _, input_height, input_width = data.shape
    output_channels, _, kernel_height, kernel_width = kernel.shape
    # BUG FIX: the width axis must use the horizontal pads padding[2]/padding[3]
    # (pad_left/pad_right); the original reused the vertical pads, mis-sizing
    # the output whenever left/right padding differs from top/bottom.
    # NOTE(review): `/` on these shape values may yield a float under Python 3
    # if the shapes are plain ints — confirm the expected TVM expr semantics.
    output_height = (input_height + padding[0] + padding[1] - kernel_height) / stride[0] + 1
    output_width = (input_width + padding[2] + padding[3] - kernel_width) / stride[1] + 1
    return _api.extern(
        (output_channels, output_height, output_width), [data, kernel, bias],
        lambda ins, outs: _intrin.call_packed(
            "tvm.contrib.nnpack.convolution_inference", ins[0], ins[1], ins[2],
            outs[0], padding[0], padding[1], padding[2], padding[3],
            stride[0], stride[1], nthreads), name="C")
def convolution_output(data, kernel, bias, padding, nthreads=1):
    """Create an extern op to compute convolution of 4D tensor data and
    4D tensor kernel and 1D tensor bias with nnpack.

    Parameters
    ----------
    data : Tensor
        data 4D tensor input[batch_size][input_channels][input_height]
        [input_width] of FP32 elements.
    kernel : Tensor
        kernel 4D tensor kernel[output_channels][input_channels][kernel_height]
        [kernel_width] of FP32 elements.
    bias : Tensor
        bias 1D array bias[output_channels] of FP32 elements.
    padding : list
        padding A 4-dim list of [pad_top, pad_bottom, pad_left, pad_right],
        which indicates the padding around the feature map.

    Returns
    -------
    output : Tensor
        output 4D tensor output[batch_size][output_channels][output_height]
        [output_width] of FP32 elements.
    """
    assert isinstance(padding, list) and len(padding) == 4
    batch, _, input_height, input_width = data.shape
    output_channels, _, kernel_height, kernel_width = kernel.shape
    # BUG FIX: width must use the horizontal pads padding[2]/padding[3]
    # (pad_left/pad_right); the original reused the vertical pads.
    output_height = (input_height + padding[0] + padding[1] - kernel_height) + 1
    output_width = (input_width + padding[2] + padding[3] - kernel_width) + 1
    return _api.extern(
        (batch, output_channels, output_height, output_width), [data, kernel, bias],
        lambda ins, outs: _intrin.call_packed(
            "tvm.contrib.nnpack.convolution_output", ins[0], ins[1], ins[2],
            outs[0], padding[0], padding[1], padding[2], padding[3], nthreads), name="C")
# Register the nnpack PackedFuncs (e.g. _Config) into this module's namespace.
_init_api("tvm.contrib.nnpack")
|
<reponame>KvyatkovskyAleksey/ScrapeWebdriver<gh_stars>0
import os
import re
from itertools import cycle
# import seleniumwire.webdriver
from selenium.webdriver.remote.command import Command
from selenium import webdriver
from bs4 import BeautifulSoup
from webdriver_manager.firefox import GeckoDriverManager
from .extension_creator import create_extension, create_anticaptcha_extension
class ChangeProxyMixin:
    """Mixin with methods for scraping on selenium.webdriver(Firefox) base.

    Adds proxy rotation (optionally per request), optional adblock and
    anti-captcha extension installation, cache control and BeautifulSoup
    access to the current page source.
    """

    def __init__(self,
                 change_proxies_on_each_request=True,
                 proxies=None,
                 install_adblock=True,
                 anticaptcha_api_key=None,
                 *args, **kwargs):
        """Set up the driver, install extensions and prepare proxy rotation.

        :param change_proxies_on_each_request: rotate to the next proxy on
            every ``get()`` call.
        :param proxies: iterable of proxy strings; cycled endlessly.
        :param install_adblock: install the bundled AdBlocker Ultimate XPI.
        :param anticaptcha_api_key: if given, build and install the
            anti-captcha extension with this key.
        """
        self.path = os.path.dirname(os.path.realpath(__file__))
        self.change_proxies_on_each_request = change_proxies_on_each_request
        self.proxies = proxies
        if self.proxies:
            # Endless round-robin over the supplied proxies.
            self.proxies = cycle(self.proxies)
        kwargs['executable_path'] = GeckoDriverManager().install()
        super().__init__(*args, **kwargs)
        self.execute(Command.GET, {'url': "about:config"})
        # Allow unsigned extensions so the locally built XPIs below install.
        self.set_preference('xpinstall.signatures.required', 'false')
        if install_adblock:
            self.install_addon(
                f'{self.path}/extensions/adblocker_ultimate-3.7.10-an+fx.xpi')
        if anticaptcha_api_key:
            create_anticaptcha_extension(anticaptcha_api_key)
            self.install_addon(
                f'{self.path}/extensions/anticaptcha-plugin.xpi')

    def soup(self):
        """Get soup from page"""
        return BeautifulSoup(self.page_source, 'lxml')

    def change_proxy(self, proxy):
        """Open config page and change proxy.

        *proxy* is split on ':' and '@'; it is presumably of the form
        ``type://user:pass@host:port`` or ``type://host:port`` -- TODO confirm
        against callers.
        """
        proxy = re.split(':|@', proxy)
        proxy_username = None
        # BUGFIX: this line was the unresolved placeholder `<PASSWORD>`
        # (a syntax error); default to None until credentials are parsed.
        proxy_password = None
        proxy_type = proxy[0]
        proxy_address = proxy[-2]
        proxy_port = int(proxy[-1])
        if len(proxy) > 3:
            proxy_username = proxy[1].strip('//')
            proxy_password = proxy[2]
        self.execute(Command.GET, {'url': "about:config"})
        if 'socks' in proxy_type:
            # e.g. 'socks5' -> SOCKS protocol version 5.
            self.set_preference('network.proxy.socks_version', int(proxy_type[-1]))
            self.set_preference('network.proxy.socks', proxy_address)
            self.set_preference('network.proxy.socks_port', proxy_port)
            self.set_preference('network.proxy.type', 1)
        elif 'https' in proxy[0].lower():
            self.set_preference('network.proxy.ssl', proxy_address)
            self.set_preference('network.proxy.ssl_port', proxy_port)
            self.set_preference('network.proxy.type', 1)
        elif 'http' in proxy[0].lower():
            # A plain HTTP proxy is applied to every protocol handler.
            self.set_preference('network.proxy.http', proxy_address)
            self.set_preference('network.proxy.ftp', proxy_address)
            self.set_preference('network.proxy.socks', proxy_address)
            self.set_preference('network.proxy.ssl', proxy_address)
            self.set_preference('network.proxy.http_port', proxy_port)
            self.set_preference('network.proxy.ftp_port', proxy_port)
            self.set_preference('network.proxy.socks_port', proxy_port)
            self.set_preference('network.proxy.ssl_port', proxy_port)
            self.set_preference('network.proxy.type', 1)
            self.set_preference('network.proxy.share_proxy_settings', 'true')
        if proxy_username and proxy_password:
            # Firefox prefs cannot hold proxy credentials; install a helper
            # extension that answers the authentication prompt instead.
            create_extension(proxy_username, proxy_password)
            self.install_addon(
                f'{self.path}/extensions/extension.xpi')

    def disable_cache(self):
        """Disable browser cache"""
        self.execute(Command.GET, {'url': "about:config"})
        self.set_preference('browser.cache.disk.enable', 'false')
        self.set_preference('browser.cache.memory.enable', 'false')
        self.set_preference('browser.cache.offline.enable', 'false')
        self.set_preference('network.http.use-cache', 'false')

    def set_preference(self, pref, params):
        """Set preference in 'about:config' """
        # 'false'/'true' strings map to bool prefs; ints and other strings go
        # through setIntPref / setCharPref. type() comparisons (not
        # isinstance) are kept deliberately so bool values fall through.
        if params in ['false', 'true']:
            self.execute_script('Components.classes["@mozilla.org/preferences-service;1"]\
            .getService(Components.interfaces.nsIPrefBranch).setBoolPref("' + pref + '", ' + str(params) + ');')
        elif type(params) == int:
            self.execute_script('Components.classes["@mozilla.org/preferences-service;1"]\
            .getService(Components.interfaces.nsIPrefBranch).setIntPref("' + pref + '", ' + str(params) + ');')
        elif type(params) == str:
            self.execute_script('Components.classes["@mozilla.org/preferences-service;1"]\
            .getService(Components.interfaces.nsIPrefBranch).setCharPref("' + pref + '", "' + params + '");')

    def get(self, url):
        """Loads a web page in the current browser session."""
        if self.change_proxies_on_each_request and self.proxies and url != 'about:config':
            # next() is the idiomatic spelling of .__next__().
            proxy = next(self.proxies)
            self.change_proxy(proxy)
        self.execute(Command.GET, {'url': url})
class ScrapyWebdriver(ChangeProxyMixin, webdriver.Firefox):
    """Firefox webdriver with proxy rotation, adblock and anti-captcha support."""
    pass
# class ScrapyWebdriverWire(ChangeProxyMixin, seleniumwire.webdriver.Firefox):
# pass
if __name__ == '__main__':
    # Manual smoke test: requires a local `proxies` module exporting a proxy list.
    from proxies import proxies
    driver = ScrapyWebdriver(proxies=proxies)
|
# -*- coding: utf-8 -*-
"""Tools to build Columns HighCharts parameters."""
from .base import JSONView
class BaseColumnsHighChartsView(JSONView):
    """Base Class to generate Column HighCharts configuration.

    Define at least title, yUnit, providers, get_labels() and
    get_data() to get started.
    """
    # Series names, indexed by position in get_series(); subclasses are
    # expected to override this with a list.
    providers = {}
    # HighCharts 'credits' configuration, passed through verbatim.
    credits = {'enabled': True}

    def get_context_data(self, **kwargs):
        """Return graph configuration."""
        context = super(BaseColumnsHighChartsView, self).get_context_data(**kwargs)
        context.update({
            'chart': self.get_type(),
            'title': self.get_title(),
            'subtitle': self.get_subtitle(),
            'xAxis': self.get_xAxis(),
            'yAxis': self.get_yAxis(),
            'tooltip': self.get_tooltip(),
            'plotOptions': self.get_plotOptions(),
            'series': self.get_series(),
            'credits': self.credits
        })
        return context

    def get_type(self):
        """Return graph type."""
        return {'type': 'column'}

    def get_title(self):
        """Return graph title (requires self.title)."""
        try:
            return {'text': u'%s' % self.title}
        except AttributeError:  # pragma: no cover
            raise NotImplementedError(  # pragma: no cover
                'You should define self.title or self.get_title')

    def get_subtitle(self):
        """Return graph subtitle ('' when self.subtitle is undefined)."""
        subtitle = u'%s' % getattr(self, 'subtitle', '')
        return subtitle

    def get_xAxis(self):
        """Return xAxis configuration built from get_labels()."""
        return {'categories': self.get_labels()}

    def get_labels(self):
        """Return the category labels for the xAxis (must be overridden)."""
        raise NotImplementedError(  # pragma: no cover
            'You should return a labels list. '
            '(i.e: ["January", ...])')

    def get_yAxis(self):
        """Return yAxis configuration; min defaults to self.yMin or 0."""
        return {'min': getattr(self, 'yMin', 0),
                'title': self.get_yTitle()}

    def get_yTitle(self):
        """Return yAxis title.

        NOTE(review): reads self.subtitle, exactly like get_subtitle() --
        looks like a copy-paste; confirm a dedicated attribute was intended.
        """
        subtitle = u'%s' % getattr(self, 'subtitle', '')
        return subtitle

    def get_yUnit(self):
        """Return the unit string shown next to y values in tooltips."""
        try:
            return self.yUnit
        except AttributeError:  # pragma: no cover
            raise NotImplementedError(  # pragma: no cover
                'Please define the yAxis unit (self.yUnit).')

    def get_tooltip(self):
        """Return tooltip configuration."""
        return {
            'headerFormat': '''
                <span style="font-size:10px">
                    {point.key}
                </span>
                <table>''',
            'pointFormat': '''
                <tr>
                    <td style="color:{series.color};padding:0">
                        {series.name}:
                    </td>
                    <td style="padding:0">
                        <b>{point.y:.0f} %s</b>
                    </td>
                </tr>''' % self.get_yUnit(),
            'footerFormat': '</table>',
            'shared': True,
            'useHTML': True
        }

    def get_plotOptions(self):
        """Return plotOptions configuration."""
        return {'column': {'pointPadding': 0.2, 'borderWidth': 0}}

    def get_series(self):
        """Generate HighCharts series from providers and data."""
        series = []
        data = self.get_data()
        providers = self.get_providers()
        for i, d in enumerate(data):
            series.append({'name': providers[i],
                           'data': d})
        return series

    def get_data(self):
        """Return a list of series [[25, 34, 0, 1, 50], ...]).

        In the same order as providers and with the same serie length
        of xAxis.
        """
        raise NotImplementedError(  # pragma: no cover
            'You should return a data list list. '
            '(i.e: [[25, 34, 0, 1, 50], ...]).')

    def get_providers(self):
        """Return the list of data series names.

        Providers numbers should be equal to series numbers.
        """
        try:
            return self.providers
        except AttributeError:  # pragma: no cover
            raise NotImplementedError(  # pragma: no cover
                # BUGFIX: message previously read "...providers of self.get_providers."
                'You should define self.providers or self.get_providers.')
|
class basegraph:
    """Abstract interface for an undirected graph.

    Concrete subclasses must implement node and arc management
    (add/remove/lookup/adjacency) and the bfs()/dfs() traversals;
    every method here simply raises NotImplementedError.
    """

    def add_node(self, element):
        """Add a new node holding *element* to the graph."""
        raise NotImplementedError("add_node: You should have implemented this method!")

    def remove_node(self, node_id):
        """Remove the node identified by *node_id* from the graph."""
        raise NotImplementedError("remove_node: You should have implemented this method!")

    def add_arc(self, nodeA_id, nodeB_id, info):
        """Add an undirected arc between the two nodes, carrying *info*."""
        raise NotImplementedError("add_arc: You should have implemented this method!")

    def remove_arc(self, nodeA_id, nodeB_id):
        """Remove the undirected arc between *nodeA_id* and *nodeB_id*."""
        raise NotImplementedError("remove_arc: You should have implemented this method!")

    def set_arc_status(self, nodeA_id, nodeB_id, status):
        """Attach *status* info to the arc between the two given nodes."""
        raise NotImplementedError("set_arc_status: You should have implemented this method!")

    def get_nodes(self):
        """Return the list of all nodes in the graph."""
        raise NotImplementedError("get_nodes: You should have implemented this method!")

    def get_arcs(self):
        """Return the list of all undirected arcs in the graph."""
        raise NotImplementedError("get_arcs: You should have implemented this method!")

    def get_num_nodes(self):
        """Return the number of nodes in the graph."""
        raise NotImplementedError("get_num_nodes: You should have implemented this method!")

    def get_num_arcs(self):
        """Return the number of undirected arcs in the graph."""
        raise NotImplementedError("get_num_arcs: You should have implemented this method!")

    def get_node_by_id(self, node_id):
        """Return the node corresponding to *node_id*."""
        raise NotImplementedError("get_node_by_id: You should have implemented this method!")

    def get_incident_arcs(self, node_id):
        """Return every arc incident to the node identified by *node_id*."""
        raise NotImplementedError("get_incident_arcs: You should have implemented this method!")

    def are_adjacent(self, nodeA_id, nodeB_id):
        """Return True if the two nodes are adjacent, otherwise False."""
        raise NotImplementedError("are_adjacent: You should have implemented this method!")

    def dfs(self, root_node_id):
        """Return the LIFO (depth-first) visit order from *root_node_id*."""
        raise NotImplementedError("dfs: You should have implemented this method!")

    def bfs(self, root_node_id):
        """Return the FIFO (breadth-first) visit order from *root_node_id*."""
        raise NotImplementedError("bfs: You should have implemented this method!")
|
<reponame>povellesto/blobydouche<filename>Blob Rage App/main.py
from kivy.app import App
from kivy.lang import Builder
from kivy.uix.widget import Widget
from kivy.vector import Vector
from kivy.uix.screenmanager import ScreenManager, Screen
from kivy.properties import NumericProperty, ReferenceListProperty, ObjectProperty
from kivy.core.audio import SoundLoader
from kivy.uix.textinput import TextInput
from kivy.uix.label import Label
from kivy.uix.progressbar import ProgressBar
from kivy.properties import StringProperty
from kivy.clock import Clock
#from kivy.uix.colorpicker import CBLColorWheel
from random import randint
# Create both screens. Please note the root.manager.current: this is how
# you can control the ScreenManager from kv. Each screen has by default a
# property manager that gives you the instance of the ScreenManager used.
def on_enter(instance, value):
    """Callback fired when the user presses Enter inside *instance*."""
    print(f'User pressed enter in {instance}')
# All screen layouts are declared inline in kv language; the buttons navigate
# by assigning registered screen names to root.manager.current.
Builder.load_string("""
<StartScreen>:
BoxLayout:
Button:
text: 'Options'
on_press: root.manager.current = 'options'
Button:
text: 'Play'
on_press: root.manager.current = 'gameOptions'
<Progresscreen>:
BoxLayout:
Label:
text_size: self.size
halign: 'left'
valign: 'bottom'
text: root.loading
<Error>:
BoxLayout:
orientation: 'vertical'
Label:
text:'ERROR'
Button:
text: 'Back'
on_press: root.manager.current = 'options'
<OptionsScreen>:
BoxLayout:
orientation: 'vertical'
Button:
text: 'Sound'
on_press: root.manager.current = 'error'
Button:
text:' Controls'
on_press: root.manager.current = 'controls'
Button:
text: 'Back to Main Menu'
on_press: root.manager.current = 'start'
<GameOptionsScreen>:
BoxLayout:
orientation: 'vertical'
Button:
text:'Level'
TextInput:
Button:
text:'Back'
on_press: root.manager.current = 'start'
<Blob>:
size: 50, 50
canvas:
Ellipse:
pos: self.pos
size: self.size
<GameScreen>:
Label:
font_size: 70
center_x: root.width / 2
top: root.top - 50
text: "The Game"
<ControlsScreen>:
BoxLayout:
orientation: 'vertical'
Button:
text:'Forward'
on_press: root.manager.current = 'forwardOptions'
Button:
text:'Right'
Button:
text:'Left'
Button:
text:'Back'
Button:
text:'Back to options'
on_press: root.manager.current = 'options'
Button:
text:'Back to main menu'
on_press: root.manager.current = 'start'
<forwardOptionsScreen>:
BoxLayout:
orientation: 'vertical'
Label:
text: 'PRESS A KEY'
BoxLayout:
orientation: 'horizontal'
TextInput:
Button:
text:'Back'
on_press: root.manager.current = 'options'
Button:
text:'ENTER'
on_press: root.manager.current = 'controls'
""")
class Blob(Widget):
    """A moving circle; its Ellipse canvas comes from the kv rule above."""
    x = NumericProperty(0)
    y = NumericProperty(0)
    pos = ReferenceListProperty(x, y)
    x_velocity = NumericProperty(0)
    y_velocity = NumericProperty(0)
    velocity = ReferenceListProperty(x_velocity, y_velocity)

    def __init__(self, **kwargs):
        super(Blob, self).__init__(**kwargs)
        # BUGFIX: the original rebound the class attribute `velocity` to a
        # single Vector, clobbering the ReferenceListProperty and sharing one
        # random velocity across every Blob. Randomize per instance instead.
        self.velocity = Vector(randint(0, 4), 0).rotate(randint(0, 360))

    def move(self):
        """Advance the blob by one velocity step."""
        self.pos = Vector(*self.velocity) + Vector(*self.pos)
class Progresscreen(Screen):
    """Loading screen: fills a progress bar, then switches to 'start'."""
    # Text bound into the kv Label ("Loading", "Loading.", ...).
    loading = StringProperty()

    def __init__(self, **kwargs):
        # Counter driving the animated dots; set before super() so it exists
        # when kv bindings fire.
        self.dots = 0
        super(Progresscreen, self).__init__(**kwargs)
        self.pb = ProgressBar(max=100)
        self.add_widget(self.pb)

    def update(self, dt):
        """Advance the bar by dt*5; go to the start screen when full."""
        if self.pb.value < 99:
            self.pb.value += dt * 5
            self.dots += 1
            if self.dots == 400:
                self.dots = 0
            # BUGFIX: '/' yields a float on Python 3 and `str * float`
            # raises TypeError; '//' keeps the repeat count an int.
            self.loading = "Loading" + ("." * (self.dots // 100))
        else:
            self.manager.current = 'start'
# Plain navigation screens; their layouts come from the kv string loaded above.
class StartScreen(Screen):
    pass
class OptionsScreen(Screen):
    pass
class GameOptionsScreen(Screen):
    pass
class ControlsScreen (Screen):
    pass
class forwardOptionsScreen(Screen):
    pass
# Placeholder screen shown for not-yet-implemented options.
class Error (Screen):
    pass
class GameScreen(Screen):
    """Main game screen; advances blob movement once per scheduled frame."""

    def update(self, dt):
        """Advance the simulation by one frame.

        BUGFIX: the original called self.blob.move() unconditionally, but no
        `blob` attribute is ever created (the spawning code in __init__ was
        commented out), so update() raised AttributeError once this screen
        became active. Guard until spawning is (re)implemented.
        """
        blob = getattr(self, 'blob', None)
        if blob is not None:
            blob.move()
# Create the screen manager
# All screens are registered up front; kv button handlers navigate between
# them by name via root.manager.current.
sm = ScreenManager()
sm.add_widget(Progresscreen(name='progressbar'))
sm.add_widget(StartScreen(name='start'))
sm.add_widget(OptionsScreen(name='options'))
sm.add_widget(GameScreen(name='game'))
sm.add_widget(GameOptionsScreen(name='gameOptions'))
sm.add_widget(ControlsScreen(name='controls'))
sm.add_widget(forwardOptionsScreen(name='forwardOptions'))
sm.add_widget(Error(name='error'))
class TestApp(App):
    """Application entry point; returns the pre-built ScreenManager."""
    # Background-music handle (loading is currently disabled below).
    sound = None
    def build(self):
        # self.sound = SoundLoader.load('Spewer - Main Theme.mp3')
        #self.sound.volume = 1
        #self.bind(on_stop=self.sound_replay)
        #self.sound.play()
        # NOTE(review): this binds the update callback to whichever screen is
        # current at build time (the progress screen), not to whichever screen
        # is current when the clock fires -- confirm this is intended.
        Clock.schedule_interval(sm.current_screen.update, 1.0/60.0)
        return sm
if __name__ == '__main__':
    # Run the app directly (the ScreenManager is built at import time above).
    TestApp().run()
|
<filename>modpybass/pybasswma.py
# Copyright(c) <NAME> 2009 <EMAIL>
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.2'
__versionTime__ = '2013-01-22'
__author__ = '<NAME> <<EMAIL>>'
__doc__ = '''
pybasswma.py - is ctypes python module for
BASSWMA - extension to the BASS audio library,
enabling the playback of WMA files and network streams.
The audio tracks of WMV files can also be played.
WMA file encoding and network broadcasting functions are also provided.
Requirements
============
BASS 2.4 is required. The Windows Media Format modules (v9 or above) are
also required to be installed on the user's system. They are installed with
Windows Media player, so will already be on most users' systems, but they
can also be installed separately (WMFDIST.EXE is available from the BASS website).
'''
import ctypes
try:
import bass
import pybass
except ImportError:
from .import bass
from .import pybass
basswma_module, func_type = bass.load(__file__)
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
BASS_FILEPROCS = pybass.BASS_FILEPROCS
HWMENCODE = ctypes.c_ulong # WMA encoding handle
# Additional error codes returned by BASS_ErrorGetCode
BASS_ERROR_WMA_LICENSE = 1000 # the file is protected
BASS_ERROR_WMA = 1001 # Windows Media (9 or above) is not installed
BASS_ERROR_WMA_WM9 = BASS_ERROR_WMA
BASS_ERROR_WMA_DENIED = 1002 # access denied (user/pass is invalid)
BASS_ERROR_WMA_INDIVIDUAL = 1004 # individualization is needed
BASS_ERROR_WMA_PUBINIT = 1005 # publishing point initialization problem
# Additional BASS_SetConfig options
BASS_CONFIG_WMA_PRECHECK = 0x10100
BASS_CONFIG_WMA_PREBUF = 0x10101
BASS_CONFIG_WMA_BASSFILE = 0x10103
BASS_CONFIG_WMA_NETSEEK = 0x10104
BASS_CONFIG_WMA_VIDEO = 0x10105
# additional WMA sync types
BASS_SYNC_WMA_CHANGE = 0x10100
BASS_SYNC_WMA_META = 0x10101
# additional BASS_StreamGetFilePosition WMA mode
BASS_FILEPOS_WMA_BUFFER = 1000 # internet buffering progress (0-100%)
# Additional flags for use with BASS_WMA_EncodeOpen/File/Network/Publish
BASS_WMA_ENCODE_STANDARD = 0x2000 # standard WMA
BASS_WMA_ENCODE_PRO = 0x4000 # WMA Pro
BASS_WMA_ENCODE_24BIT = 0x8000 # 24-bit
BASS_WMA_ENCODE_SCRIPT = 0x20000 # set script (mid-stream tags) in the WMA encoding
# Additional flag for use with BASS_WMA_EncodeGetRates
BASS_WMA_ENCODE_RATES_VBR = 0x10000 # get available VBR quality settings
# typedef void (CALLBACK CLIENTCONNECTPROC)(
# HWMENCODE handle, BOOL connect, const char *ip, void *user);
CLIENTCONNECTPROC = func_type(
None, HWMENCODE, ctypes.c_byte, ctypes.c_char_p, ctypes.c_void_p
)
# Client connection notification callback function.
# handle : The encoder
# connect: TRUE=client is connecting, FALSE=disconnecting
# ip : The client's IP (xxx.xxx.xxx.xxx:port)
# user : The 'user' parameter value given when calling BASS_WMA_EncodeSetNotify
# typedef void (CALLBACK WMENCODEPROC)(
# HWMENCODE handle, DWORD type, const void *buffer, DWORD length, void *user);
WMENCODEPROC = func_type(
None, HWMENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p
)
# Encoder callback function.
# handle : The encoder handle
# type : The type of data, one of BASS_WMA_ENCODE_xxx values
# buffer : The encoded data
# length : Length of the data
# user : The 'user' parameter value given when calling BASS_WMA_EncodeOpen
# WMENCODEPROC "type" values
BASS_WMA_ENCODE_HEAD = 0
BASS_WMA_ENCODE_DATA = 1
BASS_WMA_ENCODE_DONE = 2
# BASS_WMA_EncodeSetTag "form" values
BASS_WMA_TAG_ANSI = 0
BASS_WMA_TAG_UNICODE = 1
BASS_WMA_TAG_UTF8 = 2
BASS_WMA_TAG_BINARY = 0x100 # FLAG: binary tag (HIWORD=length)
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_WMA = 0x10300
BASS_CTYPE_STREAM_WMA_MP3 = 0x10301
# Additional BASS_ChannelGetTags types
BASS_TAG_WMA = 8 # WMA header tags : series of null-terminated UTF-8 strings
BASS_TAG_WMA_META = 11 # WMA mid-stream tag : UTF-8 string
# HSTREAM BASSWMADEF(BASS_WMA_StreamCreateFile)(
# BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_WMA_StreamCreateFile = func_type(
HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong
)(('BASS_WMA_StreamCreateFile', basswma_module))
# HSTREAM BASSWMADEF(BASS_WMA_StreamCreateFileAuth)(
# BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags, const char *user, const char *pass);
BASS_WMA_StreamCreateFileAuth = func_type(
HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_char_p
)(('BASS_WMA_StreamCreateFileAuth', basswma_module))
# HSTREAM BASSWMADEF(BASS_WMA_StreamCreateFileUser)(
# DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_WMA_StreamCreateFileUser = func_type(
HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p
)(('BASS_WMA_StreamCreateFileUser', basswma_module))
# const char *BASSWMADEF(BASS_WMA_GetTags)(
# const void *file, DWORD flags);
BASS_WMA_GetTags = func_type(
ctypes.c_char_p, ctypes.c_void_p, ctypes.c_ulong
)(('BASS_WMA_GetTags', basswma_module))
# const DWORD *BASSWMADEF(BASS_WMA_EncodeGetRates)(
# DWORD freq, DWORD chans, DWORD flags);
BASS_WMA_EncodeGetRates = func_type(
ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong
)(('BASS_WMA_EncodeGetRates', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpen)(
# DWORD freq, DWORD chans, DWORD flags, DWORD bitrate, WMENCODEPROC *proc, void *user);
BASS_WMA_EncodeOpen = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, WMENCODEPROC, ctypes.c_void_p
)(('BASS_WMA_EncodeOpen', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpenFile)(
# DWORD freq, DWORD chans, DWORD flags, DWORD bitrate, const char *file);
BASS_WMA_EncodeOpenFile = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_char_p
)(('BASS_WMA_EncodeOpenFile', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpenNetwork)(
# DWORD freq, DWORD chans, DWORD flags, DWORD bitrate, DWORD port, DWORD clients);
BASS_WMA_EncodeOpenNetwork = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong
)(('BASS_WMA_EncodeOpenNetwork', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpenNetworkMulti)(
# DWORD freq, DWORD chans, DWORD flags, const DWORD *bitrates, DWORD port, DWORD clients);
BASS_WMA_EncodeOpenNetworkMulti = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong,
ctypes.POINTER(ctypes.c_ulong), ctypes.c_ulong, ctypes.c_ulong
)(('BASS_WMA_EncodeOpenNetworkMulti', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpenPublish)(
# DWORD freq, DWORD chans, DWORD flags, DWORD bitrate, const char *url, const char *user, const char *pass);
BASS_WMA_EncodeOpenPublish = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong,
ctypes.c_ulong, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p
)(('BASS_WMA_EncodeOpenPublish', basswma_module))
# HWMENCODE BASSWMADEF(BASS_WMA_EncodeOpenPublishMulti)(
# DWORD freq, DWORD chans, DWORD flags, const DWORD *bitrates, const char *url, const char *user, const char *pass);
BASS_WMA_EncodeOpenPublishMulti = func_type(
HWMENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong,
ctypes.POINTER(ctypes.c_ulong), ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p
)(('BASS_WMA_EncodeOpenPublishMulti', basswma_module))
# DWORD BASSWMADEF(BASS_WMA_EncodeGetPort)(
# HWMENCODE handle);
BASS_WMA_EncodeGetPort = func_type(
ctypes.c_ulong, HWMENCODE
)(('BASS_WMA_EncodeGetPort', basswma_module))
# BOOL BASSWMADEF(BASS_WMA_EncodeSetNotify)(
# HWMENCODE handle, CLIENTCONNECTPROC *proc, void *user);
BASS_WMA_EncodeSetNotify = func_type(
ctypes.c_byte, HWMENCODE, CLIENTCONNECTPROC, ctypes.c_void_p
)(('BASS_WMA_EncodeSetNotify', basswma_module))
# DWORD BASSWMADEF(BASS_WMA_EncodeGetClients)(
# HWMENCODE handle);
BASS_WMA_EncodeGetClients = func_type(
ctypes.c_ulong, HWMENCODE
)(('BASS_WMA_EncodeGetClients', basswma_module))
# BOOL BASSWMADEF(BASS_WMA_EncodeSetTag)(
# HWMENCODE handle, const char *tag, const char *text, DWORD form);
BASS_WMA_EncodeSetTag = func_type(
ctypes.c_byte, HWMENCODE, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong
)(('BASS_WMA_EncodeSetTag', basswma_module))
# BOOL BASSWMADEF(BASS_WMA_EncodeWrite)(
# HWMENCODE handle, const void *buffer, DWORD length);
BASS_WMA_EncodeWrite = func_type(
ctypes.c_byte, HWMENCODE, ctypes.c_void_p, ctypes.c_ulong
)(('BASS_WMA_EncodeWrite', basswma_module))
# BOOL BASSWMADEF(BASS_WMA_EncodeClose)(
# HWMENCODE handle);
BASS_WMA_EncodeClose = func_type(
ctypes.c_byte, HWMENCODE
)(('BASS_WMA_EncodeClose', basswma_module))
# void *BASSWMADEF(BASS_WMA_GetWMObject)(
# DWORD handle);
BASS_WMA_GetWMObject = func_type(
ctypes.c_void_p, ctypes.c_ulong
)(('BASS_WMA_GetWMObject', basswma_module))
if __name__ == '__main__':
    # Smoke test: initialise the default BASS device at 44.1 kHz, play
    # 'test.wma' through the WMA stream, then free BASS resources.
    if not pybass.BASS_Init(-1, 44100, 0, 0, 0):
        print('BASS_Init error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))
    else:
        handle = BASS_WMA_StreamCreateFile(False, b'test.wma', 0, 0, 0)
        pybass.play_handle(handle)
        if not pybass.BASS_Free():
            print('BASS_Free error %s' % pybass.get_error_description(pybass.BASS_ErrorGetCode()))
|
<reponame>dsommerville-illumio/resilient-community-apps
# -*- coding: utf-8 -*-
"""Tests using pytest_resilient_circuits"""
import pytest
from mock import patch
from resilient_circuits.util import get_function_definition
from resilient_circuits import SubmitTestFunction, FunctionResult
from resilient_lib import IntegrationError
from illumio import IllumioException
from .mock_functions import mocked_policy_compute_engine, mock_results, get_mock_config
PACKAGE_NAME = "fn_illumio"
FUNCTION_NAME = "illumio_get_workloads"
# Read the default configuration-data section from the package
# config_data = get_config_data(PACKAGE_NAME)
config_data = get_mock_config()
# Provide a simulation of the Resilient REST API (uncomment to connect to a real appliance)
resilient_mock = "pytest_resilient_circuits.BasicResilientMock"
def call_illumio_get_workloads_function(circuits, function_params, timeout=5):
    """Fire the illumio_get_workloads function and return its result value.

    Re-raises any exception the FunctionComponent raised during handling.
    """
    # Submit the test-function event and dispatch it through circuits.
    evt = SubmitTestFunction("illumio_get_workloads", function_params)
    circuits.manager.fire(evt)

    # circuits fires an "exception" event if the FunctionComponent raised;
    # surface that exception to the caller (guard clause instead of if/else).
    exception_event = circuits.watcher.wait("exception", parent=None, timeout=timeout)
    if exception_event is not False:
        raise exception_event.args[1]

    # Otherwise wait for the result event and unwrap the FunctionResult.
    event = circuits.watcher.wait("illumio_get_workloads_result", parent=evt, timeout=timeout)
    assert event
    assert isinstance(event.kwargs["result"], FunctionResult)
    pytest.wait_for(event, "complete", True)
    return event.kwargs["result"].value
class TestIllumioGetWorkloads:
    """ Tests for the illumio_get_workloads function"""
    def test_function_definition(self):
        """ Test that the package provides customization_data that defines the function """
        func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)
        assert func is not None
    # Inputs expected to match the mocked 'workloads_matching' result set.
    matching_workload_inputs = {
        "illumio_workload_online": True,
        "illumio_workload_enforcement_mode": "selective"
    }
    # References a data-centre zone absent from the mock data -> no results.
    missing_workload_inputs = {
        "illumio_workload_data_center_zone": "missing-data-centre"
    }
    # Mostly-empty / mixed-type inputs; also expected to return no workloads.
    failing_workload_inputs = {
        "illumio_workload_hostname": '',
        "illumio_workload_enforcement_mode": {'id': 113, 'name': 'visibility_only'},
        "illumio_workload_ip_address": '',
        "illumio_workload_name": '',
        "illumio_workload_labels": '',
        "illumio_workload_managed": True,
        "illumio_workload_online": None
    }
    # The PCE getter is mocked; parametrized over the three input dicts above.
    @patch('fn_illumio.components.funct_illumio_get_workloads.IllumioHelper.get_pce_instance', side_effect=mocked_policy_compute_engine)
    @pytest.mark.parametrize("mock_inputs, expected_results", [
        (matching_workload_inputs, mock_results('workloads_matching')),
        (missing_workload_inputs, []),
        (failing_workload_inputs, [])
    ])
    def test_success(self, mock_pce, circuits_app, mock_inputs, expected_results):
        """ Test calling with sample values for the parameters """
        results = call_illumio_get_workloads_function(circuits_app, mock_inputs)
        assert expected_results == results['content']['workloads']
    # Here the PCE getter raises IllumioException; the function is expected
    # to surface it as an IntegrationError.
    @patch('fn_illumio.components.funct_illumio_get_workloads.IllumioHelper.get_pce_instance', side_effect=IllumioException)
    @pytest.mark.parametrize("mock_inputs", [(matching_workload_inputs)])
    def test_thrown_exception(self, mock_pce, circuits_app, mock_inputs):
        with pytest.raises(IntegrationError):
            call_illumio_get_workloads_function(circuits_app, mock_inputs)
|
import typer
from rich.console import Console
from rich.table import Table
from ..core.pricing import PricingHandler
app = typer.Typer()
# Pricing data is fetched from the platform once at import time and shared
# (read-only) by every command below.
_handler = PricingHandler()
_data = _handler.get_all_prices()["pricing"]
_currency = _data["currency"]
# VAT rate pre-formatted once; reused as the last column of every table.
_vat = f"{float(_data['vat_rate']):6.4f}"
_console = Console()
@app.callback()
def callback() -> None:
    """
    Information about price for all resources available on the platform
    """
    # NOTE: typer renders this docstring as the CLI group help text, so it is
    # intentionally the only body of this callback.
@app.command("all", help="Information about price for all resources")
def get_all_prices() -> None:
    """
    Print price for all resources available on the platform
    """
    # Same invocation order as the individual commands originally used.
    printers = (
        get_float_ip_price,
        get_float_ips_price,
        get_image_price,
        get_load_balancers_price,
        get_server_backup_price,
        get_server_types_price,
        get_traffic_price,
        get_volume_price,
    )
    for printer in printers:
        printer()
@app.command("float_ip", help="Information about price for floating IP")
def get_float_ip_price() -> None:
    """
    Printing floating IP price as Table in console.
    """
    floating_ip_price = Table(title="Floating IP")
    floating_ip_price.add_column(f"Month, {_currency}\nWithout VAT", justify="center", style="bold green")
    floating_ip_price.add_column(f"Month, {_currency}\nWith VAT", justify="center", style="bold green")
    floating_ip_price.add_column("VAT, %", justify="center", style="bold")
    # `global` statements removed: _data, _vat and _console are only read
    # here, never rebound, so no `global` declaration is needed.
    ip_ = _data["floating_ip"]
    floating_ip_price.add_row(
        f"{float(ip_['price_monthly']['net']):6.4f}",
        f"{float(ip_['price_monthly']['gross']):6.4f}",
        _vat
    )
    _console.print(floating_ip_price)
@app.command("float_ips", help="Information about price for floating IPs")
def get_float_ips_price() -> None:
    """
    Printing floating IPs price as Table in console
    """
    floating_ips_price = Table(title="Floating IPs")
    floating_ips_price.add_column("Type", justify="center")
    floating_ips_price.add_column("Location", justify="center")
    floating_ips_price.add_column(f"Month, {_currency}\nWithout VAT", justify="center", style="bold green")
    floating_ips_price.add_column(f"Month, {_currency}\nWith VAT", justify="center", style="bold green")
    floating_ips_price.add_column("VAT, %", justify="center", style="bold")
    # `global` statements removed (module globals are read-only here);
    # the unused outer enumerate() index is gone too.
    ips_ = _data["floating_ips"]
    for ips_type in ips_:
        # Print the type name only on its first location row.
        for j, location_type in enumerate(ips_type['prices']):
            floating_ips_price.add_row(
                f"{ips_type['type'] if not j else ''}",
                f"{location_type['location']}",
                f"{float(location_type['price_monthly']['net']):6.4f}",
                f"{float(location_type['price_monthly']['gross']):6.4f}",
                _vat
            )
    _console.print(floating_ips_price)
@app.command("image", help="Information about price for image")
def get_image_price() -> None:
"""
Printing image price as Table in console
"""
image_price = Table(title="Image")
image_price.add_column(f"Month, {_currency}\nPrice per GB\nWithout VAT", justify="center", style="bold green")
image_price.add_column(f"Month, {_currency}\nPrice per GB\nWith VAT", justify="center", style="bold green")
image_price.add_column("VAT, %", justify="center", style="bold")
global _data
global _vat
image_ = _data["image"]
image_price.add_row(
f"{float(image_['price_per_gb_month']['net']):6.4f}",
f"{float(image_['price_per_gb_month']['gross']):6.4f}",
_vat
)
global _console
_console.print(image_price)
@app.command("load_balancer", help="Information about price and types for load balancer")
def get_load_balancers_price() -> None:
"""
Printing load balancers types and price as Table in console
"""
load_balance_price = Table(title="Load Balancers")
load_balance_price.add_column(f"id", justify="center", style="bold")
load_balance_price.add_column(f"Name", justify="center", style="")
load_balance_price.add_column(f"Location", justify="center", style="")
load_balance_price.add_column(f"Hour, {_currency}\nWithout VAT", justify="center", style="bold green")
load_balance_price.add_column(f"Hour, {_currency}\nWith VAT", justify="center", style="bold green")
load_balance_price.add_column(f"Month, {_currency}\nWithout VAT", justify="center", style="bold green")
load_balance_price.add_column(f"Month, {_currency}\nWith VAT", justify="center", style="bold green")
load_balance_price.add_column("VAT, %", justify="center", style="bold")
global _data
global _vat
lb_ = _data["load_balancer_types"]
for i, lb_type in enumerate(lb_):
for j, location_type in enumerate(lb_type['prices']):
load_balance_price.add_row(
f"{lb_type['id'] if not j else ''}",
f"{lb_type['name'] if not j else ''}",
f"{location_type['location']}",
f"{float(location_type['price_hourly']['net']):6.4f}",
f"{float(location_type['price_hourly']['gross']):6.4f}",
f"{float(location_type['price_monthly']['net']):6.4f}",
f"{float(location_type['price_monthly']['gross']):6.4f}",
_vat
)
global _console
_console.print(load_balance_price)
@app.command("backup", help="Information about price for server backup")
def get_server_backup_price() -> None:
"""
Printing server backups price as Table in console
"""
server_backup_price = Table(title="Server backup")
server_backup_price.add_column("Percentage, %", justify="center", style="bold")
server_backup_price.add_column("About")
global _data
global _vat
server_backup_ = _data['server_backup']
server_backup_price.add_row(
f"{float(server_backup_['percentage']):6.4f}",
"increase base Server costs by specific percentage"
)
global _console
_console.print(server_backup_price)
@app.command("server", help="Information about price and types for server")
def get_server_types_price() -> None:
"""
Printing server configurations price as Table in console
"""
server_types_price = Table(title="Server types")
server_types_price.add_column(f"id", justify="center", style="bold")
server_types_price.add_column(f"Name", justify="center", style="")
server_types_price.add_column(f"Location", justify="center", style="")
server_types_price.add_column(f"Hour, {_currency}\nWithout VAT", justify="center", style="bold green")
server_types_price.add_column(f"Hour, {_currency}\nWith VAT", justify="center", style="bold green")
server_types_price.add_column(f"Month, {_currency}\nWithout VAT", justify="center", style="bold green")
server_types_price.add_column(f"Month, {_currency}\nWith VAT", justify="center", style="bold green")
server_types_price.add_column("VAT, %", justify="center", style="bold")
global _data
global _vat
servers_ = _data["server_types"]
for i, server_type_ in enumerate(servers_):
for j, location_type in enumerate(server_type_['prices']):
server_types_price.add_row(
f"{server_type_['id'] if not j else ''}",
f"{server_type_['name'] if not j else ''}",
f"{location_type['location']}",
f"{float(location_type['price_hourly']['net']):6.4f}",
f"{float(location_type['price_hourly']['gross']):6.4f}",
f"{float(location_type['price_monthly']['net']):6.4f}",
f"{float(location_type['price_monthly']['gross']):6.4f}",
_vat
)
global _console
_console.print(server_types_price)
@app.command("traffic", help="Information about traffic price")
def get_traffic_price() -> None:
"""
Printing traffic price as Table in console
"""
traffic_price = Table(title="Traffic")
traffic_price.add_column(f"per TB, {_currency}\nWithout VAT", justify="center", style="bold green")
traffic_price.add_column(f"per TB, {_currency}\nWith VAT", justify="center", style="bold green")
global _data
global _vat
traffic_ = _data["traffic"]
traffic_price.add_row(
f"{float(traffic_['price_per_tb']['net']):6.4f}",
f"{float(traffic_['price_per_tb']['gross']):6.4f}"
)
global _console
_console.print(traffic_price)
@app.command("volume", help="Information about volume price")
def get_volume_price() -> None:
"""
Printing volume price as Table in console
"""
volume_price = Table(title="Volume")
volume_price.add_column(f"Month, {_currency}\nper GB\nWithout VAT", justify="center", style="bold green")
volume_price.add_column(f"per GB, {_currency}\nper GB\nWith VAT", justify="center", style="bold green")
global _data
global _vat
volume_ = _data["volume"]
volume_price.add_row(
f"{float(volume_['price_per_gb_month']['net']):6.4f}",
f"{float(volume_['price_per_gb_month']['gross']):6.4f}"
)
global _console
_console.print(volume_price)
|
<filename>backend/admin/decapod_admin/external_execution.py
# -*- coding: utf-8 -*-
# Copyright (c) 2017 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import configparser
import json
import os.path
import pathlib
import shutil
import subprocess
import tempfile
import textwrap
import click
import pkg_resources
import yaml
from decapod_admin import main
from decapod_api import handlers
from decapod_common import log
from decapod_common import pathutils
from decapod_common import plugins
from decapod_common import process
from decapod_common.models import kv
from decapod_common.models import playbook_configuration
from decapod_common.models import task
LOG = log.getLogger(__name__)
"""Logger."""
@main.cli.command(name="external-execution")
@click.argument(
"playbook-configuration-id",
type=click.UUID
)
@click.argument(
"playbook-configuration-version",
type=click.INT
)
@click.argument(
"path",
required=False,
type=click.Path(
dir_okay=False,
exists=False,
file_okay=True,
writable=True
)
)
@click.pass_context
def external_execution(ctx, playbook_configuration_id,
playbook_configuration_version, path):
"""Create bundle for external execution.
This command creates tarball which has everything required for
external execution of the plugin. This tarball includes commandline
for execution with Ansible, the contents of the plugin, generated
dynamic inventory.
Please pay attention to following:
\b
- This execution won't be added to the decapod and
will be done without any Decapod interaction
- You should have installed Ansible 2.3 or newer
- Please be sure that ceph-ansible is present in role path
of the ansible.
http://docs.ansible.com/ansible/intro_configuration.html#roles-path
https://github.com/ceph/ceph-ansible
"""
playbook_configuration_id = str(playbook_configuration_id)
subdir_path = "{0}-{1}".format(
playbook_configuration_id,
playbook_configuration_version
)
if path is None:
path = subdir_path
path = pathlib.Path(path).absolute()
playbook_config = \
playbook_configuration.PlaybookConfigurationModel.find_version(
playbook_configuration_id, playbook_configuration_version)
if not playbook_config:
ctx.fail("Cannot find such playbook config")
plugin = get_plugin(playbook_config.playbook_id)
working_dir = tempfile.TemporaryDirectory(prefix="exec")
ctx.call_on_close(working_dir.cleanup)
working_dir = pathlib.Path(working_dir.name)
tmpdir = working_dir.joinpath(subdir_path).absolute()
tmpdir.mkdir()
tmpdir.joinpath("fetch_directory").mkdir()
copy_decapod_common_playbooks(tmpdir)
copy_ceph_ansible(tmpdir)
copy_private_ssh_key(tmpdir)
copy_ansible_config(tmpdir)
copy_plugin_contents(tmpdir, plugin)
copy_monitor_keyring(tmpdir, playbook_config)
copy_decapod_data(tmpdir, playbook_config)
dump_inventory(tmpdir, playbook_config)
compose_commandline(tmpdir, playbook_config)
shutil.make_archive(path.as_posix(), "gztar", working_dir.as_posix())
click.echo(path.with_suffix(".tar.gz").as_posix())
def copy_decapod_common_playbooks(path):
destpath = path.joinpath("common_playbooks")
path_to_common_playbooks = pathutils.resource(
"decapod_common", "playbooks"
)
shutil.copytree(path_to_common_playbooks.as_posix(), destpath.as_posix())
def copy_ceph_ansible(path):
destpath = path.joinpath("ceph-ansible")
ceph_ansible_path = subprocess.check_output(
[
"python2", "-c",
(
"import pkg_resources; print "
"pkg_resources.resource_filename('decapod_ansible', "
"'ceph-ansible')"
)
]
)
ceph_ansible_path = ceph_ansible_path.decode("utf-8").rstrip()
shutil.copytree(ceph_ansible_path, destpath.as_posix())
def copy_private_ssh_key(path):
destpath = path.joinpath("ssh-private-key.pem")
sourcepath = pathutils.HOME.joinpath(".ssh", "id_rsa")
shutil.copy(sourcepath.as_posix(), destpath.as_posix())
destpath.chmod(0o400)
def copy_ansible_config(path):
destpath = path.joinpath("ansible.cfg")
sourcepath = pathutils.ROOT.joinpath("etc", "ansible", "ansible.cfg")
shutil.copy2(sourcepath.as_posix(), destpath.as_posix())
parser = configparser.RawConfigParser()
with destpath.open() as fp:
parser.read_file(fp)
defaults_to_remove = (
"action_plugins",
"callback_plugins",
"connection_plugins",
"filter_plugins",
"lookup_plugins",
"vars_plugins"
)
for name in defaults_to_remove:
try:
parser.remove_option("defaults", name)
except Exception:
pass
try:
parser.remove_section("ssh_connection")
except Exception:
pass
parser.set("defaults", "roles_path", "ceph-ansible/roles")
parser.set("defaults", "private_key_file", "ssh-private-key.pem")
parser.set("defaults", "action_plugins", "ceph-ansible/plugins/actions")
with destpath.open("w") as fp:
parser.write(fp)
def copy_plugin_contents(path, plugin):
module_name = plugin.module_name.split(".", 1)[0]
plugin_path = path.joinpath("plugin")
plugin_path.mkdir()
for entry in plugin.dist.resource_listdir(module_name):
if entry == "__pycache__":
continue
filename = plugin.dist.get_resource_filename(
pkg_resources._manager,
os.path.join(module_name, entry)
)
filename = pathlib.Path(filename).absolute()
destpath = plugin_path.joinpath(filename.name)
if filename.is_dir():
shutil.copytree(filename.as_posix(), destpath.as_posix(),
symlinks=True)
else:
shutil.copy2(filename.as_posix(), destpath.as_posix(),
follow_symlinks=False)
def copy_monitor_keyring(path, config):
secret = kv.KV.find_one("monitor_secret",
config.configuration["global_vars"]["fsid"])
if secret:
path.joinpath("fetch_directory", "monitor_keyring").write_text(
secret.value
)
def copy_decapod_data(path, config):
destpath = path.joinpath("decapod_data")
destpath.mkdir()
destpath.joinpath("playbook-configuration.json").write_text(
json_dumps(config)
)
destpath.joinpath("cluster.json").write_text(json_dumps(config.cluster))
server_path = destpath.joinpath("servers")
server_path.mkdir()
for srv in config.servers:
server_path.joinpath("{0}.json".format(srv.model_id)).write_text(
json_dumps(srv)
)
server_path.joinpath("{0}.json".format(srv.ip)).symlink_to(
"{0}.json".format(srv.model_id)
)
cluster_servers_path = destpath.joinpath("cluster_servers")
cluster_servers_path.mkdir()
for srv in config.cluster.server_list:
cluster_servers_path.joinpath(
"{0}.json".format(srv.model_id)).write_text(
json_dumps(srv)
)
cluster_servers_path.joinpath("{0}.json".format(srv.ip)).symlink_to(
"{0}.json".format(srv.model_id)
)
def dump_inventory(path, config):
inventory = config.configuration["inventory"]
hostvars = inventory.get("_meta", {})
hostvars = hostvars.get("hostvars", {})
children = {}
yaml_inventory = {
"all": {"children": children},
"vars": {},
}
for groupname, groupstruct in inventory.items():
if groupname == "_meta":
continue
hostsdict = {}
children[groupname] = {
"hosts": hostsdict,
"vars": {}
}
if isinstance(groupstruct, dict):
children[groupname]["vars"] = groupstruct["vars"]
for hostname in groupstruct["hosts"]:
hostsdict[hostname] = hostvars.get(hostname, {})
else:
for hostname in groupstruct:
hostsdict[hostname] = hostvars.get(hostname, {})
path.joinpath("inventory.yaml").write_text(
yaml.dump(yaml_inventory,
default_flow_style=False, explicit_start=True, indent=4)
)
def compose_commandline(path, playbook_config):
destpath = path.joinpath("execute.sh")
faketask = task.PlaybookPluginTask(
playbook_config.playbook_id, playbook_config._id, None)
plugin = plugins.get_public_playbook_plugins()[playbook_config.playbook_id]
plugin = plugin()
plugin.compose_command(faketask)
proc = plugin.proc
proc.env = {}
proc.options["--inventory-file"] = "inventory.yaml"
extras = json.loads(proc.options["--extra-vars"])
extras["decapod_common_playbooks"] = "../common_playbooks"
extras["fetch_directory"] = "fetch_directory"
extras = patch_plugin_paths(extras, plugin)
proc.options["--extra-vars"] = process.jsonify(extras)
proc.command = "ansible-playbook"
proc.args = [
path.joinpath("plugin", plugin.playbook_filename)
.relative_to(path).as_posix()
]
shell_script = """\
#!/bin/bash
set +e
cd "$(dirname "$0")"
{0}
cd - >/dev/null 2>&1
""".format(proc.printable_commandline)
shell_script = textwrap.dedent(shell_script)
destpath.write_text(shell_script)
destpath.chmod(0o755)
def patch_plugin_paths(extras, plugin):
if isinstance(extras, dict):
return {k: patch_plugin_paths(v, plugin) for k, v in extras.items()}
elif isinstance(extras, list):
return [patch_plugin_paths(el, plugin) for el in extras]
elif isinstance(extras, str):
module_name = plugin.module_name.split(".", 1)[0]
local_path = pkg_resources.resource_filename(module_name, "")
return extras.replace(local_path + "/", "")
return extras
def get_plugin(plugin_id):
all_plugins = {
pkg.name: pkg
for pkg in pkg_resources.iter_entry_points(group=plugins.NS_PLAYBOOKS)
}
return all_plugins[plugin_id]
def json_dumps(data):
return json.dumps(data, cls=handlers.JSONEncoder, sort_keys=True, indent=4)
|
"""
253. Meeting Rooms II
Medium
Given an array of meeting time intervals intervals where intervals[i] = [starti, endi], return the minimum number of conference rooms required.
Example 1:
Input: intervals = [[0,30],[5,10],[15,20]]
Output: 2
Example 2:
Input: intervals = [[7,10],[2,4]]
Output: 1
Constraints:
1 <= intervals.length <= 10^4
0 <= start_i < end_i <= 10^6
"""
# V0
# IDEA : SCANNING LINE : Sort all time points and label the start and end points. Move a vertical line from left to right.
class Solution:
    def minMeetingRooms(self, intervals):
        """Sweep line: +1 event at every meeting start, -1 at every end.

        Sorting the raw tuples orders (t, -1) before (t, 1), so a room
        freed at time t can be reused by a meeting starting at t.
        """
        events = []
        for begin, finish in intervals:
            events.append((begin, 1))
            events.append((finish, -1))
        events.sort()
        busiest = active = 0
        for _, delta in events:
            active += delta
            busiest = max(busiest, active)
        return busiest
# V0'
# IDEA : Chronological Ordering
# https://leetcode.com/problems/meeting-rooms-ii/solution/
class Solution:
    def minMeetingRooms(self, intervals):
        """Two-pointer walk over independently sorted start and end times."""
        if not intervals:
            return 0
        starts = sorted(iv[0] for iv in intervals)
        ends = sorted(iv[1] for iv in intervals)
        rooms = 0
        next_free = 0
        for begin in starts:
            # A meeting already finished before this one starts: its room
            # can be reused, so release it first.
            if begin >= ends[next_free]:
                rooms -= 1
                next_free += 1
            # This meeting always occupies a room (fresh or just released).
            rooms += 1
        return rooms
# V0''
# IDEA : SCANNING LINE
# Step 1 : split intervals to points, and label start, end point
# Step 2 : reorder the points
# Step 3 : go through every point, if start : result + 1, if end : result -1, and record the maximum result in every iteration
class Solution:
    def minMeetingRooms(self, intervals):
        """Sweep line with explicit start/end flags.

        False (end) sorts before True (start) at equal times, so a room
        released at time t is available to a meeting starting at t.
        """
        if not intervals:
            return 0
        points = []
        for begin, finish in intervals:
            points.append((begin, True))    # a room is taken
            points.append((finish, False))  # a room is released
        points.sort(key=lambda point: (point[0], point[1]))
        occupied = peak = 0
        for _, is_start in points:
            occupied += 1 if is_start else -1
            peak = max(peak, occupied)
        return peak
# V0'''
# IDEA : Priority Queues
# https://leetcode.com/problems/meeting-rooms-ii/solution/
class Solution:
    def minMeetingRooms(self, intervals):
        """Min-heap of end times; the final heap size is the room count."""
        if not intervals:
            return 0
        # Process meetings in order of start time (in place, as before).
        intervals.sort(key=lambda iv: iv[0])
        end_times = [intervals[0][1]]  # a single element is a valid heap
        for begin, finish in intervals[1:]:
            # The room finishing earliest is free: reuse it.
            if end_times[0] <= begin:
                heapq.heappop(end_times)
            heapq.heappush(end_times, finish)
        return len(end_times)
# V0''''
# TODO : fix below
# IDEA : SCANNING LINE
# Step 1 : split intervals to points, and label start, end point
# Step 2 : reorder the points
# Step 3 : go through every point, if start : result + 1, if end : result -1, and record the maximum result in every iteration
# https://www.1point3acres.com/bbs/thread-295648-1-1.html
# class Solution:
# """
# @param intervals: an array of meeting time intervals
# @return: the minimum number of conference rooms required
# """
# def minMeetingRooms(self, intervals):
# open_close = []
# needed_room = 0
# res = 0
# for interval in intervals:
# # "open" the room
# open_close.append((interval[0], "open"))
# # "close" the room
# open_close.append((interval[1], "close"))
#
# # sort the time
# open_close_ = open_close.sort(lambda x : x[0])
# # go through every start-end time slot
# for i in open_close_:
# # if there is a "open" => open 2 new room
# if i[1] == "open":
# needed_room += 2
# res = max(res, needed_room)
# # if there is a "close" => close 1 new room
# elif i[1] == "close":
# needed_room -= 1
# return res
# V1
# IDEA : Priority Queues
# https://leetcode.com/problems/meeting-rooms-ii/solution/
class Solution:
    def minMeetingRooms(self, intervals):
        """Greedy room assignment with a min-heap keyed on end times."""
        if not intervals:
            return 0
        intervals.sort(key=lambda iv: iv[0])
        occupied_until = [intervals[0][1]]
        for meeting in intervals[1:]:
            # Reuse the earliest-freeing room when it is already available.
            if occupied_until[0] <= meeting[0]:
                heapq.heappop(occupied_until)
            heapq.heappush(occupied_until, meeting[1])
        # One heap entry per simultaneously-needed room.
        return len(occupied_until)
# V1'
# IDEA : Chronological Ordering
# https://leetcode.com/problems/meeting-rooms-ii/solution/
class Solution:
    def minMeetingRooms(self, intervals):
        """Chronological pointer walk over sorted start and end times."""
        if not intervals:
            return 0
        start_times = sorted(meeting[0] for meeting in intervals)
        end_times = sorted(meeting[1] for meeting in intervals)
        held = 0
        release_idx = 0
        for start in start_times:
            # A meeting ended before this start: one room comes back.
            if start >= end_times[release_idx]:
                held -= 1
                release_idx += 1
            held += 1
        return held
# V1''
# IDEA : Sort all time points and label the start and end points. Move a vertical line from left to right.
# https://leetcode.com/problems/meeting-rooms-ii/discuss/322622/Simple-Python-solutions
class Solution:
    def minMeetingRooms(self, intervals):
        """Flatten meetings into sorted +1/-1 time marks and track the peak."""
        marks = sorted(
            [(s, 1) for s, _ in intervals] + [(e, -1) for _, e in intervals])
        current = answer = 0
        for _, step in marks:
            current += step
            answer = max(answer, current)
        return answer
# V1'''
# IDEA : Priority Queues
# https://leetcode.com/problems/meeting-rooms-ii/discuss/322622/Simple-Python-solutions
class Solution:
    def minMeetingRooms(self, intervals):
        """Heap of active end times with an explicit size counter."""
        intervals.sort(key=lambda iv: iv[0])
        pending = []
        in_use = 0
        best = 0
        for begin, finish in intervals:
            # Drain every meeting that already finished by this start time.
            while pending and pending[0] <= begin:
                heapq.heappop(pending)
                in_use -= 1
            heapq.heappush(pending, finish)
            in_use += 1
            best = max(best, in_use)
        return best
# V1''''
# https://leetcode.com/problems/meeting-rooms-ii/discuss/67965/Concise-python-implementation
class Solution(object):
    def minMeetingRooms(self, intervals):
        """Answer = meetings minus those that could reuse a finished room."""
        starts = sorted(iv[0] for iv in intervals)
        finishes = sorted(iv[1] for iv in intervals)
        reused = 0
        for begin in starts:
            if begin >= finishes[reused]:
                reused += 1
        return len(intervals) - reused
# V1'''''
# https://leetcode.com/problems/meeting-rooms-ii/discuss/208109/Python-solution
class Solution:
    def minMeetingRooms(self, intervals):
        """Deque of sorted end times; pop every end that precedes a start."""
        if not intervals:
            return 0
        ordered = sorted(intervals, key=lambda iv: iv[0])
        endings = collections.deque(sorted(iv[1] for iv in ordered))
        best = 1
        freed = 0
        for idx in range(1, len(ordered)):
            # Release every meeting that finished by this one's start.
            while endings and endings[0] <= ordered[idx][0]:
                endings.popleft()
                freed += 1
            best = max(best, idx - freed + 1)
        return best
# V1'''''''
# IDEA : min-heap (priority queue)
# https://leetcode.com/problems/meeting-rooms-ii/discuss/208109/Python-solution
class Solution:
    def minMeetingRooms(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: int

        Min-heap of active end times; the answer is the peak heap size.
        """
        if not intervals:
            return 0
        busy = []
        heapq.heapify(busy)
        answer = 1
        for begin, finish in sorted(intervals, key=lambda iv: iv[0]):
            # Reuse the earliest-freeing room if it is already empty.
            if busy and busy[0] <= begin:
                heapq.heappop(busy)
            heapq.heappush(busy, finish)
            answer = max(answer, len(busy))
        return answer
# V1''''''''
# IDEA : min-heap (priority queue)
# https://leetcode.com/problems/meeting-rooms-ii/discuss/1031292/Simple-Python-Solution
class Solution:
    def minMeetingRooms(self, intervals):
        """Heap variant: pop the earliest end and re-insert it if still busy."""
        intervals.sort(key=lambda iv: iv[0])
        active = []
        peak = 0
        for begin, finish in intervals:
            if active:
                earliest = heapq.heappop(active)
                if begin < earliest:
                    # That room is still occupied — put it back.
                    heapq.heappush(active, earliest)
            heapq.heappush(active, finish)
            if len(active) > peak:
                peak = len(active)
        return peak
# V1'''''''''
# https://www.jiuzhang.com/solution/meeting-rooms-ii/#tag-highlight-lang-python
# IDEA : TO HAVE A ARRAY OF ALL "ROOM OPEN" AND "ROOM CLOSE" EVENTS
# "ROOM OPEN" EVENT : (TIME, 1)
# "ROOM CLOSE" EVENT : (TIME, -1)
# SO AFTER RE-ORDER ON THE TIME, WE WILL HAVE AN ORDERED EVENT ARRAY LIKE BELOW
# [ [t1, 1], [t1,1], [t3, -1], [t4, 1], [t5, -1]]
# THEN WE CAN GO THROUGH ALL EVENT IN THE EVENTS ARRAY AND CALCULATE # OF ROOM NEEDED
# DEMO 1
# In [23]: intervals= [[0, 30],[5, 10],[15, 20]]
# In [24]: Solution().minMeetingRooms(intervals)
# Out[24]: 2
# DEMO 2
# In [40]: intervals
# Out[40]: [[0, 30], [5, 10], [15, 20]]
# In [41]: sorted([(0, 1), (30, -1), (5, 1), (10, -1), (15, 1), (20, -1)])
# Out[41]: [(0, 1), (5, 1), (10, -1), (15, 1), (20, -1), (30, -1)]
# In [42]: Solution().minMeetingRooms(intervals)
# []
# [(0, 1), (30, -1)]
# [(0, 1), (30, -1), (5, 1), (10, -1)]
# [(0, 1), (30, -1), (5, 1), (10, -1), (15, 1), (20, -1)]
# Out[42]: 2
class Solution:
    """
    @param intervals: an array of meeting time intervals
    @return: the minimum number of conference rooms required
    """
    def minMeetingRooms(self, intervals):
        # Build (time, +1/-1) events: +1 opens a room, -1 releases one.
        deltas = []
        for begin, finish in intervals:
            deltas.append((begin, 1))
            deltas.append((finish, -1))
        concurrent = needed = 0
        for _, change in sorted(deltas):
            concurrent += change
            needed = max(needed, concurrent)
        return needed
# V1'''''''''
# https://blog.csdn.net/yurenguowang/article/details/76665171
class Solution:
    def minMeetingRooms(self, intervals):
        """Sweep over (time, is_start) events built from Interval objects."""
        if not intervals:
            return 0
        events = []
        for meeting in intervals:
            events.append((meeting.start, True))
            events.append((meeting.end, False))
        # Ends (False) sort before starts (True) at equal times.
        events.sort(key=lambda event: (event[0], event[1]))
        active = answer = 0
        for _, opening in events:
            active += 1 if opening else -1
            answer = max(answer, active)
        return answer
# V2
# https://blog.csdn.net/yurenguowang/article/details/76665171
# Definition for an interval.
# class Interval(object):
# def __init__(self, s=0, e=0):
# self.start = s
# self.end = e
class Solution:
    def minMeetingRooms(self, intervals):
        """Chronological event sweep over Interval objects (.start/.end)."""
        if not intervals:
            return 0
        timeline = sorted(
            [(iv.start, True) for iv in intervals] +
            [(iv.end, False) for iv in intervals],
            key=lambda event: (event[0], event[1]))
        rooms = busiest = 0
        for _, is_start in timeline:
            rooms += 1 if is_start else -1
            busiest = max(busiest, rooms)
        return busiest
# V3
# Time: O(nlogn)
# Space: O(n)
class Solution(object):
    # @param {Interval[]} intervals
    # @return {integer}
    def minMeetingRooms(self, intervals):
        """Walk two sorted arrays of start/end times with two cursors."""
        starts = sorted(iv.start for iv in intervals)
        ends = sorted(iv.end for iv in intervals)
        s_idx = e_idx = 0
        in_use = needed = 0
        while s_idx < len(starts):
            if starts[s_idx] < ends[e_idx]:
                in_use += 1                     # acquire a room
                needed = max(needed, in_use)    # track the peak
                s_idx += 1
            else:
                in_use -= 1                     # release a room
                e_idx += 1
        return needed
# time: O(nlogn)
# space: O(n)
from heapq import heappush, heappop
# V4
class Solution2(object):
    def minMeetingRooms(self, intervals):
        """
        :type intervals: List[Interval]
        :rtype: int

        Min-heap of end times over Interval objects.
        """
        if not intervals:
            return 0
        intervals.sort(key=lambda iv: iv.start)
        ends_heap = [intervals[0].end]  # one element is a valid heap
        for meeting in intervals[1:]:
            if ends_heap[0] <= meeting.start:
                heappop(ends_heap)  # earliest room is free: reuse it
            heappush(ends_heap, meeting.end)
        return len(ends_heap)
from typing import Iterable, DefaultDict, List
from collections import defaultdict
from flask import Blueprint, Response, abort
from .models import Entry, Tag, EntryTag, AboutPage
from .utils import Paginator, template_response
# Blueprint wiring every public route in this module; templates are looked
# up in "views" and static assets in "static", relative to this package.
blueprint = Blueprint(
    name='controllers',
    import_name=__name__,
    static_folder='static',
    template_folder='views'
)
@blueprint.route('/', defaults={'page': 1})
@blueprint.route('/page/<int:page>/')
def front(page: int) -> Response:
    """Render the paginated front page of entries, newest first; 404 when empty."""
    query = Entry.select().order_by(Entry.date.desc())
    if query.count() == 0:
        abort(404)
    return template_response(
        'front.html',
        title=None,
        paginator=Paginator(query=query, current_page=page)
    )
def _archive_response(entries: Iterable[Entry], title: str) -> Response:
    """Group *entries* into a year → month → day tree and render the archive."""
    tree: DefaultDict[int, DefaultDict[int, DefaultDict[int, List[Entry]]]] = \
        defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    for item in entries:
        when = item.date
        tree[when.year][when.month][when.day].append(item)
    if not tree:
        # Nothing matched the requested slice.
        abort(404)
    return template_response('archive.html', title=title, entries=tree)
@blueprint.route('/archive/')
def archive() -> Response:
    """Render the archive of every entry."""
    everything: Iterable[Entry] = Entry.select().iterator()
    return _archive_response(everything, title='archive')
@blueprint.route('/<year>/')
def archive_by_year(year: str) -> Response:
    """Archive limited to one year; 404 on a non-numeric year."""
    try:
        selected: Iterable[Entry] = Entry.select() \
            .where(Entry.date.year == int(year)) \
            .iterator()
    except ValueError:
        abort(404)
    return _archive_response(selected, title=year)
@blueprint.route('/<year>/<month>/')
def archive_by_month(year: str, month: str) -> Response:
    """Archive limited to one month of one year; 404 on non-numeric parts."""
    try:
        selected: Iterable[Entry] = Entry.select() \
            .where(Entry.date.year == int(year)) \
            .where(Entry.date.month == int(month)) \
            .iterator()
    except ValueError:
        abort(404)
    return _archive_response(selected, title=f'{month}/{year}')
@blueprint.route('/<year>/<month>/<day>/')
def archive_by_day(year: str, month: str, day: str) -> Response:
    """Archive limited to a single day; 404 on non-numeric parts."""
    try:
        selected: Iterable[Entry] = Entry.select() \
            .where(Entry.date.year == int(year)) \
            .where(Entry.date.month == int(month)) \
            .where(Entry.date.day == int(day)) \
            .iterator()
    except ValueError:
        abort(404)
    return _archive_response(selected, title=f'{day}/{month}/{year}')
@blueprint.route('/tag/<slug>/')
def archive_by_tag(slug: str) -> Response:
    """Archive of every entry carrying the tag identified by *slug*."""
    try:
        tag_name: str = Tag.get(slug=slug).name
    except Tag.DoesNotExist:
        abort(404)
    tagged: Iterable[Entry] = Entry.select() \
        .join(EntryTag, on=Entry.id == EntryTag.entry_id) \
        .join(Tag, on=EntryTag.definition_id == Tag.id) \
        .where(Tag.slug == slug) \
        .iterator()
    return _archive_response(tagged, title=tag_name)
@blueprint.route('/<year>/<month>/<day>/<slug>/')
def entry(year: str, month: str, day: str, slug: str) -> Response:
    """Render a single entry addressed by date and slug; 404 if absent."""
    try:
        found: Entry = Entry.select() \
            .where(Entry.date.year == int(year)) \
            .where(Entry.date.month == int(month)) \
            .where(Entry.date.day == int(day)) \
            .where(Entry.slug == slug) \
            .get()
    except (ValueError, Entry.DoesNotExist):
        abort(404)
    return template_response('entry.html', title=found.title, entry=found)
@blueprint.route('/about/')
def about() -> Response:
    """Render the static about page."""
    page = AboutPage.get()
    return template_response('about.html', title='about', entry=page)
|
<gh_stars>0
""" Official evaluation script for v1.1 of the SQuAD dataset. """
from __future__ import print_function
from collections import Counter
import nltk
import string
import re
import argparse
import json
import sys
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""
    punctuation = set(string.punctuation)
    lowered = s.lower()
    no_punct = ''.join(ch for ch in lowered if ch not in punctuation)
    # Drop standalone English articles, leaving a space in their place.
    no_articles = re.sub(r'\b(a|an|the)\b', ' ', no_punct)
    # Collapse any run of whitespace into single spaces.
    return ' '.join(no_articles.split())
def f1_score(prediction, ground_truth):
    """Token-level F1 between a prediction and one ground truth.

    Both strings are normalized before tokenization; returns 0 when the
    normalized token bags share nothing.
    """
    pred_tokens = normalize_answer(prediction).split()
    gt_tokens = normalize_answer(ground_truth).split()
    overlap = Counter(pred_tokens) & Counter(gt_tokens)
    num_same = sum(overlap.values())
    if num_same == 0:
        return 0
    precision = num_same / float(len(pred_tokens))
    recall = num_same / float(len(gt_tokens))
    return (2 * precision * recall) / (precision + recall)
def exact_match_score(prediction, ground_truth):
    """True when the normalized prediction equals the normalized ground truth."""
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def metric_max_over_ground_truths(metric_fn, prediction, ground_truths):
    """Best value of metric_fn(prediction, gt) over all ground truths.

    Returns 0 when ground_truths is empty.
    """
    if not ground_truths:
        return 0
    return max(metric_fn(prediction, gt) for gt in ground_truths)
class color:
    """ANSI terminal escape sequences used to highlight debug output."""
    PURPLE = '\033[95m'
    CYAN = '\033[96m'
    DARKCYAN = '\033[36m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YELLOW = '\033[93m'
    RED = '\033[91m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
    END = '\033[0m'  # resets all attributes
def evaluate(dataset, predictions):
    """Compute SQuAD v1.1 metrics over `dataset` given `predictions` (qid -> text).

    Returns {'exact_match': em, 'f1': f1}, both as percentages over all
    questions. Question ids missing from `predictions` are warned about on
    stderr and score 0. As a debugging aid, every gold answer whose start
    offset is not preceded by a space/quote/apostrophe/paren is printed with
    the answer span highlighted in red.

    NOTE(review): raises ZeroDivisionError when the dataset contains no
    questions (total == 0).
    """
    f1 = exact_match = total = 0
    for article in dataset:
        for paragraph in article['paragraphs']:
            for qa in paragraph['qas']:
                total += 1
                if qa['id'] not in predictions:
                    message = 'Unanswered question ' + qa['id'] + \
                              ' will receive score 0.'
                    print(message, file=sys.stderr)
                    continue
                ground_truths = list(map(lambda x: x['text'], qa['answers']))
                prediction = predictions[qa['id']]
                # Best score against any of the gold answers counts.
                exact_match += metric_max_over_ground_truths(
                    exact_match_score, prediction, ground_truths)
                f1 += metric_max_over_ground_truths(
                    f1_score, prediction, ground_truths)
                context = paragraph['context']
                # Debug output: flag gold answers that do not start on a
                # clean token boundary within the context.
                for ans in qa['answers']:
                    if ans['answer_start'] != 0 and context[ans['answer_start']-1] not in [" ", "'", '"', '(']:
                        print(nltk.word_tokenize(context))
                        ans_l = len(ans['text'])
                        print(context[:ans['answer_start']] + color.RED + context[ans['answer_start']:(ans['answer_start']+ans_l)] + color.END + context[ans['answer_start']+ans_l:])
                        # print(paragraph['context'])
                        print('Question:', qa['question'])
                        print('Prediction:', ans['text'])
                        # print('Predictions:', prediction)
                        # print(predictions[qa['id']])
                        # print('Prediciton Full', predictions[qa['ans']])
                        print('Ground truths:', ground_truths)
                        print()
    exact_match = 100.0 * exact_match / total
    f1 = 100.0 * f1 / total
    return {'exact_match': exact_match, 'f1': f1}
if __name__ == '__main__':
    # Command line entry point: evaluate a prediction file against a SQuAD
    # dataset file and print the metrics as JSON on stdout.
    expected_version = '1.1'
    parser = argparse.ArgumentParser(
        description='Evaluation for SQuAD ' + expected_version)
    parser.add_argument('dataset_file', help='Dataset file')
    parser.add_argument('prediction_file', help='Prediction File')
    args = parser.parse_args()
    with open(args.dataset_file) as dataset_file:
        dataset_json = json.load(dataset_file)
        # A version mismatch is only a warning; evaluation proceeds anyway.
        if (dataset_json['version'] != expected_version):
            print('Evaluation expects v-' + expected_version +
                  ', but got dataset with v-' + dataset_json['version'],
                  file=sys.stderr)
        dataset = dataset_json['data']
    with open(args.prediction_file) as prediction_file:
        predictions = json.load(prediction_file)
    #print(predictions)
    print(json.dumps(evaluate(dataset, predictions)))
|
<reponame>oyente/oyente<gh_stars>10-100
# return true if the two paths have different flows of money
# later on we may want to return more meaningful output: e.g. if the concurrency changes
# the amount of money or the recipient.
from z3 import *
from z3util import get_vars
import json
import mmap
import os
import csv
import re
import difflib
import signal
def my_copy_dict(input):
    """One-level-deep copy of a dict.

    List and dict values are shallow-copied; every other value is shared
    with the original.
    """
    output = {}
    for key, value in input.items():
        if isinstance(value, list):
            output[key] = list(value)
        elif isinstance(value, dict):
            output[key] = dict(value)
        else:
            output[key] = value
    return output
# class Timeout():
# """Timeout class using ALARM signal."""
#
# def __init__(self, sec):
# self.sec = sec
#
# def __enter__(self):
# signal.signal(signal.SIGALRM, self.raise_timeout)
# signal.alarm(self.sec)
#
# def __exit__(self, *args):
# signal.alarm(0) # disable alarm
#
# def raise_timeout(self, *args):
# raise Exception("Timeout")
# check if a variable is a storage address in a contract
# currently accept only int addresses in the storage
def is_storage_var(var):
    """Return True when var looks like a storage address in a contract.

    Currently only integer addresses in the storage are accepted.
    """
    # BUG FIX: the original tested isinstance(var, (int, long)); `long` only
    # exists on Python 2 and raises NameError on Python 3. Build the type
    # tuple defensively so the check works on both.
    try:
        int_types = (int, long)
    except NameError:
        int_types = (int,)
    return isinstance(var, int_types)
# copy only storage values/ variables from a given global state
# TODO: add balance in the future
def copy_global_values(global_state):
    """Copy only the storage values/variables out of a global state.

    TODO: also copy the balance in the future.
    """
    storage = global_state["Ia"]
    return {var: storage[var] for var in storage if is_storage_var(var)}
# check if a variable is in an expression
def is_in_expr(var, expr):
    """Check whether the variable named `var` occurs in z3 expression `expr`."""
    names = set(v.decl().name() for v in get_vars(expr))
    return var in names
# check if an expression has any storage variables
def has_storage_vars(expr, storage_vars):
    """Check whether z3 expression `expr` references any storage variable."""
    return any(v in storage_vars for v in get_vars(expr))
def get_all_vars(list_of_storage_exprs):
    """Collect the z3 variables of every storage expression in the mapping."""
    collected = []
    for key in list_of_storage_exprs:
        collected.extend(get_vars(list_of_storage_exprs[key]))
    return collected
# rename variables to distinguish variables in two different paths.
# e.g. Ia_store_0 in path i becomes Ia_store_0_old if Ia_store_0 is modified
# else we must keep Ia_store_0 if its not modified
def rename_vars(pcs, global_states):
    """Rename modified global variables so two paths can be distinguished.

    e.g. Ia_store_0 in path i becomes Ia_store_0_old if Ia_store_0 is
    modified (i.e. its position appears in `global_states`); otherwise the
    original name is kept.

    Returns (ret_pcs, ret_gs): the rewritten path conditions and the
    rewritten storage-address -> expression map.
    """
    ret_pcs = []
    # Maps original z3 vars to their renamed "_old" replacements so both
    # loops substitute consistently.
    vars_mapping = {}
    for expr in pcs:
        list_vars = get_vars(expr)
        for var in list_vars:
            if var in vars_mapping:
                expr = substitute(expr, (var, vars_mapping[var]))
                continue
            var_name = var.decl().name()
            # check if a var is global
            if var_name.startswith("Ia_store_"):
                # NOTE(review): here the position is kept as a *string*,
                # while the second loop below converts it to int before the
                # same `in global_states` test — one of the two can only
                # match if global_states keys are of that type; confirm
                # which key type callers use.
                position = var_name.split('Ia_store_')[1]
                # if it is not modified then keep the previous name
                if position not in global_states:
                    continue
                # otherwise, change the name of the variable
                new_var_name = var_name + '_old'
                new_var = BitVec(new_var_name, 256)
                vars_mapping[var] = new_var
                expr = substitute(expr, (var, vars_mapping[var]))
        ret_pcs.append(expr)
    ret_gs = {}
    # replace variable in storage expression
    for storage_addr in global_states:
        expr = global_states[storage_addr]
        # stupid z3 4.1 makes me add this line
        if is_expr(expr):
            list_vars = get_vars(expr)
            for var in list_vars:
                if var in vars_mapping:
                    expr = substitute(expr, (var, vars_mapping[var]))
                    continue
                var_name = var.decl().name()
                # check if a var is global
                if var_name.startswith("Ia_store_"):
                    # See NOTE(review) above: int here vs str in first loop.
                    position = int(var_name.split('_')[len(var_name.split('_'))-1])
                    # if it is not modified
                    if position not in global_states:
                        continue
                    # otherwise, change the name of the variable
                    new_var_name = var_name + '_old'
                    new_var = BitVec(new_var_name, 256)
                    vars_mapping[var] = new_var
                    expr = substitute(expr, (var, vars_mapping[var]))
        ret_gs[storage_addr] = expr
    return ret_pcs, ret_gs
#split a file into smaller files
def split_dicts(filename, nsub = 500):
    """Split a JSON dict file into smaller files of at most nsub entries each.

    Output files are named <base>_1.json, <base>_2.json, ... where <base> is
    the input filename truncated at its first dot (original behavior kept).
    """
    with open(filename) as json_file:
        c = json.load(json_file)
    base = filename.split(".")[0]
    current_file = {}
    file_index = 1
    # BUG FIX for portability: .items() instead of the Python-2-only
    # .iteritems(), so the function runs on both Python 2 and 3.
    for u, v in c.items():
        current_file[u] = v
        if len(current_file) == nsub:
            with open(base + "_" + str(file_index) + '.json', 'w') as outfile:
                json.dump(current_file, outfile)
            file_index += 1
            current_file.clear()
    # Flush the remainder (fewer than nsub entries).
    if len(current_file):
        with open(base + "_" + str(file_index) + '.json', 'w') as outfile:
            json.dump(current_file, outfile)
        current_file.clear()
def do_split_dicts():
    """Split contract0.json .. contract10.json into chunks, deleting each original."""
    for i in range(11):
        name = "contract" + str(i) + ".json"
        split_dicts(name)
        os.remove(name)
def run_re_file(re_str, fn):
    """Run re.findall over a memory-mapped view of file fn and return the matches.

    The mmap is explicitly closed before returning; the original left the
    mapping open, leaking it for the life of the process.
    """
    size = os.stat(fn).st_size
    with open(fn, 'r') as tf:
        data = mmap.mmap(tf.fileno(), size, access=mmap.ACCESS_READ)
        try:
            return re.findall(re_str, data)
        finally:
            data.close()
def get_contract_info(contract_addr):
    """Scrape etherscan.io for a contract's transaction count and ETH balance.

    Reads cached HTML from tmp/; on a cache miss (any exception) it fetches
    the pages with wget and retries once, silently giving up on failure.
    Returns (txs, value) — both remain the string "unknown" when scraping
    fails. NOTE(review): run_re_file returns a *list* of matches, so on
    success txs/value are lists, not scalars — confirm callers handle that.
    """
    print "Getting info for contracts... " + contract_addr
    file_name1 = "tmp/" + contract_addr + "_txs.html"
    file_name2 = "tmp/" + contract_addr + ".html"
    # get number of txs
    txs = "unknown"
    value = "unknown"
    re_txs_value = r"<span>A total of (.+?) transactions found for address</span>"
    re_str_value = r"<td>ETH Balance:\n<\/td>\n<td>\n(.+?)\n<\/td>"
    try:
        txs = run_re_file(re_txs_value, file_name1)
        value = run_re_file(re_str_value, file_name2)
    except Exception as e:
        # Cache miss: download both pages and parse again.
        try:
            os.system("wget -O %s http://etherscan.io/txs?a=%s" % (file_name1, contract_addr))
            re_txs_value = r"<span>A total of (.+?) transactions found for address</span>"
            txs = run_re_file(re_txs_value, file_name1)
            # get balance
            re_str_value = r"<td>ETH Balance:\n<\/td>\n<td>\n(.+?)\n<\/td>"
            os.system("wget -O %s https://etherscan.io/address/%s" % (file_name2, contract_addr))
            value = run_re_file(re_str_value, file_name2)
        except Exception as e:
            # Best effort only; fall through with "unknown" values.
            pass
    return txs, value
def get_contract_stats(list_of_contracts):
    """Write concurr.csv summarizing each contract.

    list_of_contracts: path to a text file with one contract per line:
    "<address> <n_paths> <n_pairs> <note...>".
    Columns: address, paths, concurrency pairs, balance, TX count, note.
    """
    with open("concurr.csv", "w") as stats_file:
        fp = csv.writer(stats_file, delimiter=',')
        fp.writerow(["Contract address", "No. of paths", "No. of concurrency pairs",
                     "Balance", "No. of TXs", "Note"])
        with open(list_of_contracts, "r") as f:
            for contract in f.readlines():
                contract_addr = contract.split()[0]
                # BUG FIX: get_contract_info returns (txs, value); the
                # original unpacked `value, txs = ...` in swapped order,
                # writing the TX count into the Balance column and vice
                # versa (compare get_time_dependant_contracts, which
                # unpacks `txs, value`).
                txs, value = get_contract_info(contract_addr)
                fp.writerow([contract_addr, contract.split()[1], contract.split()[2],
                             value, txs, contract.split()[3:]])
def get_time_dependant_contracts(list_of_contracts):
    """Write time.csv with balance and TX count for each listed contract.

    Each non-blank line of list_of_contracts is expected to look like
    "<prefix>_<address>.<ext>"; the address is extracted from it.
    """
    with open("time.csv", "w") as stats_file:
        fp = csv.writer(stats_file, delimiter=',')
        fp.writerow(["Contract address", "Balance", "No. of TXs", "Note"])
        with open(list_of_contracts, "r") as f:
            for line in f.readlines():
                if not line.strip():
                    continue
                contract_addr = line.split(".")[0].split("_")[1]
                txs, value = get_contract_info(contract_addr)
                fp.writerow([contract_addr, value, txs])
def get_distinct_contracts(list_of_contracts = "concurr.csv"):
    """Group near-duplicate contracts listed in concurr.csv.

    Two contracts are treated as duplicates when they have identical path
    and concurrency-pair counts and their disassembled stats/tmp_*.evm
    files differ by fewer than 10 diff lines. flag[j] = i marks contract j
    a duplicate of contract i; the flag list is printed at the end.
    """
    flag = []
    with open(list_of_contracts, "rb") as csvfile:
        # Skip the CSV header row.
        contracts = csvfile.readlines()[1:]
    n = len(contracts)
    for i in range(n):
        flag.append(i) # mark which contract is similar to contract_i
    for i in range(n):
        # Already marked as a duplicate of an earlier contract.
        if flag[i] != i:
            continue
        contract_i = contracts[i].split(",")[0]
        npath_i = int(contracts[i].split(",")[1])
        npair_i = int(contracts[i].split(",")[2])
        file_i = "stats/tmp_" + contract_i + ".evm"
        print " reading file " + file_i
        for j in range(i+1, n):
            if flag[j] != j:
                continue
            contract_j = contracts[j].split(",")[0]
            npath_j = int(contracts[j].split(",")[1])
            npair_j = int(contracts[j].split(",")[2])
            # Only diff the code when the cheap stats already match.
            if (npath_i == npath_j) and (npair_i == npair_j):
                file_j = "stats/tmp_" + contract_j + ".evm"
                with open(file_i, 'r') as f1, open(file_j, 'r') as f2:
                    code_i = f1.readlines()
                    code_j = f2.readlines()
                # Quick size check before the expensive line diff.
                if abs(len(code_i) - len(code_j)) >= 5:
                    continue
                diff = difflib.ndiff(code_i, code_j)
                ndiff = 0
                for line in diff:
                    if line.startswith("+") or line.startswith("-"):
                        ndiff += 1
                # Fewer than 10 differing lines => same contract.
                if ndiff < 10:
                    flag[j] = i
    print flag
|
import io
import asyncio
import hashlib
import itertools
from datetime import datetime
from itertools import product
from contextlib import asynccontextmanager, AsyncExitStack
from typing import (
AsyncIterator,
Dict,
List,
Tuple,
Any,
NamedTuple,
Union,
Optional,
Set,
)
from .exceptions import ContextNotPresent, DefinitionNotInContext
from .types import Input, Parameter, Definition, Operation, Stage
from .base import (
OperationImplementation,
BaseConfig,
BaseContextHandle,
BaseKeyValueStoreContext,
BaseKeyValueStore,
BaseInputSetContext,
StringInputSetContext,
BaseInputSet,
BaseParameterSet,
BaseDefinitionSetContext,
BaseInputNetworkContext,
BaseInputNetwork,
BaseOperationNetworkContext,
BaseOperationNetwork,
BaseRedundancyCheckerConfig,
BaseRedundancyCheckerContext,
BaseRedundancyChecker,
BaseLockNetworkContext,
BaseLockNetwork,
BaseOperationImplementationNetworkContext,
BaseOperationImplementationNetwork,
BaseOrchestratorConfig,
BaseOrchestratorContext,
BaseOrchestrator,
)
from ..util.entrypoint import entry_point
from ..util.cli.arg import Arg
from ..util.cli.cmd import CMD
from ..util.data import ignore_args
from ..util.asynchelper import context_stacker, aenter_stack
from .log import LOGGER
class MemoryKeyValueStoreContext(BaseKeyValueStoreContext):
    """Context which reads and writes the parent store's in-memory dict."""

    async def get(self, key: str) -> Union[bytes, None]:
        """Return the value stored under ``key``, or None when absent."""
        async with self.parent.lock:
            return self.parent.memory.get(key)

    async def set(self, key: str, value: bytes):
        """Store ``value`` under ``key``, replacing any previous value."""
        async with self.parent.lock:
            self.parent.memory[key] = value
@entry_point("memory")
class MemoryKeyValueStore(BaseKeyValueStore):
    """
    Key Value store backed by dict
    """

    # Context class entered when the store is used
    CONTEXT = MemoryKeyValueStoreContext

    def __init__(self, config: BaseConfig) -> None:
        super().__init__(config)
        # Backing storage; all access goes through self.lock in the context
        self.memory: Dict[str, bytes] = {}
        self.lock = asyncio.Lock()
class MemoryInputSetConfig(NamedTuple):
    """Configuration for MemoryInputSet."""

    # Context the inputs belong to
    ctx: BaseInputSetContext
    # Inputs carried by the set
    inputs: List[Input]
class MemoryInputSet(BaseInputSet):
    """Input set backed by a plain list of Input objects."""

    def __init__(self, config: MemoryInputSetConfig) -> None:
        super().__init__(config)
        self.__inputs = config.inputs

    async def definitions(self) -> Set[Definition]:
        """Return the set of definitions of the inputs in this set."""
        return set(map(lambda item: item.definition, self.__inputs))

    async def inputs(self) -> AsyncIterator[Input]:
        """Yield each input in insertion order."""
        for item in self.__inputs:
            yield item
class MemoryParameterSetConfig(NamedTuple):
    """Configuration for MemoryParameterSet."""

    # Context the parameters belong to
    ctx: BaseInputSetContext
    # Parameters carried by the set
    parameters: List[Parameter]
class MemoryParameterSet(BaseParameterSet):
    """Parameter set backed by a plain list of Parameter objects."""

    def __init__(self, config: MemoryParameterSetConfig) -> None:
        super().__init__(config)
        self.__parameters = config.parameters

    async def parameters(self) -> AsyncIterator[Parameter]:
        """Yield each parameter in insertion order."""
        for parameter in self.__parameters:
            yield parameter

    async def inputs(self) -> AsyncIterator[Input]:
        """Yield each parameter's origin input followed by that input's parents."""
        for parameter in self.__parameters:
            yield parameter.origin
            for ancestor in parameter.origin.get_parents():
                yield ancestor
class NotificationSetContext(object):
    """Context for adding items to a NotificationSet and waiting on additions."""

    def __init__(self, parent: "NotificationSet") -> None:
        self.parent = parent
        self.logger = LOGGER.getChild(self.__class__.__qualname__)

    async def add(self, notification_item: Any, set_items: List[Any]):
        """
        Add set_items to set and notification_item to the notification queue
        """
        async with self.parent.lock:
            # BUG FIX: the original called map(self.parent.memory.add,
            # set_items) without consuming the iterator; map() is lazy in
            # Python 3, so nothing was ever added to the set. set.update()
            # performs the additions eagerly.
            self.parent.memory.update(set_items)
            self.parent.notification_items.append(notification_item)
            self.parent.event_added.set()

    async def added(self) -> Tuple[bool, List[Any]]:
        """
        Gets item from FIFO notification queue. Returns a bool for if there are
        more items to get and one of the items.
        """
        more = True
        # Make sure waiting on event_added is done by one coroutine at a time.
        # Multiple might be waiting and if there is only one event in the queue
        # they would all otherwise be triggered
        async with self.parent.event_added_lock:
            await self.parent.event_added.wait()
            async with self.parent.lock:
                notification_item = self.parent.notification_items.pop(0)
                # If there are still more items that the added event hasn't
                # processed then make sure we will return immediately if called
                # again
                if not self.parent.notification_items:
                    more = False
                    self.parent.event_added.clear()
        return more, notification_item

    async def __aenter__(self) -> "NotificationSetContext":
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        pass
class NotificationSet(object):
    """In-memory set whose contexts notify waiters of additions (FIFO)."""

    def __init__(self) -> None:
        # TODO audit use of memory (should be used sparingly)
        self.memory = set()
        self.notification_items = []
        # Synchronization: lock guards state, event_added signals additions,
        # event_added_lock serializes waiters.
        self.lock = asyncio.Lock()
        self.event_added = asyncio.Event()
        self.event_added_lock = asyncio.Lock()

    def __call__(self) -> NotificationSetContext:
        """Create a context bound to this set."""
        return NotificationSetContext(self)
class MemoryInputNetworkContextEntry(NamedTuple):
    """Per-context record in the input network's ctxhd mapping."""

    # Input set context these inputs belong to
    ctx: BaseInputSetContext
    # Inputs within the context, grouped by their definition
    definitions: Dict[Definition, List[Input]]
class MemoryDefinitionSetContext(BaseDefinitionSetContext):
    """Exposes the inputs of a single context grouped by definition."""

    async def inputs(self, definition: Definition) -> AsyncIterator[Input]:
        """Yield every input of ``definition`` within this context.

        Raises KeyError when the context handle or the definition is not
        present in the parent's ctxhd mapping.
        """
        # Grab the input set context handle
        handle = await self.ctx.handle()
        handle_string = handle.as_string()
        # Associate inputs with their context handle grouped by definition
        async with self.parent.ctxhd_lock:
            # Yield all items under the context for the given definition
            entry = self.parent.ctxhd[handle_string]
            for item in entry.definitions[definition]:
                yield item
class MemoryInputNetworkContext(BaseInputNetworkContext):
    """Context for the in-memory input network.

    Inputs are organized per context handle string and, within each context,
    grouped by definition (``self.parent.ctxhd``). A NotificationSet per
    context wakes listeners (the orchestrator) when new input sets arrive.
    """

    async def add(self, input_set: BaseInputSet):
        """Add an input set to the network and notify listeners."""
        # Grab the input set context handle
        handle = await input_set.ctx.handle()
        handle_string = handle.as_string()
        # If the context for this input set does not exist create a
        # NotificationSet for it to notify the orchestrator
        if not handle_string in self.parent.input_notification_set:
            self.parent.input_notification_set[
                handle_string
            ] = NotificationSet()
            async with self.parent.ctx_notification_set() as ctx:
                await ctx.add(input_set.ctx, [])
        # Add the input set to the incoming inputs
        async with self.parent.input_notification_set[handle_string]() as ctx:
            await ctx.add(
                input_set, [item async for item in input_set.inputs()]
            )
        # Associate inputs with their context handle grouped by definition
        async with self.parent.ctxhd_lock:
            # Create dict for handle_string if not present
            if not handle_string in self.parent.ctxhd:
                self.parent.ctxhd[
                    handle_string
                ] = MemoryInputNetworkContextEntry(
                    ctx=input_set.ctx, definitions={}
                )
            # Go through each item in the input set
            async for item in input_set.inputs():
                # Create set for item definition if not present
                if (
                    not item.definition
                    in self.parent.ctxhd[handle_string].definitions
                ):
                    self.parent.ctxhd[handle_string].definitions[
                        item.definition
                    ] = []
                # Add input to by defintion set
                self.parent.ctxhd[handle_string].definitions[
                    item.definition
                ].append(item)

    async def sadd(self, context_handle_string, *args: Input):
        """
        Shorthand for creating a MemoryInputSet with a StringInputSetContext.
        >>> await octx.ictx.add(
        ...     MemoryInputSet(
        ...         MemoryInputSetConfig(
        ...             ctx=StringInputSetContext(context_handle_string),
        ...             inputs=list(args),
        ...         )
        ...     )
        ... )
        """
        await self.add(
            MemoryInputSet(
                MemoryInputSetConfig(
                    ctx=StringInputSetContext(context_handle_string),
                    inputs=list(args),
                )
            )
        )

    async def ctx(self) -> Tuple[bool, BaseInputSetContext]:
        """Wait for and return the next context added to the network."""
        async with self.parent.ctx_notification_set() as ctx:
            return await ctx.added()

    async def added(
        self, watch_ctx: BaseInputSetContext
    ) -> Tuple[bool, BaseInputSet]:
        """Wait for new input sets within ``watch_ctx``.

        NOTE(review): unlike NotificationSetContext.added(), this drains the
        whole notification queue at once, always returns False for "more",
        and returns a *list* of input sets rather than a single one — the
        annotation says BaseInputSet; confirm callers expect the list.
        """
        # Grab the input set context handle
        handle_string = (await watch_ctx.handle()).as_string()
        # Notify whatever is listening for new inputs in this context
        async with self.parent.input_notification_set[handle_string]() as ctx:
            """
            return await ctx.added()
            """
            async with ctx.parent.event_added_lock:
                await ctx.parent.event_added.wait()
                ctx.parent.event_added.clear()
                async with ctx.parent.lock:
                    notification_items = ctx.parent.notification_items
                    ctx.parent.notification_items = []
                    return False, notification_items

    async def definition(
        self, ctx: BaseInputSetContext, definition: str
    ) -> Definition:
        """Look up a Definition by name within the given context.

        Raises ContextNotPresent or DefinitionNotInContext on failure.
        """
        async with self.parent.ctxhd_lock:
            # Grab the input set context handle
            handle_string = (await ctx.handle()).as_string()
            # Ensure that the handle_string is present in ctxhd
            if not handle_string in self.parent.ctxhd:
                raise ContextNotPresent(handle_string)
            # Search through the definitions to find one with a matching name
            found = list(
                filter(
                    lambda check: check.name == definition,
                    self.parent.ctxhd[handle_string].definitions,
                )
            )
            # Raise an error if the definition was not found in given context
            if not found:
                raise DefinitionNotInContext(
                    "%s: %s" % (handle_string, definition)
                )
            # If found then return the definition
            return found[0]

    def definitions(
        self, ctx: BaseInputSetContext
    ) -> BaseDefinitionSetContext:
        """Return a definition-set view over the given context."""
        return MemoryDefinitionSetContext(self.parent, ctx)

    async def gather_inputs(
        self,
        rctx: "BaseRedundancyCheckerContext",
        operation: Operation,
        ctx: Optional[BaseInputSetContext] = None,
    ) -> AsyncIterator[BaseParameterSet]:
        """Yield all not-yet-run parameter sets applicable to ``operation``.

        NOTE(review): the ``return`` statements inside the context loop stop
        generation for *all* remaining contexts as soon as one context fails
        a condition or lacks an input definition — confirm that is intended
        when ctx is None (multi-context search).
        """
        # Create a mapping of definitions to inputs for that definition
        gather: Dict[str, List[Parameter]] = {}
        async with self.parent.ctxhd_lock:
            # If no context is given we will generate input pairs for all
            # contexts
            contexts = self.parent.ctxhd.values()
            # If a context is given only search definitions within that context
            if not ctx is None:
                # Grab the input set context handle
                handle_string = (await ctx.handle()).as_string()
                # Ensure that the handle_string is present in ctxhd
                if not handle_string in self.parent.ctxhd:
                    return
                # Limit search to given context via context handle
                contexts = [self.parent.ctxhd[handle_string]]
            for ctx, definitions in contexts:
                # Check that all conditions are present and logicly True
                if not all(
                    map(
                        lambda definition: (
                            definition in definitions
                            and all(
                                map(
                                    lambda item: bool(item.value),
                                    definitions[definition],
                                )
                            )
                        ),
                        operation.conditions,
                    )
                ):
                    return
                # Gather all inputs with matching definitions and contexts
                for key, definition in operation.inputs.items():
                    # Return if any inputs are missing
                    if not definition in definitions:
                        return
                    else:
                        # Generate parameters from inputs
                        gather[key] = [
                            Parameter(
                                key=key,
                                value=item.value,
                                origin=item,
                                definition=definition,
                            )
                            for item in definitions[definition]
                        ]
                # Generate all possible permutations of applicable inputs
                for permutation in product(*list(gather.values())):
                    # Create the parameter set
                    parameter_set = MemoryParameterSet(
                        MemoryParameterSetConfig(ctx=ctx, parameters=permutation)
                    )
                    # Check if this permutation has been executed before
                    if not await rctx.exists(operation, parameter_set):
                        # If not then return the permutation
                        yield parameter_set
@entry_point("memory")
class MemoryInputNetwork(BaseInputNetwork):
    """
    Inputs backed by a set
    """

    CONTEXT = MemoryInputNetworkContext

    def __init__(self, config: BaseConfig) -> None:
        super().__init__(config)
        # Notifies listeners (ctx()) when a new context appears
        self.ctx_notification_set = NotificationSet()
        # One NotificationSet per context handle string for new input sets
        self.input_notification_set = {}
        # Organize by context handle string then by definition within that
        self.ctxhd: Dict[str, Dict[Definition, Any]] = {}
        # TODO Create ctxhd_locks dict to manage a per context lock
        self.ctxhd_lock = asyncio.Lock()
class MemoryOperationNetworkConfig(NamedTuple):
    """Configuration for MemoryOperationNetwork."""

    # Starting set of operations
    operations: List[Operation]
class MemoryOperationNetworkContext(BaseOperationNetworkContext):
    """Context operating on the parent network's in-memory operations."""

    async def add(self, operations: List[Operation]):
        """Add operations to the network."""
        async with self.parent.lock:
            # BUG FIX: the original called map(self.parent.memory.add,
            # operations) without consuming it; map() is lazy in Python 3,
            # so no operation was ever added. The parent initializes memory
            # from config.operations.copy() — a list — so extend() is both
            # the eager and the type-correct call (list has no .add).
            self.parent.memory.extend(operations)

    async def operations(
        self,
        input_set: Optional[BaseInputSet] = None,
        stage: Stage = Stage.PROCESSING,
    ) -> AsyncIterator[Operation]:
        """Yield operations of the given stage.

        When input_set is given, only operations whose inputs or conditions
        intersect the input set's definitions are yielded.
        """
        # Set list of needed input definitions if given
        if input_set is not None:
            input_definitions = await input_set.definitions()
        # Yield all operations with an input in the input set
        for operation in self.parent.memory:
            # Only run operations of the requested stage
            if operation.stage != stage:
                continue
            # If there is a given input set check each definition in it against
            # the operation's inputs to verify we are only looking at the subset
            if input_set is not None:
                if not [
                    item
                    for item in operation.inputs.values()
                    if item in input_definitions
                ] and not [
                    item
                    for item in operation.conditions
                    if item in input_definitions
                ]:
                    continue
            yield operation
@entry_point("memory")
class MemoryOperationNetwork(BaseOperationNetwork):
    """
    Operations backed by a set
    """

    CONTEXT = MemoryOperationNetworkContext

    def __init__(self, config: BaseConfig) -> None:
        super().__init__(config)
        # NOTE(review): config.operations is typed List, so memory is a
        # *list* here despite the "backed by a set" docstring.
        self.memory = config.operations.copy()
        self.lock = asyncio.Lock()

    @classmethod
    def args(cls, args, *above) -> Dict[str, Arg]:
        """Register the ``ops`` CLI argument used to load operations."""
        cls.config_set(args, above, "ops", Arg(type=Operation.load, nargs="+"))
        return args

    @classmethod
    def config(cls, config, *above) -> MemoryOperationNetworkConfig:
        """Build this network's config object from parsed arguments."""
        return MemoryOperationNetworkConfig(
            operations=cls.config_get(config, above, "ops")
        )
class MemoryRedundancyCheckerContext(BaseRedundancyCheckerContext):
    """Tracks which (operation, parameter set) pairs have already run.

    Pairs are keyed by a SHA384 digest and recorded in the parent's key
    value store.
    """

    async def __aenter__(self) -> "MemoryRedundancyCheckerContext":
        # Enter the parent's key value store for the lifetime of this context
        self.__stack = AsyncExitStack()
        await self.__stack.__aenter__()
        self.kvctx = await self.__stack.enter_async_context(
            self.parent.key_value_store()
        )
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.__stack.aclose()

    async def unique(
        self, operation: Operation, parameter_set: BaseParameterSet
    ) -> str:
        """
        SHA384 hash of the parameter set context handle as a string, the
        operation name, and the sorted list of input uuids.
        """
        uid_list = sorted(
            map(
                lambda x: x.uid,
                [item async for item in parameter_set.inputs()],
            )
        )
        uid_list.insert(0, (await parameter_set.ctx.handle()).as_string())
        uid_list.insert(0, operation.name)
        return hashlib.sha384(", ".join(uid_list).encode("utf-8")).hexdigest()

    async def exists(
        self, operation: Operation, parameter_set: BaseParameterSet
    ) -> bool:
        """True when the pair was previously recorded via add().

        NOTE(review): the marker is compared as the *str* "\\x01" while the
        kv store's set() signature advertises bytes values — this works with
        the memory store, which stores values verbatim; confirm before
        plugging in a different store.
        """
        # self.logger.debug('checking parameter_set: %s', list(map(
        #    lambda p: p.value,
        #    [p async for p in parameter_set.parameters()])))
        if (
            await self.kvctx.get(await self.unique(operation, parameter_set))
            != "\x01"
        ):
            return False
        return True

    async def add(self, operation: Operation, parameter_set: BaseParameterSet):
        """Record that the pair has been run (stores the marker value)."""
        # self.logger.debug('adding parameter_set: %s', list(map(
        #    lambda p: p.value,
        #    [p async for p in parameter_set.parameters()])))
        await self.kvctx.set(
            await self.unique(operation, parameter_set), "\x01"
        )
@entry_point("memory")
class MemoryRedundancyChecker(BaseRedundancyChecker):
    """
    Redundancy Checker backed by Memory Key Value Store
    """

    CONTEXT = MemoryRedundancyCheckerContext

    async def __aenter__(self) -> "MemoryRedundancyCheckerContext":
        # NOTE(review): returns self (the checker), not a
        # MemoryRedundancyCheckerContext as the annotation states.
        self.__stack = AsyncExitStack()
        await self.__stack.__aenter__()
        self.key_value_store = await self.__stack.enter_async_context(
            self.config.key_value_store
        )
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self.__stack.aclose()

    @classmethod
    def args(cls, args, *above) -> Dict[str, Arg]:
        """Register the ``kvstore`` argument plus the arguments of every
        loadable key value store."""
        # Enable the user to specify a key value store
        cls.config_set(
            args,
            above,
            "kvstore",
            Arg(type=BaseKeyValueStore.load, default=MemoryKeyValueStore),
        )
        # Load all the key value stores and add the arguments they might require
        for loaded in BaseKeyValueStore.load():
            loaded.args(args, *cls.add_orig_label(*above))
        return args

    @classmethod
    def config(cls, config, *above):
        """Build the redundancy checker config with its key value store."""
        kvstore = cls.config_get(config, above, "kvstore")
        return BaseRedundancyCheckerConfig(
            key_value_store=kvstore.withconfig(config, *cls.add_label(*above))
        )
class MemoryLockNetworkContext(BaseLockNetworkContext):
    """Hands out per-input asyncio locks for inputs whose definitions
    require locking."""

    @asynccontextmanager
    async def acquire(self, parameter_set: BaseParameterSet):
        """
        Acquire the lock for each input in the input set which must be locked
        prior to running an operation using the input.
        """
        need_lock = {}
        # Acquire the master lock to find and or create needed locks
        async with self.parent.lock:
            # Get all the inputs up the ancestry tree
            inputs = [item async for item in parameter_set.inputs()]
            # Only lock the ones which require it
            for item in filter(lambda item: item.definition.lock, inputs):
                # Create the lock for the input if not present
                if not item.uid in self.parent.locks:
                    self.parent.locks[item.uid] = asyncio.Lock()
                # Retrieve the lock
                need_lock[item.uid] = (item, self.parent.locks[item.uid])
        # Use AsyncExitStack to lock the variable amount of inputs required
        async with AsyncExitStack() as stack:
            # Take all the locks we found we needed for this parameter set
            for _uid, (item, lock) in need_lock.items():
                # Take the lock
                self.logger.debug("Acquiring: %s(%r)", item.uid, item.value)
                await stack.enter_async_context(lock)
                self.logger.debug("Acquired: %s(%r)", item.uid, item.value)
            # All locks for these parameters have been acquired
            yield
@entry_point("memory")
class MemoryLockNetwork(BaseLockNetwork):
    """Lock network backed by a dict of asyncio.Lock objects keyed by input uid."""

    CONTEXT = MemoryLockNetworkContext

    def __init__(self, config: BaseConfig) -> None:
        super().__init__(config)
        # Master lock guarding creation of entries in self.locks
        self.lock = asyncio.Lock()
        # One lock per input uid
        self.locks: Dict[str, asyncio.Lock] = {}
class MemoryOperationImplementationNetworkConfig(NamedTuple):
    """Configuration for MemoryOperationImplementationNetwork."""

    # Operation implementations keyed by operation name
    operations: Dict[str, OperationImplementation]
class MemoryOperationImplementationNetworkContext(
    BaseOperationImplementationNetworkContext
):
    """Runs operations using the implementations loaded in the parent network."""

    async def contains(self, operation: Operation) -> bool:
        """
        Checks if operation in is operations we have loaded in memory
        """
        return operation.name in self.parent.operations

    async def instantiable(self, operation: Operation) -> bool:
        """
        Looks for class registered with ____ entrypoint using pkg_resources.
        """
        raise NotImplementedError()

    async def instantiate(
        self, operation: Operation, config: BaseConfig
    ) -> bool:
        """
        Instantiate class registered with ____ entrypoint using pkg_resources.
        Return true if instantiation was successful.
        """
        raise NotImplementedError()

    async def run(
        self,
        ctx: BaseInputSetContext,
        ictx: BaseInputNetworkContext,
        operation: Operation,
        inputs: Dict[str, Any],
    ) -> Union[bool, Dict[str, Any]]:
        """
        Run an operation in our network.

        Enters the implementation's context, logs inputs/conditions at
        debug level, and returns whatever the implementation's run() gives
        back. Raises KeyError when the operation is not loaded.
        """
        async with self.parent.operations[operation.name](ctx, ictx) as opctx:
            self.logger.debug("---")
            self.logger.debug(
                "Stage: %s: %s", operation.stage.value.upper(), operation.name
            )
            self.logger.debug("Inputs: %s", inputs)
            self.logger.debug(
                "Conditions: %s",
                dict(
                    zip(
                        map(
                            lambda condition: condition.name,
                            operation.conditions,
                        ),
                        ([True] * len(operation.conditions)),
                    )
                ),
            )
            outputs = await opctx.run(inputs)
            self.logger.debug("Output: %s", outputs)
            self.logger.debug("---")
            return outputs

    async def operation_completed(self):
        """Block until the next dispatched operation finishes, then reset."""
        await self.parent.completed_event.wait()
        self.parent.completed_event.clear()

    async def run_dispatch(
        self,
        ictx: BaseInputNetworkContext,
        lctx: BaseLockNetworkContext,
        operation: Operation,
        parameter_set: BaseParameterSet,
    ):
        """
        Run an operation in the background and add its outputs to the input
        network when complete

        Returns the list of new Input objects created from the operation's
        outputs (empty when the operation returned None). Raises KeyError
        when an output key has no entry in operation.outputs.
        """
        # Lock all inputs which cannot be used simultaneously
        async with lctx.acquire(parameter_set):
            # Run the operation
            outputs = await self.run(
                parameter_set.ctx,
                ictx,
                operation,
                await parameter_set._asdict(),
            )
            if outputs is None:
                return []
            # Create a list of inputs from the outputs using the definition mapping
            try:
                inputs = []
                if operation.expand:
                    expand = operation.expand
                else:
                    expand = []
                # New inputs inherit the full ancestry of the parameters
                parents = [item async for item in parameter_set.inputs()]
                for key, output in outputs.items():
                    # Non-expanded outputs are wrapped so one Input is made;
                    # expanded outputs produce one Input per element.
                    if not key in expand:
                        output = [output]
                    for value in output:
                        inputs.append(
                            Input(
                                value=value,
                                definition=operation.outputs[key],
                                parents=parents,
                            )
                        )
            except KeyError as error:
                raise KeyError(
                    "Value %s missing from output:definition mapping %s(%s)"
                    % (
                        str(error),
                        operation.name,
                        ", ".join(operation.outputs.keys()),
                    )
                ) from error
            # Add the input set made from the outputs to the input set network
            await ictx.add(
                MemoryInputSet(
                    MemoryInputSetConfig(ctx=parameter_set.ctx, inputs=inputs)
                )
            )
            return inputs

    async def dispatch(
        self,
        ictx: BaseInputNetworkContext,
        lctx: BaseLockNetworkContext,
        operation: Operation,
        parameter_set: BaseParameterSet,
    ):
        """
        Schedule the running of an operation

        Returns the created asyncio task; the parent's completed_event is
        set when the task finishes.
        """
        self.logger.debug("[DISPATCH] %s", operation.name)
        task = asyncio.create_task(
            self.run_dispatch(ictx, lctx, operation, parameter_set)
        )
        task.add_done_callback(ignore_args(self.parent.completed_event.set))
        return task

    async def operations_parameter_set_pairs(
        self,
        ictx: BaseInputNetworkContext,
        octx: BaseOperationNetworkContext,
        rctx: BaseRedundancyCheckerContext,
        ctx: BaseInputSetContext,
        *,
        new_input_set: Optional[BaseInputSet] = None,
        stage: Stage = Stage.PROCESSING,
    ) -> AsyncIterator[Tuple[Operation, BaseInputSet]]:
        """
        Use new_input_set to determine which operations in the network might be
        up for running. Cross check using existing inputs to generate per
        input set context novel input pairings. Yield novel input pairings
        along with their operations as they are generated.
        """
        # Get operations which may possibly run as a result of these new inputs
        async for operation in octx.operations(
            input_set=new_input_set, stage=stage
        ):
            # Generate all pairs of un-run input combinations
            async for parameter_set in ictx.gather_inputs(
                rctx, operation, ctx=ctx
            ):
                yield operation, parameter_set
@entry_point("memory")
class MemoryOperationImplementationNetwork(BaseOperationImplementationNetwork):
    """Operation implementation network holding all implementations in memory.

    Implementations are entered (their async contexts opened) in __aenter__.
    """

    CONTEXT = MemoryOperationImplementationNetworkContext

    def __init__(
        self, config: MemoryOperationImplementationNetworkConfig
    ) -> None:
        super().__init__(config)
        # Implementations as configured (not yet entered)
        self.opimps = self.config.operations
        # Entered implementation contexts keyed by operation name
        self.operations = {}
        # Set whenever a dispatched operation completes
        self.completed_event = asyncio.Event()

    async def __aenter__(
        self
    ) -> "MemoryOperationImplementationNetworkContext":
        # NOTE(review): returns self (the network), not the context named
        # in the annotation.
        self.__stack = AsyncExitStack()
        await self.__stack.__aenter__()
        self.operations = {
            opimp.op.name: await self.__stack.enter_async_context(opimp)
            for opimp in self.opimps.values()
        }
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        if self.__stack is not None:
            await self.__stack.aclose()
            self.__stack = None

    @classmethod
    def args(cls, args, *above) -> Dict[str, Arg]:
        """Register the ``opimps`` argument and the arguments of every
        loadable operation implementation."""
        # Enable the user to specify operation implementations to be loaded via
        # the entrypoint system (by ParseOperationImplementationAction)
        cls.config_set(
            args,
            above,
            "opimps",
            Arg(type=OperationImplementation.load, nargs="+"),
        )
        # Add orig label to above since we are done loading
        above = cls.add_orig_label(*above)
        # Load all the opimps and add the arguments they might require
        for loaded in OperationImplementation.load():
            loaded.args(args, *above)
        return args

    @classmethod
    def config(cls, config, *above) -> BaseConfig:
        """Instantiate each selected implementation and key it by op name."""
        return MemoryOperationImplementationNetworkConfig(
            operations={
                imp.op.name: imp
                for imp in [
                    Imp.withconfig(config, "opimp")
                    for Imp in cls.config_get(config, above, "opimps")
                ]
            }
        )
class MemoryOrchestratorConfig(BaseOrchestratorConfig):
    """
    Same as the base orchestrator config; exists so the memory
    orchestrator has its own named config type.
    """
class MemoryOrchestratorContext(BaseOrchestratorContext):
    """
    Drives a dataflow in memory: watches for contexts entering the input
    network, runs all runnable operations for each context, and yields
    each context's output-stage results as it completes.
    """

    def __init__(self, parent: "BaseOrchestrator") -> None:
        super().__init__(parent)
        # AsyncExitStack holding the entered sub-network contexts
        self._stack = None

    async def __aenter__(self) -> "BaseOrchestratorContext":
        self._stack = AsyncExitStack()
        # Enter the contexts of the parent orchestrator's networks and
        # bind them as attributes on self (rctx, ictx, octx, lctx, nctx)
        self._stack = await aenter_stack(
            self,
            {
                "rctx": self.parent.rchecker,
                "ictx": self.parent.input_network,
                "octx": self.parent.operation_network,
                "lctx": self.parent.lock_network,
                "nctx": self.parent.opimp_network,
            },
        )
        return self

    async def __aexit__(self, exc_type, exc_value, traceback):
        await self._stack.aclose()

    async def run_operations(
        self, strict: bool = True
    ) -> AsyncIterator[Tuple[BaseContextHandle, Dict[str, Any]]]:
        """
        Run operations for every context that enters the input network.

        Yields (context handle, output results) per completed context.
        With strict=True any task exception is re-raised immediately;
        otherwise the stack trace is logged and execution continues.
        """
        # Track if there are more contexts
        more = True
        # Set of tasks we are waiting on
        tasks = set()
        # Create initial events to wait on
        new_context_enters_network = asyncio.create_task(self.ictx.ctx())
        tasks.add(new_context_enters_network)
        try:
            # Return when outstanding operations reaches zero
            while tasks:
                # Only the context-watcher left and no more contexts
                # will arrive: nothing remains to do
                if (
                    not more
                    and len(tasks) == 1
                    and new_context_enters_network in tasks
                ):
                    break
                # Wait for incoming events
                done, _pending = await asyncio.wait(
                    tasks, return_when=asyncio.FIRST_COMPLETED
                )
                for task in done:
                    # Remove the task from the set of tasks we are waiting for
                    tasks.remove(task)
                    # Get the tasks exception if any
                    exception = task.exception()
                    if strict and exception is not None:
                        raise exception
                    elif exception is not None:
                        # If there was an exception log it
                        output = io.StringIO()
                        task.print_stack(file=output)
                        self.logger.error("%s", output.getvalue().rstrip())
                        output.close()
                    elif task is new_context_enters_network:
                        # TODO Make some way to cap the number of context's who have
                        # operations executing. Or maybe just the number of
                        # operations. Or both.
                        # A new context entered the network
                        more, new_ctx = new_context_enters_network.result()
                        self.logger.debug(
                            "new_context_enters_network: %s",
                            (await new_ctx.handle()).as_string(),
                        )
                        # Add a task which will run operations for the new context
                        tasks.add(
                            asyncio.create_task(
                                self.run_operations_for_ctx(
                                    new_ctx, strict=strict
                                )
                            )
                        )
                        # Create a another task to waits for a new context
                        new_context_enters_network = asyncio.create_task(
                            self.ictx.ctx()
                        )
                        tasks.add(new_context_enters_network)
                    else:
                        # All operations for a context completed
                        # Yield the context that completed and the results of its
                        # output operations
                        ctx, results = task.result()
                        yield ctx, results
                self.logger.debug("ctx.outstanding: %d", len(tasks) - 1)
        finally:
            # Cancel tasks which we don't need anymore now that we know we are done
            for task in tasks:
                if not task.done():
                    task.cancel()
                else:
                    # Retrieve the exception so asyncio doesn't warn
                    # about it never being consumed
                    task.exception()

    async def run_operations_for_ctx(
        self, ctx: BaseContextHandle, *, strict: bool = True
    ) -> Tuple[BaseContextHandle, Dict[str, Any]]:
        # NOTE(review): original annotation was AsyncIterator[...], but this
        # coroutine contains no yield and returns a (ctx, results) tuple —
        # it is awaited as a task result in run_operations.
        """
        Run all operations for a single context until no more inputs
        arrive, then run CLEANUP and OUTPUT stages.

        Returns the context handle together with a dict mapping output
        operation names to their results.
        """
        # Track if there are more inputs
        more = True
        # Set of tasks we are waiting on
        tasks = set()
        # String representing the context we are executing operations for
        ctx_str = (await ctx.handle()).as_string()
        # Create initial events to wait on
        input_set_enters_network = asyncio.create_task(self.ictx.added(ctx))
        tasks.add(input_set_enters_network)
        try:
            # Return when outstanding operations reaches zero
            while tasks:
                # Only the input watcher remains and no more inputs are
                # coming for this context: we are done processing
                if (
                    not more
                    and len(tasks) == 1
                    and input_set_enters_network in tasks
                ):
                    break
                # Wait for incoming events
                done, _pending = await asyncio.wait(
                    tasks, return_when=asyncio.FIRST_COMPLETED
                )
                for task in done:
                    # Remove the task from the set of tasks we are waiting for
                    tasks.remove(task)
                    # Get the tasks exception if any
                    exception = task.exception()
                    if strict and exception is not None:
                        raise exception
                    elif exception is not None:
                        # If there was an exception log it
                        output = io.StringIO()
                        task.print_stack(file=output)
                        self.logger.error("%s", output.getvalue().rstrip())
                        output.close()
                    elif task is input_set_enters_network:
                        more, new_input_sets = (
                            input_set_enters_network.result()
                        )
                        for new_input_set in new_input_sets:
                            # Identify which operations have complete contextually
                            # appropriate input sets which haven't been run yet
                            async for operation, parameter_set in self.nctx.operations_parameter_set_pairs(
                                self.ictx,
                                self.octx,
                                self.rctx,
                                ctx,
                                new_input_set=new_input_set,
                            ):
                                # Add inputs and operation to redundancy checker before
                                # dispatch
                                await self.rctx.add(operation, parameter_set)
                                # Dispatch the operation and input set for running
                                dispatch_operation = await self.nctx.dispatch(
                                    self.ictx,
                                    self.lctx,
                                    operation,
                                    parameter_set,
                                )
                                tasks.add(dispatch_operation)
                                self.logger.debug(
                                    "[%s]: dispatch operation: %s",
                                    ctx_str,
                                    operation.name,
                                )
                        # Create a another task to waits for new input sets
                        input_set_enters_network = asyncio.create_task(
                            self.ictx.added(ctx)
                        )
                        tasks.add(input_set_enters_network)
        finally:
            # Cancel tasks which we don't need anymore now that we know we are done
            for task in tasks:
                if not task.done():
                    task.cancel()
                else:
                    # Consume the exception to avoid un-retrieved warnings
                    task.exception()
            # Run cleanup
            async for _operation, _results in self.run_stage(
                ctx, Stage.CLEANUP
            ):
                pass
        # Run output and return context along with output
        return (
            ctx,
            {
                operation.name: results
                async for operation, results in self.run_stage(
                    ctx, Stage.OUTPUT
                )
            },
        )

    async def run_stage(self, ctx: BaseInputSetContext, stage: Stage):
        """
        Run every not-yet-run operation of the given stage for this
        context, yielding (operation, results) pairs.
        """
        # Identify which operations have complete contextually appropriate
        # input sets which haven't been run yet and are stage operations
        async for operation, parameter_set in self.nctx.operations_parameter_set_pairs(
            self.ictx, self.octx, self.rctx, ctx, stage=stage
        ):
            # Add inputs and operation to redundancy checker before dispatch
            await self.rctx.add(operation, parameter_set)
            # Run the operation, input set pair
            yield operation, await self.nctx.run(
                ctx, self.ictx, operation, await parameter_set._asdict()
            )
@entry_point("memory")
class MemoryOrchestrator(BaseOrchestrator):
CONTEXT = MemoryOrchestratorContext
def __init__(self, config: "BaseConfig") -> None:
super().__init__(config)
self._stack = None
async def __aenter__(self) -> "DataFlowFacilitator":
self._stack = await aenter_stack(
self,
{
"rchecker": self.config.rchecker,
"input_network": self.config.input_network,
"operation_network": self.config.operation_network,
"lock_network": self.config.lock_network,
"opimp_network": self.config.opimp_network,
},
call=False,
)
return self
async def __aexit__(self, exc_type, exc_value, traceback):
await self._stack.aclose()
@classmethod
def args(cls, args, *above) -> Dict[str, Arg]:
# Extending above is done right before loading args of subclasses
cls.config_set(
args,
above,
"input",
"network",
Arg(type=BaseInputNetwork.load, default=MemoryInputNetwork),
)
cls.config_set(
args,
above,
"operation",
"network",
Arg(
type=BaseOperationNetwork.load, default=MemoryOperationNetwork
),
)
cls.config_set(
args,
above,
"opimp",
"network",
Arg(
type=BaseOperationImplementationNetwork.load,
default=MemoryOperationImplementationNetwork,
),
)
cls.config_set(
args,
above,
"lock",
"network",
Arg(type=BaseLockNetwork.load, default=MemoryLockNetwork),
)
cls.config_set(
args,
above,
"rchecker",
Arg(
type=BaseRedundancyChecker.load,
default=MemoryRedundancyChecker,
),
)
above = cls.add_orig_label(*above)
for sub in [
BaseInputNetwork,
BaseOperationNetwork,
BaseOperationImplementationNetwork,
BaseLockNetwork,
BaseRedundancyChecker,
]:
for loaded in sub.load():
loaded.args(args, *above)
return args
@classmethod
def config(cls, config, *above):
input_network = cls.config_get(config, above, "input", "network")
operation_network = cls.config_get(
config, above, "operation", "network"
)
opimp_network = cls.config_get(config, above, "opimp", "network")
lock_network = cls.config_get(config, above, "lock", "network")
rchecker = cls.config_get(config, above, "rchecker")
above = cls.add_label(*above)
return MemoryOrchestratorConfig(
input_network=input_network.withconfig(config, *above),
operation_network=operation_network.withconfig(config, *above),
lock_network=lock_network.withconfig(config, *above),
opimp_network=opimp_network.withconfig(config, *above),
rchecker=rchecker.withconfig(config, *above),
)
@classmethod
def basic_config(
cls, *args: OperationImplementation, config: Dict[str, Any] = None
):
"""
Creates a Memory Orchestrator which will be backed by other objects
within dffml.df.memory.
"""
if config is None:
config = {}
return MemoryOrchestrator(
MemoryOrchestratorConfig(
input_network=MemoryInputNetwork(BaseConfig()),
operation_network=MemoryOperationNetwork(
MemoryOperationNetworkConfig(
operations=[Imp.op for Imp in args]
)
),
lock_network=MemoryLockNetwork(BaseConfig()),
rchecker=MemoryRedundancyChecker(
BaseRedundancyCheckerConfig(
key_value_store=MemoryKeyValueStore(BaseConfig())
)
),
opimp_network=MemoryOperationImplementationNetwork(
MemoryOperationImplementationNetworkConfig(
operations={
imp.op.name: imp
for imp in [Imp.withconfig(config) for Imp in args]
}
)
),
)
)
|
<reponame>whfh3900/Tacotron-2-korea-example
import os
import numpy as np
import tensorflow as tf
from datasets.audio import save_wavenet_wav, get_hop_size, melspectrogram
from infolog import log
from wavenet_vocoder.models import create_model
from wavenet_vocoder.train import create_shadow_saver, load_averaged_model
from wavenet_vocoder.feeder import _interp
from . import util
class Synthesizer:
    """
    WaveNet vocoder inference wrapper: builds the synthesis graph,
    restores a checkpoint, and turns mel spectrograms into wav files.
    """

    def load(self, checkpoint_path, hparams, model_name='WaveNet'):
        """Build the WaveNet graph, open a session and restore weights."""
        print('Constructing model: {}'.format(model_name))
        log('Constructing model: {}'.format(model_name))
        self._hparams = hparams
        local_cond, global_cond = self._check_conditions()
        # Placeholders: mel features when locally conditioned, speaker id
        # when globally conditioned, otherwise a fixed synthesis length
        self.local_conditions = tf.placeholder(tf.float32, shape=(None, None, hparams.num_mels), name='local_condition_features') if local_cond else None
        self.global_conditions = tf.placeholder(tf.int32, shape=(None, 1), name='global_condition_features') if global_cond else None
        self.synthesis_length = tf.placeholder(tf.int32, shape=(), name='synthesis_length') if not local_cond else None
        self.targets = tf.placeholder(tf.float32, shape=(1, None, 1), name='audio_targets') if hparams.wavenet_synth_debug else None #Debug only with 1 wav
        self.input_lengths = tf.placeholder(tf.int32, shape=(1, ), name='input_lengths') if hparams.wavenet_synth_debug else None
        self.synth_debug = hparams.wavenet_synth_debug
        with tf.variable_scope('WaveNet_model') as scope:
            self.model = create_model(model_name, hparams)
        self.model.initialize(y=None, c=self.local_conditions, g=self.global_conditions,
            input_lengths=self.input_lengths, synthesis_length=self.synthesis_length, test_inputs=self.targets)
        # NOTE(review): duplicate assignment — self._hparams was already set above
        self._hparams = hparams
        # Saver over shadow (exponential moving average) variables
        sh_saver = create_shadow_saver(self.model)
        print('Loading checkpoint: {}'.format(checkpoint_path))
        log('Loading checkpoint: {}'.format(checkpoint_path))
        #Memory allocation on the GPU as needed
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        config.allow_soft_placement = True
        self.session = tf.Session(config=config)
        self.session.run(tf.global_variables_initializer())
        load_averaged_model(self.session, sh_saver, checkpoint_path)

    def synthesize(self, mel_spectrograms, speaker_ids, basenames, out_dir, log_dir):
        """
        Synthesize one wav per mel spectrogram.

        Writes wavs to out_dir, diagnostic plots to log_dir, and returns
        the list of written audio file paths.
        """
        hparams = self._hparams
        local_cond, global_cond = self._check_conditions()
        #Switch mels in case of debug
        if self.synth_debug:
            assert len(hparams.wavenet_debug_mels) == len(hparams.wavenet_debug_wavs)
            mel_spectrograms = [np.load(mel_file) for mel_file in hparams.wavenet_debug_mels]
        #Get True length of audio to be synthesized: audio_len = mel_len * hop_size
        audio_lengths = [len(x) * get_hop_size(self._hparams) for x in mel_spectrograms]
        #Prepare local condition batch
        maxlen = max([len(x) for x in mel_spectrograms])
        #[-max, max] or [0,max]
        T2_output_range = (-self._hparams.max_abs_value, self._hparams.max_abs_value) if self._hparams.symmetric_mels else (0, self._hparams.max_abs_value)
        if self._hparams.clip_for_wavenet:
            mel_spectrograms = [np.clip(x, T2_output_range[0], T2_output_range[1]) for x in mel_spectrograms]
        # Pad all mels to the batch max length with the range minimum
        c_batch = np.stack([_pad_inputs(x, maxlen, _pad=T2_output_range[0]) for x in mel_spectrograms]).astype(np.float32)
        if self._hparams.normalize_for_wavenet:
            #rerange to [0, 1]
            c_batch = _interp(c_batch, T2_output_range).astype(np.float32)
        g = None if speaker_ids is None else np.asarray(speaker_ids, dtype=np.int32).reshape(len(c_batch), 1)
        feed_dict = {}
        if local_cond:
            feed_dict[self.local_conditions] = c_batch
        else:
            feed_dict[self.synthesis_length] = 100
        if global_cond:
            feed_dict[self.global_conditions] = g
        if self.synth_debug:
            debug_wavs = hparams.wavenet_debug_wavs
            assert len(debug_wavs) % hparams.wavenet_num_gpus == 0
            test_wavs = [np.load(debug_wav).reshape(-1, 1) for debug_wav in debug_wavs]
            #pad wavs to same length
            max_test_len = max([len(x) for x in test_wavs])
            test_wavs = np.stack([_pad_inputs(x, max_test_len) for x in test_wavs]).astype(np.float32)
            assert len(test_wavs) == len(debug_wavs)
            feed_dict[self.targets] = test_wavs.reshape(len(test_wavs), max_test_len, 1)
            feed_dict[self.input_lengths] = np.asarray([test_wavs.shape[1]])
        #Generate wavs and clip extra padding to select Real speech parts
        generated_wavs, upsampled_features = self.session.run([self.model.tower_y_hat, self.model.tower_synth_upsampled_local_features], feed_dict=feed_dict)
        #Linearize outputs (n_gpus -> 1D)
        generated_wavs = [wav for gpu_wavs in generated_wavs for wav in gpu_wavs]
        upsampled_features = [feat for gpu_feats in upsampled_features for feat in gpu_feats]
        generated_wavs = [generated_wav[:length] for generated_wav, length in zip(generated_wavs, audio_lengths)]
        upsampled_features = [upsampled_feature[:, :length] for upsampled_feature, length in zip(upsampled_features, audio_lengths)]
        audio_filenames = []
        for i, (generated_wav, input_mel, upsampled_feature) in enumerate(zip(generated_wavs, mel_spectrograms, upsampled_features)):
            #Save wav to disk
            audio_filename = os.path.join(out_dir, 'wavenet-audio-{}.wav'.format(basenames[i]))
            save_wavenet_wav(generated_wav, audio_filename, sr=hparams.sample_rate, inv_preemphasize=hparams.preemphasize, k=hparams.preemphasis)
            audio_filenames.append(audio_filename)
            #Compare generated wav mel with original input mel to evaluate wavenet audio reconstruction performance
            #Both mels should match on low frequency information, wavenet mel should contain more high frequency detail when compared to Tacotron mels.
            generated_mel = melspectrogram(generated_wav, hparams).T
            # NOTE(review): log_dir is used here unconditionally but is
            # None-checked below for the waveplot — confirm callers always
            # pass a real log_dir, else these joins crash
            util.plot_spectrogram(generated_mel, os.path.join(log_dir, 'wavenet-mel-spectrogram-{}.png'.format(basenames[i])),
                title='Local Condition vs Reconstructed Audio Mel-Spectrogram analysis', target_spectrogram=input_mel)
            #Save upsampled features to visualize checkerboard artifacts.
            util.plot_spectrogram(upsampled_feature.T, os.path.join(log_dir, 'wavenet-upsampled_features-{}.png'.format(basenames[i])),
                title='Upmsampled Local Condition features', auto_aspect=True)
            #Save waveplot to disk
            if log_dir is not None:
                plot_filename = os.path.join(log_dir, 'wavenet-waveplot-{}.png'.format(basenames[i]))
                util.waveplot(plot_filename, generated_wav, None, hparams, title='WaveNet generated Waveform.')
        return audio_filenames

    def _check_conditions(self):
        """Return (local, global) conditioning flags from channel counts."""
        local_condition = self._hparams.cin_channels > 0
        global_condition = self._hparams.gin_channels > 0
        return local_condition, global_condition
def _pad_inputs(x, maxlen, _pad=0):
return np.pad(x, [(0, maxlen - len(x)), (0, 0)], mode='constant', constant_values=_pad)
|
#! /usr/bin/env python
import lib_robotis_xm430 as xm430
import sys
import time
import rospy
import actionlib
import o2as_msgs.msg
class ToolsAction:
    """
    Action server driving a Dynamixel XM430 "set screw" tool.

    Exposes a ToolsCommandAction server; supported goals are
    ``stop`` (disable torque) and ``setScrew_fasten`` (fasten at a
    fixed current and monitor until the motor stalls or times out).
    """

    def __init__(self):
        name = rospy.get_name()
        serial_port = rospy.get_param(name + "/serial_port", "/dev/ttyUSB0")
        rospy.loginfo("Starting up on serial port: " + serial_port)
        self.setScrew_motor_id = rospy.get_param(name + "/setScrew_motor_id", 75)
        self.dynamixel = xm430.USB2Dynamixel_Device(serial_port, baudrate=57600)
        self.p1 = xm430.Robotis_Servo2(self.dynamixel, self.setScrew_motor_id, series="XM")
        self._feedback = o2as_msgs.msg.ToolsCommandFeedback()
        self._result = o2as_msgs.msg.ToolsCommandResult()
        # Define and start the action server
        self._action_name = "setScrew_tools_action"
        self._action_server = actionlib.SimpleActionServer(
            self._action_name, o2as_msgs.msg.ToolsCommandAction,
            execute_cb=self.action_callback, auto_start=False)
        self._action_server.start()
        rospy.loginfo('Action server '+ str(self._action_name)+" started.")

    def action_callback(self, goal):
        """Execute one ToolsCommand goal (stop or fasten)."""
        # publish info to the console for the user
        rospy.loginfo('Executing'+ str(self._action_name)+"."+"request sent:")
        rospy.loginfo(goal)
        # start executing the action
        command_is_sent = False
        if goal.stop:
            rospy.loginfo("Turning off torque.")
            # BUG FIX: this branch previously referenced undefined
            # command_is_sent2/command_is_sent3, raising NameError on
            # every stop request. Only one command is sent here.
            command_is_sent = self.setScrew_disable_torque()
        elif goal.setScrew_fasten:
            command_is_sent = self.setScrew_fasten(30)
        else:
            rospy.logerr('No command is sent, service request was empty.')
            command_is_sent = False
        success = command_is_sent
        if success:
            if goal.stop:
                # Arbitrary sentinel below any real speed reading
                self._feedback.motor_speed = -1
            elif goal.setScrew_fasten:
                self._feedback.motor_speed = self.p1.read_current_velocity()
                self._feedback.countTime = 0
                # Monitor until the motor stalls (speed <= 10) or ~10 s pass
                while self._feedback.motor_speed > 10 and self._feedback.countTime < 100:
                    rospy.sleep(0.1)
                    self._feedback.countTime += 1
                    # check that preempt has not been requested by the client
                    if self._action_server.is_preempt_requested():
                        rospy.loginfo('%s: Preempted' % self._action_name)
                        self._action_server.set_preempted()
                        success = False
                        break
                    self._feedback.motor_speed = self.p1.read_current_velocity()
                    # publish the feedback
                    self._action_server.publish_feedback(self._feedback)
        if success:
            self._result.success = True
            rospy.loginfo('%s: Succeeded' % self._action_name)
            self._action_server.set_succeeded(self._result)
        else:
            self._action_server.set_preempted()
            # Leave the tool in a safe state on failure/preempt
            self.setScrew_disable_torque()

    ######################################################
    def setScrew_fasten(self, current):
        """Switch to current control and start fastening; True on success."""
        try:
            self.p1.set_operating_mode("current")
            self.p1.set_positive_direction("ccw")
            self.p1.set_current(current)
            rospy.sleep(0.1)
            return True
        except Exception:
            rospy.logerr("Failed to run commands.")
            return False

    def setScrew_disable_torque(self):
        """Disable motor torque; True on success."""
        try:
            self.p1.disable_torque()
            return True
        except Exception:
            rospy.logerr("Failed to run commands.")
            return False
if __name__ == '__main__':
    # Start the ROS node, bring up the action server, and serve until shutdown
    rospy.init_node('tools_server')
    server = ToolsAction()
    rospy.spin()
|
<filename>scripts/analysis/plot_embedding.py
#!/usr/bin/python
# Visualize learned character embeddings with PCA and t-SNE, labelling
# each point with the note name (or raw token) it represents.

import h5py
import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import json
import codecs
from music21 import *

# Load the exported tensors (input token ids plus three embedding layers)
data = dict()
for embed in ['input', 'char_embeddings', 'embedding-lstm1', 'embedding-lstm2']:
    fname = embed + '.h5'
    f = h5py.File('./' + fname, 'r')
    # NOTE(review): dataset path is hard-coded to one user's export dir,
    # and `.value` is removed in h5py >= 3 (use ds[()] there) — confirm
    # h5py version before reuse
    data[embed] = f['home/fl350/data/' + fname].value

# Token-id -> readable-text mappings produced by the corpus pipeline
utf_to_txt = json.load(codecs.open('./utf_to_txt.json', 'rb', 'utf-8'))
corpus_vocab = json.load(codecs.open('./concat_corpus.json', 'rb', 'utf-8'))

data['input'] = data['input'].squeeze()
# Convert each input token to a human-readable label: note tokens become
# pitch names via music21, everything else keeps its text form
x = []
for idx in list(data['input']):
    txt = utf_to_txt[corpus_vocab['idx_to_token'][str(idx)]]
    if len(txt) > 5:  # NOTE: hacky way to tell if we have a note, but works...
        midi, tied = eval(txt)
        n = note.Note()
        n.pitch.midi = midi
        #data['input'].append((n.pitch.nameWithOctave, tied))
        x.append(n.pitch.nameWithOctave)
    else:
        x.append(txt)
data['input'] = np.array(x)

X = data['char_embeddings']

# PCA plot: project embeddings to 2D and annotate each point
pca = PCA(n_components=2)
pca_embed = pca.fit_transform(X)
fig = plt.figure(figsize=(15, 8))
plt.title("PCA on character embeddings", fontsize=14)
plt.scatter(pca_embed[:,0], pca_embed[:,1], cmap=plt.cm.rainbow)
for label, x, y in zip(data['input'], pca_embed[:, 0], pca_embed[:, 1]):
    plt.annotate(
        label,
        xy = (x, y), xytext = (0, 0),
        textcoords = 'offset points', ha = 'right', va = 'bottom',
        bbox = dict(boxstyle = 'round', fc = 'grey', alpha = 0.25),)
        #arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.grid()
plt.savefig('PCA-notes.png')
plt.show()

# tSNE plot (PCA to 50 dims first is computed but only the raw-X fit is plotted)
pca = PCA(n_components=50)
pca_data = pca.fit_transform(X)
model = TSNE(n_components=2, random_state=0)
tsne_embed = model.fit_transform(X)
# NOTE(review): tsne_embed_pca is computed but never plotted below — dead
# work unless the PCA-reduced variant was meant to be shown
tsne_embed_pca = model.fit_transform(pca_data)
fig = plt.figure(figsize=(15, 8))
plt.title("tSNE on character embeddings", fontsize=14)
plt.scatter(tsne_embed[:,0], tsne_embed[:,1], cmap=plt.cm.rainbow)
for label, x, y in zip(data['input'], tsne_embed[:, 0], tsne_embed[:, 1]):
    plt.annotate(
        label,
        xy = (x, y), xytext = (0, 0),
        textcoords = 'offset points', ha = 'right', va = 'bottom',
        bbox = dict(boxstyle = 'round', fc = 'grey', alpha = 0.25),)
        #arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))
plt.xlabel('tSNE dim 1')
plt.ylabel('tSNE dim 2')
plt.grid()
plt.savefig('tSNE-notes.png')
plt.show()
|
import numpy as np
import scipy.optimize as spo
'''
Metropolis-adjusted Langevin algorithm or Langevin Monte Carlo (LMC)
'''
def sampler(logpostfunc, options):
    '''
    Metropolis-adjusted Langevin / random-walk sampler with a
    preoptimization phase and Robbins-Monro step-size adaptation.

    Parameters
    ----------
    logpostfunc : function
        A function call describing the log of the posterior distribution.
        If no gradient, logpostfunc should take a value of an m by p numpy
        array of parameters and theta and return
        a length m numpy array of log posterior evaluations.
        If gradient, logpostfunc should return a tuple. The first element
        in the tuple should be as listed above.
        The second element in the tuple should be an m by p matrix of
        gradients of the log posterior.
    options : dict
        a dictionary of options for the sampler.
        Required -
            theta0: an m by p matrix of initial parameter values.
        Optional -
            numsamp: the number of samplers you want from the posterior.
            Default is 2000.

    Returns
    -------
    dict
        {'theta': numsamp by p sampled parameter values,
         'logpost': log posterior values from the final chains}

    Raises
    ------
    ValueError
        If theta0 is missing, the log density returns a malformed tuple,
        the gradient has the wrong shape, no starting points vary, or the
        chains fail to move at all.
    '''
    if 'theta0' in options.keys():
        theta0 = options['theta0']
    else:
        raise ValueError('Unknown theta0')
    # Initialize
    if 'numsamp' in options.keys():
        numsamp = options['numsamp']
    else:
        numsamp = 2000
    # Minimum effective sample size (ESS) desired in the returned samples
    tarESS = np.max((150, 10 * theta0.shape[1]))
    # Probe the log posterior to detect whether it returns gradients
    testout = logpostfunc(theta0[0:2, :])
    if type(testout) is tuple:
        if len(testout) != 2:
            raise ValueError('log density does not return 1 or 2 elements')
        # BUG FIX: was `is not`, i.e. an identity check on ints, which only
        # worked by CPython's small-int caching and would misfire for p > 256
        if testout[1].shape[1] != theta0.shape[1]:
            raise ValueError('derivative appears to be the wrong shape')
        logpostf = logpostfunc

        def logpostf_grad(theta):
            return logpostfunc(theta)[1]
        try:
            # Prefer a gradient-free entry point if the function offers one
            testout = logpostfunc(theta0[10, :], return_grad=False)
            if type(testout) is tuple:
                raise ValueError('Cannot stop returning a grad')

            def logpostf_nograd(theta):
                return logpostfunc(theta, return_grad=False)
        except Exception:
            def logpostf_nograd(theta):
                return logpostfunc(theta)[0]
    else:
        logpostf_grad = None
        logpostf = logpostfunc
        logpostf_nograd = logpostfunc
    # Langevin proposals allow a larger step and higher target acceptance
    if logpostf_grad is None:
        rho = 2 / theta0.shape[1] ** (1 / 2)
        taracc = 0.25
    else:
        rho = 2 / theta0.shape[1] ** (1 / 6)
        taracc = 0.60
    # Resample starting points toward high-posterior regions; shrink toward
    # the best point if the draws collapse onto (nearly) a single value
    keepgoing = True
    theta0 = np.unique(theta0, axis=0)
    iteratttempt = 0
    while keepgoing:
        logpost = logpostf_nograd(theta0) / 4
        mlogpost = np.max(logpost)
        logpost -= (mlogpost + np.log(np.sum(np.exp(logpost - mlogpost))))
        post = np.exp(logpost)
        post = post / np.sum(post)
        thetaposs = theta0[np.random.choice(range(0, theta0.shape[0]),
                                            size=1000,
                                            p=post.reshape((theta0.shape[0],
                                                            ))), :]
        if np.any(np.std(thetaposs, 0) < 10 ** (-8) * np.min(np.std(theta0,
                                                                    0))):
            thetastar = theta0[np.argmax(logpost), :]
            theta0 = thetastar + (theta0 - thetastar) / 2
            iteratttempt += 1
        else:
            theta0 = thetaposs
            keepgoing = False
        if iteratttempt > 10:
            raise ValueError('Could not find any points to vary.')
    thetaop = theta0[:10, :]
    thetastart = theta0
    # Standardize parameters for the optimizer
    thetac = np.mean(theta0, 0)
    thetas = np.maximum(np.std(theta0, 0), 10 ** (-8) * np.std(theta0))

    def neglogpostf_nograd(thetap):
        theta = thetac + thetas * thetap
        return -logpostf_nograd(theta.reshape((1, len(theta))))
    if logpostf_grad is not None:
        def neglogpostf_grad(thetap):
            theta = thetac + thetas * thetap
            return -thetas * logpostf_grad(theta.reshape((1, len(theta))))
    boundL = np.maximum(-10 * np.ones(theta0.shape[1]),
                        np.min((theta0 - thetac) / thetas, 0))
    boundU = np.minimum(10 * np.ones(theta0.shape[1]),
                        np.max((theta0 - thetac) / thetas, 0))
    bounds = spo.Bounds(boundL, boundU)
    keeptryingwithgrad = True
    failureswithgrad = 0
    # begin preoptimizer: polish 10 starting points with L-BFGS-B, falling
    # back to gradient-free optimization if the gradient keeps failing
    for k in range(0, thetaop.shape[0]):
        theta0 = (thetaop[k, :] - thetac) / thetas
        if logpostf_grad is None:
            opval = spo.minimize(neglogpostf_nograd,
                                 theta0,
                                 method='L-BFGS-B',
                                 bounds=bounds)
            thetaop[k, :] = thetac + thetas * opval.x
        else:
            if keeptryingwithgrad:
                opval = spo.minimize(neglogpostf_nograd,
                                     theta0,
                                     method='L-BFGS-B',
                                     jac=neglogpostf_grad,
                                     bounds=bounds,
                                     options={'maxiter': 15, 'maxfun': 100})
                thetaop[k, :] = thetac + thetas * opval.x
            if not keeptryingwithgrad or not opval.success:
                if keeptryingwithgrad:
                    # Beta-distribution test: give up on gradients once the
                    # failure rate is credibly above 25%
                    failureswithgrad += 1
                    alpha = failureswithgrad + 0.25
                    beta = (k - failureswithgrad + 1)
                    stdtest = np.sqrt(alpha * beta / ((alpha + beta + 1) *
                                                      ((alpha + beta) ** 2)))
                    meantest = alpha / (alpha + beta)
                    if meantest - 3 * stdtest > 0.25:
                        keeptryingwithgrad = False
                opval = spo.minimize(neglogpostf_nograd,
                                     theta0,
                                     method='L-BFGS-B',
                                     bounds=bounds,
                                     options={'maxiter': 4, 'maxfun': 100})
                thetaop[k, :] = thetac + thetas * opval.x
    # end Preoptimizer
    thetasave = np.vstack((thetastart, thetaop))
    Lsave = logpostf_nograd(thetasave)
    # rho is the step scale, adapted via tau through a tanh transform
    tau = -1
    rho = 2 * (1 + (np.exp(2 * tau) - 1) / (np.exp(2 * tau) + 1))
    numchain = 50
    maxiters = 10
    numsamppc = 20
    covmat0 = np.diag(thetas)
    for iters in range(0, maxiters):
        # Bootstrap starting points and refresh the proposal covariance
        startingv = np.random.choice(np.arange(0, Lsave.shape[0]),
                                     size=Lsave.shape[0])
        thetasave = thetasave[startingv, :]
        covmat0 = 0.1 * covmat0 + 0.9 * np.cov(thetasave.T)
        if covmat0.ndim > 1:
            covmat0 += 0.1 * np.diag(np.diag(covmat0))
            Wc, Vc = np.linalg.eigh(covmat0)
            hc = (Vc @ np.diag(np.sqrt(Wc)) @ Vc.T)
        else:
            hc = np.sqrt(covmat0)
        thetac = thetasave[np.random.choice(range(0, thetasave.shape[0]),
                                            size=numchain), :]
        if logpostf_grad is not None:
            fval, dfval = logpostf(thetac)
        else:
            fval = logpostf_nograd(thetac)
        thetasave = np.zeros((numchain, numsamppc, thetac.shape[1]))
        Lsave = np.zeros((numchain, numsamppc))
        numtimes = 0
        for k in range(0, numsamppc):
            # Propose: random walk, plus a Langevin drift term when gradients
            # are available (with the corresponding MH correction qadj)
            rvalo = np.random.normal(0, 1, thetac.shape)
            rval = np.sqrt(2) * rho * (rvalo @ hc)
            if rval.ndim != thetac.ndim:
                rval = np.reshape(rval, (thetac.shape))
            thetap = thetac + rval
            if logpostf_grad is not None:
                diffval = rho ** 2 * (dfval @ covmat0)
                thetap += diffval
                fvalp, dfvalp = logpostf(thetap)
                term1 = rvalo / np.sqrt(2)
                term2 = (dfval + dfvalp) @ hc * rho / 2
                qadj = -(2 * np.sum(term1 * term2, 1) + np.sum(term2 ** 2, 1))
            else:
                fvalp = logpostf_nograd(thetap)
                qadj = np.zeros(fvalp.shape)
            # Metropolis-Hastings accept/reject per chain
            swaprnd = np.log(np.random.uniform(size=fval.shape[0]))
            whereswap = np.where(np.squeeze(swaprnd)
                                 < np.squeeze(fvalp - fval)
                                 + np.squeeze(qadj))[0]
            if whereswap.shape[0] > 0:
                numtimes = numtimes + (whereswap.shape[0] / numchain)
                thetac[whereswap, :] = 1 * thetap[whereswap, :]
                fval[whereswap] = 1 * fvalp[whereswap]
                if logpostf_grad is not None:
                    dfval[whereswap, :] = 1 * dfvalp[whereswap, :]
            # Robbins-Monroe updates
            if iters < 1.5:
                tau = tau + 1 / np.sqrt(1 + 100 / numchain * k) * \
                    ((whereswap.shape[0] / numchain) - taracc)
                rho = 2 * (1 + (np.exp(2 * tau) - 1) / (np.exp(2 * tau) + 1))
            thetasave[:, k, :] = thetac
            Lsave[:, k] = fval.reshape((len(fval),))
        # Gelman-Rubin-style diagnostics: between/within-chain variance and
        # lag-1 autocorrelation give an effective sample size estimate
        mut = np.mean(np.mean(thetasave, 1), 0)
        B = np.zeros(mut.shape)
        autocorr = np.zeros(mut.shape)
        W = np.zeros(mut.shape)
        for i in range(0, numchain):
            muv = np.mean(thetasave[i, :, :], 0)
            autocorr += 1 / numchain * \
                np.mean((thetasave[i, 0:(numsamppc - 1), :] - muv.T) *
                        (thetasave[i, 1:, :] - muv.T), 0)
            W += 1 / numchain * \
                np.mean((thetasave[i, 0:(numsamppc - 1), :] - muv.T) ** 2, 0)
            B += numsamppc / (numchain - 1) * ((muv - mut) ** 2)
        varplus = W + 1 / numsamppc * B
        if np.any(varplus < 10 ** (-10)):
            raise ValueError('Sampler failed to move at all.')
        else:
            rhohat = (1 - (W - autocorr) / varplus)
        ESS = 1 + numchain * numsamppc * (1 - np.abs(rhohat))
        thetasave = np.reshape(thetasave, (-1, thetac.shape[1]))
        accr = numtimes / numsamppc
        # termination criteria
        if iters > 1.5 and accr > taracc / 2 and accr < 1.5 * taracc and \
                (np.mean(ESS) > tarESS):
            break
        elif accr < taracc * 4 / 5 or accr > taracc * 5 / 4:
            # Acceptance rate off target: adjust the step scale
            tau = tau + 1 / (0.2 + 0.2 * iters) * (accr - taracc)
            rho = 2 * (1 + (np.exp(2 * tau) - 1) / (np.exp(2 * tau) + 1))
        if accr < taracc * 1.5 and accr > taracc * 0.6:
            # Acceptance acceptable but ESS short: lengthen the chains
            trm = np.min((1.5 * tarESS / np.mean(ESS), 4))
            numsamppc = np.ceil(numsamppc * trm).astype('int')
    theta = thetasave[np.random.choice(range(0, thetasave.shape[0]),
                                       size=numsamp), :]
    sampler_info = {'theta': theta, 'logpost': Lsave}
    return sampler_info
import os
import pytest
from sqlalchemy import Column, Text
from alembic import command
from alembic.autogenerate import render_python_code, produce_migrations
from alembic.config import Config
from alembic.migration import MigrationContext
from alembic.operations import Operations, ops
from sqlalchemy_bigint_id.schema import (
register_next_bigint_id_function, generate_next_bigint_id_sql_for_table, setup_bigint_id_for_all_tables
)
from sqlalchemy_bigint_id.migration import CreateNextBigIntegerIdFunctionOp, DropNextBigIntegerIdFunctionOp
from sqlalchemy_bigint_id.utils import get_bigint_id_column_from_table
from sqlalchemy_bigint_id.types import BigIntegerID
@pytest.fixture
def Foo(Base):
    # Minimal model whose primary key uses the BigIntegerID type under test
    class Foo(Base):
        __tablename__ = 'foo'
        id = Column(BigIntegerID, primary_key=True)
        name = Column(Text)
    return Foo
@pytest.fixture
def User(Base):
    # Model whose table name ("user") is a reserved word, to exercise quoting
    class User(Base):
        __tablename__ = 'user'
        id = Column(BigIntegerID, primary_key=True)
        name = Column(Text)
    return User
@pytest.fixture
def init_models(Foo):
    # Pulls in the Foo fixture so its table is registered on the metadata
    # before a test that depends on init_models runs
    pass
def test_get_bigint_id_column_from_table(Foo):
    # The helper should locate Foo's BigIntegerID primary key column
    assert get_bigint_id_column_from_table(Foo.__table__) == Foo.id
def test_generate_next_bigint_id_sql(Foo, User):
    # Plain table name is emitted unquoted
    sql = generate_next_bigint_id_sql_for_table(Foo.__table__)
    assert sql == """ALTER TABLE foo ALTER COLUMN id set default next_bigint_id('foo_id_seq')"""
    # Reserved word "user" must be double-quoted in the ALTER TABLE
    sql = generate_next_bigint_id_sql_for_table(User.__table__)
    assert sql == """ALTER TABLE "user" ALTER COLUMN id set default next_bigint_id('user_id_seq')"""
def test_register_bigint_id_function(Base, engine, connection):
    # Just test for coverage, what can we test for?
    # Registering the DDL hook and creating tables should not raise
    register_next_bigint_id_function(metadata=Base.metadata)
    Base.metadata.create_all(engine)
def test_setup_bigint_id_for_all_tables(Base, Foo, User, session, engine):
    # End-to-end: after wiring the default on every table, a freshly
    # inserted row should receive a generated big integer id
    setup_bigint_id_for_all_tables(Base.metadata)
    Base.metadata.drop_all(engine)
    Base.metadata.create_all(engine)
    foo = Foo(name='foo')
    session.add(foo)
    session.commit()
    # Check that the ID is indeed large
    assert foo.id > 10000000
def test_alembic_next_bigint_id_ops(engine):
    # Test the migration operations work
    # (create then drop of the SQL function should round-trip cleanly)
    with engine.connect() as conn:
        context = MigrationContext.configure(conn)
        op = Operations(context)
        op.create_next_bigint_id_function()
        op.drop_next_bigint_id_function()
def test_alembic_autogenerate_next_bigint_id(Foo, connection, Base, engine):
    # Importing the migration module registers the autogenerate comparator
    from sqlalchemy_bigint_id import migration  # noqa
    context = MigrationContext.configure(
        connection=connection,
    )
    migration_script = produce_migrations(context, Base.metadata)
    # Autogenerate should emit the create-function op before table ops
    first_upgrade_op = migration_script.upgrade_ops.ops[0]
    assert isinstance(first_upgrade_op, CreateNextBigIntegerIdFunctionOp)
def test_alembic_render_bigint_id_function_ops():
    """Rendering the custom ops produces the matching op.* python calls."""
    upgrade = render_python_code(ops.UpgradeOps(ops=[CreateNextBigIntegerIdFunctionOp()]))
    downgrade = render_python_code(ops.DowngradeOps(ops=[DropNextBigIntegerIdFunctionOp()]))
    assert 'op.create_next_bigint_id_function()' in upgrade
    assert 'op.drop_next_bigint_id_function()' in downgrade
def test_alembic_migration():
    """Full autogenerate run writes the function op and the column-default DDL."""
    # Importing the test app registers its models with the metadata.
    from sqlalchemy_bigint_id.testapp import db  # noqa
    config = Config("alembic.ini")
    revision = command.revision(config, message='initial', autogenerate=True)
    script_path = revision.path
    with open(script_path) as fobj:
        generated = fobj.read()
    assert 'op.create_next_bigint_id_function' in generated
    assert """ALTER TABLE coin ALTER COLUMN id set default next_bigint_id('coin_id_seq')""" in generated
    # Clean up the generated revision file (only reached on success).
    os.remove(script_path)
|
# -*- coding: utf-8 -*-
"""A set of python modules that aid in plotting scientific data
Plotting depends on matplotlib and/or mayavi, and file reading uses h5py
to read hdf5 / xdmf files.
Note:
Modules in calculator and plot must be imported explicitly since
they have side effects on import.
Attributes:
logger (logging.Logger): a logging object whose verbosity can be
set from the command line using
        :py:func:`viscid.vutil.common_argparse`.
"""
from __future__ import print_function
import logging
import os
import re
import signal
import sys
import textwrap
import numpy
from viscid import _rc
from viscid.compat.vimportlib import import_module
__version__ = """0.98.9"""
# Submodules / subpackages imported by import_injector() below; names from
# each submodule's own __all__ are hoisted into this namespace and appended
# to this list at import time.
__all__ = ['amr_field',
           'amr_grid',
           'bucket',
           'coordinate',
           'cotr',
           'dataset',
           'dipole',
           'extools',
           'field',
           'fluidtrace',
           'grid',
           'mapfield',
           'multiplot',
           'npdatetime',
           'parallel',
           'pyeval',
           'seed',
           'sliceutil',
           'tree',
           'verror',
           'vjson',
           'vutil',
           'calculator',  # packages
           'compat',
           'cython',
           'plot',
           'readers',
           ]
#########################################
# setup logger for use throughout viscid
logger = logging.getLogger("viscid")
# StreamHandler writes to stderr by default; records render as "LEVEL: msg".
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter(fmt="%(levelname)s: %(message)s"))
logger.addHandler(_handler)
class _CustomFilter(logging.Filter, object):
    """Wrap long log messages and indent their continuation lines.

    Messages without an embedded newline are wrapped at 65 columns; every
    newline (pre-existing or introduced by wrapping) is then followed by
    enough spaces to clear the ``LEVELNAME: `` prefix in the output.
    """

    def filter(self, record):
        msg = record.msg
        # Only auto-wrap messages the caller did not already break up.
        if '\n' not in msg:
            msg = '\n'.join(textwrap.wrap(msg, width=65))
        indent = '\n' + ' ' * (len(record.levelname) + 2)
        record.msg = msg.replace('\n', indent)
        return super(_CustomFilter, self).filter(record)
logger.addFilter(_CustomFilter())
# Don't forward records to the root logger; viscid formats its own output.
logger.propagate = False
# _handler is referenced by the logger now; drop the module-level name.
del _handler
###################################################################
# this is thunder-hacky, but it's a really simple way to import
# everything in __all__ and also, if those module have an __all__,
# then bring that stuff into this namespace too
def _on_injected_import_error(name, exception, quiet=False):
    """Log a notice that an optional viscid submodule failed to import.

    Parameters
    ----------
    name : str
        Name of the submodule that failed to import.
    exception : Exception
        The exception raised by the failed import.
    quiet : bool
        When True, suppress all logging.
    """
    if quiet:
        return
    logger.error(str(exception))
    logger.error("Viscid tried to import {0}, but the import failed.\n"
                 "This module will not be available".format(name))
def import_injector(attr_list, namespace, package=None, quiet=False,
                    fatal=False):
    """import list of modules and consume their __all__ attrs

    Each name in `attr_list` is imported as a submodule of `package` and
    bound into `namespace`.  If a submodule defines ``__all__``, those
    names are also bound into `namespace` and appended to `attr_list`
    (which is mutated in place).  Names that fail to import are logged
    (unless already present in `namespace`) and removed from `attr_list`;
    the ImportError is re-raised when `fatal` is True.
    """
    additional = []
    # iterate over a copy since failed imports are removed from attr_list
    for s in list(attr_list):
        try:
            m = import_module("." + s, package=package)
            namespace[s] = m
            # print(">", package, ">", s)
            # print(">", package, ">", s, "::", getattr(m, "__all__", None))
            if hasattr(m, "__all__"):
                all_subattrs = getattr(m, "__all__")
                additional += all_subattrs
                for sub in all_subattrs:
                    # print("    ", sub, "=", getattr(m, sub))
                    namespace[sub] = getattr(m, sub)
        except ImportError as e:
            if s not in namespace:
                _on_injected_import_error(s, e, quiet=quiet)
            attr_list.remove(s)
            if fatal:
                raise
    # hoisted submodule names become part of the caller's __all__
    attr_list += additional
import_injector(__all__, globals(), package="viscid")
##############################################################
# now add some other random things into the __all__ namespace
__all__.append("logger")
# set the sample_dir so that it always points to something useful
# - for installed distribution
sample_dir = os.path.join(os.path.dirname(__file__), 'sample')
# - for in-place distribution
if not os.path.isdir(sample_dir):
    sample_dir = os.path.join(os.path.dirname(__file__), '..', 'sample')
    sample_dir = os.path.abspath(sample_dir)
# - is there a 3rd option? this shouldn't happen
if not os.path.isdir(sample_dir):
    sample_dir = "SAMPLE-DIR-NOT-FOUND"
__all__.append("sample_dir")
# now this is just too cute to pass up :)
if sys.version_info[0] >= 3:
    # hide setting to a unicode variable name in an exec b/c otherwise
    # this file wouldn't parse in python2x
    exec("π = numpy.pi") # pylint: disable=exec-used
    __all__ += ["π"]
# apply settings in the rc file
_rc.load_rc_file("~/.viscidrc")
# this block is useful for debugging, ie, immediately do a pdb.set_trace()
# on the SIGUSR2 signal
def _set_trace(seg, frame): # pylint: disable=unused-argument
    """Drop into pdb; installed as the SIGUSR2 handler below."""
    import pdb
    pdb.set_trace()
# print("Trigger pdb with: kill -SIGUSR2", os.getpid())
signal.signal(signal.SIGUSR2, _set_trace)
|
<gh_stars>0
#!/usr/bin/env python3
#/***************************************************************************//**
# @file i_udp.py
#
# @author Black-Blade
# @brief i_udp.py
# @date 13.01.2021
# @version 0.0.1 Added Doxygen-style documentation and created this file
# @see https://tools.ietf.org/html/rfc1035
#*******************************************************************************/
import socket
from socket import AF_INET, SOCK_STREAM, SO_REUSEADDR, SOL_SOCKET, SHUT_RDWR
from _thread import start_new_thread
# Import my standard classes
from log import logging
from config import Config
# This module is a library; exit immediately if executed as a script.
if __name__ == "__main__":
    quit()
class Input_UDP:
    """UDP front end: listens on a socket and hands each received datagram
    to the injected ``switch`` callable, optionally gated by ``geoip``.
    """
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Constructor of Input_UDP
    # @date   10.03.2021
    # @param  switch(pointer) - callable decoding a request; returns None or
    #                           (isblock, rxdata, blockname, dname)
    # @param  geoip(pointer)  - callable mapping host -> (ok, text); may be None,
    #                           in which case every client is accepted ("NO GEOIP")
    # @param  udpserver       - optional (server(String), port(INT)) tuple
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see
    # *******************************************************************************/
    def __init__(self,switch,geoip,udpserver=None):
        logging.debug ("")
        self._switch =switch
        self._geoip=geoip
        if udpserver is None:
            # NOTE(review): address default is Config.I_DOTSERVER while the
            # port default is Config.I_UDPPORT -- confirm this pairing is
            # intended (an I_UDPSERVER setting may have been meant).
            self._listen_addr = Config.I_DOTSERVER
            self._listen_port = Config.I_UDPPORT
        else:
            server,port =udpserver
            self._listen_addr = server
            self._listen_port = port
        # maximum datagram size read per recvfrom() call
        self._buffersize =1024
        # counters reported by logs(); names are misspelled but kept as-is
        # since they are part of the instance's attribute interface
        self._conterrequests=0
        self._conterrequest=0
        self._countererror =0
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Destructor of Input_UDP
    # @date   06.03.2021
    # @param
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see
    # *******************************************************************************/
    def __del__(self):
        logging.debug ("")
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Start the listener: spawns the receive loop on a new thread.
    # @date   06.03.2021
    # @param
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see
    # *******************************************************************************/
    def init(self):
        logging.debug ("")
        start_new_thread(self._init_thread,())
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Receive loop: binds the UDP socket and spawns a decoder thread
    #         per datagram; runs forever on its own thread.
    # @date   06.03.2021
    # @param
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see https://tools.ietf.org/html/rfc1035
    # *******************************************************************************/
    def _init_thread(self):
        logging.debug ("")
        with socket.socket(family=socket.AF_INET, type=socket.SOCK_DGRAM) as sock:
            sock.bind((self._listen_addr, self._listen_port))
            logging.info ("UDP input start from :"+str( self._listen_addr)+":"+str( self._listen_port))
            while True:
                try:
                    msg,conn = sock.recvfrom(self._buffersize)
                    # one short-lived thread per datagram
                    start_new_thread(self._decoder_thread,(sock,conn, msg))
                except OSError as err:
                    logging.error("OS error: {0}".format(err))
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Decode one external DNS request and send the reply.
    # @date   10.03.2021
    # @param  sock, conn (host, port), txdata - raw request bytes
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see https://tools.ietf.org/html/rfc1035
    # *******************************************************************************/
    def _decoder_thread(self,sock,conn, txdata):
        try:
            logging.debug ("")
            host, port = conn
            if self._geoip is None:
                ok= True
                text= "NO GEOIP"
            else:
                ok,text = self._geoip(host)
            if ok == True:
                self._conterrequests=self._conterrequests+1
                self._conterrequest=self._conterrequest+1
                data =self._switch(txdata)
                # NOTE(review): when switch returns None, no reply is sent and
                # nothing is logged for this request -- confirm intended.
                if data is not None:
                    isblock,rxdata,blockname,dname = data
                    sock.sendto(rxdata , conn)
                    logging.info("IP : "+str(host)+":"+str(port)+" :"+ text)
                    if isblock==True:
                        logging.info("Domain : "+str(dname)+ " is block true : blockname " + str(blockname))
                    else:
                        logging.info("Domain : "+str(dname)+ " is block false ")
            else:
                logging.info("IP : "+str(host)+": "+ text)
        except OSError as err:
            logging.error("OS error: {0}".format(err))
            self._countererror=self._countererror +1
            # NOTE(review): decremented even if the increment above never ran
            # (error raised before it) -- counter can go negative; confirm.
            self._conterrequest=self._conterrequest-1
    #/*******************************************************************************
    # @author Black-Blade
    # @brief  Log the request/error counters.
    # @date   08.03.2021
    # @param
    # @return
    # @version 0.0.1 Added Doxygen-style documentation and created this file
    # @see
    # *******************************************************************************/
    def logs(self):
        logging.info("REQUEST : "+str(self._conterrequest) +": REQUESTS : "+str(self._conterrequests)+": ERRORS : "+str(self._countererror))
|
# yellowbrick.features.projection
# Base class for all projection (decomposition) high dimensional data visualizers.
#
# Author: <NAME>
# Created: Wed Jul 17 08:59:33 2019 -0400
#
# Copyright (C) 2019, the scikit-yb developers
# For license information, see LICENSE.txt
#
# ID: projection.py [21eb9d2] 43993586+<EMAIL> $
"""
Base class for all projection (decomposition) high dimensional data visualizers.
"""
##########################################################################
## Imports
##########################################################################
import warnings
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import make_axes_locatable
import mpl_toolkits.mplot3d # noqa
from yellowbrick.draw import manual_legend
from yellowbrick.features.base import DataVisualizer, TargetType
from yellowbrick.exceptions import YellowbrickValueError, YellowbrickWarning, NotFitted
##########################################################################
## Projection Visualizers
##########################################################################
class ProjectionVisualizer(DataVisualizer):
    """
    The ProjectionVisualizer provides functionality for projecting a multi-dimensional
    dataset into either 2 or 3 components so they can be plotted as a scatter plot on
    2d or 3d axes. The visualizer acts as a transformer, and draws the transformed data
    on behalf of the user. Because it is a DataVisualizer, the ProjectionVisualizer
    can plot continuous scatter plots with a colormap or discrete scatter plots with
    a legend.
    This visualizer is a base class and is not intended to be used directly.
    Subclasses should implement a ``transform()`` method that calls ``draw()`` using
    the transformed data and the optional target as input.
    Parameters
    ----------
    ax : matplotlib Axes, default: None
        The axes to plot the figure on. If None is passed in the current axes.
        will be used (or generated if required).
    features : list, default: None
        The names of the features specified by the columns of the input dataset.
        This length of this list must match the number of columns in X, otherwise
        an exception will be raised on ``fit()``.
    classes : list, default: None
        The class labels for each class in y, ordered by sorted class index. These
        names act as a label encoder for the legend, identifying integer classes
        or renaming string labels. If omitted, the class labels will be taken from
        the unique values in y.
        Note that the length of this list must match the number of unique values in
        y, otherwise an exception is raised. This parameter is only used in the
        discrete target type case and is ignored otherwise.
    colors : list or tuple, default: None
        A single color to plot all instances as or a list of colors to color each
        instance according to its class in the discrete case or as an ordered
        colormap in the sequential case. If not enough colors per class are
        specified then the colors are treated as a cycle.
    colormap : string or cmap, default: None
        The colormap used to create the individual colors. In the discrete case
        it is used to compute the number of colors needed for each class and
        in the continuous case it is used to create a sequential color map based
        on the range of the target.
    target_type : str, default: "auto"
        Specify the type of target as either "discrete" (classes) or "continuous"
        (real numbers, usually for regression). If "auto", then it will
        attempt to determine the type by counting the number of unique values.
        If the target is discrete, the colors are returned as a dict with classes
        being the keys. If continuous the colors will be list having value of
        color for each point. In either case, if no target is specified, then
        color will be specified as the first color in the color cycle.
    projection : int or string, default: 2
        The number of axes to project into, either 2d or 3d. To plot 3d plots
        with matplotlib, please ensure a 3d axes is passed to the visualizer,
        otherwise one will be created using the current figure.
    alpha : float, default: 0.75
        Specify a transparency where 1 is completely opaque and 0 is completely
        transparent. This property makes densely clustered points more visible.
    colorbar : bool, default: True
        If the target_type is "continuous" draw a colorbar to the right of the
        scatter plot. The colorbar axes is accessible using the cax property.
    kwargs : dict
        Keyword arguments that are passed to the base class and may influence
        the visualization as defined in other Visualizers.
    """
    def __init__(
        self,
        ax=None,
        features=None,
        classes=None,
        colors=None,
        colormap=None,
        target_type="auto",
        projection=2,
        alpha=0.75,
        colorbar=True,
        **kwargs
    ):
        super(ProjectionVisualizer, self).__init__(
            ax=ax,
            features=features,
            classes=classes,
            colors=colors,
            colormap=colormap,
            target_type=target_type,
            **kwargs
        )
        # Convert string to integer
        if isinstance(projection, str):
            if projection in {"2D", "2d"}:
                projection = 2
            if projection in {"3D", "3d"}:
                projection = 3
        if projection not in {2, 3}:
            raise YellowbrickValueError("Projection dimensions must be either 2 or 3")
        self.projection = projection
        # NOTE(review): reading self.ax here may lazily create an axes (see
        # the ax property below) as a side effect of this check -- confirm
        # that is intended during __init__.
        if self.ax.name != "3d" and self.projection == 3:
            warnings.warn(
                "data projection to 3 dimensions requires a 3d axes to draw on.",
                YellowbrickWarning,
            )
        self.alpha = alpha
        self.colorbar = colorbar
        # colorbar axes; created lazily in layout() for 2d continuous targets
        self._cax = None
    @property
    def cax(self):
        """
        The axes of the colorbar, right of the scatterplot.
        """
        if self._cax is None:
            raise AttributeError("This visualizer does not have an axes for colorbar")
        return self._cax
    @property
    def ax(self):
        """
        Overloads the axes property from base class. If no axes is specified then
        creates an axes for users. A 3d axes is created for 3 dimensional plots.
        """
        if not hasattr(self, "_ax") or self._ax is None:
            if self.projection == 3:
                fig = plt.gcf()
                self._ax = fig.add_subplot(111, projection="3d")
            else:
                self._ax = plt.gca()
        return self._ax
    @ax.setter
    def ax(self, ax):
        self._ax = ax
    def layout(self, divider=None):
        """
        Creates the layout for colorbar when target type is continuous.
        The colorbar is added to the right of the scatterplot.
        Subclasses can override this method to add other axes or layouts.
        Parameters
        ----------
        divider: AxesDivider
            An AxesDivider to be passed among all layout calls.
        """
        # Only lay out a colorbar axes once, and only for a 2d continuous plot.
        if (
            self._target_color_type == TargetType.CONTINUOUS
            and self.projection == 2
            and self.colorbar
            and self._cax is None
        ):
            # Ensure matplotlib version compatibility
            if make_axes_locatable is None:
                raise YellowbrickValueError(
                    (
                        "Colorbar requires matplotlib 2.0.2 or greater "
                        "please upgrade matplotlib"
                    )
                )
            # Create the new axes for the colorbar
            if divider is None:
                divider = make_axes_locatable(self.ax)
            self._cax = divider.append_axes("right", size="5%", pad=0.3)
            self._cax.set_yticks([])
            self._cax.set_xticks([])
    def fit_transform(self, X, y=None):
        """
        Fits the visualizer on the input data, and returns transformed X.
        Parameters
        ----------
        X : ndarray or DataFrame of shape n x m
            A matrix or data frame of n instances with m features where m>2.
        y : array-like of shape (n,), optional
            A vector or series with target values for each instance in X. This
            vector is used to determine the color of the points in X.
        Returns
        -------
        Xprime : array-like of shape (n, 2)
            Returns the 2-dimensional embedding of the instances.
        """
        return self.fit(X, y).transform(X, y)
    def draw(self, Xp, y=None):
        """
        Draws the points described by Xp and colored by the points in y. Can be
        called multiple times before finalize to add more scatter plots to the
        axes, however ``fit()`` must be called before use.
        Parameters
        ----------
        Xp : array-like of shape (n, 2) or (n, 3)
            The matrix produced by the ``transform()`` method.
        y : array-like of shape (n,), optional
            The target, used to specify the colors of the points.
        Returns
        -------
        self.ax : matplotlib Axes object
            Returns the axes that the scatter plot was drawn on.
        """
        scatter_kwargs = self._determine_scatter_kwargs(y)
        # Draws the layout of the visualizer. It draws the axes for colorbars,
        # heatmap, etc.
        self.layout()
        if self.projection == 2:
            # Adds colorbar axis for continuous target type.
            self.ax.scatter(Xp[:, 0], Xp[:, 1], **scatter_kwargs)
        if self.projection == 3:
            self.ax.scatter(Xp[:, 0], Xp[:, 1], Xp[:, 2], **scatter_kwargs)
        return self.ax
    def finalize(self):
        """
        Draws legends and colorbar for scatter plots.
        """
        # Projected components are unitless, so hide tick labels.
        self.ax.set_xticklabels([])
        self.ax.set_yticklabels([])
        if self.projection == 3:
            self.ax.set_zticklabels([])
        if self._target_color_type == TargetType.DISCRETE:
            # Add the legend
            manual_legend(
                self, self.classes_, list(self._colors.values()), frameon=True
            )
        elif self._target_color_type == TargetType.CONTINUOUS:
            if self.colorbar:
                if self.projection == 3:
                    sm = plt.cm.ScalarMappable(cmap=self._colors, norm=self._norm)
                    # Avoid MPL TypeError: "You must first set_array for mappable"
                    sm.set_array([])
                    self.cbar = plt.colorbar(sm, ax=self.ax)
                else:
                    # Manually draw the colorbar.
                    self.cbar = mpl.colorbar.ColorbarBase(
                        self.cax, cmap=self._colors, norm=self._norm
                    )
    def _determine_scatter_kwargs(self, y=None):
        """
        Determines scatter arguments to pass into ``plt.scatter()``. If y is
        discrete or single then determine colors. If continuous then determine
        colors and colormap. Also normalize to range.
        Parameters
        ----------
        y : array-like of shape (n,), optional
            The target, used to specify the colors of the points for continuous
            target.
        """
        scatter_kwargs = {"alpha": self.alpha}
        # Determine the colors
        if self._target_color_type == TargetType.SINGLE:
            scatter_kwargs["c"] = self._colors
        elif self._target_color_type == TargetType.DISCRETE:
            if y is None:
                raise YellowbrickValueError("y is required for discrete target")
            try:
                scatter_kwargs["c"] = [self._colors[self.classes_[yi]] for yi in y]
            except IndexError:
                raise YellowbrickValueError("Target needs to be label encoded.")
        elif self._target_color_type == TargetType.CONTINUOUS:
            if y is None:
                raise YellowbrickValueError("y is required for continuous target")
            scatter_kwargs["c"] = y
            scatter_kwargs["cmap"] = self._colors
            # self.range_ is set during fit; used to normalize the colormap.
            self._norm = mpl.colors.Normalize(vmin=self.range_[0], vmax=self.range_[1])
        else:
            # Technically this should never be raised
            raise NotFitted("could not determine target color type")
        return scatter_kwargs
|
# *****************************************************************************
# Copyright (c) 2019-2020, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from __future__ import print_function, division, absolute_import
from collections import namedtuple
import copy
import warnings
import numba
from numba import ir, ir_utils, types
from numba.ir_utils import (find_topo_order, guard, get_definition, require,
find_callname, mk_unique_var, compile_to_numba_ir,
replace_arg_nodes, build_definitions,
find_build_sequence, find_const)
from numba.parfor import Parfor
from numba.parfor import wrap_parfor_blocks, unwrap_parfor_blocks
import sdc
import sdc.io
import sdc.io.np_io
from sdc.hiframes.pd_series_ext import SeriesType
from sdc.utilities.utils import (get_constant, is_alloc_callname,
is_whole_slice, is_array, is_array_container,
is_np_array, find_build_tuple, debug_prints,
is_const_slice)
from sdc.hiframes.pd_dataframe_ext import DataFrameType
from enum import Enum
class Distribution(Enum):
    # Distribution states; numeric values form a lattice where a smaller
    # value is more restrictive -- the analysis combines states with
    # min() over .value (see _analyze_parfor), so do not reorder/renumber.
    REP = 1        # replicated on every process (most restrictive fallback)
    Thread = 2
    TwoD = 3
    OneD_Var = 4   # presumably 1D with variable-size chunks -- TODO confirm
    OneD = 5       # presumably 1D block distribution -- TODO confirm
# Analysis result: array name -> Distribution, parfor id -> Distribution.
_dist_analysis_result = namedtuple(
    'dist_analysis_result', 'array_dists,parfor_dists')
# Hook table: IR statement type -> analysis function (see _analyze_block).
distributed_analysis_extensions = {}
# If True, run() rebalances OneD_Var arrays and restarts the analysis.
auto_rebalance = False
class DistributedAnalysis(object):
    """Analyze program for distributed transformation"""
    # Registered per-(type, funcname) call analyses; see add_call_analysis().
    _extra_call = {}
@classmethod
def add_call_analysis(cls, typ, func, analysis_func):
'''
External modules/packages (like daal4py) can register their own call-analysis.
Analysis funcs are stored in a dict with keys (typ, funcname)
'''
assert (typ, func) not in cls._extra_call
cls._extra_call[(typ, func)] = analysis_func
def __init__(self, func_ir, typemap, calltypes, typingctx, metadata):
self.func_ir = func_ir
self.typemap = typemap
self.calltypes = calltypes
self.typingctx = typingctx
self.metadata = metadata
def _init_run(self):
self.func_ir._definitions = build_definitions(self.func_ir.blocks)
self._parallel_accesses = set()
self._T_arrs = set()
self.second_pass = False
self.in_parallel_parfor = -1
def run(self):
self._init_run()
blocks = self.func_ir.blocks
array_dists = {}
parfor_dists = {}
topo_order = find_topo_order(blocks)
self._run_analysis(self.func_ir.blocks, topo_order,
array_dists, parfor_dists)
self.second_pass = True
self._run_analysis(self.func_ir.blocks, topo_order,
array_dists, parfor_dists)
# rebalance arrays if necessary
if auto_rebalance and Distribution.OneD_Var in array_dists.values():
changed = self._rebalance_arrs(array_dists, parfor_dists)
if changed:
return self.run()
return _dist_analysis_result(array_dists=array_dists, parfor_dists=parfor_dists)
def _run_analysis(self, blocks, topo_order, array_dists, parfor_dists):
save_array_dists = {}
save_parfor_dists = {1: 1} # dummy value
# fixed-point iteration
while array_dists != save_array_dists or parfor_dists != save_parfor_dists:
save_array_dists = copy.copy(array_dists)
save_parfor_dists = copy.copy(parfor_dists)
for label in topo_order:
self._analyze_block(blocks[label], array_dists, parfor_dists)
def _analyze_block(self, block, array_dists, parfor_dists):
for inst in block.body:
if isinstance(inst, ir.Assign):
self._analyze_assign(inst, array_dists, parfor_dists)
elif isinstance(inst, Parfor):
self._analyze_parfor(inst, array_dists, parfor_dists)
elif isinstance(inst, (ir.SetItem, ir.StaticSetItem)):
self._analyze_setitem(inst, array_dists)
# elif isinstance(inst, ir.Print):
# continue
elif type(inst) in distributed_analysis_extensions:
# let external calls handle stmt if type matches
f = distributed_analysis_extensions[type(inst)]
f(inst, array_dists)
else:
self._set_REP(inst.list_vars(), array_dists)
    def _analyze_assign(self, inst, array_dists, parfor_dists):
        """Update distribution info for one ir.Assign statement.

        Each branch below handles one rhs form; anything unrecognized falls
        through to the final else, which forces all involved variables to
        REP (replicated). Branch order matters.
        """
        lhs = inst.target.name
        rhs = inst.value
        # treat return casts like assignments
        if isinstance(rhs, ir.Expr) and rhs.op == 'cast':
            rhs = rhs.value
        # plain copy of an array/Series/DataFrame: both sides share a dist
        if isinstance(rhs, ir.Var) and (is_array(self.typemap, lhs)
                or isinstance(self.typemap[lhs], (SeriesType, DataFrameType))
                or is_array_container(self.typemap, lhs)):
            self._meet_array_dists(lhs, rhs.name, array_dists)
            return
        elif (is_array(self.typemap, lhs)
                and isinstance(rhs, ir.Expr)
                and rhs.op == 'inplace_binop'):
            # distributions of all 3 variables should meet (lhs, arg1, arg2)
            arg1 = rhs.lhs.name
            arg2 = rhs.rhs.name
            dist = self._meet_array_dists(arg1, arg2, array_dists)
            dist = self._meet_array_dists(arg1, lhs, array_dists, dist)
            self._meet_array_dists(arg1, arg2, array_dists, dist)
            return
        elif isinstance(rhs, ir.Expr) and rhs.op in ['getitem', 'static_getitem']:
            self._analyze_getitem(inst, lhs, rhs, array_dists)
            return
        elif isinstance(rhs, ir.Expr) and rhs.op == 'build_tuple':
            # parallel arrays can be packed and unpacked from tuples
            # e.g. boolean array index in test_getitem_multidim
            return
        elif (isinstance(rhs, ir.Expr) and rhs.op == 'getattr' and rhs.attr == 'T'
                and is_array(self.typemap, lhs)):
            # array and its transpose have same distributions
            arr = rhs.value.name
            self._meet_array_dists(lhs, arr, array_dists)
            # keep lhs in table for dot() handling
            self._T_arrs.add(lhs)
            return
        elif (isinstance(rhs, ir.Expr) and rhs.op == 'getattr'
                and isinstance(self.typemap[rhs.value.name], DataFrameType)
                and rhs.attr == 'to_csv'):
            # bound-method lookup alone doesn't constrain the DataFrame
            return
        elif (isinstance(rhs, ir.Expr) and rhs.op == 'getattr'
                and rhs.attr in ['shape', 'ndim', 'size', 'strides', 'dtype',
                                 'itemsize', 'astype', 'reshape', 'ctypes',
                                 'transpose', 'tofile', 'copy']):
            pass  # X.shape doesn't affect X distribution
        elif isinstance(rhs, ir.Expr) and rhs.op == 'call':
            self._analyze_call(lhs, rhs, rhs.func.name, rhs.args, array_dists)
        # handle for A in arr_container: ...
        # A = pair_first(iternext(getiter(arr_container)))
        # TODO: support getitem of container
        elif isinstance(rhs, ir.Expr) and rhs.op == 'pair_first' and is_array(self.typemap, lhs):
            arr_container = guard(_get_pair_first_container, self.func_ir, rhs)
            if arr_container is not None:
                self._meet_array_dists(lhs, arr_container.name, array_dists)
                return
        elif isinstance(rhs, ir.Expr) and rhs.op in ('getiter', 'iternext'):
            # analyze array container access in pair_first
            return
        elif isinstance(rhs, ir.Arg):
            # function arguments: distribution comes from user annotations
            # stored in the compiler metadata; default is replicated
            distributed_key = 'distributed'
            threaded_key = 'threaded'
            if distributed_key not in self.metadata.keys():
                self.metadata[distributed_key] = {}
            if threaded_key not in self.metadata.keys():
                self.metadata[threaded_key] = {}
            if rhs.name in self.metadata[distributed_key]:
                if lhs not in array_dists:
                    array_dists[lhs] = Distribution.OneD
            elif rhs.name in self.metadata[threaded_key]:
                if lhs not in array_dists:
                    array_dists[lhs] = Distribution.Thread
            else:
                dprint("replicated input ", rhs.name, lhs)
                self._set_REP([inst.target], array_dists)
        else:
            self._set_REP(inst.list_vars(), array_dists)
        return
    def _analyze_parfor(self, parfor, array_dists, parfor_dists):
        """Infer the distribution of a parfor and the arrays it accesses.

        The parfor's distribution is the meet (min over Distribution.value)
        of its current state and the states of all arrays it accesses in
        parallel; that result is then written back to those arrays.
        """
        if parfor.id not in parfor_dists:
            parfor_dists[parfor.id] = Distribution.OneD
        # analyze init block first to see array definitions
        self._analyze_block(parfor.init_block, array_dists, parfor_dists)
        out_dist = Distribution.OneD
        # a parfor nested inside another parallel parfor cannot be parallel
        if self.in_parallel_parfor != -1:
            out_dist = Distribution.REP
        parfor_arrs = set()  # arrays this parfor accesses in parallel
        array_accesses = _get_array_accesses(
            parfor.loop_body, self.func_ir, self.typemap)
        par_index_var = parfor.loop_nests[0].index_variable.name
        #stencil_accesses, _ = get_stencil_accesses(parfor, self.typemap)
        for (arr, index) in array_accesses:
            # XXX sometimes copy propagation doesn't work for parfor indices
            # so see if the index has a single variable definition and use it
            # e.g. test_to_numeric
            ind_def = self.func_ir._definitions[index]
            if len(ind_def) == 1 and isinstance(ind_def[0], ir.Var):
                index = ind_def[0].name
            if index == par_index_var:  # or index in stencil_accesses:
                parfor_arrs.add(arr)
                self._parallel_accesses.add((arr, index))
            # multi-dim case
            tup_list = guard(find_build_tuple, self.func_ir, index)
            if tup_list is not None:
                index_tuple = [var.name for var in tup_list]
                if index_tuple[0] == par_index_var:
                    parfor_arrs.add(arr)
                    self._parallel_accesses.add((arr, index))
                if par_index_var in index_tuple[1:]:
                    # parallel index used on a non-leading dimension forces REP
                    out_dist = Distribution.REP
            # TODO: check for index dependency
        for arr in parfor_arrs:
            if arr in array_dists:
                out_dist = Distribution(
                    min(out_dist.value, array_dists[arr].value))
        parfor_dists[parfor.id] = out_dist
        # propagate the combined state back to the accessed arrays
        for arr in parfor_arrs:
            if arr in array_dists:
                array_dists[arr] = out_dist
        # TODO: find prange actually coming from user
        # for pattern in parfor.patterns:
        #     if pattern[0] == 'prange' and not self.in_parallel_parfor:
        #         parfor_dists[parfor.id] = Distribution.OneD
        # run analysis recursively on parfor body
        if self.second_pass and out_dist in [Distribution.OneD,
                                             Distribution.OneD_Var]:
            self.in_parallel_parfor = parfor.id
            blocks = wrap_parfor_blocks(parfor)
            for b in blocks.values():
                self._analyze_block(b, array_dists, parfor_dists)
            unwrap_parfor_blocks(parfor)
            if self.in_parallel_parfor == parfor.id:
                self.in_parallel_parfor = -1
        return
def _analyze_call(self, lhs, rhs, func_var, args, array_dists):
    """Analyze array distributions in function calls.

    Dispatches on the resolved callee ``(func_name, func_mod)`` and mutates
    ``array_dists`` in place. Branch order matters: more specific callees are
    handled first, and any call that is not recognized falls through to
    ``_analyze_call_set_REP``, which forces array arguments and output to REP.

    :param lhs: name of the variable the call result is assigned to
    :param rhs: the ``ir.Expr`` call expression
    :param func_var: variable holding the callee (unused directly; resolution
        goes through ``find_callname`` on ``rhs``)
    :param args: list of argument variables
    :param array_dists: mutable mapping of array name -> Distribution
    """
    func_name = ""
    func_mod = ""
    fdef = guard(find_callname, self.func_ir, rhs, self.typemap)
    if fdef is None:
        # check ObjModeLiftedWith, we assume distribution doesn't change
        # blocks of data are passed in, TODO: document
        func_def = guard(get_definition, self.func_ir, rhs.func)
        if isinstance(func_def, ir.Const) and isinstance(func_def.value,
                numba.dispatcher.ObjModeLiftedWith):
            return
        warnings.warn(
            "function call couldn't be found for distributed analysis")
        self._analyze_call_set_REP(lhs, args, array_dists, fdef)
        return
    else:
        func_name, func_mod = fdef

    # allocation calls (np.zeros etc.) create new arrays: default to OneD
    if is_alloc_callname(func_name, func_mod):
        if lhs not in array_dists:
            array_dists[lhs] = Distribution.OneD
        return

    # numpy direct functions
    if isinstance(func_mod, str) and func_mod == 'numpy':
        self._analyze_call_np(lhs, func_name, args, array_dists)
        return

    # handle array.func calls
    if isinstance(func_mod, ir.Var) and is_array(self.typemap, func_mod.name):
        self._analyze_call_array(lhs, func_mod, func_name, args, array_dists)
        return

    # handle df.func calls
    if isinstance(func_mod, ir.Var) and isinstance(
            self.typemap[func_mod.name], DataFrameType):
        self._analyze_call_df(lhs, func_mod, func_name, args, array_dists)
        return

    # sdc.distributed_api functions
    if isinstance(func_mod, str) and func_mod == 'sdc.distributed_api':
        self._analyze_call_hpat_dist(lhs, func_name, args, array_dists)
        return

    # len() does not constrain any distribution
    if func_name == 'len' and func_mod in ('__builtin__', 'builtins'):
        return

    if fdef == ('quantile', 'sdc.hiframes.api'):
        # quantile doesn't affect input's distribution
        return
    if fdef == ('nunique', 'sdc.hiframes.api'):
        # nunique doesn't affect input's distribution
        return
    if fdef == ('unique', 'sdc.hiframes.api'):
        # doesn't affect distribution of input since input can stay 1D
        if lhs not in array_dists:
            array_dists[lhs] = Distribution.OneD_Var
        new_dist = Distribution(min(array_dists[lhs].value,
                                    array_dists[rhs.args[0].name].value))
        array_dists[lhs] = new_dist
        return
    if fdef == ('rolling_fixed', 'sdc.hiframes.rolling'):
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return
    if fdef == ('rolling_variable', 'sdc.hiframes.rolling'):
        # lhs, in_arr, on_arr should have the same distribution
        new_dist = self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        new_dist = self._meet_array_dists(lhs, rhs.args[1].name, array_dists, new_dist)
        array_dists[rhs.args[0].name] = new_dist
        return
    if fdef == ('shift', 'sdc.hiframes.rolling'):
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return
    if fdef == ('pct_change', 'sdc.hiframes.rolling'):
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return
    if fdef == ('nlargest', 'sdc.hiframes.api'):
        # output of nlargest is REP
        array_dists[lhs] = Distribution.REP
        return
    if fdef == ('median', 'sdc.hiframes.api'):
        return
    if fdef == ('concat', 'sdc.hiframes.api'):
        # hiframes concat is similar to np.concatenate
        self._analyze_call_np_concatenate(lhs, args, array_dists)
        return
    if fdef == ('isna', 'sdc.hiframes.api'):
        return
    if fdef == ('get_series_name', 'sdc.hiframes.api'):
        return

    # dummy hiframes functions: output meets first argument's distribution
    if func_mod == 'sdc.hiframes.api' and func_name in ('get_series_data',
            'get_series_index',
            'to_arr_from_series', 'ts_series_to_arr_typ',
            'to_date_series_type', 'dummy_unbox_series',
            'parallel_fix_df_array'):
        # TODO: support Series type similar to Array
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return

    if fdef == ('init_series', 'sdc.hiframes.api'):
        # lhs, in_arr, and index should have the same distribution
        new_dist = self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        if len(rhs.args) > 1 and self.typemap[rhs.args[1].name] != types.none:
            new_dist = self._meet_array_dists(lhs, rhs.args[1].name, array_dists, new_dist)
            array_dists[rhs.args[0].name] = new_dist
        return
    if fdef == ('init_dataframe', 'sdc.hiframes.pd_dataframe_ext'):
        # lhs, data arrays, and index should have the same distribution
        df_typ = self.typemap[lhs]
        n_cols = len(df_typ.columns)
        for i in range(n_cols):
            new_dist = self._meet_array_dists(lhs, rhs.args[i].name, array_dists)
        # handle index
        if len(rhs.args) > n_cols and self.typemap[rhs.args[n_cols].name] != types.none:
            new_dist = self._meet_array_dists(lhs, rhs.args[n_cols].name, array_dists, new_dist)
        # propagate the final meet back to all column arrays
        for i in range(n_cols):
            array_dists[rhs.args[i].name] = new_dist
        return
    if fdef == ('get_dataframe_data', 'sdc.hiframes.pd_dataframe_ext'):
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return
    if fdef == ('compute_split_view', 'sdc.hiframes.split_impl'):
        self._meet_array_dists(lhs, rhs.args[0].name, array_dists)
        return
    if fdef == ('get_split_view_index', 'sdc.hiframes.split_impl'):
        # just used in str.get() implementation for now so we know it is
        # parallel
        # TODO: handle index similar to getitem to support more cases
        return
    if fdef == ('get_split_view_data_ptr', 'sdc.hiframes.split_impl'):
        return
    if fdef == ('setitem_str_arr_ptr', 'sdc.str_arr_ext'):
        return
    if fdef == ('num_total_chars', 'sdc.str_arr_ext'):
        return
    if fdef == ('_series_dropna_str_alloc_impl_inner', 'sdc.hiframes.series_kernels'):
        # dropna output length varies per rank, so at best 1D_Var
        if lhs not in array_dists:
            array_dists[lhs] = Distribution.OneD_Var
        in_dist = array_dists[rhs.args[0].name]
        out_dist = array_dists[lhs]
        out_dist = Distribution(min(out_dist.value, in_dist.value))
        array_dists[lhs] = out_dist
        # output can cause input REP
        if out_dist != Distribution.OneD_Var:
            array_dists[rhs.args[0].name] = out_dist
        return
    if (fdef == ('copy_non_null_offsets', 'sdc.str_arr_ext')
            or fdef == ('copy_data', 'sdc.str_arr_ext')):
        # these copy helpers write arg0 from arg1: meet their distributions
        out_arrname = rhs.args[0].name
        in_arrname = rhs.args[1].name
        self._meet_array_dists(out_arrname, in_arrname, array_dists)
        return
    if fdef == ('str_arr_item_to_numeric', 'sdc.str_arr_ext'):
        out_arrname = rhs.args[0].name
        in_arrname = rhs.args[2].name
        self._meet_array_dists(out_arrname, in_arrname, array_dists)
        return

    # np.fromfile()
    if fdef == ('file_read', 'sdc.io.np_io'):
        return
    if sdc.config._has_pyarrow and fdef == ('read_parquet', 'sdc.io.parquet_pio'):
        return
    if sdc.config._has_pyarrow and fdef == ('read_parquet_str', 'sdc.io.parquet_pio'):
        # string read creates array in output
        if lhs not in array_dists:
            array_dists[lhs] = Distribution.OneD
        return

    # TODO: make sure assert_equiv is not generated unnecessarily
    # TODO: fix assert_equiv for np.stack from df.value
    if fdef == ('assert_equiv', 'numba.array_analysis'):
        return

    # we perform call-analysis from external at the end
    if isinstance(func_mod, ir.Var):
        ky = (self.typemap[func_mod.name], func_name)
        if ky in DistributedAnalysis._extra_call:
            # externally-registered handler; returns True if it handled the call
            if DistributedAnalysis._extra_call[ky](lhs, func_mod, *ky, args, array_dists):
                return

    # set REP if not found
    self._analyze_call_set_REP(lhs, args, array_dists, fdef)
def _analyze_call_np(self, lhs, func_name, args, array_dists):
    """Analyze distributions of numpy functions (``np.func_name``).

    Mutates ``array_dists`` in place; any numpy function not handled
    explicitly falls through to ``_analyze_call_set_REP``.
    """
    if func_name == 'ascontiguousarray':
        self._meet_array_dists(lhs, args[0].name, array_dists)
        return
    if func_name == 'ravel':
        self._meet_array_dists(lhs, args[0].name, array_dists)
        return
    if func_name == 'concatenate':
        self._analyze_call_np_concatenate(lhs, args, array_dists)
        return
    if func_name == 'array' and is_array(self.typemap, args[0].name):
        self._meet_array_dists(lhs, args[0].name, array_dists)
        return

    # sum over the first axis is distributed, A.sum(0)
    if func_name == 'sum' and len(args) == 2:
        axis_def = guard(get_definition, self.func_ir, args[1])
        if isinstance(axis_def, ir.Const) and axis_def.value == 0:
            # result is a reduced (small) array, replicated on all ranks
            array_dists[lhs] = Distribution.REP
            return

    if func_name == 'dot':
        self._analyze_call_np_dot(lhs, args, array_dists)
        return

    # used in df.values
    if func_name == 'stack':
        seq_info = guard(find_build_sequence, self.func_ir, args[0])
        if seq_info is None:
            # can't see the input sequence: give up and force REP
            self._analyze_call_set_REP(lhs, args, array_dists, 'np.' + func_name)
            return
        in_arrs, _ = seq_info
        axis = 0
        # TODO: support kws
        # if 'axis' in kws:
        #     axis = find_const(self.func_ir, kws['axis'])
        if len(args) > 1:
            axis = find_const(self.func_ir, args[1])
        # parallel if args are 1D and output is 2D and axis == 1
        if axis is not None and axis == 1 and self.typemap[lhs].ndim == 2:
            for v in in_arrs:
                self._meet_array_dists(lhs, v.name, array_dists)
            return
        # NOTE: other stack axes fall through to the REP default below

    # elementwise/copy-like functions: output follows the input's distribution
    if (func_name in ['cumsum', 'cumprod', 'empty_like',
                      'zeros_like', 'ones_like', 'full_like', 'copy']):
        in_arr = args[0].name
        self._meet_array_dists(lhs, in_arr, array_dists)
        return

    # set REP if not found
    self._analyze_call_set_REP(lhs, args, array_dists, 'np.' + func_name)
def _analyze_call_array(self, lhs, arr, func_name, args, array_dists):
    """Analyze distributions of array method calls (``arr.func_name``).

    :param arr: the ``ir.Var`` the method is called on
    Raises ValueError for unsupported transpose forms.
    """
    if func_name == 'transpose':
        if len(args) == 0:
            raise ValueError("Transpose with no arguments is not"
                             " supported")
        in_arr_name = arr.name
        arg0 = guard(get_constant, self.func_ir, args[0])
        if isinstance(arg0, tuple):
            arg0 = arg0[0]
        # only transposes keeping the first (distributed) axis are allowed
        if arg0 != 0:
            raise ValueError("Transpose with non-zero first argument"
                             " is not supported")
        self._meet_array_dists(lhs, in_arr_name, array_dists)
        return

    if func_name in ('astype', 'reshape', 'copy'):
        in_arr_name = arr.name
        self._meet_array_dists(lhs, in_arr_name, array_dists)
        # TODO: support 1D_Var reshape
        if func_name == 'reshape' and array_dists[lhs] == Distribution.OneD_Var:
            # HACK support A.reshape(n, 1) for 1D_Var
            if len(args) == 2 and guard(find_const, self.func_ir, args[1]) == 1:
                return
            self._analyze_call_set_REP(lhs, args, array_dists, 'array.' + func_name)
        return

    # Array.tofile() is supported for all distributions
    if func_name == 'tofile':
        return

    # set REP if not found
    self._analyze_call_set_REP(lhs, args, array_dists, 'array.' + func_name)
def _analyze_call_df(self, lhs, arr, func_name, args, array_dists):
# to_csv() can be parallelized
if func_name == 'to_csv':
return
# set REP if not found
self._analyze_call_set_REP(lhs, args, array_dists, 'df.' + func_name)
def _analyze_call_hpat_dist(self, lhs, func_name, args, array_dists):
"""analyze distributions of hpat distributed functions
(sdc.distributed_api.func_name)
"""
if func_name == 'local_len':
return
if func_name == 'parallel_print':
return
if func_name == 'dist_return':
arr_name = args[0].name
assert arr_name in array_dists, "array distribution not found"
if array_dists[arr_name] == Distribution.REP:
raise ValueError("distributed return of array {} not valid"
" since it is replicated".format(arr_name))
return
if func_name == 'threaded_return':
arr_name = args[0].name
assert arr_name in array_dists, "array distribution not found"
if array_dists[arr_name] == Distribution.REP:
raise ValueError("threaded return of array {} not valid"
" since it is replicated")
array_dists[arr_name] = Distribution.Thread
return
if func_name == 'rebalance_array':
if lhs not in array_dists:
array_dists[lhs] = Distribution.OneD
in_arr = args[0].name
if array_dists[in_arr] == Distribution.OneD_Var:
array_dists[lhs] = Distribution.OneD
else:
self._meet_array_dists(lhs, in_arr, array_dists)
return
# set REP if not found
self._analyze_call_set_REP(lhs, args, array_dists, 'sdc.distributed_api.' + func_name)
def _analyze_call_np_concatenate(self, lhs, args, array_dists):
    """Analyze np.concatenate-style calls: all inputs meet to one
    distribution and the output is at best 1D_Var (local block sizes may
    not sum to an exact even 1D split)."""
    assert len(args) == 1
    tup_def = guard(get_definition, self.func_ir, args[0])
    assert isinstance(tup_def, ir.Expr) and tup_def.op == 'build_tuple'
    input_arrs = tup_def.items
    # meet of all input distributions, bounded above by OneD
    meet_val = Distribution.OneD.value
    for arr_var in input_arrs:
        meet_val = min(meet_val, array_dists[arr_var.name].value)
    in_dist = Distribution(meet_val)
    # OneD_Var since sum of block sizes might not be exactly 1D
    out_dist = Distribution(min(Distribution.OneD_Var.value, in_dist.value))
    array_dists[lhs] = out_dist
    # a more restrictive output (e.g. REP) propagates back to the inputs
    if out_dist != Distribution.OneD_Var:
        in_dist = out_dist
    for arr_var in input_arrs:
        array_dists[arr_var.name] = in_dist
def _analyze_call_np_dot(self, lhs, args, array_dists):
    """Analyze ``np.dot`` calls, distinguishing vector/matrix cases.

    The case analysis depends on argument ranks and whether an argument
    came from ``X.T`` (tracked in ``self._T_arrs``). Unmatched patterns
    force REP.
    """
    arg0 = args[0].name
    arg1 = args[1].name
    ndim0 = self.typemap[arg0].ndim
    ndim1 = self.typemap[arg1].ndim
    dist0 = array_dists[arg0]
    dist1 = array_dists[arg1]
    # Fortran layout is caused by X.T and means transpose
    t0 = arg0 in self._T_arrs
    t1 = arg1 in self._T_arrs

    if ndim0 == 1 and ndim1 == 1:
        # vector dot, both vectors should have same layout
        new_dist = Distribution(min(array_dists[arg0].value,
                                    array_dists[arg1].value))
        array_dists[arg0] = new_dist
        array_dists[arg1] = new_dist
        return
    if ndim0 == 2 and ndim1 == 1 and not t0:
        # special case were arg1 vector is treated as column vector
        # samples dot weights: np.dot(X,w)
        # w is always REP
        array_dists[arg1] = Distribution.REP
        if lhs not in array_dists:
            array_dists[lhs] = Distribution.OneD
        # lhs and X have same distribution
        self._meet_array_dists(lhs, arg0, array_dists)
        dprint("dot case 1 Xw:", arg0, arg1)
        return
    if ndim0 == 1 and ndim1 == 2 and not t1:
        # reduction across samples np.dot(Y,X)
        # lhs is always REP
        array_dists[lhs] = Distribution.REP
        # Y and X have same distribution
        self._meet_array_dists(arg0, arg1, array_dists)
        dprint("dot case 2 YX:", arg0, arg1)
        return
    if ndim0 == 2 and ndim1 == 2 and t0 and not t1:
        # reduction across samples np.dot(X.T,Y)
        # lhs is always REP
        array_dists[lhs] = Distribution.REP
        # Y and X have same distribution
        self._meet_array_dists(arg0, arg1, array_dists)
        dprint("dot case 3 XtY:", arg0, arg1)
        return
    if ndim0 == 2 and ndim1 == 2 and not t0 and not t1:
        # samples dot weights: np.dot(X,w)
        # w is always REP
        array_dists[arg1] = Distribution.REP
        self._meet_array_dists(lhs, arg0, array_dists)
        dprint("dot case 4 Xw:", arg0, arg1)
        return

    # set REP if no pattern matched
    self._analyze_call_set_REP(lhs, args, array_dists, 'np.dot')
def _analyze_call_set_REP(self, lhs, args, array_dists, fdef=None):
    """Fallback for unrecognized calls: force every array-like argument
    and the output to REP (fdef is only used for debug output)."""

    def _needs_dist(name):
        # arrays, array containers, and dataframes all carry a distribution
        return (is_array(self.typemap, name)
                or is_array_container(self.typemap, name)
                or isinstance(self.typemap[name], DataFrameType))

    for arg in args:
        if _needs_dist(arg.name):
            dprint("dist setting call arg REP {} in {}".format(arg.name, fdef))
            array_dists[arg.name] = Distribution.REP
    if _needs_dist(lhs):
        dprint("dist setting call out REP {} in {}".format(lhs, fdef))
        array_dists[lhs] = Distribution.REP
def _analyze_getitem(self, inst, lhs, rhs, array_dists):
    """Analyze a ``getitem``/``static_getitem`` expression.

    Only the first dimension's index is considered for distribution
    (TODO in source: extend to 2D distribution). Unhandled access
    patterns force REP on all involved variables.
    """
    # selecting an array from a tuple
    if (rhs.op == 'static_getitem'
            and isinstance(self.typemap[rhs.value.name], types.BaseTuple)
            and isinstance(rhs.index, int)):
        seq_info = guard(find_build_sequence, self.func_ir, rhs.value)
        if seq_info is not None:
            in_arrs, _ = seq_info
            arr = in_arrs[rhs.index]
            self._meet_array_dists(lhs, arr.name, array_dists)
            return

    if rhs.op == 'static_getitem':
        if rhs.index_var is None:
            # TODO: things like A[0] need broadcast
            self._set_REP(inst.list_vars(), array_dists)
            return
        index_var = rhs.index_var
    else:
        assert rhs.op == 'getitem'
        index_var = rhs.index

    if (rhs.value.name, index_var.name) in self._parallel_accesses:
        # XXX: is this always valid? should be done second pass?
        self._set_REP([inst.target], array_dists)
        return

    # in multi-dimensional case, we only consider first dimension
    # TODO: extend to 2D distribution
    tup_list = guard(find_build_tuple, self.func_ir, index_var)
    if tup_list is not None:
        index_var = tup_list[0]
        # rest of indices should be replicated if array
        other_ind_vars = tup_list[1:]
        self._set_REP(other_ind_vars, array_dists)

    if isinstance(index_var, int):
        # constant scalar index: treat conservatively as REP
        self._set_REP(inst.list_vars(), array_dists)
        return
    assert isinstance(index_var, ir.Var)

    # array selection with boolean index
    if (is_np_array(self.typemap, index_var.name)
            and self.typemap[index_var.name].dtype == types.boolean):
        # input array and bool index have the same distribution
        new_dist = self._meet_array_dists(index_var.name, rhs.value.name,
                                          array_dists)
        # output length is data-dependent, so at best 1D_Var
        array_dists[lhs] = Distribution(min(Distribution.OneD_Var.value,
                                            new_dist.value))
        return

    # array selection with permutation array index
    if is_np_array(self.typemap, index_var.name):
        arr_def = guard(get_definition, self.func_ir, index_var)
        if isinstance(arr_def, ir.Expr) and arr_def.op == 'call':
            fdef = guard(find_callname, self.func_ir, arr_def, self.typemap)
            if fdef == ('permutation', 'numpy.random'):
                self._meet_array_dists(lhs, rhs.value.name, array_dists)
                return

    # whole slice or strided slice access
    # for example: A = X[:,5], A = X[::2,5]
    if guard(is_whole_slice, self.typemap, self.func_ir, index_var,
             accept_stride=True):
        self._meet_array_dists(lhs, rhs.value.name, array_dists)
        return

    # output of operations like S.head() is REP since it's a "small" slice
    # input can remain 1D
    if guard(is_const_slice, self.typemap, self.func_ir, index_var):
        array_dists[lhs] = Distribution.REP
        return

    self._set_REP(inst.list_vars(), array_dists)
    return
def _analyze_setitem(self, inst, array_dists):
    """Analyze a ``SetItem``/``StaticSetItem`` statement.

    Parallel-access writes are left alone; whole-slice writes meet the
    target and value distributions; anything else forces the value to REP.
    """
    if isinstance(inst, ir.SetItem):
        index_var = inst.index
    else:
        # StaticSetItem keeps the index in index_var
        index_var = inst.index_var

    if ((inst.target.name, index_var.name) in self._parallel_accesses):
        # no parallel to parallel array set (TODO)
        return

    # in multi-dim case only the first index decides the distribution
    tup_list = guard(find_build_tuple, self.func_ir, index_var)
    if tup_list is not None:
        index_var = tup_list[0]
        # rest of indices should be replicated if array
        self._set_REP(tup_list[1:], array_dists)

    if guard(is_whole_slice, self.typemap, self.func_ir, index_var):
        # for example: X[:,3] = A
        self._meet_array_dists(
            inst.target.name, inst.value.name, array_dists)
        return

    self._set_REP([inst.value], array_dists)
def _meet_array_dists(self, arr1, arr2, array_dists, top_dist=None):
    """Meet the distributions of two arrays (take the minimum enum value,
    bounded above by top_dist), store it for both, and return it.

    Arrays not seen before start at top_dist (default: OneD).
    """
    if top_dist is None:
        top_dist = Distribution.OneD
    # unseen arrays begin at the upper bound
    array_dists.setdefault(arr1, top_dist)
    array_dists.setdefault(arr2, top_dist)
    met = Distribution(min(array_dists[arr1].value,
                           array_dists[arr2].value,
                           top_dist.value))
    array_dists[arr1] = met
    array_dists[arr2] = met
    return met
def _set_REP(self, var_list, array_dists):
    """Force every distributable variable in ``var_list`` to REP,
    recursing into build_tuple definitions to cover tuples of arrays."""
    for var in var_list:
        varname = var.name
        # Handle SeriesType since it comes from Arg node and it could
        # have user-defined distribution
        if (is_array(self.typemap, varname)
                or is_array_container(self.typemap, varname)
                or isinstance(
                    self.typemap[varname], (SeriesType, DataFrameType))):
            dprint("dist setting REP {}".format(varname))
            array_dists[varname] = Distribution.REP
        # handle tuples of arrays
        var_def = guard(get_definition, self.func_ir, var)
        if (var_def is not None and isinstance(var_def, ir.Expr)
                and var_def.op == 'build_tuple'):
            tuple_vars = var_def.items
            self._set_REP(tuple_vars, array_dists)
def _rebalance_arrs(self, array_dists, parfor_dists):
    """Insert rebalance calls for 1D_Var arrays that are either used in a
    parfor inside a sequential loop or written inside a 1D_Var parfor.

    Returns True if any rebalance was generated (IR was modified),
    False otherwise.
    """
    # rebalance an array if it is accessed in a parfor that has output
    # arrays or is in a loop
    # find sequential loop bodies
    cfg = numba.analysis.compute_cfg_from_blocks(self.func_ir.blocks)
    loop_bodies = set()
    for loop in cfg.loops().values():
        loop_bodies |= loop.body
    rebalance_arrs = set()
    for label, block in self.func_ir.blocks.items():
        for inst in block.body:
            # TODO: handle hiframes filter etc.
            if (isinstance(inst, Parfor)
                    and parfor_dists[inst.id] == Distribution.OneD_Var):
                array_accesses = _get_array_accesses(
                    inst.loop_body, self.func_ir, self.typemap)
                # 1D_Var arrays touched inside this parfor
                onedv_arrs = set(arr for (arr, ind) in array_accesses
                                 if arr in array_dists and array_dists[arr] == Distribution.OneD_Var)
                if (label in loop_bodies
                        or _arrays_written(onedv_arrs, inst.loop_body)):
                    rebalance_arrs |= onedv_arrs
    if len(rebalance_arrs) != 0:
        self._gen_rebalances(rebalance_arrs, self.func_ir.blocks)
        return True
    return False
def _gen_rebalances(self, rebalance_arrs, blocks):
    """Rewrite assignments to arrays in ``rebalance_arrs`` so the value is
    first stored into a temporary and then passed through
    ``sdc.distributed_api.rebalance_array``; recurses into parfor bodies.
    """
    for block in blocks.values():
        new_body = []
        for inst in block.body:
            # TODO: handle hiframes filter etc.
            if isinstance(inst, Parfor):
                self._gen_rebalances(rebalance_arrs, {0: inst.init_block})
                self._gen_rebalances(rebalance_arrs, inst.loop_body)
            if isinstance(inst, ir.Assign) and inst.target.name in rebalance_arrs:
                out_arr = inst.target
                # the original definition is replaced by the rebalance result
                self.func_ir._definitions[out_arr.name].remove(inst.value)
                # hold inst results in tmp array
                tmp_arr = ir.Var(out_arr.scope,
                                 mk_unique_var("rebalance_tmp"),
                                 out_arr.loc)
                self.typemap[tmp_arr.name] = self.typemap[out_arr.name]
                inst.target = tmp_arr
                nodes = [inst]

                def f(in_arr):  # pragma: no cover
                    out_a = sdc.distributed_api.rebalance_array(in_arr)
                # compile the rebalance wrapper to Numba IR and splice it in
                f_block = compile_to_numba_ir(f, {'sdc': sdc}, self.typingctx,
                                              (self.typemap[tmp_arr.name],),
                                              self.typemap, self.calltypes).blocks.popitem()[1]
                replace_arg_nodes(f_block, [tmp_arr])
                nodes += f_block.body[:-3]  # remove none return
                nodes[-1].target = out_arr
                # update definitions
                dumm_block = ir.Block(out_arr.scope, out_arr.loc)
                dumm_block.body = nodes
                build_definitions({0: dumm_block}, self.func_ir._definitions)
                new_body += nodes
            else:
                new_body.append(inst)
        block.body = new_body
def _get_pair_first_container(func_ir, rhs):
    """Return the container variable iterated by a ``pair_first`` expression.

    Walks the definition chain pair_first -> iternext -> getiter and
    returns the getiter's operand. ``require`` aborts (raises
    GuardException) when the chain does not match this pattern.
    """
    assert isinstance(rhs, ir.Expr) and rhs.op == 'pair_first'
    iternext = get_definition(func_ir, rhs.value)
    require(isinstance(iternext, ir.Expr) and iternext.op == 'iternext')
    getiter = get_definition(func_ir, iternext.value)
    # BUG FIX: the original re-checked isinstance(iternext, ir.Expr) here
    # (copy-paste), so a non-Expr getiter raised AttributeError on .op
    # instead of failing the guard cleanly.
    require(isinstance(getiter, ir.Expr) and getiter.op == 'getiter')
    return getiter.value
def _arrays_written(arrs, blocks):
    """Return True if any statement in ``blocks`` (recursing into nested
    parfor loop bodies) writes to one of the arrays named in ``arrs``."""
    for blk in blocks.values():
        for stmt in blk.body:
            # recurse into nested parfor bodies first
            if isinstance(stmt, Parfor):
                if _arrays_written(arrs, stmt.loop_body):
                    return True
            # a setitem on one of the tracked arrays counts as a write
            if isinstance(stmt, (ir.SetItem, ir.StaticSetItem)):
                if stmt.target.name in arrs:
                    return True
    return False
# def get_stencil_accesses(parfor, typemap):
# # if a parfor has stencil pattern, see which accesses depend on loop index
# # XXX: assuming loop index is not used for non-stencil arrays
# # TODO support recursive parfor, multi-D, mutiple body blocks
# # no access if not stencil
# is_stencil = False
# for pattern in parfor.patterns:
# if pattern[0] == 'stencil':
# is_stencil = True
# neighborhood = pattern[1]
# if not is_stencil:
# return {}, None
# par_index_var = parfor.loop_nests[0].index_variable
# body = parfor.loop_body
# body_defs = build_definitions(body)
# stencil_accesses = {}
# for block in body.values():
# for stmt in block.body:
# if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Expr):
# lhs = stmt.target.name
# rhs = stmt.value
# if (rhs.op == 'getitem' and is_array(typemap, rhs.value.name)
# and vars_dependent(body_defs, rhs.index, par_index_var)):
# stencil_accesses[rhs.index.name] = rhs.value.name
# return stencil_accesses, neighborhood
# def vars_dependent(defs, var1, var2):
# # see if var1 depends on var2 based on definitions in defs
# if len(defs[var1.name]) != 1:
# return False
# vardef = defs[var1.name][0]
# if isinstance(vardef, ir.Var) and vardef.name == var2.name:
# return True
# if isinstance(vardef, ir.Expr):
# for invar in vardef.list_vars():
# if invar.name == var2.name or vars_dependent(defs, invar, var2):
# return True
# return False
# array access code is copied from ir_utils to be able to handle specialized
# array access calls such as get_split_view_index()
# TODO: implement extendable version in ir_utils
def get_parfor_array_accesses(parfor, func_ir, typemap, accesses=None):
    """Collect (array name, index name) accesses inside a parfor.

    Temporarily wraps the parfor's blocks so the generic block walker
    (``_get_array_accesses``) can traverse them, then unwraps.
    Results accumulate into ``accesses`` (a set), which is returned.
    """
    if accesses is None:
        accesses = set()
    blocks = wrap_parfor_blocks(parfor)
    accesses = _get_array_accesses(blocks, func_ir, typemap, accesses)
    unwrap_parfor_blocks(parfor)
    return accesses
# extension registry: statement type -> handler that records its array
# accesses; lets _get_array_accesses recurse into Parfor nodes
array_accesses_extensions = {}
array_accesses_extensions[Parfor] = get_parfor_array_accesses
def _get_array_accesses(blocks, func_ir, typemap, accesses=None):
    """Return a set of (array name, index) pairs for all array accesses.

    Copied/extended from ir_utils to also recognize specialized access
    calls such as get_split_view_index(); see module comment above.
    Statement types registered in ``array_accesses_extensions`` (e.g.
    Parfor) are handled by their registered callback.
    """
    if accesses is None:
        accesses = set()
    for block in blocks.values():
        for inst in block.body:
            if isinstance(inst, ir.SetItem):
                accesses.add((inst.target.name, inst.index.name))
            if isinstance(inst, ir.StaticSetItem):
                accesses.add((inst.target.name, inst.index_var.name))
            if isinstance(inst, ir.Assign):
                lhs = inst.target.name
                rhs = inst.value
                if isinstance(rhs, ir.Expr) and rhs.op == 'getitem':
                    accesses.add((rhs.value.name, rhs.index.name))
                if isinstance(rhs, ir.Expr) and rhs.op == 'static_getitem':
                    index = rhs.index
                    # slice is unhashable, so just keep the variable
                    if index is None or ir_utils.is_slice_index(index):
                        index = rhs.index_var.name
                    accesses.add((rhs.value.name, index))
                if isinstance(rhs, ir.Expr) and rhs.op == 'call':
                    fdef = guard(find_callname, func_ir, rhs, typemap)
                    if fdef is not None:
                        # specialized string-array access helpers: record
                        # their (array, index) argument pairs
                        if fdef == ('get_split_view_index', 'sdc.hiframes.split_impl'):
                            accesses.add((rhs.args[0].name, rhs.args[1].name))
                        if fdef == ('setitem_str_arr_ptr', 'sdc.str_arr_ext'):
                            accesses.add((rhs.args[0].name, rhs.args[1].name))
                        if fdef == ('str_arr_item_to_numeric', 'sdc.str_arr_ext'):
                            accesses.add((rhs.args[0].name, rhs.args[1].name))
                            accesses.add((rhs.args[2].name, rhs.args[3].name))
            for T, f in array_accesses_extensions.items():
                if isinstance(inst, T):
                    f(inst, func_ir, typemap, accesses)
    return accesses
def dprint(*s):
    """Print *s* only when debug printing is enabled."""
    if not debug_prints():
        return
    print(*s)
|
<reponame>hypothesis/h-matchers<filename>tests/unit/h_matchers/matcher/collection/containment_test.py<gh_stars>0
# pylint: disable=misplaced-comparison-constant
import pytest
from h_matchers import Any
from h_matchers.matcher.collection.containment import (
AnyIterableWithItems,
AnyIterableWithItemsInOrder,
AnyMappingWithItems,
)
from tests.unit.data_types import DataTypes
class MultiDict(list):
    """Very bare bones implementation of a multi-dict.

    Stored as a list of (key, value) pairs; unlike a plain dict, duplicate
    keys are preserved and yielded by items().
    """

    def items(self):
        # yield every (key, value) pair, duplicates included
        yield from self
class TestAnyMappableWithItems:
    """Tests for AnyMappingWithItems (subset matching against mappings)."""

    @pytest.mark.parametrize("item,_", DataTypes.parameters())
    def test_it_fails_gracefully(self, item, _):
        # comparing against arbitrary objects must not raise, just be unequal
        assert item != AnyMappingWithItems({"a": 1})

    def test_it_can_match_values(self):
        matcher = AnyMappingWithItems({"a": 1})

        assert matcher == {"a": 1}
        assert {"a": 1} == matcher
        # extra keys are fine: containment, not full equality
        assert matcher == {"a": 1, "b": 2}
        assert {"a": 2} != matcher
        assert {"b": 2} != matcher

    def test_it_can_match_multi_dicts(self):
        # multi-dict with a duplicate "a" key
        multi_dict = MultiDict((("a", 2), ["a", 1], ("b", 2)))

        assert multi_dict == AnyMappingWithItems({"a": 2})
        assert multi_dict == AnyMappingWithItems({"a": 1})
        assert multi_dict == AnyMappingWithItems({"a": 1, "b": 2})
        assert multi_dict != AnyMappingWithItems({"d": 1})

    def test_it_can_match_with_multi_dicts(self):
        multi_dict = MultiDict((("a", 2), ["a", 1], ("b", 2)))
        matcher = AnyMappingWithItems(multi_dict)

        assert multi_dict == matcher
        # a plain dict cannot supply both values for the duplicate "a" key
        assert {"a": 1, "b": 2} != matcher
        assert MultiDict((("a", 2), ["a", 1], ("b", 2), ["c", 3])) == matcher
class TestAnyIterableWithItemsInOrder:
    """Tests for AnyIterableWithItemsInOrder (ordered subsequence match)."""

    @pytest.mark.parametrize("item,_", DataTypes.parameters())
    def test_it_fails_gracefully(self, item, _):
        # comparing against arbitrary objects must not raise, just be unequal
        assert item != AnyIterableWithItemsInOrder(["a"])

    def test_it_matches_in_order(self):
        matcher = AnyIterableWithItemsInOrder([1, 1, 2])

        # Ordered things do
        assert matcher == [0, 1, 1, 2, 3]
        assert matcher == [2, 1, 1, 2, 3]  # It is in here
        assert matcher != [0, 2, 1, 1, 3]
        assert matcher != [1, 2, 2]

    def test_it_matches_generators_in_order(self):
        matcher = AnyIterableWithItemsInOrder([0, 1, 2])

        # generators are consumed and matched like any other iterable
        assert matcher == iter(range(3))
        assert iter(range(3)) == matcher

        assert matcher != iter(range(2))
        assert iter(range(2)) != matcher
class TestAnyIterableWithItems:
    """Tests for AnyIterableWithItems (unordered containment match)."""

    @pytest.mark.parametrize("item,_", DataTypes.parameters())
    def test_it_fails_gracefully(self, item, _):
        # comparing against arbitrary objects must not raise, just be unequal
        assert item != AnyIterableWithItems(["a"])

    def test_it_matches_out_of_order(self):
        matcher = AnyIterableWithItems([1, 2])

        # mappings match on their keys; order is irrelevant
        assert matcher == {2: "b", 1: "a", 0: "c"}
        assert matcher == {0, 2, 1}
        assert matcher == [0, 1, 2, 3]
        assert matcher == [0, 2, 1, 3]

        assert matcher != [1]
        # each wanted item must be matched by a distinct element
        assert matcher != [1, 1]

    def test_it_matches_generators_out_of_order(self):
        matcher = AnyIterableWithItems([2, 0, 1])

        def matching_gen():
            yield from range(3)

        assert matcher == matching_gen()
        assert matching_gen() == matcher

        def non_matching_gen():
            yield from range(2)

        assert matcher != non_matching_gen()
        assert non_matching_gen() != matcher

    def test_it_can_match_unhashable_in_any_order(self):
        # dicts are unhashable, so matching can't rely on set operations
        dict_a = {"a": 1}
        dict_b = {"b": 2}
        matcher = AnyIterableWithItems([dict_a, dict_b])

        assert [dict_b, dict_a] == matcher
        assert matcher == [dict_b, dict_a]

    def test_it_matches_non_trival_matches(self):
        # For some items a naive approach will not work, as there are many
        # solutions to matching a set of objects, only some of which will
        # work.
        matcher = AnyIterableWithItems(
            [
                Any(),
                Any.string(),
                Any.string.containing("a"),
                Any.string.containing("aaaa"),
            ]
        )

        assert matcher == ["aaaa", "a", "", None]
        assert ["aaaa", "a", "", None] == matcher

    def test_it_detects_incompatible_matches(self):
        # three matchers, but only two candidates contain "a"
        matcher = AnyIterableWithItems(
            [
                Any.string.containing("a"),
                Any.string.containing("a"),
                Any.string.containing("a"),
            ]
        )

        assert ["a", "aa", None] != matcher
        assert matcher != ["a", "aa", None]
|
<filename>django_mako_plus/router/discover.py
from django.apps import apps
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ViewDoesNotExist
from django.template import TemplateDoesNotExist
from django.views.generic import View
from .decorators import view_function, CONVERTER_ATTRIBUTE_NAME
from ..util import import_qualified, log
import inspect
import threading
from importlib import import_module
from importlib.util import find_spec
########################################################
###   Cached routers

# maps (module_name, function_name) -> resolved view callable;
# only populated when settings.DEBUG is False (see get_view_function)
CACHED_VIEW_FUNCTIONS = {}
# guards cache population across threads (double-checked locking)
rlock = threading.RLock()
def get_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
    '''
    Retrieves a view function from the cache, finding it if the first time.
    Raises ViewDoesNotExist if not found. This is called by resolver.py.

    Uses double-checked locking: a lock-free cache read first, then a
    re-check under ``rlock`` before resolving and (in production only)
    caching the view.
    '''
    # first check the cache (without doing locks)
    key = ( module_name, function_name )
    try:
        return CACHED_VIEW_FUNCTIONS[key]
    except KeyError:
        with rlock:
            # try again now that we're locked
            try:
                return CACHED_VIEW_FUNCTIONS[key]
            except KeyError:
                # if we get here, we need to load the view function
                func = find_view_function(module_name, function_name, fallback_app, fallback_template, verify_decorator)
                # cache in production mode
                if not settings.DEBUG:
                    CACHED_VIEW_FUNCTIONS[key] = func
                return func

    # the code should never be able to get here
    raise Exception("Django-Mako-Plus error: get_view_function() should not have been able to get to this point. Please notify the owner of the DMP project. Thanks.")
def find_view_function(module_name, function_name, fallback_app=None, fallback_template=None, verify_decorator=True):
    '''
    Finds a view function, class-based view, or template view.
    Raises ViewDoesNotExist if not found.

    Resolution order: if the view module doesn't exist, fall back to a
    direct template view; otherwise import the module and look up the
    function/class, wrap class-based views with as_view(), verify the
    @view_function decorator on plain functions, and attach the
    configured parameter converter.
    '''
    dmp = apps.get_app_config('django_mako_plus')

    # I'm calling find_spec first here because I don't want import_module in
    # a try/except -- there are lots of reasons that importing can fail, and I just want to
    # know whether the file actually exists. find_spec raises AttributeError if not found.
    try:
        spec = find_spec(module_name)
    except ValueError:
        # NOTE(review): only ValueError is caught here, despite the comment
        # above mentioning AttributeError — confirm which exception find_spec
        # actually raises for these module names
        spec = None
    if spec is None:
        # no view module, so create a view function that directly renders the template
        try:
            return create_view_for_template(fallback_app, fallback_template)
        except TemplateDoesNotExist as e:
            raise ViewDoesNotExist('view module {} not found, and fallback template {} could not be loaded ({})'.format(module_name, fallback_template, e))

    # load the module and function
    try:
        module = import_module(module_name)
        func = getattr(module, function_name)
        func.view_type = 'function'
    except ImportError as e:
        raise ViewDoesNotExist('module "{}" could not be imported: {}'.format(module_name, e))
    except AttributeError as e:
        raise ViewDoesNotExist('module "{}" found successfully, but "{}" was not found: {}'.format(module_name, function_name, e))

    # if class-based view, call as_view() to get a view function to it
    if inspect.isclass(func) and issubclass(func, View):
        func = func.as_view()
        func.view_type = 'class'

    # if regular view function, check the decorator
    elif verify_decorator and not view_function.is_decorated(func):
        raise ViewDoesNotExist("view {}.{} was found successfully, but it must be decorated with @view_function or be a subclass of django.views.generic.View.".format(module_name, function_name))

    # attach a converter to the view function
    if dmp.options['PARAMETER_CONVERTER'] is not None:
        try:
            converter = import_qualified(dmp.options['PARAMETER_CONVERTER'])(func)
            setattr(func, CONVERTER_ATTRIBUTE_NAME, converter)
        except ImportError as e:
            raise ImproperlyConfigured('Cannot find PARAMETER_CONVERTER: {}'.format(str(e)))

    # return the function/class
    return func
def create_view_for_template(app_name, template_name):
    '''
    Builds a view function that renders a template directly (used when a
    views.py file doesn't exist but the .html template does).
    Raises TemplateDoesNotExist if the template cannot be loaded.
    '''
    # fail fast: trigger a template load now so a missing template raises here
    dmp = apps.get_app_config('django_mako_plus')
    dmp.engine.get_template_loader(app_name).get_template(template_name)

    def template_view(request, *args, **kwargs):
        # the template object is fetched on every request (no local caching)
        # because Mako maintains its own template cache
        config = apps.get_app_config('django_mako_plus')
        loader = config.engine.get_template_loader(app_name)
        return loader.get_template(template_name).render_to_response(request=request, context=kwargs)

    template_view.view_type = 'template'
    return template_view
|
<gh_stars>10-100
# EDGE Estimator for Shannon Mutual Information
#
# Created by <NAME> (<EMAIL>)
# Current version: 4.3.1
# Requirements: numpy, cvxpy(v1.0.6),scipy, sklearn
#
# 10/1/2018
#
# Based on the paper: Scalable Mutual Information Estimation using Dependence Graphs
#
################
# The estimator is in the following form:
#
# I = EDGE(X,Y,U=10, gamma=[1, 1], epsilon=[0,0], epsilon_vector = 'fixed', eps_range_factor=0.1, normalize_epsilon = False ,
# ensemble_estimation = 'median', L_ensemble=5 ,hashing='p-stable', stochastic = False)
#
# Arguments:
#
# X is N * d_x and Y is N * d_Y data sets
# U (optional) is an upper bound on the MI. It doesn't need to be accurate, but more accurate upper bound we set, faster convergence rates we get
# gamma=[gamma_X,gamma_Y] (optional) is the vector of smoothness for X and Y.
# For example, if the data is discrete we set gamma close to 0,
# and if the data is continuous we set gamma close to 1 (or maybe higher if it is very smooth)
# epsilon=[eps_X, eps_Y] (optional) is the vector of bandwidths for X and Y. If no epsilon is set,
# automatic bandwidths according to KNN distances will be set.
# epsilon_vector (optional): possible arguments are 'fixed' or 'range'. If 'fixed' is given, all of
# the bandwidths for the ensemble estimation will be the same, while, if 'range' is chosen,
#      the bandwidths will be arithmetically increasing in a range.
# eps_range_factor (optional): If epsilon_vector == 'range', then the range of epsilon is
# [epsilon, epsilon*(1+epsilon_vector)].
# normalize_epsilon: If it is True, then the bandwidth will be normalized according to the MI estimate
# ensemble_estimation: several options are available:
# 'average': the ensemble estimator is the average of the base estimators
#      'optimal_weights': the ensemble estimator is the weighted sum of the base estimators
# where the weights are computed using an optimization problem
# * You need to import cvxpy as cvx (install cvxpy if you do not have it)
# 'median': the ensemble estimator is the median of the base estimators
# L_ensemble: number of different base estimators used in ensemble estimation. For more accurate estimates
# you can increase L_ensemble, but runtime increases linearly as well.
# hashing (optional): possible arguments are 'p-stable' (default) which is a common type of LSH
#      or 'floor' which uses the simple floor function as hashing. For small dimensions 'floor',
#      and for higher dimensions 'p-stable', is preferred.
# stochastic: if True, the hashing shifts are generated using a random seed.
#
# Output: I is the estimation of mutual information between X and Y
###########################
import numpy as np
import math
import cvxpy as cvx # Need to install CVXPY package,
# it is also possible to run this code without cvxpy, by
# using 'average' or 'median' ensemble_estimation
import time
from scipy.special import *
from sklearn.neighbors import NearestNeighbors
import sklearn
#from random import randint, seed
#np.random.seed(seed=0)
#####################
#####################
# Generate W and V matrices (used in LSH)
def gen_W(X, Y):
    """Generate the random projection matrices W and V used for p-stable LSH.

    Returns (W, V) where W is (dim_X, d_X_shrink) and V is (dim_Y, d_Y_shrink).
    Entries are zero-mean normals whose scale is inversely proportional to each
    column's standard deviation and to sqrt(dim), so projections are roughly
    unit-scale. A fixed seed keeps the hashing reproducible across calls.
    """
    np.random.seed(3334)
    n = X.shape[0]

    def _shrunk_dim(dim, k, r):
        # shrink the output dimension logarithmically with the sample count,
        # never exceeding the input dimension
        return min(dim, math.floor(math.log(1.0 * n / r, k)))

    def _projection(A, out_dim):
        dim = A.shape[1]
        # per-column standard deviations, as a column vector
        col_std = np.array([np.std(A[:, [i]]) for i in range(dim)]).reshape(dim, 1)
        scale = np.tile(col_std, (1, out_dim))
        # guard against division by (near) zero for constant columns
        scale[scale < 0.0001] = 1
        sigma = 1.0 / (scale * np.sqrt(dim))
        return np.random.normal(np.zeros((dim, out_dim)), sigma, (dim, out_dim))

    # NOTE: W is drawn before V, matching the fixed-seed RNG stream
    W = _projection(X, _shrunk_dim(X.shape[1], 2, 10))
    V = _projection(Y, _shrunk_dim(Y.shape[1], 2, 10))
    return (W, V)
# Find KNN distances for a number of samples for normalizing bandwidth
def find_knn(A, d):
    """Estimate a bandwidth for one projected coordinate.

    Takes the mean distance to the k-th nearest neighbour over a random
    subsample of 500 query points; k grows sublinearly with the sample size.

    :param A: the coordinate values (flattened to a column)
    :param d: dimension of the projected space (feeds the k heuristic)
    :return: the mean k-NN distance (a float)
    """
    np.random.seed(3334)  # deterministic subsampling
    n_queries = 500
    A = A.reshape((-1, 1))
    N = A.shape[0]
    # heuristic choice of k from the paper; the exp() factor is ~1 except
    # in very high dimension
    k = math.floor(0.43 * N ** (2 / 3 + 0.17 * (d / (d + 1))) * math.exp(-1.0 / np.max([10000, d ** 4])))
    queries = np.random.choice(A.reshape(-1,), size=n_queries).reshape(-1, 1)
    nbrs = NearestNeighbors(n_neighbors=k, algorithm='auto').fit(A)
    distances, _ = nbrs.kneighbors(queries)
    # mean distance to the k-th (furthest returned) neighbour
    return np.mean(distances[:, -1])
# Returns epsilon and random shifts b
def gen_eps(XW, YV):
    """Return per-coordinate bandwidth vectors (eps_X, eps_Y).

    Each entry is the k-NN-based bandwidth of one projected coordinate
    (see find_knn), plus a small additive floor to keep it strictly positive.
    """
    dx, dy = XW.shape[1], YV.shape[1]
    eps_X = 0.0001 + np.array([find_knn(XW[:, [j]], dx) for j in range(dx)])
    eps_Y = 0.0001 + np.array([find_knn(YV[:, [j]], dy) for j in range(dy)])
    return (eps_X, eps_Y)
# Define H1 (LSH) for a vector X (X is just one sample)
def H1(XW, b, eps):
    """First-level hash H1 of a single (projected) sample.

    Shifts the sample by b, scales by the bandwidth eps and floors the
    result, producing a discrete bucket key.

    :param XW: 1-D array holding one projected sample
    :param b: random shift (vector or scalar)
    :param eps: bandwidth (vector or scalar); for a scalar sample with
        eps <= 0 no scaling is applied
    :return: a tuple key for vector input, or a plain Python scalar for
        1-dimensional input (both are hashable, i.e. usable as dict keys)
    """
    d_X = XW.shape[0]
    XW = XW.reshape(1, d_X)
    if d_X > 1:
        X_te = 1.0 * (np.squeeze(XW) + b) / eps
    elif eps > 0:
        X_te = 1.0 * (XW + b) / eps
    else:
        # degenerate bandwidth: keep the raw value
        X_te = XW
    # discretize into integer bins
    X_t = np.floor(X_te)
    if d_X > 1:
        # arrays cannot be dict keys; tuples can
        return tuple(X_t.tolist())
    # np.asscalar() was removed in NumPy 1.23; ndarray.item() is the
    # documented replacement and returns the same Python scalar
    return np.squeeze(X_t).item()
# Compute Hashing: Compute the number of collisions in each bucket
def Hash(XW, YV, eps_X, eps_Y, b_X, b_Y):
    """Bucket every sample with H1 and collect the collisions.

    :return: (CX, CY, CXY) — dicts mapping bucket key -> list of sample
        indices for the X buckets, the Y buckets and the joint buckets.
    """
    N = XW.shape[0]
    CX, CY, CXY = {}, {}, {}
    for i in range(N):
        # H1 returns hashable keys (tuples or scalars), so they can index dicts
        key_x = H1(XW[i], b_X, eps_X)
        key_y = H1(YV[i], b_Y, eps_Y)
        CX.setdefault(key_x, []).append(i)
        CY.setdefault(key_y, []).append(i)
        CXY.setdefault((key_x, key_y), []).append(i)
    return (CX, CY, CXY)
# Compute mutual information and gradient given epsilons and random shifts
def Compute_MI(XW, YV, U, eps_X, eps_Y, b_X, b_Y):
    """Compute the (clipped) mutual information estimate for one hashing.

    Each joint bucket contributes its collision count times the base-2 log
    density ratio, with the ratio clipped into [0.001, U] to bound the
    variance of the estimate.
    """
    N = XW.shape[0]
    CX, CY, CXY = Hash(XW, YV, eps_X, eps_Y, b_X, b_Y)
    total = 0.0
    n_collisions = 0
    for (key_x, key_y), joint_bucket in CXY.items():
        Nij = len(joint_bucket)
        Ni = len(CX[key_x])
        Mj = len(CY[key_y])
        log_ratio = math.log(1.0 * Nij * N / (Ni * Mj), 2)
        # clip each bucket's contribution into [0.001, U]
        total += Nij * max(min(log_ratio, U), 0.001)
        n_collisions += Nij
    return 1.0 * total / n_collisions
def TSNE_DR(X_train, X_test=None, Y_train=None, **kwargs):
    """Reduce X_train to a low-dimensional embedding with t-SNE.

    :param X_train: the data to embed
    :param X_test: unused; kept for interface compatibility (t-SNE has no
        out-of-sample transform)
    :param Y_train: unused; kept for interface compatibility
    :param kwargs: forwarded to sklearn.manifold.TSNE
    :return: the embedded data (fit_transform result)
    """
    # The file only does `import sklearn`, which does NOT import the
    # `manifold` submodule, so `sklearn.manifold.TSNE` raised AttributeError.
    # Import the submodule explicitly here.
    from sklearn.manifold import TSNE
    clf = TSNE(**kwargs)
    return clf.fit_transform(X_train)
def EDGE(X,Y,U=10, gamma=[1, 1], epsilon=[0,0], epsilon_vector = 'range', eps_range_factor=0.1, normalize_epsilon = True ,
        ensemble_estimation = 'median', L_ensemble=10 ,hashing='p-stable', stochastic = False):
    """EDGE estimator of the Shannon mutual information I(X;Y).

    :param X: (N, d_x) array; 1-D input is reshaped to a column
    :param Y: (N, d_y) array; 1-D input is reshaped to a column
    :param U: upper bound used to clip each bucket's log-ratio in Compute_MI
    :param gamma: [gamma_X, gamma_Y] smoothness factors (rescaled by 0.4 below)
    :param epsilon: [eps_X, eps_Y] manual bandwidths; [0, 0] selects automatic
        bandwidths derived from k-NN distances (gen_eps)
    :param epsilon_vector: 'fixed' (one bandwidth for all base estimators) or
        'range' (arithmetically increasing bandwidths)
    :param eps_range_factor: relative width of the bandwidth range for 'range'
    :param normalize_epsilon: if True, re-run once with gamma rescaled from the
        first MI estimate (a single level of recursion)
    :param ensemble_estimation: 'average', 'optimal_weights' (needs cvxpy) or
        'median'
    :param L_ensemble: number of base estimators in the ensemble
    :param hashing: 'p-stable', 'floor' or 'TSNE'
    :param stochastic: if True, hash shifts come from a fresh random seed
    :return: the mutual information estimate I

    NOTE(review): the mutable list defaults (gamma, epsilon) are not mutated
    here (np.array copies them), but callers should still pass fresh lists.
    """
    #print('checkpoint 1, data transformed using TSNE')
    gamma = np.array(gamma)
    # empirical rescaling of the smoothness factors
    gamma = gamma * 0.4
    epsilon = np.array(epsilon)
    # promote 1-D inputs to column vectors
    if X.ndim==1:
        X=X.reshape((-1,1))
    if Y.ndim==1:
        Y=Y.reshape((-1,1))
    # Num of Samples and dim
    N, d = X.shape[0], X.shape[1]
    dy = Y.shape[1]
    # Normalize gamma based on the dimension
    #gamma[0] = gamma[0]* min(1, 3/(np.log2(d)+1))
    #gamma[1] = gamma[1]* min(1, 3/(np.log2(dy)+1))
    # Find dimensions
    dim_X, dim_Y = X.shape[1], Y.shape[1]
    dim = dim_X + dim_Y
    ## Hash type
    # check for dimentionality reduction hashing
    if hashing == 'TSNE':
        # only embed with t-SNE when the data is smooth enough to warrant it
        if gamma[0]>0.05:
            XW = TSNE_DR(X)
            d_X_shrink = 2
        else:
            XW = X
            d_X_shrink = dim_X
        if gamma[1]>0.05:
            YV = TSNE_DR(Y)
            d_Y_shrink = 2
        else:
            YV = Y
            d_Y_shrink = dim_Y
    # low-dimensional data hashes well with the plain floor function
    # NOTE(review): this also downgrades 'TSNE'/'p-stable' requests for small
    # dims — confirm that is the intended precedence
    if dim_X<=6 and dim_Y <=6:
        hashing = 'floor'
    if hashing == 'p-stable':
        # Generate random transformation matrices W and V
        (W,V) = gen_W(X,Y)
        d_X_shrink, d_Y_shrink=W.shape[1], V.shape[1]
        # Find inner products
        XW, YV = np.dot(X,W), np.dot(Y,V)
    elif hashing == 'floor':
        #W = np.identity(dim_X)
        #V = np.identity(dim_Y)
        d_X_shrink, d_Y_shrink = dim_X, dim_Y
        XW, YV = X, Y
    ## Initial epsilon and apply smoothness gamma
    # If no manual epsilon is set for computing MI:
    if epsilon[0] ==0:
        # Generate auto epsilon and b
        (eps_X_temp,eps_Y_temp) = gen_eps(XW,YV)
        #print('eps_X_temp, eps_Y_temp', eps_X_temp, eps_Y_temp)
        # Normalizing factors for the bandwidths
        cx, cy = 18*d_X_shrink / np.max([(1+1.*math.log(dim_X)),1]), 18*d_Y_shrink/ np.max([(1+1.*math.log(dim_Y)),1])
        eps_X0, eps_Y0 = eps_X_temp * cx*gamma[0], eps_Y_temp * cy*gamma[1]
        #print('********eps_X0, eps_Y0', eps_X0, eps_Y0)
    else:
        # manual bandwidths, broadcast to the projected dimensions
        eps_X_temp = np.ones(d_X_shrink,)*epsilon[0]
        eps_Y_temp = np.ones(d_Y_shrink,)*epsilon[1]
        cx, cy = 15*d_X_shrink / np.max([(1+1.0*math.log(dim_X)),1]), 15*d_Y_shrink/ np.max([(1+1.0*math.log(dim_Y)),1])
        eps_X0, eps_Y0 = eps_X_temp * cx*gamma[0], eps_Y_temp * cy*gamma[1]
    #print('eps_X0, eps_Y0', eps_X0, eps_Y0)
    ## epsilon_vector: multipliers applied to the base bandwidth per estimator
    if epsilon_vector == 'fixed':
        T = np.ones(L_ensemble)
    elif epsilon_vector == 'range':
        T = np.linspace(1,1+eps_range_factor,L_ensemble)
    ## Compute MI Vector
    #print('Compute MI Vector: ')
    # MI Vector: one base estimate per bandwidth/shift combination
    I_vec = np.zeros(L_ensemble)
    for j in range(L_ensemble):
        # Apply epsilon_vector
        eps_X, eps_Y = eps_X0 * T[j], eps_Y0 * T[j]
        #print('j, eps_X, eps_Y', j, eps_X, eps_Y)
        ## Shifts of hashing
        if stochastic== True:
            # fresh random shifts each call (non-deterministic estimates)
            np.random.seed()
            f=0.1
            b_X = f*np.random.rand(d_X_shrink,)*eps_X
            b_Y = f*np.random.rand(d_Y_shrink,)*eps_Y
        else:
            # deterministic, evenly spaced shifts across the ensemble
            b_X = np.linspace(0,1,L_ensemble,endpoint=False)[j]*eps_X
            b_Y = np.linspace(0,1,L_ensemble,endpoint=False)[j]*eps_Y
        I_vec[j] = Compute_MI(XW,YV,U,eps_X,eps_Y,b_X,b_Y)
    ## Ensemble method: combine the base estimates
    if ensemble_estimation == 'average':
        I = np.mean(I_vec)
    elif ensemble_estimation == 'optimal_weights':
        weights = compute_weights(L_ensemble, d, T, N)
        weights=weights.reshape(L_ensemble,)
        I = np.dot(I_vec, weights)
    elif ensemble_estimation == 'median':
        I = np.median(I_vec)
    ## Normalize epsilon according to MI estimation (cross validation)
    if normalize_epsilon == True:
        # rescale gamma from the first-pass estimate and re-run exactly once
        # (normalize_epsilon=False stops the recursion)
        # NOTE(review): the recursive call rescales gamma by 0.4 again at the
        # top of EDGE — confirm this double scaling is intended
        gamma=gamma * math.pow(2,-math.sqrt(I*2.0)+(0.5/I))
        normalize_epsilon = False
        I = EDGE(X,Y,U, gamma, epsilon, epsilon_vector, eps_range_factor, normalize_epsilon, ensemble_estimation, L_ensemble,hashing, stochastic)
    return I
##### Quadratic Program for Ensemble Estimation ####
## Needed only if you are using 'optimal_weights' for ensemble_estimation
def compute_weights(L, d, T, N):
    """Solve the ensemble-weight quadratic program (requires cvxpy).

    Minimizes a slack variable subject to the weights summing to one, their
    2-norm being bounded, and the higher-order bias terms being controlled.

    :param L: number of base estimators
    :param d: data dimension
    :param T: bandwidth multipliers of the ensemble
    :param N: number of samples
    :return: the optimal weight vector (transposed cvxpy solution)
    """
    # optimization variables: the slack bound and the L weights
    slack = cvx.Variable()
    w = cvx.Variable(L)
    # weights sum to one; their 2-norm is bounded by slack/2
    constraints = [cvx.sum(w) == 1, cvx.pnorm(w, 2) - slack / 2 <= 0]
    # bound each bias term of order i by 2*slack
    for order in range(1, L):
        bias_terms = ((1.0 * T / N) ** (1.0 * order / (2 * d)))
        weighted_bias = w.T * bias_terms
        constraints.append(cvx.sum(weighted_bias) - slack * 2 <= 0)
    # minimize the slack subject to the constraints above
    problem = cvx.Problem(cvx.Minimize(slack), constraints)
    problem.solve()  # populates w.value with the optimum
    solution = np.array(w.value)
    return solution.T
####################################
####################################
if __name__ == "__main__":
    # Smoke test: two independent uniform datasets (so the true MI is ~0)
    X = np.random.rand(10000,100)
    Y = np.random.rand(10000,100)
    #I = EDGE(X,Y,U=10,gamma=[1, 1],epsilon=(0.2,0.2)) # Estimated Mutual Information between X and Y using EDGE method
    #I = EDGE(X,Y,U=100,epsilon=[1,1])
    #I = EDGE(X,Y, U=10, gamma=[1, 1], epsilon=[0,0], epsilon_vector = 'range', eps_range_factor=0.1, normalize_epsilon = True ,
    #         ensemble_estimation = 'optimal_weights', L_ensemble=10 ,hashing='p-stable', stochastic = True)
    #I = EDGE(X,Y,U=10, gamma=[1.1, 1.1], epsilon=[0,0], epsilon_vector = 'range', eps_range_factor=0.1, normalize_epsilon = True ,
    #         ensemble_estimation = 'optimal_weights', L_ensemble=20 ,hashing='p-stable', stochastic = False)
    # run with all defaults (median ensemble, p-stable hashing)
    I = EDGE(X,Y)
    print ('Estimated MI ', I)
    print('################################')
|
# -*- coding: utf-8 -*-
"""
/***************************************************************************
ThreeDiCustomStats
A QGIS plugin
This plugin calculates statistics of 3Di results. The user chooses the variable, aggregation method and spatiotemporal filtering.
Generated by Plugin Builder: http://g-sherman.github.io/Qgis-Plugin-Builder/
-------------------
begin : 2019-11-27
git sha : $Format:%H$
copyright : (C) 2019 by <NAME> | <NAME>
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
import tempfile
import processing
from qgis.PyQt.QtCore import QSettings, QTranslator, QCoreApplication
from qgis.PyQt.QtGui import QIcon
from qgis.PyQt.QtWidgets import QAction
from qgis.core import (
Qgis,
QgsApplication,
QgsProject,
QgsTask,
QgsRasterLayer
)
from .threedi_result_aggregation import *
from .ogr2qgis import *
# Initialize Qt resources from file resources.py
from .resources import *
# Import the code for the dialog
from .threedi_custom_stats_dialog import ThreeDiCustomStatsDialog
import os.path
# TODO: cfl strictness factors instelbaar maken
# TODO: berekening van max timestep ook op basis van volume vs. debiet
# TODO: opties af laten hangen van wat er in het model aanwezig is; is wel tricky ivm presets
class Aggregate3DiResults(QgsTask):
    """Background QGIS task that aggregates 3Di results and, on success,
    loads the outputs (rasters, cells, flowlines, nodes, resampled nodes)
    into the current QGIS project.

    The parent dialog is disabled while the task runs and re-enabled in
    finished()/on error.
    """

    def __init__(self,
                 description: str,
                 parent: ThreeDiCustomStatsDialog,
                 gridadmin: str,
                 results_3di: str,
                 demanded_aggregations: List,
                 bbox,
                 start_time: int,
                 end_time: int,
                 subsets,
                 interpolation_method,
                 resample_point_layer: bool,
                 resolution,
                 output_flowlines: bool,
                 output_cells: bool,
                 output_nodes: bool,
                 output_rasters: bool
                 ):
        # CanCancel lets the QGIS task manager offer cancellation in the UI
        super().__init__(description, QgsTask.CanCancel)
        # exception raised in run() is stored here and re-raised in finished()
        self.exception = None
        self.parent = parent
        # lock the dialog while the aggregation runs
        self.parent.setEnabled(False)
        self.grid_admin = gridadmin
        self.results_3di = results_3di
        self.demanded_aggregations = demanded_aggregations
        self.bbox = bbox
        self.start_time = start_time
        self.end_time = end_time
        self.subsets = subsets
        self.interpolation_method = interpolation_method
        self.resample_point_layer = resample_point_layer
        self.resolution = resolution
        self.output_flowlines = output_flowlines
        self.output_cells = output_cells
        self.output_nodes = output_nodes
        self.output_rasters = output_rasters
        self.parent.iface.messageBar().pushMessage("3Di Custom Statistics",
                                                   "Started aggregating 3Di results",
                                                   level=Qgis.Info,
                                                   duration=3
                                                   )
        self.parent.iface.mainWindow().repaint()  # to show the message before the task starts

    def run(self):
        """Worker-thread entry point (QgsTask contract): must not touch the
        GUI; returns True on success, False on failure."""
        try:
            # aggregate_threedi_results comes from the wildcard import of
            # .threedi_result_aggregation
            self.ogr_ds, self.mem_rasts = aggregate_threedi_results(
                gridadmin=self.grid_admin,
                results_3di=self.results_3di,
                demanded_aggregations=self.demanded_aggregations,
                bbox=self.bbox,
                start_time=self.start_time,
                end_time=self.end_time,
                subsets=self.subsets,
                interpolation_method=self.interpolation_method,
                resample_point_layer=self.resample_point_layer,
                resolution=self.resolution,
                output_flowlines=self.output_flowlines,
                output_cells=self.output_cells,
                output_nodes=self.output_nodes,
                output_rasters=self.output_rasters
            )
            return True
        except Exception as e:
            # exceptions may not propagate out of a QgsTask.run();
            # stash it for finished() to re-raise on the main thread
            self.exception = e
            return False

    def finished(self, result):
        """Main-thread callback (QgsTask contract): safe to touch the GUI.

        :param result: the boolean returned by run()
        """
        if self.exception is not None:
            self.parent.setEnabled(True)
            self.parent.repaint()
            raise self.exception
        if result:
            # Add layers to layer tree
            # They are added in order so the raster is below the polygon is below the line is below the point layer
            # raster layer
            if len(self.mem_rasts) > 0:
                for rastname, rast in self.mem_rasts.items():
                    raster_output_dir = self.parent.mQgsFileWidgetRasterFolder.filePath()
                    raster_output_fn = os.path.join(raster_output_dir, rastname + '.tif')
                    # NOTE(review): gdal is assumed to come from one of the
                    # wildcard imports at the top of the file — confirm
                    drv = gdal.GetDriverByName('GTiff')
                    gdal_tif = drv.CreateCopy(utf8_path=raster_output_fn, src=rast)
                    # dereference to flush the GTiff to disk
                    gdal_tif = None
                    self.parent.iface.addRasterLayer(raster_output_fn,
                                                     "Aggregation results: raster {}".format(rastname))
            # cell layer
            ogr_lyr = self.ogr_ds.GetLayerByName('cell')
            if ogr_lyr is not None:
                if ogr_lyr.GetFeatureCount() > 0:
                    # polygon layer
                    qgs_lyr = as_qgis_memory_layer(ogr_lyr, 'Aggregation results: cells')
                    project = QgsProject.instance()
                    project.addMapLayer(qgs_lyr)
                    style = self.parent.comboBoxCellsStyleType.currentData()
                    style_kwargs = self.parent.get_styling_parameters(output_type=style.output_type)
                    style.apply(qgis_layer=qgs_lyr, style_kwargs=style_kwargs)
            # flowline layer
            ogr_lyr = self.ogr_ds.GetLayerByName('flowline')
            if ogr_lyr is not None:
                if ogr_lyr.GetFeatureCount() > 0:
                    qgs_lyr = as_qgis_memory_layer(ogr_lyr, 'Aggregation results: flowlines')
                    project = QgsProject.instance()
                    project.addMapLayer(qgs_lyr)
                    style = self.parent.comboBoxFlowlinesStyleType.currentData()
                    style_kwargs = self.parent.get_styling_parameters(output_type=style.output_type)
                    style.apply(qgis_layer=qgs_lyr, style_kwargs=style_kwargs)
            # node layer
            ogr_lyr = self.ogr_ds.GetLayerByName('node')
            if ogr_lyr is not None:
                if ogr_lyr.GetFeatureCount() > 0:
                    qgs_lyr = as_qgis_memory_layer(ogr_lyr, 'Aggregation results: nodes')
                    project = QgsProject.instance()
                    project.addMapLayer(qgs_lyr)
                    style = self.parent.comboBoxNodesStyleType.currentData()
                    style_kwargs = self.parent.get_styling_parameters(output_type=style.output_type)
                    style.apply(qgis_layer=qgs_lyr, style_kwargs=style_kwargs)
            # resampled point layer
            ogr_lyr = self.ogr_ds.GetLayerByName('node_resampled')
            if ogr_lyr is not None:
                if ogr_lyr.GetFeatureCount() > 0:
                    qgs_lyr = as_qgis_memory_layer(ogr_lyr, 'Aggregation results: resampled nodes')
                    project = QgsProject.instance()
                    project.addMapLayer(qgs_lyr)
                    # resampled nodes reuse the node styling controls
                    style = self.parent.comboBoxNodesStyleType.currentData()
                    style_kwargs = self.parent.get_styling_parameters(output_type=style.output_type)
                    style.apply(qgis_layer=qgs_lyr, style_kwargs=style_kwargs)
            self.parent.setEnabled(True)
            self.parent.iface.messageBar().pushMessage("3Di Custom Statistics",
                                                       "Finished custom aggregation",
                                                       level=Qgis.Success,
                                                       duration=3
                                                       )
        else:
            self.parent.setEnabled(True)
            self.parent.iface.messageBar().pushMessage("3Di Custom Statistics",
                                                       "Aggregating 3Di results returned no results",
                                                       level=Qgis.Warning,
                                                       duration=3
                                                       )

    def cancel(self):
        """User-initiated cancellation: notify and delegate to QgsTask."""
        self.parent.iface.messageBar().pushMessage("3Di Custom Statistics",
                                                   "Pre-processing simulation results cancelled by user",
                                                   level=Qgis.Info,
                                                   duration=3
                                                   )
        super().cancel()
class ThreeDiCustomStats:
    """QGIS Plugin Implementation."""

    def __init__(self, iface):
        """Constructor.

        :param iface: An interface instance that will be passed to this class
            which provides the hook by which you can manipulate the QGIS
            application at run time.
        :type iface: QgsInterface
        """
        # Save reference to the QGIS interface
        self.iface = iface
        # initialize plugin directory
        self.plugin_dir = os.path.dirname(__file__)
        # initialize locale (first two characters of e.g. 'en_US')
        locale = QSettings().value('locale/userLocale')[0:2]
        locale_path = os.path.join(
            self.plugin_dir,
            'i18n',
            'ThreeDiCustomStats_{}.qm'.format(locale))

        if os.path.exists(locale_path):
            self.translator = QTranslator()
            self.translator.load(locale_path)
            QCoreApplication.installTranslator(self.translator)

        # Declare instance attributes
        self.actions = []
        self.menu = self.tr(u'&3Di Custom Statistics')

        # Check if plugin was started the first time in current QGIS session
        # Must be set in initGui() to survive plugin reloads
        self.first_start = None

        # shared QGIS task manager used to schedule the aggregation task
        self.tm = QgsApplication.taskManager()

    # noinspection PyMethodMayBeStatic
    def tr(self, message):
        """Get the translation for a string using Qt translation API.

        We implement this ourselves since we do not inherit QObject.

        :param message: String for translation.
        :type message: str, QString
        :returns: Translated version of message.
        :rtype: QString
        """
        # noinspection PyTypeChecker,PyArgumentList,PyCallByClass
        return QCoreApplication.translate('ThreeDiCustomStats', message)

    def add_action(
            self,
            icon_path,
            text,
            callback,
            enabled_flag=True,
            add_to_menu=True,
            add_to_toolbar=True,
            status_tip=None,
            whats_this=None,
            parent=None):
        """Add a toolbar icon to the toolbar.

        :param icon_path: Path to the icon for this action. Can be a resource
            path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
        :type icon_path: str
        :param text: Text that should be shown in menu items for this action.
        :type text: str
        :param callback: Function to be called when the action is triggered.
        :type callback: function
        :param enabled_flag: A flag indicating if the action should be enabled
            by default. Defaults to True.
        :type enabled_flag: bool
        :param add_to_menu: Flag indicating whether the action should also
            be added to the menu. Defaults to True.
        :type add_to_menu: bool
        :param add_to_toolbar: Flag indicating whether the action should also
            be added to the toolbar. Defaults to True.
        :type add_to_toolbar: bool
        :param status_tip: Optional text to show in a popup when mouse pointer
            hovers over the action.
        :type status_tip: str
        :param parent: Parent widget for the new action. Defaults None.
        :type parent: QWidget
        :param whats_this: Optional text to show in the status bar when the
            mouse pointer hovers over the action.
        :returns: The action that was created. Note that the action is also
            added to self.actions list.
        :rtype: QAction
        """
        icon = QIcon(icon_path)
        action = QAction(icon, text, parent)
        action.triggered.connect(callback)
        action.setEnabled(enabled_flag)

        if status_tip is not None:
            action.setStatusTip(status_tip)

        if whats_this is not None:
            action.setWhatsThis(whats_this)

        if add_to_toolbar:
            # Adds plugin icon to Plugins toolbar
            self.iface.addToolBarIcon(action)

        if add_to_menu:
            self.iface.addPluginToMenu(
                self.menu,
                action)

        self.actions.append(action)

        return action

    def initGui(self):
        """Create the menu entries and toolbar icons inside the QGIS GUI."""
        icon_path = ':/plugins/threedi_custom_stats/icon.png'
        self.add_action(
            icon_path,
            text=self.tr(u'3Di Custom Statistics'),
            callback=self.run,
            parent=self.iface.mainWindow())

        # will be set False in run()
        self.first_start = True

    def unload(self):
        """Removes the plugin menu item and icon from QGIS GUI."""
        for action in self.actions:
            self.iface.removePluginMenu(
                self.tr(u'&3Di Custom Statistics'),
                action)
            self.iface.removeToolBarIcon(action)

    def run(self):
        """Run method that performs all the real work"""

        # Create the dialog with elements (after translation) and keep reference
        # Only create GUI ONCE in callback, so that it will only load when the plugin is started
        if self.first_start:
            self.first_start = False
            self.dlg = ThreeDiCustomStatsDialog(self.iface)

        # show the dialog
        self.dlg.show()
        # Run the dialog event loop
        result = self.dlg.exec_()
        # See if OK was pressed
        if result:
            # 3Di results
            results_3di = self.dlg.QgsFileWidget3DiResults.filePath()
            grid_admin = self.dlg.QgsFileWidgetGridAdmin.filePath()

            # Filtering parameters
            start_time = self.dlg.doubleSpinBoxStartTime.value()
            end_time = self.dlg.doubleSpinBoxEndTime.value()
            bbox_qgs_rectangle = self.dlg.mExtentGroupBox.outputExtent()  # bbox is now a https://qgis.org/pyqgis/master/core/QgsRectangle.html#qgis.core.QgsRectangle
            # convert the QgsRectangle to a plain [xmin, ymin, xmax, ymax] list
            bbox = None
            if bbox_qgs_rectangle is not None:
                if not bbox_qgs_rectangle.isEmpty():
                    bbox = [bbox_qgs_rectangle.xMinimum(),
                            bbox_qgs_rectangle.yMinimum(),
                            bbox_qgs_rectangle.xMaximum(),
                            bbox_qgs_rectangle.yMaximum()]
            # collect the subset names for every checked subset checkbox
            subsets = []
            if self.dlg.checkBox1D2DConnections.isChecked():
                subsets.append('1D2D')
            if self.dlg.checkBoxAll1D.isChecked():
                subsets.append('All1D')
            if self.dlg.checkBoxAll2D.isChecked():
                subsets.append('All2D')
            if self.dlg.checkBoxAllSewerage.isChecked():
                subsets.append('AllSewerage')
            if self.dlg.checkBoxCulverts.isChecked():
                subsets.append('Culverts')
            if self.dlg.checkBoxOrifices.isChecked():
                subsets.append('Orifices')
            if self.dlg.checkBoxPipes.isChecked():
                subsets.append('Pipes')
            if self.dlg.checkBoxWeirs.isChecked():
                subsets.append('Weirs')

            # Resolution
            resolution = self.dlg.doubleSpinBoxResolution.value()

            # Outputs
            output_flowlines = self.dlg.groupBoxFlowlines.isChecked()
            output_nodes = self.dlg.groupBoxNodes.isChecked()
            output_cells = self.dlg.groupBoxCells.isChecked()
            output_rasters = self.dlg.groupBoxRasters.isChecked()

            # Resample point layer
            resample_point_layer = self.dlg.checkBoxResample.isChecked()
            if resample_point_layer:
                interpolation_method = 'linear'
            else:
                interpolation_method = None

            # hand the heavy lifting off to a background task; the task
            # disables the dialog and re-enables it when finished
            aggregate_threedi_results_task = Aggregate3DiResults(
                description='Aggregate 3Di Results',
                parent=self.dlg,
                gridadmin=grid_admin,
                results_3di=results_3di,
                demanded_aggregations=self.dlg.demanded_aggregations,
                bbox=bbox,
                start_time=start_time,
                end_time=end_time,
                subsets=subsets,
                interpolation_method=interpolation_method,
                resample_point_layer=resample_point_layer,
                resolution=resolution,
                output_flowlines=output_flowlines,
                output_cells=output_cells,
                output_nodes=output_nodes,
                output_rasters=output_rasters
            )

            self.tm.addTask(aggregate_threedi_results_task)
|
import abc
import marshal
import dictdiffer
import six
def format_diff(differ, diff):
    """
    Package a differ and its diff into the dict shape used for mongo storage.

    :param differ: the differ object that produced the diff
    :param diff: the diff itself
    :return: a dict with the differ's id under u'id' and the diff under u'd'
    """
    stored = {u'id': differ.differ_id}
    stored[u'd'] = diff
    return stored
def extract_diff(raw_diff):
    """
    Inverse of format_diff: resolve a stored diff back to its differ and diff.

    :param raw_diff: the diff dict as stored in mongo
    :return: a 2-tuple of (differ object, diff)
    """
    differ_id = raw_diff[u'id']
    diff = raw_diff[u'd']
    return differs[differ_id], diff
@six.add_metaclass(abc.ABCMeta)
class Differ(object):
    """
    Base interface that every diff/patch implementation must provide.
    """

    def __init__(self, differ_id):
        """
        :param differ_id: identifier stored alongside every diff this differ
                          produces, used later to resolve the differ again
        """
        self.differ_id = differ_id

    @abc.abstractmethod
    def can_diff(self, data):
        """
        Report whether this differ is able to diff the given data.

        :param data: the candidate data
        :return: True or False
        """

    @abc.abstractmethod
    def diff(self, old, new, ignore=None):
        """
        Produce a diff which, when handed to patch, transforms the old data
        state into the new data state. Keys listed in ignore are skipped.

        :param old: the old data
        :param new: the new data
        :param ignore: keys to ignore (a list or set)
        :return: the diff
        """

    @abc.abstractmethod
    def patch(self, diff_result, old, in_place=False):
        """
        Apply a diff (as returned by this differ's diff method) to the old
        data. With in_place=True the old data is mutated and returned;
        otherwise (the default) it is copied before patching.

        :param diff_result: the diff to apply
        :param old: the old data
        :param in_place: whether to update the old data in place (default: False)
        :return: the updated data
        """
class DictDifferDiffer(Differ):
    """
    Differ backed by the dictdiffer library. Registered under the id 'dd'.
    """

    def __init__(self):
        super(DictDifferDiffer, self).__init__(u'dd')

    def can_diff(self, data):
        """
        Any dict is fair game for dictdiffer.

        :param data: the data to check
        :return: True
        """
        return True

    def diff(self, old, new, ignore=None):
        """
        Delegate to dictdiffer.diff and materialise the result as a list.
        The ignore parameter is forwarded untouched, so see the dictdiffer
        documentation for the format it expects.

        :param old: the old data
        :param new: the new data
        :param ignore: the keys to ignore
        :return: the diff as a list
        """
        return list(dictdiffer.diff(old, new, ignore=ignore))

    def patch(self, diff_result, old, in_place=False):
        """
        Apply a dictdiffer diff to the old data. With in_place=True the old
        data is mutated and returned; otherwise it is copied first. The copy
        uses a marshal round-trip rather than copy.deepcopy (as dictdiffer
        itself does) because it is the fastest way to copy plain data.

        :param diff_result: the diff to apply
        :param old: the old data
        :param in_place: whether to update the old data in place (default: False)
        :return: the updated data
        """
        if not in_place:
            old = marshal.loads(marshal.dumps(old))
        return dictdiffer.patch(diff_result, old, in_place=True)
class ShallowDiffer(Differ):
    """
    Differ restricted to dicts without nested dicts. That restriction lets it
    patch with a plain dict.update, which is very fast. Registered under the
    id 'sd'.
    """

    def __init__(self):
        super(ShallowDiffer, self).__init__(u'sd')

    def can_diff(self, data):
        """
        Diffing is only supported when no value in the dict is itself a dict.

        :param data: the data to check
        :return: True if the data dict contains no nested dicts
        """
        return not any(isinstance(value, dict) for value in data.values())

    def diff(self, old, new, ignore=None):
        """
        Diff two flat dicts. The result is a dict with up to two keys:

          - 'r': a list of keys that were removed
          - 'c': a dict of added/changed key -> new value

        Keys present in ignore are excluded from the comparison.

        :param old: the old data
        :param new: the new data
        :param ignore: the keys to ignore
        """
        ignored = set(ignore) if ignore is not None else set()
        new_keys = set(new.keys()) - ignored
        result = {}
        removed = set(old.keys()) - new_keys
        if removed:
            result[u'r'] = list(removed)
        # a key is recorded when it is brand new or its value differs
        changed = {key: new[key]
                   for key in new_keys
                   if key not in old or old[key] != new[key]}
        if changed:
            result[u'c'] = changed
        return result

    def patch(self, diff_result, old, in_place=False):
        """
        Apply a diff produced by this differ: delete the removed keys and
        dict.update the changes in. With in_place=True the old data is
        mutated and returned; otherwise it is copied first via a marshal
        round-trip (for speed).

        :param diff_result: the diff to apply
        :param old: the old data
        :param in_place: whether to update the old data in place (default: False)
        :return: the updated data
        """
        if not in_place:
            old = marshal.loads(marshal.dumps(old))
        for removed_key in diff_result.get(u'r', []):
            del old[removed_key]
        old.update(diff_result.get(u'c', {}))
        return old
# the differs, instantiated globally for ease of use
SHALLOW_DIFFER = ShallowDiffer()
DICT_DIFFER_DIFFER = DictDifferDiffer()
# a dict of all the differs, instantiated and keyed by their ids;
# extract_diff() uses this to resolve the differ id stored with each diff
differs = {differ.differ_id: differ for differ in [SHALLOW_DIFFER, DICT_DIFFER_DIFFER]}
|
<filename>data_processing/eccv2020-sharp-workshop/sharp/utils.py
import copy
import numbers
import cv2
import numpy as np
try:
from scipy.spatial import cKDTree as KDTree
except ImportError:
from scipy.spatial import KDTree
from .trirender import UVTrianglesRenderer
def slice_by_plane(mesh, center, n):
    """Split mesh vertex indices by the plane through `center` with normal `n`.

    :param mesh: object with a `vertices` array of shape (N, 3)
    :param center: a point on the cutting plane
    :param n: the plane normal vector
    :return: (slice1_indices, slice2_indices) — np.argwhere-style (k, 1) index
        arrays of vertices on the non-negative side and the negative side
    """
    c = np.dot(center, n)
    # vectorized signed-distance test replaces the original per-vertex
    # Python loop (O(N) numpy call instead of N lambda invocations)
    split = np.asarray(mesh.vertices) @ np.asarray(n) >= c
    slice1_indices = np.argwhere(split)
    slice2_indices = np.argwhere(~split)
    return slice1_indices, slice2_indices
def remove_points(mesh, indices, blackoutTexture=True):
    """Return a deep copy of *mesh* with the vertices at *indices* removed.

    Faces touching any removed vertex are dropped, and the remaining face /
    texcoord / normal index arrays are re-numbered so they stay consistent.
    If *blackoutTexture* is True the texture is re-rendered from the kept
    UV triangles (regions no longer referenced become black) and dilated.

    NOTE(review): the re-numbering via ``np.sum(x >= indices)`` assumes
    *indices* is an array-like of the removed vertex indices — confirm
    callers pass a sorted, duplicate-free collection.
    """
    cpy = copy.deepcopy(mesh)
    cpy.vertices = np.delete(mesh.vertices, indices, axis=0)
    if mesh.vertex_colors is not None:
        cpy.vertex_colors = np.delete(mesh.vertex_colors, indices, axis=0)
    if mesh.vertex_normals is not None:
        cpy.vertex_normals = np.delete(
            mesh.vertex_normals, indices, axis=0)
    if mesh.faces is not None:
        # faces that reference at least one removed vertex
        face_indices = np.where(
            np.any(np.isin(mesh.faces[:], indices, assume_unique=False),
                   axis=1)
        )[0]
        cpy.faces = np.delete(mesh.faces, face_indices, axis=0)
        # shift each kept vertex index down by the number of removed
        # vertices that preceded it
        fix_indices = np.vectorize(
            lambda x: np.sum(x >= indices))(cpy.faces)
        cpy.faces -= fix_indices
        if mesh.face_normals is not None:
            cpy.face_normals = np.delete(
                mesh.face_normals, face_indices, axis=0)
        unused_uv = None
        if mesh.texture_indices is not None:
            cpy.texture_indices = np.delete(
                mesh.texture_indices, face_indices, axis=0)
            # texcoords no longer referenced by any remaining face
            used_uv = np.unique(cpy.texture_indices.flatten())
            all_uv = np.arange(len(mesh.texcoords))
            unused_uv = np.setdiff1d(all_uv, used_uv, assume_unique=True)
            # re-number kept texcoord indices (same shift trick as faces)
            fix_uv_idx = np.vectorize(
                lambda x: np.sum(x >= unused_uv))(cpy.texture_indices)
            cpy.texture_indices -= fix_uv_idx
            cpy.texcoords = np.delete(mesh.texcoords, unused_uv, axis=0)
            # render texture
            if blackoutTexture:
                tri_indices = cpy.texture_indices
                tex_coords = cpy.texcoords
                img = render_texture(mesh.texture, tex_coords, tri_indices)
                # dilate the result to remove sewing
                kernel = np.ones((3, 3), np.uint8)
                texture_f32 = cv2.dilate(img, kernel, iterations=1)
                cpy.texture = texture_f32.astype(np.float64)
        if mesh.faces_normal_indices is not None:
            cpy.faces_normal_indices = np.delete(
                mesh.faces_normal_indices, face_indices, axis=0)
            used_ni = np.unique(cpy.faces_normal_indices.flatten())
            all_ni = np.arange(len(mesh.face_normals))
            unused_ni = np.setdiff1d(all_ni, used_ni, assume_unique=True)
            # NOTE(review): this branch uses ``x > unused_ni`` while the uv
            # fix-up above uses ``>=`` — equivalent only because a kept index
            # never equals a removed one; confirm intended
            fix_ni_idx = np.vectorize(lambda x: np.sum(
                x > unused_ni))(cpy.faces_normal_indices)
            cpy.faces_normal_indices -= fix_ni_idx
            cpy.face_normals = np.delete(
                mesh.face_normals, unused_ni, axis=0)
    return cpy
def render_texture(texture, tex_coords, tri_indices):
    """Render the texture restricted to the given UV triangles.

    :param texture: texture image; an RGBA image is reduced to RGB, and a
                    grayscale (2-D) image is expanded to three identical
                    channels
    :param tex_coords: UV coordinates, one per texture vertex
    :param tri_indices: indices into *tex_coords*, one triple per triangle
    :return: the rendered texture image
    """
    if len(texture.shape) == 3 and texture.shape[2] == 4:
        # drop the alpha channel
        texture = texture[:, :, 0:3]
    elif len(texture.shape) == 2:
        # BUG FIX: np.concatenate(..., axis=2) raises on 2-D arrays; stack
        # the grayscale image into three identical channels instead
        texture = np.stack([texture, texture, texture], axis=2)
    renderer = UVTrianglesRenderer.with_standalone_ctx(
        (texture.shape[1], texture.shape[0])
    )
    return renderer.render(tex_coords, tri_indices, texture, True)
def estimate_plane(a, b, c):
    """Compute the plane passing through the three points a, b and c.

    Returns:
        center(float): The center point of the three input points.
        normal(float): The normal to the plane.
    """
    edge1 = b - a
    edge2 = c - a
    normal = np.cross(edge1, edge2)
    center = (a + b + c) / 3
    # sanity check: the normal must be orthogonal to both in-plane edges
    assert (np.isclose(np.dot(edge1, normal), np.dot(edge2, normal)))
    return center, normal
def shoot_holes(vertices, n_holes, dropout, mask_faces=None, faces=None,
                rng=None):
    """Generate a partial shape by cutting holes of random location and size.

    Each hole is created by selecting a random point as the center and removing
    the k nearest-neighboring points around it.

    Args:
        vertices: The array of vertices of the mesh.
        n_holes (int or (int, int)): Number of holes to create, or bounds from
            which to randomly draw the number of holes.
        dropout (float or (float, float)): Proportion of points (with respect
            to the total number of points) in each hole, or bounds from which
            to randomly draw the proportions (a different proportion is drawn
            for each hole).
        mask_faces: A boolean mask on the faces. 1 to keep, 0 to ignore. If
            set, the centers of the holes are sampled only on the
            non-masked regions.
        faces: The array of faces of the mesh. Required only when `mask_faces`
            is set.
        rng: (optional) An initialised np.random.Generator object. If None, a
            default Generator is created.

    Returns:
        array: Indices of the points defining the holes.
    """
    if rng is None:
        rng = np.random.default_rng()
    if not isinstance(n_holes, numbers.Integral):
        n_holes_min, n_holes_max = n_holes
        # NOTE(review): upper bound is exclusive here (Generator.integers
        # default) — confirm that is the intended sampling
        n_holes = rng.integers(n_holes_min, n_holes_max)
    if mask_faces is not None:
        valid_vertex_indices = np.unique(faces[mask_faces > 0])
        valid_vertices = vertices[valid_vertex_indices]
    else:
        valid_vertices = vertices
    # Select random hole centers.
    center_indices = rng.choice(len(valid_vertices), size=n_holes)
    centers = valid_vertices[center_indices]
    n_vertices = len(valid_vertices)
    if isinstance(dropout, numbers.Number):
        # BUG FIX: n_vertices * dropout is a float, but KDTree.query requires
        # an integral neighbour count k — truncate to int.
        hole_size = int(n_vertices * dropout)
        hole_sizes = [hole_size] * n_holes
    else:
        # BUG FIX (same issue): make the sampling bounds integral so the
        # drawn hole sizes are valid k values for KDTree.query.
        hole_size_bounds = (n_vertices * np.asarray(dropout)).astype(int)
        hole_sizes = rng.integers(*hole_size_bounds, size=n_holes)
    # Identify the points indices making up the holes.
    kdtree = KDTree(vertices, leafsize=200)
    to_crop = []
    for center, size in zip(centers, hole_sizes):
        _, indices = kdtree.query(center, k=size)
        to_crop.append(indices)
    to_crop = np.unique(np.concatenate(to_crop))
    return to_crop
|
<gh_stars>1-10
import pygame
class MouseInput:
    """Static polling wrapper around the pygame mouse state.

    ``pressed[i]`` counts for how many consecutive updates button *i* has
    been held down; 0 means released.
    """
    mouse_pos = pygame.mouse.get_pos()
    mouse_buttons = pygame.mouse.get_pressed()
    pressed = [0, 0, 0]

    @staticmethod
    def _update():
        """Refresh the cached mouse state; call once per frame."""
        MouseInput.mouse_pos = pygame.mouse.get_pos()
        MouseInput.mouse_buttons = pygame.mouse.get_pressed()
        for button in range(3):
            held = MouseInput.mouse_buttons[button]
            MouseInput.pressed[button] = MouseInput.pressed[button] + 1 if held else 0

    @staticmethod
    def is_pressed(index):
        """True while button *index* is held down."""
        return MouseInput.pressed[index] > 0

    @staticmethod
    def is_released(index):
        """True while button *index* is up."""
        return MouseInput.pressed[index] == 0

    @staticmethod
    def is_pressed_once(index):
        """True only on the first update after button *index* went down."""
        return MouseInput.pressed[index] == 1

    @staticmethod
    def get_pos():
        """Return the cached (x, y) mouse position."""
        return MouseInput.mouse_pos

    @staticmethod
    def get_x():
        """Return the cached mouse x coordinate."""
        return MouseInput.mouse_pos[0]

    @staticmethod
    def get_y():
        """Return the cached mouse y coordinate."""
        return MouseInput.mouse_pos[1]
class Keys:
    """Mapping between this module's dense key indices and pygame key codes.

    ``pygame_keys[i]`` is the pygame constant for index ``i``; the ``K_*``
    class attributes are those indices.
    """
    pygame_keys = [
        pygame.K_BACKSPACE, pygame.K_TAB, pygame.K_CLEAR, pygame.K_RETURN,
        pygame.K_PAUSE, pygame.K_ESCAPE, pygame.K_SPACE, pygame.K_EXCLAIM,
        pygame.K_QUOTEDBL, pygame.K_HASH, pygame.K_DOLLAR, pygame.K_AMPERSAND,
        pygame.K_QUOTE, pygame.K_LEFTPAREN, pygame.K_RIGHTPAREN,
        pygame.K_ASTERISK, pygame.K_PLUS, pygame.K_COMMA, pygame.K_MINUS,
        pygame.K_PERIOD, pygame.K_SLASH,
        pygame.K_0, pygame.K_1, pygame.K_2, pygame.K_3, pygame.K_4,
        pygame.K_5, pygame.K_6, pygame.K_7, pygame.K_8, pygame.K_9,
        pygame.K_COLON, pygame.K_SEMICOLON, pygame.K_LESS, pygame.K_EQUALS,
        pygame.K_GREATER, pygame.K_QUESTION, pygame.K_AT,
        # BUG FIX: index 38 (K_LEFTBRACKET) was a duplicated K_LEFTPAREN
        pygame.K_LEFTBRACKET,
        pygame.K_BACKSLASH, pygame.K_RIGHTBRACKET, pygame.K_CARET,
        pygame.K_UNDERSCORE, pygame.K_BACKQUOTE,
        pygame.K_a, pygame.K_b, pygame.K_c, pygame.K_d, pygame.K_e,
        pygame.K_f, pygame.K_g, pygame.K_h, pygame.K_i, pygame.K_j,
        pygame.K_k, pygame.K_l, pygame.K_m, pygame.K_n, pygame.K_o,
        pygame.K_p, pygame.K_q, pygame.K_r, pygame.K_s, pygame.K_t,
        pygame.K_u, pygame.K_v, pygame.K_w, pygame.K_x, pygame.K_y,
        pygame.K_z,
        pygame.K_DELETE,
        pygame.K_KP0, pygame.K_KP1, pygame.K_KP2, pygame.K_KP3, pygame.K_KP4,
        pygame.K_KP5, pygame.K_KP6, pygame.K_KP7, pygame.K_KP8, pygame.K_KP9,
        pygame.K_KP_PERIOD, pygame.K_KP_DIVIDE, pygame.K_KP_MULTIPLY,
        pygame.K_KP_MINUS, pygame.K_KP_PLUS, pygame.K_KP_ENTER,
        # BUG FIX: index 87 (K_KP_EQUALS) was pygame.K_EQUALS
        pygame.K_KP_EQUALS,
        pygame.K_UP, pygame.K_DOWN, pygame.K_RIGHT, pygame.K_LEFT,
        pygame.K_INSERT, pygame.K_HOME, pygame.K_END, pygame.K_PAGEUP,
        pygame.K_PAGEDOWN,
        pygame.K_F1, pygame.K_F2, pygame.K_F3, pygame.K_F4, pygame.K_F5,
        pygame.K_F6, pygame.K_F7, pygame.K_F8, pygame.K_F9, pygame.K_F10,
        pygame.K_F11, pygame.K_F12, pygame.K_F13, pygame.K_F14, pygame.K_F15,
        pygame.K_NUMLOCK, pygame.K_CAPSLOCK, pygame.K_SCROLLOCK,
        pygame.K_RSHIFT, pygame.K_LSHIFT, pygame.K_RCTRL, pygame.K_LCTRL,
        pygame.K_RALT, pygame.K_LALT, pygame.K_RMETA, pygame.K_LMETA,
        pygame.K_LSUPER, pygame.K_RSUPER, pygame.K_MODE, pygame.K_HELP,
        pygame.K_PRINT, pygame.K_SYSREQ, pygame.K_BREAK, pygame.K_MENU,
        pygame.K_POWER, pygame.K_EURO,
    ]
    # BUG FIX: was hard-coded to 132, which disagreed with the 133-entry
    # list above and made update loops skip the last key (K_EURO, index 132).
    KEYS_QUANTITY = len(pygame_keys)  # 133
    # Sequential index constants; range() keeps them consistent with the
    # list order by construction.
    (K_BACKSPACE, K_TAB, K_CLEAR, K_RETURN, K_PAUSE, K_ESCAPE, K_SPACE,
     K_EXCLAIM, K_QUOTEDBL, K_HASH, K_DOLLAR, K_AMPERSAND, K_QUOTE,
     K_LEFTPAREN, K_RIGHTPAREN, K_ASTERISK, K_PLUS, K_COMMA, K_MINUS,
     K_PERIOD, K_SLASH,
     K_0, K_1, K_2, K_3, K_4, K_5, K_6, K_7, K_8, K_9,
     K_COLON, K_SEMICOLON, K_LESS, K_EQUALS, K_GREATER, K_QUESTION, K_AT,
     K_LEFTBRACKET, K_BACKSLASH, K_RIGHTBRACKET, K_CARET, K_UNDERSCORE,
     K_BACKQUOTE,
     K_A, K_B, K_C, K_D, K_E, K_F, K_G, K_H, K_I, K_J, K_K, K_L, K_M,
     K_N, K_O, K_P, K_Q, K_R, K_S, K_T, K_U, K_V, K_W, K_X, K_Y, K_Z,
     K_DELETE,
     K_KP0, K_KP1, K_KP2, K_KP3, K_KP4, K_KP5, K_KP6, K_KP7, K_KP8, K_KP9,
     K_KP_PERIOD, K_KP_DIVIDE, K_KP_MULTIPLY, K_KP_MINUS, K_KP_PLUS,
     K_KP_ENTER, K_KP_EQUALS,
     K_UP, K_DOWN, K_RIGHT, K_LEFT, K_INSERT, K_HOME, K_END, K_PAGEUP,
     K_PAGEDOWN,
     K_F1, K_F2, K_F3, K_F4, K_F5, K_F6, K_F7, K_F8, K_F9, K_F10, K_F11,
     K_F12, K_F13, K_F14, K_F15,
     K_NUMLOCK, K_CAPSLOCK, K_SCROLLOCK, K_RSHIFT, K_LSHIFT, K_RCTRL,
     K_LCTRL, K_RALT, K_LALT, K_RMETA, K_LMETA, K_LSUPER, K_RSUPER,
     K_MODE, K_HELP, K_PRINT, K_SYSREQ, K_BREAK, K_MENU, K_POWER,
     K_EURO) = range(133)
class KeyInput:
    """Static polling wrapper around the pygame keyboard state.

    ``keys_count[i]`` counts for how many consecutive updates key *i*
    (an index into ``Keys.pygame_keys``) has been held down; 0 means released.
    """
    keys = pygame.key.get_pressed()
    keys_count = [0] * Keys.KEYS_QUANTITY

    @staticmethod
    def _update():
        """Refresh the cached key snapshot; call once per frame."""
        KeyInput.keys = pygame.key.get_pressed()
        for index in range(Keys.KEYS_QUANTITY):
            down = KeyInput.keys[Keys.pygame_keys[index]]
            KeyInput.keys_count[index] = KeyInput.keys_count[index] + 1 if down else 0

    @staticmethod
    def is_pressed(key_index):
        """True while the key is held down."""
        return KeyInput.keys_count[key_index] >= 1

    @staticmethod
    def is_pressed_once(key_index):
        """True only on the first update after the key went down."""
        return KeyInput.keys_count[key_index] == 1

    @staticmethod
    def is_released(key_index):
        """True while the key is up."""
        return KeyInput.keys_count[key_index] == 0
class EventInput:
    # Placeholder for event-based input handling; not implemented yet.
    pass
|
# -*- coding: utf-8 -*-
# Author:Guzhongren
# created: 2017-04-28
import os
import sys
import arcpy
from PIL import Image
reload(sys)
sys.setdefaultencoding("utf-8")
# Convert a 4-band tif image to a 3-band image -- unused
def band4_2_band3_raster(band4_raster_path, temp_file_path):
    # NOTE(review): Python 2 module (str.decode/encode, print statements
    # elsewhere); paths are re-encoded to GBK for arcpy on Windows.
    raster_arr = arcpy.RasterToNumPyArray(
        band4_raster_path.decode("utf8").encode("gbk"))
    # keep only the first three bands — assumes a band-first (bands, rows,
    # cols) array layout; TODO confirm against arcpy.RasterToNumPyArray
    raster_arr_3d = raster_arr[:3, :, :]
    print(band4_raster_path + "ssss")
    # derive the output file name from the last path component
    path_pices = band4_raster_path.split("\\")
    file_name = path_pices[len(path_pices) - 1].split(".")[0]
    new_raster = arcpy.NumPyArrayToRaster(raster_arr_3d)
    new_raster.save(temp_file_path + "\\" + file_name + ".tif")
    del raster_arr, raster_arr_3d, new_raster
    return temp_file_path
# Return the number of bands in an image
def getband_count(input_raster_path):
    raster = arcpy.Raster(input_raster_path)
    print(u"获取的波段数:" + str(raster.bandCount))
    return raster.bandCount
# Generate a thumbnail for a tif image (crops the top-left 380x380 region)
def generate_tif_thumbnail(input_raster_path, target_path):
    print u"开始生成tif " + input_raster_path + u" 的缩略图......"
    img = Image.open(input_raster_path)
    # crop instead of Image.thumbnail — tif sources here are too large to
    # resample directly; TODO confirm 380x380 is the desired thumbnail size
    region = img.crop((0, 0, 380, 380))
    #img.thumbnail(img.size, Image.ANTIALIAS)
    # img.save(target_path + img.filename.split(parent)
    # [1].split(".")[0] + ".jpeg", "JPEG")
    # derive the output file name from the last path component
    path_pices = img.filename.split("\\")
    file_name = path_pices[len(path_pices) - 1].split(".")[0]
    region.save(target_path + "\\" + file_name + ".jpg", "JPEG")
    del path_pices, file_name,region
    print u"成功,生成"+img.filename+"..."
    del img
# Generate a thumbnail for a jpg image; raster_path must be a 3-band image
# or a jpg
def generate_thumbnail(input_raster_path, target_path):
    print u"开始生成" + input_raster_path + u" 的缩略图......"
    img = Image.open(input_raster_path)
    # NOTE(review): thumbnail() is called with the image's own size, so no
    # downscaling actually happens — confirm whether a smaller target size
    # was intended
    img.thumbnail(img.size, Image.ANTIALIAS)
    # img.save(target_path + img.filename.split(parent)
    # [1].split(".")[0] + ".jpeg", "JPEG")
    # derive the output file name from the last path component
    path_pices = img.filename.split("\\")
    file_name = path_pices[len(path_pices) - 1].split(".")[0]
    img.save(target_path + "\\" + file_name + ".jpg", "JPEG")
    del path_pices, file_name
    print u"成功,生成"+img.filename+"..."
    del img
def for_each_file(folder_path, target_path):
    # Walk folder_path and generate a thumbnail for every recognised image.
    # NOTE(review): relies on the module-level `img_type` list assigned in
    # the __main__ block below — confirm before importing this module.
    try:
        for parent, dirnames, filenames in os.walk(folder_path):
            for filename in filenames:
                file_path = os.path.join(
                    parent.decode("gbk"), filename.decode("gbk"))
                # dispatch on extension: [0] is ".jpg", [1] is ".tif"
                if img_type[0] in filename:
                    generate_thumbnail(file_path, target_path)
                elif img_type[1] in filename:
                    generate_tif_thumbnail(file_path, target_path)
                else:
                    print(u"未知类型的数据不能生成缩略图")
    # Python 2 except syntax; errors are printed, not re-raised
    except Exception, e:
        print e
if __name__ == "__main__":
    # Folder holding the source images
    folder_path = unicode(r"C:\geocon\解译样本", "utf8").encode("gbk")
    # Folder that receives the generated thumbnails
    target_path = unicode(r"C:\geocon\thumbile", "utf8").encode("gbk")
    img_type = [".jpg", ".tif"]  # extensions handled by for_each_file
    for_each_file(folder_path, target_path)
|
<reponame>timofriedl/multicam-ihgg
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor
from torchvision.utils import make_grid, save_image
"""
A bunch of helpful functions for VAE training and general tensor / array conversions.
Original author: <NAME>
Heavily modified by <NAME>
"""
# The device used for VAE training: prefer the GPU when one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
def images_to_tensor(images: np.ndarray) -> Tensor:
    """Convert a batch of rgb images shaped [num_images, height, width, 3]
    into a float32 tensor shaped [num_images, 3, height, width] with values
    scaled to [0, 1], placed on the module-level device.

    :param images: the numpy array of images to convert
    :return: the converted pytorch tensor
    """
    # channels-last -> channels-first, then scale uint8 [0, 255] to [0, 1]
    scaled = images.transpose((0, 3, 1, 2)) / 255.0
    return torch.tensor(scaled, dtype=torch.float32).to(device)
def images_to_tensor_cpu(images: np.ndarray) -> Tensor:
    """Convert a batch of rgb images shaped [num_images, height, width, 3]
    into a float32 tensor shaped [num_images, 3, height, width] with values
    scaled to [0, 1], kept on the cpu.

    :param images: the numpy array of images to convert
    :return: the converted pytorch tensor
    """
    # channels-last -> channels-first, then scale uint8 [0, 255] to [0, 1]
    scaled = images.transpose((0, 3, 1, 2)) / 255.0
    return torch.tensor(scaled, dtype=torch.float32).to('cpu')
def np_to_tensor(array: np.ndarray) -> Tensor:
    """Convert a numpy array into a float32 tensor on the module-level device.

    :param array: the numpy array to convert
    :return: the converted pytorch tensor
    """
    converted = torch.tensor(array, dtype=torch.float32)
    return converted.to(device)
def tensor_to_np(tensor: Tensor) -> np.ndarray:
    """Convert a pytorch tensor into the corresponding numpy array.

    :param tensor: the pytorch tensor to convert
    :return: the converted numpy array
    """
    host_tensor = tensor.detach().cpu()
    return host_tensor.numpy()
def image_to_tensor(image: np.ndarray) -> Tensor:
    """Convert a single rgb image shaped [height, width, 3] into a
    one-element batch tensor shaped [1, 3, height, width] with values scaled
    to [0, 1], placed on the module-level device.

    :param image: the numpy array to convert
    :return: the converted pytorch tensor
    """
    height, width = image.shape[0], image.shape[1]
    batch = image.copy().reshape((1, height, width, 3))
    # channels-last -> channels-first, then scale uint8 [0, 255] to [0, 1]
    batch = batch.transpose((0, 3, 1, 2)) / 255.0
    return torch.tensor(batch, dtype=torch.float32).to(device)
def tensor_to_image(tensor: Tensor) -> np.ndarray:
    """Convert a one-element batch tensor shaped [1, 3, height, width] into a
    uint8 rgb image shaped [height, width, 3].

    :param tensor: the pytorch tensor to convert
    :return: the converted numpy array
    """
    pixels = tensor.detach().cpu().numpy()
    # clamp to [0, 1] before quantising to 8-bit
    pixels = np.clip(pixels, 0., 1.)
    pixels = np.floor(255 * pixels).astype(np.uint8).reshape(tensor.shape[1:])
    return pixels.transpose(1, 2, 0)
def save_examples(examples: Tensor, name: str, img_format="jpeg"):
    """Save a batch of training images as a single grid image under images/.

    :param examples: the batch of images with shape [num_images, 3, height, width]
    :param img_format: the image format as a string, use "png" for lossless
                       compression
    :param name: the base name of the output file
    """
    grid = make_grid(torch.clamp(examples.detach(), 0, 1))
    save_image(grid, "images/{0}.{1}".format(name, img_format))
def loss_fn(x: Tensor, x_rec: Tensor, mu: Tensor, logvar: Tensor, alpha=10., beta=1.) -> (float, float, float):
    """The VAE training loss function.

    :param x: the batch of original input images
    :param x_rec: the batch of reconstructions of the input images
    :param mu: the batch of mu values from the network encoding
    :param logvar: the batch of logvar values from the network encoding
    :param alpha: a weight factor hyperparameter
    :param beta: a weight factor hyperparameter
    :return: the weighted mean square error, kl loss, and sum of both
    """
    batch_size = x.size(0)
    # KL divergence of N(mu, exp(logvar)) against the unit gaussian
    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=-1).mean()
    # per-sample reconstruction error: sum over all elements, mean over batch
    rec = F.mse_loss(x_rec, x, reduction='none').view(batch_size, -1).sum(dim=-1).mean()
    weighted_rec = alpha * rec
    weighted_kl = beta * kl
    return weighted_rec, weighted_kl, weighted_rec + weighted_kl
def loss_fn_weighted(x: Tensor, x_rec: Tensor, mu: Tensor, logvar: Tensor, alpha=10., beta=1.) -> (float, float, float):
    """VAE training loss that doubles the reconstruction weight of image
    rows 40 and below (expects 4-D [batch, channels, height, width] inputs).

    :param x: the batch of original input images
    :param x_rec: the batch of reconstructions of the input images
    :param mu: the batch of mu values from the network encoding
    :param logvar: the batch of logvar values from the network encoding
    :param alpha: a weight factor hyperparameter
    :param beta: a weight factor hyperparameter
    :return: the weighted mean square error, kl loss, and sum of both
    """
    batch_size = x.size(0)
    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=-1).mean()
    per_pixel = F.mse_loss(x_rec, x, reduction='none')
    # emphasise the lower part of the image (rows >= 40)
    weights = torch.ones_like(per_pixel)
    weights[:, :, 40:, :] = 2.
    rec = (per_pixel * weights).view(batch_size, -1).sum(dim=-1).mean()
    weighted_rec = alpha * rec
    weighted_kl = beta * kl
    return weighted_rec, weighted_kl, weighted_rec + weighted_kl
def loss_fn_weighted2(x: Tensor, x_rec: Tensor, mu: Tensor, logvar: Tensor, alpha=10., beta=1.) -> (
        float, float, float):
    """VAE training loss that doubles the reconstruction weight of image rows
    above 60; flat 2-D batches are first lifted to [batch, 1, 1, features].

    :param x: the batch of original input images
    :param x_rec: the batch of reconstructions of the input images
    :param mu: the batch of mu values from the network encoding
    :param logvar: the batch of logvar values from the network encoding
    :param alpha: a weight factor hyperparameter
    :param beta: a weight factor hyperparameter
    :return: the weighted mean square error, kl loss, and sum of both
    """
    batch_size = x.size(0)
    if len(x.shape) == 2:
        # lift flat inputs to 4-D so the row weighting below applies
        x = x.view(batch_size, 1, 1, -1)
        x_rec = x_rec.view(batch_size, 1, 1, -1)
    kl = -0.5 * (1 + logvar - mu.pow(2) - logvar.exp()).sum(dim=-1).mean()
    per_pixel = F.mse_loss(x_rec, x, reduction='none')
    # emphasise the upper part of the image (rows < 60)
    weights = torch.ones_like(per_pixel)
    weights[:, :, :60, :] = 2.
    rec = (per_pixel * weights).view(batch_size, -1).sum(dim=-1).mean()
    weighted_rec = alpha * rec
    weighted_kl = beta * kl
    return weighted_rec, weighted_kl, weighted_rec + weighted_kl
def tensor_wrap(x: Tensor) -> Tensor:
    """Add a leading dimension of size one, turning a tensor into a
    one-element batch.

    Example:
        tensor_wrap(tensor([1., 2., 3.])) = tensor([[1., 2., 3.]])
    :param x: the pytorch tensor to wrap
    :return: the wrapped pytorch tensor
    """
    return x[None]
|
<reponame>ArneBinder/nlp-formats
import glob
from abc import ABC, abstractmethod
from dataclasses import dataclass
from os import path
import nlp
@dataclass
class BratConfig(nlp.BuilderConfig):
    """BuilderConfig for BRAT."""
    # file extension of the standoff annotation files
    ann_file_extension: str = 'ann'
    # file extension of the plain-text context files
    txt_file_extension: str = 'txt'
class AbstractBrat(nlp.GeneratorBasedBuilder, ABC):
    """Base dataset builder for BRAT v1.3 standoff annotations
    (https://brat.nlplab.org/standoff.html).

    Subclasses only need to implement ``_split_generators``; parsing of the
    paired ``.ann`` / ``.txt`` files is provided here.
    """
    BUILDER_CONFIG_CLASS = BratConfig

    def _info(self):
        """Declare the feature schema for all BRAT annotation kinds."""
        return nlp.DatasetInfo(
            features=nlp.Features({
                "context": nlp.Value("string"),
                "spans": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "locations": nlp.Sequence({
                        "start": nlp.Value("int32"),
                        "end": nlp.Value("int32"),
                    }),
                    "text": nlp.Value("string"),
                }),
                "relations": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "arguments": nlp.Sequence({
                        "type": nlp.Value("string"),
                        "target": nlp.Value("string")
                    })
                }),
                "equivalence_relations": nlp.Sequence({
                    "type": nlp.Value("string"),
                    "targets": nlp.Sequence(nlp.Value("string")),
                }),
                "events": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "trigger": nlp.Value("string"),
                    "arguments": nlp.Sequence({
                        "type": nlp.Value("string"),
                        "target": nlp.Value("string")
                    })
                }),
                "attributions": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "target": nlp.Value("string"),
                    "value": nlp.Value("string"),
                }),
                "normalizations": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "target": nlp.Value("string"),
                    "resource_id": nlp.Value("string"),
                    "entity_id": nlp.Value("string"),
                }),
                "notes": nlp.Sequence({
                    "id": nlp.Value("string"),
                    "type": nlp.Value("string"),
                    "target": nlp.Value("string"),
                    "note": nlp.Value("string"),
                }),
            })
        )

    @abstractmethod
    def _split_generators(self, dl_manager):
        pass

    @staticmethod
    def _get_location(location_string):
        """Parse a single "start end" offset pair."""
        parts = location_string.split(' ')
        assert len(parts) == 2, f'Wrong number of entries in location string. Expected 2, but found: {parts}'
        return {'start': int(parts[0]), 'end': int(parts[1])}

    @staticmethod
    def _get_span_annotation(annotation_line):
        """
        example input:
            T1	Organization 0 4	Sony
        """
        _id, remaining, text = annotation_line.split('\t', maxsplit=2)
        _type, locations = remaining.split(' ', maxsplit=1)
        # discontinuous spans separate their fragments with ';'
        return {
            'id': _id,
            'text': text,
            'type': _type,
            'locations': [AbstractBrat._get_location(loc) for loc in locations.split(';')]
        }

    @staticmethod
    def _get_event_annotation(annotation_line):
        """
        example input:
            E1	MERGE-ORG:T2 Org1:T1 Org2:T3
        """
        _id, remaining = annotation_line.strip().split('\t')
        # the first type:target pair is the event trigger
        args = [dict(zip(['type', 'target'], a.split(':'))) for a in remaining.split(' ')]
        return {
            'id': _id,
            'type': args[0]['type'],
            'trigger': args[0]['target'],
            'arguments': args[1:]
        }

    @staticmethod
    def _get_relation_annotation(annotation_line):
        """
        example input:
            R1	Origin Arg1:T3 Arg2:T4
        """
        _id, remaining = annotation_line.strip().split('\t')
        _type, remaining = remaining.split(' ', maxsplit=1)
        args = [dict(zip(['type', 'target'], a.split(':'))) for a in remaining.split(' ')]
        return {
            'id': _id,
            'type': _type,
            'arguments': args
        }

    @staticmethod
    def _get_equivalence_relation_annotation(annotation_line):
        """
        example input:
            *	Equiv T1 T2 T3
        """
        _, remaining = annotation_line.strip().split('\t')
        parts = remaining.split(' ')
        return {
            'type': parts[0],
            'targets': parts[1:]
        }

    @staticmethod
    def _get_attribute_annotation(annotation_line):
        """
        example input (binary: implicit value is True, if present, False otherwise):
            A1	Negation E1
        example input (multi-value: explicit value)
            A2	Confidence E2 L1
        """
        _id, remaining = annotation_line.strip().split('\t')
        parts = remaining.split(' ')
        # if no value is present, it is implicitly "true"
        if len(parts) == 2:
            parts.append('true')
        return {
            'id': _id,
            'type': parts[0],
            'target': parts[1],
            'value': parts[2],
        }

    @staticmethod
    def _get_normalization_annotation(annotation_line):
        """
        example input:
            N1	Reference T1 Wikipedia:534366	Barack Obama
        """
        _id, remaining, text = annotation_line.split('\t', maxsplit=2)
        _type, target, ref = remaining.split(' ')
        res_id, ent_id = ref.split(':')
        return {
            'id': _id,
            'type': _type,
            'target': target,
            'resource_id': res_id,
            'entity_id': ent_id,
        }

    @staticmethod
    def _get_note_annotation(annotation_line):
        """
        example input:
            #1	AnnotatorNotes T1	this annotation is suspect
        """
        _id, remaining, note = annotation_line.split('\t', maxsplit=2)
        _type, target = remaining.split(' ')
        return {
            'id': _id,
            'type': _type,
            'target': target,
            'note': note,
        }

    @staticmethod
    def _read_annotation_file(filename):
        """
        reads a BRAT v1.3 annotations file (see https://brat.nlplab.org/standoff.html)
        """
        res = {
            'spans': [],
            'events': [],
            'relations': [],
            'equivalence_relations': [],
            'attributions': [],
            'normalizations': [],
            'notes': [],
        }
        with open(filename) as file:
            for line in file:
                # BUG FIX: strip only a trailing newline (the old code cut
                # the last character unconditionally, corrupting a final line
                # without '\n'), and check emptiness AFTER stripping (the old
                # len(line) == 0 check never matched because lines still
                # carried their '\n', so blank lines crashed the parser).
                line = line.rstrip('\n')
                if not line:
                    continue
                ann_type = line[0]
                if ann_type == 'T':
                    res['spans'].append(AbstractBrat._get_span_annotation(line))
                elif ann_type == 'E':
                    res['events'].append(AbstractBrat._get_event_annotation(line))
                elif ann_type == 'R':
                    res['relations'].append(AbstractBrat._get_relation_annotation(line))
                elif ann_type == '*':
                    # BUG FIX: equivalence relations were appended to
                    # res['relations'], which both left
                    # 'equivalence_relations' always empty and broke the
                    # declared feature schema (no 'id'/'arguments' keys)
                    res['equivalence_relations'].append(
                        AbstractBrat._get_equivalence_relation_annotation(line))
                elif ann_type in ['A', 'M']:
                    res['attributions'].append(AbstractBrat._get_attribute_annotation(line))
                elif ann_type == 'N':
                    res['normalizations'].append(AbstractBrat._get_normalization_annotation(line))
                elif ann_type == '#':
                    res['notes'].append(AbstractBrat._get_note_annotation(line))
                else:
                    raise ValueError(f'unknown BRAT id type: {line[0]}. Annotation ids have to start with T (spans), '
                                     f'E (events), R (relations), A (attributions), or N (normalizations). See '
                                     f'https://brat.nlplab.org/standoff.html for the BRAT annotation file '
                                     f'specification.')
        return res

    def _generate_examples(self, files=None, directory=None):
        """ Read context (.txt) and annotation (.ann) files. """
        if files is None:
            assert directory is not None, 'If files is None, directory has to be provided, but it is also None.'
            _files = glob.glob(f"{directory}/*.{self.config.ann_file_extension}")
            files = [path.splitext(fn)[0] for fn in _files]
        for filename in files:
            basename = path.basename(filename)
            # BUG FIX: the extensions must be appended to the current file's
            # base path (filename), not to a fixed placeholder name
            ann_fn = f'{filename}.{self.config.ann_file_extension}'
            brat_annotations = AbstractBrat._read_annotation_file(ann_fn)
            txt_fn = f'{filename}.{self.config.txt_file_extension}'
            # close the context file promptly instead of leaking the handle
            with open(txt_fn) as txt_file:
                brat_annotations['context'] = txt_file.read()
            yield basename, brat_annotations
|
<reponame>anuwrag/opentrons
"""
otupdate.buildroot.update_actions: what files to expect and what to do with them
This module has functions that actually accomplish the various tasks required
for an update: unzipping update files, hashing rootfs, checking signatures,
writing to root partitions
"""
import contextlib
import enum
import logging
import os
import re
import subprocess
import tempfile
from typing import Callable, Optional
from otupdate.common.file_actions import (
unzip_update,
hash_file,
verify_signature,
HashMismatch,
)
from otupdate.common.update_actions import UpdateActionsInterface, Partition
# file names expected inside an update zip: the rootfs image, its hash,
# and the detached signature over that hash
ROOTFS_SIG_NAME = "rootfs.ext4.hash.sig"
ROOTFS_HASH_NAME = "rootfs.ext4.hash"
ROOTFS_NAME = "rootfs.ext4"
UPDATE_FILES = [ROOTFS_NAME, ROOTFS_SIG_NAME, ROOTFS_HASH_NAME]
LOG = logging.getLogger(__name__)
class RootPartitions(enum.Enum):
    # the two A/B root partitions the updater alternates between
    TWO: Partition = Partition(2, "/dev/mmcblk0p2")
    THREE: Partition = Partition(3, "/dev/mmcblk0p3")
class OT2UpdateActions(UpdateActionsInterface):
    """Concrete update actions for the OT-2: validate an update zip, write
    the rootfs to the unused A/B root partition, and commit the switch."""

    def validate_update(
        self,
        filepath: str,
        progress_callback: Callable[[float], None],
        cert_path: Optional[str],
    ):
        """Worker for validation. Call in an executor (so it can return things)

        - Unzips filepath to its directory
        - Hashes the rootfs inside
        - If requested, checks the signature of the hash

        :param filepath: The path to the update zip file
        :param progress_callback: The function to call with progress between 0
                                  and 1.0. May never reach precisely 1.0, best
                                  only for user information
        :param cert_path: Path to an x.509 certificate to check the signature
                          against. If ``None``, signature checking is disabled
        :returns str: Path to the rootfs file to update

        Will also raise an exception if validation fails
        """

        def zip_callback(progress):
            # unzipping covers the first half of the progress range
            progress_callback(progress / 2.0)

        required = [ROOTFS_NAME, ROOTFS_HASH_NAME]
        if cert_path:
            required.append(ROOTFS_SIG_NAME)
        files, sizes = unzip_update(filepath, zip_callback, UPDATE_FILES, required)

        def hash_callback(progress):
            # hashing covers the second half of the progress range
            progress_callback(progress / 2.0 + 0.5)

        rootfs = files.get(ROOTFS_NAME)
        assert rootfs
        rootfs_hash = hash_file(rootfs, hash_callback, file_size=sizes[ROOTFS_NAME])
        hashfile = files.get(ROOTFS_HASH_NAME)
        assert hashfile
        # BUG FIX: close the hash file instead of leaking the handle
        with open(hashfile, "rb") as packaged_hash_file:
            packaged_hash = packaged_hash_file.read().strip()
        if packaged_hash != rootfs_hash:
            msg = (
                f"Hash mismatch: calculated {rootfs_hash!r} != "
                f"packaged {packaged_hash!r}"
            )
            LOG.error(msg)
            raise HashMismatch(msg)
        if cert_path:
            sigfile = files.get(ROOTFS_SIG_NAME)
            assert sigfile
            verify_signature(hashfile, sigfile, cert_path)
        return rootfs

    def write_update(
        self,
        rootfs_filepath: str,
        progress_callback: Callable[[float], None],
        chunk_size: int = 1024,
        file_size: Optional[int] = None,
    ) -> Partition:
        """
        Write the new rootfs to the next root partition

        - Figure out, from the system, the correct root partition to write to
        - Write the rootfs at ``rootfs_filepath`` there, with progress

        :param rootfs_filepath: The path to a checked rootfs.ext4
        :param progress_callback: A callback to call periodically with progress
                                  between 0 and 1.0. May never reach precisely
                                  1.0, best only for user information.
        :param chunk_size: The size of file chunks to copy in between progress
                           notifications
        :param file_size: The total size of the update file (for generating
                          progress percentage). If ``None``, generated with
                          ``seek``/``tell``.
        :returns: The root partition that the rootfs image was written to, e.g.
                  ``RootPartitions.TWO`` or ``RootPartitions.THREE``.
        """
        unused = _find_unused_partition()
        part_path = unused.value.path
        write_file(rootfs_filepath, part_path, progress_callback, chunk_size, file_size)
        return unused.value

    @contextlib.contextmanager
    def mount_update(self):
        """Mount the freshly-written partition r/w (to update machine-id).

        Should be used as a context manager, and the yielded value is the path
        to the mount. When the context manager exits, the partition will be
        unmounted again and its mountpoint removed.
        """
        # DOC FIX: removed the ':param mountpoint_in:' entry — this method
        # takes no such parameter; the mountpoint root comes from
        # _mountpoint_root().
        unused = _find_unused_partition()
        part_path = unused.value.path
        with tempfile.TemporaryDirectory(dir=_mountpoint_root()) as mountpoint:
            subprocess.check_output(["mount", part_path, mountpoint])
            LOG.info(f"mounted {part_path} to {mountpoint}")
            try:
                yield mountpoint
            finally:
                # always unmount, even if the caller's body raised
                subprocess.check_output(["umount", mountpoint])
                LOG.info(f"Unmounted {part_path} from {mountpoint}")

    def commit_update(self) -> None:
        """Switch the target boot partition."""
        unused = _find_unused_partition()
        new = _switch_partition()
        if new != unused:
            # the switch script disagreed with our view of the A/B state
            msg = f"Bad switch: switched to {new} when {unused} was unused"
            LOG.error(msg)
            raise RuntimeError(msg)
        else:
            LOG.info(f"commit_update: committed to booting {new}")

    def write_machine_id(self, current_root: str, new_root: str):
        """Update the machine id in target rootfs"""
        # BUG FIX: close both machine-id files instead of leaking handles
        with open(os.path.join(current_root, "etc", "machine-id")) as current_mid:
            mid = current_mid.read()
        with open(os.path.join(new_root, "etc", "machine-id"), "w") as new_mid:
            new_mid.write(mid)
        LOG.info(f"Wrote machine_id {mid.strip()} to {new_root}/etc/machine-id")
def _find_unused_partition() -> RootPartitions:
    """Find the currently-unused root partition to write to"""
    # the helper binary prints the number of the unused partition
    output = subprocess.check_output(["ot-unused-partition"]).strip()
    by_number = {b"2": RootPartitions.TWO, b"3": RootPartitions.THREE}
    return by_number[output]
def write_file(
    infile: str,
    outfile: str,
    progress_callback: Callable[[float], None],
    chunk_size: int = 1024,
    file_size: Optional[int] = None,
):
    """Write a file to another file with progress callbacks.

    :param infile: The input filepath
    :param outfile: The output filepath
    :param progress_callback: The callback to call for progress
    :param chunk_size: The size of file chunks to copy in between progress
                       notifications
    :param file_size: The total size of the update file (for generating
                      progress percentage). If ``None``, generated with
                      ``seek``/``tell``.
    """
    total_written = 0
    with open(infile, "rb") as img, open(outfile, "wb") as part:
        # FIX: annotation was `int = None`; also replaced the yoda-style
        # `None is file_size` comparison with the idiomatic form
        if file_size is None:
            # measure the input size by seeking to its end, then rewind
            file_size = img.seek(0, 2)
            img.seek(0)
            LOG.info(f"write_file: file size calculated as {file_size}B")
        LOG.info(
            f"write_file: writing {infile} ({file_size}B)"
            f" to {outfile} in {chunk_size}B chunks"
        )
        while True:
            chunk = img.read(chunk_size)
            part.write(chunk)
            total_written += len(chunk)
            progress_callback(total_written / file_size)
            # a short (or empty) read means we hit end-of-file
            if len(chunk) != chunk_size:
                break
def _mountpoint_root():
"""provides mountpoint location for :py:meth:`mount_update`.
exists only for ease of mocking
"""
return "/mnt"
def _switch_partition() -> RootPartitions:
    """Switch the active boot partition using the switch script"""
    output = subprocess.check_output(["ot-switch-partitions"])
    # The script reports the transition on one of its output lines.
    pattern = re.compile(b"Current boot partition: ([23]), setting to ([23])")
    for line in output.split(b"\n"):
        found = pattern.match(line)
        if found:
            target = found.group(2)
            return {b"2": RootPartitions.TWO, b"3": RootPartitions.THREE}[target]
    # No line matched: the script's output format was not what we expect.
    raise RuntimeError(f"Bad output from ot-switch-partitions: {output!r}")
|
<gh_stars>0
# Leviton Cloud Services API model Installation.
# Auto-generated by api_scraper.py.
#
# Copyright 2017 <NAME> <<EMAIL>>
#
# This code is released under the terms of the MIT license. See the LICENSE
# file for more details.
from ..base_model import BaseModel
class Installation(BaseModel):
def __init__(self, session, model_id=None):
super(Installation, self).__init__(session, model_id)
@classmethod
def count(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/count"
return session.call_api(api, attribs, 'get')
def count_action_blocks(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_actions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_activities(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_activity_triggers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_area_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_areas(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_controllers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_feed_items(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/feedItems/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_load_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_loads(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_sensor_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_sensors(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_shades(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_thermostat_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_thermostats(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
def count_touchscreens(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens/count".format(self._id)
return self._session.call_api(api, attribs, 'get')
@classmethod
def create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations"
return session.call_api(api, attribs, 'post')
def create_action_blocks(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_actions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_activities(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_activity_triggers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_area_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_areas(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_controllers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_load_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_loads(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def create_many(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations"
return session.call_api(api, attribs, 'post')
def create_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_sensor_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_sensors(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_shades(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_thermostat_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_thermostats(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats".format(self._id)
return self._session.call_api(api, attribs, 'post')
def create_touchscreens(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens".format(self._id)
return self._session.call_api(api, attribs, 'post')
def delete_action_blocks(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_actions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_activities(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_activity_triggers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_area_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_areas(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_controllers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_load_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_loads(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_sensor_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_sensors(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_shades(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_thermostat_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_thermostats(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def delete_touchscreens(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens".format(self._id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_action_blocks(self, action_block_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks/{1}".format(self._id, action_block_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_actions(self, action_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions/{1}".format(self._id, action_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_activities(self, activity_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities/{1}".format(self._id, activity_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_activity_triggers(self, activity_trigger_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers/{1}".format(self._id, activity_trigger_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_area_snapshots(self, area_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots/{1}".format(self._id, area_snapshot_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_areas(self, area_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas/{1}".format(self._id, area_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_controllers(self, controller_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers/{1}".format(self._id, controller_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_load_snapshots(self, load_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots/{1}".format(self._id, load_snapshot_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_loads(self, load_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads/{1}".format(self._id, load_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_schedules(self, schedule_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules/{1}".format(self._id, schedule_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_sensor_snapshots(self, sensor_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots/{1}".format(self._id, sensor_snapshot_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_sensors(self, sensor_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors/{1}".format(self._id, sensor_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_shades(self, shade_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades/{1}".format(self._id, shade_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_thermostat_snapshots(self, thermostat_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots/{1}".format(self._id, thermostat_snapshot_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_thermostats(self, thermostat_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats/{1}".format(self._id, thermostat_id)
return self._session.call_api(api, attribs, 'delete')
def destroy_by_id_touchscreens(self, touchscreen_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens/{1}".format(self._id, touchscreen_id)
return self._session.call_api(api, attribs, 'delete')
def exists(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/exists".format(self._id)
return self._session.call_api(api, attribs, 'get')
@classmethod
def find(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations"
items = session.call_api(api, attribs, 'get')
result = []
if items is not None:
for data in items:
model = Installation(session, data['id'])
model.data = data
result.append(model)
return result
def find_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'get')
self.data.update(data)
return self
def find_by_id_action_blocks(self, action_block_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks/{1}".format(self._id, action_block_id)
data = self._session.call_api(api, attribs, 'get')
from .action_block import ActionBlock
model = ActionBlock(self._session, data['id'])
model.data = data
return model
def find_by_id_actions(self, action_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions/{1}".format(self._id, action_id)
data = self._session.call_api(api, attribs, 'get')
from .action import Action
model = Action(self._session, data['id'])
model.data = data
return model
def find_by_id_activities(self, activity_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities/{1}".format(self._id, activity_id)
data = self._session.call_api(api, attribs, 'get')
from .activity import Activity
model = Activity(self._session, data['id'])
model.data = data
return model
def find_by_id_activity_triggers(self, activity_trigger_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers/{1}".format(self._id, activity_trigger_id)
data = self._session.call_api(api, attribs, 'get')
from .activity_trigger import ActivityTrigger
model = ActivityTrigger(self._session, data['id'])
model.data = data
return model
def find_by_id_area_snapshots(self, area_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots/{1}".format(self._id, area_snapshot_id)
data = self._session.call_api(api, attribs, 'get')
from .area_snapshot import AreaSnapshot
model = AreaSnapshot(self._session, data['id'])
model.data = data
return model
def find_by_id_areas(self, area_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas/{1}".format(self._id, area_id)
data = self._session.call_api(api, attribs, 'get')
from .area import Area
model = Area(self._session, data['id'])
model.data = data
return model
def find_by_id_controllers(self, controller_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers/{1}".format(self._id, controller_id)
data = self._session.call_api(api, attribs, 'get')
from .controller import Controller
model = Controller(self._session, data['id'])
model.data = data
return model
def find_by_id_feed_items(self, feed_item_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/feedItems/{1}".format(self._id, feed_item_id)
data = self._session.call_api(api, attribs, 'get')
from .feed_item import FeedItem
model = FeedItem(self._session, data['id'])
model.data = data
return model
def find_by_id_load_snapshots(self, load_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots/{1}".format(self._id, load_snapshot_id)
data = self._session.call_api(api, attribs, 'get')
from .load_snapshot import LoadSnapshot
model = LoadSnapshot(self._session, data['id'])
model.data = data
return model
def find_by_id_loads(self, load_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads/{1}".format(self._id, load_id)
data = self._session.call_api(api, attribs, 'get')
from .load import Load
model = Load(self._session, data['id'])
model.data = data
return model
def find_by_id_schedules(self, schedule_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules/{1}".format(self._id, schedule_id)
return self._session.call_api(api, attribs, 'get')
def find_by_id_sensor_snapshots(self, sensor_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots/{1}".format(self._id, sensor_snapshot_id)
data = self._session.call_api(api, attribs, 'get')
from .sensor_snapshot import SensorSnapshot
model = SensorSnapshot(self._session, data['id'])
model.data = data
return model
def find_by_id_sensors(self, sensor_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors/{1}".format(self._id, sensor_id)
data = self._session.call_api(api, attribs, 'get')
from .sensor import Sensor
model = Sensor(self._session, data['id'])
model.data = data
return model
def find_by_id_shades(self, shade_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades/{1}".format(self._id, shade_id)
data = self._session.call_api(api, attribs, 'get')
from .shade import Shade
model = Shade(self._session, data['id'])
model.data = data
return model
def find_by_id_thermostat_snapshots(self, thermostat_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots/{1}".format(self._id, thermostat_snapshot_id)
data = self._session.call_api(api, attribs, 'get')
from .thermostat_snapshot import ThermostatSnapshot
model = ThermostatSnapshot(self._session, data['id'])
model.data = data
return model
def find_by_id_thermostats(self, thermostat_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats/{1}".format(self._id, thermostat_id)
data = self._session.call_api(api, attribs, 'get')
from .thermostat import Thermostat
model = Thermostat(self._session, data['id'])
model.data = data
return model
def find_by_id_touchscreens(self, touchscreen_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens/{1}".format(self._id, touchscreen_id)
data = self._session.call_api(api, attribs, 'get')
from .touchscreen import Touchscreen
model = Touchscreen(self._session, data['id'])
model.data = data
return model
@classmethod
def find_one(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/findOne"
return session.call_api(api, attribs, 'get')
def refresh(self):
api = "/Installations/{0}".format(self._id)
result = self._session.call_api(api, {}, 'get')
if result is not None:
self.data.update(result)
return self
def get_action_blocks(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .action_block import ActionBlock
result = []
if items is not None:
for data in items:
model = ActionBlock(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_actions(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .action import Action
result = []
if items is not None:
for data in items:
model = Action(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_activities(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .activity import Activity
result = []
if items is not None:
for data in items:
model = Activity(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_activity_triggers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .activity_trigger import ActivityTrigger
result = []
if items is not None:
for data in items:
model = ActivityTrigger(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_area_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .area_snapshot import AreaSnapshot
result = []
if items is not None:
for data in items:
model = AreaSnapshot(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_areas(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .area import Area
result = []
if items is not None:
for data in items:
model = Area(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_controllers(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .controller import Controller
result = []
if items is not None:
for data in items:
model = Controller(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_feed_items(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/feedItems".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .feed_item import FeedItem
result = []
if items is not None:
for data in items:
model = FeedItem(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_load_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loadSnapshots".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .load_snapshot import LoadSnapshot
result = []
if items is not None:
for data in items:
model = LoadSnapshot(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_loads(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/loads".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .load import Load
result = []
if items is not None:
for data in items:
model = Load(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_location(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/location".format(self._id)
data = self._session.call_api(api, attribs, 'get')
from .location import Location
model = Location(self._session, data['id'])
model.data = data
return model
def get_schedules(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/schedules".format(self._id)
return self._session.call_api(api, attribs, 'get')
def get_sensor_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensorSnapshots".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .sensor_snapshot import SensorSnapshot
result = []
if items is not None:
for data in items:
model = SensorSnapshot(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_sensors(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/sensors".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .sensor import Sensor
result = []
if items is not None:
for data in items:
model = Sensor(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_shades(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/shades".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .shade import Shade
result = []
if items is not None:
for data in items:
model = Shade(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_thermostat_snapshots(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostatSnapshots".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .thermostat_snapshot import ThermostatSnapshot
result = []
if items is not None:
for data in items:
model = ThermostatSnapshot(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_thermostats(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/thermostats".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .thermostat import Thermostat
result = []
if items is not None:
for data in items:
model = Thermostat(self._session, data['id'])
model.data = data
result.append(model)
return result
def get_touchscreens(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/touchscreens".format(self._id)
items = self._session.call_api(api, attribs, 'get')
from .touchscreen import Touchscreen
result = []
if items is not None:
for data in items:
model = Touchscreen(self._session, data['id'])
model.data = data
result.append(model)
return result
def register_controller(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/registerController".format(self._id)
return self._session.call_api(api, attribs, 'post')
def replace_by_id(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/replace".format(self._id)
return self._session.call_api(api, attribs, 'post')
@classmethod
def replace_or_create(cls, session, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/replaceOrCreate"
return session.call_api(api, attribs, 'post')
def update_attributes(self, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}".format(self._id)
data = self._session.call_api(api, attribs, 'put')
self.data.update(attribs)
return self
def update_by_id_action_blocks(self, action_block_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actionBlocks/{1}".format(self._id, action_block_id)
data = self._session.call_api(api, attribs, 'put')
from .action_block import ActionBlock
model = ActionBlock(self._session, data['id'])
model.data = data
return model
def update_by_id_actions(self, action_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/actions/{1}".format(self._id, action_id)
data = self._session.call_api(api, attribs, 'put')
from .action import Action
model = Action(self._session, data['id'])
model.data = data
return model
def update_by_id_activities(self, activity_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activities/{1}".format(self._id, activity_id)
data = self._session.call_api(api, attribs, 'put')
from .activity import Activity
model = Activity(self._session, data['id'])
model.data = data
return model
def update_by_id_activity_triggers(self, activity_trigger_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/activityTriggers/{1}".format(self._id, activity_trigger_id)
data = self._session.call_api(api, attribs, 'put')
from .activity_trigger import ActivityTrigger
model = ActivityTrigger(self._session, data['id'])
model.data = data
return model
def update_by_id_area_snapshots(self, area_snapshot_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areaSnapshots/{1}".format(self._id, area_snapshot_id)
data = self._session.call_api(api, attribs, 'put')
from .area_snapshot import AreaSnapshot
model = AreaSnapshot(self._session, data['id'])
model.data = data
return model
def update_by_id_areas(self, area_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/areas/{1}".format(self._id, area_id)
data = self._session.call_api(api, attribs, 'put')
from .area import Area
model = Area(self._session, data['id'])
model.data = data
return model
def update_by_id_controllers(self, controller_id, attribs=None):
if attribs is None:
attribs = {}
api = "/Installations/{0}/controllers/{1}".format(self._id, controller_id)
data = self._session.call_api(api, attribs, 'put')
from .controller import Controller
model = Controller(self._session, data['id'])
model.data = data
return model
# Generated update-by-id helpers: each PUTs `attribs` to the nested resource
# endpoint and (except for schedules) wraps the response dict in its model
# class.  The model imports are deliberately local to avoid circular imports
# between the generated modules.
def update_by_id_load_snapshots(self, load_snapshot_id, attribs=None):
    """PUT an update for one load snapshot; returns a LoadSnapshot model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/loadSnapshots/{1}".format(self._id, load_snapshot_id)
    data = self._session.call_api(api, attribs, 'put')
    from .load_snapshot import LoadSnapshot
    model = LoadSnapshot(self._session, data['id'])
    model.data = data
    return model

def update_by_id_loads(self, load_id, attribs=None):
    """PUT an update for one load; returns a Load model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/loads/{1}".format(self._id, load_id)
    data = self._session.call_api(api, attribs, 'put')
    from .load import Load
    model = Load(self._session, data['id'])
    model.data = data
    return model

def update_by_id_schedules(self, schedule_id, attribs=None):
    """PUT an update for one schedule; returns the raw API response (no model wrapper)."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/schedules/{1}".format(self._id, schedule_id)
    return self._session.call_api(api, attribs, 'put')

def update_by_id_sensor_snapshots(self, sensor_snapshot_id, attribs=None):
    """PUT an update for one sensor snapshot; returns a SensorSnapshot model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/sensorSnapshots/{1}".format(self._id, sensor_snapshot_id)
    data = self._session.call_api(api, attribs, 'put')
    from .sensor_snapshot import SensorSnapshot
    model = SensorSnapshot(self._session, data['id'])
    model.data = data
    return model

def update_by_id_sensors(self, sensor_id, attribs=None):
    """PUT an update for one sensor; returns a Sensor model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/sensors/{1}".format(self._id, sensor_id)
    data = self._session.call_api(api, attribs, 'put')
    from .sensor import Sensor
    model = Sensor(self._session, data['id'])
    model.data = data
    return model

def update_by_id_shades(self, shade_id, attribs=None):
    """PUT an update for one shade; returns a Shade model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/shades/{1}".format(self._id, shade_id)
    data = self._session.call_api(api, attribs, 'put')
    from .shade import Shade
    model = Shade(self._session, data['id'])
    model.data = data
    return model

def update_by_id_thermostat_snapshots(self, thermostat_snapshot_id, attribs=None):
    """PUT an update for one thermostat snapshot; returns a ThermostatSnapshot model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/thermostatSnapshots/{1}".format(self._id, thermostat_snapshot_id)
    data = self._session.call_api(api, attribs, 'put')
    from .thermostat_snapshot import ThermostatSnapshot
    model = ThermostatSnapshot(self._session, data['id'])
    model.data = data
    return model

def update_by_id_thermostats(self, thermostat_id, attribs=None):
    """PUT an update for one thermostat; returns a Thermostat model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/thermostats/{1}".format(self._id, thermostat_id)
    data = self._session.call_api(api, attribs, 'put')
    from .thermostat import Thermostat
    model = Thermostat(self._session, data['id'])
    model.data = data
    return model

def update_by_id_touchscreens(self, touchscreen_id, attribs=None):
    """PUT an update for one touchscreen; returns a Touchscreen model."""
    if attribs is None:
        attribs = {}
    api = "/Installations/{0}/touchscreens/{1}".format(self._id, touchscreen_id)
    data = self._session.call_api(api, attribs, 'put')
    from .touchscreen import Touchscreen
    model = Touchscreen(self._session, data['id'])
    model.data = data
    return model
@classmethod
def upsert(cls, session, attribs=None):
    """PUT /Installations: create-or-update an installation; returns an Installation model."""
    if attribs is None:
        attribs = {}
    api = "/Installations"
    data = session.call_api(api, attribs, 'put')
    model = Installation(session, data['id'])
    model.data = data
    return model

@classmethod
def upsert_with_where(cls, session, attribs=None):
    """POST /Installations/upsertWithWhere; returns the raw API response."""
    if attribs is None:
        attribs = {}
    api = "/Installations/upsertWithWhere"
    return session.call_api(api, attribs, 'post')
|
# ------------------------------------------------------------------------------------------------ #
# MIT License #
# #
# Copyright (c) 2020, Microsoft Corporation #
# #
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software #
# and associated documentation files (the "Software"), to deal in the Software without #
# restriction, including without limitation the rights to use, copy, modify, merge, publish, #
# distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the #
# Software is furnished to do so, subject to the following conditions: #
# #
# The above copyright notice and this permission notice shall be included in all copies or #
# substantial portions of the Software. #
# #
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING #
# BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND #
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, #
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, #
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. #
# ------------------------------------------------------------------------------------------------ #
import pytest
import jax
import haiku as hk
from ._array import unvectorize
@pytest.fixture
def rngs():
    """Deterministic Haiku PRNG key sequence consumed via next() in each test."""
    return hk.PRNGSequence(42)

@pytest.fixture
def x_batch():
    """A (7, 11) batch of normally-distributed inputs (batch axis = 7)."""
    rng = jax.random.PRNGKey(13)
    return jax.random.normal(rng, shape=(7, 11))

@pytest.fixture
def x_single():
    """A single 11-dimensional input with no batch axis."""
    rng = jax.random.PRNGKey(17)
    return jax.random.normal(rng, shape=(11,))
def test_unvectorize_single_output(rngs, x_batch, x_single):
    """unvectorize turns a batched transform into a single-example callable.

    Also exercises the error paths: a tuple out_axes for a single-output
    function must raise TypeError, and an in_axes length that does not
    match the function's arity must raise ValueError.
    """
    def f_batch(X):
        return hk.Linear(11)(X)
    init, f_batch = hk.transform(f_batch)
    params = init(next(rngs), x_batch)
    y_batch = f_batch(params, next(rngs), x_batch)
    assert y_batch.shape == (7, 11)
    # params and rng are broadcast (None); only the data argument is mapped
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
    y_single = f_single(params, next(rngs), x_single)
    assert y_single.shape == (11,)
    # tuple out_axes is invalid when the function returns a single array
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
    msg = r"out_axes must be an int for functions with a single output; got: out_axes=\(0,\)"
    with pytest.raises(TypeError, match=msg):
        f_single(params, next(rngs), x_single)
    # four in_axes for a three-argument function
    f_single = unvectorize(f_batch, in_axes=(None, None, 0, 0), out_axes=(0,))
    msg = r"number of in_axes must match the number of function inputs"
    with pytest.raises(ValueError, match=msg):
        f_single(params, next(rngs), x_single)
def test_unvectorize_multi_output(rngs, x_batch, x_single):
    """unvectorize handles tuple-valued functions and per-output out_axes.

    out_axes=None keeps a leading singleton batch axis on every output,
    an int applies to all outputs, a tuple mixes behaviours per output,
    and a wrong-length tuple raises ValueError.
    """
    def f_batch(X):
        return hk.Linear(11)(X), hk.Linear(13)(X)
    init, f_batch = hk.transform(f_batch)
    params = init(next(rngs), x_batch)
    y_batch = f_batch(params, next(rngs), x_batch)
    assert y_batch[0].shape == (7, 11)
    assert y_batch[1].shape == (7, 13)
    # a single int out_axes squeezes axis 0 of every output
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=0)
    y_single = f_single(params, next(rngs), x_single)
    assert y_single[0].shape == (11,)
    assert y_single[1].shape == (13,)
    # per-output: squeeze the first output, keep the singleton axis on the second
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0, None))
    y_single = f_single(params, next(rngs), x_single)
    assert y_single[0].shape == (11,)
    assert y_single[1].shape == (1, 13)
    # None: no squeezing anywhere
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=None)
    y_single = f_single(params, next(rngs), x_single)
    assert y_single[0].shape == (1, 11,)
    assert y_single[1].shape == (1, 13)
    # tuple length must match the number of outputs
    f_single = unvectorize(f_batch, in_axes=(None, None, 0), out_axes=(0,))
    msg = r"number of out_axes must match the number of function outputs"
    with pytest.raises(ValueError, match=msg):
        f_single(params, next(rngs), x_single)
|
import pytest
from py2vega import py2vega, Variable
from py2vega.main import Py2VegaSyntaxError, Py2VegaNameError
from py2vega.functions.math import isNaN
# Names the transpiler may reference in the tested expressions: two plain
# names plus a `cell` variable exposing whitelisted .value and .x attributes.
whitelist = ['value', 'x', Variable('cell', ['value', 'x'])]
# Literal conversion: Python constants map onto their Vega/JS spellings.
def test_nameconstant():
    code = 'False'
    assert py2vega(code, whitelist) == 'false'
    code = 'True'
    assert py2vega(code, whitelist) == 'true'
    code = 'None'
    assert py2vega(code, whitelist) == 'null'

def test_num():
    code = '36'
    assert py2vega(code, whitelist) == '36'

def test_str():
    code = '\'white\''
    assert py2vega(code, whitelist) == '\'white\''

# Containers: tuples and lists both become Vega arrays; dicts pass through.
def test_tuple():
    code = '(True, 3, \'hello\')'
    assert py2vega(code, whitelist) == '[true, 3, \'hello\']'
    code = '((True, 3, \'hello\'), 3)'
    assert py2vega(code, whitelist) == '[[true, 3, \'hello\'], 3]'

def test_list():
    code = '[True, 3, \'hello\']'
    assert py2vega(code, whitelist) == '[true, 3, \'hello\']'

def test_dict():
    code = '{\'hello\': 3, \'there\': 4}'
    assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'
    code = '{\'hello\': 3, \'there\': 4}'
    assert py2vega(code, whitelist) == '{\'hello\': 3, \'there\': 4}'

def test_unary():
    code = 'not value'
    assert py2vega(code, whitelist) == '!(value)'
    code = '-value'
    assert py2vega(code, whitelist) == '-value'
    code = '+value'
    assert py2vega(code, whitelist) == '+value'

# Boolean/arithmetic operators; ** becomes pow(); bitwise ops are rejected.
def test_binary():
    code = 'value or 3'
    assert py2vega(code, whitelist) == '(value || 3)'
    code = 'value and 3'
    assert py2vega(code, whitelist) == '(value && 3)'
    code = 'value + 3'
    assert py2vega(code, whitelist) == '(value + 3)'
    code = 'value**3'
    assert py2vega(code, whitelist) == '(pow(value, 3))'
    # Unsupported operator
    code = 'value & x'
    with pytest.raises(Py2VegaSyntaxError):
        py2vega(code, whitelist)

def test_ternary():
    code = '3 if value else 4'
    assert py2vega(code, whitelist) == '(value ? 3 : 4)'

# Chained comparisons survive as-is; `in` becomes an indexof() test.
def test_compare():
    code = '3 < value <= 4'
    assert py2vega(code, whitelist) == '(3 < value <= 4)'
    code = 'value in (\'ford\', \'chevrolet\')'
    assert py2vega(code, whitelist) == '(indexof([\'ford\', \'chevrolet\'], value) != -1)'
    code = '\'chevrolet\' in value'
    assert py2vega(code, whitelist) == '(indexof(value, \'chevrolet\') != -1)'
    code = '\'chevrolet\' not in value'
    assert py2vega(code, whitelist) == '(indexof(value, \'chevrolet\') == -1)'
# Calls: Vega builtins pass through; Python builtins (bool/str/float/int/len)
# map onto Vega equivalents; unknown names raise Py2VegaNameError.
def test_call():
    code = 'toBoolean(3)'
    assert py2vega(code, whitelist) == 'toBoolean(3)'
    code = 'bool(3)'
    assert py2vega(code, whitelist) == '(isValid(3) ? toBoolean(3) : false)'
    code = 'py2vega.string.toString(3)'
    assert py2vega(code, whitelist) == 'toString(3)'
    code = 'str(3)'
    assert py2vega(code, whitelist) == 'toString(3)'
    code = 'toNumber("3")'
    assert py2vega(code, whitelist) == 'toNumber(\'3\')'
    code = 'float("3")'
    assert py2vega(code, whitelist) == 'toNumber(\'3\')'
    code = 'int("3")'
    assert py2vega(code, whitelist) == 'floor(toNumber(\'3\'))'
    code = 'length(value)'
    assert py2vega(code, whitelist) == 'length(value)'
    code = 'len(value)'
    assert py2vega(code, whitelist) == 'length(value)'
    # Unsupported function
    code = 'foo(value)'
    with pytest.raises(Py2VegaNameError):
        py2vega(code, whitelist)

# Indexing passes through; slices become Vega slice() calls (no step support).
def test_subscript():
    code = 'value[0]'
    assert py2vega(code, whitelist) == 'value[0]'
    code = '[34, 32][0 if value < 2 else 1]'
    assert py2vega(code, whitelist) == '[34, 32][((value < 2) ? 0 : 1)]'
    code = 'value[:2]'
    assert py2vega(code, whitelist) == 'slice(value, 0, 2)'
    code = 'value[1:]'
    assert py2vega(code, whitelist) == 'slice(value, 1)'
    code = 'value[:]'
    assert py2vega(code, whitelist) == 'slice(value, 0)'
    code = 'value[1:2]'
    assert py2vega(code, whitelist) == 'slice(value, 1, 2)'
    # Unsupported step parameter
    code = 'value[::2]'
    with pytest.raises(Py2VegaSyntaxError):
        py2vega(code, whitelist)
    # Unsupported ExtSlice node
    code = 'value[::2, 1:]'
    with pytest.raises(Py2VegaSyntaxError):
        py2vega(code, whitelist)

# Attribute access is validated against the Variable whitelist, including
# nested Variable declarations.
def test_attribute():
    code = 'cell.value'
    assert py2vega(code, whitelist) == 'cell.value'
    # unknown base name without a whitelist
    with pytest.raises(NameError):
        py2vega('cell.value')
    # known base, non-whitelisted attribute
    with pytest.raises(Py2VegaSyntaxError):
        py2vega('cell.undef', whitelist)
    assert py2vega('3 if value.member1 > value.member2 else 4', whitelist=[Variable('value', ['member1', 'member2'])]) == "((value.member1 > value.member2) ? 3 : 4)"
    # Nested member access
    whitelisted_vars = [Variable('nested_var', [Variable('var', ['test']), 'x'])]
    assert py2vega('nested_var.x', whitelisted_vars) == 'nested_var.x'
    with pytest.raises(NameError):
        py2vega('var.test', whitelisted_vars)
    assert py2vega('nested_var.var.test', whitelisted_vars) == 'nested_var.var.test'
    # Cannot validate a member access on an unknown variable
    with pytest.raises(Py2VegaSyntaxError):
        py2vega('nested_var[0].test', whitelisted_vars)
# NOTE: the helper functions below are test *data* -- py2vega parses their
# source code, so even adding a docstring would change the AST under test.
def func(value):
    return 'red' if value < 150 else 'green'

def test_function():
    assert py2vega(func, whitelist) == '((value < 150) ? \'red\' : \'green\')'

def test_whitelist():
    with pytest.raises(NameError):
        py2vega('my_variable')
    assert py2vega('my_variable', ['my_variable']) == 'my_variable'
    # Vega constants are accessible by default
    assert py2vega('PI') == 'PI'

def math_func():
    return isNaN(3)

def test_math():
    assert py2vega(math_func) == 'isNaN(3)'

# unsupported statement type (expression statement / print call)
def invalid_func1():
    print(3)

def test_invalid1():
    with pytest.raises(Py2VegaSyntaxError):
        py2vega(invalid_func1)

# deliberately invalid: unreachable second return
def invalid_func2():
    return 2
    return 3

def test_invalid2():
    with pytest.raises(Py2VegaSyntaxError, match='A `FunctionDef` node body cannot contain an `if` or `return` statement if it is not the last element of the body'):
        py2vega(invalid_func2)

# deliberately invalid: code after an if statement
def invalid_func3(value):
    if value < 3:
        return 3
    return 2

def test_invalid3():
    with pytest.raises(Py2VegaSyntaxError, match='A `FunctionDef` node body cannot contain an `if` or `return` statement if it is not the last element of the body'):
        py2vega(invalid_func3)

# deliberately invalid: dead code inside the if branch
def invalid_func4(value):
    if value < 3:
        return 2
        return 1
    else:
        return 2

def test_invalid4():
    with pytest.raises(Py2VegaSyntaxError, match='A `If` node body cannot contain an `if` or `return` statement if it is not the last element of the body'):
        py2vega(invalid_func4)

# deliberately invalid: if branch without a return/else
def invalid_func5(value):
    if value < 3:
        return 3

def test_invalid5():
    with pytest.raises(Py2VegaSyntaxError, match='A `If` node body must contain at least one `if` statement or one `return` statement'):
        py2vega(invalid_func5)

# deliberately invalid: `del` statement is unsupported
def invalid_func6(value):
    del value
    return 3

def test_invalid6():
    with pytest.raises(Py2VegaSyntaxError):
        py2vega(invalid_func6)

def test_lambda():
    # lambdas expose no clean retrievable source
    with pytest.raises(RuntimeError):
        py2vega(lambda value: value)
# As above, these helper functions are parsed by py2vega -- comments are safe
# to add (not part of the AST) but docstrings are not.
def conditional_func(value):
    if value < 3:
        return 'red'
    elif value < 5:
        return 'green'
    else:
        return 'yellow'

def test_if_stmt():
    assert py2vega(conditional_func, whitelist) == "if((value < 3), 'red', if((value < 5), 'green', 'yellow'))"

# assignments are inlined into the generated Vega expression
def assign_func1(value):
    val = ('USA', 'Japan')
    return 'red' if value in val else 'green'

def test_assign1():
    assert py2vega(assign_func1, whitelist) == "((indexof(['USA', 'Japan'], value) != -1) ? 'red' : 'green')"

def assign_func2(value):
    a = 'green'
    b = 'red'
    return a if value < 3 else b

def test_assign2():
    assert py2vega(assign_func2, whitelist) == "((value < 3) ? 'green' : 'red')"

# rebinding: the latest value wins
def assign_func3(value):
    a = 'green'
    a = 'red'
    return a

def test_assign3():
    assert py2vega(assign_func3, whitelist) == "'red'"

def assign_func4(value):
    a = 'green'
    b = a
    return b

def test_assign4():
    assert py2vega(assign_func4, whitelist) == "'green'"

# chained assignment
def assign_func5(value):
    a = b = 'Hello'
    return (a, b)

def test_assign5():
    assert py2vega(assign_func5, whitelist) == "['Hello', 'Hello']"

# aliasing snapshots the value at assignment time
def assign_func6(value):
    a = 'Hello'
    b = a
    a = 'World'
    return b

def test_assign6():
    assert py2vega(assign_func6, whitelist) == "'Hello'"

# a name bound in only one branch is not visible in the other
def assign_func7(value):
    if value < 3:
        a = 3
        return a
    else:
        return a

def test_assign7():
    with pytest.raises(NameError):
        py2vega(assign_func7, whitelist)

def assign_func8(value):
    if value < 3:
        a = 3
        return a
    else:
        a = 8
        return a

def test_assign8():
    assert py2vega(assign_func8, whitelist) == "if((value < 3), 3, 8)"

def assign_func9(value):
    a = 38 if isNaN(value) else 32
    if value < 3:
        return a
    else:
        a = 8
        return a

def test_assign9():
    assert py2vega(assign_func9, whitelist) == "if((value < 3), (isNaN(value) ? 38 : 32), 8)"

# assigning into a subscript target is rejected
def assign_func10(value):
    value[0] = 36
    return 3

def test_assign10():
    with pytest.raises(Py2VegaSyntaxError, match='Unsupported target'):
        assert py2vega(assign_func10, whitelist)
|
<filename>HTMLtoXML.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#######################################
# From HTML to XML - InDesign flavour #
#######################################
### Modules
import re
import sys
from bs4 import BeautifulSoup
import HTMLParser
### Functions
def clean_linebreaks(html_string):
    """Collapse runs of newlines into a single space, except after '>'.

    A newline immediately following a closing '>' is preserved, so line
    breaks that sit at markup boundaries survive; every other run of
    '\n' characters becomes one space.
    """
    return re.sub(r'(?<!>)\n+', ' ', html_string)
def extract_body_from_html(html_soup):
    """Detach the <body> of the HTML soup and graft it onto a fresh XML soup.

    The body is removed from ``html_soup`` (``extract`` mutates it) and
    becomes the sole child of a new, empty BeautifulSoup parsed as XML.
    """
    xml_soup = BeautifulSoup('', 'xml')
    xml_soup.append(html_soup.body.extract())
    return xml_soup
def convert_footnotes(xml_soup):
    """Inline footnote bodies at their reference anchors.

    Each <a id="fnref..."> reference is replaced by the content of the
    matching <li id="fn..."> entry, bracketed by the "-fn--"/"--fn-"
    markers expected by ReFoot_mod.js; the footnote list <div> and its
    <h2 id="notes"> title are then removed from the tree.
    """
    if xml_soup.find_all('li', id=re.compile("fn*.")):
        # Iterate through footnotes.
        # NOTE(review): the i-th fnref anchor is paired with the i-th fn list
        # item -- assumes references appear in the same order as the list;
        # confirm for documents with out-of-order references.
        footnotes = xml_soup.find_all('a', id=re.compile("fnref*."))
        for index_footnote, each_footnote in enumerate(footnotes):
            footnote_content = xml_soup.find_all('li', id=re.compile("fn*."))[index_footnote];
            # clean footnote
            footnote_content = footnote_content.contents[0] # clear fn back link (↵)
            footnote_content.find('a', href=re.compile("#fnref*.")).extract() # remove link
            # replace footnote content
            each_footnote.insert_before("-fn--") # to fit the requirements of ReFoot_mod.js
            each_footnote.insert_after("--fn-") # to fit the requirements of ReFoot_mod.js
            each_footnote.replace_with(footnote_content) #remove surrounding <p>?
        # clean footnotes from xml
        footnotes = xml_soup.find('div', { "class" : "footnotes" })
        footnotes.extract()
        # remove footnotes title
        footnotes_title = xml_soup.find('h2', id="notes")
        footnotes_title.extract()
    return xml_soup
def convert_HTMLtable_to_XMLCALStable(xml_soup, table_width = 300):
    """Replace every HTML <table> in the soup with a CALS-model XML table.

    Each table becomes <table_NNN><table frame="all"><tgroup>...</tgroup>
    </table></table_NNN>; colspec widths split ``table_width`` (mm) evenly
    across the columns, and HTML rowspan/colspan map onto the CALS
    morerows/namest/nameend attributes.
    NOTE: Python 2 code (uses xrange; integer division for the col width).
    """
    def calc_headerRowsNumber(HTML_table):
        # Header row count: prefer an explicit <thead>, otherwise count the
        # rows that contain at least one <th>.
        # tbody approach
        thead = HTML_table.find('thead')
        if thead:
            len_header = len(thead.find_all('tr'))
        # th approach
        else:
            len_header = 0
            for each_row in HTML_table.find_all('tr'):
                tag_list =[x.name for x in each_row.find_all(True)]
                if 'th' in tag_list:
                    len_header +=1
        return len_header
    def calcRowsNumber(xml_soup):
        # Total <tr> count (not used by the conversion loop below)
        rows = xml_soup.find_all('tr')
        return len(rows)
    def calcColsNumber(xml_soup):
        # Column count = widest row, counting <td> cells only
        cols_number = 0
        # Iterating over the rows of the table
        for each_row in xml_soup.find_all('tr'):
            cells = each_row.find_all('td')
            if cols_number < len(cells):
                cols_number = len(cells)
        return cols_number
    def createTagWithAttributesPlusString(xml_soup, tag_name, dict_attrib, new_string):
        # Helper: build a tag, copy the attribute dict onto it, append the text
        # New tag declaration
        new_tag = xml_soup.new_tag(tag_name)
        # Looking for present attributes to move inside the new tag
        if dict_attrib:
            for k, v in dict_attrib.items():
                new_tag[k] = v
        # New string to put inside the tag
        new_string = xml_soup.new_string(new_string)
        # Appending the string inside the tag
        new_tag.append(new_string)
        return new_tag
    # Grab table tag
    HTML_tables = xml_soup.find_all('table')
    for index_table, each_HTML_table in enumerate(HTML_tables):
        # Vars
        table_name = "table_%#03d" % (index_table+1)
        header_rows_number = calc_headerRowsNumber(each_HTML_table)
        # Rows and cols number calculation
        cols_number = calcColsNumber(each_HTML_table)
        # New tree
        CALS_table = BeautifulSoup('', 'xml')
        root_tag = createTagWithAttributesPlusString(CALS_table, table_name, None, '')
        # Creating tag 'table'
        table_tag_attributes = {'frame':'all'}
        table_tag = createTagWithAttributesPlusString(CALS_table, 'table', table_tag_attributes, '')
        # Creating tag 'tgroup'
        tgroup_tag_attributes = {'cols': cols_number}
        tgroup_tag = createTagWithAttributesPlusString(CALS_table, 'tgroup', tgroup_tag_attributes, '')
        # Creating tag 'colspec': one per column, equal widths in mm
        for i in xrange(1, cols_number+1):
            colspec_tag_attributes = {
                'colname':"c%01d" % i,
                'colwidth': "%01dmm" % (table_width / cols_number)
            }
            colspec_tag = createTagWithAttributesPlusString(CALS_table, 'colspec', colspec_tag_attributes, '')
            tgroup_tag.append(colspec_tag)
        # Creating tag 'thead' e 'tbody'
        head_tag = createTagWithAttributesPlusString(CALS_table, 'thead', None, '')
        body_tag = createTagWithAttributesPlusString(CALS_table, 'tbody', None, '')
        # Iterating over HTML rows
        for i, each_row in enumerate(each_HTML_table.find_all('tr')):
            # Creating tag 'row'
            row_tag = createTagWithAttributesPlusString(CALS_table, 'row', None, '')
            # Iterating over 'td' (HTML cells tags)
            for j, each_col in enumerate(each_row.find_all(['td', 'th'])):
                # Extracting contents from HTML cells (flatten whitespace)
                cell_content = each_col.text.replace('\t', '').replace('\n', ' ').lstrip().rstrip()
                # Attributes for entry tag (CALS cell)
                entry_tag_attributes = {'align':"left", 'valign':"top"}
                # Multiple rows cell -> CALS 'morerows' counts EXTRA rows
                if 'rowspan' in each_col.attrs:
                    entry_tag_attributes['morerows'] = int(each_col.attrs['rowspan'])-1
                # Multiple columns cell -> CALS spans from namest to nameend
                if 'colspan' in each_col.attrs:
                    begin = "c%01d" % (j+1)
                    end = "c%01d" % (j+int(each_col.attrs['colspan']))
                    entry_tag_attributes['namest'] = begin
                    entry_tag_attributes['nameend'] = end
                # Creating 'entry' tag (CALS cell)
                entry_tag = createTagWithAttributesPlusString(CALS_table, 'entry', entry_tag_attributes, '')
                entry_tag.string = cell_content
                # Appending cell into row
                row_tag.append(entry_tag)
            # header rows go to <thead>, the rest to <tbody>
            if i <= header_rows_number-1:
                head_tag.append(row_tag)
            else:
                body_tag.append(row_tag)
        # Appending header to table
        tgroup_tag.append(head_tag)
        tgroup_tag.append(body_tag)
        # Appending tgroup to table
        table_tag.append(tgroup_tag)
        # Appending table to root
        root_tag.append(table_tag)
        # Replacement with the new table
        each_HTML_table.replace_with(root_tag)
    return xml_soup
def main():
    """Command-line entry point: read an .html file, write the converted .xml.

    Usage: python HTMLtoXML.py yourfile.html
    The output file name swaps the 4-character extension for 'xml'.
    """
    # Terminal input
    try:
        source_path = str(sys.argv[1])
    except IndexError:
        # single-argument call form is valid in both Python 2 and 3
        print('\nNo file provided.\nUsage: python HTMLtoXML.py [yourfile.html]\n')
        return
    # Reading html file.  'r' instead of 'r+': the source is never written
    # back, and the context manager closes the previously-leaked handle.
    with open(source_path, 'r') as html_file:
        html_doc = html_file.read()
    # Cleaning and calling bs4
    clean_html_doc = clean_linebreaks(html_doc)
    html_soup = BeautifulSoup(clean_html_doc, 'html')
    # Parsing and converting the tree
    xml_soup = extract_body_from_html(html_soup)
    xml_soup = convert_footnotes(xml_soup)
    xml_soup = convert_HTMLtable_to_XMLCALStable(xml_soup)
    # Writing the output ('w' truncates; the old 'w+' read-back was unused)
    with open(source_path[:-4] + 'xml', 'w') as output_xml:
        output_xml.write(str(xml_soup))
### Instructions
# Script entry point: run the HTML -> XML conversion on the file named in argv.
if __name__ == '__main__':
    main()
|
#!/usr/bin/env python
# MIT License
#
# Copyright (c) 2018 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse, sys, time, math, socket, fcntl, struct
def get_ip_address(ifname):
    """Return the IPv4 address bound to ``ifname`` (e.g. 'eth0').

    Issues the SIOCGIFADDR ioctl (0x8915) on a throwaway UDP socket; the
    address bytes sit at offset 20-24 of the returned ifreq structure.
    Linux-only.
    """
    probe_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifreq = struct.pack('256s', ifname[:15])
    resp = fcntl.ioctl(probe_sock.fileno(), 0x8915, ifreq)  # SIOCGIFADDR
    return socket.inet_ntoa(resp[20:24])
def convert_size(size_bytes):
    """Format a byte count as a human-readable 1024-based string.

    Returns "0" for zero; otherwise e.g. 1536 -> "1.5 K", 5242880 -> "5.0 M".
    Values below 1 KiB keep an empty unit suffix (e.g. "500.0 ").
    """
    if size_bytes == 0:
        return "0"
    units = ("", "K", "M", "G", "T", "P", "E", "Z", "Y")
    magnitude = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, magnitude), 2)
    return "%s %s" % (scaled, units[magnitude])
# Command-line interface.  bps thresholds are required; packet-rate
# thresholds are optional (0 disables that check).
# BUG FIX: the description previously said "Check used disk space" -- a
# copy-paste leftover; this plugin checks network interface traffic.
parser = argparse.ArgumentParser(description = 'Check network interface traffic, designed for use with icinga or nagios')
parser.add_argument('-w', '--warning', type=str, default="0", help='Set network speed (rx OR tx) warning threshold in bps', required=True)
parser.add_argument('-c', '--critical', type=str, default="0", help="Set network speed (rx OR tx) critical threshold in bps", required=True)
parser.add_argument('-W', '--pwarning', type=str, default="0", help='Set packets (rx OR tx) warning threshold in pps (0=disable check)')
parser.add_argument('-C', '--pcritical', type=str, default="0", help="Set packets (rx OR tx) critical threshold in pps (0=disable check)")
parser.add_argument('-i', '--interval', type=int, default="5", help="Set sampling interval in seconds")
args = parser.parse_args()

# Thresholds as ints (-w/-c/-W/-C arrive as strings)
size_threshold_warning = int(args.warning)
size_threshold_critical = int(args.critical)
psize_threshold_warning = int(args.pwarning)
psize_threshold_critical = int(args.pcritical)
interval = int(args.interval)

# Accumulators filled while processing the /proc/net/dev snapshots below
return_code = 0
result = ""
stats = ""
dev_0 = []
dev_5 = []
interfaces = {}
# Gather data: take two snapshots of /proc/net/dev `interval` seconds apart;
# the per-second rates are computed from the counter deltas further below.
with open("/proc/net/dev") as mf:
    dev_0 = mf.readlines()
time.sleep(interval)
with open("/proc/net/dev") as mf:
    dev_5 = mf.readlines()
# First snapshot: register every interface and record its *_0 counters.
for line in dev_0:
    data = line.split();
    # skip the two /proc/net/dev header lines
    if data[0] == "Inter-|" or data[0] == "face":
        continue
    data[0] = data[0][:-1]  # strip the trailing ':' from e.g. "eth0:"
    interfaces[data[0]] = {}
    interfaces[data[0]]['name'] = data[0]
    interfaces[data[0]]['address'] = get_ip_address(data[0])
    # column layout of /proc/net/dev: rx bytes/packets/errs/drop/fifo/frame/
    # compressed/multicast, then the corresponding tx columns
    interfaces[data[0]]["rx_bytes_0"] = data[1]
    interfaces[data[0]]["rx_packets_0"] = data[2]
    interfaces[data[0]]["rx_errors_0"] = data[3]
    interfaces[data[0]]["rx_drops_0"] = data[4]
    interfaces[data[0]]["rx_fifo_0"] = data[5]
    interfaces[data[0]]["rx_frame_0"] = data[6]
    interfaces[data[0]]["rx_compressed_0"] = data[7]
    interfaces[data[0]]["rx_multicast_0"] = data[8]
    interfaces[data[0]]["tx_bytes_0"] = data[9]
    interfaces[data[0]]["tx_packets_0"] = data[10]
    interfaces[data[0]]["tx_errors_0"] = data[11]
    interfaces[data[0]]["tx_drop_0"] = data[12]
    interfaces[data[0]]["tx_fifo_0"] = data[13]
    interfaces[data[0]]["tx_colls_0"] = data[14]
    interfaces[data[0]]["tx_carrier_0"] = data[15]
    interfaces[data[0]]["tx_compressed_0"] = data[16]
# Second snapshot: add the *_5 counters to the already-registered interfaces.
for line in dev_5:
    data = line.split();
    if data[0] == "Inter-|" or data[0] == "face":
        continue
    data[0] = data[0][:-1]
    interfaces[data[0]]["rx_bytes_5"] = data[1]
    interfaces[data[0]]["rx_packets_5"] = data[2]
    interfaces[data[0]]["rx_errors_5"] = data[3]
    interfaces[data[0]]["rx_drops_5"] = data[4]
    interfaces[data[0]]["rx_fifo_5"] = data[5]
    interfaces[data[0]]["rx_frame_5"] = data[6]
    interfaces[data[0]]["rx_compressed_5"] = data[7]
    interfaces[data[0]]["rx_multicast_5"] = data[8]
    interfaces[data[0]]["tx_bytes_5"] = data[9]
    interfaces[data[0]]["tx_packets_5"] = data[10]
    interfaces[data[0]]["tx_errors_5"] = data[11]
    interfaces[data[0]]["tx_drop_5"] = data[12]
    interfaces[data[0]]["tx_fifo_5"] = data[13]
    interfaces[data[0]]["tx_colls_5"] = data[14]
    interfaces[data[0]]["tx_carrier_5"] = data[15]
    interfaces[data[0]]["tx_compressed_5"] = data[16]
for i in interfaces.itervalues():
i["rx_bps"] = (float(i["rx_bytes_5"]) - float(i["rx_bytes_0"])) / interval * 8
i["tx_bps"] = (float(i["tx_bytes_5"]) - float(i["tx_bytes_0"])) / interval * 8
i["rx_pps"] = (float(i["rx_packets_5"]) - float(i["rx_packets_0"])) / interval * 8
i["tx_pps"] = (float(i["tx_packets_5"]) - float(i["tx_packets_0"])) / interval * 8
i["rx_eps"] = (float(i["rx_errors_5"]) - float(i["rx_errors_0"])) / interval * 8
i["tx_eps"] = (float(i["tx_errors_5"]) - float(i["tx_errors_0"])) / interval * 8
result += "%s rx %sbps (%spkt/s) tx %sbps (%spkt/s) " % ( i['address'], convert_size(i['rx_bps']), convert_size(i['rx_pps']), convert_size(i['tx_bps']), convert_size(i['tx_pps']) )
stats += "%s.bps.rx=%d;%d;%d;; " % ( i['address'], i['rx_bps'], size_threshold_warning, size_threshold_critical )
stats += "%s.bps.tx=%d;%d;%d;; " % ( i['address'], i['tx_bps'], size_threshold_warning, size_threshold_critical )
stats += "%s.pps.rx=%d;%s;%s;; " % ( i['address'], i['rx_pps'], psize_threshold_warning if psize_threshold_warning > 0 else "", psize_threshold_critical if psize_threshold_critical > 0 else "" )
stats += "%s.pps.tx=%d;%s;%s;; " % ( i['address'], i['tx_pps'], psize_threshold_warning if psize_threshold_warning > 0 else "", psize_threshold_critical if psize_threshold_critical > 0 else "" )
stats += "%s.eps.rx=%d;;;; " % ( i['address'], i['rx_eps'] )
stats += "%s.eps.tx=%d;;;; " % ( i['address'], i['tx_eps'] )
if i['rx_bps'] >= size_threshold_critical or i['tx_bps'] >= size_threshold_critical:
return_code = 2
elif i['rx_bps'] >= size_threshold_warning or i['tx_bps'] >= size_threshold_warning:
if (return_code < 1):
return_code = 1
if psize_threshold_critical > 0 and ( i['rx_pps'] >= psize_threshold_critical or i['tx_pps'] >= psize_threshold_critical ):
return_code = 2
elif psize_threshold_warning > 0 and ( i['rx_pps'] >= psize_threshold_warning or i['tx_pps'] >= psize_threshold_warning ):
if (return_code < 1):
return_code = 1
status = "OK"
if return_code == 1:
status = "WARNING"
elif return_code == 2:
status = "CRITICAL"
print "%s - IF TRAFFIC: %s | %s" % (status, result, stats);
sys.exit(return_code);
|
import random
from collections import Counter
from enum import Enum, unique
from os import PathLike
from typing import Union, List
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from app.features import N_MELS
# Relative path to the trained LSTM state dict consumed by load_model().
MODEL_PATH = 'models/emotions.pt'
@unique
class Emotion(Enum):
    """Closed set of emotion labels the classifier predicts.

    Declaration order matters: EMOTIONS and the model's class indices
    follow this order, so do not reorder members.
    """
    COMFORTABLE = 'comfortable'
    HAPPY = 'happy'
    INSPIRATIONAL = 'inspirational'
    JOY = 'joy'
    LONELY = 'lonely'
    FUNNY = 'funny'
    NOSTALGIC = 'nostalgic'
    PASSIONATE = 'passionate'
    QUIET = 'quiet'
    RELAXED = 'relaxed'
    ROMANTIC = 'romantic'
    SADNESS = 'sadness'
    SOULFUL = 'soulful'
    SWEET = 'sweet'
    SERIOUS = 'serious'
    ANGER = 'anger'
    WARY = 'wary'
    SURPRISE = 'surprise'
    FEAR = 'fear'
# Flat list of emotion names in Emotion declaration order; used to decode the
# model's class indices back into labels.
EMOTIONS = [e.value for e in Emotion]
# Lookup table built once at import time: the previous implementation rebuilt
# this 19-entry dict on every get_emoji() call.
_EMOTION_EMOJI = {
    Emotion.COMFORTABLE.value: '😊',
    Emotion.HAPPY.value: '😁',
    Emotion.INSPIRATIONAL.value: '🤩',
    Emotion.JOY.value: '😂',
    Emotion.LONELY.value: '😟',
    Emotion.FUNNY.value: '😆',
    Emotion.NOSTALGIC.value: '🙄',
    Emotion.PASSIONATE.value: '😍',
    Emotion.QUIET.value: '🤐',
    Emotion.RELAXED.value: '😌',
    Emotion.ROMANTIC.value: '😘',
    Emotion.SADNESS.value: '🙁',
    Emotion.SOULFUL.value: '🙃',
    Emotion.SWEET.value: '🤗',
    Emotion.SERIOUS.value: '🤨',
    Emotion.ANGER.value: '😡',
    Emotion.WARY.value: '😑',
    Emotion.SURPRISE.value: '😲',
    Emotion.FEAR.value: '😱'
}

def get_emoji(emotion: str) -> str:
    """Return the emoji for an emotion name, or '' for unknown names."""
    return _EMOTION_EMOJI.get(emotion, '')
class EmotionClassifier(nn.Module):
    """
    LSTM Emotion Classifier

    Stacked LSTM over a (batch, seq, input_dim) feature tensor
    (batch_first=True); the hidden state of the last time step is
    projected to per-emotion probabilities.
    """
    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        batch_size: int = 9,
        output_dim: int = len(Emotion),
        n_layers: int = 2
    ):
        """
        :param input_dim: The number of expected features in the input `x`
        :param hidden_dim: The number of features in the hidden state `h`
        :param batch_size: stored on the instance but not used by forward()
        :param output_dim: number of output classes (defaults to the emotion count)
        :param n_layers: number of stacked LSTM layers
        """
        super(EmotionClassifier, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.n_layers = n_layers
        self.lstm = nn.LSTM(self.input_dim, self.hidden_dim, self.n_layers, batch_first=True)
        self.output = nn.Linear(self.hidden_dim, output_dim)

    def forward(self, x):
        """Return (batch, output_dim) class probabilities.

        NOTE(review): softmax here means the model emits probabilities, not
        logits -- if training used nn.CrossEntropyLoss this applies softmax
        twice; confirm against the training code.
        """
        lstm_out, hidden = self.lstm(x)
        # last time step only (batch_first=True -> time is dim 1)
        logits = self.output(lstm_out[:, -1])
        return F.softmax(logits, dim=1)
def load_model(model_path: Union[str, bytes, PathLike]) -> nn.Module:
    """
    Loads model from state dict

    :param model_path: path to the saved ``state_dict`` checkpoint
    :return: an ``EmotionClassifier`` switched to eval mode
    """
    input_dim = N_MELS
    hidden_dim = 32
    n_classes = len(Emotion)
    model = EmotionClassifier(
        input_dim=input_dim,
        hidden_dim=hidden_dim,
        output_dim=n_classes
    )
    # map_location='cpu' lets a checkpoint saved on a GPU load on a CPU-only
    # host; the model is used on CPU here anyway (no .to(device) call).
    model.load_state_dict(torch.load(model_path, map_location='cpu'))
    model.eval()
    return model
def predict_topk_emotions(features: np.ndarray, k: int = 3) -> List[str]:
    """Return the k most frequent top-k emotion labels across all frames.

    :param features: mel-spectrogram features fed to the classifier
    :param k: how many top classes to take per frame and labels to return
    """
    model = load_model(MODEL_PATH)
    output = model(torch.tensor(features))
    # BUG FIX: .tolist() converts the indices to Python ints.  Iterating the
    # flattened tensor yields 0-dim tensors whose __hash__ is identity-based,
    # so Counter never aggregated equal indices and every count was 1.
    indices = torch.flatten(torch.topk(output, k, dim=1)[1]).tolist()
    top_indices = [idx for idx, _ in Counter(indices).most_common(k)]
    return [EMOTIONS[i] for i in top_indices]
|
<filename>examples/simple/classification/classification_pipelines.py
from typing import Optional
from fedot.core.log import Log
from fedot.core.pipelines.node import PrimaryNode, SecondaryNode
from fedot.core.pipelines.pipeline import Pipeline
def cnn_composite_pipeline(composite_flag: bool = True) -> Pipeline:
    """
    Returns pipeline with the following structure:

    cnn \
         -> rf -> final prediction
    cnn /

    Where cnn - convolutional neural network, rf - random forest

    :param composite_flag: when False, only the first (deep) cnn feeds the rf
    """
    node_first = PrimaryNode('cnn')
    node_first.custom_params = {'image_shape': (28, 28, 1),
                                'architecture': 'deep',
                                'num_classes': 10,
                                'epochs': 15,
                                'batch_size': 128}
    node_second = PrimaryNode('cnn')
    # NOTE(review): this node uses the key 'architecture_type' while the node
    # above uses 'architecture' -- confirm which key the cnn operation reads;
    # one of the two may be silently ignored.
    node_second.custom_params = {'image_shape': (28, 28, 1),
                                 'architecture_type': 'simplified',
                                 'num_classes': 10,
                                 'epochs': 10,
                                 'batch_size': 128}
    node_final = SecondaryNode('rf', nodes_from=[node_first, node_second])
    if not composite_flag:
        node_final = SecondaryNode('rf', nodes_from=[node_first])
    pipeline = Pipeline(node_final)
    return pipeline
def classification_pipeline_with_balancing(custom_params=None):
    """
    Returns pipeline with the following structure:

    resample -> logit -> final prediction

    Where resample - algorithm for balancing dataset, logit - logistic regression

    :param custom_params: optional parameter dict applied to the resample node
    """
    resample_node = PrimaryNode(operation_type='resample')
    if custom_params is not None:
        resample_node.custom_params = custom_params
    logit_node = SecondaryNode(operation_type='logit', nodes_from=[resample_node])
    return Pipeline(logit_node)
def classification_pipeline_without_balancing():
    """
    Returns pipeline with the following structure:

    logit -> final prediction

    Where logit - logistic regression (no resampling/balancing step)
    """
    node = PrimaryNode(operation_type='logit')
    return Pipeline(node)
def classification_complex_pipeline(log: Optional[Log] = None):
    """
    Returns pipeline with the following structure:

    rf  \
         -> logit -> final prediction
    knn /

    :param log: optional logger forwarded to the Pipeline
    """
    primaries = [PrimaryNode(operation_type=name) for name in ('rf', 'knn')]
    root = SecondaryNode(operation_type='logit', nodes_from=primaries)
    return Pipeline(root, log=log)
def classification_random_forest_pipeline():
    """
    Returns pipeline with the following structure:

    scaling -> rf -> final prediction
    """
    scaling_node = PrimaryNode('scaling')
    return Pipeline(SecondaryNode('rf', nodes_from=[scaling_node]))
def classification_isolation_forest_pipeline():
    """
    Returns pipeline with the following structure:

    scaling -> isolation_forest -> rf -> final prediction

    The isolation forest acts as an outlier-handling stage before the
    random forest classifier.
    """
    scaling_node = PrimaryNode('scaling')
    outlier_node = SecondaryNode('isolation_forest_class', nodes_from=[scaling_node])
    return Pipeline(SecondaryNode('rf', nodes_from=[outlier_node]))
def classification_svc_complex_pipeline():
    """
    Build a pipeline with the following structure:
    svc -> logit \
                  \
                   rf -> final prediction
    knn -> knn    /
                 /
    svc -> logit /
    Where svc - support vector classifier, logit - logistic regression,
    knn - K nearest neighbors classifier, rf - random forest classifier
    """
    # Left branch: SVC with default settings (probability output enabled).
    svc_default = PrimaryNode('svc')
    svc_default.custom_params = {'probability': True}
    logit_after_default_svc = SecondaryNode('logit', nodes_from=[svc_default])
    # Right branch: SVC with a tuned RBF kernel.
    svc_tuned = PrimaryNode('svc')
    svc_tuned.custom_params = {'kernel': 'rbf', 'C': 10,
                               'gamma': 1, 'cache_size': 2000,
                               'probability': True}
    logit_after_tuned_svc = SecondaryNode('logit', nodes_from=[svc_tuned])
    # KNN chain feeding the root together with the logit branches.
    knn_leaf = PrimaryNode('knn')
    knn_middle = SecondaryNode('knn', nodes_from=[knn_leaf, logit_after_default_svc])
    rf_root = SecondaryNode('rf', nodes_from=[logit_after_tuned_svc, knn_middle])
    return Pipeline(rf_root)
def classification_three_depth_manual_pipeline():
    """
    Returns pipeline with the following structure:
    logit \
           knn \
    rf    /     knn -> final prediction
    rf -> qda  /
    Where rf - random forest classifier, logit - logistic regression,
    knn - K nearest neighbors classifier, qda - discriminant analysis
    """
    # NOTE: the previous revision named these nodes ``xgb_*`` and documented
    # them as "xg boost", but the operation used is 'rf' (random forest);
    # names and docstring now match the actual operations.
    logit_node_primary = PrimaryNode('logit')
    rf_node_primary = PrimaryNode('rf')
    rf_node_primary_second = PrimaryNode('rf')
    qda_node_third = SecondaryNode('qda', nodes_from=[rf_node_primary_second])
    knn_node_third = SecondaryNode('knn', nodes_from=[logit_node_primary, rf_node_primary])
    knn_root = SecondaryNode('knn', nodes_from=[qda_node_third, knn_node_third])
    pipeline = Pipeline(knn_root)
    return pipeline
def classification_rf_complex_pipeline():
    """
    Build a pipeline with the following structure:
    logit \
           rf \
    lda   /    \
                rf -> final prediction
    logit -> knn /
                /
    lda        /
    Where lda - discriminant analysis, logit - logistic regression,
    rf - random forest classifier, knn - K nearest neighbors classifier
    """
    pipeline = Pipeline()
    root = SecondaryNode('rf')
    # Each intermediate node is fed by one logit and one lda primary node.
    for child in (SecondaryNode('rf'), SecondaryNode('knn')):
        for primary_operation in ('logit', 'lda'):
            primary = PrimaryNode(primary_operation)
            child.nodes_from.append(primary)
            pipeline.add_node(primary)
        pipeline.add_node(child)
        root.nodes_from.append(child)
    pipeline.add_node(root)
    return pipeline
|
import os.path
import pkgutil
import re
import tokenize
import pytest
import streamlink.plugins
import tests.plugins
from streamlink.compat import is_py2
from streamlink.plugin.plugin import Matcher, Plugin
from streamlink.utils.module import load_module
from streamlink_cli.argparser import build_parser
plugins_path = streamlink.plugins.__path__[0]
plugintests_path = tests.plugins.__path__[0]

# Plugins implementing generic streaming protocols rather than specific sites.
protocol_plugins = [
    "http",
    "hls",
    "dash",
    "rtmp",
]
# Test modules that don't correspond to a single plugin.
plugintests_ignore = [
    "test_stream",
]

# All plugin module names, excluding shared "common_*" helper modules.
plugins = [
    pname
    for _finder, pname, _ispkg in pkgutil.iter_modules([plugins_path])
    if not pname.startswith("common_")
]
plugins_no_protocols = [pname for pname in plugins if pname not in protocol_plugins]
# Plugin names derived from their "test_*" test modules.
plugintests = [
    tname[len("test_"):]
    for _finder, tname, _ispkg in pkgutil.iter_modules([plugintests_path])
    if tname.startswith("test_") and tname not in plugintests_ignore
]
def unique(iterable):
    """Yield the items of *iterable* in order, skipping any already seen."""
    emitted = set()
    for element in iterable:
        if element in emitted:
            continue
        emitted.add(element)
        yield element
class TestPlugins:
    """Sanity checks applied to every plugin module shipped with streamlink."""

    @pytest.fixture(scope="class", params=plugins)
    def plugin(self, request):
        # One loaded plugin module per parametrized plugin name.
        return load_module(request.param, plugins_path)

    @pytest.fixture(scope="class")
    def parser(self):
        return build_parser()

    @pytest.fixture(scope="class")
    def global_arg_dests(self, parser):
        # Destinations of all CLI arguments known to the global parser.
        return [action.dest for action in parser._actions]

    def test_exports_plugin(self, plugin):
        assert hasattr(plugin, "__plugin__"), "Plugin module exports __plugin__"
        assert issubclass(plugin.__plugin__, Plugin), "__plugin__ is an instance of the Plugin class"

    def test_classname(self, plugin):
        classname = plugin.__plugin__.__name__
        capitalized = classname[0].upper() + classname[1:]
        assert classname == capitalized, "__plugin__ class name starts with uppercase letter"
        assert "_" not in classname, "__plugin__ class name does not contain underscores"

    def test_matchers(self, plugin):
        matchers = plugin.__plugin__.matchers
        assert isinstance(matchers, list) and len(matchers) > 0, "Has at least one matcher"
        assert all(isinstance(matcher, Matcher) for matcher in matchers), "Only has valid matchers"

    def test_plugin_api(self, plugin):
        pluginclass = plugin.__plugin__
        assert not hasattr(pluginclass, "can_handle_url"), "Does not implement deprecated can_handle_url(url)"
        assert not hasattr(pluginclass, "priority"), "Does not implement deprecated priority(url)"
        assert callable(pluginclass._get_streams), "Implements _get_streams()"

    def test_has_valid_global_args(self, global_arg_dests, plugin):
        assert all(parg.dest in global_arg_dests for parg in plugin.__plugin__.arguments if parg.is_global), \
            "All plugin arguments with is_global=True are valid global arguments"
class TestPluginTests:
    """Ensure a 1:1 mapping between plugin modules and their test modules."""
    @pytest.mark.parametrize("plugin", plugins_no_protocols)
    def test_plugin_has_tests(self, plugin):
        # Every site plugin (protocol plugins excluded) needs a test module.
        assert plugin in plugintests, "Test module exists for plugin"
    @pytest.mark.parametrize("plugintest", plugintests)
    def test_test_has_plugin(self, plugintest):
        # Every test module must refer to an existing plugin (no orphans).
        assert plugintest in plugins, "Plugin exists for test module"
@pytest.mark.skipif(is_py2, reason="Test is only applicable for py3")
class TestPluginMetadata:
    """Validate the ``$key value`` metadata header docstring of each plugin module.

    Each plugin file must begin with a triple-quoted string of ``$key value``
    lines; these tests check the allowed keys, required keys, ordering and
    repetition rules, and the value format of specific keys.
    """

    @pytest.fixture(scope="class")
    def metadata_keys_all(self):
        # All recognized metadata keys, in their canonical order
        return (
            "description",
            "url",
            "type",
            "region",
            "account",
            "notes",
        )

    @pytest.fixture(scope="class")
    def metadata_keys_required(self):
        # Keys that every plugin must define
        return (
            "url",
            "type",
        )

    @pytest.fixture(scope="class")
    def metadata_keys_repeat(self):
        # Keys that may appear more than once
        return (
            "url",
        )

    @pytest.fixture(scope="class")
    def metadata_keys_no_repeat(self, metadata_keys_all, metadata_keys_repeat):
        # Complement of the repeatable keys, order preserved
        return tuple(
            key
            for key in metadata_keys_all
            if key not in metadata_keys_repeat
        )

    @pytest.fixture(scope="class", params=plugins_no_protocols)
    def tokeninfo(self, request):
        # Tokenize the plugin file and stop at its first STRING token,
        # which must be the metadata docstring at the top of the module.
        with open(os.path.join(plugins_path, "{0}.py".format(request.param)), "r") as handle:
            for tokeninfo in tokenize.generate_tokens(handle.readline):  # pragma: no branch
                if tokeninfo.type != tokenize.STRING:
                    continue
                break
        assert type(tokeninfo) is tokenize.TokenInfo, "Parses the first token"
        assert tokeninfo.type == tokenize.STRING, "First token is a string"
        return tokeninfo

    @pytest.fixture(scope="class")
    def metadata_items(self, tokeninfo):
        # Parse the docstring into an ordered list of (key, value) pairs.
        match = re.search(r"^\"\"\"\n(?P<metadata>.+)\n\"\"\"$", tokeninfo.string, re.DOTALL)
        assert match is not None, "String is a properly formatted long string"
        lines = [
            re.search(r"^\$(?P<key>\w+) (?P<value>\S.+)$", line)
            for line in match.group("metadata").split("\n")
        ]
        assert all(lines), "All lines are properly formatted using the '$key value' format"
        return [(m.group("key"), m.group("value")) for m in lines]

    @pytest.fixture(scope="class")
    def metadata_keys(self, metadata_items):
        return tuple(key for key, value in metadata_items)

    @pytest.fixture(scope="class")
    def metadata_dict(self, metadata_keys_no_repeat, metadata_items):
        return {k: v for k, v in metadata_items if k in metadata_keys_no_repeat}

    def test_no_unknown(self, metadata_keys_all, metadata_keys):
        assert not any(True for key in metadata_keys if key not in metadata_keys_all), \
            "No unknown metadata keys are set"

    def test_required(self, metadata_keys_required, metadata_keys):
        # BUGFIX: the previous form `all(True for tag in ... if tag in ...)`
        # was vacuously true (the generator could never yield False), so
        # missing required keys went undetected. Test membership directly.
        assert all(tag in metadata_keys for tag in metadata_keys_required), \
            "All required metadata keys are set"

    def test_order(self, metadata_keys_all, metadata_keys):
        keys = tuple(key for key in metadata_keys_all if key in metadata_keys)
        assert keys == tuple(unique(metadata_keys)), \
            "All metadata keys are defined in order"
        assert tuple(reversed(keys)) == tuple(unique(reversed(metadata_keys))), \
            "All repeatable metadata keys are defined in order"

    def test_repeat(self, metadata_keys_repeat, metadata_keys, metadata_items):
        # Group values of repeatable keys and check each group has no duplicates.
        items = {key: tuple(v for k, v in metadata_items if k == key) for key in metadata_keys if key in metadata_keys_repeat}
        assert items == {key: tuple(unique(value)) for key, value in items.items()}, \
            "Repeatable keys don't have any duplicates"

    def test_no_repeat(self, metadata_keys_no_repeat, metadata_keys):
        keys = tuple(key for key in metadata_keys if key in metadata_keys_no_repeat)
        assert keys == tuple(unique(keys)), "Non-repeatable keys are set at most only once"

    def test_key_url(self, metadata_items):
        assert not any(re.match("^https?://", val) for key, val in metadata_items if key == "url"), \
            "URL metadata values don't start with http:// or https://"

    def test_key_type(self, metadata_dict):
        assert metadata_dict.get("type") in ("live", "vod", "live, vod"), \
            "Type metadata has the correct value"
class TestRemovedPluginsFile:
    """Checks on the ``plugins/.removed`` bookkeeping file."""

    @pytest.fixture(scope="class")
    def removedplugins(self):
        # Non-comment lines of the .removed file, stripped of whitespace.
        with open(os.path.join(plugins_path, ".removed"), "r") as handle:
            stripped = (line.strip() for line in handle.readlines())
            return [line for line in stripped if not line.startswith("#")]

    @pytest.mark.parametrize("plugin", plugins)
    def test_plugin_not_in_file(self, plugin, removedplugins):
        assert plugin not in removedplugins, "Existing plugin is not in removed plugins list"

    def test_is_sorted(self, removedplugins):
        assert sorted(removedplugins) == removedplugins, "Removed plugins list is sorted alphabetically"
|
<filename>tests/test_saving_calculators.py
import json
import unittest
from tests.test_base import BaseTest
from tests.saving_constants import (
AHORROS_JSON_0,
AHORROS_JSON_1,
AHORROS_PARA_META_JSON_0,
AHORROS_PARA_META_JSON_1,
AHORROS_PARA_META_RESULT_0,
AHORROS_PARA_META_RESULT_1,
AHORROS_RESULT_0,
AHORROS_RESULT_1,
INTERES_REQUERIDO_JSON_0,
INTERES_REQUERIDO_JSON_1,
INTERES_REQUERIDO_RESULT_0,
INTERES_REQUERIDO_RESULT_1,
TIEMPO_PARA_META_JSON_0,
TIEMPO_PARA_META_JSON_1,
TIEMPO_PARA_META_RESULT_0,
TIEMPO_PARA_META_RESULT_1,
VALOR_ACTUAL_JSON_0,
VALOR_ACTUAL_JSON_1,
VALOR_ACTUAL_RESULT_0,
VALOR_ACTUAL_RESULT_1,
)
class TestSavingCalculator(BaseTest):
    """Test all endpoints for the saving calculator."""

    def setUp(self):
        super(TestSavingCalculator, self).setUp()
        with self.app_context:
            pass

    def _post_json(self, route, payload):
        """POST *payload* as JSON to *route* and return the decoded response body.

        Shared helper that removes the request/decode boilerplate previously
        duplicated in every test method; uses the common request headers
        defined on the test class.
        """
        with self.client as c:
            results = c.post(
                route,
                data=json.dumps(payload),
                headers=TestSavingCalculator.request_headers,
            )
            return json.loads(results.data)

    def test_saving_for_goal_end(self):
        """
        Test the endpoint for the savings to reach goal calculator
        when deposits are made at the end of the compounding period
        """
        data = self._post_json("/ahorros-para-lograr-meta", AHORROS_PARA_META_JSON_0)
        self.assertDictEqual(data, AHORROS_PARA_META_RESULT_0)

    def test_saving_for_goal_start(self):
        """
        Test the endpoint for the savings to reach goal calculator
        when deposits are made at the start of the compounding period
        """
        data = self._post_json("/ahorros-para-lograr-meta", AHORROS_PARA_META_JSON_1)
        self.assertDictEqual(data, AHORROS_PARA_META_RESULT_1)

    def test_saving_end(self):
        """
        Test the endpoint for the savings calculator when deposits are
        made at the end of the compounding period
        """
        data = self._post_json("/calculadora-de-ahorros", AHORROS_JSON_0)
        self.assertDictEqual(data, AHORROS_RESULT_0)

    def test_saving_start(self):
        """
        Test the endpoint for the savings calculator when deposits are
        made at the start of the compounding period
        """
        data = self._post_json("/calculadora-de-ahorros", AHORROS_JSON_1)
        self.assertDictEqual(data, AHORROS_RESULT_1)

    def test_required_rate_end(self):
        """
        Test the endpoint for the required interest rate calculator
        when deposits are made at the end of the compounding period
        """
        data = self._post_json("/tasa-de-interes-requerida", INTERES_REQUERIDO_JSON_0)
        # Rates are floats, so compare within a small absolute tolerance.
        self.assertLess(
            abs(data["rate"] - INTERES_REQUERIDO_RESULT_0["rate"]), 0.001
        )

    def test_required_rate_start(self):
        """
        Test the endpoint for the required interest rate calculator
        when deposits are made at the start of the compounding period
        """
        data = self._post_json("/tasa-de-interes-requerida", INTERES_REQUERIDO_JSON_1)
        self.assertLess(
            abs(data["rate"] - INTERES_REQUERIDO_RESULT_1["rate"]), 0.001
        )

    def test_time_to_goal_end(self):
        """
        Test the endpoint for the time to reach goal calculator
        when deposits are made at the end of the compounding period
        """
        data = self._post_json("/tiempo-para-lograr-meta", TIEMPO_PARA_META_JSON_0)
        self.assertDictEqual(data, TIEMPO_PARA_META_RESULT_0)

    def test_time_to_goal_start(self):
        """
        Test the endpoint for the time to reach goal calculator
        when deposits are made at the start of the compounding period
        """
        data = self._post_json("/tiempo-para-lograr-meta", TIEMPO_PARA_META_JSON_1)
        self.assertDictEqual(data, TIEMPO_PARA_META_RESULT_1)

    def test_present_value_end(self):
        """
        Test the endpoint for the present value calculator when deposits
        are made at the end of the compounding period
        """
        data = self._post_json("/valor-actual", VALOR_ACTUAL_JSON_0)
        self.assertDictEqual(data, VALOR_ACTUAL_RESULT_0)

    def test_present_value_start(self):
        """
        Test the endpoint for the present value calculator when deposits
        are made at the start of the compounding period
        """
        data = self._post_json("/valor-actual", VALOR_ACTUAL_JSON_1)
        self.assertDictEqual(data, VALOR_ACTUAL_RESULT_1)
# Allow running this test module directly: `python tests/test_saving_calculators.py`
if __name__ == "__main__":
    unittest.main()
|
import bpy
import bpy_extras
import bmesh
from bpy.props import StringProperty
from .reader import AsciiModelReader
import os
class ImportOperator(bpy.types.Operator, bpy_extras.io_utils.ImportHelper):
    """This appears in the tooltip of the operator and in the generated docs"""
    # NOTE(review): uses the Blender 2.7x API (scene.objects.link, uv_textures,
    # material.texture_slots) — confirm the target Blender version.
    bl_idname = 'io_scene_modl.modl_import' # important since its how bpy.ops.import_test.some_data is constructed
    bl_label = 'Import Lithtech MODL'
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    # ImportHelper mixin class uses this
    filename_ext = ".model00a"
    filter_glob = StringProperty(
        default="*.model00a",
        options={'HIDDEN'},
        maxlen=255, # Max internal buffer length, longer would be clamped.
    )
    def execute(self, context):
        """Read the selected .model00a file and build an armature plus one
        mesh object per shape, parented to the armature."""
        model = AsciiModelReader().from_file(self.filepath)
        # Name the model after the file (without extension).
        model.name = os.path.splitext(os.path.basename(self.filepath))[0]
        armature_data = bpy.data.armatures.new(model.name)
        armature_object = bpy.data.objects.new(model.name, armature_data)
        context.scene.objects.link(armature_object)
        ''' Create materials. '''
        # One material per material index; the while-loop fills any gaps so
        # that materials[shape.material_index] always exists.
        materials = []
        for shape_index, shape in enumerate(model.shapes):
            while len(materials) <= shape.material_index:
                ''' Create a material for the new piece. '''
                material = bpy.data.materials.new(shape.name) # TODO: if material indices are out-of-order, the name might be wrong!
                materials.append(material)
                ''' Create texture. '''
                texture = bpy.data.textures.new(shape.name, type='IMAGE')
                # TODO: not sure where we'd get this from (DDS?)
                texture_slot = material.texture_slots.add()
                texture_slot.texture = texture
        print(materials)
        # Build one mesh object per shape via bmesh.
        for shape_index, shape in enumerate(model.shapes):
            mesh_data = bpy.data.meshes.new(shape.name)
            mesh_object = bpy.data.objects.new(shape.name, mesh_data)
            bm = bmesh.new()
            bm.from_mesh(mesh_data)
            ''' Add materials to mesh. '''
            for material in materials:
                ''' Create UV map. '''
                uv_texture = mesh_data.uv_textures.new()
                mesh_data.materials.append(material)
                material.texture_slots[0].uv_layer = uv_texture.name
            # Vertices
            for vertex in shape.vertices:
                bm.verts.new(vertex)
            bm.verts.ensure_lookup_table()
            # Faces
            for face_index, face in enumerate(shape.faces):
                # Map the face's vertex indices to the bmesh vertices created above.
                face = [bm.verts[x] for x in face]
                try:
                    bmface = bm.faces.new(face)
                except ValueError:
                    '''
                    This face is a duplicate of another face, which is disallowed by Blender.
                    Mark this face for deletion after iteration.
                    '''
                    #duplicate_face_indices.append(face_index)
                    continue
                '''
                Assign the material index of face based on the piece's material index.
                '''
                bmface.material_index = model.shapes[shape_index].material_index
                #bmface.smooth = True
            '''
            Assign texture coordinates.
            '''
            #material_face_offsets = [0] * len(mesh.materials)
            # TODO: we gotta make a new material, if necessary
            '''
            uv_texture = mesh_data.uv_layers[shape.material_index] # TODO: shape material index???
            for face_index, face in enumerate(shape.faces):
                material_face_offset = material_face_offsets[0]
                texcoords = [vertex.texcoord for vertex in face.vertices]
                for i in range(3):
                    uv = texcoords[i][0], 1.0 - texcoords[i][1]
                    uv_texture.data[(material_face_offset + face_index) * 3 + i].uv = uv
                material_face_offsets[0] += len(lod.faces)
            '''
            bm.faces.ensure_lookup_table()
            bm.to_mesh(mesh_data)
            mesh_data.validate(clean_customdata=False)
            mesh_data.update(calc_edges=False)
            context.scene.objects.link(mesh_object)
            mesh_object.parent = armature_object
        return {'FINISHED'}
    @staticmethod
    def menu_func_import(self, content):
        # NOTE(review): registered as a menu draw callback, so `self` is the
        # menu instance and `content` is presumably the context — verify name.
        self.layout.operator(ImportOperator.bl_idname, text='Lithtech MODL (.model00a)')
|
<gh_stars>100-1000
from typing import Optional
from botocore.client import BaseClient
from typing import Dict
from botocore.paginate import Paginator
from botocore.waiter import Waiter
from typing import Union
from typing import List
class Client(BaseClient):
def associate_domain(self, FleetArn: str, DomainName: str, AcmCertificateArn: str, DisplayName: str = None) -> Dict:
"""
Specifies a domain to be associated to Amazon WorkLink.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/AssociateDomain>`_
**Request Syntax**
::
response = client.associate_domain(
FleetArn='string',
DomainName='string',
AcmCertificateArn='string',
DisplayName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The Amazon Resource Name (ARN) of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The fully qualified domain name (FQDN).
:type AcmCertificateArn: string
:param AcmCertificateArn: **[REQUIRED]**
The ARN of an issued ACM certificate that is valid for the domain being associated.
:type DisplayName: string
:param DisplayName:
The name to display.
:rtype: dict
:returns:
"""
pass
def associate_website_certificate_authority(self, FleetArn: str, Certificate: str, DisplayName: str = None) -> Dict:
"""
Imports the root certificate of a certificate authority (CA) used to obtain TLS certificates used by associated websites within the company network.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/AssociateWebsiteCertificateAuthority>`_
**Request Syntax**
::
response = client.associate_website_certificate_authority(
FleetArn='string',
Certificate='string',
DisplayName='string'
)
**Response Syntax**
::
{
'WebsiteCaId': 'string'
}
**Response Structure**
- *(dict) --*
- **WebsiteCaId** *(string) --*
A unique identifier for the CA.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type Certificate: string
:param Certificate: **[REQUIRED]**
The root certificate of the CA.
:type DisplayName: string
:param DisplayName:
The certificate name to display.
:rtype: dict
:returns:
"""
pass
def can_paginate(self, operation_name: str = None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:return: ``True`` if the operation can be paginated,
``False`` otherwise.
"""
pass
def create_fleet(self, FleetName: str, DisplayName: str = None, OptimizeForEndUserLocation: bool = None) -> Dict:
"""
Creates a fleet. A fleet consists of resources and the configuration that delivers associated websites to authorized users who download and set up the Amazon WorkLink app.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/CreateFleet>`_
**Request Syntax**
::
response = client.create_fleet(
FleetName='string',
DisplayName='string',
OptimizeForEndUserLocation=True|False
)
**Response Syntax**
::
{
'FleetArn': 'string'
}
**Response Structure**
- *(dict) --*
- **FleetArn** *(string) --*
The ARN of the fleet.
:type FleetName: string
:param FleetName: **[REQUIRED]**
A unique name for the fleet.
:type DisplayName: string
:param DisplayName:
The fleet name to display.
:type OptimizeForEndUserLocation: boolean
:param OptimizeForEndUserLocation:
The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.
:rtype: dict
:returns:
"""
pass
def delete_fleet(self, FleetArn: str) -> Dict:
"""
Deletes a fleet. Prevents users from accessing previously associated websites.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DeleteFleet>`_
**Request Syntax**
::
response = client.delete_fleet(
FleetArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_audit_stream_configuration(self, FleetArn: str) -> Dict:
"""
Describes the configuration for delivering audit streams to the customer account.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeAuditStreamConfiguration>`_
**Request Syntax**
::
response = client.describe_audit_stream_configuration(
FleetArn='string'
)
**Response Syntax**
::
{
'AuditStreamArn': 'string'
}
**Response Structure**
- *(dict) --*
- **AuditStreamArn** *(string) --*
The ARN of the Amazon Kinesis data stream that will receive the audit events.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_company_network_configuration(self, FleetArn: str) -> Dict:
"""
Describes the networking configuration to access the internal websites associated with the specified fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeCompanyNetworkConfiguration>`_
**Request Syntax**
::
response = client.describe_company_network_configuration(
FleetArn='string'
)
**Response Syntax**
::
{
'VpcId': 'string',
'SubnetIds': [
'string',
],
'SecurityGroupIds': [
'string',
]
}
**Response Structure**
- *(dict) --*
- **VpcId** *(string) --*
The VPC with connectivity to associated websites.
- **SubnetIds** *(list) --*
The subnets used for X-ENI connections from Amazon WorkLink rendering containers.
- *(string) --*
- **SecurityGroupIds** *(list) --*
The security groups associated with access to the provided subnets.
- *(string) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_device(self, FleetArn: str, DeviceId: str) -> Dict:
"""
Provides information about a user's device.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeDevice>`_
**Request Syntax**
::
response = client.describe_device(
FleetArn='string',
DeviceId='string'
)
**Response Syntax**
::
{
'Status': 'ACTIVE'|'SIGNED_OUT',
'Model': 'string',
'Manufacturer': 'string',
'OperatingSystem': 'string',
'OperatingSystemVersion': 'string',
'PatchLevel': 'string',
'FirstAccessedTime': datetime(2015, 1, 1),
'LastAccessedTime': datetime(2015, 1, 1),
'Username': 'string'
}
**Response Structure**
- *(dict) --*
- **Status** *(string) --*
The current state of the device.
- **Model** *(string) --*
The model of the device.
- **Manufacturer** *(string) --*
The manufacturer of the device.
- **OperatingSystem** *(string) --*
The operating system of the device.
- **OperatingSystemVersion** *(string) --*
The operating system version of the device.
- **PatchLevel** *(string) --*
The operating system patch level of the device.
- **FirstAccessedTime** *(datetime) --*
The date that the device first signed in to Amazon WorkLink.
- **LastAccessedTime** *(datetime) --*
The date that the device last accessed Amazon WorkLink.
- **Username** *(string) --*
The user name associated with the device.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DeviceId: string
:param DeviceId: **[REQUIRED]**
A unique identifier for a registered user\'s device.
:rtype: dict
:returns:
"""
pass
def describe_device_policy_configuration(self, FleetArn: str) -> Dict:
"""
Describes the device policy configuration for the specified fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeDevicePolicyConfiguration>`_
**Request Syntax**
::
response = client.describe_device_policy_configuration(
FleetArn='string'
)
**Response Syntax**
::
{
'DeviceCaCertificate': 'string'
}
**Response Structure**
- *(dict) --*
- **DeviceCaCertificate** *(string) --*
The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_domain(self, FleetArn: str, DomainName: str) -> Dict:
"""
Provides information about the domain.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeDomain>`_
**Request Syntax**
::
response = client.describe_domain(
FleetArn='string',
DomainName='string'
)
**Response Syntax**
::
{
'DisplayName': 'string',
'CreatedTime': datetime(2015, 1, 1),
'DomainStatus': 'PENDING_VALIDATION'|'ASSOCIATING'|'ACTIVE'|'INACTIVE'|'DISASSOCIATING'|'DISASSOCIATED'|'FAILED_TO_ASSOCIATE'|'FAILED_TO_DISASSOCIATE'
}
**Response Structure**
- *(dict) --*
- **DisplayName** *(string) --*
The name to display.
- **CreatedTime** *(datetime) --*
The time that the domain was added.
- **DomainStatus** *(string) --*
The current state for the domain.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain.
:rtype: dict
:returns:
"""
pass
def describe_fleet_metadata(self, FleetArn: str) -> Dict:
"""
Provides basic information for the specified fleet, excluding identity provider, networking, and device configuration details.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeFleetMetadata>`_
**Request Syntax**
::
response = client.describe_fleet_metadata(
FleetArn='string'
)
**Response Syntax**
::
{
'CreatedTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'FleetName': 'string',
'DisplayName': 'string',
'OptimizeForEndUserLocation': True|False,
'CompanyCode': 'string',
'FleetStatus': 'CREATING'|'ACTIVE'|'DELETING'|'DELETED'|'FAILED_TO_CREATE'|'FAILED_TO_DELETE'
}
**Response Structure**
- *(dict) --*
- **CreatedTime** *(datetime) --*
The time that the fleet was created.
- **LastUpdatedTime** *(datetime) --*
The time that the fleet was last updated.
- **FleetName** *(string) --*
The name of the fleet.
- **DisplayName** *(string) --*
The name to display.
- **OptimizeForEndUserLocation** *(boolean) --*
The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.
- **CompanyCode** *(string) --*
The identifier used by users to sign in to the Amazon WorkLink app.
- **FleetStatus** *(string) --*
The current state of the fleet.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_identity_provider_configuration(self, FleetArn: str) -> Dict:
"""
Describes the identity provider configuration of the specified fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeIdentityProviderConfiguration>`_
**Request Syntax**
::
response = client.describe_identity_provider_configuration(
FleetArn='string'
)
**Response Syntax**
::
{
'IdentityProviderType': 'SAML',
'ServiceProviderSamlMetadata': 'string',
'IdentityProviderSamlMetadata': 'string'
}
**Response Structure**
- *(dict) --*
- **IdentityProviderType** *(string) --*
The type of identity provider.
- **ServiceProviderSamlMetadata** *(string) --*
The SAML metadata document uploaded to the user’s identity provider.
- **IdentityProviderSamlMetadata** *(string) --*
The SAML metadata document provided by the user’s identity provider.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:rtype: dict
:returns:
"""
pass
def describe_website_certificate_authority(self, FleetArn: str, WebsiteCaId: str) -> Dict:
"""
Provides information about the certificate authority.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DescribeWebsiteCertificateAuthority>`_
**Request Syntax**
::
response = client.describe_website_certificate_authority(
FleetArn='string',
WebsiteCaId='string'
)
**Response Syntax**
::
{
'Certificate': 'string',
'CreatedTime': datetime(2015, 1, 1),
'DisplayName': 'string'
}
**Response Structure**
- *(dict) --*
- **Certificate** *(string) --*
The root certificate of the certificate authority.
- **CreatedTime** *(datetime) --*
The time that the certificate authority was added.
- **DisplayName** *(string) --*
The certificate name to display.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type WebsiteCaId: string
:param WebsiteCaId: **[REQUIRED]**
A unique identifier for the certificate authority.
:rtype: dict
:returns:
"""
pass
def disassociate_domain(self, FleetArn: str, DomainName: str) -> Dict:
"""
Disassociates a domain from Amazon WorkLink. End users lose the ability to access the domain with Amazon WorkLink.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DisassociateDomain>`_
**Request Syntax**
::
response = client.disassociate_domain(
FleetArn='string',
DomainName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain.
:rtype: dict
:returns:
"""
pass
def disassociate_website_certificate_authority(self, FleetArn: str, WebsiteCaId: str) -> Dict:
"""
Removes a certificate authority (CA).
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/DisassociateWebsiteCertificateAuthority>`_
**Request Syntax**
::
response = client.disassociate_website_certificate_authority(
FleetArn='string',
WebsiteCaId='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type WebsiteCaId: string
:param WebsiteCaId: **[REQUIRED]**
A unique identifier for the CA.
:rtype: dict
:returns:
"""
pass
def generate_presigned_url(self, ClientMethod: str = None, Params: Dict = None, ExpiresIn: int = None, HttpMethod: str = None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
``ClientMethod``.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method\'s model.
:returns: The presigned url
"""
pass
def get_paginator(self, operation_name: str = None) -> Paginator:
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is ``create_foo``, and you\'d normally invoke the
operation as ``client.create_foo(**kwargs)``, if the
``create_foo`` operation can be paginated, you can use the
call ``client.get_paginator(\"create_foo\")``.
:raise OperationNotPageableError: Raised if the operation is not
pageable. You can use the ``client.can_paginate`` method to
check if an operation is pageable.
:rtype: L{botocore.paginate.Paginator}
:return: A paginator object.
"""
pass
def get_waiter(self, waiter_name: str = None) -> Waiter:
"""
Returns an object that can wait for some condition.
:type waiter_name: str
:param waiter_name: The name of the waiter to get. See the waiters
section of the service docs for a list of available waiters.
:returns: The specified waiter object.
:rtype: botocore.waiter.Waiter
"""
pass
def list_devices(self, FleetArn: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Retrieves a list of devices registered with the specified fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListDevices>`_
**Request Syntax**
::
response = client.list_devices(
FleetArn='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'Devices': [
{
'DeviceId': 'string',
'DeviceStatus': 'ACTIVE'|'SIGNED_OUT'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Devices** *(list) --*
Information about the devices.
- *(dict) --*
The summary of devices.
- **DeviceId** *(string) --*
The ID of the device.
- **DeviceStatus** *(string) --*
The status of the device.
- **NextToken** *(string) --*
The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type NextToken: string
:param NextToken:
The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to be included in the next page.
:rtype: dict
:returns:
"""
pass
def list_domains(self, FleetArn: str, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Retrieves a list of domains associated to a specified fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListDomains>`_
**Request Syntax**
::
response = client.list_domains(
FleetArn='string',
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'Domains': [
{
'DomainName': 'string',
'CreatedTime': datetime(2015, 1, 1),
'DomainStatus': 'PENDING_VALIDATION'|'ASSOCIATING'|'ACTIVE'|'INACTIVE'|'DISASSOCIATING'|'DISASSOCIATED'|'FAILED_TO_ASSOCIATE'|'FAILED_TO_DISASSOCIATE',
'DisplayName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **Domains** *(list) --*
Information about the domains.
- *(dict) --*
The summary of the domain.
- **DomainName** *(string) --*
The name of the domain.
- **CreatedTime** *(datetime) --*
The time that the domain was created.
- **DomainStatus** *(string) --*
The status of the domain.
- **DisplayName** *(string) --*
The name to display.
- **NextToken** *(string) --*
The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type NextToken: string
:param NextToken:
The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to be included in the next page.
:rtype: dict
:returns:
"""
pass
def list_fleets(self, NextToken: str = None, MaxResults: int = None) -> Dict:
"""
Retrieves a list of fleets for the current account and Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListFleets>`_
**Request Syntax**
::
response = client.list_fleets(
NextToken='string',
MaxResults=123
)
**Response Syntax**
::
{
'FleetSummaryList': [
{
'FleetArn': 'string',
'CreatedTime': datetime(2015, 1, 1),
'LastUpdatedTime': datetime(2015, 1, 1),
'FleetName': 'string',
'DisplayName': 'string',
'CompanyCode': 'string',
'FleetStatus': 'CREATING'|'ACTIVE'|'DELETING'|'DELETED'|'FAILED_TO_CREATE'|'FAILED_TO_DELETE'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **FleetSummaryList** *(list) --*
The summary list of the fleets.
- *(dict) --*
The summary of the fleet.
- **FleetArn** *(string) --*
The ARN of the fleet.
- **CreatedTime** *(datetime) --*
The time when the fleet was created.
- **LastUpdatedTime** *(datetime) --*
The time when the fleet was last updated.
- **FleetName** *(string) --*
The name of the fleet.
- **DisplayName** *(string) --*
The name to display.
- **CompanyCode** *(string) --*
The identifier used by users to sign into the Amazon WorkLink app.
- **FleetStatus** *(string) --*
The status of the fleet.
- **NextToken** *(string) --*
The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.
:type NextToken: string
:param NextToken:
The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to be included in the next page.
:rtype: dict
:returns:
"""
pass
def list_website_certificate_authorities(self, FleetArn: str, MaxResults: int = None, NextToken: str = None) -> Dict:
"""
Retrieves a list of certificate authorities added for the current account and Region.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/ListWebsiteCertificateAuthorities>`_
**Request Syntax**
::
response = client.list_website_certificate_authorities(
FleetArn='string',
MaxResults=123,
NextToken='string'
)
**Response Syntax**
::
{
'WebsiteCertificateAuthorities': [
{
'WebsiteCaId': 'string',
'CreatedTime': datetime(2015, 1, 1),
'DisplayName': 'string'
},
],
'NextToken': 'string'
}
**Response Structure**
- *(dict) --*
- **WebsiteCertificateAuthorities** *(list) --*
Information about the certificates.
- *(dict) --*
The summary of the certificate authority (CA).
- **WebsiteCaId** *(string) --*
A unique identifier for the CA.
- **CreatedTime** *(datetime) --*
The time when the CA was added.
- **DisplayName** *(string) --*
The name to display.
- **NextToken** *(string) --*
The pagination token used to retrieve the next page of results for this operation. If there are no more pages, this value is null.
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type MaxResults: integer
:param MaxResults:
The maximum number of results to be included in the next page.
:type NextToken: string
:param NextToken:
The pagination token used to retrieve the next page of results for this operation. If this value is null, it retrieves the first page.
:rtype: dict
:returns:
"""
pass
def restore_domain_access(self, FleetArn: str, DomainName: str) -> Dict:
"""
Moves a domain to ACTIVE status if it was in the INACTIVE status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/RestoreDomainAccess>`_
**Request Syntax**
::
response = client.restore_domain_access(
FleetArn='string',
DomainName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain.
:rtype: dict
:returns:
"""
pass
def revoke_domain_access(self, FleetArn: str, DomainName: str) -> Dict:
"""
Moves a domain to INACTIVE status if it was in the ACTIVE status.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/RevokeDomainAccess>`_
**Request Syntax**
::
response = client.revoke_domain_access(
FleetArn='string',
DomainName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain.
:rtype: dict
:returns:
"""
pass
def sign_out_user(self, FleetArn: str, Username: str) -> Dict:
"""
Signs the user out from all of their devices. The user can sign in again if they have valid credentials.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/SignOutUser>`_
**Request Syntax**
::
response = client.sign_out_user(
FleetArn='string',
Username='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type Username: string
:param Username: **[REQUIRED]**
The name of the user.
:rtype: dict
:returns:
"""
pass
def update_audit_stream_configuration(self, FleetArn: str, AuditStreamArn: str = None) -> Dict:
"""
Updates the audit stream configuration for the fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateAuditStreamConfiguration>`_
**Request Syntax**
::
response = client.update_audit_stream_configuration(
FleetArn='string',
AuditStreamArn='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type AuditStreamArn: string
:param AuditStreamArn:
The ARN of the Amazon Kinesis data stream that receives the audit events.
:rtype: dict
:returns:
"""
pass
def update_company_network_configuration(self, FleetArn: str, VpcId: str, SubnetIds: List, SecurityGroupIds: List) -> Dict:
"""
Updates the company network configuration for the fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateCompanyNetworkConfiguration>`_
**Request Syntax**
::
response = client.update_company_network_configuration(
FleetArn='string',
VpcId='string',
SubnetIds=[
'string',
],
SecurityGroupIds=[
'string',
]
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type VpcId: string
:param VpcId: **[REQUIRED]**
The VPC with connectivity to associated websites.
:type SubnetIds: list
:param SubnetIds: **[REQUIRED]**
The subnets used for X-ENI connections from Amazon WorkLink rendering containers.
- *(string) --*
:type SecurityGroupIds: list
:param SecurityGroupIds: **[REQUIRED]**
The security groups associated with access to the provided subnets.
- *(string) --*
:rtype: dict
:returns:
"""
pass
def update_device_policy_configuration(self, FleetArn: str, DeviceCaCertificate: str = None) -> Dict:
"""
Updates the device policy configuration for the fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateDevicePolicyConfiguration>`_
**Request Syntax**
::
response = client.update_device_policy_configuration(
FleetArn='string',
DeviceCaCertificate='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DeviceCaCertificate: string
:param DeviceCaCertificate:
The certificate chain, including intermediate certificates and the root certificate authority certificate used to issue device certificates.
:rtype: dict
:returns:
"""
pass
def update_domain_metadata(self, FleetArn: str, DomainName: str, DisplayName: str = None) -> Dict:
"""
Updates domain metadata, such as DisplayName.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateDomainMetadata>`_
**Request Syntax**
::
response = client.update_domain_metadata(
FleetArn='string',
DomainName='string',
DisplayName='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DomainName: string
:param DomainName: **[REQUIRED]**
The name of the domain.
:type DisplayName: string
:param DisplayName:
The name to display.
:rtype: dict
:returns:
"""
pass
def update_fleet_metadata(self, FleetArn: str, DisplayName: str = None, OptimizeForEndUserLocation: bool = None) -> Dict:
"""
Updates fleet metadata, such as DisplayName.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateFleetMetadata>`_
**Request Syntax**
::
response = client.update_fleet_metadata(
FleetArn='string',
DisplayName='string',
OptimizeForEndUserLocation=True|False
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type DisplayName: string
:param DisplayName:
The fleet name to display. The existing DisplayName is unset if null is passed.
:type OptimizeForEndUserLocation: boolean
:param OptimizeForEndUserLocation:
The option to optimize for better performance by routing traffic through the closest AWS Region to users, which may be outside of your home Region.
:rtype: dict
:returns:
"""
pass
def update_identity_provider_configuration(self, FleetArn: str, IdentityProviderType: str, IdentityProviderSamlMetadata: str = None) -> Dict:
"""
Updates the identity provider configuration for the fleet.
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/worklink-2018-09-25/UpdateIdentityProviderConfiguration>`_
**Request Syntax**
::
response = client.update_identity_provider_configuration(
FleetArn='string',
IdentityProviderType='SAML',
IdentityProviderSamlMetadata='string'
)
**Response Syntax**
::
{}
**Response Structure**
- *(dict) --*
:type FleetArn: string
:param FleetArn: **[REQUIRED]**
The ARN of the fleet.
:type IdentityProviderType: string
:param IdentityProviderType: **[REQUIRED]**
The type of identity provider.
:type IdentityProviderSamlMetadata: string
:param IdentityProviderSamlMetadata:
The SAML metadata document provided by the customer’s identity provider. The existing IdentityProviderSamlMetadata is unset if null is passed.
:rtype: dict
:returns:
"""
pass
|
<gh_stars>1-10
import numpy as np
import pandas as pd
# to make this stable across runs
np.random.seed(22)
from sklearn.cluster import OPTICS
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import PolynomialFeatures
from sklearn.preprocessing import RobustScaler
from sklearn.model_selection import KFold
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.metrics import mean_squared_error
from sklearn.metrics import explained_variance_score
from sklearn.base import clone
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.model_selection import GridSearchCV
def _add_location_clusters(df):
    """Append one-hot OPTICS location-cluster columns derived from Latitude/Longitude.

    OPTICS labels noisy samples -1; those are recoded to max_label + 1 so
    OneHotEncoder accepts them. Returns *df* with the cluster columns appended.
    """
    optics_df = df[['Latitude', 'Longitude']].copy()
    clust = OPTICS(min_samples=50, xi=.05, min_cluster_size=.05)
    clust.fit(optics_df)
    optics_df['clust_label'] = clust.labels_
    location_max = np.max(optics_df.clust_label.unique())
    # Recode the -1 "noise" label so every sample falls in a valid category.
    optics_df['clust_label'].replace([-1], location_max + 1, inplace=True)
    enc = OneHotEncoder(categories='auto')
    one_hot = enc.fit_transform(optics_df[['clust_label']])
    location_labels = ['cluster' + str(l) for l in optics_df.clust_label.unique()]
    one_hot = pd.DataFrame(one_hot.todense(), index=optics_df.index, columns=location_labels)
    return pd.concat([df, one_hot], axis=1)


def _drop_uninformative(df):
    """Drop the raw location/price-band columns that are not used as features."""
    return df.drop(['city', 'Latitude', 'Longitude', 'change_hunits',
                    'studio_1000_1499', 'studio_1500_more', 'studio_750_999',
                    'onebed_1000_1499', 'onebed_1500_more', 'onebed_750_999',
                    'twobed_1000_1499', 'twobed_1500_more', 'twobed_750_999',
                    'threebed_1000_1499', 'threebed_1500_more', 'threebed_750_999'],
                   axis=1)


def winning_pipeline(mydata, mytestdata, myfinalmodel, feature_selection_done=True, myfeatures=None, numerical_attributes=None):
    """
    Fit the final model on prepared training data and evaluate on the test set.

    Both datasets get OPTICS location-cluster one-hot columns appended; the
    imputer (IterativeImputer) and scaler (RobustScaler via ColumnTransformer)
    are fit on the training data only and applied to the test data.

    Arguments
    @mydata: training data (may contain missing values; not scaled)
    @mytestdata: test data, same schema as `mydata`
    @myfinalmodel: sklearn estimator to fit and evaluate
    @feature_selection_done: if True, subset both datasets to `myfeatures`;
        otherwise drop a fixed list of uninformative columns
    @myfeatures: list of informative feature names (used when
        feature_selection_done is True)
    @numerical_attributes: column names to pass through RobustScaler

    Returns a dict with final RMSE, predictions, explained variance, the
    fitted model, and the processed train/test matrices (for pickling).

    NOTE(review): the test set is clustered by a *separate* OPTICS fit, so its
    cluster columns need not align with the training clusters — verify this is
    intentional before relying on the cluster features at prediction time.
    """
    # ---- training data ----
    mydata = _add_location_clusters(mydata)
    mydata_labels = mydata['med_rental_rate'].copy()
    mydata = mydata.drop('med_rental_rate', axis=1)
    if feature_selection_done:
        mydata = mydata.loc[:, myfeatures].copy()
    else:
        mydata = _drop_uninformative(mydata)
    imputer = IterativeImputer(max_iter=10, random_state=22, min_value=0)
    imputed_dat = imputer.fit_transform(mydata)
    imputed_dat = pd.DataFrame(imputed_dat, columns=mydata.columns)
    # Scale only the numerical attributes; the appended cluster columns pass through.
    ct = ColumnTransformer(
        [('scale1', RobustScaler(), numerical_attributes)],
        remainder='passthrough')
    X_train_prepped = ct.fit_transform(imputed_dat)
    processed_training_data = X_train_prepped.copy()

    # ---- test data ----
    mytestdata = _add_location_clusters(mytestdata)
    mytest_data_labels = mytestdata['med_rental_rate'].copy()
    mytestdata = mytestdata.drop('med_rental_rate', axis=1)
    if feature_selection_done:
        mytestdata = mytestdata.loc[:, myfeatures].copy()
    else:
        # BUG FIX: the original dropped these columns from `mydata` (where they
        # were already dropped above, raising KeyError) instead of `mytestdata`.
        mytestdata = _drop_uninformative(mytestdata)
    # Transform (not fit) the test data with the training-fitted imputer/scaler.
    imputed_testdata = imputer.transform(mytestdata)
    imputed_testdata = pd.DataFrame(imputed_testdata, columns=mytestdata.columns)
    mytestdata_prepared = ct.transform(imputed_testdata)
    processed_test_data = mytestdata_prepared.copy()

    # ---- fit and evaluate ----
    myfinalmodel.fit(X_train_prepped, mydata_labels)
    final_predictions = myfinalmodel.predict(mytestdata_prepared)
    final_mse = mean_squared_error(mytest_data_labels, final_predictions)
    final_rmse = np.sqrt(final_mse)
    final_expvar = explained_variance_score(mytest_data_labels, final_predictions)
    return {'final_rmse': final_rmse, 'final_predictions': final_predictions,
            'final_expvar': final_expvar, 'myfinalmodel': myfinalmodel,
            'processed_training_data': processed_training_data,
            'processed_test_data': processed_test_data}
|
<reponame>gockxml/thumbor
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/globocom/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com <EMAIL>
import json
from thumbor.engines import BaseEngine
class JSONEngine(BaseEngine):
    """Engine wrapper that records the operations applied to an image and, on
    read(), returns JSON (or JSONP) metadata describing them instead of
    encoded image bytes. Geometry-changing operations are also forwarded to
    the wrapped engine so target dimensions stay accurate."""

    def __init__(self, engine, path, callback_name=None):
        """Wrap *engine* for the image at *path*.

        :param engine: the real imaging engine to delegate to
        :param path: source URL/path reported in the JSON output
        :param callback_name: if set, read() emits JSONP using this callback
        """
        super(JSONEngine, self).__init__(engine.context)
        self.engine = engine
        self.width, self.height = self.engine.size
        self.path = path
        self.callback_name = callback_name
        self.operations = []
        self.focal_points = []
        self.refresh_image()

    def refresh_image(self):
        # Re-sync our image reference with the wrapped engine's current image.
        self.image = self.engine.image

    @property
    def size(self):
        """Current (width, height) of the wrapped engine's image."""
        return self.engine.size

    def resize(self, width, height):
        """Record a resize operation and forward it to the wrapped engine."""
        self.operations.append({
            "type": "resize",
            "width": width,
            "height": height
        })
        self.engine.resize(width, height)
        self.refresh_image()

    def crop(self, left, top, right, bottom):
        """Record a crop operation and forward it to the wrapped engine."""
        self.operations.append({
            "type": "crop",
            "left": left,
            "top": top,
            "right": right,
            "bottom": bottom
        })
        self.engine.crop(left, top, right, bottom)
        self.refresh_image()

    def focus(self, points):
        """Record the given focal points (serialized via to_dict())."""
        for point in points:
            self.focal_points.append(point.to_dict())

    def flip_vertically(self):
        # Recorded only — not forwarded; flips don't change target dimensions
        # and this engine's output is metadata, not pixels.
        self.operations.append({"type": "flip_vertically"})

    def flip_horizontally(self):
        # Recorded only — see flip_vertically.
        self.operations.append({"type": "flip_horizontally"})

    def get_target_dimensions(self):
        """Replay recorded crop/resize operations to compute the final
        (width, height) the image would have."""
        width = self.width
        height = self.height
        for operation in self.operations:
            if operation['type'] == 'crop':
                width = operation['right'] - operation['left']
                height = operation['bottom'] - operation['top']
            if operation['type'] == 'resize':
                width = operation['width']
                height = operation['height']
        return (width, height)

    # --- plain delegation to the wrapped engine ---

    def gen_image(self, size, color):
        return self.engine.gen_image(size, color)

    def create_image(self, buffer):
        return self.engine.create_image(buffer)

    def draw_rectangle(self, x, y, width, height):
        return self.engine.draw_rectangle(x, y, width, height)

    def rotate(self, degrees):
        return self.engine.rotate(degrees)

    def read_multiple(self, images, extension=None):
        return self.engine.read_multiple(images, extension)

    def paste(self, other_engine, pos, merge=True):
        return self.engine.paste(other_engine, pos, merge)

    def enable_alpha(self):
        return self.engine.enable_alpha()

    def strip_icc(self):
        return self.engine.strip_icc()

    def get_image_mode(self):
        return self.engine.get_image_mode()

    def get_image_data(self):
        return self.engine.get_image_data()

    def set_image_data(self, data):
        return self.engine.set_image_data(data)

    def image_data_as_rgb(self, update_image=True):
        return self.engine.image_data_as_rgb(update_image)

    def convert_to_grayscale(self):
        # Intentionally a no-op: grayscale conversion does not affect the
        # reported metadata.
        pass

    def read(self, extension, quality):
        """Return the recorded metadata as a JSON string (or JSONP if a
        callback name was provided). *extension* and *quality* are accepted
        for interface compatibility but unused."""
        target_width, target_height = self.get_target_dimensions()
        thumbor_json = {
            "thumbor": {
                "source": {
                    "url": self.path,
                    "width": self.width,
                    "height": self.height,
                },
                "operations": self.operations,
                "target": {
                    "width": target_width,
                    "height": target_height
                }
            }
        }
        if self.focal_points:
            thumbor_json["thumbor"]["focal_points"] = self.focal_points
        thumbor_json = json.dumps(thumbor_json)
        if self.callback_name:
            return "%s(%s);" % (self.callback_name, thumbor_json)
        return thumbor_json
|
<gh_stars>1-10
import os
from pathlib import Path
from typing import List
from mugen import Filter, MusicVideo, MusicVideoGenerator
from mugen.exceptions import ParameterError
from mugen.mixins import Persistable
from mugen.utilities import system
from mugen.video.effects import FadeIn, FadeOut
from mugen.video.io.VideoWriter import VideoWriter
from mugen.video.sources.VideoSource import VideoSourceList
from scripts.cli.events import prepare_events
from scripts.cli.utilities import message, shutdown
def create_music_video(args):
    """End-to-end CLI entry point: generate a music video from *args*, apply
    fade effects, print per-filter rejection stats, and write all outputs."""
    music_video, generator = generate_music_video(args)
    apply_effects(music_video, args)
    print_rejected_segment_stats(music_video, generator.video_filters)
    output_music_video(music_video, args)
def generate_music_video(args) -> "tuple[MusicVideo, MusicVideoGenerator]":
    """
    Build a MusicVideoGenerator from CLI *args* and generate a music video.

    Returns the generated music video together with the generator that
    produced it (callers use the generator's video_filters for statistics).
    FIX: the original annotated the return type as ``MusicVideo`` although the
    function returns a 2-tuple.
    """
    audio_source = args.audio_source
    duration = args.duration
    video_sources = args.video_sources
    video_source_weights = args.video_source_weights
    video_filters = args.video_filters
    exclude_video_filters = args.exclude_video_filters
    include_video_filters = args.include_video_filters
    video_sources = VideoSourceList(video_sources, weights=video_source_weights)
    generator = MusicVideoGenerator(audio_source, video_sources, duration=duration)
    generator.video_filters = video_filters
    generator.exclude_video_filters = exclude_video_filters
    generator.include_video_filters = include_video_filters
    message(
        f"Weights\n------------\n{generator.video_sources.flatten().weight_stats()}"
    )
    try:
        events = prepare_events(generator, args)
    except ParameterError as error:
        # shutdown() is expected to exit the process; otherwise `events`
        # would be unbound below — TODO confirm shutdown never returns.
        shutdown(str(error))
    message("Generating music video from video segments and audio...")
    music_video = generator.generate_from_events(events)
    return music_video, generator
def apply_effects(music_video: MusicVideo, args):
    """Attach fade-in/fade-out effects to the first/last segments when the
    corresponding CLI durations were provided."""
    fade_in = args.fade_in
    fade_out = args.fade_out
    # Apply effects
    if fade_in:
        music_video.segments[0].effects.append(FadeIn(fade_in))
    if fade_out:
        music_video.segments[-1].effects.append(FadeOut(fade_out))
def output_music_video(music_video: MusicVideo, args):
    """Configure the video writer from CLI *args*, then write the music video
    file, its pickle, and (optionally) its segments to a fresh directory."""
    video_preset = args.video_preset
    video_codec = args.video_codec
    video_crf = args.video_crf
    audio_codec = args.audio_codec
    audio_bitrate = args.audio_bitrate
    use_original_audio = args.use_original_audio
    video_dimensions = args.video_dimensions
    video_aspect_ratio = args.video_aspect_ratio
    (
        music_video_directory,
        music_video_output_path,
        music_video_pickle_path,
    ) = prepare_output_directory(args)
    message(f"Writing music video '{music_video_output_path}'...")
    # Writer/encoder settings come straight from the CLI arguments.
    music_video.writer.preset = video_preset
    music_video.writer.codec = video_codec
    music_video.writer.crf = video_crf
    music_video.writer.audio_codec = audio_codec
    music_video.writer.audio_bitrate = audio_bitrate
    if use_original_audio:
        # Clearing audio_file makes the video keep its segments' own audio.
        music_video.audio_file = None
    if video_dimensions:
        music_video.dimensions = video_dimensions
    if video_aspect_ratio:
        music_video.aspect_ratio = video_aspect_ratio
    music_video.write_to_video_file(music_video_output_path)
    music_video.save(music_video_pickle_path)
    output_segments(music_video, music_video_directory, args)
def prepare_output_directory(args):
    """Create a uniquely-named directory for the music video and return
    (directory, video output path, pickle path)."""
    output_directory = args.output_directory
    video_name = args.video_name
    # Create the directory for the music video
    music_video_name = get_music_video_name(output_directory, video_name)
    music_video_directory = os.path.join(output_directory, music_video_name)
    music_video_output_path = os.path.join(
        music_video_directory, music_video_name + VideoWriter.DEFAULT_VIDEO_EXTENSION
    )
    music_video_pickle_path = os.path.join(
        music_video_directory, music_video_name + Persistable.PICKLE_EXTENSION
    )
    system.ensure_directory_exists(music_video_directory)
    return music_video_directory, music_video_output_path, music_video_pickle_path
def output_segments(music_video: MusicVideo, directory: str, args):
    """Optionally write accepted and/or rejected video segments to *directory*,
    depending on the CLI flags."""
    save_segments = args.save_segments
    save_rejected_segments = args.save_rejected_segments
    if save_segments:
        message("Saving video segments...")
        music_video.write_video_segments(directory)
    if save_rejected_segments:
        message("Saving rejected video segments...")
        music_video.write_rejected_video_segments(directory)
def preview_music_video(args):
    """Generate a short preview video for the audio events and write it to the
    output directory."""
    output_directory = args.output_directory
    audio_source = args.audio_source
    duration = args.duration
    # Prepare Inputs
    # NOTE(review): despite its name, get_preview_path returns a file NAME,
    # which is joined with the directory below.
    preview_name = get_preview_path(
        output_directory, VideoWriter.DEFAULT_VIDEO_EXTENSION
    )
    output_path = os.path.join(output_directory, preview_name)
    generator = MusicVideoGenerator(audio_source, duration=duration)
    try:
        events = prepare_events(generator, args)
    except ParameterError as error:
        # shutdown() presumably exits the process — TODO confirm.
        shutdown(str(error))
    message(f"Creating preview '{Path(output_path).stem}'...")
    preview = generator.preview_from_events(events)
    preview.write_to_video_file(output_path)
def get_music_video_name(directory: str, basename: str):
    """Return the first name of the form ``<basename>_<n>`` (n = 0, 1, ...)
    that does not already exist inside *directory*."""
    count = 0
    candidate = f"{basename}_{count}"
    while os.path.exists(os.path.join(directory, candidate)):
        count += 1
        candidate = f"{basename}_{count}"
    return candidate
def get_preview_path(directory: str, extension: str) -> str:
    """Return the first unused preview file NAME (not a full path) of the form
    ``music_video_preview_<n><extension>`` for *directory*."""
    count = 0
    candidate = f"music_video_preview_{count}{extension}"
    while os.path.exists(os.path.join(directory, candidate)):
        count += 1
        candidate = f"music_video_preview_{count}{extension}"
    return candidate
def print_rejected_segment_stats(music_video: MusicVideo, video_filters: List[Filter]):
    """Print, for each filter, how many rejected segments failed it."""
    message("Filter results:")
    for video_filter in video_filters:
        failures = [
            segment
            for segment in music_video.rejected_segments
            if video_filter.name in segment.failed_filters
        ]
        print(
            f"{len(failures)} segments failed filter {video_filter.name}"
        )
|
import numpy as np
from scipy.spatial.distance import cdist
def cmeans(data, c, h, error, maxiter, metric='euclidean', init=None, seed=None):
    """
    Fuzzy c-means clustering algorithm [1], modified to use a regularization
    coefficient `h` in place of the classic fuzzifier exponent `m`.
    Parameters
    ----------
    data : 2d array, size (S, N)
        Data to be clustered. N is the number of data sets; S is the number
        of features within each sample vector.
    c : int
        Desired number of clusters or classes.
    h : float
        Regularization coefficient used in the exponential membership update
        (see `_uij`); replaces the fuzzifier exponent `m` of classic FCM.
    error : float
        Stopping criterion; stop early if the norm of (u[p] - u[p-1]) < error.
    maxiter : int
        Maximum number of iterations allowed.
        NOTE(review): must be >= 2 — with maxiter < 2 the loop body never
        runs and `cntr`/`d` are unbound at the return statement.
    metric: string
        By default is set to euclidean. Passes any option accepted by
        ``scipy.spatial.distance.cdist``.
    init : 2d array, size (S, N)
        Initial fuzzy c-partitioned matrix. If none provided, algorithm is
        randomly initialized.
    seed : int
        If provided, sets random seed of init. No effect if init is
        provided. Mainly for debug/testing purposes.
    Returns
    -------
    cntr : 2d array, size (S, c)
        Cluster centers. Data for each center along each feature provided
        for every cluster (of the `c` requested clusters).
    u : 2d array, (S, N)
        Final fuzzy c-partitioned matrix.
    u0 : 2d array, (S, N)
        Initial guess at fuzzy c-partitioned matrix (either provided init or
        random guess used if init was not provided).
    d : 2d array, (S, N)
        Final Euclidian distance matrix.
    jm : 1d array, length P
        Objective function history.
    p : int
        Number of iterations run.
    fpc : float
        Final fuzzy partition coefficient.
    Notes
    -----
    The algorithm implemented is from Ross et al. [1]_.
    Fuzzy C-Means has a known problem with high dimensionality datasets, where
    the majority of cluster centers are pulled into the overall center of
    gravity. If you are clustering data with very high dimensionality and
    encounter this issue, another clustering method may be required. For more
    information and the theory behind this, see Winkler et al. [2]_.
    `normalize_columns` is defined elsewhere in this module.
    References
    ----------
    .. [1] <NAME>. Fuzzy Logic With Engineering Applications, 3rd ed.
    Wiley. 2010. ISBN 978-0-470-74376-8 pp 352-353, eq 10.28 - 10.35.
    .. [2] <NAME>., <NAME>., & <NAME>. Fuzzy c-means in high
    dimensional spaces. 2012. Contemporary Theory and Pragmatic
    Approaches in Fuzzy Computing Utilization, 1.
    """
    # Setup u0
    # Initialize the cluster partition matrix.
    if init is None:
        if seed is not None:
            np.random.seed(seed=seed)
        n = data.shape[1]
        u0 = np.random.rand(c, n)
        u0 = normalize_columns(u0)
        init = u0.copy()
    u0 = init
    u = np.fmax(u0, np.finfo(np.float64).eps)  # clamp to machine epsilon to avoid zeros
    # Compute the prior (pi_i) of each cluster.
    s = np.sum(u, axis=1, keepdims=True)/u.shape[1]
    # Initialize loop parameters
    jm = np.zeros(0)
    p = 0
    # Main cmeans loop
    while p < maxiter - 1:
        u2 = u.copy()
        s0 = s.copy()
        [cntr, u, Jjm , d, s] = _cmeans0(data, u2, c, h, s0, metric)
        jm = np.hstack((jm, Jjm))
        p += 1
        # Stopping rule
        if np.linalg.norm(u - u2) < error:
            break
    # Final calculations
    error = np.linalg.norm(u - u2)
    fpc = _fp_coeff(u)
    return cntr, u, u0, d, jm, p, fpc
def _cmeans0(data, u_old, c, h, s, metric):
    """
    Single step in generic fuzzy c-means clustering algorithm.
    Modified from Ross, Fuzzy Logic w/Engineering Applications (2010),
    pages 352-353, equations 10.28 - 10.35.
    Parameters inherited from cmeans().  Note: `c` is not used in the body;
    it is kept for interface symmetry with the caller.
    """
    # Normalise the previous partition matrix, then clamp entries below
    # machine epsilon so later divisions are numerically safe.
    u_old = normalize_columns(u_old)
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)
    # Prior weight of each cluster (pi_i), shape (c, 1): the mean
    # membership of each cluster over all samples, clamped to epsilon.
    s = np.sum(u_old, axis=1, keepdims=True) / u_old.shape[1]
    s = np.fmax(s, np.finfo(np.float64).eps)
    um = u_old
    # Calculate cluster centers as membership-weighted means of the data.
    data = data.T
    cntr = um.dot(data) / np.atleast_2d(um.sum(axis=1)).T
    # Distance from every cluster centre to every sample, kept > 0.
    d = _distance(data, cntr, metric)
    d = np.fmax(d, np.finfo(np.float64).eps)
    # Objective function value for this iteration.
    jm = (um * d ** 2).sum()
    # u = normalize_power_columns(d, - 2. / (m - 1))  # classic FCM update
    # Regularised membership update: h plays the role of the fuzzifier m.
    u = _uij(d, s, h)
    # u = np.exp()
    return cntr, u, jm, d, s
'''
Maximum-entropy variant: the fuzziness exponent m is replaced by the
regularisation coefficient h.  Each iteration:
1. compute the cluster priors pi_i
2. update the memberships from the priors
3. update the cluster centres
'''
def _uij(d, s, h):
    '''
    Update the fuzzy partition (membership) matrix.

    Implements the regularised membership update
    ``u_ij = s_i * exp(-d_ij / h) / sum_k(s_k * exp(-d_kj / h))``,
    where ``h`` is the regularisation coefficient that replaces the
    classic FCM fuzzifier ``m``.

    :param d: 2d array (C x N), distance from each centre to each sample
    :param s: 2d array (C x 1), prior weight (pi_i) of each cluster
    :param h: regularisation coefficient
    :return: 2d array (C x N), column-normalised membership matrix
    '''
    # Broadcasting of the (C, 1) prior over columns replaces the explicit
    # repeat() of the original implementation; the values are identical.
    weighted = s * np.exp(d / (-h))
    # Clamp to machine epsilon so the column sums can never be zero.
    weighted = np.fmax(weighted, np.finfo(np.float64).eps)
    u = weighted / np.sum(weighted, axis=0, keepdims=True)
    # Renormalise columns (guards against accumulated rounding error).
    u = normalize_columns(u)
    return u
def _fp_coeff(u):
"""
Fuzzy partition coefficient `fpc` relative to fuzzy c-partitioned
matrix `u`. Measures 'fuzziness' in partitioned clustering.
Parameters
----------
u : 2d array (C, N)
Fuzzy c-partitioned matrix; N = number of data points and C = number
of clusters.
Returns
-------
fpc : float
Fuzzy partition coefficient.
"""
n = u.shape[1]
return np.trace(u.dot(u.T)) / float(n)
def _distance(data, centers, metric='euclidean'):
"""
Euclidean distance from each point to each cluster center.
Parameters
----------
data : 2d array (N x Q)
Data to be analyzed. There are N data points.
centers : 2d array (C x Q)
Cluster centers. There are C clusters, with Q features.
metric: string
By default is set to euclidean. Passes any option accepted by
``scipy.spatial.distance.cdist``.
Returns
-------
dist : 2d array (C x N)
Euclidean distance from each point, to each cluster center.
See Also
--------
scipy.spatial.distance.cdist
"""
return cdist(data, centers, metric=metric).T
"""
_normalize_columns.py : Normalize columns.
"""
# import numpy as np
def normalize_columns(columns):
    """
    Normalize columns of matrix.

    Each column is divided by its own sum, so every column of the result
    sums to one.

    Parameters
    ----------
    columns : 2d array (M x N)
        Matrix with columns
    Returns
    -------
    normalized_columns : 2d array (M x N)
        columns/np.sum(columns, axis=0, keepdims=1)
    """
    column_totals = np.sum(columns, axis=0, keepdims=True)
    return columns / column_totals
def normalize_power_columns(x, exponent):
    """
    Calculate normalize_columns(x**exponent)
    in a numerically safe manner.
    Parameters
    ----------
    x : 2d array (M x N)
        Matrix with columns
    exponent : float
        Exponent
    Returns
    -------
    result : 2d array (M x N)
        normalize_columns(x**exponent) but safe
    """
    assert np.all(x >= 0.0)
    work = x.astype(np.float64)
    # Scale every column into [0, 1] by its maximum, then clamp up to
    # machine epsilon so all values lie in [eps, 1].
    work = work / np.max(work, axis=0, keepdims=True)
    work = np.fmax(work, np.finfo(work.dtype).eps)
    if exponent < 0:
        # Rescale into [1, 1/eps] first; raising to a negative power may
        # then underflow towards zero, which is acceptable here.
        work = work / np.min(work, axis=0, keepdims=True)
    work = work ** exponent
    # Column-normalise the result (each column sums to one).
    return work / np.sum(work, axis=0, keepdims=True)
|
from datetime import datetime
import discord
import math
import typing
from discord.ext import commands
from cogs.boards import MockPlayer
from cogs.utils.db_objects import SlimDummyBoardConfig
from cogs.utils.paginator import (
SeasonStatsPaginator, StatsAttacksPaginator, StatsDefensesPaginator, StatsGainsPaginator, StatsDonorsPaginator
)
from cogs.utils.formatters import CLYTable, get_render_type
from cogs.utils.cache import cache, Strategy
from cogs.utils.emoji_lookup import misc
# Fallback player used when a tag from the database has no matching member
# object; its name renders as 'Unknown' in the boards below.
mock = MockPlayer('Unknown', 'Unknown')
class SeasonStats(commands.Cog):
    """Commands for browsing historical (past-season) statistics."""

    def __init__(self, bot):
        self.bot = bot

    async def _default_season(self, season):
        """Return *season* unchanged, or the last finished season if falsy."""
        return season or (await self.bot.seasonconfig.get_season_id()) - 1

    @staticmethod
    def _member_tags(clans):
        """Flatten *clans* into a flat list of member player tags."""
        tags = []
        for clan in clans:
            tags.extend(member.tag for member in clan.itermembers)
        return tags

    @cache(strategy=Strategy.lru)
    async def get_board_fmt(self, guild_id, season_id, board_type):
        """Build paginated embeds for a historical donation/trophy board.

        :param guild_id: Discord guild ID the board belongs to.
        :param season_id: Season to fetch data for.
        :param board_type: ``'donation'`` or ``'trophy'``.
        :returns: List of :class:`discord.Embed`, one per page of 20 players.
        """
        board_config = await self.bot.utils.get_board_configs(guild_id, board_type)
        if not board_config:
            # No board configured for this guild: fall back to a dummy config.
            board_config = SlimDummyBoardConfig(board_type, 2, f"{board_type.capitalize()}Board", None, 'donations' if board_type == "donation" else "trophies")
        else:
            board_config = board_config[0]
        clans = await self.bot.get_clans(guild_id)
        players = []
        for clan in clans:
            players.extend(clan.itermembers)
        top_players = await self.bot.donationboard.get_top_players(
            players, board_type, board_config.sort_by, False, season_id=season_id
        )
        if not top_players:
            e = discord.Embed(colour=self.bot.colour,
                              title='No Data Found.')
            return [e]
        # Build the tag set once; evaluating it inside the comprehension
        # condition would rebuild it for every player (quadratic).
        top_tags = {x['player_tag'] for x in top_players}
        players = {n.tag: n for n in players if n.tag in top_tags}
        message_count = math.ceil(len(top_players) / 20)
        embeds = []
        for page in range(message_count):
            player_data = top_players[page * 20:(page + 1) * 20]
            table = CLYTable()
            for offset, row in enumerate(player_data):
                index = page * 20 + offset
                if board_config.render == 2:
                    table.add_row([index,
                                   row[1],
                                   players.get(row['player_tag'], mock).name])
                else:
                    table.add_row([index,
                                   row[1],
                                   row[2],
                                   players.get(row['player_tag'], mock).name])
            render = get_render_type(board_config, table)
            fmt = render()
            e = discord.Embed(colour=self.bot.donationboard.get_colour(board_type, False),
                              description=fmt,
                              timestamp=datetime.utcnow()
                              )
            e.set_author(name=board_config.title,
                         icon_url=board_config.icon_url or 'https://cdn.discordapp.com/'
                                                           'emojis/592028799768592405.png?v=1')
            e.set_footer(
                text=f'Historical {board_type.capitalize()}Board; Season {season_id} - Page {page+1}/{message_count}'
            )
            embeds.append(e)
        return embeds

    # NOTE: the keyword is invoke_without_command (the original passed the
    # unrecognised invoke_without_subcommand, which discord.py ignored).
    @commands.group(invoke_without_command=True)
    async def seasonstats(self, ctx):
        """[Group] command to manage historical stats for seasons past."""
        if ctx.invoked_subcommand is None:
            await ctx.send_help(ctx.command)

    @seasonstats.command(name='donationboard')
    async def seasonstats_donationboard(self, ctx, season: typing.Optional[int] = None):
        """Get historical donationboard stats.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+seasonstats donationboard`
        :white_check_mark: `+seasonstats donationboard 2`
        """
        season = await self._default_season(season)
        embeds = await self.get_board_fmt(ctx.guild.id, season, 'donation')
        p = SeasonStatsPaginator(ctx, entries=embeds)
        await p.paginate()

    @seasonstats.command(name='trophyboard')
    async def seasonstats_trophyboard(self, ctx, season: typing.Optional[int] = None):
        """Get historical trophyboard stats.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+seasonstats trophyboard`
        :white_check_mark: `+seasonstats trophyboard 2`
        """
        season = await self._default_season(season)
        embeds = await self.get_board_fmt(ctx.guild.id, season, 'trophy')
        p = SeasonStatsPaginator(ctx, entries=embeds)
        await p.paginate()

    @seasonstats.command(name='attacks')
    async def seasonstats_attacks(self, ctx, season: typing.Optional[int] = None):
        """Get attack wins for all clans.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+season stats attacks`
        :white_check_mark: `+season stats attacks 2`
        """
        season = await self._default_season(season)
        clans = await ctx.get_clans()
        query = """SELECT player_tag, ABS(end_attacks - start_attacks) as attacks, trophies
                   FROM players
                   WHERE player_tag = ANY($1::TEXT[])
                   AND season_id = $2
                   ORDER BY attacks DESC
                   NULLS LAST
                """
        fetch = await ctx.db.fetch(query, self._member_tags(clans), season)
        if not fetch:
            return await ctx.send("No data found. Sorry.")
        title = f"Attack wins for Season {season}"
        key = f"**Key:**\n{misc['attack']} - Attacks\n{misc['trophygold']} - Trophies"
        p = StatsAttacksPaginator(ctx, fetch, title, key=key, page_count=math.ceil(len(fetch) / 20))
        await p.paginate()

    @seasonstats.command(name='defenses', aliases=['defense', 'defences', 'defence'])
    async def seasonstats_defenses(self, ctx, season: typing.Optional[int] = None):
        """Get defense wins for all clans.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+season stats defenses`
        :white_check_mark: `+season stats defenses 3`
        """
        season = await self._default_season(season)
        clans = await ctx.get_clans()
        query = """SELECT player_tag, end_defenses - start_defenses as defenses, trophies
                   FROM players
                   WHERE player_tag = ANY($1::TEXT[])
                   AND season_id = $2
                   ORDER BY defenses DESC
                   NULLS LAST
                """
        fetch = await ctx.db.fetch(query, self._member_tags(clans), season)
        if not fetch:
            return await ctx.send("No data found. Sorry.")
        title = f"Defense wins for Season {season}"
        key = f"**Key:**\n{misc['defense']} - Defenses\n{misc['trophygold']} - Trophies"
        p = StatsDefensesPaginator(ctx, fetch, title, key=key, page_count=math.ceil(len(fetch) / 20))
        await p.paginate()

    @seasonstats.command(name='gains', aliases=['trophies'])
    async def seasonstats_gains(self, ctx, season: typing.Optional[int] = None):
        """Get trophy gains for all clans.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+season stats gains`
        :white_check_mark: `+season stats gains 1`
        """
        season = await self._default_season(season)
        clans = await ctx.get_clans()
        query = """SELECT player_tag, trophies - start_trophies as gain, trophies
                   FROM players
                   WHERE player_tag = ANY($1::TEXT[])
                   AND season_id = $2
                   ORDER BY gain DESC
                   NULLS LAST
                """
        fetch = await ctx.db.fetch(query, self._member_tags(clans), season)
        if not fetch:
            return await ctx.send("No data found. Sorry.")
        title = f"Trophy Gains for Season {season}"
        key = f"**Key:**\n{misc['trophygreen']} - Trophy Gain\n{misc['trophygold']} - Total Trophies"
        p = StatsGainsPaginator(ctx, fetch, title, key=key, page_count=math.ceil(len(fetch) / 20))
        await p.paginate()

    @seasonstats.command(name='donors', aliases=['donations', 'donates', 'donation'])
    async def seasonstats_donors(self, ctx, season: typing.Optional[int] = None):
        """Get donations for all clans.

        **Parameters**
        :key: Season ID (optional - defaults to last season)

        **Example**
        :white_check_mark: `+season stats donors`
        :white_check_mark: `+season stats donations 4`
        """
        season = await self._default_season(season)
        clans = await ctx.get_clans()
        query = """SELECT player_tag, (end_friend_in_need + end_sharing_is_caring) - (start_friend_in_need + start_sharing_is_caring) as donations
                   FROM players
                   WHERE player_tag = ANY($1::TEXT[])
                   AND season_id = $2
                   ORDER BY donations DESC
                   NULLS LAST
                """
        fetch = await ctx.db.fetch(query, self._member_tags(clans), season)
        if not fetch:
            return await ctx.send("No data found. Sorry.")
        title = f"Donations for Season {season}"
        p = StatsDonorsPaginator(ctx, fetch, title, page_count=math.ceil(len(fetch) / 20))
        await p.paginate()
def setup(bot):
    """Extension entry point: register the SeasonStats cog with the bot."""
    bot.add_cog(SeasonStats(bot))
|
<reponame>ska-telescope/sdp-prototype
# -*- coding: utf-8 -*-
"""Tango SDPSubarray device module."""
# pylint: disable=invalid-name
# pylint: disable=too-many-lines
# pylint: disable=wrong-import-position
# pylint: disable=too-many-public-methods
# pylint: disable=fixme
import os
import sys
import time
import signal
import logging
import json
from enum import IntEnum, unique
import jsonschema
from ska_sdp_logging import tango_logging
import tango
from tango import AttrWriteType, AttributeProxy, ConnectionFailed, Database, \
DbDevInfo, DevState
from tango.server import Device, DeviceMeta, attribute, command, \
device_property, run
sys.path.insert(0, os.path.join(os.path.dirname(__file__)))
from release import VERSION as SERVER_VERSION # noqa
try:
    import ska_sdp_config
except ImportError:
    # Optional dependency: without it the device runs with the SDP Config
    # DB features disabled (see init_device).
    ska_sdp_config = None

# Module-level logger used throughout this device module.
LOG = logging.getLogger()
# https://pytango.readthedocs.io/en/stable/data_types.html#devenum-pythonic-usage
@unique
class AdminMode(IntEnum):
    """Values reported/accepted by the adminMode attribute."""

    OFFLINE = 0
    ONLINE = 1
    MAINTENANCE = 2
    NOT_FITTED = 3
    RESERVED = 4
@unique
class HealthState(IntEnum):
    """Values reported by the healthState attribute."""

    OK = 0
    DEGRADED = 1
    FAILED = 2
    UNKNOWN = 3
@unique
class ObsState(IntEnum):
    """Observation states reported/accepted by the obsState attribute."""

    IDLE = 0
    CONFIGURING = 1
    READY = 2
    SCANNING = 3
    PAUSED = 4
    ABORTED = 5
    FAULT = 6
@unique
class FeatureToggle(IntEnum):
    """Feature toggles, controlled via TOGGLE_<NAME> environment variables."""

    CONFIG_DB = 1  #: Enable / Disable the Config DB
    CBF_OUTPUT_LINK = 2  #: Enable / Disable use of the CBF OUTPUT LINK
    AUTO_REGISTER = 3  #: Enable / Disable tango db auto-registration
# class SDPSubarray(SKASubarray):
class SDPSubarray(Device):
"""SDP Subarray device class.
.. note::
This should eventually inherit from SKASubarray but these need
some work before doing so would add any value to this device.
"""
# pylint: disable=attribute-defined-outside-init
# pylint: disable=too-many-instance-attributes
# pylint: disable=no-self-use
__metaclass__ = DeviceMeta
# -----------------
# Device Properties
# -----------------
SdpMasterAddress = device_property(
dtype='str',
doc='FQDN of the SDP Master',
default_value='mid_sdp/elt/master'
)
# ----------
# Attributes
# ----------
serverVersion = attribute(
label='Server Version',
dtype=str,
access=AttrWriteType.READ,
doc='The version of the SDP Subarray device'
)
obsState = attribute(
label='Obs State',
dtype=ObsState,
access=AttrWriteType.READ_WRITE,
doc='The device obs state.',
polling_period=1000
)
adminMode = attribute(
label='Admin mode',
dtype=AdminMode,
access=AttrWriteType.READ_WRITE,
doc='The device admin mode.',
polling_period=1000
)
healthState = attribute(
label='Health state',
dtype=HealthState,
access=AttrWriteType.READ,
doc='The health state reported for this device.',
polling_period=1000
)
receiveAddresses = attribute(
label='Receive Addresses',
dtype=str,
access=AttrWriteType.READ,
doc='Host addresses for the visibility receive workflow as a '
'JSON string.',
polling_period=1000
)
processingBlockState = attribute(
label='State of real-time processing blocks',
dtype=str,
access=AttrWriteType.READ,
doc='Processing block states for real-time workflows as a '
'JSON string.',
polling_period=5000
)
# ---------------
# General methods
# ---------------
    def init_device(self):
        """Initialise the device.

        Sets default attribute values, decides (via package availability
        and feature toggles) whether the SDP Config DB is used, and leaves
        the device in the OFF state.
        """
        # SKASubarray.init_device(self)
        Device.init_device(self)
        self.set_state(DevState.INIT)
        LOG.info('Initialising SDP Subarray: %s', self.get_name())
        # Initialise attributes
        self._set_obs_state(ObsState.IDLE)
        self._set_admin_mode(AdminMode.ONLINE)
        self._set_health_state(HealthState.OK)
        self._set_receive_addresses(None)
        # Initialise instance variables
        self._sbi_id = None
        self._pb_realtime = []  # IDs of real-time processing blocks
        self._pb_batch = []  # IDs of batch processing blocks
        self._cbf_outlink_address = None
        self._pb_receive_addresses = None
        # Connect to the config DB only when the package is importable AND
        # the feature toggle is enabled.
        if ska_sdp_config is not None \
                and self.is_feature_active(FeatureToggle.CONFIG_DB):
            self._config_db_client = ska_sdp_config.Config()
            LOG.debug('SDP Config DB enabled')
        else:
            self._config_db_client = None
            LOG.warning('SDP Config DB disabled %s',
                        '(ska_sdp_config package not found)'
                        if ska_sdp_config is None
                        else 'by feature toggle')
        if self.is_feature_active(FeatureToggle.CBF_OUTPUT_LINK):
            LOG.debug('CBF output link enabled')
        else:
            LOG.debug('CBF output link disabled')
        # The subarray device is initialised in the OFF state.
        self.set_state(DevState.OFF)
        LOG.info('SDP Subarray initialised: %s', self.get_name())
    def always_executed_hook(self):
        """Run on each call (Tango hook); intentionally a no-op."""

    def delete_device(self):
        """Device destructor."""
        LOG.info('Deleting subarray device: %s', self.get_name())
    # ------------------
    # Attributes methods
    # ------------------

    def read_serverVersion(self):
        """Get the SDPSubarray device server version attribute.

        :returns: The SDP subarray device server version.
        """
        return SERVER_VERSION

    def read_obsState(self):
        """Get the obsState attribute.

        :returns: The current obsState attribute value.
        """
        return self._obs_state

    def read_adminMode(self):
        """Get the adminMode attribute.

        :returns: The current adminMode attribute value.
        """
        return self._admin_mode

    def read_healthState(self):
        """Get the healthState attribute.

        :returns: The current healthState attribute value.
        """
        return self._health_state

    def read_receiveAddresses(self):
        """Get the receive addresses.

        More details are provided on SKA confluence at the address:
        http://bit.ly/2Gad55Q

        :returns: JSON describing receive addresses
        """
        # Serialises to the JSON literal 'null' when no addresses are set.
        return json.dumps(self._receive_addresses)
def read_processingBlockState(self):
"""Get the states of the real-time processing blocks.
:returns: JSON describing real-time processing block states
"""
pb_state_list = []
if self._config_db_client is not None:
for pb_id in self._pb_realtime:
for txn in self._config_db_client.txn():
pb_state = txn.get_processing_block_state(pb_id).copy()
pb_state['id'] = pb_id
pb_state_list.append(pb_state)
return json.dumps(pb_state_list)
    def write_obsState(self, obs_state):
        """Set the obsState attribute.

        :param obs_state: An observation state enum value.
        """
        self._set_obs_state(obs_state)

    def write_adminMode(self, admin_mode):
        """Set the adminMode attribute.

        :param admin_mode: An admin mode enum value.
        """
        self._set_admin_mode(admin_mode)
    # --------
    # Commands
    # --------

    @command(dtype_in=str, doc_in='Resource configuration JSON string')
    def AssignResources(self, config=''):
        """Assign resources to the subarray.

        This is currently a noop for SDP!

        Following the description of the SKA subarray device model,
        resources can only be assigned to the subarray device when the
        obsState attribute is IDLE. Once resources are assigned to the
        subarray device, the device state transitions to ON.

        :param config: Resource specification (currently ignored)
        """
        # pylint: disable=unused-argument
        LOG.info('-------------------------------------------------------')
        LOG.info('AssignResources (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Allowed only while IDLE and in an operational admin mode.
        self._require_obs_state([ObsState.IDLE])
        self._require_admin_mode([AdminMode.ONLINE, AdminMode.MAINTENANCE,
                                  AdminMode.RESERVED])
        LOG.warning('Assigning resources is currently a no-op!')
        LOG.debug('Setting device state to ON')
        self.set_state(DevState.ON)
        LOG.info('-------------------------------------------------------')
        LOG.info('AssignResources Successful!')
        LOG.info('-------------------------------------------------------')
    @command(dtype_in=str, doc_in='Resource configuration JSON string')
    def ReleaseResources(self, config=''):
        """Release resources assigned to the subarray.

        This is currently a noop for SDP!

        Following the description of the SKA subarray device model,
        when all resources are released the device state should transition to
        OFF. Releasing resources is only allowed when the obsState is IDLE.

        :param config: Resource specification (currently ignored).
        """
        # pylint: disable=unused-argument
        LOG.info('-------------------------------------------------------')
        LOG.info('ReleaseResources (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        self._require_obs_state([ObsState.IDLE])
        # Allowed in any admin mode except OFFLINE and NOT_FITTED.
        self._require_admin_mode([AdminMode.OFFLINE, AdminMode.NOT_FITTED],
                                 invert=True)
        LOG.warning('Release resources is currently a no-op!')
        LOG.debug('Setting device state to OFF')
        self.set_state(DevState.OFF)
        LOG.info('-------------------------------------------------------')
        LOG.info('ReleaseResources Successful!')
        LOG.info('-------------------------------------------------------')
    @command(dtype_in=str, doc_in='Processing block configuration JSON string')
    def Configure(self, config_str):
        """Configure processing associated with this subarray.

        This is achieved by providing a JSON string containing an array of
        processing block definitions that specify the workflows to
        execute and their parameters. The workflows may be real-time or
        batch.

        :param config_str: Processing block configuration JSON string.
        """
        LOG.info('-------------------------------------------------------')
        LOG.info('Configure (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Check obsState is IDLE, and set to CONFIGURING
        self._require_obs_state([ObsState.IDLE])
        self._set_obs_state(ObsState.CONFIGURING)
        # self.set_state(DevState.ON)
        # Validate the JSON configuration string against configure.json
        config = self._validate_json_config(config_str, 'configure.json')
        if config is None:
            # Validation has failed, so set obsState back to IDLE and raise
            # an error
            self._set_obs_state(ObsState.IDLE)
            self._raise_command_error('Configuration validation failed')
            return
        # Create the processing blocks in the config DB
        self._create_processing_blocks(config)
        # Set the receive addresses for the first scan
        scan_id = config.get('scanId')
        receive_addresses = self._get_receive_addresses(scan_id)
        self._set_receive_addresses(receive_addresses)
        # Set the obsState to READY
        self._set_obs_state(ObsState.READY)
        LOG.info('-------------------------------------------------------')
        LOG.info('Configure successful!')
        LOG.info('-------------------------------------------------------')
    @command(dtype_in=str, doc_in='Scan configuration JSON string')
    def ConfigureScan(self, config_str):
        """Configure the subarray device to execute a scan.

        This allows scan specific, late-binding information to be provided
        to the configured real-time workflows.

        ConfigureScan is only allowed in the READY obsState and should
        leave the Subarray device in the READY obsState when configuring
        is complete. While Configuring the Scan the obsState is set to
        CONFIGURING.

        :param config_str: Scan configuration JSON string.
        """
        LOG.info('-------------------------------------------------------')
        LOG.info('ConfigureScan (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Check the obsState is READY and set to CONFIGURING
        self._require_obs_state([ObsState.READY])
        self._set_obs_state(ObsState.CONFIGURING)
        # Validate JSON configuration string against configure_scan.json
        config = self._validate_json_config(config_str, 'configure_scan.json')
        if config is None:
            # Validation has failed, so set obsState back to READY and raise
            # an error.
            self._set_obs_state(ObsState.READY)
            self._raise_command_error('Configuration validation failed')
            return
        # Update scan parameters
        self._update_scan_parameters(config)
        # Set the receive addresses for the next scan
        scan_id = config.get('scanId')
        receive_addresses = self._get_receive_addresses(scan_id)
        self._set_receive_addresses(receive_addresses)
        # Set the obsState to READY
        self._set_obs_state(ObsState.READY)
        LOG.info('-------------------------------------------------------')
        LOG.info('ConfigureScan Successful!')
        LOG.info('-------------------------------------------------------')
    @command
    def Scan(self):
        """Command issued to start scan (READY -> SCANNING)."""
        LOG.info('-------------------------------------------------------')
        LOG.info('Scan (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Check obsState is READY
        self._require_obs_state([ObsState.READY])
        # self.set_state(DevState.ON)
        # Set obsState to SCANNING
        self._set_obs_state(ObsState.SCANNING)
        LOG.info('-------------------------------------------------------')
        LOG.info('Scan Successful')
        LOG.info('-------------------------------------------------------')

    @command
    def EndScan(self):
        """Command issued to end scan (SCANNING -> READY)."""
        LOG.info('-------------------------------------------------------')
        LOG.info('EndScan (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Check obsState is SCANNING
        self._require_obs_state([ObsState.SCANNING])
        # Clear receiveAddresses
        self._set_receive_addresses(None)
        # Set obsState to READY
        self._set_obs_state(ObsState.READY)
        LOG.info('-------------------------------------------------------')
        LOG.info('EndScan Successful')
        LOG.info('-------------------------------------------------------')
    @command
    def EndSB(self):
        """Command issued to end the scheduling block (READY -> IDLE)."""
        LOG.info('-------------------------------------------------------')
        LOG.info('EndSB (%s)', self.get_name())
        LOG.info('-------------------------------------------------------')
        # Check obsState is READY
        self._require_obs_state([ObsState.READY])
        # End the real-time processing associated with this subarray
        self._end_realtime_processing()
        # Set obsState to IDLE
        self._set_obs_state(ObsState.IDLE)
        LOG.info('-------------------------------------------------------')
        LOG.info('EndSB Successful')
        LOG.info('-------------------------------------------------------')
# -------------------------------------
# Public methods
# -------------------------------------
@staticmethod
def set_feature_toggle_default(feature_name, default):
"""Set the default value of a feature toggle.
:param feature_name: Name of the feature
:param default: Default for the feature toggle (if it is not set)
"""
env_var = SDPSubarray._get_feature_toggle_env_var(feature_name)
if not os.environ.get(env_var):
LOG.debug('Setting default for toggle: %s = %s', env_var, default)
os.environ[env_var] = str(int(default))
@staticmethod
def is_feature_active(feature_name):
"""Check if feature is active.
:param feature_name: Name of the feature.
:returns: True if the feature toggle is enabled.
"""
env_var = SDPSubarray._get_feature_toggle_env_var(feature_name)
env_var_value = os.environ.get(env_var)
return env_var_value == '1'
# -------------------------------------
# Private methods
# -------------------------------------
@staticmethod
def _get_feature_toggle_env_var(feature_name):
"""Get the env var associated with the feature toggle.
:param feature_name: Name of the feature.
:returns: environment variable name for feature toggle.
"""
if isinstance(feature_name, FeatureToggle):
feature_name = feature_name.name
env_var = str('toggle_' + feature_name).upper()
allowed = ['TOGGLE_' + toggle.name for toggle in FeatureToggle]
if env_var not in allowed:
message = 'Unknown feature toggle: {} (allowed: {})' \
.format(env_var, allowed)
LOG.error(message)
raise ValueError(message)
return env_var
    def _set_obs_state(self, value, verbose=True):
        """Set the obsState and issue a change event."""
        if verbose:
            LOG.debug('Setting obsState to: %s', repr(ObsState(value)))
        self._obs_state = value
        self.push_change_event('obsState', self._obs_state)

    def _set_admin_mode(self, value, verbose=True):
        """Set the adminMode and issue a change event."""
        if verbose:
            LOG.debug('Setting adminMode to: %s', repr(AdminMode(value)))
        self._admin_mode = value
        self.push_change_event('adminMode', self._admin_mode)

    def _set_health_state(self, value, verbose=True):
        """Set the healthState and issue a change event."""
        if verbose:
            LOG.debug('Setting healthState to: %s', repr(HealthState(value)))
        self._health_state = value
        self.push_change_event('healthState', self._health_state)

    def _set_receive_addresses(self, value):
        """Set the receiveAddresses and issue a change event."""
        self._receive_addresses = value
        # The change event carries the JSON-serialised value, matching
        # read_receiveAddresses.
        self.push_change_event('receiveAddresses',
                               json.dumps(self._receive_addresses))
def _require_obs_state(self, allowed_states, invert=False):
"""Require specified obsState values.
Checks if the current obsState matches the specified allowed values.
If invert is False (default), throw an exception if the obsState
is not in the list of specified states.
If invert is True, throw an exception if the obsState is NOT in the
list of specified allowed states.
:param allowed_states: List of allowed obsState values
:param invert: If True require that the obsState is not in one of
specified allowed states
"""
# Fail if obsState is NOT in one of the allowed_obs_states
if not invert and self._obs_state not in allowed_states:
self._set_obs_state(ObsState.FAULT)
msg = 'obsState ({}) must be in {}'.format(
self._obs_state, allowed_states)
self._raise_command_error(msg)
# Fail if obsState is in one of the allowed_obs_states
if invert and self._obs_state in allowed_states:
self._set_obs_state(ObsState.FAULT)
msg = 'The device must NOT be in one of the ' \
'following obsState values: {}'.format(allowed_states)
self._raise_command_error(msg)
    def _require_admin_mode(self, allowed_modes, invert=False):
        """Require specified adminMode values.

        Checks if the current adminMode matches the specified allowed values.

        If invert is False (default), throw an exception if not in the list
        of specified states / modes.

        If invert is True, throw an exception if the adminMode is NOT in the
        list of specified allowed states.

        :param allowed_modes: List of allowed adminMode values
        :param invert: If True require that the adminMode is not in one of
            specified allowed states
        """
        # Fail if adminMode is NOT in one of the allowed_modes
        if not invert and self._admin_mode not in allowed_modes:
            msg = 'adminMode ({}) must be in: {}'.format(
                repr(self._admin_mode), allowed_modes)
            LOG.error(msg)
            self._raise_command_error(msg)
        # Fail if adminMode IS in one of the allowed_modes
        if invert and self._admin_mode in allowed_modes:
            msg = 'adminMode ({}) must NOT be in: {}'.format(
                repr(self._admin_mode), allowed_modes)
            LOG.error(msg)
            self._raise_command_error(msg)
    def _raise_command_error(self, desc, origin=''):
        """Raise a command error.

        :param desc: Error message / description.
        :param origin: Error origin (optional).
        """
        self._raise_error(desc, reason='Command error', origin=origin)

    def _raise_error(self, desc, reason='', origin=''):
        """Raise a Tango DevFailed error (and log its details first).

        :param desc: Error message / description.
        :param reason: Reason for the error.
        :param origin: Error origin (optional).
        """
        if reason != '':
            LOG.error(reason)
        LOG.error(desc)
        if origin != '':
            LOG.error(origin)
        # throw_exception raises, so this never returns.
        tango.Except.throw_exception(reason, desc, origin,
                                     tango.ErrSeverity.ERR)
@staticmethod
def _validate_json_config(config_str, schema_filename):
"""Validate a JSON configuration against a schema.
:param config_str: JSON configuration string
:param schema_filename: name of schema file in the 'schema'
sub-directory
:returns: validated configuration (as dict/list), or None if
validation fails
"""
LOG.debug('Validating JSON configuration against schema %s',
schema_filename)
schema_path = os.path.join(os.path.dirname(__file__), 'schema',
schema_filename)
config = None
if config_str == '':
LOG.error('Empty configuration string')
try:
config = json.loads(config_str)
with open(schema_path, 'r') as file:
schema = json.load(file)
jsonschema.validate(config, schema)
except json.JSONDecodeError as error:
LOG.error('Unable to decode configuration string as JSON: %s',
error.msg)
config = None
except jsonschema.ValidationError as error:
LOG.error('Unable to validate JSON configuration: %s',
error.message)
config = None
if config is not None:
LOG.debug('Successfully validated JSON configuration')
return config
def _create_processing_blocks(self, config):
    """Create processing blocks in the configuration database.

    Also records which real-time PB (at most one) carries the CSP CBF
    outlink address and will therefore provide receive addresses.

    :param config: dict containing configuration data
    """
    # pylint: disable=too-many-branches
    sbi_id = config.get('sbiId')
    scan_id = config.get('scanId')
    LOG.info('Scheduling block instance: %s', sbi_id)
    LOG.info('Scan: %s', scan_id)
    self._sbi_id = sbi_id
    # Loop over the processing block configurations.
    cbf_outlink_address = None
    pb_receive_addresses = None
    for pbc in config.get('processingBlocks'):
        pb_id = pbc.get('id')
        LOG.info('Creating processing block %s', pb_id)
        # Get type of workflow and add the processing block ID to the
        # appropriate list.
        workflow = pbc.get('workflow')
        wf_type = workflow.get('type')
        if wf_type == 'realtime':
            self._pb_realtime.append(pb_id)
        elif wf_type == 'batch':
            self._pb_batch.append(pb_id)
        else:
            LOG.error('Unknown workflow type: %s', wf_type)
        # cspCbfOutlinkAddress may appear in at most one real-time PB;
        # the first one seen wins and becomes the receive-address PB.
        if 'cspCbfOutlinkAddress' in pbc:
            if wf_type == 'batch':
                LOG.error('cspCbfOutlinkAddress attribute must not '
                          'appear in batch processing block '
                          'configuration')
            elif pb_receive_addresses is not None:
                LOG.error('cspCbfOutlinkAddress attribute must not '
                          'appear in more than one real-time processing '
                          'block configuration')
            else:
                cbf_outlink_address = pbc.get('cspCbfOutlinkAddress')
                pb_receive_addresses = pb_id
        # Scan parameters: copied (with the scan ID injected) for
        # real-time PBs; batch PBs must not carry them.
        # NOTE(review): if wf_type is unknown AND scanParameters is
        # present, scan_parameters is left unset/stale for this
        # iteration -- confirm upstream schema validation excludes this.
        if 'scanParameters' in pbc:
            if wf_type == 'realtime':
                scan_parameters = pbc.get('scanParameters')
                if scan_parameters is not None:
                    # Copy so the caller's config dict is not mutated.
                    scan_parameters = scan_parameters.copy()
                    scan_parameters['scanId'] = scan_id
                else:
                    scan_parameters = {}
            elif wf_type == 'batch':
                LOG.error('scanParameters attribute must not appear '
                          'in batch processing block configuration')
                scan_parameters = {}
        else:
            scan_parameters = {}
        if 'dependencies' in pbc and wf_type == 'realtime':
            LOG.error('dependencies attribute must not appear in '
                      'real-time processing block configuration')
        # Create processing block with empty state
        if self._config_db_client is not None:
            pb = ska_sdp_config.ProcessingBlock(
                pb_id=pb_id,
                sbi_id=sbi_id,
                workflow=workflow,
                parameters=pbc.get('parameters'),
                scan_parameters=scan_parameters
            )
            # txn() presumably retries the transaction until it
            # commits -- TODO confirm against ska_sdp_config docs.
            for txn in self._config_db_client.txn():
                txn.create_processing_block(pb)
                txn.create_processing_block_state(pb_id, {})
    # Only remember the outlink/receive-address PB when the feature is
    # enabled and the config DB is in use.
    if self.is_feature_active(FeatureToggle.CBF_OUTPUT_LINK) \
            and self._config_db_client is not None:
        self._cbf_outlink_address = cbf_outlink_address
        self._pb_receive_addresses = pb_receive_addresses
def _update_scan_parameters(self, config):
    """Update scan parameters in real-time processing blocks.

    :param config: dict containing configuration data
    """
    scan_id = config.get('scanId')
    LOG.info('Scan: %s', scan_id)
    for pbc in config.get('processingBlocks'):
        pb_id = pbc.get('id')
        # Only real-time PBs owned by this subarray may be updated.
        if pb_id not in self._pb_realtime:
            LOG.error('Processing block %s is not a real-time PB '
                      'associated with this subarray', pb_id)
            continue
        LOG.info('Updating scan parameters in processing block %s', pb_id)
        # Copy before injecting the scan ID so the caller's config dict
        # is not mutated.
        scan_parameters = pbc.get('scanParameters').copy()
        scan_parameters['scanId'] = scan_id
        if self._config_db_client is not None:
            # txn() presumably retries the transaction until it
            # commits -- TODO confirm against ska_sdp_config docs.
            for txn in self._config_db_client.txn():
                pb_old = txn.get_processing_block(pb_id)
                # Rebuild the PB with the new scan parameters, keeping
                # all other fields from the stored version.
                pb_new = ska_sdp_config.ProcessingBlock(
                    pb_id=pb_old.pb_id,
                    sbi_id=pb_old.sbi_id,
                    workflow=pb_old.workflow,
                    parameters=pb_old.parameters,
                    scan_parameters=scan_parameters
                )
                txn.update_processing_block(pb_new)
def _get_receive_addresses(self, scan_id):
    """Get the receive addresses for the next scan.

    The channel link map is read from the CSP device attribute and
    passed to the workflow that was configured to provide the receive
    addresses. The workflow responds by generating the addresses.
    This communication happens via the processing block state.

    :param scan_id: scan ID for which to get receive addresses
    :returns: receive address as dict, or None when no outlink address
        or config DB client is configured
    """
    if self._cbf_outlink_address is None \
            or self._config_db_client is None:
        return None
    pb_id = self._pb_receive_addresses
    # Get channel link map with the same scan ID from CSP device
    channel_link_map = self._get_channel_link_map(scan_id)
    # Update channel link map in the PB state
    for txn in self._config_db_client.txn():
        pb_state = txn.get_processing_block_state(pb_id)
        pb_state['channel_link_map'] = channel_link_map
        txn.update_processing_block_state(pb_id, pb_state)
    # Wait for receive addresses with same scan ID to be available in the
    # PB state
    for txn in self._config_db_client.txn():
        pb_state = txn.get_processing_block_state(pb_id)
        receive_addresses = pb_state.get('receive_addresses')
        if receive_addresses is None:
            ra_scan_id = None
        else:
            ra_scan_id = receive_addresses.get('scanId')
        if ra_scan_id != scan_id:
            # txn.loop(wait=True) presumably blocks until the state
            # changes and re-runs the transaction body -- TODO confirm
            # against ska_sdp_config docs.
            txn.loop(wait=True)
    return receive_addresses
def _get_channel_link_map(self, scan_id, timeout=30.0):
    """Get channel link map from the CSP Tango device attribute.

    Polls the attribute once per second until its (validated) JSON
    payload carries the requested scan ID, or the timeout elapses.

    :param scan_id: Scan ID to match
    :param timeout: Timeout in seconds
    :returns: Validated channel link map as dict
    """
    LOG.debug('Reading channel link map from %s',
              self._cbf_outlink_address)
    attribute_proxy = AttributeProxy(self._cbf_outlink_address)
    # Check the attribute is reachable before starting to poll it.
    attribute_proxy.ping()
    LOG.debug('Waiting for CSP attribute to provide channel link map for '
              'scan ID %s', scan_id)
    # This is a horrendous hack to poll the CSP device until the scan
    # ID matches. It needs to be refactored to use events.
    start_time = time.time()
    while True:
        channel_link_map_str = attribute_proxy.read().value
        channel_link_map = self._validate_json_config(
            channel_link_map_str, 'channel_link_map.json')
        if channel_link_map is None:
            self._set_obs_state(ObsState.FAULT)
            # _raise_command_error raises via tango.Except, so the
            # break below is defensive / normally unreachable.
            self._raise_command_error('Channel link map validation '
                                      'failed')
            break
        if channel_link_map.get('scanID') == scan_id:
            break
        elapsed = time.time() - start_time
        LOG.debug('Waiting for scan ID on CSP attribute '
                  '(elapsed: %2.4f s)', elapsed)
        if elapsed > timeout:
            self._set_obs_state(ObsState.FAULT)
            # As above: the raise normally pre-empts the break/return.
            self._raise_command_error('Timeout reached while waiting for '
                                      'scan ID on CSP attribute')
            channel_link_map = None
            break
        # Throttle the polling loop.
        time.sleep(1.0)
    return channel_link_map
def _end_realtime_processing(self):
    """Reset the subarray's internal real-time processing state.

    Presently this only clears the internal bookkeeping; the
    processing blocks themselves are not updated in any way.
    Eventually it will need to tell the real-time processing blocks
    to stop.
    """
    self._sbi_id = None
    self._cbf_outlink_address = None
    self._pb_receive_addresses = None
    self._pb_realtime = []
    self._pb_batch = []
def delete_device_server(instance_name='*'):
    """Delete (unregister) SDPSubarray device server instance(s).

    :param instance_name: Optional name of the device server instance
        to remove. If not specified, all server instances are removed.
    """
    try:
        tango_db = Database()
        search_pattern = '{}/{}'.format('SDPSubarray', instance_name)
        for server in list(tango_db.get_server_list(search_pattern)):
            LOG.info('Removing device server: %s', server)
            tango_db.delete_server(server)
    except ConnectionFailed:
        # No Tango database available: nothing to unregister.
        pass
def register(instance_name, *device_names):
    """Register devices with a SDPSubarray device server instance.

    Devices that are already registered are left untouched.

    :param instance_name: Instance name of the SDPSubarray device server
    :param device_names: Subarray device names to register
    """
    # pylint: disable=protected-access
    try:
        tango_db = Database()
        class_name = 'SDPSubarray'
        server_name = '{}/{}'.format(class_name, instance_name)
        registered = set(tango_db.get_device_name(server_name, class_name))
        device_info = DbDevInfo()
        device_info._class = class_name
        device_info.server = server_name
        for device_name in device_names:
            if device_name in registered:
                LOG.debug("Device '%s' already registered", device_name)
                continue
            device_info.name = device_name
            LOG.info('Registering device: %s (server: %s, class: %s)',
                     device_info.name, server_name, class_name)
            tango_db.add_device(device_info)
    except ConnectionFailed:
        # No Tango database available: registration is skipped.
        pass
def main(args=None, **kwargs):
    """Run server.

    :param args: command-line arguments forwarded to ``run``
    :param kwargs: additional keyword arguments forwarded to ``run``
    :returns: result of the Tango ``run`` call
    """
    # Initialise logging
    log_level = tango.LogLevel.LOG_INFO
    # NOTE(review): only argv[2] is inspected for '-v'; a verbose flag in
    # any other position is ignored -- confirm this matches the launcher.
    if len(sys.argv) > 2 and '-v' in sys.argv[2]:
        log_level = tango.LogLevel.LOG_DEBUG
    tango_logging.init(device_name='SDPSubarray', level=log_level)
    # Set default values for feature toggles.
    SDPSubarray.set_feature_toggle_default(FeatureToggle.CONFIG_DB, False)
    SDPSubarray.set_feature_toggle_default(FeatureToggle.CBF_OUTPUT_LINK,
                                           False)
    SDPSubarray.set_feature_toggle_default(FeatureToggle.AUTO_REGISTER, True)
    # If the feature is enabled, attempt to auto-register the device
    # with the tango db.
    if SDPSubarray.is_feature_active(FeatureToggle.AUTO_REGISTER):
        if len(sys.argv) > 1:
            # delete_device_server('*')
            # Register a single subarray device; argv[1] is the server
            # instance name.
            devices = ['mid_sdp/elt/subarray_{:d}'.format(i + 1)
                       for i in range(1)]
            register(sys.argv[1], *devices)
    return run((SDPSubarray,), args=args, **kwargs)
def terminate(_sig, _frame):
    """Signal handler: log the request and exit cleanly."""
    LOG.info('Asked to terminate')
    raise SystemExit(0)
if __name__ == '__main__':
    # Exit cleanly on SIGTERM (e.g. when stopped by an orchestrator).
    signal.signal(signal.SIGTERM, terminate)
    main()
|
<filename>tests/learner/test_object_recognizer.py
import pytest
from more_itertools import first, one
from adam.language_specific.chinese.chinese_phase_1_lexicon import (
GAILA_PHASE_1_CHINESE_LEXICON,
)
from adam.curriculum.curriculum_utils import CHOOSER_FACTORY, phase1_instances
from adam.language_specific.english.english_language_generator import PREFER_DITRANSITIVE
from adam.learner import PerceptionSemanticAlignment
from adam.learner.integrated_learner import SymbolicIntegratedTemplateLearner
from adam.learner.language_mode import LanguageMode
from adam.ontology.phase1_ontology import (
AGENT,
BABY,
CHAIR,
DAD,
GAILA_PHASE_1_ONTOLOGY,
GIVE,
GOAL,
PHASE_1_CURRICULUM_OBJECTS,
THEME,
)
from adam.perception import PerceptualRepresentation
from adam.perception.high_level_semantics_situation_to_developmental_primitive_perception import (
HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator,
)
from adam.perception.perception_graph import PerceptionGraph
from adam.random_utils import RandomChooser
from adam.situation import Action, SituationObject
from adam.situation.high_level_semantics_situation import HighLevelSemanticsSituation
from adam.situation.templates.phase1_templates import (
Phase1SituationTemplate,
object_variable,
sampled,
)
from tests.learner import (
LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER,
LANGUAGE_MODE_TO_OBJECT_RECOGNIZER,
)
@pytest.mark.parametrize("object_type", PHASE_1_CURRICULUM_OBJECTS)
@pytest.mark.parametrize("language_mode", [LanguageMode.ENGLISH, LanguageMode.CHINESE])
def test_recognizes_ontology_objects(object_type, language_mode):
    """Check the recognizer names every phase-1 object in both languages."""
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY,
        salient_objects=[
            SituationObject.instantiate_ontology_node(
                ontology_node=object_type, ontology=GAILA_PHASE_1_ONTOLOGY
            )
        ],
    )
    perception_generator = (
        HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator(
            GAILA_PHASE_1_ONTOLOGY
        )
    )
    perception = perception_generator.generate_perception(
        situation, chooser=RandomChooser.for_seed(0), include_ground=False
    )
    learner = SymbolicIntegratedTemplateLearner(
        object_learner=LANGUAGE_MODE_TO_TEMPLATE_LEARNER_OBJECT_RECOGNIZER[language_mode]
    )
    descriptions = learner.describe(perception)
    assert descriptions
    tokens = one(descriptions.items())[0].as_token_sequence()
    if language_mode == LanguageMode.ENGLISH:
        assert object_type.handle in tokens
    else:
        mappings = (
            GAILA_PHASE_1_CHINESE_LEXICON._ontology_node_to_word  # pylint:disable=protected-access
        )
        # The original loop asserted only inside the matching branch, so the
        # test passed vacuously when the lexicon had no entry for the object.
        # Require at least one lexicon entry before checking the tokens.
        matching_words = [
            word
            for node, word in mappings.items()
            if node.handle == object_type.handle
        ]
        assert matching_words, (
            "No Chinese lexicon entry for " + object_type.handle
        )
        for word in matching_words:
            assert word.base_form in tokens
@pytest.mark.parametrize("language_mode", [LanguageMode.ENGLISH, LanguageMode.CHINESE])
def test_trivial_dynamic_situation_with_schemaless_object(language_mode):
    """The recognizer should match Dad (a schemaless object) in a trivial
    two-frame dynamic scene, in both languages."""
    dad_situation_object = SituationObject.instantiate_ontology_node(
        ontology_node=DAD, ontology=GAILA_PHASE_1_ONTOLOGY
    )
    situation = HighLevelSemanticsSituation(
        ontology=GAILA_PHASE_1_ONTOLOGY, salient_objects=[dad_situation_object]
    )
    perception_generator = (
        HighLevelSemanticsSituationToDevelopmentalPrimitivePerceptionGenerator(
            GAILA_PHASE_1_ONTOLOGY
        )
    )
    # We explicitly exclude ground in perception generation
    # this generates a static perception...
    perception = perception_generator.generate_perception(
        situation, chooser=RandomChooser.for_seed(0), include_ground=False
    )
    # so we need to construct a dynamic one by hand from two identical scenes
    dynamic_perception = PerceptualRepresentation(
        frames=[perception.frames[0], perception.frames[0]]
    )
    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(
        dynamic_perception
    )
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph
    )
    (_, description_to_matched_semantic_node) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[
        language_mode
    ].match_objects(perception_semantic_alignment)
    assert len(description_to_matched_semantic_node) == 1
    # Exactly one language clause can hold for the parametrized mode.
    assert (
        language_mode == LanguageMode.ENGLISH
        and ("Dad",) in description_to_matched_semantic_node
    ) or (
        language_mode == LanguageMode.CHINESE
        and ("ba4 ba4",) in description_to_matched_semantic_node
    )
@pytest.mark.parametrize("language_mode", [LanguageMode.ENGLISH, LanguageMode.CHINESE])
def test_recognize_in_transfer_of_possession(language_mode):
    """The recognizer should find Dad among the objects of a dynamic
    give-transfer situation, in both languages."""
    dad = object_variable("person_0", DAD)
    baby = object_variable("person_1", BABY)
    chair = object_variable("give_object_0", CHAIR)
    giving_template = Phase1SituationTemplate(
        "dad-transfer-of-possession",
        salient_object_variables=[dad, baby, chair],
        actions=[
            Action(
                GIVE,
                argument_roles_to_fillers=[(AGENT, dad), (GOAL, baby), (THEME, chair)],
            )
        ],
        syntax_hints=[PREFER_DITRANSITIVE],
    )
    # Sample a single instantiation of the template and keep only its
    # perception.
    (_, _, perception) = first(
        phase1_instances(
            "foo",
            sampled(
                giving_template,
                max_to_sample=1,
                chooser=CHOOSER_FACTORY(),
                ontology=GAILA_PHASE_1_ONTOLOGY,
                block_multiple_of_the_same_type=True,
            ),
        ).instances()
    )
    perception_graph = PerceptionGraph.from_dynamic_perceptual_representation(perception)
    perception_semantic_alignment = PerceptionSemanticAlignment.create_unaligned(
        perception_graph
    )
    (_, description_to_matched_semantic_node) = LANGUAGE_MODE_TO_OBJECT_RECOGNIZER[
        language_mode
    ].match_objects(perception_semantic_alignment)
    # Three salient objects plus one more recognized object (presumably
    # the ground) -- TODO confirm what the fourth match is.
    assert len(description_to_matched_semantic_node) == 4
    assert (
        language_mode == LanguageMode.ENGLISH
        and ("Dad",) in description_to_matched_semantic_node
    ) or (
        language_mode == LanguageMode.CHINESE
        and ("ba4 ba4",) in description_to_matched_semantic_node
    )
|
"""
TODO:
-) understand no boundary condition
-) validate understanding with analytical solution
"""
import nanopores, dolfin, os
from nanopores.physics.simplepnps import SimpleNernstPlanckProblem
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import force_profiles
from collections import defaultdict
# Register module parameters (overridable via nanopores' parameter
# system, e.g. from the command line).
nanopores.add_params(
savefig = False
)
class DiffusionProblem1D(SimpleNernstPlanckProblem):
    """1D stationary drift-diffusion problem for a target molecule in an
    external force field F, with fixed concentration on the top boundary
    and an open ("no boundary condition") bottom boundary."""

    # Copy the base class' solver options before overriding them.
    # Previously the dict was aliased (method = SimpleNernstPlanckProblem.method)
    # and then mutated, which silently changed the shared base-class dict.
    method = dict(SimpleNernstPlanckProblem.method)
    method["iterative"] = False

    @staticmethod
    def initial_u(V, c0):
        """Initial guess: uniform concentration c0 on the space V."""
        u = dolfin.Function(V)
        u.interpolate(dolfin.Constant(c0))
        return u

    @staticmethod
    def forms(V, geo, phys, F):
        """Variational forms for the stationary problem with flux
        J = -D*grad(c) + (D/kT)*F*c.

        :param V: function space
        :param geo: geometry (provides measures dx/ds and the mesh)
        :param phys: physics parameters (kT, rTarget, lscale, grad)
        :param F: 1D force profile
        :returns: tuple (a, L) of bilinear/linear forms
        """
        dx = geo.dx()
        grad = phys.grad
        kT = dolfin.Constant(phys.kT)
        D = dolfin.Constant(Dtarget(phys.rTarget))
        lscale = dolfin.Constant(phys.lscale)
        n = dolfin.FacetNormal(geo.mesh)
        c = dolfin.TrialFunction(V)
        d = dolfin.TestFunction(V)
        FF = dolfin.as_vector([F])
        # Flux: diffusion plus force-driven drift.
        J = -D*grad(c) + D/kT*FF*c
        a = dolfin.inner(J, grad(d))*dx
        L = dolfin.Constant(0.)*d*dx
        # Boundary term at the bottom: keep the flux free instead of
        # prescribing a value there ("no boundary condition").
        aNoBC = -lscale*dolfin.inner(J, n*d)*geo.ds("bottom")
        a += aNoBC
        return a, L

    @staticmethod
    def bcs(V, geo, c0):
        """Dirichlet BC: fixed concentration c0 on the top boundary only."""
        bc = dict(
            top=c0,
            #bottom = c0,
        )
        return geo.pwBC(V, "c0", value=bc)
def current(geo, phys, c, F):
    """Compute the target-molecule current from a concentration solution.

    :param geo: geometry (provides the measure dx)
    :param phys: physics parameters (kT, rTarget, lscale, mol, grad)
    :param c: concentration solution
    :param F: force profile used in the drift term
    :returns: current in molecules per millisecond [N/ms]
    """
    dx = geo.dx()
    grad = phys.grad
    lscale = phys.lscale
    mol = phys.mol
    kT = dolfin.Constant(phys.kT)
    D = dolfin.Constant(Dtarget(phys.rTarget))
    FF = dolfin.as_vector([F])
    # Drift velocity at z=0, for diagnostics. Parenthesized print works
    # under both Python 2 and 3 (the original used the py2-only statement).
    print("v = %s" % (Dtarget(phys.rTarget)*F(0.)/phys.kT,))
    j = -D*grad(c) + D/kT*FF*c
    #dolfin.plot(j)
    #dolfin.interactive()
    L = 20.
    r0 = 1./lscale
    Across = r0**2 * dolfin.pi
    # current in N/s
    J = mol * Across * dolfin.assemble(j[0]/dolfin.Constant(L) * dx)
    # current in N/ms
    J = J * 1e-3
    return J
def Dtarget(r):
    # Diffusion coefficient kT/(6*pi*eta*r) -- matches the
    # Stokes-Einstein relation for a sphere of radius r.
    return nanopores.kT/(6*dolfin.pi*nanopores.eta*r)
def J_FEM(F, c0):
    """Solve the 1D diffusion problem for force profile F and bulk
    concentration c0.

    :returns: tuple (concentration solution, current)
    """
    geometry = force_profiles.geo
    physics = nanopores.Physics(geo=geometry, rTarget=rMol*1e-9,
                                lscale=1e9)
    solver = nanopores.solve_pde(
        DiffusionProblem1D, geo=geometry, phys=physics,
        F=F, c0=c0, verbose=False)
    concentration = solver.solution
    J = current(geometry, physics, concentration, F)
    return concentration, J
def gather_currents(name, c0):
    """Compute currents for every force profile in dataset `name`.

    :param name: force-profile dataset name (e.g. "r025")
    :param c0: bulk concentration [mol/m**3]
    :returns: (qmols, currents) where qmols lists the molecule charges
        and currents maps each force key ("F", "Fi", "Fi2") to the list
        of corresponding currents
    """
    currents = defaultdict(list)
    qmols = []
    for results in force_profiles.Forces(name):
        qmols.append(results["Q"])
        for key in "F", "Fi", "Fi2":
            # Bind the raw profile to its own name: the original rebound
            # `f` to the wrapper built from a lambda closing over `f`,
            # making the closure self-referential.
            raw = results[key]
            f = force_profiles.function_from_lambda(
                lambda z: 1e-12*raw(z))
            u, J = J_FEM(f, c0)
            currents[key].append(J)
            #force_profiles.plot_function(f, label="Q="+str(Q))
            #if key=="F":
            #    force_profiles.plot_function(u, label="Q="+str(results["Q"]))
        # Parenthesized print works under both Python 2 and 3 (the
        # original used the py2-only print statement).
        print("Q %s, J %s, Ji %s, Jib %s" % (
            qmols[-1], currents["F"][-1], currents["Fi"][-1],
            currents["Fi2"][-1]))
    return qmols, currents
c0 = 1.6605 # [mol/m**3] = 1 molecule per (10nm)**3
#names = {0.25: "r025", 0.5: "r05", 0.2: "r02", 0.4: "r04", 0.75: "r075"}
# (molecule radius rMol [nm], force-profile dataset name) pairs to process.
items = (0.25, "r025"), (0.5, "r05"), (0.75, "r075")
figures = os.path.expanduser("~") + "/papers/pnps-numerics/figures/"
# For each molecule radius: compute currents vs. charge for the three
# force models and plot them in one figure.
for rMol, name in items:
    #plt.figure()
    qmols, currents = gather_currents(name, c0)
    #plt.legend()
    fig, ax = plt.subplots()
    ax.plot(qmols, currents["F"], "s-g", label="finite")
    ax.plot(qmols, currents["Fi"], "s-b", label="point")
    ax.plot(qmols, currents["Fi2"], "s-r", label="point, corrected")
    #ax.set_aspect('equal')
    # One x tick per unit charge.
    tick_spacing = 1.
    ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))
    #ax.set_ylim([-0.4, 0.]) #xaxis.set_ticks(np.arange(start, end, 0.712123))
    #ax.grid(True, which='both')
    #ax.axhline(y=0, color='#cccccc')
    plt.title("r = %s" %rMol)
    plt.xlabel("Molecule charge [q]")
    plt.ylabel("Molecule current [1/ms]")
    plt.legend()
    # Save to the papers figure directory when run with savefig=True.
    if savefig:
        fig = plt.gcf()
        fig.set_size_inches(5, 4)
        plt.savefig(figures + "molcurrent_r%.2f.eps" % rMol, bbox_inches='tight')
#plt.show()
|
import asyncio
import io
import logging
import pandas as pd
import core.real_time as creatime
import helpers.hasyncio as hasynci
import helpers.hprint as hprint
import helpers.hsql as hsql
import helpers.hunit_test as hunitest
import market_data as mdata
import oms.broker_example as obroexam
import oms.oms_db as oomsdb
import oms.portfolio as omportfo
import oms.portfolio_example as oporexam
import oms.test.oms_db_helper as omtodh
_LOG = logging.getLogger(__name__)
_5mins = pd.DateOffset(minutes=5)
# #############################################################################
class TestSimulatedPortfolio1(hunitest.TestCase):
    """
    Smoke test for a cash-only `SimulatedPortfolio`.
    """

    # @pytest.mark.skip("This is flaky because of the clock jitter")
    def test_state(self) -> None:
        """
        Check non-cash holdings for a Portfolio with only cash.
        """
        # Golden output; fuzzy_match below presumably normalizes the
        # whitespace differences vs. the pretty-printed frame -- confirm
        # against hunitest.
        expected = r""" asset_id curr_num_shares price value \
2000-01-01 09:35:00-05:00 -1 1000000 1 1000000
wall_clock_timestamp
2000-01-01 09:35:00-05:00 2000-01-01 09:35:00-05:00 """
        portfolio = self._get_portfolio1()
        actual = portfolio.get_cached_mark_to_market()
        self.assert_equal(str(actual), expected, fuzzy_match=True)

    def _get_portfolio1(self):
        """
        Return a freshly minted Portfolio with only cash.
        """
        # The solipsistic event loop replays wall-clock time
        # deterministically without real sleeping.
        with hasynci.solipsism_context() as event_loop:
            (
                market_data,
                _,
            ) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
            # Build a Portfolio.
            portfolio = oporexam.get_simulated_portfolio_example1(
                event_loop,
                market_data=market_data,
            )
            return portfolio
# #############################################################################
class TestSimulatedPortfolio2(hunitest.TestCase):
    """
    Test `SimulatedPortfolio` initialization and historical statistics.
    """

    def test_initialization_with_cash1(self) -> None:
        """
        Initialize a Portfolio with cash.
        """
        with hasynci.solipsism_context() as event_loop:
            (
                market_data,
                _,
            ) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
            # Build Portfolio.
            portfolio = oporexam.get_simulated_portfolio_example1(
                event_loop,
                market_data=market_data,
            )
            # Check: only the cash asset (id -1) is held.
            expected = pd.DataFrame(
                {-1: 1000000.0},
                [
                    pd.Timestamp(
                        "2000-01-01 09:35:00-05:00", tz="America/New_York"
                    )
                ],
            )
            self.assert_dfs_close(portfolio.get_historical_holdings(), expected)

    def test_initialization_with_holdings1(self) -> None:
        """
        Initialize a Portfolio with holdings.
        """
        with hasynci.solipsism_context() as event_loop:
            # The wall clock getter is unused here.
            (
                market_data,
                _,
            ) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
            # Build Broker.
            broker = obroexam.get_simulated_broker_example1(
                event_loop, market_data=market_data
            )
            # Build Portfolio.
            strategy_id = "str1"
            account = "paper"
            asset_id_col = "asset_id"
            mark_to_market_col = "price"
            pricing_method = "last"
            timestamp_col = "end_datetime"
            holdings_dict = {101: 727.5, 202: 1040.3, -1: 10000}
            portfolio = omportfo.SimulatedPortfolio.from_dict(
                strategy_id,
                account,
                broker,
                asset_id_col,
                mark_to_market_col,
                pricing_method,
                timestamp_col,
                holdings_dict=holdings_dict,
            )
            # Check.
            expected = pd.DataFrame(
                {101: 727.5, 202: 1040.3, -1: 10000.0},
                [
                    pd.Timestamp(
                        "2000-01-01 09:35:00-05:00", tz="America/New_York"
                    )
                ],
            )
            self.assert_dfs_close(portfolio.get_historical_holdings(), expected)

    def test_get_historical_statistics1(self) -> None:
        """
        Check historical statistics for a cash-only Portfolio.
        """
        with hasynci.solipsism_context() as event_loop:
            (
                market_data,
                _,
            ) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
            #
            portfolio = oporexam.get_simulated_portfolio_example1(
                event_loop,
                market_data=market_data,
            )
            # Check.
            txt = r"""
,2000-01-01 09:35:00-05:00
net_asset_holdings,0
cash,1000000.0
net_wealth,1000000.0
gross_exposure,0.0
leverage,0.0
pnl,NaN
realized_pnl,NaN
unrealized_pnl,NaN
"""
            expected = pd.read_csv(
                io.StringIO(txt),
                index_col=0,
            )
            # The timestamp doesn't parse correctly from the CSV.
            initial_timestamp = pd.Timestamp(
                "2000-01-01 09:35:00-05:00", tz="America/New_York"
            )
            expected.columns = [initial_timestamp]
            actual = portfolio.get_historical_statistics().transpose()
            self.assert_dfs_close(actual, expected, rtol=1e-2, atol=1e-2)

    # Renamed from `test_historical_statistics2` for consistency with the
    # sibling `test_get_historical_statistics{1,3}` methods.
    def test_get_historical_statistics2(self) -> None:
        """
        Check historical statistics for a Portfolio with holdings.
        """
        with hasynci.solipsism_context() as event_loop:
            # The wall clock getter is unused here.
            (
                market_data,
                _,
            ) = mdata.get_ReplayedTimeMarketData_example3(event_loop)
            # Build Broker.
            broker = obroexam.get_simulated_broker_example1(
                event_loop, market_data=market_data
            )
            # Build Portfolio.
            strategy_id = "str1"
            account = "paper"
            asset_id_col = "asset_id"
            mark_to_market_col = "price"
            pricing_method = "last"
            timestamp_col = "end_datetime"
            holdings_dict = {101: 727.5, 202: 1040.3, -1: 10000}
            portfolio = omportfo.SimulatedPortfolio.from_dict(
                strategy_id,
                account,
                broker,
                asset_id_col,
                mark_to_market_col,
                pricing_method,
                timestamp_col,
                holdings_dict=holdings_dict,
            )
            # NOTE: a stray trailing 'i' in the timestamp header was
            # removed; it was latent because the column label is
            # overwritten with a proper Timestamp below.
            txt = r"""
,2000-01-01 09:35:00-05:00
net_asset_holdings,1768351.42
cash,10000.0
net_wealth,1778351.42
gross_exposure,1768351.42
leverage,0.994
pnl,NaN
realized_pnl,NaN
unrealized_pnl,NaN
"""
            expected = pd.read_csv(
                io.StringIO(txt),
                index_col=0,
            )
            # The timestamp doesn't parse correctly from the CSV.
            initial_timestamp = pd.Timestamp(
                "2000-01-01 09:35:00-05:00", tz="America/New_York"
            )
            expected.columns = [initial_timestamp]
            actual = portfolio.get_historical_statistics().transpose()
            self.assert_dfs_close(actual, expected, rtol=1e-2, atol=1e-2)

    def test_get_historical_statistics3(self) -> None:
        """
        Check historical statistics against hand-built replayed market data.
        """
        with hasynci.solipsism_context() as event_loop:
            tz = "ET"
            initial_timestamp = pd.Timestamp(
                "2000-01-01 09:35:00-05:00", tz="America/New_York"
            )
            get_wall_clock_time = creatime.get_replayed_wall_clock_time(
                tz,
                initial_timestamp,
                event_loop=event_loop,
            )
            # One price bar for a single asset.
            price_txt = r"""
start_datetime,end_datetime,asset_id,price
2000-01-01 09:30:00-05:00,2000-01-01 09:35:00-05:00,100,100.34
"""
            price_df = pd.read_csv(
                io.StringIO(price_txt),
                parse_dates=["start_datetime", "end_datetime"],
            )
            start_time_col_name = "start_datetime"
            end_time_col_name = "end_datetime"
            knowledge_datetime_col_name = "end_datetime"
            delay_in_secs = 0
            asset_id_col_name = "asset_id"
            asset_ids = None
            columns = []
            market_data = mdata.ReplayedMarketData(
                price_df,
                knowledge_datetime_col_name,
                delay_in_secs,
                asset_id_col_name,
                asset_ids,
                start_time_col_name,
                end_time_col_name,
                columns,
                get_wall_clock_time,
            )
            portfolio = oporexam.get_simulated_portfolio_example1(
                event_loop,
                market_data=market_data,
            )
            # Check.
            txt = r"""
,2000-01-01 09:35:00-05:00
net_asset_holdings,0
cash,1000000.0
net_wealth,1000000.0
gross_exposure,0.0
leverage,0.0
pnl,NaN
realized_pnl,NaN
unrealized_pnl,NaN
"""
            expected = pd.read_csv(
                io.StringIO(txt),
                index_col=0,
            )
            # The timestamp doesn't parse correctly from the CSV.
            expected.columns = [initial_timestamp]
            actual = portfolio.get_historical_statistics().transpose()
            self.assert_dfs_close(actual, expected, rtol=1e-2, atol=1e-2)
# #############################################################################
def _get_row1() -> pd.Series:
    """
    Build a `current_positions` row with zero `net_cost`.
    """
    # NOTE(review): csv_to_series presumably parses each "key,value" line
    # into an index/value pair -- confirm against helpers.hsql.
    row = """
strategyid,SAU1
account,candidate
id,0
tradedate,2000-01-01
timestamp_db,2000-01-01 21:38:39.419536
asset_id,101
target_position,10
current_position,20.0
open_quantity,0
net_cost,0
bod_position,0
bod_price,0
"""
    srs = hsql.csv_to_series(row, sep=",")
    return srs
def _get_row2() -> pd.Series:
    """
    Build a `current_positions` row with a non-zero `net_cost`.

    Same as `_get_row1()` except `net_cost`, used to exercise cost
    accounting.
    """
    row = """
strategyid,SAU1
account,candidate
id,0
tradedate,2000-01-01
timestamp_db,2000-01-01 21:38:39.419536
asset_id,101
target_position,10
current_position,20.0
open_quantity,0
net_cost,1903.1217
bod_position,0
bod_price,0
"""
    srs = hsql.csv_to_series(row, sep=",")
    return srs
class TestMockedPortfolio1(omtodh.TestOmsDbHelper):
    """
    Test `MockedPortfolio` updates driven by the OMS current-positions
    table.
    """

    def test1(self) -> None:
        """
        Test that the update of Portfolio works.
        """
        with hasynci.solipsism_context() as event_loop:
            # Create current positions in the table.
            row = _get_row1()
            table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
            oomsdb.create_current_positions_table(
                self.connection, incremental=False, table_name=table_name
            )
            hsql.execute_insert_query(self.connection, row, table_name)
            # Debugging aid: flip to True to dump the DB contents and abort.
            if False:
                # Print the DB status.
                query = """SELECT * FROM current_positions"""
                df = hsql.execute_query_to_df(self.connection, query)
                print(hprint.dataframe_to_str(df))
                assert 0
            #
            # Create MockedPortfolio with some initial cash.
            portfolio = oporexam.get_mocked_portfolio_example1(
                event_loop,
                self.connection,
                table_name,
                asset_ids=[101],
            )
            coroutines = [self._coroutine1(portfolio)]
            hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)

    def test2(self) -> None:
        """
        Test that the update of Portfolio works when accounting for costs.
        """
        with hasynci.solipsism_context() as event_loop:
            # Create current positions in the table.
            row = _get_row2()
            table_name = oomsdb.CURRENT_POSITIONS_TABLE_NAME
            oomsdb.create_current_positions_table(
                self.connection, incremental=False, table_name=table_name
            )
            hsql.execute_insert_query(self.connection, row, table_name)
            # Debugging aid: flip to True to dump the DB contents and abort.
            if False:
                # Print the DB status.
                query = """SELECT * FROM current_positions"""
                df = hsql.execute_query_to_df(self.connection, query)
                print(hprint.dataframe_to_str(df))
                assert 0
            #
            # Create MockedPortfolio with some initial cash.
            portfolio = oporexam.get_mocked_portfolio_example1(
                event_loop,
                self.connection,
                table_name,
                asset_ids=[101],
            )
            coroutines = [self._coroutine2(portfolio)]
            hasynci.run(asyncio.gather(*coroutines), event_loop=event_loop)

    async def _coroutine1(
        self,
        portfolio,
    ):
        # Let replayed time advance by 5 minutes before marking to market.
        await asyncio.sleep(60 * 5)
        portfolio.mark_to_market()
        # Check.
        actual = str(portfolio)
        # Golden output; fuzzy_match=True normalizes whitespace.
        expected = r"""# historical holdings=
asset_id 101 -1
2000-01-01 09:35:00-05:00 0.0 1000000.0
2000-01-01 09:40:00-05:00 20.0 1000000.0
# historical holdings marked to market=
asset_id 101 -1
2000-01-01 09:35:00-05:00 0.00 1000000.0
2000-01-01 09:40:00-05:00 20004.03 1000000.0
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.0 1.00e+06 0.00 0.00 NaN NaN NaN
2000-01-01 09:40:00-05:00 20004.03 1000000.0 1.02e+06 20004.03 0.02 20004.03 0.0 20004.03"""
        self.assert_equal(actual, expected, fuzzy_match=True)

    async def _coroutine2(
        self,
        portfolio,
    ):
        # Let replayed time advance by 5 minutes before marking to market.
        await asyncio.sleep(60 * 5)
        portfolio.mark_to_market()
        # Check.
        actual = str(portfolio)
        # As _coroutine1, but cash is reduced by the 1903.12 net cost.
        expected = r"""# historical holdings=
asset_id 101 -1
2000-01-01 09:35:00-05:00 0.0 1000000.00
2000-01-01 09:40:00-05:00 20.0 998096.88
# historical holdings marked to market=
asset_id 101 -1
2000-01-01 09:35:00-05:00 0.00 1000000.00
2000-01-01 09:40:00-05:00 20004.03 998096.88
# historical statistics=
net_asset_holdings cash net_wealth gross_exposure leverage pnl realized_pnl unrealized_pnl
2000-01-01 09:35:00-05:00 0.00 1000000.00 1.00e+06 0.00 0.00 NaN NaN NaN
2000-01-01 09:40:00-05:00 20004.03 998096.88 1.02e+06 20004.03 0.02 18100.91 -1903.12 20004.03"""
        self.assert_equal(actual, expected, fuzzy_match=True)
|
# -*- coding: utf-8 -*-
from django.shortcuts import render, redirect
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.http import HttpResponse, HttpResponseRedirect
from .models import PATIENT, FRAX, LVA, APSPINE, DUALFEMUR, COMBINATION
from uploads.core.models import Document
from uploads.core.forms import nameForm
from os import listdir
from os.path import isfile, join
import matplotlib.pyplot as plt
import pydicom
import zipfile
import cv2
import os
import shutil
import time
import re
from datetime import datetime
from pydicom.data import get_testdata_files
from decimal import Decimal, getcontext
import io
from django.http import FileResponse
from reportlab.pdfgen import canvas
# variable naming principle:
# myfile: .dcm
# myZipFile: .zip
# zipFolder: folder
# fileName: no .dcm
# session:
# upload_zip: myZipFile, pid
# show_dcm/ manage_show_zip: myfile
def home(request):
    """Render the site landing page."""
    return render(request, 'core/home.html')
def patient_data(filepath, saveType):
    """Read patient demographics from a DICOM file.

    :param filepath: path to the .dcm file
    :param saveType: 'uploadZIP' additionally saves a PATIENT row to the
        DB; any other value only parses and returns the data
    :returns: dict with pid, name, sex, age, mp, the pydicom dataset and
        the upload timestamp string
    """
    # read file
    dataset = pydicom.dcmread(filepath)
    # add upload time
    datetime_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    pid = dataset.PatientID
    name = str(dataset.PatientName)
    sex = dataset.PatientSex
    # PatientAge is like '063Y': drop the unit suffix and parse the
    # number (int() already ignores leading zeros, so the previous
    # character-list surgery was unnecessary).
    age = int(str(dataset.PatientAge)[:-1])
    # Extract the MP code from names like 'DOE^JOHN (PM123)' -> '123'.
    PatientName = str(dataset.PatientName)
    if "(PM" in PatientName:
        inside_parens = PatientName.split('(')[1].split(')')[0]
        # Drop the leading 'PM' prefix.
        mp = inside_parens[2:]
    else:
        mp = ''
    response = {
        'pid': pid,
        'name': name,
        'sex': sex,
        'age': age,
        'mp': mp,
        'dataset': dataset,
        'dateTime': datetime_str,
    }
    if saveType == 'uploadZIP':
        file_path = '/media/ZIP/' + pid
        # save to DB
        fileInstance = PATIENT(pid=pid, file_path=file_path,
                               pub_date=datetime_str, name=name,
                               sex=sex, age=age, mp=mp)
        fileInstance.save()
    else:
        print('file does not save to DB')
    return response
def _format_rtz(region, tscore, zscore):
    """Render parallel (region, tscore, zscore) triples as
    '(r1,t1,z1), (r2,t2,z2), ...'.

    Iterates over len(tscore) indices, so region/zscore must be at least as
    long (matches the original loops, which indexed all three by i).
    """
    return ', '.join(
        '(' + region[i] + ',' + tscore[i] + ',' + zscore[i] + ')'
        for i in range(len(tscore))
    )
def _deformities(comment):
    """Return the non-'None' DEFORMITY tag values from the split comment."""
    values = [s.split('</')[0].split('>')[1] for s in comment if "DEFORMITY" in s]
    return [v for v in values if v != 'None']
def str_data(dataset, saveType):
    """Parse a DXA result dataset's ImageComments and extract scan values.

    ImageComments holds a pseudo-XML string; it is split on '><' and
    classified by how many 'SCAN type' tags it contains:
      0   -> FRAX fracture-risk report
      1   -> a single scan ('LVA', 'AP Spine' or 'DualFemur')
      2/3 -> a combination report
    saveType 'uploadZIP' also persists the values to the matching model;
    any other value only returns them.  Returns a dict for the templates.
    """
    response = {}
    pid = dataset.PatientID
    comment = dataset.ImageComments.split('><')
    match = [s for s in comment if "SCAN type" in s]
    length = len(match)
    # 0 scan types: FRAX report with major/hip fracture risks
    if length == 0:
        response['scanType'] = 'FRAX'
        majorFrac = ''.join([s for s in comment if "MAJOR_OSTEO_FRAC_RISK units" in s])
        if majorFrac == '':
            majorFrac = '(None)'
        else:
            majorFrac = majorFrac.split('</')[0].split('>')[1]
        response['majorFrac'] = majorFrac
        hipFrac = ''.join([s for s in comment if "HIP_FRAC_RISK units" in s])
        if hipFrac == '':
            hipFrac = '(None)'
        else:
            hipFrac = hipFrac.split('</')[0].split('>')[1]
        response['hipFrac'] = hipFrac
        if saveType == 'uploadZIP':
            # save to DB
            FRAX(pid=pid, scantype='FRAX', majorFracture=majorFrac, hipFracture=hipFrac).save()
        else:
            print('file does not save to DB')
    # at least one scan type: collect T/Z scores and regions first
    else:
        comments = t_z_r(comment)
        response['tscore'] = comments['str_tscore']
        response['zscore'] = comments['str_zscore']
        response['region'] = comments['str_region']
        tscore = comments['tscore']
        zscore = comments['zscore']
        region = comments['region']
        if length == 1:
            scanType = ''.join(match).split('"')[1]
            response['scanType'] = scanType
            # LVA
            if scanType == 'LVA':
                lva = _deformities(comment)
                response['lva'] = lva
                if saveType == 'uploadZIP':
                    LVA(pid=pid, scantype=scanType, lva=lva).save()
                else:
                    print('file does not save to DB')
            # AP Spine
            elif scanType == 'AP Spine':
                APSpine = _format_rtz(region, tscore, zscore)
                response['APSpine'] = APSpine
                if saveType == 'uploadZIP':
                    APSPINE(pid=pid, scantype=scanType, tscore=tscore, zscore=zscore, region=region, apspine=APSpine).save()
                else:
                    print('file does not save to DB')
            # Dual Femur
            elif scanType == 'DualFemur':
                DualFemur = _format_rtz(region, tscore, zscore)
                response['DualFemur'] = DualFemur
                if saveType == 'uploadZIP':
                    DUALFEMUR(pid=pid, scantype=scanType, tscore=tscore, zscore=zscore, region=region, dualfemur=DualFemur).save()
                else:
                    print('file does not save to DB')
            else:
                print('error input')
        # multi scanType: combination
        elif length == 2:
            scanType = 'combination'
            response['scanType'] = scanType
            # keep the first region plus the trailing four summary regions
            del region[1:-4]
            # the report stores the identical formatted string under all
            # three keys (matches the original's three identical loops)
            combination = _format_rtz(region, tscore, zscore)
            response['combination'] = combination
            response['APSpine'] = combination
            response['DualFemur'] = combination
            if saveType == 'uploadZIP':
                COMBINATION(pid=pid, scantype=scanType, tscore=tscore, zscore=zscore, region=region, lva='None', apspine=combination, dualfemur=combination, combination=combination, t7='None').save()
            else:
                print('file does not save to DB')
        # multi scanType: combination (with LVA/T7)
        elif length == 3:
            scanType = 'combination'
            response['scanType'] = scanType
            del region[1:-4]
            combination = _format_rtz(region, tscore, zscore)
            response['combination'] = combination
            # BUG FIX: the original ran `while 'None' in lva: lva.remove(substring)`
            # where `substring` was the *last* DEFORMITY value iterated — raising
            # ValueError (or looping forever) whenever that value was not 'None'.
            # Filtering the 'None' entries is what the loop intended.
            lva = _deformities(comment)
            response['lva'] = lva
            response['APSpine'] = combination
            response['DualFemur'] = combination
            # T7: value of the first DEFORMITY tag
            T7 = ''.join([s for s in comment if "DEFORMITY" in s])
            T7 = T7.split('</')[0].split('>')[1]
            response['T7'] = T7
            if saveType == 'uploadZIP':
                COMBINATION(pid=pid, scantype=scanType, tscore=tscore, zscore=zscore, region=region, lva=lva, apspine=combination, dualfemur=combination, combination=combination, t7=T7).save()
            else:
                print('file does not save to DB')
    return response
def t_z_r(comment):
    """Collect T-scores, Z-scores and ROI region names from comment chunks.

    `comment` is the ImageComments string already split on '><'.  Returns a
    dict with the list values ('tscore', 'zscore', 'region') and their
    comma-joined display forms ('str_tscore', 'str_zscore', 'str_region').
    """
    def tag_value(chunk):
        # '<TAG attrs>value</TAG' -> 'value'
        return chunk.split('</')[0].split('>')[1]
    tscore = [tag_value(c) for c in comment if "BMD_TSCORE" in c]
    zscore = [tag_value(c) for c in comment if "BMD_ZSCORE" in c]
    # region name sits in the first quoted attribute: ROI region="L1"
    region = [c.split('"')[1] for c in comment if "ROI region" in c]
    return {
        'tscore': tscore,
        'zscore': zscore,
        'region': region,
        'str_region': ', '.join(region),
        'str_tscore': ', '.join(tscore),
        'str_zscore': ', '.join(zscore),
    }
def main_upload(request):
    """Render the upload-section menu page."""
    return render(request, 'core/main_upload.html')
def upload_dcm(request):
    """Upload a single .dcm file, parse it, and show the result.

    The saved file is moved into media/DCM/.  An image-type dataset gets a
    JPG report written to media/DCM/JPG/; a value-type ('STR') dataset is
    parsed with str_data().  Duplicate filenames trigger a warning instead.
    """
    if request.method == 'POST' and request.FILES['myfile']:
        response = {}
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        # rebinds myfile to the name the storage actually used
        myfile = fs.save(myfile.name, myfile)
        fileName = ''.join(list(myfile)[:-4])
        # get file list in the folder
        onlyfiles = [f for f in listdir('media/DCM/') if isfile(join('media/DCM/', f))]
        # if the file name already exists, show warning
        if myfile in onlyfiles:
            os.remove('media/'+myfile)
            response = {
                'warning':myfile
            }
            return render(request, 'core/upload_dcm.html', response)
        else:
            # move file from media/ to media/dcm/ folder
            shutil.move('media/'+myfile, 'media/DCM/'+myfile)
            dcmFilePath = 'media/DCM/' + myfile
            # patient_data ('dcm' => parse only, no DB save)
            data = patient_data(dcmFilePath, 'dcm')
            dataset = data['dataset']
            response.update(data)
            # NOTE(review): bare excepts below deliberately probe for optional
            # DICOM attributes (pixel_array / ImageComments), but they also
            # swallow any other error — consider AttributeError only.
            try: # get image report from IMG file
                dataset.pixel_array
                if cv2.imwrite('media/DCM/JPG/' + fileName + '_report.jpg', dataset.pixel_array):
                    # must add a '/' ahead
                    response['report'] = '/media/DCM/JPG/' + fileName + '_report.jpg'
            except: # get value from STR file
                try:# get value from STR file
                    dataset.ImageComments
                    response.update(str_data(dataset, 'dcm'))
                except:
                    response['except'] = dataset
            uploaded_file_url = fs.url(myfile)
            response['uploaded_file_url'] = uploaded_file_url
            return render(request, 'core/upload_dcm.html', response)
    else:
        return render(request, 'core/upload_dcm.html')
def zip_process(myZipFile, zipFolder):
    """Extract an uploaded patient ZIP and register its DICOM files.

    Extracts media/<myZipFile> into media/<zipFolder>, reads every .dcm it
    contains, saves patient/scan rows to the DB ('uploadZIP' mode), then
    renames the extracted folder to the patient ID under media/ZIP/.
    Returns a dict; on a duplicate patient ID it contains only the
    'warning_origin'/'warning_pid' keys (no 'pid').
    """
    response={}
    # get file list in the folder
    print(listdir('media/ZIP/'))
    # NOTE(review): despite the name, this collects *directory* names —
    # the already-registered patient-ID folders under media/ZIP/
    onlyfiles = [f for f in listdir('media/ZIP/') if os.path.isdir(os.path.join('media/ZIP/', f))]
    print('onlyfiles', onlyfiles)
    DCMFiles = []
    # read zip file
    zip_file = zipfile.ZipFile(os.path.join(os.getcwd(), 'media/', myZipFile))
    # extract zip file
    for _file in zip_file.namelist():
        zip_file.extract(_file, os.path.join(os.getcwd(), 'media/'))
        if ".dcm" in _file.lower():
            DCMFiles.append(_file)
    zip_file.close()
    # if the filename(pid.zip) already exists, show warning
    # NOTE(review): assumes the archive holds at least one .dcm — an empty
    # archive raises IndexError here; confirm upstream validation
    dcmFilePath = 'media/' + str(DCMFiles[-1])
    pid = pydicom.dcmread(dcmFilePath).PatientID
    print('pid',pid)
    if pid in onlyfiles:
        os.remove('media/'+myZipFile)
        shutil.rmtree('media/'+zipFolder)
        response = {
            'warning_origin':myZipFile,
            'warning_pid':pid
        }
    else:
        # remove zip
        os.remove('media/'+myZipFile)
        shutil.move('media/'+zipFolder, 'media/ZIP/'+zipFolder)
        # read each file and save to DB
        for file in DCMFiles:
            dcmFilePath = 'media/ZIP/' + file
            dataset = pydicom.dcmread(dcmFilePath)
            try: # get image report from IMG file
                dataset.pixel_array
                if cv2.imwrite('media/ZIP/JPG/' + file + '_report.jpg', dataset.pixel_array):
                    # must add a '/' ahead
                    response['report'] = '/media/ZIP/JPG/' + file + '_report.jpg'
            except:
                try:# get value from STR file
                    dataset.ImageComments
                    response.update(str_data(dataset, 'uploadZIP'))
                except:
                    response['except'] = dataset
            response.update(patient_data(dcmFilePath, 'uploadZIP'))
        #change filename to pid
        # os.rename('media/ZIP/' + myZipFile, 'media/ZIP/' + response['pid'] + '.zip')
        os.rename('media/ZIP/' + zipFolder, 'media/ZIP/' + response['pid'])
    # response['myZipFile'] = myZipFile
    return response
def upload_zip(request):
    """Upload a single patient ZIP archive and show its directory tree.

    The archive is extracted and registered by zip_process(); on success the
    patient ID is stored in the session and a text directory tree of the
    extracted folder is built for the template.  A duplicate upload renders
    the warning variant instead.
    """
    if request.method == 'POST' and request.FILES['myfile']:
        response = {}
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        myZipFile = fs.save(myfile.name, myfile)
        # folder name of the extracted zip file ('x.zip' -> 'x')
        zipFolder = myZipFile[:-4]
        response.update(zip_process(myZipFile, zipFolder))
        # BUG FIX: the session was previously set from response['pid'] *before*
        # the duplicate-upload check; a duplicate upload returns no 'pid' key
        # and raised KeyError.  Set it only on the success path below.
        if 'warning_origin' in response:
            return render(request, 'core/upload_zip.html', response)
        else:
            request.session['myfile'] = response['pid']
            pidFolder = 'media/ZIP/' + response['pid']
            # directory tree (display strings, indented with '---')
            dir_tree = []
            # parallel list: '' for directories, the file name for files
            file_tree = []
            # traverse root directory, and list directories as dirs and files as files
            for root, dirs, files in os.walk(pidFolder):
                path = root.split(os.sep)
                line = ((len(path) - 1) * '---', os.path.basename(root))
                line = ''.join(line)
                dir_tree.append(line)
                file_tree.append('')
                for file in files:
                    line = (len(path) * '---', file)
                    line = ''.join(line)
                    dir_tree.append(line)
                    file_tree.append(file)
            response['dir_tree'] = dir_tree
            response['file_tree'] = file_tree
            # zip so that templates can show both lists side by side
            file_dir_list = zip(dir_tree, file_tree)
            response['file_dir_list'] = file_dir_list
            response['uploaded_file_url'] = fs.url(myZipFile)
            return render(request, 'core/upload_zip.html', response)
    else:
        return render(request, 'core/upload_zip.html')
def upload_multi_zip(request):
    """Upload several patient ZIP archives in one request.

    Each archive is processed by zip_process(); duplicates go to 'failed',
    the rest to 'success'.  The elapsed wall time is reported as 'time'.
    """
    if request.method == 'POST' and request.FILES.getlist('myfile'):
        # BUG FIX: time.clock() was removed in Python 3.8;
        # perf_counter() is the portable elapsed-time replacement.
        start = time.perf_counter()
        response = {}
        errorlist = []
        successlist = []
        myfiles = request.FILES.getlist('myfile')
        fs = FileSystemStorage()
        for myfile in myfiles:
            myZipFile = fs.save(myfile.name, myfile)
            # folder name of the extracted zip file ('x.zip' -> 'x')
            zipFolder = myZipFile[:-4]
            data = zip_process(myZipFile, zipFolder)
            if 'warning_origin' in data:
                errorlist.append(data['warning_origin'])
            else:
                successlist.append(myZipFile)
        response['failed'] = errorlist
        response['success'] = successlist
        response['time'] = time.perf_counter() - start
        # URL of the last archive saved (guard above ensures at least one)
        response['uploaded_file_url'] = fs.url(myZipFile)
        return render(request, 'core/upload_multi_zip.html', response)
    else:
        return render(request, 'core/upload_multi_zip.html')
def upload_multi_in_one_zip(request):
    """Upload ONE ZIP that contains several patient folders.

    The archive is extracted, then each top-level folder is treated as one
    patient: its .dcm files are read, saved to the DB, and the folder is
    renamed to the patient ID under media/ZIP/.  Folders whose patient ID is
    already registered are skipped and reported in 'failed'.
    """
    if request.method == 'POST' and request.FILES['myfile']:
        # BUG FIX: time.clock() was removed in Python 3.8;
        # perf_counter() is the portable elapsed-time replacement.
        start = time.perf_counter()
        response = {}
        listFolder = []
        errorlist = []
        successlist = []
        myfile = request.FILES['myfile']
        fs = FileSystemStorage()
        myZipFile = fs.save(myfile.name, myfile)
        zipFolder = myZipFile[:-4]  # drop '.zip'
        # read zip file
        zip_file = zipfile.ZipFile(os.path.join(os.getcwd(), 'media/', myZipFile))
        # extract zip file
        for _file in zip_file.namelist():
            zip_file.extract(_file, os.path.join(os.getcwd(), 'media/'))
        zip_file.close()
        # get folders (one per patient)
        extractFolder = 'media/' + zipFolder
        for folders in os.listdir(extractFolder):
            listFolder.append(folders)
        onlyfiles = []
        # already-registered patient folders under media/ZIP/
        for f in os.listdir('media/ZIP/'):
            onlyfiles.append(f)
        DCMFiles = []
        for _folders in listFolder:
            DCMFiles = []
            folders = extractFolder + '/' + _folders
            # get dataset
            # if the zip file has the normal folder structure
            if os.path.isdir(folders + '/SDY00000/'):
                tag = 'normal_folder'
                strFolderPath = folders + '/SDY00000/'
                for _file in os.listdir(strFolderPath):
                    if ".dcm" in _file.lower():
                        DCMFiles.append('/SDY00000/' + _file)
                # NOTE(review): assumes at least one .dcm exists here,
                # otherwise IndexError — confirm upstream validation
                pid = pydicom.dcmread(folders + DCMFiles[0]).PatientID
            # if the file has an abnormal folder structure
            else:
                tag = 'abnormal_folder'
                for _path in os.listdir(folders):
                    _file_dir = os.path.join(folders, _path) ##
                    for path_ in os.listdir(_file_dir):
                        file_dir = os.path.join(_file_dir, path_)
                        for path in os.listdir(file_dir):
                            filePath = '/'+_path+'/'+path_+'/'+path
                            dataset = pydicom.dcmread(folders+filePath)
                            try:
                                dataset.ImageComments
                                DCMFiles.append(filePath)
                            except:
                                print('not str dcm file')
                # NOTE(review): uses the *last* file walked; unbound if the
                # folder tree is empty — confirm inputs always contain files
                pid = pydicom.dcmread(folders+'/'+filePath).PatientID
            # skip folders whose patient ID is already registered
            if pid in onlyfiles:
                shutil.rmtree(folders)
                errorlist.append(_folders)
            else:
                # move folder from media/<zipFolder>/ to media/ZIP/
                shutil.move('media/'+zipFolder+'/'+_folders, 'media/ZIP/'+_folders)
                # read each file and save to DB
                for _file in DCMFiles:
                    dcmFilePath = 'media/ZIP/' + _folders + _file
                    dataset = pydicom.dcmread(dcmFilePath)
                    try: # get image report from IMG file
                        dataset.pixel_array
                        if cv2.imwrite('media/ZIP/JPG/' + _file + '_report.jpg', dataset.pixel_array):
                            # must add a '/' ahead
                            response['report'] = '/media/ZIP/JPG/' + _file + '_report.jpg'
                    except:
                        try:# get value from STR file
                            dataset.ImageComments
                            response.update(str_data(dataset, 'uploadZIP'))
                        except:
                            response['except'] = dataset
                    response.update(patient_data(dcmFilePath, 'uploadZIP'))
                #change folder name to pid
                os.rename('media/ZIP/' + _folders, 'media/ZIP/' + pid)
                successlist.append(_folders)
        os.remove('media/' + myZipFile)
        shutil.rmtree('media/' + zipFolder)
        response['failed'] = errorlist
        response['success'] = successlist
        response['time'] = time.perf_counter() - start
        response['uploaded_file_url'] = fs.url(myZipFile)
        return render(request, 'core/upload_multi_in_one_zip.html', response)
    else:
        return render(request, 'core/upload_multi_in_one_zip.html')
def show_zip(request):
    """Show one file inside the current patient's extracted ZIP folder.

    The patient ID comes from the session; the clicked file name from the
    'file' query parameter.  The file is located by searching up to three
    directory levels, then parsed like upload_dcm (image report or STR
    values).  Nothing is saved to the DB ('zip' mode).
    """
    response = {}
    pid = request.session['myfile']
    print(pid)
    # get the file name user clicked from template
    myfile = request.GET.get('file', None)
    fileName = list(myfile)[:-4] # remove '.zip'
    fileName = ''.join(fileName)
    print(fileName)
    _file_dir = 'media/ZIP/' + pid
    if fileName in os.listdir(_file_dir):
        filePath = os.path.join(_file_dir, myfile)
    else:
        # def searchFile(_file_dir):
        #     print('_file_dir',_file_dir)
        #     for _path in os.listdir(_file_dir):
        #         _file_dir_ = os.path.join(os.getcwd()+'/'+_file_dir, _path)
        #         print('_path',_path)
        #         if ''.join(list(_path[-4:])) == '.dcm':
        #             if myfile == _path:
        #                 filePath = os.path.join(_file_dir, _path)
        #                 filePath = filePath.replace(os.getcwd(),'')
        #                 filePath = ''.join(list(filePath[1:]))
        #                 print('bingo', filePath)
        #                 return filePath
        #         elif os.path.isdir(_file_dir_):
        #             print('_file_dir_',_file_dir_)
        #             return searchFile(_file_dir_)
        # filePath = searchFile(_file_dir)
        # NOTE(review): this hand-rolled 3-level search only breaks the
        # innermost loop, and if the file is never found `filePath` stays
        # unbound and the dcmread below raises NameError — verify inputs.
        for _path in os.listdir(_file_dir):
            if ''.join(list(_path[-4:])) == '.dcm':
                if myfile == _path:
                    filePath = os.path.join(_file_dir, _path)
                    break
            elif os.path.isdir(os.path.join(os.getcwd()+'/'+_file_dir,_path)):
                file_dir_ = os.path.join(_file_dir, _path)
                for path_ in os.listdir(file_dir_):
                    if ''.join(list(path_[-4:])) == '.dcm':
                        if myfile == path_:
                            filePath = os.path.join(file_dir_, path_)
                            break
                    elif os.path.isdir(os.path.join(os.getcwd()+'/'+_file_dir+'/'+_path, path_)):
                        file_dir = os.path.join(file_dir_, path_)
                        for _path_ in os.listdir(file_dir):
                            if ''.join(list(_path_[-4:])) == '.dcm':
                                if myfile == _path_:
                                    filePath = os.path.join(file_dir, _path_)
                                    break
                            elif os.path.isdir(os.path.join(os.getcwd()+'/'+_file_dir+'/'+_path+'/'+path_,_path_)):
                                print('not found')
    # read file ('zip' mode: parse only, no DB save)
    data = patient_data(filePath, 'zip')
    dataset = data['dataset']
    response.update(data)
    try: # get image report from IMG file
        dataset.pixel_array
        if cv2.imwrite('media/ZIP/JPG/' + fileName + '_report.jpg', dataset.pixel_array):
            # must add a '/' ahead
            response['report'] = '/media/ZIP/JPG/' + fileName + '_report.jpg'
    except:
        try:# get value from STR file
            dataset.ImageComments
            response.update(str_data(dataset, 'zip'))
        except:
            response['except'] = dataset
    return render(request, 'core/show_zip.html', response)
def main_manage(request):
    """Render the manage-section menu page."""
    return render(request, 'core/main_manage.html')
def manage_dcm(request): #remove
    """List the uploaded .dcm files and delete the selected ones.

    POST ('selected' checkbox list): removes each file from media/DCM/ and
    its generated JPG report (if any), then renders the result page.
    GET: renders the file list.
    """
    folderPath = 'media/DCM/'
    # list files in the folder
    onlyfiles = [f for f in listdir(folderPath) if isfile(join(folderPath, f))]
    # remove selected files
    if request.method == 'POST':
        selected = request.POST.getlist('selected')
        result = ''
        response = {}
        for files in selected:
            os.remove(folderPath + files)
            # check if the file has corresponding report, if yes, remove as well
            fileName = list(files)[:-4] # remove '.dcm'
            fileName = ''.join(fileName)
            myReport = fileName + '_report.jpg'
            dir_list = os.listdir(folderPath + 'JPG/')
            if myReport in dir_list:
                os.remove(folderPath + 'JPG/' + myReport)
            result += fileName + ' '
        response['result'] = result
        return render(request, 'core/result.html', response)
    return render(request, 'core/manage_dcm.html', {
        'onlyfiles': onlyfiles,
    })
def show_dcm(request): #remove
    """Show (GET) or delete (POST) one uploaded .dcm file.

    POST deletes the file remembered in the session.  GET reads the file
    named by the 'file' query parameter, parses it like upload_dcm and
    renders the detail page.  Nothing is saved to the DB ('dcm' mode).
    """
    response = {}
    if request.method == 'POST':
        remove(request.session['myfile'], 'dcm')
        response['result'] = request.session['myfile']
        return render(request, 'core/result.html', response)
    # get the file user clicked from template
    # NOTE(review): myfile is None when 'file' is missing — list(None) raises
    myfile = request.GET.get('file', None)
    #request.session['myfile'] = myfile
    fileName = list(myfile)[:-4] # remove '.dcm'
    fileName = ''.join(fileName)
    # filePath preprocess
    filePath = 'media/DCM/' + myfile
    #request.session['filePath'] = filePath
    # patient_data ('dcm' => parse only, no DB save)
    data = patient_data(filePath, 'dcm')
    dataset = data['dataset']
    response.update(data)
    try: # get image report from IMG file
        dataset.pixel_array
        if cv2.imwrite('media/DCM/JPG/' + fileName + '_report.jpg', dataset.pixel_array):
            # must add a '/' ahead
            response['report'] = '/media/DCM/JPG/' + fileName + '_report.jpg'
    except:
        try:# get value from STR file
            dataset.ImageComments
            response.update(str_data(dataset, 'dcm'))
        except:
            response['except'] = dataset
    return render(request, 'core/show_dcm.html', response)
def manage_zip(request): #remove
    """List registered patients and delete the selected ones.

    POST: 'checked' holds the selected patient IDs; they are deleted from
    the DB and from media/ZIP/ via remove().  GET: renders the patient list.
    """
    response={}
    # select files to remove
    if request.method == 'POST':
        checked = request.POST.getlist('checked')
        # BUG FIX: the single-selection case previously called
        # remove(checked, 'zip'), passing the *list* where a pid string is
        # expected ('media/ZIP/' + list raised TypeError).  The 'multiZIP'
        # branch iterates the list and handles any count, including one.
        remove(checked, 'multiZIP')
        response['result'] = checked
        return render(request, 'core/result.html', response)
    # get patient data from DB
    patients = PATIENT.objects.all()
    return render(request, 'core/manage_zip.html', {'patients': patients})
def manage_show_zip(request):
    """Show (GET) or delete (POST) one registered patient's ZIP folder.

    POST deletes the patient remembered in the session.  GET stores the
    clicked patient ID in the session and renders a text directory tree of
    media/ZIP/<pid> for the template.
    """
    response={}
    if request.method == 'POST': #remove
        remove(request.session['myfile'], 'zip')
        response['result'] = request.session['myfile']
        return render(request, 'core/result.html', response)
    # get the file name user clicked from template
    myfile = request.GET.get('file', None)
    request.session['myfile'] = myfile
    zipFilePath = 'media/ZIP/' + myfile
    # NOTE(review): download() expects this '<pid>.zip' path, but zip_process
    # deletes the original archive — confirm the .zip still exists on disk
    request.session['filePath'] = zipFilePath + '.zip'
    response={
        'myZipFile': myfile,
        'zipFilePath': zipFilePath,
    }
    # directory tree (display strings, indented with '---')
    dir_tree = []
    # parallel list: '' for directories, the file name for files
    file_tree = []
    # traverse root directory, and list directories as dirs and files as files
    for root, dirs, files in os.walk(zipFilePath):
        path = root.split(os.sep)
        line = ((len(path) - 1) * '---', os.path.basename(root))
        line = ''.join(line)
        dir_tree.append(line)
        file_tree.append('')
        for file in files:
            line = (len(path) * '---', file)
            line = ''.join(line)
            dir_tree.append(line)
            file_tree.append(file)
    response['dir_tree'] = dir_tree
    response['file_tree'] = file_tree
    # zip so that templates can show both lists side by side
    file_dir_list = zip(dir_tree, file_tree)
    response['file_dir_list'] = file_dir_list
    response['uploaded_file_url'] = myfile
    return render(request, 'core/manage_show_zip.html', response)
def check_apspine(request):
    """Validate the machine-chosen AP Spine region selection for a patient.

    Steps: locate the patient's STR DICOM files (FRAX / LVA / AP Spine /
    combination), read the AP Spine T/Z-scores, pick the scoring group
    ('T' or 'Z') from age/sex/menopause, drop L1-L4 outliers (>1 from the
    mean of the middle two), and compare the resulting region span with the
    machine's own selection.  On a match, LVA deformities and FRAX risks
    are added; otherwise a re-generate warning is rendered.
    """
    # get the patient folder chosen earlier (stored in the session)
    pidFolder = request.session['myfile']
    response={}
    listFilesDCM = []
    tag = ''
    file_apspine=''
    file_lva=''
    # BUG FIX: file_frax must be initialised like the others — a patient
    # folder without a FRAX file previously raised NameError at the
    # `if file_frax==''` check instead of showing the warning page.
    file_frax=''
    zipFilePath = 'media/ZIP/' + pidFolder
    # if the zip file has the normal folder structure
    if os.path.isdir(zipFilePath + '/SDY00000/'):
        tag = 'normal_folder'
        strFolderPath = zipFilePath + '/SDY00000/'
        # recognize files through dataset
        # get list of the directory
        onlyFiles = os.listdir(strFolderPath)
        # get only 'str' files from the list
        for files in onlyFiles:
            if "STR" in files:
                listFilesDCM.append(strFolderPath + files)
    # if the file has an abnormal folder structure
    else:
        tag = 'abnormal_folder'
        _file_dir = 'media/ZIP/' + pidFolder
        for _path in os.listdir(_file_dir):
            file_dir_ = os.path.join(_file_dir, _path) ##
            for path_ in os.listdir(file_dir_):
                file_dir = os.path.join(file_dir_, path_)
                if ''.join(list(path_[-4:])) == '.dcm':
                    filePath = file_dir
                    dataset = pydicom.dcmread(filePath)
                    try:
                        dataset.ImageComments
                        listFilesDCM.append(filePath)
                    except:
                        print('not str dcm file')
                else:
                    for path in os.listdir(file_dir):
                        if ''.join(list(path[-4:])) == '.dcm':
                            filePath = os.path.join(file_dir, path)
                            dataset = pydicom.dcmread(filePath)
                            try:
                                dataset.ImageComments
                                listFilesDCM.append(filePath)
                            except:
                                print('not str dcm file')
    # browse through each file, search from dataset(scantype), and recognize the information(datatype)
    for files in listFilesDCM:
        dataset = pydicom.dcmread(files)
        comment = dataset.ImageComments
        comment = comment.split('><')
        match = [s for s in comment if "SCAN type" in s]
        length = len(match)
        # no scanType: major fracture (FRAX)
        if length == 0:
            file_frax = files
            scanType = 'Frax'
            response['scanType'] = scanType
        # at least one scanType:
        else:
            # classify through scanType
            if length == 1:
                scanType = ''.join(match)
                scanType = scanType.split('"')[1]
                response['scanType'] = scanType
                if scanType == 'LVA':
                    file_lva = files
                elif scanType == 'AP Spine':
                    file_apspine = files
                elif scanType == 'DualFemur':
                    file_dualfemur = files
                else:
                    print('error input')
            # multi scanType: combination
            else:
                file_combination = files
                scanType = 'combination'
                response['scanType'] = scanType
    # step 2: Obtain APspine
    if file_apspine=='':
        # NOTE(review): assumes listFilesDCM is non-empty — IndexError otherwise
        data = patient_data(listFilesDCM[0], 'zip')
        response.update(data)
        response['result_warn'] = 'Insufficient file resources: AP Spine'
        return render(request, 'core/check_apspine.html', response)
    else:
        apspineFilePath = file_apspine
        # read file
        dataset = pydicom.dcmread(apspineFilePath)
        comment = dataset.ImageComments.split('><')
        comments = t_z_r(comment)
        response['region'] = comments['str_region']
        response['tscore'] = comments['str_tscore']
        response['zscore'] = comments['str_zscore']
        region = comments['region']
        tscore = comments['tscore']
        zscore = comments['zscore']
        data = patient_data(apspineFilePath, 'zip')
        response.update(data)
        # decide scoring group: T-score vs Z-score, from age/menopause/sex
        age = int(data['age'])
        mp = data['mp']
        sex = data['sex']
        if age<20:
            group = 'Z'
        elif 20<=age<50:
            if mp == '':
                group = 'Z'
            else:
                if sex == 'F':
                    group = 'T'
                else:
                    group = 'Z'
        else:
            if mp == '':
                if sex == 'F':
                    group = 'Z'
                else:
                    group = 'T'
            else:
                group = 'T'
        response['group'] = group
        merge = list(zip(region, tscore, zscore))
        # get the outcome from the machine
        # NOTE(review): assumes the first four entries are L1-L4 and the rest
        # are the machine's summary selections — confirm against sample data
        machineMerge = merge[4:]
        machineOutcome = ''
        list_machineOutcome =[]
        for substring in machineMerge:
            if machineOutcome == '':
                machineOutcome = str(substring[0])
            else:
                machineOutcome = machineOutcome + ', ' + str(substring[0])
            list_machineOutcome.append(substring[0])
        response['machineOutcome'] = machineOutcome
        merge = merge[:4]
        # sort(according to tscore or zscore)
        def getT(item):
            return float(item[1])
        def getZ(item):
            return float(item[2])
        getcontext().prec = 3
        if group == 'T':
            merge = sorted(merge, key=getT)
            # get mean of the middle two values and distances of the extremes
            mean = (Decimal(merge[1][1]) + Decimal(merge[2][1]))/2
            dist1 = abs(Decimal(merge[0][1]) - mean)
            dist2 = abs(mean - Decimal(merge[3][1]))
            response['mean'] = mean
            response['dist1'] = dist1
            response['dist2'] = dist2
        elif group:
            merge = sorted(merge, key=getZ)
            # get mean of the middle two values and distances of the extremes
            mean = (Decimal(merge[1][2]) + Decimal(merge[2][2]))/2
            dist1 = abs(Decimal(merge[0][2]) - mean)
            dist2 = abs(mean - Decimal(merge[3][2]))
            response['mean'] = mean
            response['dist1'] = dist1
            response['dist2'] = dist2
        # regionFilter: remove outliers more than 1 from the mean
        regionFilter = ['L1','L2','L3','L4']
        if dist1 > 1:
            regionFilter.remove(merge[0][0])
        if dist2 > 1:
            regionFilter.remove(merge[3][0])
        response['regionFilter'] = ', '.join(regionFilter)
        # format the span, putting skipped middle vertebrae in '()'
        start = regionFilter[0]
        end = regionFilter[-1]
        region = ['L1','L2','L3','L4']
        index1 = region.index(start)
        index2 = region.index(end)
        region = region[index1:index2+1]
        diffRegion = ','.join([item for item in region if item not in regionFilter])
        if diffRegion == '':
            outcome = str(start + '-' + end)
        else:
            outcome = str(start + '-' + end + '(' + diffRegion + ')')
        list_outcome = ''.join(list(outcome))
        response['outcome'] = outcome
        # check the result to determine re-gen or not
        if list_outcome in list_machineOutcome:
            # step 4: Obtain LVA
            if file_lva=='':
                response['result_warn'] = 'Insufficient file resources: LVA'
                return render(request, 'core/check_apspine.html', response)
            else:
                lvaFilePath = file_lva
                # read file
                dataset = pydicom.dcmread(lvaFilePath)
                comment = dataset.ImageComments
                comment = comment.split('><')
                # get region
                region4 = [s for s in comment if "ROI region" in s]
                region=[]
                for substring in region4:
                    substring = substring.split('"')[1]
                    region.append(substring)
                # get deformity
                keyword4 = [s for s in comment if "DEFORMITY" in s]
                lva=[]
                for substring in keyword4:
                    substring = substring.split('</')[0].split('>')[1]
                    lva.append(substring)
                # zip two features
                merge = list(zip(region, lva))
                # get outcome: '(region, deformity)' for non-'None' entries
                lvagrade = ''
                for substring in merge:
                    if substring[1] != 'None':
                        if lvagrade == '':
                            lvagrade = '(' + substring[0] + ', ' + substring[1] + ')'
                        else:
                            lvagrade += ', ' + '(' + substring[0] + ', ' + substring[1] + ')'
                response['grade'] = lvagrade
                # step 4: Obtain FRAX
                if file_frax=='':
                    response['result_warn'] = 'Insufficient file resources: frax'
                    return render(request, 'core/check_apspine.html', response)
                else:
                    fraxFilePath = file_frax
                    # read file
                    dataset = pydicom.dcmread(fraxFilePath)
                    comment = dataset.ImageComments
                    comment = comment.split('><')
                    # get major frac
                    majorFrac = [s for s in comment if "MAJOR_OSTEO_FRAC_RISK units" in s]
                    majorFrac = ''.join(majorFrac)
                    majorFrac = majorFrac.split('</')[0].split('>')[1]
                    response['majorFrac'] = majorFrac
                    # get hip frac
                    hipFrac = [s for s in comment if "HIP_FRAC_RISK units" in s]
                    hipFrac = ''.join(hipFrac)
                    hipFrac = hipFrac.split('</')[0].split('>')[1]
                    response['hipFrac'] = hipFrac
                    response['result_correct'] = 'Correct'
                    #TODO: Object of type 'bytes' is not JSON serializable
                    #request.session['reportVar'] = response['group']
                    # for key in list(response.keys()):
                    #     request.session[key] = response[key]
                    #     print(key)
                    # must add after session
                    response.update(data)
                    return render(request, 'core/check_apspine.html', response)
        else:
            response['result_warn'] = 'Warn!! Please Re-gen.'
            return render(request, 'core/check_apspine.html', response)
def statistics(request):
    """Render the statistics page."""
    return render(request, 'core/statistics.html')
def report(request):
    """Generate a one-line PDF report from session data via reportlab.

    NOTE(review): expects request.session['reportVar'] to be a dict with a
    'group' key, but the only code that would set it (in check_apspine) is
    commented out — this view likely raises KeyError as-is; confirm.
    """
    reportVar = request.session['reportVar']
    print(reportVar)
    reportText = "Average bone mineral density(BMD) of L1 to L4 is " + reportVar['group'] + "gm/cm2, "
    # Create the HttpResponse object with the appropriate PDF headers.
    response = HttpResponse(content_type='application/pdf')
    response['Content-Disposition'] = 'attachment; filename="somefilename.pdf"'
    # Create the PDF object, using the response object as its "file."
    p = canvas.Canvas(response)
    # Draw things on the PDF. Here's where the PDF generation happens.
    # See the ReportLab documentation for the full list of functionality.
    p.drawString(90, 750, reportText)
    # Close the PDF object cleanly, and we're done.
    p.showPage()
    p.save()
    return response
def rename(request):
    """Rename the file/folder remembered in the session.

    For a ZIP upload, both the .zip and the extracted folder are renamed;
    for a .dcm file, the file and its JPG report (if any) are renamed.
    The new name comes from the posted nameForm and is assumed to carry the
    same extension as the original — TODO confirm the form enforces this.
    """
    # get file name from show_DCM/manage_show_zip
    myfile = request.session['myfile']
    # get file type (dcm or zip)
    fileType = list(myfile)[-3:]
    fileType = ''.join(fileType)
    # remove '.dcm'
    fileName = list(myfile)[:-4]
    fileName = ''.join(fileName)
    myReport = fileName + '_report.jpg'
    if request.method == 'POST':
        form=nameForm(request.POST)
        response={}
        if form.is_valid():
            name=form.cleaned_data['rename']
            response['result']=name
            folderName = list(name)[:-4]
            folderName = ''.join(folderName)
            if fileType.startswith('zip'):
                os.rename('media/ZIP/' + myfile, 'media/ZIP/' + name)
                os.rename('media/ZIP/' + fileName, 'media/ZIP/' + folderName)
            elif fileType.startswith('dcm'):
                os.rename('media/DCM/' + myfile, 'media/DCM/' + name)
                # check if the file has corresponding report, if yes, rename as well
                dir_list = os.listdir('media/DCM/JPG/')
                if myReport in dir_list:
                    name = list(name)[:-4] # remove '.dcm'
                    name = ''.join(name)
                    os.rename('media/DCM/JPG/' + myReport, 'media/DCM/JPG/' + name + '_report.jpg')
        return render(request, 'core/result.html', response)
    else:
        return render(request, 'core/result.html')
def remove(myfiles, fileType):
    """Delete uploaded data from the DB and from disk.

    Args:
        myfiles: a list of patient IDs when fileType=='multiZIP'; a single
            pid string for 'zip'; a .dcm file name for 'dcm'.
        fileType: 'multiZIP' | 'zip' | 'dcm'.
    """
    # if the file is a zip, remove both zip and the extracted folder
    if fileType=='multiZIP':
        for myfile in myfiles:
            # remove from DB
            PATIENT.objects.filter(pid=myfile).delete()
            COMBINATION.objects.filter(pid=myfile).delete()
            DUALFEMUR.objects.filter(pid=myfile).delete()
            FRAX.objects.filter(pid=myfile).delete()
            LVA.objects.filter(pid=myfile).delete()
            APSPINE.objects.filter(pid=myfile).delete()
            # remove from folder
            #TODO: os.remove('media/ZIP/' + myfile + '.zip')
            shutil.rmtree('media/ZIP/' + myfile)
    elif fileType=='zip':
        # NOTE(review): this branch expects a single pid *string*; passing a
        # list breaks the path concatenation below (see commented line).
        #myfiles = myfiles[0]
        # remove from DB
        PATIENT.objects.filter(pid=myfiles).delete()
        COMBINATION.objects.filter(pid=myfiles).delete()
        DUALFEMUR.objects.filter(pid=myfiles).delete()
        FRAX.objects.filter(pid=myfiles).delete()
        LVA.objects.filter(pid=myfiles).delete()
        APSPINE.objects.filter(pid=myfiles).delete()
        # remove from folder
        # os.remove('media/ZIP/' + myfiles + '.zip')
        shutil.rmtree('media/ZIP/' + myfiles)
    # if the file is a dcm, remove dcm (and its JPG report, if any)
    elif fileType=='dcm':
        os.remove('media/DCM/' + myfiles)
        dir_list = os.listdir('media/DCM/JPG/')
        fileName = list(myfiles)[:-4]
        fileName = ''.join(fileName)
        reportName = fileName + '_report.jpg'
        if reportName in dir_list:
            os.remove('media/DCM/JPG/' + reportName)
    else:
        print('Wrong File Type!!!')
def download(request):
    """Stream the session-remembered file back to the browser on POST.

    Reads the path stored by show_dcm/manage_show_zip; when it does not
    exist, falls back to re-rendering the page the user came from.
    NOTE(review): the whole file is read into memory; fine for DICOM-sized
    files, consider FileResponse for large archives.
    """
    print('download me')
    # get file path from show_DCM/manage_show_zip
    filePath = request.session['filePath']
    print('filePath', filePath)
    # get file name from show_DCM/manage_show_zip
    myfile = request.session['myfile']
    print('myfile', myfile)
    # get file type (dcm or zip)
    fileType = ''.join(list(myfile)[-3:])
    if request.method == 'POST':
        print('hihi')
        if os.path.exists(filePath):
            print('789')
            with open(filePath, 'rb') as fh:
                response = HttpResponse(fh.read(), content_type="application/dicom")
                response['Content-Disposition'] = 'inline; filename=' + os.path.basename(filePath)
                print(response)
                return response
        else:
            if fileType.startswith('dcm'):
                print('123')
                return render(request, 'core/show_dcm.html')
            else:
                print('456')
                return render(request, 'core/manage_show_zip.html')
# else:
# print(fileType)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.