input stringlengths 2.65k 237k | output stringclasses 1
value |
|---|---|
`(lat, lon, Y2P, P2Y, Dx, Dy)`.
The transforms returned by this method can be used to easily convert back
and forth between spherical harmonic coefficients and intensities on a
discrete pixelized grid. Projections onto pixels are performed on an
equal-area Mollweide grid, so these transforms are useful for applying
priors on the pixel intensities, for instance.
The `lat` and `lon` arrays correspond to the latitude and longitude of
each of the points used in the transform (in units of `angle_unit`).
The `Y2P` matrix is an operator that transforms from spherical harmonic
coefficients `y` to pixels `p` on a Mollweide grid:
.. code-block:: python
p = Y2P @ y
The `P2Y` matrix is the (pseudo-)inverse of that operator:
.. code-block:: python
y = P2Y @ p
Finally, the `Dx` and `Dy` operators transform a pixel representation
of the map `p` to the derivative of `p` with respect to longitude and
latitude, respectively:
.. code-block:: python
dpdlon = Dx @ p
dpdlat = Dy @ p
By combining these operators, one can differentiate the spherical
harmonic expansion with respect to latitude and longitude, if desired:
dydlon = P2Y @ Dx @ Y2P @ y
dydlat = P2Y @ Dy @ Y2P @ y
These derivatives could be useful for implementing total-variation-reducing
regularization, for instance.
.. warning::
This is an experimental feature.
"""
# Prevent undersampling for ydeg = 1
if self.ydeg <= 1:
self.oversample = max(oversample, 3)
# Target number of pixels
npix = oversample * (self.ydeg + 1) ** 2
Ny = int(np.sqrt(npix * np.pi / 4.0))
Nx = 2 * Ny
y, x = np.meshgrid(
np.sqrt(2) * np.linspace(-1, 1, Ny),
2 * np.sqrt(2) * np.linspace(-1, 1, Nx),
)
x = x.flatten()
y = y.flatten()
# Remove off-grid points
a = np.sqrt(2)
b = 2 * np.sqrt(2)
idx = (y / a) ** 2 + (x / b) ** 2 <= 1
y = y[idx]
x = x[idx]
# https://en.wikipedia.org/wiki/Mollweide_projection
theta = np.arcsin(y / np.sqrt(2))
lat = np.arcsin((2 * theta + np.sin(2 * theta)) / np.pi)
lon0 = 3 * np.pi / 2
lon = lon0 + np.pi * x / (2 * np.sqrt(2) * np.cos(theta))
# Add points at the poles
lat = np.append(lat, [-np.pi / 2, 0, 0, np.pi / 2])
lon = np.append(
lon, [1.5 * np.pi, 1.5 * np.pi, 2.5 * np.pi, 1.5 * np.pi]
)
npix = len(lat)
# Back to Cartesian, this time on the *sky*
x = np.reshape(np.cos(lat) * np.cos(lon), [1, -1])
y = np.reshape(np.cos(lat) * np.sin(lon), [1, -1])
z = np.reshape(np.sin(lat), [1, -1])
R = self.ops.RAxisAngle(
np.array([1.0, 0.0, 0.0]), np.array(-np.pi / 2)
)
x, y, z = np.dot(R, np.concatenate((x, y, z)))
x = x.reshape(-1)
y = y.reshape(-1)
z = z.reshape(-1)
# Flatten and fix the longitude offset, then sort by latitude
lat = lat.reshape(-1)
lon = (lon - 1.5 * np.pi).reshape(-1)
idx = np.lexsort([lon, lat])
lat = lat[idx]
lon = lon[idx]
x = x[idx]
y = y[idx]
z = z[idx]
# Get the forward pixel transform
pT = self.ops.pT(x, y, z)[:, : (self.ydeg + 1) ** 2]
Y2P = pT * self.ops._c_ops.A1
# Get the inverse pixel transform
P2Y = np.linalg.solve(Y2P.T.dot(Y2P) + lam * np.eye(self.Ny), Y2P.T)
# Construct the differentiation operators
Dx = np.zeros((npix, npix))
Dy = np.zeros((npix, npix))
for i in range(npix):
# Get the relative x, y coords of the 10 closest points
y_ = (lat - lat[i]) * np.pi / 180
x_ = (
np.cos(0.5 * (lat + lat[i]) * np.pi / 180)
* (lon - lon[i])
* np.pi
/ 180
)
idx = np.argsort(x_ ** 2 + y_ ** 2)
x = x_[idx[:10]]
y = y_[idx[:10]]
# Require at least one point to be at a different latitude
j = np.argmax(np.abs(lat[idx] - lat[idx[0]]) > 1e-4)
if j >= 10:
# TODO: untested!
x[-1] = x_[idx[j]]
y[-1] = y_[idx[j]]
# Construct the design matrix that gives us
# the coefficients of the polynomial fit
# centered on the current point
X = np.vstack(
(
np.ones(10),
x,
y,
x ** 2,
x * y,
y ** 2,
x ** 3,
x ** 2 * y,
x * y ** 2,
x ** 3,
)
).T
A = np.linalg.solve(X.T.dot(X) + eps * np.eye(10), X.T)
# Since we're centered at the origin, the derivatives
# are just the coefficients of the linear terms.
Dx[i, idx[:10]] = A[1]
Dy[i, idx[:10]] = A[2]
return (
lat / self._angle_factor,
lon / self._angle_factor,
Y2P,
P2Y,
Dx * self._angle_factor,
Dy * self._angle_factor,
)
class LimbDarkenedBase(object):
    """The ``starry`` map class for purely limb-darkened maps.

    Handles light curves of purely limb-darkened objects in emitted light
    (no spherical-harmonic surface features).

    .. note::
        Instantiate this class by calling :py:func:`starry.Map` with
        ``ydeg`` set to zero and both ``rv`` and ``reflected`` set to False.
    """

    _ops_class_ = OpsLD

    def flux(self, **kwargs):
        """
        Compute and return the light curve.

        Args:
            xo (scalar or vector, optional): x coordinate of the occultor
                relative to this body in units of this body's radius.
            yo (scalar or vector, optional): y coordinate of the occultor
                relative to this body in units of this body's radius.
            zo (scalar or vector, optional): z coordinate of the occultor
                relative to this body in units of this body's radius.
            ro (scalar, optional): Radius of the occultor in units of
                this body's radius.
        """
        # `theta` has no meaning for a purely limb-darkened map, so pull it
        # out before the orbital kwargs are parsed.
        theta = kwargs.pop("theta", None)
        _, xo, yo, zo, ro = self._get_flux_kwargs(kwargs)
        if theta is not None:
            # Re-insert it so `_check_kwargs` raises a warning for the user.
            kwargs["theta"] = theta
        self._check_kwargs("flux", kwargs)
        # Delegate the actual computation to the ops backend.
        return self.amp * self.ops.flux(xo, yo, zo, ro, self._u)

    def intensity(self, mu=None, x=None, y=None):
        r"""
        Compute and return the intensity of the map.

        Args:
            mu (scalar or vector, optional): the radial parameter :math:`\mu`,
                equal to the cosine of the angle between the line of sight and
                the normal to the surface. Default is None.
            x (scalar or vector, optional): the Cartesian x position on the
                surface in units of the body's radius. Default is None.
            y (scalar or vector, optional): the Cartesian y position on the
                surface in units of the body's radius. Default is None.

        .. note::
            Users must provide either `mu` **or** `x` and `y`.
        """
        if mu is None:
            # Cartesian mode: both coordinates are required, and mu is
            # derived from them assuming a unit sphere.
            assert (
                x is not None and y is not None
            ), "Please provide either `mu` or `x` and `y`."
            x, y = self._math.vectorize(*self._math.cast(x, y))
            mu = (1 - x ** 2 - y ** 2) ** 0.5
        else:
            # Radial mode: the Cartesian coordinates must not also be given.
            assert (
                x is None and y is None
            ), "Please provide either `mu` or `x` and `y`, but not both."
            mu = self._math.vectorize(self._math.cast(mu))
        return self.amp * self.ops.intensity(mu, self._u)

    def render(self, res=300):
        """Compute and return the intensity of the map on a grid.

        Returns an image of shape ``(res, res)``.

        Args:
            res (int, optional): The resolution of the map in pixels on a
                side. Defaults to 300.
        """
        image = self.amp * self.ops.render_ld(res, self._u)
        # Spectral maps (nw set) yield one frame per wavelength bin; those
        # stay stacked instead of being reshaped into a single image.
        if self.nw is not None:
            return image
        return self._math.reshape(image, [res, res])
class RVBase(object):
"""The radial velocity ``starry`` map class.
This class handles velocity-weighted intensities for use in
Rossiter-McLaughlin effect investigations. It has all the same
attributes and methods as :py:class:`starry.maps.YlmBase`, with the
additions and modifications listed below.
All velocities are in meters per second, unless otherwise
specified via the attribute :py:attr:`_velocity_unit``.
.. note::
Instantiate this class by calling :py:func:`starry.Map` with
``ydeg > 0`` and ``rv`` set to True.
"""
_ops_class_ = OpsRV
def reset(self, **kwargs):
self.velocity_unit = kwargs.pop("velocity_unit", units.m / units.s)
self.veq = kwargs.pop("veq", 0.0)
self.alpha = kwargs.pop("alpha", 0.0)
super(RVBase, self).reset(**kwargs)
@property
def velocity_unit(self):
"""An ``astropy.units`` unit defining the velocity metric for this map."""
return self._velocity_unit
@velocity_unit.setter
def | |
#!/usr/bin/env python3
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# Class holds default settings for json requests to Ghost -
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
import sys
import os
import importlib
import requests
import jwt
from datetime import datetime
import json
import subprocess
from scp import SCPClient
import paramiko
import time
from collections import Counter
import shutil
import itertools
if sys.version_info[0] != 3:
print("This script requires Python 3")
exit()
sys.path.append(os.path.join(os.path.abspath(__file__ + "../../../")))
GrafanaRequest = importlib.import_module("py-dashboard.GrafanaRequest")
InfluxRequest = importlib.import_module("py-dashboard.InfluxRequest")
RecordInflux = InfluxRequest.RecordInflux
class CSVReader:
    """Small helper for reading and massaging delimited kpi.csv files.

    The "dataframe" used throughout is a plain list of rows: the first row
    holds the column names and every cell is a string.
    """

    def read_csv(self,
                 file,
                 sep='\t'):
        """Read *file* and split it into rows of cells.

        Args:
            file: Path of the delimited text file.
            sep: Cell separator. Defaults to tab.

        Returns:
            list[list[str]]: one list of cells per non-empty line.
        """
        # Use a context manager so the file handle is closed promptly
        # (the original implementation leaked it until GC).
        with open(file) as fh:
            lines = fh.read().split('\n')
        return [line.split(sep) for line in lines if len(line) > 0]

    def get_column(self,
                   df,
                   value):
        """Return the values (header excluded) of the column named *value*.

        Raises:
            ValueError: if *value* is not in the header row.
        """
        index = df[0].index(value)
        return [row[index] for row in df[1:]]

    def get_columns(self, df, targets):
        """Return a sub-dataframe containing only the columns in *targets*.

        Unlike :meth:`get_column`, the header row is kept.
        """
        target_index = [df[0].index(item) for item in targets]
        return [[row[x] for x in target_index] for row in df]

    def to_html(self, df):
        """Render the dataframe as an HTML table.

        The original implementation emitted a dangling empty ``<tr>``
        before ``</tbody>``; rows are now closed properly.
        """
        header = ('<table style="border:1px solid #ddd">'
                  '<colgroup>'
                  '<col style="width:25%">'
                  '<col style="width:25%">'
                  '<col style="width:50%">'
                  '</colgroup>'
                  '<tbody>')
        rows = []
        for row in df:
            cells = ''.join('<td style="border:1px solid #ddd">%s</td>' % item
                            for item in row)
            rows.append('<tr>%s</tr>' % cells)
        return header + '\n'.join(rows) + '</tbody></table>'

    def filter_df(self, df, column, expression, target):
        """Return the header plus the rows where *column* passes the test.

        Args:
            df: Dataframe (header row first).
            column: Name of the column to test.
            expression: One of ``'less than'``, ``'greater than'`` or
                ``'greater than or equal to'``.
            target: Numeric threshold.

        Rows whose cell is missing or cannot be parsed as a float are
        skipped silently.
        """
        target_index = df[0].index(column)
        # Always keep the header row.
        keep = [0]
        for position, row in enumerate(df[1:], start=1):
            # BUGFIX: the original appended a counter starting at 0 for
            # df[1], so every match selected the row *above* the real one.
            # BUGFIX: the original used try/finally, which does NOT suppress
            # ValueError raised by float() on non-numeric cells.
            try:
                cell = float(row[target_index])
            except (ValueError, IndexError):
                continue
            if expression == 'less than' and cell < target:
                keep.append(position)
            elif expression == 'greater than' and cell > target:
                keep.append(position)
            elif expression == 'greater than or equal to' and cell >= target:
                keep.append(position)
        return [df[i] for i in keep]

    def concat(self, dfs):
        """Concatenate several dataframes (lists of rows) into one list."""
        return list(itertools.chain.from_iterable(dfs))
class GhostRequest:
def __init__(self,
_ghost_json_host,
_ghost_json_port,
_api_token=None,
_overwrite='false',
debug_=False,
die_on_error_=False,
influx_host=None,
influx_port=8086,
influx_org=None,
influx_token=None,
influx_bucket=None):
self.debug = debug_
self.die_on_error = die_on_error_
self.ghost_json_host = _ghost_json_host
self.ghost_json_port = _ghost_json_port
self.ghost_json_url = "http://%s:%s/ghost/api/v3" % (_ghost_json_host, _ghost_json_port)
self.data = dict()
self.data['overwrite'] = _overwrite
self.ghost_json_login = self.ghost_json_url + '/admin/session/'
self.api_token = _api_token
self.images = list()
self.webpages = list()
self.pdfs = list()
self.influx_host = influx_host
self.influx_port = influx_port
self.influx_org = influx_org
self.influx_token = influx_token
self.influx_bucket = influx_bucket
def encode_token(self):
# Split the key into ID and SECRET
key_id, secret = self.api_token.split(':')
# Prepare header and payload
iat = int(datetime.now().timestamp())
header = {'alg': 'HS256', 'typ': 'JWT', 'kid': key_id}
payload = {
'iat': iat,
'exp': iat + 5 * 60,
'aud': '/v3/admin/'
}
token = jwt.encode(payload, bytes.fromhex(secret), algorithm='HS256', headers=header)
return token
def create_post(self,
title=None,
text=None,
status="published"):
ghost_json_url = self.ghost_json_url + '/admin/posts/?source=html'
post = dict()
posts = list()
datastore = dict()
datastore['html'] = text
datastore['title'] = title
datastore['status'] = status
posts.append(datastore)
post['posts'] = posts
headers = dict()
token = self.encode_token()
headers['Authorization'] = 'Ghost {}'.format(token)
response = requests.post(ghost_json_url, json=post, headers=headers)
if self.debug:
print(datastore)
print(ghost_json_url)
print('\n')
print(post)
print('\n')
print(headers)
print(response.headers)
def upload_image(self,
image):
if self.debug:
print(image)
ghost_json_url = self.ghost_json_url + '/admin/images/upload/'
token = self.encode_token()
bash_command = "curl -X POST -F 'file=@%s' -H \"Authorization: Ghost %s\" %s" % (image, token, ghost_json_url)
proc = subprocess.Popen(bash_command, shell=True, stdout=subprocess.PIPE)
output = proc.stdout.read().decode('utf-8')
if self.debug:
print(output)
self.images.append(json.loads(output)['images'][0]['url'])
def upload_images(self,
folder):
for image in os.listdir(folder):
if 'kpi' in image:
if 'png' in image:
self.upload_image(folder + '/' + image)
if self.debug:
print('images %s' % self.images)
def custom_post(self,
folder,
authors,
title='custom'):
self.upload_images(folder)
head = '''This is a custom post created via a script'''
for picture in self.images:
head = head + '<img src="%s"></img>' % picture
head = head + '''This is the end of the example'''
self.create_post(title=title,
text=head)
def kpi_to_ghost(self,
authors,
folders,
parent_folder=None,
title=None,
server_pull=None,
ghost_host=None,
port=22,
user_push=None,
password_push=<PASSWORD>,
customer=None,
testbed=None,
test_run=None,
target_folders=list(),
grafana_token=None,
grafana_host=None,
grafana_port=3000,
grafana_datasource='InfluxDB',
grafana_bucket=None):
global dut_hw, dut_sw, dut_model, dut_serial
now = datetime.now()
text = ''
csvreader = CSVReader()
if self.debug:
print('Folders: %s' % folders)
ssh_push = paramiko.SSHClient()
ssh_push.set_missing_host_key_policy(paramiko.client.AutoAddPolicy)
ssh_push.connect(ghost_host,
port,
username=user_push,
password=<PASSWORD>,
allow_agent=False,
look_for_keys=False)
scp_push = SCPClient(ssh_push.get_transport())
if parent_folder is not None:
files = os.listdir(parent_folder)
if self.debug:
print("parent_folder %s" % parent_folder)
print(files)
for file in files:
if os.path.isdir(parent_folder + '/' + file) is True:
if os.path.exists(file):
shutil.rmtree(file)
shutil.copytree(parent_folder + '/' + file, file)
target_folders.append(file)
if self.debug:
print('Target folders: %s' % target_folders)
else:
for folder in folders:
if self.debug:
print(folder)
target_folders.append(folder)
testbeds = list()
web_pages_and_pdfs = list()
high_priority_list = list()
low_priority_list = list()
images = list()
times = list()
test_pass_fail = list()
subtest_pass_fail = list()
subtest_pass_total = 0
subtest_fail_total = 0
test_tag_1 = list()
columns = ['test-rig', 'dut-hw-version', 'dut-sw-version',
'dut-model-num', 'dut-serial-num']
duts = dict()
for target_folder in target_folders:
try:
target_file = '%s/kpi.csv' % target_folder
df = csvreader.read_csv(file=target_file, sep='\t')
test_id = csvreader.get_column(df, 'test-id')[0]
for column in columns:
try:
column_data = csvreader.get_column(df, column)[0]
duts[column] = column_data
except:
print('no column named %s' % column)
test_tag_1.append([test_id, list(set(csvreader.get_column(df, 'test-tag')))])
pass_fail = Counter(csvreader.get_column(df, 'pass/fail'))
test_pass_fail.append(pass_fail)
subtest_pass = csvreader.get_column(df, 'Subtest-Pass')
subtest_fail = csvreader.get_column(df, 'Subtest-Fail')
for result in subtest_pass:
subtest_pass_total += int(result)
for result in subtest_fail:
subtest_fail_total += int(result)
subtest_pass_fail_list = dict()
subtest_pass_fail_list['PASS'] = subtest_pass_total
subtest_pass_fail_list['FAIL'] = subtest_fail_total
subtest_pass_fail.append(subtest_pass_fail_list)
times_append = csvreader.get_column(df, 'Date')
if len(times_append) == 0:
print(LookupError("%s/kpi.csv has no time points" % target_folder))
break
for target_time in times_append:
times.append(float(target_time) / 1000)
if pass_fail['PASS'] + pass_fail['FAIL'] > 0:
text = text + 'Tests passed: %s<br />' % pass_fail['PASS']
text = text + 'Tests failed: %s<br />' % pass_fail['FAIL']
text = text + 'Percentage of tests passed: %s<br />' % (
pass_fail['PASS'] / (pass_fail['PASS'] + pass_fail['FAIL']))
else:
text = text + 'Tests passed: 0<br />' \
'Tests failed : 0<br />' \
'Percentage of tests passed: Not Applicable<br />'
testbeds.append(duts['test-rig'])
if testbed is None:
testbed = duts['test-rig']
if test_run is None:
test_run = now.strftime('%B-%d-%Y-%I-%M-%p-report')
local_path = '/home/%s/%s/%s/%s' % (user_push, customer, testbed, test_run)
transport = paramiko.Transport(ghost_host, port)
transport.connect(None, user_push, password_push)
sftp = paramiko.sftp_client.SFTPClient.from_transport(transport)
if self.debug:
print(local_path)
print(target_folder)
try:
sftp.mkdir('/home/%s/%s/%s' % (user_push, customer, testbed))
except:
pass
try:
sftp.mkdir(local_path)
except:
pass
scp_push.put(target_folder, local_path, recursive=True)
files = sftp.listdir(local_path + '/' + target_folder)
pdfs = list()
webpages = list()
for file in files:
if 'pdf' in file:
url = 'http://%s/%s/%s/%s/%s/%s' % (
ghost_host, customer.strip('/'), testbed, test_run, target_folder, file)
pdfs.append('<a href="%s">PDF</a>' % url)
if 'index.html' in files:
url = 'http://%s/%s/%s/%s/%s/%s' % (
ghost_host, customer.strip('/'), testbed, test_run, target_folder, 'index.html')
webpages.append('<a href="%s">HTML</a>' % url)
web_pages_and_pdfs_append = dict()
web_pages_and_pdfs_append[test_id] = pdfs + webpages
web_pages_and_pdfs.append(web_pages_and_pdfs_append)
scp_push.close()
self.upload_images(target_folder)
for image in self.images:
if 'kpi-' in image:
if '-print' not in image:
images.append('<img src="%s"></img>' % image)
self.images = []
results = csvreader.get_columns(df, ['short-description', 'numeric-score', 'test details', 'pass/fail',
'test-priority'])
results[0] = ['Short Description', 'Score', 'Test Details', 'Pass or Fail', 'test-priority']
for row in results:
try:
row[1] = round(float(row[1]), 2)
except:
pass
low_priority = csvreader.filter_df(results, 'test-priority', 'less than', 94)
if self.debug:
print('Low Priority results %s' % len(low_priority))
high_priority = csvreader.filter_df(results, 'test-priority', 'greater than or equal to', 95)
high_priority_list.append(high_priority)
low_priority_list.append(low_priority)
except:
print("Failed to process %s" % target_folder)
target_folders.remove(target_folder)
failuredict = dict()
failuredict[target_folder] = ['Failure']
web_pages_and_pdfs.append(failuredict)
test_tag = dict()
for x in list(set([x[0] for x in test_tag_1])):
l3 = list()
for sublist in test_tag_1:
if sublist[0] == x:
l3 += sublist[1]
test_tag[x] = l3
if len(times) == 0:
return ArithmeticError("There are no datapoints in any folders passed into Ghost")
test_pass_fail_results = sum((Counter(test) for test in test_pass_fail), Counter())
subtest_pass_fail_results = sum((Counter(test) for test in subtest_pass_fail), Counter())
if self.debug:
print(times)
end_time = max(times)
start_time = '2021-07-01'
end_time = datetime.utcfromtimestamp(end_time)
now = time.time()
offset = datetime.fromtimestamp(now) - datetime.utcfromtimestamp(now)
end_time = end_time + offset
high_priority = csvreader.concat(high_priority_list)
low_priority = csvreader.concat(low_priority_list)
if len(high_priority) > 0:
high_priority = csvreader.get_columns(high_priority,
['Short Description', 'Score', 'Test Details'])
low_priority = csvreader.get_columns(low_priority,
['Short Description', 'Score', 'Test Details'])
high_priority.append(['Total Passed', test_pass_fail_results['PASS'], 'Total subtests passed during this run'])
high_priority.append(['Total Failed', test_pass_fail_results['FAIL'], 'Total subtests failed during this run'])
high_priority.append(
['Subtests Passed', subtest_pass_fail_results['PASS'], 'Total subtests passed during this run'])
high_priority.append(
['Subtests Failed', subtest_pass_fail_results['FAIL'], 'Total subtests failed during this run'])
if title is None:
title = end_time.strftime('%B %d, %Y %I:%M %p report')
# create Grafana | |
# repository: piraz/firenado
# -*- coding: UTF-8 -*-
#
# Copyright 2015-2021 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cartola import config
import errno
import firenado.conf
import functools
import logging
import sys
logger = logging.getLogger(__name__)
def configure(data_sources):
    """ Decorator that configures data sources on a data connected object.

    The wrapped method's data sources are configured (if needed) right
    before each invocation.

    :param data_sources: List of data sources to be configured.
    """
    def decorator(method):
        @functools.wraps(method)
        def wrapper(self, *args, **kwargs):
            # Make sure the required data sources exist before the call.
            configure_data_sources(data_sources, self)
            return method(self, *args, **kwargs)
        return wrapper
    return decorator
class DataConnectedMixin(object):
    """ Mixin that gives an object a registry of named data sources.

    Data connected objects have data sources; this mixin lets them be set
    on and retrieved from the instance.

    Example:
    >>> class MyClass(..., DataConnectedMixin):
    """

    @property
    def data_sources(self):
        """ Dict of every data source registered on this instance. """
        return get_data_sources(self, '__data_sources')

    def get_data_source(self, name):
        """ Return the data source registered under *name*. """
        sources = self.data_sources
        return sources[name]

    def set_data_source(self, name, data_source):
        """ Register *data_source* under *name*. """
        sources = self.data_sources
        sources[name] = data_source
def get_data_sources(obj, data_sources_attribute):
    """ Return obj's data-source registry, creating it empty on first use. """
    try:
        return getattr(obj, data_sources_attribute)
    except AttributeError:
        # First access: lazily attach an empty registry.
        setattr(obj, data_sources_attribute, {})
        return getattr(obj, data_sources_attribute)
class Connector(object):
    """ Base class for database connectors.

    A connector receives a data connected instance and provides a proper
    database connection to it.
    """

    def __init__(self, data_connected):
        # Name-mangled to _Connector__data_connected, as in the original.
        self.__data_connected = data_connected

    def get_connection(self):
        """ Return the configured, connected database connection.

        The base implementation has none and returns None.
        """
        return None

    def process_config(self, conf):
        """ Parse configuration data provided by the firenado.conf engine.

        The base implementation normalizes nothing and returns an empty dict.
        """
        return {}
class LdapConnector(Connector):
    """ Placeholder connector for LDAP data sources (no behavior yet). """

    def __init__(self, data_connected):
        super(LdapConnector, self).__init__(data_connected)
class RedisConnector(Connector):
    """ Connects a redis database to a data connected instance. """

    def __init__(self, data_connected):
        # Redis client; created in configure().
        self.__connection = None
        super(RedisConnector, self).__init__(data_connected)
        # Data source name; set in configure().
        self.__name = None

    def configure(self, name, conf):
        """ Create and ping the redis client for this data source.

        Exits the process with ECONNREFUSED if redis is unreachable.

        :param name: Data source name.
        :param conf: Processed configuration (see process_config).
        """
        self.__name = name
        logger.info("Connecting to redis using the configuration: %s.", conf)
        import redis
        redis_conf = dict()
        redis_conf.update(conf)
        # 'connector' is firenado metadata, not a redis.Redis kwarg.
        redis_conf.pop("connector")
        # TODO Handle connection error
        self.__connection = redis.Redis(**redis_conf)
        try:
            self.__connection.ping()
        except redis.ConnectionError as error:
            logger.fatal("Error trying to connect to redis: %s", error)
            sys.exit(errno.ECONNREFUSED)

    def get_connection(self):
        """ Return the redis client (None until configure() runs). """
        return self.__connection

    def process_config(self, conf):
        """ Normalize a redis data-source configuration.

        Only 'db', 'host' and 'port' are honored; 'db' and 'port' are
        coerced to int. Missing values default to localhost:6379, db 0.
        """
        db_conf = {
            'connector': 'redis',
            'host': 'localhost',
            'port': 6379,
            'db': 0,
        }
        for key in conf:
            if key in ['db', 'port']:
                # BUGFIX: the int() coercion used to be clobbered by an
                # unconditional string assignment immediately after it.
                db_conf[key] = int(conf[key])
            elif key == 'host':
                db_conf[key] = conf[key]
        return db_conf
class SqlalchemyConnector(Connector):
    """ Connects a sqlalchemy engine to a data connected instance. Sqlalchemy
    support a big variety of relational database backends. The connection
    returned by this handler contains a engine and session created by
    sqlalchemy and the database backend name.
    """

    def __init__(self, data_connected):
        super(SqlalchemyConnector, self).__init__(data_connected)
        # Data source name; assigned in configure().
        self.__name = None
        # Payload handed to callers via get_connection(): the backend name
        # plus the session defaults consumed by get_a_session().
        self.__connection = {
            'backend': None,
            'session': {
                'autoflush': True,
                'autocommit': False,
                'expire_on_commit': True,
                'info': None
            }
        }
        # Sqlalchemy engine; created in configure().
        self.__engine = None

    def configure(self, name, conf):
        """ Build the sqlalchemy engine for this data source.

        Reads engine, pool and session options from ``conf``, creates the
        engine, and installs a "pessimistic disconnect" ping listener so
        stale pooled connections are re-validated before use.

        :param name: Data source name.
        :param conf: Processed configuration (see process_config).
        """
        self.__name = name
        from sqlalchemy import create_engine
        from sqlalchemy import exc, event, select
        # We will set the isolation level to READ UNCOMMITTED by default
        # to avoid the "cache" effect sqlalchemy has without this option.
        # Solution from: http://bit.ly/2bDq0Nv
        # TODO: Get the isolation level from data source conf
        engine_params = {
            'isolation_level': "READ UNCOMMITTED"
        }
        if "backend" in conf:
            if conf['backend'] == 'mysql':
                # Setting connection default connection timeout for mysql
                # backends as suggested on http://bit.ly/2bvOLxs
                # TODO: ignore this if pool_recycle is defined on conf
                engine_params['pool_recycle'] = 3600
        if "future" in conf:
            if conf['future']:
                # Opt in to sqlalchemy 2.0-style engine behavior.
                engine_params['future'] = True
        if "pool" in conf:
            # Optional connection-pool tuning.
            if "size" in conf['pool']:
                engine_params['pool_size'] = conf['pool']['size']
            if "max_overflow" in conf['pool']:
                engine_params['max_overflow'] = conf['pool']['max_overflow']
            if "class" in conf['pool']:
                engine_params['pool_class'] = conf['pool']['class']
                # A pool class given as a dotted string is resolved to the
                # actual class object.
                if isinstance(engine_params['pool_class'], str):
                    engine_params['pool_class'] = config.get_from_string(
                        engine_params['pool_class'])
        if "session" in conf:
            # Per-data-source overrides for the session defaults.
            if "autoflush" in conf['session']:
                self.__connection['session']['autoflush'] = conf['session'][
                    'autoflush']
            if "autocommit" in conf['session']:
                self.__connection['session']['autocommit'] = conf['session'][
                    'autocommit']
            if "expire_on_commit" in conf['session']:
                self.__connection['session']['expire_on_commit'] = conf[
                    'session']['expire_on_commit']
            if "info" in conf['session']:
                self.__connection['session']['info'] = conf['session']['info']
        if "url" not in conf:
            # NOTE(review): this branch only logs -- there is no early
            # return, so the create_engine call below will still raise
            # KeyError on conf['url']. Confirm whether a return (or raise)
            # was intended here.
            print(self.__connection)
            logger.error("It is not possible to create sqlalchemy engine for "
                         "%s datasource. Configuration: %s." %
                         (self.__name, conf))
        self.__engine = create_engine(conf['url'], **engine_params)

        @event.listens_for(self.__engine, "engine_connect")
        def ping_connection(connection, branch):
            # Adding ping connection event handler as described at the
            # pessimistic disconnect section of: http://bit.ly/2c8Sm2t
            logger.debug("Pinging sqlalchemy connection.")
            if branch:
                # "branch" refers to a sub-connection of a connection,
                # we don't want to bother pinging on these.
                logger.debug("The connection is a branch. There is no need to "
                             "ping those.")
                return
            # turn off "close with result". This flag is only used with
            # "connectionless" execution, otherwise will be False in any case
            save_should_close_with_result = connection.should_close_with_result
            connection.should_close_with_result = False
            try:
                # run a SELECT 1. use a core select() so that
                # the SELECT of a scalar value without a table is
                # appropriately formatted for the backend
                # NOTE(review): list-style select([1]) was removed in
                # sqlalchemy 2.0 -- confirm the pinned sqlalchemy version.
                logger.debug("Testing sqlalchemy connection.")
                connection.scalar(select([1]))
            except exc.DBAPIError as err:
                logger.warning(err)
                logger.warning("Firenado will try to reestablish the data "
                               "source connection.")
                # catch SQLAlchemy's DBAPIError, which is a wrapper
                # for the DBAPI's exception. It includes a
                # .connection_invalidated attribute which specifies if this
                # connection is a "disconnect" condition, which is based on
                # inspection of the original exception by the dialect in use.
                if err.connection_invalidated:
                    # run the same SELECT again - the connection will
                    # re-validate itself and establish a new connection.
                    # The disconnect detection here also causes the whole
                    # connection pool to be invalidated so that all stale
                    # connections are discarded.
                    connection.scalar(select([1]))
                    logger.warning("Data source connection reestablished.")
                else:
                    raise
            finally:
                # restore "close with result"
                connection.should_close_with_result = (
                    save_should_close_with_result)

        logger.info("Connecting to the database using the engine: %s.",
                    self.__engine)
        self.__connection['backend'] = conf['backend']
        # will just happen during the handler execution

    def get_connection(self):
        """ Return the connection payload: backend name + session defaults.
        """
        return self.__connection

    def connect_engine(self):
        """ Force an initial engine connection.

        Exits the process with ECONNREFUSED if the database is unreachable.
        """
        from sqlalchemy.exc import OperationalError
        try:
            self.__engine.connect()
        except OperationalError as op_error:
            logger.fatal("Error trying to connect to database: %s", op_error)
            sys.exit(errno.ECONNREFUSED)

    def get_a_session(self, autoflush=True, autocommit=False,
                      expire_on_commit=True, info=None):
        """ Return a new session bound to this connector's engine.

        Parameters mirror sqlalchemy's sessionmaker options.
        """
        from firenado.util.sqlalchemy_util import Session
        Session.configure(bind=self.__engine, autoflush=autoflush,
                          autocommit=autocommit,
                          expire_on_commit=expire_on_commit, info=info)
        return Session()

    @property
    def backend(self):
        """ Database backend name (the dialect part of the URL). """
        return self.__connection['backend']

    @property
    def engine(self):
        """ The underlying sqlalchemy engine. """
        return self.__engine

    @property
    def session(self):
        """ A fresh session created with the default options. """
        return self.get_a_session()

    def process_config(self, conf):
        """ Normalize a data source configuration, deriving the sqlalchemy
        URL and backend name when they are not given explicitly.

        :param conf: Raw data source configuration mapping.
        :return: Normalized configuration with 'url' and 'backend' set.
        """
        db_conf = {
            'type': 'sqlalchemy',
        }
        for key in conf:
            # TODO Handle other properties and create the url if needed.
            if key in ["db", "database", "dialect", "driver", "future", "host",
                       "pass", "password", "pool", "port", "session", "url",
                       "user", "username"]:
                index = key
                # Aliases: db -> database, user -> username, pass -> password.
                if index == "db":
                    index = "database"
                if index == "user":
                    index = "username"
                if index == "pass":
                    index = "password"
                db_conf[index] = conf[key]
        # TODO: Handler errors here
        if "url" in db_conf:
            # Backend is the dialect portion of the URL, before any +driver.
            db_conf['backend'] = db_conf['url'].split(':')[0].split('+')[0]
        else:
            # Assemble dialect[+driver]://user[:pass]@host[:port]/database.
            url = ""
            if "dialect" in db_conf:
                db_conf['backend'] = db_conf['dialect']
                url = "%s" % db_conf.pop("dialect")
            if "driver" in db_conf:
                url = "%s+%s" % (url, db_conf.pop("driver"))
            if "username" in db_conf:
                url = "%s://%s" % (url, db_conf.pop("username"))
            if "password" in db_conf:
                # Quote so special characters survive in the URL.
                from urllib.parse import quote
                url = "%s:%s" % (url, quote(db_conf.pop("password")))
            if "host" in db_conf:
                url = "%s@%s" % (url, db_conf.pop("host"))
            if "port" in db_conf:
                url = "%s:%s" % (url, db_conf.pop("port"))
            if "database" in db_conf:
                url = "%s/%s" % (url, db_conf.pop("database"))
            db_conf['url'] = url
        return db_conf
def config_to_data_source(name, conf, data_connected):
    """ Convert a data source conf to it's respective data source. We need
    a data connected to use while instantiating the data source.

    :param name: Datasource name
    :param conf: A data source confuration item
    :param data_connected: A data connected object
    :return: Connector
    """
    connector_conf = firenado.conf.data['connectors'][conf['connector']]
    # TODO: Test if handler was returned None. An error occurred.
    handler_class = config.get_from_module(connector_conf['module'],
                                           connector_conf['class'])
    data_source = handler_class(data_connected)
    # Normalize the raw conf, then configure the connector with it.
    data_source.configure(name, data_source.process_config(conf))
    return data_source
def configure_data_sources(data_sources, data_connected):
""" Configure all data sources from configuration and set to the data
connected.
:param data_sources: List of data sources to be configured
:param data_connected: Data connected object where the data sources will
be configured.
"""
if isinstance(data_sources, str):
if data_sources in firenado.conf.data['sources']:
logger.debug("Found data source [%s] in the list. Preceding with "
"the configuration process." % data_sources)
conf = firenado.conf.data['sources'][data_sources]
| |
# repository: wyq24/suncasa -- file: suncasa/pygsfit/gsutils.py
import numpy as np
# import sys
import math
import os, sys, platform
import astropy.units as u
from sunpy import map as smap
from astropy.coordinates import SkyCoord
from suncasa.io import ndfits
import lmfit
from astropy.time import Time
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
from suncasa.utils import mstools
from suncasa.utils import qlookplot as ql
from mpl_toolkits.axes_grid1 import make_axes_locatable
from tqdm import tqdm
from astropy.io import fits
import numpy.ma as ma
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
import gstools
# Name of the fast gyrosynchrotron codes shared library; selected per
# platform (.so for Linux/macOS, .dll for Windows). On any other platform
# `libname` stays undefined, exactly as before.
if platform.system() in ('Linux', 'Darwin'):
    libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'binaries/MWTransferArr.so')
elif platform.system() == 'Windows':
    libname = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                           'binaries/MWTransferArr64.dll')
def kev2k(eng):
    """Convert a photon energy in keV to a temperature in kelvin.

    Uses 1 keV / k_B ~ 1.1604525e7 K.
    """
    KEV_TO_K = 11604525.00617
    return eng * KEV_TO_K
def ff_emission(em, T=1.e7, Z=1., mu=1.e10):
    """Return the free-free (thermal bremsstrahlung) brightness temperature.

    Args:
        em: Emission measure (assumed cgs, cm^-5 -- TODO confirm with caller).
        T: Plasma temperature in K. Defaults to 1e7.
        Z: Mean ion charge. Defaults to 1.
        mu: Observing frequency in Hz. Defaults to 1e10.

    Returns:
        float: Brightness temperature in K, T * (1 - exp(-tau)).
    """
    from astropy import constants as const
    import astropy.units as u
    # BUGFIX: attach units exactly once. The original multiplied by the unit
    # a second time inside the bmax/bmin expressions (``T * u.k`` on an
    # already unit-ful T, ``mu * u.Hz`` on an already unit-ful mu), which made
    # the bmax/bmin ratio dimensional; also kelvin is ``u.K``, not ``u.k``.
    T = T * u.K
    mu = mu * u.Hz
    esu = const.e.esu
    k_B = const.k_B.cgs
    m_e = const.m_e.cgs
    c = const.c.cgs
    # Max/min impact parameters entering the Coulomb logarithm.
    bmax = (3 * k_B * T / m_e) ** 0.5 / 2.0 / np.pi / mu
    bmin = Z * esu ** 2 / 3. / k_B / T
    lnbb = np.log((bmax / bmin).value)
    # Free-free absorption coefficient per unit emission measure.
    ka_mu = 1. / mu ** 2 / T ** 1.5 * (
            Z ** 2 * esu ** 6 / c / np.sqrt(2. * np.pi * (m_e * k_B) ** 3)) * np.pi ** 2 / 4.0 * lnbb
    # Optical depth, then the optically-thin/thick bridge formula.
    opc = ka_mu * em
    return T.value * (1 - np.exp(-opc.value))
def sfu2tb(freq, flux, area):
    """Convert flux density to brightness temperature.

    freq : frequency in Hz
    flux : flux density in sfu
    area : source area in arcsec^2
    Returns brightness temperature in K.
    """
    SFU_IN_CGS = 1e-19        # 1 sfu in erg s^-1 cm^-2 Hz^-1
    C_CGS = 2.998e10          # speed of light, cm/s
    KB_CGS = 1.38065e-16      # Boltzmann constant, erg/K
    solid_angle = area / 206265. ** 2  # arcsec^2 -> steradian
    return flux * SFU_IN_CGS * C_CGS ** 2. / (2. * KB_CGS * freq ** 2. * solid_angle)
def tb2sfu(freq, tb, area):
    """Convert brightness temperature to flux density (inverse of `sfu2tb`).

    freq : frequency in Hz
    tb   : brightness temperature in K
    area : source area in arcsec^2
    Returns flux density in sfu.
    """
    SFU_IN_CGS = 1e-19        # 1 sfu in erg s^-1 cm^-2 Hz^-1
    C_CGS = 2.998e10          # speed of light, cm/s
    KB_CGS = 1.38065e-16      # Boltzmann constant, erg/K
    solid_angle = area / 206265. ** 2  # arcsec^2 -> steradian
    kelvin_per_sfu = SFU_IN_CGS * C_CGS ** 2. / (2. * KB_CGS * freq ** 2. * solid_angle)
    return tb / kelvin_per_sfu
def initspecplot(axes, cplts):
    """Initialize a log-log brightness-temperature spectrum plot.

    Parameters
    ----------
    axes : matplotlib axes to configure.
    cplts : sequence of colors, one per spectrum; an empty errorbar artist is
        created for each so data can be filled in later (see `set_errorobj`).

    Returns
    -------
    list
        The errorbar containers, one per entry in `cplts`.
    """
    errobjs = [axes.errorbar([], [], yerr=[], linestyle='', marker='o',
                             mfc='none', mec=cplt, alpha=1.0)
               for cplt in cplts]
    axes.set_yscale("log")
    axes.set_xscale("log")
    axes.set_xlim([1, 20])
    axes.set_ylim([0.1, 1000])
    # Fix: the original called set_xticks twice with the same list and called
    # set_yticks([]) only to overwrite it immediately; one call each suffices.
    axes.set_xticks([1, 5, 10, 20])
    axes.set_xticklabels([1, 5, 10, 20])
    axes.set_yticks([0.01, 0.1, 1, 10, 100, 1000])
    axes.set_ylabel('T$_b$ [MK]')
    axes.set_xlabel('Frequency [GHz]')
    # Reference guide lines with T_b proportional to nu^-2, one per decade offset.
    x = np.linspace(1, 20, 10)
    for ll in [-1, 0, 1, 2, 3, 4]:
        y = 10. ** (-2 * np.log10(x) + ll)
        axes.plot(x, y, 'k--', alpha=0.1)
    return errobjs
def set_errorobj(xout, yout, errobj, yerr=None):
    """Update an errorbar container in place with new data.

    errobj is a matplotlib errorbar container: (line, caps, (bar_collection,)).
    If `yerr` is given, the vertical error-bar segments are rebuilt as
    [x, y+yerr] -> [x, y-yerr] for every point.
    """
    line, _caps, (bar_collection,) = errobj
    line.set_data(xout, yout)
    if yerr is None:
        return
    tops = yout + yerr
    bottoms = yout - yerr
    segments = []
    for xval, top, bottom in zip(xout, tops, bottoms):
        segments.append(np.array([[xval, top], [xval, bottom]]))
    bar_collection.set_segments(segments)
def mwspec2min_1src(params, freqghz, tb=None, tb_err=None, arcsec2cm=0.725e8, showplt=False):
# params are defined by lmfit.Paramters()
'''
params: parameters defined by lmfit.Paramters()
freqghz: frequencies in GHz
ssz: pixel size in arcsec
tb: reference brightness temperature in K
tb_err: uncertainties of reference brightness temperature in K
'''
from scipy import interpolate
GET_MW = gstools.initGET_MW(libname) # load the library
ssz = float(params['ssz'].value) # # source area in arcsec^2
depth = float(params['depth'].value) # total source depth in arcsec
Bmag = float(params['Bmag'].value) # magnetic field strength in G
Tth = float(params['Tth'].value) # thermal temperature in MK
nth = float(params['nth'].value) # thermal density in 1e10 cm^{-3}
nrlh = 10. ** float(params['lognrlh'].value) # total nonthermal density above 0.1 MeV
delta = float(params['delta'].value) # powerlaw index
theta = float(params['theta'].value) # viewing angle in degrees
Emin = float(params['Emin'].value) # low energy cutoff of nonthermal electrons in MeV
Emax = float(params['Emax'].value) # high energy cutoff of nonthermal electrons in MeV
E_hi = 0.1
nrl = nrlh * (Emin ** (1. - delta) - Emax * (1. - delta)) / (E_hi ** (1. - delta) - Emax ** (1. - delta))
Nf = 100 # number of frequencies
NSteps = 1 # number of nodes along the line-of-sight
N_E = 15 # number of energy nodes
N_mu = 15 # number of pitch-angle nodes
Lparms = np.zeros(11, dtype='int32') # array of dimensions etc.
Lparms[0] = NSteps
Lparms[1] = Nf
Lparms[2] = N_E
Lparms[3] = N_mu
Rparms = np.zeros(5, dtype='double') # array of global floating-point parameters
Rparms[0] = ssz * arcsec2cm ** 2 # Area, cm^2
# Rparms[0] = 1e20 # area, cm^2
Rparms[1] = 1e9 # starting frequency to calculate spectrum, Hz
Rparms[2] = 0.02 # logarithmic step in frequency
Rparms[3] = 12 # f^C
Rparms[4] = 12 # f^WH
ParmLocal = np.zeros(24, dtype='double') # array of voxel parameters - for a single voxel
ParmLocal[0] = depth * arcsec2cm / NSteps # voxel depth, cm
ParmLocal[1] = Tth * 1e6 # T_0, K
ParmLocal[2] = nth * 1e10 # n_0 - thermal electron density, cm^{-3}
ParmLocal[3] = Bmag # B - magnetic field, G
Parms = np.zeros((24, NSteps), dtype='double', order='F') # 2D array of input parameters - for multiple voxels
for i in range(NSteps):
Parms[:, i] = ParmLocal # most of the parameters are the same in all voxels
# if NSteps > 1:
# Parms[4, i] = 50.0 + 30.0 * i / (NSteps - 1) # the viewing angle varies from 50 to 80 degrees along the LOS
# else:
# Parms[4, i] = 50.0 # the viewing angle varies from 50 to 80 degrees along the LOS
Parms[4, i] = theta
# parameters of the electron distribution function
n_b = nrl # n_b - nonthermal electron density, cm^{-3}
mu_c = np.cos(np.pi * 70 / 180) # loss-cone boundary
dmu_c = 0.2 # Delta_mu
E_arr = np.logspace(np.log10(Emin), np.log10(Emax), N_E, dtype='double') # energy grid (logarithmically spaced)
mu_arr = np.linspace(-1.0, 1.0, N_mu, dtype='double') # pitch-angle grid
f0 = np.zeros((N_E, N_mu), dtype='double') # 2D distribution function array - for a single voxel
# computing the distribution function (equivalent to PLW & GLC)
A = n_b / (2.0 * np.pi) * (delta - 1.0) / (Emin ** (1.0 - delta) - Emax ** (1.0 - delta))
B = 0.5 / (mu_c + dmu_c * np.sqrt(np.pi) / 2 * math.erf((1.0 - mu_c) / dmu_c))
for i in range(N_E):
for j in range(N_mu):
amu = abs(mu_arr[j])
f0[i, j] = A * B * E_arr[i] ** (-delta) * (1.0 if amu < mu_c else np.exp(-((amu - mu_c) / dmu_c) ** 2))
f_arr = np.zeros((N_E, N_mu, NSteps), dtype='double',
order='F') # 3D distribution function array - for multiple voxels
for k in range(NSteps):
f_arr[:, :, k] = f0 # electron distribution function is the same in all voxels
RL = np.zeros((7, Nf), dtype='double', order='F') # input/output array
# calculating the emission for array distribution (array -> on)
res = GET_MW(Lparms, Rparms, Parms, E_arr, mu_arr, f_arr, RL)
if res:
# retrieving the results
f = RL[0]
I_L = RL[5]
I_R = RL[6]
if showplt:
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 1)
ax.plot(f, I_L + I_R)
ax.set_xscale('log')
ax.set_yscale('log')
ax.set_title('Total intensity (array)')
ax.set_xlabel('Frequency, GHz')
ax.set_ylabel('Intensity, sfu')
flx_model = I_L + I_R
flx_model = np.nan_to_num(flx_model) + 1e-11
logf = np.log10(f)
logflx_model = np.log10(flx_model)
logfreqghz = np.log10(freqghz)
interpfunc = interpolate.interp1d(logf, logflx_model, kind='linear')
logmflx = interpfunc(logfreqghz)
mflx = 10. ** logmflx
mtb = sfu2tb(np.array(freqghz) * 1.e9, mflx, ssz)
else:
print("Calculation error!")
if tb is None:
return mtb
if tb_err is None:
# return mTb - Tb
return mtb - tb
# wt = 1./flx_err
# wt = 1./(Tb_err/Tb/np.log(10.))
# residual = np.abs((logmTb - np.log10(Tb))) * wt
# residual = np.abs((mflx - flx)) | |
<reponame>bayeshack2016/icon-service
# -*- coding: utf-8 -*-
# Copyright 2018 ICON Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""IconScoreEngine testcase
"""
from typing import TYPE_CHECKING, List, Tuple, Dict, Union, Optional
from iconservice.base.address import Address
from iconservice.base.address import SYSTEM_SCORE_ADDRESS
from iconservice.base.type_converter_templates import ConstantKeys
from iconservice.icon_constant import ConfigKey, Revision, PREP_MAIN_PREPS, \
PREP_MAIN_AND_SUB_PREPS
from iconservice.iconscore.icon_score_context import IconScoreContext
from iconservice.iiss import IISSMethod
from iconservice.prep import PRepMethod
from iconservice.prep.data import Term
from iconservice.utils import icx_to_loop
from tests.integrate_test.test_integrate_base import TestIntegrateBase, TOTAL_SUPPLY, MINIMUM_STEP_LIMIT
if TYPE_CHECKING:
from iconservice.base.block import Block
from iconservice.iconscore.icon_score_result import TransactionResult
from tests.integrate_test.test_integrate_base import EOAAccount
class TestIISSBase(TestIntegrateBase):
    """Shared base for IISS integration tests.

    Provides block-production helpers, builders for stake/delegation/P-Rep
    transactions, and query wrappers against the system SCORE.
    """

    # Blocks per I-Score calculation cycle (wired to ConfigKey.IISS_CALCULATE_PERIOD).
    CALCULATE_PERIOD = 10
    # Blocks per P-Rep term (wired to ConfigKey.TERM_PERIOD).
    TERM_PERIOD = 10
def _make_init_config(self) -> dict:
return {
ConfigKey.SERVICE: {
ConfigKey.SERVICE_FEE: True
},
ConfigKey.IISS_CALCULATE_PERIOD: self.CALCULATE_PERIOD,
ConfigKey.TERM_PERIOD: self.TERM_PERIOD,
ConfigKey.IISS_META_DATA: {
ConfigKey.UN_STAKE_LOCK_MIN: 10,
ConfigKey.UN_STAKE_LOCK_MAX: 20
}
}
def make_blocks(self,
to: int,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None,
prev_block_votes: Optional[List[Tuple['Address', int]]] = None) \
-> List[List['TransactionResult']]:
block_height = self._block_height
tx_results: List[List['TransactionResult']] = []
while to > block_height:
tx = self.create_transfer_icx_tx(self._admin, self._genesis, 0)
tx_results.append(self.process_confirm_block_tx([tx],
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators,
prev_block_votes=prev_block_votes))
block_height = self._block_height
return tx_results
def make_empty_blocks(self,
count: int,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None,
prev_block_votes: Optional[List[Tuple['Address', int]]] = None) \
-> List[List['TransactionResult']]:
tx_results: List[List['TransactionResult']] = []
for _ in range(count):
tx_results.append(
self.process_confirm_block_tx(
[],
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators,
prev_block_votes=prev_block_votes
)
)
return tx_results
def make_blocks_to_end_calculation(self,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None,
prev_block_votes: Optional[List[Tuple['Address', int]]] = None) -> int:
iiss_info: dict = self.get_iiss_info()
next_calculation: int = iiss_info.get('nextCalculation', 0)
cur_block_height: int = self._block_height
if cur_block_height == next_calculation - 1:
# last calculate block
self.make_blocks(to=next_calculation,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators,
prev_block_votes=prev_block_votes)
iiss_info: dict = self.get_iiss_info()
next_calculation: int = iiss_info.get('nextCalculation', 0)
self.make_blocks(to=next_calculation - 1,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators,
prev_block_votes=prev_block_votes)
self.assertEqual(self._block_height, next_calculation - 1)
return next_calculation - 1
def create_set_stake_tx(self,
from_: Union['EOAAccount', 'Address'],
value: int) -> dict:
return self.create_score_call_tx(from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=IISSMethod.SET_STAKE,
params={"value": hex(value)})
def create_set_delegation_tx(self,
from_: Union['EOAAccount', 'Address'],
origin_delegations: List[Tuple[Union['EOAAccount', 'Address'], int]]) -> dict:
delegations: List[Dict[str, str]] = self.create_delegation_params(origin_delegations)
return self.create_score_call_tx(from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=IISSMethod.SET_DELEGATION,
params={"delegations": delegations})
@classmethod
def create_delegation_params(cls, params: List[Tuple[Union['EOAAccount', 'Address'], int]]) -> List[Dict[str, str]]:
return [{"address": str(cls._convert_address_from_address_type(address)), "value": hex(value)}
for (address, value) in params
if value > 0]
def create_register_prep_tx(self,
from_: 'EOAAccount',
reg_data: Dict[str, Union[str, bytes]] = None,
value: int = None) -> dict:
if value is None:
value: int = self._config[ConfigKey.PREP_REGISTRATION_FEE]
if reg_data is None:
reg_data: dict = self.create_register_prep_params(from_)
return self.create_score_call_tx(from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=PRepMethod.REGISTER,
params=reg_data,
value=value)
@classmethod
def create_register_prep_params(cls,
from_: 'EOAAccount') -> Dict[str, str]:
name = str(from_)
return {
ConstantKeys.NAME: name,
ConstantKeys.COUNTRY: "KOR",
ConstantKeys.CITY: "Unknown",
ConstantKeys.EMAIL: f"{<EMAIL>",
ConstantKeys.WEBSITE: f"https://{name}.example.com",
ConstantKeys.DETAILS: f"https://{name}.example.com/details",
ConstantKeys.P2P_ENDPOINT: f"{name}.example.com:7100",
}
def create_set_prep_tx(self,
from_: Union['EOAAccount', 'Address'],
set_data: Dict[str, Union[str, bytes]] = None) -> dict:
if set_data is None:
set_data: dict = {}
return self.create_score_call_tx(from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=PRepMethod.SET_PREP,
params=set_data)
def create_set_governance_variables(self,
from_: Union['EOAAccount', 'Address'],
irep: int) -> dict:
"""Create a setGovernanceVariables TX
:param from_:
:param irep: irep in loop
:return:
"""
return self.create_score_call_tx(
from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=PRepMethod.SET_GOVERNANCE_VARIABLES,
params={"irep": hex(irep)}
)
def create_unregister_prep_tx(self,
from_: 'EOAAccount') -> dict:
return self.create_score_call_tx(from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=PRepMethod.UNREGISTER,
params={})
def create_claim_tx(self,
from_: Union['EOAAccount', 'Address']) -> dict:
return self.create_score_call_tx(from_=from_,
to_=SYSTEM_SCORE_ADDRESS,
func_name=IISSMethod.CLAIM_ISCORE,
params={})
def get_prep_term(self) -> dict:
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": PRepMethod.GET_PREP_TERM
}
}
return self._query(query_request)
def get_main_prep_list(self) -> dict:
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": PRepMethod.GET_MAIN_PREPS,
"params": {}
}
}
return self._query(query_request)
def get_sub_prep_list(self) -> dict:
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": PRepMethod.GET_SUB_PREPS,
"params": {}
}
}
return self._query(query_request)
def get_prep_list(self,
start_ranking: Optional[int] = None,
end_ranking: Optional[int] = None) -> dict:
params = {}
if start_ranking is not None:
params['startRanking'] = hex(start_ranking)
if end_ranking is not None:
params['endRanking'] = hex(end_ranking)
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": PRepMethod.GET_PREPS,
"params": params
}
}
return self._query(query_request)
def get_prep(self,
from_: Union['EOAAccount', 'Address', str]) -> dict:
address: Optional['Address'] = self._convert_address_from_address_type(from_)
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": PRepMethod.GET_PREP,
"params": {"address": str(address)}
}
}
return self._query(query_request)
def get_stake(self,
from_: Union['EOAAccount', 'Address', str]) -> dict:
address: Optional['Address'] = self._convert_address_from_address_type(from_)
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": IISSMethod.GET_STAKE,
"params": {"address": str(address)}
}
}
return self._query(query_request)
def estimate_unstake_lock_period(self) -> dict:
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": IISSMethod.ESTIMATE_UNLOCK_PERIOD,
}
}
return self._query(query_request)
def get_delegation(self,
from_: Union['EOAAccount', 'Address', str]) -> dict:
address: Optional['Address'] = self._convert_address_from_address_type(from_)
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": IISSMethod.GET_DELEGATION,
"params": {"address": str(address)}
}
}
return self._query(query_request)
def query_iscore(self,
address: Union['EOAAccount', 'Address', str]) -> dict:
address: Optional['Address'] = self._convert_address_from_address_type(address)
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": IISSMethod.QUERY_ISCORE,
"params": {"address": str(address)}
}
}
return self._query(query_request)
def get_iiss_info(self) -> dict:
query_request = {
"version": self._version,
"from": self._admin,
"to": SYSTEM_SCORE_ADDRESS,
"dataType": "call",
"data": {
"method": "getIISSInfo",
"params": {}
}
}
return self._query(query_request)
# ===== API =====#
def claim_iscore(self,
from_: Union['EOAAccount', 'Address'],
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_claim_tx(from_=from_)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def set_stake(self,
from_: Union['EOAAccount', 'Address'],
value: int,
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_set_stake_tx(from_=from_,
value=value)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def set_delegation(self,
from_: Union['EOAAccount', 'Address'],
origin_delegations: List[Tuple[Union['EOAAccount', 'Address'], int]],
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_set_delegation_tx(from_=from_,
origin_delegations=origin_delegations)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def register_prep(self,
from_: 'EOAAccount',
reg_data: Dict[str, Union[str, bytes]] = None,
value: int = None,
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_register_prep_tx(from_=from_,
reg_data=reg_data,
value=value)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def unregister_prep(self,
from_: 'EOAAccount',
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_unregister_prep_tx(from_=from_)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def set_governance_variables(self,
from_: Union['EOAAccount', 'Address'],
irep: int,
expected_status: bool = True,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx: dict = self.create_set_governance_variables(from_=from_,
irep=irep)
return self.process_confirm_block_tx([tx],
expected_status=expected_status,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def distribute_icx(self,
accounts: List[Union['EOAAccount', 'Address']],
init_balance: int,
prev_block_generator: Optional['Address'] = None,
prev_block_validators: Optional[List['Address']] = None) -> List['TransactionResult']:
tx_list: List[dict] = []
for account in accounts:
tx: dict = self.create_transfer_icx_tx(from_=self._admin,
to_=account,
value=init_balance)
tx_list.append(tx)
return self.process_confirm_block_tx(tx_list,
prev_block_generator=prev_block_generator,
prev_block_validators=prev_block_validators)
def init_decentralized(self, network_proposal: bool = False, clear: bool = True):
"""
:param network_proposal: update governance score to enable network proposal
:param clear: clear stake, delegation and balance of self._accounts
:return:
"""
# decentralized
self.update_governance()
# set Revision REV_IISS
self.set_revision(Revision.IISS.value)
total_supply = icx_to_loop(TOTAL_SUPPLY)
# Minimum_delegate_amount is 0.02 * total_supply
# In this test delegate 0.03*total_supply because `Issue transaction` exists since REV_IISS
minimum_delegate_amount_for_decentralization: int = total_supply * 2 // 1000 + 1
init_balance: int = minimum_delegate_amount_for_decentralization * 2
# distribute icx PREP_MAIN_PREPS ~ PREP_MAIN_PREPS + PREP_MAIN_PREPS - 1
self.distribute_icx(accounts=self._accounts[PREP_MAIN_PREPS:PREP_MAIN_AND_SUB_PREPS],
init_balance=init_balance)
# stake PREP_MAIN_PREPS ~ PREP_MAIN_PREPS + PREP_MAIN_PREPS - 1
stake_amount: int = minimum_delegate_amount_for_decentralization
tx_list: list = []
for i in range(PREP_MAIN_PREPS):
tx: dict = self.create_set_stake_tx(from_=self._accounts[PREP_MAIN_PREPS + i],
value=stake_amount)
tx_list.append(tx)
self.process_confirm_block_tx(tx_list)
# distribute 3000 icx to the self._accounts
# which range from 0 to PREP_MAIN_PREPS, exclusive
self.distribute_icx(accounts=self._accounts[:PREP_MAIN_PREPS],
init_balance=icx_to_loop(3000))
# register PRep
tx_list: list = []
for account in self._accounts[:PREP_MAIN_PREPS]:
tx: dict = self.create_register_prep_tx(from_=account)
tx_list.append(tx)
self.process_confirm_block_tx(tx_list)
# delegate to PRep
tx_list: list = []
for i in range(PREP_MAIN_PREPS):
tx: dict = self.create_set_delegation_tx(from_=self._accounts[PREP_MAIN_PREPS + i],
origin_delegations=[
(
self._accounts[i],
minimum_delegate_amount_for_decentralization
)
])
tx_list.append(tx)
self.process_confirm_block_tx(tx_list)
# get main prep
response: dict = self.get_main_prep_list()
expected_response: dict = {
"preps": [],
"totalDelegated": 0
}
self.assertEqual(expected_response, response)
# set Revision REV_IISS (decentralization)
self.set_revision(Revision.DECENTRALIZATION.value)
if network_proposal:
# Update governance SCORE-1.0.0 to support | |
<gh_stars>100-1000
#!/usr/bin/env python
# Copyright (C) 2013 Ion Torrent Systems, Inc. All Rights Reserved
# python system package
import json
import datetime
import uuid
import random
import string
import logging
import copy
from traceback import format_exc
# django package
from iondb.bin.djangoinit import *
from django.db import transaction
from iondb.rundb import models
logger = logging.getLogger(__name__)
# Instrument-family keys used throughout this module.
PGM = "PGM"
PROTON = "PROTON"
S5 = "S5"
DEFAULT_3_PRIME_ADAPTER_SEQUENCE = "ATCACCGACTGCCCATAGAGAGGCTGAGAC"  # Ion P1B
# MuSeek variant: a 16-base prefix followed by the default P1B adapter above.
DEFAULT_MUSEEK_3_PRIME_ADAPTER_SEQUENCE = (
    "TGAACTGACGCACGAAATCACCGACTGCCCATAGAGAGGCTGAGAC"
)
# set to True to cache template params and plugin config without committing
_OFFCYCLE_DEBUG = False
_CACHE_TEMPLATE_PARAMS = {}
def cache_template_params(func):
def wrapper_func(params, *args, **kwargs):
name = params.templateName if params else ""
if name:
if name in _CACHE_TEMPLATE_PARAMS:
_CACHE_TEMPLATE_PARAMS[name].update(copy.deepcopy(params.__dict__))
else:
_CACHE_TEMPLATE_PARAMS[name] = copy.deepcopy(params.__dict__)
# offcycle JSON use 'application' in place of 'runType'
if "runType" in _CACHE_TEMPLATE_PARAMS[name]:
application = _CACHE_TEMPLATE_PARAMS[name].pop("runType")
_CACHE_TEMPLATE_PARAMS[name]["application"] = application
return func(params, *args, **kwargs)
return wrapper_func
def cache_plugin_config(func):
    """Decorator: capture the plugin configuration passed to `func` into
    _CACHE_TEMPLATE_PARAMS[templateName]["plugins_preselect"].

    The template name is taken from positional arg 2 (or kwarg
    "templateParams") and the plugin dict from positional arg 3 (or kwarg
    "plugins"). Only the "name" and "userInput" fields of each plugin config
    are retained.

    NOTE(review): this assumes cache_template_params has already created the
    _CACHE_TEMPLATE_PARAMS[name] entry — confirm decorator ordering.
    Fixes: functools.wraps added; iterate dict values directly instead of
    discarding keys from .items().
    """
    import functools

    @functools.wraps(func)
    def wrapper_func(*args, **kwargs):
        template_params_index = 2
        plugin_index = 3
        if len(args) > template_params_index:
            name = args[template_params_index].templateName
        elif kwargs.get("templateParams", None):
            name = kwargs["templateParams"].templateName
        else:
            name = ""
        if len(args) > plugin_index:
            plugin_config = args[plugin_index]
        elif kwargs.get("plugins", None):
            plugin_config = kwargs["plugins"]
        else:
            plugin_config = {}
        if name and plugin_config:
            # offcycle JSON uses "plugins_preselect" as the key for the list
            # of plugin configuration dicts; keep only name and userInput.
            plugin_list = [
                {"name": config.get("name"), "userInput": config.get("userInput")}
                for config in plugin_config.values()
            ]
            _CACHE_TEMPLATE_PARAMS[name]["plugins_preselect"] = plugin_list
        return func(*args, **kwargs)

    return wrapper_func
class TemplateParams:
    """Bag of default parameters for a system run template.

    Field groups mirror the target models: PlannedExperiment, Experiment and
    ExperimentAnalysisSettings (EAS). The `instrument` argument (PGM, PROTON
    or S5) selects kit/flow/status defaults.

    Fixes: unknown instrument/field errors now raise ValueError (a subclass
    of the original Exception, so existing handlers still work); removed the
    pointless list() wrappers in update().
    """

    def __init__(self, templateName, instrument, runType="GENS"):
        # PlannedExperiment fields
        self.templateName = templateName
        self.runType = runType
        self.applicationGroup = "DNA"
        self.sampleGrouping = None
        self.categories = ""
        self.usePreBeadfind = True
        self.usePostBeadfind = True
        self.templatingKitName = ""
        self.controlSequencekitname = None
        self.samplePrepKitName = None
        self.libraryReadLength = 0
        self.samplePrepProtocol = ""
        self.irworkflow = ""
        # Experiment
        self.chipType = ""
        self.flows = 0
        self.sequencekitname = ""
        self.flowOrder = ""
        # EAS
        self.barcodeKitName = ""
        self.libraryKey = "TCAG"
        self.tfKey = "ATCG"
        self.threePrimeAdapter = ""
        self.reference = ""
        self.targetRegionBedFile = ""
        self.hotSpotRegionBedFile = ""
        self.libraryKitName = ""
        self.selectedPlugins = ""
        self.endBarcodeKitName = ""

        # Per-instrument defaults (kits, flows, beadfind flags, plan status).
        if instrument == PGM:
            self.instrumentType = "PGM"
            self.libraryKitName = "Ion Xpress Plus Fragment Library Kit"
            self.templatingKitName = "Ion PGM Hi-Q View OT2 Kit - 200"
            self.sequencekitname = "IonPGMHiQView"
            self.threePrimeAdapter = DEFAULT_3_PRIME_ADAPTER_SEQUENCE
            self.flows = 500
            self.planStatus = "inactive"
        elif instrument == PROTON:
            self.instrumentType = "PROTON"
            self.libraryKitName = "Ion Xpress Plus Fragment Library Kit"
            self.templatingKitName = "Ion PI Template OT2 200 Kit v3"
            self.sequencekitname = "ProtonI200Kit-v3"
            self.threePrimeAdapter = DEFAULT_3_PRIME_ADAPTER_SEQUENCE
            self.flows = 260
            self.usePreBeadfind = True
            self.usePostBeadfind = False
            self.planStatus = "inactive"
        elif instrument == S5:
            self.instrumentType = "S5"
            self.libraryKitName = "Ion Xpress Plus Fragment Library Kit"
            self.templatingKitName = "Ion 540 Control Ion Spheres"
            self.sequencekitname = "Ion S5 Sequencing Kit"
            self.threePrimeAdapter = DEFAULT_3_PRIME_ADAPTER_SEQUENCE
            self.flows = 500
            self.usePreBeadfind = True
            self.usePostBeadfind = False
            self.planStatus = "planned"
        else:
            raise ValueError("Unknown instrument key: %s" % instrument)

    def update(self, d):
        """Bulk-assign known attributes from `d`; raise on any unknown key."""
        known_fields = self.__dict__.keys()
        for key, value in d.items():
            if key not in known_fields:
                raise ValueError("Incorrect field key: %s" % key)
            setattr(self, key, value)
def finish_creating_sys_template(currentTime, sysTemplate, templateParams):
    """Assign GUID/short ID to a newly created system template, persist it,
    and attach a default QC record per QC type.

    `templateParams` is unused here but kept for signature parity with the
    sibling create/finish helpers.

    Fixes: removed the redundant list() wrappers around range(5) and hoisted
    the short-ID alphabet out of the generation expressions.
    """
    sysTemplate.planGUID = str(uuid.uuid4())
    sysTemplate.date = currentTime

    id_alphabet = string.ascii_uppercase + string.digits
    # Draw 5-char short IDs until one is not already used by a pending
    # (not-yet-executed) plan.
    planShortID = "".join(random.choice(id_alphabet) for _ in range(5))
    while models.PlannedExperiment.objects.filter(
        planShortID=planShortID, planExecuted=False
    ):
        planShortID = "".join(random.choice(id_alphabet) for _ in range(5))
    print(
        (
            "...Finished creating System template.id=%d; name=%s; shortID=%s"
            % (sysTemplate.id, sysTemplate.planDisplayedName, str(planShortID))
        )
    )
    sysTemplate.planShortID = planShortID
    sysTemplate.save()

    # Attach a default QC threshold of 30 for every known QC type.
    for qcType in models.QCType.objects.all():
        sysDefaultQC, isQcCreated = models.PlannedExperimentQC.objects.get_or_create(
            plannedExperiment=sysTemplate, qcType=qcType, threshold=30
        )
        sysTemplate.plannedexperimentqc_set.add(sysDefaultQC)
    sysTemplate.save()
def create_sys_template_experiment(currentTime, sysTemplate, templateParams):
    """Create and save the Experiment row backing a system template.

    Several fields (expName, unique) are filled with temporary placeholder
    values that the crawler later replaces.
    """
    experiment = models.Experiment(
        # run setup pulled from the template parameters
        autoAnalyze=True,
        chipType=templateParams.chipType,
        date=currentTime,
        flows=templateParams.flows,
        plan=sysTemplate,
        sequencekitname=templateParams.sequencekitname,
        flowsInOrder=templateParams.flowOrder,
        status=templateParams.planStatus,
        runMode=sysTemplate.runMode,
        # temp experiment name value below will be replaced in crawler
        expName=sysTemplate.planGUID,
        displayName=sysTemplate.planShortID,
        # db constraint requires a unique value for experiment. temp unique
        # value below will be replaced in crawler
        unique=sysTemplate.planGUID,
        # remaining fields default to empty/zero placeholders
        pgmName="",
        log="",
        chipBarcode="",
        seqKitBarcode="",
        sequencekitbarcode="",
        reagentBarcode="",
        cycles=0,
        diskusage=0,
        expCompInfo="",
        baselineRun="",
        ftpStatus="",
        storageHost="",
        notes="",
    )
    experiment.save()
    print(
        "*** AFTER saving experiment.id=%d for system template.id=%d; name=%s"
        % (experiment.id, sysTemplate.id, sysTemplate.planName)
    )
    return experiment
def create_sys_template_eas(
    currentTime, experiment, sysTemplate, templateParams, plugins
):
    """Create the ExperimentAnalysisSettings row for a system template,
    link it as the template's latest EAS, and return the template."""
    eas = models.ExperimentAnalysisSettings(
        barcodedSamples="",
        barcodeKitName=templateParams.barcodeKitName,
        date=currentTime,
        endBarcodeKitName=templateParams.endBarcodeKitName,
        experiment=experiment,
        hotSpotRegionBedFile=templateParams.hotSpotRegionBedFile,
        isEditable=True,
        isOneTimeOverride=False,
        libraryKey=templateParams.libraryKey,
        libraryKitName=templateParams.libraryKitName,
        reference=templateParams.reference,
        selectedPlugins=plugins,
        status=sysTemplate.planStatus,
        targetRegionBedFile=templateParams.targetRegionBedFile,
        threePrimeAdapter=templateParams.threePrimeAdapter,
        tfKey=templateParams.tfKey,
    )
    eas.save()
    sysTemplate.latestEAS = eas
    sysTemplate.save()
    print(
        "*** AFTER saving EAS.id=%d for system template.id=%d; name=%s"
        % (eas.id, sysTemplate.id, sysTemplate.planName)
    )
    return sysTemplate
@cache_plugin_config
def finish_sys_template(sysTemplate, isCreated, templateParams, plugins=None):
    """Bring a system template's Experiment and EAS in sync with templateParams.

    If the template was just created (or has no experiment yet), creates a
    fresh experiment + EAS. Otherwise, diffs each tracked field against
    templateParams, logs every difference, and saves only when something
    actually changed.

    Parameters
    ----------
    sysTemplate : plan model instance (has id, planName, planStatus, latestEAS)
        -- exact model type not visible here; TODO confirm
    isCreated : bool
        whether the template was newly created by the caller
    templateParams : object carrying the desired field values
    plugins : dict or None
        selected-plugins dict to store on the EAS (defaults to empty)

    Returns
    -------
    sysTemplate (possibly updated), or the result of create_sys_template_eas()
    when a new EAS had to be created.
    """
    # Avoid a shared mutable default argument: this dict is stored on the EAS
    # below (eas.selectedPlugins = plugins), so a module-level default {} could
    # be aliased and mutated across calls.
    if plugins is None:
        plugins = {}

    # when debug, do nothing
    if _OFFCYCLE_DEBUG:
        return sysTemplate

    currentTime = datetime.datetime.now()

    def _sync_field(obj, prefix, attr, new_value):
        """Set obj.<attr> = new_value if different; log the diff.

        Returns True when the field was changed, else False.
        """
        old_value = getattr(obj, attr)
        if old_value == new_value:
            return False
        print(
            ">>> DIFF: orig %s.%s=%s for system template.id=%d; name=%s"
            % (prefix, attr, old_value, sysTemplate.id, sysTemplate.planName)
        )
        setattr(obj, attr, new_value)
        return True

    if isCreated:
        finish_creating_sys_template(currentTime, sysTemplate, templateParams)
        experiment = create_sys_template_experiment(
            currentTime, sysTemplate, templateParams
        )
        create_sys_template_eas(
            currentTime, experiment, sysTemplate, templateParams, plugins
        )
        # NOTE(review): no return here in the original either -- execution
        # falls through to the diff logic below against the experiment just
        # created (which should produce no diffs). Confirm this is intended.

    exps = models.Experiment.objects.filter(plan=sysTemplate)
    if not exps:
        # no experiment exists for this template: create one plus its EAS
        experiment = create_sys_template_experiment(
            currentTime, sysTemplate, templateParams
        )
        return create_sys_template_eas(
            currentTime, experiment, sysTemplate, templateParams, plugins
        )

    exp = exps[0]
    hasChanges = False

    hasChanges = _sync_field(exp, "exp", "status", sysTemplate.planStatus) or hasChanges
    hasChanges = _sync_field(exp, "exp", "chipType", templateParams.chipType) or hasChanges
    hasChanges = _sync_field(exp, "exp", "flows", templateParams.flows) or hasChanges
    hasChanges = _sync_field(exp, "exp", "sequencekitname", templateParams.sequencekitname) or hasChanges

    # platform logs both the old and the new value, so it is handled directly
    if exp.platform != templateParams.instrumentType:
        print(
            ">>> DIFF: orig exp.platform=%s new instrumentType=%s for system template.id=%d; name=%s"
            % (
                exp.platform,
                templateParams.instrumentType,
                sysTemplate.id,
                sysTemplate.planName,
            )
        )
        exp.platform = templateParams.instrumentType
        hasChanges = True

    # (original log message said "flowInOrder"; the field is flowsInOrder)
    hasChanges = _sync_field(exp, "exp", "flowsInOrder", templateParams.flowOrder) or hasChanges

    if hasChanges:
        exp.date = currentTime
        exp.save()
        print(
            "*** AFTER updating experiment.id=%d for system template.id=%d; name=%s"
            % (exp.id, sysTemplate.id, sysTemplate.planName)
        )

    eas_set = models.ExperimentAnalysisSettings.objects.filter(
        experiment=exp, isEditable=True, isOneTimeOverride=False
    )
    if not eas_set:
        return create_sys_template_eas(
            currentTime, exp, sysTemplate, templateParams, plugins
        )

    eas = eas_set[0]
    hasChanges = False

    hasChanges = _sync_field(eas, "eas", "barcodeKitName", templateParams.barcodeKitName) or hasChanges
    hasChanges = _sync_field(eas, "eas", "endBarcodeKitName", templateParams.endBarcodeKitName) or hasChanges
    hasChanges = _sync_field(eas, "eas", "hotSpotRegionBedFile", templateParams.hotSpotRegionBedFile) or hasChanges
    # (original log message had a typo: "libraryKeye")
    hasChanges = _sync_field(eas, "eas", "libraryKey", templateParams.libraryKey) or hasChanges
    hasChanges = _sync_field(eas, "eas", "libraryKitName", templateParams.libraryKitName) or hasChanges
    hasChanges = _sync_field(eas, "eas", "reference", templateParams.reference) or hasChanges

    # selectedPlugins is a dict, compared structurally rather than with !=
    if not simple_compare_dict(eas.selectedPlugins, plugins):
        print(
            ">>> DIFF: orig eas.selectedPlugins=%s for system template.id=%d; name=%s"
            % (eas.selectedPlugins, sysTemplate.id, sysTemplate.planName)
        )
        print(
            ">>> DIFF: NEW selectedPlugins=%s for system template.id=%d; name=%s"
            % (plugins, sysTemplate.id, sysTemplate.planName)
        )
        eas.selectedPlugins = plugins
        hasChanges = True

    hasChanges = _sync_field(eas, "eas", "status", sysTemplate.planStatus) or hasChanges
    hasChanges = _sync_field(eas, "eas", "targetRegionBedFile", templateParams.targetRegionBedFile) or hasChanges
    hasChanges = _sync_field(eas, "eas", "threePrimeAdapter", templateParams.threePrimeAdapter) or hasChanges
    hasChanges = _sync_field(eas, "eas", "tfKey", templateParams.tfKey) or hasChanges

    # keep the template pointing at this EAS as its latest
    if sysTemplate.latestEAS != eas:
        print(
            ">>> DIFF: orig eas.latestEAS=%s for system template.id=%d; name=%s"
            % (sysTemplate.latestEAS, sysTemplate.id, sysTemplate.planName)
        )
        sysTemplate.latestEAS = eas
        sysTemplate.save()

    if hasChanges:
        eas.date = currentTime
        eas.save()
        print(
            "*** AFTER saving EAS.id=%d for system default template.id=%d; name=%s"
            % (eas.id, sysTemplate.id, sysTemplate.planName)
        )

    return sysTemplate
def _get_plugin_dict(pluginName, userInput={}):
try:
| |
"""Functions for loading/saving/reducing flash data
General terminology
-------------------
dat: Integrated time-series quantities found in [model].dat file
chk: Checkpoint data found in 'chk' files
profile: Radial profile data as extracted from chk files
multiprofile: A single Dataset of multiple profiles
log: Data printed to terminal during model, stored in .log files
tracers: Mass shell tracers, interpolated from profiles
extract: Extract and reduce data from raw output files
save: Save pre-extracted data to file
load: Load pre-extracted data from file
get: Get reduced data by first attempting 'load', then fall back on 'extract'
"""
import os
import numpy as np
import pandas as pd
import xarray as xr
import subprocess
import sys
import yt
import h5py
# flashbang
from . import paths
from .quantities import get_mass_enclosed
from .extract_tracers import extract_multi_tracers
from .tools import get_missing_elements, printv
from .config import Config, check_config
# ===============================================================
# Cache files
# ===============================================================
def load_cache(name, run, model, model_set,
               chk=None,
               verbose=True):
    """Load pre-cached reduced data from disk

    Returns : pd.DataFrame or xr.Dataset (depending on cache type)

    parameters
    ----------
    name : str
        one of: 'dat', 'chk_table', 'timesteps', 'multiprofile', 'profile', 'tracers'
    run : str
    model : str
    model_set : str
    chk : int
    verbose : bool
    """
    filepath = paths.cache_filepath(name,
                                    run=run,
                                    model=model,
                                    model_set=model_set,
                                    chk=chk)
    printv(f'Loading {name} cache: {filepath}', verbose)

    pickle_caches = ('dat', 'chk_table', 'timesteps')
    netcdf_caches = ('multiprofile', 'profile', 'tracers')

    if name in pickle_caches:
        data = pd.read_pickle(filepath)
        # timesteps tables are stored flat; restore the chk index
        if name == 'timesteps':
            data.set_index('chk', inplace=True)
    elif name in netcdf_caches:
        data = xr.load_dataset(filepath)
    else:
        raise ValueError(f"'{name}' not a valid cache type")

    return data
def save_cache(name, data, run, model, model_set,
               chk=None,
               verbose=True):
    """Write extracted data to a cache file for faster future loading

    parameters
    ----------
    name : str
        one of: 'dat', 'chk_table', 'timesteps', 'multiprofile', 'profile', 'tracers'
    data : pd.DataFrame or xr.DataSet
    run : str
    model : str
    model_set : str
    chk : int
    verbose : bool
    """
    ensure_cache_dir_exists(model, model_set=model_set, verbose=False)
    filepath = paths.cache_filepath(name,
                                    run=run,
                                    model=model,
                                    model_set=model_set,
                                    chk=chk)
    printv(f'Saving {name} cache: {filepath}', verbose)

    if name in ('dat', 'chk_table', 'timesteps'):
        # timesteps is indexed by chk; flatten before pickling
        out = data.reset_index() if name == 'timesteps' else data
        out.to_pickle(filepath)
    elif name in ('multiprofile', 'profile', 'tracers'):
        data.to_netcdf(filepath)
    else:
        raise ValueError(f"'{name}' not a valid cache type")
# =======================================================================
# Dat files
# =======================================================================
def get_dat(run, model, model_set,
            cols_dict=None,
            derived=None,
            reload=False,
            save=True,
            config=None,
            verbose=True):
    """Get reduced set of integrated quantities, as contained in [run].dat file

    Tries the cache first, then falls back on extracting from the raw file.

    Returns : pandas.DataFrame

    parameters
    ----------
    run: str
    model : str
    model_set : str
    cols_dict : {}
        dictionary with column names and indexes (Note: 1-indexed)
    derived : [str]
        list of derived variables
    config : str or Config
    reload : bool
        force re-extraction from the raw .dat file
    save : bool
        cache the extracted table for next time
    verbose : bool
    """
    config = check_config(config, verbose=verbose)

    if cols_dict is None:
        cols_dict = config.dat('columns')
    if derived is None:
        derived = config.dat('derived')

    dat_table = None

    # 1. try the cache (unless forcing a reload)
    if not reload:
        try:
            dat_table = load_cache('dat',
                                   run=run,
                                   model=model,
                                   model_set=model_set,
                                   verbose=verbose)
        except FileNotFoundError:
            printv('dat cache not found, reloading', verbose)

    # 2. cache miss: extract from the raw .dat file
    if dat_table is None:
        dat_table = extract_dat(run=run,
                                model=model,
                                model_set=model_set,
                                cols_dict=cols_dict,
                                derived=derived,
                                config=config,
                                verbose=verbose)
        if save:
            save_cache('dat',
                       data=dat_table,
                       run=run,
                       model=model,
                       model_set=model_set,
                       verbose=verbose)

    return dat_table
def extract_dat(run, model, model_set,
                cols_dict=None,
                derived=None,
                config=None,
                verbose=True):
    """Extract and reduce data from .dat file

    Returns : pd.DataFrame of 1D time-series quantities

    parameters
    ----------
    run: str
    model : str
    model_set : str
    cols_dict : {}
        dictionary with column names and indexes (Note: 1-indexed)
    derived : [str]
        list of derived variables
    config: str or Config
    verbose : bool
    """
    config = check_config(config, verbose=verbose)

    if cols_dict is None:
        cols_dict = config.dat('columns')
    if derived is None:
        derived = config.dat('derived')

    filepath = paths.flash_filepath('dat',
                                    run=run,
                                    model=model,
                                    model_set=model_set)
    printv(f'Extracting dat: {filepath}', verbose=verbose)

    # cols_dict maps column name -> 1-indexed column number
    keys = list(cols_dict.keys())
    idxs = [idx_1 - 1 for idx_1 in cols_dict.values()]  # change to zero-indexed

    # NOTE(review): delim_whitespace is deprecated in pandas >= 2.2
    # (sep=r'\s+' is the equivalent) -- keep as-is until the pinned pandas
    # version is confirmed.
    dat = pd.read_csv(filepath,
                      usecols=idxs,
                      names=keys,
                      skiprows=1,
                      header=None,
                      delim_whitespace=True,
                      low_memory=False,
                      dtype='float64')

    dat.sort_values('time', inplace=True)  # ensure monotonic time

    if 'heat_eff' in derived:
        add_heat_eff(dat)

    return dat
def add_heat_eff(dat):
    """Add neutrino heating efficiency column ('heat_eff') to dat table, in-place

    heat_eff = gain_heat / (lnue + lnueb + gain_heat), where the neutrino
    luminosities are converted from units of 1e51 erg/s to erg/s.

    Parameters
    ----------
    dat : pd.DataFrame
        must contain 'lnue', 'lnueb' and 'gain_heat' columns

    Raises
    ------
    ValueError
        if any of the required columns is missing
    """
    required = ('lnue', 'lnueb', 'gain_heat')
    if any(col not in dat for col in required):
        # plain string (the original used an f-string with no placeholders)
        raise ValueError('Need lnue, lnueb, and gain_heat to calculate heat_eff')

    gain_heat = dat['gain_heat']
    lnue = 1e51 * dat['lnue']    # convert to erg/s
    lnueb = 1e51 * dat['lnueb']

    dat['heat_eff'] = gain_heat / (lnue + lnueb + gain_heat)
def print_dat_colnames(run, model, model_set):
    """Print all column names from the .dat file header, grouped by column number

    parameters
    ----------
    run : str
    model : str
    model_set : str
    """
    filepath = paths.flash_filepath('dat',
                                    run=run,
                                    model=model,
                                    model_set=model_set)

    with open(filepath, 'r') as f:
        header_words = f.readline().split()

    count = 1
    for word in header_words:
        # header words containing the running count start a new numbered entry
        if str(count) in word:
            print('\n%d' % count, end=' ')
            count += 1
        else:
            print(word, end=' ')
# ===============================================================
# Profiles
# ===============================================================
def get_multiprofile(run, model, model_set,
                     chk_list=None,
                     params=None,
                     derived_params=None,
                     config=None,
                     reload=False,
                     save=True,
                     verbose=True):
    """Get all available profiles as a single multiprofile Dataset

    see: get_all_profiles()

    parameters
    ----------
    run : str
    model : str
    model_set : str
    chk_list : [int]
    params : [str]
    derived_params : [str]
    config : str or Config
    reload : bool
    save : bool
    verbose : bool
    """
    def cache_multiprofile():
        # write the joined dataset back to the cache (no-op unless save=True)
        if save:
            save_cache('multiprofile',
                       data=multiprofile,
                       run=run,
                       model=model,
                       model_set=model_set,
                       verbose=verbose)

    if chk_list is None:
        chk_list = find_chk(run=run,
                            model=model,
                            model_set=model_set,
                            verbose=verbose)

    # 1. try the cached multiprofile (unless forcing a reload)
    multiprofile = None
    if not reload:
        multiprofile = try_load_multiprofile(run=run,
                                             model=model,
                                             model_set=model_set,
                                             verbose=verbose)

    if multiprofile is None:
        # 2. cache miss: load the individual profiles and join them
        profiles = get_all_profiles(run=run,
                                    model=model,
                                    model_set=model_set,
                                    chk_list=chk_list,
                                    params=params,
                                    derived_params=derived_params,
                                    save=save,
                                    verbose=verbose,
                                    config=config)
        multiprofile = join_profiles(profiles, verbose=verbose)
        cache_multiprofile()
    else:
        # 3. cache hit: append any profiles missing from the cached set
        cached_chks = multiprofile.coords['chk'].values
        missing_chk = get_missing_elements(chk_list, cached_chks)

        if len(missing_chk) > 0:
            printv('Loading missing profiles', verbose=verbose)
            missing_profiles = get_all_profiles(run=run,
                                                model=model,
                                                model_set=model_set,
                                                chk_list=missing_chk,
                                                params=params,
                                                save=save,
                                                verbose=verbose,
                                                derived_params=derived_params,
                                                config=config)
            multiprofile = append_to_multiprofile(multiprofile,
                                                  profiles=missing_profiles)
            cache_multiprofile()

    return multiprofile
def get_all_profiles(run, model, model_set,
                     chk_list=None,
                     params=None,
                     derived_params=None,
                     config=None,
                     reload=False,
                     save=True,
                     verbose=True):
    """Get all available chk profiles

    see: get_profile()

    Returns: {chk: profile}

    parameters
    ----------
    run : str
    model : str
    model_set : str
    chk_list : [int]
    params : [str]
    derived_params : [str]
    config : str or Config
    reload : bool
    save : bool
    verbose : bool
    """
    printv('Loading chk profiles', verbose=verbose)

    if chk_list is None:
        chk_list = find_chk(run=run,
                            model=model,
                            model_set=model_set,
                            verbose=verbose)

    last_chk = chk_list[-1]
    profiles = {}

    for chk in chk_list:
        # \r keeps the progress counter on a single terminal line
        printv(f'\rchk: {chk}/{last_chk}', end='', verbose=verbose)
        profiles[chk] = get_profile(chk,
                                    run=run,
                                    model=model,
                                    model_set=model_set,
                                    params=params,
                                    derived_params=derived_params,
                                    config=config,
                                    reload=reload,
                                    save=save,
                                    verbose=False)

    printv('', verbose=verbose)
    return profiles
def try_load_multiprofile(run, model, model_set, verbose=True):
    """Attempt to load cached multiprofile

    Returns : xr.Dataset, or None if no cache file exists

    parameters
    ----------
    run : str
    model : str
    model_set : str
    verbose : bool
    """
    try:
        return load_cache('multiprofile',
                          run=run,
                          model=model,
                          model_set=model_set,
                          verbose=verbose)
    except FileNotFoundError:
        printv('multiprofile cache not found, reloading', verbose=verbose)
        return None
def get_profile(chk, run, model, model_set,
                params=None,
                derived_params=None,
                config=None,
                reload=False,
                save=True,
                verbose=True):
    """Get reduced radial profile, as contained in checkpoint file

    Loads the pre-extracted profile if cached, otherwise from the raw chk file.

    Returns : xr.Dataset

    parameters
    ----------
    chk : int
    run : str
    model : str
    model_set : str
    params : [str]
        profile parameters to extract and return from chk file
    derived_params : [str]
        secondary profile parameters, derived from primary parameters
    config : str or Config
    reload : bool
        force reload from chk file, else try to load pre-extracted profile
    save : bool
        save extracted profile to file for faster loading
    verbose : bool
    """
    profile = None

    # 1. try the cache (unless forcing a reload)
    if not reload:
        try:
            profile = load_cache('profile',
                                 chk=chk,
                                 run=run,
                                 model=model,
                                 model_set=model_set,
                                 verbose=verbose)
        except FileNotFoundError:
            printv('profile cache not found, reloading', verbose)

    # 2. cache miss: extract from the raw chk file
    if profile is None:
        profile = extract_profile(chk,
                                  run=run,
                                  model=model,
                                  model_set=model_set,
                                  config=config,
                                  params=params,
                                  derived_params=derived_params)
        if save:
            save_cache('profile',
                       data=profile,
                       chk=chk,
                       run=run,
                       model=model,
                       model_set=model_set,
                       verbose=verbose)

    return profile
def join_profiles(profiles, verbose=True):
    """Join multiple profile Datasets into a single Dataset (a 'multiprofile')

    The profiles are concatenated along a new 'chk' dimension, labelled with
    the dict keys (insertion order is preserved).

    Returns : xr.Dataset

    parameters
    ----------
    profiles : {chk: profile}
        dict of profile Datasets to join (with corresponding chk as keys)
    verbose : bool
    """
    printv('Joining profiles', verbose=verbose)

    chks = list(profiles.keys())
    multi = xr.concat([profiles[c] for c in chks], dim='chk')
    multi.coords['chk'] = chks

    return multi
def append_to_multiprofile(multiprofile, profiles, verbose=True):
    """Append new profiles to an existing multiprofile Dataset

    Returns : xr.Dataset

    parameters
    ----------
    multiprofile : xr.Dataset
        multiprofile to append onto
    profiles : {chk: profile}
        new profile Datasets to append, with chks as keys
    verbose : bool
    """
    printv('Appending new profiles onto multiprofile', verbose=verbose)

    # join the new profiles first, then concatenate along the chk dimension
    addition = join_profiles(profiles, verbose=False)
    return xr.concat([multiprofile, addition], dim='chk')
def extract_profile(chk, run, model, model_set,
params=None,
derived_params=None,
config=None,
verbose=True):
"""Extract and reduce profile data from chk file
Returns : xr.Dataset
parameters
----------
chk | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
import os
import queue
import sys
import time
from .model import DeepSpeechReconstructionModel, get_variable_by_name
LOG_LEVEL_INDEX = sys.argv.index('--log_level') + 1 if '--log_level' in sys.argv else 0
DESIRED_LOG_LEVEL = sys.argv[LOG_LEVEL_INDEX] if 0 < LOG_LEVEL_INDEX < len(sys.argv) else '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = DESIRED_LOG_LEVEL
import absl.app
import tensorflow as tf
import tensorflow.compat.v1 as tfv1
import pickle as pkl
from tqdm import tqdm
import numpy as np
tfv1.logging.set_verbosity({
'0': tfv1.logging.DEBUG,
'1': tfv1.logging.INFO,
'2': tfv1.logging.WARN,
'3': tfv1.logging.ERROR
}.get(DESIRED_LOG_LEVEL))
from ds_ctcdecoder import Scorer
from six.moves import zip, range
from src.deepspeech_training.util.config import Config, initialize_globals
from src.deepspeech_training.util.checkpoints import load_or_init_graph_for_training
from src.deepspeech_training.util.feeding import create_dataset
from src.flags import create_flags, FLAGS
from src.deepspeech_training.util.helpers import check_ctcdecoder_version, ExceptionBox
from src.deepspeech_training.util.logging import log_debug, log_info, log_warn
check_ctcdecoder_version()
float_type = tf.float64
def create_optimizer(learning_rate_var, opt=None):
    """Build the TF1 optimizer used for the reconstruction updates.

    NOTE(review): the optimizer's internal learning rate is fixed at 1 and
    `learning_rate_var` is unused here -- presumably the effective step size
    is applied to the gradients elsewhere. TODO confirm.

    Parameters
    ----------
    learning_rate_var : tf.Variable
        (currently unused, kept for interface compatibility)
    opt : str
        optimizer name: 'adam' or 'sgd'

    Raises
    ------
    ValueError
        if `opt` is not a recognized optimizer name
    """
    if opt == 'adam':
        return tfv1.train.AdamOptimizer(
            learning_rate=1,
            beta1=FLAGS.beta1,
            beta2=FLAGS.beta2,
            epsilon=FLAGS.epsilon)
    elif opt == 'sgd':
        return tfv1.train.GradientDescentOptimizer(learning_rate=1)
    else:
        # original raised a bare ValueError with no message
        raise ValueError("unknown optimizer: %r (expected 'adam' or 'sgd')" % (opt,))
# Towers
# ======
# In order to properly make use of multiple GPU's, one must introduce new abstractions,
# not present when using a single GPU, that facilitate the multi-GPU use case.
# In particular, one must introduce a means to isolate the inference and gradient
# calculations on the various GPU's.
# The abstraction we intoduce for this purpose is called a 'tower'.
# A tower is specified by two properties:
# * **Scope** - A scope, as provided by `tf.name_scope()`,
# is a means to isolate the operations within a tower.
# For example, all operations within 'tower 0' could have their name prefixed with `tower_0/`.
# * **Device** - A hardware device, as provided by `tf.device()`,
# on which all operations within the tower execute.
# For example, all operations of 'tower 0' could execute on the first GPU `tf.device('/gpu:0')`.
def get_tower_results(model):
    r'''
    Build one tower per available device, each computing the model's loss and
    gradients; return the per-tower gradients, the average loss across towers,
    and all collected update ops.
    '''
    tower_avg_losses = []   # per-tower average losses (for the mean below)
    tower_gradients = []    # per-tower (gradient, variable) lists
    all_update_ops = []

    with tfv1.variable_scope(tfv1.get_variable_scope(), reuse=tf.AUTO_REUSE):
        for i, device in enumerate(Config.available_devices):
            # execute tower i's operations on device i, namespaced 'tower_i/'
            with tf.device(device):
                with tf.name_scope('tower_%d' % i):
                    avg_loss, grads, update_ops = model.calculate_loss_and_gradients()

                    # allow variables to be re-used by the next tower
                    tfv1.get_variable_scope().reuse_variables()

                    tower_avg_losses.append(avg_loss)
                    tower_gradients.append([(grads, model.batch_x_reconstructed)])
                    all_update_ops += update_ops

    avg_loss_across_towers = tf.reduce_mean(input_tensor=tower_avg_losses, axis=0)

    return tower_gradients, avg_loss_across_towers, all_update_ops
def average_gradients(tower_gradients):
    r'''
    Combine each variable's gradients from all towers into a single
    (gradient, variable) list. Acts as a synchronization point: all GPUs must
    finish their mini-batch before this completes.

    NOTE(review): despite the name, the towers' gradients are *summed*
    (reduce_sum), not averaged -- preserved as-is from the original.
    '''
    average_grads = []

    # run on the CPU device to conserve GPU memory
    with tf.device(Config.cpu_device):
        # zip(*...) groups the i-th (grad, var) pair from every tower
        for grad_and_vars in zip(*tower_gradients):
            # stack per-tower gradients along a new leading 'tower' axis,
            # then reduce over that axis
            stacked = tf.concat([tf.expand_dims(g, 0) for g, _ in grad_and_vars], 0)
            combined = tf.reduce_sum(input_tensor=stacked, axis=0)

            # pair the combined gradient with the variable from the first tower
            average_grads.append((combined, grad_and_vars[0][1]))

    return average_grads
def train():
do_cache_dataset = True
# pylint: disable=too-many-boolean-expressions
if (FLAGS.data_aug_features_multiplicative > 0 or
FLAGS.data_aug_features_additive > 0 or
FLAGS.augmentation_spec_dropout_keeprate < 1 or
FLAGS.augmentation_freq_and_time_masking or
FLAGS.augmentation_pitch_and_tempo_scaling or
FLAGS.augmentation_speed_up_std > 0 or
FLAGS.augmentation_sparse_warp):
do_cache_dataset = False
exception_box = ExceptionBox()
# Create training and validation datasets
train_set = create_dataset(FLAGS.train_files.split(','),
batch_size=FLAGS.train_batch_size,
enable_cache=FLAGS.feature_cache and do_cache_dataset,
cache_path=FLAGS.feature_cache,
train_phase=True,
exception_box=exception_box,
process_ahead=len(Config.available_devices) * FLAGS.train_batch_size * 2,
buffering=FLAGS.read_buffer)
iterator = tfv1.data.Iterator.from_structure(tfv1.data.get_output_types(train_set),
tfv1.data.get_output_shapes(train_set),
output_classes=tfv1.data.get_output_classes(train_set))
# Make initialization ops for switching between the two sets
train_init_op = iterator.make_initializer(train_set)
# Dropout
dropout_rates = [tfv1.placeholder(tf.float32, name='dropout_{}'.format(i)) for i in range(6)]
dropout_feed_dict = {
dropout_rates[0]: FLAGS.dropout_rate,
dropout_rates[1]: FLAGS.dropout_rate2,
dropout_rates[2]: FLAGS.dropout_rate3,
dropout_rates[3]: FLAGS.dropout_rate4,
dropout_rates[4]: FLAGS.dropout_rate5,
dropout_rates[5]: FLAGS.dropout_rate6,
}
no_dropout_feed_dict = {
rate: 0. for rate in dropout_rates
}
# Building the graph
learning_rate_var = tfv1.get_variable('learning_rate', initializer=FLAGS.learning_rate, trainable=False)
reduce_learning_rate_op = learning_rate_var.assign(tf.math.maximum(tf.multiply(learning_rate_var, FLAGS.plateau_reduction), FLAGS.min_step))
optimizer = create_optimizer(learning_rate_var, opt=FLAGS.optimizer)
# Enable mixed precision trainingreconstruct_both_x_y_update_ratio
# if FLAGS.automatic_mixed_precision:
# log_info('Enabling automatic mixed precision training.')
# optimizer = tfv1.train.experimental.enable_mixed_precision_graph_rewrite(optimizer)
base_name = os.path.splitext(os.path.basename(FLAGS.train_files))[0]
# fn = (FLAGS.input_path or 'outputs') + '/%s_samples.pkl' % base_name
fn = os.path.join(FLAGS.input_path or 'outputs', 'samples.pkl')
with open(fn, 'rb') as f:
audio, mfccs, mfcc_lengths, target = pkl.load(f)
log_info("Basename: %s" % base_name)
log_info("Length of original signal: %d" % (audio.shape[1] if FLAGS.reconstruct_input == 'audio' else mfccs.shape[1]))
log_info("Length of target sequence: %d" % (target.shape[1]))
log_info("Mean absolute values of coefficients: %s" % str(np.mean(np.abs(mfccs[0]), axis=0)))
model = DeepSpeechReconstructionModel(dropout_rates, audio, mfccs, mfcc_lengths, target)
model.learning_rate = learning_rate_var
tfv1.summary.scalar('performance/learning_rate', learning_rate_var, collections=['train'])
if FLAGS.summary_frames:
frame_idx = tfv1.placeholder(dtype=tf.int32, name="frame_idx")
tfv1.summary.scalar(
'frames/mae',
tf.norm(model.batch_x_full[0, frame_idx] - model.batch_x_original[0, frame_idx], ord=1) / model.input_dim,
collections=['frame'])
tfv1.summary.scalar(
'frames/mean_diff',
tf.abs(
tf.reduce_mean(model.batch_x_full[0, frame_idx]) -
tf.reduce_mean(model.batch_x_original[0, frame_idx])),
collections=['frame'])
if FLAGS.summary_coefficients:
coeff_idx = tfv1.placeholder(dtype=tf.int32, name="coeff_idx")
tfv1.summary.scalar(
'coefficients/mae',
tf.norm(model.batch_x_full[0, :, coeff_idx] - model.batch_x_original[0, :, coeff_idx], ord=1) / model.input_length,
collections=['coeff'])
tfv1.summary.scalar(
'coefficients/radius_decay',
model.search_radii_decay[0][coeff_idx],
collections=['coeff'])
tfv1.summary.scalar(
'coefficients/radius_non_decreasing_iterations',
model.search_radii_num_non_decreasing_iterations[0][coeff_idx],
collections=['coeff'])
# global_step is automagically incremented by the optimizer
global_step = tfv1.train.get_or_create_global_step()
if FLAGS.num_steps > 1:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=FLAGS.model_learning_rate)
local_step_ops = []
x = tf.expand_dims(tf.expand_dims(model.batch_x_reconstructed[0], 0), 0)
xs = [x]
V, batch_search_radius_idx, batch_search_radii = model.generate_perturbed_noises(0)
noise = tf.Variable(tf.zeros(V.get_shape()), name="multi_step_noise")
initialize_noise_op = tf.assign(noise, V)
xs += model.create_perturbed_tensors(x, noise, batch_search_radii)
for i in range(FLAGS.grad_estimation_sample_size + 1):
local_step_op = model.get_local_step_op(optimizer, xs, i)
local_step_ops.append(local_step_op)
reset_model_ops = [model.get_multi_step_reset_model_op(i) for i in range(FLAGS.grad_estimation_sample_size + 1)]
update_op, loss = model.get_multi_step_gradients(xs, noise, batch_search_radii)
else:
gradients, loss, update_ops = get_tower_results(model)
# Average tower gradients across GPUs
avg_tower_gradients = average_gradients(gradients)
apply_gradient_op = optimizer.apply_gradients(avg_tower_gradients, global_step=global_step)
# update transcript
if FLAGS.reconstruct in ['y', 'both']:
update_target_op = model.get_target_update_op(0, FLAGS.update_y_transcript_num_samples)
if 1 > FLAGS.ema > 0:
ema = tf.train.ExponentialMovingAverage(decay=FLAGS.ema)
with tf.control_dependencies([apply_gradient_op]):
train_op = ema.apply([model.batch_x_reconstructed])
else:
train_op = apply_gradient_op
tfv1.summary.scalar('performance/loss', loss, collections=['train'])
metrics, metric_ops = model.get_metric_ops()
for m in metrics:
tfv1.summary.scalar('performance/' + m, metrics[m], collections=['eval'])
# Summaries
train_summaries_op = tfv1.summary.merge_all('train')
train_summary_writer = tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'train'), max_queue=120)
eval_summaries_op = tfv1.summary.merge_all('eval')
eval_summary_writer = tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'eval'))
if FLAGS.summary_frames:
frame_summary_writers = [
tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'frame_%03d' % (i + 1)))
for i in range(model.input_length)]
frame_summaries_op = tfv1.summary.merge_all('frame')
if FLAGS.summary_coefficients:
coeff_summary_writers = [
tfv1.summary.FileWriter(os.path.join(FLAGS.summary_dir, 'coeff_%02d' % (i + 1)))
for i in range(model.input_dim)]
coeff_summary_op = tfv1.summary.merge_all('coeff')
# Save flags next to checkpoints
os.makedirs(FLAGS.save_checkpoint_dir, exist_ok=True)
flags_file = os.path.join(FLAGS.save_checkpoint_dir, 'flags.txt')
with open(flags_file, 'w') as fout:
fout.write(FLAGS.flags_into_string())
losses = []
is_sum_vector_applied = []
num_unit_vectors_applied = []
transcripts = []
with tfv1.Session(config=Config.session_config) as session:
log_debug('Session opened.')
# Prevent further graph changes
# tfv1.get_default_graph().finalize()
# Load checkpoint or initialize variables
load_or_init_graph_for_training(session)
session.run(tf.local_variables_initializer())
if FLAGS.num_iterations == 0:
client_gradients = session.run(model.client_gradients)
os.makedirs(FLAGS.output_path, exist_ok=True)
fn = os.path.join(FLAGS.output_path or 'outputs', 'grads.pkl')
with open(fn, 'wb') as f:
pkl.dump(client_gradients, f)
print("Gradients written to %s" % fn)
def run_set(set_name, init_op):
feed_dict = no_dropout_feed_dict
# Initialize iterator to the appropriate dataset
session.run(init_op)
is_sum_vector_applied = False
num_unit_vectors_applied = 0
debug_values = {}
# Batch loop
try:
if FLAGS.num_steps > 1:
bx = session.run(model.batch_x_full)
session.run(reset_model_ops + [initialize_noise_op], feed_dict=feed_dict)
for _ in range(FLAGS.num_steps):
session.run(local_step_ops, feed_dict=feed_dict)
_, current_step, train_summary, batch_loss, debug_values = session.run([update_op, global_step, train_summaries_op, loss, model.debug_tensors], feed_dict=feed_dict)
else:
if FLAGS.reconstruct == 'x' or (FLAGS.reconstruct == 'both' and epoch % int(FLAGS.reconstruct_both_x_y_update_ratio) != 0): # transcript given
_, _, current_step, batch_loss, bx, grads, debug_values, is_sum_vector_applied, num_unit_vectors_applied, train_summary = \
session.run(
[train_op, update_ops + model.update_ops, global_step, loss, model.batch_x_full, gradients, model.debug_tensors, model.is_sum_vector_applied, model.num_unit_vectors_applied, train_summaries_op],
feed_dict=feed_dict)
elif FLAGS.reconstruct == 'y' or (FLAGS.reconstruct == 'both'): # transcript not given
_, current_step, batch_loss, bx, by, debug_values, train_summary, test = session.run([
update_target_op, global_step, loss, model.batch_x_full, model.batch_y_reconstructed, model.debug_tensors, train_summaries_op], feed_dict=feed_dict)
labels = [' '] + [chr(c) for c in range(ord('a'), ord('z') + 1)] + ['\'', '']
transcripts.append(''.join([labels[idx] for idx in by[0]]))
print('"%s"' % transcripts[-1])
if FLAGS.summary_frames:
| |
exn:
# smooth over race condition with multiple processes trying to init swarm
if "already part of a swarm" not in str(exn):
raise exn
logger.notice( # pyre-fixme
_(
"waiting for local docker swarm manager & worker(s)",
manager=state,
workers=len(worker_nodes),
)
)
time.sleep(2)
miniwdl_services = [
d
for d in [s.attrs for s in client.services.list()]
if "Spec" in d and "Labels" in d["Spec"] and "miniwdl_run_id" in d["Spec"]["Labels"]
]
if miniwdl_services and cfg["docker_swarm"].get_bool("auto_init"):
logger.warning(
"docker swarm lists existing miniwdl-related services. "
"This is normal if other miniwdl processes are running concurrently; "
"otherwise, stale state could interfere with this run. To reset it, `docker swarm leave --force`"
)
finally:
client.close()
# Detect swarm's CPU & memory resources. Even on a localhost swarm, these may be less than
# multiprocessing.cpu_count() and psutil.virtual_memory().total; in particular on macOS,
# where Docker containers run in a virtual machine with limited resources.
resources_max_mem = {}
total_NanoCPUs = 0
total_MemoryBytes = 0
for node in worker_nodes:
logger.debug(
_(
"swarm worker",
ID=node.attrs["ID"],
Spec=node.attrs["Spec"],
Hostname=node.attrs["Description"]["Hostname"],
Resources=node.attrs["Description"]["Resources"],
Status=node.attrs["Status"],
)
)
resources = node.attrs["Description"]["Resources"]
total_NanoCPUs += resources["NanoCPUs"]
total_MemoryBytes += resources["MemoryBytes"]
if (
not resources_max_mem
or resources["MemoryBytes"] > resources_max_mem["MemoryBytes"]
or (
resources["MemoryBytes"] == resources_max_mem["MemoryBytes"]
and resources["NanoCPUs"] > resources_max_mem["NanoCPUs"]
)
):
resources_max_mem = resources
max_cpu = int(resources_max_mem["NanoCPUs"] / 1_000_000_000)
max_mem = resources_max_mem["MemoryBytes"]
logger.notice( # pyre-ignore
_(
"docker swarm resources",
workers=len(worker_nodes),
max_cpus=max_cpu,
max_mem_bytes=max_mem,
total_cpus=int(total_NanoCPUs / 1_000_000_000),
total_mem_bytes=total_MemoryBytes,
)
)
cls._limits = {"cpu": max_cpu, "mem_bytes": max_mem}
    @classmethod
    def detect_resource_limits(cls, cfg: config.Loader, logger: logging.Logger) -> Dict[str, int]:
        # Return the swarm resource ceiling {"cpu": ..., "mem_bytes": ...}
        # computed by global_init(). The assert guards against calling this
        # before global_init() has populated cls._limits.
        assert cls._limits, f"{cls.__name__}.global_init"
        return cls._limits
create_service_kwargs: Optional[Dict[str, Any]] = None
# override kwargs to docker service create() (may be set by plugins)
_bind_input_files: bool = True
_observed_states: Optional[Set[str]] = None
    def copy_input_files(self, logger: logging.Logger) -> None:
        # Copy input files into the working directory (delegated to the base
        # class), then disable per-file bind mounts: once the files live under
        # the working dir, its single mount covers them.
        assert self._bind_input_files
        super().copy_input_files(logger)
        # now that files have been copied into the working dir, it won't be necessary to bind-mount
        # them individually
        self._bind_input_files = False
def _run(self, logger: logging.Logger, terminating: Callable[[], bool], command: str,) -> int:
    """
    Run the task command as a transient docker swarm service and return its exit code.

    Streams the container's stderr into the log while polling for completion, and
    always removes the service and closes the docker client on the way out.

    Raises:
        Terminated: if ``terminating()`` becomes true before the service exits.
    """
    self._observed_states = set()
    with open(os.path.join(self.host_dir, "command"), "x") as outfile:
        outfile.write(command)
    # prepare docker configuration
    image_tag = self.runtime_values.get("docker", "ubuntu:18.04")
    if ":" not in image_tag:
        # seems we need to do this explicitly under some configurations -- issue #232
        image_tag += ":latest"
    logger.info(_("docker image", tag=image_tag))
    mounts = self.prepare_mounts(logger)
    # we want g+rw on files (and g+rwx on directories) under host_dir, to ensure the container
    # command will be able to access them regardless of what user id it runs as (we will
    # configure docker to make the container a member of the invoking user's primary group)
    chmod_R_plus(self.host_dir, file_bits=0o660, dir_bits=0o770)
    # connect to dockerd
    client = docker.from_env(timeout=900)
    resources, user, groups = self.misc_config(logger, client)
    svc = None
    exit_code = None
    try:
        # run container as a transient docker swarm service, letting docker handle the resource
        # scheduling (waiting until requested # of CPUs are available).
        kwargs = {
            # unique name with some human readability; docker limits to 63 chars (issue #327)
            "name": self.unique_service_name(self.run_id),
            "command": [
                "/bin/bash",
                "-c",
                "id; ls -Rl ..; bash ../command >> ../stdout.txt 2>> ../stderr.txt",
            ],
            # restart_policy 'none' so that swarm runs the container just once
            "restart_policy": docker.types.RestartPolicy("none"),
            "workdir": os.path.join(self.container_dir, "work"),
            "mounts": mounts,
            "resources": resources,
            "user": user,
            "groups": groups,
            "labels": {"miniwdl_run_id": self.run_id},
            "container_labels": {"miniwdl_run_id": self.run_id},
        }
        kwargs.update(self.create_service_kwargs or {})
        logger.debug(_("docker create service kwargs", **kwargs))
        svc = client.services.create(image_tag, **kwargs)
        logger.debug(_("docker service", name=svc.name, id=svc.short_id))
        # stream stderr into log
        with contextlib.ExitStack() as cleanup:
            poll_stderr = cleanup.enter_context(
                PygtailLogger(
                    logger,
                    os.path.join(self.host_dir, "stderr.txt"),
                    callback=self.stderr_callback,
                )
            )
            # poll for container exit
            running_states = {"preparing", "running"}
            was_running = False
            while exit_code is None:
                time.sleep(random.uniform(1.0, 2.0))  # spread out work over the GIL
                if terminating():
                    quiet = not self._observed_states.difference(
                        # reduce log noise if the terminated task only sat in docker's queue
                        {"(UNKNOWN)", "new", "allocated", "pending"}
                    )
                    if not quiet:
                        self.poll_service(logger, svc, verbose=True)
                    raise Terminated(quiet=quiet)
                exit_code = self.poll_service(logger, svc)
                if not was_running and self._observed_states.intersection(running_states):
                    # indicate actual container start in status bar
                    # 'preparing' is when docker is pulling and extracting the image, which can
                    # be a lengthy and somewhat intensive operation, so we count it as running.
                    cleanup.enter_context(
                        _statusbar.task_running(
                            self.runtime_values.get("cpu", 0),
                            self.runtime_values.get("memory_reservation", 0),
                        )
                    )
                    was_running = True
                if "running" in self._observed_states:
                    poll_stderr()
        logger.debug(
            _(
                "docker service logs",
                stdout=list(msg.decode().rstrip() for msg in svc.logs(stdout=True)),
                stderr=list(msg.decode().rstrip() for msg in svc.logs(stderr=True)),
            )
        )
        # retrieve and check container exit status
        assert isinstance(exit_code, int)
        return exit_code
    finally:
        if svc:
            try:
                svc.remove()
            # was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit
            # aren't swallowed during cleanup
            except Exception:
                logger.exception("failed to remove docker service")
        self.chown(logger, client, exit_code == 0)
        try:
            client.close()
        # was a bare `except:`; same rationale as above
        except Exception:
            logger.exception("failed to close docker-py client")
def prepare_mounts(self, logger: logging.Logger) -> List[docker.types.Mount]:
    """
    Assemble the docker Mount list for the service: the input files (read-only,
    unless already copied into the working dir), the command file (read-only),
    and stdout.txt/stderr.txt/work read-write.
    """

    def ensure_mount_point(container_file: str) -> None:
        # Pre-create each mount point on the host so it's owned by the invoking
        # user:group, rather than being created as root by dockerd.
        assert container_file.startswith(self.container_dir + "/")
        host_file = os.path.join(
            self.host_dir, os.path.relpath(container_file, self.container_dir)
        )
        assert host_file.startswith(self.host_dir + "/")
        os.makedirs(os.path.dirname(host_file), exist_ok=True)
        with open(host_file, "x"):
            pass

    mounts = []

    # read-only bind mounts for the input files and the command script
    if self._bind_input_files:
        warn_once = True
        for host_path, container_path in self.input_file_map.items():
            st = os.stat(host_path)
            world_readable = bool(st.st_mode & 4)
            group_readable = st.st_gid == os.getegid() and bool(st.st_mode & 0o40)
            if warn_once and not (world_readable or group_readable):
                # file is neither world-readable, nor group-readable for the invoking
                # user's primary group; warn only on the first such file
                logger.warning(
                    _(
                        "one or more input file(s) could be inaccessible to docker images that don't run as root; "
                        "it may be necessary to `chmod g+r` them, or set --copy-input-files",
                        example_file=host_path,
                    )
                )
                warn_once = False
            ensure_mount_point(container_path)
            mounts.append(
                docker.types.Mount(container_path, host_path, type="bind", read_only=True)
            )
    mounts.append(
        docker.types.Mount(
            os.path.join(self.container_dir, "command"),
            os.path.join(self.host_dir, "command"),
            type="bind",
            read_only=True,
        )
    )

    # read/write bind mounts for stdout, stderr, and the working directory
    for pipe_file in ["stdout.txt", "stderr.txt"]:
        ensure_mount_point(os.path.join(self.container_dir, pipe_file))
        mounts.append(
            docker.types.Mount(
                os.path.join(self.container_dir, pipe_file),
                os.path.join(self.host_dir, pipe_file),
                type="bind",
            )
        )
    mounts.append(
        docker.types.Mount(
            os.path.join(self.container_dir, "work"),
            os.path.join(self.host_dir, "work"),
            type="bind",
        )
    )
    return mounts
def misc_config(
    self, logger: logging.Logger, client: docker.DockerClient
) -> Tuple[Optional[Dict[str, str]], Optional[str], List[str]]:
    """
    Derive the swarm service's resource requests, user, and supplementary groups
    from the task's runtime values and configuration; returns a
    (resources, user, groups) triple for services.create().
    """
    res_kwargs = {}
    cpu = self.runtime_values.get("cpu", 0)
    if cpu > 0:
        # the cpu unit expected by swarm is "NanoCPUs"
        nano_cpus = cpu * 1_000_000_000
        res_kwargs["cpu_limit"] = nano_cpus
        res_kwargs["cpu_reservation"] = nano_cpus
    memory_reservation = self.runtime_values.get("memory_reservation", 0)
    if memory_reservation > 0:
        res_kwargs["mem_reservation"] = memory_reservation
    memory_limit = self.runtime_values.get("memory_limit", 0)
    if memory_limit > 0:
        res_kwargs["mem_limit"] = memory_limit
    resources = None
    if res_kwargs:
        logger.debug(_("docker resources", **res_kwargs))
        resources = docker.types.Resources(**res_kwargs)

    user = None
    if self.cfg["task_runtime"].get_bool("as_user"):
        user = f"{os.geteuid()}:{os.getegid()}"
        logger.info(_("docker user", uid_gid=user))
        if os.geteuid() == 0:
            logger.warning(
                "container command will run explicitly as root, since you are root and set --as-me"
            )

    # add invoking user's group to ensure that command can access the mounted working
    # directory even if the docker image assumes some arbitrary uid
    groups = [str(os.getegid())]
    if groups == ["0"]:
        logger.warning(
            "container command will run as a root/wheel group member, since this is your primary group (gid=0)"
        )
    return resources, user, groups
def poll_service(
self, logger: logging.Logger, svc: docker.models.services.Service, verbose: bool = False
) -> Optional[int]:
status = {"State": "(UNKNOWN)"}
svc.reload()
assert svc.attrs["Spec"]["Labels"]["miniwdl_run_id"] == self.run_id
tasks = svc.tasks()
if tasks:
assert len(tasks) == 1, "docker service should have at most 1 task"
status = tasks[0]["Status"]
status["DesiredState"] = tasks[0].get("DesiredState", None)
if logger.isEnabledFor(logging.DEBUG):
logger.debug(_("docker task status", **status))
else:
assert (
len(self._observed_states or []) <= 1
), "docker task shouldn't disappear from service"
# references on docker task states:
# https://docs.docker.com/engine/swarm/how-swarm-mode-works/swarm-task-states/
# https://github.com/docker/swarmkit/blob/master/design/task_model.md
# https://github.com/moby/moby/blob/8fbf2598f58fb212230e6ddbcfbde628b0458250/api/types/swarm/task.go#L12
# log each new state
state = status["State"]
assert isinstance(state, str) and isinstance(self._observed_states, set)
if state not in self._observed_states:
loginfo = {"service": svc.short_id}
if tasks:
loginfo["task"] = tasks[0]["ID"][:10]
if "NodeID" in tasks[0]:
loginfo["node"] = tasks[0]["NodeID"][:10]
if status["DesiredState"] != state:
loginfo["desired"] = status["DesiredState"]
logmsg = status.get("Err", status.get("Message", None))
if logmsg and logmsg != state:
loginfo["message"] = logmsg
method = logger.info
if state == "running":
method = logger.notice # pyre-fixme
elif state in ["failed", "shutdown", "rejected", "orphaned", "remove"]:
method = logger.error
method(_(f"docker task {state}", **loginfo))
self._observed_states.add(state)
# determine whether docker task has exited
exit_code = None
if "ExitCode" in status.get("ContainerStatus", {}):
exit_code = status["ContainerStatus"]["ExitCode"] # pyre-fixme
assert isinstance(exit_code, int)
if state in ("complete", "failed"):
msg = _("docker task exit", state=state, exit_code=exit_code)
if state == "failed":
logger.error(msg)
else:
logger.notice(msg) # pyre-fixme
| |
not self.pp_stack[-1].seen_else:
# This is the first #else or #elif block. Remember the
# whole nesting stack up to this point. This is what we
# keep after the #endif.
self.pp_stack[-1].seen_else = True
self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack)
# Restore the stack to how it was before the #if
self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if)
else:
# TODO(unknown): unexpected #else, issue warning?
pass
elif Match(r'^\s*#\s*endif\b', line):
# End of #if or #else blocks.
if self.pp_stack:
# If we saw an #else, we will need to restore the nesting
# stack to its former state before the #else, otherwise we
# will just continue from where we left off.
if self.pp_stack[-1].seen_else:
# Here we can just use a shallow copy since we are the last
# reference to it.
self.stack = self.pp_stack[-1].stack_before_else
# Drop the corresponding #if
self.pp_stack.pop()
else:
# TODO(unknown): unexpected #endif, issue warning?
pass
# TODO(unknown): Update() is too long, but we will refactor later.
def Update(self, filename, clean_lines, linenum, error):
    """Update nesting state with current line.

    Processes one (comment/string-elided) line: tracks preprocessor state,
    parenthesis depth and inline-asm state of the innermost block, then consumes
    namespace declarations, class/struct declarations, and braces/semicolons,
    pushing and popping self.stack accordingly.

    Args:
      filename: The name of the current file.
      clean_lines: A CleansedLines instance containing the file.
      linenum: The number of the line to check.
      error: The function to call with any errors found.
    """
    line = clean_lines.elided[linenum]

    # Remember top of the previous nesting stack.
    #
    # The stack is always pushed/popped and not modified in place, so
    # we can just do a shallow copy instead of copy.deepcopy. Using
    # deepcopy would slow down cpplint by ~28%.
    if self.stack:
        self.previous_stack_top = self.stack[-1]
    else:
        self.previous_stack_top = None

    # Update pp_stack
    self.UpdatePreprocessor(line)

    # Count parentheses. This is to avoid adding struct arguments to
    # the nesting stack.
    if self.stack:
        inner_block = self.stack[-1]
        depth_change = line.count('(') - line.count(')')
        inner_block.open_parentheses += depth_change

        # Also check if we are starting or ending an inline assembly block.
        if inner_block.inline_asm in (_NO_ASM, _END_ASM):
            if (depth_change != 0 and
                    inner_block.open_parentheses == 1 and
                    _MATCH_ASM.match(line)):
                # Enter assembly block
                inner_block.inline_asm = _INSIDE_ASM
            else:
                # Not entering assembly block. If previous line was _END_ASM,
                # we will now shift to _NO_ASM state.
                inner_block.inline_asm = _NO_ASM
        elif (inner_block.inline_asm == _INSIDE_ASM and
              inner_block.open_parentheses == 0):
            # Exit assembly block
            inner_block.inline_asm = _END_ASM

    # Consume namespace declaration at the beginning of the line. Do
    # this in a loop so that we catch same line declarations like this:
    #   namespace proto2 { namespace bridge { class MessageSet; } }
    while True:
        # Match start of namespace. The "\b\s*" below catches namespace
        # declarations even if it weren't followed by a whitespace, this
        # is so that we don't confuse our namespace checker. The
        # missing spaces will be flagged by CheckSpacing.
        namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
        if not namespace_decl_match:
            break

        new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
        self.stack.append(new_namespace)

        # Continue parsing the remainder of the line after the namespace name.
        line = namespace_decl_match.group(2)
        if line.find('{') != -1:
            new_namespace.seen_open_brace = True
            line = line[line.find('{') + 1:]

    # Look for a class declaration in whatever is left of the line
    # after parsing namespaces. The regexp accounts for decorated classes
    # such as in:
    #   class LOCKABLE API Object {
    #   };
    class_decl_match = Match(
        r'^(\s*(?:template\s*<[\w\s<>,:=]*>\s*)?'
        r'(class|struct)\s+(?:[A-Z_]+\s+)*(\w+(?:::\w+)*))'
        r'(.*)$', line)
    if (class_decl_match and
            (not self.stack or self.stack[-1].open_parentheses == 0)):
        # We do not want to accept classes that are actually template arguments:
        #   template <class Ignore1,
        #             class Ignore2 = Default<Args>,
        #             template <Args> class Ignore3>
        #   void Function() {};
        #
        # To avoid template argument cases, we scan forward and look for
        # an unmatched '>'. If we see one, assume we are inside a
        # template argument list.
        end_declaration = len(class_decl_match.group(1))
        if not self.InTemplateArgumentList(clean_lines, linenum, end_declaration):
            self.stack.append(_ClassInfo(
                class_decl_match.group(3), class_decl_match.group(2),
                clean_lines, linenum))
        line = class_decl_match.group(4)

    # If we have not yet seen the opening brace for the innermost block,
    # run checks here.
    if not self.SeenOpenBrace():
        self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)

    # Update access control if we are inside a class/struct
    if self.stack and isinstance(self.stack[-1], _ClassInfo):
        classinfo = self.stack[-1]
        access_match = Match(
            r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
            r':(?:[^:]|$)',
            line)
        if access_match:
            classinfo.access = access_match.group(2)

    # Consume braces or semicolons from what's left of the line
    while True:
        # Match first brace, semicolon, or closed parenthesis.
        matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
        if not matched:
            break

        token = matched.group(1)
        if token == '{':
            # If namespace or class hasn't seen a opening brace yet, mark
            # namespace/class head as complete. Push a new block onto the
            # stack otherwise.
            if not self.SeenOpenBrace():
                self.stack[-1].seen_open_brace = True
            elif Match(r'^extern\s*"[^"]*"\s*\{', line):
                self.stack.append(_ExternCInfo(linenum))
            else:
                self.stack.append(_BlockInfo(linenum, True))
                if _MATCH_ASM.match(line):
                    self.stack[-1].inline_asm = _BLOCK_ASM

        elif token == ';' or token == ')':
            # If we haven't seen an opening brace yet, but we already saw
            # a semicolon, this is probably a forward declaration. Pop
            # the stack for these.
            #
            # Similarly, if we haven't seen an opening brace yet, but we
            # already saw a closing parenthesis, then these are probably
            # function arguments with extra "class" or "struct" keywords.
            # Also pop these stack for these.
            if not self.SeenOpenBrace():
                self.stack.pop()
        else:  # token == '}'
            # Perform end of block checks and pop the stack.
            if self.stack:
                self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
                self.stack.pop()
        line = matched.group(2)
def InnermostClass(self):
    """Get class info on the top of the stack.

    Returns:
      A _ClassInfo object if we are inside a class, or None otherwise.
    """
    # Walk from the innermost scope outward; the first _ClassInfo found is
    # the innermost enclosing class. (Replaces a manual reverse-index loop
    # with the idiomatic reversed() iteration.)
    for frame in reversed(self.stack):
        if isinstance(frame, _ClassInfo):
            return frame
    return None
def CheckForNonStandardConstructs(filename, clean_lines, linenum,
nesting_state, error):
r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
Complain about several constructs which gcc-2 accepts, but which are
not standard C++. Warning about these in lint is one way to ease the
transition to new compilers.
- put storage class first (e.g. "static const" instead of "const static").
- "%lld" instead of %qd" in printf-type functions.
- "%1$d" is non-standard in printf-type functions.
- "\%" is an undefined character escape sequence.
- text after #endif is not allowed.
- invalid inner-style forward declaration.
- >? and <? operators, and their >?= and <?= cousins.
Additionally, check for constructor/destructor style violations and reference
members, as it is very convenient to do so while checking for
gcc-2 compliance.
Args:
filename: The name of the current file.
clean_lines: A CleansedLines instance containing the file.
linenum: The number of the line to check.
nesting_state: A NestingState instance which maintains information about
the current stack of nested blocks being parsed.
error: A callable to which errors are reported, which takes 4 arguments:
filename, line number, error level, and message
"""
# Remove comments from the line, but leave in strings for now.
line = clean_lines.lines[linenum]
if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
error(filename, linenum, 'runtime/printf_format', 3,
'%q in format strings is deprecated. Use %ll instead.')
if Search(r'printf\s*\(.*".*%\d+\$', line):
error(filename, linenum, 'runtime/printf_format', 2,
'%N$ formats are unconventional. Try rewriting to avoid them.')
# Remove escaped backslashes before looking for undefined escapes.
line = line.replace('\\\\', '')
if Search(r'("|\').*\\(%|\[|\(|{)', line):
error(filename, linenum, 'build/printf_format', 3,
'%, [, (, and { are undefined character escapes. Unescape them.')
# For the rest, work with both comments and strings removed.
line = clean_lines.elided[linenum]
if Search(r'\b(const|volatile|void|char|short|int|long'
r'|float|double|signed|unsigned'
r'|schar|u?int8|u?int16|u?int32|u?int64)'
r'\s+(register|static|extern|typedef)\b',
line):
error(filename, linenum, 'build/storage_class', 5,
'Storage-class specifier (static, extern, typedef, etc) should be '
'at the beginning of the declaration.')
if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
# TODO(unknown): Could it be expanded safely to arbitrary references,
# without triggering too many false positives? The first
# attempt triggered 5 warnings for mostly benign code in the regtest, hence
# the restriction.
# Here's the original regexp, for the reference:
# type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
# r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
error(filename, linenum, 'runtime/member_string_references', 2,
'const string& members are dangerous. It is much better to use '
'alternatives, such as pointers or simple constants.')
# Everything else in this function operates on class declarations.
# Return early if the top of the nesting stack is not a class, or if
# the class head is | |
##
# Copyright (c) 2013-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Group membership caching implementation tests
"""
from twext.enterprise.jobs.jobitem import JobItem
from twext.who.idirectory import RecordType
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twistedcaldav.stdconfig import config
from twistedcaldav.test.util import StoreTestCase
from txdav.common.icommondatastore import NotFoundError
from txdav.who.groups import GroupCacher, diffAssignments, GroupRefreshWork
from txdav.who.test.support import TestRecord, CalendarInMemoryDirectoryService
class GroupCacherTest(StoreTestCase):
@inlineCallbacks
def setUp(self):
    # StoreTestCase.setUp builds the store/directory fixtures; then each test
    # gets a GroupCacher bound to that directory.
    yield super(GroupCacherTest, self).setUp()
    self.groupCacher = GroupCacher(self.directory)
@inlineCallbacks
def test_multipleCalls(self):
    """
    Ensure multiple calls to groupByUID() don't raise an exception
    """
    txn = self.storeUnderTest().newTransaction()
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    # Look the same group up twice in one transaction; the second call must
    # succeed rather than raise.
    yield txn.groupByUID(record.uid)
    yield txn.groupByUID(record.uid)
    yield txn.commit()
@inlineCallbacks
def test_refreshGroup(self):
    """
    Verify refreshGroup() adds a group to the Groups table with the
    expected membership hash value and members
    """
    expectedMembers = set(
        [u'__cdaboo1__', u'__glyph1__', u'__sagen1__', u'__wsanchez1__']
    )
    store = self.storeUnderTest()
    txn = store.newTransaction()
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    yield self.groupCacher.refreshGroup(txn, record.uid)

    # The group should be retrievable both by UID and by its assigned ID,
    # with consistent fields either way.
    group = yield txn.groupByUID(record.uid)
    self.assertEquals(group.extant, True)
    self.assertEquals(group.membershipHash, "553eb54e3bbb26582198ee04541dbee4")
    group = yield txn.groupByID(group.groupID)
    self.assertEquals(group.groupUID, record.uid)
    self.assertEquals(group.name, u"Top Group 1")
    self.assertEquals(group.membershipHash, "553eb54e3bbb26582198ee04541dbee4")
    self.assertEquals(group.extant, True)

    members = yield txn.groupMemberUIDs(group.groupID)
    self.assertEquals(expectedMembers, members)

    records = yield self.groupCacher.cachedMembers(txn, group.groupID)
    self.assertEquals(set([r.uid for r in records]), expectedMembers)

    # sagen is in the top group, even though it's actually one level
    # removed
    record = yield self.directory.recordWithUID(u"__sagen1__")
    groups = yield self.groupCacher.cachedGroupsFor(txn, record.uid)
    self.assertEquals(set([u"__top_group_1__"]), groups)
    yield txn.commit()
@inlineCallbacks
def test_synchronizeMembers(self):
    """
    After loading in a group via refreshGroup(), pass new member sets to
    synchronizeMembers() and verify members are added and removed as
    expected
    """
    store = self.storeUnderTest()
    txn = store.newTransaction()

    # Refresh the group so it's assigned a group_id
    uid = u"__top_group_1__"
    yield self.groupCacher.refreshGroup(txn, uid)
    group = yield txn.groupByUID(uid)

    # Remove two members, and add one member.
    # NOTE: the short-name literals had been clobbered (replaced with
    # "<PASSWORD>" placeholders); restored to the names the assertions
    # below require: wsanchez1, cdaboo1, dre1.
    newSet = set()
    for name in (u"wsanchez1", u"cdaboo1", u"dre1"):
        record = (
            yield self.directory.recordWithShortName(
                RecordType.user,
                name
            )
        )
        newSet.add(record.uid)
    added, removed = (
        yield self.groupCacher.synchronizeMembers(
            txn, group.groupID, newSet
        )
    )
    self.assertEquals(added, set(["__dre1__", ]))
    self.assertEquals(removed, set(["__glyph1__", "__sagen1__", ]))
    records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
    self.assertEquals(
        set([r.shortNames[0] for r in records]),
        set(["wsanchez1", "cdaboo1", "dre1"])
    )

    # Remove all members
    added, removed = (
        yield self.groupCacher.synchronizeMembers(txn, group.groupID, set())
    )
    self.assertEquals(added, set())
    self.assertEquals(removed, set(["__wsanchez1__", "__cdaboo1__", "__dre1__", ]))
    records = (yield self.groupCacher.cachedMembers(txn, group.groupID))
    self.assertEquals(len(records), 0)

    yield txn.commit()
@inlineCallbacks
def test_groupByID(self):
    """
    Verify groupByID() raises NotFoundError for an unknown ID, and returns
    the refreshed group's fields for a known one.
    """
    store = self.storeUnderTest()
    txn = store.newTransaction()

    # Non-existent groupID
    yield self.failUnlessFailure(txn.groupByID(42), NotFoundError)

    uid = u"__top_group_1__"
    # renamed from "hash", which shadowed the builtin of the same name
    membershipHash = "553eb54e3bbb26582198ee04541dbee4"
    yield self.groupCacher.refreshGroup(txn, uid)
    group = yield txn.groupByUID(uid)
    group = yield txn.groupByID(group.groupID)
    self.assertEqual(group.groupUID, uid)
    self.assertEqual(group.name, u"Top Group 1")
    self.assertEqual(group.membershipHash, membershipHash)
    self.assertEqual(group.extant, True)
    yield txn.commit()
@inlineCallbacks
def test_externalAssignments(self):
    """
    Verify scheduleExternalAssignments() records (readOnlyGroupUID,
    readWriteGroupUID) pairs per delegator, that delegate lookups expand
    through the assigned groups, and that removed or non-existent group
    assignments are dropped.
    """
    store = self.storeUnderTest()
    txn = store.newTransaction()

    # Initially there are no external assignments at all
    oldExternalAssignments = (yield txn.externalDelegates())
    self.assertEquals(oldExternalAssignments, {})

    # Assign a read-write group to wsanchez (value is a
    # (readOnlyGroupUID, readWriteGroupUID) tuple)
    newAssignments = {
        u"__wsanchez1__": (None, u"__top_group_1__")
    }
    yield self.groupCacher.scheduleExternalAssignments(
        txn, newAssignments, immediately=True
    )
    oldExternalAssignments = (yield txn.externalDelegates())
    self.assertEquals(
        oldExternalAssignments,
        {
            u"__wsanchez1__":
            (
                None,
                u"__top_group_1__"
            )
        }
    )

    # Add a read-only group for both cdaboo and wsanchez
    newAssignments = {
        u"__cdaboo1__":
        (
            u"__sub_group_1__",
            None
        ),
        u"__wsanchez1__":
        (
            u"__sub_group_1__",
            u"__top_group_1__"
        ),
    }
    yield self.groupCacher.scheduleExternalAssignments(
        txn, newAssignments, immediately=True
    )
    oldExternalAssignments = (yield txn.externalDelegates())
    self.assertEquals(
        oldExternalAssignments,
        {
            u"__wsanchez1__":
            (
                u"__sub_group_1__",
                u"__top_group_1__"
            ),
            u"__cdaboo1__":
            (
                u"__sub_group_1__",
                None
            )
        }
    )
    allGroupDelegates = (yield txn.allGroupDelegates())
    self.assertEquals(
        allGroupDelegates,
        set(
            [
                u"__top_group_1__",
                u"__sub_group_1__"
            ]
        )
    )

    # Fault in the read-only group
    yield self.groupCacher.refreshGroup(txn, u"__sub_group_1__")

    # Wilfredo should have Sagen and Daboo as read-only delegates
    delegates = (yield txn.delegates(
        u"__wsanchez1__", False, expanded=True)
    )
    self.assertEquals(
        delegates,
        set(
            [
                u"__sagen1__",
                u"__cdaboo1__"
            ]
        )
    )

    # Fault in the read-write group
    yield self.groupCacher.refreshGroup(txn, u"__top_group_1__")

    # Wilfredo's read-write delegates are the top group's other expanded
    # members (the delegator himself is not listed as his own delegate)
    delegates = (yield txn.delegates(
        u"__wsanchez1__", True, expanded=True)
    )
    self.assertEquals(
        delegates,
        set(
            [
                u"__sagen1__",
                u"__cdaboo1__",
                u"__glyph1__"
            ]
        )
    )

    #
    # Now, remove some external assignments
    #
    newAssignments = {
        u"__wsanchez1__":
        (
            u"__sub_group_1__",
            None
        ),
    }
    yield self.groupCacher.scheduleExternalAssignments(
        txn, newAssignments, immediately=True
    )
    oldExternalAssignments = (yield txn.externalDelegates())
    self.assertEquals(
        oldExternalAssignments,
        {
            u"__wsanchez1__":
            (
                u"__sub_group_1__",
                None
            ),
        }
    )
    allGroupDelegates = (yield txn.allGroupDelegates())
    self.assertEquals(
        allGroupDelegates,
        set(
            [
                u"__sub_group_1__"
            ]
        )
    )

    # Wilfredo should have Sagen and Daboo as read-only delegates
    delegates = (yield txn.delegates(
        u"__wsanchez1__", False, expanded=True)
    )
    self.assertEquals(
        delegates,
        set(
            [
                u"__sagen1__",
                u"__cdaboo1__"
            ]
        )
    )

    # Wilfredo should have no read-write delegates
    delegates = (yield txn.delegates(
        u"__wsanchez1__", True, expanded=True)
    )
    self.assertEquals(
        delegates,
        set([])
    )

    # Only 1 group as delegate now:
    allGroupDelegates = (yield txn.allGroupDelegates())
    self.assertEquals(
        allGroupDelegates,
        set(
            [
                u"__sub_group_1__"
            ]
        )
    )

    #
    # Say somebody messed up and stuck a non-existent group UID in
    # as a delegate
    #
    newAssignments = {
        u"__wsanchez1__":
        (
            u"__sub_group_1__",
            u"__non_existent_group__",
        ),
    }
    yield self.groupCacher.scheduleExternalAssignments(
        txn, newAssignments, immediately=True
    )
    oldExternalAssignments = (yield txn.externalDelegates())
    self.assertEquals(
        oldExternalAssignments,
        {
            u"__wsanchez1__":
            (
                u"__sub_group_1__",
                None  # <--- (not __non_existent_group__)
            ),
        }
    )
    yield txn.commit()
def test_diffAssignments(self):
    """
    Ensure external proxy assignment diffing works
    """

    def unordered(pair):
        # Compare (changed, removed) pairs without regard to list ordering.
        # Replaces map(set, ...): under Python 3, map() returns lazy iterator
        # objects that never compare equal, which would break these
        # assertions; a list comprehension behaves identically on Python 2.
        return [set(item) for item in pair]

    self.assertEquals(
        (
            # changed
            [],
            # removed
            [],
        ),
        diffAssignments(
            # old
            {},
            # new
            {}
        )
    )
    self.assertEquals(
        (
            # changed
            [],
            # removed
            [],
        ),
        diffAssignments(
            # old
            {"B": ("1", "2")},
            # new
            {"B": ("1", "2")},
        )
    )
    self.assertEquals(
        unordered((
            # changed
            [("A", ("1", "2")), ("B", ("3", "4"))],
            # removed
            [],
        )),
        unordered(diffAssignments(
            # old
            {},
            # new
            {"A": ("1", "2"), "B": ("3", "4")}
        ))
    )
    self.assertEquals(
        unordered((
            # changed
            [],
            # removed
            ["A", "B"],
        )),
        unordered(diffAssignments(
            # old
            {"A": ("1", "2"), "B": ("3", "4")},
            # new
            {},
        ))
    )
    self.assertEquals(
        unordered((
            # changed
            [('C', ('4', '5')), ('D', ('7', '8'))],
            # removed
            ["B"],
        )),
        unordered(diffAssignments(
            # old
            {"A": ("1", "2"), "B": ("3", "4"), "C": ("5", "6")},
            # new
            {"D": ("7", "8"), "C": ("4", "5"), "A": ("1", "2")},
        ))
    )
@inlineCallbacks
def test_recursiveGroup(self):
    """
    Verify expandedMembers() resolves membership through a recursive group
    structure to the expected set of member UIDs.
    """
    store = self.storeUnderTest()
    txn = store.newTransaction()
    record = yield self.directory.recordWithUID(u"recursive1_coasts")
    memberRecords = yield record.expandedMembers()
    self.assertEquals(
        set([member.uid for member in memberRecords]),
        set([
            u'6423F94A-6B76-4A3A-815B-D52CFD77935D',
            u'5A985493-EE2C-4665-94CF-4DFEA3A89500',
        ])
    )
    yield txn.commit()
@inlineCallbacks
def test_groupChangeCacheNotificationRefreshGroup(self):
    """
    Verify refreshGroup() triggers a cache notification for the group and all
    members that are added or removed
    """

    class TestNotifier(object):
        # NOTE: changed() appends to this class-level list (it is never
        # rebound on an instance), so assertions below read and reset it
        # via the class.
        changedTokens = []

        def changed(self, token):
            self.changedTokens.append(token)

    self.groupCacher.cacheNotifier = TestNotifier()

    # No change
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__sub_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [])

    # Add member to group
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__dre1__")
    yield record.addMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__top_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []

    # Remove member from group
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__dre1__")
    yield record.removeMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__top_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []

    # Add member to sub-group: refreshing only the top group does not pick
    # up the sub-group's change; refreshing the sub-group then notifies it
    record = yield self.directory.recordWithUID(u"__sub_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__dre1__")
    yield record.addMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__top_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__sub_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__sub_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []

    # Remove member from sub-group
    record = yield self.directory.recordWithUID(u"__sub_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__dre1__")
    yield record.removeMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__top_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__sub_group_1__")
    yield self.commit()
    self.assertEqual(TestNotifier.changedTokens, [
        "__sub_group_1__",
        "__dre1__",
    ])
    TestNotifier.changedTokens = []

    # Remove sub-group member from group: the sub-group's (expanded)
    # members are the affected users
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__sub_group_1__")
    yield record.removeMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(set(TestNotifier.changedTokens), set([
        "__top_group_1__",
        "__sagen1__",
        "__cdaboo1__",
    ]))
    TestNotifier.changedTokens = []

    # Add sub-group member to group
    record = yield self.directory.recordWithUID(u"__top_group_1__")
    addrecord = yield self.directory.recordWithUID(u"__sub_group_1__")
    yield record.addMembers([addrecord, ])
    yield self.groupCacher.refreshGroup(self.transactionUnderTest(), "__top_group_1__")
    yield self.commit()
    self.assertEqual(set(TestNotifier.changedTokens), set([
        "__top_group_1__",
        "__sagen1__",
        "__cdaboo1__",
    ]))
    TestNotifier.changedTokens = []
@inlineCallbacks
def test_groupChangeCacheNotificationApplyExternalAssignments(self):
"""
Verify applyExternalAssignments() triggers a cache notification for the
delegator and delegates
"""
class TestNotifier(object):
changedTokens = []
def changed(self, token):
self.changedTokens.append(token)
self.groupCacher.cacheNotifier = TestNotifier()
yield self.groupCacher.applyExternalAssignments(self.transactionUnderTest(), "__dre1__", None, None)
yield self.commit()
self.assertEqual(
TestNotifier.changedTokens,
["__dre1__"]
)
TestNotifier.changedTokens = []
yield self.groupCacher.applyExternalAssignments(self.transactionUnderTest(), | |
"""
Utility methods to deal with "names" of relations.
To be safe, we always delimit names in queries but would prefer not to during logging.
See TableName.
There are additional methods and classes here to support the feature of choosing relations
by a pattern from the command line.
"""
import fnmatch
import re
import uuid
from typing import List, Optional, Tuple
import etl.config
from etl.errors import ETLSystemError
from etl.text import join_with_single_quotes
def as_staging_name(name):
    """Return the name of the staging-position schema for the given schema name."""
    return f"etl_staging${name}"
def as_backup_name(name):
    """Return the name of the backup-position schema for the given schema name."""
    return f"etl_backup${name}"
class TableName:
    """
    Class to automatically create delimited identifiers for tables.
    Given a table s.t, then the cautious identifier for SQL code is: "s"."t"
    But the more readable name is still: s.t
    Note that the preference for logging is to always use single-quotes, 's.t' (see {:x} below).
    Another, more curious use for instances is to store shell patterns for the schema name
    and table name so that we can match against other instances.
    Comparisons (for schema and table names) are case-insensitive.
    TableNames have a notion of known "managed" schemas, which include both
    sources and transformations listed in configuration files. A TableName
    is considered unmanaged if its schema does not belong to the list of
    managed schemas, and in that case its schema property is never translated
    into a staging version.
    >>> from etl.config.dw import DataWarehouseSchema
    >>> orders = TableName.from_identifier("www.orders")
    >>> str(orders)
    '"www"."orders"'
    >>> orders.identifier
    'www.orders'
    >>> same_orders = TableName.from_identifier("WWW.Orders")
    >>> orders == same_orders
    True
    >>> id(orders) == id(same_orders)
    False
    >>> hash(orders) == hash(same_orders)
    True
    >>> w3 = TableName.from_identifier("w3.orders")
    >>> orders == w3
    False
    >>> purchases = TableName.from_identifier("www.purchases")
    >>> orders < purchases
    True
    >>> purchases.managed_schemas = ['www']
    >>> staging_purchases = purchases.as_staging_table_name()
    >>> staging_purchases.managed_schemas = ['www']
    >>> # Now the table names are the same but they are in different schemas (staging vs. not)
    >>> staging_purchases.table == purchases.table
    True
    >>> staging_purchases.schema == purchases.schema
    False
    """
    # No per-instance __dict__: only these attributes exist on an instance.
    __slots__ = ("_schema", "_table", "_is_staging", "_managed_schemas", "_external_schemas")
    def __init__(self, schema: Optional[str], table: str, is_staging=False) -> None:
        # Concession to subclasses ... schema is optional
        # Names are lower-cased here so all later comparisons are case-insensitive.
        self._schema = schema.lower() if schema else None
        self._table = table.lower()
        self._is_staging = is_staging
        # Caches for the schema lists pulled lazily from the global config (see properties below).
        self._managed_schemas: Optional[frozenset] = None
        self._external_schemas: Optional[frozenset] = None
    @property
    def schema(self):
        # Only managed schemas are translated to their staging name ("etl_staging$<schema>").
        if self.is_staging and self.is_managed:
            return as_staging_name(self._schema)
        else:
            return self._schema
    @property
    def table(self):
        return self._table
    @property
    def is_staging(self):
        return self._is_staging
    @property
    def managed_schemas(self) -> frozenset:
        """
        List of schemas that are managed by Arthur.
        This contains all schemas not just the schema of this relation.
        """
        # Loaded lazily from the global data-warehouse config and cached per instance.
        if self._managed_schemas is None:
            try:
                schemas = etl.config.get_dw_config().schemas
            except AttributeError:
                raise ETLSystemError("dw_config has not been set!")
            self._managed_schemas = frozenset(schema.name for schema in schemas)
        return self._managed_schemas
    @managed_schemas.setter
    def managed_schemas(self, schema_names: List) -> None:
        # This setter only exists for tests.
        self._managed_schemas = frozenset(schema_names)
    @property
    def external_schemas(self) -> frozenset:
        """List external schemas that are not managed by us and may not exist during validation."""
        # Same lazy-load-and-cache pattern as managed_schemas.
        if self._external_schemas is None:
            try:
                schemas = etl.config.get_dw_config().external_schemas
            except AttributeError:
                raise ETLSystemError("dw_config has not been set!")
            self._external_schemas = frozenset(schema.name for schema in schemas)
        return self._external_schemas
    def to_tuple(self):
        """
        Return schema name and table name as a handy tuple.
        >>> tn = TableName("weather", "temp")
        >>> schema_name, table_name = tn.to_tuple()
        >>> schema_name, table_name
        ('weather', 'temp')
        """
        return self.schema, self.table
    @property
    def identifier(self) -> str:
        """
        Return simple identifier, like one would use on the command line.
        >>> tn = TableName("hello", "world")
        >>> tn.identifier
        'hello.world'
        """
        return f"{self.schema}.{self.table}"
    @property
    def identifier_as_re(self) -> str:
        r"""
        Return a regular expression that would look for the (unquoted) identifier.
        >>> tn = TableName("dw", "fact")
        >>> tn.identifier_as_re
        '\\bdw\\.fact\\b'
        >>> import re
        >>> re.match(tn.identifier_as_re, "dw.fact") is not None
        True
        >>> re.match(tn.identifier_as_re, "dw_fact") is None
        True
        """
        return r"\b{}\b".format(re.escape(self.identifier))
    @property
    def is_managed(self) -> bool:
        # Note: compares the raw schema, not the possibly staging-translated one.
        return self._schema in self.managed_schemas
    @property
    def is_external(self) -> bool:
        return self._schema in self.external_schemas
    @classmethod
    def from_identifier(cls, identifier: str):
        """
        Split identifier into schema and table before creating a new TableName instance.
        >>> identifier = "ford.mustang"
        >>> tn = TableName.from_identifier(identifier)
        >>> identifier == tn.identifier
        True
        """
        schema, table = identifier.split(".", 1)
        return cls(schema, table)
    def __str__(self):
        """
        Return delimited table identifier with quotes around schema and table name.
        This safeguards against unscrupulous users who use "default" as table name.
        >>> import etl.config
        >>> from collections import namedtuple
        >>> MockDWConfig = namedtuple('MockDWConfig', ['schemas'])
        >>> MockSchema = namedtuple('MockSchema', ['name'])
        >>> etl.config._dw_config = MockDWConfig(schemas=[MockSchema(name='hello')])
        >>> tn = TableName("hello", "world")
        >>> str(tn)
        '"hello"."world"'
        >>> str(tn.as_staging_table_name())
        '"etl_staging$hello"."world"'
        """
        return f'"{self.schema}"."{self.table}"'
    def __format__(self, code):
        """
        Format name as delimited identifier (with quotes) or just as an identifier.
        With the default or ':s', it's a delimited identifier with quotes.
        With ':x", the name is left bare but single quotes are around it.
        >>> pu = TableName("public", "users")
        >>> format(pu)
        '"public"."users"'
        >>> format(pu, 'x')
        "'public.users'"
        >>> "SELECT * FROM {:s}".format(pu)
        'SELECT * FROM "public"."users"'
        >>> "Table {:x} contains users".format(pu) # new style with using formatting code
        "Table 'public.users' contains users"
        >>> "Table '{}' contains users".format(pu.identifier) # old style by accessing property
        "Table 'public.users' contains users"
        >>> "Oops: {:y}".format(pu)
        Traceback (most recent call last):
        ValueError: unknown format code 'y' for TableName
        """
        if (not code) or (code == "s"):
            return str(self)
        elif code == "x":
            return "'{:s}'".format(self.identifier)
        else:
            raise ValueError("unknown format code '{}' for {}".format(code, self.__class__.__name__))
    def __eq__(self, other: object):
        if not isinstance(other, TableName):
            return False
        return self.to_tuple() == other.to_tuple()
    def __hash__(self):
        # Consistent with __eq__: both are based on the (schema, table) tuple.
        return hash(self.to_tuple())
    def __lt__(self, other: "TableName"):
        """
        Order two table names, case-insensitive.
        >>> ta = TableName("Iowa", "Cedar Rapids")
        >>> tb = TableName("Iowa", "Davenport")
        >>> ta < tb
        True
        """
        return self.identifier < other.identifier
    def match(self, other: "TableName") -> bool:
        """
        Treat yo'self as a tuple of patterns and match against the other table.
        >>> tp = TableName("w*", "o*")
        >>> tn = TableName("www", "orders")
        >>> tp.match(tn)
        True
        >>> tn = TableName("worldwide", "octopus")
        >>> tp.match(tn)
        True
        >>> tn = TableName("sales", "orders")
        >>> tp.match(tn)
        False
        """
        # Our own schema/table act as fnmatch-style patterns; 'other' holds concrete names.
        other_schema = other.schema
        other_table = other.table
        return fnmatch.fnmatch(other_schema, self.schema) and fnmatch.fnmatch(other_table, self.table)
    def match_pattern(self, pattern: str) -> bool:
        """
        Test whether this table matches the given pattern.
        >>> tn = TableName("www", "orders")
        >>> tn.match_pattern("w*.o*")
        True
        >>> tn.match_pattern("o*.w*")
        False
        """
        return fnmatch.fnmatch(self.identifier, pattern)
    def as_staging_table_name(self):
        """Return a copy of this table name that is flagged as being in staging."""
        return TableName(*self.to_tuple(), is_staging=True)
class TempTableName(TableName):
    r"""
    Class for names of temporary relations.
    Temporary views or tables do not have a schema (*) and their name starts with '#'.
    (* = strictly speaking, their schema is one of the pg_temp% schemas. But who's looking.)
    >>> temp = TempTableName("#hello")
    >>> str(temp)
    '"#hello"'
    >>> temp.identifier
    '#hello'
    >>> "For SQL: {:s}, for logging: {:x}".format(temp, temp)
    'For SQL: "#hello", for logging: \'#hello\''
    Schema and name comparison in SQL continues to work if you use LIKE for schema names:
    >>> temp.schema
    'pg_temp%'
    """
    def __init__(self, table) -> None:
        if not table.startswith("#"):
            raise ValueError("name of temporary table must start with '#'")
        super().__init__(None, table)
        # Remember whether this is a temporary view with late schema binding.
        self.is_late_binding_view = False
    @property
    def schema(self):
        # A LIKE pattern so SQL-side schema comparison keeps working for temp relations.
        return "pg_temp%"
    @property
    def identifier(self):
        # The bare table name (including '#') doubles as the identifier.
        return self.table
    def __str__(self):
        return f'"{self.table}"'
    @staticmethod
    def for_table(table: TableName):
        """
        Return a valid name for a temporary table that's derived from the given table name.
        Leaks Redshift spec in that we make sure that names are less than 127 characters long.
        >>> example = "public.speakeasy"
        >>> tn = TableName.from_identifier(example)
        >>> temp = TempTableName.for_table(tn)
        >>> temp.identifier
        '#public$speakeasy'
        >>> str(temp)
        '"#public$speakeasy"'
        >>> too_long = "public." + "long" * 32
        >>> tt = TempTableName.for_table(TableName.from_identifier(too_long))
        >>> len(tt.identifier)
        127
        """
        derived = "#{}${}".format(table.schema, table.table)
        if len(derived) > 127:
            # Truncate and append a random suffix so the name stays within 127 characters.
            derived = derived[:119] + uuid.uuid4().hex[:8]
        return TempTableName(derived)
class TableSelector:
"""
Class to hold patterns to filter table names.
Patterns that are supported are based on "glob" matches, which use *, ?, and [] -- just
like the shell does. But note that all matching is done case-insensitive.
There is a concept of "base schemas." This list should be based on the configuration and
defines the set of usable schemas. ("Schemas" here refers to either upstream sources or
schemas storing transformations.) So when | |
VNI pool ID (required)
:type vni_pool: :class:`com.vmware.nsx.model_client.VniPool`
:param vni_pool: (required)
:rtype: :class:`com.vmware.nsx.model_client.VniPool`
:return: com.vmware.nsx.model.VniPool
:raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
Service Unavailable
:raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
Bad Request, Precondition Failed
:raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
Internal Server Error
:raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
Forbidden
:raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
Not Found
"""
return self._invoke('update',
{
'pool_id': pool_id,
'vni_pool': vni_pool,
})
class VtepLabelPools(VapiInterface):
    """
    """
    _VAPI_SERVICE_ID = 'com.vmware.nsx.pools.vtep_label_pools'
    """
    Identifier of the service in canonical form.
    """
    def __init__(self, config):
        """
        :type config: :class:`vmware.vapi.bindings.stub.StubConfiguration`
        :param config: Configuration to be used for creating the stub.
        """
        VapiInterface.__init__(self, config, _VtepLabelPoolsStub)
        self._VAPI_OPERATION_IDS = {}
    def get(self, pool_id):
        """
        Return information about the specified virtual tunnel endpoint label
        pool.
        :type pool_id: :class:`str`
        :param pool_id: Virtual tunnel endpoint label pool ID (required)
        :rtype: :class:`com.vmware.nsx.model_client.VtepLabelPool`
        :return: com.vmware.nsx.model.VtepLabelPool
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        return self._invoke('get', {'pool_id': pool_id})
    def list(self,
             cursor=None,
             included_fields=None,
             page_size=None,
             sort_ascending=None,
             sort_by=None,
             ):
        """
        Return a list of all virtual tunnel endpoint label pools.
        :type cursor: :class:`str` or ``None``
        :param cursor: Opaque cursor to be used for getting next page of records (supplied
            by current result page) (optional)
        :type included_fields: :class:`str` or ``None``
        :param included_fields: Comma separated list of fields that should be included in query
            result (optional)
        :type page_size: :class:`long` or ``None``
        :param page_size: Maximum number of results to return in this page (server may return
            fewer) (optional, default to 1000)
        :type sort_ascending: :class:`bool` or ``None``
        :param sort_ascending: (optional)
        :type sort_by: :class:`str` or ``None``
        :param sort_by: Field by which records are sorted (optional)
        :rtype: :class:`com.vmware.nsx.model_client.VtepLabelPoolListResult`
        :return: com.vmware.nsx.model.VtepLabelPoolListResult
        :raise: :class:`com.vmware.vapi.std.errors_client.ServiceUnavailable`
            Service Unavailable
        :raise: :class:`com.vmware.vapi.std.errors_client.InvalidRequest`
            Bad Request, Precondition Failed
        :raise: :class:`com.vmware.vapi.std.errors_client.InternalServerError`
            Internal Server Error
        :raise: :class:`com.vmware.vapi.std.errors_client.Unauthorized`
            Forbidden
        :raise: :class:`com.vmware.vapi.std.errors_client.NotFound`
            Not Found
        """
        operation_input = {
            'cursor': cursor,
            'included_fields': included_fields,
            'page_size': page_size,
            'sort_ascending': sort_ascending,
            'sort_by': sort_by,
        }
        return self._invoke('list', operation_input)
class _IpBlocksStub(ApiInterfaceStub):
    """Generated stub for 'com.vmware.nsx.pools.ip_blocks': maps each CRUD operation to its REST call."""
    def __init__(self, config):
        """
        Build the static operation descriptions (input struct types, error
        maps, validator lists, and REST routing metadata) and register them
        with the base :class:`ApiInterfaceStub`.
        :param config: Stub configuration forwarded to the base class.
        """
        # properties for create operation
        create_input_type = type.StructType('operation-input', {
            'ip_block': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlock'),
        })
        # Maps wire-level error identifiers to their binding error types.
        create_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        create_input_value_validator_list = [
        ]
        create_output_validator_list = [
        ]
        create_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/pools/ip-blocks',
            request_body_parameter='ip_block',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'block_id': type.StringType(),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/api/v1/pools/ip-blocks/{block-id}',
            path_variables={
                'block_id': 'block-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'block_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/pools/ip-blocks/{block-id}',
            path_variables={
                'block_id': 'block-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/pools/ip-blocks',
            path_variables={
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'block_id': type.StringType(),
            'ip_block': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlock'),
        })
        # Note: update additionally maps the concurrent-change error (optimistic locking).
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.concurrent_change':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/api/v1/pools/ip-blocks/{block-id}',
            request_body_parameter='ip_block',
            path_variables={
                'block_id': 'block-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Per-operation type/validator metadata, registered with the base stub below.
        operations = {
            'create': {
                'input_type': create_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlock'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlock'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlockListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlock'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # REST routing for each operation.
        rest_metadata = {
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.pools.ip_blocks',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _IpPoolsStub(ApiInterfaceStub):
    """Generated stub for 'com.vmware.nsx.pools.ip_pools': maps pool CRUD plus allocate/release to REST calls."""
    def __init__(self, config):
        """
        Build the static operation descriptions (input struct types, error
        maps, validator lists, and REST routing metadata) and register them
        with the base :class:`ApiInterfaceStub`.
        :param config: Stub configuration forwarded to the base class.
        """
        # properties for allocateorrelease operation
        allocateorrelease_input_type = type.StructType('operation-input', {
            'pool_id': type.StringType(),
            'allocation_ip_address': type.ReferenceType('com.vmware.nsx.model_client', 'AllocationIpAddress'),
            'action': type.StringType(),
        })
        # Maps wire-level error identifiers to their binding error types
        # (includes concurrent-change for this mutating operation).
        allocateorrelease_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.concurrent_change':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        allocateorrelease_input_value_validator_list = [
        ]
        allocateorrelease_output_validator_list = [
        ]
        # The allocate/release choice travels as the 'action' query parameter.
        allocateorrelease_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/pools/ip-pools/{pool-id}',
            request_body_parameter='allocation_ip_address',
            path_variables={
                'pool_id': 'pool-id',
            },
            query_parameters={
                'action': 'action',
            },
            content_type='application/json'
        )
        # properties for create operation
        create_input_type = type.StructType('operation-input', {
            'ip_pool': type.ReferenceType('com.vmware.nsx.model_client', 'IpPool'),
        })
        create_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        create_input_value_validator_list = [
        ]
        create_output_validator_list = [
        ]
        create_rest_metadata = OperationRestMetadata(
            http_method='POST',
            url_template='/api/v1/pools/ip-pools',
            request_body_parameter='ip_pool',
            path_variables={
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for delete operation
        delete_input_type = type.StructType('operation-input', {
            'pool_id': type.StringType(),
            'force': type.OptionalType(type.BooleanType()),
        })
        delete_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        delete_input_value_validator_list = [
        ]
        delete_output_validator_list = [
        ]
        delete_rest_metadata = OperationRestMetadata(
            http_method='DELETE',
            url_template='/api/v1/pools/ip-pools/{pool-id}',
            path_variables={
                'pool_id': 'pool-id',
            },
            query_parameters={
                'force': 'force',
            },
            content_type='application/json'
        )
        # properties for get operation
        get_input_type = type.StructType('operation-input', {
            'pool_id': type.StringType(),
        })
        get_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        get_input_value_validator_list = [
        ]
        get_output_validator_list = [
        ]
        get_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/pools/ip-pools/{pool-id}',
            path_variables={
                'pool_id': 'pool-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # properties for list operation
        list_input_type = type.StructType('operation-input', {
            'cursor': type.OptionalType(type.StringType()),
            'included_fields': type.OptionalType(type.StringType()),
            'page_size': type.OptionalType(type.IntegerType()),
            'sort_ascending': type.OptionalType(type.BooleanType()),
            'sort_by': type.OptionalType(type.StringType()),
        })
        list_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        list_input_value_validator_list = [
        ]
        list_output_validator_list = [
        ]
        list_rest_metadata = OperationRestMetadata(
            http_method='GET',
            url_template='/api/v1/pools/ip-pools',
            path_variables={
            },
            query_parameters={
                'cursor': 'cursor',
                'included_fields': 'included_fields',
                'page_size': 'page_size',
                'sort_ascending': 'sort_ascending',
                'sort_by': 'sort_by',
            },
            content_type='application/json'
        )
        # properties for update operation
        update_input_type = type.StructType('operation-input', {
            'pool_id': type.StringType(),
            'ip_pool': type.ReferenceType('com.vmware.nsx.model_client', 'IpPool'),
        })
        update_error_dict = {
            'com.vmware.vapi.std.errors.service_unavailable':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
            'com.vmware.vapi.std.errors.invalid_request':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
            'com.vmware.vapi.std.errors.internal_server_error':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
            'com.vmware.vapi.std.errors.unauthorized':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
            'com.vmware.vapi.std.errors.not_found':
                type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
        }
        update_input_value_validator_list = [
        ]
        update_output_validator_list = [
        ]
        update_rest_metadata = OperationRestMetadata(
            http_method='PUT',
            url_template='/api/v1/pools/ip-pools/{pool-id}',
            request_body_parameter='ip_pool',
            path_variables={
                'pool_id': 'pool-id',
            },
            query_parameters={
            },
            content_type='application/json'
        )
        # Per-operation type/validator metadata, registered with the base stub below.
        operations = {
            'allocateorrelease': {
                'input_type': allocateorrelease_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'AllocationIpAddress'),
                'errors': allocateorrelease_error_dict,
                'input_value_validator_list': allocateorrelease_input_value_validator_list,
                'output_validator_list': allocateorrelease_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'create': {
                'input_type': create_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpPool'),
                'errors': create_error_dict,
                'input_value_validator_list': create_input_value_validator_list,
                'output_validator_list': create_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'delete': {
                'input_type': delete_input_type,
                'output_type': type.VoidType(),
                'errors': delete_error_dict,
                'input_value_validator_list': delete_input_value_validator_list,
                'output_validator_list': delete_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'get': {
                'input_type': get_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpPool'),
                'errors': get_error_dict,
                'input_value_validator_list': get_input_value_validator_list,
                'output_validator_list': get_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'list': {
                'input_type': list_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpPoolListResult'),
                'errors': list_error_dict,
                'input_value_validator_list': list_input_value_validator_list,
                'output_validator_list': list_output_validator_list,
                'task_type': TaskType.NONE,
            },
            'update': {
                'input_type': update_input_type,
                'output_type': type.ReferenceType('com.vmware.nsx.model_client', 'IpPool'),
                'errors': update_error_dict,
                'input_value_validator_list': update_input_value_validator_list,
                'output_validator_list': update_output_validator_list,
                'task_type': TaskType.NONE,
            },
        }
        # REST routing for each operation.
        rest_metadata = {
            'allocateorrelease': allocateorrelease_rest_metadata,
            'create': create_rest_metadata,
            'delete': delete_rest_metadata,
            'get': get_rest_metadata,
            'list': list_rest_metadata,
            'update': update_rest_metadata,
        }
        ApiInterfaceStub.__init__(
            self, iface_name='com.vmware.nsx.pools.ip_pools',
            config=config, operations=operations, rest_metadata=rest_metadata,
            is_vapi_rest=False)
class _IpSubnetsStub(ApiInterfaceStub):
def __init__(self, config):
# properties for allocateorreleasefromipblocksubnet operation
allocateorreleasefromipblocksubnet_input_type = type.StructType('operation-input', {
'subnet_id': type.StringType(),
'allocation_ip_address': type.ReferenceType('com.vmware.nsx.model_client', 'AllocationIpAddress'),
'action': type.StringType(),
})
allocateorreleasefromipblocksubnet_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.concurrent_change':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
allocateorreleasefromipblocksubnet_input_value_validator_list = [
]
allocateorreleasefromipblocksubnet_output_validator_list = [
]
allocateorreleasefromipblocksubnet_rest_metadata = OperationRestMetadata(
http_method='POST',
url_template='/api/v1/pools/ip-subnets/{subnet-id}',
request_body_parameter='allocation_ip_address',
path_variables={
'subnet_id': 'subnet-id',
},
query_parameters={
'action': 'action',
},
content_type='application/json'
)
# properties for create operation
create_input_type = type.StructType('operation-input', {
'ip_block_subnet': type.ReferenceType('com.vmware.nsx.model_client', 'IpBlockSubnet'),
})
create_error_dict = {
'com.vmware.vapi.std.errors.service_unavailable':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ServiceUnavailable'),
'com.vmware.vapi.std.errors.invalid_request':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InvalidRequest'),
'com.vmware.vapi.std.errors.internal_server_error':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'InternalServerError'),
'com.vmware.vapi.std.errors.concurrent_change':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'ConcurrentChange'),
'com.vmware.vapi.std.errors.unauthorized':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'Unauthorized'),
'com.vmware.vapi.std.errors.not_found':
type.ReferenceType('com.vmware.vapi.std.errors_client', 'NotFound'),
}
create_input_value_validator_list = [
]
| |
<filename>pycatia/hybrid_shape_interfaces/hybrid_shape_conic.py<gh_stars>10-100
#!/usr/bin/python3.6
"""
Module initially auto generated using V5Automation files from CATIA V5 R28 on 2020-07-06 14:02:20.222384
.. warning::
The notes denoted "CAA V5 Visual Basic Help" are to be used as reference only.
They are there as a guide as to how the visual basic / catscript functions work
and thus help debugging in pycatia.
"""
from pycatia.hybrid_shape_interfaces.hybrid_shape_direction import HybridShapeDirection
from pycatia.in_interfaces.reference import Reference
from pycatia.knowledge_interfaces.length import Length
from pycatia.mec_mod_interfaces.hybrid_shape import HybridShape
class HybridShapeConic(HybridShape):
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
| System.IUnknown
| System.IDispatch
| System.CATBaseUnknown
| System.CATBaseDispatch
| System.AnyObject
| MecModInterfaces.HybridShape
| HybridShapeConic
|
| Represents the hybrid shape conic object.
| Role: To access the data of the hybrid shape conic object. This data
| includes:
|
| The start point and its associated tangent constraint
| The end point and its associated tangent constraint
| The supporting plane
| The tangent intersection point
| The conic parameter: p = 0.5 (parabola), 0<=p<=0.5 (ellipse), 0.5<= p <=1.0 (hyperbola)
|
| Use the HybridShapeFactory to create a HybridShapeConic
| object.
"""
def __init__(self, com_object):
    """
    :param com_object: the raw CATIA ``HybridShapeConic`` COM object wrapped
        by this class.
    """
    super().__init__(com_object)
    # Keep a descriptively named alias to the COM object; every property and
    # method below delegates to it.
    self.hybrid_shape_conic = com_object
@property
def conic_parameter(self) -> float:
    """
    The conic parameter of this conic.

    Wraps the CAA V5 property ``ConicParameter() As double``. The value
    encodes the conic type: p = 0.5 gives a parabola, 0 <= p < 0.5 an
    ellipse, and 0.5 < p <= 1.0 a hyperbola.

    :return: float
    :rtype: float
    """
    return self.hybrid_shape_conic.ConicParameter

@conic_parameter.setter
def conic_parameter(self, value: float):
    """
    Set the conic parameter on the underlying COM object.

    :param float value:
    """
    self.hybrid_shape_conic.ConicParameter = value
@property
def conic_user_tol(self) -> Length:
    """
    The conic user tolerance (read-only).

    Wraps the CAA V5 property ``ConicUserTol() As Length (Read Only)``.
    Although the original VB help text says "Gets or sets", the COM property
    is read-only, which is why no Python setter is provided here.

    .. note::
        :class: toggle

        CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384)
            | o Property ConicUserTol() As Length (Read Only)
            |
            | Example:
            |     Dim oConicUserTol As CATIALength
            |     Set oConicUserTol = HybridShapeConic.conicUserTol
            |
            | See also: Length

    :return: Length
    :rtype: Length
    """
    # Wrap the raw COM value in the pycatia Length class.
    return Length(self.hybrid_shape_conic.ConicUserTol)
@property
def end_point(self) -> Reference:
    """
    The conic end point.

    Wraps the CAA V5 property ``EndPoint() As Reference``.
    Sub-element(s) supported (see Boundary object): Vertex.

    :return: Reference
    :rtype: Reference
    """
    com_reference = self.hybrid_shape_conic.EndPoint
    return Reference(com_reference)

@end_point.setter
def end_point(self, reference_point: Reference):
    """
    Set the conic end point.

    :param Reference reference_point:
    """
    # The COM property expects the raw COM object, not the pycatia wrapper.
    self.hybrid_shape_conic.EndPoint = reference_point.com_object
@property
def end_tangent(self) -> HybridShapeDirection:
    """
    The tangent direction at the conic end point.

    Wraps the CAA V5 property ``EndTangent() As HybridShapeDirection``.

    :return: HybridShapeDirection
    :rtype: HybridShapeDirection
    """
    com_direction = self.hybrid_shape_conic.EndTangent
    return HybridShapeDirection(com_direction)

@end_tangent.setter
def end_tangent(self, direction: HybridShapeDirection):
    """
    Set the tangent direction at the conic end point.

    :param HybridShapeDirection direction:
    """
    # The COM property expects the raw COM object, not the pycatia wrapper.
    self.hybrid_shape_conic.EndTangent = direction.com_object
@property
def start_point(self) -> Reference:
    """
    The conic start point.

    Wraps the CAA V5 property ``StartPoint() As Reference``.
    Sub-element(s) supported (see Boundary object): Vertex.

    :return: Reference
    :rtype: Reference
    """
    return Reference(self.hybrid_shape_conic.StartPoint)

@start_point.setter
def start_point(self, reference_point: Reference):
    """
    Set the conic start point.

    :param Reference reference_point:
    """
    # BUG FIX: the COM property must receive the raw COM object, not the
    # pycatia Reference wrapper (consistent with the end_point, end_tangent
    # and support_plane setters in this class).
    self.hybrid_shape_conic.StartPoint = reference_point.com_object
@property
def start_tangent(self) -> HybridShapeDirection:
    """
    The tangent direction at the conic start point.

    Wraps the CAA V5 property ``StartTangent() As HybridShapeDirection``.

    :return: HybridShapeDirection
    :rtype: HybridShapeDirection
    """
    return HybridShapeDirection(self.hybrid_shape_conic.StartTangent)

@start_tangent.setter
def start_tangent(self, direction: HybridShapeDirection):
    """
    Set the tangent direction at the conic start point.

    :param HybridShapeDirection direction:
    """
    # BUG FIX: the COM property must receive the raw COM object, not the
    # pycatia HybridShapeDirection wrapper (consistent with the end_tangent
    # setter in this class).
    self.hybrid_shape_conic.StartTangent = direction.com_object
@property
def support_plane(self) -> Reference:
    """
    The conic supporting plane.

    Wraps the CAA V5 property ``SupportPlane() As Reference``.
    Sub-element(s) supported (see Boundary object): PlanarFace.

    :return: Reference
    :rtype: Reference
    """
    com_plane = self.hybrid_shape_conic.SupportPlane
    return Reference(com_plane)

@support_plane.setter
def support_plane(self, reference_plane: Reference):
    """
    Set the conic supporting plane.

    :param Reference reference_plane:
    """
    # The COM property expects the raw COM object, not the pycatia wrapper.
    self.hybrid_shape_conic.SupportPlane = reference_plane.com_object
@property
def tangent_int_point(self) -> Reference:
    """
    The conic tangent intersection point.

    Wraps the CAA V5 property ``TangentIntPoint() As Reference``.
    Sub-element(s) supported (see Boundary object): Vertex.

    :return: Reference
    :rtype: Reference
    """
    return Reference(self.hybrid_shape_conic.TangentIntPoint)

@tangent_int_point.setter
def tangent_int_point(self, reference_tangent_point: Reference):
    """
    Set the conic tangent intersection point.

    :param Reference reference_tangent_point:
    """
    # BUG FIX: the COM property must receive the raw COM object, not the
    # pycatia Reference wrapper (consistent with the end_point and
    # support_plane setters in this class).
    self.hybrid_shape_conic.TangentIntPoint = reference_tangent_point.com_object
def get_end_tangent_direction_flag(self, o_orientation: int) -> None:
    """
    Retrieve the tangent direction orientation at the conic end point.

    Wraps ``Sub GetEndTangentDirectionFlag(long oOrientation)`` (CAA V5
    Visual Basic Help, 2020-07-06 14:02:20.222384).

    | oOrientation
    |     The direction orientation applied to the tangent direction at the
    |     conic end point.
    |     Legal values: 1 if the tangent direction is used as is, and -1 if
    |     it is inverted.

    .. note::
        In the VB signature ``oOrientation`` is an *output* parameter.
        Passing a plain Python int here will not be mutated in place —
        TODO confirm how the COM marshalling actually returns the value.

    :param int o_orientation:
    :return: None
    :rtype: None
    """
    return self.hybrid_shape_conic.GetEndTangentDirectionFlag(o_orientation)
def get_intermed_tangent(self, i_index_point: int) -> HybridShapeDirection:
    """
    Retrieve the tangent direction at one of the conic intermediate passing
    points.

    Wraps ``Func GetIntermedTangent(long iIndexPoint) As
    HybridShapeDirection`` (CAA V5 Visual Basic Help,
    2020-07-06 14:02:20.222384).

    | iIndexPoint
    |     An index that designates the passing point to retrieve.
    |     Legal values: 1 for the first passing point, and 2 for the second
    |     one.

    :param int i_index_point:
    :return: HybridShapeDirection
    :rtype: HybridShapeDirection
    """
    com_direction = self.hybrid_shape_conic.GetIntermedTangent(i_index_point)
    return HybridShapeDirection(com_direction)
def get_intermediate_point(self, i_index_point: int, o_end_point: Reference) -> None:
"""
.. note::
:class: toggle
CAA V5 Visual Basic Help (2020-07-06 14:02:20.222384))
| o Sub GetIntermediatePoint(long iIndexPoint,
| Reference oEndPoint)
|
| Retrieves one of the conic intermediate passing points.
|
| Parameters:
|
| iIndexPoint
| An index that designates the passing point to
| retrieve
| Legal values: 1 for the first passing point, 2 for the second one,
| and 3 for the third one
| oEndPoint
| The retrieved passing point
|
| Example:
|
| This | |
#######################################################################
# PyBank budget analysis
#
# Reads Resources/budget_data.csv (columns: Date, Profit/Losses) and
# prints a financial summary: number of months, total P&L, average
# monthly P&L, and the average / greatest-increase / greatest-decrease
# of the month-over-month change.
#
# Clean-up notes:
#   * the header row is now skipped while reading (next(csvreader))
#     instead of being pop(0)'d after loading the whole file;
#   * the P&L column is converted to int exactly once;
#   * the month-over-month changes are built in a single comprehension
#     (the old code clobbered the `monthly_changes` list with a scalar);
#   * intermediate debug print statements were removed — only the final
#     report is printed.
#######################################################################
import os
import csv
from statistics import mean

# Build the input path portably across operating systems.
csvpath = os.path.join('Resources', 'budget_data.csv')

# ---- Load the CSV rows, skipping the header --------------------------
with open(csvpath) as csvfile:
    csvreader = csv.reader(csvfile, delimiter=',')
    next(csvreader)  # drop the column-header row
    long_list = [row for row in csvreader]

# ---- Number of months ------------------------------------------------
number_of_months = len(long_list)

# ---- Total and average profit/loss -----------------------------------
# Convert the P&L column (index 1) to integers once, up front.
nums = [int(row[1]) for row in long_list]
Total_PL = sum(nums)
avg = Total_PL / number_of_months

# ---- Month-over-month change -----------------------------------------
monthly_variance = [curr - prev for prev, curr in zip(nums, nums[1:])]

# ---- Mean, max and min of the monthly change -------------------------
avg_variance = mean(monthly_variance)
max_variance = max(monthly_variance)
min_variance = min(monthly_variance)

# ---- Dates of the max and min change ---------------------------------
# Change i is between month i-1 and month i, so insert a placeholder at
# index 0 to align monthly_variance with the row/date indexing of the
# raw table (None never compares equal to an int, so .index() is safe).
monthly_variance.insert(0, None)
max_variance_index = monthly_variance.index(max_variance)
min_variance_index = monthly_variance.index(min_variance)
dates = [row[0] for row in long_list]

# ---- Terminal report -------------------------------------------------
print('Financial Analysis')
print('--------------------------------------')
print('Number of Months: ' + str(number_of_months))
print('Total Profit and loss: ' + str(Total_PL))
print('Average monthly Profit and Loss: ' + str(avg))
print('Average monthly PL Variance: ' + str(avg_variance))
print('Max monthly PL Variance: ' + str(dates[max_variance_index]) + ': ' + str(max_variance))
print('Min monthly PL Variance: ' + str(dates[min_variance_index]) + ': ' + str(min_variance))
##########################################################################
######## Part 9 - Save file #####################
##########################################################################
f = open('FinancialAnalysis.txt', 'w')
f.write('Financial Analysis\n')
f.write('--------------------------------------\n') #The new-line character has been used
f.write('Number of Months: ' + str(number_of_months)+'\n')
f.write('Total Profit and loss: ' + str(Total_PL)+'\n')
f.write('Average monthly Profit and Loss: ' + str(avg)+'\n')
f.write('Average monthly PL Variance: ' + str(avg_variance)+'\n')
f.write('Max monthly PL Variance: '+ str(dates[max_variance_index]) + ': | |
import abc
import six
import inspect
from functools import partial
import numpy as np
from tsfuse.data import Tags
from ..data import Collection
from ..errors import InvalidPreconditionError
@six.add_metaclass(abc.ABCMeta)
class Node(object):
    """
    Node of a computation graph.

    Parameters
    ----------
    parents : list(Node), optional
        Parent nodes.
    is_output : bool, optional
        True if the node must be an output node or False if the node should
        not be an output node. By default, the node is an output node if it
        is not used as a parent for another node.

    Attributes
    ----------
    parents : list(Node)
        Parent nodes.
    children : list(Node)
        Child nodes.
    is_input : bool
        True if the node is an input node.
    is_output : bool
        True if the node is an output node.
    """

    def __init__(self, parents=None, is_output=None):
        self._id = None
        self._parents = [] if parents is None else parents
        # Register this node as a child of each parent so that the default
        # is_output rule ("node has no children") stays consistent.
        for p in self._parents:
            p.add_child(self)
        self._children = []
        self._output = None
        self._is_output = is_output

    @property
    def parents(self):
        return self._parents

    @property
    def children(self):
        return self._children

    @property
    def is_input(self):
        return False

    @property
    def is_output(self):
        # An explicit is_output setting wins; otherwise a leaf is an output.
        if self._is_output is None:
            return len(self._children) == 0
        else:
            return self._is_output

    def add_child(self, child):
        """
        Add a child node.

        Parameters
        ----------
        child : Node
            Child node.
        """
        self._children.append(child)

    # Operator overloads: each builds a new Transformer node on top of this
    # node, so graphs can be written with ordinary Python expressions.

    def __add__(self, other):
        return Add(self, other)

    def __sub__(self, other):
        return Subtract(self, other)

    def __mul__(self, other):
        return Multiply(self, other)

    def __div__(self, other):
        return Divide(self, other)

    def __and__(self, other):
        return And(self, other)

    def __or__(self, other):
        return Or(self, other)

    def __truediv__(self, other):
        # Python 3 division delegates to the Python 2 style __div__ above.
        return self.__div__(other)

    def __gt__(self, other):
        return Greater(self, other)

    def __lt__(self, other):
        return Less(self, other)

    def __le__(self, other):
        return LessEqual(self, other)

    def __ge__(self, other):
        return GreaterEqual(self, other)

    def __neg__(self):
        return Not(self)

    @property
    def trace(self):
        return ()

    @property
    def __name__(self):
        return str(self)

    def __repr__(self):
        # BUG FIX: __repr__ was previously declared as a @property that
        # returned the *bound method* self.__str__, which made repr(node)
        # raise TypeError ('property' object is not callable). It must be a
        # plain method returning a string.
        return self.__str__()
class Input(Node):
    """
    Node that serves as the input of a computation graph.

    An Input performs no computation itself: the graph binds external data
    to it via its identifier.

    Parameters
    ----------
    input_id : int or str
        Input identifier.
    """

    def __init__(self, input_id):
        super(Input, self).__init__()
        self.input_id = input_id

    @property
    def is_input(self):
        # By definition always an input ...
        return True

    @property
    def is_output(self):
        # ... and never an output.
        return False

    def apply(self, copy=True):
        # Nothing to compute; the data is supplied externally by the graph.
        pass

    @property
    def trace(self):
        # Identity tuple used to compare/deduplicate nodes.
        return 'Input', self.input_id

    @property
    def name(self):
        return '{}'.format(self.input_id)

    def __str__(self):
        return 'Input(%s)' % (self.input_id,)
class Constant(Node):
    """
    Node that produces a constant value,
    given as :class:`~tsfuse.data.Collection` object.

    Parameters
    ----------
    data : int, float, str or object
        Output data.
    """

    def __init__(self, data):
        super(Constant, self).__init__()
        # The constant is stored directly as this node's output.
        self.output = data

    def apply(self):
        # Nothing to compute: the output never changes.
        pass

    @property
    def trace(self):
        # Identity tuple used to compare/deduplicate nodes.
        return 'Constant', self.output

    @property
    def name(self):
        return 'Constant'

    def __str__(self):
        return 'Constant(%s)' % (self.output,)
@six.add_metaclass(abc.ABCMeta)
class Transformer(Node):
    """
    Transformer node.

    A Transformer computes an output :class:`~tsfuse.data.Collection` from
    the Collections produced by its parent nodes. Concrete subclasses supply
    either an ``apply(*collections)`` function or a ``graph(*inputs)``
    factory that builds an equivalent sub-graph.
    """

    def __init__(self, *parents, **kwargs):
        # Supported kwargs: is_output (bool) and with_preconditions (extra
        # precondition callables appended to the subclass-defined list).
        is_output = kwargs.get('is_output', None)
        if not hasattr(self, 'preconditions'):
            self.preconditions = []
        self.preconditions += kwargs.get('with_preconditions', [])
        super(Transformer, self).__init__(parents=parents, is_output=is_output)

    def check_preconditions(self, *collections):
        """
        Check that the preconditions are satisfied.

        Parameters
        ----------
        *collections
            :class:`~tsfuse.data.Collection` objects used as input.

        Raises
        ------
        InvalidPreconditionError
            If any precondition fails. (Returns None on success.)
        """
        def satisfied(c):
            return all(p(*c) for p in self.preconditions)
        # A tuple in shape[1] marks a multi-view collection.
        # NOTE(review): the loop below checks the same full collection list
        # once per view index i instead of checking the i-th views —
        # presumably a per-view check was intended; confirm before changing.
        if isinstance(collections[0].shape[1], tuple):
            for i in range(len(collections[0].shape[1])):
                if not satisfied([c for c in collections]):
                    raise InvalidPreconditionError(self)
        else:
            if not satisfied(collections):
                raise InvalidPreconditionError(self)

    def transform(self, *collections, ignore_preconditions=False):
        """
        Compute this transformer's output for the given input collections.

        Returns the resulting Collection, or None when the computation fails
        (failures are deliberately swallowed — see the TODOs below).
        """
        if not ignore_preconditions:
            self.check_preconditions(*collections)
        result = None
        if hasattr(self, 'apply'):
            if isinstance(collections[0].shape[1], tuple):
                # Multi-view input: apply the transformer to each view
                # separately, then reassemble the per-view results.
                f = partial(_apply, apply=self.apply, collections=collections[:])
                try:
                    results = [f(i) for i in range(len(collections[0].values))]
                except:  # TODO: Make more restrictive!!
                    # TODO: Generate warning instead of error
                    results = None
                if results is not None:
                    if any(r is None for r in results):
                        result = None
                    elif len(set(r.shape for r in results)) == 1:
                        # All views produced equally shaped collections:
                        # concatenate values and time axes into one result.
                        result = Collection.from_array(
                            np.concatenate([r.values for r in results]),
                            time=np.concatenate([r.time for r in results]),
                            dims=results[0].dims,
                        )
                    else:
                        # Heterogeneous shapes: keep the per-view results.
                        result = Collection.from_array(results)
                else:
                    result = None
            else:
                try:
                    result = self.apply(*collections)
                except:
                    # TODO: Generate warning instead of error
                    result = None
        elif hasattr(self, 'graph'):
            # Subclass defined as a sub-graph: instantiate it on fresh Input
            # nodes and take the last output as this node's result.
            graph = self.graph(*[Input(i) for i in range(len(collections))])
            outputs = graph.transform({
                i: c for i, c in enumerate(collections)
            }, return_dataframe=False)
            result = outputs[graph.outputs[-1]]
        if result is None:
            return None
        else:
            # Propagate tags and unit from the inputs onto the result.
            result._tags = self.tags(*collections)
            result._unit = self.unit(*collections)
            return result

    def tags(self, *collections):
        """Return the intersection of the tags of all tagged inputs."""
        collections = [c for c in collections if hasattr(c, '_tags')]
        if len(collections) < 1:
            return Tags()
        propagated = Tags(collections[0]._tags)
        for i in range(1, len(collections)):
            propagated = propagated.intersect(collections[i]._tags)
        return propagated

    def unit(self, *collections):
        # Default: no unit. Subclasses may override.
        pass

    @property
    def trace(self):
        """Nested-tuple identity: (class name, sorted params, parent traces)."""
        def parameter(p):
            # Transformer-valued parameters are identified by their own trace.
            if isinstance(p, Transformer):
                return p.trace
            else:
                return p
        values = {p: self.__dict__[p] for p in self.__dict__ if _is_parameter(p)}
        params = tuple([parameter(values[p]) for p in sorted(values)])
        parents = tuple([p.trace for p in self.parents])
        t = tuple([self.__class__.__name__, params, parents])
        return t

    @property
    def n_inputs(self):
        """Number of input collections expected by ``apply`` or ``graph``."""
        if hasattr(self, 'apply'):
            f = self.apply
        else:
            f = self.graph
        args = inspect.getfullargspec(f)[0]
        # Discount 'self' when apply/graph is a bound method.
        return len(args) - 1 if 'self' in args else len(args)

    def __str__(self):
        s = str(self.__class__.__name__)
        values = {p: self.__dict__[p] for p in self.__dict__ if _is_parameter(p)}
        params = sorted(list(values))
        s += '({})'.format(', '.join(
            [str(p) for p in self.parents] +
            ['{}={}'.format(p, values[p]) for p in params if values[p] is not None]
        ))
        return s

    @property
    def name(self):
        """Class name plus non-None constructor parameters, e.g. ``Foo(axis=0)``."""
        args = [
            a for a in list(self.__dict__)
            if a not in [
                'preconditions',
                '_id',
                '_parents',
                '_children',
                '_output',
                '_is_output',
            ]
        ]
        values = [getattr(self, a) for a in args]
        argsvalues = [(a, v) for a, v in zip(args, values) if v is not None]
        if len(argsvalues) > 0:
            parameters = '(' + ', '.join([
                '{}={}'.format(a, v) for a, v in argsvalues
            ]) + ')'
        else:
            parameters = ''
        return str(self.__class__.__name__) + parameters
def _is_parameter(p):
if p[0].startswith('_'):
return False
else:
return p not in ('preconditions',)
def _apply(i, apply=None, collections=None):
    # Select the i-th view of every multi-view collection; anything that is
    # not a Collection of Collections is forwarded unchanged.
    inputs = [
        c.values[i]
        if isinstance(c, Collection) and isinstance(c.values[i], Collection)
        else c
        for c in collections
    ]
    return apply(*inputs)
class Add(Transformer):
    """
    Element-wise addition.

    Preconditions:

    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(Add, self).__init__(*parents, **kwargs)
        self.preconditions = [
            lambda *collections: len(collections) == 2,
            # BUG FIX: the second operand's dtype was previously checked via
            # x.dtype twice; the second check must inspect y.dtype.
            lambda x, y: np.issubdtype(x.dtype, np.float64) & np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        x, y = _collections(x, y)
        values = x.values + y.values
        return _result(x, y, values)
class Subtract(Transformer):
    """
    Element-wise subtraction.

    Preconditions:

    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(Subtract, self).__init__(*parents, **kwargs)
        self.preconditions = [
            lambda *collections: len(collections) == 2,
            # BUG FIX: the second operand's dtype was previously checked via
            # x.dtype twice; the second check must inspect y.dtype.
            lambda x, y: np.issubdtype(x.dtype, np.float64) & np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        x, y = _collections(x, y)
        values = x.values - y.values
        return _result(x, y, values)
class Multiply(Transformer):
    """
    Element-wise multiplication.

    Preconditions:

    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(Multiply, self).__init__(*parents, **kwargs)
        self.preconditions = [
            lambda *collections: len(collections) == 2,
            # BUG FIX: the second operand's dtype was previously checked via
            # x.dtype twice; the second check must inspect y.dtype.
            lambda x, y: np.issubdtype(x.dtype, np.float64) & np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        x, y = _collections(x, y)
        values = x.values * y.values
        return _result(x, y, values)
class Divide(Transformer):
    """
    Element-wise division.

    Preconditions:

    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(Divide, self).__init__(*parents, **kwargs)
        self.preconditions = [
            lambda *collections: len(collections) == 2,
            # BUG FIX: the second operand's dtype was previously checked via
            # x.dtype twice; the second check must inspect y.dtype.
            lambda x, y: np.issubdtype(x.dtype, np.float64) & np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        x, y = _collections(x, y)
        # Silence divide-by-zero / 0-over-0 warnings; the affected entries
        # become inf or nan in the result.
        with np.errstate(divide='ignore', invalid='ignore'):
            values = np.true_divide(x.values, y.values)
        return _result(x, y, values)
class Greater(Transformer):
    """
    Element-wise greater than comparison.

    Preconditions:

    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(Greater, self).__init__(*parents, **kwargs)
        self.preconditions = [
            lambda *collections: len(collections) == 2,
            # BUG FIX: the second operand's dtype was previously checked via
            # x.dtype twice; the second check must inspect y.dtype.
            lambda x, y: np.issubdtype(x.dtype, np.float64) & np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        x, y = _collections(x, y)
        values = np.array(x.values > y.values, dtype=bool)
        return _result(x, y, values)
class GreaterEqual(Transformer):
    """
    Element-wise greater than or equal comparison.

    Preconditions:
    - Number of inputs: 2
    - Input data must be numeric.
    """

    def __init__(self, *parents, **kwargs):
        super(GreaterEqual, self).__init__(*parents, **kwargs)
        self.preconditions = [
            # Exactly two input collections are required.
            lambda *collections: len(collections) == 2,
            # Both operands must carry float64 data.  The original expression
            # tested x.dtype twice, leaving y's dtype unvalidated.
            lambda x, y: np.issubdtype(x.dtype, np.float64)
            and np.issubdtype(y.dtype, np.float64),
        ]

    @staticmethod
    def apply(x, y):
        """Compare x >= y element-wise; the result is a boolean array."""
        x, y = _collections(x, y)
        values = np.array(x.values >= y.values, dtype=bool)
        return _result(x, y, values)
class Less(Transformer):
"""
Element-wise less than comparison.
Preconditions:
- Number of inputs: 2
- Input data must be numeric.
"""
def __init__(self, *parents, **kwargs):
    """Wire up parent transformers and register this node's input preconditions."""
    super(Less, self).__init__(*parents, **kwargs)
    self.preconditions = [
        # Exactly two input collections are required.
        lambda *collections: len(collections) == 2,
        # Both operands must carry float64 data.  The original expression
        # tested x.dtype twice, so y's dtype was never validated (copy-paste
        # bug shared by the sibling binary transformers).
        lambda x, y: np.issubdtype(x.dtype, np.float64)
        and np.issubdtype(y.dtype, np.float64),
    ]
@staticmethod
def apply(x, y):
x, y = | |
import pprint
import re
from typing import Any, Dict
import numpy as np
import pytest
from qcelemental.molutil import compute_scramble
from qcengine.programs.tests.standard_suite_contracts import (
contractual_accsd_prt_pr,
contractual_ccd,
contractual_ccsd,
contractual_ccsd_prt_pr,
contractual_ccsdpt_prccsd_pr,
contractual_ccsdt,
contractual_ccsdt1a,
contractual_ccsdt1b,
contractual_ccsdt2,
contractual_ccsdt3,
contractual_ccsdt_prq_pr,
contractual_ccsdtq,
contractual_cisd,
contractual_current,
contractual_dft_current,
contractual_fci,
contractual_hf,
contractual_lccd,
contractual_lccsd,
contractual_mp2,
contractual_mp2p5,
contractual_mp3,
contractual_mp4,
contractual_mp4_prsdq_pr,
contractual_qcisd,
contractual_qcisd_prt_pr,
query_has_qcvar,
query_qcvar,
)
from qcengine.programs.tests.standard_suite_ref import answer_hash, std_suite
from qcengine.programs.util import mill_qcvars
from .utils import compare, compare_values
pp = pprint.PrettyPrinter(width=120)
def runner_asserter(inp, ref_subject, method, basis, tnm, scramble, frame):
qcprog = inp["qc_module"].split("-")[0]
qc_module_in = inp["qc_module"] # returns "<qcprog>"|"<qcprog>-<module>" # input-specified routing
qc_module_xptd = (
(qcprog + "-" + inp["xptd"]["qc_module"]) if inp.get("xptd", {}).get("qc_module", None) else None
) # expected routing
driver = inp["driver"]
reference = inp["reference"]
fcae = inp["fcae"]
mode_options = inp.get("cfg", {})
if qc_module_in == "nwchem-tce" and basis == "cc-pvdz":
pytest.skip(
f"TCE throwing 'non-Abelian symmetry not permitted' for HF molecule when not C1. fix this a different way than setting C1."
)
# <<< Molecule >>>
# 1. ref mol: `ref_subject` nicely oriented mol taken from standard_suite_ref.py
ref_subject.update_geometry()
min_nonzero_coords = np.count_nonzero(np.abs(ref_subject.geometry(np_out=True)) > 1.0e-10)
# print(
# "MOL 1/REF: ref_subject",
# ref_subject.com_fixed(),
# ref_subject.orientation_fixed(),
# ref_subject.symmetry_from_input(),
# )
# with np.printoptions(precision=3, suppress=True):
# print(ref_subject.geometry(np_out=True))
if scramble is None:
subject = ref_subject
ref2in_mill = compute_scramble(
subject.natom(), do_resort=False, do_shift=False, do_rotate=False, do_mirror=False
) # identity AlignmentMill
else:
subject, scramble_data = ref_subject.scramble(**scramble, do_test=False, fix_mode="copy")
ref2in_mill = scramble_data["mill"]
# with np.printoptions(precision=12, suppress=True):
# print(f"ref2in scramble mill= {ref2in_mill}")
# print("MOL 2/IN: subject", subject.com_fixed(), subject.orientation_fixed(), subject.symmetry_from_input())
# with np.printoptions(precision=3, suppress=True):
# print(subject.geometry(np_out=True))
# 2. input mol: `subject` now ready for `atin.molecule`. may have been scrambled away from nice ref orientation
# <<< Reference Values >>>
# ? precedence on next two
mp2_type = inp.get("corl_type", inp["keywords"].get("mp2_type", "df")) # hard-code of read_options.cc MP2_TYPE
mp_type = inp.get("corl_type", inp["keywords"].get("mp_type", "conv")) # hard-code of read_options.cc MP_TYPE
ci_type = inp.get("corl_type", inp["keywords"].get("ci_type", "conv")) # hard-code of read_options.cc CI_TYPE
cc_type = inp.get("corl_type", inp["keywords"].get("cc_type", "conv")) # hard-code of read_options.cc CC_TYPE
corl_natural_values = {
"hf": "conv", # dummy to assure df/cd/conv scf_type refs available
"mp2": mp2_type,
"mp3": mp_type,
"mp4(sdq)": mp_type,
"mp4": mp_type,
"cisd": ci_type,
"qcisd": ci_type,
"qcisd(t)": ci_type,
"fci": ci_type,
"lccd": cc_type,
"lccsd": cc_type,
"ccd": cc_type,
"ccsd": cc_type,
"ccsd+t(ccsd)": cc_type,
"ccsd(t)": cc_type,
"a-ccsd(t)": cc_type,
"ccsdt-1a": cc_type,
"ccsdt-1b": cc_type,
"ccsdt-2": cc_type,
"ccsdt-3": cc_type,
"ccsdt": cc_type,
"ccsdt(q)": cc_type,
"ccsdtq": cc_type,
"pbe": "conv",
"b3lyp": "conv",
"b3lyp5": "conv",
"mrccsdt-1a": cc_type,
"mrccsdt-1b": cc_type,
"mrccsdt-2": cc_type,
"mrccsdt-3": cc_type,
}
corl_type = corl_natural_values[method]
natural_ref = {"conv": "pk", "df": "df", "cd": "cd"}
scf_type = inp["keywords"].get("scf_type", natural_ref[corl_type])
natural_values = {"pk": "pk", "direct": "pk", "df": "df", "mem_df": "df", "disk_df": "df", "cd": "cd"}
scf_type = natural_values[scf_type]
is_dft = method in ["pbe", "b3lyp", "b3lyp5"]
# * absolute and relative tolerances function approx as `or` operation. see https://numpy.org/doc/stable/reference/generated/numpy.allclose.html
# * can't go lower on atol_e because hit digit limits accessible for reference values
# * dz gradients tend to be less accurate than larger basis sets/mols
# * analytic Hessian very loose to catch gms/nwc HF Hessian
atol_e, rtol_e = 2.0e-7, 1.0e-16
atol_g, rtol_g = 5.0e-7, 2.0e-5
atol_h, rtol_h = 1.0e-5, 2.0e-5
if is_dft:
atol_g = 6.0e-6
using_fd = "xptd" in inp and "fd" in inp["xptd"] # T/F: notate fd vs. anal for docs table
loose_fd = inp.get("xptd", {}).get("fd", False) # T/F: relax conv crit for 3-pt internal findif fd
if loose_fd:
if basis == "cc-pvdz":
atol_g = 1.0e-4
atol_h, rtol_h = 1.0e-4, 5.0e-4
else:
atol_g = 2.0e-5
atol_h, rtol_h = 5.0e-5, 2.0e-4
# VIEW atol_e, atol_g, atol_h, rtol_e, rtol_g, rtol_h = 1.e-9, 1.e-9, 1.e-9, 1.e-16, 1.e-16, 1.e-16
chash = answer_hash(
system=subject.name(),
basis=basis,
fcae=fcae,
scf_type=scf_type,
reference=reference,
corl_type=corl_type,
)
ref_block = std_suite[chash]
# check all calcs against conventional reference to looser tolerance
atol_conv = 1.0e-4
rtol_conv = 1.0e-3
chash_conv = answer_hash(
system=subject.name(),
basis=basis,
fcae=fcae,
reference=reference,
corl_type="conv",
scf_type="pk",
)
ref_block_conv = std_suite[chash_conv]
# <<< Prepare Calculation and Call API >>>
import qcdb
driver_call = {"energy": qcdb.energy, "gradient": qcdb.gradient, "hessian": qcdb.hessian}
# local_options = {"nnodes": 1, "ncores": 2, "scratch_messy": False, "memory": 4}
local_options = {"nnodes": 1, "ncores": 1, "scratch_messy": False, "memory": 10}
qcdb.set_options(
{
# "guess": "sad",
# "e_convergence": 8,
# "d_convergence": 7,
# "r_convergence": 7,
"e_convergence": 10,
"d_convergence": 9,
# "r_convergence": 9,
# "points": 5,
}
)
extra_kwargs = inp["keywords"].pop("function_kwargs", {})
qcdb.set_options(inp["keywords"])
if "error" in inp:
errtype, errmatch, reason = inp["error"]
with pytest.raises(errtype) as e:
driver_call[driver](inp["call"], molecule=subject, local_options=local_options, **extra_kwargs)
assert re.search(errmatch, str(e.value)), f"Not found: {errtype} '{errmatch}' in {e.value}"
_recorder(qcprog, qc_module_in, driver, method, reference, fcae, scf_type, corl_type, "error", "nyi: " + reason)
return
ret, wfn = driver_call[driver](
inp["call"], molecule=subject, return_wfn=True, local_options=local_options, mode_options=mode_options, **extra_kwargs
)
print("WFN")
pp.pprint(wfn)
qc_module_out = wfn["provenance"]["creator"].lower()
if "module" in wfn["provenance"]:
qc_module_out += "-" + wfn["provenance"]["module"] # returns "<qcprog>-<module>"
# assert 0, f"{qc_module_xptd=} {qc_module_in=} {qc_module_out=}" # debug
# 3. output mol: `wfn.molecule` after calc. orientation for nonscalar quantities may be different from `subject` if fix_=False
wfn_molecule = qcdb.Molecule.from_schema(wfn["molecule"])
# print(
# "MOL 3/WFN: wfn.mol",
# wfn_molecule.com_fixed(),
# wfn_molecule.orientation_fixed(),
# wfn_molecule.symmetry_from_input(),
# )
# with np.printoptions(precision=3, suppress=True):
# print(wfn_molecule.geometry(np_out=True))
_, ref2out_mill, _ = ref_subject.B787(wfn_molecule, atoms_map=False, mols_align=True, fix_mode="true", verbose=0)
# print(f"{ref2out_mill=}")
# print("PREE REF")
# print(ref_block["HF TOTAL GRADIENT"])
if subject.com_fixed() and subject.orientation_fixed():
assert frame == "fixed"
with np.printoptions(precision=3, suppress=True):
assert compare_values(
subject.geometry(), wfn_molecule.geometry(), atol=5.0e-8
), f"coords: atres ({wfn_molecule.geometry(np_out=True)}) != atin ({subject.geometry(np_out=True)})" # 10 too much
assert (
ref_subject.com_fixed()
and ref_subject.orientation_fixed()
and subject.com_fixed()
and subject.orientation_fixed()
and wfn_molecule.com_fixed()
and wfn_molecule.orientation_fixed()
), f"fixed, so all T: {ref_subject.com_fixed()} {ref_subject.orientation_fixed()} {subject.com_fixed()} {subject.orientation_fixed()} {wfn_molecule.com_fixed()} {wfn_molecule.orientation_fixed()}"
ref_block = mill_qcvars(ref2in_mill, ref_block)
ref_block_conv = mill_qcvars(ref2in_mill, ref_block_conv)
else:
assert frame == "free" or frame == "" # "": direct from standard_suite_ref.std_molecules
with np.printoptions(precision=3, suppress=True):
assert compare(
min_nonzero_coords,
np.count_nonzero(np.abs(wfn_molecule.geometry(np_out=True)) > 1.0e-10),
tnm + " !0 coords wfn",
), f"ncoords {wfn_molecule.geometry(np_out=True)} != {min_nonzero_coords}"
assert (
(not ref_subject.com_fixed())
and (not ref_subject.orientation_fixed())
and (not subject.com_fixed())
and (not subject.orientation_fixed())
and (not wfn_molecule.com_fixed())
and (not wfn_molecule.orientation_fixed())
), f"free, so all F: {ref_subject.com_fixed()} {ref_subject.orientation_fixed()} {subject.com_fixed()} {subject.orientation_fixed()} {wfn_molecule.com_fixed()} {wfn_molecule.orientation_fixed()}"
if scramble is None:
# wfn exactly matches ref_subject and ref_block
with np.printoptions(precision=3, suppress=True):
assert compare_values(
ref_subject.geometry(), wfn_molecule.geometry(), atol=5.0e-8
), f"coords: atres ({wfn_molecule.geometry(np_out=True)}) != atin ({ref_subject.geometry(np_out=True)})"
else:
# wfn is "pretty" (max zeros) but likely not exactly ref_block (by axis exchange, phasing, atom shuffling) since Psi4 ref frame is not unique
ref_block = mill_qcvars(ref2out_mill, ref_block)
ref_block_conv = mill_qcvars(ref2out_mill, ref_block_conv)
# print("POST REF")
# print(ref_block["HF TOTAL GRADIENT"])
# <<< Comparison Tests >>>
assert wfn["success"] is True
assert (
wfn["provenance"]["creator"].lower() == qcprog
), f'ENGINE used ({ wfn["provenance"]["creator"].lower()}) != requested ({qcprog})'
# qcvars
contractual_args = [
qc_module_out,
driver,
reference,
method,
corl_type,
fcae,
]
asserter_args = [
[qcdb, wfn["qcvars"]],
ref_block,
[atol_e, atol_g, atol_h],
[rtol_e, rtol_g, rtol_h],
ref_block_conv,
atol_conv,
rtol_conv,
tnm,
]
def qcvar_assertions():
print("BLOCK", chash, contractual_args)
if method == "hf":
_asserter(asserter_args, contractual_args, contractual_hf)
elif method == "mp2":
_asserter(asserter_args, contractual_args, contractual_mp2)
elif method == "mp3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
elif method == "mp4(sdq)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
elif method == "mp4":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_mp2p5)
_asserter(asserter_args, contractual_args, contractual_mp3)
_asserter(asserter_args, contractual_args, contractual_mp4_prsdq_pr)
_asserter(asserter_args, contractual_args, contractual_mp4)
elif method == "cisd":
_asserter(asserter_args, contractual_args, contractual_cisd)
elif method == "qcisd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
elif method == "qcisd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_qcisd)
_asserter(asserter_args, contractual_args, contractual_qcisd_prt_pr)
elif method == "fci":
_asserter(asserter_args, contractual_args, contractual_fci)
elif method == "lccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccd)
elif method == "lccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_lccsd)
elif method == "ccd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccd)
elif method == "ccsd":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
elif method == "ccsd+t(ccsd)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsdpt_prccsd_pr)
elif method == "ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_ccsd_prt_pr)
elif method == "a-ccsd(t)":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsd)
_asserter(asserter_args, contractual_args, contractual_accsd_prt_pr)
elif method == "ccsdt-1a":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1a)
elif method == "ccsdt-1b":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt1b)
elif method == "ccsdt-2":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt2)
elif method == "ccsdt-3":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt3)
elif method == "ccsdt":
_asserter(asserter_args, contractual_args, contractual_mp2)
_asserter(asserter_args, contractual_args, contractual_ccsdt)
elif method == "ccsdt(q)":
_asserter(asserter_args, contractual_args, contractual_mp2)
| |
objs, pyObjs
elif not ready:
objs = objs + updates
objs.append(NotesEnd)
pyObjs[acnt][tableName]['deltas']['all'] = '[E] ERROR'
raise ValueError('[E] Table is not ready ', tableName, title)
return
objs = objs + updates
# printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# printer ("---> 00B. table_Modify")
# printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
# printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
updates, pytable = self.table_Modify(table_present, defined, tableName, config, pkeys, rows, gsi, lsi, changeTable)
objs = objs + updates # print table
pyObjs[acnt][tableName] = pytable
printer(' ****** **** **** ')
objs.append(NotesEnd)
return (objs, pyObjs)
def UpdateTables(self, dynoObj, targetEnv, applyChanges=False, override=False):
    """Reconcile the declared table definitions in `dynoObj` with DynamoDB.

    Iterates every environment entry of `dynoObj` whose title matches
    `targetEnv` (or all of them when `targetEnv == 'all'`), ensures each
    declared table exists (creating it when `applyChanges` is True), then
    delegates row/config reconciliation to `table_Modify`.

    :param dynoObj: mapping of env-title -> {'account': ..., 'tables': {...}}
                    where each table entry carries 'config', 'rows' and
                    optionally 'gsi', 'lsi' and an ACTION_KEY directive.
    :param targetEnv: environment title to process, or 'all'.
    :param applyChanges: when True, missing tables are created; combined with
                         `override` it also allows table-level changes.
    :param override: when True (and applyChanges), existing tables may be
                     modified in place.
    :returns: tuple (objs, pyObjs) — a printable report list and a nested
              dict of per-account/per-table results.
    """
    global ACTION_KEY
    # print dynoObj[o][t]['rows'][0]['keys']
    # print dynoObj[o][t]['rows'][0]['records']
    #applyChanges=False
    action = False
    # Translate the two caller flags into the three internal switches.
    # NOTE(review): deleteit is False on both branches, so the deletion
    # branch at the bottom is currently unreachable — presumably intentional
    # as a safety latch; confirm before enabling.
    if applyChanges:
        createMissingTables = True
        deleteit = False
        changeTable = True if override else False
    else:
        createMissingTables = False
        deleteit = False
        changeTable = False
    printer("...create it? (((( %s )))))"% applyChanges)
    printer('dObject:: %s'%dynoObj)
    deltas = [] ## all changes go here
    loging = [] ## all warnings go here
    changedRows = 0
    changedConfigs = 0
    pyObjs ={}
    objs = []
    client = self.__get_client__()
    resource = self.__get_resource__()
    todelete=[]
    # key changes has change made [UPDATE,NEW, DUP_FOUND, DELETE]
    ## each row adds a CHANGE column [UPDATE,NEW, DUP_FOUND, DELETE]
    ## {envACCOUNT:{account:'', table1:{isNew:0, rows=[{keys:,records:['change':new, update, waiting] }], table2:, .....table3}
    for title, env in dynoObj.items():
        printer(' :: %s'%(title) )
        printer( targetEnv)
        # Skip environments the caller did not ask for.
        if targetEnv != 'all' and targetEnv != title:
            printer(' NOT-->%s skipping...' % (title))
            continue
        acnt = env['account']
        found = False
        tables_review = []
        TABLES_CREATED = []
        TABLES_MODIFIED = []
        currentObjs = []
        objs.append([acnt,title])
        pyObjs[acnt]={}
        HEADER = ['DELTA', 'Name[%s]' % ('dynamodb'), 'Audit', 'Owner', 'Partition Key', 'totalReads',
                  'totalWrites', 'Columns']
        objs.append(HEADER)
        for t,defined in env['tables'].items():
            printer(' ****** **** **** %s'%targetEnv)
            printer(' ..^..>>%s'%t)
            #print(' ..^..>>', defined)
            # An ACTION_KEY directive on the table entry can mark the whole
            # table for deletion; deletion itself is deferred to the end.
            if ACTION_KEY in defined:
                action=True
                if 'delete' in defined[ACTION_KEY]:
                    # delete table and all records
                    printer('DELETING TABLES')
                    todelete.append(t)
                    continue
            config = defined['config']
            rows = defined['rows'] # [index] --> keys and #records
            printer(defined)
            gsi=lsi=None
            if 'gsi' in defined:
                gsi = defined['gsi']
            if 'lsi' in defined:
                lsi = defined['lsi']
            NotesEnd = ['NOTES', t]
            tables_review.append(t)
            printer( 'ddddddd uuuu dddd eeeee')
            printer( defined)
            printer( ' ---> ddddddd uuuu dddd eeeee')
            #s=100/0
            ############################################################
            #### DOES TABLE EXIST??? ## CREATE TABLE AND RECORDS??? ####
            ############################################################
            #pks = config['Partition key'].split('.')
            #pkeys = dict([p.split(':')[0], p.split(':')[1]] for p in pks)
            pkeys =config['Partition Key']
            #print pkeys
            # tablePresent reports whether the table exists/was created and
            # whether it is ACTIVE, plus the report rows it generated.
            created, ready, updates, pytable, table_present = self.tablePresent(t, config, pkeys, rows, gsi,lsi, createMissingTables)
            printer(' results (created:%s, ready:%s)' % (created, ready) )
            pyObjs[acnt][t]=pytable
            if created and ready: ## table new and ready
                ##add rows in tableExists()
                TABLES_CREATED.append(t)
                #tvalue = resource.Table(t)
                objs = objs + updates
                objs = objs
                objs.append(NotesEnd)
                continue
            elif not ready:
                # Table exists but is not usable: record the error and move on.
                objs = objs + updates
                objs.append(NotesEnd)
                pyObjs[acnt][t]['deltas']['all']='[E] ERROR'
                continue
            #else: # is ready and already exists!!!
            #tvalue = resource.Table(t)
            # current object queries current table as it exists!!! this should only be used for comparisons
            # existingObj = self.tableDefine( auditMeth.namingCrud(2), aconnect, currentObjs, tvalue, t )
            #objDefine, pyDefine = get_tableInfo(table_present)
            printer('-----> %s'%config)
            printer(' ~~~>%s'%pkeys)
            ############################################################
            #### DOES RECORDS EXIST??? ## CREATE RECORDS??? #######
            ############################################################
            objs = objs + updates
            #updates, pytable = self.table2Update(tvalue,pyDefine,config, pkeys, rows, changeTable)
            # printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            # printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            # printer ("---> 00A. table_Modify")
            # printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            # printer ("~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~")
            # Table exists and is ready: reconcile its definition and rows.
            updates, pytable = self.table_Modify(table_present,defined,t, config, pkeys, rows,gsi,lsi, changeTable)
            #updates, pytable = self.table2Update(tvalue, defined, pyDefine, rows[0]['keys'], changeTable)
            objs = objs + updates # print table
            pyObjs[acnt][t]=pytable
            printer(' ****** **** **** ')
            objs.append(NotesEnd)
        # Per-environment summary sections appended after all tables.
        missing = self.tablesAbsent( tables_review)
        objs.append(['TABLES_CREATED'])
        objs.append(TABLES_CREATED)
        objs.append(['TABLES_UNFOUND'])
        objs.append(missing)
        objs.append(['TABLES_DELETE'])
        objs.append(todelete)
        pyObjs[acnt]['TABLES_CREATED']=TABLES_CREATED
        pyObjs[acnt]['TABLES_UNFOUND']=missing
        pyObjs[acnt]['TABLES_DELETE']=todelete
        if deleteit:
            printer('tables deleted now')
            #self.tablesDrop(todelete)
        else:
            printer('[W] SKIP TABLE DELETION %s'%todelete)
        # break
    return (objs, pyObjs)
## TODO: setup ADD in new table to format to table level dict
##################################################
#### EXISTING TABLE NEEDS TO BE UPDATED!! #######
##################################################
def table_Modify(self,table_present, defined, tablename, config, pkeys, rows, gsi,lsi, changeit=False):
    """Reconcile an EXISTING table's definition and rows with the desired state.

    When `changeit` is True the table definition itself is updated via
    `dynamoTableUpdate`; row-level reconciliation is then delegated to
    `table2Update` (which only applies writes when `changeit` is True).

    :returns: tuple (obj, pyObj) — report rows and a result dict of the
              shape {'config': ..., 'rows': ..., 'deltas': {...}}.
    """
    resource = self.__get_resource__()
    client = self.__get_client__()
    ready =True
    ignore = False
    obj=[]
    #updatedRows=[]
    pyObj = {'config':config,'rows':rows,'deltas':{}}
    # NOTE(review): xkema aliases pkeys, so reverse() mutates the caller's
    # key-schema list in place, and attributeDefinitions below receives the
    # same (possibly reversed) object twice — confirm this is intended.
    xkema = pkeys
    # Key schema must list the HASH (partition) key before the RANGE key.
    if xkema[0]['KeyType'] != 'HASH':
        xkema.reverse()
    attr = self.attributeDefinitions(config, xkema, pkeys, gsi, lsi)
    tvalue = resource.Table(tablename)
    ####################
    objDefine, pyDefine = get_tableInfo(table_present)
    ###########################################################
    ##### UPDATE TABLE CONFIGURATION/DEFINITION################
    ###########################################################
    if changeit:
        # Push throughput/index/attribute changes to the live table.
        changed = dynamoTableUpdate(client, tablename, xkema,pyDefine, attr, config, gsi, lsi)
    ##obj.append([change, tablename, audit, owner, readableKeys, config['totalReads'], config['totalWrites'],readableColumns])
    # printer (";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
    # printer (";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
    # printer ("---> 002. table2Update")
    # printer (";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
    # printer (";;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;")
    ##updated rows below make sure you only get ROW DATA and append to master Obj pyobj for return
    deltas, pydeltas = self.table2Update(tvalue, defined, pyDefine, changeit)
    if deltas is not None:
        obj.append(deltas)
        pyObj['deltas'].update({'items':pydeltas})
    return obj, pyObj
### ONLY RESULTS IN ROW CHANGES NOT TABLE CHANGES OR HEADER
### ONLY RESULTS IN ROW CHANGES NOT TABLE CHANGES OR HEADER
def table2Update(self, resourceTable, future, keys, changeit=False):
    """Classify every desired row as insert/update/delete/conflict and apply.

    Each record must carry an ACTION_KEY entry ('update', 'delete', 'new'/
    'insert'); the key is popped before the record is written.  Existing rows
    are detected via `tableConditionQuery` (skipped when the table is empty).
    Writes are only performed when `changeit` is True.

    :param resourceTable: boto3 Table resource for the live table.
    :param future: desired table definition dict with 'config' and 'rows'.
    :param keys: table definition used both for CSV column ordering and for
                 keys['config']['Columns'] type lookups — appears to be the
                 pyDefine dict from get_tableInfo; TODO confirm.
    :returns: tuple (row_CVS, changed_items) — CSV-style report rows and the
              list of item dicts that were (or would be) written.
    """
    global ACTION_KEY
    #UPDATE ALL ROWS/RECORDS
    csv_create = True
    row_delete=[]
    row_insert=[]
    row_update=[]
    row_conflict=[]
    row_alter=[]
    ######## BELOW USED TO BUILD CSV RESULTS ######
    row_CVS=[]
    m_keys = future['config']['Partition Key']
    r_future = future['rows']
    sortKeys = [k['AttributeName'] for k in m_keys]
    header=[]
    totTableRecords = resourceTable.item_count
    for rf in r_future:
        changing = CREATE
        # rf['keys'] entries look like 'name:value'; build a name->value map.
        cKeys = { '%s'%v.split(':')[0]:'%s'%v.split(':')[1] for v in rf['keys'] }
        # print("////////////////////////////////////////////////////////////")
        # print("////////////////////////////////////////////////////////////")
        # print(". START RECORDS. ---1111. %s"%len(r_future))
        # print("////////////////////////////////////////////////////////////")
        # print("////////////////////////////////////////////////////////////")
        print(rf)
        for r in rf['records']:
            rowIN=[]
            if csv_create:
                #setup tok create CSV file
                for key in keys:
                    kkey = key.split(':')[0]
                    if kkey not in r:
                        continue
                    vlu = r[kkey]
                    rowIN.append(vlu)
            # rvalue = r.values()[0]
            # if isinstance(rvalue, (basestring)):
            # rkey = r.keys()[0]
            # if rkey + ':' in rvalue: # used to make sure columns arent' inserted as row
            # return (None, None)
            #nh = generateHeader(r, header)
            # ACTION to be taken @ ROW level
            print (" @ -ROW LEVEL--->>>001 %s"%r)
            if not ACTION_KEY in r:
                raise ValueError("[E] '%s' KEY required with value('update', 'delete', 'new') given:%s"%(ACTION_KEY,r) )
            print (" @ -ROW LEVEL--->>>002")
            action = r[ACTION_KEY]
            # 'ignore' records whether this is a dry run (changeit False).
            item = {'item':r,'crud':action, 'key':sortKeys, 'ignore':False if changeit else True }
            del r[ACTION_KEY]
            r = convertAcceptedRowValue(r)
            if action == 'delete':
                changing = DELETE
                item = copy.copy(item)
                # Empty-string keys/values are normalized ('' key, None value).
                item['item']={ x if i!='' else '':i if i!='' else None for x,i in r.items() }
                row_delete.append(item)
                continue
            item.update({'column':cKeys})
            if resourceTable:
                if totTableRecords>0: #DON"T BOTHER QUERYING TABLE IF EMPTY!!!!!
                    print('----===-----1 %s'%resourceTable)
                    print(keys)
                    keyTypes= dict([aa['AttributeName'],aa['AttributeType']] for aa in keys['config']['Columns'])
                    print('----===-----2')
                    results = self.tableConditionQuery(resourceTable, sortKeys, r,keyTypes)
                    #if action == 'insert':
                    if results['Count'] >= 1:
                        # Row already exists: insert is a conflict, update is queued.
                        if action == 'insert':
                            row_conflict.append(item)
                        elif action == 'update':
                            row_update.append(item)
                            changing = UPDATE
                        # NOTE(review): original indentation is ambiguous here;
                        # this reading skips the CSV line for all pre-existing
                        # rows — confirm against upstream history.
                        continue
                    else:
                        row_insert.append(item)
                else:
                    row_insert.append(item)
            change = "%s/%s" % ("" if changeit else namingCrud(IGNORE), namingCrud(changing))
            if csv_create:
                rowIN = [change]+rowIN
                row_CVS.append(rowIN)
    #table_change
    #table_drop
    #table_create...wrong FUNCTION
    #future table definition compare to fact/current definition
    row_alter = row_delete + row_insert
    printer( '**** changeit: %s'%changeit)
    printer( ' **** conflict: %s'%row_conflict)
    if changeit:
        response, warn =self.table_BatchSync(resourceTable, row_alter, keys)
        ## this can cause issues for the update process
        response, warn =self.table_BatchUpdate(resourceTable, row_update)
    #objs.append(['NOTES[%s]' %(provider_name), 'other relevant info here'])
    return row_CVS, row_alter+row_update
########################################################################################################
## where items['item'] is the Object to put {'item':{'somekey': 'somevalue', ...}, 'crud':'delete', 'key':{}} ####
########################################################################################################
def table_BatchSync(self,table, items, keys):
warn=[]
remainingItems=[]
printer( ' ')
printer( ' ')
printer( ' ')
printer( ' ')
printer( ' ******** table_BatchSync >> 1 >> ....%s'%items)
try:
lastBatch=[]
completed=[]
with table.batch_writer() as batch:
for i in items:
printer( ' ******** >> 1 >> ....')
printer( 'key %s'%i['key'])
printer( 'item %s'%i['item'])
printer( ' ******** >> 2 >> ....')
unprocessed=False
if i['crud'] == 'delete':
if '' in i['item']:
del i['item']['']
printer( i['item'])
printer( ' ******** >> 3 >> ....')
response =batch.delete_item( Key=i['item'] )
else:
response =batch.put_item( Item=i['item'] )
if response:
unprocessed = response.get('UnprocessedItems', None)
if not unprocessed:
i['status'] = 'success'
else:
i['status'] = 'fail'
#items.remove(i)
btot = len(lastBatch)
lastBatch.append(i)
completed.append(i)
if btot>10:
diff=btot-10
lastBatch=lastBatch[diff:]
except ClientError as e:
dups=[]
add=[]
for row in lastBatch:
r=row['item']
sortKeys = row['key']
keyTypes= dict([aa['AttributeName'],aa['AttributeType']] for aa in keys['config']['Columns'])
r = convertAcceptedRowValue(r)
results=self.tableConditionQuery( table, sortKeys, r, keyTypes)
if results['Count'] >= 1:
dups.append(row)
else:
add.append(row)
for a in add:
if a in completed:
completed.remove(a)
for lbl,prop in a['item'].items():
for lbl2,prop2 in a['item'].items():
if prop2 == prop:
randint=andint(1, 100)
space=" "
for dot in randint:
space=space+space
a['item'][lbl] = "%s %s."%(prop, space)
for c in completed:
if c in items:
items.remove(c)
if "keys contains duplicates" | |
from django.utils.translation import gettext_lazy as _
# Workforce-size brackets used as Django model-field choices.
# Keys are two-digit codes stored in the DB; labels are translatable French
# descriptions. The codes appear to follow the INSEE "tranche d'effectif"
# scheme -- TODO confirm against the data source.
SLICE_EFFECTIVE = (
    ("00", _("0 salarié (n'ayant pas d'effectif au 31/12 mais ayant employé des salariés au cours de l'année de référence)")),
    ("01", _("1 ou 2 salariés")),
    ("02", _("3 à 5 salariés")),
    ("03", _("6 à 9 salariés")),
    ("11", _("10 à 19 salariés")),
    ("12", _("20 à 49 salariés")),
    ("21", _("50 à 99 salariés")),
    ("22", _("100 à 199 salariés")),
    ("31", _("200 à 249 salariés")),
    ("32", _("250 à 499 salariés")),
    ("41", _("500 à 999 salariés")),
    ("42", _("1 000 à 1 999 salariés")),
    ("51", _("2 000 à 4 999 salariés")),
    ("52", _("5 000 à 9 999 salariés")),
    ("53", _("10 000 salariés et plus")),
)
# Corporate-governance code of reference followed by the company.
CODEREF = (
    ("AFEPMEDEF", _("Afep MEDEF")),
    ("MIDDLENEXT", _("MiddleNext")),
)
# Stock-market index membership choices.
INDEX = (
    ("CAC40", _("CAC40")),
    ("NEXT80", _("NEXT80")),
    ("NEXTPP", _("NEXT++")),
)
# Governance structure of the company (board with/without combined
# chairman-CEO, supervisory board, limited partnership).
GOVERNANCE = (
    ("CAWPDG", _("CA avec PDG")),
    ("CAGOVDISLO", _("CA à gouvernance dissociée")),
    ("CSDIRECTOR", _("Conseil de Surveillance/Directoire")),
    ("COMMANDITE", _("Commandite")),
)
# How (and how often) the board's functioning is evaluated:
# self-evaluation and/or external consultant, or no evaluation at all.
EVALUATION = (
    ("AUTOYEAR", _("Auto-évaluation chaque année")),
    ("AUTO2YEAR", _("Auto-évaluation (1 fois tous les 2 ans)")),
    ("AUTO3YEAR", _("Auto-évaluation (1 fois tous les 3 ans)")),
    ("EXTYEAR", _("Consultant externe chaque année")),
    ("EXT2YEAR", _("Consultant externe tous les 2 ans")),
    ("EXT3YEAR", _("Consultant externe tous les 3 ans")),
    ("AUTO2YEAREXT", _("Auto-évaluation annuelle et consultant externe tous les 2 ans")),
    ("AUTO3YEAREXT", _("Auto-évaluation annuelle et consultant externe tous les 3 ans")),
    ("PRESNOTNECESSARY", _("Le Président estime qu’il n’est pas nécessaire de procéder à l’évaluation du fonctionnement du Conseil d’administration.")),
    ("0PROCESSENABLE", _("A ce jour, aucune procédure d’auto-évaluation du fonctionnement n’est mise en place par le Conseil.")),
    ("0FORMDO", _("Aucune évaluation formalisée du Conseil n’a été réalisée jusqu’à aujourd’hui")),
)
LEGALFORM = (
(1000, _("Entrepreneur individue")),
(2110, _("Indivision entre personnes physiques")),
(2120, _("Indivision avec personne morale")),
(2210, _("Société créée de fait entre personnes physiques")),
(2220, _("Société créée de fait avec personne morale")),
(2310, _("Société en participation entre personnes physiques")),
(2320, _("Société en participation avec personne morale")),
(2385, _("Société en participation de professions libérales")),
(2400, _("Fiducie")),
(2700, _("Paroisse hors zone concordataire")),
(2900, _("Autre groupement de droit privé non doté de la personnalité morale")),
(3110, _("Représentation ou agence commerciale d'état ou organisme public étranger immatriculé au RCS")),
(3120, _("Société commerciale étrangère immatriculée au RC")),
(3205, _("Organisation internationale")),
(3210, _("État, collectivité ou établissement public étrange")),
(3220, _("Société étrangère non immatriculée au RCS")),
(3290, _("Autre personne morale de droit étranger")),
(4110, _("Établissement public national à caractère industriel ou commercial doté d'un comptable public")),
(4120, _("Établissement public national à caractère industriel ou commercial non doté d'un comptable public")),
(4130, _("Exploitant public")),
(4140, _("Établissement public local à caractère industriel ou commercial")),
(4150, _("Régie d'une collectivité locale à caractère industriel ou commercial")),
(4160, _("Institution Banque de France")),
(5191, _("Société de caution mutuelle")),
(5192, _("Société coopérative de banque populaire")),
(5193, _("Caisse de crédit maritime mutuel")),
(5194, _("Caisse (fédérale) de crédit mutuel")),
(5195, _("Association coopérative inscrite (droit local Alsace Moselle)")),
(5196, _("Caisse d'épargne et de prévoyance à forme coopérative")),
(5202, _("Société en nom collectif")),
(5203, _("Société en nom collectif coopérative")),
(5306, _("Société en commandite simple")),
(5307, _("Société en commandite simple coopérative")),
(5308, _("Société en commandite par actions")),
(5309, _("Société en commandite par actions coopérative")),
(5370, _("Société de Participations Financières de Profession Libérale Société en commandite par actions (SPFPL SCA")),
(5385, _("Société d'exercice libéral en commandite par actions")),
(5410, _("SARL nationale")),
(5415, _("SARL d'économie mixte")),
(5422, _("SARL immobilière pour le commerce et l'industrie (SICOMI)")),
(5426, _("SARL immobilière de gestio")),
(5430, _("SARL d'aménagement foncier et d'équipement rural (SAFER")),
(5431, _("SARL mixte d'intérêt agricole (SMIA)")),
(5432, _("SARL d'intérêt collectif agricole (SICA)")),
(5442, _("SARL d'attribution")),
(5443, _("SARL coopérative de construction")),
(5451, _("SARL coopérative de consommation")),
(5453, _("SARL coopérative artisanale")),
(5454, _("SARL coopérative d'intérêt maritime")),
(5455, _("SARL coopérative de transpor")),
(5458, _("SARL coopérative ouvrière de production (SCOP")),
(5459, _("SARL union de sociétés coopératives")),
(5460, _("Autre SARL coopérative")),
(5470, _("Société de Participations Financières de Profession Libérale Société à responsabilité limitée (SPFPL SARL")),
(5485, _("Société d'exercice libéral à responsabilité limitée")),
(5498, _("SARL unipersonnelle")),
(5499, _("Société à responsabilité limitée (sans autre indication")),
(5505, _("SA à participation ouvrière à conseil d'administration")),
(5510, _("SA nationale à conseil d'administration")),
(5515, _("SA d'économie mixte à conseil d'administration")),
(5520, _("Fonds à forme sociétale à conseil d'administratio")),
(5522, _("SA immobilière pour le commerce et l'industrie (SICOMI) à conseil d'administratio")),
(5525, _("SA immobilière d'investissement à conseil d'administratio")),
(5530, _("SA d'aménagement foncier et d'équipement rural (SAFER) à conseil d'administratio")),
(5531, _("Société anonyme mixte d'intérêt agricole (SMIA) à conseil d'administration")),
(5532, _("SA d'intérêt collectif agricole (SICA) à conseil d'administratio")),
(5542, _("SA d'attribution à conseil d'administratio")),
(5543, _("SA coopérative de construction à conseil d'administratio")),
(5546, _("SA de HLM à conseil d'administration")),
(5547, _("SA coopérative de production de HLM à conseil d'administration")),
(5548, _("SA de crédit immobilier à conseil d'administration")),
(5551, _("SA coopérative de consommation à conseil d'administration")),
(5552, _("SA coopérative de commerçants-détaillants à conseil d'administratio")),
(5553, _("SA coopérative artisanale à conseil d'administration")),
(5554, _("SA coopérative (d'intérêt) maritime à conseil d'administration")),
(5555, _("SA coopérative de transport à conseil d'administratio")),
(5558, _("SA coopérative ouvrière de production (SCOP) à conseil d'administratio")),
(5559, _("SA union de sociétés coopératives à conseil d'administration")),
(5560, _("Autre SA coopérative à conseil d'administration")),
(5570, _("Société de Participations Financières de Profession Libérale Société anonyme à conseil d'administration (SPFPL SA à conseil d'administration")),
(5585, _("Société d'exercice libéral à forme anonyme à conseil d'administration")),
(5599, _("SA à conseil d'administration (s.a.i.")),
(5605, _("SA à participation ouvrière à directoire")),
(5610, _("SA nationale à directoire")),
(5615, _("SA d'économie mixte à directoire")),
(5620, _("Fonds à forme sociétale à directoire")),
(5622, _("SA immobilière pour le commerce et l'industrie (SICOMI) à directoire")),
(5625, _("SA immobilière d'investissement à directoire")),
(5630, _("Safer anonyme à directoire")),
(5631, _("SA mixte d'intérêt agricole (SMIA")),
(5632, _("SA d'intérêt collectif agricole (SICA")),
(5642, _("SA d'attribution à directoire")),
(5643, _("SA coopérative de construction à directoire")),
(5646, _("SA de HLM à directoire")),
(5647, _("Société coopérative de production de HLM anonyme à directoire")),
(5648, _("SA de crédit immobilier à directoire")),
(5651, _("SA coopérative de consommation à directoire")),
(5652, _("SA coopérative de commerçants-détaillants à directoire")),
(5653, _("SA coopérative artisanale à directoire")),
(5654, _("SA coopérative d'intérêt maritime à directoire")),
(5655, _("SA coopérative de transport à directoire")),
(5658, _("SA coopérative ouvrière de production (SCOP) à directoire")),
(5659, _("SA union de sociétés coopératives à directoire")),
(5660, _("Autre SA coopérative à directoire")),
(5670, _("Société de Participations Financières de Profession Libérale Société anonyme à Directoire (SPFPL SA à directoire)")),
(5685, _("Société d'exercice libéral à forme anonyme à directoire")),
(5699, _("SA à directoire (s.a.i.")),
(5710, _("SAS, société par actions simplifiée")),
(5720, _("Société par actions simplifiée à associé unique ou société par actions simplifiée unipersonnelle")),
(5770, _("Société de Participations Financières de Profession Libérale Société par actions simplifiée (SPFPL SAS)")),
(5785, _("Société d'exercice libéral par action simplifiée")),
(5800, _("Société européenne")),
(6100, _("Caisse d'Épargne et de Prévoyance")),
(6210, _("Groupement européen d'intérêt économique (GEIE)")),
(6220, _("Groupement d'intérêt économique (GIE)")),
(6316, _("Coopérative d'utilisation de matériel agricole en commun (CUMA)")),
(6317, _("Société coopérative agricole")),
(6318, _("Union de sociétés coopératives agricoles")),
(6411, _("Société d'assurance à forme mutuell")),
(6511, _("Sociétés Interprofessionnelles de Soins Ambulatoires")),
(6521, _("Société civile de placement collectif immobilier (SCPI)")),
(6532, _("Société civile d'intérêt collectif agricole (SICA)")),
(6533, _("Groupement agricole d'exploitation en commun (GAEC)")),
(6534, _("Groupement foncier agricole")),
(6535, _("Groupement agricole foncier")),
(6536, _("Groupement forestier")),
(6537, _("Groupement pastoral")),
(6538, _("Groupement foncier et rura")),
(6539, _("Société civile foncière")),
(6540, _("Société civile immobilière")),
(6541, _("Société civile immobilière de construction-vent")),
(6542, _("Société civile d'attribution")),
(6543, _("Société civile coopérative de construction")),
(6544, _("Société civile immobilière d' accession progressive à la propriété")),
(6551, _("Société civile coopérative de consommation")),
(6554, _("Société civile coopérative d'intérêt maritime")),
(6558, _("Société civile coopérative entre médecins")),
(6560, _("Autre société civile coopérative")),
(6561, _("SCP d'avocats")),
(6562, _("SCP d'avocats aux conseils")),
(6563, _("SCP d'avoués d'appel")),
(6564, _("SCP d'huissiers")),
(6565, _("SCP de notaires")),
(6566, _("SCP de commissaires-priseurs")),
(6567, _("SCP de greffiers de tribunal de commerce")),
(6568, _("SCP de conseils juridiques")),
(6569, _("SCP de commissaires aux comptes")),
(6571, _("SCP de médecins")),
(6572, _("SCP de dentistes")),
(6573, _("SCP d'infirmiers")),
(6574, _("SCP de masseurs-kinésithérapeute")),
(6575, _("SCP de directeurs de laboratoire d'analyse médicale")),
(6576, _("SCP de vétérinaires")),
(6577, _("SCP de géomètres expert")),
(6578, _("SCP d'architectes")),
(6585, _("Autre société civile professionnell")),
(6588, _("Société civile laitière")),
(6589, _("Société civile de moyens")),
(6595, _("Caisse locale de crédit mutuel")),
(6596, _("Caisse de crédit agricole mutuel")),
(6597, _("Société civile d'exploitation agricole")),
(6598, _("Exploitation agricole à responsabilité limitée")),
(6599, _("Autre société civile")),
(6901, _("Autre personne de droit privé inscrite au registre du commerce et des société")),
(7111, _("Autorité constitutionnelle")),
(7112, _("Autorité administrative ou publique indépendant")),
(7113, _("Ministère")),
(7120, _("Service central d'un ministère")),
(7150, _("Service du ministère de la Défense")),
(7160, _("Service déconcentré à compétence nationale d'un ministère (hors Défense")),
(7171, _("Service déconcentré de l'État à compétence (inter) régionale")),
(7172, _("Service déconcentré de l'État à compétence (inter) départementale")),
(7179, _("(Autre) Service déconcentré de l'État à compétence territoriale")),
(7190, _("Ecole nationale non dotée de la personnalité morale")),
(7210, _("Commune et commune nouvelle")),
(7220, _("Département")),
(7225, _("Collectivité et territoire d'Outre Me")),
(7229, _("(Autre) Collectivité territoriale")),
(7230, _("Région")),
(7312, _("Commune associée et commune déléguée")),
(7313, _("Section de commune")),
(7314, _("Ensemble urbain")),
(7321, _("Association syndicale autorisée")),
(7322, _("Association foncière urbaine")),
(7323, _("Association foncière de remembrement")),
(7331, _("Établissement public local d'enseignement")),
(7340, _("Pôle métropolitai")),
(7341, _("Secteur de commune")),
(7342, _("District urbain")),
(7343, _("Communauté urbaine")),
(7344, _("Métropol")),
(7345, _("Syndicat intercommunal à vocation multiple (SIVOM)")),
(7346, _("Communauté de communes")),
(7347, _("Communauté de villes")),
(7348, _("Communauté d'agglomération")),
(7349, _("Autre établissement public local de coopération non spécialisé ou entente")),
(7351, _("Institution interdépartementale ou entent")),
(7352, _("Institution interrégionale | |
0, 0, 2, -1]
waste -1.8 0.9798 [-2, -2, -1, -3, -2, -1, -1, -4, -1, -1]
wasted -2.2 0.6 [-2, -3, -2, -3, -1, -3, -2, -2, -2, -2]
wasting -1.7 0.9 [-3, -1, -2, -2, -1, -2, -3, 0, -1, -2]
wavering -0.6 1.0198 [-1, -1, 0, 0, -1, -1, -1, 2, -1, -2]
weak -1.9 0.7 [-1, -3, -2, -2, -3, -2, -2, -1, -2, -1]
weaken -1.8 0.6 [-2, -2, -2, -1, -1, -3, -2, -2, -1, -2]
weakened -1.3 0.9 [-2, -1, -1, -1, -1, -2, -2, -2, 1, -2]
weakener -1.6 1.11355 [-2, -1, -1, -1, -2, -2, -3, -3, 1, -2]
weakeners -1.3 0.45826 [-1, -2, -1, -2, -1, -1, -2, -1, -1, -1]
weakening -1.3 0.45826 [-2, -1, -1, -1, -1, -1, -1, -2, -2, -1]
weakens -1.3 0.45826 [-1, -1, -1, -1, -1, -2, -1, -2, -1, -2]
weaker -1.9 0.83066 [-2, -2, -2, -2, -2, -1, -4, -1, -1, -2]
weakest -2.3 0.64031 [-2, -4, -2, -3, -2, -2, -2, -2, -2, -2]
weakfish -0.2 1.07703 [0, -2, 0, 0, 0, 0, -2, 0, 2, 0]
weakfishes -0.6 0.8 [0, -1, 0, -2, 0, 0, -1, 0, -2, 0]
weakhearted -1.6 0.8 [-1, -3, -1, -1, -2, -1, -3, -2, -1, -1]
weakish -1.2 0.4 [-1, -2, -1, -1, -1, -1, -2, -1, -1, -1]
weaklier -1.5 0.67082 [-1, -2, -1, -3, -2, -1, -1, -2, -1, -1]
weakliest -2.1 0.83066 [-2, -2, -2, -2, -3, -1, -2, -1, -4, -2]
weakling -1.3 1.00499 [-1, -2, -1, -3, -2, -2, -1, -1, 1, -1]
weaklings -1.4 0.66332 [-2, -1, -1, -1, -1, -2, -2, 0, -2, -2]
weakly -1.8 0.87178 [-2, -2, -2, -2, -4, -1, -1, -1, -1, -2]
weakness -1.8 0.6 [-2, -2, -2, -1, -1, -2, -1, -3, -2, -2]
weaknesses -1.5 0.5 [-2, -2, -1, -1, -2, -1, -1, -2, -1, -2]
weakside -1.1 1.37477 [-3, -2, -3, -1, -2, -1, 1, 1, -1, 0]
wealth 2.2 0.4 [2, 3, 2, 2, 2, 3, 2, 2, 2, 2]
wealthier 2.2 0.6 [3, 2, 1, 3, 2, 2, 2, 3, 2, 2]
wealthiest 2.2 0.9798 [2, 4, 4, 1, 2, 1, 2, 2, 2, 2]
wealthily 2.0 0.89443 [2, 3, 1, 4, 2, 1, 1, 2, 2, 2]
wealthiness 2.4 1.11355 [2, 4, 2, 4, 1, 2, 4, 1, 2, 2]
wealthy 1.5 1.0247 [1, 2, 1, 4, 1, 0, 2, 1, 2, 1]
weapon -1.2 0.87178 [0, -2, -2, -1, 0, -2, -1, -2, 0, -2]
weaponed -1.4 0.91652 [-2, -2, -3, -1, -1, 0, 0, -2, -1, -2]
weaponless 0.1 1.13578 [2, -1, 0, 0, -1, 1, -1, 0, 2, -1]
weaponry -0.9 0.7 [-2, -2, 0, -1, 0, -1, -1, -1, 0, -1]
weapons -1.9 0.9434 [-2, -1, -2, -2, -1, -3, -3, -3, -2, 0]
weary -1.1 1.13578 [-2, -1, -2, -3, 0, -1, -1, -2, 0, 1]
weep -2.7 0.9 [-2, -4, -4, -3, -3, -3, -3, -2, -1, -2]
weeper -1.9 0.53852 [-2, -2, -2, -3, -1, -1, -2, -2, -2, -2]
weepers -1.1 1.13578 [-2, -2, -1, -2, -1, 1, -2, 1, -1, -2]
weepie -0.4 0.91652 [0, 1, -1, 0, -1, -2, 0, -1, -1, 1]
weepier -1.8 0.87178 [-3, -3, -2, -1, -2, -2, -2, 0, -1, -2]
weepies -1.6 0.8 [-2, -3, -2, -1, -1, -2, -2, 0, -1, -2]
weepiest -2.4 0.91652 [-4, -2, -2, -2, -2, -1, -2, -2, -4, -3]
weeping -1.9 0.9434 [-2, -2, -1, -1, -1, -1, -4, -2, -2, -3]
weepings -1.9 0.9434 [-2, -2, -3, 0, -1, -2, -2, -3, -1, -3]
weeps -1.4 1.35647 [-2, -3, -1, -2, -1, -3, 1, -2, 1, -2]
weepy -1.3 1.55242 [-2, -3, -1, -2, 2, -3, -1, -2, 1, -2]
weird -0.7 0.64031 [-1, 0, 0, -1, -1, -1, 0, 0, -2, -1]
weirder -0.5 0.80623 [1, -1, -1, -1, -1, 1, -1, -1, 0, -1]
weirdest -0.9 1.22066 [-2, 0, -2, -1, -1, -1, -3, 1, 1, -1]
weirdie -1.3 0.45826 [-1, -2, -1, -2, -1, -1, -2, -1, -1, -1]
weirdies -1.0 0.63246 [0, -1, -1, -1, -1, 0, -2, -2, -1, -1]
weirdly -1.2 0.74833 [0, -1, -1, -2, -3, -1, -1, -1, -1, -1]
weirdness -0.9 1.64012 [-3, -2, -1, -1, 2, -1, 1, -3, 1, -2]
weirdnesses -0.7 1.00499 [-1, -2, 0, -1, -2, 1, -1, -1, -1, 1]
weirdo -1.8 0.6 [-2, -2, -2, -2, -2, -2, -1, -1, -3, -1]
weirdoes -1.3 0.64031 [-2, -1, -2, -1, -1, -2, -1, 0, -1, -2]
weirdos -1.1 0.9434 [-1, -1, -1, -2, 1, -3, -1, -1, -1, -1]
weirds -0.6 0.4899 [-1, -1, -1, 0, -1, 0, 0, -1, 0, -1]
weirdy -0.9 0.83066 [-1, -1, 0, 0, -1, 0, -2, -2, 0, -2]
welcome 2.0 0.63246 [1, 3, 2, 1, 2, 2, 2, 2, 3, 2]
welcomed 1.4 0.4899 [1, 1, 2, 2, 1, 2, 1, 2, 1, 1]
welcomely 1.9 0.53852 [2, 2, 2, 2, 1, 3, 2, 1, 2, 2]
welcomeness 2.0 0.89443 [2, 3, 1, 2, 3, 0, 2, 3, 2, 2]
welcomer 1.4 0.4899 [1, 1, 2, 2, 2, 2, 1, 1, 1, 1]
welcomers 1.9 0.7 [2, 2, 3, 2, 2, 1, 1, 3, 1, 2]
welcomes 1.7 0.78102 [1, 1, 2, 2, 3, 3, 1, 2, 1, 1]
welcoming 1.9 0.7 [2, 2, 1, 1, 2, 2, 2, 3, 3, 1]
well 1.1 1.04403 [0, 0, 2, 0, 2, 0, 1, 1, 3, 2]
welladay 0.3 1.18743 [2, -2, 0, 0, -1, 1, 0, 1, 2, 0]
wellaway -0.8 1.98997 [3, -2, -3, -3, -1, -2, 1, -2, -1, 2]
wellborn 1.8 0.74833 [2, 1, 2, 1, 2, 2, 1, 3, 1, 3]
welldoer 2.5 0.67082 [2, 2, 2, 3, 2, 3, 4, 3, 2, 2]
welldoers 1.6 0.8 [3, 1, 1, 0, 2, 1, 2, 2, 2, 2]
welled 0.4 0.8 [0, 0, 2, 0, 0, 0, 0, 0, 2, 0]
wellhead 0.1 0.3 [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
wellheads 0.5 0.92195 [0, 2, 0, 2, -1, 0, 1, 1, 0, 0]
wellhole -0.1 0.3 [0, 0, 0, -1, 0, 0, 0, 0, 0, 0]
wellies 0.4 0.4899 [0, 1, 0, 0, 0, 1, 0, 0, 1, 1]
welling 1.6 0.8 [2, 0, 1, 2, 2, 1, 3, 2, 2, 1]
wellness 1.9 0.9434 [1, 2, 2, 1, 2, 1, 1, 3, 4, 2]
wells 1.0 1.0 [2, 0, 3, 0, 1, 0, 2, 1, 1, 0]
wellsite 0.5 0.67082 [0, 0, 1, 2, 0, 0, 0, 0, 1, 1]
wellspring 1.5 0.92195 [3, 1, 1, 1, 0, 2, 2, 3, 1, 1]
wellsprings 1.4 0.8 [1, 0, 0, 2, 2, 2, 1, 2, 2, 2]
welly 0.2 0.4 [0, 0, 0, 1, 0, 1, 0, 0, 0, 0]
wept -2.0 1.09545 [-3, -2, -3, -3, -1, -3, 0, -1, -1, -3]
whimsical 0.3 1.61555 [2, 1, 1, 2, -1, 1, -3, -2, 1, 1]
whine -1.5 1.11803 [-1, -4, -1, -1, -1, -3, 0, -2, -1, -1]
whined -0.9 1.04403 [-2, -1, -2, -1, -1, -1, -2, 1, 1, -1]
whiner -1.2 0.4 [-1, -2, -1, -1, -1, -2, -1, -1, -1, -1]
whiners -0.6 1.95959 [-2, 0, -2, -2, 4, 1, 1, -2, -2, -2]
whines -1.8 0.6 [-2, -2, -2, -1, -2, -2, -3, -1, -2, -1]
whiney -1.3 0.45826 [-1, -2, -1, -1, -1, -2, -2, -1, -1, -1]
whining -0.9 1.51327 [-3, 0, -1, -1, -3, -1, 1, 2, -2, -1]
whitewash 0.1 0.7 [-1, 0, 1, -1, 0, 0, 0, 1, 1, 0]
whore -3.3 0.64031 [-4, -4, -3, -2, -3, -4, -3, -3, -4, -3]
whored -2.8 0.87178 [-2, -3, -4, -2, -2, -3, -4, -4, -2, -2]
whoredom -2.1 2.02237 [-4, -2, -4, -3, -3, -3, -4, -1, 2, 1]
whoredoms -2.4 1.11355 [-1, -3, 0, -3, -2, -3, -3, -4, -2, -3]
whorehouse -1.1 2.11896 [-2, -2, -2, 3, 3, -3, -3, -2, -1, -2]
whorehouses -1.9 1.92094 [-4, -3, -4, -3, 0, 0, -3, 2, -1, -3]
whoremaster -1.9 1.22066 [-1, -3, -3, -2, -1, -3, 0, 0, -3, -3]
whoremasters -1.5 1.85742 [-3, -1, -1, -4, -2, 2, -1, 1, -2, -4]
whoremonger -2.6 0.91652 [-3, -1, -3, -3, -3, -3, -3, -1, -4, -2]
whoremongers -2.0 1.78885 [-4, -3, 0, -3, -3, -3, -3, 1, 1, -3]
whores -3.0 1.0 [-3, -3, -4, -2, -1, -3, -4, -4, -2, -4]
whoreson -2.2 1.46969 [-2, -3, -4, -4, -3, -1, -1, 1, -3, -2]
whoresons -2.5 1.20416 [-3, -3, -2, -2, 0, -4, -3, -1, -3, -4]
wicked -2.4 0.8 [-3, -4, -3, -3, -2, -2, -2, -1, -2, -2]
wickeder -2.2 1.32665 [-2, -3, -1, -4, 1, -3, -3, -3, -2, -2]
wickedest -2.9 1.04403 [-3, -1, -3, -3, -3, -3, -1, -4, -4, -4]
wickedly -2.1 0.83066 [-2, -2, -1, -3, -2, -3, -1, -3, -1, -3]
wickedness -2.1 0.83066 [-2, -1, -2, -2, -3, -1, -2, -4, -2, -2]
wickednesses -2.2 1.16619 [-1, -2, -4, -2, -3, -1, -3, -4, -1, -1]
widowed -2.1 1.22066 [0, -4, -2, -4, -3, -2, -2, -2, -1, -1]
willingness 1.1 0.7 [0, 2, 1, 1, 2, 2, 1, 0, 1, 1]
wimp -1.4 1.28062 [-2, -3, -1, -2, -1, -2, -2, -1, 2, -2]
wimpier -1.0 1.18322 [-1, -2, -2, -1, -2, 0, 1, -2, 1, -2]
wimpiest -0.9 1.22066 [-3, -1, -2, -1, -2, -1, 1, 1, 0, -1]
wimpiness -1.2 0.9798 [1, -1, -3, -1, -2, -1, -1, -1, -2, -1]
wimpish -1.6 0.4899 [-2, -1, -2, -1, -2, -2, -1, -2, -2, -1]
wimpishness -0.2 1.249 [-3, -1, 0, -1, -1, 1, 1, 1, 1, 0]
wimple -0.2 0.74833 [0, -1, 0, 0, 0, 0, -2, 0, 0, 1]
wimples -0.3 0.78102 [-2, 0, 0, 0, 0, -1, 0, -1, 1, 0]
wimps -1.0 1.18322 [-2, -2, -1, -1, 0, -2, -2, 1, -2, 1]
wimpy -0.9 1.04403 [-2, -1, -1, -1, -1, -2, -1, 1, -2, 1]
win 2.8 0.87178 [3, 2, 4, 3, 2, 4, 3, 1, 3, 3]
winnable 1.8 0.6 [3, 2, 2, 2, 2, 1, 1, 1, 2, 2]
winned 1.8 0.6 [2, 2, 2, 2, 1, 2, 1, 1, 3, 2]
winner 2.8 0.87178 [2, 2, 2, 3, 4, 2, 3, 4, 2, 4]
winners 2.1 1.44568 [3, 3, 2, 3, 3, 2, 2, -2, 3, 2]
winning 2.4 0.4899 [2, 3, 3, 2, 2, 2, 3, 3, 2, 2]
winningly 2.3 1.48661 [1, 3, 4, 3, 3, 1, 2, 3, -1, 4]
winnings 2.5 0.92195 [3, 4, 3, 2, 2, 3, 1, 1, 3, 3]
winnow -0.3 1.00499 [0, -1, 0, -2, 1, 1, -2, 0, 0, 0]
winnower -0.1 0.3 [0, 0, 0, 0, 0, -1, 0, 0, 0, 0]
winnowers -0.2 0.6 [0, 0, 0, 0, 0, 0, 0, 0, -2, 0]
winnowing -0.1 0.53852 [0, 0, -1, 0, 0, 0, 0, 1, -1, 0]
winnows -0.2 0.4 [0, 0, -1, -1, 0, 0, 0, 0, 0, 0]
wins 2.7 0.78102 [2, 2, 3, 3, 4, 4, 3, 2, | |
= "tensor",
input = 0.5 * T.mean(T.sqr(gan_net.layers['D(x)'].output - 1)) + \
0.5 * T.mean(T.sqr(gan_net.layers['D(G(z))'].output)),
input_shape = (1,),
id = "discriminator_task"
)
gan_net.add_layer ( type = "objective",
id = "discriminator_obj",
origin = "discriminator_task",
layer_type = 'value',
objective = gan_net.dropout_layers['discriminator_task'].output,
datastream_origin = 'data',
verbose = verbose
)
#generator objective
gan_net.add_layer (type = "tensor",
input = 0.5 * T.mean(T.sqr(gan_net.layers['D(G(z))'].output - 1)),
input_shape = (1,),
id = "objective_task"
)
gan_net.add_layer ( type = "objective",
id = "generator_obj",
layer_type = 'value',
origin = "objective_task",
objective = gan_net.dropout_layers['objective_task'].output,
datastream_origin = 'data',
verbose = verbose
)
#softmax objective.
gan_net.add_layer ( type = "objective",
id = "classifier_obj",
origin = "softmax",
objective = "nll",
layer_type = 'discriminator',
datastream_origin = 'data',
verbose = verbose
)
# from yann.utils.graph import draw_network
# draw_network(net.graph, filename = 'gan.png')
# gan_net.pretty_print()
if cook is True:
"""gan_net.datastream['data'].batches2train = 10
gan_net.datastream['data'].batches2validate = 2
gan_net.datastream['data'].batches2test = 1"""
gan_net.cook ( objective_layers = ["classifier_obj", "discriminator_obj", "generator_obj"],
optimizer_params = optimizer_params,
discriminator_layers = ["D1-x", "D2-x","D3-x","D4-x"],
generator_layers = ["G1","G2","G3", "G4", "G(z)"],
classifier_layers = ["D1-x", "D2-x","D3-x","D4-x","softmax"],
softmax_layer = "softmax",
game_layers = ("D(x)", "D(G(z))"),
verbose = verbose )
return gan_net
def _mlp ( self,
id,
dataset = None,
root = '.',
params = None,
num_classes = None,
cook = True,
verbose = 1 ):
"""
This method is initializes and trains an MLP on some dataset.
Args:
root: save location for data
params: Initialize network with params.
cook: <True> If False, won't cook.
increment: which increment of MLP should be trained.
id: For directory setup.
dataset: an already created dataset.
"""
if verbose >=2:
print (".. Creating the MLP network")
if dataset is None:
dataset = self.base_dataset
if num_classes is None:
num_classes = self.num_classes
input_params = None
optimizer_params = {
"momentum_type" : 'false',
"momentum_params" : (0.65, 0.9, 30),
"regularization" : (0.0001, 0.0001),
"optimizer_type" : 'adam',
"id" : "optim"
}
dataset_params = {
"dataset" : dataset,
"svm" : False,
"n_classes" : num_classes,
"id" : 'data'
}
visualizer_params = {
"root" : root + '/visualizer/network-' + id,
"frequency" : 1,
"sample_size": 225,
"rgb_filters": True,
"debug_functions" : False,
"debug_layers": False,
"id" : 'visualizer'
}
resultor_params = {
"root" : root + "/resultor/network-" + id,
"id" : "resultor"
}
net = network( borrow = True,
verbose = verbose )
net.add_module ( type = 'optimizer',
params = optimizer_params,
verbose = verbose )
net.add_module ( type = 'datastream',
params = dataset_params,
verbose = verbose )
net.add_module ( type = 'visualizer',
params = visualizer_params,
verbose = verbose
)
net.add_module ( type = 'resultor',
params = resultor_params,
verbose = verbose
)
net.add_layer ( type = "input",
id = "input",
verbose = verbose,
datastream_origin = 'data')
if not params is None:
input_params = params ['c1']
net.add_layer ( type = "conv_pool",
id = "c1",
origin = "input",
num_neurons = 20,
filter_size = (5,5),
pool_size = (2,2),
activation = 'relu',
regularize = True,
batch_norm= True,
input_params = input_params,
verbose = verbose
)
if not params is None:
input_params = params ['c2']
net.add_layer ( type = "conv_pool",
id = "c2",
origin = "c1",
num_neurons = 50,
filter_shape = (3,3),
pool_size = (2,2),
batch_norm= True,
regularize = True,
activation = 'relu',
input_params = input_params,
verbose = verbose
)
if not params is None:
input_params = params ['fc1']
net.add_layer ( type = "dot_product",
origin = "c2",
id = "fc1",
num_neurons = 800,
activation = 'relu',
batch_norm= True,
regularize = True,
dropout_rate = 0.5,
input_params = input_params,
verbose = verbose
)
if not params is None:
input_params = params ['fc2']
net.add_layer ( type = "dot_product",
origin = "fc1",
id = "fc2",
num_neurons = 800,
activation = 'relu',
batch_norm= True,
dropout_rate = 0.5,
regularize = True,
input_params = input_params,
verbose = verbose
)
if not params is None:
input_params = params ['softmax']
net.add_layer ( type = "classifier",
id = "softmax",
origin = "fc2",
num_classes = num_classes,
activation = 'softmax',
regularize = True,
input_params = input_params,
verbose = verbose
)
net.add_layer ( type = "objective",
id = "obj",
origin = "softmax",
verbose = verbose
)
# self.base.pretty_print()
# draw_network(gan_net.graph, filename = 'base.png')
if cook is True:
net.cook( optimizer = 'optim',
objective_layers = ['obj'],
datastream = 'data',
classifier = 'softmax',
verbose = verbose
)
return net
def _update_num_classes (self, dataset ):
""" This method simply updates calsses.
"""
self.dataset.append(dataset)
f = open(self.dataset[-1] + '/data_params.pkl', 'rb')
data_params = cPickle.load(f)
f.close()
splits = data_params["splits"]
# This will only work if the classes are ordered
# from 0, ....
# also that max of test is basically the number of
# classes involved.
self.num_classes = max( splits ['test'] ) + 1
def _increment (self):
""" updates increment """
self.increment += 1
def create_gan ( self,
dataset = None,
params = None,
optimizer_params = None,
cook = True,
root = '.', verbose = 1 ):
"""
This function is a demo sets up one additional GAN on the new dataset.
Args:
dataset: Supply a dataset.
root: location to save down stuff.
params: Initialize network with parameters.
cook: <True> If False, won't cook.
increment: which number of GAN to
verbose: Similar to the rest of the dataset.
Returns:
net: A Network object.
"""
self._update_num_classes(dataset)
gan_net = self._gan(dataset = dataset,
params = params,
cook = cook,
root = root,
optimizer_params = optimizer_params,
verbose = verbose)
self.gans[self.increment] = gan_net
self._increment()
def train_gan ( self, gan = None, lr = (0.04, 0.001),
save_after_epochs = 1, epochs= (15), verbose = 2):
"""
This method will train the initial GAN on base dataset.
Args:
lr : leanring rates to train with. Default is (0.04, 0.001)
epochs: Epochs to train with. Default is (15)
save_after_epochs: Saves the network down after so many epochs.
verbose : As usual.
"""
if gan is None:
gan = self.gans[self.increment - 1]
if verbose >=2 :
print ( ".. Training GAN " )
gan.train( epochs = epochs,
k = 1,
learning_rates = lr,
pre_train_discriminator = 0,
validate_after_epochs = 10,
visualize_after_epochs = 2,
save_after_epochs = save_after_epochs,
training_accuracy = True,
show_progress = True,
early_terminate = False,
verbose = verbose)
def setup_base_mlp ( self,
dataset = None,
root = '.',
params = None,
cook = True,
verbose = 1 ):
"""
This method is sets up the first MLP on some dataset.
Args:
root: save location for data
params: Initialize network with params.
cook: <True> If False, won't cook.
increment: which increment of MLP should be trained.
dataset: Latest created dataset.
"""
if dataset is None:
dataset = self.dataset[-1]
self._update_num_classes(dataset)
self.base = self._mlp(dataset = dataset,
params = params,
cook = cook,
root = root,
id = 'base' + str(self.increment-1),
num_classes = self.num_classes,
verbose = verbose)
self.mini_batch_size = self.base.layers['input'].output_shape[0]
def train_mlp ( self,
mlp,
lr = (0.05, 0.01, 0.001),
epochs = (20, 20),
save_after_epochs = 1,
verbose = 2):
"""
This method will train the initial MLP on base dataset.
Args:
lr : leanring rates to train with. Default is (0.05, 0.01, 0.001)
save_after_epochs: Saves the network down after so many epochs.
epochs: Epochs to train with. Default is (20, 20)
verbose : As usual.
"""
if verbose >=2 :
print ( ".. Training MLP ")
mlp.train( epochs = epochs,
validate_after_epochs = 10,
visualize_after_epochs = 10,
save_after_epochs = save_after_epochs,
training_accuracy = True,
show_progress = True,
early_terminate = False,
learning_rates = lr,
verbose = verbose)
| |
socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.SOL_TCP)
else:
this_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
if this_socket:
try:
if self.useInetSocket:
this_socket.connect((self.socketHost, self.socketPort))
else:
this_socket.connect(self.socket_name)
except:
this_socket.close()
return this_socket
@staticmethod
def write_to_socket(this_socket, message):
try:
# Python 3 readiness
encoded_message=message.encode(encoding="cp437")
this_socket.sendall(encoded_message)
return True
except:
return False
@staticmethod
def read_from_socket(this_socket):
try:
encoded_message = this_socket.recv(65536)
return encoded_message.decode(encoding="cp437")
except:
return None
def send_message(self, message, message_extended=None, read_response=False):
message_to_send = message
if message_extended is not None:
message_to_send += "=" + message_extended
this_socket = self.open_socket()
if this_socket:
if self.write_to_socket(this_socket, message_to_send):
if read_response:
return self.read_from_socket(this_socket)
else:
return True
return False
def read_lcd(self):
try:
lcd_text = json.loads(self.send_message("lcd", read_response=True))
except:
lcd_text = ["Cannot receive", "LCD text from", "Controller/Script"]
# Due to the various codepage swaps, we're now receiving the raw degree symbol (0xB0) back when we poll the
# LCD under Python 3. Let's replace it with "°" for display in HTML
deg_symbol = bytes([0xB0]).decode(encoding="cp437")
sanitized_text = [n.replace(deg_symbol, "°") for n in lcd_text]
return sanitized_text
def is_connected(self):
# Tests if we're connected to the device via BrewPi-Script
try:
_ = json.loads(self.send_message("lcd", read_response=True))
except:
return False
return True
def retrieve_version(self):
try:
version_data = json.loads(self.send_message("getVersion", read_response=True))
except:
return None
return version_data
def is_legacy(self, version=None):
if version == None:
version = self.retrieve_version()
if not version:
# If we weren't passed a version & can't load from the device itself, return None
return None # There's probably a better way of doing this.
if version['version'][:3] == "0.2":
return True
return False
def retrieve_control_constants(self):
version = self.retrieve_version()
if version:
if self.is_legacy(version):
# If we're dealing with a legacy controller, we need to work with the old control constants.
control_constants = OldControlConstants()
control_constants.load_from_controller(self)
else:
# Otherwise, we need to work with the NEW control constants
control_constants = NewControlConstants()
control_constants.load_from_controller(self)
# Returning both the control constants structure as well as which structure we ended up using
control_constants.controller = self
return control_constants, self.is_legacy(version=version)
return None, None
def request_device_refresh(self):
    """Ask brewpi-script to refresh its cached device list.

    The refresh happens asynchronously on the script side; the short sleep
    gives it a moment to start before the caller polls again.
    """
    self.send_message("refreshDeviceList")  # refreshDeviceList refreshes the cache within brewpi-script
    time.sleep(0.1)
# We don't persist the "sensor" (onewire/pin) list in the database, so we always have to load it from the
# controller
def load_sensors_from_device(self):
    """Load the pin/sensor lists from brewpi-script's cache and wire up the
    well-known devices (door/heat/cool pins, chamber/room/beer sensors).

    Returns True on success; on failure sets self.error_message and
    clears the device lists, returning False.
    """
    # Note - getDeviceList actually is reading the cache from brewpi-script - not the firmware itself
    response = self.send_message("getDeviceList", read_response=True)

    # If the cache wasn't up to date, request that brewpi-script refresh it,
    # then poll periodically -- the refresh can take a few seconds.
    if response == "device-list-not-up-to-date":
        self.request_device_refresh()
        attempt = 1
        while response == "device-list-not-up-to-date" and attempt <= 4:
            time.sleep(5)
            response = self.send_message("getDeviceList", read_response=True)
            attempt += 1

    if not response or response == "device-list-not-up-to-date":
        self.all_pins = None
        self.available_devices = None
        self.installed_devices = None
        if not response:
            # We weren't able to reach brewpi-script
            self.error_message = "Unable to reach brewpi-script. Try restarting brewpi-script."
        else:
            # We reached brewpi-script, but it couldn't reach the controller
            self.error_message = "BrewPi-script wasn't able to load sensors from the controller. "
            self.error_message += "Try restarting brewpi-script. If that fails, try restarting the controller."
        return False

    # Devices loaded
    devices = json.loads(response)
    self.all_pins = PinDevice.load_all_from_pinlist(devices['pinList'])
    self.available_devices = SensorDevice.load_all_from_devicelist(devices['deviceList']['available'], self.all_pins, self)
    self.installed_devices = SensorDevice.load_all_from_devicelist(devices['deviceList']['installed'], self.all_pins, self)

    # Map each special device function to the attribute that should hold it
    function_to_attr = {
        SensorDevice.DEVICE_FUNCTION_CHAMBER_DOOR: 'door_pin',
        SensorDevice.DEVICE_FUNCTION_CHAMBER_HEAT: 'heat_pin',
        SensorDevice.DEVICE_FUNCTION_CHAMBER_COOL: 'cool_pin',
        SensorDevice.DEVICE_FUNCTION_CHAMBER_TEMP: 'chamber_sensor',
        SensorDevice.DEVICE_FUNCTION_CHAMBER_ROOM_TEMP: 'room_sensor',
        SensorDevice.DEVICE_FUNCTION_BEER_TEMP: 'beer_sensor',
    }
    for device in self.installed_devices:
        attr = function_to_attr.get(device.device_function)
        if attr is not None:
            setattr(self, attr, device)
    return True
# TODO - Determine if we care about controlSettings
# # Retrieve the control settings from the controller
# def retrieve_control_settings(self):
# version = self.retrieve_version()
# if version:
# if self.is_legacy(version):
# # If we're dealing with a legacy controller, we need to work with the old control constants.
# control_settings = OldControlSettings()
# control_settings.load_from_controller(self)
# else:
# # Otherwise, we need to work with the NEW control constants
# control_settings = OldControlSettings()
# control_settings.load_from_controller(self)
#
# # Returning both the control constants structure as well as which structure we ended up using
# control_settings.controller = self
# return control_settings, self.is_legacy(version=version)
# return None, None
def sync_temp_format(self) -> bool:
    """Force the controller's temp format to match this device definition.

    Queries the controller; if its tempFormat differs from ours, overwrites
    it (and, on legacy firmware, the matching tempSetMax/tempSetMin range).
    Returns True only when an update was actually written.
    """
    control_constants, legacy_mode = self.retrieve_control_constants()
    if control_constants is None:
        return False
    if control_constants.tempFormat == self.temp_format:
        return False  # Already in sync - nothing to do
    control_constants.tempFormat = self.temp_format
    if not legacy_mode:
        # TODO - Fix/expand this when we add "modern" controller support
        return False
    # Sensible setpoint ranges per temperature scale: (max, min)
    set_ranges = {'C': (35.0, -8.0), 'F': (90.0, 20.0)}
    if self.temp_format not in set_ranges:
        return False  # If we can't define a good max/min, don't do anything
    control_constants.tempSetMax, control_constants.tempSetMin = set_ranges[self.temp_format]
    control_constants.save_to_controller(self, "tempFormat")
    control_constants.save_to_controller(self, "tempSetMax")
    control_constants.save_to_controller(self, "tempSetMin")
    return True
def get_temp_control_status(self):
    """Read the controller's mode ('o'/'b'/'f'/'p') and return a status dict.

    The dict always has 'device_mode'; constant modes also carry 'set_temp'.
    """
    device_mode = self.send_message("getMode", read_response=True)
    if not device_mode:  # Covers both None and False/empty responses
        # Not sure if I want to pass the message back this way
        return {'device_mode': "unable_to_connect"}

    # If we could connect to the device, force-sync the temp format
    self.sync_temp_format()

    status = {}
    if device_mode == 'o':
        status['device_mode'] = "off"
    elif device_mode == 'b':
        status['device_mode'] = "beer_constant"
        status['set_temp'] = self.send_message("getBeer", read_response=True)
    elif device_mode == 'f':
        status['device_mode'] = "fridge_constant"
        status['set_temp'] = self.send_message("getFridge", read_response=True)
    elif device_mode == 'p':
        status['device_mode'] = "beer_profile"
    else:
        # No idea what the device mode is
        logger.error("Invalid device mode '{}'".format(device_mode))
    return status
def reset_profile(self):
    """Detach any active fermentation profile and persist the change."""
    for attr in ('active_profile', 'time_profile_started'):
        if getattr(self, attr) is not None:
            setattr(self, attr, None)
    self.save()
def set_temp_control(self, method, set_temp=None, profile=None, profile_startat=None):
    """Put the controller into the requested temperature-control mode.

    :param method: "off", "beer_constant", "fridge_constant", or "beer_profile".
    :param set_temp: Setpoint, required for the two constant modes.
    :param profile: FermentationProfile id, required for beer_profile mode.
    :param profile_startat: Optional timedelta; the profile is started that
        far in the past.
    :raises ValueError: if a required argument is missing/invalid.
    :return: True on success.
    """
    if method == "off":
        self.reset_profile()
        self.send_message("setOff")
    elif method == "beer_constant":
        if set_temp is not None:
            self.reset_profile()
            self.send_message("setBeer", str(set_temp))
        else:
            error_message = "Device {} set to beer_constant without a setpoint".format(self.device_name)
            logger.error(error_message)
            raise ValueError(error_message)
    elif method == "fridge_constant":
        if set_temp is not None:
            self.reset_profile()
            self.send_message("setFridge", str(set_temp))
        else:
            error_message = "Device {} set to fridge_constant without a setpoint".format(self.device_name)
            logger.error(error_message)
            raise ValueError(error_message)
    elif method == "beer_profile":
        try:
            ferm_profile = FermentationProfile.objects.get(id=profile)
        except Exception:  # narrowed from bare except (DoesNotExist, bad id type, ...)
            error_message = "Device {} set to beer_profile {} but the profile could not be located".format(
                self.device_name, profile)
            logger.error(error_message)
            raise ValueError(error_message)

        if not ferm_profile.is_assignable():
            error_message = "Device {} set to beer_profile {} but the profile isn't assignable".format(
                self.device_name, profile)
            logger.error(error_message)
            raise ValueError(error_message)

        if profile_startat is not None:
            start_at = profile_startat
        else:
            start_at = datetime.timedelta(seconds=0)  # Set start_at to have no effect

        self.active_profile = ferm_profile
        # We're subtracting start_at because we want to start in the past
        self.time_profile_started = timezone.now() - start_at
        self.save()
        # Defer the controller message until the DB transaction commits so
        # brewpi-script never sees a profile assignment that wasn't saved.
        transaction.on_commit(lambda: self.send_message("setActiveProfile", str(self.active_profile.id)))

    return True  # If we made it here, return True (we did our job)
def start_new_brew(self, active_beer):
    """Attach `active_beer` as the active log and enable data logging.

    The controller message is deferred until the DB transaction commits so
    brewpi-script never sees a beer that wasn't actually saved.
    """
    self.logging_status = self.DATA_LOGGING_ACTIVE
    self.active_beer = active_beer
    self.save()
    transaction.on_commit(lambda: self.send_message("startNewBrew", message_extended=active_beer.name, read_response=False))
def manage_logging(self, status):
    """Stop, resume, or pause data logging for this device.

    :param status: 'stop', 'resume', or 'pause'. Anything else is ignored.
    """
    # Target logging state and brewpi-script message for each action
    transitions = {
        'stop': (self.DATA_LOGGING_STOPPED, "stopLogging"),
        'resume': (self.DATA_LOGGING_ACTIVE, "resumeLogging"),
        'pause': (self.DATA_LOGGING_PAUSED, "pauseLogging"),
    }
    if status not in transitions:
        return

    if status == 'stop':
        # If there is a linked gravity log, stop that as well
        gravity = getattr(self, 'gravity_sensor', None)
        if gravity is not None:
            gravity.active_log = None
            gravity.save()
        self.active_beer = None

    new_status, message = transitions[status]
    self.logging_status = new_status
    self.save()
    transaction.on_commit(lambda: self.send_message(message, read_response=False))
def reset_eeprom(self):
    """Factory-reset the controller's EEPROM, then re-sync the temp format.

    :return: True if the temp format was successfully re-synced afterwards.
    """
    self.send_message("resetController")  # Reset the controller (response was unused)
    time.sleep(1)  # Give it 1 second to complete
    return self.sync_temp_format()  # ...then resync the temp format
def reset_wifi(self) -> bool:
response = self.send_message("resetWiFi") # Reset the controller WiFi settings
time.sleep(1) # Give it 1 | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyAX-12
# The MIT License
#
# Copyright (c) 2010,2015 <NAME> (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
This module contains unit tests for the "InstructionPacket" class.
"""
import pyax12.packet as pk
import pyax12.instruction_packet as ip
import unittest
class TestInstructionPacket(unittest.TestCase):
"""
Contains unit tests for the "InstructionPacket" class.
"""
# Test the "dynamixel_id" argument ########################################
def test_wrong_id_type(self):
    """Check that the instantiation of InstructionPacket fails when the
    argument "dynamixel_id" has a wrong type."""
    instruction = ip.READ_DATA
    params = (pk.PRESENT_TEMPERATURE, 0x01)
    # None, float, string and tuple are all invalid types for an id.
    for wrong_id in (None, 1.0, "hi", ()):
        with self.assertRaises(TypeError):
            ip.InstructionPacket(wrong_id, instruction, params)
def test_wrong_id_value(self):
    """Check that the instantiation of InstructionPacket fails when the
    argument "dynamixel_id" has a wrong value (too low or too high)."""
    instruction = ip.READ_DATA
    params = (pk.PRESENT_TEMPERATURE, 0x01)
    for wrong_id in (0xff, -1):  # too high, too low
        with self.assertRaises(ValueError):
            ip.InstructionPacket(wrong_id, instruction, params)
# # TODO: should it be considered as an actual error or can it be neglected?
# def test_wrong_id_value_sync_write(self):
# """Check that the instanciation of InstructionPacket fails when the
# argument "dynamixel_id" has a wrong value (the SYNC_WRITE
# instruction expects the broadcast id)."""
#
# dynamixel_id = 1 # wrong id (must be 0xfe)
# instruction = ip.SYNC_WRITE
# params = (pk.LED, 1, 1, 1)
#
# with self.assertRaises(ValueError):
# ip.InstructionPacket(dynamixel_id, instruction, params)
# Test the "instruction" argument #########################################
def test_wrong_instruction_type(self):
    """Check that the instantiation of InstructionPacket fails when the
    argument "instruction" has a wrong type."""
    dynamixel_id = 1
    params = (pk.PRESENT_TEMPERATURE, 0x01)
    # None, float, string and tuple are all invalid types for an instruction.
    for wrong_instruction in (None, 1.0, "hi", ()):
        with self.assertRaises(TypeError):
            ip.InstructionPacket(dynamixel_id, wrong_instruction, params)
def test_wrong_instruction_value(self):
    """Check that the instantiation of InstructionPacket fails when the
    argument "instruction" has a wrong value."""
    # 1000 is not a defined instruction code
    with self.assertRaises(ValueError):
        ip.InstructionPacket(1, 1000, (pk.PRESENT_TEMPERATURE, 0x01))
# Test the "parameters" argument ##########################################
def test_wrong_params_type(self):
    """Check that the instantiation of InstructionPacket fails when the
    argument "parameters" has a wrong type."""
    # A float, a string and a bare integer are all invalid "parameters"
    # types. The integer is paired with PING because no instruction takes
    # exactly one parameter (they take zero, or two or more).
    cases = (
        (ip.READ_DATA, 1.0),
        (ip.READ_DATA, "hello world"),
        (ip.PING, 1),
    )
    for instruction, wrong_params in cases:
        with self.assertRaises(TypeError):
            ip.InstructionPacket(1, instruction, wrong_params)
def test_good_params_type(self):
    """Check that the instantiation of InstructionPacket doesn't fail when
    the argument "parameters" has a right type."""
    cases = (
        # PING takes no parameters, so None must be accepted
        (ip.PING, None),
        # tuple, list, bytes and bytearray are all acceptable sequences
        (ip.READ_DATA, (pk.PRESENT_TEMPERATURE, 0x01)),
        (ip.READ_DATA, [pk.PRESENT_TEMPERATURE, 0x01]),
        (ip.READ_DATA, bytes((pk.PRESENT_TEMPERATURE, 0x01))),
        (ip.READ_DATA, bytearray((pk.PRESENT_TEMPERATURE, 0x01))),
    )
    for instruction, params in cases:
        try:
            ip.InstructionPacket(1, instruction, params)
        except (TypeError, ValueError):
            self.fail("Encountered an unexpected exception.")
def test_wrong_params_items_type(self):
    """Check that the instantiation of InstructionPacket fails when the
    "parameters" items type is wrong."""
    # None, float, string and tuple are all invalid item types.
    for wrong_item in (None, 1.0, "hi", ()):
        with self.assertRaises(TypeError):
            ip.InstructionPacket(1, ip.READ_DATA,
                                 (pk.PRESENT_TEMPERATURE, wrong_item))
def test_wrong_params_items_value(self):
    """Check that the instantiation of InstructionPacket fails when a
    "parameters" item has a wrong value (outside the valid byte range)."""
    for wrong_item in (0xffff, -1):  # too high, too low
        with self.assertRaises(ValueError):
            ip.InstructionPacket(1, ip.READ_DATA,
                                 (pk.PRESENT_TEMPERATURE, wrong_item))
def test_wrong_num_params_ping(self):
    """Check that the instantiation of InstructionPacket fails when the
    number of parameters is wrong (greater than 0 for the PING
    instruction)."""
    # One parameter is already too many for PING
    with self.assertRaises(ValueError):
        ip.InstructionPacket(1, ip.PING, (0x00, ))
def test_wrong_num_params_read(self):
    """Check that the instantiation of InstructionPacket fails when the
    number of parameters is wrong (for the READ_DATA instruction)."""
    wrong_params_list = (
        (pk.PRESENT_TEMPERATURE, 0x01, 0x00),  # too many
        (pk.PRESENT_TEMPERATURE, ),            # too few
    )
    for wrong_params in wrong_params_list:
        with self.assertRaises(ValueError):
            ip.InstructionPacket(1, ip.READ_DATA, wrong_params)
def test_wrong_num_params_write(self):
    """Check that the instantiation of InstructionPacket fails when the
    number of parameters is wrong (for the WRITE_DATA instruction)."""
    # The address alone, with no data byte, is too few parameters
    with self.assertRaises(ValueError):
        ip.InstructionPacket(1, ip.WRITE_DATA, (pk.LED, ))
def test_wrong_num_params_reg_write(self):
    """Check that the instantiation of InstructionPacket fails when the
    number of parameters is wrong (for the REG_WRITE instruction)."""
    # The address alone, with no data byte, is too few parameters
    with self.assertRaises(ValueError):
        ip.InstructionPacket(1, ip.REG_WRITE, (pk.LED, ))
def test_wrong_num_params_action(self):
    """Check that the instantiation of InstructionPacket fails when the
    number of parameters is wrong (greater than 0 for the ACTION
    instruction)."""
    # One parameter is already too many for ACTION
    with self.assertRaises(ValueError):
        ip.InstructionPacket(1, ip.ACTION, (0x00, ))
def test_wrong_num_params_reset(self):
"""Check that the instanciation of InstructionPacket fails when the
number | |
<gh_stars>0
from PyQt5 import QtGui, QtCore
from PyQt5.QtWidgets import (QGridLayout, QGroupBox, QLineEdit, QHBoxLayout,
QWidget, QLabel, QPushButton, QVBoxLayout,
QComboBox, QScrollArea, QMainWindow)
import pyqtgraph as pg
from ecogvis.functions.misc_dialogs import SelectChannelsDialog
from .FS_colorLUT import get_lut
import numpy as np
# Creates Event-Related Potential dialog ---------------------------------------
class ERPDialog(QMainWindow):
def __init__(self, parent):
    """Build the Event-Related Potentials window.

    Pulls the high-gamma signal and the speaker/mic time intervals from the
    parent's NWB file, builds the control panel (alignment, window width,
    Y scaling, grid rotation/flipping) and an initially empty plot grid
    inside a scroll area.

    Parameters
    ----------
    parent : object
        Main application window; must expose ``parent.model.nwb``
        (an NWB file handle — TODO confirm exact type).
    """
    super().__init__()
    self.setWindowTitle('Event-Related Potentials')
    self.resize(1300, 600)

    self.parent = parent
    self.nCols = 16
    self.alignment = 'start_time'
    self.interval_type = 'speaker'
    self.grid_order = np.arange(256)
    self.transparent = []
    # Per-channel ERP caches keyed by str(channel); one mean/sem pair per
    # alignment ('start'/'stop') x interval ('speaker'/'mic') combination.
    # They are filled lazily by get_erp().
    self.Y_start_speaker_mean = {}
    self.Y_start_speaker_sem = {}
    self.Y_stop_speaker_mean = {}
    self.Y_stop_speaker_sem = {}
    self.Y_start_mic_mean = {}
    self.Y_start_mic_sem = {}
    self.Y_stop_mic_mean = {}
    self.Y_stop_mic_sem = {}
    self.X = []
    self.Yscale = {}

    # Signal source: high-gamma band data (samples x channels), its sampling
    # rate, and the electrodes table used to group channels.
    self.source = self.parent.model.nwb.processing['ecephys'].data_interfaces['high_gamma'].data
    self.fs = self.parent.model.nwb.processing['ecephys'].data_interfaces['high_gamma'].rate
    self.electrodes = self.parent.model.nwb.processing['ecephys'].data_interfaces['high_gamma'].electrodes
    self.speaker_start_times = self.parent.model.nwb.intervals['TimeIntervals_speaker']['start_time'].data[:]
    self.speaker_stop_times = self.parent.model.nwb.intervals['TimeIntervals_speaker']['stop_time'].data[:]
    self.mic_start_times = self.parent.model.nwb.intervals['TimeIntervals_mic']['start_time'].data[:]
    self.mic_stop_times = self.parent.model.nwb.intervals['TimeIntervals_mic']['stop_time'].data[:]

    # Get only reference times smaller than the main signal duration
    self.maxTime = self.source.shape[0] / self.fs
    self.speaker_start_times = self.speaker_start_times[self.speaker_start_times < self.maxTime]
    self.speaker_stop_times = self.speaker_stop_times[self.speaker_stop_times < self.maxTime]
    self.mic_start_times = self.mic_start_times[self.mic_start_times < self.maxTime]
    self.mic_stop_times = self.mic_stop_times[self.mic_stop_times < self.maxTime]

    # Left panel
    self.push0_0 = QPushButton('Draw ERP')
    self.push0_0.clicked.connect(self.set_elec_group)
    label0 = QLabel('Group:')
    self.combo0 = QComboBox()
    self.find_groups()
    self.combo0.activated.connect(self.set_elec_group)
    label1 = QLabel('Alignment:')
    # Onset/Offset and Stimulus/Response are mutually exclusive toggle pairs;
    # the set_* slots uncheck the complementary button.
    self.push1_0 = QPushButton('Onset')
    self.push1_0.setCheckable(True)
    self.push1_0.setChecked(True)
    self.push1_0.clicked.connect(self.set_onset)
    self.push1_1 = QPushButton('Offset')
    self.push1_1.setCheckable(True)
    self.push1_1.setChecked(False)
    self.push1_1.clicked.connect(self.set_offset)
    self.push1_2 = QPushButton('Stimulus')
    self.push1_2.setCheckable(True)
    self.push1_2.setChecked(True)
    self.push1_2.clicked.connect(self.set_stim)
    self.push1_3 = QPushButton('Response')
    self.push1_3.setCheckable(True)
    self.push1_3.setChecked(False)
    self.push1_3.clicked.connect(self.set_resp)
    label2 = QLabel('Width (sec):')
    self.qline2 = QLineEdit('2')
    self.qline2.returnPressed.connect(self.set_width)
    label3 = QLabel('Y scale:')
    self.combo1 = QComboBox()
    self.combo1.addItem('individual')
    self.combo1.addItem('global max')
    self.combo1.addItem('global std')
    self.combo1.activated.connect(self.scale_plots)
    self.push2_0 = QPushButton('Significant')
    self.push2_0.setCheckable(True)
    self.push2_0.setChecked(False)
    self.push3_0 = QPushButton('Brain areas')
    self.push3_0.clicked.connect(self.areas_select)
    self.push4_0 = QPushButton('Save image')
    self.push4_0.clicked.connect(self.save_image)
    label4 = QLabel('Rotate grid:')
    self.push5_0 = QPushButton('90°')
    self.push5_0.clicked.connect(lambda: self.rearrange_grid(90))
    self.push5_0.setToolTip('Counter-clockwise')
    self.push5_1 = QPushButton('-90°')
    self.push5_1.clicked.connect(lambda: self.rearrange_grid(-90))
    self.push5_1.setToolTip('Clockwise')
    self.push5_2 = QPushButton('T')
    self.push5_2.clicked.connect(lambda: self.rearrange_grid('T'))
    self.push5_2.setToolTip('Transpose')
    label5 = QLabel('Rearrange grid:')
    self.push5_3 = QPushButton('L-R')
    self.push5_3.clicked.connect(lambda: self.rearrange_grid('FLR'))
    self.push5_3.setToolTip('Flip Left-Right')
    self.push5_4 = QPushButton('U-D')
    self.push5_4.clicked.connect(lambda: self.rearrange_grid('FUD'))
    self.push5_4.setToolTip('Flip Up-Down')
    self.push5_5 = QPushButton('2FL')
    self.push5_5.clicked.connect(lambda: self.rearrange_grid('2FL'))
    self.push5_5.setToolTip('Double flip')

    # All controls start disabled; draw_erp() enables them once there is data
    self.push1_0.setEnabled(False)
    self.push1_1.setEnabled(False)
    self.push1_2.setEnabled(False)
    self.push1_3.setEnabled(False)
    self.qline2.setEnabled(False)
    self.combo1.setEnabled(False)
    self.push2_0.setEnabled(False)
    self.push3_0.setEnabled(False)
    self.push4_0.setEnabled(False)
    self.push5_0.setEnabled(False)
    self.push5_1.setEnabled(False)
    self.push5_2.setEnabled(False)
    self.push5_3.setEnabled(False)
    self.push5_4.setEnabled(False)
    self.push5_5.setEnabled(False)

    grid0 = QGridLayout()
    grid0.addWidget(label0, 0, 0, 1, 2)
    grid0.addWidget(self.combo0, 0, 2, 1, 4)
    grid0.addWidget(label1, 1, 0, 1, 6)
    grid0.addWidget(self.push1_0, 2, 0, 1, 3)
    grid0.addWidget(self.push1_1, 2, 3, 1, 3)
    grid0.addWidget(self.push1_2, 3, 0, 1, 3)
    grid0.addWidget(self.push1_3, 3, 3, 1, 3)
    grid0.addWidget(QHLine(), 4, 0, 1, 6)
    grid0.addWidget(label2, 5, 0, 1, 6)
    grid0.addWidget(self.qline2, 6, 0, 1, 6)
    grid0.addWidget(QHLine(), 7, 0, 1, 6)
    grid0.addWidget(label3, 8, 0, 1, 6)
    grid0.addWidget(self.combo1, 9, 0, 1, 6)
    grid0.addWidget(QHLine(), 10, 0, 1, 6)
    grid0.addWidget(self.push2_0, 11, 0, 1, 6)
    grid0.addWidget(self.push3_0, 12, 0, 1, 6)
    grid0.addWidget(self.push4_0, 13, 0, 1, 6)
    grid0.addWidget(QHLine(), 14, 0, 1, 6)
    grid0.addWidget(label4, 15, 0, 1, 6)
    grid0.addWidget(self.push5_0, 16, 0, 1, 2)
    grid0.addWidget(self.push5_1, 16, 2, 1, 2)
    grid0.addWidget(self.push5_2, 16, 4, 1, 2)
    grid0.addWidget(label5, 17, 0, 1, 6)
    grid0.addWidget(self.push5_3, 18, 0, 1, 2)
    grid0.addWidget(self.push5_4, 18, 2, 1, 2)
    grid0.addWidget(self.push5_5, 18, 4, 1, 2)
    grid0.setAlignment(QtCore.Qt.AlignTop)

    panel0 = QGroupBox('Controls:')
    panel0.setFixedWidth(180)
    panel0.setLayout(grid0)

    self.leftbox = QVBoxLayout()
    self.leftbox.addWidget(self.push0_0)
    self.leftbox.addWidget(panel0)

    # Right panel
    self.win = pg.GraphicsLayoutWidget()
    self.win.resize(1020, 1020)
    background_color = self.palette().color(QtGui.QPalette.Background)
    self.win.setBackground(background_color)

    # this is to avoid the error:
    # RuntimeError: wrapped C/C++ object of type GraphicsScene has been deleted
    vb = CustomViewBox(self, 0)
    p = self.win.addPlot(0, 0, viewBox=vb)
    p.hideAxis('left')
    p.hideAxis('bottom')

    # Scroll Area Properties
    self.scroll = QScrollArea()
    self.scroll.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    self.scroll.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAsNeeded)
    self.scroll.setWidgetResizable(False)
    self.scroll.setWidget(self.win)

    self.centralwidget = QWidget()
    self.setCentralWidget(self.centralwidget)
    self.hbox = QHBoxLayout(self.centralwidget)
    self.hbox.addLayout(self.leftbox)  # add panels first
    self.hbox.addWidget(self.scroll)
    self.show()
def find_groups(self):
    """Find electrodes groups present in current file."""
    # Populate the group selector with every electrode group name
    for group_name in self.parent.model.nwb.electrode_groups.keys():
        self.combo0.addItem(group_name)
def set_elec_group(self):
    """Sets electrodes group to be plotted, resizes plot grid."""
    self.elec_group = self.combo0.currentText()
    group_names = self.electrodes.table['group_name'].data[:]
    self.grid_order = np.where(group_names == self.elec_group)[0]
    self.nElecs = len(self.grid_order)
    self.nCols = 16
    self.nRows = int(self.nElecs / self.nCols)
    self.set_grid()
    self.draw_erp()
def set_grid(self):
    """Clear the 16x16 plot grid and fix row/column sizes for the current layout."""
    # Drop any plot items left over from a previous draw
    for row in range(16):
        for col in range(16):
            item = self.win.getItem(row, col)
            if item is not None:
                self.win.removeItem(item)
    layout = self.win.ci.layout
    for col in range(self.nCols):
        layout.setColumnFixedWidth(col, 60)
        layout.setColumnSpacing(col, 3)
    for row in range(self.nRows):
        layout.setRowFixedHeight(row, 60)
        layout.setRowSpacing(row, 3)
def set_onset(self):
    """Align ERPs to interval onsets; unchecks the complementary Offset button."""
    self.alignment = 'start_time'
    self.push1_1.setChecked(False)
    self.draw_erp()
def set_offset(self):
    """Align ERPs to interval offsets; unchecks the complementary Onset button."""
    self.alignment = 'stop_time'
    self.push1_0.setChecked(False)
    self.draw_erp()
def set_stim(self):
    """Use speaker (stimulus) intervals; unchecks the Response button."""
    self.interval_type = 'speaker'
    self.push1_3.setChecked(False)
    self.draw_erp()
def set_resp(self):
    """Use mic (response) intervals; unchecks the Stimulus button."""
    self.interval_type = 'mic'
    self.push1_2.setChecked(False)
    self.draw_erp()
def set_width(self):
    """Invalidate all cached ERPs (the window width changed) and redraw."""
    cache_names = (
        'Y_start_speaker_mean', 'Y_start_speaker_sem',
        'Y_stop_speaker_mean', 'Y_stop_speaker_sem',
        'Y_start_mic_mean', 'Y_start_mic_sem',
        'Y_stop_mic_mean', 'Y_stop_mic_sem',
    )
    for name in cache_names:
        setattr(self, name, {})
    self.X = []
    self.draw_erp()
def rearrange_grid(self, angle):
    """Rotate, transpose or flip the electrode grid layout, then redraw.

    Parameters
    ----------
    angle : int or str
        90 (rotate counter-clockwise), -90 (rotate clockwise),
        'T' (transpose), 'FLR' (flip left-right), 'FUD' (flip up-down),
        '2FL' (flip both axes).
    """
    grid = self.grid_order.reshape(-1, self.nCols)  # re-arranges as 2D array
    if angle == -90:        # 90 degrees clockwise
        grid = np.rot90(grid, axes=(1, 0))
    elif angle == 90:       # 90 degrees counter-clockwise
        grid = np.rot90(grid, axes=(0, 1))
    elif angle == 'T':      # transpose
        grid = grid.T
    elif angle == 'FLR':    # flip left-right
        grid = np.flip(grid, 1)
    elif angle == 'FUD':    # flip up-down
        grid = np.flip(grid, 0)
    elif angle == '2FL':    # double flip
        grid = np.flip(np.flip(grid, 1), 0)
    if angle in (-90, 90, 'T'):
        # Rotations and transposition exchange the grid's dimensions.
        # (No-op for a square grid, so the swap can be unconditional --
        # replaces three duplicated np.copy-based swap blocks.)
        self.nRows, self.nCols = self.nCols, self.nRows
    self.grid_order = grid.flatten()  # re-arranges as 1D array
    self.draw_erp()
def save_image(self):
    """Open pyqtgraph's export dialog for the current plot grid."""
    p = self.win.getItem(row=0, col=0)
    # Point the scene's context-menu target at an existing plot item so the
    # export dialog has something to export
    self.win.sceneObj.contextMenuItem = p
    self.win.sceneObj.showExportDialog()
def scale_plots(self):
    """Rescale the Y axis of every channel plot per the combo selection."""
    scale_choice = self.combo1.currentText()
    # Mean-ERP cache matching the current alignment/interval combination
    mean_caches = {
        ('start_time', 'speaker'): self.Y_start_speaker_mean,
        ('start_time', 'mic'): self.Y_start_mic_mean,
        ('stop_time', 'speaker'): self.Y_stop_speaker_mean,
        ('stop_time', 'mic'): self.Y_stop_mic_mean,
    }
    for ind, ch in enumerate(self.grid_order):
        plot_item = self.win.getItem(row=np.floor(ind / self.nCols),
                                     col=ind % self.nCols)
        if plot_item is None:
            # Grid not drawn yet -- nothing to rescale
            return
        if scale_choice != 'individual':
            ymin, ymax = self.Yscale[scale_choice][0], self.Yscale[scale_choice][1]
            plot_item.setYRange(ymin, ymax)
        else:
            # Symmetric range around zero from this channel's own mean ERP
            yrng = max(abs(mean_caches[(self.alignment, self.interval_type)][str(ch)]))
            plot_item.setYRange(-yrng, yrng)
def get_erp(self, ch):
    """Return (mean, sem, X) for channel `ch`, computing and caching on first use.

    The cache pair used depends on the current alignment
    ('start_time'/'stop_time') and interval type ('speaker'/'mic');
    entries are keyed by str(ch). Replaces four near-identical
    compute-and-cache branches with a single lookup-driven path.
    """
    caches = {
        ('start_time', 'speaker'): (self.Y_start_speaker_mean, self.Y_start_speaker_sem),
        ('start_time', 'mic'): (self.Y_start_mic_mean, self.Y_start_mic_sem),
        ('stop_time', 'speaker'): (self.Y_stop_speaker_mean, self.Y_stop_speaker_sem),
        ('stop_time', 'mic'): (self.Y_stop_mic_mean, self.Y_stop_mic_sem),
    }
    mean_cache, sem_cache = caches[(self.alignment, self.interval_type)]
    key = str(ch)
    if key not in mean_cache:  # not calculated yet
        Y_mean, Y_sem, X = self.calc_erp(ch=ch)
        mean_cache[key] = Y_mean
        sem_cache[key] = Y_sem
        self.X = X
    return mean_cache[key], sem_cache[key], self.X
def calc_erp(self, ch):
if (self.alignment == 'start_time') and (self.interval_type == 'speaker'):
ref_times = self.speaker_start_times
if (self.alignment == 'stop_time') and (self.interval_type == 'speaker'):
ref_times = self.speaker_stop_times
if (self.alignment == 'start_time') and (self.interval_type == 'mic'):
ref_times = self.mic_start_times
if (self.alignment == 'stop_time') and (self.interval_type == 'mic'):
ref_times = self.mic_stop_times
ref_bins = (ref_times * self.fs).astype('int')
nBinsTr = int(float(self.qline2.text()) * self.fs / 2)
start_bins = ref_bins - nBinsTr
stop_bins = ref_bins + nBinsTr
nTrials = len(ref_times)
Y = np.zeros((nTrials, 2 * nBinsTr)) + np.nan
for tr in np.arange(nTrials):
Y[tr, :] = self.source[start_bins[tr]:stop_bins[tr], ch]
Y_mean = np.nanmean(Y, 0)
Y_sem = np.nanstd(Y, 0) / np.sqrt(Y.shape[0])
X = np.arange(0, 2 * nBinsTr) / self.fs
return Y_mean, Y_sem, X
    def draw_erp(self):
        """Redraw the per-channel ERP grid: re-enable controls, then plot each channel."""
        # Re-enable the UI controls that operate on a drawn ERP grid.
        self.push1_0.setEnabled(True)
        self.push1_1.setEnabled(True)
        self.push1_2.setEnabled(True)
        self.push1_3.setEnabled(True)
        self.qline2.setEnabled(True)
        self.combo1.setEnabled(True)
        self.push3_0.setEnabled(True)
        self.push4_0.setEnabled(True)
        self.push5_0.setEnabled(True)
        self.push5_1.setEnabled(True)
        self.push5_2.setEnabled(True)
        self.push5_3.setEnabled(True)
        self.push5_4.setEnabled(True)
        self.push5_5.setEnabled(True)
        self.combo1.setCurrentIndex(self.combo1.findText('individual'))
        self.set_grid()
        cmap = get_lut()
        # Track global amplitude statistics across channels (used for scaling).
        ymin, ymax = 0, 0
        ystd = 0
        for ind, ch in enumerate(self.grid_order):
            if ch in self.transparent:  # if it should be made transparent
                elem_alpha = 30
            else:
                elem_alpha = 255
            Y_mean, Y_sem, X = self.get_erp(ch=ch)
            # Remove the DC offset so traces are centered around zero.
            # NOTE(review): Y_mean is the cached array returned by get_erp;
            # the in-place subtraction also mutates the cache — confirm intended.
            dc = np.mean(Y_mean)
            Y_mean -= dc
            ymax = max(max(Y_mean), ymax)
            ymin = min(min(Y_mean), ymin)
            ystd = max(np.std(Y_mean), ystd)
            # Include items
            # Map the linear channel index to its (row, col) grid position.
            row = np.floor(ind / self.nCols).astype('int')
            col = int(ind % self.nCols)
            p = self.win.getItem(row=row, col=col)
            if p is None:
                vb = CustomViewBox(self, ch)
                p = self.win.addPlot(row=row, col=col, viewBox=vb)
            p.hideAxis('left')
| |
of TypedList support all operations available for
Python Lists (as of Python version 1.5.2a2)
"""
    # Marker attribute used by __add__'s duck-typed "is a typed list" check.
    _is_a_typed_list = 1

    def __init__(self, function, elements=None):
        """Build a list whose members must satisfy `function` (a predicate).

        If `elements` itself satisfies the predicate it is stored as a
        single element; otherwise it is treated as an iterable and each
        candidate is appended (and type-checked) individually.
        """
        self._func = function
        self.elements = []
        if not elements is None:
            if self._func(elements):
                self.elements.append(elements)
            else:
                for el in elements:
                    self.append(el)

    def append(self, el):
        """Append `el`, raising TypeError if it fails the type predicate."""
        if self._func(el):
            self.elements.append(el)
        else:
            raise TypeError, 'Element to be added to list has incorrect type.'
    def __len__(self): return len(self.elements)

    def __repr__(self):
        # Constructor-style representation (backquotes are Python 2 repr()).
        return "%s(%s, %s)" % (self.__class__, self._func.__name__,
                `self.elements`)

    def __str__(self):
        return `self.elements`

    def __getitem__(self, i): return self.elements[i]

    def __setitem__(self, i, v):
        """Index assignment with type enforcement."""
        if self._func(v):
            self.elements[i] = v
        else:
            raise TypeError, 'Item not of correct type in __setitem__'

    def __delitem__(self, i): del self.elements[i]

    def __getslice__(self, i, j):
        # Python 2 slice protocol: return a new typed list sharing the predicate.
        new = self.__class__(self._func)
        new.elements = self.elements[i:j]
        return new

    def __setslice__(self, i, j, v):
        # NOTE(review): unlike __setitem__, a slice assignment containing
        # wrongly-typed elements is silently ignored (no TypeError raised).
        if self._alltrue(v):
            self.elements[i:j] = v

    def __delslice__(self, i, j):
        del self.elements[i:j]
    def __add__(self, other):
        """Concatenate two typed lists; both must share the same predicate."""
        if not hasattr(other, '_is_a_typed_list'):
            raise TypeError,'List to be concatenated not instance of %s' %\
                  self.__class__
        if self._func <> other._func:
            raise TypeError, 'Lists to be added not of same type'
        new = self.__class__(self._func)
        new.elements = self.elements + other.elements
        return new

    def __mul__(self, other):
        """Repeat the list `other` times (int only), like list * n."""
        if type(other) == type(0):
            new = self.__class__(self._func)
            new.elements = self.elements * other
            return new
        else:
            raise TypeError, "can't multiply list with non-int"
    __rmul__ = __mul__
    def __copy__(self):
        """Copy: elements are copied via their own __copy__; any other
        instance attributes are copied with copy.deepcopy."""
        new = self.__class__(self._func)
        for el in self.elements:
            new.elements.append(el.__copy__())
        have = new.__dict__.has_key
        for key in self.__dict__.keys():
            if not have(key):
                new.__dict__[key] = copy.deepcopy(self.__dict__[key])
        return new
    # NOTE(review): copy.deepcopy() calls __deepcopy__(memo); aliasing it to
    # __copy__ (which accepts no memo argument) raises TypeError — verify.
    __deepcopy__ = clone = __copy__

    def _alltrue(self, els):
        # True when every element satisfies the predicate (Py2 filter/map idiom).
        return len(els) == len(filter(None, map(self._func, els)))
    def sort(self): self.elements.sort()
    def reverse(self): self.elements.reverse()
    def count(self, el): return self.elements.count(el)

    def extend(self, els):
        """Append all of `els`; the whole batch is rejected if any element fails."""
        if self._alltrue(els):
            if HAVE_EXTEND:
                self.elements.extend(els)
            else:
                for el in els:
                    self.elements.append(el)
        else:
            raise TypeError, 'One or more elements of list not of correct type'

    def pop(self):
        # Fallback path supports interpreters predating list.pop (see HAVE_POP).
        if HAVE_POP:
            return self.elements.pop()
        else:
            el = self.elements[-1]
            del self.elements[-1]
            return el

    def index(self, el): return self.elements.index(el)
    def remove(self, el): self.elements.remove(el)

    def insert(self, pos, el):
        """Insert `el` at `pos`, enforcing the type predicate."""
        if self._func(el):
            self.elements.insert(pos, el)
        else:
            raise TypeError, 'Item not of correct type in insert'

    def indices(self, len=len):
        # Forward iteration indices; `len` bound as a default for speed.
        return xrange(len(self.elements))

    def reverse_indices(self, len=len):
        # Indices last-to-first; used by the in-place deletion helpers.
        return xrange(len(self.elements)-1, -1, -1)
####################
class molResidue(TypedList):
    """A residue: a typed list of atoms plus arbitrary keyword attributes."""
    _is_a_residue = 1

    def __init__(self, name='', atoms=None, **kw):
        TypedList.__init__(self, is_atom, atoms)
        self.name = name
        # Attach any extra keyword data (e.g. idx, icode) as attributes.
        for key, value in kw.items():
            setattr(self, key, value)

    def num_atoms(self):
        """returns the number of atoms in residue"""
        return len(self.elements)

    def has_atom(self, name):
        """returns true if residue has an atom named 'name'"""
        for atom in self.elements:
            if atom.name == name:
                return 1
        return 0

    def atoms(self):
        # All atoms, as the underlying plain list.
        return self.elements

    def atoms_with_name(self, *names):
        """returns atoms in residue with specified names"""
        ret = []
        Append = ret.append
        # NOTE(review): at most one atom per requested name is returned
        # (the inner loop breaks on the first match).
        for name in names:
            for atom in self.elements:
                if atom.name == name:
                    Append(atom)
                    break
        return ret

    def delete_atoms_with_name(self, *names):
        """delete atoms in residue with specified names"""
        els = self.elements
        # Iterate backwards so deletions do not shift upcoming indices.
        for i in self.reverse_indices():
            atom = els[i]
            if atom.name in names:
                del els[i]

    def atoms_not_with_name(self, *names):
        """returns atoms in residue excluding specified names"""
        ret = []
        for atom in self.elements:
            if not atom.name in names:
                ret.append(atom)
        return ret

    def atom_coordinates(self, *names):
        """returns coordinates of named atoms.
        If names is omitted all atom coordinates are returned."""
        if len(names)==0:
            atoms = self.elements
        else:
            atoms = apply(self.atoms_with_name, names)
        na = len(atoms)
        # Returns None when no atoms match.
        if na == 0: return
        if HAVE_NUMPY:
            a = Numeric.zeros((na, 3), 'd')
        else:
            a = [None]*na
        pos = 0
        for atom in atoms:
            a[pos] = atom.coords()
            pos = pos + 1
        return a

    def assign_radii(self):
        # Abstract: concrete residue classes must override.
        raise AttributeError, 'Should be defined in specialized class'

    def type(self):
        return 'residue'
class molChain(TypedList):
_is_a_chain = 1
def __init__(self, name='', residues=None, **kw):
self.name = name
TypedList.__init__(self, is_residue, residues)
for key, value in kw.items():
setattr(self, key, value)
def num_residues(self): return len(self)
def num_atoms(self):
na = 0
for res in self.elements:
na = na + len(res.elements)
return na
def atoms(self):
ret = []
Append = ret.append
for res in self.elements:
for atom in res.elements:
Append(atom)
return ret
def residues_with_name(self, *names):
"""returns named residues as a python list"""
if len(names) == 0:
return
l = []
for res in self.elements:
if res.name in names:
l.append(res)
return l
def delete_residues_with_name(self, *names):
"""delete named residues from Chain"""
if len(names) == 0:
return
els = self.elements
for i in self.reverse_indices():
if els[i].name in names:
del els[i]
def residues_not_with_name(self, *names):
"""returns residues excluding specified names as a python list"""
ret = []
for res in self.elements:
if not res.name in names:
ret.append(res)
return ret
def atoms_with_name(self, *names):
ret = []
Append = ret.append
if len(names) > 0:
for res in self.elements:
for name in names:
for atom in res.elements:
if atom.name == name:
Append(atom)
break
else:
for res in self.elements:
for atom in res.elements:
Append(atom)
return ret
def delete_atoms_with_name(self, *names):
"""delete atoms in residue with specified names"""
for res in self.elements:
apply(res.delete_atoms_with_name, names)
def atoms_not_with_name(self, *names):
"""returns atoms in residue excluding specified names"""
ret = []
for res in self.elements:
ret[len(ret):] = apply(res.atoms_not_with_name, names)
return ret
def atom_coordinates(self, *names):
"""returns coordinates of named atoms. if names is None
all atom coordinates are returned."""
coords = []
if len(names) > 0:
for res in self.elements:
for atom in res.elements:
if atom.name in names:
coords.append(atom.coords())
else:
atoms = apply(self.atoms_with_name, names)
coords = map(lambda a:a.coords(), atoms)
if HAVE_NUMPY:
return Numeric.array(coords)
else:
return coords
def atoms(self):
atoms = []
for res in self.elements:
for atom in res: atoms.append(atom)
return atoms
def delete_alt_locs(self):
"""delete_alt_locs - remove secondary conformations in the chain
In a chain with multiple occupancy and alternate location identifiers,
it is often desirable to eliminate the secondary conformations for
use in simulation, etc. This function (abitrarily) finds and selects
the first given conformation and deletes all other conformations.
"""
AtomCount = self.present
chain = self.elements
delete = []
for i in xrange(len(chain)):
residue = chain[i]
rid = (residue.idx, residue.icode)
for j in xrange(len(residue)):
atom = residue[j]
anam = atom.name
try:
acnt = AtomCount[rid][anam]
except KeyError:
print "Unable to locate %s %s %s in present dictionary."%\
(rid[0], rid[1], anam)
return
if acnt == 1:
continue
if acnt < 1:
AtomCount[rid][anam] = acnt + 1
delete.append((i, j))
continue
atom.alt = ' '
AtomCount[rid][anam] = -acnt + 2
delete.reverse()
for r, a in delete:
del chain[r][a]
def assign_radii(self):
for res in self:
res.assign_radii()
def preserve_chain_hetero(self):
"""prevent hetero residues from being deleted as hetero atoms
Normally, delete_hetero will delete all hetero atoms from a
molecule. This includes waters and heterogroups (hemes, etc.),
but it also includes hetero residues--nonstandard residues
that have backbone connectivity but perhaps extra atoms (e.g.
S-hydroxy-cysteine). Deleting these residues may disrupt an
otherwise continuous chain and may be undesirable.
Given that a chain has a valid SEQRES entry, this function will
'unset' the hetero flag for heterogroups that are involved in
the sequence itself. When delete_hetero is run, these atoms
will be preserved.
"""
for i in xrange(self.num_residues()):
if hasattr(self[i], 'chain_het') and hasattr(self[i], 'het'):
delattr(self[i], 'het')
def delete_hetero(self):
"""delete all hetero atoms from a protein
This function removes all HETATM records from the molecular
structure. This include waters, heme groups, as well as residues
with nonstandard structures
"""
for i in xrange(self.num_residues()-1, -1, -1):
if hasattr(self[i], 'het'):
del self[i]
def delete_waters(self):
for i in xrange(self.num_residues()-1, -1, -1):
if self[i].name == 'HOH':
del self[i]
def translate(self, dx, dy=None, dz=None):
for res in self.elements:
res.translate(dx, dy, dz)
def rotatex(self, theta):
for res in self.elements:
res.rotatex(theta)
def rotatey(self, theta):
res.rotatey(theta)
def rotatez(self, theta):
for res in self.elements:
res.rotatez(theta)
def type(self):
return 'Chain'
class molMol(TypedList):
    """A molecule: a typed list of chains plus experiment metadata."""
    _is_a_mol = 1

    def __init__(self, name='', chains=None):
        self.name = name
        # Experiment metadata, populated elsewhere (presumably by the file
        # parser: resolution, method list, refinement program/R-factors).
        self.resolution = None
        self.method = []
        self.rprog = None
        self.rfree = None
        self.rvalu = None
        TypedList.__init__(self, is_chain, chains)

    def num_chain(self): return len(self)

    def num_res(self):
        # Total residue count over all chains.
        nr = 0
        for chain in self: nr = nr + len(chain)
        return nr

    def num_atoms(self):
        # Total atom count over all chains.
        na = 0
        for chain in self: na = na + chain.num_atoms()
        return na
| |
import json
import os
import torch
import torch.nn as nn
from utils import get_latest_timestamp
def create_embedding(size):
    """Create an ``nn.Embedding`` of the given size with small-normal init.

    ``size`` is splatted into ``nn.Embedding`` — i.e. it is the
    (num_embeddings, embedding_dim) pair — and the weight matrix is drawn
    from N(0, 0.01).
    """
    weight = torch.zeros(*size).normal_(0, 0.01)
    return nn.Embedding(*size, _weight=weight)


def create_embeddings(embedding_meta_dict):
    """Build one embedding per entry of ``embedding_meta_dict`` (name -> size)."""
    return {name: create_embedding(size)
            for name, size in embedding_meta_dict.items()}
class FeatureCrossComputer(object):
    """Expands a list of feature-group tensors with all pairwise crossings.

    The result keeps the original groups first, followed by the
    concatenation (along dim=1) of every unordered pair (i, j) with i < j,
    in lexicographic order.
    """

    def compute(self, feature_group_list):
        crossed = list(feature_group_list)
        for idx, left in enumerate(feature_group_list):
            for right in feature_group_list[idx + 1:]:
                crossed.append(torch.cat((left, right), dim=1))
        return crossed
def compute_feature_crossing(feature_cross_model, feature_group_list):
    """Run the optional crossing model; identity when no model is given."""
    if not feature_cross_model:
        return feature_group_list
    return feature_cross_model.compute(feature_group_list)
class GlobalModel(object):
    def __init__(self,
                 source_classifier,
                 regional_model_list,
                 embedding_dict,
                 partition_data_fn,
                 beta=1.0,
                 pos_class_weight=1.0,
                 loss_name="BCE",
                 feature_cross_model=None,
                 feature_interaction_model=None,
                 discriminator=None):
        """Assemble the federated global model from its sub-models.

        :param source_classifier: top-level classifier applied to the
            concatenated feature-group outputs.
        :param regional_model_list: per-region sub-models (None -> empty list).
        :param embedding_dict: name -> nn.Embedding, used by _combine_features.
        :param partition_data_fn: callable splitting a batch into
            (wide_list, deep_partition_list).
        :param beta: weight of the domain loss in the total loss.
        :param pos_class_weight: positive-class weight for the classifier loss.
        :param loss_name: "CE" (CrossEntropyLoss) or "BCE" (BCEWithLogitsLoss).
        :param feature_cross_model: optional pairwise feature-crossing model.
        :param feature_interaction_model: optional feature-interaction model.
        :param discriminator: optional global domain discriminator.
        :raises RuntimeError: for an unsupported ``loss_name``.
        """
        self.global_discriminator = discriminator
        self.source_classifier = source_classifier
        self.regional_model_list = list() if regional_model_list is None else regional_model_list
        self.feature_cross_model = feature_cross_model
        self.feature_interaction_model = feature_interaction_model
        self.embedding_dict = embedding_dict
        self.loss_name = loss_name
        if loss_name == "CE":
            # CE path expects integer class labels; weight covers both classes.
            self.classifier_criterion = nn.CrossEntropyLoss(weight=torch.tensor([1.0, pos_class_weight]))
        elif loss_name == "BCE":
            self.classifier_criterion = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(pos_class_weight))
        else:
            raise RuntimeError(f"Does not support loss:{loss_name}")
        self.beta = beta
        self.partition_data_fn = partition_data_fn
        self.discriminator_criterion = nn.CrossEntropyLoss()
    def print_parameters(self, print_all=False):
        """Debug helper: print parameter tensors/grad flags of the sub-models.

        Always prints the global classifier's parameters; with
        ``print_all=True`` also prints regional and embedding parameters.
        """
        print("-" * 50)
        print("Global models:")
        for name, param in self.source_classifier.named_parameters():
            # if param.requires_grad:
            print(f"{name}: {param.data}, {param.requires_grad}")
            # print(f"{name}: {param.requires_grad}")
        if print_all:
            print("Region models:")
            for wrapper in self.regional_model_list:
                wrapper.print_parameters()
            print("Embedding models:")
            for emb in self.embedding_dict.values():
                for name, param in emb.named_parameters():
                    print(f"{name}: {param.requires_grad}")
        print("-" * 50)
def get_global_classifier_parameters(self, get_tensor=False):
param_dict = dict()
for name, param in self.source_classifier.named_parameters():
if get_tensor:
param_dict[name] = param
else:
param_dict[name] = param.data.tolist()
return param_dict
    def load_model(self, root, task_id, task_meta_file_name="task_meta", load_global_classifier=True, timestamp=None):
        """Restore all sub-models from a checkpoint written by ``save_model``.

        Locates ``<root>/<task_id>/models_checkpoint_<timestamp>/`` (latest
        timestamp when None), reads the task-meta JSON and loads state dicts
        for the classifier (optional), discriminator, embeddings, regional
        models and the interaction model.

        :raises FileNotFoundError: when an expected folder/file is missing.
        """
        task_folder = task_id
        task_folder_path = os.path.join(root, task_folder)
        if not os.path.exists(task_folder_path):
            raise FileNotFoundError(f"{task_folder_path} is not found.")
        print(f"[INFO] load model from:{task_folder_path}")
        if timestamp is None:
            # Fall back to the most recent checkpoint in the task folder.
            timestamp = get_latest_timestamp("models_checkpoint", task_folder_path)
            print(f"[INFO] get latest timestamp {timestamp}")
        model_checkpoint_folder = "models_checkpoint_" + str(timestamp)
        model_checkpoint_folder = os.path.join(task_folder_path, model_checkpoint_folder)
        if not os.path.exists(model_checkpoint_folder):
            raise FileNotFoundError(f"{model_checkpoint_folder} is not found.")
        task_meta_file_name = str(task_meta_file_name) + "_" + str(timestamp) + '.json'
        task_meta_file_path = os.path.join(model_checkpoint_folder, task_meta_file_name)
        if not os.path.exists(task_meta_file_path):
            raise FileNotFoundError(f"{task_meta_file_path} is not found.")
        with open(task_meta_file_path) as json_file:
            print(f"[INFO] load task meta file from {task_meta_file_path}")
            task_meta_dict = json.load(json_file)
        if load_global_classifier:
            global_classifier_path = task_meta_dict["global_part"]["classifier"]
            self.source_classifier.load_state_dict(torch.load(global_classifier_path))
            print(f"[INFO] load global classifier from {global_classifier_path}")
        # load global discriminator
        global_discriminator_path = task_meta_dict["global_part"]["discriminator"]
        if self.global_discriminator:
            self.global_discriminator.load_state_dict(torch.load(global_discriminator_path))
            print(f"[INFO] load global discriminator from {global_discriminator_path}")
        # load embeddings
        embedding_meta_dict = task_meta_dict["global_part"]["embeddings"]
        for key, emb_path in embedding_meta_dict.items():
            print(f"[INFO] load embedding of [{key}] from {emb_path}")
            self.embedding_dict[key].load_state_dict(torch.load(emb_path))
        # load region models
        region_part_dict = task_meta_dict["region_part"]
        num_region = len(region_part_dict)
        # The checkpoint must describe exactly the configured regions.
        assert num_region == len(self.regional_model_list)
        for idx, region_model in enumerate(self.regional_model_list):
            region = "region_" + str(idx)
            region_model.load_model(region_part_dict[region]["models"])
        # load interactive model
        if self.feature_interaction_model:
            interactive_model_part_dict = task_meta_dict["interactive_part"]
            self.feature_interaction_model.load_model(interactive_model_part_dict)
def save_model(self, root, task_id, file_name="task_meta", timestamp=None):
"""Save trained model."""
if timestamp is None:
raise RuntimeError("timestamp is missing.")
task_folder = task_id
task_root_folder = os.path.join(root, task_folder)
if not os.path.exists(task_root_folder):
os.makedirs(task_root_folder)
model_checkpoint_folder = "models_checkpoint_" + str(timestamp)
model_checkpoint_folder = os.path.join(task_root_folder, model_checkpoint_folder)
if not os.path.exists(model_checkpoint_folder):
os.makedirs(model_checkpoint_folder)
extension = ".pth"
# save global model
global_classifier = "global_classifier_" + str(timestamp) + extension
global_classifier_path = os.path.join(model_checkpoint_folder, global_classifier)
global_discriminator = "global_discriminator" + str(timestamp) + extension
global_discriminator_path = os.path.join(model_checkpoint_folder, global_discriminator)
model_meta = dict()
model_meta["global_part"] = dict()
model_meta["global_part"]["classifier"] = global_classifier_path
model_meta["global_part"]["discriminator"] = global_discriminator_path
torch.save(self.source_classifier.state_dict(), global_classifier_path)
if self.global_discriminator:
torch.save(self.global_discriminator.state_dict(), global_discriminator_path)
print(f"[INFO] saved global classifier model to: {global_classifier_path}")
print(f"[INFO] saved global discriminator model to: {global_discriminator_path}")
# save embeddings
embedding_meta_dict = dict()
for key, emb in self.embedding_dict.items():
emb_file_name = "embedding_" + key + "_" + str(timestamp) + extension
emb_path = os.path.join(model_checkpoint_folder, emb_file_name)
torch.save(emb.state_dict(), emb_path)
print(f"[INFO] saved embedding of [{key}] to: {emb_path}")
embedding_meta_dict[key] = emb_path
model_meta["global_part"]["embeddings"] = embedding_meta_dict
# save region models
model_meta["region_part"] = dict()
for idx, regional_model in enumerate(self.regional_model_list):
region = "region_" + str(idx)
res = regional_model.save_model(model_checkpoint_folder, region + "_" + str(timestamp) + extension)
model_meta["region_part"][region] = dict()
model_meta["region_part"][region]["order"] = idx
model_meta["region_part"][region]["models"] = res
# save interactive model
if self.feature_interaction_model:
interactive_model = self.feature_interaction_model.save_model(model_checkpoint_folder,
str(timestamp) + extension)
model_meta["interactive_part"] = interactive_model
file_name = str(file_name) + "_" + str(timestamp) + '.json'
file_full_name = os.path.join(model_checkpoint_folder, file_name)
with open(file_full_name, 'w') as outfile:
json.dump(model_meta, outfile)
return model_meta
def freeze_source_classifier(self, is_freeze=False):
for param in self.source_classifier.parameters():
param.requires_grad = not is_freeze
def freeze_bottom(self, is_freeze=False, region_idx_list=None):
# freeze global discriminator model
if self.global_discriminator:
for param in self.global_discriminator.parameters():
param.requires_grad = not is_freeze
# freeze region models
for rg_model in self.regional_model_list:
for param in rg_model.parameters():
param.requires_grad = not is_freeze
# freeze embedding
for emb in self.embedding_dict.values():
for param in emb.parameters():
param.requires_grad = not is_freeze
# freeze interaction model
if self.feature_interaction_model:
self.feature_interaction_model.freeze(is_freeze)
# TODO add feature_interactive_model
def freeze_bottom_extractors(self, is_freeze=False, region_idx_list=None):
if region_idx_list is None:
for rg_model in self.regional_model_list:
for param in rg_model.extractor_parameters():
param.requires_grad = not is_freeze
else:
print(f"freeze region idx list:{region_idx_list}")
for region_idx in region_idx_list:
for param in self.regional_model_list[region_idx].extractor_parameters():
param.requires_grad = not is_freeze
# TODO add feature_interactive_model
def freeze_bottom_aggregators(self, is_freeze=False, region_idx_list=None):
if region_idx_list is None:
for rg_model in self.regional_model_list:
for param in rg_model.aggregator_parameters():
param.requires_grad = not is_freeze
else:
print(f"freeze region idx list:{region_idx_list}")
for region_idx in region_idx_list:
for param in self.regional_model_list[region_idx].aggregator_parameters():
param.requires_grad = not is_freeze
def get_num_regions(self):
num_feature_groups = len(self.regional_model_list)
if self.feature_interaction_model:
num_feature_groups += self.feature_interaction_model.get_num_feature_groups()
return num_feature_groups
def check_discriminator_exists(self):
for rg_model in self.regional_model_list:
if rg_model.has_discriminator() is False:
raise RuntimeError('Discriminator not set.')
if self.feature_interaction_model: self.feature_interaction_model.check_discriminator_exists()
def change_to_train_mode(self):
self.source_classifier.train()
for rg_model in self.regional_model_list:
rg_model.change_to_train_mode()
for embedding in self.embedding_dict.values():
embedding.train()
if self.feature_interaction_model: self.feature_interaction_model.change_to_train_mode()
def change_to_eval_mode(self):
self.source_classifier.eval()
for rg_model in self.regional_model_list:
rg_model.change_to_eval_mode()
for embedding in self.embedding_dict.values():
embedding.eval()
if self.feature_interaction_model: self.feature_interaction_model.change_to_eval_mode()
def parameters(self):
param_list = list(self.source_classifier.parameters())
for rg_model in self.regional_model_list:
param_list += rg_model.parameters()
for embedding in self.embedding_dict.values():
param_list += embedding.parameters()
if self.feature_interaction_model:
param_list += self.feature_interaction_model.parameters()
return param_list
def source_classifier_parameters(self):
return list(self.source_classifier.parameters())
    def _combine_features(self, feat_dict):
        """
        Concatenate one feature group's embedded and raw features into a tensor.

        :param feat_dict: dict with optional keys "embeddings" (name -> index
            tensor, looked up in ``self.embedding_dict``) and "non_embedding"
            (name -> dense tensor; 1-D tensors are reshaped to column vectors).
        :return: concatenation (dim=1) of all resulting feature tensors.
        """
        features_list = []
        # print("feat_dict", feat_dict)
        embeddings = feat_dict.get("embeddings")
        if embeddings is not None:
            for key, feat in embeddings.items():
                embedding = self.embedding_dict[key]
                # Embedding lookup requires integer indices.
                feat = feat.long()
                # print("@@:", key, feat)
                emb = embedding(feat)
                features_list.append(emb)
        non_embedding = feat_dict.get("non_embedding")
        if non_embedding is not None:
            for _, feat in non_embedding.items():
                # print(f"feat shape:{feat.shape}")
                # Promote 1-D features to (batch, 1) so they can concatenate.
                if len(feat.shape) == 1:
                    features_list.append(feat.reshape(-1, 1))
                else:
                    features_list.append(feat)
        # NOTE(review): torch.cat on an empty list raises — feat_dict is
        # assumed to contribute at least one feature; confirm against callers.
        comb_feat_tensor = torch.cat(features_list, dim=1)
        # print(f"comb_feat_tensor shape:{comb_feat_tensor.shape}, {comb_feat_tensor}")
        return comb_feat_tensor
def is_regional_model_list_empty(self):
if self.regional_model_list is None or len(self.regional_model_list) == 0:
return True
return False
    def compute_feature_group_loss(self,
                                   total_domain_loss,
                                   src_feat_gp_list,
                                   tgt_feat_gp_list,
                                   domain_source_labels,
                                   domain_target_labels,
                                   **kwargs):
        """Accumulate each regional model's domain loss.

        Regional models and feature-group lists are index-aligned. Returns
        the updated ``total_domain_loss`` plus per-region source and target
        output lists (empty when no regional models are configured).
        """
        src_output_list = list()
        tgt_output_list = list()
        if self.is_regional_model_list_empty() is False:
            for regional_model, src_fg, tgt_fg in zip(self.regional_model_list, src_feat_gp_list, tgt_feat_gp_list):
                domain_loss, src_output, tgt_output = regional_model.compute_loss(src_fg,
                                                                                  tgt_fg,
                                                                                  domain_source_labels,
                                                                                  domain_target_labels,
                                                                                  **kwargs)
                src_output_list.append(src_output)
                tgt_output_list.append(tgt_output)
                total_domain_loss += domain_loss
        return total_domain_loss, src_output_list, tgt_output_list
    def compute_feature_group_interaction_loss(self,
                                               total_domain_loss,
                                               src_feat_gp_list,
                                               tgt_feat_gp_list,
                                               domain_source_labels,
                                               domain_target_labels,
                                               **kwargs):
        """Add the interaction model's domain loss, when one is configured.

        Returns the updated ``total_domain_loss`` plus the interaction
        model's source/target output lists (empty when absent).
        """
        src_output_list = list()
        tgt_output_list = list()
        if self.feature_interaction_model:
            intr_domain_loss, src_int_output_list, tgt_int_output_list = self.feature_interaction_model.compute_loss(
                src_feat_gp_list,
                tgt_feat_gp_list,
                domain_source_labels,
                domain_target_labels,
                **kwargs)
            total_domain_loss += intr_domain_loss
            src_output_list += src_int_output_list
            tgt_output_list += tgt_int_output_list
        return total_domain_loss, src_output_list, tgt_output_list
    def compute_total_loss(self,
                           source_data, target_data,
                           source_label, target_label,
                           domain_source_labels, domain_target_labels,
                           **kwargs):
        """Total source-domain loss: classification + beta * domain loss.

        Partitions both batches into wide/deep parts, builds (optionally
        crossed) feature groups, accumulates regional and interaction domain
        losses, then classifies the concatenated wide + group outputs.
        Returns ``{"src_total_loss": ...}``. ``target_label`` is accepted
        but not used in this computation.
        """
        src_wide_list, src_deep_par_list = self.partition_data_fn(source_data)
        tgt_wide_list, tgt_deep_par_list = self.partition_data_fn(target_data)
        # One combined tensor per deep feature group, for each domain.
        src_feat_gp_list = list()
        tgt_feat_gp_list = list()
        for src_data, tgt_data in zip(src_deep_par_list, tgt_deep_par_list):
            src_feat_gp_list.append(self._combine_features(src_data))
            tgt_feat_gp_list.append(self._combine_features(tgt_data))
        src_feat_gp_list = compute_feature_crossing(self.feature_cross_model, src_feat_gp_list)
        tgt_feat_gp_list = compute_feature_crossing(self.feature_cross_model, tgt_feat_gp_list)
        region_domain_loss = torch.tensor(0.)
        src_all_output_list = list()
        tgt_all_output_list = list()
        region_domain_loss, src_output_list, tgt_output_list = self.compute_feature_group_loss(region_domain_loss,
                                                                                               src_feat_gp_list,
                                                                                               tgt_feat_gp_list,
                                                                                               domain_source_labels,
                                                                                               domain_target_labels,
                                                                                               **kwargs)
        src_all_output_list += src_output_list
        tgt_all_output_list += tgt_output_list
        region_domain_loss, src_output_list, tgt_output_list = self.compute_feature_group_interaction_loss(
            region_domain_loss,
            src_feat_gp_list,
            tgt_feat_gp_list,
            domain_source_labels,
            domain_target_labels,
            **kwargs)
        src_all_output_list += src_output_list
        tgt_all_output_list += tgt_output_list
        # Prepend raw "wide" features (when any) to the learned group outputs.
        src_fed_output_list = src_wide_list + src_all_output_list if len(src_wide_list) > 0 else src_all_output_list
        src_fed_output = torch.cat(src_fed_output_list, dim=1)
        # print(f"[DEBUG] src_all_output_list shape:{len(src_all_output_list)}")
        # print(f"[DEBUG] tgt_all_output_list shape:{len(tgt_all_output_list)}")
        # print(f"[DEBUG] src_fed_output shape:{src_fed_output.shape}")
        src_fed_prediction = self.source_classifier(src_fed_output)
        # compute global classification loss
        if self.loss_name == "CE":
            # CrossEntropyLoss expects 1-D integer class labels.
            source_label = source_label.flatten().long()
        else:
            # using BCELogitLoss
            source_label = source_label.reshape(-1, 1).type_as(src_fed_prediction)
        src_class_loss = self.classifier_criterion(src_fed_prediction, source_label)
        src_total_loss = src_class_loss + self.beta * region_domain_loss
        return {"src_total_loss": src_total_loss}
def _calculate_feature_group_output_list(self, deep_par_list):
fg_list = []
for fg_data in deep_par_list:
fg_list.append(self._combine_features(fg_data))
output_list = None if self.is_regional_model_list_empty() is True \
else [regional_model.compute_aggregated_output(fg) for regional_model, fg in
zip(self.regional_model_list, fg_list)]
# compute output from feature interaction model that wraps feature interactions
fgi_output_list = None if self.feature_interaction_model is None \
else self.feature_interaction_model.compute_output_list(fg_list)
return output_list, fgi_output_list
def calculate_feature_group_embedding_list(self, data):
_, deep_par_list = self.partition_data_fn(data)
fg_list = []
for fg_data in deep_par_list:
fg_list.append(self._combine_features(fg_data))
output_list = [regional_model.compute_feature_group_embedding(fg) for regional_model, fg in
zip(self.regional_model_list, fg_list)]
return output_list
def calculate_global_classifier_input_vector(self, data):
wide_list, deep_par_list = self.partition_data_fn(data)
if len(deep_par_list) == 0:
output_list = wide_list
else:
fg_output_list, fgi_output_list = self._calculate_feature_group_output_list(deep_par_list)
if fg_output_list is None:
all_fg_output_list = fgi_output_list
else:
all_fg_output_list = fg_output_list + fgi_output_list if fgi_output_list else fg_output_list
output_list = wide_list + all_fg_output_list if len(wide_list) > 0 else | |
0.00435488,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00248228,
'Renaming Unit/Peak Dynamic': 3.58947,
'Renaming Unit/Runtime Dynamic': 0.048789,
'Renaming Unit/Subthreshold Leakage': 0.0552466,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0276461,
'Runtime Dynamic': 2.02835,
'Subthreshold Leakage': 6.16288,
'Subthreshold Leakage with power gating': 2.55328},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.202689,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.0,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.0942803,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.152071,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.0767602,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.323111,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.107828,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.03376,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.0,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.00395454,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.028596,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0292463,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.028596,
'Execution Unit/Register Files/Runtime Dynamic': 0.0332008,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.0602439,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.175613,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 1.13999,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.000491469,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.000491469,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.000431518,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.000168934,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.000420125,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00183458,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.00458892,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0281152,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 1.78837,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.0552162,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.0954918,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 4.09368,
'Instruction Fetch Unit/Runtime Dynamic': 0.185247,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0325003,
'L2/Runtime Dynamic': 0.00796026,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 2.33463,
'Load Store Unit/Data Cache/Runtime Dynamic': 0.539007,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0355069,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0355069,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 2.5023,
'Load Store Unit/Runtime Dynamic': 0.749621,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.087554,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.175108,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0310732,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.0315608,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.111194,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.00905339,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.320681,
'Memory Management Unit/Runtime Dynamic': 0.0406142,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 14.5724,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.0,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.00425367,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.0496696,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': 0.00435488,
| |
cb.set_active(self.tree_view_dict['songs'].get_rules_hint())
hbox.pack_start(cb)
display_box.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
cb = gtk.CheckButton(_("Playlist Column"))
cb.connect("toggled", self.toggle_alternate_row_colors, 'playlist')
cb.set_active(self.tree_view_dict['playlist'].get_rules_hint())
hbox.pack_start(cb)
display_box.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
cb = gtk.CheckButton(_("Downloads Column"))
cb.connect("toggled", self.toggle_alternate_row_colors, 'downloads')
cb.set_active(self.tree_view_dict['downloads'].get_rules_hint())
hbox.pack_start(cb)
display_box.pack_start(hbox, False, False, 0)
"""End Display Settings"""
"""Start Catalog Settings"""
#################################
# catalog Settings
#################################
catalog_box = gtk.VBox()
catalog_box.set_border_width(10)
hbox = gtk.HBox()
label = gtk.Label()
label.set_markup(_('<b>Catalog Cache</b>'))
hbox.pack_start(label, False, False)
catalog_box.pack_start(hbox, False, False, 3)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
cb = gtk.CheckButton(_("Automatically clear local catalog when Ampache is updated"))
cb.set_active(self.automatically_update)
cb.connect("toggled", self.toggle_automatically_update)
hbox.pack_start(cb)
catalog_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
label = gtk.Label()
label.set_line_wrap(True)
image = gtk.Image()
if self.ampache_conn.has_credentials() and self.ampache_conn.is_authenticated():
if self.catalog_up_to_date:
image.set_from_stock(gtk.STOCK_YES,gtk.ICON_SIZE_SMALL_TOOLBAR)
label.set_text(_("Local catalog is up-to-date."))
else:
image.set_from_stock(gtk.STOCK_NO,gtk.ICON_SIZE_SMALL_TOOLBAR)
label.set_text(_("Local catalog is older than Ampache catalog! To update the local catalog go to File -> Clear Local Cache."))
hbox.pack_start(image, False, False, 5)
hbox.pack_start(label, False, False, 0)
catalog_box.pack_start(hbox, False, False, 2)
"""End Catalog Settings"""
"""Start Download Settings"""
#################################
# Download Settings
#################################
download_box = gtk.VBox(False, 0)
download_box.set_border_width(10)
hbox = gtk.HBox()
label = gtk.Label()
label.set_markup(_('<b>Local Downloads</b>'))
hbox.pack_start(label, False, False)
download_box.pack_start(hbox, False, False, 3)
hbox = gtk.HBox()
label = gtk.Label(_(" Select where downloaded files should go."))
hbox.pack_start(label, False, False, 4)
download_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 1)
self.downloads_text_entry = gtk.Entry()
self.downloads_text_entry.set_text(self.downloads_directory)
hbox.pack_start(self.downloads_text_entry)
fcbutton = gtk.Button(stock=gtk.STOCK_OPEN)
fcbutton.connect('clicked', self.button_open_downloads_file_chooser_clicked)
hbox.pack_start(fcbutton, False, False, 4)
download_box.pack_start(hbox, False, False, 2)
"""End Download Settings"""
"""Start Tray Icon Settings"""
#################################
# Tray Icon Settings
#################################
trayicon_box = gtk.VBox(False, 0)
trayicon_box.set_border_width(10)
hbox = gtk.HBox()
label = gtk.Label()
label.set_markup(_('<b>Status Tray Icon</b>'))
hbox.pack_start(label, False, False)
trayicon_box.pack_start(hbox, False, False, 3)
cb = gtk.CheckButton(_("Quit Viridian when window is closed"))
cb.connect("toggled", self.toggle_quit_when_window_closed)
cb.set_active(self.quit_when_window_closed)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
button = gtk.RadioButton(None, _("Standard Tray Icon"))
button.connect("toggled", self.trayicon_settings_toggled, "standard", cb)
if self.tray_icon_to_display == 'standard':
button.set_active(True)
hbox.pack_start(button, True, True, 0)
trayicon_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
button = gtk.RadioButton(button, _("Unified Sound Icon ( Ubuntu 10.10 or higher )"))
button.connect("toggled", self.trayicon_settings_toggled, "unified", cb)
button.set_sensitive(False) # Ubuntu unified sound
if self.tray_icon_to_display == 'unified':
button.set_active(True)
hbox.pack_start(button, True, True, 0)
trayicon_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
button = gtk.RadioButton(button, _("Disabled"))
button.connect("toggled", self.trayicon_settings_toggled, "disabled", cb)
if self.tray_icon_to_display == 'disabled':
button.set_active(True)
hbox.pack_start(button, True, True, 0)
trayicon_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
hbox.pack_start(cb, True, True, 0)
trayicon_box.pack_start(hbox, False, False, 5)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
label = gtk.Label(_("Note: changes to the type of icon will take effect the next time this program is opened."))
label.set_line_wrap(True)
hbox.pack_start(label, False, False, 4)
trayicon_box.pack_start(hbox, False, False, 5)
"""End Tray Icon Settings"""
"""Start Server Settings"""
#################################
# Server Settings
#################################
server_box = gtk.VBox(False, 0)
server_box.set_border_width(10)
hbox = gtk.HBox()
label = gtk.Label()
label.set_markup(_('<b>Server Settings</b>'))
hbox.pack_start(label, False, False)
server_box.pack_start(hbox, False, False, 3)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
hbox.pack_start(gtk.Label(_("XML RPC Server: ")), False, False, 0)
label = gtk.Label()
image = gtk.Image()
if self.xml_server.is_running:
image.set_from_stock(gtk.STOCK_YES,gtk.ICON_SIZE_SMALL_TOOLBAR)
label.set_text(_("Running. (port %d)") % self.xml_server.port)
else:
image.set_from_stock(gtk.STOCK_NO,gtk.ICON_SIZE_SMALL_TOOLBAR)
label.set_text(_("Not Running."))
hbox.pack_start(image, False, False, 5)
hbox.pack_start(label, False, False, 0)
server_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
port = gtk.Entry()
button = gtk.Button(_("Start"))
button.connect("clicked", self.button_xml_server_clicked, 'start', label, image, port)
#button.set_sensitive(False)
hbox.pack_start(button, True, True, 0)
button = gtk.Button(_("Stop"))
button.connect("clicked", self.button_xml_server_clicked, 'stop', label, image, port)
#button.set_sensitive(False)
hbox.pack_start(button, True, True, 0)
button = gtk.Button(_("Restart"))
button.connect("clicked", self.button_xml_server_clicked, 'restart', label, image, port)
#button.set_sensitive(False)
hbox.pack_start(button, True, True, 0)
hbox.pack_start(gtk.Label(_('Port: ')), False, False, 1)
port.set_text(str(self.db_session.variable_get('xmlrpc_port', XML_RPC_PORT)))
hbox.pack_start(port)
server_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
cb = gtk.CheckButton(_("Start XML RPC server when Viridan starts"))
cb.connect("toggled", self.toggle_start_xml_rpc_server)
start_xml_rpc_server = self.db_session.variable_get('enable_xmlrpc_server', False)
cb.set_active(start_xml_rpc_server)
hbox.pack_start(cb, False, False, 1)
server_box.pack_start(hbox, False, False, 2)
"""End Server Settings"""
"""Start System Settings"""
#################################
# System Settings
#################################
system_box = gtk.VBox()
system_box.set_border_width(10)
hbox = gtk.HBox()
label = gtk.Label()
label.set_markup(_('<b>System</b>'))
hbox.pack_start(label, False, False)
system_box.pack_start(hbox, False, False, 3)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
label = gtk.Label(_("To delete all personal information (including your username, password, album-art, cached information, etc.) press this button. NOTE: This will delete all personal settings stored on this computer and Viridian will close itself. When you reopen, it will be as though it is the first time you are running Viridian."))
label.set_line_wrap(True)
hbox.pack_start(label, False, False)
system_box.pack_start(hbox, False, False, 2)
hbox = gtk.HBox()
hbox.pack_start(gtk.Label(" "), False, False, 0)
cb = gtk.Button(_("Reset Everything"))
cb.connect("clicked", self.button_reset_everything_clicked)
hbox.pack_start(cb, False, False, 2)
system_box.pack_start(hbox, False, False, 2)
"""End System Settings"""
"""End Notebook"""
notebook.append_page(account_box, gtk.Label(_("Account")))
notebook.append_page(display_box, gtk.Label(_("Display")))
notebook.append_page(catalog_box, gtk.Label(_("Catalog")))
notebook.append_page(download_box, gtk.Label(_("Downloads")))
notebook.append_page(trayicon_box, gtk.Label(_("Tray Icon")))
notebook.append_page(server_box, gtk.Label(_("Server")))
notebook.append_page(system_box, gtk.Label(_("System")))
"""Start Bottom Bar"""
bottom_bar = gtk.HBox()
close = gtk.Button(stock=gtk.STOCK_CLOSE)
close.connect("clicked", self.button_cancel_preferences_clicked, self.preferences_window)
bottom_bar.pack_end(close, False, False, 2)
"""End Bottom Bar"""
main_vbox.pack_start(notebook)
main_vbox.pack_start(bottom_bar, False, False, 2)
"""End bottom row"""
self.preferences_window.add(main_vbox)
self.preferences_window.show_all()
    def destroy_settings(self, widget=None, data=None):
        """Close the preferences window."""
        # Destroy the widget, then drop our reference so the preferences
        # window can be recreated the next time it is opened.
        self.preferences_window.destroy()
        self.preferences_window = None
def show_help(self, widget, data=None):
"""The Help pane"""
#################################
# Help Window
#################################
if hasattr(self, 'help_window'):
if self.help_window != None:
self.help_window.present()
return True
self.help_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.help_window.set_transient_for(self.window)
self.help_window.set_title(_("Viridian Help"))
self.help_window.set_icon(self.images_pixbuf_viridian_simple)
self.help_window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
self.help_window.resize(350, 300)
self.help_window.set_resizable(False)
self.help_window.connect("delete_event", self.destroy_help)
self.help_window.connect("destroy", self.destroy_help)
vbox = gtk.VBox(False, 4)
vbox.set_border_width(10)
label = gtk.Label()
label.set_markup(_('<span size="14000"><b>Viridian Help</b></span>'))
vbox.pack_start(label, False, False, 1)
hbox = gtk.HBox()
label = gtk.Label(_("Home Page:"))
link = guifunctions.hyperlink('http://viridian.daveeddy.com')
hbox.pack_start(label, False, False, 1)
hbox.pack_start(link, False, False, 2)
vbox.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
label = gtk.Label(_("Launchpad:"))
link = guifunctions.hyperlink('https://launchpad.net/viridianplayer')
hbox.pack_start(label, False, False, 1)
hbox.pack_start(link, False, False, 2)
vbox.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
label = gtk.Label(_("FAQ:"))
link = guifunctions.hyperlink('https://answers.launchpad.net/viridianplayer/+faqs')
hbox.pack_start(label, False, False, 1)
hbox.pack_start(link, False, False, 2)
vbox.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
label = gtk.Label(_("Bugs:"))
link = guifunctions.hyperlink('https://bugs.launchpad.net/viridianplayer')
hbox.pack_start(label, False, False, 1)
hbox.pack_start(link, False, False, 2)
vbox.pack_start(hbox, False, False, 0)
hbox = gtk.HBox()
label = gtk.Label(_("Questions:"))
link = guifunctions.hyperlink('https://answers.launchpad.net/viridianplayer')
hbox.pack_start(label, False, False, 1)
hbox.pack_start(link, False, False, 2)
vbox.pack_start(hbox, False, False, 0)
self.help_window.add(vbox)
self.help_window.show_all()
    def destroy_help(self, widget=None, data=None):
        """Close the help window and clear the reference so it can be reopened."""
        self.help_window.destroy()
        self.help_window = None
    def show_playlist_select(self, type=None):
        """Show the playlist selection window.

        `type` must be "Load", "Save", or "Export"; anything else is
        rejected. The window lists playlists and offers a type-specific
        action button (Open / Save / Export as M3U).
        """
        #################################
        # playlist select
        #################################
        # Reject unknown modes up front.
        if type != "Load" and type != "Save" and type != "Export":
            return True
        # Single-instance behavior: re-present an existing window.
        if hasattr(self, 'playlist_select_window'):
            if self.playlist_select_window != None:
                self.playlist_select_window.present()
                return True
        self.playlist_select_window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.playlist_select_window.set_transient_for(self.window)
        self.playlist_select_window.set_position(gtk.WIN_POS_CENTER_ON_PARENT)
        self.playlist_select_window.resize(490, 300)
        self.playlist_select_window.set_resizable(True)
        self.playlist_select_window.set_icon(self.images_pixbuf_viridian_simple)
        self.playlist_select_window.connect("delete_event", self.destroy_playlist)
        self.playlist_select_window.connect("destroy", self.destroy_playlist)
        self.playlist_select_window.set_title(type + " playlist")
        vbox = gtk.VBox()
        vbox.set_border_width(10)
        hbox = gtk.HBox()
        hbox.pack_start(gtk.Label("Select a Playlist to " + type + "..."), False, False, 2)
        vbox.pack_start(hbox, False, False, 2)
        scrolled_window = gtk.ScrolledWindow()
        scrolled_window.set_shadow_type(gtk.SHADOW_ETCHED_IN)
        scrolled_window.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        # Model columns: name, items, owner, type, id
        playlist_list_store = gtk.ListStore(str, int, str, str, int)
        tree_view = gtk.TreeView(playlist_list_store)
        tree_view.set_rules_hint(True)
        # Build one view column per header; the Name column renders markup
        # (column 0 holds HTML-ish text), the others use the shared helper.
        i = 0
        for column in (_("Name"), _("Songs"), _("Owner"), _("Type")):
            if column == _("Name"):
                renderer_text = gtk.CellRendererText()
                new_column = gtk.TreeViewColumn(column, renderer_text, markup=0)
            else:
                new_column = guifunctions.create_column(column, i)
            new_column.set_reorderable(True)
            new_column.set_resizable(True)
            new_column.set_sizing(gtk.TREE_VIEW_COLUMN_FIXED)
            if column == _("Name"):
                new_column.set_fixed_width(200)
            elif column == _("Songs"):
                new_column.set_fixed_width(70)
            elif column == _("Owner"):
                new_column.set_fixed_width(90)
            elif column == _("Type"):
                new_column.set_fixed_width(60)
            tree_view.append_column(new_column)
            i += 1
        scrolled_window.add(tree_view)
        vbox.pack_start(scrolled_window, True, True, 5)
        # Name entry is created unconditionally (the action handler receives
        # it in every mode) but only shown when saving.
        text_entry = gtk.Entry()
        text_entry.set_text('')
        if type == 'Save':
            hbox = gtk.HBox()
            hbox.pack_start(gtk.Label(_("Playlist Name: ")), False, False, 1)
            hbox.pack_start(text_entry, False, False, 2)
            vbox.pack_start(hbox, False, False, 2)
        bottom_bar = gtk.HBox()
        remove = gtk.Button(stock=gtk.STOCK_DELETE)
        remove.connect("clicked", self.button_delete_playlist_clicked, tree_view.get_selection(), type)
        close = gtk.Button(stock=gtk.STOCK_CLOSE)
        close.connect("clicked", self.destroy_playlist)
        # Action button label depends on the mode; Save is the default.
        button = gtk.Button(stock=gtk.STOCK_SAVE)
        if type == 'Load':
            button = gtk.Button(stock=gtk.STOCK_OPEN)
        elif type == 'Export':
            button = gtk.Button(_("Export as M3U..."))
        button.connect("clicked", self.button_load_or_save_playlist_clicked, tree_view.get_selection(), text_entry, type)
        bottom_bar.pack_start(remove, False, False, 2)
        bottom_bar.pack_end(button, False, False, 2)
        bottom_bar.pack_end(close, False, False, 2)
        vbox.pack_start(bottom_bar, False, False, 1)
        self.playlist_select_window.add(vbox)
        # Populate the list store before showing the window.
        self.update_playlist_select(type, playlist_list_store)
        self.playlist_select_window.show_all()
def update_playlist_select(self, type, playlist_list_store):
"""Refresh the contents of the playlist list store"""
playlist_list_store.clear()
if type == "Load": # load playlists window
# get playlists from Ampache
ampache_playlists = self.ampache_conn.get_playlists()
if ampache_playlists == None:
ampache_playlists = []
print(ampache_playlists)
playlist_list_store.append(['<b> - Ampache Playlists - </b>', len(ampache_playlists), '----', '----', -1])
if len(ampache_playlists) == 0:
playlist_list_store.append(['<i>-(None)-</i>', 0, '', '', -1])
else:
for playlist in ampache_playlists:
playlist_list_store.append([ helperfunctions.convert_string_to_html(playlist['name']), playlist['items'], playlist['owner'], playlist['type'], playlist['id']])
# get playlists stored locally
local_playlists = dbfunctions.get_playlists(self.db_session)
playlist_list_store.append(['<b> - Local Playlists | |
= block
    def diff_info(self):
        # Key identifying this node for script diffing: node type plus the
        # loop condition.
        return (While, self.condition)
    def get_children(self, f):
        """Apply `f` to this node and, recursively, to each node in the loop body."""
        f(self)
        for i in self.block:
            i.get_children(f)
    def chain(self, next): # @ReservedAssignment
        """Set this node's successor. The loop body is chained back to this
        While node, so control returns here to re-test the condition."""
        self.next = next
        chain_block(self.block, self)
    def replace_next(self, old, new):
        """Replace `old` with `new` as a successor: in self.next (via Node),
        and by prepending `new` to the body when `old` is its first node."""
        Node.replace_next(self, old, new)
        if self.block and (self.block[0] is old):
            self.block.insert(0, new)
    def execute(self):
        # Default successor: fall out of the loop.
        next_node(self.next)
        statement_name("while")
        # If the condition holds, enter the loop body instead; the body is
        # chained back to this node (see chain), giving while semantics.
        if renpy.python.py_eval(self.condition):
            next_node(self.block[0])
    def predict(self):
        # Both successors are possible: the loop body entry or the node
        # after the loop.
        return [ self.block[0], self.next ]
    def scry(self):
        rv = Node.scry(self)
        # The static successor is unknown here, since it depends on the
        # runtime value of the loop condition.
        rv._next = None
        return rv
    def restructure(self, callback):
        # Hand the loop body to the callback for in-place restructuring.
        callback(self.block)
class If(Node):

    __slots__ = [ 'entries' ]

    def __init__(self, loc, entries):
        """
        @param entries: A list of (condition, block) tuples.
        """

        super(If, self).__init__(loc)
        self.entries = entries

    def diff_info(self):
        # All If nodes diff alike; their branches are compared separately.
        return (If,)

    def get_children(self, f):
        """Apply `f` to this node and to every node in every branch."""
        f(self)

        for _condition, branch in self.entries:
            for node in branch:
                node.get_children(f)

    def chain(self, next): # @ReservedAssignment
        """Point this node, and the tail of each branch, at `next`."""
        self.next = next

        for _condition, branch in self.entries:
            chain_block(branch, next)

    def replace_next(self, old, new):
        Node.replace_next(self, old, new)

        # If `old` heads a branch, prepend `new` so it becomes the entry.
        for _condition, branch in self.entries:
            if branch and (branch[0] is old):
                branch.insert(0, new)

    def execute(self):
        # Fall through to the successor unless some branch condition holds.
        next_node(self.next)
        statement_name("if")

        for condition, branch in self.entries:
            if renpy.python.py_eval(condition):
                next_node(branch[0])
                return

    def predict(self):
        # Every branch entry is a possible successor, as is self.next
        # (taken when no condition is true).
        rv = [ branch[0] for _condition, branch in self.entries ]
        rv.append(self.next)
        return rv

    def scry(self):
        rv = Node.scry(self)
        # The successor depends on runtime condition values.
        rv._next = None
        return rv

    def restructure(self, callback):
        for _condition, branch in self.entries:
            callback(branch)
class UserStatement(Node):
    """A creator-defined statement, dispatched through the
    renpy.statements registry. The raw source `line` (and any sub-`block`)
    is kept so the statement can be re-parsed lazily."""
    __slots__ = [
        'line',
        'parsed',
        'block',
        'translatable' ]
    def __new__(cls, *args, **kwargs):
        # Defaults for fields that may be missing from older pickled scripts.
        self = Node.__new__(cls)
        self.block = [ ]
        self.translatable = False
        return self
    def __init__(self, loc, line, block):
        super(UserStatement, self).__init__(loc)
        self.line = line
        self.block = block
        self.parsed = None
        # Do not store the parse quite yet.
        # Parse is invoked here presumably for its side effects (early
        # error reporting); the result is deliberately discarded -- confirm.
        _parse_info = renpy.statements.parse(self, self.line, self.block)
    def diff_info(self):
        return (UserStatement, self.line)
    def execute(self):
        next_node(self.get_next())
        statement_name(self.get_name())
        self.call("execute")
    def predict(self):
        # Ask the statement what to predict, then continue to the successor.
        predictions = self.call("predict")
        if predictions is not None:
            for i in predictions:
                renpy.easy.predict(i)
        return [ self.get_next() ]
    def call(self, method, *args, **kwargs):
        # Parse on first use and cache the result for later calls.
        parsed = self.parsed
        if parsed is None:
            parsed = renpy.statements.parse(self, self.line, self.block)
            self.parsed = parsed
        return renpy.statements.call(method, parsed, *args, **kwargs)
    def get_name(self):
        # Same lazy-parse caching as call().
        parsed = self.parsed
        if parsed is None:
            parsed = renpy.statements.parse(self, self.line, self.block)
            self.parsed = parsed
        return renpy.statements.get_name(parsed)
    def get_next(self):
        # A statement may redirect control by returning a label from its
        # "next" function; otherwise fall through to the chained successor.
        rv = self.call("next")
        if rv is not None:
            return renpy.game.script.lookup(rv)
        else:
            return self.next
    def scry(self):
        rv = Node.scry(self)
        rv._next = self.get_next()
        self.call("scry", rv)
        return rv
    def get_code(self, dialogue_filter=None):
        return self.line
def create_store(name):
    # Create the python store `name`, unless it is one of the special
    # namespaces that is managed elsewhere.
    if name not in renpy.config.special_namespaces:
        renpy.python.create_store(name)
class StoreNamespace(object):
    """Adapter that lets a normal python store be driven through the same
    set() interface as a special namespace."""
    def __init__(self, store):
        # `store` is the store name, used as a key into store_dicts.
        self.store = store
    def set(self, name, value):
        renpy.python.store_dicts[self.store][name] = value
def get_namespace(store):
    """
    Returns the namespace object for `store`, and a flag that is true if the
    namespace is special, and false if it is a normal store.
    """
    special_namespaces = renpy.config.special_namespaces
    if store in special_namespaces:
        return special_namespaces[store], True
    return StoreNamespace(store), False
# Config variables that are set twice - once when the rpy is first loaded,
# and then again at init time.
EARLY_CONFIG = { "save_directory" }
class Define(Node):
    """The `define` statement: evaluates an expression and assigns the
    result to a variable in a store."""
    __slots__ = [
        'varname',
        'code',
        'store',
        ]
    def __new__(cls, *args, **kwargs):
        # Default for scripts pickled before `store` existed.
        self = Node.__new__(cls)
        self.store = 'store'
        return self
    def __init__(self, loc, store, name, expr):
        super(Define, self).__init__(loc)
        self.store = store
        self.varname = name
        # Compiled once; evaluated via py_eval_bytecode when executed.
        self.code = PyCode(expr, loc=loc, mode='eval')
    def diff_info(self):
        return (Define, self.store, self.varname)
    def early_execute(self):
        create_store(self.store)
        # A few config variables need their value before init time proper.
        if self.store == "store.config" and self.varname in EARLY_CONFIG:
            value = renpy.python.py_eval_bytecode(self.code.bytecode)
            setattr(renpy.config, self.varname, value)
    def execute(self):
        next_node(self.next)
        statement_name("define")
        value = renpy.python.py_eval_bytecode(self.code.bytecode)
        if self.store == 'store':
            # Defines in the default store are declared pure/constant and
            # recorded for the navigation dump.
            renpy.exports.pure(self.varname)
            renpy.dump.definitions.append((self.varname, self.filename, self.linenumber))
        else:
            # Strip the leading "store." prefix for the dump entry.
            renpy.dump.definitions.append((self.store[6:] + "." + self.varname, self.filename, self.linenumber))
        ns, _special = get_namespace(self.store)
        ns.set(self.varname, value)
# All the default statements, in the order they were registered.
default_statements = [ ]
class Default(Node):
    """The `default` statement: registers a variable to be assigned its
    default value later (see set_default), rather than immediately."""
    __slots__ = [
        'varname',
        'code',
        'store',
        ]
    def __new__(cls, *args, **kwargs):
        # Default for scripts pickled before `store` existed.
        self = Node.__new__(cls)
        self.store = 'store'
        return self
    def __init__(self, loc, store, name, expr):
        super(Default, self).__init__(loc)
        self.store = store
        self.varname = name
        self.code = PyCode(expr, loc=loc, mode='eval')
    def diff_info(self):
        return (Default, self.store, self.varname)
    def early_execute(self):
        create_store(self.store)
    def execute(self):
        next_node(self.next)
        statement_name("default")
        ns, special = get_namespace(self.store)
        if special:
            # Special namespaces apply the default immediately.
            value = renpy.python.py_eval_bytecode(self.code.bytecode)
            ns.set_default(self.varname, value)
            return
        # Normal stores defer the assignment until set_default is called.
        default_statements.append(self)
        if self.store == 'store':
            renpy.dump.definitions.append((self.varname, self.filename, self.linenumber))
        else:
            renpy.dump.definitions.append((self.store[6:] + "." + self.varname, self.filename, self.linenumber))
    def set_default(self, start):
        # Apply the default if it has not been applied before; the set of
        # already-defaulted names is tracked inside the store itself.
        # NOTE(review): `start` appears to distinguish a fresh game start
        # from other invocations -- confirm against callers.
        d = renpy.python.store_dicts[self.store]
        defaults_set = d.get("_defaults_set", None)
        if defaults_set is None:
            d["_defaults_set"] = defaults_set = renpy.python.RevertableSet()
        if self.varname not in defaults_set:
            d[self.varname] = renpy.python.py_eval_bytecode(self.code.bytecode)
            defaults_set.add(self.varname)
        else:
            if start and renpy.config.developer:
                raise Exception("{}.{} is being given a default a second time.".format(self.store, self.varname))
    def report_traceback(self, name, last):
        return [ (self.filename, self.linenumber, name, None) ]
class Screen(Node):
    """The `screen` statement: defines a screen when executed."""
    __slots__ = [
        'screen',
        ]
    def __init__(self, loc, screen):
        """
        `screen`
            The parsed screen object; it must provide a `name` attribute
            and a `define()` method.
        """
        super(Screen, self).__init__(loc)
        self.screen = screen
    def diff_info(self):
        return (Screen, self.screen.name)
    def execute(self):
        next_node(self.next)
        statement_name("screen")
        # Register the screen, recording where it was defined, and note it
        # for the navigation dump.
        self.screen.define((self.filename, self.linenumber))
        renpy.dump.screens.append((self.screen.name, self.filename, self.linenumber))
################################################################################
# Translations
################################################################################
class Translate(Node):
    """
    A translation block, produced either by explicit translation statements
    or implicit translation blocks.
    If language is None, when executed this transfers control to the translate
    statement in the current language, if any, and otherwise runs the block.
    If language is not None, causes an error to occur if control reaches this
    statement.
    When control normally leaves a translate statement, in any language, it
    goes to the end of the translate statement in the None language.
    """
    translation_relevant = True
    __slots__ = [
        "identifier",
        "language",
        "block",
        "after",
        ]
    def __init__(self, loc, identifier, language, block):
        super(Translate, self).__init__(loc)
        self.identifier = identifier
        self.language = language
        self.block = block
        # NOTE: self.after is assigned in chain(), not here.
    def diff_info(self):
        return (Translate, self.identifier, self.language)
    def chain(self, next): # @ReservedAssignment
        # Control enters the block if there is one; `after` records the
        # statement following the whole translate.
        if self.block:
            self.next = self.block[0]
            chain_block(self.block, next)
        else:
            self.next = next
        self.after = next
    def replace_next(self, old, new):
        Node.replace_next(self, old, new)
        if self.block and (self.block[0] is old):
            self.block.insert(0, new)
        if self.after is old:
            self.after = new
    def execute(self):
        statement_name("translate")
        # Only the language-None node may be executed directly; translated
        # variants are reached through lookup_translate below.
        if self.language is not None:
            next_node(self.next)
            raise Exception("Translation nodes cannot be run directly.")
        # Track how many translated lines the player has seen.
        if self.identifier not in renpy.game.persistent._seen_translates: # @UndefinedVariable
            renpy.game.persistent._seen_translates.add(self.identifier) # @UndefinedVariable
            renpy.game.seen_translates_count += 1
            renpy.game.new_translates_count += 1
        # Jump to the current language's translation (or this block).
        next_node(renpy.game.script.translator.lookup_translate(self.identifier))
        renpy.game.context().translate_identifier = self.identifier
        renpy.game.context().translate_block_language = self.language
    def predict(self):
        node = renpy.game.script.translator.lookup_translate(self.identifier)
        return [ node ]
    def scry(self):
        rv = Scry()
        rv._next = renpy.game.script.translator.lookup_translate(self.identifier)
        return rv
    def get_children(self, f):
        f(self)
        for i in self.block:
            i.get_children(f)
    def restructure(self, callback):
        return callback(self.block)
class EndTranslate(Node):
    """
    A node added implicitly after each translate block. It's responsible for
    resetting the translation identifier.
    """
    def __init__(self, loc):
        super(EndTranslate, self).__init__(loc)
    def diff_info(self):
        return (EndTranslate,)
    def execute(self):
        next_node(self.next)
        statement_name("end translate")
        # Clear the per-context state that Translate.execute set.
        renpy.game.context().translate_identifier = None
        renpy.game.context().translate_block_language = None
class TranslateString(Node):
    """
    A node used for translated strings: maps the `old` string to its `new`
    translation in `language`.
    """
    translation_relevant = True
    __slots__ = [
        "language",
        "old",
        "new",
        "newloc",
        ]
    def __init__(self, loc, language, old, new, newloc):
        super(TranslateString, self).__init__(loc)
        self.language = language
        self.old = old
        self.new = new
        # newloc is the (filename, line) of the translated string.
        self.newloc = newloc
    def diff_info(self):
        return (TranslateString,)
    def execute(self):
        next_node(self.next)
        statement_name("translate string")
        # getattr fallback: older pickled nodes lack the newloc slot.
        newloc = getattr(self, "newloc", (self.filename, self.linenumber + 1))
        renpy.translation.add_string_translation(self.language, self.old, self.new, newloc)
class TranslatePython(Node):
    """
    Runs python code when changing the language.
    This is no longer generated, but is still run when encountered.
    """
    translation_relevant = True
    __slots__ = [
        'language',
        'code',
        ]
    def __init__(self, loc, language, python_code):
        """
        `language`
            The language this python block belongs to.
        `python_code`
            A string of python source, compiled here into a PyCode object.
        """
        super(TranslatePython, self).__init__(loc)
        self.language = language
        self.code = PyCode(python_code, loc=loc, mode='exec')
    def diff_info(self):
        return (TranslatePython, self.code.source)
    def execute(self):
        # Intentionally a no-op at normal execution time; just advance.
        next_node(self.next)
        statement_name("translate_python")
    # def early_execute(self):
    #    renpy.python.create_store(self.store)
    #    renpy.python.py_exec_bytecode(self.code.bytecode, self.hide, store=self.store)
class TranslateBlock(Node):
    """
    Runs a block of code when changing the language.
    """
    translation_relevant = True
    __slots__ = [
        'block',
        'language',
        ]
    def __init__(self, loc, language, block):
        super(TranslateBlock, self).__init__(loc)
        self.language = language
        self.block = block
    def get_children(self, f):
        f(self)
        for i in self.block:
            i.get_children(f)
    # We handle chaining specially. We want to chain together the nodes in
    # the block, but we want that chain to end in None, and we also want
    # this node to just continue on to the next node in normal execution.
    def chain(self, next): # @ReservedAssignment
        self.next = next
        chain_block(self.block, None)
    def execute(self):
        # No-op during normal execution; the block runs on language change.
        next_node(self.next)
        statement_name("translate_block")
    def restructure(self, callback):
        callback(self.block)
class TranslateEarlyBlock(TranslateBlock):
    """
    This is similar to the TranslateBlock, except it runs before deferred
    styles do. (The subclass adds no behavior of its own; it serves purely
    as a marker type.)
    """
class Style(Node):
__slots__ = [
'style_name',
'parent',
'properties',
'clear',
'take',
'delattr',
'variant',
]
def __init__(self, loc, name):
"""
`name`
The name of | |
# Source: dyahalomi/koi3278 -- JointModel/JointMCMC_analyze.py
"""
Analyze the results of an MCMC run.
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as plticker
from scipy import interpolate
from JointMCMC_funcs import msage, kepler_problem, isointerp, loadisos, solve_WDmassRV, RV_WDmass_eq
from inputs import labels
# the file with the MCMC chain results
infile_SPC = './JointChain_spcFeb27.txt'
infile_SpecMatch = './JointChain_SpecMatchFeb28.txt'
infile_Brewer = './JointChain_BrewerFeb28.txt'
# after the burn in, only use every thin amount for speed
nthin = 1
# does this include limb darkening as free parameters
fitlimb = False
# output the median and 1-sigma error results to a TeX file
# use None if not desired
# BUGFIX: this was the *string* 'None', which passes the later
# `texout is not None` checks and silently enables TeX output with a file
# literally named "None". Use the None sentinel, as the comment intends.
texout = None
# whether or not to evaluate all the isochrones to get inferred properties
# in the TeX file (adds a lot of time)
inferredparams = False
# iteration where burn-in stops
burnin = 20000
# make the triangle plot
maketriangle = True
# ========================================================================== #
if fitlimb:
    # the limb-darkening coefficients of star 1 become free parameters
    labels.append('$u_{S1,1}$')
    labels.append('$u_{S1,2}$')
nparams = len(labels)
# Load the chain: columns are [iteration, walker, loglike, param0, param1, ...]
x = np.loadtxt(infile_Brewer)
# NOTE(review): Python 2 print statement -- this script targets Python 2.
print 'File loaded'
# split the metadata from the chain results
iteration = x[:, 0]
walkers = x[:, 1]
uwalkers = np.unique(walkers)
loglike = x[:, 2]
x = x[:, 3:]
# thin the file if we want to speed things up
thin = np.arange(0, iteration.max(), nthin)
good = np.in1d(iteration, thin)
x = x[good, :]
iteration = iteration[good]
walkers = walkers[good]
loglike = loglike[good]
# plot the value of each chain for each parameter as well as its log likelihood
plt.figure()
plt.clf()
for ii in np.arange(nparams+1):
    # use 3 columns of plots
    # NOTE(review): newer matplotlib requires an int subplot count; np.ceil
    # returns a float here.
    ax = plt.subplot(np.ceil((nparams+1)/3.), 3, ii+1)
    for jj in uwalkers:
        this = np.where(walkers == jj)[0]
        if ii < nparams:
            # if this chain is really long, cut down on plotting time by only
            # plotting every tenth element
            if len(iteration[this]) > 5000:
                plt.plot(iteration[this][::10],
                         x[this, ii].reshape((-1,))[::10])
            else:
                plt.plot(iteration[this], x[this, ii].reshape((-1,)))
        # plot the likelihood
        else:
            if len(iteration[this]) > 5000:
                plt.plot(iteration[this][::10], loglike[this][::10])
            else:
                plt.plot(iteration[this], loglike[this])
    # show the burnin location
    plt.plot([burnin, burnin], plt.ylim(), lw=2)
    # add the labels
    if ii < nparams:
        plt.ylabel(labels[ii])
    else:
        plt.ylabel('Log Likelihood')
    plt.xlabel('Iterations')
    ax.ticklabel_format(useOffset=False)
# now remove the burnin phase
pastburn = np.where(iteration > burnin)[0]
iteration = iteration[pastburn]
walkers = walkers[pastburn]
loglike = loglike[pastburn]
x = x[pastburn, :]
# ========================================================================== #
# Taken from RadVel Github, April 16, 2019
def gelman_rubin(pars0, minTz, maxGR):
'''Gelman-Rubin Statistic
Calculates the Gelman-Rubin statistic and the number of
independent draws for each parameter, as defined by Ford et
al. (2006) (http://adsabs.harvard.edu/abs/2006ApJ...642..505F).
The chain is considered well-mixed if all parameters have a
Gelman-Rubin statistic of <= 1.03 and >= 1000 independent draws.
Args:
pars0 (array): A 3 dimensional array (NPARS,NSTEPS,NCHAINS) of
parameter values
minTz (int): minimum Tz to consider well-mixed
maxGR (float): maximum Gelman-Rubin statistic to
consider well-mixed
Returns:
tuple: tuple containing:
ismixed (bool):
Are the chains well-mixed?
gelmanrubin (array):
An NPARS element array containing the
Gelman-Rubin statistic for each parameter (equation
25)
Tz (array):
An NPARS element array containing the number
of independent draws for each parameter (equation 26)
History:
2010/03/01:
Written: <NAME> - The Ohio State University
2012/10/08:
Ported to Python by <NAME> - University of Hawaii,
Institute for Astronomy
2016/04/20:
Adapted for use in RadVel. Removed "angular" parameter.
'''
pars = pars0.copy() # don't modify input parameters
sz = pars.shape
msg = 'MCMC: GELMAN_RUBIN: ERROR: pars must have 3 dimensions'
assert pars.ndim == 3, msg
npars = float(sz[0])
nsteps = float(sz[1])
nchains = float(sz[2])
msg = 'MCMC: GELMAN_RUBIN: ERROR: NSTEPS must be greater than 1'
assert nsteps > 1, msg
# Equation 21: W(z) in Ford 2006
variances = np.var(pars,axis=1, dtype=np.float64)
meanofvariances = np.mean(variances,axis=1)
withinChainVariances = np.mean(variances, axis=1)
# Equation 23: B(z) in Ford 2006
means = np.mean(pars,axis=1)
betweenChainVariances = np.var(means,axis=1, dtype=np.float64) * nsteps
varianceofmeans = np.var(means,axis=1, dtype=np.float64) / (nchains-1)
varEstimate = (
(1.0 - 1.0/nsteps) * withinChainVariances
+ 1.0 / nsteps * betweenChainVariances
)
bz = varianceofmeans * nsteps
# Equation 24: varhat+(z) in Ford 2006
varz = (nsteps-1.0)/bz + varianceofmeans
# Equation 25: Rhat(z) in Ford 2006
gelmanrubin = np.sqrt(varEstimate/withinChainVariances)
# Equation 26: T(z) in Ford 2006
vbz = varEstimate / bz
tz = nchains*nsteps*vbz[vbz < 1]
if tz.size == 0:
tz = [-1]
# well-mixed criteria
ismixed = min(tz) > minTz and max(gelmanrubin) < maxGR
return (ismixed, gelmanrubin, tz)
# ========================================================================== #
# Reshape the flat chain into (nparams, nsteps, nchains) for Gelman-Rubin.
# NOTE(review): the 100000 total iterations and 50 walkers are hard-coded
# and also assume nthin == 1 -- fragile; confirm against the MCMC run setup.
pars0 = np.reshape(x.T, (nparams, 100000-burnin-1, 50))
print gelman_rubin(pars0, 1000, 1.1)
# sort the results by likelihood for the triangle plot
lsort = np.argsort(loglike)
lsort = lsort[::-1]
iteration = iteration[lsort]
walkers = walkers[lsort]
loglike = loglike[lsort]
x = x[lsort, :]
# Corner ("triangle") plot: 2D confidence contours below the diagonal and
# 1D histograms on the diagonal, with shared per-parameter axis ranges.
if maketriangle:
    plt.figure(figsize = (18,18))
    plt.clf()
    # set unrealistic default mins and maxes
    maxes = np.zeros(len(x[0, :])) - 9e9
    mins = np.zeros(len(x[0, :])) + 9e9
    nbins = 50
    # go through each combination of parameters
    for jj in np.arange(len(x[0, :])):
        for kk in np.arange(len(x[0, :])):
            # only handle each combination once
            if kk < jj:
                # pick the right subplot
                ax = plt.subplot(len(x[0, :]), len(x[0, :]),
                                 jj * len(x[0, :]) + kk + 1)
            # 3, 2, and 1 sigma levels
                sigmas = np.array([0.9973002, 0.9544997, 0.6826895])
                # put each sample into 2D bins
                # NOTE(review): `normed=` was removed from numpy; newer numpy
                # needs `density=` (or omit, since False is the default).
                hist2d, xedge, yedge = np.histogram2d(x[:, jj], x[:, kk],
                                                      bins=[nbins, nbins],
                                                      normed=False)
                # convert the bins to frequency
                hist2d /= len(x[:, jj])
                flat = hist2d.flatten()
                # get descending bin frequency
                fargs = flat.argsort()[::-1]
                flat = flat[fargs]
                # cumulative fraction up to each bin
                cums = np.cumsum(flat)
                levels = []
                # figure out where each sigma cutoff bin is
                for ii in np.arange(len(sigmas)):
                    above = np.where(cums > sigmas[ii])[0][0]
                    levels.append(flat[above])
                levels.append(1.)
                # figure out the min and max range needed for this plot
                # then see if this is beyond the range of previous plots.
                # this is necessary so that we can have a common axis
                # range for each row/column
                above = np.where(hist2d > levels[0])
                thismin = xedge[above[0]].min()
                if thismin < mins[jj]:
                    mins[jj] = thismin
                thismax = xedge[above[0]].max()
                if thismax > maxes[jj]:
                    maxes[jj] = thismax
                thismin = yedge[above[1]].min()
                if thismin < mins[kk]:
                    mins[kk] = thismin
                thismax = yedge[above[1]].max()
                if thismax > maxes[kk]:
                    maxes[kk] = thismax
                # make the contour plot for these two parameters
                plt.contourf(yedge[1:]-np.diff(yedge)/2.,
                             xedge[1:]-np.diff(xedge)/2., hist2d,
                             levels=levels,
                             colors=('k', '#444444', '#888888'))
            # plot the distribution of each parameter
            if jj == kk:
                ax = plt.subplot(len(x[0, :]), len(x[0, :]),
                                 jj*len(x[0, :]) + kk + 1)
                plt.hist(x[:, jj], bins=nbins, facecolor='k')
    # allow for some empty space on the sides
    diffs = maxes - mins
    mins -= 0.05*diffs
    maxes += 0.05*diffs
    # go back through each figure and clean it up
    for jj in np.arange(len(x[0, :])):
        for kk in np.arange(len(x[0, :])):
            if kk < jj or jj == kk:
                ax = plt.subplot(len(x[0, :]), len(x[0, :]),
                                 jj*len(x[0, :]) + kk + 1)
                # set the proper limits
                if kk < jj:
                    ax.set_ylim(mins[jj], maxes[jj])
                ax.set_xlim(mins[kk], maxes[kk])
                # make sure tick labels don't overlap between subplots
                ax.yaxis.set_major_locator(plticker.MaxNLocator(nbins=4,
                                                                prune='both'))
                # only show tick labels on the edges
                if kk != 0 or jj == 0:
                    ax.set_yticklabels([])
                else:
                    # tweak the formatting
                    plt.ylabel(labels[jj])
                    locs, labs = plt.yticks()
                    plt.setp(labs, rotation=0, va='center')
                    yformatter = plticker.ScalarFormatter(useOffset=False)
                    ax.yaxis.set_major_formatter(yformatter)
                # do the same with the x-axis ticks
                ax.xaxis.set_major_locator(plticker.MaxNLocator(nbins=4,
                                                                prune='both'))
                if jj != len(x[0, :])-1:
                    ax.set_xticklabels([])
                else:
                    plt.xlabel(labels[kk])
                    locs, labs = plt.xticks()
                    plt.setp(labs, rotation=90, ha='center')
                    yformatter = plticker.ScalarFormatter(useOffset=False)
                    ax.xaxis.set_major_formatter(yformatter)
    # remove the space between plots
    plt.subplots_adjust(hspace=0.0, wspace=0.0)
# the best, median, and standard deviation of the input parameters
# used to feed back to model_funcs for initrange, and plotting the best fit
# model for publication figures in mcmc_run
# (x was sorted by descending log-likelihood above, so row 0 is the
# maximum-likelihood sample.)
best = x[0, :]
meds = np.median(x, axis=0)
devs = np.std(x, axis=0)
print 'Best model parameters: '
print best
print 'Median model parameters: '
print meds
# ========================================================================== #
# load the isochrones if we need them
if inferredparams and texout is not None:
    # `loaded` acts as a once-only guard so re-running the script in the
    # same interpreter session does not reload the isochrones.
    try:
        loaded
    except NameError:
        loaded = 1
        isobundle = loadisos()
    # unpack the model bundle
    # NOTE(review): isobundle only exists if the except branch ran in this
    # session -- relies on interactive reuse; confirm.
    (magname, interps, limits, fehs, ages,
     maxmasses) = isobundle
    minfeh, maxfeh, minage, maxage = limits
# put the MCMC results into a TeX table
# NOTE(review): this checks `texout is not None`, so a *string* value like
# 'None' would still enable the branch.
if texout is not None:
    best_out = best.copy()
    best_out = list(best_out)
    # calculate eccentricity and add it to the list of parameters
    # (e = sqrt(h^2 + k^2) from the sqrt(e)cos/sin-style parameters in
    # columns 2 and 3)
    e = (np.sqrt(x[:, 2]**2. + x[:, 3]**2.)).reshape((len(x[:, 0]), 1))
    e_best = np.sqrt(best[2]**2. + best[3]**2.)
    best_out.append(e_best)
    x = np.concatenate((x, e), axis=1)
    labels.append('$e$')
    # add omega to the list (argument of periastron, in degrees)
    omega = np.arctan2(x[:, 3], x[:, 2]).reshape((len(x[:, 0]), 1))*180./np.pi
    omega_best = np.arctan2(best[3], best[2])*180./np.pi
    best_out.append(omega_best)
    x = np.concatenate((x, omega), axis=1)
    labels.append('$\omega$ (deg)')
# if we | |
sorted_arguments_list
# print("ordered results_list: ", results_list)
# print("ordered arguments_list: ")
for arguments in arguments_list:
print(arguments)
if dimensions is not None:
validity_values = []
for i, arguments in enumerate(arguments_list):
valid = True
for j, dim in enumerate(dimensions):
arg_value = arguments[j]
if isinstance(dim, Categorical):
if arguments[j] not in dim.categories:
valid = False
if verbose:
print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim.categories)
elif isinstance(dim, tuple) and len(dim) == 2:
if dim[0] > arguments[j] or dim[1] < arguments[j]:
valid = False
if verbose:
print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim)
elif isinstance(dim, list):
if arguments[j] not in dim:
valid = False
if verbose:
print("entry %d failed validity for argument %d with value %d" % (i, j, arg_value), dim)
validity_values.append(valid)
print("validity_values:", validity_values)
filtered_arguments_list = []
for i in range(len(validity_values)):
if validity_values[i]:
filtered_arguments_list.append(arguments_list[i])
arguments_list = filtered_arguments_list
results_list = results_list[validity_values]
# if len(arguments_list) == 0:
# arguments_list = None
# results_list = None
# print("final ordered results_list: ", results_list)
# print("final ordered arguments_list: ")
for arguments in arguments_list:
print(arguments)
# quit()
if len(arguments_list) == 0:
arguments_list = None
results_list = None
return arguments_list, results_list
def display_best_arguments(arguments_list, results_list, consider_std=True):
    """Group identical argument tuples, average their results, and print
    them ranked from best to worst.

    Parameters
    ----------
    arguments_list : sequence of sequences or None
        One entry of (integer) hyperparameter values per evaluation.
    results_list : sequence of floats
        The measured performance of each entry in `arguments_list`.
    consider_std : bool
        If True, rank by the mean minus half the standard error of the
        mean (a conservative score); otherwise rank by the plain mean.

    Returns
    -------
    (list of tuples, numpy array)
        The unique argument tuples and their final scores, both sorted
        best-first, or (None, None) when `arguments_list` is None.
    """
    if arguments_list is None:
        print("arguments_list is None")
        return None, None
    # Collect every result that shares the same argument tuple.
    arguments_results_dict = {}
    for i, arguments in enumerate(arguments_list):
        arguments_results_dict.setdefault(tuple(arguments), []).append(results_list[i])
    # Per-tuple mean, spread, and sample count.
    averaged_arguments_list = []
    averaged_results_list = []
    results_stds = []
    results_lens = []
    for arg, values in arguments_results_dict.items():
        values = np.asarray(values)
        averaged_arguments_list.append(arg)
        averaged_results_list.append(values.mean())
        results_stds.append(values.std())
        results_lens.append(len(values))
    averaged_results_list = np.array(averaged_results_list)
    results_stds = np.array(results_stds)
    results_lens = np.array(results_lens)
    # Standard-error penalty. BUGFIX: the original divided by
    # (len - 1) ** 0.5 unconditionally, which is 0/0 = nan for argument
    # tuples evaluated only once and corrupted the ranking; single-sample
    # tuples now simply get no penalty.
    penalty = np.zeros_like(averaged_results_list, dtype=float)
    multi = results_lens > 1
    penalty[multi] = 0.5 * results_stds[multi] / (results_lens[multi] - 1) ** 0.5
    corrected_results_list = averaged_results_list - penalty
    # Sort everything best-first by the chosen score.
    if consider_std:
        ordering = np.argsort(corrected_results_list)[::-1]
    else:
        ordering = np.argsort(averaged_results_list)[::-1]
    averaged_results_list = averaged_results_list[ordering]
    results_stds = results_stds[ordering]
    results_lens = results_lens[ordering]
    corrected_results_list = corrected_results_list[ordering]
    averaged_arguments_list = [averaged_arguments_list[k] for k in ordering]
    print("averaged ordered results_list: ", averaged_results_list)
    print("results_stds: ", results_stds)
    print("averaged ordered results_list - 0.5 * results_stds/factor: ", corrected_results_list)
    print("results_lens: ", results_lens)
    print("averaged ordered arguments_list: ")
    for arguments in averaged_arguments_list:
        print("(", end="")
        for arg in arguments:
            print("%3d, " % arg, end="")
        print(")")
    if consider_std:
        final_results_list = corrected_results_list
    else:
        final_results_list = averaged_results_list
    return averaged_arguments_list, final_results_list
def progress_callback(res):
    # Optimizer callback: print one "C" per completed call as a minimal
    # progress indicator (no newline). `res` is ignored.
    print("C", end="")
#def gp_minimize(func, dimensions, base_estimator=None, n_calls=100, n_random_starts=10, acq_func='gp_hedge',
# acq_optimizer='auto', x0=None, y0=None, random_state=None, verbose=False, callback=None,
# n_points=10000, n_restarts_optimizer=5, xi=0.01, kappa=1.96, noise='gaussian', n_jobs=1)
# ['13', '17', '33', '57', '85', '90', '87', '170', '15', '10', '19', '23', '14', '8', '4', '1', '0', '0', '0', '0', '0', '13', '79', '20']
# [13, 17, 33, 57, 85, 90, 87, 170, 15, 10, 19, 23, 14, 8, 4, 1, 0, 0, 0, 0, 0, 13, 79, 20]
# 13 20 28 50 70 90 120 200 9 19 10 26 6 6 9 0 0 0 0 0 0 0 90 25
# Output dimensionalities (PCA and iGSFA)
range_L0_pca_out_dim = (12, 13) # O [13] # (12, 14) # (10, 16) # 13
range_L0_sfa_out_dim = (16, 21) # N (15, 23) # O (18, 23) # (15, 25) # [20] # (20, 21)
range_L1H_sfa_out_dim = (32, 38) #E (32, 35) # O (33, 38) # (31, 34) # (20, 36) # [28] # (28, 29)
range_L1V_sfa_out_dim = (54, 65) # N (50, 65) # (50, 63) # [50] # (50, 51)
range_L2H_sfa_out_dim = (65, 77) # N (65, 95) #E (65, 75) # O (68, 78) # [70] # (70, 71)
range_L2V_sfa_out_dim = (89, 96) # N (72, 100) #E (75, 100) # O (68, 95) # [90] # (90, 91)
range_L3H_sfa_out_dim = (111, 150) # N (92, 145) #E (125, 145) # O (100, 145) # [120] # (120, 121)
range_L3V_sfa_out_dim = (139, 230) #E (170, 216) # O (170, 230) #[200] # (200, 201)
# Length of slow part
range_L0_delta_threshold = (10, 18) # O (12, 18) # (1, 20) # [9] # #(9, 10) #
range_L1H_delta_threshold = (7, 16) # N (7, 18) #E (10, 20) # O (7, 14) # [19] # (19, 20)
range_L1V_delta_threshold = (4, 18) # E(7, 18) # O (7, 15) # [10] # (10, 11)
range_L2H_delta_threshold = (33, 50) # N (15, 46) # O (23, 45) # [26] # (26, 27)
range_L2V_delta_threshold = (0, 22) # O (0, 7) # [6] # (6, 7)
range_L3H_delta_threshold = (0, 14) # O [0] # [6] # (6, 7)
range_L3V_delta_threshold = (9, 13) # O [9] # (3, 5) # [9] # (9, 10)
# WARNING two categories cannot be expressed as [n1, n2], instead use e.g.,
# otherwise interval (n1, n2) is assumed
# Expansions
range_L0_expansion = [1] # N (0, 1) # O [1] # [0] # (0, 1)
range_L1H_expansion = [0] # N Categorical([0, 3]) # O [0] # TRY ALSO 3 [0, 0, 3] # (0, 1)
range_L1V_expansion = Categorical([0, 3]) # O [3] # (0, 1)
range_L2H_expansion = [4] # N Categorical([0, 3, 4]) #E (3, 4) # O [0] # (0, 1)
range_L2V_expansion = Categorical([0, 3, 4]) #E (3, 4) # O [0] # Categorical([0, 3]) #WARNING############################# [0, 3] # (0, 1)
range_L3H_expansion = (6, 16) # N (0, 15) #E (6, 15) # O [7] # [0, 7, 8, 9, 10] # (0, 0)
range_L3V_expansion = (17, 21) # N (0, 21) #E (15, 20) # O (11, 21) # [0, 7, 8, 9] (0, 0)
range_L4_degree_QT = (40, 109) # O (40, 119) # [90] # (90, 90)
range_L4_degree_CT = (13, 26) # O (10, 26) # [25] # (25, 25)
# The full 24-dimensional search space, in the same order the stored
# evaluations use: output dims, slow-part lengths, expansions, polynomial
# degrees.
cuicuilco_dimensions = (range_L0_pca_out_dim, range_L0_sfa_out_dim, range_L1H_sfa_out_dim, range_L1V_sfa_out_dim, range_L2H_sfa_out_dim, range_L2V_sfa_out_dim, range_L3H_sfa_out_dim, range_L3V_sfa_out_dim, range_L0_delta_threshold, range_L1H_delta_threshold, range_L1V_delta_threshold, range_L2H_delta_threshold, range_L2V_delta_threshold, range_L3H_delta_threshold, range_L3V_delta_threshold, range_L0_expansion, range_L1H_expansion, range_L1V_expansion, range_L2H_expansion, range_L2V_expansion, range_L3H_expansion, range_L3V_expansion, range_L4_degree_QT, range_L4_degree_CT) # tuple or list?
print("cuicuilco_dimensions:", cuicuilco_dimensions)
# ( 13, 20, 36, 61, 75, 95, 140, 210, 16, 12, 10, 40, 5, 0, 9, 1, 0, 3, 0, 0, 7, 20, 109, 15, )
#( 13, 19, 33, 51, 73, 90, 114, 188, 16, 11, 15, 29, 3, 0, 9, 1, 0, 3, 0, 0, 7, 19, 42, 24, )
#( 13, 20, 36, 60, 72, 89, 139, 170, 14, 7, 10, 40, 5, 0, 9, 1, 0, 3, 0, 0, 7, 19, 101, 19, )
#( 13, 19, 35, 54, 71, 91, 111, 196, 14, 11, 14, 36, 3, 0, 9, 1, 0, 3, 0, 0, 7, 17, 80, 21, )
#( 13, 19, 34, 53, 72, 89, 130, 200, 14, 12, 13, 36, 1, 0, 9, 1, 0, 3, 0, 0, 7, 17, 83, 24, )
# np.random.seed(1234) # use a new random seed each time to allow combination of executions on different systems
# Load previously saved evaluations restricted to the space above, rank
# them, and stop -- quit() prevents anything below from running.
argument_list, results_list = load_saved_executions(measure="CR_Gauss_mix", dimensions=cuicuilco_dimensions, verbose=False)
display_best_arguments(argument_list, results_list)
quit()
#argument_list = None
#results_list = None
#argument_list = [ # Best hyperparameters for original slow feature scaling method
#[13, 22, 38, 56, 77, 77, 124, 230, 17, 9, 14, 33, 6, 0, 9, 1, 0, 3, 0, 0, 7, 18, 91, 19],
#[13, 21, 37, 55, 78, 95, 108, 170, 18, 7, 15, 45, 2, 0, 9, 1, 0, 3, 0, 0, 7, 21, 40, 26],
#[13, 19, 35, 54, 71, 91, 111, 196, 14, 11, 14, 36, 3, 0, 9, 1, 0, 3, 0, 0, 7, 17, 80, 21],
#[13, 17, 33, 65, 95, 72, 92, 139, 15, 13, 13, 24, 4, 0, 3, 2, 0, 3, 0, 0, 7, 9, 89, 24],
#[13, 17, 34, 54, 95, 76, 100, 144, 13, 18, 4, 30, 4, 0, 1, 1, 0, 0, 0, 0, 0, 0, 98, 24],
#[13, 22, 38, 56, 77, 77, 124, 230, 17, 9, 14, 33, 6, 0, 9, 1, 0, 3, 0, 0, 7, 18, 91, 19]
#]
#[12, 15, 35, 65, 70, 95, 140, 196, 10, 10, 12, 29, 16, 4, 11, 1, 3, 0, 4, 4, 15, 20, 109, 18],
#[12, 23, 35, 64, 67, 98, 127, 184, 12, 14, 18, 29, 1, 2, 9, 0, 3, 0, 4, 4, 9, 18, 109, 20],
#[15, 19, 34, 59, 74, 95, 131, 208, 14, 12, 14, 39, 1, 10, 10, 0, 3, 0, 3, 3, 14, 13, | |
# Source: shilpiprd/sympy -- sympy/sets/handlers/intersection.py
from sympy import (S, Dummy, Lambda, symbols, Interval, Intersection, Set,
EmptySet, FiniteSet, Union, ComplexRegion, Mul)
from sympy.multipledispatch import dispatch
from sympy.sets.conditionset import ConditionSet
from sympy.sets.fancysets import (Integers, Naturals, Reals, Range,
ImageSet, Rationals)
from sympy.sets.sets import UniversalSet, imageset, ProductSet
from sympy.simplify.radsimp import numer
@dispatch(ConditionSet, ConditionSet) # type: ignore # noqa:F811
def intersection_sets(a, b): # noqa:F811
    # No simplification known for two ConditionSets; returning None tells
    # Intersection to keep the pair unevaluated.
    return None
@dispatch(ConditionSet, Set) # type: ignore # noqa:F811
def intersection_sets(a, b): # noqa:F811
    # Push the intersection into the ConditionSet's base set; the symbol
    # and condition are unchanged.
    return ConditionSet(a.sym, a.condition, Intersection(a.base_set, b))
@dispatch(Naturals, Integers) # type: ignore # noqa:F811
def intersection_sets(a, b): # noqa:F811
    # Naturals is a subset of Integers, so the intersection is Naturals.
    return a
@dispatch(Naturals, Naturals) # type: ignore # noqa:F811
def intersection_sets(a, b): # noqa:F811
    # Prefer the plain Naturals instance; presumably the other argument can
    # be a Naturals subclass (e.g. Naturals0), the larger set -- confirm.
    return a if a is S.Naturals else b
@dispatch(Interval, Naturals) # type: ignore # noqa:F811
def intersection_sets(a, b): # noqa:F811
    # Intersection is commutative: delegate to the (Naturals, Interval)
    # handler.
    return intersection_sets(b, a)
@dispatch(ComplexRegion, Set) # type: ignore # noqa:F811
def intersection_sets(self, other): # noqa:F811
    # Intersect a ComplexRegion with another set. Falls off the end
    # (returning None, i.e. "keep unevaluated") when no case applies --
    # e.g. mixed polar/rectangular regions.
    if other.is_ComplexRegion:
        # self in rectangular form
        if (not self.polar) and (not other.polar):
            return ComplexRegion(Intersection(self.sets, other.sets))
        # self in polar form
        elif self.polar and other.polar:
            r1, theta1 = self.a_interval, self.b_interval
            r2, theta2 = other.a_interval, other.b_interval
            new_r_interval = Intersection(r1, r2)
            new_theta_interval = Intersection(theta1, theta2)
            # 0 and 2*Pi means the same
            if ((2*S.Pi in theta1 and S.Zero in theta2) or
               (2*S.Pi in theta2 and S.Zero in theta1)):
                new_theta_interval = Union(new_theta_interval,
                                     FiniteSet(0))
            return ComplexRegion(new_r_interval*new_theta_interval,
                                polar=True)
    if other.is_subset(S.Reals):
        new_interval = []
        x = symbols("x", cls=Dummy, real=True)
        # self in rectangular form
        if not self.polar:
            for element in self.psets:
                # Points on the real axis occur where the imaginary
                # interval (args[1]) contains zero.
                if S.Zero in element.args[1]:
                    new_interval.append(element.args[0])
            new_interval = Union(*new_interval)
            return Intersection(new_interval, other)
        # self in polar form
        elif self.polar:
            for element in self.psets:
                # theta = 0 gives the radii themselves; theta = pi gives
                # the negated radii; r = 0 contributes the origin.
                if S.Zero in element.args[1]:
                    new_interval.append(element.args[0])
                if S.Pi in element.args[1]:
                    new_interval.append(ImageSet(Lambda(x, -x), element.args[0]))
                if S.Zero in element.args[0]:
                    new_interval.append(FiniteSet(0))
            new_interval = Union(*new_interval)
            return Intersection(new_interval, other)
@dispatch(Integers, Reals)  # type: ignore # noqa:F811
def intersection_sets(a, b):  # noqa:F811
    # Integers is a subset of Reals.
    return a
@dispatch(Range, Interval)  # type: ignore # noqa:F811
def intersection_sets(a, b):  # noqa:F811
    """Intersect a Range with an Interval by clipping the Range to the
    integers that actually lie inside the Interval, then recursing into
    the (Range, Range) handler."""
    # Check that there are no symbolic arguments
    if not all(i.is_number for i in a.args + b.args[:2]):
        return

    # In case of null Range, return an EmptySet.
    if a.size == 0:
        return S.EmptySet

    from sympy.functions.elementary.integers import floor, ceiling
    # trim down to self's size, and represent
    # as a Range with step 1.
    start = ceiling(max(b.inf, a.inf))
    if start not in b:
        # b is open at its left end: step past the excluded endpoint
        start += 1
    end = floor(min(b.sup, a.sup))
    if end not in b:
        # b is open at its right end
        end -= 1
    return intersection_sets(a, Range(start, end + 1))
@dispatch(Range, Naturals)  # type: ignore # noqa:F811
def intersection_sets(a, b):  # noqa:F811
    # Treat the (semi-)infinite Naturals as the interval [b.inf, oo) and
    # reuse the (Range, Interval) handler.
    return intersection_sets(a, Interval(b.inf, S.Infinity))
@dispatch(Range, Range)  # type: ignore # noqa:F811
def intersection_sets(a, b):  # noqa:F811
    """Intersect two Ranges.

    Strategy: equate the general terms start + i*step of both Ranges and
    solve the resulting linear diophantine equation to find a coincident
    point ``c``; the intersection is then a Range through ``c`` with step
    lcm(a.step, b.step), clipped to the overlap of the originals.
    """
    # Check that there are no symbolic range arguments
    if not all(all(v.is_number for v in r.args) for r in [a, b]):
        return None

    # non-overlap quick exits
    if not b:
        return S.EmptySet
    if not a:
        return S.EmptySet
    if b.sup < a.inf:
        return S.EmptySet
    if b.inf > a.sup:
        return S.EmptySet

    # work with finite end at the start
    r1 = a
    if r1.start.is_infinite:
        r1 = r1.reversed
    r2 = b
    if r2.start.is_infinite:
        r2 = r2.reversed

    # If both ends are infinite then it means that one Range is just the set
    # of all integers (the step must be 1).
    if r1.start.is_infinite:
        return b
    if r2.start.is_infinite:
        return a

    from sympy.solvers.diophantine.diophantine import diop_linear
    from sympy.core.numbers import ilcm
    from sympy import sign

    # this equation represents the values of the Range;
    # it's a linear equation
    eq = lambda r, i: r.start + i*r.step

    # we want to know when the two equations might
    # have integer solutions so we use the diophantine
    # solver
    va, vb = diop_linear(eq(r1, Dummy('a')) - eq(r2, Dummy('b')))

    # check for no solution
    no_solution = va is None and vb is None
    if no_solution:
        return S.EmptySet
    # there is a solution
    # -------------------

    # find the coincident point, c
    a0 = va.as_coeff_Add()[0]
    c = eq(r1, a0)

    # find the first point, if possible, in each range
    # since c may not be that point
    def _first_finite_point(r1, c):
        # NOTE: `step` is the enclosing function's lcm step; it is
        # assigned below *before* this closure is first called.
        if c == r1.start:
            return c
        # st is the signed step we need to take to
        # get from c to r1.start
        st = sign(r1.start - c)*step
        # use Range to calculate the first point:
        # we want to get as close as possible to
        # r1.start; the Range will not be null since
        # it will at least contain c
        s1 = Range(c, r1.start + st, st)[-1]
        if s1 == r1.start:
            pass
        else:
            # if we didn't hit r1.start then, if the
            # sign of st didn't match the sign of r1.step
            # we are off by one and s1 is not in r1
            if sign(r1.step) != sign(st):
                s1 -= st
            if s1 not in r1:
                return
        return s1

    # calculate the step size of the new Range
    step = abs(ilcm(r1.step, r2.step))
    s1 = _first_finite_point(r1, c)
    if s1 is None:
        return S.EmptySet
    s2 = _first_finite_point(r2, c)
    if s2 is None:
        return S.EmptySet

    # replace the corresponding start or stop in
    # the original Ranges with these points; the
    # result must have at least one point since
    # we know that s1 and s2 are in the Ranges
    def _updated_range(r, first):
        st = sign(r.step)*step
        if r.start.is_finite:
            rv = Range(first, r.stop, st)
        else:
            rv = Range(r.start, first + st, st)
        return rv
    r1 = _updated_range(a, s1)
    r2 = _updated_range(b, s2)

    # work with them both in the increasing direction
    if sign(r1.step) < 0:
        r1 = r1.reversed
    if sign(r2.step) < 0:
        r2 = r2.reversed

    # return clipped Range with positive step; it
    # can't be empty at this point
    start = max(r1.start, r2.start)
    stop = min(r1.stop, r2.stop)
    return Range(start, stop, step)
@dispatch(Range, Integers)  # type: ignore # noqa:F811
def intersection_sets(a, b):  # noqa:F811
    # Every Range is a subset of Integers.
    return a
@dispatch(ImageSet, Set) # type: ignore # noqa:F811
def intersection_sets(self, other): # noqa:F811
from sympy.solvers.diophantine import diophantine
# Only handle the straight-forward univariate case
if (len(self.lamda.variables) > 1
or self.lamda.signature != self.lamda.variables):
return None
base_set = self.base_sets[0]
# Intersection between ImageSets with Integers as base set
# For {f(n) : n in Integers} & {g(m) : m in Integers} we solve the
# diophantine equations f(n)=g(m).
# If the solutions for n are {h(t) : t in Integers} then we return
# {f(h(t)) : t in integers}.
# If the solutions for n are {n_1, n_2, ..., n_k} then we return
# {f(n_i) : 1 <= i <= k}.
if base_set is S.Integers:
gm = None
if isinstance(other, ImageSet) and other.base_sets == (S.Integers,):
gm = other.lamda.expr
var = other.lamda.variables[0]
# Symbol of second ImageSet lambda must be distinct from first
m = Dummy('m')
gm = gm.subs(var, m)
elif other is S.Integers:
m = gm = Dummy('m')
if gm is not None:
fn = self.lamda.expr
n = self.lamda.variables[0]
try:
solns = list(diophantine(fn - gm, syms=(n, m), permute=True))
except (TypeError, NotImplementedError):
# TypeError if equation not polynomial with rational coeff.
# NotImplementedError if correct format but no solver.
return
# 3 cases are possible for solns:
# - empty set,
# - one or more parametric (infinite) solutions,
# - a finite number of (non-parametric) solution couples.
# Among those, there is one type of solution set that is
# not helpful here: multiple parametric solutions.
if len(solns) == 0:
return EmptySet
elif any(s.free_symbols for tupl in solns for s in tupl):
if len(solns) == 1:
soln, solm = solns[0]
(t,) = soln.free_symbols
expr = fn.subs(n, soln.subs(t, n)).expand()
return imageset(Lambda(n, expr), S.Integers)
else:
return
else:
return FiniteSet(*(fn.subs(n, s[0]) for s in solns))
if other == S.Reals:
from sympy.core.function import expand_complex
from sympy.solvers.solvers import denoms, solve_linear
from sympy.core.relational import Eq
def _solution_union(exprs, sym):
# return a union of linear solutions to i in expr;
# if i cannot be solved, use a ConditionSet for solution
sols = []
for i in exprs:
x, xis = solve_linear(i, 0, [sym])
if x == sym:
sols.append(FiniteSet(xis))
else:
sols.append(ConditionSet(sym, Eq(i, 0)))
return Union(*sols)
f = self.lamda.expr
n = self.lamda.variables[0]
n_ = Dummy(n.name, real=True)
f_ = f.subs(n, n_)
re, im = f_.as_real_imag()
im = expand_complex(im)
re = re.subs(n_, n)
im = im.subs(n_, n)
ifree | |
)
# clusters = main.topo.getAllClusters( main )
# mnSwitches = main.Mininet1.getSwitches()
# mnLinks = main.Mininet1.getLinks()
# mnHosts = main.Mininet1.getHosts()
# main.step( "Comparing MN topology to ONOS topology" )
# for controller in range( main.numCtrls ):
# controllerStr = str( controller + 1 )
# if devices[ controller ] and ports[ controller ] and\
# "Error" not in devices[ controller ] and\
# "Error" not in ports[ controller ]:
# currentDevicesResult = main.Mininet1.compareSwitches(
# mnSwitches,
# json.loads( devices[ controller ] ),
# json.loads( ports[ controller ] ) )
# else:
# currentDevicesResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentDevicesResult,
# onpass="ONOS" + controllerStr +
# " Switches view is correct",
# onfail="ONOS" + controllerStr +
# " Switches view is incorrect" )
# if links[ controller ] and "Error" not in links[ controller ]:
# currentLinksResult = main.Mininet1.compareLinks(
# mnSwitches, mnLinks,
# json.loads( links[ controller ] ) )
# else:
# currentLinksResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentLinksResult,
# onpass="ONOS" + controllerStr +
# " links view is correct",
# onfail="ONOS" + controllerStr +
# " links view is incorrect" )
# if hosts[ controller ] or "Error" not in hosts[ controller ]:
# currentHostsResult = main.Mininet1.compareHosts(
# mnHosts,
# json.loads( hosts[ controller ] ) )
# else:
# currentHostsResult = main.FALSE
# utilities.assert_equals( expect=main.TRUE,
# actual=currentHostsResult,
# onpass="ONOS" + controllerStr +
# " hosts exist in Mininet",
# onfail="ONOS" + controllerStr +
# " hosts don't match Mininet" )
# NEW FUNCintentRest CASE 8, based off of CASE 8 from FUNCintent
"""
Compare ONOS Topology to Mininet Topology
"""
import json
main.case( "Compare ONOS Topology view to Mininet topology" )
main.caseExplanation = "Compare topology elements between Mininet" +\
" and ONOS"
main.log.info( "Gathering topology information from Mininet" )
devicesResults = main.FALSE # Overall Boolean for device correctness
linksResults = main.FALSE # Overall Boolean for link correctness
hostsResults = main.FALSE # Overall Boolean for host correctness
deviceFails = [] # Nodes where devices are incorrect
linkFails = [] # Nodes where links are incorrect
hostFails = [] # Nodes where hosts are incorrect
attempts = main.checkTopoAttempts # Remaining Attempts
mnSwitches = main.Mininet1.getSwitches()
mnLinks = main.Mininet1.getLinks()
mnHosts = main.Mininet1.getHosts()
main.step( "Comparing Mininet topology to ONOS topology" )
while ( attempts >= 0 ) and\
( not devicesResults or not linksResults or not hostsResults ):
time.sleep( 2 )
if not devicesResults:
devices = main.topo.getAllDevices( main )
ports = main.topo.getAllPorts( main )
devicesResults = main.TRUE
deviceFails = [] # Reset for each failed attempt
if not linksResults:
links = main.topo.getAllLinks( main )
linksResults = main.TRUE
linkFails = [] # Reset for each failed attempt
if not hostsResults:
hosts = main.topo.getAllHosts( main )
hostsResults = main.TRUE
hostFails = [] # Reset for each failed attempt
# Check for matching topology on each node
for controller in range( main.numCtrls ):
controllerStr = str( controller + 1 ) # ONOS node number
# Compare Devices
if devices[ controller ] and ports[ controller ] and\
"Error" not in devices[ controller ] and\
"Error" not in ports[ controller ]:
try:
deviceData = json.loads( devices[ controller ] )
portData = json.loads( ports[ controller ] )
except (TypeError,ValueError):
main.log.error( "Could not load json: {0} or {1}".format( str( devices[ controller ] ), str( ports[ controller ] ) ) )
currentDevicesResult = main.FALSE
else:
currentDevicesResult = main.Mininet1.compareSwitches(
mnSwitches,deviceData,portData )
else:
currentDevicesResult = main.FALSE
if not currentDevicesResult:
deviceFails.append( controllerStr )
devicesResults = devicesResults and currentDevicesResult
# Compare Links
if links[ controller ] and "Error" not in links[ controller ]:
try:
linkData = json.loads( links[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( links[ controller ] ) )
currentLinksResult = main.FALSE
else:
currentLinksResult = main.Mininet1.compareLinks(
mnSwitches, mnLinks,linkData )
else:
currentLinksResult = main.FALSE
if not currentLinksResult:
linkFails.append( controllerStr )
linksResults = linksResults and currentLinksResult
# Compare Hosts
if hosts[ controller ] and "Error" not in hosts[ controller ]:
try:
hostData = json.loads( hosts[ controller ] )
except (TypeError,ValueError):
main.log.error("Could not load json:" + str( hosts[ controller ] ) )
currentHostsResult = main.FALSE
else:
currentHostsResult = main.Mininet1.compareHosts(
mnHosts,hostData )
else:
currentHostsResult = main.FALSE
if not currentHostsResult:
hostFails.append( controllerStr )
hostsResults = hostsResults and currentHostsResult
# Decrement Attempts Remaining
attempts -= 1
utilities.assert_equals( expect=[],
actual=deviceFails,
onpass="ONOS correctly discovered all devices",
onfail="ONOS incorrectly discovered devices on nodes: " +
str( deviceFails ) )
utilities.assert_equals( expect=[],
actual=linkFails,
onpass="ONOS correctly discovered all links",
onfail="ONOS incorrectly discovered links on nodes: " +
str( linkFails ) )
utilities.assert_equals( expect=[],
actual=hostFails,
onpass="ONOS correctly discovered all hosts",
onfail="ONOS incorrectly discovered hosts on nodes: " +
str( hostFails ) )
topoResults = hostsResults and linksResults and devicesResults
utilities.assert_equals( expect=main.TRUE,
actual=topoResults,
onpass="ONOS correctly discovered the topology",
onfail="ONOS incorrectly discovered the topology" )
def CASE9( self, main ):
    '''
    Report errors/warnings/exceptions
    '''
    # Scan the karaf log of the first ONOS node for the keywords below and
    # report the tallies.  The "s" argument presumably selects a short /
    # summary report mode — confirm against ONOSbench.logReport.
    main.log.info( "Error report: \n" )
    main.ONOSbench.logReport( globalONOSip[0],
                              [ "INFO", "FOLLOWER", "WARN", "flow", "ERROR" , "Except" ],
                              "s" )
    #main.ONOSbench.logReport( globalONOSip[1], [ "INFO" ], "d" )
def CASE10( self, main ):
    """
    Start Mininet topology with OF 1.0 switches

    Loads the topology file from the dependency path, forcing every
    switch to speak OpenFlow 1.0.  Aborts the whole run if the topology
    fails to come up, since no later case could succeed.
    """
    main.OFProtocol = "1.0"
    banner = "Start Mininet topology with OF 1.0 switches"
    main.log.report( banner )
    main.case( banner )
    main.caseExplanation = ( "Start mininet topology with OF 1.0 "
                             "switches to test intents, exits out if "
                             "topology did not start correctly" )

    main.step( "Starting Mininet topology with OF 1.0 switches" )
    topoResult = main.Mininet1.startNet(
        topoFile=main.dependencyPath + main.topology,
        args="--switch ovs,protocols=OpenFlow10" )
    utilities.assert_equals( expect=main.TRUE,
                             actual=topoResult,
                             onpass="Successfully loaded topology",
                             onfail="Failed to load topology" )
    # Exit if topology did not load properly
    if not topoResult:
        main.cleanup()
        main.exit()
def CASE11( self, main ):
    """
    Start Mininet topology with OF 1.3 switches

    Same as CASE10, but every switch is forced to OpenFlow 1.3.
    Aborts the whole run if the topology fails to come up.
    """
    main.OFProtocol = "1.3"
    banner = "Start Mininet topology with OF 1.3 switches"
    main.log.report( banner )
    main.case( banner )
    main.caseExplanation = ( "Start mininet topology with OF 1.3 "
                             "switches to test intents, exits out if "
                             "topology did not start correctly" )

    main.step( "Starting Mininet topology with OF 1.3 switches" )
    topoResult = main.Mininet1.startNet(
        topoFile=main.dependencyPath + main.topology,
        args="--switch ovs,protocols=OpenFlow13" )
    utilities.assert_equals( expect=main.TRUE,
                             actual=topoResult,
                             onpass="Successfully loaded topology",
                             onfail="Failed to load topology" )
    # Exit if topology did not load properly
    if not topoResult:
        main.cleanup()
        main.exit()
def CASE12( self, main ):
    """
    Assign mastership to controllers

    Points every Mininet switch (s1..sN) at the in-use ONOS nodes, then
    verifies per switch that the first ONOS node appears in its
    controller list.  Aborts the run if the initial assignment fails.
    """
    import re

    main.case( "Assign switches to controllers" )
    main.step( "Assigning switches to controllers" )
    main.caseExplanation = "Assign OF " + main.OFProtocol +\
                           " switches to ONOS nodes"
    # Switch names as Mininet knows them; use getSwitch() function later...
    switchList = [ 's' + str( i ) for i in range( 1, main.numSwitch + 1 ) ]
    # IPs of the ONOS nodes actually participating in this run
    tempONOSip = main.ONOSip[ :main.numCtrls ]

    assignResult = main.Mininet1.assignSwController( sw=switchList,
                                                     ip=tempONOSip,
                                                     port='6653' )
    if not assignResult:
        main.cleanup()
        main.exit()
    # Every switch must list (at least) the first ONOS node as controller
    for i in range( 1, main.numSwitch + 1 ):
        response = main.Mininet1.getSwController( "s" + str( i ) )
        print( "Response is " + str( response ) )
        if re.search( "tcp:" + main.ONOSip[ 0 ], response ):
            assignResult = assignResult and main.TRUE
        else:
            assignResult = main.FALSE
    # BUGFIX: onpass message previously read "...switchesto controller"
    # (missing space in the string concatenation).
    utilities.assert_equals( expect=main.TRUE,
                             actual=assignResult,
                             onpass="Successfully assigned switches " +
                                    "to controller",
                             onfail="Failed to assign switches to " +
                                    "controller" )
def CASE13( self,main ):
    """
    Create Scapy components
    """
    # NOTE(review): scapyResult is initialized to TRUE and never updated,
    # so the assertion below can never fail.  Consider folding in the
    # return values of the create/start calls — confirm what they return.
    main.case( "Create scapy components" )
    main.step( "Create scapy components" )
    import json  # NOTE(review): appears unused in this case
    scapyResult = main.TRUE
    for hostName in main.scapyHostNames:
        # one Scapy CLI component per named host, kept on `main` for
        # later cases to drive
        main.Scapy1.createHostComponent( hostName )
        main.scapyHosts.append( getattr( main, hostName ) )

    main.step( "Start scapy components" )
    for host in main.scapyHosts:
        host.startHostCli()
        host.startScapy()
        host.updateSelf()
        main.log.debug( host.name )
        main.log.debug( host.hostIp )
        main.log.debug( host.hostMac )

    utilities.assert_equals( expect=main.TRUE,
                             actual=scapyResult,
                             onpass="Successfully created Scapy Components",
                             onfail="Failed to discover Scapy Components" )
def CASE14( self, main ):
"""
Discover all hosts and store its data to a dictionary
"""
main.case( "Discover all hosts" )
stepResult = | |
# coding: UTF-8
import os
os.environ['TF_CPP_MIN_LOG_LEVEL']='2'
import warnings
warnings.filterwarnings("ignore")
import argparse
import numpy as np
import shutil
import PIL
import time
from imageio import imread, imsave
from googletrans import Translator
import torch
import torchvision
import torch.nn.functional as F
from torchvision import transforms as T
import clip
os.environ['KMP_DUPLICATE_LIB_OK']='True'
from clip_fft import to_valid_rgb, fft_image, resume_fft, pixel_image
from utils import slice_imgs, derivat, sim_func, slerp, basename, file_list, img_list, img_read, pad_up_to, txt_clean, latent_anima, cvshow, checkout, save_cfg, old_torch
import transforms
import depth
try:  # progress bar for notebooks
    # get_ipython() only exists inside IPython/Jupyter; in a plain console
    # the lookup raises NameError and we fall back to the console bar.
    get_ipython().__class__.__name__
    from progress_bar import ProgressIPy as ProgressBar
except Exception:  # normal console
    # was a bare `except:`, which would also swallow SystemExit and
    # KeyboardInterrupt raised during the probe/import
    from progress_bar import ProgressBar
clip_models = ['ViT-B/16', 'ViT-B/32', 'RN50', 'RN50x4', 'RN50x16', 'RN101']
def get_args():
    """Parse command-line arguments and normalize derived settings.

    Post-processing applied to the parsed namespace:
    - size "W-H" string -> [H, W] int list (single value is duplicated)
    - gen upper-cased; invert turned into a +/-1. multiplier
    - a few option overrides that depend on the chosen generator/model
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--size', default='1280-720', help='Output resolution')
    parser.add_argument('-t', '--in_txt', default=None, help='Text string or file to process (main topic)')
    parser.add_argument('-pre', '--in_txt_pre', default=None, help='Prefix for input text')
    parser.add_argument('-post', '--in_txt_post', default=None, help='Postfix for input text')
    parser.add_argument('-t2', '--in_txt2', default=None, help='Text string or file to process (style)')
    parser.add_argument('-t0', '--in_txt0', default=None, help='input text to subtract')
    parser.add_argument('-im', '--in_img', default=None, help='input image or directory with images')
    parser.add_argument('-w0', '--weight0', default=0.3, type=float, help='weight for subtraction')
    parser.add_argument('-w2', '--weight2', default=0.5, type=float, help='weight for style')
    parser.add_argument('-wi', '--weight_img', default=0.5, type=float, help='weight for images')
    parser.add_argument('-r', '--resume', default=None, help='Resume from saved params or from an image')
    parser.add_argument( '--out_dir', default='_out')
    parser.add_argument('-tr', '--translate', action='store_true', help='Translate with Google Translate')
    parser.add_argument( '--invert', action='store_true', help='Invert criteria')
    # NOTE(review): type=bool is an argparse antipattern — any non-empty
    # string (including "-v False") parses as True; fixing it would change
    # the CLI contract, so it is only flagged here.
    parser.add_argument('-v', '--verbose', default=True, type=bool)
    # training
    parser.add_argument( '--gen', default='RGB', help='Generation (optimization) method: FFT or RGB')
    parser.add_argument('-m', '--model', default='ViT-B/32', choices=clip_models, help='Select CLIP model to use')
    parser.add_argument( '--steps', default=300, type=int, help='Iterations (frames) per scene (text line)')
    parser.add_argument( '--samples', default=100, type=int, help='Samples to evaluate per frame')
    parser.add_argument('-lr', '--lrate', default=1, type=float, help='Learning rate')
    # motion
    parser.add_argument('-opt', '--opt_step', default=1, type=int, help='How many optimizing steps per save/transform step')
    parser.add_argument('-sm', '--smooth', action='store_true', help='Smoothen interframe jittering for FFT method')
    parser.add_argument('-it', '--interpol', default=True, help='Interpolate topics? (or change by cut)')
    parser.add_argument( '--fstep', default=100, type=int, help='How many frames before changing motion')
    parser.add_argument( '--scale', default=0.012, type=float)
    parser.add_argument( '--shift', default=10., type=float, help='in pixels')
    parser.add_argument( '--angle', default=0.8, type=float, help='in degrees')
    parser.add_argument( '--shear', default=0.4, type=float)
    parser.add_argument( '--anima', default=True, help='Animate motion')
    # depth
    parser.add_argument('-d', '--depth', default=0, type=float, help='Add depth with such strength, if > 0')
    parser.add_argument( '--depth_model', default='AdaBins_nyu.pt', help='AdaBins model path')
    parser.add_argument( '--depth_mask', default='mask.jpg', help='depth mask path')
    parser.add_argument( '--depth_dir', default=None, help='Directory to save depth, if not None')
    # tweaks
    parser.add_argument('-a', '--align', default='overscan', choices=['central', 'uniform', 'overscan', 'overmax'], help='Sampling distribution')
    parser.add_argument('-tf', '--transform', default='custom', choices=['none', 'custom', 'elastic'], help='use augmenting transforms?')
    parser.add_argument( '--contrast', default=1.2, type=float)
    parser.add_argument( '--colors', default=2, type=float)
    parser.add_argument('-sh', '--sharp', default=None, type=float)
    parser.add_argument('-mc', '--macro', default=0.4, type=float, help='Endorse macro forms 0..1 ')
    parser.add_argument('-e', '--enforce', default=0, type=float, help='Enforce details (by boosting similarity between two parallel samples)')
    parser.add_argument('-x', '--expand', default=0, type=float, help='Boosts diversity (by enforcing difference between prev/next samples)')
    parser.add_argument('-n', '--noise', default=2., type=float, help='Add noise to make composition sparse (FFT only)') # 0.04
    parser.add_argument( '--sim', default='mix', help='Similarity function (angular/spherical/mixed; None = cossim)')
    parser.add_argument( '--rem', default=None, help='Dummy text to add to project name')
    a = parser.parse_args()

    # "1280-720" -> [720, 1280]: downstream code expects [H, W]
    if a.size is not None: a.size = [int(s) for s in a.size.split('-')][::-1]
    if len(a.size)==1: a.size = a.size * 2
    a.gen = a.gen.upper()
    # invert becomes a +/-1. multiplier applied to the similarity loss
    a.invert = -1. if a.invert is True else 1.

    # Overriding some parameters, depending on other settings
    if a.gen == 'RGB':
        a.smooth = False
        a.align = 'overscan'
    if a.sharp is None: a.sharp = -1. if a.gen == 'RGB' else 1.
    if a.model == 'ViT-B/16': a.sim = 'cossim'

    return a
def depth_transform(img_t, img_np, depth_infer, depth_mask, size, depthX=0, scale=1., shift=[0,0], colors=1, depth_dir=None, save_num=0):
    """Depth-warp the current frame ("3D pan/zoom" driven by the motion params).

    NOTE(review): `scale` is indexed as scale[0] below, so despite the
    scalar-looking default it must be passed as a sequence — confirm the
    callers.  The mutable default shift=[0,0] is never mutated here but
    is still a risky pattern.
    """
    # d X/Y define the origin point of the depth warp, effectively a "3D pan zoom", [-1..1]
    # plus = look ahead, minus = look aside
    dX = 100. * shift[0] / size[1]
    dY = 100. * shift[1] / size[0]
    # dZ = movement direction: 1 away (zoom out), 0 towards (zoom in), 0.5 stay
    dZ = 0.5 + 23. * (scale[0]-1)
    # dZ += 0.5 * float(math.sin(((save_num % 70)/70) * math.pi * 2))

    if img_np is None:
        # no cached numpy frame: re-render an RGB image from the tensor
        img2 = img_t.clone().detach()
        par, imag, _ = pixel_image(img2.shape, resume=img2)
        img2 = to_valid_rgb(imag, colors=colors)()
        img2 = img2.detach().cpu().numpy()[0]
        img2 = (np.transpose(img2, (1,2,0)))  # [h,w,c]
        img2 = np.clip(img2*255, 0, 255).astype(np.uint8)
        image_pil = T.ToPILImage()(img2)
        del img2
    else:
        image_pil = T.ToPILImage()(img_np)

    # depth is estimated at half resolution; depthwarp rescales internally
    size2 = [s//2 for s in size]
    img = depth.depthwarp(img_t, image_pil, depth_infer, depth_mask, size2, depthX, [dX,dY], dZ, rescale=0.5, clip_range=2, save_path=depth_dir, save_num=save_num)
    return img
def frame_transform(img, size, angle, shift, scale, shear):
    """Apply one affine motion step (rotate/shift/zoom/shear) to the frame,
    then crop/pad back to `size`.  Branches on the installed torchvision
    version, whose affine API changed between 1.7 and 1.8."""
    if old_torch(): # 1.7.1
        # legacy keywords: fillcolor / resample
        img = T.functional.affine(img, angle, shift, scale, shear, fillcolor=0, resample=PIL.Image.BILINEAR)
        img = T.functional.center_crop(img, size)
        img = pad_up_to(img, size)
    else: # 1.8+
        # new keywords: fill / interpolation
        img = T.functional.affine(img, angle, shift, scale, shear, fill=0, interpolation=T.InterpolationMode.BILINEAR)
        img = T.functional.center_crop(img, size) # on 1.8+ also pads
    return img
def main():
a = get_args()
# Load CLIP models
model_clip, _ = clip.load(a.model, jit=old_torch())
try:
a.modsize = model_clip.visual.input_resolution
except:
a.modsize = 288 if a.model == 'RN50x4' else 384 if a.model == 'RN50x16' else 224
if a.verbose is True: print(' using model', a.model)
xmem = {'ViT-B/16':0.25, 'RN50':0.5, 'RN50x4':0.16, 'RN50x16':0.06, 'RN101':0.33}
if a.model in xmem.keys():
a.samples = int(a.samples * xmem[a.model])
if a.depth > 0:
depth_infer, depth_mask = depth.init_adabins(model_path=a.depth_model, mask_path=a.depth_mask, size=a.size)
if a.depth_dir is not None:
os.makedirs(a.depth_dir, exist_ok=True)
print(' depth dir:', a.depth_dir)
if a.translate:
translator = Translator()
if a.enforce != 0:
a.samples = int(a.samples * 0.5)
if 'elastic' in a.transform:
trform_f = transforms.transforms_elastic
a.samples = int(a.samples * 0.95)
elif 'custom' in a.transform:
trform_f = transforms.transforms_custom
a.samples = int(a.samples * 0.95)
else:
trform_f = transforms.normalize()
def enc_text(txt):
if a.translate:
txt = translator.translate(txt, dest='en').text
emb = model_clip.encode_text(clip.tokenize(txt).cuda()[:77])
return emb.detach().clone()
def enc_image(img_file):
img_t = torch.from_numpy(img_read(img_file)/255.).unsqueeze(0).permute(0,3,1,2).cuda()[:,:3,:,:]
in_sliced = slice_imgs([img_t], a.samples, a.modsize, transforms.normalize(), a.align)[0]
emb = model_clip.encode_image(in_sliced)
return emb.detach().clone()
# Encode inputs
count = 0
texts = []
styles = []
images = []
if a.in_txt is not None:
if os.path.isfile(a.in_txt):
with open(a.in_txt, 'r', encoding="utf-8") as f:
texts = f.readlines()
texts = [tt.strip() for tt in texts if len(tt.strip()) > 0 and tt[0] != '#']
else:
texts = [a.in_txt]
if a.in_txt_pre is not None:
texts = [' '.join([a.in_txt_pre, tt]).strip() for tt in texts]
if a.in_txt_post is not None:
texts = [' '.join([tt, a.in_txt_post]).strip() for tt in texts]
key_txt_encs = [enc_text(txt) for txt in texts]
count = max(count, len(key_txt_encs))
if a.in_txt2 is not None:
if os.path.isfile(a.in_txt2):
with open(a.in_txt2, 'r', encoding="utf-8") as f:
styles = f.readlines()
styles = [tt.strip() for tt in styles if len(tt.strip()) > 0 and tt[0] != '#']
else:
styles = [a.in_txt2]
key_styl_encs = [enc_text(style) for style in styles]
count = max(count, len(key_styl_encs))
if a.in_img is not None and os.path.exists(a.in_img):
images = file_list(a.in_img) if os.path.isdir(a.in_img) else [a.in_img]
key_img_encs = [enc_image(image) for image in images]
count = max(count, len(key_img_encs))
assert count > 0, "No inputs found!"
if a.in_txt0 is not None:
if a.verbose is True: print(' subtract text:', a.in_txt0)
if a.translate:
a.in_txt0 = translator.translate(a.in_txt0, dest='en').text
# if a.verbose is True: print(' translated to:', a.in_txt0)
anti_txt_encs = [enc_text(txt) for txt in a.in_txt0.split('.')]
if a.verbose is True: print(' samples:', a.samples)
global params_tmp
shape = [1, 3, *a.size]
if a.gen == 'RGB':
params_tmp, _, sz = pixel_image(shape, a.resume)
params_tmp = params_tmp[0].cuda().detach()
else:
params_tmp, sz = resume_fft(a.resume, shape, decay=1.5, sd=1)
if sz is not None: a.size = sz
# [glob]steps = for save/move, opt_steps = for optimization cycle
steps = a.steps
glob_steps = count * steps
opt_steps = steps * a.opt_step
if glob_steps == a.fstep: a.fstep = glob_steps // 2 # otherwise no motion
workname = basename(a.in_txt) if a.in_txt is not None else basename(a.in_img)
workname = txt_clean(workname)
workdir = os.path.join(a.out_dir, workname)
if a.rem is not None: workdir += '-%s' % a.rem
if 'RN' in a.model.upper(): workdir += '-%s' % a.model
if a.noise > 0: workdir += '-n%.2g' % a.noise
if a.macro > 0: workdir += '-m%.2g' % a.macro
if a.smooth is True: workdir += '-sm'
if a.transform != 'custom': workdir += '-tf%s' % a.transform
if a.gen == 'RGB': workdir += '-rgb'
tempdir = os.path.join(workdir, 'ttt')
os.makedirs(tempdir, exist_ok=True)
save_cfg(a, workdir)
if a.in_txt is not None and os.path.isfile(a.in_txt):
shutil.copy(a.in_txt, os.path.join(workdir, os.path.basename(a.in_txt)))
if a.in_txt2 is not None and os.path.isfile(a.in_txt2):
shutil.copy(a.in_txt2, os.path.join(workdir, os.path.basename(a.in_txt2)))
midp = 0.5
if a.anima:
if a.gen == 'RGB': # zoom in
m_scale = latent_anima([1], glob_steps, a.fstep, uniform=True, cubic=True, start_lat=[-0.3], verbose=False)
m_scale | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
import time
import logging, socket, traceback
from os import path as opath
from struct import pack
import cPickle as pickle
import aerospike
from aerospike.exception import RecordNotFound, AerospikeError
from leveldb import LevelDBError
from threathunter_common.util import ip_match
from . import utils
from nebula_utils import settings
from nebula_utils.persist_utils import utils as putils
from nebula_utils.persist_utils.db import scan_keys, get_db
from nebula_utils.persist_utils import settings as psettings
from nebula_utils.persist_utils.metrics import catch_latency
location = 'nebula_utils.compute.cache'
logger = logging.getLogger(location)
DEBUG_PREFIX = '==============='
# One statistics dict per dimension
Stat_Dict = dict() # dimension : dict( 各维度的统计)
Hook_Functions = []
def get_stat_db_path(timestamp):
    """Return the report/statistics DB directory for the hour containing
    *timestamp*."""
    hour_dir = putils.get_path(timestamp, settings.DB_ROOT_PATH)
    return opath.join(hour_dir, psettings.STAT_DB_PATH)
def get_total_stat_key(key_type):
    """
    Build the leveldb key for the current hour under which the aggregate
    ("top over all keys") of *key_type* is stored: key_type[1 byte] + 'all'.
    """
    return get_stat_key_prefix(key_type) + 'all'
def get_click_stat_key(key, dimension):
    """
    (Replaces the old generate_stat_key.)  Build the leveldb key under
    which the statistics for *key* are stored.

    Key layout: type(1 byte) | key(len(key) bytes)

    dimension: statistics dimension, one of: ip, ipc, did, uid, page
    key: the value to build statistics for (an ip, ipc prefix, device id,
         ...); the special value "all" addresses the per-hour aggregate.
    Returns the packed key, or None when it cannot be built.
    """
    stat_type = utils.Dimension_Stat_Prefix.get(dimension, None)
    if stat_type is None:
        # BUGFIX: was logging.error (root logger), bypassing this
        # module's configured logger
        logger.error('invalid dimension type:%s , can not generate stat key', dimension)
        return None
    if stat_type == utils.IPC_Stat_Type:
        if key == 'all':
            res = pack('>B', stat_type) + key
        else:
            # a /24 prefix: pack only the first three IPv4 octets
            ip_segs = key.split('.')
            res = pack('>BBBB', stat_type, int(ip_segs[0]), int(ip_segs[1]), int(ip_segs[2]))
    elif stat_type == utils.IP_Stat_Type:
        if key == 'all':
            key_hex = key
        else:
            if key.find(':') != -1:
                # in case: port in c_ip column
                key = key.split(':')[0]
            try:
                key_hex = socket.inet_aton(key)
            except Exception:
                logger.error(u'key %s不能直接解析', key)
                return None
        t_hex = pack('>B', stat_type)
        res = t_hex + key_hex
    elif stat_type in (utils.UID_Stat_Type, utils.DID_Stat_Type, utils.PAGE_Stat_Type):
        t_hex = pack('>B', stat_type)
        res = t_hex + key
    else:
        # BUGFIX: a mapped-but-unsupported stat_type previously fell
        # through and raised UnboundLocalError on `res`; fail explicitly
        # like the other error paths instead.
        logger.error('unsupported stat type:%s for dimension:%s', stat_type, dimension)
        return None
    return res
@catch_latency("统计点击数")
def gen_click_counter(Stat_Dict):
    """
    Store the per-dimension click counts into aerospike.

    Data source for /platform/stats/offline_serial: consecutive hourly
    figures for every dimension, plus the overall totals.
    """
    ContinuousDB.get_db()
    work_ts = settings.Working_TS
    work_day = settings.Working_DAY
    # Per dimension: the variables whose values (set sizes or counts) are
    # recorded alongside the dimension's own click count.
    related_vars = dict(
        did=['did__visit__dynamic_distinct_ip__1h__slot',  # distinct ips per did
             'did__visit__dynamic_distinct_user__1h__slot',  # distinct users per did
             'did__visit__dynamic_distinct_page__1h__slot',  # distinct pages per did
             'did__visit__incident_count__1h__slot'],  # incidents per did
        user=['user__visit__dynamic_distinct_ip__1h__slot',  # distinct ips per user
              'user__visit__dynamic_distinct_did__1h__slot',  # distinct dids per user
              'user__visit__dynamic_distinct_page__1h__slot',  # distinct pages per user
              'user__visit__incident_count__1h__slot'],  # incidents per user
        ip=['ip__visit__dynamic_distinct_did__1h__slot',  # distinct dids per ip
            'ip__visit__dynamic_distinct_user__1h__slot',  # distinct users per ip
            'ip__visit__dynamic_distinct_page__1h__slot',  # distinct pages per ip
            'ip__visit__incident_count__1h__slot'],  # incidents per ip
        page=['page__visit__dynamic_distinct_ip__1h__slot',  # distinct ips per page
              'page__visit__dynamic_distinct_user__1h__slot',  # distinct users per page
              'page__visit__dynamic_distinct_did__1h__slot',  # distinct dids per page
              'page__visit__incident_count__1h__slot'],)  # incidents per page

    # Collect the variables associated with each dimension and store them
    # into aerospike, one record per dimension key.
    for dim, var_name in utils.Click_Variable_Names.iteritems():
        dim_stat_dict = Stat_Dict.get(dim, None)
        # Each related ip/page/did/user count becomes a tag on the record,
        # e.g. {'ip': '172.16.0.1', 'user': 3, 'did': 7, 'page': 10, 'incident': 9}
        if dim_stat_dict is None:
            logger.info('维度:%s的统计字典为空', dim)
            continue
        logger.debug('维度%s的key是否都为空? %s' , dim, all(map(lambda x: x is None, dim_stat_dict.iterkeys())))
        dim_vars = related_vars[dim]
        for key, var_dict in dim_stat_dict.iteritems():
            if key == 'all':
                # the aggregate is handled by the "total" section below
                continue
            record = dict()
            record[var_name] = var_dict.get(var_name, 0)
            for var in dim_vars:
                # sets/lists are stored by cardinality, numbers as-is;
                # any other type is silently skipped
                a = var_dict.get(var, None)
                if isinstance(a, (list,set)):
                    record[var] = len(a)
                elif isinstance(a, (int,float)):
                    record[var] = a
                elif a is None:
                    record[var] = 0
            ContinuousDB.add(key, dim, work_day, work_ts, record)

    # Collect the dimension-less count / distinct-count statistics and
    # store them under the ('all', 'total') record.
    dim_stat_dict = Stat_Dict.pop('total', {})
    var_dict = dim_stat_dict.get('all', {})
    total_vars = [
        'total__visit__dynamic_distinct_ip__1h__slot',  # distinct ips
        'total__visit__dynamic_distinct_did__1h__slot',  # distinct dids
        'total__visit__dynamic_distinct_user__1h__slot',  # distinct users
        'total__visit__incident_distinct_user__1h__slot',  # distinct risky users
        'total__visit__incident_count__1h__slot',  # incident count
        'total__visit__dynamic_count__1h__slot',  # total clicks (policy page, top-right)
        'total__visit__visitor_incident_count__1h__slot',  # policy page: visitor incidents
        'total__visit__account_incident_count__1h__slot',  # policy page: account incidents
        'total__visit__order_incident_count__1h__slot',  # policy page: order incidents
        'total__visit__transaction_incident_count__1h__slot',  # policy page: payment incidents
        'total__visit__marketing_incident_count__1h__slot',  # policy page: marketing incidents
        'total__visit__other_incident_count__1h__slot',  # policy page: other incidents
        'total__transaction__sum__1h__slot',
        'total__transaction__count__1h__slot',
    ]
    total_dict = {}
    for var in total_vars:
        var_value = var_dict.get(var, 0)
        if isinstance(var_value, (int,float)):
            total_dict[var] = var_value
        elif isinstance(var_value, set):
            total_dict[var] = len(var_value)
    ContinuousDB.add('all', 'total', work_day, work_ts, total_dict)
def gen_related_counter(Stat_Dict):
    """Data source for the continuous_top_related_statistic API (DEPRECATED).

    For the IP dimension this would store the names of associated users;
    for the other dimensions it would store the associated IPs.
    Used by: risk-analysis clickstream pages for the ip/user/did dimensions.
        ip dimension   -> visit counts of the associated users
        user dimension -> visit counts of the associated IPs
    When querying the visit count of one user associated with an IP, the
    user-dimension key must be queried instead; querying that variable from
    the ip dimension would yield a dict-typed statistic.
    Currently unused -- the whole implementation below is commented out and
    kept only as a record of the deprecated logic.
    """
    # work_ts = None
    # continuous_db = ContinuousDB.get_db()
    ## timestamp = putils.get_last_hour_timestamp()
    #
    # related_vars = dict( ip='ip__visit__user_dynamic_count__1h__slot',
    #                      user='user__visit__ip_dynamic_count__1h__slot',
    #                      did='did__visit__ip_dynamic_count__1h__slot')
    # for dim in utils.Click_Variable_Names.keys():
    #     dim_stat_dict = Stat_Dict.get(dim, None)
    #
    #     if dim_stat_dict is None:
    #         # stat dict for this dimension is empty
    #         logger.info('维度:%s的统计字典为空', dim)
    #         continue
    #
    #     var_name = related_vars.get(dim ,None)
    #     if var_name is None:
    #         # the page dimension has no cross-hour IP-visit requirement
    #         continue
    #     # key_type used for the reverse lookup
    #     if dim == 'ip':
    #         query_key_type = 'user'
    #     else:
    #         query_key_type = 'ip'
    #
    #     for key, var_dict in dim_stat_dict.iteritems():
    #         if key == 'all':
    #             continue
    #
    #         related_values = var_dict.get(var_name, {})
    #         for related_key, value in related_values.iteritems():
    #             # keys may repeat -- is a plain add() good enough?
    #             continuous_db.add(related_key, query_key_type, work_ts, dict(var_name=value))
def get_stat_key_prefix(key_type):
    """Return the 1-byte key prefix used for *key_type* in the stat LevelDB.

    key_type: a dimension name mapped in utils.Dimension_Stat_Prefix
        (e.g. 'ip', 'ipc', 'page', 'user', 'did').
    NOTE(review): for an unknown key_type, stat_type is None and
    pack('>B', None) raises struct.error -- callers appear to always pass a
    valid dimension; confirm before using with arbitrary input.
    """
    stat_type = utils.Dimension_Stat_Prefix.get(key_type, None)
    return pack('>B', stat_type)
def get_statistic(key, key_type, fromtime, endtime, var_names):
    """Fetch hourly report statistics for *key* from the stat LevelDB.

    Parameters:
        key: the lookup key; when falsy, every key of *key_type* is scanned
            (currently only the page dimension passes an empty key).
        key_type: ip, ipc, page, user, did
        fromtime: timestamp used to locate the hourly stat database file.
        endtime: unused here; kept for interface compatibility.
        var_names: iterable of variable names to keep in the result.
    Return:
        if key is falsy:
            {stat key minus the leading type byte: {var_name: value}}
        else:
            {var_name: value}
        None on any error.
    """
    var_names_set = set(var_names)
    logger.debug(DEBUG_PREFIX + 'in get_statistic...')
    try:
        db_path = get_stat_db_path(fromtime)
    except Exception:
        return None

    if key:
        logger.debug(DEBUG_PREFIX + " 有指定特定的key")
        logger.debug(DEBUG_PREFIX + "传入获取统计数据库的key的参数key:%s, key_type:%s", str(key), str(key_type))
        key = get_click_stat_key(key, key_type)
        if key is None:
            return None
        logger.debug(DEBUG_PREFIX + "传入获取统计数据库的key是 %s", (key,))
        try:
            db = get_db(db_path)
            return get_key_stat(key, db, var_names_set)
        except KeyError:
            logger.error("db:%s don't have key: %s", db_path, key)
            return None
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        finally:
            # Drop the handle so the LevelDB file is released promptly.
            # ('db' in locals() replaces the Python2-only locals().has_key)
            if 'db' in locals():
                del db
    else:
        logger.debug(DEBUG_PREFIX + "会遍历所有的key")
        # ret maps each bare key to {var_name1: ..., var_name2: ...}
        ret = dict()
        # With no key given, walk every key of this dimension and load the
        # requested var_names from each (currently only used for pages).
        prefix = get_stat_key_prefix(key_type)
        try:
            db = get_db(db_path)
            keys = scan_keys(prefix, db, include_value=False)
            for key in keys:
                key_stat = get_key_stat(key, db, var_names_set)
                # Strip the leading type byte so callers see the bare key.
                ret[key[1:]] = key_stat
        except LevelDBError:
            logger.error("db:%s 统计结果不正确", db_path)
            return None
        except Exception as e:
            logger.error(e)
            return None
        finally:
            if 'db' in locals():
                del db
        return ret
        # NOTE: the old trailing `return None` after this point was
        # unreachable (every path above already returns) and was removed.
def get_all_statistic(key_type, fromtime, endtime, var_names):
    """Fetch the dimension-wide ('total') statistic for *key_type*.

    Parameters mirror get_statistic(); *endtime* is unused.
    Return: {var_name: value}, or None when the db/key is missing or on
    any error.
    """
    total_key = get_total_stat_key(key_type)
    var_names_set = set(var_names)
    try:
        db_path = get_stat_db_path(fromtime)
    except Exception:
        logger.error("Error when get %s type's total statistic", key_type)
        traceback.print_exc()
        return None
    if not opath.exists(db_path):
        return None
    try:
        db = get_db(db_path)
        return get_key_stat(total_key, db, var_names_set)
    except KeyError:
        logger.error("db:%s don't have key: %s", db_path, (total_key,))
        return None
    except LevelDBError:
        logger.error("db:%s 统计结果不正确", db_path)
        return None
    finally:
        # Drop the handle so the LevelDB file is released promptly.
        # ('db' in locals() replaces the Python2-only locals().has_key)
        if 'db' in locals():
            del db
def get_key_stat(key, db, var_names_set):
    """Load the pickled stat dict stored under *key* in *db* and filter it.

    Parameters:
        key: raw database key (including the leading type byte).
        db: handle exposing Get(key); expected to raise KeyError when the
            key is absent (callers catch that).
        var_names_set: set of wanted variable names; when empty, the
            result is an empty dict (original behavior preserved).
    Return: {var_name: value} restricted to var_names_set.
    """
    value = db.Get(key)
    # SECURITY NOTE: pickle.loads on stored bytes -- only acceptable because
    # this database is written by the same process; never feed it untrusted
    # data.
    jvalue = pickle.loads(value)
    # .items() instead of the Python2-only .iteritems(): identical result,
    # and the helper now also runs under Python 3.
    return dict((k, v) for k, v in jvalue.items()
                if var_names_set and k in var_names_set)
@catch_latency("统计风险事件")
def gen_risk_incidents(stat_dict):
    """Build per-IP risk incidents from the hourly stat dict and POST them
    to the platform's risk_incidents API.

    stat_dict: nested stats per dimension; only the 'ip' dimension is used
    here -- incidents are keyed by IP.
    NOTE: relies on Python2-only idioms (cmp-based sorted); do not run
    under Python 3 without porting.
    """
    # Risk incidents are associated via IP.
    ip_dimension = stat_dict.get('ip', {})
    risk_incident = list()
    for ip, variables in ip_dimension.items():
        # Incident count for this IP; skip IPs without incidents and the
        # synthetic 'all' aggregate key.
        ip_incident_count = variables.get('ip__visit__incident_count__1h__slot', 0)
        if not ip_incident_count or ip == 'all':
            continue
        # Collect the incident's notice statistics.
        incident = dict()
        incident['ip'] = ip
        incident['associated_events'] = list()  # ids of events tied to this incident
        incident['start_time'] = variables.pop('ip__visit__incident_min_timestamp__1h__slot', 0)
        incident['strategies'] = variables.pop('ip__visit__scene_incident_count_strategy__1h__slot', {})
        incident['hit_tags'] = variables.pop('ip__visit__tag_incident_count__1h__slot', {})
        incident['risk_score'] = compute_risk_score(incident['strategies'], ip_incident_count)
        # Top-10 most-hit URIs, sorted descending by count (Py2 cmp sort).
        uri_stems = sorted(variables.pop('ip__visit__page_incident_count__1h__slot', {}).items(),
                           lambda x, y: cmp(x[1], y[1]), reverse=True)[:10]
        incident['uri_stems'] = {uri: value for uri, value in uri_stems}
        # Aggregate the URI hit counts per host.
        incident['hosts'] = dict()
        for uri, count in incident['uri_stems'].items():
            if uri:
                host, _ = putils.parse_host_url_path(uri)
            else:
                host = uri
            if incident['hosts'].get(host, None):
                incident['hosts'][host] += count
            else:
                incident['hosts'][host] = count
        incident['most_visited'] = sorted(incident['uri_stems'].items(),
                                          lambda x, y: cmp(x[1], y[1]), reverse=True)[0][0] if incident['uri_stems'] else ''
        incident['peak'] = variables.pop('ip__visit__incident_max_rate__1h__slot', {}).get('max_count', 0)
        incident['dids'] = variables.pop('ip__visit__did_incident_count__1h__slot', {})
        incident['associated_users'] = variables.pop('ip__visit__user_incident_count__1h__slot', {})
        incident['users_count'] = len(variables.pop('ip__visit__incident_distinct_user__1h__slot', []))
        incident['associated_orders'] = dict()
        incident['status'] = 0
        risk_incident.append(incident)
    # Ship the collected incidents to the web platform in one POST.
    from tornado.httpclient import HTTPClient, HTTPError
    from tornado.escape import json_encode, json_decode
    auth_code = putils.get_auth_code()
    incident_url = 'http://{}:{}/platform/risk_incidents?auth={}'.format(
        settings.WebUI_Address, settings.WebUI_Port, auth_code)
    client = HTTPClient()
    try:
        _ = client.fetch(incident_url, method='POST', body=json_encode(risk_incident))
        res = json_decode(_.body)
        if res.get('msg', '') != 'ok':
            # insertion rejected by the platform
            logger.error('新增风险事件失败,返回: {}'.format(res))
    except HTTPError:
        # most likely the insert timed out
        logger.error(u'很有可能插入风险事件超时')
def compute_risk_score(strategies, incident_count):
    """Compute a risk score from per-strategy hit counts and weights.

    For every scene (category) the weighted scores of its strategies are
    summed and averaged over *incident_count*; the final risk score is the
    maximum average across scenes.

    Parameters:
        strategies: {category: {strategy_name: hit_count}}
        incident_count: total incident count used as the divisor.
    Return: int risk score; 0 when there are no strategies or no incidents.
    """
    # Guard against ZeroDivisionError: with no incidents there is no
    # meaningful average, so report zero risk.
    if not incident_count:
        return 0
    risk_scores = list()
    for category, category_strategies in strategies.items():
        # Weighted total for this scene, per strategy weight table.
        category_score = 0
        for strategy, count in category_strategies.items():
            strategy_score = putils.Strategies_Weigh.get(strategy, {}).get('score', 0)
            category_score += strategy_score * count
        risk_scores.append(int(category_score / incident_count))
    return max(risk_scores) if risk_scores else 0
class ContinuousDB(object):
db = None
db_name = settings.AeroSpike_DB_Name #'offline'
ttl = settings.AeroSpike_DB_Expire * 3600 # 数据过期时间 单位: 秒
@classmethod
def get_db(cls):
    """Return the shared aerospike client, connecting lazily on first use."""
    if cls.db is None:
        cls.db = aerospike.client(settings.ContinuousDB_Config).connect()
    return cls.db
@classmethod
def add(cls, key, key_type, day, timestamp, vals):
    """Store *vals* for *key* at *timestamp* in aerospike.

    Parameters:
        key: dimension key (e.g. an ip); None is silently ignored.
        key_type: aerospike set name (the dimension name).
        day: date string appended to the key so records rotate daily.
        timestamp: bin name (stored as str) that holds *vals*.
        vals: dict of statistics for this time slot.
    Errors from aerospike are logged, not raised.
    """
    if key is None:
        return
    # Suffix the key with the day so each day's record expires via TTL
    # instead of one record growing forever.
    key = '%s_%s' % (key, day)
    db_key = (cls.db_name, key_type, key)
    try:
        # Fixed: go through get_db() (lazy connect) instead of touching
        # cls.db directly, which is None until the first get_db() call.
        cls.get_db().put(db_key, {str(timestamp): vals}, meta={'ttl': cls.ttl})
    except AerospikeError as e:
        logger.error('Aerospike Error: %s %s', e.msg, e.code)
@classmethod
def query(cls, key, key_type, timestamp, var_list):
    """Read the statistics stored for *key* at *timestamp*.

    Parameters:
        key: full record key (the caller already appended the day suffix).
        key_type: aerospike set name.
        timestamp: bin name to select.
        var_list: variable names to extract; missing ones default to 0.
    Return:
        {var: value}, or None when the record/bin is missing or on error.
    """
    db_key = (cls.db_name, key_type, key)
    # Fixed: result must be pre-initialized -- the old code raised
    # UnboundLocalError at the final return when `bins` was empty.
    result = None
    try:
        # get_db() connects lazily; cls.db may still be None here.
        (key, meta, bins) = cls.get_db().select(db_key, timestamp)
        if bins:
            d = bins[timestamp]
            result = dict((var, d.get(var, 0)) for var in var_list)
    except RecordNotFound:
        return None
    except AerospikeError as e:
        logger.error('Aerospike Error: %s %s', e.msg, e.code)
        return None
    return result
@classmethod
def query_many(cls, key, key_type, timestamps, var_list):
"""
查询多个时间点的统计信息, 输出保留传入的时间戳的顺序
Return:
[ (timestamp, statistic_dict), | |
# Copyright 2013 10gen Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from bson.json_util import loads, dumps
import datetime
import mock
import webapps.server
from webapps.server.views import ratelimit
from webapps.lib.db import get_db
from webapps.lib.util import to_coll_name, get_collection_names
from flask import session
from pymongo.cursor import Cursor
from pymongo.errors import OperationFailure
from webapps.lib.MWSServerError import MWSServerError
from tests import MongoWSTestCase
from webapps.lib import CLIENTS_COLLECTION
class ViewsSetUpUnitTestCase(MongoWSTestCase):
    """Tests for resource creation, keep-alive, rate limiting and cache
    headers of the mws server views."""

    def test_create_mws_resource(self):
        """POSTing /mws/ twice within one session returns the same res_id."""
        url = '/mws/'
        rv = self.app.post(url)
        new_response_dict = loads(rv.data)
        self.assertIn('res_id', new_response_dict)
        res_id = new_response_dict['res_id']
        is_new = new_response_dict['is_new']
        self.assertIsNotNone(res_id)
        self.assertTrue(is_new)
        # check if res_id is unchanged
        rv = self.app.post(url)
        new_response_dict = loads(rv.data)
        new_res_id = new_response_dict['res_id']
        new_is_new = new_response_dict['is_new']
        self.assertIsNotNone(new_res_id)
        self.assertEqual(res_id, new_res_id)
        self.assertFalse(new_is_new)

    def test_create_mws_resource_new_session(self):
        """A fresh session (session_id deleted) gets a brand-new res_id."""
        url = '/mws/'
        rv = self.app.post(url)
        response_dict = loads(rv.data)
        self.assertIn('res_id', response_dict)
        res_id = response_dict['res_id']
        self.assertIsNotNone(res_id)
        with self.app.session_transaction() as sess:
            del sess['session_id']
        # check if res_id is unique
        rv = self.app.post(url)
        new_res_id = loads(rv.data)['res_id']
        self.assertIsNotNone(new_res_id)
        self.assertNotEqual(res_id, new_res_id)

    @mock.patch('webapps.server.views.datetime')
    def test_keep_mws_alive(self, datetime_mock):
        """keep-alive bumps the stored client timestamp to the mocked now."""
        first = datetime.datetime(2012, 7, 4)
        second = first + datetime.timedelta(days=1)
        datetime_mock.now.return_value = first
        db = get_db()
        # get a session to keep alive
        rv = self.app.post('/mws/')
        res_id = loads(rv.data)['res_id']
        with self.app.session_transaction() as sess:
            session_id = sess['session_id']
        res = db.clients.find({'res_id': res_id, 'session_id': session_id},
                              {'timestamp': 1})
        _id = res[0]['_id']
        old_ts = res[0]['timestamp']
        self.assertEqual(old_ts, first)
        datetime_mock.now.return_value = second
        url = '/mws/' + res_id + '/keep-alive'
        rv = self.app.post(url)
        self.assertEqual(rv.status_code, 204)
        newres = db.clients.find({'_id': _id}, {'timestamp': 1})
        self.assertEqual(newres[0]['timestamp'], second)

    def test_ratelimit(self):
        """Requests beyond RATELIMIT_QUOTA raise a 429 MWSServerError."""
        rv = self.app.post('/mws/')
        self.res_id = loads(rv.data)['res_id']
        limit = self.real_app.config['RATELIMIT_QUOTA'] = 3

        def dummy():
            return ('', 204)

        with self.app.session_transaction() as client_sess:
            session_id = client_sess['session_id']
        with self.real_app.test_request_context():
            session['session_id'] = session_id
            for i in range(limit):
                self.assertEqual(ratelimit(dummy)(), ('', 204))
            with self.assertRaises(MWSServerError) as cm:
                ratelimit(dummy)()
            self.assertEqual(cm.exception.error, 429)

    def test_ratelimit_no_session(self):
        """Rate limiting without a session_id yields a 401 MWSServerError."""
        def dummy():
            return ('', 204)

        with self.real_app.test_request_context():
            with self.assertRaises(MWSServerError) as cm:
                ratelimit(dummy)()
            self.assertEqual(cm.exception.error, 401)

    def test_nocache(self):
        """Every mws response carries no-cache/expires headers."""
        res = self.app.post('/mws/')
        self.assertEqual(res.headers['cache-control'], 'no-cache')
        self.assertEqual(res.headers['expires'], '0')
        res_id = loads(res.data)['res_id']
        res = self.app.get('/mws/%s/db/coll/find?{}' % res_id)
        self.assertEqual(res.headers['cache-control'], 'no-cache')
        self.assertEqual(res.headers['expires'], '0')
class DBTestCase(MongoWSTestCase):
    """Base class for tests that talk to the per-resource mws database."""

    def setUp(self):
        super(DBTestCase, self).setUp()
        # Todo: For stuff that isn't checking authentication,
        # we probably don't want to rely on/use the authentication code
        rv = self.app.post('/mws/')
        response_dict = loads(rv.data)
        self.assertIn('res_id', response_dict)
        self.res_id = response_dict['res_id']
        self.assertIsNotNone(self.res_id)
        self.db = get_db()
        self.make_request_url = '/mws/%s/db/%%s' % (self.res_id)

    def _make_request(self, endpoint, data, method, expected_status):
        """Issue a JSON request against *endpoint* and assert its status.

        data: a dict (None-valued entries are stripped), another
        JSON-encodable value, or None; GET requests carry it in the
        ?data= query parameter instead of the body.
        Returns the decoded JSON response body ({} when empty).
        """
        url = self.make_request_url % (endpoint)
        if data is not None:
            if isinstance(data, dict):
                data = dumps(
                    dict((k, v) for (k, v) in data.iteritems() if v is not None)
                )
            else:
                data = dumps(data)
        if method == self.app.get:
            # GET has no body; send the payload in the query string.
            url = '%s?data=%s' % (url, data)
            data = None
        result = method(url, data=data, content_type='application/json')
        actual_status = result.status_code
        self.assertEqual(
            actual_status, expected_status,
            ("Expected request status to be %s, got %s instead."
             " Full result: %s") %
            (expected_status, actual_status, result.data))
        result_dict = loads(result.data) if result.data else {}
        return result_dict

    def make_get_collection_names_request(self, expected_status=200):
        return self._make_request('getCollectionNames', None, self.app.get,
                                  expected_status)

    def make_db_drop_request(self, expected_status=204):
        # Drop targets the db itself, so rebuild the url without /db/%s.
        self.make_request_url = '/mws/%s/db%%s' % (self.res_id)
        return self._make_request('', None, self.app.delete, expected_status)
class DBCollectionTestCase(DBTestCase):
    """Base class for tests against one mws collection; wraps each
    collection endpoint with a helper that asserts the response status."""

    def setUp(self):
        super(DBCollectionTestCase, self).setUp()
        self.coll_name = 'test_collection'
        self.internal_coll_name = to_coll_name(self.res_id,
                                               self.coll_name)
        self.db = get_db()
        self.db_collection = self.db[self.internal_coll_name]
        self.make_request_url = '/mws/%s/db/%s/%%s' % \
            (self.res_id, self.coll_name)

    def tearDown(self):
        # Fixed: this used to call super(...).setUp(), re-running fixture
        # creation on teardown instead of tearing the test case down.
        super(DBCollectionTestCase, self).tearDown()
        self.db_collection.drop()

    def make_find_request(self, query=None, projection=None, skip=None,
                          limit=None, expected_status=200, cursor_id=0,
                          retrieved=0, count=0, drain_cursor=False):
        """GET /find with the full batched-cursor protocol parameters."""
        data = {
            'query': query,
            'projection': projection,
            'skip': skip,
            'limit': limit,
            'cursor_id': cursor_id,
            'retrieved': retrieved,
            'count': count,
            'drain_cursor': drain_cursor
        }
        return self._make_request('find', data, self.app.get,
                                  expected_status)

    def make_insert_request(self, document, expected_status=200):
        data = {'document': document}
        return self._make_request('insert', data, self.app.post,
                                  expected_status)

    def make_remove_request(self, constraint, just_one=False,
                            expected_status=204):
        data = {'constraint': constraint, 'just_one': just_one}
        return self._make_request('remove', data, self.app.delete,
                                  expected_status)

    def make_update_request(self, query, update, upsert=False, multi=False,
                            expected_status=204):
        data = {
            'query': query,
            'update': update,
            'upsert': upsert,
            'multi': multi,
        }
        return self._make_request('update', data, self.app.put,
                                  expected_status)

    def make_aggregate_request(self, query=None, expected_status=200):
        return self._make_request('aggregate', query, self.app.get,
                                  expected_status)

    def make_drop_request(self, expected_status=204):
        return self._make_request('drop', None, self.app.delete,
                                  expected_status)

    def make_count_request(self, query=None, skip=None, limit=None,
                           expected_status=200):
        data = {'query': query, 'skip': skip, 'limit': limit}
        return self._make_request('count', data, self.app.get, expected_status)

    def set_session_id(self, new_id):
        """Overwrite the client session id (used to simulate bad sessions)."""
        with self.app.session_transaction() as sess:
            sess['session_id'] = new_id
class FindUnitTestCase(DBCollectionTestCase):
    """Tests for the find endpoint and its batched-cursor protocol.

    NOTE: Python 2 only (xrange, long, assertItemsEqual).
    """

    def ensure_cursor_death(self, collection, cursor_id, retrieved):
        """Assert that the server-side cursor is dead: advancing it must
        raise StopIteration or OperationFailure."""
        batch_size = self.real_app.config['CURSOR_BATCH_SIZE']
        cursor = Cursor(collection, _cursor_id=cursor_id,
                        limit=batch_size, _retrieved=retrieved)
        try:
            cursor.next()
        except StopIteration:
            pass
        except OperationFailure:
            pass
        else:
            self.fail('Cursor was not killed')

    def verify_cursor(self, num_docs, **kwargs):
        """Insert *num_docs* docs and pull the whole result set through the
        HTTP cursor protocol, checking count, batching and ordering; then
        verify the server-side cursor has been killed."""
        self.db_collection.drop()
        docs = [{'val': i} for i in xrange(num_docs)]
        total_received = 0
        self.db_collection.insert(docs)
        query = {}
        response = self.make_find_request(query=query, **kwargs)
        count = response['count']
        if kwargs.get('limit') is not None:
            self.assertEqual(count, kwargs['limit'])
        else:
            self.assertEqual(count, num_docs)
        expected = kwargs['limit'] if kwargs.get('limit') else num_docs
        if kwargs.get('drain_cursor'):
            # Draining returns everything in a single response.
            self.assertEqual(len(response['result']), count)
            total_received += len(response['result'])
        while total_received != expected:
            values = [r['val'] for r in response['result']]
            cursor_id = response['cursor_id']
            retrieved = len(response['result'])
            # Each batch must continue exactly where the previous stopped.
            self.assertItemsEqual(
                values, range(total_received, total_received+retrieved))
            total_received += retrieved
            if total_received == expected:
                break
            response = self.make_find_request(query=query, cursor_id=cursor_id,
                                              retrieved=total_received,
                                              count=count, **kwargs)
        self.ensure_cursor_death(self.db_collection,
                                 long(response['cursor_id']),
                                 retrieved=total_received)

    def test_find(self):
        """A single-document find returns count, cursor_id and result."""
        query = {'name': 'mongo'}
        self.db_collection.insert(query)
        result = self.make_find_request(query)
        self.assertEqual(len(result), 3)
        self.assertEqual(result['count'], 1)
        self.assertEqual(result['cursor_id'], '0')
        self.assertEqual(len(result['result']), 1)
        self.assertEqual(result['result'][0]['name'], 'mongo')

    def test_cursor(self):
        """Cursor batching works around the batch-size boundary."""
        batch_size = self.real_app.config['CURSOR_BATCH_SIZE']
        self.verify_cursor(83)
        self.verify_cursor(100)
        self.verify_cursor(250)
        self.verify_cursor(batch_size)
        self.verify_cursor(batch_size+1)
        self.verify_cursor(batch_size-1)

    def test_invalid_cursor(self):
        """A cursor id the server never issued must yield a 400."""
        query = {}
        invalid_cursor = 1234
        docs = [{'val': i} for i in xrange(21)]
        self.db_collection.insert(docs)
        response = self.make_find_request(query=query)
        count = response['count']
        retrieved = len(response['result'])
        self.make_find_request(query=query, cursor_id=invalid_cursor,
                               retrieved=retrieved, count=count,
                               expected_status=400)

    def test_cursor_with_limit(self):
        batch_size = self.real_app.config['CURSOR_BATCH_SIZE']
        self.verify_cursor(100, limit=83)
        self.verify_cursor(100, limit=100)
        self.verify_cursor(100, limit=batch_size)
        self.verify_cursor(100, limit=batch_size+1)
        self.verify_cursor(100, limit=batch_size-1)

    def test_cursor_drain(self):
        batch_size = self.real_app.config['CURSOR_BATCH_SIZE']
        self.verify_cursor(100, drain_cursor=True)
        self.verify_cursor(batch_size, drain_cursor=True)
        self.verify_cursor(batch_size+1, drain_cursor=True)
        self.verify_cursor(batch_size-1, drain_cursor=True)

    def test_cursor_drain_with_limit(self):
        batch_size = self.real_app.config['CURSOR_BATCH_SIZE']
        self.verify_cursor(100, limit=100, drain_cursor=True)
        self.verify_cursor(100, limit=batch_size, drain_cursor=True)
        self.verify_cursor(100, limit=batch_size+1, drain_cursor=True)
        self.verify_cursor(100, limit=batch_size-1, drain_cursor=True)

    def test_skipping_results(self):
        self.db_collection.insert([{'val': i} for i in xrange(10)])
        response = self.make_find_request(query={}, skip=4)
        result = response['result']
        self.assertEqual(len(result), 6)
        values = [r['val'] for r in result]
        self.assertItemsEqual(values, range(4, 10))

    def test_limiting_results(self):
        self.db_collection.insert([{'val': i} for i in xrange(10)])
        response = self.make_find_request(query={}, limit=4)
        result = response['result']
        self.assertEqual(len(result), 4)
        values = [r['val'] for r in result]
        self.assertItemsEqual(values, range(4))

    def test_invalid_find_session(self):
        """Finding with an unknown session must yield a 403 error body."""
        self.set_session_id('invalid_id')
        document = {'name': 'mongo'}
        result = self.make_find_request(document, expected_status=403)
        error = {
            'error': 403,
            'reason': 'Session error. User does not have access to res_id',
            'detail': '',
        }
        self.assertEqual(result, error)
class InsertUnitTestCase(DBCollectionTestCase):
    """Tests for the insert endpoint."""

    def test_simple_insert(self):
        """A single document insert lands in the backing collection."""
        document = {'name': 'Mongo'}
        self.make_insert_request(document)
        result = self.db_collection.find()
        self.assertEqual(result.count(), 1)
        self.assertEqual(result[0]['name'], 'Mongo')

    def test_multiple_document_insert(self):
        """A list payload inserts every document."""
        document = [{'name': 'Mongo'}, {'name': '10gen'}]
        self.make_insert_request(document)
        result = self.db_collection.find()
        self.assertEqual(result.count(), 2)
        names = [r['name'] for r in result]
        self.assertItemsEqual(names, ['Mongo', '10gen'])

    def test_invalid_insert_session(self):
        """Inserting with an unknown session must yield a 403."""
        self.set_session_id('invalid_session')
        document = {'name': 'mongo'}
        self.make_insert_request(document, expected_status=403)

    def test_insert_quota(self):
        """A second insert past QUOTA_COLLECTION_SIZE is rejected with 403."""
        # Shrink the quota so the second insert overflows it.
        # (The unused `limit` local from the old chained assignment is gone.)
        self.real_app.config['QUOTA_COLLECTION_SIZE'] = 150
        self.make_insert_request([
            {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
        ], expected_status=200)
        result = self.make_insert_request([
            {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
        ], expected_status=403)
        error = {
            'error': 403,
            'reason': 'Collection size exceeded',
            'detail': ''
        }
        self.assertEqual(result, error)
class RemoveUnitTestCase(DBCollectionTestCase):
    """Tests for the remove endpoint."""

    def test_remove(self):
        """Removing by constraint deletes every matching document."""
        self.db_collection.insert([
            {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
        ])
        self.make_remove_request({'name': 'Mongo'})
        remaining = self.db_collection.find()
        self.assertEqual(remaining.count(), 1)
        self.assertEqual(remaining[0]['name'], 'NotMongo')

    def test_remove_one(self):
        """just_one=True deletes only a single matching document."""
        self.db_collection.insert([
            {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
        ])
        self.make_remove_request({'name': 'Mongo'}, just_one=True)
        names = [doc['name'] for doc in self.db_collection.find()]
        self.assertItemsEqual(names, ['Mongo', 'NotMongo'])

    def test_remove_requires_valid_res_id(self):
        """An unknown session may not remove anything (403)."""
        self.set_session_id('invalid_session')
        self.make_remove_request({}, expected_status=403)
class UpdateUnitTestCase(DBCollectionTestCase):
def test_upsert(self):
    """Upsert on an empty collection inserts the document."""
    result = self.db_collection.find({'name': 'Mongo'})
    self.assertEqual(result.count(), 0)
    self.make_update_request({}, {'name': 'Mongo'}, True)
    result = self.db_collection.find()
    self.assertEqual(result.count(), 1)
    self.assertEqual(result[0]['name'], 'Mongo')
def test_update_one(self):
    """Without multi=True only the first matching document is replaced.

    NOTE(review): the third positional argument of make_update_request is
    `upsert`, not `just_one`; it is harmless here because a match exists,
    but confirm the intent -- it looks like a copy-paste.
    """
    self.db_collection.insert([
        {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
    ])
    self.make_update_request({'name': 'Mongo'}, {'name': 'Mongo2'}, True)
    result = self.db_collection.find()
    names = [r['name'] for r in result]
    self.assertItemsEqual(names, ['Mongo', 'Mongo2', 'NotMongo'])
def test_update_multi(self):
    """multi=True applies the $set to every matching document."""
    self.db_collection.insert([
        {'name': 'Mongo'}, {'name': 'Mongo'}, {'name': 'NotMongo'}
    ])
    self.make_update_request(
        {'name': 'Mongo'},
        {'$set': {'name': 'Mongo2'}},
        False, True
    )
    result = self.db_collection.find()
    names = [r['name'] for r in result]
    self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_multi_upsert(self):
    """upsert+multi inserts when nothing matches, then multi-updates."""
    # Does not exist - upsert
    self.make_update_request({}, {'$set': {'name': 'Mongo'}}, True, True)
    result = self.db_collection.find()
    self.assertEqual(result.count(), 1)
    self.assertEqual(result[0]['name'], 'Mongo')
    # Exists - multi-update
    self.db_collection.insert([{'name': 'Mongo'}, {'name': 'NotMongo'}])
    self.make_update_request(
        {'name': 'Mongo'},
        {'$set': {'name': 'Mongo2'}},
        True, True
    )
    result = self.db_collection.find()
    names = [r['name'] for r in result]
    self.assertItemsEqual(names, ['Mongo2', 'Mongo2', 'NotMongo'])
def test_update_quota(self):
limit = self.real_app.config['QUOTA_COLLECTION_SIZE'] = 500
self.db_collection.insert([
{'name': 'Mongo'}, {'name': | |
"""
Django settings for vmi (Verify My Identity) project.
Copyright Videntity Systems Inc.
Generated by 'django-admin startproject' using Django 2.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
import datetime
import dj_database_url
from django.contrib.messages import constants as messages
from getenv import env
from .utils import bool_env
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# Fixed: the scrubbed `<KEY>` placeholder that sat here was not valid
# Python. The fallback below is for local development only -- production
# MUST set SECRET_KEY in the environment.
SECRET_KEY = env('SECRET_KEY', 'insecure-dev-only-secret-key-change-me')

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool_env(env('DEBUG', True))

# NOTE: uses os.getenv directly, so an unset var yields [''] -- kept as-is.
ALLOWED_HOSTS = os.getenv("ALLOWED_HOSTS", "").split(",")
# Application definition
INSTALLED_APPS = [
    # Django contrib apps
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Third-party apps
    'bootstrapform',
    'social_django',
    'phonenumber_field',
    'oauth2_provider',
    'rest_framework',
    'django_filters',
    # Project apps
    'apps.oidc',
    'apps.home',
    'apps.reports',
    'apps.accounts',
    'apps.ial',
    'apps.fido',
    'apps.mfa.backends.sms',
    'apps.api',
    # 'django_extensions',
]
MIDDLEWARE = [
    # Standard Django / social-auth stack
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'social_django.middleware.SocialAuthExceptionMiddleware',
    # Project middleware: MFA device verification + OIDC error handling
    'apps.mfa.middleware.DeviceVerificationMiddleware',
    'apps.mfa.middleware.AssertDeviceVerificationMiddleware',
    'apps.oidc.error_handlers.AuthenticationRequiredExceptionMiddleware',
    'apps.oidc.error_handlers.OIDCNoPromptMiddleware',
]
# Order matters: Django tries each backend until one authenticates.
AUTHENTICATION_BACKENDS = (
    # 'social_core.backends.google_openidconnect.GoogleOpenIdConnect',
    'django.contrib.auth.backends.ModelBackend',
    'apps.accounts.authentication_backends.EmailBackend',
    'apps.accounts.authentication_backends.SubjectBackend',
    # apps.accounts.ldap_auth_backends.LDAPBackend',
)

# Social auth (Google OpenID Connect) endpoints and credentials.
SOCIAL_AUTH_GOOGLE_URL = env(
    "SOCIAL_AUTH_GOOGLE_URL", 'https://accounts.google.com')
SOCIAL_AUTH_GOOGLE_OIDC_ENDPOINT = env(
    "SOCIAL_AUTH_GOOGLE_OIDC_ENDPOINT", 'https://accounts.google.com')
SOCIAL_AUTH_GOOGLE_OPENIDCONNECT_KEY = env(
    'SOCIAL_AUTH_GOOGLE_OPENIDCONNECT_KEY', '')
SOCIAL_AUTH_GOOGLE_OPENIDCONNECT_SECRET = env(
    'SOCIAL_AUTH_GOOGLE_OPENIDCONNECT_SECRET', '')
SOCIAL_AUTH_LOGIN_REDIRECT_URL = '/'

# Second-factor verification backends (FIDO2 and SMS codes).
VERIFICATION_BACKENDS = [
    'apps.fido.auth.backends.FIDO2Backend',
    'apps.mfa.backends.sms.backend.SMSBackend',
]
# Digits-only alphabet for generated SMS verification codes.
SMS_CODE_CHARSET = "1234567890"
ROOT_URLCONF = 'vmi.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates')],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
                # Exposes the SETTINGS_EXPORT values to templates.
                'django_settings_export.settings_export',
            ],
        },
    },
]

WSGI_APPLICATION = 'vmi.wsgi.application'

# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
# DATABASES_CUSTOM takes a dj-database-url style URL; defaults to sqlite.
DATABASES = {
    'default': dj_database_url.config(
        default=env('DATABASES_CUSTOM',
                    'sqlite:///{}/db/db.sqlite3'.format(BASE_DIR))),
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation'
                '.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Media storage: S3 settings apply when DEFAULT_FILE_STORAGE points at an
# S3 backend; the default is the local filesystem.
AWS_STORAGE_BUCKET_NAME = env(
    "AWS_STORAGE_BUCKET_NAME", "development-vmi-media-storage")
AWS_AUTO_CREATE_BUCKET = True
AWS_S3_FILE_OVERWRITE = False
AWS_QUERYSTRING_AUTH = False
DEFAULT_FILE_STORAGE = env("DEFAULT_FILE_STORAGE",
                           'django.core.files.storage.FileSystemStorage')
MEDIA_ROOT = os.path.join(BASE_DIR, 'uploads')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
# MEDIA_URL = 'http://localhost:8000/media/'
MEDIA_URL = '/media/'

# Map Django message levels onto Bootstrap alert CSS classes.
MESSAGE_TAGS = {
    messages.DEBUG: 'debug',
    messages.INFO: 'info',
    messages.SUCCESS: 'success',
    messages.WARNING: 'warning',
    messages.ERROR: 'danger',
}

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = 'sitestatic'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, "static-assets"),
]

# NOTE(review): a module-level ATOMIC_REQUESTS is not a Django setting;
# per-request transactions are configured via
# DATABASES['default']['ATOMIC_REQUESTS'] -- confirm whether this line is
# dead or read by project code.
ATOMIC_REQUESTS = True
REST_FRAMEWORK = {
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'oauth2_provider.contrib.rest_framework.OAuth2Authentication',
    ),
    'DEFAULT_FILTER_BACKENDS': (
        'django_filters.rest_framework.DjangoFilterBackend',
    ),
}

# OAUTH SETTINGS
OAUTH2_PROVIDER = {
    'SCOPES': {'openid': 'open id connect access'},
    'DEFAULT_SCOPES': ['openid'],
    'OAUTH2_VALIDATOR_CLASS': 'vmi.oauth2_validators.SingleAccessTokenValidator',
    'OAUTH2_SERVER_CLASS': 'apps.oidc.server.Server',
    'REQUEST_APPROVAL_PROMPT': 'auto',
    # Default token lifetime: 315360000 s = 10 years.
    'ACCESS_TOKEN_EXPIRE_SECONDS': int(env('ACCESS_TOKEN_EXPIRE_SECONDS', 315360000))
}
# Custom Grant model; the other models use the oauth2_provider defaults.
OAUTH2_PROVIDER_GRANT_MODEL = 'oidc.Grant'
OAUTH2_PROVIDER_ACCESS_TOKEN_MODEL = 'oauth2_provider.AccessToken'
OAUTH2_PROVIDER_APPLICATION_MODEL = 'oauth2_provider.Application'
OAUTH2_PROVIDER_REFRESH_TOKEN_MODEL = 'oauth2_provider.RefreshToken'
# Only the authorization-code flow (plus refresh tokens) is enabled.
OAUTH2_PROVIDER_ALLOWED_GRANT_TYPES = (
    "authorization_code",
    # "password",
    # "client_credentials",
    "refresh_token",
)
OAUTH2_PROVIDER_ALLOWED_RESPONSE_TYPES = (
    # "token",
    "code",
)
OIDC_PROVIDER = {
    # 'OIDC_ISSUER': 'http://localhost:8000',
    'OIDC_BASE_CLAIM_PROVIDER_CLASS': 'apps.oidc.claims.ClaimProvider',
    # Classes that contribute OIDC claims, in the order listed.
    'OIDC_CLAIM_PROVIDERS': [
        # Mandatory
        'apps.oidc.claims.UserClaimProvider',
        # Optional
        # The UserProfileClaimProvider currently gets all claims fetch-able via the
        # UserProfile.
        'apps.accounts.claims.UserProfileClaimProvider',
        'apps.accounts.claims.AddressClaimProvider',
        'apps.accounts.claims.IdentifierClaimProvider',
        'apps.accounts.claims.OrganizationAgentClaimProvider',
        'apps.accounts.claims.MembershipClaimProvider',
        'apps.accounts.claims.VerifiedPersonDataClaimProvider',
        # 'apps.accounts.claims.SubjectClaimProvider',
        # 'apps.accounts.claims.EmailVerifiedClaimProvider',
        # 'apps.accounts.claims.PhoneNumberClaimProvider',
        # 'apps.accounts.claims.IdentityAssuranceLevelClaimProvider',
        # 'apps.accounts.claims.AuthenticatorAssuranceLevelClaimProvider',
        # 'apps.accounts.claims.VectorsOfTrustClaimProvider',
        'apps.fido.claims.AuthenticatorAssuranceProvider',
        'apps.mfa.backends.sms.claims.AuthenticatorAssuranceProvider',
    ],
}
# Adding to allow other modes of SMS text delivery in the future.
SMS_STRATEGY = env('SMS_STRATEGY', 'AWS-SNS')

# Add a prefix to the Luhn check-digit calculation.
# This can help identify genuine subject ids and indicate provenance.
SUBJECT_LUHN_PREFIX = env('SUBJECT_LUHN_PREFIX', '')

# Branding / link settings surfaced in the UI.
APPLICATION_TITLE = env('DJANGO_APPLICATION_TITLE', "Share My Health Accounts")
KILLER_APP_TITLE = env('KILLER_APP_TITLE', 'Share My Health Web Application')
KILLER_APP_URI = env('KILLER_APP_URI', 'http://smhapp:8002')
TOP_LEFT_TITLE = env('TOP_LEFT_TITLE', 'verify my identity')

# Optional partner path segment; normalized to end with a slash when set.
PARTNER_REF = env('PARTNER_REF', '')
if len(PARTNER_REF) > 0:
    PARTNER_REF += "/"

ORGANIZATION_TITLE = env(
    'DJANGO_ORGANIZATION_TITLE',
    'Alliance for Better Health')
ORGANIZATION_URI = env('DJANGO_ORGANIZATION_URI',
                       'http://transparenthealth.org')
POLICY_URI = env('DJANGO_POLICY_URI',
                 'http://sharemy.health/privacy-policy-1.0.html')
POLICY_TITLE = env('DJANGO_POLICY_TITLE', 'Privacy Policy')
TOS_URI = env('DJANGO_TOS_URI',
              'http://sharemy.health/terms-of-service-1.0.html')
AGENT_TOS_URI = env('DJANGO_AGENT_TOS_URI',
                    'http://sharemy.health/agent-terms-of-service-1.0.html')
TOS_TITLE = env('DJANGO_TOS_TITLE', 'Terms of Service')
# If True, display the training attestation on agent signup.
REQUIRE_TRAINING_FOR_AGENT_SIGNUP = bool_env(env('REQUIRE_TRAINING_FOR_AGENT_SIGNUP', False))
TRAINING_URI = env('TRAINING_URI',
'http://example.com/training1.0.html')
TOS_TITLE = env('DJANGO_TOS_TITLE', 'Terms of Service')
EXPLAINATION_LINE = ('This is an instance of Verify My Identity, \
a standards-based OpenID Connect Identity Provider.')
EXPLAINATION_LINE = env('DJANGO_EXPLAINATION_LINE ', EXPLAINATION_LINE)
USER_DOCS_URI = "https://github.com/TransparentHealth/vmi"
USER_DOCS_TITLE = "User Documentation"
USER_DOCS = "User Docs"
# LINKS TO DOCS
DEVELOPER_DOCS_URI = "https://github.com/TransparentHealth/vmi"
DEVELOPER_DOCS = "Developer Docs"
# Banner text warning users about monitoring/auditing.
DEFAULT_DISCLOSURE_TEXT = """
Unauthorized or improper use of this system or its data may result
in disciplinary action, as well as civil and criminal penalties.
This system may be monitored, recorded, and subject to audit.
"""
# NOTE(review): the override key here is 'DJANGO_PRIVACY_POLICY_URI', which
# looks copy-pasted from the policy-URI setting rather than a disclosure-text
# key -- confirm the intended environment variable name.
DISCLOSURE_TEXT = env('DJANGO_PRIVACY_POLICY_URI', DEFAULT_DISCLOSURE_TEXT)
HOSTNAME_URL = env('HOSTNAME_URL', 'http://localhost:8000')
ORG_SIGNUP_CONTACT = env('ORG_SIGNUP_CONTACT',
                         'https://example.com/contact-us/')
# Allow Members to create accounts
ALLOW_MEMBER_SIGNUP = bool_env(env('ALLOW_MEMBER_SIGNUP', False))
CONTACT_EMAIL = env('DJANGO_CONTACT_EMAIL', '<EMAIL>')
# Settings exposed to templates (django-settings-export style): each name
# listed here becomes available in template context.
SETTINGS_EXPORT = [
    'DEBUG',
    'CONTACT_EMAIL',
    'ALLOWED_HOSTS',
    'APPLICATION_TITLE',
    'STATIC_URL',
    'STATIC_ROOT',
    'DEVELOPER_DOCS_URI',
    'ORGANIZATION_TITLE',
    'POLICY_URI',
    'POLICY_TITLE',
    'DISCLOSURE_TEXT',
    'TOS_URI',
    'TOS_TITLE',
    'EXPLAINATION_LINE',
    'USER_DOCS_URI',
    'USER_DOCS',
    'DEVELOPER_DOCS',
    'USER_DOCS_TITLE',
    'HOSTNAME_URL',
    'TOP_LEFT_TITLE',
    'KILLER_APP_URI',
    'KILLER_APP_TITLE',
    'ORG_SIGNUP_CONTACT',
    'ALLOW_MEMBER_SIGNUP',
    'PARTNER_REF',
    'PUBLIC_HOME_TEMPLATE',
]
# Emails
DEFAULT_FROM_EMAIL = env('FROM_EMAIL', '<EMAIL>')
DEFAULT_ADMIN_EMAIL = env('ADMIN_EMAIL',
                          '<EMAIL>')
# Select the right Email delivery system that works for you.
# Django's default is 'django.core.mail.backends.smtp.EmailBackend'. This will work with most email systems.
# Set the other email settings according to your configuration.
# If using the AWS Simple Email Service backend, 'django_ses.SESBackend', you need
# only to have the values for AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY ,
# and AWS_DEFAULT_REGION set.
# You can use 'django.core.mail.backends.console.EmailBackend' to send
# emails to stdout instead of sending them.
EMAIL_BACKEND = env('EMAIL_BACKEND', 'django_ses.SESBackend')
# These values are important when using another email service such as your own email server (e.g. Exchange, Sendmail)
# or a service such as Twilio SendGrid (available on the Azure Marketplace)
# https://azuremarketplace.microsoft.com/en-us/marketplace/apps/SendGrid.SendGrid
# Values default to Django default values
EMAIL_HOST = env('EMAIL_HOST', 'localhost')
# NOTE(review): EMAIL_PASSWORD is not a standard Django mail setting; Django
# reads EMAIL_HOST_PASSWORD (next line). Confirm whether EMAIL_PASSWORD is
# still referenced anywhere before removing it.
EMAIL_PASSWORD = env('EMAIL_PASSWORD', '')
EMAIL_HOST_PASSWORD = env('EMAIL_HOST_PASSWORD', '')
EMAIL_HOST_USER = env('EMAIL_HOST_USER', '')
EMAIL_PORT = int(env('EMAIL_PORT', 25))
EMAIL_SUBJECT_PREFIX = env('EMAIL_SUBJECT_PREFIX', '[VerifyMyIdentity] ')
EMAIL_USE_LOCALTIME = bool_env(env('EMAIL_USE_LOCALTIME', False))
EMAIL_USE_TLS = bool_env(env('EMAIL_USE_TLS', False))
EMAIL_USE_SSL = bool_env(env('EMAIL_USE_SSL', False))
EMAIL_SSL_CERTFILE = env('EMAIL_SSL_CERTFILE', None)
EMAIL_SSL_KEYFILE = env('EMAIL_SSL_KEYFILE', None)
# Days before an unconfirmed signup expires.
SIGNUP_TIMEOUT_DAYS = 3
ORGANIZATION_NAME = "Verify My Identity"
# 4 MB Default
MAX_PROFILE_PICTURE_SIZE = env(
    'MAX_PROFILE_PICTURE_SIZE', str(4 * 1024 * 1024))
# Define individual identifier types (Django choices tuples: (value, label)).
INDIVIDUAL_ID_TYPE_CHOICES = env('INDIVIDUAL_ID_TYPE_CHOICES', (
    ('PATIENT_ID_FHIR', 'Patient ID in FHIR'),
    ('MPI', 'Master Patient Index (Not FHIR Patient ID)'),
    ('SSN', 'Social Security Number'),
    ('MEDICAID_ID', 'Medicaid ID Number'),
    ('MEDICARE_HICN', 'Medicare HICN (Legacy)'),
    ('MEDICARE_ID', 'Medicare ID Number'),
    ('INSURANCE_ID', 'Insurance ID Number'),
    ('IHE_ID', 'Health Information Exchange ID'),
    ('NPI', 'National Provider Identifier'),
    ('UHI', 'Universal Health Identifier'),))
# Define organization identifier types
ORGANIZATION_ID_TYPE_CHOICES = env('ORGANIZATION_ID_TYPE_CHOICES', (
    ('FEIN', 'Federal Employer ID Number (Tax ID)'),
    ('NPI', 'National Provider Identifier'),
    ('OEID', 'Other Entity Identifier'),
    ('PECOS', 'PECOS Medicare ID'),))
# ISO country code used when an identifier has no explicit country.
DEFAULT_COUNTRY_CODE_FOR_INDIVIDUAL_IDENTIFIERS = env(
    'DEFAULT_COUNTRY_CODE_FOR_IDENTIFIERS', "US")
PHONENUMBER_DEFAULT_REGION = env('PHONENUMBER_DEFAULT_REGION', "US")
# Terms of service version
CURRENT_TOS_VERSION = env('CURRENT_TOS_VERSION', "1")
# Privacy Policy version
CURRENT_PP_VERSION = env('CURRENT_PP_VERSION', "1")
# Expire session on browser close.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
# Expire session. Default is 30 minutes: 30 * 60 seconds.
SESSION_COOKIE_AGE = int(env('SESSION_COOKIE_AGE', int(30 * 60)))
# Whitelabeling.The next settings allow for homepage and login screen
# customization.
# Pick a login template and title.
LOGIN_TEMPLATE_PICKER = {"default": 'login.html',
                         'share-my-health': 'login.html',
                         # Add others here to create a custom login template.
                         }
# Whitelabel: Pick a public template. Customize to your needs.
PUBLIC_HOME_TEMPLATE = env('PUBLIC_HOME_TEMPLATE', "index.html")
# What a user sees when logged in.
AUTHENTICATED_HOME_TEMPLATE = env(
    'AUTHENTICATED_HOME_TEMPLATE', "authenticated-home.html")
# List of IAL2 classifications. You can define your own. Anything that is not empty
# (e.g. not "") will be an IAL2.
IAL2_EVIDENCE_CLASSIFICATIONS = (
    # Generic
    ('ONE-SUPERIOR-OR-STRONG-PLUS',
     'One Superior or Strong+ pieces of identity evidence'),
    ('ONE-STRONG-TWO-FAIR', 'One Strong and Two Fair pieces of identity evidence'),
    ('TWO-STRONG', 'Two Pieces of Strong identity evidence'),
    # More specific
    ('ONE-SUPERIOR-OR-STRONG-PLUS-1', "Driver's License"),
    ('ONE-SUPERIOR-OR-STRONG-PLUS-2', "Identification Card"),
    ('ONE-SUPERIOR-OR-STRONG-PLUS-3', 'Veteran ID Card'),
    ('ONE-SUPERIOR-OR-STRONG-PLUS-4', 'Passport'),
    ('ONE-SUPERIOR-OR-STRONG-PLUS-5', 'NY Medicaid ID Card'),
    ('ONE-SUPERIOR-OR-STRONG-PLUS-6', 'Medicare ID'),
    ('TWO-STRONG-1', 'Original Birth Certificate and a Social Security Card'),
    ('TRUSTED-REFEREE-VOUCH', 'I am a Trusted Referee Vouching for this person'),
)
# For creating agent users who have out-of-band ID verification on file.
# BUG FIX: the assignment previously ended with a stray trailing comma, which
# made this a one-element tuple ('ONE-SUPERIOR-OR-STRONG-PLUS',) instead of a
# plain string like its sibling AUTO_IAL_2_DEFAULT_SUBCLASSIFICATION.
AUTO_IAL_2_DEFAULT_CLASSIFICATION = 'ONE-SUPERIOR-OR-STRONG-PLUS'
# Sub-classification and description recorded for auto-IAL2 agent accounts.
AUTO_IAL_2_DEFAULT_SUBCLASSIFICATION = env(
    'AUTO_IAL_2_DEFAULT_SUBCLASSIFICATION', "I9")
AUTO_IAL_2_DESCRIPTION = env(
    'AUTO_IAL_2_DESCRIPTION', "Documents verified by i9 employment")
# Rate limit applied to login attempts (django-ratelimit style "count/period").
LOGIN_RATELIMIT = env('LOGIN_RATELIMIT', '100/h')
# These are used to encrypt the passphrase. Change these for production
PASSPHRASE_SALT = env('PASSPHRASE_SALT', "FA6F747468657265616C706570706573")
PASSPHRASE_ITERATIONS = int(env('PASSPHRASE_ITERATIONS', "200"))
# These are added for portability to other cloud platforms.
# Note that instead these values can be passed as an IAM role.
# See
# https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_switch-role-ec2.html
AWS_ACCESS_KEY_ID = env('AWS_ACCESS_KEY_ID', "set-your-own-id")
AWS_SECRET_ACCESS_KEY = env('AWS_SECRET_ACCESS_KEY', "set-your-own-key")
AWS_DEFAULT_REGION = env('AWS_DEFAULT_REGION', 'us-east-1')
# Set to True when using in a reverse proxy such as Gunicorn and Nginx
SOCIAL_AUTH_REDIRECT_IS_HTTPS = bool_env(
    env('SOCIAL_AUTH_REDIRECT_IS_HTTPS', False))
# Blank means skip EC2.
EC2PARAMSTORE_4_ENVIRONMENT_VARIABLES = env(
    'EC2PARAMSTORE_4_ENVIRONMENT_VARIABLES', "EC2_PARAMSTORE")
# Acceptable year ranges for birthdays and identity-document dates, anchored
# to the current server time at import.
now = datetime.datetime.now()
MINIMUM_AGE = int(env('MINIMUM_AGE', '18'))
MINIMUM_BIRTH_YEAR = now.year - MINIMUM_AGE
# IDIOM: list(range(...)) replaces the redundant [x for x in range(...)] copies.
# NOTE(review): range() excludes its stop value, so MINIMUM_BIRTH_YEAR itself
# (someone exactly MINIMUM_AGE years old) is not selectable -- confirm intent.
BIRTHDATE_YEARS = list(range(1900, MINIMUM_BIRTH_YEAR))
# Issuance years offered for ID documents: the last 20 years, excluding this year.
ID_DOCUMENT_ISSUANCE_YEARS = list(range(now.year - 20, now.year))
# Set possible expiration for identity documents (e.g. driver's license).
EXPIRY_DATE_ACCEPTABLE_YEARS = list(range(now.year, 2050))
# VECTORS_OF_TRUST_TRUSTMARK_URL for value of `vtm` claim.
VECTORS_OF_TRUST_TRUSTMARK_URL = env('VECTORS_OF_TRUST_TRUSTMARK_URL',
                                     'https://github.com/TransparentHealth/800-63-3-trustmark/')
# ALLOW_MULTIPLE_USERS_PER_EMAIL should never be activated on a production
# system. It exists for debugging and testing.
ALLOW_MULTIPLE_USERS_PER_EMAIL = bool_env(
    env('ALLOW_MULTIPLE_USERS_PER_EMAIL', False))
# Use these settings to allow/disallow different ID verification modes.
ALLOW_PHYSICAL_INPERSON_PROOFING = bool_env(
    env('ALLOW_PHYSICAL_INPERSON_PROOFING', True)) # pipp
ALLOW_SUPERVISED_REMOTE_INPERSON_PROOFING = bool_env(
| |
#
# BSD 3-Clause License
#
# Copyright (c) 2019, Analog Devices, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
'''
===================
Calibration Map
===================
This module contains functions for generating the calibration map (for storing in EEPROM), storing it in a file, reading it back from the file, and parsing the map.
'''
from collections import namedtuple
import struct
import sys
from natsort import natsorted, ns
import re
import os
import pandas as pd
from . import firmware_gen as lf
import json
import aditofpython as tof
import tof_calib.device as device
import logging
import logging.config
import numpy as np
def setup_logging():
    """Configure the logging package from the shared JSON config file."""
    with open('./../logger.json', 'r') as config_file:
        logging.config.dictConfig(json.load(config_file))
'''
Predefined Hashmap Key Definitions
----------------------------------
'''
#Dictionary for modes: maps the camera range-mode name to its numeric index
mode_dict = {'near' : 0, 'mid' : 1, 'far' : 2}
#Hashmap key for packet type (top-level keys of the calibration map)
HEADER = 0
CAMERA_INTRINSIC = 1
NEAR_CAL = 2
NEAR_LF = 3
MID_CAL = 4
MID_LF = 5
FAR_CAL = 6
FAR_LF = 7
#Hashmap key for common parameters (shared by several packet types)
EEPROM_VERSION = 1
CAL_SER_NUM = 2
CAL_DATE = 3
CHECKSUM = 4
#Hashmap key for Header Parameters
TOTAL_SIZE = 5
NUMBER_OF_MODES = 6
MODULE_TYPE = 11 #Value 2: BroadMarket/1: PICO/0 : ADI EVAL
AFE_TYPE = 13 #Value 0: ADDI9033 / 1:ADDI9043/ 2: ADDI9050
SENSOR_TYPE = 14 #Value 0 : Panasonic VGA / 1 : Panasonic QVGA
LASER_TYPE = 16 #Value 0 : Princeton VCSEL/ 1 : Heptagon VCSEL...
#Hashmap key for Camera Intrinsic
INTRINSIC = 5
DISTORTION_COEFFICIENTS = 6
#Hashmap for linear correction packets
ISATG_PROJECT_VERSION = 5
CALIBRATION_SOFTWARE_VERSION = 6
CALIBRATION_TYPE = 7 #Value 0 Sweep, 1: Rail, 2: Faceplant
CALIBRATION_MODE = 8 #Value 0:Near 1, 1 : Mid, 2 :Far
PULSE_COUNT = 11
NO_OF_LASERS = 12
LINEAR_CORRECT_OFFSET = 22
LINEAR_CORRECT_XPWR = 23
#Hashmap for load files
ADDR_DATA_LIST = 5
#Indices for PARAM STRUCT ([size_in_bytes, list_of_values])
SIZE = 0
VALUE = 1
'''
Class for managing the calibration map.
Contains functions to:
    generate the calibration map
    store the calibration map binary to a file
    read the calibration map back from a binary file
    parse the binary back into a calibration map
    display the calibration map
---------------------------------
'''
class cal_map(object):
def __init__(self):
self.calibration_map = {}
header_packet = {
TOTAL_SIZE : self.param_struct([8]),
CHECKSUM : self.param_struct([8])
}
self.calibration_map = {
HEADER : [self.get_packet_size(header_packet), header_packet],
}
#calculates size of value and returns list[size, value]
def param_struct(self, param_value):
size = len(param_value) * 4 # len * 4(each element is float)
param_value = [int(size), [float(i) for i in param_value]]
return param_value
#calculates and returns size of packet
def get_packet_size(self, packet):
packet_size = 0
for nested_key,nested_value in packet.items():
param_size, param_value = nested_value
packet_size = packet_size + param_size + 8 # added size 8 for key and size of each parameter
return int(packet_size)
#calculates and returns size of map
def get_map_size(self):
map_size = 0
for key, list_params in self.calibration_map.items():
size, nested_dict = list_params
map_size = map_size + size
map_size = map_size + 8 #Size of each key(4) and packet size(4) is added(4+4=8)
return map_size
def update_packet_checksum(self, packet):
checksum = 0
for nested_key,nested_value in packet.items():
param_size, param_value = nested_value
for i in range (int(param_size/4)):
checksum = int(checksum) ^ int(param_value[i])
packet[CHECKSUM] = self.param_struct([checksum])
def update_map_header(self):
#Update Header Total Size
total_size = self.get_map_size()
self.calibration_map[HEADER][VALUE][TOTAL_SIZE] = self.param_struct([total_size])
#Update Header Checksum
self.update_packet_checksum(self.calibration_map[HEADER][VALUE])
#Generates Default Dictionary
def init_default_cal_map(self):
header_packet = {
EEPROM_VERSION : self.param_struct([0]),
TOTAL_SIZE : self.param_struct([1000]),
NUMBER_OF_MODES : self.param_struct([3]),
}
self.update_packet_checksum(header_packet)
camera_intrinsic_packet = {
EEPROM_VERSION : self.param_struct([0]),
CAL_SER_NUM : self.param_struct([0]),
CAL_DATE : self.param_struct([12042019]),
INTRINSIC : self.param_struct([0, 0, 0, 0, 0, 0, 0, 0, 0])
}
self.update_packet_checksum(camera_intrinsic_packet)
self.calibration_map = {
HEADER : [self.get_packet_size(header_packet), header_packet],
CAMERA_INTRINSIC : [self.get_packet_size(camera_intrinsic_packet), camera_intrinsic_packet]
}
#Update Header
self.update_map_header()
    #Parses through dictionary and prints the key and value
    def display_cal_map(self):
        """Pretty-print every packet and parameter in the calibration map."""
        #Printing just the value of Calibration Dictionary
        for key, list_params in self.calibration_map.items():
            print ("Packet Key: ", (key),end="") # print the primary key (for Packet Type)
            size, nested_dict = list_params
            print ("\tPacket Size: ", size) #print the size of pimary packet
            #print ("Packet Key: ", (key),"\tPacket Size: ", size, file=open("output.txt", "a"))
            for nested_key,nested_value in nested_dict.items():
                print("\tParam Key: ", nested_key,end="") #print the nested key (Parameter key)
                param_size, param_value = nested_value
                print("\tParam Size: ",param_size,end="") #print the size of Param
                # Only the first param_size/4 entries are valid values.
                value = []
                for i in range (int(param_size/4)):
                    value.append(param_value[i])
                print("\tParam Value: ",value) #print the value of Param
                #print("\tParam Key: ", nested_key,"\tParam Size: ",param_size,"\tParam Value: ",value, file=open("output.txt", "a")) #print the Param to file
#Generates the binary file for writing to EEPROM
def save_cal_map(self, filename):
#writing float values
f = open(filename,"wb")
for key, list_params in self.calibration_map.items():
f.write(struct.pack('<f', key) ) #write the primary key (for Packet Type)
struct.pack('<f', key)
size, nested_dict = list_params
f.write(struct.pack('<f', size) ) #write the size of pimary packet size
for nested_key,nested_value in nested_dict.items():
f.write(struct.pack('<f',nested_key)) #write the nested key (Parameter key)
param_size, param_value = nested_value
f.write(struct.pack('<f',param_size)) #write the size of Param
for i in range (int(param_size/4)):
f.write(struct.pack('<f',param_value[i])) #write the value of Param
f.close()
'''Reads the binary file and parses it back to map,
replaces the value if already exist'''
def read_cal_map(self, filename):
#open the file
with open(filename,"rb") as f:
while True:
key = f.read(4)
if not key:
break
key = struct.unpack('<f', key)
key = int(key[0])
sub_packet_size = struct.unpack('<f', f.read(4))
sub_packet_size = int(sub_packet_size[0])
sub_packet_map = {}
i = 0
while i<(sub_packet_size/4): #4:size of float
sub_packet_value = struct.unpack('<f', f.read(4))
sub_packet_value = int(sub_packet_value[0])
i = i + 1
parameter_key = sub_packet_value
sub_packet_value = struct.unpack('<f', f.read(4))
sub_packet_value = int(sub_packet_value[0])
i = i + 1
parameter_size = sub_packet_value
number_of_elements = int(parameter_size/4) #4:size of float
value=[]
for j in range (number_of_elements):
sub_packet_value = struct.unpack('<f', f.read(4))
#sub_packet_value = int(sub_packet_value[0])
value.append(sub_packet_value[0])
i = i + 1
sub_packet_map.update({parameter_key: [parameter_size, value]})
self.calibration_map[key] = [sub_packet_size,sub_packet_map]
f.close()
    #Add Load files to map, if existing map consist load files, it overwrites it, otherwise adds it
    def add_load_files_to_map(self, packet_type, lf_path):
        """Collect address/data pairs from up to 13 .lf files under *lf_path*
        into a single packet stored at *packet_type*."""
        lf_map = {}
        lf_list =[]
        # Natural-sort so e.g. file2.lf sorts before file10.lf; cap at 13 files.
        file_list = natsorted(os.listdir("./"+lf_path+"/"), alg=ns.IGNORECASE)[:13]
        #print(file_list)
        for file_name in file_list:
            if file_name.endswith(".lf"):
                # Interleave address and data words in file order.
                addr, data, mode_locations = lf.extract_code_block("./"+lf_path+"/"+file_name)
                for i in range(len(addr)) :
                    lf_list.append(addr[i])
                    lf_list.append(data[i])
                #print("Parsed File", file_name, " ", file_num, "\n", lf_list)
                #input("Press Enter to continue...")
        lf_map[ADDR_DATA_LIST] = self.param_struct(lf_list)
        #print(lf_map)
        self.update_packet_checksum(lf_map)
        self.calibration_map[packet_type] = [self.get_packet_size(lf_map), lf_map]
        #Update Header
        self.update_map_header()
def add_linear_offset_csv_to_map(self, packet_type, linear_offset_csv_file):
linear_df = pd.read_csv(linear_offset_csv_file)
linear_correct_offset_list = (linear_df.to_dict(orient='list')["reg_offset_value_hex"])
linear_correct_xpwr_list = (linear_df.to_dict(orient='list')["xcorr"][1:])
linear_map = {}
linear_map[LINEAR_CORRECT_OFFSET] = self.param_struct([int(i, 16) for i in linear_correct_offset_list])
linear_map[LINEAR_CORRECT_XPWR] = self.param_struct(linear_correct_xpwr_list)
self.calibration_map[packet_type] = [self.get_packet_size(linear_map), linear_map]
#Update Header
self. update_map_header()
def add_json_to_map(self, packet_type, json_file):
with open(json_file, 'r') as f:
json_read = json.load(f)
json_map = {}
for key,value in json_read.items():
for sub_key,sub_value in json_read[key].items():
if(type(sub_value) is list):
json_map[int(sub_key)] = self.param_struct(sub_value)
else:
json_map[int(sub_key)] = self.param_struct([sub_value])
self.update_packet_checksum(json_map)
self.calibration_map[packet_type] = [self.get_packet_size(json_map), json_map]
self.update_map_header()
#Function to replace calibration mode block
def replace_eeprom_mode(self, mode, linear_cal_json_file, load_file_path):
self.add_json_to_map((mode_dict[mode]*2+2), linear_cal_json_file)
self.add_load_files_to_map((mode_dict[mode]*2+3), load_file_path)
def write_eeprom_cal_map(self, eeprom):
#print("\n\nWriting EEPROM")
eeprom_write_bytearray = bytes()
for key, list_params in self.calibration_map.items():
eeprom_write_bytearray = eeprom_write_bytearray + (struct.pack('<f', key)) #write the primary key (for Packet Type)
struct.pack('<f', key)
size, nested_dict = list_params
eeprom_write_bytearray = eeprom_write_bytearray + (struct.pack('<f', size)) #write the size of pimary packet size
for nested_key,nested_value in nested_dict.items():
eeprom_write_bytearray = eeprom_write_bytearray + (struct.pack('<f', nested_key)) #write the nested key (Parameter key)
param_size, | |
#!/usr/bin/env python
################################
# Interactive UPNP application #
# <NAME> #
# 07/16/2008 #
################################
import sys
import os
import re
import platform
import xml.dom.minidom as minidom
import IN
import urllib
import urllib2
import readline
import time
import pickle
import struct
import base64
import getopt
import select
from socket import *
# Most of the CmdCompleter class was originally written by <NAME>
# It serves to tab-complete commands inside the program's shell
class CmdCompleter:
def __init__(self, commands):
self.commands = commands
# Traverses the list of available commands
def traverse(self, tokens, tree):
retVal = []
# If there are no commands, or no user input, return null
if tree is None or len(tokens) == 0:
retVal = []
# If there is only one word, only auto-complete the primary commands
elif len(tokens) == 1:
retVal = [x+' ' for x in tree if x.startswith(tokens[0])]
# Else auto-complete for the sub-commands
elif tokens[0] in tree.keys():
retVal = self.traverse(tokens[1:],tree[tokens[0]])
return retVal
# Returns a list of possible commands that match the partial command that the user has entered
def complete(self, text, state):
try:
tokens = readline.get_line_buffer().split()
if not tokens or readline.get_line_buffer()[-1] == ' ':
tokens.append('')
results = self.traverse(tokens,self.commands) + [None]
return results[state]
except Exception, e:
print "Failed to complete command: %s" % str(e)
return
#UPNP class for getting, sending and parsing SSDP/SOAP XML data (among other things...)
class upnp:
ip = False
port = False
completer = False
msearchHeaders = {
'MAN' : '"ssdp:discover"',
'MX' : '2'
}
DEFAULT_IP = "192.168.127.12"
DEFAULT_PORT = 1900
UPNP_VERSION = '1.0'
MAX_RECV = 8192
MAX_HOSTS = 0
TIMEOUT = 0
HTTP_HEADERS = []
ENUM_HOSTS = {}
LISTENER_LIMIT = True
VERBOSE = False
UNIQ = False
DEBUG = False
LOG_FILE = False
BATCH_FILE = None
IFACE = None
STARS = '****************************************************************'
csock = False
ssock = False
    def __init__(self,ip,port,iface,appCommands):
        """Set up tab completion (when appCommands is given) and the UDP
        sockets; exits the process if socket initialization fails."""
        if appCommands:
            self.completer = CmdCompleter(appCommands)
        if self.initSockets(ip,port,iface) == False:
            print 'UPNP class initialization failed!'
            print 'Bye!'
            sys.exit(1)
        else:
            # Matches the closing envelope tag of a SOAP response (any prefix).
            self.soapEnd = re.compile('<\/.*:envelope>')
#Initialize default sockets
def initSockets(self,ip,port,iface):
if self.csock:
self.csock.close()
if self.ssock:
self.ssock.close()
if iface != None:
self.IFACE = iface
if not ip:
ip = self.DEFAULT_IP
if not port:
port = self.DEFAULT_PORT
self.port = port
self.ip = ip
try:
#This is needed to join a multicast group
self.mreq = struct.pack("4sl",inet_aton(ip),INADDR_ANY)
#Set up client socket
self.csock = socket(AF_INET,SOCK_DGRAM)
self.csock.setsockopt(IPPROTO_IP,IP_MULTICAST_TTL,2)
#Set up server socket
self.ssock = socket(AF_INET,SOCK_DGRAM,IPPROTO_UDP)
self.ssock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
# BSD systems also need to set SO_REUSEPORT
try:
self.ssock.setsockopt(SOL_SOCKET,SO_REUSEPORT,1)
except:
pass
#Only bind to this interface
if self.IFACE != None:
print '\nBinding to interface',self.IFACE,'...\n'
self.ssock.setsockopt(SOL_SOCKET,IN.SO_BINDTODEVICE,struct.pack("%ds" % (len(self.IFACE)+1,), self.IFACE))
self.csock.setsockopt(SOL_SOCKET,IN.SO_BINDTODEVICE,struct.pack("%ds" % (len(self.IFACE)+1,), self.IFACE))
try:
self.ssock.bind(('',self.port))
except Exception, e:
print "WARNING: Failed to bind %s:%d: %s" , (self.ip,self.port,e)
try:
self.ssock.setsockopt(IPPROTO_IP,IP_ADD_MEMBERSHIP,self.mreq)
except Exception, e:
print 'WARNING: Failed to join multicast group:',e
except Exception, e:
print "Failed to initialize UPNP sockets:",e
return False
return True
#Clean up file/socket descriptors
def cleanup(self):
if self.LOG_FILE != False:
self.LOG_FILE.close()
self.csock.close()
self.ssock.close()
    #Send network data
    def send(self,data,socket):
        """Send *data* to the configured (ip, port) via *socket*, falling
        back to the class client socket when socket is False.
        Returns True on success, False on failure."""
        #By default, use the client socket that's part of this class
        if socket == False:
            socket = self.csock
        try:
            socket.sendto(data,(self.ip,self.port))
            return True
        except Exception, e:
            print "SendTo method failed for %s:%d : %s" % (self.ip,self.port,e)
            return False
#Receive network data
def recv(self,size,socket):
if socket == False:
socket = self.ssock
if self.TIMEOUT:
socket.setblocking(0)
ready = select.select([socket], [], [], self.TIMEOUT)[0]
else:
socket.setblocking(1)
ready = True
try:
if ready:
return socket.recv(size)
else:
return False
except:
return False
#Create new UDP socket on ip, bound to port
def createNewListener(self,ip,port):
try:
newsock = socket(AF_INET,SOCK_DGRAM,IPPROTO_UDP)
newsock.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
# BSD systems also need to set SO_REUSEPORT
try:
newsock.setsockopt(SOL_SOCKET,SO_REUSEPORT,1)
except:
pass
newsock.bind((ip,port))
return newsock
except:
return False
    #Return the class's primary server socket
    def listener(self):
        """Accessor for the multicast listening (server) socket."""
        return self.ssock
    #Return the class's primary client socket
    def sender(self):
        """Accessor for the sending (client) socket."""
        return self.csock
#Parse a URL, return the host and the page
def parseURL(self,url):
delim = '://'
host = False
page = False
#Split the host and page
try:
(host,page) = url.split(delim)[1].split('/',1)
page = '/' + page
except:
#If '://' is not in the url, then it's not a full URL, so assume that it's just a relative path
page = url
return (host,page)
#Pull the name of the device type from a device type string
#The device type string looks like: 'urn:schemas-upnp-org:device:WANDevice:1'
def parseDeviceTypeName(self,string):
delim1 = 'device:'
delim2 = ':'
if delim1 in string and not string.endswith(delim1):
return string.split(delim1)[1].split(delim2,1)[0]
return False
#Pull the name of the service type from a service type string
#The service type string looks like: 'urn:schemas-upnp-org:service:Layer3Forwarding:1'
def parseServiceTypeName(self,string):
delim1 = 'service:'
delim2 = ':'
if delim1 in string and not string.endswith(delim1):
return string.split(delim1)[1].split(delim2,1)[0]
return False
    #Pull the header info for the specified HTTP header - case insensitive
    def parseHeader(self,data,header):
        """Return the value of *header* from raw HTTP *data*, or False when
        the header is absent or malformed."""
        delimiter = "%s:" % header
        defaultRet = False
        lowerDelim = delimiter.lower()
        dataArray = data.split("\r\n")
        #Loop through each line of the headers
        for line in dataArray:
            lowerLine = line.lower()
            #Does this line start with the header we're looking for?
            if lowerLine.startswith(lowerDelim):
                try:
                    return line.split(':',1)[1].strip()
                except:
                    print "Failure parsing header data for %s" % header
        return defaultRet
#Extract the contents of a single XML tag from the data
def extractSingleTag(self,data,tag):
startTag = "<%s" % tag
endTag = "</%s>" % tag
try:
tmp = data.split(startTag)[1]
index = tmp.find('>')
if index != -1:
index += 1
return tmp[index:].split(endTag)[0].strip()
except:
pass
return None
    #Parses SSDP notify and reply packets, and populates the ENUM_HOSTS dict
    def parseSSDPInfo(self,data,showUniq,verbose):
        """Parse one SSDP NOTIFY/reply packet, record the host in ENUM_HOSTS
        and print a summary. Returns True when a host entry was recorded."""
        hostFound = False
        foundLocation = False
        messageType = False
        xmlFile = False
        host = False
        page = False
        upnpType = None
        knownHeaders = {
            'NOTIFY' : 'notification',
            'HTTP/1.1 200 OK' : 'reply'
        }
        #Use the class defaults if these aren't specified
        if showUniq == False:
            showUniq = self.UNIQ
        if verbose == False:
            verbose = self.VERBOSE
        #Is the SSDP packet a notification, a reply, or neither?
        for text,messageType in knownHeaders.iteritems():
            if data.upper().startswith(text):
                break
            else:
                messageType = False
        #If this is a notification or a reply message...
        if messageType != False:
            #Get the host name and location of its main UPNP XML file
            xmlFile = self.parseHeader(data,"LOCATION")
            upnpType = self.parseHeader(data,"SERVER")
            (host,page) = self.parseURL(xmlFile)
            #Sanity check to make sure we got all the info we need
            if xmlFile == False or host == False or page == False:
                print 'ERROR parsing recieved header:'
                print self.STARS
                print data
                print self.STARS
                print ''
                return False
            #Get the protocol in use (i.e., http, https, etc)
            protocol = xmlFile.split('://')[0]+'://'
            #Check if we've seen this host before; add to the list of hosts if:
            #   1. This is a new host
            #   2. We've already seen this host, but the uniq hosts setting is disabled
            # NOTE(review): the checks below consult self.UNIQ directly, even
            # though showUniq was normalized above -- confirm whether the
            # showUniq argument was meant to be honored here.
            for hostID,hostInfo in self.ENUM_HOSTS.iteritems():
                if hostInfo['name'] == host:
                    hostFound = True
                    if self.UNIQ:
                        return False
            if (hostFound and not self.UNIQ) or not hostFound:
                #Get the new host's index number and create an entry in ENUM_HOSTS
                index = len(self.ENUM_HOSTS)
                self.ENUM_HOSTS[index] = {
                    'name' : host,
                    'dataComplete' : False,
                    'proto' : protocol,
                    'xmlFile' : xmlFile,
                    'serverType' : None,
                    'upnpServer' : upnpType,
                    'deviceList' : {}
                }
                #Be sure to update the command completer so we can tab complete through this host's data structure
                self.updateCmdCompleter(self.ENUM_HOSTS)
            #Print out some basic device info
            print self.STARS
            print "SSDP %s message from %s" % (messageType,host)
            if xmlFile:
                foundLocation = True
                print "XML file is located at %s" % xmlFile
            if upnpType:
                print "Device is running %s"% upnpType
            print self.STARS
            print ''
            return True
    #Send GET request for a UPNP XML file
    def getXML(self,url):
        """Fetch *url* and return (headers, body); (False, False) on error."""
        headers = {
            'USER-AGENT':'uPNP/'+self.UPNP_VERSION,
            'CONTENT-TYPE':'text/xml; charset="utf-8"'
        }
        try:
            #Use urllib2 for the request, it's awesome
            req = urllib2.Request(url, None, headers)
            response = urllib2.urlopen(req)
            output = response.read()
            headers = response.info()
            return (headers,output)
        except Exception, e:
            print "Request for '%s' failed: %s" % (url,e)
            return (False,False)
#Send SOAP request
def sendSOAP(self,hostName,serviceType,controlURL,actionName,actionArguments):
argList = ''
soapResponse = ''
if '://' in controlURL:
urlArray = controlURL.split('/',3)
if len(urlArray) < 4:
controlURL = '/'
else:
controlURL = '/' + urlArray[3]
soapRequest = 'POST %s HTTP/1.1\r\n' % controlURL
#Check if a port number was specified in the host name; default is port 80
if ':' in hostName:
hostNameArray = hostName.split(':')
host = hostNameArray[0]
try:
port = int(hostNameArray[1])
except:
print 'Invalid port specified for host connection:',hostName[1]
return False
else:
host = hostName
port = 80
#Create a string containing all of the SOAP action's arguments and values
for arg,(val,dt) in actionArguments.iteritems():
argList += '<%s>%s</%s>' % (arg,val,arg)
#Create the SOAP request
soapBody = '<?xml version="1.0"?>\n'\
'<SOAP-ENV:Envelope xmlns:SOAP-ENV="http://schemas.xmlsoap.org/soap/envelope/" SOAP-ENV:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">\n'\
'<SOAP-ENV:Body>\n'\
'\t<m:%s xmlns:m="%s">\n'\
'%s\n'\
'\t</m:%s>\n'\
'</SOAP-ENV:Body>\n'\
'</SOAP-ENV:Envelope>' % (actionName,serviceType,argList,actionName)
#Specify the headers to send with the request
headers = {
'Host':hostName,
'Content-Length':len(soapBody),
'Content-Type':'text/xml',
'SOAPAction':'"%s#%s"' % (serviceType,actionName)
}
#Generate the final payload
for head,value in headers.iteritems():
soapRequest += '%s: %s\r\n' % (head,value)
soapRequest += '\r\n%s' % soapBody
#Send data and go into recieve loop
try:
sock = socket(AF_INET,SOCK_STREAM)
sock.connect((host,port))
if self.DEBUG:
print self.STARS
print soapRequest
print self.STARS
print ''
sock.send(soapRequest)
while True:
data = sock.recv(self.MAX_RECV)
if not data:
break
else:
soapResponse += data
if self.soapEnd.search(soapResponse.lower()) != None:
break
sock.close()
(header,body) = soapResponse.split('\r\n\r\n',1)
if not header.upper().startswith('HTTP/1.') and ' 200 ' in header.split('\r\n')[0]:
print 'SOAP request failed with error code:',header.split('\r\n')[0].split(' ',1)[1]
errorMsg = self.extractSingleTag(body,'errorDescription')
if errorMsg:
print 'SOAP error message:',errorMsg
return False
else:
return body
except Exception, e:
print 'Caught socket exception:',e
sock.close()
return False
except KeyboardInterrupt:
print ""
sock.close()
return False
#Display all info for a given host
	def showCompleteHostInfo(self,index,fp):
		#Dump everything known about host ENUM_HOSTS[index] to fp (or stdout
		#when fp is False), walking devices -> services -> actions ->
		#arguments -> state variables.
		na = 'N/A'
		#NOTE(review): 'na' is unused within this method
		serviceKeys = ['controlURL','eventSubURL','serviceId','SCPDURL','fullName']
		#Default to stdout when no file object was supplied
		if fp == False:
			fp = sys.stdout
		if index < 0 or index >= len(self.ENUM_HOSTS):
			fp.write('Specified host does not exist...\n')
			return
		try:
			hostInfo = self.ENUM_HOSTS[index]
			#Warn, but still print whatever partial data we do have
			if hostInfo['dataComplete'] == False:
				print "Cannot show all host info because I don't have it all yet. Try running 'host info %d' first...\n" % index
			fp.write('Host name: %s\n' % hostInfo['name'])
			fp.write('UPNP XML File: %s\n\n' % hostInfo['xmlFile'])
			fp.write('\nDevice information:\n')
			for deviceName,deviceStruct in hostInfo['deviceList'].iteritems():
				fp.write('\tDevice Name: %s\n' % deviceName)
				for serviceName,serviceStruct in deviceStruct['services'].iteritems():
					fp.write('\t\tService Name: %s\n' % serviceName)
					for key in serviceKeys:
						fp.write('\t\t\t%s: %s\n' % (key,serviceStruct[key]))
					fp.write('\t\t\tServiceActions:\n')
					for actionName,actionStruct in serviceStruct['actions'].iteritems():
						fp.write('\t\t\t\t%s\n' % actionName)
						for argName,argStruct in actionStruct['arguments'].iteritems():
							fp.write('\t\t\t\t\t%s \n' % argName)
							for key,val in argStruct.iteritems():
								#try is a specific fix for Belkin WeMo devices, otherwise the data won't show when using the host details command
								try:
									#Expand the referenced state variable inline
									if key == 'relatedStateVariable':
										fp.write('\t\t\t\t\t\t%s:\n' % val)
										for k,v in serviceStruct['serviceStateVariables'][val].iteritems():
											fp.write('\t\t\t\t\t\t\t%s: %s\n' % (k,v))
									else:
										fp.write('\t\t\t\t\t\t%s: %s\n' % (key,val))
								except:
									pass
		except Exception, e:
			print 'Caught exception while showing host info:',e
#Wrapper function...
def getHostInfo(self,xmlData,xmlHeaders,index):
if self.ENUM_HOSTS[index]['dataComplete'] == True:
return
if index >= 0 and index < len(self.ENUM_HOSTS):
try:
xmlRoot = minidom.parseString(xmlData)
self.parseDeviceInfo(xmlRoot,index)
self.ENUM_HOSTS[index]['serverType'] = xmlHeaders.getheader('Server')
self.ENUM_HOSTS[index]['dataComplete'] = True
return True
except Exception, e:
print 'Caught exception while getting host info:',e
return False
#Parse device info from the retrieved XML file
def parseDeviceInfo(self,xmlRoot,index):
deviceEntryPointer = False
devTag = "device"
deviceType | |
<gh_stars>1-10
# -*- coding: utf-8 -*-
from subprocess import call
import pygraph.readwrite
from pygraph.classes.digraph import digraph
from propsde.graph_representation.newNode import Node, isDefinite, getCopular, \
getPossesive, EXISTENSIAL
from pygraph.algorithms.accessibility import accessibility
from propsde.graph_representation.graph_utils import get_min_max_span, find_nodes, \
find_edges, merge_nodes, multi_get, duplicateEdge, accessibility_wo_self, \
subgraph_to_string, replace_head, find_marker_idx
from propsde.graph_representation.word import Word
from propsde.dependency_tree.definitions import *
import pygraph.readwrite.dot
from itertools import product
from propsde.graph_representation.proposition import Proposition
from copy import copy, deepcopy
import re, cgi, time, subprocess, math
from itertools import product
from propsde.graph_representation.proposition import Proposition
from copy import copy, deepcopy
from propsde.graph_representation import newNode
# from ctypes.wintypes import WORD
from propsde.utils.utils import encode_german_characters, encode_german_chars
import propsde.graph_representation.raising_subj_verbs as raising_subj_verbs
# --- edge / proposition label constants ------------------------------------
# NOTE: FIRST_ENTITY_LABEL and SECOND_ENTITY_LABEL intentionally share the
# same value; the commented alternatives are kept for reference.
FIRST_ENTITY_LABEL = "sameAs_arg" # "first_entity"
SECOND_ENTITY_LABEL = "sameAs_arg" # "second_entity"
POSSESSOR_LABEL = "possessor"
POSSESSED_LABEL = "possessed"
COMP_LABEL = "comp"
DISCOURSE_LABEL = "discourse"
CONDITION_LABEL = "condition"
REASON_LABEL = "reason"
OUTCOME_LABEL = "outcome"
EVENT_LABEL = "event"
ADV_LABEL = "adverb"
SORUCE_LABEL = "source" # (sic - name kept for compatibility with callers)
# Graphviz colors for special node kinds
TOPNODE_COLOR = "red"
PREDICATE_COLOR = "purple"
# (label, POS) pairs whose endpoints get merged into one node;
# "ALL" matches any POS. English-era equivalents kept in the comment.
#join_labels = ["mwe", "nn", "num", "number", "possessive", "prt", "predet", "npadvmod"]
join_labels = [ ("PNC","ALL"), ("NMC","ALL"), ("NK","CARD"), ("SVP","ALL"), ("AMS","ALL"), ("PM","ALL"),
                ("ADC","ALL"), ("AVC","ALL"), ("UC","ALL") ]
ignore_labels = [] #["det", "neg", "aux", "auxpass", "punct"]
ignore_nodes = [ ("--","$,"), ("--","$."), ("--","$("), ("NG","ALL"), ("NK","ART"), ("JU","ALL") ]
# Canonical label -> list of German dependency labels folded into it
# (English originals kept in the trailing comments):
inverse_labels = {"subj":["SB","possessor"], #["xsubj","nsubj","nsubjpass","csubj","csubjpass","possessor"],
                  "comp": ["CP"], #["xcomp","ccomp","acomp"]
                  "source":[], #["acomp"],
                  "mod": ["MO","NK","MNR","CC"], #["amod","advcl","rcmod","advmod","quantmod","vmod"],
                  "dobj":["OC","OA","OA2","OG","possessed"], #["possessed"]
                  "iobj": ["DA"], # new for DE
                  "poss": ["AG","PG"], # new for DE
                  }
# Inverted view: raw dependency label -> canonical label
normalize_labels_dic = {variant: canonical
                        for canonical, variants in inverse_labels.items()
                        for variant in variants}
class GraphWrapper(digraph):
"""
save nodes by uid, to make it possible to have different nodes with same str value
@type nodes: dict
@var nodes: a mapping from a node uid to its Node object
"""
    def __init__(self, originalSentence, HOME_DIR):
        """
        Initialize the nodes dictionary as well as the inner digraph
        @type originalSentence: string
        @param originalSentence: the sentence this graph represents; an
            empty/falsy value is replaced by "<empty>"
        @type HOME_DIR: string
        @param HOME_DIR: base directory used when rendering the graph (see draw)
        """
        if not originalSentence:
            originalSentence = "<empty>"
        # Keep both the encoded form (used for output) and the raw input
        self.originalSentence = encode_german_characters(originalSentence)
        self.originalSentence_original = originalSentence
        self.HOME_DIR = HOME_DIR
        # uid -> Node mapping; the underlying digraph only stores uids
        self.nodesMap = {}
        # raising-subject verb list from raising_subj_verbs (used by processing
        # code outside this class - TODO confirm)
        self.modalVerbs = raising_subj_verbs.verbs
        digraph.__init__(self)
    def set_original_sentence(self,s):
        # Replace the encoded sentence; originalSentence_original is not
        # updated here (only __init__ sets it).
        self.originalSentence = encode_german_characters(s)
def nodes(self):
"""
overrides the nodes() function of digraph, to maintain the nodes mapping
@rtype list(Node)
@return the Node objects stored in this graph
"""
return [self.nodesMap[curId] for curId in digraph.nodes(self)]
def edges(self):
"""
overrides the edges() function of digraph, to maintain the nodes mapping
@rtype list(edge)
@return the edges stored in this graph
"""
return [(self.nodesMap[u], self.nodesMap[v]) for (u, v) in digraph.edges(self)]
    def is_edge_exists(self, n1, n2):
        """
        Membership test: is there an edge from n1 to n2?
        (NOTE(review): the previous docstring was copy-pasted from edges();
        also this scans the full edge list - O(E) - whereas has_edge() is a
        direct digraph lookup. Behavior left unchanged.)
        @type n1: Node
        @type n2: Node
        @rtype: bool
        """
        return (n1, n2) in self.edges()
    def get_components(self):
        """
        Accessibility map of the graph, expressed in Node objects:
        each node -> list of nodes reachable from it (as computed by
        pygraph's accessibility()).
        @rtype: dict(Node -> list(Node))
        """
        graph_components = accessibility(self)
        return {self.nodesMap[key.uid]:[self.nodesMap[v.uid] for v in value] for key, value in graph_components.iteritems() }
    def add_node(self, node):
        """
        overrides the add_node of digraph, to maintain the nodes mapping
        @type node: Node
        @param node: the node to be added to the graph
        """
        # Register the Node object first, then store only its uid
        self.nodesMap[node.uid] = node
        digraph.add_node(self, node.uid)
    def del_node(self, node):
        """
        overrides the del_node of digraph, to maintain the nodes mapping;
        also scrubs the node from every remaining node's propagateTo list
        so no future propagation targets a deleted node
        @type node: Node
        @param node: the node to be removed
        """
        del(self.nodesMap[node.uid])
        # remove this node from any future propagation
        for curNode in self.nodesMap.values():
            if node in curNode.propagateTo:
                curNode.propagateTo.remove(node)
        digraph.del_node(self, node.uid)
def del_nodes(self, nodes):
"""
delete a set of nodes
"""
for node in nodes:
self.del_node(node)
def del_edge(self, edge):
"""
overrides the del_edge of digraph, to maintain the nodes mapping
@type edge: tuple [node]
@param edge: the edge to be removed
"""
u, v = edge
if v not in self.neighbors(u):
print "1"
if isinstance(u, Node):
digraph.del_edge(self, edge=(u.uid, v.uid))
else:
digraph.del_edge(self, edge=edge)
    def del_edges(self, edges):
        """Remove each edge in *edges* (see del_edge for accepted forms)."""
        for edge in edges:
            self.del_edge(edge)
def neighbors(self, node):
"""
overrides the neighbors function of digraph, to maintain the nodes mapping
@type node: Node
@param node: the nodes of the neighbors
"""
if isinstance(node, Node):
return [self.nodesMap[uid] for uid in digraph.neighbors(self, node.uid)]
else:
return digraph.neighbors(self, node)
def incidents(self, node):
"""
overrides the incidents function of digraph, to maintain the nodes mapping
@type node: Node
@param node: the nodes of the neighbors
"""
if isinstance(node, Node):
return [self.nodesMap[uid] for uid in digraph.incidents(self, node.uid)]
else:
return digraph.incidents(self, node)
def add_edge(self, edge, label=''):
"""
overrides the add_edge function of digraph, to maintain the nodes mapping
@type edge: (node1,node2)
@type node1: Node
@param node2: origin of new edge
@type node2: Node
@param node2: destination of new edge
"""
node1, node2 = edge
basicEdge = (node1.uid, node2.uid)
ret = digraph.add_edge(self, edge=basicEdge, label=label)
# if not self.is_aux_edge(basicEdge):
# self.del_edge(edge)
# ret = digraph.add_edge(self,edge=basicEdge,label=label,wt=100)
return ret
def __unicode__(self):
ret = self.originalSentence+"\n"
for i,node in enumerate(self.nodesMap.values()):
ret += node.to_conll_like() + "\n"
return ret
    def __str__(self):
        # Delegate to __unicode__ and encode (Python 2 str/unicode split)
        return unicode(self).encode('utf-8')
def edge_label(self, edge):
"""
overrides the edge_label function of digraph, to maintain the nodes mapping
@type edge: (node1,node2)
@type node1: Node
@param node2: origin of new edge
@type node2: Node
@param node2: destination of new edge
"""
node1, node2 = edge
if isinstance(node1, Node):
return digraph.edge_label(self, edge=(node1.uid, node2.uid))
else:
return digraph.edge_label(self, edge)
    def has_edge(self, (u, v)):
        # O(1) membership test on the underlying uid digraph (contrast with
        # is_edge_exists, which scans the edge list). Tuple-parameter
        # unpacking in the signature is Python 2 only.
        return digraph.has_edge(self, (u.uid, v.uid))
    def set_edge_label(self, edge, label):
        """Set the label of an edge given as a (Node, Node) pair."""
        node1, node2 = edge
        return digraph.set_edge_label(self, edge=(node1.uid, node2.uid), label=label)
    def drawToFile(self, filename, filetype):
        """
        Saves a graphic filename of this graph
        @type filename string
        @param name of file in which to write the output, without extension;
            an empty/falsy filename returns the pydot graph object instead
            of writing anything
        @type filetype string
        @param the type of file [png,jpg,...] - will be passed to dot
        """
        if not filename:
            return self.writeToDot(filename="",
                                   writeLabel=False)
        # Sentence label is only rendered for svg output
        ret = self.writeToDot(filename=filename + ".dot",
                              writeLabel=(filetype == "svg"))
        # Render via the external graphviz 'dot' binary
        call("dot -T{1} {0}.dot -o {0}.{1}".format(filename, filetype).split())
def is_aux_edge(self, (src, dst)):
"""
src and dst should be uid's of nodes!
"""
label = self.edge_label((src, dst))
if (not self.nodesMap[src].isPredicate) or ((label not in arguments_dependencies + clausal_complements)):# and (not label.startswith("prep"))):
return True
return False
    def writeToDot(self, filename, writeLabel):
        """
        Outputs a dot file representing this graph
        @type filename: string
        @param filename: the file in which to save the dot text; when
            empty/falsy the pydot Dot object is returned instead
        @type writeLabel: bool
        @param writeLabel: whether to render the original sentence as the
            graph label
        """
        dot = pygraph.readwrite.dot.pydot.Dot()
        if writeLabel:
            label = "\n".join([self.originalSentence.encode('utf-8')])
            dot.set_label(label)
            dot.set_labelloc("bottom")
            dot.set_labeljust("center")
        # One pydot node per graph node, colored by kind (predicate/top/debug)
        for uid in self.nodesMap:
            curNode = self.nodesMap[uid]
            dotNode = pygraph.readwrite.dot.pydot.Node()
            dotNode.set_shape(curNode.nodeShape)
            dotNode.set_name(unicode(uid))
            # Non-ascii characters are dropped from the rendered label
            label = encode_german_chars(u"<{0}>".format(curNode))
            dotNode.set_label(label.encode('ascii', errors='ignore'))
            if curNode.isPredicate:
                dotNode.set_color(PREDICATE_COLOR)
                dotNode.set_fontcolor(PREDICATE_COLOR)
            # "top" overrides the predicate color (checked later)
            if curNode.features.get("top", False):
                dotNode.set_color(TOPNODE_COLOR)
                dotNode.set_fontcolor(TOPNODE_COLOR)
            ##### for debug #####
            # Each feature flag below recolors the node for visual debugging;
            # later checks win when several flags are present.
            if curNode.features.has_key("Nominal"):
                dotNode.set_color("blue")
                dotNode.set_fontcolor("blue")
            if curNode.features.has_key("VADAS"):
                dotNode.set_color("green")
                dotNode.set_fontcolor("green")
            if curNode.features.has_key("traces"):
                dotNode.set_color("orange")
                dotNode.set_fontcolor("orange")
            if curNode.features.has_key("LV"):
                dotNode.set_color("purple")
                dotNode.set_fontcolor("purple")
            if curNode.features.has_key("heuristics"):
                dotNode.set_color("teal")
                dotNode.set_fontcolor("teal")
            if curNode.features.has_key("debug"):
                dotNode.set_color("blue")
                dotNode.set_fontcolor("blue")
            dot.add_node(dotNode)
        # Edges: auxiliary edges are rendered dashed
        for (src, dst) in digraph.edges(self):
            curEdge = pygraph.readwrite.dot.pydot.Edge(src=src, dst=dst)
            curEdge.set_fontsize("11")
            label = self.edge_label((src, dst)).encode('utf-8')
            if label:
                if self.is_aux_edge((src, dst)):
                    curEdge.set_style("dashed")
                curEdge.set_label(label)
            dot.add_edge(curEdge)
        if not filename:
            return dot
        try:
            dot.write(filename)
        except Exception as e:
            print e
def getJson(self):
"""
@return: json representation of this graph
"""
# format: (unique id, (isPredicate, (minIndex, maxIndex)))
entities = dict([(uid, {'predicate': bool(node.isPredicate),
'feats': self.getFeatsDic(node),
'charIndices': (0,0) if not node.text else self.nodeToCharIndices(node)})
for uid, node in self.nodesMap.items()])
edges = [(src, dest, self.edge_label((src, dest))) for (src, dest) in digraph.edges(self)]
return (entities, edges)
def getFeatsDic(self, node):
return {'implicit' : node.is_implicit(),
'tense' : node.features.get('Tense', ''),
'text' : sorted(node.text, key = lambda w: w.index),
'passive' : 'passive' if 'Passive Voice' in node.features else '',
'definite' : node.features.get('Definite', ''),
'pos' : node.pos() if node.pos() else 'NN',
'negated': 'negated' if 'Negation' in node.features else '',
'subjunctive': node.features.get('Subjunctive', '')}
    def nodeToCharIndices(self, node):
        ''' Get the start and end char indices from a given word index in a tokenized sentence '''
        sent = self.originalSentence
        if (not node.text):
            return (0,0)
        # Assumes the sentence is single-space tokenized so that word k starts
        # after k separating spaces - TODO confirm against the tokenizer
        data = sent.split(' ')
        sortedText = sorted(node.text, key = lambda w: w.index)
        startInd = sortedText[0].index
        endInd = sortedText[-1].index
        # Word indices appear to be 1-based for non-implicit nodes, hence the
        # shift to 0-based list positions (implicit nodes skip it)
        if not node.is_implicit():
            startInd = startInd - 1
            endInd = endInd - 1
        # char offset = total length of preceding words + one space per word
        baseIndex = sum(map(len, data[:startInd])) + startInd
        # end offset additionally spans the last word itself
        endIndex = sum(map(len, data[:endInd])) + endInd + len(data[endInd])
        return (baseIndex, endIndex)
    def draw(self):
        """
        Displays the graph output by dot with mspaint.
        It saves the dot and png file in temporary files in pwd.
        """
        # NOTE(review): developer-machine debugging helper - relies on
        # dumpGraphsToTexFile being present in this module's namespace (it is
        # not among the visible imports) and on a hard-coded Windows Chrome
        # path; confirm before using outside a dev box.
        dumpGraphsToTexFile(graphs=[self], appendix={}, graphsPerFile=1, lib=self.HOME_DIR, outputType="html")
        call('"C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe" ' + self.HOME_DIR + 'autogen0.html')
# was called _aux for English, but were actually removing other relations for German here. auxiliaries | |
backup copy is stored in "
iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
iIiIIIi += lisp . lisp_print_cour ( "./lisp.config.before-clear" )
return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 19 - 19: iIii1I11I1II1
if 26 - 26: OoooooooOO % I1IiiI % Oo0Ooo . I1IiiI % Ii1I
if 34 - 34: IiII / OoOoOO00
if 87 - 87: O0 * o0oOOo0O0Ooo * Oo0Ooo * II111iiii
if 6 - 6: i1IIi . I1ii11iIi11i + OoOoOO00 * I11i / OoOoOO00 % oO0o
if 18 - 18: II111iiii . OoooooooOO % OoOoOO00 % Ii1I
if 9 - 9: OoO0O00 - Oo0Ooo * OoooooooOO . Oo0Ooo
#Confirmation page for clearing the LISP configuration: requires a logged-in
#user, then renders a yes/cancel button pair linking to /lisp/clear/conf.
#(The "if N - N:" statements throughout are no-op filler inserted by the
#code obfuscator.)
@ bottle . route ( '/lisp/clear/conf/verify' )
def ii1Ii1IiIIi ( ) :
 if ( lispconfig . lisp_validate_user ( ) == False ) :
  return ( OOoO0 ( ) )
 if 83 - 83: I11i / I1ii11iIi11i
 if 34 - 34: I1IiiI * Oo0Ooo * I1Ii111 / OoO0O00 * I11i / iIii1I11I1II1
 iIiIIIi = "<br>Are you sure you want to clear the configuration?"
 iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
 if 74 - 74: Oo0Ooo / i11iIiiIii - II111iiii * o0oOOo0O0Ooo
 IIi1IIIIi = lisp . lisp_button ( "yes" , "/lisp/clear/conf" )
 OOOoO = lisp . lisp_button ( "cancel" , "/lisp" )
 iIiIIIi += IIi1IIIIi + OOOoO + "<br>"
 return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 14 - 14: I11i . iIii1I11I1II1 . OoooooooOO . II111iiii / o0oOOo0O0Ooo
if 21 - 21: i11iIiiIii / i1IIi + I1IiiI * OOooOOo . I1Ii111
if 84 - 84: O0 . I11i - II111iiii . ooOoO0o / II111iiii
if 47 - 47: OoooooooOO
if 4 - 4: I1IiiI % I11i
if 10 - 10: IiII . OoooooooOO - OoO0O00 + IiII - O0
if 82 - 82: ooOoO0o + II111iiii
if 39 - 39: oO0o % iIii1I11I1II1 % O0 % OoooooooOO * I1ii11iIi11i + iII111i
if 68 - 68: Oo0Ooo + i11iIiiIii
#Determine which port argument ("443", "-8080" or "8080") the running
#lisp-core.pyo process was started with, by grepping ps output.
#Returns the matching port string, or "" when no such process is found.
def Oo0oOooo000OO ( ) :
 oo00O00oO = ""
 if 98 - 98: o0oOOo0O0Ooo + O0 % i1IIi - OOooOOo + Oo0Ooo
 for OoOo000oOo0oo in [ "443" , "-8080" , "8080" ] :
  oO0O = 'ps auxww | egrep "lisp-core.pyo {}" | egrep -v grep' . format ( OoOo000oOo0oo )
  iIiIIIi = getoutput ( oO0O )
  if ( iIiIIIi == "" ) : continue
  if 86 - 86: OoOoOO00 . iIii1I11I1II1 - OoO0O00
  iIiIIIi = iIiIIIi . split ( "\n" ) [ 0 ]
  iIiIIIi = iIiIIIi . split ( " " )
  if ( iIiIIIi [ - 2 ] == "lisp-core.pyo" and iIiIIIi [ - 1 ] == OoOo000oOo0oo ) : oo00O00oO = OoOo000oOo0oo
  #NOTE(review): this break is unconditional, so the loop stops after the
  #first port whose ps output is non-empty even when the parse above did not
  #match - presumably intended; confirm against the upstream lispers.net source
  break
 if 56 - 56: O0
 return ( oo00O00oO )
if 61 - 61: o0oOOo0O0Ooo / OOooOOo / Oo0Ooo * O0
if 23 - 23: oO0o - OOooOOo + I11i
if 12 - 12: I1IiiI / ooOoO0o % o0oOOo0O0Ooo / i11iIiiIii % OoooooooOO
if 15 - 15: iIii1I11I1II1 % OoooooooOO - Oo0Ooo * Ii1I + I11i
if 11 - 11: iII111i * Ii1I - OoOoOO00
if 66 - 66: OoOoOO00 . i11iIiiIii - iII111i * o0oOOo0O0Ooo + OoooooooOO * I1ii11iIi11i
if 74 - 74: Oo0Ooo
#Restart the LISP subsystem: after user validation and a sudoers sanity
#check, spawn a background thread that runs "sudo ./RESTART-LISP <port>"
#and immediately return a status page.
@ bottle . route ( '/lisp/restart' )
def OO000o00 ( ) :
 if ( lispconfig . lisp_validate_user ( ) == False ) :
  return ( OOoO0 ( ) )
 if 46 - 46: OoO0O00
 if 71 - 71: I11i / I11i * oO0o * oO0o / II111iiii
 if 35 - 35: OOooOOo * o0oOOo0O0Ooo * I1IiiI % Oo0Ooo . OoOoOO00
 if 58 - 58: I11i + II111iiii * iII111i * i11iIiiIii - iIii1I11I1II1
 if 68 - 68: OoooooooOO % II111iiii
 if 26 - 26: II111iiii % i11iIiiIii % iIii1I11I1II1 % I11i * I11i * I1ii11iIi11i
 #The sudo'd restart script cannot run when /etc/sudoers requires a tty
 OOoO = getoutput ( "egrep requiretty /etc/sudoers" ) . split ( " " )
 if ( OOoO [ - 1 ] == "requiretty" and OOoO [ 0 ] == "Defaults" ) :
  iIiIIIi = "Need to remove 'requiretty' from /etc/sudoers"
  iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
  return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
 if 24 - 24: II111iiii % I1Ii111 - ooOoO0o + I1IiiI * I1ii11iIi11i
 if 2 - 2: Ii1I - IiII
 lisp . lprint ( lisp . bold ( "LISP subsystem restart request received" , False ) )
 if 83 - 83: oO0o % o0oOOo0O0Ooo % Ii1I - II111iiii * OOooOOo / OoooooooOO
 if 18 - 18: OoO0O00 + iIii1I11I1II1 - II111iiii - I1IiiI
 if 71 - 71: OoooooooOO
 if 33 - 33: I1Ii111
 if 62 - 62: I1ii11iIi11i + Ii1I + i1IIi / OoooooooOO
 #Recover the port the current lisp-core.pyo was started with so the
 #restart script can reuse it
 oo00O00oO = Oo0oOooo000OO ( )
 if 7 - 7: o0oOOo0O0Ooo + i1IIi . I1IiiI / Oo0Ooo
 if 22 - 22: ooOoO0o - ooOoO0o % OOooOOo . I1Ii111 + oO0o
 if 63 - 63: I1IiiI % I1Ii111 * o0oOOo0O0Ooo + I1Ii111 / Oo0Ooo % iII111i
 if 45 - 45: IiII
 #Run the restart in a background thread so this HTTP response can be
 #delivered before the web server itself is restarted
 Oo0O0 = "sleep 1; sudo ./RESTART-LISP {}" . format ( oo00O00oO )
 threading . Thread ( target = Ii1Iii111IiI1 , args = [ Oo0O0 ] ) . start ( )
 if 98 - 98: I1Ii111 - OoooooooOO % I1IiiI + O0 . Ii1I
 iIiIIIi = lisp . lisp_print_sans ( "Restarting LISP subsystem ..." )
 return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 56 - 56: II111iiii / oO0o + i11iIiiIii + OOooOOo
if 54 - 54: Ii1I - I11i - I1Ii111 . iIii1I11I1II1
if 79 - 79: Ii1I . OoO0O00
if 40 - 40: o0oOOo0O0Ooo + Oo0Ooo . o0oOOo0O0Ooo % ooOoO0o
if 15 - 15: Ii1I * Oo0Ooo % I1ii11iIi11i * iIii1I11I1II1 - i11iIiiIii
if 60 - 60: I1IiiI * I1Ii111 % OoO0O00 + oO0o
if 52 - 52: i1IIi
#Thread target that runs a shell command (used by the restart handler to
#invoke "sudo ./RESTART-LISP" in the background).
#NOTE(review): os.system on a shell string - in the visible caller the
#command is built internally (not from request input), so no injection
#hardening was applied here; re-check if new callers appear.
def Ii1Iii111IiI1 ( command ) :
 os . system ( command )
if 84 - 84: Ii1I / IiII
if 86 - 86: OoOoOO00 * II111iiii - O0 . OoOoOO00 % iIii1I11I1II1 / OOooOOo
if 11 - 11: I1IiiI * oO0o + I1ii11iIi11i / I1ii11iIi11i
if 37 - 37: i11iIiiIii + i1IIi
if 23 - 23: iII111i + I11i . OoOoOO00 * I1IiiI + I1ii11iIi11i
if 18 - 18: IiII * o0oOOo0O0Ooo . IiII / O0
if 8 - 8: o0oOOo0O0Ooo
#Confirmation page for restarting the LISP subsystem: requires a logged-in
#user, then renders a yes/cancel button pair linking to /lisp/restart.
@ bottle . route ( '/lisp/restart/verify' )
def II1II1 ( ) :
 if ( lispconfig . lisp_validate_user ( ) == False ) :
  return ( OOoO0 ( ) )
 if 48 - 48: i1IIi + I11i % OoOoOO00 / Oo0Ooo - o0oOOo0O0Ooo
 if 67 - 67: oO0o % o0oOOo0O0Ooo . OoooooooOO + OOooOOo * I11i * OoOoOO00
 iIiIIIi = "<br>Are you sure you want to restart the LISP subsystem?"
 iIiIIIi = lisp . lisp_print_sans ( iIiIIIi )
 if 36 - 36: O0 + Oo0Ooo
 IIi1IIIIi = lisp . lisp_button ( "yes" , "/lisp/restart" )
 OOOoO = lisp . lisp_button ( "cancel" , "/lisp" )
 iIiIIIi += IIi1IIIIi + OOOoO + "<br>"
 return ( lispconfig . lisp_show_wrapper ( iIiIIIi ) )
if 5 - 5: Oo0Ooo * OoOoOO00
if 46 - 46: ooOoO0o
if 33 - 33: iII111i - II111iiii * OoooooooOO - Oo0Ooo - OOooOOo
if 84 - 84: I1Ii111 + Oo0Ooo - OoOoOO00 * OoOoOO00
if 61 - 61: OoooooooOO . oO0o . OoooooooOO / Oo0Ooo
if 72 - 72: i1IIi
if 82 - 82: OoOoOO00 + OoooooooOO / i11iIiiIii * I1ii11iIi11i . OoooooooOO
@ bottle . route ( '/lisp/install' , method = "post" )
def oooo0OOo ( ) :
if ( lispconfig . lisp_validate_user ( ) == False ) :
return ( OOoO0 ( ) )
if 72 - 72: O0 / ooOoO0o + OoooooooOO * iII111i
if 61 - 61: OoooooooOO % II111iiii - I1IiiI % I1ii11iIi11i + i1IIi
i1II = bottle . request . forms . get ( "image_url" )
if ( i1II . find ( "lispers.net" ) == - 1 or i1II . find ( ".tgz" ) == - 1 ) :
iIi1IiI = | |
self._PGenr.TimeType
DateType = self._PGenr.DateType
IntegerType = self._PGenr.IntegerType
DecimalType = self._PGenr.DecimalType
PositiveIntegerType = self._PGenr.PositiveIntegerType
NegativeIntegerType = self._PGenr.NegativeIntegerType
NonPositiveIntegerType = self._PGenr.NonPositiveIntegerType
NonNegativeIntegerType = self._PGenr.NonNegativeIntegerType
BooleanType = self._PGenr.BooleanType
FloatType = self._PGenr.FloatType
DoubleType = self._PGenr.DoubleType
OtherSimpleTypes = self._PGenr.OtherSimpleTypes
AnyAttributeType = self._PGenr.AnyAttributeType
SimpleTypeType = self._PGenr.SimpleTypeType
RestrictionType = self._PGenr.RestrictionType
EnumerationType = self._PGenr.EnumerationType
MinInclusiveType = self._PGenr.MinInclusiveType
MaxInclusiveType = self._PGenr.MaxInclusiveType
UnionType = self._PGenr.UnionType
WhiteSpaceType = self._PGenr.WhiteSpaceType
ListType = self._PGenr.ListType
AnnotationType = self._PGenr.AnnotationType
DocumentationType = self._PGenr.DocumentationType
if name == SchemaType:
self.inSchema = 1
element = XschemaElement(self._PGenr, attrs)
if len(self.stack) == 1:
element.setTopLevel(1)
self.stack.append(element)
# If there is an attribute "xmlns" and its value is
# "http://www.w3.org/2001/XMLSchema", then remember and
# use that namespace prefix.
for name, value in list(attrs.items()):
if name[:6] == 'xmlns:':
nameSpace = name[6:] + ':'
self._PGenr.NamespacesDict[value] = nameSpace
elif name == 'targetNamespace':
self.Targetnamespace = value
elif (name == ElementType or
((name == ComplexTypeType) and (len(self.stack) == 1))
):
self.inElement = 1
self.inNonanonymousComplexType = 1
element = XschemaElement(self._PGenr, attrs)
if not 'type' in list(attrs.keys()) and not 'ref' in list(attrs.keys()):
element.setExplicitDefine(1)
if len(self.stack) == 1:
element.setTopLevel(1)
if 'substitutionGroup' in list(attrs.keys()) and 'name' in list(attrs.keys()):
substituteName = attrs['name']
headName = attrs['substitutionGroup']
if headName not in self.SubstitutionGroups:
self.SubstitutionGroups[headName] = []
self.SubstitutionGroups[headName].append(substituteName)
if name == ComplexTypeType:
element.complexType = 1
if self.inChoice and self.currentChoice:
element.choice = self.currentChoice
self.stack.append(element)
elif name == ComplexTypeType:
# If it have any attributes and there is something on the stack,
# then copy the attributes to the item on top of the stack.
if len(self.stack) > 1 and len(attrs) > 0:
parentDict = self.stack[-1].getAttrs()
for key in list(attrs.keys()):
parentDict[key] = attrs[key]
self.inComplexType = 1
elif name == AnyType:
element = XschemaElement(self._PGenr, attrs)
element.type = AnyTypeIdentifier
self.stack.append(element)
elif name == GroupType:
element = XschemaElement(self._PGenr, attrs)
if len(self.stack) == 1:
element.setTopLevel(1)
self.stack.append(element)
elif name == SequenceType:
self.inSequence = 1
elif name == ChoiceType:
self.currentChoice = XschemaElement(self._PGenr, attrs)
self.inChoice = 1
elif name == AttributeType:
self.inAttribute = 1
if 'name' in list(attrs.keys()):
name = attrs['name']
elif 'ref' in list(attrs.keys()):
name = strip_namespace(attrs['ref'])
else:
name = 'no_attribute_name'
if 'type' in list(attrs.keys()):
data_type = attrs['type']
else:
data_type = StringType[0]
if 'use' in list(attrs.keys()):
use = attrs['use']
else:
use = 'optional'
if 'default' in list(attrs.keys()):
default = attrs['default']
else:
default = None
if self.stack[-1].attributeGroup:
# Add this attribute to a current attributeGroup.
attribute = XschemaAttribute(self._PGenr, name, data_type, use, default)
self.stack[-1].attributeGroup.add(name, attribute)
else:
# Add this attribute to the element/complexType.
attribute = XschemaAttribute(self._PGenr, name, data_type, use, default)
self.stack[-1].attributeDefs[name] = attribute
self.lastAttribute = attribute
elif name == AttributeGroupType:
self.inAttributeGroup = 1
# If it has attribute 'name', then it's a definition.
# Prepare to save it as an attributeGroup.
if 'name' in list(attrs.keys()):
name = strip_namespace(attrs['name'])
attributeGroup = XschemaAttributeGroup(name)
element = XschemaElement(self._PGenr, attrs)
if len(self.stack) == 1:
element.setTopLevel(1)
element.setAttributeGroup(attributeGroup)
self.stack.append(element)
# If it has attribute 'ref', add it to the list of
# attributeGroups for this element/complexType.
if 'ref' in list(attrs.keys()):
self.stack[-1].attributeGroupNameList.append(attrs['ref'])
elif name == SimpleContentType:
self.inSimpleContent = 1
if len(self.stack) > 0:
self.stack[-1].setSimpleContent(True)
elif name == ComplexContentType:
pass
elif name == ExtensionType:
if 'base' in list(attrs.keys()) and len(self.stack) > 0:
extensionBase = attrs['base']
if extensionBase in StringType or \
extensionBase in IDTypes or \
extensionBase in NameTypes or \
extensionBase == TokenType or \
extensionBase == DateTimeType or \
extensionBase == TimeType or \
extensionBase == DateType or \
extensionBase in IntegerType or \
extensionBase == DecimalType or \
extensionBase == PositiveIntegerType or \
extensionBase == NegativeIntegerType or \
extensionBase == NonPositiveIntegerType or \
extensionBase == NonNegativeIntegerType or \
extensionBase == BooleanType or \
extensionBase == FloatType or \
extensionBase == DoubleType or \
extensionBase in OtherSimpleTypes:
if (len(self.stack) > 0 and
isinstance(self.stack[-1], XschemaElement)):
self.stack[-1].addSimpleBase(extensionBase.encode('utf-8'))
else:
self.stack[-1].setBase(extensionBase)
elif name == AnyAttributeType:
# Mark the current element as containing anyAttribute.
self.stack[-1].setAnyAttribute(1)
elif name == SimpleTypeType:
# fixlist
if self.inAttribute:
pass
elif self.inSimpleType and self.inRestrictionType:
pass
else:
# Save the name of the simpleType, but ignore everything
# else about it (for now).
if 'name' in list(attrs.keys()):
stName = self._PGenr.cleanupName(attrs['name'])
elif len(self.stack) > 0:
stName = self._PGenr.cleanupName(self.stack[-1].getName())
else:
stName = None
# If the parent is an element, mark it as a simpleType.
if len(self.stack) > 0:
self.stack[-1].setSimpleType(1)
element = SimpleTypeElement(stName)
element.setDefault(attrs.get('default'))
self._PGenr.SimpleTypeDict[stName] = element
self.stack.append(element)
self.inSimpleType = 1
elif name == RestrictionType:
if self.inAttribute:
if 'base' in attrs:
self.lastAttribute.setData_type(attrs['base'])
else:
# If we are in a simpleType, capture the name of
# the restriction base.
if ((self.inSimpleType or self.inSimpleContent) and
'base' in list(attrs.keys())):
self.stack[-1].setBase(attrs['base'])
else:
if 'base' in list(attrs.keys()):
self.stack[-1].setRestrictionBase(attrs['base'])
self.stack[-1].setRestrictionAttrs(dict(attrs))
self.inRestrictionType = 1
elif name in [EnumerationType, MinInclusiveType, MaxInclusiveType]:
if 'value' not in attrs:
return
if self.inAttribute:
# We know that the restriction is on an attribute and the
# attributes of the current element are un-ordered so the
# instance variable "lastAttribute" will have our attribute.
values = self.lastAttribute.values
elif self.inElement and 'value' in attrs:
# We're not in an attribute so the restriction must have
# been placed on an element and that element will still be
# in the stack. We search backwards through the stack to
# find the last element.
element = None
if self.stack:
for entry in reversed(self.stack):
if isinstance(entry, XschemaElement):
element = entry
break
if element is None:
err_msg('Cannot find element to attach enumeration: %s\n' % (
attrs['value']), )
sys.exit(1)
values = element.values
elif self.inSimpleType and 'value' in attrs:
# We've been defined as a simpleType on our own.
values = self.stack[-1].values
if name == EnumerationType:
values.append(attrs['value'])
else:
if len(values) == 0:
values.extend([None, None])
if name == MinInclusiveType:
values[0] = {'minimum': int(attrs['value'])}
else:
values[1] = {'maximum': int(attrs['value'])}
elif name == UnionType:
# Union types are only used with a parent simpleType and we want
# the parent to know what it's a union of.
parentelement = self.stack[-1]
if (isinstance(parentelement, SimpleTypeElement) and
'memberTypes' in attrs):
for member in attrs['memberTypes'].split(" "):
self.stack[-1].unionOf.append(member)
elif name == WhiteSpaceType and self.inRestrictionType:
if 'value' in attrs:
if attrs.getValue('value') == 'collapse':
self.stack[-1].collapseWhiteSpace = 1
elif name == ListType:
self.inListType = 1
# fixlist
if self.inSimpleType: # and self.inRestrictionType:
self.stack[-1].setListType(1)
if self.inSimpleType:
if 'itemType' in attrs:
self.stack[-1].setBase(attrs['itemType'])
elif name == AnnotationType:
self.inAnnotationType = 1
elif name == DocumentationType:
if self.inAnnotationType:
self.inDocumentationType = 1
logging.debug("Start element stack: %d" % len(self.stack))
def endElement(self, name):
logging.debug("End element: %s" % (name))
logging.debug("End element stack: %d" % (len(self.stack)))
SchemaType = self._PGenr.SchemaType
ElementType = self._PGenr.ElementType
ComplexTypeType = self._PGenr.ComplexTypeType
AnyType = self._PGenr.AnyType
GroupType = self._PGenr.GroupType
SequenceType = self._PGenr.SequenceType
ChoiceType = self._PGenr.ChoiceType
AttributeType = self._PGenr.AttributeType
AttributeGroupType = self._PGenr.AttributeGroupType
SimpleContentType = self._PGenr.SimpleContentType
ComplexContentType = self._PGenr.ComplexContentType
ExtensionType = self._PGenr.ExtensionType
StringType = self._PGenr.StringType
IDTypes = self._PGenr.IDTypes
NameTypes = self._PGenr.NameTypes
TokenType = self._PGenr.TokenType
DateTimeType = self._PGenr.DateTimeType
TimeType = self._PGenr.TimeType
DateType = self._PGenr.DateType
IntegerType = self._PGenr.IntegerType
DecimalType = self._PGenr.DecimalType
PositiveIntegerType = self._PGenr.PositiveIntegerType
NegativeIntegerType = self._PGenr.NegativeIntegerType
NonPositiveIntegerType = self._PGenr.NonPositiveIntegerType
NonNegativeIntegerType = self._PGenr.NonNegativeIntegerType
BooleanType = self._PGenr.BooleanType
FloatType = self._PGenr.FloatType
DoubleType = self._PGenr.DoubleType
OtherSimpleTypes = self._PGenr.OtherSimpleTypes
AnyAttributeType = self._PGenr.AnyAttributeType
SimpleTypeType = self._PGenr.SimpleTypeType
RestrictionType = self._PGenr.RestrictionType
EnumerationType = self._PGenr.EnumerationType
MinInclusiveType = self._PGenr.MinInclusiveType
UnionType = self._PGenr.UnionType
WhiteSpaceType = self._PGenr.WhiteSpaceType
ListType = self._PGenr.ListType
AnnotationType = self._PGenr.AnnotationType
DocumentationType = self._PGenr.DocumentationType
if name == SimpleTypeType: # and self.inSimpleType:
self.inSimpleType = 0
if self.inAttribute:
pass
else:
# If the simpleType is directly off the root, it may be used to
# qualify the type of many elements and/or attributes so we
# don't want to loose it entirely.
simpleType = self.stack.pop()
# fixlist
if len(self.stack) == 1:
self.topLevelSimpleTypes.append(simpleType)
self.stack[-1].setListType(simpleType.isListType())
elif name == RestrictionType and self.inRestrictionType:
self.inRestrictionType = 0
elif name == ElementType or (name == ComplexTypeType and self.stack[-1].complexType):
self.inElement = 0
self.inNonanonymousComplexType = 0
if len(self.stack) >= 2:
element = self.stack.pop()
self.stack[-1].addChild(element)
elif name == AnyType:
if len(self.stack) >= 2:
element = self.stack.pop()
self.stack[-1].addChild(element)
elif name == ComplexTypeType:
self.inComplexType = 0
elif name == SequenceType:
self.inSequence = 0
elif name == ChoiceType:
self.currentChoice = None
self.inChoice = 0
elif name == AttributeType:
self.inAttribute = 0
elif name == AttributeGroupType:
self.inAttributeGroup = 0
if self.stack[-1].attributeGroup:
# The top | |
= self.storage_config['dimensions']
dimensions = dimension_config.keys()
index_dimensions = indices_dict.keys()
dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
# Dict of dimensions and sizes read from netCDF
nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
logger.debug('variable_name = %s', variable_name)
logger.debug('indices_dict = %s', indices_dict)
logger.debug('nc_shape_dict = %s', nc_shape_dict)
assert set(index_dimensions) <= set(dimensions), 'Invalid slice index dimension(s)'
# Create slices for accessing netcdf array
slicing = [slice(indices_dict[dimension], indices_dict[dimension] + 1) if dimension in index_dimensions
else slice(0, nc_shape_dict[dimension]) for dimension in dimensions]
logger.debug('slicing = %s', slicing)
logger.debug('self.netcdf_object.variables = %s' % self.netcdf_object.variables)
variable = self.netcdf_object.variables[variable_name]
# logger.debug('variable = %s' % variable)
slice_array = variable[slicing]
logger.debug('slice_array = %s', slice_array)
return slice_array
def get_subset_indices(self, range_dict):
    '''
    Determine the dimension coordinate values falling within the specified ranges.

    Parameters:
        range_dict: Dict keyed by dimension tag containing (min, max] range tuples
            for each dimension to be subsetted

    Returns:
        dimension_indices_dict: Dict keyed by dimension tag containing the array of
            coordinate values selected for each dimension, or None if any requested
            range selects no values
    '''
    if not self._isopen:
        self.open()
    dimension_config = self.storage_config['dimensions']
    # Materialise keys once so they can be indexed positionally below
    # (dict.keys() returns a non-indexable view on Python 3)
    dimensions = list(dimension_config.keys())
    range_dimensions = range_dict.keys()
    dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
    # Dict of dimensions and sizes read from netCDF
    nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
    logger.debug('range_dict = %s', range_dict)
    logger.debug('nc_shape_dict = %s', nc_shape_dict)
    assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
    dimension_indices_dict = {}  # Dict containing all indices for each dimension
    for dimension_index in range(len(dimensions)):
        dimension = dimensions[dimension_index]
        dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
        if dimension in range_dimensions:
            logger.debug('dimension_array = %s', dimension_array)
            logger.debug('range = %s', range_dict[dimension])
            # Half-open interval: min is exclusive, max is inclusive
            mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
            index_array = np.where(mask_array)
            logger.debug('index_array = %s', index_array)
            dimension_indices_dict[dimension] = dimension_array[mask_array]
            # BUG FIX: np.where returns a (non-empty) tuple of index arrays, so the
            # previous "if not index_array:" test could never fire. Check whether the
            # selection is actually empty instead.
            if index_array[0].size == 0:
                logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
                return None
        else:  # Range not defined for this dimension - take the whole lot
            dimension_indices_dict[dimension] = dimension_array
    return dimension_indices_dict
def read_subset(self, variable_name, range_dict):
    '''
    Function to read an array subset of the specified netCDF variable
    Parameters:
        variable_name: Name of variable from which the subset array will be read
        range_dict: Dict keyed by dimension tag containing the dimension(s) & range tuples from which the subset should be read
    Returns:
        subset_array: Numpy array read from netCDF file
        dimension_indices_dict: Dict containing array indices for each dimension
    Returns None (single value) when a requested range selects no coordinates.
    '''
    if not self._isopen:
        self.open()
    dimension_config = self.storage_config['dimensions']
    dimensions = dimension_config.keys()
    # NOTE(review): 'dimensions' is indexed positionally below; dict.keys()
    # is not indexable on Python 3 — wrap in list() if porting.
    range_dimensions = range_dict.keys()
    dimension_names = [dimension_config[dimension]['dimension_name'] for dimension in dimensions]
    # Dict of dimensions and sizes read from netCDF
    nc_shape_dict = {dimensions[index]: len(self.netcdf_object.dimensions[dimension_names[index]]) for index in range(len(dimensions))}
    logger.debug('variable_name = %s', variable_name)
    logger.debug('range_dict = %s', range_dict)
    logger.debug('nc_shape_dict = %s', nc_shape_dict)
    assert set(range_dimensions) <= set(dimensions), 'Invalid range dimension(s)'
    # Create slices for accessing netcdf array
    dimension_indices_dict = {}  # Dict containing all indices for each dimension
    slicing = []
    for dimension_index in range(len(dimensions)):
        dimension = dimensions[dimension_index]
        dimension_array = self.netcdf_object.variables[dimension_names[dimension_index]][:]
        if dimension in range_dimensions:
            logger.debug('dimension_array = %s', dimension_array)
            logger.debug('range = %s', range_dict[dimension])
            # Select coordinates in the half-open interval (min, max]
            mask_array = ((dimension_array > range_dict[dimension][0]) * (dimension_array <= range_dict[dimension][1]))
            index_array = np.where(mask_array)
            logger.debug('index_array = %s', index_array)
            dimension_indices_dict[dimension] = dimension_array[mask_array]
            try:
                # A single contiguous slice from first to last selected index;
                # assumes selected coordinates are contiguous along this dimension.
                dimension_slice = slice(index_array[0][0], index_array[0][-1] + 1)
            except IndexError:
                # Empty selection: no coordinate fell inside the requested range
                logger.warning('Invalid range %s for dimension %s', range_dict[dimension], dimension)
                return None
        else:  # Range not defined for this dimension
            dimension_indices_dict[dimension] = dimension_array
            dimension_slice = slice(0, nc_shape_dict[dimension])
        slicing.append(dimension_slice)
    logger.debug('slicing = %s', slicing)
    variable = self.netcdf_object.variables[variable_name]
    # logger.debug('variable = %s' % variable)
    subset_array = variable[slicing]
    logger.debug('subset_array = %s', subset_array)
    return subset_array, dimension_indices_dict
def get_datatype(self, variable_name, convention='numpy'):
    '''
    Look up the datatype name of a variable under the given naming convention
    (e.g. 'numpy' or 'netcdf'); returns None when not configured.
    '''
    measurement_config = self.storage_config['measurement_types'][variable_name]
    datatype_key = '%s_datatype_name' % convention
    return measurement_config.get(datatype_key)
def get_attributes(self, verbose=None, normalise=True):
    """
    Return the global and variable attributes of the underlying netCDF object
    as an OrderedDict (roughly the content of 'ncdump -h', without formatting).

    Global attributes are keyed by attribute name; variable attributes are
    keyed as 'variable:attribute'. When normalise is True, some NumPy types
    returned by the netCDF module are converted to equivalent regular types.
    """
    # Delegate to the shared helper module.
    return netcdf_builder.get_attributes(self.netcdf_object, verbose, normalise)
def set_attributes(self, ncdict, delval='DELETE'):
    """
    Apply attribute name/value pairs from *ncdict* to the netCDF object.

    Keys are either a global attribute name or 'variable:attribute'. Any value
    equal to *delval* (default 'DELETE') removes that attribute if it exists,
    e.g. {'temperature:missing_value': 'DELETE'} deletes missing_value from
    the temperature variable. A ValueError is raised (by netcdf_builder) when
    a key names a variable not defined in the netCDF object.
    """
    # Delegate to the shared helper module.
    netcdf_builder.set_attributes(self.netcdf_object, ncdict, delval)
def show_dimensions(self):
    """Print each dimension's name, length and whether it is unlimited."""
    # Delegate to the shared helper module.
    netcdf_builder.show_dimensions(self.netcdf_object)
def set_variable(self, varname, dtype='f4', dims=None, chunksize=None, fill=None, zlib=False, **kwargs):
    """
    Define (create) a variable on the netCDF object without writing any data.

    dims is a tuple of previously created dimension names (an empty tuple
    defines a scalar). Recommended dimension order is time, Z, Y, X, with any
    other dimensions placed to the left. chunksize, if given, is a sequence of
    the same length as dims giving the chunk size along each dimension (see
    http://data.auscover.org.au/node/73 for chunking tips and tricks).
    zlib=True enables compression (default False). Default behaviour creates
    an uncompressed, unchunked 'f4' variable on
    ('time', 'latitude', 'longitude'). Variables can later be renamed via the
    netCDF object's 'renameVariable' method.
    """
    # Delegate to the shared helper module, forwarding all options verbatim.
    netcdf_builder.set_variable(self.netcdf_object, varname, dtype=dtype, dims=dims,
                                chunksize=chunksize, fill=fill, zlib=zlib, **kwargs)
def add_bounds(self, dimension_tag, bounds):
"""Add a bounds array of data to the netCDF object.
Bounds array can be a list, tuple or NumPy array.
A bounds array gives the values of the vertices corresponding to a dimension
variable (see the CF documentation for more information). The dimension
variable requires an attribute called 'bounds', which references a variable
that contains the bounds array. The bounds array has the same shape as the
corresponding dimension with an extra size for the number of vertices.
This function:
- Adds a 'bounds' attribute to the dimension variable if required.
If a bounds attribute exits then its value will be used for the bounds
variable (bndname). Otherwise if a bndname is given then this will be
used. Otherwise the default bndname will be '_bounds' appended to the
dimension name.
- If the bounds variable exists then a ValueError will be raised if its
shape does not match the bounds array.
- If the bounds variable does not exist then it will be created. If so
an exra dimension is required for the number of vertices. Any existing
dimension of the right size will be used. Otherwise a new dimension
will be created. The new dimension's name will be 'nv' (number of
vertices), unless this dimension name is already used in which case
'_nv' appended to the dimension name will be used instead.
- Lastly, the bounds array is written to the bounds variable. If | |
# +--------------------------------------------------------------------------+
# | Licensed Materials - Property of IBM |
# | |
# | (C) Copyright IBM Corporation 2009-2014. |
# +--------------------------------------------------------------------------+
# | This module complies with Django 1.0 and is |
# | Licensed under the Apache License, Version 2.0 (the "License"); |
# | you may not use this file except in compliance with the License. |
# | You may obtain a copy of the License at |
# | http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable |
# | law or agreed to in writing, software distributed under the License is |
# | distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY |
# | KIND, either express or implied. See the License for the specific |
# | language governing permissions and limitations under the License. |
# +--------------------------------------------------------------------------+
# | Authors: <NAME>, <NAME>, <NAME> |
# +--------------------------------------------------------------------------+
import sys
_IS_JYTHON = sys.platform.startswith( 'java' )
if not _IS_JYTHON:
try:
# Import IBM_DB wrapper ibm_db_dbi
import ibm_db_dbi as Database
#from Database import DatabaseError
except ImportError, e:
raise ImportError( "ibm_db module not found. Install ibm_db module from http://code.google.com/p/ibm-db/. Error: %s" % e )
else:
from com.ziclix.python.sql import zxJDBC
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django import VERSION as djangoVersion
class DatabaseIntrospection( BaseDatabaseIntrospection ):
"""
This is the class where database metadata information can be generated.
"""
if not _IS_JYTHON:
data_types_reverse = {
Database.STRING : "CharField",
Database.TEXT : "TextField",
Database.XML : "XMLField",
Database.NUMBER : "IntegerField",
Database.FLOAT : "FloatField",
Database.DECIMAL : "DecimalField",
Database.DATE : "DateField",
Database.TIME : "TimeField",
Database.DATETIME : "DateTimeField",
}
if(djangoVersion[0:2] > (1, 1)):
data_types_reverse[Database.BINARY] = "BinaryField"
data_types_reverse[Database.BIGINT] = "BigIntegerField"
else:
data_types_reverse[Database.BIGINT] = "IntegerField"
else:
data_types_reverse = {
zxJDBC.CHAR: "CharField",
zxJDBC.BIGINT: "BigIntegerField",
zxJDBC.BINARY: "BinaryField",
zxJDBC.BIT: "SmallIntegerField",
zxJDBC.BLOB: "BinaryField",
zxJDBC.CLOB: "TextField",
zxJDBC.DATE: "DateField",
zxJDBC.DECIMAL: "DecimalField",
zxJDBC.DOUBLE: "FloatField",
zxJDBC.FLOAT: "FloatField",
zxJDBC.INTEGER: "IntegerField",
zxJDBC.LONGVARCHAR: "TextField",
zxJDBC.LONGVARBINARY: "ImageField",
zxJDBC.NUMERIC: "DecimalField",
zxJDBC.REAL: "FloatField",
zxJDBC.SMALLINT: "SmallIntegerField",
zxJDBC.VARCHAR: "CharField",
zxJDBC.TIMESTAMP: "DateTimeField",
zxJDBC.TIME: "TimeField",
}
def get_field_type(self, data_type, description):
    """Map a driver type code to a Django field class name."""
    # DB2 reports SMALLINT as a NUMBER with precision 5; map it explicitly
    # before deferring to the generic data_types_reverse lookup.
    if not _IS_JYTHON:
        if data_type == Database.NUMBER and description.precision == 5:
            return 'SmallIntegerField'
    return super(DatabaseIntrospection, self).get_field_type(data_type, description)
# Converting table name to lower case.
def table_name_converter(self, name):
    """Normalise a table name to the lower-case form used by this backend."""
    return name.lower()
# Getting the list of all tables, which are present under current schema.
def get_table_list(self, cursor):
    """Return the lower-cased names of all tables in the current schema."""
    if not _IS_JYTHON:
        schema = cursor.connection.get_current_schema()
        return [row['TABLE_NAME'].lower()
                for row in cursor.connection.tables(schema)]
    cursor.execute("select current_schema from sysibm.sysdummy1")
    schema = cursor.fetchone()[0]
    # tables(catalog, schemaPattern, tableNamePattern, types) describes the
    # tables available in a catalog; row[2] holds the table name.
    cursor.tables(None, schema, None, ("TABLE",))
    return [row[2].lower() for row in cursor.fetchall()]
# Generating a dictionary for foreign key details, which are present under current schema.
def get_relations(self, cursor, table_name):
    """
    Return a dict mapping the index of each foreign-key column of
    *table_name* to information about the primary-key column it references.
    """
    relations = {}
    if not _IS_JYTHON:
        schema = cursor.connection.get_current_schema()
        for fk in cursor.connection.foreign_keys(True, schema, table_name):
            # value: (index of referenced PK column, referenced table name)
            relations[self.__get_col_index(cursor, schema, table_name, fk['FKCOLUMN_NAME'])] = (self.__get_col_index(cursor, schema, fk['PKTABLE_NAME'], fk['PKCOLUMN_NAME']), fk['PKTABLE_NAME'].lower())
    else:
        cursor.execute("select current_schema from sysibm.sysdummy1")
        schema = cursor.fetchone()[0]
        # foreignkeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog, foreignSchema, foreignTable)
        # describes the foreign key columns in the foreign key table that reference the primary key columns
        # of the primary key table (how one table imports another's key). Rows are ordered by
        # FKTABLE_CAT, FKTABLE_SCHEM, FKTABLE_NAME and KEY_SEQ.
        cursor.foreignkeys(None, schema, table_name, None, '%', '%')
        for fk in cursor.fetchall():
            # fk[2] is primary key table name, fk[3] is primary key column name,
            # fk[7] is foreign key column name being exported.
            # NOTE(review): this branch builds a 3-tuple (index, pk_column, pk_table)
            # while the non-Jython branch builds a 2-tuple (index, pk_table) —
            # confirm which shape callers actually expect.
            relations[self.__get_col_index(cursor, schema, table_name, fk[7])] = (self.__get_col_index(cursor, schema, fk[2], fk[3]), fk[3], fk[2])
    return relations
# Private method. Getting Index position of column by its name
def __get_col_index(self, cursor, schema, table_name, col_name):
    """Return the zero-based position of *col_name* in *table_name* (None if not found)."""
    if not _IS_JYTHON:
        for column in cursor.connection.columns(schema, table_name, [col_name]):
            # ORDINAL_POSITION is one-based
            return column['ORDINAL_POSITION'] - 1
        return None
    cursor.execute("select current_schema from sysibm.sysdummy1")
    schema = cursor.fetchone()[0]
    # columns(catalog, schemaPattern, tableNamePattern, columnNamePattern)
    # describes table columns in the specified catalog.
    cursor.columns(None, schema, table_name, col_name)
    for column in cursor.fetchall():
        # column[16] is the one-based index of the column within the table
        return column[16] - 1
    return None
def get_key_columns(self, cursor, table_name):
    """
    Return (fk_column, referenced_table, referenced_column) triples describing
    the foreign keys of *table_name*, with all names lower-cased.
    """
    relations = []
    if not _IS_JYTHON:
        schema = cursor.connection.get_current_schema()
        for fk in cursor.connection.foreign_keys(True, schema, table_name):
            relations.append((fk['FKCOLUMN_NAME'].lower(), fk['PKTABLE_NAME'].lower(), fk['PKCOLUMN_NAME'].lower()))
    else:
        cursor.execute("select current_schema from sysibm.sysdummy1")
        schema = cursor.fetchone()[0]
        # foreignkeys(primaryCatalog, primarySchema, primaryTable, foreignCatalog, foreignSchema, foreignTable)
        # describes the FK columns referencing the PK columns of the primary table:
        # fk[2] = PK table name, fk[3] = PK column name, fk[7] = FK column name.
        cursor.foreignkeys(None, schema, table_name, None, '%', '%')
        for fk in cursor.fetchall():
            # BUG FIX: lower-case the names so this branch agrees with the
            # non-Jython branch (and with Django's lower-cased identifiers);
            # previously the raw driver casing leaked through.
            relations.append((fk[7].lower(), fk[2].lower(), fk[3].lower()))
    return relations
# Getting list of indexes associated with the table provided.
def get_indexes(self, cursor, table_name):
    """
    Return {column_name: {'unique': bool, 'primary_key': bool}} for the
    single-column indexes of *table_name*; multi-column indexes are skipped.
    """
    indexes = {}
    # To skip indexes across multiple fields
    multifield_indexSet = set()
    if not _IS_JYTHON:
        schema = cursor.connection.get_current_schema()
        all_indexes = cursor.connection.indexes(True, schema, table_name)
        # First pass: any index with an entry at ordinal position 2 spans
        # more than one column and must be ignored below.
        for index in all_indexes:
            if (index['ORDINAL_POSITION'] is not None) and (index['ORDINAL_POSITION'] == 2):
                multifield_indexSet.add(index['INDEX_NAME'])
        for index in all_indexes:
            temp = {}
            if index['INDEX_NAME'] in multifield_indexSet:
                continue
            if (index['NON_UNIQUE']):
                temp['unique'] = False
            else:
                temp['unique'] = True
            temp['primary_key'] = False
            indexes[index['COLUMN_NAME'].lower()] = temp
        # Second pass: flag primary-key columns
        for index in cursor.connection.primary_keys(True, schema, table_name):
            indexes[index['COLUMN_NAME'].lower()]['primary_key'] = True
    else:
        cursor.execute("select current_schema from sysibm.sysdummy1")
        schema = cursor.fetchone()[0]
        # statistics(catalog, schema, table, unique, approximate) returns a
        # description of a table's indices and statistics.
        cursor.statistics(None, schema, table_name, 0, 0)
        all_indexes = cursor.fetchall()
        for index in all_indexes:
            # index[7] is the ORDINAL_POSITION within the index; index[5] is the index name
            if index[7] == 2:
                multifield_indexSet.add(index[5])
        for index in all_indexes:
            temp = {}
            if index[5] in multifield_indexSet:
                continue
            # index[3] indicates non-uniqueness of the column
            if (index[3] != None):
                if (index[3]) == 1:
                    temp['unique'] = False
                else:
                    temp['unique'] = True
            temp['primary_key'] = False
            # index[8] is the column name
            indexes[index[8].lower()] = temp
        # primarykeys(catalog, schema, table) describes a table's primary key columns
        cursor.primarykeys(None, schema, table_name)
        for index in cursor.fetchall():
            # index[3] is the column name
            indexes[index[3].lower()]['primary_key'] = True
    return indexes
# Getting the description of the table.
def get_table_description(self, cursor, table_name):
    """Describe the columns of *table_name*, lower-casing each column name."""
    quote = self.connection.ops.quote_name
    # Fetch a single row purely to populate cursor.description.
    cursor.execute("SELECT * FROM %s FETCH FIRST 1 ROWS ONLY" % quote(table_name))
    if djangoVersion < (1, 6):
        return [[desc[0].lower(), ] + desc[1:] for desc in cursor.description]
    return [FieldInfo(*([desc[0].lower(), ] + desc[1:]))
            for desc in cursor.description]
def get_constraints(self, cursor, table_name):
constraints = {}
if not _IS_JYTHON:
schema = cursor.connection.get_current_schema()
sql = "SELECT CONSTNAME, COLNAME FROM SYSCAT.COLCHECKS WHERE TABSCHEMA='%(schema)s' AND TABNAME='%(table)s'" % {'schema': schema.upper(), 'table': table_name.upper()}
cursor.execute(sql)
for constname, colname in cursor.fetchall():
if constname not in constraints:
constraints[constname] = {
'columns': [],
'primary_key': False,
'unique': False,
'foreign_key': None,
'check': True,
'index': False
}
constraints[constname]['columns'].append(colname.lower())
sql = "SELECT KEYCOL.CONSTNAME, KEYCOL.COLNAME FROM SYSCAT.KEYCOLUSE KEYCOL INNER JOIN SYSCAT.TABCONST TABCONST ON KEYCOL.CONSTNAME=TABCONST.CONSTNAME WHERE TABCONST.TABSCHEMA='%(schema)s' and TABCONST.TABNAME='%(table)s' and TABCONST.TYPE='U'" % {'schema': schema.upper(), 'table': table_name.upper()}
cursor.execute(sql)
for constname, colname in cursor.fetchall():
if constname not in constraints:
constraints[constname] = {
'columns': [],
'primary_key': False,
'unique': True,
'foreign_key': None,
'check': False,
'index': True
}
| |
from __future__ import unicode_literals
import glob
import shutil
from os.path import basename, dirname
from typing import Iterable
from jsonpickle import json
from ledger.compact_merkle_tree import CompactMerkleTree
from ledger.genesis_txn.genesis_txn_file_util import create_genesis_txn_init_ledger
from ledger.genesis_txn.genesis_txn_initiator_from_file import GenesisTxnInitiatorFromFile
from ledger.ledger import Ledger
from plenum.cli.command import helpCmd, statusNodeCmd, statusClientCmd, \
loadPluginsCmd, clientSendCmd, clientShowCmd, newKeyCmd, \
newWalletCmd, renameWalletCmd, useWalletCmd, saveWalletCmd, \
listWalletCmd, listIdsCmd, useIdCmd, addGenesisTxnCmd, \
createGenesisTxnFileCmd, changePromptCmd, exitCmd, quitCmd, Command
from plenum.cli.command import licenseCmd
from plenum.cli.command import newClientCmd
from plenum.cli.command import newNodeCmd
from plenum.cli.command import statusCmd
from plenum.cli.constants import SIMPLE_CMDS, CLI_CMDS, NODE_OR_CLI, NODE_CMDS, \
PROMPT_ENV_SEPARATOR, WALLET_FILE_EXTENSION, NO_ENV
from plenum.cli.helper import getUtilGrams, getNodeGrams, getClientGrams, \
getAllGrams
from plenum.cli.phrase_word_completer import PhraseWordCompleter
from plenum.client.wallet import Wallet, WalletStorageHelper
from plenum.common.constants import TXN_TYPE, TARGET_NYM, DATA, IDENTIFIER, \
NODE, ALIAS, NODE_IP, NODE_PORT, CLIENT_PORT, CLIENT_IP, VERKEY, BY, \
CLIENT_STACK_SUFFIX
from plenum.common.exceptions import NameAlreadyExists, KeysNotFoundException
from plenum.common.keygen_utils import learnKeysFromOthers, tellKeysToOthers, areKeysSetup
from plenum.common.plugin_helper import loadPlugins
from plenum.common.signer_did import DidSigner
from plenum.common.stack_manager import TxnStackManager
from plenum.common.tools import lazy_field
from plenum.common.transactions import PlenumTransactions
from prompt_toolkit.utils import is_windows, is_conemu_ansi
from storage.kv_in_memory import KeyValueStorageInMemory
from stp_core.crypto.util import cleanSeed, seedFromHex
from stp_core.network.port_dispenser import genHa
from stp_core.types import HA
from plenum.common.config_helper import PNodeConfigHelper
import configparser
import os
from configparser import ConfigParser
from collections import OrderedDict
import time
import ast
from functools import reduce, partial
import sys
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.contrib.regular_languages.compiler import compile
from prompt_toolkit.contrib.regular_languages.completion import GrammarCompleter
from prompt_toolkit.contrib.regular_languages.lexer import GrammarLexer
from prompt_toolkit.interface import CommandLineInterface
from prompt_toolkit.shortcuts import create_prompt_application, \
create_asyncio_eventloop
from prompt_toolkit.layout.lexers import SimpleLexer
from prompt_toolkit.styles import PygmentsStyle
from prompt_toolkit.terminal.vt100_output import Vt100_Output
from pygments.token import Token
from plenum.client.client import Client
from plenum.common.util import getMaxFailures, \
firstValue, randomString, bootstrapClientKeys, \
getFriendlyIdentifier, \
normalizedWalletFileName, getWalletFilePath, \
getLastSavedWalletFileName
from stp_core.common.log import \
getlogger, Logger
from plenum.server.node import Node
from plenum.common.types import NodeDetail
from plenum.server.plugin_loader import PluginLoader
from plenum.server.replica import Replica
from plenum.common.config_util import getConfig
from plenum.__metadata__ import __version__
from plenum.cli.command_history import CliFileHistory
if is_windows():
from prompt_toolkit.terminal.win32_output import Win32Output # noqa
from prompt_toolkit.terminal.conemu_output import ConEmuOutput # noqa
else:
from prompt_toolkit.terminal.vt100_output import Vt100_Output # noqa
class CustomOutput(Vt100_Output):
    """
    Vt100 output variant that suppresses the cursor-position-report request,
    which would otherwise print a stray escape character on the console.
    """

    def ask_for_cpr(self):
        """Skip emitting the CPR escape sequence; just flush pending output."""
        self.flush()
class Cli:
# Election-tracking state (class-level defaults)
isElectionStarted = False
primariesSelected = 0
electedPrimaries = set()
# Branding used for the prompt, banner and history file name
name = 'plenum'
properName = 'Plenum'
fullName = 'Plenum protocol'
# Classes used when instantiating nodes/clients; subclasses may override
NodeClass = Node
ClientClass = Client
defaultWalletName = 'Default'
# NOTE(review): mutable class-level list — shared by all Cli instances
# until an instance rebinds it (reset() does so per instance)
_genesisTransactions = []
# noinspection PyPep8
def __init__(self, looper, basedirpath: str, ledger_base_dir: str, nodeReg=None, cliNodeReg=None,
             output=None, debug=False, logFileName=None, config=None,
             useNodeReg=False, withNode=True, unique_name=None,
             override_tags=None, nodes_chroot: str=None):
    """
    Build the interactive CLI: load config, initialise node registries,
    set up the prompt_toolkit interface and load plugins.
    """
    self.unique_name = unique_name
    self.curClientPort = None
    self.basedirpath = os.path.expanduser(basedirpath)
    self.ledger_base_dir = os.path.expanduser(ledger_base_dir)
    self._config = config or getConfig(self.basedirpath)
    # Route CLI log output through self.out
    Logger().enableCliLogging(self.out,
                              override_tags=override_tags)
    self.looper = looper
    self.withNode = withNode
    # Populate nodeReg/cliNodeReg either from arguments or the pool ledger
    self.__init_registry(useNodeReg, nodeReg, cliNodeReg)
    # Used to store created clients
    self.clients = {}  # clientName -> Client
    # To store the created requests
    self.requests = {}
    # To store the nodes created
    self.nodes = {}
    self.externalClientKeys = {}  # type: Dict[str,str]
    # Command-name sets used for parsing and completion
    self.cliCmds = CLI_CMDS
    self.nodeCmds = NODE_CMDS
    self.helpablesCommands = self.cliCmds | self.nodeCmds
    self.simpleCmds = SIMPLE_CMDS
    self.commands = {'list', 'help'} | self.simpleCmds
    self.cliActions = {'send', 'show'}
    self.commands.update(self.cliCmds)
    self.commands.update(self.nodeCmds)
    self.node_or_cli = NODE_OR_CLI
    self.nodeNames = list(self.nodeReg.keys()) + ["all"]
    self.debug = debug
    self.plugins = {}
    self.pluginPaths = []
    self.defaultClient = None
    self.activeDID = None
    # Wallet and Client are the same from user perspective for now
    self._activeClient = None
    self._wallets = {}  # type: Dict[str, Wallet]
    self._activeWallet = None  # type: Wallet
    self.keyPairs = {}
    self.nodes_chroot = nodes_chroot
    '''
    examples:
    status
    new node Alpha
    new node all
    new client Joe
    client Joe send <Cmd>
    client Joe show 1
    '''
    # Grammar fragments used to build the command parser/completer
    self.utilGrams = getUtilGrams()
    self.nodeGrams = getNodeGrams()
    self.clientGrams = getClientGrams()
    self._allGrams = []
    self._lexers = {}
    self.clientWC = WordCompleter([])
    self._completers = {}
    self.initializeInputParser()
    self.style = PygmentsStyle.from_defaults({
        Token.Operator: '#33aa33 bold',
        Token.Gray: '#424242',
        Token.Number: '#aa3333 bold',
        Token.Name: '#ffff00 bold',
        Token.Heading: 'bold',
        Token.TrailingInput: 'bg:#662222 #ffffff',
        Token.BoldGreen: '#33aa33 bold',
        Token.BoldOrange: '#ff4f2f bold',
        Token.BoldBlue: '#095cab bold'})
    self.voidMsg = "<none>"
    # Create an asyncio `EventLoop` object. This is a wrapper around the
    # asyncio loop that can be passed into prompt_toolkit.
    eventloop = create_asyncio_eventloop(looper.loop)
    # Persistent history with seed values masked before being written out
    self.pers_hist = CliFileHistory(
        command_filter=self.mask_seed, filename='.{}-cli-history'.format(self.name))
    # Create interface.
    app = create_prompt_application('{}> '.format(self.name),
                                    lexer=self.grammarLexer,
                                    completer=self.grammarCompleter,
                                    style=self.style,
                                    history=self.pers_hist)
    self.currPromptText = self.name
    # Pick a terminal output implementation when none was supplied
    if output:
        out = output
    else:
        if is_windows():
            if is_conemu_ansi():
                out = ConEmuOutput(sys.__stdout__)
            else:
                out = Win32Output(sys.__stdout__)
        else:
            out = CustomOutput.from_pty(sys.__stdout__, true_color=True)
    self.cli = CommandLineInterface(
        application=app,
        eventloop=eventloop,
        output=out)
    # Patch stdout in something that will always print *above* the prompt
    # when something is written to stdout.
    sys.stdout = self.cli.stdout_proxy()
    if logFileName:
        Logger().enableFileLogging(logFileName)
    self.logger = getlogger("cli")
    self.print("\n{}-CLI (c) 2017 Evernym, Inc.".format(self.properName))
    self._actions = []
    if nodeReg:
        self.print("Node registry loaded.")
        self.showNodeRegistry()
    self.print("Type 'help' for more information.")
    self.print("Running {} {}\n".format(self.properName,
                                        self.getCliVersion()))
    tp = loadPlugins(self.basedirpath)
    self.logger.debug("total plugins loaded in cli: {}".format(tp))
    self.restoreLastActiveWallet()
    # Fail fast if any handler lacks a command mapping
    self.checkIfCmdHandlerAndCmdMappingExists()
@property
def pool_ledger_dir(self):
    # Pool ledger files live directly under the ledger base directory.
    return self.ledger_base_dir
def __init_registry(self, useNodeReg=False, nodeReg=None, cliNodeReg=None):
    """Populate nodeReg/cliNodeReg (from arguments or the pool ledger) and build nodeRegistry."""
    self.nodeRegLoadedFromFile = False
    have_explicit_regs = bool(useNodeReg and nodeReg and len(nodeReg) and
                              cliNodeReg and len(cliNodeReg))
    if have_explicit_regs:
        self.nodeReg = nodeReg
        self.cliNodeReg = cliNodeReg
    else:
        # Fall back to parsing the pool transaction ledger
        self.__init_registry_from_ledger()
    self.nodeRegistry = {}
    for node_stack_name, node_ha in self.nodeReg.items():
        client_stack_name = node_stack_name + CLIENT_STACK_SUFFIX
        self.nodeRegistry[node_stack_name] = NodeDetail(
            HA(*node_ha), client_stack_name,
            HA(*self.cliNodeReg[client_stack_name]))
def __init_registry_from_ledger(self):
    # Derive node registries by parsing the pool genesis/transaction ledger.
    self.nodeRegLoadedFromFile = True
    genesis_txn_initiator = GenesisTxnInitiatorFromFile(
        self.pool_ledger_dir, self.config.poolTransactionsFile)
    # In-memory transaction store: the ledger is only read here, not persisted.
    ledger = Ledger(CompactMerkleTree(),
                    dataDir=self.pool_ledger_dir,
                    fileName=self.config.poolTransactionsFile,
                    genesis_txn_initiator=genesis_txn_initiator,
                    transactionLogStore=KeyValueStorageInMemory())
    nodeReg, cliNodeReg, _ = TxnStackManager.parseLedgerForHaAndKeys(
        ledger)
    ledger.stop()
    self.nodeReg = nodeReg
    self.cliNodeReg = cliNodeReg
def close(self):
    """
    Stops all the created clients and nodes.
    """
    for client in self.clients.values():
        client.stop()
    for node in self.nodes.values():
        node.stop()
def _getCmdMappingError(self, cmdHandlerFuncName, mappingFuncName):
    """Build a prominent, starred error message for a handler missing a command mapping."""
    body = ("Command mapping not provided for '{}' command handler. "
            "\nPlease add proper mapping for that command handler "
            "(in function '{}') with corresponding command object.".format(
                cmdHandlerFuncName, mappingFuncName))
    separator = "\n" + "*" * 125 + "\n"
    return separator + body + separator
def checkIfCmdHandlerAndCmdMappingExists(self):
    """Raise if any registered command handler lacks a command-mapping entry."""
    for handler in self.actions:
        # Mapping keys are handler names stripped of underscores
        mapping_key = handler.__name__.replace("_", "")
        if mapping_key not in self.cmdHandlerToCmdMappings().keys():
            raise Exception(self._getCmdMappingError(
                handler.__name__,
                self.cmdHandlerToCmdMappings.__name__))
@staticmethod
def getCliVersion():
    # The CLI version mirrors the plenum package version.
    return __version__
@property
def genesisTransactions(self):
    # Accessor for the accumulated genesis transactions.
    return self._genesisTransactions
def reset(self):
    # Discard accumulated genesis transactions; rebinding also shadows the
    # class-level default list for this instance.
    self._genesisTransactions = []
@property
def actions(self):
    # Lazily built list of command-handler methods; each is tried in turn
    # against a parsed command, so the order here may be significant.
    if not self._actions:
        self._actions = [self._simpleAction, self._helpAction,
                         self._newNodeAction, self._newClientAction,
                         self._statusNodeAction, self._statusClientAction,
                         self._loadPluginDirAction,
                         self._clientCommand, self._addKeyAction,
                         self._newKeyAction, self._listIdsAction,
                         self._useIdentifierAction, self._addGenesisAction,
                         self._createGenTxnFileAction, self._changePrompt,
                         self._newWallet, self._renameWallet,
                         self._useWalletAction, self._saveWalletAction,
                         self._listWalletsAction]
    return self._actions
@property
def config(self):
    """Return the cached configuration, loading it via getConfig() on first use."""
    if not self._config:
        self._config = getConfig()
    return self._config
@lazy_field
def walletSaver(self):
    # Built on first access; lazy_field caches the instance thereafter.
    return WalletStorageHelper(self.getWalletsBaseDir(),
                               dmode=self.config.WALLET_DIR_MODE,
                               fmode=self.config.WALLET_FILE_MODE)
@property
def allGrams(self):
    """All grammar fragments (util, node, client), assembled once and cached."""
    if self._allGrams:
        return self._allGrams
    self._allGrams = [self.utilGrams, self.nodeGrams, self.clientGrams]
    return self._allGrams
@property
def completers(self):
    # Lazily built map of grammar variable name -> prompt_toolkit completer,
    # driving tab-completion for each token of the CLI grammar.
    if not self._completers:
        self._completers = {
            'node_command': WordCompleter(self.nodeCmds),
            'client_command': WordCompleter(self.cliCmds),
            'client': WordCompleter(['client']),
            'command': WordCompleter(self.commands),
            'node_or_cli': WordCompleter(self.node_or_cli),
            'node_name': WordCompleter(self.nodeNames),
            'more_nodes': WordCompleter(self.nodeNames),
            'helpable': WordCompleter(self.helpablesCommands),
            'load_plugins': PhraseWordCompleter('load plugins from'),
            # Client-name completers share one WordCompleter so newly created
            # clients appear in both positions
            'client_name': self.clientWC,
            'second_client_name': self.clientWC,
            'cli_action': WordCompleter(self.cliActions),
            'simple': WordCompleter(self.simpleCmds),
            'add_key': PhraseWordCompleter('add key'),
            'for_client': PhraseWordCompleter('for client'),
            'new_key': PhraseWordCompleter('new key'),
            'new_wallet': PhraseWordCompleter('new wallet'),
            'rename_wallet': PhraseWordCompleter('rename wallet'),
            'list_ids': PhraseWordCompleter('list ids'),
            'list_wallet': PhraseWordCompleter('list wallets'),
            'become': WordCompleter(['become']),
            'use_id': PhraseWordCompleter('use DID'),
            'use_wallet': PhraseWordCompleter('use wallet'),
            'save_wallet': PhraseWordCompleter('save wallet'),
            'add_gen_txn': PhraseWordCompleter('add genesis transaction'),
            'prompt': WordCompleter(['prompt']),
            'create_gen_txn_file': PhraseWordCompleter(
                'create genesis transaction file')
        }
    return self._completers
@property
def lexers(self):
    """Map of grammar-variable names to keyword lexers, built lazily.

    Every listed grammar variable gets the same SimpleLexer so matched
    keywords are highlighted uniformly.
    """
    if not self._lexers:
        lexerNames = {
            'node_command',
            'command',
            'helpable',
            'load_plugins',
            'load',
            'node_or_cli',
            'node_name',
            'more_nodes',
            'simple',
            'client_command',
            'add_key',
            'verkey',
            'for_client',
            'DID',
            'new_key',
            'list_ids',
            'list_wallets',
            'become',
            'use_id',
            'prompt',
            'new_wallet',
            'use_wallet',
            'save_wallet',
            'rename_wallet',
            'add_genesis',
            'create_gen_txn_file'
        }
        self._lexers = {name: SimpleLexer(Token.Keyword)
                        for name in lexerNames}
    return self._lexers
def _renameWalletFile(self, oldWalletName, newWalletName):
    """Rename the persisted wallet file for *oldWalletName* on disk.

    Returns False only when a file for the new name already exists;
    returns True otherwise (including when nothing was persisted yet).
    """
    wallets_dir = self.getContextBasedWalletsBaseDir()
    src_path = getWalletFilePath(
        wallets_dir, normalizedWalletFileName(oldWalletName))
    if not os.path.exists(src_path):
        # Nothing persisted under the old name; nothing to move.
        return True
    dst_path = getWalletFilePath(
        wallets_dir, normalizedWalletFileName(newWalletName))
    if os.path.exists(dst_path):
        self.print("A persistent wallet file already exists for "
                   "new wallet name. Please choose new wallet name.")
        return False
    os.rename(src_path, dst_path)
    return True
def _renameWallet(self, matchedVars):
    """Handle the 'rename wallet <from> to <to>' command.

    Returns True whenever the command matched (the CLI treats a True
    return as "command consumed"), even if the rename itself failed.
    """
    if matchedVars.get('rename_wallet'):
        fromName = matchedVars.get('from')
        toName = matchedVars.get('to')
        # Refuse names that collide with known identifiers.
        conflictFound = self._checkIfIdentifierConflicts(
            toName, checkInAliases=False, checkInSigners=False)
        if not conflictFound:
            # No source name given -> rename the active wallet.
            fromWallet = self.wallets.get(fromName) if fromName \
                else self.activeWallet
            if not fromWallet:
                self.print('Wallet {} not found'.format(fromName))
                return True
            # Rename the on-disk file first; abort if the target exists.
            if not self._renameWalletFile(fromName, toName):
                return True
            fromWallet.name = toName
            del self.wallets[fromName]
            self.wallets[toName] = fromWallet
            self.print('Wallet {} renamed to {}'.format(fromName, toName))
        return True
def _newWallet(self, matchedVars):
    """Handle the 'new wallet <name>' command.

    Saves the currently active wallet and creates a fresh one, unless the
    requested name conflicts with an existing identifier. Returns True
    when the command matched, None otherwise.
    """
    if not matchedVars.get('new_wallet'):
        return None
    wallet_name = matchedVars.get('name')
    conflict = self._checkIfIdentifierConflicts(
        wallet_name, checkInAliases=False, checkInSigners=False)
    if not conflict:
        self._saveActiveWallet()
        self._createWallet(wallet_name)
    return True
def _changePrompt(self, matchedVars):
    """Handle the 'prompt <name>' command by updating the CLI prompt.

    Returns True when the command matched, None otherwise.
    """
    if not matchedVars.get('prompt'):
        return None
    self._setPrompt(matchedVars.get('name'))
    return True
def _createGenTxnFileAction(self, matchedVars):
    """Handle 'create genesis transaction file'.

    Resets the pool-transactions ledger, writes every collected genesis
    transaction into it, and reports the file location. Returns True when
    the command matched, None otherwise.
    """
    if not matchedVars.get('create_gen_txn_file'):
        return None
    ledger = create_genesis_txn_init_ledger(
        self.pool_ledger_dir, self.config.poolTransactionsFile)
    ledger.reset()
    for txn in self.genesisTransactions:
        ledger.add(txn)
    self.print('Genesis transaction file created at {} '
               .format(ledger._transactionLog.db_path))
    ledger.stop()
    return True
def _addGenesisAction(self, matchedVars):
    """Dispatch an 'add genesis transaction' command.

    Routes to the legacy handler when a target NYM is present, otherwise
    to the new-style handler. Returns None when the command did not match.
    """
    if not matchedVars.get('add_gen_txn'):
        return None
    if matchedVars.get(TARGET_NYM):
        return self._addOldGenesisCommand(matchedVars)
    return self._addNewGenesisCommand(matchedVars)
def _addNewGenesisCommand(self, matchedVars):
typ = self._getType(matchedVars)
nodeName, nodeData, DID = None, None, None
jsonNodeData = json.loads(matchedVars.get(DATA))
for | |
<filename>pymustache/mustache.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
import copy
try:
    from html import escape as html_escape
except ImportError:
    # Python 2 fallback: the `html` module does not exist there, so use
    # cgi.escape (removed from the stdlib in Python 3.8). The previous bare
    # `except:` also swallowed unrelated errors; only ImportError is expected.
    import cgi

    def html_escape(text):
        """Escape &, <, > and quotes for safe HTML interpolation."""
        return cgi.escape(text, quote=True)
# Standard mustache tag delimiters; {{=..=}} tags can change them per template.
DEFAULT_DELIMITERS = ('{{', '}}')
EMPTYSTRING = ""
# Horizontal whitespace only (no '\n'); used for standalone-tag detection.
spaces_not_newline = ' \t\r\b\f'
# Matches trailing horizontal whitespace up to a newline or end of string.
re_space = re.compile(r'[' + spaces_not_newline + r']*(\n|$)')
# Matches each line start (for re-indenting partial bodies).
re_insert_indent = re.compile(r'(^|\n)(?=.|\n)', re.DOTALL)
# default filters: name -> callable; copied onto each compiled Root token.
filters = {}
#==============================================================================
# Context lookup.
# Mustache uses javascript's prototype like lookup for variables.
# A context is just a dict, and we use a list of contexts to represent the
# stack, the lookup order is in reversed order
# lookup('x', ({'y': 30, 'z':40}, {'x': 10, 'y': 20})) => 10
# lookup('y', ({'y': 30, 'z':40}, {'x': 10, 'y': 20})) => 20
# lookup('z', ({'y': 30, 'z':40}, {'x': 10, 'y': 20})) => 40
# context now contains special variables: {'.': normal_context, '@': special_vars}
def lookup(var_name, contexts=(), start=0):
    """Look up *var_name* on a stack of contexts.

    Contexts are searched from the top of the stack (end of the sequence)
    downwards, mimicking JavaScript prototype-chain lookup.

    :param var_name: key to search for
    :param contexts: sequence of mappings, outermost first
    :param start: non-negative values mean "search the whole stack";
        a negative value restricts the search to contexts[:start]
    :returns: the first value found, or None if absent everywhere
    """
    start = len(contexts) if start >= 0 else start
    for context in reversed(contexts[:start]):
        try:
            if var_name in context:
                return context[var_name]
        except TypeError:
            # Non-mapping entries (plain values pushed for '.') are skipped.
            continue
    return None
def get_parent(contexts):
    """Return the context on top of the stack, or None when unavailable.

    The previous bare `except:` hid every error; only an empty stack
    (IndexError) or a non-indexable argument (TypeError) are expected.
    """
    try:
        return contexts[-1]
    except (IndexError, TypeError):
        return None
def parse_int(string):
    """Parse *string* as an int; return None when it is not a valid integer.

    The previous bare `except:` hid every error; int() only raises
    ValueError (bad literal) or TypeError (wrong argument type) here.
    """
    try:
        return int(string)
    except (TypeError, ValueError):
        return None
#==============================================================================
# Compilation
# To compile a template into a tree of tokens, using the given delimiters.
# Cache of compiled tag regexes, keyed by the (open, close) delimiter pair.
re_delimiters = {}


def delimiters_to_re(delimiters):
    """Build (and cache) the tag-matching regex for a delimiter pair."""
    key = tuple(delimiters)
    cached = re_delimiters.get(key)
    if cached is not None:
        return cached

    open_tag, close_tag = key

    def _escape_literal(tag):
        # Backslash-escape every non-alphanumeric character so the
        # delimiters are matched literally inside the pattern.
        return ''.join(ch if ch.isalnum() else '\\' + ch for ch in tag)

    pattern = (_escape_literal(open_tag)
               + r'([#^>&{/!=]?)\s*(.*?)\s*([}=]?)'
               + _escape_literal(close_tag))
    re_tag = re.compile(pattern, re.DOTALL)
    re_delimiters[key] = re_tag
    return re_tag
class SyntaxError(Exception):
    """Raised for malformed template syntax (bad tags, unclosed sections).

    NOTE(review): this shadows the builtin SyntaxError inside this module;
    callers may catch it by this name, so do not rename without checking.
    """
    pass
def is_standalone(text, start, end):
    """Check whether the tag text[start:end] sits alone on its line.

    A tag is standalone when only horizontal whitespace separates it from
    the preceding newline (or string start) and from the following newline
    (or string end).

    :param text: full template source
    :param start: index of the tag's first character
    :param end: index one past the tag's last character
    :returns: (line_start, next_index) — the index where the surrounding
        whitespace begins and the index just past the trailing newline —
        or None when the tag is not standalone
    """
    left = False
    start -= 1
    # Walk backwards over horizontal whitespace to the previous newline.
    while start >= 0 and text[start] in spaces_not_newline:
        start -= 1
    if start < 0 or text[start] == '\n':
        left = True
    # Forwards: only whitespace up to a newline (or EOS) may follow the tag.
    right = re_space.match(text, end)
    return (start + 1, right.end()) if left and right else None
def compiled(template, delimiters=DEFAULT_DELIMITERS):
    """Compile a template into a token tree.

    Scans *template* for tags with a single regex and builds the tree,
    tracking open sections on explicit stacks.

    :param template: template source string
    :param delimiters: (open, close) tag pair to start with; {{=..=}} tags
        may change it mid-template
    :returns: the root token
    """
    re_tag = delimiters_to_re(delimiters)
    # variable to save states
    tokens = []        # tokens collected at the current nesting level
    index = 0          # current scan position in template
    sections = []      # stack of (name, type, body-start) for open sections
    tokens_stack = []  # saved sibling lists of enclosing nesting levels
    # root token
    root = Root('root')
    root.filters = copy.copy(filters)
    m = re_tag.search(template, index)
    while m is not None:
        token = None
        last_literal = None
        strip_space = False
        if m.start() > index:
            # literal text between the previous tag and this one
            last_literal = Literal('str', template[index:m.start()], root=root)
            tokens.append(last_literal)
        # parse token
        prefix, name, suffix = m.groups()
        if prefix == '=' and suffix == '=':
            # {{=| |=}} to change delimiters
            delimiters = re.split(r'\s+', name)
            if len(delimiters) != 2:
                raise SyntaxError('Invalid new delimiter definition: ' + m.group())
            re_tag = delimiters_to_re(delimiters)
            strip_space = True
        elif prefix == '{' and suffix == '}':
            # {{{ variable }}} — unescaped interpolation
            token = Variable(name, name, root=root)
        elif prefix == '' and suffix == '':
            # {{ name }} — HTML-escaped interpolation
            token = Variable(name, name, root=root)
            token.escape = True
        elif suffix != '' and suffix != None:
            raise SyntaxError('Invalid token: ' + m.group())
        elif prefix == '&':
            # {{& escaped variable }}
            token = Variable(name, name, root=root)
        elif prefix == '!':
            # {{! comment }}
            token = Comment(name, root=root)
            if len(sections) <= 0:
                # considered as standalone only outside sections
                strip_space = True
        elif prefix == '>':
            # {{> partial}}
            token = Partial(name, name, root=root)
            strip_space = True
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                # remember the indentation so the partial can be re-indented
                token.indent = len(template[pos[0]:m.start()])
        elif prefix == '#' or prefix == '^':
            # {{# section }} or # {{^ inverted }}
            # strip filter
            sec_name = name.split('|')[0].strip()
            token = Section(sec_name, name, root=root) if prefix == '#' else Inverted(name, name, root=root)
            token.delimiter = delimiters
            tokens.append(token)
            # save the tokens onto stack and start a fresh child list
            token = None
            tokens_stack.append(tokens)
            tokens = []
            sections.append((sec_name, prefix, m.end()))
            strip_space = True
        elif prefix == '/':
            # {{/ name }} closes the innermost open section
            tag_name, sec_type, text_end = sections.pop()
            if tag_name != name:
                raise SyntaxError("unclosed tag: '" + tag_name + "' Got:" + m.group())
            children = tokens
            tokens = tokens_stack.pop()
            # the section token is the last one appended at the outer level;
            # keep the raw body text for lambda sections
            tokens[-1].text = template[text_end:m.start()]
            tokens[-1].children = children
            strip_space = True
        else:
            raise SyntaxError('Unknown tag: ' + m.group())
        if token is not None:
            tokens.append(token)
        index = m.end()
        if strip_space:
            # standalone tag lines are removed from the output entirely
            pos = is_standalone(template, m.start(), m.end())
            if pos:
                index = pos[1]
                if last_literal: last_literal.value = last_literal.value.rstrip(spaces_not_newline)
        m = re_tag.search(template, index)
    # trailing literal after the last tag
    tokens.append(Literal('str', template[index:]))
    root.children = tokens
    return root
def render(template, context, partials={}, delimiters=None):
    """Render a mustache *template* against *context*.

    :param template: template source string
    :param context: mapping used as the root context
    :param partials: dict mapping partial names to template strings
        (read-only here, so the mutable default is safe)
    :param delimiters: optional (open, close) tag pair; defaults to {{ }}
    :raises TypeError: when partials is not a dict
    :returns: the rendered string
    """
    contexts = [context]
    if not isinstance(partials, dict):
        # BUG FIX: `'...' + type(partials)` raised "can only concatenate str"
        # instead of the intended diagnostic; stringify the type explicitly.
        raise TypeError('partials should be dict, but got '
                        + str(type(partials)))
    return inner_render(template, contexts, partials, delimiters)
def inner_render(template, contexts, partials={}, delimiters=None):
    """Compile *template* and render it against an existing context stack."""
    if delimiters is None:
        delimiters = DEFAULT_DELIMITERS
    root = compiled(template, delimiters)
    return root._render(contexts, partials)
#==============================================================================
# Token
# We'll parse the template into a tree of tokens, so a Token is actually a
# node of the tree.
# We'll save the all the information about the node here.
class Token():
    """The node of a parse tree.

    Every template construct (literal text, variable, section, partial,
    comment) is a Token subclass; rendering walks the tree via _render.
    The Root token owns the filter registry used by _lookup.
    """
    def __init__(self, name, value=None, text='', children=None, root=None):
        self.name = name
        self.value = value
        self.text = text          # raw section body text (used for lambdas)
        self.children = children
        self.escape = False       # HTML-escape on render ({{ }} vs {{{ }}})
        self.delimiter = None     # used for section
        self.indent = 0           # used for partial
        self.root = root          # the Root token of the tree
        self.filters = {}

    def _escape(self, text):
        """Escape text according to self.escape"""
        # None renders as the empty string.
        ret = EMPTYSTRING if text is None else str(text)
        if self.escape:
            return html_escape(ret)
        else:
            return ret

    def _lookup(self, dot_name, contexts):
        """lookup value for names like 'a.b.c' and handle filters as well"""
        # process filters: '|'-separated; the first element is the path itself
        filters = [x for x in map(lambda x: x.strip(), dot_name.split('|'))]
        dot_name = filters[0]
        filters = filters[1:]
        # should support paths like '../../a.b.c/../d', etc.
        if not dot_name.startswith('.'):
            dot_name = './' + dot_name
        paths = dot_name.split('/')
        last_path = paths[-1]
        # path like '../..' or ./../. etc.
        refer_context = last_path == '' or last_path == '.' or last_path == '..'
        paths = paths if refer_context else paths[:-1]
        # count path level ('..' pops one level, dotted names push their depth)
        level = 0
        for path in paths:
            if path == '..':
                level -= 1
            elif path != '.':
                # ../a.b.c/.. in the middle
                level += len(path.strip('.').split('.'))
        names = last_path.split('.')
        # fetch the correct context
        if refer_context or names[0] == '':
            try:
                value = contexts[level-1]
            except:
                value = None
        else:
            # support {{a.b.c.d.e}} like lookup
            value = lookup(names[0], contexts, level)
        # lookup for variables: walk the remaining dotted components
        if not refer_context:
            for name in names[1:]:
                try:
                    # a.num (a.1, a.2) to access list
                    index = parse_int(name)
                    name = parse_int(name) if isinstance(value, (list, tuple)) else name
                    value = value[name]
                except:
                    # not found
                    value = None
                    break;
        # apply filters; unknown or failing filters are skipped silently
        for f in filters:
            try:
                func = self.root.filters[f]
                value = func(value)
            except:
                continue
        return value

    def _render_children(self, contexts, partials):
        """Render the children tokens"""
        ret = []
        for child in self.children:
            ret.append(child._render(contexts, partials))
        return EMPTYSTRING.join(ret)

    def _get_str(self, indent):
        # Debug helper: pretty-print this subtree with the given indent.
        ret = []
        ret.append(' '*indent + '[(')
        ret.append(self.type_string)
        ret.append(',')
        ret.append(self.name)
        if self.value:
            ret.append(',')
            ret.append(repr(self.value))
        ret.append(')')
        if self.children:
            for c in self.children:
                ret.append('\n')
                ret.append(c._get_str(indent+4))
        ret.append(']')
        return ''.join(ret)

    def __str__(self):
        return self._get_str(0)

    def render(self, contexts, partials={}):
        # interface for compiled object, corresponds to render()
        contexts = [contexts]
        return self._render(contexts, partials)
class Root(Token):
    """Root node of a compiled template; rendering it renders everything."""

    def __init__(self, *args, **kwargs):
        Token.__init__(self, *args, **kwargs)
        self.type_string = 'R'

    def _render(self, contexts, partials):
        # The root has no text of its own; it only renders its children.
        return self._render_children(contexts, partials)
class Literal(Token):
    """Plain text chunk between tags."""

    def __init__(self, *args, **kwargs):
        Token.__init__(self, *args, **kwargs)
        self.type_string = 'L'

    def _render(self, contexts, partials):
        """Return the stored text (escaped only if self.escape is set)."""
        return self._escape(self.value)
class Variable(Token):
    """An interpolation tag: {{ name }}, {{{ name }}} or {{& name }}."""

    def __init__(self, *args, **kwargs):
        Token.__init__(self, *args, **kwargs)
        self.type_string = 'V'

    def _render(self, contexts, partials):
        """Look up the variable and render it; callables are expanded
        as mustache lambdas (called, then their result re-rendered)."""
        value = self._lookup(self.value, contexts)
        if callable(value):
            value = inner_render(str(value()), contexts, partials)
        return self._escape(value)
class Section(Token):
def __init__(self, *arg, **kw):
Token.__init__(self, *arg, **kw)
self.type_string = 'S'
def _render(self, contexts, partials):
"""render section"""
val = self._lookup(self.value, contexts)
if not val:
# false value
return EMPTYSTRING
# normally json has types: number/string/list/map
# but python has more, so we decide that map and string should not iterate
# by default, other do.
if hasattr(val, "__iter__") and not isinstance(val, (str, dict)):
| |
reply = f"{reply} {ASK_ABOUT_OFFERED_BOOK}"
confidence = self.super_conf
if not reply:
reply = f"{ACKNOWLEDGE_AUTHOR}. By the way,"
reply = reply.replace("AUTHOR", author_name)
reply = self.book_linkto_reply(reply, human_attr)
confidence = self.default_conf
elif is_wikidata_entity(plain_bookname) and n_years_ago:
# if we found book name in user reply
bookname = entity_to_label(plain_bookname)
human_attr["book_skill"]["n_years_ago"] = n_years_ago
human_attr["book_skill"]["book"] = bookname
human_attr["book_skill"]["plain_book"] = plain_bookname
logger.debug("Bookname detected: returning AMAZING_READ_BOOK & WHEN_IT_WAS_PUBLISHED")
plain_author = get_author(plain_bookname, return_plain=True)
if is_wikidata_entity(plain_author):
logger.debug("Is author")
author_name = entity_to_label(plain_author)
human_attr["book_skill"]["author"] = author_name
offered_plain_bookname = best_plain_book_by_author(
plain_author_name=plain_author, plain_last_bookname=plain_bookname, default_phrase=""
)
if is_wikidata_entity(plain_bookname):
offered_bookname = entity_to_label(offered_plain_bookname)
reply = f"{IF_REMEMBER_LAST_BOOK} {ASK_ABOUT_OFFERED_BOOK}"
reply = reply.replace("AUTHOR", author_name).replace("BOOK", offered_bookname)
else:
reply = f"{AMAZING_READ_BOOK} {WHEN_IT_WAS_PUBLISHED}"
else:
reply = f"{AMAZING_READ_BOOK} {WHEN_IT_WAS_PUBLISHED}"
if len(bookname.split()) > 1 and we_asked_about_book:
# if book title is long enough, we set super conf
confidence = self.super_conf
else:
confidence = self.default_conf
elif genre_name is not None:
prev_genre = get_genre(annotated_prev_phrase["text"], return_name=True)
only_one_phrase = len(GENRE_PHRASES[genre_name]) == 1
logger.debug(f"Phrase contains name of genre {genre_name}")
if prev_genre != genre_name or only_one_phrase:
reply = GENRE_PHRASES[genre_name][0]
else:
reply = GENRE_PHRASES[genre_name][1]
if len(genre_name) > 5 and reply not in human_attr["book_skill"]["used_phrases"]:
confidence = self.super_conf
else:
confidence = self.default_conf
elif movie_name:
reply = get_movie_answer(annotated_user_phrase, human_attr)
if len(movie_name.split()) > 1 and movie_name.lower() in annotated_user_phrase["text"].lower():
# if book title is long enough and is in user reply,we set super conf
confidence = self.super_conf
else:
confidence = self.default_conf
else:
if any([WHAT_BOOK_IMPRESSED_MOST in j for j in human_attr["book_skill"]["used_phrases"]]):
reply, confidence = self.default_reply, 0
else:
reply, confidence = f"Fabulous! And {WHAT_BOOK_IMPRESSED_MOST}", self.default_conf
return reply, confidence, human_attr
def __call__(self, dialogs):
texts, confidences = [], []
human_attrs, bot_attrs, attrs = [], [], []
for dialog in dialogs:
reply = ""
confidence = 0
attr = {}
bot_attr = {}
human_attr = dialog["human"]["attributes"]
human_attr["book_skill"] = dialog["human"]["attributes"].get("book_skill", {})
human_attr["book_skill"]["used_phrases"] = human_attr["book_skill"].get("used_phrases", [])
human_attr["book_skill"]["last_fact"] = human_attr["book_skill"].get("last_fact", "")
human_attr["book_skill"]["used_genrebooks"] = human_attr["book_skill"].get("used_genrebooks", [])
try:
# TODO check correct order of concatenation of replies
book_just_active = is_previous_was_book_skill(dialog)
bot_phrases = [j["text"] for j in dialog["bot_utterances"]]
if len(bot_phrases) == 0:
bot_phrases.append("")
annotated_bot_phrase = {"text": "", "annotations": {}}
else:
annotated_bot_phrase = dialog["bot_utterances"][-1]
bot_phrases = [phrase for phrase in bot_phrases if "#repeat" not in phrase]
logger.debug(f"bot phrases: {bot_phrases}")
user_phrases = [utt["text"] for utt in dialog["human_utterances"][-2:]]
logger.info(f"Input received: {user_phrases[-1]}")
annotated_user_phrase = dialog["human_utterances"][-1]
genre_detected = get_genre(annotated_user_phrase)
yes_or_no = is_yes(annotated_user_phrase) or is_no(annotated_user_phrase)
questions_about_book = [
BOOK_ANY_PHRASE,
LAST_BOOK_READ,
WHAT_BOOK_IMPRESSED_MOST,
] + BOOK_SKILL_CHECK_PHRASES
we_asked_about_book = any([question in bot_phrases[-1] for question in questions_about_book])
we_offered_information = len(bot_phrases) >= 1 and any(
[k in bot_phrases[-1] for k in [TELL_REQUEST, TELL_REQUEST2]]
)
what_about_requested = what_is_book_about_request(annotated_user_phrase)
if len(dialog["human_utterances"]) > 1:
annotated_prev_phrase = dialog["human_utterances"][-2]
else:
annotated_prev_phrase = {"text": ""}
logger.debug(f"User phrase: last and prev from last: {user_phrases}")
# I don't denote annotated_user_phrase['text'].lower() as a single variable
# in order not to confuse it with annotated_user_phrase
should_stop = any(
[
IF_NOT_LOVE_READING in bot_phrases[-1],
book_just_active and is_switch_topic(annotated_user_phrase),
book_just_active and is_stop(annotated_user_phrase),
book_just_active and is_side_intent(annotated_user_phrase),
dontlike_books(annotated_user_phrase),
we_asked_about_book and is_no(annotated_user_phrase),
]
)
lets_chat_about_books = if_chat_about_particular_topic(
annotated_user_phrase, annotated_bot_phrase, compiled_pattern=BOOK_PATTERN
)
if lets_chat_about_books and not is_no(annotated_user_phrase) and not book_just_active:
# let's chat about books
logger.debug("Detected talk about books. Calling start phrase")
if START_PHRASE in human_attr["book_skill"]["used_phrases"]:
reply = get_not_given_question_about_books(human_attr["book_skill"]["used_phrases"])
confidence = self.default_conf
else:
reply, confidence = START_PHRASE, self.super_conf
elif should_stop:
logger.debug("Should stop")
reply, confidence = "", 0
elif all(
[
dontknow_books(annotated_user_phrase),
BOOK_ANY_PHRASE not in bot_phrases,
book_just_active,
we_asked_about_book,
]
):
reply, confidence = BOOK_ANY_PHRASE, self.default_conf
elif ASK_WHY in bot_phrases[-1]:
reply, confidence = f"{IF_LOVE_READING} {LAST_BOOK_READ}", self.default_conf
elif my_favorite(annotated_user_phrase) == "genre":
reply, confidence, human_attr = self.get_author_book_genre_movie_reply(
annotated_user_phrase, annotated_prev_phrase, bot_phrases, human_attr
)
elif my_favorite(annotated_user_phrase) == "book":
reply, confidence = f"So {WHAT_BOOK_IMPRESSED_MOST}", self.default_conf
elif fav_genre_request_detected(annotated_user_phrase):
# if user asked us about favorite genre
logger.debug("Detected favorite genre request")
reply, confidence = random.choice(FAVOURITE_GENRE_ANSWERS), self.super_conf
elif asked_about_genre(annotated_user_phrase) and genre_detected:
reply, confidence = GENRE_DICT[genre_detected], self.default_conf
reply = f"{reply} {GENRE_ADVICE_PHRASE}"
human_attr["book_skill"]["detected_genre"] = genre_detected
elif all(
[
bible_request(annotated_user_phrase),
BIBLE_RESPONSES[0] not in human_attr["book_skill"]["used_phrases"],
]
):
reply, confidence = BIBLE_RESPONSES[0], self.default_conf
elif any([bible_request(annotated_user_phrase), (bible_request(annotated_prev_phrase) and yes_or_no)]):
reply = BIBLE_RESPONSES[1]
if BIBLE_RESPONSES[0] == annotated_bot_phrase["text"]:
reply = f"I am pleased to know it. {reply}"
book_question = get_not_given_question_about_books(human_attr["book_skill"]["used_phrases"])
reply = f"{reply} Apart from the Bible, {book_question}"
confidence = self.super_conf
elif self.fav_book_request_detected(annotated_user_phrase, bot_phrases[-1], human_attr):
# if user asked us about favorite book
logger.debug("Detected favorite book request")
if "fav_book_phrases" not in human_attr["book_skill"]:
set_favourite(human_attr, 0, FAVOURITE_BOOK_ATTRS, FAVOURITE_BOOK_ANSWERS)
favourite_book_answers = human_attr["book_skill"]["fav_book_phrases"]
if favourite_book_answers[0] not in human_attr["book_skill"]["used_phrases"]:
reply = favourite_book_answers[0]
else:
reply = favourite_book_answers[1]
# TODO in next PRs: behave proactively about this book, propose to discuss it next
confidence = self.super_conf
elif START_PHRASE in bot_phrases[-1]:
# if we asked do you love reading previously
logger.debug("We have just said Do you love reading")
if is_no(annotated_user_phrase):
logger.debug("Detected answer NO")
reply, confidence = IF_NOT_LOVE_READING, self.super_conf
elif is_yes(annotated_user_phrase) or if_loves_reading(annotated_user_phrase):
logger.debug("Detected asnswer YES")
reply, confidence = f"{ASK_WHY}", self.super_conf
else:
logger.debug("No answer detected. Return nothing.")
reply, confidence = self.default_reply, 0
elif any([phrase in bot_phrases[-1] for phrase in QUESTIONS_ABOUT_BOOK]):
reply, confidence, human_attr = self.get_author_book_genre_movie_reply(
annotated_user_phrase, annotated_prev_phrase, bot_phrases, human_attr
)
elif WHEN_IT_WAS_PUBLISHED in bot_phrases[-1] or published_year_request(annotated_user_phrase):
if "n_years_ago" in human_attr["book_skill"]:
n_years_ago = human_attr["book_skill"]["n_years_ago"]
plain_bookname = human_attr["book_skill"]["plain_book"]
bookname = human_attr["book_skill"]["book"]
logger.debug("Bookname detected")
if n_years_ago > 0:
recency_phrase = f"{n_years_ago} years ago!"
else:
recency_phrase = "Just recently!"
# answering with default conf as we do not even check the user utterance at all
logger.debug("Giving recency phrase")
book_genre = genre_of_book(plain_bookname)
reply = f"{recency_phrase} {random.choice(DID_NOT_EXIST)}"
if book_genre:
reply = f"{reply} {ASK_GENRE_OF_BOOK}"
reply = reply.replace("BOOK", bookname)
human_attr["book_skill"]["genre"] = book_genre
else:
reply = self.book_linkto_reply(reply, human_attr)
confidence = self.default_conf
else:
reply, confidence = ASK_TO_REPEAT_BOOK, self.low_conf
elif bot_phrases[-1] in OPINION_REQUEST_ON_BOOK_PHRASES:
# if we previously asked about user's opinion on book
logger.debug("Last phrase was OPINION_REQUEST_ON_BOOK_PHRASES")
reply, confidence = self.reply_about_book(annotated_user_phrase, human_attr, is_yes, is_no, [])
elif ASK_GENRE_OF_BOOK in bot_phrases[-1] and "genre" in human_attr["book_skill"]:
book, genre = human_attr["book_skill"]["book"], human_attr["book_skill"]["genre"]
reply, confidence = f"{book} is {genre}. ", self.default_conf
reply = self.book_linkto_reply(reply, human_attr)
elif self.genrebook_request_detected(annotated_user_phrase, bot_phrases):
# push it to the end to move forward variants where we the topic is known
logger.debug(f"Last phrase is WHAT_IS_FAV_GENRE for {annotated_user_phrase['text']}")
book, author = self.get_genre_book(annotated_user_phrase, human_attr)
if book and author and not is_no(annotated_user_phrase):
logger.debug(f"Making genre request")
reply, confidence = f"{HAVE_YOU_READ_BOOK}{book} by {author}?", self.default_conf
if get_genre(annotated_user_phrase["text"]):
confidence = self.super_conf
human_attr["book_skill"]["book"] = book
else:
reply, confidence, human_attr = self.get_author_book_genre_movie_reply(
annotated_user_phrase, annotated_prev_phrase, bot_phrases, human_attr
)
elif any(
[k in bot_phrases[-1] for k in [ASK_ABOUT_OFFERED_BOOK, OFFER_FACT_ABOUT_BOOK, HAVE_YOU_READ_BOOK]]
):
# book_just_offered
bookname = human_attr["book_skill"]["book"]
logger.debug("Amazing! Have HAVE_YOU_READ_BOOK in last bot phrase")
if tell_me_more(annotated_user_phrase) and bookname in self.bookreads_books:
reply = tell_about_genre_book(bookname, self.bookreads_data)
new_reply = self.book_linkto_reply(reply, human_attr)
if new_reply == reply:
reply = exit_skill(reply, human_attr)
else:
reply = new_reply
confidence = self.default_conf
elif is_no(annotated_user_phrase) or havent_read(annotated_user_phrase):
logger.debug("intent NO detected")
reply, confidence = f"{random.choice(READ_BOOK_ADVICES)} {TELL_REQUEST}", self.super_conf
elif is_yes(annotated_user_phrase):
reply, confidence = self.reply_about_book(
annotated_user_phrase, human_attr, is_positive, is_negative, OPINION_REQUEST_ON_BOOK_PHRASES
)
else:
logger.debug("No intent detected. Returning nothing")
reply, confidence = self.book_linkto_reply("", human_attr), self.default_conf
elif we_offered_information or what_about_requested:
# We have offered information about book
plain_bookname = human_attr["book_skill"].get("plain_book", "")
bookname = human_attr["book_skill"].get("book", "")
logger.debug(f"TELL_REQUEST with {bookname} {plain_bookname}")
if (tell_me_more(annotated_user_phrase) or is_yes(annotated_user_phrase)) and bookname:
logger.debug("Tell_me_more or is_yes and bookname")
reply = tell_about_genre_book(bookname, self.bookreads_data)
if reply:
reply, confidence = self.book_linkto_reply(reply, human_attr), self.super_conf
elif plain_bookname:
book_fact = what_is_book_about(plain_bookname)
if book_fact:
reply = f"{book_fact} {WHEN_IT_WAS_PUBLISHED}"
confidence = self.super_conf
else:
reply = f"{WHEN_IT_WAS_PUBLISHED}"
confidence = self.default_conf
# запускаем в сценарий дальше
else:
warning_message = "Either plain_bookname or genre book should be. Check the code"
sentry_sdk.capture_exception(Exception(warning_message))
logger.exception(warning_message)
reply = self.book_linkto_reply("", human_attr)
confidence = self.default_conf
elif is_no(annotated_user_phrase):
reply = "OK, as you wish."
new_reply = self.book_linkto_reply(reply, human_attr)
if new_reply == reply:
logger.debug("We are over - finish")
reply = exit_skill(reply, human_attr)
confidence = self.default_conf
else:
reply, confidence = new_reply, self.default_conf
else:
reply, confidence = "", 0
elif about_book(annotated_user_phrase):
plain_bookname, n_years_ago = get_name(
annotated_user_phrase, mode="book", bookyear=True, return_plain=True
)
if not is_wikidata_entity(plain_bookname):
logger.debug("No bookname detected")
movie_name, _ = get_name(annotated_user_phrase, mode="movie")
if movie_name:
logger.debug("Moviename detected")
reply, confidence = get_movie_answer(annotated_user_phrase, human_attr), self.default_conf
else:
reply = self.book_linkto_reply("", human_attr)
if not reply:
logger.debug("We are over - finish")
reply = exit_skill(reply, human_attr)
confidence = self.default_conf
else:
bookname = entity_to_label(plain_bookname)
human_attr["book_skill"]["book"] = bookname
human_attr["book_skill"]["plain_book"] = plain_bookname
retrieved_fact = fact_about_book(annotated_user_phrase)
if retrieved_fact is not None and was_question_about_book(annotated_user_phrase):
# if user asked ANY | |
dict_sku_perf_table,
'BranchType': BranchType, # for naver map api permission
'ALLOWED_HOSTS': settings.ALLOWED_HOSTS # for naver map api permission
})
def post(self, request, *args, **kwargs):
    """Store a branch memo and redirect back to the branch EDI page.

    :raises Exception: when no DB handler can be acquired
    """
    o_sv_db = SvSqlAccess()
    if not o_sv_db:
        raise Exception('invalid db handler')
    dict_rst = get_brand_info(o_sv_db, request, kwargs)
    if dict_rst['b_error']:
        dict_context = {'err_msg': dict_rst['s_msg']}
        return render(request, "svload/analyze_deny.html", context=dict_context)
    n_brand_id = dict_rst['dict_ret']['n_brand_id']
    from .pandas_plugins.edi_by_branch import Performance
    o_branch = Performance()
    dict_rst = o_branch.add_memo(o_sv_db, int(kwargs['branch_id']), n_brand_id, request)
    del o_branch
    del o_sv_db
    # BUG FIX: a stray bare `return` here returned None (an invalid HTTP
    # response for Django) and made the redirect below unreachable.
    return redirect('svload:edi_branch', owner_id=kwargs['owner_id'], ga_view_id=kwargs['ga_view_id'],
                    branch_id=kwargs['branch_id'])
class BySkuEdi(LoginRequiredMixin, TemplateView):
    """Per-SKU EDI performance page.

    Renders two-year supply charts (nationwide and per top branch) plus a
    branch overview table for one SKU of the requested brand.
    """
    # template_name = 'analyze/index.html'

    def __init__(self):
        return

    def get(self, request, *args, **kwargs):
        """Build the per-SKU EDI dashboard.

        Expects kwargs with brand routing info and 'item_id' (the SKU id).
        Raises when no DB handler is available; renders the deny page when
        the user may not access the brand.
        """
        o_window = PeriodWindow()
        dict_period = o_window.get_period_range(request)
        dict_sampling_freq_mode = o_window.get_sampling_freq_ui()  # sampling freq btn UI
        del o_window
        o_sv_db = SvSqlAccess()
        if not o_sv_db:
            raise Exception('invalid db handler')
        dict_rst = get_brand_info(o_sv_db, request, kwargs)
        if dict_rst['b_error']:
            dict_context = {'err_msg': dict_rst['s_msg']}
            return render(request, "svload/analyze_deny.html", context=dict_context)
        n_brand_id = dict_rst['dict_ret']['n_brand_id']
        s_brand_name = dict_rst['dict_ret']['s_brand_name']
        lst_owned_brand = dict_rst['dict_ret']['lst_owned_brand']  # for global navigation
        # begin - retrieve emart sku info
        o_filter = EdiFilter(request)
        dict_branch_info = o_filter.get_branch()
        dict_sku_info = o_filter.get_sku_by_brand_id(o_sv_db, n_brand_id, n_sku_id=kwargs['item_id'])
        del o_filter
        # end - retrieve emart sku info
        # set item name for template
        s_item_name = dict_sku_info['dict_sku_info_by_id'][kwargs['item_id']]['mart_name'] + ' ' + \
            dict_sku_info['dict_sku_info_by_id'][kwargs['item_id']]['name']
        # Load the raw EDI rows for the last two years for this SKU.
        o_edi_raw = EdiRaw()
        o_edi_raw.set_period_dict(dt_start=dict_period['dt_first_day_2year_ago'], dt_end=dict_period['dt_today'])
        o_edi_raw.set_freq(dict_sampling_freq_mode)
        o_edi_raw.set_sku_dict(dict_sku_info['dict_sku_info_by_id'])
        o_edi_raw.set_branch_info(dict_branch_info)
        df_edi_raw = o_edi_raw.load_sku(o_sv_db)
        del o_edi_raw
        del o_sv_db
        from .pandas_plugins.edi_by_sku import Performance
        # get whole period
        o_whole_perf_summary = Performance()
        o_whole_perf_summary.set_period_dict(dict_period)
        o_whole_perf_summary.set_sku_dict(dict_sku_info['dict_sku_info_by_id'])
        o_whole_perf_summary.set_all_branches(dict_branch_info)
        o_whole_perf_summary.load_df(df_edi_raw)
        dict_plots = {}  # graph dict to draw
        # (translated) placeholder: current-month per-SKU supply ranking and
        # shipment comparison were marked here but are not implemented.
        # begin - nationwide supply-amount trend over the last two years
        dict_rst_tm = o_whole_perf_summary.retrieve_monthly_gross_vb()
        o_graph = Visualizer()
        o_graph.set_title('최근 2년간 공급액')
        o_graph.set_height(170)
        o_graph.set_x_labels(dict_rst_tm['lst_x_labels'])
        for s_scope_name, lst_series_val in dict_rst_tm['lst_series_info'].items():
            s_series_color = dict_rst_tm['lst_palette'].pop(0)
            o_graph.append_series(s_scope_name, s_series_color, lst_series_val)
        dict_plots['monthly_national'] = o_graph.draw_vertical_bar(n_max_y_axis=None)
        del o_graph
        # end - nationwide supply-amount trend over the last two years
        # begin - top-branch supply-amount trend over the last two years
        lst_item_line_color = ['#D6E2DF', '#A4C8C1', '#6CBDAC', '#079476', '#097C63',
                               '#026E57']  # last one is larger one
        s_top_branch_cnt = str(len(lst_item_line_color))
        dict_rst_tm = o_whole_perf_summary.retrieve_monthly_gross_by_branch_vb(lst_item_line_color)
        o_graph = Visualizer()
        o_graph.set_title('당월 Top ' + s_top_branch_cnt + ' 매장의 최근 2년간 공급액')
        o_graph.set_height(170)
        o_graph.set_x_labels(dict_rst_tm['lst_x_labels'])
        for s_branch_name, lst_series_val in dict_rst_tm['lst_series_info'].items():
            s_series_color = dict_rst_tm['lst_palette'].pop(0)
            o_graph.append_series(s_branch_name, s_series_color, lst_series_val)
        dict_plots['monthly_branch_top4'] = o_graph.draw_vertical_bar(n_max_y_axis=None)
        del o_graph
        # end - top-branch supply-amount trend over the last two years
        # begin - branch vs national supply-amount / shipment overview table
        lst_branch_data_table = o_whole_perf_summary.retrieve_branch_overview()
        # end - branch vs national supply-amount / shipment overview table
        script, div = components(dict(dict_plots))
        return render(request, 'svload/edi_by_sku.html',
                      {'script': script, 'div': div,
                       'dict_sampling_freq_mode': dict_sampling_freq_mode,
                       's_cur_period_window': dict_period['s_cur_period_window'],
                       'dict_period_date': {'from': dict_period['dt_first_day_this_month'].strftime("%Y%m%d"),
                                            'to': dict_period['dt_today'].strftime("%Y%m%d")},
                       's_brand_name': s_brand_name,
                       'lst_owned_brand': lst_owned_brand,  # for global navigation
                       's_item_name': s_item_name,
                       's_top_branch_cnt': s_top_branch_cnt,
                       'lst_branch_data_table': lst_branch_data_table
                       })
class BudgetView(LoginRequiredMixin, TemplateView):
    """Budget list/detail pages plus add/update/inquiry POST actions."""

    __g_oSvDb = None
    __g_dictBrandInfo = {}

    def __init__(self):
        self.__g_oSvDb = SvSqlAccess()
        if not self.__g_oSvDb:
            raise Exception('invalid db handler')
        return

    def __del__(self):
        del self.__g_oSvDb
        pass

    def get(self, request, *args, **kwargs):
        """Route to the budget detail when budget_id is supplied, else the list."""
        self.__g_dictBrandInfo = get_brand_info(self.__g_oSvDb, request, kwargs)
        if self.__g_dictBrandInfo['b_error']:
            dict_context = {'err_msg': self.__g_dictBrandInfo['s_msg']}
            return render(request, "svload/analyze_deny.html", context=dict_context)
        if 'budget_id' in kwargs:
            return self.__budget_detail(request, *args, **kwargs)
        return self.__budget_list(request, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Handle add_budget / update_budget / inquiry_budget submissions.

        Always returns an HTTP redirect (or a deny page on validation error).
        """
        o_sv_db = SvSqlAccess()
        if not o_sv_db:
            raise Exception('invalid db handler')
        dict_rst = get_brand_info(o_sv_db, request, kwargs)
        if dict_rst['b_error']:
            dict_context = {'err_msg': dict_rst['s_msg']}
            return render(request, "svload/analyze_deny.html", context=dict_context)
        n_brand_id = dict_rst['dict_ret']['n_brand_id']
        s_act = request.POST.get('act')
        s_return_url = request.META.get('HTTP_REFERER')
        # Bug fix: o_redirect was previously unbound when s_act matched no
        # branch, raising UnboundLocalError at the final return.
        o_redirect = redirect('svload:budget_list', sv_brand_id=n_brand_id)
        if s_act == 'add_budget':
            from .pandas_plugins.budget import Budget
            o_budget = Budget(o_sv_db)
            dict_rst = o_budget.add_budget(n_brand_id, request)
            del o_budget
            if dict_rst['b_error']:
                dict_context = {'err_msg': dict_rst['s_msg'], 's_return_url': s_return_url}
                return render(request, "svload/deny.html", context=dict_context)
        elif s_act == 'update_budget':
            if request.POST['budget_id'] == '':
                dict_context = {'err_msg': dict_rst['s_msg'], 's_return_url': s_return_url}
                return render(request, "svload/deny.html", context=dict_context)
            n_budget_id = int(request.POST['budget_id'])
            from .pandas_plugins.budget import Budget
            o_budget = Budget(o_sv_db)
            o_budget.update_budget(n_budget_id, request)
            del o_budget
        elif s_act == 'inquiry_budget':
            s_period_from = request.POST.get('budget_period_from')
            s_period_to = request.POST.get('budget_period_to')
            o_redirect = redirect('svload:budget_period',
                                  sv_brand_id=n_brand_id, period_from=s_period_from, period_to=s_period_to)
        del o_sv_db
        return o_redirect

    def __budget_list(self, request, *args, **kwargs):
        """Render the budget list, optionally filtered by a period window."""
        s_period_from = kwargs.get('period_from')
        s_period_to = kwargs.get('period_to')
        from .pandas_plugins.budget import Budget
        n_brand_id = self.__g_dictBrandInfo['dict_ret']['n_brand_id']
        o_budget = Budget(self.__g_oSvDb)
        dict_budget_info = o_budget.get_list_by_period(n_brand_id, s_period_from, s_period_to)
        lst_acct_list = o_budget.get_acct_list_for_ui()
        del o_budget
        s_brand_name = self.__g_dictBrandInfo['dict_ret']['s_brand_name']
        lst_owned_brand = self.__g_dictBrandInfo['dict_ret']['lst_owned_brand']  # for global navigation
        return render(request, 'svload/budget_list.html',
                      {'s_brand_name': s_brand_name,
                       'lst_owned_brand': lst_owned_brand,  # for global navigation
                       'dict_budget_period': dict_budget_info['dict_budget_period'],
                       'lst_budget_table': dict_budget_info['lst_added_rst'],
                       'dict_budget_progress': dict_budget_info['dict_budget_progress'],
                       'lst_acct_list': lst_acct_list,
                       })

    def __budget_detail(self, request, *args, **kwargs):
        """Render a single budget's detail page."""
        if 'budget_id' not in kwargs:
            raise Exception('invalid budget id')
        s_period_from = kwargs.get('period_from')
        s_period_to = kwargs.get('period_to')
        n_budget_id = kwargs['budget_id']
        n_brand_id = self.__g_dictBrandInfo['dict_ret']['n_brand_id']
        # Bug fix: Budget was referenced without the local import that the
        # sibling __budget_list performs, causing a NameError at runtime.
        from .pandas_plugins.budget import Budget
        o_budget = Budget(self.__g_oSvDb)
        dict_budget_info = o_budget.get_detail_by_id(n_brand_id, n_budget_id)
        dict_budget_info['n_budget_id'] = n_budget_id
        dict_period_info = {'s_earliest_budget': s_period_from, 's_latest_budget': s_period_to}
        lst_acct_list = o_budget.get_acct_list_for_ui()
        del o_budget
        s_brand_name = self.__g_dictBrandInfo['dict_ret']['s_brand_name']
        lst_owned_brand = self.__g_dictBrandInfo['dict_ret']['lst_owned_brand']  # for global navigation
        return render(request, 'svload/budget_detail.html',
                      {'s_brand_name': s_brand_name,
                       'lst_owned_brand': lst_owned_brand,  # for global navigation
                       'dict_budget_info': dict_budget_info,
                       'dict_budget_period': dict_period_info,
                       'lst_acct_list': lst_acct_list,
                       })
class Morpheme(LoginRequiredMixin, TemplateView):
    """Word-cloud / morpheme-frequency dashboard view."""
    # template_name = 'analyze/index.html'
    __g_nCntToVisitorNounRank = 100  # number of top visitor nouns to extract
    __g_nCntToInboundKeywordRank = 10  # number of top inbound keywords to extract
    __g_nCntToSourceMediumRank = 10  # number of top source/medium pairs to extract

    def __init__(self):
        return

    def get(self, request, *args, **kwargs):
        """Render the morpheme word-cloud page for the requested period."""
        o_window = PeriodWindow()
        dict_period = o_window.get_period_range(request)
        dict_sampling_freq_mode = o_window.get_sampling_freq_ui()  # sampling freq btn UI
        del o_window
        o_sv_db = SvSqlAccess()
        if not o_sv_db:
            raise Exception('invalid db handler')
        dict_rst = get_brand_info(o_sv_db, request, kwargs)
        if dict_rst['b_error']:
            dict_context = {'err_msg': dict_rst['s_msg']}
            return render(request, "svload/analyze_deny.html", context=dict_context)
        s_brand_name = dict_rst['dict_ret']['s_brand_name']
        lst_owned_brand = dict_rst['dict_ret']['lst_owned_brand']  # for global navigation
        from .pandas_plugins.word_cloud import WcMainVisual
        o_word_cloud = WcMainVisual(o_sv_db)
        lst_period = ['ly', 'lm', 'tm']
        o_word_cloud.set_period_dict(dict_period, lst_period)
        o_word_cloud.load_df()
        dict_config = {'n_brand_id': dict_rst['dict_ret']['n_brand_id'],
                       's_static_file_path': settings.STATICFILES_DIRS[0],
                       's_media_file_path': settings.MEDIA_ROOT,
                       's_media_url_root': settings.MEDIA_URL,
                       'lst_period': lst_period, 'n_th_rank': self.__g_nCntToVisitorNounRank}
        dict_wc_rst = o_word_cloud.get_top_ranker(dict_config)
        del o_word_cloud, dict_config
        # Release the db handler before rendering, as post() does.
        del o_sv_db
        return render(request, 'svload/morpheme.html',
                      {'dict_sampling_freq_mode': dict_sampling_freq_mode,
                       's_cur_period_window': dict_period['s_cur_period_window'],
                       'dict_period_date': {'from': dict_period['dt_first_day_this_month'].strftime("%Y%m%d"),
                                            'to': dict_period['dt_today'].strftime("%Y%m%d")},
                       's_brand_name': s_brand_name,
                       'lst_owned_brand': lst_owned_brand,  # for global navigation
                       'lst_top_word_by_freq_trend': dict_wc_rst['lst_top_word_by_freq_trend'],
                       'visitor_noun_n_th_rank': self.__g_nCntToVisitorNounRank,
                       'dict_misc_word_cnt': dict_wc_rst['dict_misc_word_cnt'],
                       'dict_word_cloud_img_url': dict_wc_rst['dict_word_cloud_img_url']
                       })

    def post(self, request, *args, **kwargs):
        """Handle the morpheme-search form submission."""
        o_sv_db = SvSqlAccess()
        if not o_sv_db:
            raise Exception('invalid db handler')
        # Bug fix: get_brand_info was previously called twice with identical
        # arguments; one call suffices.
        dict_rst = get_brand_info(o_sv_db, request, kwargs)
        if dict_rst['b_error']:
            dict_context = {'err_msg': dict_rst['s_msg']}
            return render(request, "svload/analyze_deny.html", context=dict_context)
        s_brand_name = dict_rst['dict_ret']['s_brand_name']
        lst_owned_brand = dict_rst['dict_ret']['lst_owned_brand']  # for global navigation
        s_act = request.POST.get('act')
        s_return_url = request.META.get('HTTP_REFERER')
        if s_act == 'search_morpheme':
            s_morpheme_query = request.POST.get('morpheme_query')
            from .pandas_plugins.word_cloud import MorphemeVisual
            o_morpheme = MorphemeVisual(o_sv_db)
            dict_rst = o_morpheme.get_morpheme_id_by_morpheme(s_morpheme_query)
            del o_morpheme
            # Bug fix: the trailing `del o_sv_db` was unreachable because both
            # branches below return; release the handler before returning.
            del o_sv_db
            if dict_rst['b_error']:
                dict_context = {'err_msg': dict_rst['s_msg'], 's_return_url': s_return_url}
                return render(request, "svload/deny.html", context=dict_context)
            return render(request, "svload/morpheme.html",
                          context={'s_brand_name': s_brand_name,
                                   'lst_owned_brand': lst_owned_brand,  # for global navigation
                                   's_morpheme_query': s_morpheme_query,
                                   'lst_relevant_morpheme': dict_rst['lst_morpheme'],
                                   })
        # Bug fix: an unknown act previously fell through and returned None,
        # which Django rejects; show the deny page instead.
        del o_sv_db
        dict_context = {'err_msg': 'unsupported act', 's_return_url': s_return_url}
        return render(request, "svload/deny.html", context=dict_context)
class MorphemeChronicle(LoginRequiredMixin, TemplateView):
# template_name = 'analyze/index.html'
def __init__(self):
return
def get(self, request, *args, **kwargs):
o_window = PeriodWindow()
dict_period = o_window.get_period_range(request)
dict_sampling_freq_mode = o_window.get_sampling_freq_ui() # sampling freq btn UI
del o_window
o_sv_db = SvSqlAccess()
if not o_sv_db:
raise Exception('invalid db handler')
dict_rst = get_brand_info(o_sv_db, request, kwargs)
if dict_rst['b_error']:
dict_context = {'err_msg': dict_rst['s_msg']}
return render(request, "svload/analyze_deny.html", context=dict_context)
s_brand_name = dict_rst['dict_ret']['s_brand_name']
lst_owned_brand = dict_rst['dict_ret']['lst_owned_brand'] # for global navigation
# if 'morpheme_id' in kwargs.keys(): # 7,21,758
from .pandas_plugins.word_cloud import MorphemeVisual
o_morpheme_chronicle = MorphemeVisual(o_sv_db)
lst_period = ['2ly', 'ly', 'lm', 'tm']
dict_url_rst = {'b_error': False, 's_msg': None}
if request.method == 'GET' and 'morpheme_id' in request.GET:
s_morpheme_ids = request.GET['morpheme_id']
lst_morpheme_id = s_morpheme_ids.split(',')
for x in lst_morpheme_id:
if not x.isdigit():
dict_url_rst['b_error'] = True
dict_url_rst['s_msg'] = 'invalid morpheme id'
break
else:
dict_url_rst['b_error'] = True
dict_url_rst['s_msg'] = 'invalid approach'
if dict_url_rst['b_error']:
dict_context = {'err_msg': dict_url_rst['s_msg']}
return render(request, "svload/analyze_deny.html", context=dict_context)
lst_morpheme_id = [int(x.strip()) for x in lst_morpheme_id] # ?morpheme_id=7,21,758
# begin -
lst_item_line_color = ['#D6E2DF', '#A4C8C1', '#6CBDAC', '#079476', '#960614', '#6b0000', '#205a86', '#140696',
'#4d6165', '#798984', '#8db670', '#ffad60'] # last one is the largest
n_max_morpheme_cnt = len(lst_item_line_color)
o_morpheme_chronicle.set_period_dict(dict_period, lst_period)
o_morpheme_chronicle.set_freq(dict_sampling_freq_mode)
o_morpheme_chronicle.set_morpheme_lst(lst_morpheme_id[:n_max_morpheme_cnt])
o_morpheme_chronicle.load_df()
dict_plots = {} # graph dict to draw
dict_4_multi_line = o_morpheme_chronicle.retrieve_daily_chronicle_by_morpheme_ml(lst_item_line_color)
o_graph = Visualizer()
o_graph.set_height(170)
o_graph.set_x_labels(dict_4_multi_line['lst_x_label'])
n_morpheme_cnt = len(dict_4_multi_line['lst_line_label'])
n_gross_freq = 0
| |
<reponame>WD-stefaang/USD<filename>pxr/usd/lib/usdUtils/complianceChecker.py
#
# Copyright 2018 Pixar
#
# Licensed under the Apache License, Version 2.0 (the "Apache License")
# with the following modification; you may not use this file except in
# compliance with the Apache License and the following modification to it:
# Section 6. Trademarks. is deleted and replaced with:
#
# 6. Trademarks. This License does not grant permission to use the trade
# names, trademarks, service marks, or product names of the Licensor
# and its affiliates, except as required to comply with Section 4(c) of
# the License and to reproduce the content of the NOTICE file.
#
# You may obtain a copy of the Apache License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the Apache License with the above modification is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the Apache License for the specific
# language governing permissions and limitations under the Apache License.
#
# A utility class for checking compliance of a given USD asset or a USDZ
# package.
# Because usdz files are zip files, someone could use generic zip tools to
# create an archive and just change the extension, producing a .usdz file that
# does not honor the additional constraints that usdz files require. Even if
# someone does use our official archive creation tools, though, we intentionally
# allow creation of usdz files that can be very "permissive" in their contents,
# for internal studio uses, where portability outside the studio is not a
# concern. For content meant to be delivered over the web (eg. ARKit assets),
# however, we must be much more restrictive.
#
# This class provides two levels of compliance checking:
# * "simple or structural" validation that primarily verifies the files within
# the package are:
# * laid out properly
# * aren't compressed or encrypted and
# * only contains usd and texture file formats that can be consumed
# directly from within the package (e.g. no abc files allowed)
# * contain no unresolvable paths or paths that resolve to assets outside
# the root package.
#
# * "ARKit" compatibility level, which brings in many more restrictions:
# * No file formats other than the core-supported ones.
# * No image file formats other than jpg and png.
# * no use of custom schemas not provided by core USD and no
# PointInstancers.
# * The stage must be Y-up.
# * no shader nodes with non-id implementationSource and no shader nodes
# other than the Usd* nodes used for preview shading.
# *
#
class ComplianceChecker(object):
    """Checks a USD/USDZ asset against structural and (optional) ARKit rules."""
    # Only core USD file formats are allowed.
    _allowedLayerFormatIds = ('usd', 'usda', 'usdc', 'usdz')
    # Allow just png and jpg for now.
    _allowedImageFormats = ("jpg", "png")
    # Only layer and image files are allowed.
    _allowedFileExtensions = _allowedLayerFormatIds + _allowedImageFormats
    # Include a list of "unsupported" image formats to provide better error
    # messages when we find one of these.
    _unsupportedImageFormats = ["bmp", "tga", "hdr", "exr", "tif", "zfile",
                                "tx"]
    # All core prim types other than UsdGeomPointInstancers and the types in
    # UsdLux are allowed.
    _allowedPrimTypeNames = ('', 'Scope', 'Xform', 'Camera',
                             'Shader', 'Material',
                             'Mesh', 'Sphere', 'Cube', 'Cylinder', 'Cone',
                             'Capsule', 'GeomSubset', 'NurbsPatch',
                             'Points', 'SkelRoot', 'Skeleton',
                             'SkelAnimation', 'BlendShape')
@staticmethod
def GetRules(arkit=False):
baseRules = [
# 1
"Files within a usdz package must be laid out properly, i.e. they"
"should be aligned to 64 byptes.",
# 2
"Files withing a usdz package should not be compressed or "
"encrypted.",
# 3
"Texture files should be .jpg or .png.",
]
arkitRules = [
# 4
"The composed USD stage should not contain any unresolvable asset "
"dependencies (in every possible variation of the asset), when "
"using the default asset resolver. If the root layer is a package, "
"then the composed stage should not contain references to files "
"outside the package. In other words, the package should be "
"entirely self-contained.",
# 5
"All included layers that participate in composition should have "
"one of the core supported file formats.",
# 6
"UsdGeomPointInstancers and custom schemas not provided by core "
"USD are not allowed.",
# 7
"The stage must be Y-up.",
# 8
"Shader nodes must have \"id\" as their implementationSource with "
"id values that begin with \"Usd*\".",
# 9
"The root layer of the package must be a usdc file and must not "
"include any external dependencies that are USD layers."
]
allRules = baseRules
if arkit:
allRules += arkitRules
return allRules
@staticmethod
def DumpRules(arkit=False):
print "Checking rules: "
for ruleNum, rule in enumerate(ComplianceChecker.GetRules(arkit=arkit)):
print "[%s] %s" % (ruleNum + 1, rule)
print "-----------------------------------------"
    def GetFailedChecks(self):
        """Return the list of failed-check messages accumulated so far."""
        return self._failedChecks

    def GetErrors(self):
        """Return the list of hard errors (e.g. an unopenable input file)."""
        return self._errors
def _Msg(self, msg):
if self._verbose:
print msg
    def _AddError(self, errMsg):
        # Record a hard (non-rule) error, retrievable via GetErrors().
        self._errors.append(errMsg)
def _AddFailedCheck(self, msg, ruleNum):
# XXX: It would be nice to have separate classes for validating
# each rule in the future and not have to associate the failed check
# with a rule number like this.
self._failedChecks.append(msg + " (violates rule(s) %s)" % ruleNum)
if isinstance(ruleNum, list):
for num in ruleNum:
self._violatedRules.add(num)
else:
self._violatedRules.add(ruleNum)
def __init__(self, inputFile,
arkit=False, skipARKitRootLayerCheck=False,
rootPackageOnly=False,
skipVariants=False, verbose=False):
self._arkit = arkit
self._skipARKitRootLayerCheck = skipARKitRootLayerCheck
self._rootPackageOnly = rootPackageOnly
self._doVariants = not skipVariants
self._verbose = verbose
self._failedChecks = []
self._errors = []
self._violatedRules = set()
self._checkedPackages = set()
from pxr import Ar, Sdf, Usd, UsdUtils
if not Usd.Stage.IsSupportedFile(inputFile):
_AddError("Cannot open file '%s' on a USD stage." % args.inputFile)
return
# Collect all warnings using a diagnostic delegate.
delegate = UsdUtils.CoalescingDiagnosticDelegate()
usdStage = Usd.Stage.Open(inputFile)
allDiagnostics = delegate.TakeUncoalescedDiagnostics()
if self._arkit:
for diag in allDiagnostics:
# "_ReportErrors" is the name of the function that issues
# warnings about unresolved references, sublayers and other
# composition arcs.
if '_ReportErrors' in diag.sourceFunction and \
'usd/stage.cpp' in diag.sourceFileName:
self._AddFailedCheck(diag.commentary, ruleNum=4)
with Ar.ResolverContextBinder(usdStage.GetPathResolverContext()):
# This recursively computes all of inputFiles's external
# dependencies.
(allLayerDeps, allAssetDeps, unresolvedPaths) = \
UsdUtils.ComputeAllDependencies(Sdf.AssetPath(inputFile))
self._CheckDependencies(usdStage, allLayerDeps, allAssetDeps,
unresolvedPaths)
self._CheckStage(usdStage, allLayerDeps)
    def _CheckDependencies(self, usdStage,
                           layerDeps, assetDeps, unresolvedPaths):
        """Validate layers/packages and, for ARKit, package self-containment.

        Unresolvable dependencies are always reported against rule 4.
        """
        from pxr import Ar

        def _IsPackageOrPackagedLayer(layer):
            # True for package layers (e.g. usdz) and layers addressed with a
            # package-relative path (i.e. files living inside a package).
            return layer.GetFileFormat().IsPackage() or \
                Ar.IsPackageRelativePath(layer.identifier)

        # Process every package just once by storing them all in a set.
        packages = set()
        for layer in layerDeps:
            if _IsPackageOrPackagedLayer(layer):
                packagePath = Ar.SplitPackageRelativePathInner(
                    layer.identifier)[0]
                packages.add(packagePath)
            self._CheckLayer(layer)
        for package in packages:
            self._CheckPackage(package)
        # If the root layer is a package, validate that all the loaded layers
        # belong to the package.
        rootLayer = usdStage.GetRootLayer()
        if self._arkit and _IsPackageOrPackagedLayer(rootLayer):
            packagePath = usdStage.GetRootLayer().realPath
            if packagePath:
                if Ar.IsPackageRelativePath(packagePath):
                    packagePath = Ar.SplitPackageRelativePathOuter(
                        packagePath)[0]
                for layer in layerDeps:
                    # In-memory layers such as session layers won't have a real
                    # path. We can skip them.
                    if layer.realPath:
                        if not layer.realPath.startswith(packagePath):
                            self._AddFailedCheck("Found loaded layer '%s' that "
                                "does not belong to the package '%s'." %
                                (layer.identifier, packagePath), ruleNum=4)
                for asset in assetDeps:
                    if not asset.startswith(packagePath):
                        self._AddFailedCheck("Found asset reference '%s' that "
                            "does not belong to the package '%s'." %
                            (asset, packagePath), ruleNum=4)
        for unresolvedPath in unresolvedPaths:
            self._AddFailedCheck("Found unresolvable external dependency '%s'."
                                 % unresolvedPath, ruleNum=4)
    def _CheckStage(self, usdStage, allLayers):
        """Run stage-level checks: ARKit rules, then the full prim traversal."""
        if self._arkit:
            from pxr import UsdGeom
            if not self._skipARKitRootLayerCheck:
                self._CheckARKitCompatibility(usdStage, allLayers)
            # Rule 7: the stage must be Y-up.
            upAxis = UsdGeom.GetStageUpAxis(usdStage)
            if upAxis != UsdGeom.Tokens.y:
                self._AddFailedCheck("Stage has upAxis '%s'. upAxis should be '%s'."
                                     % (upAxis, UsdGeom.Tokens.y), ruleNum=7)
        if self._rootPackageOnly:
            # Only the root package is validated; skip the prim traversal.
            self._CheckRootPackage(usdStage)
            return
        from pxr import Usd
        # Author all variant switches in the session layer.
        usdStage.SetEditTarget(usdStage.GetSessionLayer())
        allPrimsIt = iter(Usd.PrimRange.Stage(usdStage,
                                              Usd.TraverseInstanceProxies()))
        self._TraverseRange(allPrimsIt, isStageRoot=True)
def _CheckARKitCompatibility(self, usdStage, allLayers):
layersOnDisk = [i for i in allLayers if i.realPath]
if len(layersOnDisk) > 1:
self._AddFailedCheck("The stage contains %s layers. It should "
"contain a single usdc layer to be compatible with ARKit's "
"implementation of usdz." % len(layersOnDisk), ruleNum=9)
# How do we check if the root layer in the package is a usdc?
rootLayerRealPath = usdStage.GetRootLayer().realPath
if rootLayerRealPath.endswith(".usdz"):
from pxr import Usd
zipFile | |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
A lightweight privacy method based on time shuffling designed for sharing \
power profiles based data in open forums.
Conceptualised in the Energy and Power Group.
Energy and Power Group is part of the Department of Engineering Science,
University of Oxford.
A profile is split into periods with datapoints scrambled within each period.
The mean value of each period is maintained between the scrambled data and
the raw profile. For example, if the original
profile was a HH profile of 48 datapoints and periods was chosen as 8,
the profile would be shuffled within 8 periods, each containing 6
datapoints representing 3 hours. Each of those periods in the scrambled
profile will have the same mean value as the equivalent period in the
original profile. Therefore, if the scrambled profile is shared openly,
only 3 hourly data can be interpreted accurately.
@author: <NAME>, <EMAIL>
Copyright (c) 2021 <NAME>
"""
__version__ = '0.0.4'
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def single_scramble(profile, periods=None, freq=None, source_freq=None,
                    seed=42, df_colname="consumption"):
    """
    Split profile into p periods and scramble the datapoints within each\
    period.

    The number of periods determines the level of granularity that can be
    correctly interpreted without unscrambling. For example, if the original
    profile was a HH profile of 48 datapoints and periods was chosen as 8,
    the profile would be shuffled within 8 periods, each containing 6
    datapoints representing 3 hours. Each of those periods in the scrambled
    profile will have the same mean value as the equivalent period in the
    original profile. Therefore, if the scrambled profile is shared openly,
    only 3 hourly data can be interpreted accurately.

    Parameters
    ----------
    profile : np.Array or pd.Series
        An array like object containing a time based profile. Must have a
        datetime index if using the freq argument.
    periods : int, optional
        The number of periods to split the profile into, before scrambling the
        order of datapoints within each of these periods. Periods must be a
        divisor of the total number of datapoints in the profile.
        Either this, or freq must be supplied.
        The default is None.
    freq : str, optional
        Frequency string of the desired final profile granularity over which
        the profile can be openly interpreted. e.g. '3H' for 3 hourly. Either
        this, or periods must be supplied.
        The default is None.
    source_freq : str, optional
        Frequency string of the original profile. This is required if the
        original profile does not contain a datetime index, or the frequency
        cannot be determined from it. Used in combination with freq arg.
        The default is None.
    seed : int, optional
        A random seed value used to determine the order by which data points
        are shuffled. This must be known to successfully unshuffle the profile.
        The default is 42.
    df_colname : str, int, float, optional
        The name of the column to use as the profile if a dataframe with
        multiple columns is passed as the profile.
        The default is "consumption".

    Returns
    -------
    np.Array, pd.Series or pd.DataFrame
        The scrambled profile as defined by the number of periods. Any
        datetime index is returned unshuffled.
    """
    if not isinstance(seed, int):
        raise Exception("Seed argument must be an int")
    # Normalise the input to a plain numpy array, remembering the original
    # container so the result can be returned in the same form.
    series = isinstance(profile, pd.Series)
    dataframe = isinstance(profile, pd.DataFrame)
    raw_profile = None
    if series:
        raw_profile = profile.copy()
        profile = profile.to_numpy()
    elif dataframe:
        raw_profile = profile.copy()
        profile = profile[df_colname].to_numpy()
    T = len(profile)
    # Bug fix: only infer the source frequency when the caller did not supply
    # one (previously a user-supplied source_freq still triggered inference
    # and a spurious warning), and infer it from the original pandas object,
    # since `profile` has already been converted to a numpy array here.
    if isinstance(freq, str) and source_freq is None:
        try:
            source_freq = pd.infer_freq(raw_profile.index)
        except Exception:
            print("""
                  Could not determine source_freq, check if profile has
                  datetime index or use the source_freq argument
                  """)
    # If both freq and source_freq are defined, use instead of periods arg.
    if (isinstance(freq, str) and isinstance(source_freq, str)):
        # Bug fix: force an integer period count; the float produced by the
        # division broke range() and the modulo check below.
        periods = int(T / (pd.to_timedelta(freq) / pd.to_timedelta(source_freq)))
    # Check that periods is a divisor of the total number of datapoints T.
    if (T % periods) != 0:
        raise Exception(str(periods) + """
                        periods arg not a divisor of total length of profile
                        """)
    # Number of datapoints per period.
    s = int(T / periods)
    # Reshape so each row is one period.
    p = profile.reshape((-1, s))
    # One shuffled key reorders the datapoints inside every period, so each
    # period keeps exactly its own members (and hence its mean).
    key = list(range(s))
    rng = np.random.default_rng(seed)
    rng.shuffle(key)
    p3 = p[:, key].flatten()
    if series:
        p4 = raw_profile.copy()
        p4.iloc[:] = p3
        return p4
    if dataframe:
        p4 = raw_profile.copy()
        p4[df_colname] = p3
        return p4
    return p3
def multi_scramble(profile, multiperiods, seeds=None, **kwargs):
    """
    Apply profile scrambling at multiple granularity levels.

    This can be unscrambled at increasing levels of granularity depending
    on how many of the periods and seed keys are shared.

    Parameters
    ----------
    profile : np.Array or pd.Series
        An array like object containing a time based profile. Must have a
        datetime index if using the freq argument
    multiperiods : list
        A list of integers representing the different periods to iteratively
        split the profile into and shuffle within. Should be decreasing and
        should be factors of the previous period and original profile length.
        e.g. for a profile with 48 HH datapoints, [8,2,1] would be valid and
        correspond to granularities of 3 hourly, 12 hourly and daily which
        need to be unscrambled in the reverse order.
    seeds : list, optional
        A list of integers which are used to seed the random profile shuffle
        within each period. The default is None.
    **kwargs : TYPE
        Arguments to pass through to single_scramble e.g. df_colname.

    Returns
    -------
    profile : np.Array, pd.Series or pd.DataFrame
        The multi-level scrambled profile as defined by the number of periods.
        Any datetime index is returned unshuffled.
    seeds : list
        A list of seeds used as security keys. Required to unscramble.
    """
    # Every level's period count must divide the previous level (and the
    # first must divide the profile length) for the nesting to be reversible.
    for idx, n_periods in enumerate(multiperiods):
        if idx == 0:
            if (len(profile) % n_periods) != 0:
                raise Exception(str(n_periods) + """
                The first periods arg is not a divisor of total length
                of profile
                """)
        elif (multiperiods[idx - 1] % n_periods) != 0:
            raise Exception(str(n_periods)
                            + " is not a divisor of "
                            + str(multiperiods[idx - 1]))
    if seeds is None:
        # No keys supplied: draw one random seed per level.
        seeds = [int(np.random.default_rng().integers(12345))
                 for _ in multiperiods]
    else:
        # Keys supplied: validate count and types.
        if len(seeds) != len(multiperiods):
            raise Exception("Number of seeds must equal number of\
                            multiperiods")
        for seed in seeds:
            if not isinstance(seed, int):
                raise Exception("Seed argument must be an int")
    # Scramble coarsest-to-finest order as given; unscrambling must run in
    # reverse with the same (periods, seed) pairs.
    for n_periods, seed in zip(multiperiods, seeds):
        profile = single_scramble(profile, periods=n_periods, seed=seed,
                                  **kwargs)
    return profile, seeds
def single_unscramble(scrambled, periods, seed,
df_colname="consumption"):
"""
Unscramble a single level of a scrambled profile.
Parameters
----------
scrambled : np.Array or pd.Series
An array like object containing a time based profile which has
previously been scrambled.
periods : int,
The number of periods used to scramble the profile to this level.
seed : int,
The seed used during the scramble process at the associated level.
df_colname : str, int, float, optional
The name of the column to use as the profile if a dataframe with
multiple columns is passed as the profile.
The default is "consumption".
Returns
-------
np.Array, pd.Series or pd.DataFrame
The unscrambled profile as defined by the number of periods and seed.
Any datetime index is returned unshuffled.
"""
if not isinstance(seed, int):
raise Exception("Seed argument must be an int")
if isinstance(scrambled, pd.Series):
series = True
dataframe = False
raw_profile = scrambled.copy()
scrambled = scrambled.to_numpy()
elif isinstance(scrambled, pd.DataFrame):
series = False
dataframe = True
raw_profile = scrambled.copy()
scrambled = scrambled[df_colname].to_numpy()
else:
series = False
dataframe = False
# check that each freq is valid
if (len(scrambled) % periods) != 0:
raise Exception(
str(periods) + """ periods arg not a divisor of total length of\
profile""")
s = int(len(scrambled)/periods)
# reshape array to seperate out sections
p = scrambled.reshape((-1, s))
p2 = p.copy()
# create a | |
import re
import string
import time
import xml.etree.ElementTree as ET
import enchant
import numpy as np
import pandas as pd
import spacy
# Try this if you get a problem with "spacy.load('en')":
# pip install spacy && python -m spacy download en
from nltk.corpus import stopwords
from nltk.stem.snowball import EnglishStemmer
# Module-level NLP resources, created once at import time.
d = enchant.Dict("en_US")  # US-English spelling dictionary (pyenchant)
stemmer = EnglishStemmer()  # NLTK Snowball stemmer for English
nlp = spacy.load('en')  # NOTE(review): the 'en' shortcut requires a linked model; newer spaCy uses 'en_core_web_sm' — confirm environment
eng_stopwords = set(stopwords.words("english"))
# NOTE(review): range(0, 9) yields '0'..'8' only — the digit '9' is missing;
# confirm whether all ten digits were intended.
num_str = [str(i) for i in range(0, 9)]
# Emoticon -> emotion-word lookup used by Text.extract_emoticons.
# Bug fix: a missing comma made `":-]"":]"` concatenate into the single
# bogus key ":-]:]"; they are now two separate "happy" entries.
emoticons = {":-)": "happy", ":)": "happy", ":-]": "happy", ":]": "happy", ":-3": "happy", ":3": "happy",
             ":->": "happy", ":>": "happy",
             "8-)": "happy", "8)": "happy", ":-}": "happy", ":}": "happy", ":o)": "happy", ":c)": "happy",
             ":^)": "happy",
             "=]": "happy", "=)": "happy", ":-D": "happy", ":D": "laugh", "8-D": "laugh", "8D": "laugh", "x-D": "laugh",
             "xD": "laugh", "X-D": "laugh", "XD": "laugh", "=D": "laugh", "=3": "happy", "B^D": "laugh", ":-(": "sad",
             ":(": "sad", ":-c": "sad", ":c": "sad", ":-<": "sad", ":<": "sad", ":-[": "sad", ":[": "sad",
             ":-||": "sad",
             ">:[": "angry", ":{": "sad", ":@": "sad", ">:(": "angry", ";-)": "wink", ";)": "wink", "*-)": "wink",
             "*)": "wink", ";-]": "wink", ";]": "wink", ";^)": "wink", ":-,": "wink", ";D": "laugh",
             ":-/": "scepticism", ":/": "scepticism", ":-.": "scepticism", ">:\\": "angry", ">:/": "angry",
             ":\\": "scepticism", "=/": "scepticism", "=\\": "scepticism", ":L": "scepticism", "=L": "scepticism",
             ":S": "scepticism"}
# Pre-compile one regex per emoticon, escaping regex metacharacters so the
# emoticon matches literally; the trailing "+" lets repeated final characters
# (e.g. ":))))") still match the base emoticon.
emoticons_re = {}
for key, val in emoticons.items():
    new_key = key
    for c in new_key:
        if c in ['[', '\\', '^', '$', '.', '|', '?', '*', '+', '(', ')']:
            new_key = new_key.replace(c, "\\" + c)
    # NOTE(review): this collapses a double-escaped '|' back to a single
    # escape; verify it is still required after the loop above.
    new_key = new_key.replace("\\\|", "\\|")
    regex = re.compile(new_key + "+")
    emoticons_re[regex] = val
class Text():
    """Wrapper around a single document providing lazily computed NLP
    annotations (tokens, lemmata, POS tags, stems, NER, spell checking,
    emoticon/punctuation cleaning).

    NOTE(review): the attributes below are CLASS-level, so the mutable lists
    are shared by every instance until an instance method reassigns them —
    confirm whether per-instance initialisation in __init__ was intended.
    """
    text_id = -1             # caller-supplied identifier
    text_type = ''           # caller-defined category of the text
    text = ''                # raw input text
    clean_text = ''          # text after emoticon + punctuation cleaning
    heavy_clean_text = ''
    spellchecked_text = ''
    placeholders_text = ''   # text with named entities replaced by placeholder words
    named_entities = []      # (token, label) pairs; "NOT_NE" marks non-entities
    pos_tags = []
    lemmata = []
    stems = []
    tokens = []
    clean_tokens = []
    heavy_clean_tokens = []
    placeholders_tokens = []
    spellchecked_tokens = []
    doc = None               # cached spaCy Doc, parsed lazily on first use
    # Matches runs (2+) of repeated punctuation marks for collapsing.
    punct = re.compile("(\.){2,}|(\?){2,}|(,){2,}|(-){2,}|(\"){2,}|(\$){2,}|(\*){2,}|(\'){2,}|(!){2,}")
    # Maps spaCy entity labels to natural-language placeholder words.
    tags2words = {'GPE': 'country', 'ORDINAL': 'number', 'LAW': 'law', 'CARDINAL': 'number',
                  'LOC': 'location', 'EVENT': 'event', 'DATE': 'date', 'QUANTITY': 'quantity', 'NOT_NE': 'None',
                  'PERCENT': 'percent', 'PRODUCT': 'product', 'MONEY': 'money', 'FAC': 'facility',
                  'NORP': 'nationality',
                  'TIME': 'time', 'WORK_OF_ART': 'art', 'PERSON': 'person',
                  'LANGUAGE': 'language', 'ORG': 'organization'}
def __init__(self, text: str, text_type: str, text_id: str):
self.text = text
self.text_type = text_type
self.text_id = text_id
def tokenize(self):
if self.doc is None:
self.doc = nlp(self.text)
self.tokens = [str(token.text) for token in self.doc]
return self.tokens
def lemmatize(self):
if self.doc is None:
self.doc = nlp(self.text)
self.lemmata = [str(token.lemma_) for token in self.doc]
return self.lemmata
def pos_tag(self):
if self.doc is None:
self.doc = nlp(self.text)
self.pos_tags = [str(token.pos_) for token in self.doc]
return self.pos_tags
def stemmatize(self):
if self.doc is None:
self.doc = nlp(self.text)
self.stems = [stemmer.stem(token.text) for token in self.doc]
return self.stems
def ner(self):
if self.doc is None:
self.doc = nlp(self.text)
ne_texts = [ent.text for ent in self.doc.ents]
ne = [(str(ent.text), str(ent.label_)) for ent in self.doc.ents]
self.named_entities = [(token.text, "NOT_NE") if token.text not in ne_texts else ne[ne_texts.index(token.text)]
for token in self.doc]
return self.named_entities
def spell_check(self):
if len(self.tokens) == 0:
self.tokenize()
self.spellchecked_text = []
for token in self.tokens:
if len(token) > 2 and not d.check(token) and len(d.suggest(token)) > 0:
self.spellchecked_text.append(d.suggest(token)[0])
else:
self.spellchecked_text.append(token)
self.spellchecked_text = ' '.join(self.spellchecked_text)
spellchecked_tokens = [str(token.text) for token in nlp(self.spellchecked_text)]
return self.spellchecked_text
def replace_ne(self):
if len(self.named_entities) == 0:
self.ner()
self.placeholders_text = self.text
for ent in self.named_entities:
if ent[1] != 'NOT_NE':
self.placeholders_text = self.placeholders_text.replace(ent[0], self.tags2words[ent[1]])
placeholders_tokens = [str(token.text) for token in nlp(self.placeholders_text)]
return self.placeholders_text
def clean(self):
self.clean_text = self.extract_emoticons(self.text)
self.clean_text = self.clean_punctuation(self.clean_text)
self.clean_tokens = [str(token.text) for token in nlp(self.clean_text)]
return self.clean_text
def extract_emoticons(self, text, tag=0):
transformed_text = text
try:
for emoticon in emoticons_re.keys():
if emoticon.search(text):
for m in emoticon.finditer(text):
if tag:
placeholder = " [EMOTICON:" + emoticons_re[emoticon] + "] "
else:
placeholder = " " + emoticons_re[emoticon] + " "
transformed_text = transformed_text.replace(m.group(), placeholder)
except Exception as e:
print(text)
return transformed_text
def clean_punctuation(self, text):
clean_text = text
while self.punct.search(clean_text):
repeated_character = self.punct.search(clean_text).group(0)
if "." in repeated_character:
repeated_character_regex = "\." + "{2,}"
repeated_character = "."
elif "?" in repeated_character or "*" in repeated_character or "$" in repeated_character:
repeated_character_regex = "\\" + repeated_character[0] + "+"
repeated_character = repeated_character[0]
else:
repeated_character_regex = repeated_character[0] + "+"
repeated_character = repeated_character[0]
clean_text = re.sub(repeated_character_regex, repeated_character, clean_text)
clean_text = re.sub('([.,!?()*\\\\"\'-:;0-9=\$%\&_])', r' \1 ', clean_text)
clean_text = re.sub('\s{2,}', ' ', clean_text)
return clean_text
def heavy_clean(self):
self.heavy_clean_text = ' '.join([y.lower() for y in self.text.split() if
not y.lower() in eng_stopwords and not y in num_str and not y in string.punctuation])
self.heavy_clean_tokens = [str(token.text) for token in nlp(self.heavy_clean_text)]
return self.heavy_clean_text
def transform_dataset(dataset_original, transformation):
    """Apply a named text transformation to the question/answer columns.

    Args:
        dataset_original: pandas DataFrame, possibly containing 'question'
            and/or 'answer' columns.
        transformation: (name, callable) pair; the callable maps one text
            value to its transformed version.

    Returns:
        A deep copy of the input with one extra column per text field,
        named '<field>_<name>'. The input DataFrame is left untouched.
    """
    dataset = dataset_original.copy(deep=True)
    begin = time.time()
    # Only transform the text fields that actually exist in this dataset.
    fields = list(set(dataset.columns) & set(['question', 'answer']))
    for field in fields:
        column_name = field + '_' + transformation[0]
        # apply() creates the column directly; the previous empty-string
        # pre-initialisation and astype(object) were immediately overwritten.
        dataset[column_name] = dataset[field].apply(transformation[1])
    end = time.time()
    print('Transformation:', transformation[0], '\t Time elapsed:', (end - begin))
    return dataset
class OrgQuestion():
    """An original (source) question and its thread of related questions."""

    # Class-level defaults kept for backward compatibility with code that
    # reads them off the class; real state is set per instance in __init__.
    id_q = -1
    subj = ""
    body = ""
    thread = []

    def __init__(self, id_q, subj, body):
        self.id_q = id_q
        self.subj = subj
        self.body = body
        # BUGFIX: give each instance its own list instead of relying on the
        # shared mutable class attribute above.
        self.thread = []

    def add_to_thread(self, elem):
        """Append a related question to this question's thread."""
        self.thread = self.thread + [elem]

    def pprint(self):
        """Print this question and, recursively, its whole thread."""
        print('OrgQuestion:\n \tORGQ_ID = %s, \n \tOrgQSubject = %s, \n \tOrgQBody = %s' % (
            self.id_q, self.subj, self.body))
        for question in self.thread:
            question.pprint()
class RelQuestion():
    """A question related to an original question, with its comment thread."""

    # Class-level defaults kept for backward compatibility; real state is
    # set per instance in __init__.
    id_rq = -1
    subj = ""
    body = ""
    relevance = 0
    rank_order = -1
    category = ""
    rel_comments = []

    def __init__(self, id_rq, subj, body, relevance, rank_order, category):
        self.id_rq = id_rq
        self.subj = subj
        self.body = body
        # Textual label ('Good', 'Bad', ...) mapped to {-1, 0, 1}.
        self.relevance = convert_score(relevance)
        self.rank_order = int(rank_order)
        self.category = category
        # BUGFIX: give each instance its own list instead of relying on the
        # shared mutable class attribute above.
        self.rel_comments = []

    def add_to_rel_comments(self, elem):
        """Append a RelComment to this question's comment thread."""
        self.rel_comments = self.rel_comments + [elem]

    def pprint(self):
        """Print this question, its metadata and all of its comments."""
        print('\tRelQuestion:\n \t\t RELQ_ID = %s, \n \t\t RelQSubject = %s, \n \t\t RelQBody = %s' % (
            self.id_rq, self.subj, self.body))
        print('\n\t\t RELQ_RANKING_ORDER = %d, \n \t\t RELQ_CATEGORY = %s, \n \t\t RELQ_RELEVANCE2ORGQ = %d' % (
            self.rank_order, self.category, self.relevance))
        for comment in self.rel_comments:
            comment.pprint()
class RelComment():
    """A single comment attached to a related question."""
    # Class-level defaults; overwritten per instance in __init__.
    id_rc = -1
    text = ""
    relevance = 0

    def __init__(self, id_rc, text, relevance):
        self.id_rc = id_rc
        self.text = text
        # Textual relevance label mapped to {-1, 0, 1} by convert_score().
        self.relevance = convert_score(relevance)

    def pprint(self):
        """Print the comment id, text and numeric relevance."""
        print(
            '\t\t--- RelComment:\n \t\t\t RELC_ID = %s, \n \t\t\t RelCText = %s, \n \t\t\t RELC_RELEVANCE2RELQ = %d' % (
                self.id_rc, self.text, self.relevance))
def convert_score(s):
    """Map a textual relevance label to a numeric score in {-1, 0, 1}.

    Negative labels yield -1, positive labels 1, and anything else
    (including None) 0.
    """
    negative_labels = ('Bad', 'PotentiallyUseful', 'Irrelevant')
    positive_labels = ('Good', 'PerfectMatch', 'Relevant')
    if s in negative_labels:
        return -1
    if s in positive_labels:
        return 1
    return 0
def read_xml(files):
    """Parse SemEval-style XML threads into a list of RelQuestion objects.

    Each thread's first child is the related question; the remaining
    children are its comments. Prints thread/question/comment counts.
    """
    data = []
    thread_count = 0
    rel_q_count = 0
    rel_c_count = 0
    for file in files:
        root = ET.parse(file).getroot()
        for thread in root:
            thread_count += 1
            question_node = thread[0]
            subj_text = ''
            body_text = ''
            for datum in question_node:
                if datum.tag == 'RelQSubject':
                    subj_text = datum.text
                elif datum.tag == 'RelQBody' and datum.text:
                    body_text = datum.text
            # No relevance label at this level: RelQuestion maps None to 0.
            question = RelQuestion(question_node.attrib['RELQ_ID'], subj_text, body_text,
                                   None, 0, question_node.attrib['RELQ_CATEGORY'])
            for comment in thread[1:]:
                question.add_to_rel_comments(
                    RelComment(comment.attrib['RELC_ID'], comment[0].text,
                               comment.attrib['RELC_RELEVANCE2RELQ']))
                rel_c_count += 1
            data.append(question)
            rel_q_count += 1
    print("Threads: ", thread_count)
    print("Questions: ", rel_q_count)
    print("Comments: ", rel_c_count)
    return data
def xml2dataframe_NoLabels(dataset, split_type=''):
    """Convert parsed questions into (questions, answer_texts) DataFrames.

    Args:
        dataset: iterable of question objects with id_rq, subj, body and
            rel_comments (each comment having id_rc and text).
        split_type: label stored in the 'split_type' column.

    Returns:
        (dataset_dataframe, answer_texts_dataset) where the first frame is
        indexed by 'Q<qid>_R<rid>' ids and sorted by (qid, rid), and the
        second maps answer_id -> answer text.
    """
    tmp = {}
    for obj in dataset:
        candidates = [c.id_rc for c in obj.rel_comments]
        tmp[obj.id_rq] = (' '.join([obj.subj, obj.body]), candidates, split_type)
    dataset_dataframe = pd.DataFrame.from_dict(tmp, orient='index').rename(
        columns={0: 'question', 1: 'candidates', 2: 'split_type'})
    for ind, row in dataset_dataframe.iterrows():
        # .at replaces the deprecated DataFrame.set_value (removed in pandas
        # 1.0) and matches xml2dataframe_Labels. Index looks like "Q1_R2".
        dataset_dataframe.at[ind, 'qid'] = int(ind.split('_')[0][1:])
        dataset_dataframe.at[ind, 'rid'] = int(ind.split('_')[1][1:])
    dataset_dataframe = dataset_dataframe.sort_values(['qid', 'rid'])
    answer_texts_dataset = {c.id_rc: c.text for obj in dataset for c in obj.rel_comments}
    answer_texts_dataset = pd.DataFrame.from_dict(answer_texts_dataset, orient='index')
    answer_texts_dataset.reset_index(inplace=True)
    answer_texts_dataset = answer_texts_dataset.rename(columns={'index': 'answer_id', 0: 'answer'})
    return dataset_dataframe, answer_texts_dataset
def xml2dataframe_Labels(dataset, split_type):
    """Convert labelled questions into (questions, answer_texts) DataFrames.

    Comments with relevance == -1 go into the 'pool' (negatives); all other
    comments go into 'answer_ids' (positives).
    """
    rows = {}
    for obj in dataset:
        positives = []
        negatives = []
        for c in obj.rel_comments:
            (negatives if c.relevance == -1 else positives).append(c.id_rc)
        rows[obj.id_rq] = (' '.join([obj.subj, obj.body]), positives, negatives, split_type)
    frame = pd.DataFrame.from_dict(rows, orient='index').rename(
        columns={0: 'question', 1: 'answer_ids', 2: 'pool', 3: 'split_type'})
    for ind, _row in frame.iterrows():
        # Index ids look like "Q<qid>_R<rid>".
        qid_part, rid_part = ind.split('_')[0], ind.split('_')[1]
        frame.at[ind, 'qid'] = int(qid_part[1:])
        frame.at[ind, 'rid'] = int(rid_part[1:])
    frame = frame.sort_values(['qid', 'rid'])
    texts = {c.id_rc: c.text for obj in dataset for c in obj.rel_comments}
    answers = pd.DataFrame.from_dict(texts, orient='index')
    answers.reset_index(inplace=True)
    answers = answers.rename(columns={'index': 'answer_id', 0: 'answer'})
    return frame, answers
def add_answers(dataset, answer_texts):
    """Expand each question row to one row per answer id and attach the text.

    Note: adds an 'answer_id' column to `dataset` in place (mirroring the
    'answer_ids' list column), then builds an exploded copy where every
    other column is repeated once per answer id, and finally left-merges
    the answer texts on 'answer_id'.
    """
    dataset['answer_id'] = dataset['answer_ids']
    list_column = 'answer_id'
    other_columns = dataset.columns.difference([list_column])
    lengths = dataset[list_column].str.len()
    expanded = pd.DataFrame(
        {column: np.repeat(dataset[column].values, lengths) for column in other_columns}
    ).assign(**{list_column: np.concatenate(dataset[list_column].values)})[dataset.columns.tolist()]
    return expanded.merge(answer_texts, on='answer_id', how='left')
def transform_dataset(dataset_original, transformation):
dataset | |
import json
import logging
import os
import re
import shutil
import types
from collections import namedtuple, defaultdict, Counter
from dataclasses import dataclass
from datetime import timedelta
from functools import total_ordering
from io import StringIO
from typing import Tuple, Optional, Dict, List, Set, Union, Iterable
from urllib.error import URLError, HTTPError
import requests
from Bio import Entrez, SeqIO
from django.conf import settings
from django.contrib.auth.models import User
from django.contrib.postgres.aggregates import StringAgg
from django.contrib.postgres.fields import CITextField
from django.core.cache import cache
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist, MultipleObjectsReturned
from django.db import models, IntegrityError, transaction
from django.db.models import Min, Max, QuerySet, TextField
from django.db.models.deletion import CASCADE, SET_NULL, PROTECT
from django.db.models.fields.json import KeyTextTransform
from django.db.models.functions import Upper
from django.db.models.query_utils import Q
from django.db.models.signals import post_save, pre_delete
from django.dispatch import receiver
from django.shortcuts import get_object_or_404
from django.urls.base import reverse
from django.utils import timezone
from django.utils.timezone import localtime
from django_extensions.db.models import TimeStampedModel
from guardian.shortcuts import get_objects_for_user
from lazy import lazy
from requests import RequestException
from genes.gene_coverage import load_gene_coverage_df
from genes.models_enums import AnnotationConsortium, HGNCStatus, GeneSymbolAliasSource
from library.constants import HOUR_SECS, WEEK_SECS
from library.django_utils import SortByPKMixin
from library.django_utils.django_partition import RelatedModelsPartitionModel
from library.file_utils import mk_path
from library.guardian_utils import assign_permission_to_user_and_groups, DjangoPermission
from library.log_utils import log_traceback
from library.utils import empty_dict, get_single_element, iter_fixed_chunks
from snpdb.models import Wiki, Company, Sample, DataState
from snpdb.models.models_enums import ImportStatus
from snpdb.models.models_genome import GenomeBuild
from upload.vcf.sql_copy_files import write_sql_copy_csv, gene_coverage_canonical_transcript_sql_copy_csv, \
gene_coverage_sql_copy_csv, GENE_COVERAGE_HEADER
class HGNCImport(TimeStampedModel):
    """Marks one import run of HGNC data; HGNC rows link back via hgnc_import."""
    pass
class NoTranscript(ValueError):
    """
    Extends ValueError for backwards compatibility.
    Indicates the transcript we are looking for is not in our database.
    Base class for MissingTranscript and BadTranscript below.
    """
class MissingTranscript(NoTranscript):
    """
    Transcript exists in RefSeq/Ensembl but not in our database,
    so c.hgvs (or otherwise) might be okay.
    """
class BadTranscript(NoTranscript):
    """
    Transcript not found in Ensembl or RefSeq at all (user error).
    """
class HGNC(models.Model):
    """One record from the HGNC (HUGO Gene Nomenclature Committee) download.

    The primary key is the numeric HGNC id with the "HGNC:" prefix stripped.
    Columns holding several ids (ccds_ids, mgd_ids, rgd_ids, ucsc_ids,
    uniprot_ids, gene_group_ids) store them as comma-separated text; the
    lazily-evaluated *_list properties below split them on demand.
    """
    # pk = HGNC id with HGNC: stripped out
    alias_symbols = models.TextField()
    approved_name = models.TextField()
    ccds_ids = models.TextField(null=True, blank=True)
    ensembl_gene_id = models.TextField(null=True, blank=True)
    gene_group_ids = models.TextField(null=True, blank=True)
    gene_groups = models.TextField(null=True, blank=True)
    # Believe it or not, gene_symbol is not unique - eg MMP21 has multiple entries
    gene_symbol = models.ForeignKey('GeneSymbol', on_delete=CASCADE)
    hgnc_import = models.ForeignKey(HGNCImport, on_delete=CASCADE)
    location = models.TextField(null=True, blank=True)
    mgd_ids = models.TextField(null=True, blank=True)
    omim_ids = models.TextField(null=True, blank=True)
    previous_symbols = models.TextField(null=True, blank=True)
    refseq_ids = models.TextField(null=True, blank=True)
    rgd_ids = models.TextField(null=True, blank=True)
    status = models.CharField(max_length=1, choices=HGNCStatus.choices)
    ucsc_ids = models.TextField(null=True, blank=True)
    uniprot_ids = models.TextField(null=True, blank=True)

    def __str__(self):
        return f"HGNC:{self.pk} approved symbol: {self.gene_symbol}, " \
               f"previous symbols: {self.previous_symbols}, alias_symbols: {self.alias_symbols}"

    def url(self):
        """Link to the gene-symbol report on genenames.org."""
        return f"https://www.genenames.org/data/gene-symbol-report/#!/hgnc_id/HGNC:{self.pk}"

    # NOTE(review): for the *_list properties below, an empty/NULL field
    # yields [''] (since ''.split(',') == ['']), not [] — confirm callers
    # expect that.
    @lazy
    def ccds_list(self):
        return (self.ccds_ids or '').split(",")

    @lazy
    def gene_group_id_list(self):
        return (self.gene_group_ids or '').split(",")

    @lazy
    def mgd_list(self):
        return (self.mgd_ids or '').split(",")

    @lazy
    def rgd_list(self):
        return (self.rgd_ids or '').split(",")

    @lazy
    def ucsc_list(self):
        return (self.ucsc_ids or '').split(",")

    @lazy
    def uniprot_list(self) -> List['UniProt']:
        """Resolve the comma-separated uniprot_ids to UniProt rows (may be empty)."""
        ulist = []
        if self.uniprot_ids:
            uniprot_ids = self.uniprot_ids.split(",")
            ulist = list(UniProt.objects.filter(pk__in=uniprot_ids))
        return ulist
class UniProt(models.Model):
    """UniProt protein annotation, keyed by primary accession."""
    # accession = Primary (citable) accession number (1st element in SwissProt record)
    accession = models.TextField(primary_key=True)
    cached_web_resource = models.ForeignKey('annotation.CachedWebResource', on_delete=CASCADE)
    function = models.TextField(null=True, blank=True)
    pathway = models.TextField(null=True, blank=True)
    pathway_interaction_db = models.TextField(null=True, blank=True)
    reactome = models.TextField(null=True, blank=True)
    tissue_specificity = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.accession
class GeneSymbol(models.Model):
    """A gene symbol (e.g. "GATA2"), stored case-insensitively.

    Gene records (per annotation consortium) are reached via GeneVersion;
    see get_genes() / genes.
    """
    symbol = CITextField(primary_key=True)

    @staticmethod
    def cast(symbol: Union[str, 'GeneSymbol']) -> Optional['GeneSymbol']:
        """Look up a string symbol (None if unknown); pass GeneSymbol objects through."""
        if isinstance(symbol, str):
            return GeneSymbol.objects.filter(symbol=symbol).first()
        return symbol

    @property
    def name(self):
        """ For use by TextPhenotypeMatch """
        return self.symbol

    def get_genes(self) -> QuerySet:
        # To match HPO/OMIM so it can be used interchangeably during phenotype matching.
        # Excludes legacy "unknown_" fake genes (see Gene.FAKE_GENE_ID_PREFIX).
        return Gene.objects.filter(~Q(identifier__startswith="unknown_"), geneversion__gene_symbol=self).distinct()

    @lazy
    def genes(self) -> List['Gene']:
        # returns cached list of genes associated with this symbol
        # use over get_genes when possible
        return list(self.get_genes().all())

    def get_absolute_url(self):
        return reverse("view_gene_symbol", kwargs={"gene_symbol": self.symbol})

    @lazy
    def alias_meta(self) -> 'GeneSymbolAliasesMeta':
        """Cached summary of all alias relationships to/from this symbol."""
        return GeneSymbolAliasesMeta(self)

    def has_different_genes(self, other: 'GeneSymbol') -> bool:
        """
        Tries to work out if two symbols' genes are equivalent. Note that sometimes RefSeq or Ensembl
        assign gene ids to both the symbol and the alias, but the other consortium only assigns to one.
        In that case we'd still like to treat them as the "same", so this only returns True when EACH
        symbol has at least one gene the other lacks.
        """
        my_genes = set(self.genes)
        other_genes = set(other.genes)
        all_genes = my_genes.union(other_genes)
        source_has_extra = False
        other_has_extra = False
        for g in all_genes:
            if g in my_genes and g not in other_genes:
                source_has_extra = True
            elif g in other_genes and g not in my_genes:
                other_has_extra = True
        return source_has_extra and other_has_extra

    def __lt__(self, other):
        return self.symbol < other.symbol

    def __str__(self):
        return self.symbol

    @staticmethod
    def get_upper_case_lookup():
        """Map upper-cased symbol -> stored symbol, for case-insensitive matching."""
        return dict(GeneSymbol.objects.annotate(uc_symbol=Upper("symbol")).values_list("uc_symbol", "symbol"))
class GeneSymbolAlias(TimeStampedModel):
    """ Gene Aliases record keep track of "source" and are from:
        NCBI:
            * Source: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/GENE_INFO/Mammalia/Homo_sapiens.gene_info.gz
            * Code: python3 manage.py import_ncbi_gene_info <file>
        HGNC:
            * Source: https://www.genenames.org/cgi-bin/download
            * Code: python3 manage.py hgnc_gene_symbols_import <file>
        UCSC: We no longer use UCSC aliases, they will only exist upgraded legacy systems
            * Source: https://genome.ucsc.edu/cgi-bin/hgTables?command=start export kgAlias table
            * Code: N/A - obsolete
    """
    alias = CITextField()  # the alternative symbol; gene_symbol is the main one
    gene_symbol = models.ForeignKey(GeneSymbol, on_delete=CASCADE)
    source = models.CharField(max_length=1, choices=GeneSymbolAliasSource.choices)
    user = models.ForeignKey(User, null=True, on_delete=SET_NULL)
    description = models.TextField(null=True)

    class Meta:
        unique_together = ('alias', 'gene_symbol')

    @property
    def match_info(self) -> str:
        """Human-readable explanation of the alias relationship."""
        return f"{self.alias} is an alias for {self.gene_symbol_id} ({self.get_source_display()})"

    def __str__(self):
        return f"{self.gene_symbol_id} : {self.match_info}"

    def get_absolute_url(self):
        """ So search sends it to the symbol """
        return reverse("view_gene_symbol", kwargs={"gene_symbol": self.gene_symbol_id})

    @staticmethod
    def get_upper_case_lookup():
        # NOTE(review): unlike GeneSymbol.get_upper_case_lookup, keys here are
        # the raw stored alias values (no Upper() annotation) — confirm whether
        # upper-casing was intended, or whether CITextField storage makes it moot.
        return {a: (gs, alias_id) for a, gs, alias_id in GeneSymbolAlias.objects.values_list("alias", "gene_symbol", "id")}
@dataclass
@total_ordering
class GeneSymbolAliasSummary:
    """One direction of an alias relationship, as seen from a given GeneSymbol.

    Instances sort by other_symbol (total_ordering fills in the remaining
    comparisons from __lt__ and the dataclass-generated __eq__).
    """
    other_obj: Optional[GeneSymbol]  # None when the other symbol has no GeneSymbol row
    other_symbol: str
    source: str  # HGNC etc
    my_symbol_is_main: bool  # true if the other symbol is an alias for this symbol, false if this symbol is an alias for the other
    different_genes: bool  # if true, then this should only be considered an alias with a proviso, and not used in automatic alias calculations

    def __lt__(self, other):
        return self.other_symbol < other.other_symbol

    @property
    def other_symbol_in_database(self) -> bool:
        """True when the other symbol exists as a GeneSymbol row."""
        return self.other_obj is not None
class GeneSymbolAliasesMeta:
    """Collects every alias relationship (in both directions) for one GeneSymbol."""

    def __init__(self, gene_symbol: GeneSymbol):
        self.gene_symbol = gene_symbol
        self.alias_list: List[GeneSymbolAliasSummary] = list()
        symbol = self.gene_symbol.symbol
        # Relationships where OUR symbol is stored as the alias: the other
        # (main) symbol always exists as a GeneSymbol row via the FK.
        for alias in GeneSymbolAlias.objects.filter(alias=symbol):
            self.alias_list.append(
                GeneSymbolAliasSummary(
                    other_obj=alias.gene_symbol,
                    other_symbol=alias.gene_symbol.symbol,
                    source=alias.get_source_display(),
                    my_symbol_is_main=False,
                    different_genes=self.gene_symbol.has_different_genes(alias.gene_symbol)
                )
            )
        # Relationships where OUR symbol is the main one: the alias string
        # may or may not exist as its own GeneSymbol row.
        for alias in GeneSymbolAlias.objects.filter(gene_symbol=self.gene_symbol):
            other_gene_symbol = GeneSymbol.objects.filter(symbol=alias.alias).first()
            different_genes = False
            if other_gene_symbol:
                different_genes = self.gene_symbol.has_different_genes(other_gene_symbol)
            self.alias_list.append(
                GeneSymbolAliasSummary(
                    other_obj=other_gene_symbol,
                    other_symbol=alias.alias,
                    source=alias.get_source_display(),
                    my_symbol_is_main=True,
                    different_genes=different_genes
                )
            )

    @lazy
    def genes(self) -> Set['Gene']:
        """
        Returns a set of genes associated with all safe aliases to/from the primary Gene Symbol.
        (Even though we only look at "safe" aliases, e.g. ones where each symbol must be a subset of the other,
        looking through these aliases still catches where RefSeq assigned a Gene ID to both but Ensembl only
        assigned their Gene ID to one and ignored the other)
        """
        gene_set: Set[Gene] = set(self.gene_symbol.genes)
        for alias_summary in self.alias_list:
            if not alias_summary.different_genes and alias_summary.other_obj:
                gene_set = gene_set.union(alias_summary.other_obj.genes)
        return gene_set

    @lazy
    def alias_symbol_strs(self) -> List[str]:
        """This symbol plus every "safe" (not different_genes) alias symbol, sorted."""
        gene_symbol_strs: Set[str] = {self.gene_symbol.symbol}
        for alias_summary in self.alias_list:
            if not alias_summary.different_genes:
                gene_symbol_strs.add(alias_summary.other_symbol)
        return list(sorted(gene_symbol_strs))

    @lazy
    def aliases_out(self) -> List[GeneSymbolAliasSummary]:
        # Relationships where the other symbol is the main one (we are the alias).
        return list(sorted([alias for alias in self.alias_list if not alias.my_symbol_is_main]))

    @lazy
    def aliases_in(self) -> List[GeneSymbolAliasSummary]:
        # Relationships where we are the main symbol (others alias to us).
        return list(sorted([alias for alias in self.alias_list if alias.my_symbol_is_main]))
class GeneAnnotationImport(TimeStampedModel):
    """ A GTF file imported via 'python3 manage import_gene_annotation'
        Many gene/transcript versions are shared among GTF annotations, so a GeneVersion/TranscriptVersion is only
        created the first time it's seen (linked back to input which created it via 'import_source') """
    annotation_consortium = models.CharField(max_length=1, choices=AnnotationConsortium.choices)
    genome_build = models.ForeignKey(GenomeBuild, on_delete=CASCADE)
    url = models.TextField()  # source URL of the imported GTF

    def __str__(self):
        return self.url
class Gene(models.Model):
""" A stable identifier - build independent - has build specific versions with gene details """
FAKE_GENE_ID_PREFIX = "unknown_" # Legacy from when we allowed inserting GenePred w/o GFF3
identifier = models.TextField(primary_key=True)
annotation_consortium = models.CharField(max_length=1, choices=AnnotationConsortium.choices)
summary = models.TextField(null=True, blank=True) # Only used by RefSeq
@property
def is_legacy(self):
""" Required internally, but probably shouldn't be shown to the user """
return self.identifier.startswith(Gene.FAKE_GENE_ID_PREFIX)
def get_external_url(self):
if self.annotation_consortium == AnnotationConsortium.REFSEQ:
return f"https://www.ncbi.nlm.nih.gov/gene/{self.identifier}"
if self.annotation_consortium == AnnotationConsortium.ENSEMBL:
return f"https://ensembl.org/Homo_sapiens/Gene/Summary?g={self.identifier}"
raise ValueError(f"Unknown external url for {self}")
def latest_gene_version(self, genome_build: GenomeBuild):
return self.geneversion_set.filter(genome_build=genome_build).order_by("-version").first()
def get_gene_symbol(self, genome_build: GenomeBuild) -> GeneSymbol:
return self.latest_gene_version(genome_build).gene_symbol
def get_symbols(self) -> QuerySet:
""" This can change over time as versions are assigned different symbols... """
return GeneSymbol.objects.filter(geneversion__gene=self).distinct()
def has_versions(self):
# RefSeq doesn't have gene versions
return self.annotation_consortium == AnnotationConsortium.ENSEMBL
def get_absolute_url(self):
return reverse("view_gene", kwargs={"gene_id": self.pk})
@staticmethod
def known_gene_ids(annotation_consortium=None):
qs = Gene.objects.all()
if annotation_consortium:
qs = qs.filter(annotation_consortium=annotation_consortium)
return set(qs.values_list("identifier", flat=True))
@staticmethod
def delete_orphaned_fake_genes():
used_genes = TranscriptVersion.objects.filter(gene_version__gene__identifier__startswith=Gene.FAKE_GENE_ID_PREFIX).values_list("gene_version__gene")
qs = Gene.objects.filter(identifier__startswith=Gene.FAKE_GENE_ID_PREFIX).exclude(identifier__in=used_genes)
ret = qs.delete()
if ret:
print(f"Deleted orphaned {Gene.FAKE_GENE_ID_PREFIX} records:")
print(ret)
def get_vep_canonical_transcript(self, variant_annotation_version: 'VariantAnnotationVersion') -> Optional['Transcript']:
""" This may be slow. It requires an annotated (non-ref) variant in the gene """
vta = self.varianttranscriptannotation_set.filter(version=variant_annotation_version, canonical=True).first()
transcript = None
if vta:
transcript = vta.transcript
return transcript
def __str__(self):
if self.annotation_consortium == AnnotationConsortium.REFSEQ:
gene_id_summary = | |
Set(Succ(x)) @ (4, REPLACE, 2, 3)
(Set(x) >> Set(Succ(x))) @ (5, DEDUCE)
All(x_, Set(x_) >> Set(Succ(x_))) @ ("successor_is_set", CLOSING, 5)
# infinity
clear()
# Axiom of infinity: some set contains Empty() and is closed under Succ.
Exist(a_, (Set(a_) & (Empty() *in_* a_)) & All(x_, (x_ *in_* a_) >> (Succ(x_) *in_* a_))) @ ("infinity", AXIOM)
# choice
clear()
# Axiom of choice: a (global) choice function G picks an element of every nonempty set.
Exist(G_, Function(G_) & All(a_, (Set(a_) & Exist(x_, x_ *in_* a_)) >> (G_(a_) *in_* a_))) @ ("choice", AXIOM)
# identity
# Defines Identity(A) = { (a, a) : a in A } and proves the membership
# criterion "x in Identity(A) iff some a in A has x == (a, a)".
# NOTE(review): proof-block indentation reconstructed — DEDUCE is assumed to
# discharge the hypothesis of the preceding `with` block; confirm against the
# prover's conventions.
clear()
UniquelyExist(D, All(x_, (x_ *in_* D) == (Set(x_) & Exist(a_, (a_ *in_* A) & (x_ == OrderedPair(a_, a_)))))) @ (0, DEFINE_CLASS, D)
All(A_, UniquelyExist(D, All(x_, (x_ *in_* D) == (Set(x_) & Exist(a_, (a_ *in_* A_) & (x_ == OrderedPair(a_, a_))))))) @ (1, CLOSING, 0)
Identity = make_function("identity")
All(A_, x_, (x_ *in_* Identity(A_)) == (Set(x_) & Exist(a_, (a_ *in_* A_) & (x_ == OrderedPair(a_, a_))))) @ (2, DEFINE_FUNCTION, "identity", 1)
# Forward direction: a witness a in A with x == (a, a) implies membership.
with Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_))) @ 3:
    ((a *in_* A) & (x == OrderedPair(a, a))) @ (4, LET, a, 3)
    (a *in_* A) @ (5, TAUTOLOGY, 4)
    Set(a) @ (6, PUT_THEOREM, "set_condition", A, 5)
    Set(OrderedPair(a, a)) @ (7, BY_THEOREM, "ordered_pair_is_set", 6)
    (x == OrderedPair(a, a)) @ (8, TAUTOLOGY, 4)
    Set(x) @ (9, REPLACE, 7, 8)
    (Set(x) & Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (10, TAUTOLOGY, 3, 9)
    (x *in_* Identity(A)) @ (11, BICONDITION, 2, 10)
(Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_))) >> (x *in_* Identity(A))) @ (12, DEDUCE)
# Backward direction: membership yields a witness.
with (x *in_* Identity(A)) @ 13:
    (Set(x) & Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (14, BICONDITION, 2, 13)
    Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_))) @ (15, TAUTOLOGY, 14)
((x *in_* Identity(A)) >> Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (16, DEDUCE)
((x *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (17, TAUTOLOGY, 16, 12)
All(A_, x_, (x_ *in_* Identity(A_)) == Exist(a_, (a_ *in_* A_) & (x_ == OrderedPair(a_, a_)))) @ ("identity", CLOSING, 17)
# element of identity
# Any x in Identity(A) is an ordered pair whose left and right components coincide.
# NOTE(review): proof-block indentation reconstructed — DEDUCE assumed to
# discharge the `with` hypothesis; confirm against the prover's conventions.
clear()
with (x *in_* Identity(A)) @ 0:
    ((x *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (1, BY_THEOREM, "identity")
    Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_))) @ (2, TAUTOLOGY, 0, 1)
    ((a *in_* A) & (x == OrderedPair(a, a))) @ (3, LET, a, 2)
    (a *in_* A) @ (4, TAUTOLOGY, 3)
    # NOTE(review): step label 6 is used for FOUND and then reassigned by the
    # TAUTOLOGY two lines below (which still references the FOUND result). The
    # parallel proof in "identity is relation" numbers these steps 5/6/7 —
    # confirm the label reuse here is intentional and accepted by the checker.
    Exist(C_, a *in_* C_) @ (6, FOUND, A, 4)
    (Set(a) == Exist(C_, a *in_* C_)) @ (5, BY_THEOREM, "set")
    Set(a) @ (6, TAUTOLOGY, 5, 6)
    ((Set(a) & Set(a)) & (x == OrderedPair(a, a))) @ (7, TAUTOLOGY, 3, 6)
    Exist(b_, (Set(a) & Set(b_)) & (x == OrderedPair(a, b_))) @ (8, FOUND, a, 7)
    Exist(a_, b_, (Set(a_) & Set(b_)) & (x == OrderedPair(a_, b_))) @ (9, FOUND, a, 8)
    (Arity2(x) == Exist(a_, b_, (Set(a_) & Set(b_)) & (x == OrderedPair(a_, b_)))) @ (10, BY_THEOREM, "arity_2")
    Arity2(x) @ (11, TAUTOLOGY, 9, 10)
    ((Set(Left(x)) & Set(Right(x))) & (x == OrderedPair(Left(x), Right(x)))) @ (12, BY_THEOREM, "right", 11)
    (x == OrderedPair(a, a)) @ (13, TAUTOLOGY, 3)
    (x == OrderedPair(Left(x), Right(x))) @ (14, TAUTOLOGY, 12)
    (OrderedPair(a, a) == OrderedPair(Left(x), Right(x))) @ (15, BY_EQUIVALENCE, 13, 14)
    ((a == Left(x)) & (a == Right(x))) @ (16, BY_THEOREM, "comparison_of_ordered_pairs", 6, 12, 15)
    (a == Left(x)) @ (17, TAUTOLOGY, 16)
    (a == Right(x)) @ (18, TAUTOLOGY, 16)
    (Left(x) == Right(x)) @ (19, BY_EQUIVALENCE, 17, 18)
    ((Arity2(x) & (x == OrderedPair(Left(x), Right(x)))) & (Left(x) == Right(x))) @ (20, TAUTOLOGY, 19, 14, 11)
((x *in_* Identity(A)) >> ((Arity2(x) & (x == OrderedPair(Left(x), Right(x)))) & (Left(x) == Right(x)))) @ (21, DEDUCE)
All(A_, x_, (x_ *in_* Identity(A_)) >> ((Arity2(x_) & (x_ == OrderedPair(Left(x_), Right(x_)))) & (Left(x_) == Right(x_)))) @ ("element_of_identity", CLOSING, 21)
# identity is relation
# Every element of Identity(A) is an ordered pair, so Identity(A) is a relation.
# NOTE(review): proof-block indentation reconstructed — DEDUCE assumed to
# discharge the `with` hypothesis; confirm against the prover's conventions.
clear()
with (x *in_* Identity(A)) @ 0:
    ((x *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_)))) @ (1, BY_THEOREM, "identity")
    Exist(a_, (a_ *in_* A) & (x == OrderedPair(a_, a_))) @ (2, TAUTOLOGY, 0, 1)
    ((a *in_* A) & (x == OrderedPair(a, a))) @ (3, LET, a, 2)
    (a *in_* A) @ (4, TAUTOLOGY, 3)
    Exist(C_, a *in_* C_) @ (5, FOUND, A, 4)
    (Set(a) == Exist(C_, a *in_* C_)) @ (6, BY_THEOREM, "set")
    Set(a) @ (7, TAUTOLOGY, 5, 6)
    ((Set(a) & Set(a)) & (x == OrderedPair(a, a))) @ (8, TAUTOLOGY, 7, 3)
    Exist(b_, (Set(a) & Set(b_)) & (x == OrderedPair(a, b_))) @ (9, FOUND, a, 8)
    Exist(a_, b_, (Set(a_) & Set(b_)) & (x == OrderedPair(a_, b_))) @ (10, FOUND, a, 9)
    (Arity2(x) == Exist(a_, b_, (Set(a_) & Set(b_)) & (x == OrderedPair(a_, b_)))) @ (11, BY_THEOREM, "arity_2")
    Arity2(x) @ (12, TAUTOLOGY, 10, 11)
((x *in_* Identity(A)) >> Arity2(x)) @ (13, DEDUCE)
All(x_, (x_ *in_* Identity(A)) >> Arity2(x_)) @ (14, CLOSING, 13)
(Relation(Identity(A)) == All(x_, (x_ *in_* Identity(A)) >> Arity2(x_))) @ (15, BY_THEOREM, "relation")
Relation(Identity(A)) @ (16, TAUTOLOGY, 14, 15)
All(A_, Relation(Identity(A_))) @ ("identity_is_relation", CLOSING, 16)
# identity is function
clear()
Relation(Identity(A)) @ (10, BY_THEOREM, "identity_is_relation")
with (((a *in_* Identity(A)) & (b *in_* Identity(A))) & (Left(a) == Left(b))) @ 0:
(a *in_* Identity(A)) @ (1, TAUTOLOGY, 0)
((a *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (a == OrderedPair(a_, a_)))) @ (2, BY_THEOREM, "identity")
Exist(a_, (a_ *in_* A) & (a == OrderedPair(a_, a_))) @ (3, TAUTOLOGY, 1, 2)
((u *in_* A) & (a == OrderedPair(u, u))) @ (4, LET, u, 3)
(u *in_* A) @ (5, TAUTOLOGY, 4)
Exist(C_, u *in_* C_) @ (6, FOUND, A, 5)
(Set(u) == Exist(C_, u *in_* C_)) @ (7, BY_THEOREM, "set")
Set(u) @ (8, TAUTOLOGY, 6, 7)
((Relation(Identity(A)) == All(x_, (x_ *in_* Identity(A)) >> Arity2(x_)))) @ (11, BY_THEOREM, "relation")
All(x_, (x_ *in_* Identity(A)) >> Arity2(x_)) @ (12, TAUTOLOGY, 11, 10)
Arity2(a) @ (13, BY_THEOREM, 12, 1)
((Set(Left(a)) & Set(Right(a))) & (a == OrderedPair(Left(a), Right(a)))) @ (14, BY_THEOREM, "right", 13)
(a == OrderedPair(u, u)) @ (15, TAUTOLOGY, 4)
(a == OrderedPair(Left(a), Right(a))) @ (36, TAUTOLOGY, 14)
(OrderedPair(u, u) == OrderedPair(Left(a), Right(a))) @ (17, BY_EQUIVALENCE, 15, 36)
(Set(u) & Set(Left(a)) & Set(Right(a))) @ (18, TAUTOLOGY, 14, 8)
((u == Left(a)) & (u == Right(a))) @ (19, BY_THEOREM, "comparison_of_ordered_pairs", 18, 17)
(u == Left(a)) @ (20, TAUTOLOGY, 19)
(u == Right(a)) @ (21, TAUTOLOGY, 19)
(Left(a) == Right(a)) @ (30, BY_EQUIVALENCE, 20, 21)
(b *in_* Identity(A)) @ (1, TAUTOLOGY, 0)
((b *in_* Identity(A)) == Exist(a_, (a_ *in_* A) & (b == OrderedPair(a_, a_)))) @ (2, BY_THEOREM, "identity")
Exist(a_, (a_ *in_* A) & (b == OrderedPair(a_, a_))) @ (3, TAUTOLOGY, 1, 2)
((v *in_* A) & (b == OrderedPair(v, v))) @ (4, LET, v, 3)
(v *in_* A) @ (5, TAUTOLOGY, 4)
Exist(C_, v *in_* C_) @ (6, FOUND, A, 5)
(Set(v) == Exist(C_, v *in_* C_)) @ (7, BY_THEOREM, "set")
Set(v) @ (8, TAUTOLOGY, 6, 7)
((Relation(Identity(A)) == All(x_, (x_ *in_* Identity(A)) >> Arity2(x_)))) @ (11, BY_THEOREM, "relation")
All(x_, (x_ *in_* Identity(A)) >> Arity2(x_)) @ (12, TAUTOLOGY, 11, 10)
Arity2(b) @ (13, BY_THEOREM, 12, 1)
(((Set(Left(b))) & Set(Right(b))) & (b == OrderedPair(Left(b), Right(b)))) @ (14, BY_THEOREM, "right", 13)
(b == OrderedPair(v, v)) @ (15, TAUTOLOGY, 4)
(b == OrderedPair(Left(b), Right(b))) @ (16, TAUTOLOGY, 14)
(OrderedPair(v, v) == OrderedPair(Left(b), Right(b))) @ (17, BY_EQUIVALENCE, 15, 16)
(Set(v) & Set(Left(b)) & Set(Right(b))) @ (18, TAUTOLOGY, 14, 8)
((v == Left(b)) & (v == Right(b))) @ (19, BY_THEOREM, "comparison_of_ordered_pairs", 18, 17)
(v == Left(b)) @ (20, TAUTOLOGY, 19)
(v == Right(b)) @ (21, TAUTOLOGY, 19)
(Left(b) == Right(b)) @ (22, BY_EQUIVALENCE, 20, 21)
(Left(a) == Left(b)) @ (23, TAUTOLOGY, 0)
(Right(a) == Right(b)) @ (24, BY_EQUIVALENCE, 23, 22, 30)
(a == OrderedPair(Left(b), Right(a))) @ (40, REPLACE, 36, 23)
(a == OrderedPair(Left(b), Right(b))) @ (41, REPLACE, 40, 24)
(a == b) @ (42, BY_EQUIVALENCE, 16, 41)
((((a *in_* Identity(A)) & (b *in_* Identity(A))) & (Left(a) == Left(b))) >> (a == b)) @ (43, DEDUCE)
All(A_, a_, b_, ((((a_ *in_* Identity(A_)) & (b_ *in_* Identity(A_))) & (Left(a_) == Left(b_))) >> (a_ == b_))) @ (44, CLOSING, 43)
All(a_, b_, ((((a_ *in_* Identity(A)) & (b_ *in_* Identity(A))) & (Left(a_) == Left(b_))) >> (a_ == b_))) @ (45, PUT, A, 44)
Relation(Identity(A)) @ (46, PUT, A, "identity_is_relation")
(Relation(Identity(A)) & All(a_, | |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the Kramers-Kronig Calculator software package.
#
# Copyright (c) 2013 <NAME>, <NAME>
#
# The software is licensed under the terms of the zlib/libpng license.
# For details see LICENSE.txt
"""This module implements a GUI using the wxPython toolkit."""
import logging
logger = logging.getLogger(__name__)
if __name__ == '__main__':
import sys
logging.basicConfig(level=logging.DEBUG)
logging.StreamHandler(stream=sys.stdout)
import wx
import wx.lib.plot as plot
import numpy
import os
import kk, data
# scipy.optimize is optional: it is only needed for the "fix distortions"
# feature, so the GUI degrades gracefully when SciPy is not installed.
try:
    import scipy.optimize
    SCIPY_FLAG = True
except ImportError:
    SCIPY_FLAG = False
    logger.info('Failed to import the scipy.optimize module - disabling the \'fix distortions\' checkbox.')
class MyFrame(wx.Frame):
    def __init__(self):
        """Build the main window.

        Creates the File/Help menus, the left-hand control panel
        (near-edge data, material, calculation boxes) and the right-hand
        plot canvas, and initialises all state attributes used by the
        data-processing methods.
        """
        wx.Frame.__init__(self, None, wx.ID_ANY, "Kramers-Kronig Calculator", size=(500, 800))
        # Initialise variables
        self.dirname = ''
        self.raw_file = None
        self.total_asf = None
        self.total_Im_coeffs = None
        self.merged_Im = None
        self.nexafs_CutOut = []
        self.MolecularMass = 1
        self.asf_bg = None
        #New set of variables to initialise. All those above might want to be removed.
        self.ChemicalFormula = None
        self.Stoichiometry = None
        self.Relativistic_Correction = None
        self.NearEdgeData = None
        self.splice_ind = None
        self.ASF_E = None
        self.ASF_Data = None
        self.Full_E = None
        self.Imaginary_Spectrum = None
        self.KK_Real_Spectrum = None
        # Setting up the menus.
        filemenu = wx.Menu()
        filemenu.Append(wx.ID_OPEN, "L&oad", " Load photoabsorption data from file")
        filemenu.AppendSeparator()
        filemenu.Append(wx.ID_SAVE, "&Save", " Export results to file")
        exportmenu = wx.Menu()
        # IDs 201/202 select the export conversion in OnSave.
        exportmenu.Append(201,"Photoabsorption", " Export X-ray absorption data")
        exportmenu.Append(202,"Refractive Index", " Export beta and delta")
        filemenu.AppendMenu(200,"Export",exportmenu) # Adding the "exportmenu" to the filemenu
        filemenu.AppendSeparator()
        filemenu.Append(wx.ID_EXIT, "E&xit", " Terminate the program")
        helpmenu = wx.Menu()
        helpmenu.Append(wx.ID_HELP, "&Help", " How to use this program")
        helpmenu.AppendSeparator()
        helpmenu.Append(wx.ID_ABOUT, "&About", " Information about this program")
        # Creating the menubar.
        menuBar = wx.MenuBar()
        menuBar.Append(filemenu, "&File") # Adding the "filemenu" to the MenuBar
        menuBar.Append(helpmenu, "&Help") # Adding the "helpmenu" to the MenuBar
        self.SetMenuBar(menuBar) # Adding the MenuBar to the Frame content.
        # NOTE(review): wx.EVT_MENU(...) and AppendMenu are the classic
        # (wxPython 2.x) API style -- confirm against the wx version in use.
        wx.EVT_MENU(self, wx.ID_OPEN, self.OnOpen)
        wx.EVT_MENU(self, wx.ID_SAVE, self.OnSave)
        wx.EVT_MENU(self, 201, self.OnSave) # will set convert_to="photoabsorption" when ID is recognised
        wx.EVT_MENU(self, 202, self.OnSave) # will set convert_to="refractive_index" when ID is recognised
        wx.EVT_MENU(self, wx.ID_EXIT, self.OnExit)
        wx.EVT_MENU(self, wx.ID_ABOUT, self.OnAbout)
        wx.EVT_MENU(self, wx.ID_HELP, self.OnHelp)
        Sizer1 = wx.BoxSizer(wx.HORIZONTAL) # create outer sizer
        SizerL = wx.BoxSizer(wx.VERTICAL) # create left-hand sizer for controls
        SizerR = wx.BoxSizer(wx.VERTICAL) # create right-hand sizer for plots
        ############################Data box
        DataBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Near-Edge Data"), wx.VERTICAL)
        self.FileText = wx.StaticText(self, -1, "File: (None)")
        DataBox.Add(self.FileText, 1, wx.GROW)
        DataTypeLabel = wx.StaticText(self, -1, "Data Type: ")
        self.DataTypeCombo = wx.ComboBox(self, -1, value='Photoabsorption', style=wx.CB_READONLY)
        self.DataTypeCombo.Append('Photoabsorption')
        self.DataTypeCombo.Append('Beta')
        self.DataTypeCombo.Append('Scattering Factor')
        self.DataTypeCombo.Bind(wx.EVT_COMBOBOX, self.MergeAdd_check)
        DataTypeSizer = wx.BoxSizer(wx.HORIZONTAL)
        DataTypeSizer.Add(DataTypeLabel)
        DataTypeSizer.Add(self.DataTypeCombo, 2, wx.GROW)
        DataBox.Add(DataTypeSizer, 1, wx.GROW)
        # Splice fields hold the energies at which near-edge data is merged
        # with the tabulated spectrum; "Start"/"End" are placeholder values.
        SpliceSizer = wx.BoxSizer(wx.HORIZONTAL)
        self.SpliceText1 = wx.TextCtrl(self, -1, "Start", style=wx.TE_PROCESS_ENTER)
        self.SpliceText1.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
        self.SpliceText1.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
        SpliceSizer.Add(self.SpliceText1, 1)
        self.SpliceText2 = wx.TextCtrl(self, -1, "End", style=wx.TE_PROCESS_ENTER)
        self.SpliceText2.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
        self.SpliceText2.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
        SpliceSizer.Add(self.SpliceText2, 1)
        DataBox.Add(SpliceSizer, 1, wx.GROW)
        # Background_CloseSizer = wx.BoxSizer(wx.HORIZONTAL)
        # self.InvertDataCheckBox = wx.CheckBox(self, -1, "Invert Data")
        # self.InvertDataCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
        # DataBox.Add(self.InvertDataCheckBox, 0)
        self.AddBackgroundCheckBox = wx.CheckBox(self, -1, "Add background")
        self.AddBackgroundCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
        self.AddBackgroundCheckBox.Disable()
        self.AddBackgroundCheckBox.SetToolTip(wx.ToolTip("Not implemented"))
        DataBox.Add(self.AddBackgroundCheckBox, 0)
        self.FixDistortionsCheckBox = wx.CheckBox(self, -1, "Fix distortions")
        self.FixDistortionsCheckBox.Bind(wx.EVT_CHECKBOX, self.Splice_Text_check)
        # "Fix distortions" needs scipy.optimize (see SCIPY_FLAG at module top).
        if not SCIPY_FLAG:
            self.FixDistortionsCheckBox.Disable()
            self.FixDistortionsCheckBox.SetToolTip(wx.ToolTip("Install the SciPy module to use this feature"))
        DataBox.Add(self.FixDistortionsCheckBox, 0)
        # Background_CloseSizer.Add(self.AddBackgroundCheckBox, 0)
        # self.AddBackgroundCheckBox.Bind(wx.EVT_CHECKBOX, self.MergeAdd_check)
        # Background_CloseSizer.AddStretchSpacer(1)
        # self.CloseFile = wx.Button(self, -1, "X", style= wx.BU_EXACTFIT)
        # Background_CloseSizer.Add(self.CloseFile, 0)
        # DataBox.Add(Background_CloseSizer, 1, wx.GROW)
        ############################Material box
        self.MaterialBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Material"), wx.VERTICAL)
        DensitySizer = wx.BoxSizer(wx.HORIZONTAL)
        DensitySizer.Add(wx.StaticText(self, -1, "Density: "))
        self.DensityText = wx.TextCtrl(self, -1, "1", style=wx.TE_PROCESS_ENTER)
        self.DensityText.Bind(wx.EVT_KILL_FOCUS, self.Splice_Text_check)
        self.DensityText.Bind(wx.EVT_TEXT_ENTER, self.Splice_Text_check)
        DensitySizer.Add(self.DensityText, 1)
        DensitySizer.Add(wx.StaticText(self, -1, " g/ml"))
        self.MaterialBox.Add(DensitySizer, 0)
        StoichiometrySizer = wx.BoxSizer(wx.HORIZONTAL)
        StoichiometrySizer.Add(wx.StaticText(self, -1, "Stoichiometry: "))
        self.StoichiometryText = wx.TextCtrl(self, -1, "", style=wx.TE_PROCESS_ENTER)
        self.StoichiometryText.Bind(wx.EVT_KILL_FOCUS, self.Stoichiometry_Text_check)
        self.StoichiometryText.Bind(wx.EVT_TEXT_ENTER, self.Stoichiometry_Text_check)
        StoichiometrySizer.Add(self.StoichiometryText, 1)
        self.MaterialBox.Add(StoichiometrySizer, 0)
        ############################Calc box
        CalcBox = wx.StaticBoxSizer(wx.StaticBox(self, label="Calculation"), wx.VERTICAL)
        CalcButton = wx.Button(self, -1, "Calculate")
        CalcBox.Add(CalcButton, 1, wx.GROW)
        CalcButton.Bind(wx.EVT_BUTTON, self.calculate)
        # Assemble left column, then the plot canvas on the right.
        SizerL.Add(DataBox, 0, wx.GROW)
        SizerL.Add(self.MaterialBox, 1, wx.GROW)
        SizerL.AddStretchSpacer(1)
        SizerL.Add(CalcBox, 0, wx.GROW)
        self.PlotAxes = plot.PlotCanvas(self)
        SizerR.Add(self.PlotAxes, 1, wx.GROW)
        #SizerR.Add(self.Rplot, 1, wx.GROW)
        # enable the zoom feature (drag a box around area of interest)
        self.PlotAxes.SetEnableZoom(True)
        #self.Rplot.SetEnableZoom(True)
        Sizer1.Add(SizerL, 1, wx.GROW)
        Sizer1.Add(SizerR, 3, wx.GROW)
        self.SetAutoLayout(True)
        self.SetSizer(Sizer1) # add outer sizer to frame
        self.Fit()
        self.Show(True)
        self.plot_data()
        #self.Test()
    def Test(self):
        """Convenience function for repetitive testing.

        Loads a known data file and drives the processing chain without
        going through the file dialogs.  Development aid only (the call
        in __init__ is commented out).
        """
        self.filename = "NC-Xy_norm_bgsub.txt"
        self.dirname = "data"
        self.FileText.SetLabel("File: "+self.filename)
        #self.raw_file = self.LoadData(os.path.join(self.dirname, self.filename))
        self.AddBackgroundCheckBox.SetValue(True)
        self.combine_data()
        # NOTE(review): self.PP_AlgorithmRadio is not created in __init__, so
        # calling Test() as-is raises AttributeError -- confirm whether this
        # widget was removed from the UI.
        self.PP_AlgorithmRadio.SetValue(True)
        self.plot_data()
def OnAbout(self, e):
d = wx.MessageDialog(self, " A utility for calculating the real part of soft X-ray spectra.\nWritten by Dr. <NAME> at the Paul Scherrer Institut", "About KKcalc", wx.OK)
# Create a message dialog box
d.ShowModal() # Shows it
d.Destroy() # finally destroy it when finished.
    def OnExit(self, e):
        """Menu handler: close the main frame, ending the application."""
        self.Close(True) # Close the frame.
def OnOpen(self, e):
"""Load data from a file."""
success = False
dlg = wx.FileDialog(self, "Choose a file", self.dirname, "", "*.*", wx.FD_OPEN)
if dlg.ShowModal() == wx.ID_OK:
success = True
self.dirname, self.filename = os.path.split(dlg.GetPath())
dlg.Destroy()
if success:
self.FileText.SetLabel("File: "+self.filename)
self.raw_file = data.load_data(os.path.join(self.dirname, self.filename))
self.combine_data()
self.plot_data()
def OnHelp(self, e):
logger.info("Opening web browser for help files.")
import webbrowser
webbrowser.open("README.rst")
def OnSave(self, e):
"""Write data to file."""
convert_to = None
if e.Id == 201:
convert_to = "photoabsorption"
elif e.Id == 202:
convert_to = "refractive_index"
logger.info("Save")
fd = wx.FileDialog(self, style=wx.FD_SAVE|wx.FD_OVERWRITE_PROMPT)
if fd.ShowModal()==wx.ID_OK:
metadata = {"Density": float(self.DensityText.GetValue()), "Molecular Formula":self.StoichiometryText.GetValue(),"Formula Mass":data.calculate_FormulaMass(self.Stoichiometry)}
data.export_data(fd.GetPath(), numpy.transpose(numpy.vstack((self.Full_E,self.KK_Real_Spectrum,data.coeffs_to_ASF(self.Full_E,self.Imaginary_Spectrum)))), header_info=metadata, convert_to=convert_to)
def combine_data(self):
"""Combine users near-edge data with extended spectrum data."""
self.Full_E = None
self.Imaginary_Spectrum = None
if self.raw_file is not None:
logger.info("Convert to scattering factors")
self.NearEdgeData = data.convert_data(self.raw_file,self.DataTypeCombo.GetValue(),'ASF')
# if self.InvertDataCheckBox.GetValue():
# self.NearEdgeData[:,1] = numpy.abs(self.NearEdgeData[:,1] - 2*numpy.mean(self.NearEdgeData[:,1]))
logger.info("Combine Data")
# Get splice points
splice_eV = numpy.array([10.0, 30000.0]) # Henke limits
if self.SpliceText1.GetValue() == "Start":
if self.raw_file is not None:
splice_eV[0] = self.NearEdgeData[0, 0]
else:
splice_eV[0] = float(self.SpliceText1.GetValue())
if self.SpliceText2.GetValue() == "End":
if self.raw_file is not None:
splice_eV[1] = self.NearEdgeData[-1, 0]
else:
splice_eV[1] = float(self.SpliceText2.GetValue())
if self.raw_file is not None and self.ASF_Data is None:
self.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), plotting_extras=True)
elif self.raw_file is None and self.ASF_Data is not None:
self.Full_E = self.ASF_E
self.Imaginary_Spectrum = self.ASF_Data
elif self.raw_file is not None and self.ASF_Data is not None:
self.Full_E, self.Imaginary_Spectrum, self.NearEdgeData, self.splice_ind = data.merge_spectra(self.NearEdgeData, self.ASF_E, self.ASF_Data, merge_points=splice_eV, add_background=self.AddBackgroundCheckBox.GetValue(), fix_distortions=self.FixDistortionsCheckBox.GetValue(), plotting_extras=True)
### get start and end Y values from nexafs and asf data
##splice_nexafs_Im = numpy.interp(splice_eV, raw_Im[:, 0], raw_Im[:, 1])
###splice_asf_Im = numpy.interp(splice_eV, self.total_asf[:, 0], self.total_asf[:, 2])
##splice_asf_Im = (data.coeffs_to_ASF(splice_eV[0],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[0])[0][-1]]),data.coeffs_to_ASF(splice_eV[1],self.total_Im_coeffs[numpy.where(self.total_E<splice_eV[1])[0][-1]]))
##cut_boolean = (splice_eV[0]<raw_Im[:, 0]) == (raw_Im[:, 0]<splice_eV[1])
### Merge Y values
##if not self.AddBackgroundCheckBox.GetValue():
##logger.info("Merge data sets")
##scale = (splice_asf_Im[1]-splice_asf_Im[0])/(splice_nexafs_Im[1]-splice_nexafs_Im[0])
##scaled_nexafs_Im = ((raw_Im[:, 1]-splice_nexafs_Im[0])*scale)+splice_asf_Im[0]
##self.asf_bg = None # We won't be using this variable this time
##else:
##logger.info("Add data sets (this will currently only work at energies below 30 keV)")
### Set up background function
### We trust this point to be just before the absorption edge
##trusted_ind = max(0, numpy.where(self.total_asf[:, 0]>splice_eV[0])[0][0]-1)
##Log_total_asf = numpy.log(self.total_asf[:, 2])
### Lets trust the 5 points before our trusted point and make an initial guess at the background function
##p = numpy.polyfit(self.total_asf[(trusted_ind-5):trusted_ind, 0], Log_total_asf[(trusted_ind-5):trusted_ind], 1)
### Now lets look for the points up util the absorption edge
##p_vals = numpy.exp(numpy.polyval(p, self.total_asf[(trusted_ind-5):-1, 0]))
##p_err = max(p_vals[0:5]-self.total_asf[(trusted_ind-5):trusted_ind, 2])
##edge_ind = numpy.where(self.total_asf[trusted_ind:-1, 2]-p_vals[4:-1]>p_err*10)
##if len(edge_ind[0])!=0:
##edge_ind = edge_ind[0][0]
##else:
##edge_ind = trusted_ind
### Redo background using the 5 points before the background point
##p = numpy.polyfit(self.total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind, 0], Log_total_asf[(trusted_ind+edge_ind-5):trusted_ind+edge_ind], 1)
##asf_bg = numpy.exp(numpy.polyval(p, raw_Im[:, 0]))
##logger.info("Background defined as: y=exp(%(p1)ex %(p0)+e)" % {"p1":p[1], "p0":p[0]})
### Apply background function
##scale = (splice_asf_Im[1]-numpy.exp(numpy.polyval(p, splice_eV[1])))/splice_nexafs_Im[1]
##scaled_nexafs_Im = raw_Im[:, 1]*scale+asf_bg
### store background data for plotting
##cut_boolean_wide = numpy.roll(cut_boolean, -1) + numpy.roll(cut_boolean, 1)
##self.asf_bg = [[trusted_ind+edge_ind-5, trusted_ind+edge_ind], numpy.vstack((raw_Im[cut_boolean_wide, 0], asf_bg[cut_boolean_wide])).T]
##nexafs_cut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T
####Merge point-wise data sets together
##asf_cut_high = self.total_asf[self.total_asf[:, 0]>splice_eV[1], :]
##asf_cut_low = self.total_asf[self.total_asf[:, 0]<splice_eV[0], :]
##self.merged_Im = numpy.vstack((asf_cut_low[:, [0, 2]], (splice_eV[0], splice_asf_Im[0]), nexafs_cut, (splice_eV[1], splice_asf_Im[1]), asf_cut_high[:, [0, 2]]))
####Merge coeff data together
##coeffs_cut_high = self.total_Im_coeffs[self.total_E[:-1]>splice_eV[1],:]
##coeffs_cut_low = self.total_Im_coeffs[self.total_E[:-1]<splice_eV[0],:]
###convert points to coeffs
##nexafs_coeffs_cut = numpy.zeros((len(nexafs_cut)+1,5))
##Y = numpy.append(numpy.insert(nexafs_cut[:,1],0,splice_asf_Im[0]),splice_asf_Im[1])
##nexafs_E = numpy.append(numpy.insert(nexafs_cut[:,0],0,splice_eV[0]),splice_eV[1])
##M = (Y[1:]-Y[:-1])/(nexafs_E[1:]-nexafs_E[:-1])
##nexafs_coeffs_cut[:,0] = M
##nexafs_coeffs_cut[:,1] = Y[:-1]-M*nexafs_E[:-1]
###assemble merged coeffs and energy values
##self.merged_Im_coeffs = numpy.vstack((coeffs_cut_low, nexafs_coeffs_cut, self.total_Im_coeffs[-coeffs_cut_high.shape[0]-2,:], coeffs_cut_high))
##self.merged_E = numpy.concatenate((self.total_E[self.total_E<splice_eV[0]], nexafs_E, self.total_E[self.total_E>splice_eV[1]]))
### Extras for plotting
##self.splice_ind = (len(asf_cut_low[:, 0]), -len(asf_cut_high[:, 0]))
##cut_boolean = (splice_eV[0]<=raw_Im[:, 0]) != (raw_Im[:, 0]<=splice_eV[1])
##self.nexafs_CutOut = numpy.vstack((raw_Im[cut_boolean, 0], scaled_nexafs_Im[cut_boolean])).T
### Previous calculation of f_1 is no longer matching displayed f_2 data
##self.KK_Real_Spectrum = None
    def plot_data(self):
        """Redraw the plot canvas from the current state.

        Reads (does not modify, except via the canvas):
        self.Full_E : vector of floats
            photon energies at which the real and imaginary scattering
            factor data will be plotted.
        self.Imaginary_Spectrum : array of float
            polynomial coefficients evaluated to give the imaginary
            scattering factor values.
        self.KK_Real_Spectrum : vector of float
            the values of the real scattering factors.

        Returns
        -------
        The GUI is updated, but nothing is returned.
        """
        logger.info("plotting data")
        # List of things to plot
        plotlist = []
        # get initial guess at X limits
        X_min = 0
        X_max = 30000
        Y_max = 1
        Y_min = 0
        if self.NearEdgeData is not None:
            X_min = self.NearEdgeData[0, 0]
            X_max = self.NearEdgeData[-1, 0]
        if self.SpliceText1.GetValue() != "Start":
            X_min = float(self.SpliceText1.GetValue())
        if self.SpliceText2.GetValue() != "End":
            X_max = float(self.SpliceText2.GetValue())
        if self.Imaginary_Spectrum is not None:
            # Scale the plotting tolerance by total electron count when the
            # stoichiometry is known.
            if self.Stoichiometry is not None:
                scale = sum([Z*count for Z, count in self.Stoichiometry])
            else:
                scale = 1.
            # NOTE(review): zip() is passed directly to PolyLine/PolyMarker
            # here and below; under Python 3 zip returns an iterator --
            # confirm the wx.lib.plot version accepts that.
            Im_energies, Im_values = data.coeffs_to_linear(self.Full_E, self.Imaginary_Spectrum, 0.001*scale)
            plotlist.append(plot.PolyLine(zip(Im_energies, Im_values), colour='black', width=1))
            # get Y limits (restricted to the splice window when available)
            if self.splice_ind is None:
                Y_max = max(Im_values)
                Y_min = min(Im_values)
            else:
                Y_max = max(Im_values[self.splice_ind[0]:self.splice_ind[1]])
                Y_min = min(Im_values[self.splice_ind[0]:self.splice_ind[1]])
            if self.NearEdgeData is not None:
                Y_max = max(self.NearEdgeData[:, 1])
                Y_min = min(self.NearEdgeData[:, 1])
                plotlist.append(plot.PolyMarker(zip(self.NearEdgeData[:, 0], self.NearEdgeData[:, 1]), colour='blue', marker='plus', size=1))
            if self.splice_ind is not None:
                # Mark the splice points in red.
                splice_values = data.coeffs_to_ASF(self.Full_E[self.splice_ind], self.Imaginary_Spectrum[[self.splice_ind[0], min(self.splice_ind[1], self.Imaginary_Spectrum.shape[0]-1)]])
                plotlist.append(plot.PolyMarker(zip(self.Full_E[self.splice_ind], splice_values), colour='red', marker='cross', size=1))
        if self.raw_file is not None and self.Imaginary_Spectrum is None:
            logger.info("plot raw data only")
            plotlist.append(plot.PolyLine(self.NearEdgeData, colour='blue', width=1)) # User data
        if self.asf_bg is not None:
            plotlist.append(plot.PolyMarker(self.total_asf[self.asf_bg[0][0]:self.asf_bg[0][1], [0, 2]], colour='red', marker='cross', size=1))
            plotlist.append(plot.PolyLine(self.asf_bg[1], colour='red', width=1))
        # Real part
        #plotlist.append(plot.PolyLine(self.total_asf[:, [0, 1]], colour='black', width=1))
        if self.KK_Real_Spectrum is not None:
            if self.splice_ind is None:
                Y_max = max(self.KK_Real_Spectrum)
                Y_min = min(self.KK_Real_Spectrum)
            else:
                Y_max = max(Y_max, max(self.KK_Real_Spectrum[self.splice_ind[0]:self.splice_ind[1]]))
                Y_min = min(Y_min, min(self.KK_Real_Spectrum[self.splice_ind[0]:self.splice_ind[1]]))
            plotlist.append(plot.PolyLine(zip(self.Full_E, self.KK_Real_Spectrum), colour='green', width=1))
        # Expand plotting limits for prettiness
        window_width = X_max-X_min
        X_max = X_max+window_width*0.1
        X_min = max(X_min-window_width*0.1, 0)
        # NOTE(review): the two "heights" below are identical and both are
        # applied, expanding Y by 20% per side -- possibly a leftover from
        # when Im and Re were plotted on separate axes; confirm intent.
        window_Im_height = Y_max-Y_min
        window_Re_height = Y_max-Y_min
        Y_max = Y_max+window_Im_height*0.1
        Y_min = Y_min-window_Im_height*0.1
        Y_max = Y_max+window_Re_height*0.1
        Y_min = Y_min-window_Re_height*0.1
        # set up text, axis and draw.  Note the y-axis is drawn from 0, not
        # Y_min, so the expanded Y_min is currently unused.
        #print plotlist
        #print X_min, X_max, Y_min, Y_max
        self.PlotAxes.Draw(plot.PlotGraphics(plotlist, '', 'Energy (eV)', 'Magnitude'), xAxis=(X_min, X_max), yAxis=(0, Y_max))
        #print "Plotlist =", len(plotlist)
    def Splice_Text_check(self, evt):
        """Event handler: re-merge the data and redraw after a splice-point
        (or related control) change."""
        self.combine_data()
        self.plot_data()
    def MergeAdd_check(self, evt):
        """Event handler: re-merge the data and redraw after the data-type
        combo box changes."""
        self.combine_data()
        self.plot_data()
def Stoichiometry_Text_check(self, evt):
if len(self.StoichiometryText.GetValue()) == 0:
self.ChemicalFormula = None
self.Stoichiometry = None
self.Relativistic_Correction = None
self.ASF_E = None
self.ASF_Data = None
else:
self.ChemicalFormula = self.StoichiometryText.GetValue()
self.Stoichiometry = data.ParseChemicalFormula(self.ChemicalFormula)
self.Relativistic_Correction = kk.calc_relativistic_correction(self.Stoichiometry)
self.ASF_E, self.ASF_Data = data.calculate_asf(self.Stoichiometry)
self.combine_data()
self.plot_data()
def calculate(self, button):
"""Calculate Button."""
logger.debug("Calculate button")
if self.Imaginary_Spectrum is not None:
logger.info("Calculate Kramers-Kronig transform (PP)")
self.KK_Real_Spectrum = | |
<reponame>hansthienpondt/ansible-networking-collections
# (c) 2020 Nokia
#
# Licensed under the BSD 3 Clause license
# SPDX-License-Identifier: BSD-3-Clause
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = """
---
author:
- "<NAME> (@HansThienpondt)"
- "<NAME> (@wisotzky)"
connection: gnmi
short_description: Provides a persistent gRPC connection for gNMI API service
description:
- This gRPC plugin provides methods to interact with the gNMI service.
- OpenConfig gNMI specification
https://github.com/openconfig/reference/blob/master/rpc/gnmi/gnmi-specification.md
- gNMI API
https://raw.githubusercontent.com/openconfig/gnmi/master/proto/gnmi/gnmi.proto
- This connection plugin provides a persistent communication channel to
remote devices using gRPC including the underlying transport (TLS).
  - The plugin binds to the gNMI gRPC service. It provides wrappers for gNMI
requests (Capabilities, Get, Set, Subscribe)
requirements:
- grpcio
- protobuf
options:
host:
description:
- Target host FQDN or IP address to establish gRPC connection.
default: inventory_hostname
vars:
- name: ansible_host
port:
type: int
description:
- Specifies the port on the remote device that listens for connections
when establishing the gRPC connection. If None only the C(host) part
will be used.
ini:
- section: defaults
key: remote_port
env:
- name: ANSIBLE_REMOTE_PORT
vars:
- name: ansible_port
remote_user:
description:
- The username used to authenticate to the remote device when the gRPC
connection is first established. If the remote_user is not specified,
the connection will use the username of the logged in user.
- Can be configured from the CLI via the C(--user) or C(-u) options.
ini:
- section: defaults
key: remote_user
env:
- name: ANSIBLE_REMOTE_USER
vars:
- name: ansible_user
password:
description:
- Configures the user password used to authenticate to the remote device
when first establishing the gRPC connection.
vars:
- name: ansible_password
- name: ansible_ssh_pass
private_key_file:
description:
- The PEM encoded private key file used to authenticate to the
remote device when first establishing the grpc connection.
ini:
- section: grpc_connection
key: private_key_file
env:
- name: ANSIBLE_PRIVATE_KEY_FILE
vars:
- name: ansible_private_key_file
root_certificates_file:
description:
- The PEM encoded root certificate file used to create a SSL-enabled
channel, if the value is None it reads the root certificates from
a default location chosen by gRPC at runtime.
ini:
- section: grpc_connection
key: root_certificates_file
env:
- name: ANSIBLE_ROOT_CERTIFICATES_FILE
vars:
- name: ansible_root_certificates_file
certificate_chain_file:
description:
- The PEM encoded certificate chain file used to create a SSL-enabled
channel. If the value is None, no certificate chain is used.
ini:
- section: grpc_connection
key: certificate_chain_file
env:
- name: ANSIBLE_CERTIFICATE_CHAIN_FILE
vars:
- name: ansible_certificate_chain_file
certificate_path:
description:
- Folder to search for certificate and key files
ini:
- section: grpc_connection
key: certificate_path
env:
- name: ANSIBLE_CERTIFICATE_PATH
vars:
- name: ansible_certificate_path
gnmi_encoding:
description:
- Encoding used for gNMI communication
- Must be either JSON or JSON_IETF
- If not provided, will run CapabilityRequest for auto-detection
ini:
- section: grpc_connection
key: gnmi_encoding
env:
- name: ANSIBLE_GNMI_ENCODING
vars:
- name: ansible_gnmi_encoding
grpc_channel_options:
description:
- Key/Value pairs (dict) to define gRPC channel options to be used
- gRPC reference
U(https://grpc.github.io/grpc/core/group__grpc__arg__keys.html)
- Provide the I(ssl_target_name_override) option to override the TLS
subject or subjectAltName (only in the case secure connections are
used). The option must be provided in cases, when the FQDN or IPv4
address that is used to connect to the device is different from the
subject name that is provided in the host certificate. This is
needed, because the TLS validates hostname or IP address to avoid
man-in-the-middle attacks.
vars:
- name: ansible_grpc_channel_options
grpc_environment:
description:
- Key/Value pairs (dict) to define environment settings specific to gRPC
- The standard mechanism to provide/set the environment in Ansible
cannot be used, because those environment settings are not passed to
the client process that establishes the gRPC connection.
- Set C(GRPC_VERBOSITY) and C(GRPC_TRACE) to setup gRPC logging. Need to
add code for log forwarding of gRPC related log messages to the
persistent messages log (see below).
- Set C(HTTPS_PROXY) to specify your proxy settings (if needed).
- Set C(GRPC_SSL_CIPHER_SUITES) in case the default TLS ciphers do not match
what is offered by the gRPC server.
vars:
- name: ansible_grpc_environment
persistent_connect_timeout:
type: int
description:
- Configures, in seconds, the amount of time to wait when trying to
initially establish a persistent connection. If this value expires
before the connection to the remote device is completed, the connection
will fail.
default: 5
ini:
- section: persistent_connection
key: connect_timeout
env:
- name: ANSIBLE_PERSISTENT_CONNECT_TIMEOUT
vars:
- name: ansible_connect_timeout
persistent_command_timeout:
type: int
description:
- Configures the default timeout value (in seconds) when awaiting a
response after issuing a call to a RPC. If the RPC does not return
before the timeout exceed, an error is generated and the connection
is closed.
default: 300
ini:
- section: persistent_connection
key: command_timeout
env:
- name: ANSIBLE_PERSISTENT_COMMAND_TIMEOUT
vars:
- name: ansible_command_timeout
persistent_log_messages:
type: boolean
description:
- This flag will enable logging the command executed and response received
from target device in the ansible log file. For this option to work the
'log_path' ansible configuration option is required to be set to a file
path with write access.
- Be sure to fully understand the security implications of enabling this
option as it could create a security vulnerability by logging sensitive
information in log file.
default: False
ini:
- section: persistent_connection
key: log_messages
env:
- name: ANSIBLE_PERSISTENT_LOG_MESSAGES
vars:
- name: ansible_persistent_log_messages
"""
import os
import re
import json
import base64
import datetime
try:
import grpc
HAS_GRPC = True
except ImportError:
HAS_GRPC = False
try:
from google import protobuf
HAS_PROTOBUF = True
except ImportError:
HAS_PROTOBUF = False
from ansible.errors import AnsibleConnectionFailure, AnsibleError
from ansible.plugins.connection import NetworkConnectionBase
from ansible.plugins.connection import ensure_connect
from google.protobuf import json_format
from ansible_collections.nokia.grpc.plugins.connection.pb import gnmi_pb2
from ansible.module_utils._text import to_text
class Connection(NetworkConnectionBase):
"""
Connection plugin for gRPC
To use gRPC connections in Ansible one (or more) sub-plugin(s) for the
required gRPC service(s) must be loaded. To load gRPC sub-plugins use the
method `register_service()` with the name of the sub-plugin to be
registered.
After loading the sub-plugin, Ansible modules can call methods provided by
that sub-plugin. There is a wrapper available that consumes the attribute
name {sub-plugin name}__{method name} to call a specific method of that
sub-plugin.
"""
transport = "nokia.grpc.gnmi"
has_pipelining = True
def __init__(self, play_context, new_stdin, *args, **kwargs):
super(Connection, self).__init__(
play_context, new_stdin, *args, **kwargs
)
self._task_uuid = to_text(kwargs.get("task_uuid", ""))
if not HAS_PROTOBUF:
raise AnsibleError(
"protobuf is required to use gRPC connection type. " +
"Please run 'pip install protobuf'"
)
if not HAS_GRPC:
raise AnsibleError(
"grpcio is required to use gRPC connection type. " +
"Please run 'pip install grpcio'"
)
self._connected = False
def readFile(self, optionName):
"""
Reads a binary certificate/key file
Parameters:
optionName(str): used to read filename from options
Returns:
File content
Raises:
AnsibleConnectionFailure: file does not exist or read excpetions
"""
path = self.get_option('certificate_path')
if not path:
path = '/etc/ssl:/etc/ssl/certs:/etc/ca-certificates'
filename = self.get_option(optionName)
if filename:
if filename.startswith('~'):
filename = os.path.expanduser(filename)
if not filename.startswith('/'):
for entry in path.split(':'):
if os.path.isfile(os.path.join(entry, filename)):
filename = os.path.join(entry, filename)
break
if os.path.isfile(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except Exception as exc:
raise AnsibleConnectionFailure(
'Failed to read cert/keys file %s: %s' % (filename, exc)
)
else:
raise AnsibleConnectionFailure(
'Cert/keys file %s does not exist' % filename
)
return None
def _connect(self):
"""
Establish gRPC connection to remote node and create gNMI stub.
This method will establish the persistent gRPC connection, if not
already done. After this, the gNMI stub will be created. To get
visibility about gNMI capabilities of the remote device, a gNM
CapabilityRequest will be sent and result will be persisted.
Parameters:
None
Returns:
None
"""
if self.connected:
self.queue_message('v', 'gRPC connection to host %s already exist' % self._target)
return
grpcEnv = self.get_option('grpc_environment') or {}
if not isinstance(grpcEnv, dict):
raise AnsibleConnectionFailure("grpc_environment must be a dict")
for key in grpcEnv:
if grpcEnv[key]:
os.environ[key] = str(grpcEnv[key])
else:
try:
del os.environ[key]
except KeyError:
# no such setting in current environment, but thats ok
pass
self._login_credentials = [
('username', self.get_option('remote_user')),
('password', self.get_option('password'))
]
host = self.get_option('host')
port = self.get_option('port')
self._target = host if port is None else '%s:%d' % (host, port)
self._timeout = self.get_option('persistent_command_timeout')
certs = {}
certs['root_certificates'] = self.readFile('root_certificates_file')
certs['certificate_chain'] = self.readFile('certificate_chain_file')
| |
dtype=numpy.float32)
else:
error = self._error
if mask_fit is not None and self.getMask() is not None:
self.setMask(numpy.logical_or(self.getMask(), mask_fit))
elif mask_fit is not None:
self.setMask(mask_fit)
if self._mask.sum() == self._mask.size:
raise ValueError('No data points in spectrum left for fitting.')
if vel is not None and disp is not None:
SSPLibrary = SSPLibrary.applyGaussianLOSVD(vel, disp)
if len(SSPLibrary.getWave()) != len(self._wave) or numpy.sum(SSPLibrary.getWave() - self._wave) != 0.0:
tempLib = SSPLibrary.resampleBase(self._wave)
else:
tempLib = SSPLibrary
if SSPLibrary.getBaseNumber() == 1:
libFit = fit_linearComb(tempLib.getBase(), coeff=numpy.array([1], dtype=numpy.float32))
else:
libFit = fit_linearComb(tempLib.getBase())
libFit.fit(self._data, error, self._mask, negative=negative)
bestfit_spec = Spectrum1D(self._wave, data=libFit(), normalization=self.getNormalization(), mask=self.getMask())
chi2 = libFit.chisq(self._data, sigma=error, mask=self._mask)
return libFit.getCoeff(), bestfit_spec, chi2
    def fitKinMCMC_fixedpop(self, spec_model, vel_min, vel_max, vel_disp_min, vel_disp_max, mcmc_code='emcee',
                            walkers=50, burn=1000, samples=3000, thin=2):
        """Fit the kinematics of this spectrum against a fixed template
        spectrum with Markov chain Monte Carlo sampling.

        Masked pixels (self._mask) are excluded from the likelihood.

        Parameters
        ----------
        spec_model : Spectrum1D
            A single spectrum from a SSP library used to fit the data of the
            current spectrum with.
        vel_min : float
            The minimum velocity in km/s used in the MCMC.
        vel_max : float
            The maximum velocity in km/s used in the MCMC.
        vel_disp_min : float
            The minimum velocity dispersion in km/s used in the MCMC.
        vel_disp_max : float
            The maximum velocity dispersion in km/s used in the MCMC.
        mcmc_code : str, optional
            Sampler backend: 'pymc' uses the PyMC(2) library, anything else
            falls through to the emcee ensemble sampler.
        walkers : int, optional
            Number of emcee walkers.  NOTE(review): in the 'pymc' branch it
            is used as the number of times m.sample() is called in a loop --
            confirm that repetition is intended.
        burn : int, optional
            The burn-in parameter that is often applied in MCMC
            implementations. The first `burn` samples will be discarded
            in the further analysis.
        samples : int, optional
            The number of sampling iterations.
        thin : int, optional
            Only keeps every `thin`th sample, to reduce autocorrelation
            among the samples.

        Returns
        -------
        pymc.MCMC when mcmc_code == 'pymc'; otherwise a transposed emcee
        chain array (post burn-in) of the (vel, disp) samples.
        """
        # Boolean selector of unmasked (usable) pixels.
        valid_pix = numpy.logical_not(self._mask)
        if mcmc_code == 'pymc':
            vel = pymc.Uniform('vel', lower=vel_min, upper=vel_max)
            disp = pymc.Uniform('disp', lower=vel_disp_min, upper=vel_disp_max)
            # Deterministic node: template spectrum shifted/broadened to the
            # proposed kinematics, restricted to unmasked pixels.
            @pymc.deterministic(plot=False)
            def m(vel=vel, disp=disp):
                return spec_model.applyKin(vel, disp, self._wave).getData()[
                    valid_pix]
            d = pymc.Normal('d', mu=m, tau=self._error[valid_pix] ** (-2),
                            value=self._data[valid_pix], observed=True)
            # NOTE(review): the name `m` is rebound here, shadowing the
            # deterministic node defined just above.
            m = pymc.MCMC([vel, disp, m, d])
            m.use_step_method(pymc.AdaptiveMetropolis, [vel, disp])
            for i in range(walkers):
                m.sample(samples, burn, thin, progress_bar=False)
            return m
        # --- emcee branch: 2 free parameters (vel, disp) ---
        ndim = 2
        def log_prior(theta):
            # Flat prior inside the box, -inf outside.
            vel, disp = theta
            if vel_min < vel < vel_max and vel_disp_min < disp < vel_disp_max:
                return 0.0
            return -numpy.inf
        def log_likelihood(theta, x, y, e):
            # Gaussian likelihood over the unmasked pixels.
            vel, disp = theta
            y_model = spec_model.applyKin(vel, disp, x).getData()[valid_pix]
            inv_sigma2 = 1.0 / (e[valid_pix] ** 2)
            return -0.5 * numpy.sum((y[valid_pix] - y_model) ** 2 * inv_sigma2 - numpy.log(inv_sigma2))
        def log_posterior(theta, x, y, e):
            return log_prior(theta) + log_likelihood(theta, x, y, e)
        # Random starting positions for all walkers, uniform over the prior box.
        guess = numpy.c_[numpy.random.uniform(vel_min, vel_max, walkers),
                         numpy.random.uniform(vel_disp_min, vel_disp_max, walkers)]
        sampler = emcee.EnsembleSampler(walkers, ndim, log_posterior,
                                        args=(self.getWave(), self.getData(),
                                              self.getError()))
        # NOTE(review): run_mcmc(..., thin=...) is the emcee 2.x signature;
        # emcee 3.x renamed it to thin_by -- confirm the pinned version.
        sampler.run_mcmc(guess, samples, thin=thin)
        trace = sampler.chain[:, burn // thin:, :].T
        return trace
def fit_Kin_Lib_simple(self, lib_SSP, nlib_guess, vel_min, vel_max, disp_min, disp_max, mask_fit=None, iterations=3,
    mcmc_code='emcee', walkers=50, burn=50, samples=200, thin=1, sample_out=False):
    """Fit the kinematics and a linear combination of template spectra.

    The velocity and velocity dispersion are sampled with MCMC (via
    `fitKinMCMC_fixedpop`), and the template coefficients are obtained by a
    linear fit (`fitSuperposition`); both steps are alternated `iterations`
    times.

    Parameters
    ----------
    lib_SSP : SSPlibrary
        The library containing the template spectra.
    nlib_guess : int
        The initial guess for the best fitting template spectrum (1-based).
        If 0, every template of the library is fitted individually and the
        one with the lowest chi^2 is kept.  If negative, only a single
        iteration is performed.
    vel_min : float
        The minimum velocity in km/s used in the MCMC.
    vel_max : float
        The maximum velocity in km/s used in the MCMC.
    disp_min : float
        The minimum velocity dispersion in km/s used in the MCMC.
    disp_max : float
        The maximum velocity dispersion in km/s used in the MCMC.
    mask_fit : optional
        Mask object whose `maskPixelsObserved` marks wavelength regions to
        exclude from the fit.
    iterations : int, optional
        The number of kinematics/coefficients alternation iterations.
    mcmc_code : {'emcee', 'pymc'}, optional
        Which MCMC backend to use.
    walkers : int, optional
        Number of emcee walkers (or pymc chains on the final iteration).
    burn : int, optional
        The burn-in parameter; the first `burn` samples are discarded.
    samples : int, optional
        The number of MCMC iterations.
    thin : int, optional
        Only keep every `thin`-th sample, to reduce autocorrelation.
    sample_out : bool, optional
        If True, also return the raw velocity and dispersion traces.

    Returns
    -------
    vel, vel_err : float
        Mean and standard deviation of the velocity samples.
    Rvel : float
        Gelman-Rubin statistic for the velocity chains.
    disp, disp_err : float
        Mean and standard deviation of the velocity-dispersion samples.
    Rdisp : float
        Gelman-Rubin statistic for the dispersion chains.
    bestfit_spec : Spectrum1D
        The best fitted spectrum from the linear combination of templates.
    coeff : numpy.ndarray
        The template coefficients producing the best fit.
    chisq : float
        The chi^2 value between `bestfit_spec` and the data.
    trace_vel, trace_disp : array or None
        The sample traces when `sample_out` is True, otherwise None.
    """
    # Special case: fit the spectrum against every single SSP template and
    # keep the one that yields the lowest chi^2.
    if nlib_guess == 0:
        bestresult = None
        coeff = None
        for i in range(lib_SSP.getBaseNumber()):
            select = numpy.zeros(lib_SSP.getBaseNumber(), dtype=bool)
            select[i] = True
            lib = lib_SSP.subLibrary(select)
            # BUG FIX: the recursive call previously passed burn/samples/thin
            # positionally after `iterations`, shifting them into the
            # mcmc_code/walkers/burn parameter slots. Use keyword arguments.
            result = self.fit_Kin_Lib_simple(lib, 1, vel_min, vel_max, disp_min, disp_max,
                                             mask_fit=mask_fit, iterations=iterations,
                                             mcmc_code=mcmc_code, walkers=walkers,
                                             burn=burn, samples=samples, thin=thin)
            # BUG FIX: chi^2 is element 8; the last two elements of the
            # return tuple are the optional sample traces (None here), so
            # comparing result[-1] would compare None against numpy.inf.
            if bestresult is None or result[8] < bestresult[8]:
                bestresult = result
                coeff = select
        vel, vel_err, Rvel, disp, disp_err, Rdisp, bestfit_spec, _, chi2 = bestresult[:9]
        # Return the same 11-element shape as the main path below.
        return vel, vel_err, Rvel, disp, disp_err, Rdisp, bestfit_spec, coeff, chi2, None, None
    elif 0 < nlib_guess <= lib_SSP.getBaseNumber():
        spec_lib_guess = lib_SSP.getSpec(nlib_guess)
    else:
        spec_lib_guess = lib_SSP.getSpec(1)
    # Preserve the original mask so the fit mask can be recomputed for the
    # updated velocity on every iteration.
    mask_init = deepcopy(self.getMask())
    if mask_fit is not None:
        # Shift the fit mask to the observed frame using the central velocity
        # of the allowed range (c = 300000 km/s).
        excl_fit_init = mask_fit.maskPixelsObserved(self.getWave(), ((vel_min + vel_max) / 2.0) / 300000.0)
        if mask_init is not None:
            self.setMask(numpy.logical_or(mask_init, excl_fit_init))
        else:
            self.setMask(excl_fit_init)
    for i in range(iterations):
        w = walkers
        # For pymc, run a single chain on all but the last iteration; the
        # Gelman-Rubin statistic only needs the multi-chain final run.
        if mcmc_code == 'pymc' and i != iterations - 1:
            w = 1
        m = self.fitKinMCMC_fixedpop(spec_lib_guess, vel_min, vel_max,
                                     disp_min, disp_max, mcmc_code, w,
                                     burn, samples, thin)
        if mcmc_code == 'pymc':
            trace_vel = m.trace('vel', chain=None)[:]
            trace_disp = m.trace('disp', chain=None)[:]
        else:
            trace_vel, trace_disp = m
        vel = numpy.mean(trace_vel)
        vel_err = numpy.std(trace_vel)
        disp = numpy.mean(trace_disp)
        disp_err = numpy.std(trace_disp)
        # Fit the template coefficients at the current kinematics.
        lib_vel = lib_SSP.applyGaussianLOSVD(vel, disp)
        (coeff, bestfit_spec, chi2) = self.fitSuperposition(lib_vel)
        if nlib_guess < 0:
            # Negative guess: single pass only, no template refinement.
            break
        spec_lib_guess = lib_SSP.compositeSpectrum(coeff)
        if mask_fit is not None:
            # Re-center the fit mask on the newly determined velocity.
            excl_fit = mask_fit.maskPixelsObserved(self.getWave(), vel / 300000.0)
            if mask_init is not None:
                self.setMask(numpy.logical_or(mask_init, excl_fit))
            else:
                self.setMask(excl_fit)
    # Gelman-Rubin convergence diagnostics.
    if mcmc_code == 'pymc':
        if walkers > 2:
            gelman_rubin = pymc.gelman_rubin(m)
            Rvel = gelman_rubin['vel']
            Rdisp = gelman_rubin['disp']
        else:
            Rvel = 0
            Rdisp = 0
    else:
        # Manual Gelman-Rubin; traces are presumably (steps, walkers) from
        # the emcee branch of fitKinMCMC_fixedpop — B is the between-chain
        # and W the within-chain variance.
        n = float(trace_vel.shape[0])
        Bvel = n * numpy.var(trace_vel.mean(axis=0), ddof=1)
        Wvel = numpy.mean(numpy.var(trace_vel, axis=0, ddof=1))
        Rvel = numpy.sqrt(1 - 1 / n + Bvel / n / Wvel)
        Bdisp = n * numpy.var(trace_disp.mean(axis=0), ddof=1)
        Wdisp = numpy.mean(numpy.var(trace_disp, axis=0, ddof=1))
        Rdisp = numpy.sqrt(1 - 1 / n + Bdisp / n / Wdisp)
    if not sample_out:
        return vel, vel_err, Rvel, disp, disp_err, Rdisp, bestfit_spec, coeff, chi2, None, None
    else:
        return vel, vel_err, Rvel, disp, disp_err, Rdisp, bestfit_spec, coeff, chi2, trace_vel, trace_disp
def fit_Lib_Boots(self, lib_SSP, vel, disp, vel_err=None, disp_err=None, par_eline=None, select_wave_eline=None,
mask_fit=None, method_eline='leastsq', guess_window=10.0, spectral_res=0.0, ftol=1e-4, xtol=1e-4,
bootstraps=100, modkeep=80, parallel=1, plot=False):
"""
Parameters
----------
lib_SSP : SSPlibrary
The library containing the template spectra.
vel : float
The redshift of the object in km/s.
disp : float
The velocity dispersion value in km/s, which will be used
for the Gaussian broadening of the template spectra.
vel_err : float, optional
???Unused???
disp_err : float, optional
???Unused???
par_eline : parFile, optional
A list of emission lines that will be included in the bootstrap
fitting procedure.
select_wave_eline : numpy.ndarray
A 1D boolean array where the True value represents which wavelength
elements will be used in the emission line fitting.
method_eline : {'leastsq', 'simplex'}, optional
This argument specifies if ordinary least squares fitting
(`leastsq`) should be applied, or if a downhill simplex algorithm
(`simplex`) should be used.
guess_window : float, optional
The wavelength region in which the emission line will be fitted.
spectral_res : float, optional
The spectral resolution of the line.
ftol : float, optional
The maximum acceptable error for fit convergence.
xtol : float, optional
The relative acceptable error for fit convergence.
bootstraps : int, optional
The number of bootstraps runs that will be performed.
modkeep : float
parallel : {'auto', int}, optional
If parallel is not equal to one, the python multiprocessing routine
shall be used to run parts of the code in parallel. With the option
`auto`, it adjusts the number of parallel processes to the number
of | |
#importar cosas
from tkinter import *
from tkinter import messagebox
import os
from time import strftime
import time
import pickle
import random
import os
import sys
import fpdf
from fpdf import FPDF
# Module-level flag: 1 while multi-level ("multinivel") play is active,
# 0 otherwise. NOTE: a `global` statement is a no-op at module scope, so
# the original `global continuo` line was dropped.
continuo = 0
def VentanaPrincipal():
#configuracion ventana principal
Ventana_C=Tk()
Ventana_C.geometry('800x750+450+50')
Ventana_C.title('Futoshiki')
Ventana_C.config(bg='beige')
Ventana_C.resizable(width= False, height=False)
#titulos
Mensaje_titulo=Message(Ventana_C,text="FUTOSHIKI",width='300',font=("Comic Sans",20),bg="#C2D8FB",fg="black")
Mensaje_titulo.place(x=300,y=50)
Mensaje_nombre=Message(Ventana_C,text="Ingrese su nombre:",width='300',font=("Arial",15),bg='beige',fg="black")
Mensaje_nombre.place(x=50,y=100)
Mensaje_reloj=Message(Ventana_C,text="Opciones de reloj:",width='320',font=("Arial",16),bg='beige',fg="black")
Mensaje_reloj.place(x=15,y=605)
Mensaje_panel=Message(Ventana_C,text="Orientacion del panel de digitos:",width='320',font=("Arial",16),bg='beige',fg="black")
Mensaje_panel.place(x=465,y=605)
Mensaje_dificultad=Message(Ventana_C,text="Dicultad:",width='300',font=("Arial",20),bg='beige',fg="black")
Mensaje_dificultad.place(x=300,y=150)
#dato
Nombre_text=StringVar()
Nombre_widget=Entry(Ventana_C,width='50',textvariable=Nombre_text)
Nombre_widget.place(x=250,y=110)
#limita lo que se ingresa
def max_name(Nombre_text):
if len(Nombre_text.get()) > 0:
Nombre_text.set(Nombre_text.get()[:20])
Nombre_text.trace("w", lambda *args: max_name(Nombre_text))
#reloj/temporizador
var=IntVar()
RadioButton1=Radiobutton(Ventana_C, text='Reloj',variable=var,value=1)
RadioButton1.place(x=20,y=640)
RadioButton2=Radiobutton(Ventana_C, text='Temporizador',variable=var,value=2)
RadioButton2.place(x=20,y=660)
RadioButton3=Radiobutton(Ventana_C, text='NO',variable=var,value=3)
RadioButton3.place(x=20,y=680)
#lado de los numeros
var2=IntVar()
RadioButton4=Radiobutton(Ventana_C, text='Izquierda',variable=var2,value=1)
RadioButton4.place(x=700,y=640)
RadioButton5=Radiobutton(Ventana_C, text='Derecha',variable=var2,value=2)
RadioButton5.place(x=700,y=660)
#dificultad
var3=IntVar()
RadioButton6=Radiobutton(Ventana_C, text='Facil',variable=var3,value=1)
RadioButton6.place(x=310,y=190)
RadioButton7=Radiobutton(Ventana_C, text='Intermedio',variable=var3,value=2)
RadioButton7.place(x=310,y=210)
RadioButton8=Radiobutton(Ventana_C, text='Dificil',variable=var3,value=3)
RadioButton8.place(x=310,y=230)
#funcion de multinivel
global continuo
continuo=0
def multinivel():
global continuo
if continuo==1:
continuo=0
Botonnivel["bg"]="grey"
Botonnivel["text"]="Multinivel: Apagado"
else:
continuo=1
Botonnivel["bg"]="green"
Botonnivel["text"]="Multinivel: Encendido"
#boton de multinivel
Botonnivel=Button(Ventana_C,text="Multinivel: Apagado",width='17',height='2',font=("Arial",15),bg='grey',fg="black",command=multinivel)
Botonnivel.place(x=280,y=660)
#archivo de juegos
filesize=os.path.getsize("futoshiki2020partidas.dat")
if filesize==0:
listafacil=[(("2",0,0),("3",2,2)),(("3",3,3),("4",0,3),("1",1,1)),(("1",1,3),("2",0,2))]
listaintermedio=[((("v",0,1),(">",0,2),(">",0,3),(">",1,1),(">",2,3),("˄",2,3),("<",3,3),("˄",3,4)),(("v",0,0),("v",0,1),(">",1,1),(">",2,0),("v",2,1),("v",3,0),("v",3,1),(">",4,3)),((">",0,0),("<",0,2),("˄",0,2),("v",0,4),("v",1,3),("˄",1,4),("v",3,2),("<",3,3),("˄",3,4),(">",4,3)))]
listadificil=[((("4",0,0),("˄",0,1),("2",1,1),("<",1,1),("v",2,0),("<",2,0),("v",2,2),(">",2,3),("1",3,0),("v",3,1),(">",3,2),("<",4,0)),(("<",0,0),("2",0,2),("˄",1,0),(">",1,2),("4",1,3),("v",2,1),("v",2,2),("1",2,3),(">",3,2),("<",4,3)),(("<",0,0),("<",0,1),(">",0,2),("4",1,1),("˄",1,1),(">",1,1),("˄",1,3),("v",2,1),("1",2,4),("<",3,2),("˄",3,2),("<",3,3),(">",4,0)))]
listadefinitiva=[listafacil,listaintermedio,listadificil]
a=open("futoshiki2020partidas.dat","wb")
pickle.dump(listadefinitiva,a)
a.close()
#el juego nuevo
def jugar(Nombre,reloj,lado,dificultad,continuo):
if Nombre=="" or reloj==0 or lado==0 or dificultad==0:#control de errores
messagebox.showerror(message="Error, entradas incompletas")
else:
Ventana_C.withdraw()#nueva ventana y se cierra la anterior
Ventana_J=Tk()#configuracion nueva pantalla
Ventana_J.geometry('800x750+450+50')
Ventana_J.title('Futoshiki')
Ventana_J.config(bg='beige')
Ventana_J.resizable(width= False, height=False)
Mensaje_nombre=Label(Ventana_J,text="Nombre: "+Nombre,font=("Arial",20),bg='beige')
Mensaje_nombre.place(x=275,y=20)#titulos
#para que el boton seleccionado haga algo
global uno
global dos
global tres
global cuatro
global cinco
global num
global lista
global btn0
global btn1
global btn2
global btn3
global btn4
global btn5
global btn6
global btn7
global btn8
global btn9
global btn10
global btn11
global btn12
global btn13
global btn14
global btn15
global btn16
global btn17
global btn18
global btn19
global btn20
global btn21
global btn22
global btn23
global btn24
global cuadricula
global algo
global puntaje
global top10
global listaborrada
global nivelmult
global dificultadC
global sugerido
global relojC
relojC=reloj
sugerido=0
dificultadC=dificultad
uno="1"
dos="2"
tres="3"
cuatro="4"
cinco="5"
num=""
algo=0
puntaje=0
filesize=os.path.getsize("futoshiki2020top10.dat")
if filesize==0:
top10=[]
y=open("futoshiki2020top10.dat","wb")
pickle.dump(top10,y)
y.close()
else:
y=open("futoshiki2020top10.dat","rb")
top10=pickle.load(y)
y.close()
listaborrada=[]
if isinstance(dificultad,list):
lista=dificultad[2]
cuadricula=dificultad[1]
nivelmult=dificultad[3]
btn0=cuadricula[0][0]
btn1=cuadricula[0][1]
btn2=cuadricula[0][2]
btn3=cuadricula[0][3]
btn4=cuadricula[0][4]
btn5=cuadricula[1][0]
btn6=cuadricula[1][1]
btn7=cuadricula[1][2]
btn8=cuadricula[1][3]
btn9=cuadricula[1][4]
btn10=cuadricula[2][0]
btn11=cuadricula[2][1]
btn12=cuadricula[2][2]
btn13=cuadricula[2][3]
btn14=cuadricula[2][4]
btn15=cuadricula[3][0]
btn16=cuadricula[3][1]
btn17=cuadricula[3][2]
btn18=cuadricula[3][3]
btn19=cuadricula[3][4]
btn20=cuadricula[4][0]
btn21=cuadricula[4][1]
btn22=cuadricula[4][2]
btn23=cuadricula[4][3]
btn24=cuadricula[4][4]
algo=1
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
else:
btn0=0
btn1=0
btn2=0
btn3=0
btn4=0
btn5=0
btn6=0
btn7=0
btn8=0
btn9=0
btn10=0
btn11=0
btn12=0
btn13=0
btn14=0
btn15=0
btn16=0
btn17=0
btn18=0
btn19=0
btn20=0
btn21=0
btn22=0
btn23=0
btn24=0
lista=[]
nivelmult=0
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
#funciones de borrado de jugadas
def borra_todo():
global sugerido
if sugerido==1:
sugerencia()
while lista!=[]:
anterior()
def anterior():
global listaborrada
global lista
global num
global btn0
global btn1
global btn2
global btn3
global btn4
global btn5
global btn6
global btn7
global btn8
global btn9
global btn10
global btn11
global btn12
global btn13
global btn14
global btn15
global btn16
global btn17
global btn18
global btn19
global btn20
global btn21
global btn22
global btn23
global btn24
global cuadricula
global sugerido
if sugerido==1:
sugerencia()
if lista==[]:
messagebox.showerror(message="Error, ya no hay jugadas anteriores a esta")
elif lista[-1]==0:
listaborrada.append((lista[-1],btn0))
btn0=0
lista=lista[:-1]
num=""
boton0["text"]=num
boton0["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==1:
listaborrada.append((lista[-1],btn1))
btn1=0
lista=lista[:-1]
num=""
boton1["text"]=num
boton1["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==2:
listaborrada.append((lista[-1],btn2))
btn2=0
lista=lista[:-1]
num=""
boton2["text"]=num
boton2["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==3:
listaborrada.append((lista[-1],btn3))
btn3=0
lista=lista[:-1]
num=""
boton3["text"]=num
boton3["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==4:
listaborrada.append((lista[-1],btn4))
btn4=0
lista=lista[:-1]
num=""
boton4["text"]=num
boton4["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==5:
listaborrada.append((lista[-1],btn5))
btn5=0
lista=lista[:-1]
num=""
boton5["text"]=num
boton5["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==6:
listaborrada.append((lista[-1],btn6))
btn6=0
lista=lista[:-1]
num=""
boton6["text"]=num
boton6["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==7:
listaborrada.append((lista[-1],btn7))
btn7=0
lista=lista[:-1]
num=""
boton7["text"]=num
boton7["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==8:
listaborrada.append((lista[-1],btn8))
btn8=0
lista=lista[:-1]
num=""
boton8["text"]=num
boton8["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==9:
listaborrada.append((lista[-1],btn9))
btn9=0
lista=lista[:-1]
num=""
boton9["text"]=num
boton9["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==10:
listaborrada.append((lista[-1],btn10))
btn10=0
lista=lista[:-1]
num=""
boton10["text"]=num
boton10["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==11:
listaborrada.append((lista[-1],btn11))
btn11=0
lista=lista[:-1]
num=""
boton11["text"]=num
boton11["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==12:
listaborrada.append((lista[-1],btn12))
btn12=0
lista=lista[:-1]
num=""
boton12["text"]=num
boton12["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==13:
listaborrada.append((lista[-1],btn13))
btn13=0
lista=lista[:-1]
num=""
boton13["text"]=num
boton13["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==14:
listaborrada.append((lista[-1],btn14))
btn14=0
lista=lista[:-1]
num=""
boton14["text"]=num
boton14["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==15:
listaborrada.append((lista[-1],btn15))
btn15=0
lista=lista[:-1]
num=""
boton15["text"]=num
boton15["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==16:
listaborrada.append((lista[-1],btn16))
btn16=0
lista=lista[:-1]
num=""
boton16["text"]=num
boton16["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==17:
listaborrada.append((lista[-1],btn17))
btn17=0
lista=lista[:-1]
num=""
boton17["text"]=num
boton17["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==18:
listaborrada.append((lista[-1],btn18))
btn18=0
lista=lista[:-1]
num=""
boton18["text"]=num
boton18["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==19:
listaborrada.append((lista[-1],btn19))
btn19=0
lista=lista[:-1]
num=""
boton19["text"]=num
boton19["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==20:
listaborrada.append((lista[-1],btn20))
btn20=0
lista=lista[:-1]
num=""
boton20["text"]=num
boton20["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==21:
listaborrada.append((lista[-1],btn21))
btn21=0
lista=lista[:-1]
num=""
boton21["text"]=num
boton21["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==22:
listaborrada.append((lista[-1],btn22))
btn22=0
lista=lista[:-1]
num=""
boton22["text"]=num
boton22["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==23:
listaborrada.append((lista[-1],btn23))
btn23=0
lista=lista[:-1]
num=""
boton23["text"]=num
boton23["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
elif lista[-1]==24:
listaborrada.append((lista[-1],btn24))
btn24=0
lista=lista[:-1]
num=""
boton24["text"]=num
boton24["state"]='normal'
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
#encuentra si ya se ganó
def ganar(cuadricula):
global continuo
global dificultadC
if isinstance(dificultadC,list):
dificultadC=dificultadC[0]
ceros=0
for i in cuadricula:
for j in i:
if j==0 or j=="0":
ceros+=1
if ceros==0:
if reloj==1:
parar()
Boton4_J["state"]=DISABLED
Boton5_J["state"]=DISABLED
Boton6_J["state"]=DISABLED
Boton0_num["state"]=DISABLED
Boton1_num["state"]=DISABLED
Boton2_num["state"]=DISABLED
Boton3_num["state"]=DISABLED
Boton4_num["state"]=DISABLED
if continuo==0:
Mensaje_ganar=Message(Ventana_J,text="¡EXCELENTE! JUEGO TERMINADO CON ÉXITO",width='885',font=("Comic Sans",40),bg="#C2D8FB",fg="black")
Mensaje_ganar.place(x=10,y=475)
elif continuo==1 and dificultadC <3:
Mensaje_ganar=Message(Ventana_J,text="¡EXCELENTE! NIVEL TERMINADO CON ÉXITO",width='885',font=("Comic Sans",40),bg="#C2D8FB",fg="black")
Mensaje_ganar.place(x=10,y=475)
else:
continuo=0
Mensaje_ganar=Message(Ventana_J,text="¡EXCELENTE! JUEGO TERMINADO CON ÉXITO",width='885',font=("Comic Sans",40),bg="#C2D8FB",fg="black")
Mensaje_ganar.place(x=10,y=475)
def cerrar():
global continuo
global dificultadC
filesize=os.path.getsize("futoshiki2020top10.dat")
if filesize!=0:
file = open("futoshiki2020top10.dat","r+")
file.truncate(0)
file.close()
top10.append((Nombre,puntaje))
y=open("futoshiki2020top10.dat","wb")
pickle.dump(top10,y)
y.close()
if continuo==1:
Ventana_J.destroy()
jugar(Nombre,reloj,lado,dificultadC+1,continuo)
else:
Ventana_J.destroy()
Ventana_C.destroy()
if continuo==0:
btfin=Button(Ventana_J,text="Fin",width='11',height='3',command=cerrar)
else:
btfin=Button(Ventana_J,text="siguiente",width='11',height='3',command=cerrar)
btfin.place(x=300,y=200)
#funciones de los botones de la cuadricula
def numero(n):
global num
num=n
if num=="1":
Boton0_num["bg"]="green"
Boton1_num["bg"]="white"
Boton2_num["bg"]="white"
Boton3_num["bg"]="white"
Boton4_num["bg"]="white"
elif num=="2":
Boton0_num["bg"]="white"
Boton1_num["bg"]="green"
Boton2_num["bg"]="white"
Boton3_num["bg"]="white"
Boton4_num["bg"]="white"
elif num=="3":
Boton0_num["bg"]="white"
Boton1_num["bg"]="white"
Boton2_num["bg"]="green"
Boton3_num["bg"]="white"
Boton4_num["bg"]="white"
elif num=="4":
Boton0_num["bg"]="white"
Boton1_num["bg"]="white"
Boton2_num["bg"]="white"
Boton3_num["bg"]="green"
Boton4_num["bg"]="white"
elif num=="5":
Boton0_num["bg"]="white"
Boton1_num["bg"]="white"
Boton2_num["bg"]="white"
Boton3_num["bg"]="white"
Boton4_num["bg"]="green"
#agregar para que cambie de color al presionar
def original0():
global num
global btn0
global cuadricula
global listaborrada
global sugerido
listaborrada=[]
cumple=0
if sugerido==0:
for i in cuadricula:
if num==i[0]:
cumple=1
messagebox.showerror(message="Este numero ya esta en esta columna")
if num in cuadricula[0]:
cumple=1
messagebox.showerror(message="Este numero ya está en la fila")
for i in plantilla:
if i[1]==0 and i[2]==0:
if btn1!=0 and btn5!=0:
if i[0]=="<":
if num>=btn1:
cumple=1
messagebox.showerror(message="numero no cumple con el signo < ")
elif i[0]==">":
if num<=btn1:
cumple=1
messagebox.showerror(message="numero no cumple con el signo > ")
elif i[0]=="v":
if num<=btn5:
cumple=1
messagebox.showerror(message="numero no cumple con el signo v ")
elif i[0]=="˄":
if num>=btn5:
cumple=1
messagebox.showerror(message="numero no cumple con el signo ˄ ")
if cumple==0:
if num=="1":
Boton0_num["bg"]="white"
elif num=="2":
Boton1_num["bg"]="white"
elif num=="3":
Boton2_num["bg"]="white"
elif num=="4":
Boton3_num["bg"]="white"
else:
Boton4_num["bg"]="white"
if num!="":
btn0=num
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
boton0["text"]=num
boton0["state"]=DISABLED
lista.append(0)
num=""
ganar(cuadricula)
else:
sugerencia()
listadenumeros=["1","2","3","4","5"]
nuevalista=[]
listavertical=[]
for i in cuadricula:
listavertical.append(i[0])
listahorizontal=cuadricula[0]
for j in listadenumeros:
if (j not in listavertical) and (j not in listahorizontal) :
nuevalista.append(j)
messagebox.showinfo(nuevalista)
def original1():
global num
global btn1
global cuadricula
global listaborrada
global sugerido
listaborrada=[]
cumple=0
if sugerido==0:
for i in cuadricula:
if num==i[1]:
cumple=1
messagebox.showerror(message="Este numero ya esta en esta columna")
if num in cuadricula[0]:
cumple=1
messagebox.showerror(message="Este numero ya está en la fila")
for i in plantilla:
if i[1]==0 and i[2]==1:
if btn2!=0 and btn6!=0:
if i[0]=="<":
if num>=btn2:
cumple=1
messagebox.showerror(message="numero no cumple con el signo < ")
elif i[0]==">":
if num<=btn2:
cumple=1
messagebox.showerror(message="numero no cumple con el signo > ")
elif i[0]=="v":
if num<=btn6:
cumple=1
messagebox.showerror(message="numero no cumple con el signo v ")
elif i[0]=="˄":
if num>=btn6:
cumple=1
messagebox.showerror(message="numero no cumple con el signo ˄ ")
if cumple==0:
if num=="1":
Boton0_num["bg"]="white"
elif num=="2":
Boton1_num["bg"]="white"
elif num=="3":
Boton2_num["bg"]="white"
elif num=="4":
Boton3_num["bg"]="white"
else:
Boton4_num["bg"]="white"
if num!="":
btn1=num
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
boton1["text"]=num
boton1["state"]=DISABLED
lista.append(1)
num=""
ganar(cuadricula)
else:
sugerencia()
listadenumeros=["1","2","3","4","5"]
nuevalista=[]
listavertical=[]
for i in cuadricula:
listavertical.append(i[1])
listahorizontal=cuadricula[0]
for j in listadenumeros:
if (j not in listavertical) and (j not in listahorizontal) :
nuevalista.append(j)
messagebox.showinfo(nuevalista)
def original2():
global num
global btn2
global cuadricula
global listaborrada
global sugerido
listaborrada=[]
cumple=0
if sugerido==0:
for i in cuadricula:
if num==i[2]:
cumple=1
messagebox.showerror(message="Este numero ya esta en esta columna")
if num in cuadricula[0]:
cumple=1
messagebox.showerror(message="Este numero ya está en la fila")
for i in plantilla:
if i[1]==0 and i[2]==2:
if btn3!=0 and btn7!=0:
if i[0]=="<":
if num>=btn3:
cumple=1
messagebox.showerror(message="numero no cumple con el signo < ")
elif i[0]==">":
if num<=btn3:
cumple=1
messagebox.showerror(message="numero no cumple con el signo > ")
elif i[0]=="v":
if num<=btn7:
cumple=1
messagebox.showerror(message="numero no cumple con el signo v ")
elif i[0]=="˄":
if num>=btn7:
cumple=1
messagebox.showerror(message="numero no cumple con el signo ˄ ")
if cumple==0:
if num=="1":
Boton0_num["bg"]="white"
elif num=="2":
Boton1_num["bg"]="white"
elif num=="3":
Boton2_num["bg"]="white"
elif num=="4":
Boton3_num["bg"]="white"
else:
Boton4_num["bg"]="white"
if num!="":
btn2=num
cuadricula=[[btn0,btn1,btn2,btn3,btn4],[btn5,btn6,btn7,btn8,btn9],[btn10,btn11,btn12,btn13,btn14],[btn15,btn16,btn17,btn18,btn19],[btn20,btn21,btn22,btn23,btn24]]
boton2["text"]=num
boton2["state"]=DISABLED
lista.append(2)
num=""
ganar(cuadricula)
| |
import copy
import numpy as np
from circuits.circuit_pack import aRCb
# from circuits.ecm import ecm_serial_matcher
from circuits.ecm_simulator import ecm_simulator_1
from circuits.elements import ele_C, ele_L
class L_M_0:
    """
    Minimal implementation of the Levenberg-Marquardt (L-M) core algorithm,
    able to fit simple functions by non-linear least squares.

    Follows "paper0": Madsen, Nielsen & Tingleff,
    "Methods for Non-Linear Least Squares Problems", Section 3.2.
    """

    def __init__(self, input_x_arr, input_y_arr, para_arr, obj_fun, iter_max=100):
        """
        :param
            input_x_arr: ndarray(float)
                Observed x data.
            input_y_arr: ndarray(float)
                Observed y data.
            para_arr: ndarray(float)
                Initial guess of the parameters to be fitted.
            obj_fun:
                Model function called as obj_fun(input_x_arr=..., para_arr=...),
                returning the model values as an ndarray.
            iter_max: int
                Maximum number of L-M iterations.
        """
        self.input_x_arr = input_x_arr
        self.input_y_arr = input_y_arr
        self.para_arr = para_arr
        self.obj_fun = obj_fun
        self.iter_max = iter_max
        # Gradient-norm stopping tolerance, defined in paper0 eq 3.15a.
        self.e1 = 1e-7
        # Step-size stopping tolerance, defined in paper0 eq 3.15b.
        # (The original assigned 1e-18 and immediately overwrote it with
        # 1e-15; only the effective value is kept.)
        self.e2 = 1e-15
        # Scale of the initial damping factor mu, defined in paper0 eq 3.14.
        self.tao = 1e-6
        # Number of iterations performed by the last iterate() call.
        self.iter_count = 0

    def cal_residual(self):
        """Return the residual vector r = y_observed - f(x; p)."""
        y_arr = self.obj_fun(input_x_arr=self.input_x_arr, para_arr=self.para_arr)
        residual_arr = self.input_y_arr - y_arr
        return residual_arr

    def cal_derivative(self, para_index):
        """Central finite-difference derivative of the model w.r.t. one parameter."""
        step = 1e-10
        big_para_arr = copy.deepcopy(self.para_arr)
        big_para_arr[para_index] += step
        small_para_arr = copy.deepcopy(self.para_arr)
        small_para_arr[para_index] -= step
        big_output_arr = self.obj_fun(input_x_arr=self.input_x_arr, para_arr=big_para_arr)
        small_output_arr = self.obj_fun(input_x_arr=self.input_x_arr, para_arr=small_para_arr)
        derivative_arr = (big_output_arr - small_output_arr) / (2 * step)
        return derivative_arr

    def cal_Jacobian(self):
        """Return the M x N Jacobian of the model; column j is df/dp_j."""
        M = self.input_x_arr.shape[0]
        N = self.para_arr.shape[0]
        J_arr = np.zeros(shape=(M, N))
        for i in range(N):
            J_arr[:, i] = self.cal_derivative(para_index=i)
        return J_arr

    def iterate(self):
        """Run the damped Gauss-Newton (L-M) iteration, updating self.para_arr in place."""
        v = 2
        jacob_arr = self.cal_Jacobian()
        A = jacob_arr.T.dot(jacob_arr)
        # Initial damping, defined in paper0 eq 3.14.
        mu = self.tao * max([A[i, i] for i in range(A.shape[0])])
        # r = y - f, so +J^T r is the descent direction of 0.5*||r||^2.
        g = jacob_arr.T.dot(self.cal_residual())
        found = np.linalg.norm(g, ord=np.inf) <= self.e1
        iter_count = 0
        while (not found) and (iter_count < self.iter_max):
            hessian_LM_arr = A + mu * np.eye(A.shape[0])
            # h carries both the size and the direction of the parameter update.
            # Solve the damped normal equations rather than forming the inverse.
            h = np.linalg.solve(hessian_LM_arr, g)
            # Step-size convergence test, paper0 eq 3.15b.
            # BUG FIX: the original compared the residual norm against
            # e2*||p||, which is not the criterion of the paper.
            if np.linalg.norm(h, ord=2) <= self.e2 * (np.linalg.norm(self.para_arr, ord=2) + self.e2):
                found = True
                break
            else:
                # Gain ratio, paper0 eq 2.18 with F(x) = 0.5*||f(x)||^2.
                F_0 = 0.5 * (np.linalg.norm(self.cal_residual(), ord=2) ** 2)
                new_para_arr = self.para_arr + h.ravel()
                F_h = 0.5 * (np.linalg.norm(self.input_y_arr - self.obj_fun(input_x_arr=self.input_x_arr,
                                                                            para_arr=new_para_arr),
                                            ord=2) ** 2)
                # L(0) - L(h), given just below paper0 eq 3.14.
                L0_minus_Lh = 0.5 * h.T.dot(mu * h + g)
                rou = (F_0 - F_h) / L0_minus_Lh
                if rou > 0:  # accept the step h
                    self.para_arr = new_para_arr
                    # Recompute Jacobian, A and g at the new point.
                    jacob_arr = self.cal_Jacobian()
                    A = jacob_arr.T.dot(jacob_arr)
                    g = jacob_arr.T.dot(self.cal_residual())
                    found1 = np.linalg.norm(g, ord=np.inf) <= self.e1
                    found2 = np.linalg.norm(self.cal_residual(), ord=2) <= 1e-6
                    if found1 or found2:
                        break
                    # Shrink the damping (paper0 algorithm 3.16).
                    # BUG FIX: the update must use the gain ratio `rou`;
                    # the original wrote (2 * mu - 1) ** 3.
                    mu = mu * max(1 / 3, 1 - (2 * rou - 1) ** 3)
                    v = 2
                else:
                    # Reject the step: increase the damping.
                    mu = mu * v
                    v = 2 * v
            iter_count += 1
            print('LM-iter:', iter_count, self.para_arr)
        self.iter_count = iter_count
# def f1(input_x_arr, para_arr):
# a, b = para_arr
# y_arr = a * np.exp(b * input_x_arr)
# return y_arr
# add noise
# def add_noise(y_arr):
# mu, sigma = 0, 5
# y_distubed_arr = y_arr + np.random.normal(mu, sigma, y_arr.shape[0])
# return y_distubed_arr
# input_x_arr = np.linspace(0, 10, 100)
# y_arr = f1(input_x_arr=input_x_arr, para_arr=np.array([10.0, 0.8]))
# y_distubed_arr = add_noise(y_arr)
# lm = L_M_0(input_x_arr=input_x_arr, input_y_arr=y_distubed_arr, para_arr=np.array([1.0, 1.0]), obj_fun=f1)
# lm.iterate()
# import matplotlib.pyplot as plt
# plt.scatter(input_x_arr, y_distubed_arr)
# y_fit_arr = f1(input_x_arr, para_arr=lm.para_arr)
# plt.plot(input_x_arr, y_fit_arr)
# plt.show()
"""
Require:
1- 可以用于KKT进行IS的数据有效性校验
object function = weight * (Zimag_residual ** 2)
2- 可用于IS拟合ECM参数
object function = weight * (Zreal_residual ** 2 + Zimag_residual ** 2)
refer
papers:
**paper-0: LOA: <METHODS FOR NON-LINEAR LEAST SQUARES PROBLEMS>
3.2. The Levenberg–Marquardt Method
blogs:
blog1: **[优化]Levenberg-Marquardt 最小二乘优化
https://zhuanlan.zhihu.com/p/42415718
blog0: LM(Levenberg–Marquardt)算法原理及其python自定义实现
https://blog.csdn.net/wolfcsharp/article/details/89674973
"""
def ecm_obj_fun_1(w_arr, para_arr, ecm_serial, z_arr):
    """Simulate the ECM impedance at the given angular frequencies, weighted by 1/|Z|.

    :param w_arr: numpy array of angular frequencies
    :param para_arr: numpy array of ECM parameters
    :param ecm_serial: ECM circuit description understood by ecm_simulator_1
    :param z_arr: measured complex impedance (used only for modulus weighting)
    :return: simulated impedance divided element-wise by |z_arr|
    """
    simulated = ecm_simulator_1(ecm_serial, para_list=para_arr.tolist(), fre=None, w=w_arr.tolist())
    return np.array(simulated) / np.abs(z_arr)
def ecm_obj_fun(w_arr, para_arr, ecm_serial):
    """Simulate the ECM impedance at the given angular frequencies (no weighting).

    :param w_arr: numpy array of angular frequencies
    :param para_arr: numpy array of ECM parameters
    :param ecm_serial: ECM circuit description understood by ecm_simulator_1
    :return: numpy array of simulated complex impedance
    """
    simulated = ecm_simulator_1(ecm_serial, para_list=para_arr.tolist(), fre=None, w=w_arr.tolist())
    return np.array(simulated)
def vogit_obj_fun_1(w_arr, para_arr, tao_arr, obj_fun_mode='both', add_C=False):
    """Simulate the impedance of a Voigt measurement model (Lin-KK style):
    Rs + Ls + sum of M parallel-RC elements with fixed time constants,
    optionally plus a series capacitor Cs.

    :param
        w_arr: 1-D numpy array of angular frequencies.
        para_arr: model parameters; layout depends on add_C:
            add_C=False -> [Rs, Ls, R0, R1, ..., R_M-1]
            add_C=True  -> [Rs, Ls, Cs, R0, R1, ..., R_M-1]
        tao_arr: fixed time constants, one per RC element (C_i = tao_i / R_i).
        obj_fun_mode: 'real' returns the real part, 'both' returns the complex
            impedance. 'imag' is not implemented here and returns None
            (see vogit_obj_fun_0 for an imaginary-part variant).
        add_C: include a series capacitor Cs in the model.
    :return:
        numpy array of simulated impedance (real or complex), or None for
        unsupported modes.
    Version:
        1: the series inductor Ls is always included; the series capacitor
           (add_C) is optional.
    """
    if obj_fun_mode == 'imag':
        # Not implemented in this version; return None, matching the
        # historical behavior of the empty branch.
        return None
    elif (obj_fun_mode == 'real') or (obj_fun_mode == 'both'):
        # The two parameter layouts differ only in whether Cs is present;
        # everything after the unpacking is shared (previously duplicated).
        if add_C:
            # para_arr = [Rs, Ls, Cs, R0, R1, ..., R_M-1]
            Rs, Ls, Cs = para_arr[0], para_arr[1], para_arr[2]
            M_R_arr = para_arr[3:]
        else:
            # para_arr = [Rs, Ls, R0, R1, ..., R_M-1]
            Rs, Ls = para_arr[0], para_arr[1]
            M_R_arr = para_arr[2:]
        # -------------- impedance of each of the M RC elements --------------
        # one row per circuit element; rows are summed afterwards
        rows = []
        for i, R in enumerate(M_R_arr):
            tao = tao_arr[i]
            rows.append([aRCb(w, R0=R, C0=tao / R) for w in w_arr])
        # impedance of the series inductor Ls
        rows.append([ele_L(w, Ls) for w in w_arr])
        if add_C:
            # impedance of the optional series capacitor Cs
            rows.append([ele_C(w, Cs) for w in w_arr])
        # sum all element contributions, then add the series resistance Rs
        z_sim_arr = np.asarray(rows, dtype=complex).sum(axis=0) + Rs
        if obj_fun_mode == 'real':
            return z_sim_arr.real
        elif obj_fun_mode == 'both':
            return z_sim_arr
def vogit_obj_fun_0(w_arr, para_arr, tao_arr, obj_fun_mode='both', add_C=False, add_L=False):
    """Simulate the impedance of a Voigt model: M parallel-RC elements with fixed
    time constants, plus (for 'real'/'both') a series resistance Rs and an
    optional series capacitor Cs.

    :param
        w_arr: 1-D numpy array of angular frequencies.
        para_arr: model parameters; layout depends on the mode:
            obj_fun_mode == 'imag' -> [R0, R1, ..., R_M-1]
            add_C == False         -> [Rs, R0, R1, ..., R_M-1]
            add_C == True          -> [Rs, Cs, R0, R1, ..., R_M-1]
        tao_arr: fixed time constants, one per RC element (C_i = tao_i / R_i).
        obj_fun_mode: 'imag', 'real' or 'both'.
        add_C: include a series capacitor Cs ('real'/'both' modes only).
        add_L: NOTE(review): accepted but never used in this version — see
            vogit_obj_fun_1 for the variant with a series inductor. Confirm
            callers do not rely on it.
    :return:
        imaginary part ('imag'), real part ('real'), complex array ('both'),
        or implicitly None for any other mode.
    """
    if obj_fun_mode == 'imag':
        # para_arr = [R0, R1, ..., R_M-1]
        # each RC pair is (R, C) with C derived from the fixed time constant
        RC_para_list = [[R, tao / R] for R, tao in zip(para_arr, tao_arr)]
        z_sim_arr = np.empty(shape=(len(RC_para_list), w_arr.shape[0]), dtype=complex)
        for i, RC_list in enumerate(RC_para_list):
            R, C = RC_list
            tmp_z_sim_list = [aRCb(w, R0=R, C0=C) for w in w_arr]
            # assign the whole row at once (earlier 1-D layout raised
            # "IndexError: too many indices for array")
            z_sim_arr[i, :] = tmp_z_sim_list
        # total impedance of the M RC elements in series
        z_sim_arr = z_sim_arr.sum(axis=0)
        return z_sim_arr.imag
    elif (obj_fun_mode == 'real') or (obj_fun_mode == 'both'):
        if add_C:
            # para_arr = [*Rs*, *C*, R0, R1, ..., R_M-1]
            Rs = para_arr[0]
            # Wrong: the name C would be clobbered by the loop unpacking below
            # C = para_arr[1]
            # Right: use a distinct name for the series capacitance
            Cs = para_arr[1]
            RC_para_list = [[R, tao / R] for R, tao in zip(para_arr[2:], tao_arr)]
            # -------------- impedance of each of the M RC elements --------------
            # one extra row is reserved for the series capacitor Cs
            z_sim_arr = np.empty(shape=(len(RC_para_list)+1, w_arr.shape[0]), dtype=complex)
            for i, RC_list in enumerate(RC_para_list):
                R, C = RC_list
                tmp_z_sim_list = [aRCb(w, R0=R, C0=C) for w in w_arr]
                # assign the whole row at once (avoids the 1-D IndexError)
                z_sim_arr[i, :] = tmp_z_sim_list
            # impedance contributed by the series capacitor Cs (last row)
            z_sim_arr[-1, :] = [ele_C(w, Cs) for w in w_arr]
            # -------------- impedance of each of the M RC elements --------------
            # sum the M RC contributions plus Cs
            z_sim_arr = z_sim_arr.sum(axis=0)
            # add the series resistance Rs
            z_sim_arr += Rs
            if obj_fun_mode == 'real':
                return z_sim_arr.real
            elif obj_fun_mode == 'both':
                return z_sim_arr
        else:
            # para_arr = [*Rs*, R0, R1, ..., R_M-1]
            Rs = para_arr[0]
            RC_para_list = [[R, tao / R] for R, tao in zip(para_arr[1:], tao_arr)]
            # -------------- impedance of each of the M RC elements --------------
            z_sim_arr = np.empty(shape=(len(RC_para_list), w_arr.shape[0]), dtype=complex)
            for i, RC_list in enumerate(RC_para_list):
                R, C = RC_list
                tmp_z_sim_list = [aRCb(w, R0=R, C0=C) for w in w_arr]
                # assign the whole row at once (avoids the 1-D IndexError)
                z_sim_arr[i, :] = tmp_z_sim_list
            # -------------- impedance of each of the M RC elements --------------
            # sum the M RC contributions
            z_sim_arr = z_sim_arr.sum(axis=0)
            # add the series resistance Rs
            z_sim_arr += Rs
            if obj_fun_mode == 'real':
                return z_sim_arr.real
            elif obj_fun_mode == 'both':
                return z_sim_arr
class Levenberg_Marquart_0:
def __init__(self, impSpe, obj_fun, obj_fun_mode='real', obj_fun_weighting_type='modulus', iter_max=100, **kwargs):
"""
:param
impSpe: cls
obj_fun:
ECM-vogit
| |
#!/usr/bin/env python
#
# Copyright 2016 MIT Lincoln Laboratory, Massachusetts Institute of Technology
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use these files except in compliance with
# the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
#
"""
Authors: <NAME>
Date: September 19, 2014
Installation: Python 2.7 on Windows 7
This script computes various measures of textual similarity between users and their followers. The following functions
are primarily used in depth-first sampling methods.
"""
import datetime, time, math, string, numpy as np, sys
from nltk.corpus import stopwords
import pyTweet, json_to_database
# import psycopg2
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
##
# FUNCTIONS FOR TEXT PROCESSING
def process_word_list(word_list):
    """
    Normalize a list of tweet tokens: URLs are kept verbatim; every other token is
    stripped of punctuation and lower-cased, then dropped if it is an English stopword.
    :param word_list: original list of words
    :return processed_word_list: processed list of words
    """
    stop_words = stopwords.words('english')
    stop_words.extend(['', '""', "''", 'rt'])
    cleaned = []
    for token in word_list:
        if ('http:' in token) or ('https:' in token):
            # Leave URLs exactly as they are
            cleaned.append(token)
            continue
        try:
            # Python 2 str.translate: delete punctuation, then lower-case
            normalized = token.translate(string.maketrans("", ""), string.punctuation).lower()
        except UnicodeEncodeError:
            # Same as original behavior: silently skip tokens that cannot be encoded
            continue
        if normalized not in stop_words:
            cleaned.append(normalized)
    return cleaned
def create_documents(cur, conn, tl_start_date, tl_end_date):
    """
    This function creates a 'document' for TF-IDF calculations: all of a user's tweets in
    [tl_start_date, tl_end_date] are concatenated, normalized via process_word_list, and stored
    in users.timeline_document. Users whose resulting document is empty are additionally flagged
    timeline_is_relevant = FALSE.
    :param cur: Cursor to database
    :param conn: Connection to database
    :param tl_start_date: datetime object to indicate beginning of time line
    :param tl_end_date: datetime object to indicate the end of a time line
    """
    # Select relevant users that do not already have timeline-documents created
    cur.execute(cur.mogrify("SELECT DISTINCT (tweets.user_id) FROM tweets INNER JOIN users ON users.user_id=tweets.user_id WHERE users.timeline_document IS NULL AND users.has_timeline = TRUE AND (users.expand_user IS NULL OR users.expand_user = TRUE) AND (tweets.created_at >= %s AND tweets.created_at <= %s);", (tl_start_date, tl_end_date)))
    uids = cur.fetchall()
    print "\nCreate documents for {} users".format(len(uids))
    # Create the timeline-documents
    for u in range(len(uids)):
        print "\tCreate document for user {}: {} out of {}".format(uids[u][0], u, len(uids))
        timeline_document = []
        # Grab relevant tweets
        cur.execute(cur.mogrify("SELECT tweet FROM tweets WHERE user_id = %s AND (created_at <= %s AND created_at >= %s);", (uids[u][0], tl_end_date, tl_start_date)))
        for twt in cur:
            # split each tweet into whitespace-separated tokens
            timeline_document.extend(twt[0].split(' '))
        # Process each word in timeline: convert to lower case, remove punctuation, remove English stop-words
        timeline_document = process_word_list(timeline_document)
        # Add timeline_document to table
        json_to_database.make_sql_edit(cur, conn, cur.mogrify("UPDATE users SET timeline_document = %s WHERE user_id = %s;", (timeline_document, uids[u][0])))
        if len(timeline_document) < 1:
            # No usable text in this window: mark the user as not relevant for similarity scoring
            json_to_database.make_sql_edit(cur, conn, "UPDATE users SET timeline_is_relevant = FALSE WHERE user_id = {};".format(uids[u][0]))
##
# New TF-IDF ANALYSIS
def find_most_similar_followers(cur, conn, tl_start_date, tl_end_date, user_ids, prev_users):
    """
    This function identifies the followers most textually similar to the previous hop. Followers
    whose cosine similarity is at or above the 80th percentile (roughly the top 20%) are marked
    for expansion; the rest are removed from user_ids (which is mutated in place).
    NOTE(review): the original docstring said "top 10%" and an inline comment said "top 5%",
    but the code thresholds at the 80th percentile — confirm the intended fraction.
    :param cur: Cursor to database
    :param conn: Connection to database
    :param tl_start_date: datetime object to indicate beginning of time line
    :param tl_end_date: datetime object to indicate the end of a time line
    :param user_ids: set of user IDs (mutated: non-selected users are removed)
    :param prev_users: user IDs of hop -1
    :return original_user_ids: copy of user_ids as originally passed in (None on early exit)
    """
    if 'sklearn' not in sys.modules.keys():
        import sklearn
    original_user_ids = set(user_ids)
    print "\nFind friends/followers most similar to the previous hop using a TF-IDF transformation."
    print "\tBegin with {} friends and followers for similarity test".format(len(user_ids))
    user_timeline_hash = {}  # hash table for user IDs and indexes in the TF-IDF matrix
    # Create document for khop-1 users: concatenate all their tweets into one string
    user_doc = ''
    for jj in prev_users:
        cur.execute(cur.mogrify("SELECT tweets.tweet FROM tweets INNER JOIN users ON users.user_id=tweets.user_id WHERE (users.user_id = %s) AND (users.has_timeline=TRUE) AND (users.expand_user=TRUE) AND (tweets.created_at >= %s AND tweets.created_at <= %s);", (jj, tl_start_date, tl_end_date)))
        for t in cur:
            user_doc += t[0]
    corpus = [user_doc]
    user_timeline_hash[0] = 'prev_users'
    # Create document for all hop users, one corpus entry per user with any timeline text
    idx = 1
    jj_users = list(user_ids)
    for jj in jj_users:
        user_doc = ''
        cur.execute(cur.mogrify("SELECT tweets.tweet FROM tweets INNER JOIN users ON users.user_id=tweets.user_id WHERE (users.user_id=%s) AND (users.has_timeline=TRUE) AND (tweets.created_at >= %s AND tweets.created_at <= %s) AND (users.expand_user IS NOT FALSE);", (jj, tl_start_date, tl_end_date)))
        for t in cur:
            user_doc += t[0]
        if user_doc.strip() != '':
            corpus.append(user_doc)
            user_timeline_hash[idx] = jj
            idx += 1
        else:
            # No timeline text at all: drop the user from consideration
            user_ids.remove(jj)
    print "\tCompare previous hop with {} friends and followers".format(len(user_timeline_hash)-1)
    if corpus != ['']:
        # Perform TF-IDF transformation
        # tfidf_vectorizer = sklearn.feature_extraction.text.TfidfVectorizer(min_df=1)
        tfidf_vectorizer = TfidfVectorizer(min_df=1)
        tfidf_matrix = tfidf_vectorizer.fit_transform(corpus)
        # Compute cosine similarity between khop-1 and all other timelines
        score = cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
        # score = sklearn.metrics.pairwise.cosine_similarity(tfidf_matrix[0:1], tfidf_matrix)
        # Expand users scoring at/above the 80th percentile (row 0 is the previous hop itself)
        if len(score[0]) < 2:
            return
        threshold = np.percentile(score[0][1:], 80)
        expand_idx = np.where(score[0] >= threshold)[0]
        expand_count = 0
        for k in user_timeline_hash.keys():
            if k < 1:
                continue  # index 0 is the previous-hop document, not a candidate
            if k in expand_idx:
                expand_count += 1
                json_to_database.make_sql_edit(cur, conn, "UPDATE users SET expand_user=TRUE, decision_tfidf={} WHERE user_id={};".format(score[0][k], user_timeline_hash[k]))
            else:
                # Below threshold: remove from the set so the user is not expanded
                user_ids.remove(user_timeline_hash[k])
        print "\tExpand {} friends/followers".format(expand_count)
    return original_user_ids
##
# TF-DF ANALYSIS
def compute_df(cur, conn, tl_start_date, tl_end_date, user_set):
    """
    This function computes a document-frequency score for each term in the topic table. A
    'document' is a user's timeline between tl_start_date and tl_end_date.
    The stored value is log10(# of timelines containing term t / # of timelines considered).
    NOTE(review): the original docstring described IDF with log_e, but the code computes a
    base-10 log of the *non-inverted* ratio and stores it in the `document_frequency` column —
    confirm which definition downstream consumers expect.
    :param cur: Cursor to database
    :param conn: Connection to database
    :param tl_start_date: datetime object to indicate beginning of time line
    :param tl_end_date: datetime object to indicate the end of a time line
    :param user_set: Subset of users to restrict calculation, list object
    """
    print "\nCompute DF for each topic."
    # Get total number of timelines in database
    # build an OR-filter restricting everything to user_set
    a = " OR ".join(['user_id = ' + str(j) for j in user_set])
    cur.execute(cur.mogrify("SELECT COUNT (DISTINCT user_id) FROM tweets WHERE (created_at >= %s AND created_at <= %s) AND ({});".format(a), (tl_start_date, tl_end_date)))
    q = cur.fetchone()
    if (q is None) or (q[0] is None):
        print "WARNING: q or q[0] is None!"
        json_to_database.make_sql_edit(cur, conn, "UPDATE topics SET df = 0.0;")
        return
    total_timelines = float(q[0])
    print "\tThere are {} timelines for this set of friends/followers".format(total_timelines)
    # Case: No timelines
    if total_timelines < 1.0:
        json_to_database.make_sql_edit(cur, conn, "UPDATE topics SET df = 0.0;")
        return
    # Get count of timelines containing topic t, for each topic
    cur.execute("SELECT topic FROM topics;")
    topics = cur.fetchall()
    for t in topics:
        # Count the number of timelines that the topic appears in
        topic_freq = 0.0
        if 'http' in t[0]:
            # URL topics: substring match against the tweet text or the url_entities array
            cur.execute(cur.mogrify("SELECT DISTINCT user_id FROM tweets WHERE ({}) AND ((tweet ~ %s) OR (%s = ANY(url_entities))) AND (created_at >= %s AND created_at <= %s);".format(a), ('%' + t[0] + '%', '%' + t[0] + '%', tl_start_date, tl_end_date)))
        else:
            # Word topics: case-insensitive whole-word regex or hashtag match
            cur.execute(cur.mogrify("SELECT DISTINCT user_id FROM tweets WHERE ({}) AND ((tweet ~* %s) OR (LOWER(%s) = ANY(hashtag_entities))) AND (created_at >= %s AND created_at <= %s);".format(a), ('\m' + t[0] + '\M', t[0], tl_start_date, tl_end_date)))
        q = cur.fetchall()
        topic_freq += float(len(q))
        # Compute the log-scaled document frequency (0.0 when the topic never appears)
        df = 0.0
        if topic_freq > 0:
            df = math.log(topic_freq/total_timelines, 10.0)
        json_to_database.make_sql_edit(cur, conn, "UPDATE topics SET document_frequency = {} WHERE topic = '{}';".format(df, t[0]))
def compute_tf(document, term, type='raw'):
    """
    Return the term frequency (TF) of *term* within *document*.
    :param document: array of terms (list object)
    :param term: single term
    :param type: Type of TF calculation to use, default is 'raw'. Other options are 'augmented' and 'boolean'
    :return tf: term frequency of the term in the document, as a float
    """
    assert (type in ['raw', 'augmented', 'boolean']), "The parameter 'type' is not recognized. Please enter 'raw', 'boolean' or 'augmented' as it's value."
    occurrences = float(document.count(term))
    if type == 'raw':
        # plain occurrence count
        return occurrences
    if type == 'boolean':
        # 1.0 if present at all, else 0.0
        return float(term in document)
    if type == 'augmented':
        # occurrence count normalized by the most frequent term in the document
        peak = float(max([document.count(x) for x in document]))
        return 0.5 + (0.5 * occurrences) / peak
    return 0.0
def compute_tfdf_score(cur, user_id, tf_type):
"""
This function computes the TF-DF score for a user's timeline.
:param cur: Cursor to database
:param user_id: Twitter user ID
    :param tf_type: term-frequency calculation: 'raw', 'augmented', or 'boolean'
"""
| |
def _get_all_matches(self, instance):
# a match is a raw parameter instance
matches = []
multikey_exceptions = []
for filepath, raw_parameters in iteritems(instance.raw_data):
match, error = self._raw_parameters_from_single_source(raw_parameters)
if match is not None:
matches.append(match)
if error:
multikey_exceptions.append(error)
return matches, multikey_exceptions
    @abstractmethod
    def _merge(self, matches):
        # Abstract: each Parameter subclass defines how matches from multiple
        # sources combine into a single value (e.g. last-wins, concatenate,
        # dict-merge).
        raise NotImplementedError()
def _expand(self, data):
if self._expandvars:
# This is similar to conda._vendor.auxlib.type_coercion.typify_data_structure
# It could be DRY-er but that would break SRP.
if isinstance(data, Mapping):
return type(data)((k, expand_environment_variables(v)) for k, v in iteritems(data))
elif isiterable(data):
return type(data)(expand_environment_variables(v) for v in data)
else:
return expand_environment_variables(data)
else:
return data
    def __get__(self, instance, instance_type):
        # Descriptor protocol entry point: resolve this parameter's final value
        # for the given Configuration instance, with per-instance caching.
        # strategy is "extract and merge," which is actually just map and reduce
        # extract matches from each source in SEARCH_PATH
        # then merge matches together
        if self.name in instance._cache_:
            return instance._cache_[self.name]
        matches, errors = self._get_all_matches(instance)
        # fall back to the declared default when no source provided a value
        merged = self._merge(matches) if matches else self.default
        # We need to expand any environment variables before type casting.
        # Otherwise e.g. `my_bool_var: $BOOL` with BOOL=True would raise a TypeCoercionError.
        expanded = self._expand(merged)
        try:
            result = typify_data_structure(expanded, self._element_type)
        except TypeCoercionError as e:
            errors.append(CustomValidationError(self.name, e.value, "<<merged>>", text_type(e)))
        else:
            errors.extend(self.collect_errors(instance, result))
        # raises if any errors were accumulated; otherwise a no-op
        raise_errors(errors)
        instance._cache_[self.name] = result  # lgtm [py/uninitialized-local-variable]
        return result  # lgtm [py/uninitialized-local-variable]
    def collect_errors(self, instance, value, source="<<merged>>"):
        """Validate a Parameter value.
        Args:
            instance (Configuration): The instance object to which the Parameter descriptor is
                attached.
            value: The value to be validated.
            source (str): Label identifying where the value came from, used in error messages.
        Returns:
            list: Accumulated validation errors; empty when the value is valid.
        """
        errors = []
        if not isinstance(value, self._type):
            errors.append(InvalidTypeError(self.name, value, source, type(value),
                                           self._type))
        elif self._validation is not None:
            # A validator may return False (invalid), a string explaining why the
            # value is invalid, or anything else (including None) meaning valid.
            result = self._validation(value)
            if result is False:
                errors.append(ValidationError(self.name, value, source))
            elif isinstance(result, string_types):
                errors.append(CustomValidationError(self.name, value, source, result))
        return errors
    def _match_key_is_important(self, raw_parameter):
        # True when the raw parameter's key carries the "final" flag, which makes
        # it override matches from sources later in the search path.
        return raw_parameter.keyflag() is ParameterFlag.final
    def _first_important_matches(self, matches):
        # Keep matches up to and including the first one flagged "final"; when
        # no match is flagged, every match remains relevant.
        idx = first(enumerate(matches), lambda x: self._match_key_is_important(x[1]),
                    apply=lambda x: x[0])
        return matches if idx is None else matches[:idx+1]
@staticmethod
def _str_format_flag(flag):
return " #!%s" % flag if flag is not None else ''
@staticmethod
def _str_format_value(value):
if value is None:
return 'None'
return value
    @classmethod
    def repr_raw(cls, raw_parameter):
        # Abstract: subclasses render a raw parameter back into its textual
        # (yaml-like) representation.
        raise NotImplementedError()
class PrimitiveParameter(Parameter):
    """Parameter type for a Configuration class that holds a single python primitive value.
    The python primitive types are str, int, float, complex, bool, and NoneType. In addition,
    python 2 has long and unicode types.
    """
    def __init__(self, default, aliases=(), validation=None, element_type=None, expandvars=False):
        """
        Args:
            default (Any): The parameter's default value.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value. Returning
                `None` also indicates a valid value.
            element_type (type or Tuple[type]): Type-validation of parameter's value. If None,
                type(default) is used.
        """
        # For a primitive, the container type and the element type are the same.
        self._type = element_type if element_type is not None else type(default)
        self._element_type = self._type
        super(PrimitiveParameter, self).__init__(default, aliases, validation, expandvars)
    def _merge(self, matches):
        # A match whose key carries the "final" flag wins outright; otherwise the
        # last match (closest to the end of the search path) wins.
        winner = first(matches, self._match_key_is_important, default=None)
        if winner is None:
            winner = last(matches, lambda m: m is not None, default=None)
        if winner is None:
            raise ThisShouldNeverHappenError()  # pragma: no cover
        return winner.value(self)
    def repr_raw(self, raw_parameter):
        # Render as "key: value" plus an optional flag suffix.
        flag_suffix = self._str_format_flag(raw_parameter.keyflag())
        shown_value = self._str_format_value(raw_parameter.value(self))
        return "%s: %s%s" % (raw_parameter.key, shown_value, flag_suffix)
class SequenceParameter(Parameter):
    """Parameter type for a Configuration class that holds a sequence (i.e. list) of python
    primitive values.
    """
    # merged values are exposed as immutable tuples
    _type = tuple
    def __init__(self, element_type, default=(), aliases=(), validation=None,
                 string_delimiter=',', expandvars=False):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element in
                the sequence.
            default (Iterable[str]): The parameter's default value.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
            string_delimiter (str): Delimiter used when the sequence is given as a single string.
        """
        self._element_type = element_type
        self.string_delimiter = string_delimiter
        super(SequenceParameter, self).__init__(default, aliases, validation, expandvars)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Base-class checks (container type, whole-value validation), then a
        # per-element type check with the element index in the error.
        errors = super(SequenceParameter, self).collect_errors(instance, value)
        element_type = self._element_type
        for idx, element in enumerate(value):
            if not isinstance(element, element_type):
                errors.append(InvalidElementTypeError(self.name, element, source,
                                                      type(element), element_type, idx))
        return errors
    def _merge(self, matches):
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches_and_values = tuple((match, match.value(self)) for match in
                                            self._first_important_matches(matches))
        for match, value in relevant_matches_and_values:
            if not isinstance(value, tuple):
                raise InvalidTypeError(self.name, value, match.source, value.__class__.__name__,
                                       self._type.__name__)
        # get individual lines from important_matches that were marked important
        # these will be prepended to the final result
        def get_marked_lines(match, marker, parameter_obj):
            return tuple(line
                         for line, flag in zip(match.value(parameter_obj),
                                               match.valueflags(parameter_obj))
                         if flag is marker) if match else ()
        top_lines = concat(get_marked_lines(m, ParameterFlag.top, self) for m, _ in
                           relevant_matches_and_values)
        # also get lines that were marked as bottom, but reverse the match order so that lines
        # coming earlier will ultimately be last
        bottom_lines = concat(get_marked_lines(m, ParameterFlag.bottom, self) for m, _ in
                              reversed(relevant_matches_and_values))
        # now, concat all lines, while reversing the matches
        # reverse because elements closer to the end of search path take precedence
        all_lines = concat(v for _, v in reversed(relevant_matches_and_values))
        # stack top_lines + all_lines, then de-dupe
        top_deduped = tuple(unique(concatv(top_lines, all_lines)))
        # take the top-deduped lines, reverse them, and concat with reversed bottom_lines
        # this gives us the reverse of the order we want, but almost there
        # NOTE: for a line value marked both top and bottom, the bottom marker will win out
        #       for the top marker to win out, we'd need one additional de-dupe step
        bottom_deduped = unique(concatv(reversed(tuple(bottom_lines)), reversed(top_deduped)))
        # just reverse, and we're good to go
        return tuple(reversed(tuple(bottom_deduped)))
    def repr_raw(self, raw_parameter):
        # Render as a yaml-style list: "key:" followed by one "  - value" per element.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for q, value in enumerate(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self)[q]
            lines.append("  - %s%s" % (self._str_format_value(value),
                                       self._str_format_flag(valueflag)))
        return '\n'.join(lines)
    def _get_all_matches(self, instance):
        # this is necessary to handle argparse `action="append"`, which can't be set to a
        # default value of NULL
        # it also config settings like `channels: ~`
        matches, exceptions = super(SequenceParameter, self)._get_all_matches(instance)
        # drop matches whose raw value is None (explicit null in a source)
        matches = tuple(m for m in matches if m._raw_value is not None)
        return matches, exceptions
class MapParameter(Parameter):
    """Parameter type for a Configuration class that holds a map (i.e. dict) of python
    primitive values.
    """
    # merged values are exposed as immutable frozendicts
    _type = frozendict
    def __init__(self, element_type, default=None, aliases=(), validation=None, expandvars=False):
        """
        Args:
            element_type (type or Iterable[type]): The generic type of each element.
            default (Mapping): The parameter's default value. If None, will be an empty dict.
            aliases (Iterable[str]): Alternate names for the parameter.
            validation (callable): Given a parameter value as input, return a boolean indicating
                validity, or alternately return a string describing an invalid value.
        """
        self._element_type = element_type
        # normalize the default to a frozendict (empty when None/falsy)
        default = default and frozendict(default) or frozendict()
        super(MapParameter, self).__init__(default, aliases, validation, expandvars)
    def collect_errors(self, instance, value, source="<<merged>>"):
        # Base-class checks, then a per-value type check keyed by map key.
        errors = super(MapParameter, self).collect_errors(instance, value)
        if isinstance(value, Mapping):
            element_type = self._element_type
            errors.extend(InvalidElementTypeError(self.name, val, source, type(val),
                                                  element_type, key)
                          for key, val in iteritems(value) if not isinstance(val, element_type))
        return errors
    def _merge(self, matches):
        # get matches up to and including first important_match
        # but if no important_match, then all matches are important_matches
        relevant_matches_and_values = tuple((match, match.value(self)) for match in
                                            self._first_important_matches(matches))
        for match, value in relevant_matches_and_values:
            if not isinstance(value, Mapping):
                raise InvalidTypeError(self.name, value, match.source, value.__class__.__name__,
                                       self._type.__name__)
        # mapkeys with important matches
        def key_is_important(match, key):
            return match.valueflags(self).get(key) == ParameterFlag.final
        important_maps = tuple(dict((k, v)
                                    for k, v in iteritems(match_value)
                                    if key_is_important(match, k))
                               for match, match_value in relevant_matches_and_values)
        # dump all matches in a dict
        # then overwrite with important matches
        return frozendict(merge(concatv((v for _, v in relevant_matches_and_values),
                                        reversed(important_maps))))
    def repr_raw(self, raw_parameter):
        # Render as a yaml-style map: "key:" followed by one "  subkey: value" per entry.
        lines = list()
        lines.append("%s:%s" % (raw_parameter.key,
                                self._str_format_flag(raw_parameter.keyflag())))
        for valuekey, value in iteritems(raw_parameter.value(self)):
            valueflag = raw_parameter.valueflags(self).get(valuekey)
            lines.append("  %s: %s%s" % (valuekey, self._str_format_value(value),
                                         self._str_format_flag(valueflag)))
        return '\n'.join(lines)
    def _get_all_matches(self, instance):
        # it also config settings like `proxy_servers: ~`
        # drop matches whose raw value is None (explicit null in a source)
        matches, exceptions = super(MapParameter, self)._get_all_matches(instance)
        matches = tuple(m for m in matches if m._raw_value is not None)
        return matches, exceptions
class ConfigurationType(type):
    """Metaclass for Configuration: binds each Parameter descriptor to its attribute name."""
    def __init__(cls, name, bases, attr):
        super(ConfigurationType, cls).__init__(name, bases, attr)
        # Tell every Parameter descriptor which attribute name it was bound to,
        # and record those names on the class.
        bound_names = []
        for attr_name, descriptor in iteritems(cls.__dict__):
            if isinstance(descriptor, Parameter):
                bound_names.append(descriptor._set_name(attr_name))
        cls.parameter_names = tuple(bound_names)
@with_metaclass(ConfigurationType)
class Configuration(object):
def __init__(self, search_path=(), app_name=None, argparse_args=None):
# Currently, __init__ does a **full** disk reload of all files.
# A future improvement would be to cache files that are already loaded.
self.raw_data = odict()
self._cache_ = dict()
self._reset_callbacks = IndexedSet()
self._validation_errors = defaultdict(list)
self._set_search_path(search_path)
self._set_env_vars(app_name)
self._set_argparse_args(argparse_args)
def _set_search_path(self, search_path):
self._search_path | |
Enterprise+ and Edge licenses only.
:param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
artifacts are excluded.
:param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
:param pulumi.Input[str] key: the identity key of the repo.
:param pulumi.Input[Sequence[pulumi.Input['FederatedComposerRepositoryMemberArgs']]] members: The list of Federated members and must contain this repository URL (configured base URL
`/artifactory/` + repo `key`). Note that each of the federated members will need to have a base URL set.
Please follow the [instruction](https://www.jfrog.com/confluence/display/JFROG/Working+with+Federated+Repositories#WorkingwithFederatedRepositories-SettingUpaFederatedRepository)
to set up Federated repositories correctly.
:param pulumi.Input[bool] priority_resolution: Setting repositories with priority will cause metadata to be merged only from repositories set with this field
:param pulumi.Input[Sequence[pulumi.Input[str]]] project_environments: Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
:param pulumi.Input[str] project_key: Project key for assigning this repository to. When assigning repository to a project, repository key must be prefixed
with project key, separated by a dash.
:param pulumi.Input[Sequence[pulumi.Input[str]]] property_sets: List of property set name
:param pulumi.Input[str] repo_layout_ref: Repository layout key for the local repository
:param pulumi.Input[bool] xray_index: Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
Xray settings.
"""
if archive_browsing_enabled is not None:
pulumi.set(__self__, "archive_browsing_enabled", archive_browsing_enabled)
if blacked_out is not None:
pulumi.set(__self__, "blacked_out", blacked_out)
if description is not None:
pulumi.set(__self__, "description", description)
if download_direct is not None:
pulumi.set(__self__, "download_direct", download_direct)
if excludes_pattern is not None:
pulumi.set(__self__, "excludes_pattern", excludes_pattern)
if includes_pattern is not None:
pulumi.set(__self__, "includes_pattern", includes_pattern)
if key is not None:
pulumi.set(__self__, "key", key)
if members is not None:
pulumi.set(__self__, "members", members)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if package_type is not None:
pulumi.set(__self__, "package_type", package_type)
if priority_resolution is not None:
pulumi.set(__self__, "priority_resolution", priority_resolution)
if project_environments is not None:
pulumi.set(__self__, "project_environments", project_environments)
if project_key is not None:
pulumi.set(__self__, "project_key", project_key)
if property_sets is not None:
pulumi.set(__self__, "property_sets", property_sets)
if repo_layout_ref is not None:
pulumi.set(__self__, "repo_layout_ref", repo_layout_ref)
if xray_index is not None:
pulumi.set(__self__, "xray_index", xray_index)
    @property
    @pulumi.getter(name="archiveBrowsingEnabled")
    def archive_browsing_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, you may view content such as HTML or Javadoc files directly from Artifactory. This may not be safe and
        therefore requires strict content moderation to prevent malicious users from uploading content that may compromise
        security (e.g., cross-site scripting attacks).
        """
        return pulumi.get(self, "archive_browsing_enabled")
    @archive_browsing_enabled.setter
    def archive_browsing_enabled(self, value: Optional[pulumi.Input[bool]]):
        """Set `archive_browsing_enabled`; see the getter for semantics."""
        pulumi.set(self, "archive_browsing_enabled", value)
    @property
    @pulumi.getter(name="blackedOut")
    def blacked_out(self) -> Optional[pulumi.Input[bool]]:
        """
        When set, the repository does not participate in artifact resolution and new artifacts cannot be deployed.
        """
        return pulumi.get(self, "blacked_out")
    @blacked_out.setter
    def blacked_out(self, value: Optional[pulumi.Input[bool]]):
        """Set `blacked_out`; see the getter for semantics."""
        pulumi.set(self, "blacked_out", value)
    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Description of the repository.
        """
        return pulumi.get(self, "description")
    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        """Set `description`; see the getter for semantics."""
        pulumi.set(self, "description", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "download_direct" (wire name: downloadDirect).
@property
@pulumi.getter(name="downloadDirect")
def download_direct(self) -> Optional[pulumi.Input[bool]]:
    """
    When set, download requests to this repository will redirect the client to download the artifact directly from the cloud
    storage provider. Available in Enterprise+ and Edge licenses only.
    """
    return pulumi.get(self, "download_direct")

@download_direct.setter
def download_direct(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "download_direct", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "excludes_pattern" (wire name: excludesPattern).
@property
@pulumi.getter(name="excludesPattern")
def excludes_pattern(self) -> Optional[pulumi.Input[str]]:
    """
    List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
    artifacts are excluded.
    """
    return pulumi.get(self, "excludes_pattern")

@excludes_pattern.setter
def excludes_pattern(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "excludes_pattern", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "includes_pattern" (wire name: includesPattern).
@property
@pulumi.getter(name="includesPattern")
def includes_pattern(self) -> Optional[pulumi.Input[str]]:
    """
    List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
    artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
    """
    return pulumi.get(self, "includes_pattern")

@includes_pattern.setter
def includes_pattern(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "includes_pattern", value)
# Property pair backed by pulumi.get/pulumi.set under the key "key".
@property
@pulumi.getter
def key(self) -> Optional[pulumi.Input[str]]:
    """
    the identity key of the repo.
    """
    return pulumi.get(self, "key")

@key.setter
def key(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "key", value)
# Property pair backed by pulumi.get/pulumi.set under the key "members".
@property
@pulumi.getter
def members(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['FederatedComposerRepositoryMemberArgs']]]]:
    """
    The list of Federated members and must contain this repository URL (configured base URL
    `/artifactory/` + repo `key`). Note that each of the federated members will need to have a base URL set.
    Please follow the [instruction](https://www.jfrog.com/confluence/display/JFROG/Working+with+Federated+Repositories#WorkingwithFederatedRepositories-SettingUpaFederatedRepository)
    to set up Federated repositories correctly.
    """
    return pulumi.get(self, "members")

@members.setter
def members(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['FederatedComposerRepositoryMemberArgs']]]]):
    pulumi.set(self, "members", value)
# Property pair backed by pulumi.get/pulumi.set under the key "notes".
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
    """The configured `notes` value for the repository."""
    return pulumi.get(self, "notes")

@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "notes", value)
# Property pair backed by pulumi.get/pulumi.set under the key "package_type"
# (wire name: packageType).
@property
@pulumi.getter(name="packageType")
def package_type(self) -> Optional[pulumi.Input[str]]:
    """The configured `package_type` value for the repository."""
    return pulumi.get(self, "package_type")

@package_type.setter
def package_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "package_type", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "priority_resolution" (wire name: priorityResolution).
@property
@pulumi.getter(name="priorityResolution")
def priority_resolution(self) -> Optional[pulumi.Input[bool]]:
    """
    Setting repositories with priority will cause metadata to be merged only from repositories set with this field
    """
    return pulumi.get(self, "priority_resolution")

@priority_resolution.setter
def priority_resolution(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "priority_resolution", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "project_environments" (wire name: projectEnvironments).
@property
@pulumi.getter(name="projectEnvironments")
def project_environments(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Project environment for assigning this repository to. Allow values: "DEV" or "PROD"
    """
    return pulumi.get(self, "project_environments")

@project_environments.setter
def project_environments(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "project_environments", value)
# Property pair backed by pulumi.get/pulumi.set under the key "project_key"
# (wire name: projectKey).
@property
@pulumi.getter(name="projectKey")
def project_key(self) -> Optional[pulumi.Input[str]]:
    """
    Project key for assigning this repository to. When assigning repository to a project, repository key must be prefixed
    with project key, separated by a dash.
    """
    return pulumi.get(self, "project_key")

@project_key.setter
def project_key(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "project_key", value)
# Property pair backed by pulumi.get/pulumi.set under the key "property_sets"
# (wire name: propertySets).
@property
@pulumi.getter(name="propertySets")
def property_sets(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    List of property set name
    """
    return pulumi.get(self, "property_sets")

@property_sets.setter
def property_sets(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "property_sets", value)
# Property pair backed by pulumi.get/pulumi.set under the key
# "repo_layout_ref" (wire name: repoLayoutRef).
@property
@pulumi.getter(name="repoLayoutRef")
def repo_layout_ref(self) -> Optional[pulumi.Input[str]]:
    """
    Repository layout key for the local repository
    """
    return pulumi.get(self, "repo_layout_ref")

@repo_layout_ref.setter
def repo_layout_ref(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "repo_layout_ref", value)
# Property pair backed by pulumi.get/pulumi.set under the key "xray_index"
# (wire name: xrayIndex).
@property
@pulumi.getter(name="xrayIndex")
def xray_index(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable Indexing In Xray. Repository will be indexed with the default retention period. You will be able to change it via
    Xray settings.
    """
    return pulumi.get(self, "xray_index")

@xray_index.setter
def xray_index(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "xray_index", value)
class FederatedComposerRepository(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
archive_browsing_enabled: Optional[pulumi.Input[bool]] = None,
blacked_out: Optional[pulumi.Input[bool]] = None,
description: Optional[pulumi.Input[str]] = None,
download_direct: Optional[pulumi.Input[bool]] = None,
excludes_pattern: Optional[pulumi.Input[str]] = None,
includes_pattern: Optional[pulumi.Input[str]] = None,
key: Optional[pulumi.Input[str]] = None,
members: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FederatedComposerRepositoryMemberArgs']]]]] = None,
notes: Optional[pulumi.Input[str]] = None,
priority_resolution: Optional[pulumi.Input[bool]] = None,
project_environments: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
project_key: Optional[pulumi.Input[str]] = None,
property_sets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
repo_layout_ref: Optional[pulumi.Input[str]] = None,
xray_index: Optional[pulumi.Input[bool]] = None,
__props__=None):
"""
Creates a federated Composer repository.
## Example Usage
```python
import pulumi
import pulumi_artifactory as artifactory
terraform_federated_test_composer_repo = artifactory.FederatedComposerRepository("terraform-federated-test-composer-repo",
key="terraform-federated-test-composer-repo",
members=[
artifactory.FederatedComposerRepositoryMemberArgs(
enabled=True,
url="http://tempurl.org/artifactory/terraform-federated-test-composer-repo",
),
artifactory.FederatedComposerRepositoryMemberArgs(
enabled=True,
url="http://tempurl2.org/artifactory/terraform-federated-test-composer-repo-2",
),
])
```
## Import
Federated repositories can be imported using their name, e.g.
```sh
$ pulumi import artifactory:index/federatedComposerRepository:FederatedComposerRepository terraform-federated-test-composer-repo terraform-federated-test-composer-repo
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] archive_browsing_enabled: When set, you may view content such as HTML or Javadoc files directly from Artifactory. This may not be safe and
therefore requires strict content moderation to prevent malicious users from uploading content that may compromise
security (e.g., cross-site scripting attacks).
:param pulumi.Input[bool] blacked_out: When set, the repository does not participate in artifact resolution and new artifacts cannot be deployed.
:param pulumi.Input[bool] download_direct: When set, download requests to this repository will redirect the client to download the artifact directly from the cloud
storage provider. Available in Enterprise+ and Edge licenses only.
:param pulumi.Input[str] excludes_pattern: List of artifact patterns to exclude when evaluating artifact requests, in the form of x/y/**/z/*. By default no
artifacts are excluded.
:param pulumi.Input[str] includes_pattern: List of artifact patterns to include when evaluating artifact requests in the form of x/y/**/z/*. When used, only
artifacts matching one of the include patterns are served. By default, all artifacts are included (**/*).
:param pulumi.Input[str] key: the identity key of the repo.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['FederatedComposerRepositoryMemberArgs']]]] members: The list of Federated members and must contain this repository URL (configured base URL
`/artifactory/` + repo `key`). Note that each of the federated members will need to have a base URL set.
Please follow the [instruction](https://www.jfrog.com/confluence/display/JFROG/Working+with+Federated+Repositories#WorkingwithFederatedRepositories-SettingUpaFederatedRepository)
to set up Federated repositories correctly.
:param | |
version=self.version),
self.version, nsMap)
result.append(ret)
if samlToken:
result.append('%s %s %s' %
(WSSE_HEADER_START, samlToken, WSSE_HEADER_END))
result.append(SOAP_HEADER_END)
result.append('\n')
# Serialize soap body
result.extend([
SOAP_BODY_START,
'<%s xmlns="%s">' % (info.wsdlName, defaultNS),
_SerializeToUnicode(
mo,
Object(name="_this", type=ManagedObject, version=self.version),
self.version, nsMap)
])
# Serialize soap request parameters
for (param, arg) in zip(info.params, args):
result.append(_SerializeToUnicode(arg, param, self.version, nsMap))
result.extend(
['</%s>' % info.wsdlName, SOAP_BODY_END, SOAP_ENVELOPE_END])
return ''.join(result).encode(XML_ENCODING)
# HTTPConnection variant that speaks over a Unix domain socket instead of a
# TCP port. The filesystem path of the socket takes the place of the host
# name. Fairly gross, but it does the job.
class UnixSocketConnection(six.moves.http_client.HTTPConnection):
    def __init__(self, path):
        """Bind this connection to the Unix socket at *path*.

        The base ctor insists on a host argument; an empty string is
        sufficient because connect() below never consults it.
        """
        six.moves.http_client.HTTPConnection.__init__(self, '')
        self.path = path

    def connect(self):
        """Open the AF_UNIX socket and honor the HTTPConnection.connect
        contract by leaving the connected socket in self.sock."""
        s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        s.connect(self.path)
        self.sock = s
def _VerifyThumbprint(thumbprint, connection):
'''If there is a thumbprint, connect to the server and verify that the
SSL certificate matches the given thumbprint. An exception is thrown
if there is a mismatch.
'''
if thumbprint and isinstance(connection,
six.moves.http_client.HTTPSConnection):
if not connection.sock:
connection.connect()
derCert = connection.sock.getpeercert(True)
thumbprint_len = len(thumbprint)
if thumbprint_len == 40:
sha = hashlib.sha1()
elif thumbprint_len == 64:
sha = hashlib.sha256()
elif thumbprint_len == 128:
sha = hashlib.sha512()
else:
raise Exception("SHA thumbprint LENGTH is not valid -- %s" %
thumbprint)
sha.update(derCert)
shaDigest = sha.hexdigest().lower()
if shaDigest != thumbprint:
raise ThumbprintMismatchException(thumbprint, shaDigest)
# Internal HTTPConnection subclass that tolerates extra keyword arguments:
# anything the stock HTTPConnection ctor does not recognize is dropped.
class _HTTPConnection(six.moves.http_client.HTTPConnection):
    def __init__(self, *args, **kwargs):
        base = six.moves.http_client.HTTPConnection
        # Forward only keywords that appear among the base ctor's variable
        # names (same filter the original used, quirks included).
        accepted = base.__init__.__code__.co_varnames
        filtered = {key: value for key, value in kwargs.items()
                    if key in accepted and key != 'self'}
        base.__init__(self, *args, **filtered)
# Internal version of https connection
#
# Support ssl.wrap_socket params which are missing from httplib
# HTTPSConnection (e.g. ca_certs)
# Note: Only works iff the ssl params are passing in as kwargs
class _HTTPSConnection(six.moves.http_client.HTTPSConnection):
    def __init__(self, *args, **kwargs):
        # Extract ssl.wrap_socket param unknown to httplib.HTTPSConnection,
        # and push back the params in connect()
        self._sslArgs = {}
        tmpKwargs = kwargs.copy()
        for key in [
                "server_side", "cert_reqs", "ssl_version", "ca_certs",
                "do_handshake_on_connect", "suppress_ragged_eofs", "ciphers"
        ]:
            if key in tmpKwargs:
                self._sslArgs[key] = tmpKwargs.pop(key)
        six.moves.http_client.HTTPSConnection.__init__(self, *args,
                                                       **tmpKwargs)

    # Override connect to allow us to pass in additional ssl paramters to
    # ssl.wrap_socket (e.g. cert_reqs, ca_certs for ca cert verification)
    def connect(self):
        if len(self._sslArgs) == 0:
            # No override: defer entirely to the stock implementation.
            six.moves.http_client.HTTPSConnection.connect(self)
            return
        # Open the raw TCP connection ourselves so the extra ssl arguments
        # can be applied when wrapping the socket below.
        sock = socket.create_connection((self.host, self.port), self.timeout,
                                        self.source_address)
        # When tunneling through a proxy, the CONNECT exchange must happen
        # on the plain socket before TLS is negotiated.
        if self._tunnel_host:
            self.sock = sock
            self._tunnel()
        # NOTE(review): ssl.wrap_socket is deprecated since Python 3.7 and
        # removed in 3.12; migrating to ssl.SSLContext.wrap_socket would
        # change certificate-validation defaults, so it is left as-is here.
        self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file,
                                    **self._sslArgs)
        # TODO: Additional verification of peer cert if needed
        # cert_reqs = self._sslArgs.get("cert_reqs", ssl.CERT_NONE)
        # ca_certs = self._sslArgs.get("ca_certs", None)
        # if cert_reqs != ssl.CERT_NONE and ca_certs:
        #     if hasattr(self.sock, "getpeercert"):
        #         # TODO: verify peer cert
        #         dercert = self.sock.getpeercert(False)
        #         # pemcert = ssl.DER_cert_to_PEM_cert(dercert)
# Stand-in for the HTTPSConnection class that will connect to a proxy and
# issue a CONNECT command to start an SSL tunnel.
class SSLTunnelConnection(object):
    # @param proxyPath The path to pass to the CONNECT command.
    def __init__(self, proxyPath):
        self.proxyPath = proxyPath

    # Connects to a proxy server and initiates a tunnel to the destination
    # specified by proxyPath. If successful, a new HTTPSConnection is returned.
    # For Python Version < 2.7.9. cert_reqs=CERT_OPTIONAL to verify
    # server certificate
    #
    # @param path The destination URL path.
    # @param key_file **** Deprecated. Please pass context instread ****
    #                 sslContext.load_cert_chain(cert_file, key_file)
    #                 The SSL key file to use when wrapping the socket.
    # @param cert_file **** Deprecated. Please pass context instread ****
    #                  sslContext.load_cert_chain(cert_file, key_file)
    #                  The SSL certificate file to use when wrapping
    #                  the socket.
    # @param context SSL Context describing the various SSL options. It is
    #                only supported in Python 2.7.9 or higher.
    #                if context is used, load cert & key to the context with API
    #                context = ssl.create_default_context(cafile=ca_cert_file)
    #                context.load_cert_chain(key_file, cert_file)
    # @param kwargs In case caller passed in extra parameters not handled by
    #               SSLTunnelConnection
    def __call__(self,
                 path,
                 key_file=None,
                 cert_file=None,
                 context=None,
                 **kwargs):
        # Split ssl.wrap_socket-only keywords out of kwargs; they are only
        # used on the legacy (no SSLContext) code path at the bottom.
        _sslArgs = {}
        tmpKwargs = kwargs.copy()
        for key in [
                "server_side", "cert_reqs", "ssl_version", "ca_certs",
                "do_handshake_on_connect", "suppress_ragged_eofs", "ciphers"
        ]:
            if key in tmpKwargs:
                _sslArgs[key] = tmpKwargs.pop(key)
        if context:
            tmpKwargs['context'] = context
            # With an explicit cert/key pair, use a clone of the context so
            # the caller's context object is not mutated.
            if cert_file and key_file:
                tmpKwargs['context'] = _CloneSSLContext(
                    context, cert_file, key_file)
        else:
            if key_file:
                tmpKwargs['key_file'] = key_file
            if cert_file:
                tmpKwargs['cert_file'] = cert_file
        # Plain-text connection to the proxy; CONNECT opens the tunnel.
        # NOTE(review): the unfiltered kwargs (possibly still holding the
        # ssl-only keys) are passed here rather than tmpKwargs.
        # _HTTPConnection drops keywords its base ctor does not accept, so
        # this is harmless, but tmpKwargs looks like the intended argument —
        # confirm before changing.
        tunnel = _HTTPConnection(path, **kwargs)
        tunnel.request('CONNECT', self.proxyPath)
        resp = tunnel.getresponse()
        if resp.status != 200:
            raise six.moves.http_client.HTTPException(
                "%d %s" % (resp.status, resp.reason))
        host, port = splitport(path)
        if 'port' not in tmpKwargs:
            tmpKwargs['port'] = port
        retval = six.moves.http_client.HTTPSConnection(host=host, **tmpKwargs)
        if hasattr(retval, '_context'):
            # Modern Pythons: an SSLContext is available on the connection.
            if host in ['localhost', '127.0.0.1', '::1']:
                # Loopback addresses never match a certificate hostname.
                retval._context.check_hostname = False
            if 'ca_certs' in kwargs and kwargs['ca_certs']:
                retval._context.load_verify_locations(kwargs['ca_certs'])
            # Call set_tunnel when proxyPath is a stand alone proxy host.
            proxyHost = splitport(self.proxyPath)[0]
            if (_CheckIPv4(proxyHost) or _CheckIPv6(proxyHost)
                    or _CheckHostname(proxyHost)):
                retval.set_tunnel(self.proxyPath)
            # Call wrap_socket if ProxyPath is VC inbuilt proxyPath
            # ex: /sdkTunnel
            else:
                retval.sock = retval._context.wrap_socket(sock=tunnel.sock,
                                                          server_hostname=host)
        else:
            # Legacy path (pre-SSLContext Pythons): wrap the already-open
            # tunnel socket directly with ssl.wrap_socket.
            if host in ['localhost', '127.0.0.1', '::1']:
                _sslArgs['cert_reqs'] = ssl.CERT_NONE
            retval.sock = ssl.wrap_socket(tunnel.sock,
                                          keyfile=key_file,
                                          certfile=cert_file,
                                          **_sslArgs)
        return retval
class GzipReader:
    """File-like reader that inflates a gzip- or deflate-encoded stream.

    Data is pulled from the wrapped file object in `readChunkSize` pieces
    and decompressed lazily; only read() is provided.
    """
    GZIP = 1
    DEFLATE = 2

    def __init__(self, rfile, encoding=GZIP, readChunkSize=512):
        """
        :param rfile: underlying file-like object exposing read(size)
        :param encoding: GzipReader.GZIP or GzipReader.DEFLATE
        :param readChunkSize: bytes to pull from rfile per iteration
        """
        self.rfile = rfile
        self.chunks = []  # inflated data not yet returned to the caller
        self.bufSize = 0  # total byte count held in self.chunks
        assert (encoding in (GzipReader.GZIP, GzipReader.DEFLATE))
        self.encoding = encoding
        self.unzip = None  # decompressor, created lazily from first chunk
        self.readChunkSize = readChunkSize

    def _CreateUnzip(self, firstChunk):
        """Create the zlib decompressor, sniffing the actual wire format.

        Servers are sloppy with Content-Encoding: deflate — some send raw
        deflate, some zlib-wrapped deflate, and Apache mod_deflate sends
        full gzip. The first chunk's header decides which window setting
        zlib needs.
        """
        import zlib
        if self.encoding == GzipReader.GZIP:
            wbits = zlib.MAX_WBITS + 16
        elif self.encoding == GzipReader.DEFLATE:
            # bytearray gives integer byte values on both Python 2 (str)
            # and Python 3 (bytes). The previous code compared a bytes
            # slice against a *list* of characters (always False) and
            # called ord() on an int under Python 3 (TypeError), so the
            # format sniffing never worked.
            header = bytearray(firstChunk[:3])
            # Assume raw deflate unless the header says otherwise.
            wbits = -zlib.MAX_WBITS
            if header == bytearray(b'\x1f\x8b\x08'):
                # gzip: Apache mod_deflate will send gzip. Yurk!
                wbits = zlib.MAX_WBITS + 16
            elif len(header) >= 2:
                b0, b1 = header[0], header[1]
                if (b0 & 0xf) == 8 and ((b0 * 256 + b1) % 31) == 0:
                    # zlib-wrapped deflate: honor the advertised window size.
                    wbits = min(((b0 & 0xf0) >> 4) + 8, zlib.MAX_WBITS)
        else:
            assert (False)
        self.unzip = zlib.decompressobj(wbits)
        return self.unzip

    def read(self, bytes=-1):
        """Return up to *bytes* decompressed bytes (all remaining if -1).

        NOTE: the parameter name shadows the builtin but is kept for
        call compatibility with existing keyword callers.
        """
        chunks = self.chunks
        bufSize = self.bufSize
        while bufSize < bytes or bytes == -1:
            # Read and decompress one chunk of the underlying stream.
            chunk = self.rfile.read(self.readChunkSize)
            if self.unzip is None:
                self._CreateUnzip(chunk)
            if chunk:
                inflatedChunk = self.unzip.decompress(chunk)
                bufSize += len(inflatedChunk)
                chunks.append(inflatedChunk)
            else:
                # EOF: return whatever has been inflated so far.
                break
        if bufSize <= bytes or bytes == -1:
            leftoverBytes = 0
            leftoverChunks = []
        else:
            leftoverBytes = bufSize - bytes
            # Trim the last chunk so exactly *bytes* bytes are returned;
            # keep the remainder for the next call.
            lastChunk = chunks.pop()
            chunks.append(lastChunk[:-leftoverBytes])
            leftoverChunks = [lastChunk[-leftoverBytes:]]
        self.chunks = leftoverChunks
        self.bufSize = leftoverBytes
        return b"".join(chunks)
# SOAP stub adapter object
class SoapStubAdapter(SoapStubAdapterBase):
# Constructor
#
# The endpoint can be specified individually as either a host/port
# combination, or with a URL (using a url= keyword).
# @param self self
# @param host host
# @param port port (pass negative port number for no SSL)
# @param **** Deprecated. Please use version instead **** ns API namespace
# @param path location of SOAP VMOMI service
# @param url URL (overrides host, port, path if set)
# @param sock unix domain socket path (overrides host, port, url if set)
# @param poolSize size of HTTP connection pool
# @param certKeyFile **** Deprecated. Please load cert to context and pass
# context instread ****
# sslContext.load_cert_chain(key_file, cert_file)
# The path to the PEM-encoded SSL private key file.
# @param certFile **** Deprecated. Please pass context instread ****
# sslContext.load_cert_chain(key_file, cert_file)
# The path to the PEM-encoded SSL certificate file.
# @param httpProxyHost The host name of the proxy server.
# @param httpProxyPort The proxy server port.
# @param sslProxyPath Path to use when tunneling through VC's reverse proxy
| |
= "Disable Associated Widget"
else:
strs = "Enable Associated Widget"
else:
strs = "Enable Associated Widget"
item6 = menu.Append(wx.ID_ANY, strs)
if not haswin:
item6.Enable(False)
item7 = menu.Append(wx.ID_ANY, "Disable Item")
menu.AppendSeparator()
item8 = menu.Append(wx.ID_ANY, "Change Item Icons")
menu.AppendSeparator()
item9 = menu.Append(wx.ID_ANY, "Get Other Information For This Item")
menu.AppendSeparator()
item10 = menu.Append(wx.ID_ANY, "Delete Item")
if item == self.GetRootItem():
item10.Enable(False)
item11 = menu.Append(wx.ID_ANY, "Prepend An Item")
item12 = menu.Append(wx.ID_ANY, "Append An Item")
self.Bind(wx.EVT_MENU, self.OnItemBackground, item1)
self.Bind(wx.EVT_MENU, self.OnItemForeground, item2)
self.Bind(wx.EVT_MENU, self.OnItemBold, item3)
self.Bind(wx.EVT_MENU, self.OnItemFont, item4)
self.Bind(wx.EVT_MENU, self.OnItemHyperText, item5)
self.Bind(wx.EVT_MENU, self.OnEnableWindow, item6)
self.Bind(wx.EVT_MENU, self.OnDisableItem, item7)
self.Bind(wx.EVT_MENU, self.OnItemIcons, item8)
self.Bind(wx.EVT_MENU, self.OnItemInfo, item9)
self.Bind(wx.EVT_MENU, self.OnItemDelete, item10)
self.Bind(wx.EVT_MENU, self.OnItemPrepend, item11)
self.Bind(wx.EVT_MENU, self.OnItemAppend, item12)
self.PopupMenu(menu)
menu.Destroy()
"""
def OnItemBackground(self, event):
    """Let the user pick a new background colour for the current item."""
    initial = wx.ColourData()
    initial.SetColour(self.itemdict["back"])
    dlg = wx.ColourDialog(self, initial)
    dlg.GetColourData().SetChooseFull(True)
    if dlg.ShowModal() == wx.ID_OK:
        chosen = dlg.GetColourData().GetColour().Get()
        self.SetItemBackgroundColour(self.current, chosen)
    dlg.Destroy()
def OnItemForeground(self, event):
    """Let the user pick a new text colour for the current item."""
    initial = wx.ColourData()
    initial.SetColour(self.itemdict["fore"])
    dlg = wx.ColourDialog(self, initial)
    dlg.GetColourData().SetChooseFull(True)
    if dlg.ShowModal() == wx.ID_OK:
        chosen = dlg.GetColourData().GetColour().Get()
        self.SetItemTextColour(self.current, chosen)
    dlg.Destroy()
def OnItemBold(self, event):
    """Toggle the bold flag on the currently selected item."""
    make_bold = not self.itemdict["isbold"]
    self.SetItemBold(self.current, make_bold)
def OnItemFont(self, event):
    """Open a font dialog and apply the chosen font to the current item."""
    font = self.itemdict["font"]
    if font is None:
        # Fall back to the system GUI font when the item has no explicit one.
        font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
    data = wx.FontData()
    data.SetInitialFont(font)
    dlg = wx.FontDialog(self, data)
    if dlg.ShowModal() == wx.ID_OK:
        chosen = dlg.GetFontData().GetChosenFont()
        self.SetItemFont(self.current, chosen)
    dlg.Destroy()
def OnItemHyperText(self, event):
    """Flip the hypertext state of the currently selected item."""
    currently_html = self.itemdict["ishtml"]
    self.SetItemHyperText(self.current, not currently_html)
def OnEnableWindow(self, event):
    """Toggle the enabled state of the widget attached to the current item."""
    self.SetItemWindowEnabled(self.current,
                              not self.GetItemWindowEnabled(self.current))
def OnDisableItem(self, event):
    """Grey out (disable) the currently selected tree item."""
    self.EnableItem(self.current, False)
def OnItemIcons(self, event):
    """Show the icon-selection dialog preloaded with the item's current
    normal / selected / expanded / selected-expanded bitmaps."""
    bitmaps = [self.itemdict["normal"], self.itemdict["selected"],
               self.itemdict["expanded"], self.itemdict["selexp"]]
    wx.BeginBusyCursor()
    try:
        dlg = TreeIcons(self, -1, bitmaps=bitmaps)
    finally:
        # Clear the busy cursor even if dialog construction raises.
        wx.EndBusyCursor()
    dlg.ShowModal()
    # Bug fix: a modal dialog must be explicitly destroyed after use,
    # otherwise the native window object leaks.
    dlg.Destroy()
def SetNewIcons(self, bitmaps):
    """Apply the four chosen bitmaps to the current item, one per icon state
    (normal, selected, expanded, selected-expanded — in that order)."""
    states = (CT.TreeItemIcon_Normal,
              CT.TreeItemIcon_Selected,
              CT.TreeItemIcon_Expanded,
              CT.TreeItemIcon_SelectedExpanded)
    for bmp, state in zip(bitmaps, states):
        self.SetItemImage(self.current, bmp, state)
def OnItemInfo(self, event):
    """Pop up a summary (text, child count, item type, data type) for the
    currently selected item."""
    kind_names = {0: "Normal", 1: "CheckBox"}
    itemtype = kind_names.get(self.itemdict["itemtype"], "RadioButton")
    strs = ("Information On Selected Item:\n\n"
            "Text: " + self.itemdict["text"] + "\n"
            "Number Of Children: " + str(self.itemdict["children"]) + "\n"
            "Item Type: " + itemtype + "\n"
            "Item Data Type: " + repr(type(self.itemdict["pydata"])) + "\n")
    dlg = wx.MessageDialog(self, strs, "CustomTreeCtrlDemo Info",
                           wx.OK | wx.ICON_INFORMATION)
    dlg.ShowModal()
    dlg.Destroy()
#---Delete Item
def OnItemDelete(self, event):
    """Delete the selected item after confirmation, mark it deleted in the
    backing SQLite database, and compact the sibling sequence numbers.

    Public (shared) items — background colour '#e6f1f5' — may only be
    deleted while PFP managing mode is active.
    """
    colour = self.GetItemBackgroundColour(self.current)
    if colour != '#e6f1f5' and self.MainFrame.isPFPOnManaging != True:
        wx.MessageBox("Can not delete the public contents")
        return
    strs = ("Are You Sure You Want To Delete Item "
            + self.GetItemText(self.current) + "?")
    dlg = wx.MessageDialog(None, strs, 'Deleting Item',
                           wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
    if dlg.ShowModal() in [wx.ID_NO, wx.ID_CANCEL]:
        dlg.Destroy()
        return
    dlg.Destroy()
    self.DeleteChildren(self.current)
    self.Delete(self.current)
    self.current = None
    # NOTE(review): this reads self.item after the tree item was deleted and
    # self.current cleared — presumably self.item was captured by the popup
    # handler; confirm it still refers to the intended node here.
    CurrentID = self.GetPyData(self.item)
    DBPath = self.UserDBPath if colour == '#e6f1f5' else self.PublicDBPath
    con = sqlite3.connect(DBPath)
    try:
        cursor = con.cursor()
        # Security fix: all queries use ? placeholders — the previous string
        # concatenation was an SQL injection vector (and broke on quotes).
        cursor.execute(
            "update ProcessContentsTable set isDeleted = 'y'"
            " where ContentsID = ?", (CurrentID,))
        con.commit()
        cursor.execute(
            "select ParentID, Sequence from ProcessContentsTable"
            " where ContentsID = ?", (CurrentID,))
        ResultRow = cursor.fetchone()
        # Bind the sequence as an int so SQLite compares numerically, as the
        # old unquoted literal did (a TEXT bind would always sort above
        # integers in SQLite's cross-type ordering).
        cursor.execute(
            "select Sequence, ContentsID from ProcessContentsTable"
            " where cast(Sequence as integer) > ? and ParentID = ?",
            (int(ResultRow[1]), ResultRow[0]))
        # Shift each later sibling up by one to close the gap.
        for Row in cursor.fetchall():
            cursor.execute(
                "update ProcessContentsTable set Sequence = ?"
                " where ContentsID = ?",
                (str(int(Row[0]) - 1), Row[1]))
        con.commit()
    finally:
        # Close the connection even if a query fails.
        con.close()
#---Add Tree item
def OnItemAddSub(self, event):
    """Prompt for a name, append a new process-group node under the current
    item, persist it to the user's SQLite database, and refresh the
    related-contents list view."""
    dlg = wx.TextEntryDialog(self, "Please enter the new process group",
                             'group naming', 'insert new..')
    if dlg.ShowModal() == wx.ID_OK:
        newname = dlg.GetValue()
        # Create the tree node and mark it with the user-content colour.
        newitem = self.AppendItem(self.current, newname)
        self.SetItemImage(newitem, self.folder_close_idx, wx.TreeItemIcon_Normal)
        self.SetItemImage(newitem, self.folder_open_idx, wx.TreeItemIcon_Expanded)
        self.SetItemBackgroundColour(newitem, '#e6f1f5')
        self.EnsureVisible(newitem)
        ParentID = self.GetPyData(self.current)
        # Security fix: every query below uses ? placeholders — the previous
        # string concatenation was injectable via the user-entered name.
        con = sqlite3.connect(self.UserDBPath)
        cursor = con.cursor()
        ProcessCategory = "Process"
        Location = "ProcessGroup"
        Text = newname
        ContentsPath = ""
        Description = ""
        cursor.execute("select LastContentsID, NextContentsID from"
                       " ContentsIDTable where IDType = 'Local'")
        ResultContentsID = cursor.fetchone()
        LastContentsID = int(ResultContentsID[0])
        NextContentsID = int(ResultContentsID[1])
        ContentsID = str(NextContentsID)
        self.SetPyData(newitem, ContentsID)
        InsertParentID = ParentID
        isDeleted = "n"
        Author = "Guest"
        Contact = "Guest"
        # Anchor the new user item after the last public child, or at "top"
        # when the parent has no public children.
        # NOTE(review): connections are re-opened without closing the prior
        # one throughout this handler (pre-existing behavior, left intact).
        con = sqlite3.connect(self.PublicDBPath)
        cursor = con.cursor()
        UserContentsLocation = "top"
        cursor.execute(
            "Select ContentsID from ProcessContentsTable where"
            " isDeleted = 'n' and ParentID = ?"
            " order by cast(Sequence as decimal)", (InsertParentID,))
        for ResultRow in cursor.fetchall():
            UserContentsLocation = ResultRow[0]
        con = sqlite3.connect(self.UserDBPath)
        cursor = con.cursor()
        cursor.execute(
            "Select * from ProcessContentsTable where isDeleted = 'n'"
            " and ParentID = ? and UserContentsLocation = ?",
            (InsertParentID, UserContentsLocation))
        Sequence = str(len(cursor.fetchall()))
        cursor.execute(
            "insert into ProcessContentsTable ( ProcessCategory , Location ,"
            " Text , ContentsPath , Description , ContentsID , ParentID ,"
            " isDeleted , Author , Contact , Sequence, UserContentsLocation )"
            " values ( ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ? )",
            (ProcessCategory, Location, Text, ContentsPath, Description,
             ContentsID, InsertParentID, isDeleted, Author, Contact,
             Sequence, UserContentsLocation))
        con.commit()
        LastContentsID += 1
        NextContentsID += 1
        cursor.execute(
            "update ContentsIDTable set LastContentsID = ?,"
            " NextContentsID = ? where IDType = 'Local'",
            (str(LastContentsID), str(NextContentsID)))
        con.commit()
        # Rebuild the related-contents list: user items pinned to "top"
        # first, then each public row followed by the user rows anchored
        # beneath it.
        RelatedContentsWindow = self.GetParent().FindWindowByName('RelatedContents')
        RelatedContentsWindow.DeleteAllItems()
        related_query = (
            "Select Location, Text, ContentsPath, Description, ContentsID,"
            " UserContentsLocation from ProcessContentsTable"
            " where ParentID = ? and isDeleted = 'n'"
            " order by cast(Sequence as decimal)")
        con = sqlite3.connect(self.PublicDBPath)
        cursor = con.cursor()
        cursor.execute(related_query, (ParentID,))
        PublicResultRows = cursor.fetchall()
        con = sqlite3.connect(self.UserDBPath)
        cursor = con.cursor()
        cursor.execute(related_query, (ParentID,))
        UserResultRows = cursor.fetchall()
        ResultRows = []
        for UserRow in UserResultRows:
            if "top" in UserRow[5]:
                ResultRows.append(UserRow)
        for PublicRow in PublicResultRows:
            ResultRows.append(PublicRow)
            for UserRow in UserResultRows:
                if UserRow[5] == PublicRow[4]:
                    ResultRows.append(UserRow)
        icon_by_location = {"ProcessGroup": 0, "Category": 1,
                            "Analysis Point": 2, "Target": 3}
        idx = 0
        for ResultRow in ResultRows:
            RelatedContentsWindow.InsertStringItem(idx, ResultRow[0])
            RelatedContentsWindow.SetStringItem(idx, 1, ResultRow[1])
            RelatedContentsWindow.SetStringItem(idx, 2, ResultRow[2])
            RelatedContentsWindow.SetStringItem(idx, 3, ResultRow[4])
            if ResultRow[0] in icon_by_location:
                RelatedContentsWindow.SetItemColumnImage(
                    idx, 0, icon_by_location[ResultRow[0]])
            # IDs below 100000 are public content; tint them.
            if int(ResultRow[4]) < 100000:
                RelatedContentsWindow.SetItemBackgroundColour(idx, '#e6f1f5')
            idx += 1
        con.close()
    dlg.Destroy()
def OnItemAddSibling(self, event):
    """Ask for a name and append a new child under the current item."""
    dlg = wx.TextEntryDialog(self, "Please Enter The New Item Name",
                             'Item Naming', 'Python')
    if dlg.ShowModal() == wx.ID_OK:
        created = self.AppendItem(self.current, dlg.GetValue())
        self.EnsureVisible(created)
    dlg.Destroy()
def OnBeginEdit(self, event):
    """Veto label editing on the root item, logging its visible children."""
    self.log.write("OnBeginEdit" + "\n")
    item = event.GetItem()
    # Only the item titled "The Root Item" is protected from editing.
    if not (item and self.GetItemText(item) == "The Root Item"):
        return
    wx.Bell()
    self.log.write("You can't edit this one..." + "\n")
    # Lets just see what's visible of its children
    child, cookie = self.GetFirstChild(item)
    while child:
        self.log.write("Child [%s] visible = %d"
                       % (self.GetItemText(child), self.IsVisible(child)) + "\n")
        child, cookie = self.GetNextChild(item, cookie)
    event.Veto()
def OnEndEdit(self, event):
    """Reject any edited label that contains a digit."""
    self.log.write("OnEndEdit: %s %s"
                   % (event.IsEditCancelled(), event.GetLabel()))
    # Demonstrate rejecting an edit: labels with digits are vetoed.
    if any(ch in string.digits for ch in event.GetLabel()):
        self.log.write(", You can't enter digits..." + "\n")
        event.Veto()
        return
    self.log.write("\n")
def OnLeftDClick(self, event):
    """Double-click: point the file-system view at the clicked entry and
    refresh it on a background thread so the UI stays responsive."""
    item, _flags = self.HitTest(event.GetPosition())
    name = self.GetItemText(item)
    # The combo box lives on the grandparent-of-parent container.
    container = self.parent.GetParent().GetParent()
    base = container.combo.GetValue().split("<")[0].strip()
    target = base + "\\" + name
    window = self.MainFrame.FindWindowByName('VestigeLocationOnList')
    window.NowComboSelected = target
    # Remember the previous combo text before overwriting it.
    container.OriginalCombo = container.combo.GetValue()
    container.combo.SetValue(target)
    worker = threading.Thread(target=window.SetFileSystemTreeAndList, args=())
    worker.start()
    event.Skip()
def OnItemExpanded(self, event):
    """Handler for item expansion; per-item logging is currently disabled."""
    item = event.GetItem()
    #if item:
    # self.log.write("OnItemExpanded: %s" % self.GetItemText(item) + "\n")
def OnItemExpanding(self, event):
| |
#!/usr/bin/env python
"""
.. module:: belt
:synopsis: The belt function.
.. moduleauthor:: <NAME>
"""
import math
import numbers
import numpy
import opensdraw.lcad_language.curveFunctions as curveFunctions
import opensdraw.lcad_language.geometry as geometry
import opensdraw.lcad_language.interpreter as interp
import opensdraw.lcad_language.lcadExceptions as lcadExceptions
import opensdraw.lcad_language.lcadTypes as lcadTypes
lcad_functions = {}
def parsePulley(pulley):
    """Validate one pulley/sprocket spec and return its parsed form.

    A spec is a 4-element list: [position, orientation, radius, winding].
    Position and orientation are parsed via geometry.parseArgs; radius and
    winding must be numbers. Raises WrongTypeException / PulleyException
    on malformed input.
    """
    if not isinstance(pulley, list):
        raise lcadExceptions.WrongTypeException("list", type(pulley))
    if len(pulley) != 4:
        raise PulleyException(len(pulley))

    # Position and orientation (plane-normal) vectors.
    pos = geometry.parseArgs(pulley[0])
    z_vec = geometry.parseArgs(pulley[1])

    # Radius and winding direction are checked in this order so the
    # first offending value is the one reported.
    radius, winding = pulley[2], pulley[3]
    for value in (radius, winding):
        if not isinstance(value, numbers.Number):
            raise lcadExceptions.WrongTypeException("number", type(value))

    return [pos, z_vec, radius, winding]
#
# These classes create a belt function that can be used in opensdraw.
#
class LCadBelt(interp.LCadFunction):
    """
    **belt** - Creates a belt function.

    Creates and returns a function that parametrizes a belt, making it easy
    to add custom belts / chains to a MOC. Unlike the chain function this
    allows (almost) arbitrary locations and orientations of the pulleys /
    sprockets. All units are LDU.

    Each pulley / sprocket is a 4 member list:
    *(position orientation radius winding-direction)*.

    :param position: A 3 element list, the location of the pulley / sprocket.
    :param orientation: A 3 element list, the vector perpendicular to the plane of the pulley / sprocket.
    :param radius: The radius of the pulley / sprocket in LDU.
    :param winding-direction: Direction the belt goes around the pulley / sprocket (1 = counter-clockwise, -1 = clockwise).

    The belt visits the pulleys / sprockets in the order given; when
    *:continuous* is **t** it returns from the last one to the first to
    close the loop.

    Calling the created function with a distance returns a 4 x 4 transform
    matrix positioned on the belt, with the z-axis along the belt, the
    y-axis in the plane of the belt and the x-axis perpendicular to the
    plane of the belt. Calling it with **t** returns the belt's length.

    Keyword argument::

     :continuous t/nil ; Default t: distances are interpreted modulo the belt length and the
                       ; belt closes the loop. If nil, negative distances wrap the first
                       ; pulley and positive distances wrap the last pulley.

    Usage::

     (def a-belt (belt (list (list (list 0 0 0)   ; Create a belt with two pulleys.
                             (list 0 0 1)         ; Pulley one is at 0,0,0 and is in the
                             1.0 1)               ; x-y plane with radius 1 and counter-clockwise
                       (list (list 4 0 0)         ; winding direction.
                             (list 0 0 1)         ; Pulley two is at 4,0,0 with radius 1.5.
                             1.5 1))))

     (def m (a-belt 1)) ; m is a 4 x 4 transform matrix.
     (a-belt t)         ; Returns the length of the belt.
    """

    def __init__(self):
        interp.LCadFunction.__init__(self, "belt")
        self.setSignature([[list],
                           ["keyword", {"continuous": [[lcadTypes.LCadBoolean], interp.lcad_t]}]])

    def call(self, model, pulley_list, continuous = interp.lcad_t):
        # Normalize the keyword to a plain bool.
        continuous = bool(interp.isTrue(continuous))

        # A belt needs at least two pulleys.
        if len(pulley_list) < 2:
            raise NumberPulleysException(len(pulley_list))

        # Build and finalize the belt.
        belt = Belt(continuous)
        for spec in pulley_list:
            belt.addSprocket(Sprocket(*parsePulley(spec)))
        belt.finalize()

        # Wrap the belt in a curve function for the interpreter.
        return curveFunctions.CurveFunction(belt, "user created belt function.")
# Register the belt function with the module's export table.
lcad_functions["belt"] = LCadBelt()
class NumberPulleysException(lcadExceptions.LCadException):
    """Raised when a belt is created with fewer than two pulleys."""

    def __init__(self, got):
        # The guard in LCadBelt.call() is "len(pulley_list) < 2", i.e. the
        # requirement is *at least* 2; the original message claimed exactly 2.
        lcadExceptions.LCadException.__init__(self, "A belt must have at least 2 sprockets, got " + str(got))
class PulleyException(lcadExceptions.LCadException):
    """Raised by parsePulley() when a pulley specification does not have
    exactly 4 elements (position, orientation, radius, winding-direction)."""
    def __init__(self, got):
        lcadExceptions.LCadException.__init__(self, "A pulley must have 4 arguments, got " + str(got))
#
# The classes below do the math necessary to create a belt / chain.
#
class Belt(object):
    """
    Belt / chain: an ordered collection of Sprocket objects plus the
    tangent segments that connect them.
    """

    def __init__(self, continuous):
        self.continuous = continuous  # closed loop from last sprocket back to first
        self.dists = []               # cumulative belt length after each sprocket's stretch
        self.length = 0               # total belt length
        self.sprockets = []

    def addSprocket(self, sprocket):
        """Append a Sprocket to the belt."""
        self.sprockets.append(sprocket)

    def finalize(self):
        """Wire up sprocket coordinate systems, tangents and cumulative
        distances. Call once, after all sprockets have been added."""
        # Each sprocket derives its local coordinate system from its successor;
        # the last one uses the first.
        for cur, nxt in zip(self.sprockets, self.sprockets[1:]):
            cur.nextSprocket(nxt)
        self.sprockets[-1].nextSprocket(self.sprockets[0])

        # Tangent segments between consecutive sprockets, closing the loop
        # only when the belt is continuous.
        for cur, nxt in zip(self.sprockets, self.sprockets[1:]):
            cur.calcTangent(nxt)
        if self.continuous:
            self.sprockets[-1].calcTangent(self.sprockets[0])

        # Normalize winding angles and accumulate per-sprocket lengths.
        for sp in self.sprockets:
            sp.adjustAngles()
            self.length += sp.getLength()
            self.dists.append(self.length)

    def getLength(self):
        """Return the total length of the belt."""
        return self.length

    def getMatrix(self, distance):
        """
        Return the position / orientation matrix for a segment at 'distance'
        along the belt. The z-axis points along the belt; the x-axis is the
        radial direction.
        """
        if self.continuous:
            # Wrap out-of-range distances around the loop.
            while distance < 0:
                distance += self.length
            while distance > self.length:
                distance -= self.length

        # Find the sprocket whose stretch contains this distance.
        prev_dist = 0
        for sp, cum in zip(self.sprockets, self.dists):
            if distance < cum:
                return sp.getMatrix(distance - prev_dist)
            prev_dist = cum

        # Past the end (e.g. distance == length, or non-continuous overrun):
        # delegate to the last sprocket.
        if len(self.dists) > 1:
            return self.sprockets[-1].getMatrix(distance - self.dists[-2])
        return self.sprockets[-1].getMatrix(distance)
class Sprocket(object):
"""
A sprocket / pulley. This does most of the work.
"""
def __init__(self, pos, z_vec, radius, ccw):
self.ccw = True
if (ccw == -1):
self.ccw = False
self.enter_angle = None
self.enter_vec = None
self.leave_angle = None
self.leave_vec = None
self.length = 0
self.matrix = numpy.zeros((3,3))
self.n_vec = None
self.pos = numpy.array(pos)
self.radius = radius
self.sp_length = 0
self.t_twist = None
self.t_vec = None
self.z_vec = numpy.array(z_vec)
self.z_vec = self.z_vec/numpy.linalg.norm(self.z_vec)
def adjustAngles(self):
"""
Adjust angles so that the leave > start in the winding direction.
"""
if (self.enter_angle is not None) and (self.leave_angle is not None):
if self.ccw:
while (self.leave_angle < self.enter_angle):
self.leave_angle += 2.0 * math.pi
else:
while (self.leave_angle > self.enter_angle):
self.leave_angle -= 2.0 * math.pi
self.sp_length = self.radius * abs(self.enter_angle - self.leave_angle)
if (self.t_vec is not None):
self.length = self.sp_length + numpy.linalg.norm(self.t_vec)
def calcTangent(self, next_sp):
    """
    Calculate the tangent line between the current and the next sprocket.

    Sets self.leave_angle / self.leave_vec, next_sp.enter_angle /
    next_sp.enter_vec, self.t_vec (the vector of the straight segment
    between the two sprockets) and self.t_twist (rotation of the belt
    cross-section along that segment).
    """
    # Starting points for enter & exit angles.
    if self.ccw:
        self.leave_angle = 0
    else:
        self.leave_angle = math.pi
    # Calculate angle offset for the next sprocket, from the direction of
    # its center as seen in its own x/y coordinate frame.
    p_vec = next_sp.pos - self.pos
    p_angle = math.atan2(numpy.dot(next_sp.x_vec, p_vec),
                         numpy.dot(next_sp.y_vec, p_vec))
    if next_sp.ccw:
        next_sp.enter_angle = -p_angle
    else:
        next_sp.enter_angle = -p_angle + math.pi
    # Refine angles iteratively (up to 5 passes) until the segment between
    # the two rim points is perpendicular to both radius vectors, i.e. is a
    # true tangent line.
    for i in range(5):
        leave_vec = self.rotateVector(numpy.array([self.radius, 0, 0]), self.leave_angle)
        enter_vec = next_sp.rotateVector(numpy.array([next_sp.radius, 0, 0]), next_sp.enter_angle)
        t_vec = (next_sp.pos + enter_vec) - (self.pos + leave_vec)
        t_vec = t_vec/numpy.linalg.norm(t_vec)
        # Angular error: deviation from 90 degrees between the candidate
        # tangent and each radius vector.
        d_leave = math.acos(numpy.dot(t_vec, leave_vec)/numpy.linalg.norm(leave_vec)) - 0.5 * math.pi
        d_enter = math.acos(numpy.dot(t_vec, enter_vec)/numpy.linalg.norm(enter_vec)) - 0.5 * math.pi
        # Converged (errors below ~0.06 degrees).
        if (abs(d_leave) < 1.0e-3) and (abs(d_enter) < 1.0e-3):
            break
        # Nudge both angles toward tangency, respecting winding direction.
        if self.ccw:
            self.leave_angle += d_leave
        else:
            self.leave_angle -= d_leave
        if next_sp.ccw:
            next_sp.enter_angle += d_enter
        else:
            next_sp.enter_angle -= d_enter
    # Calculate entrance, exit and tangent vectors.
    self.leave_vec = self.rotateVector(numpy.array([self.radius, 0, 0]), self.leave_angle)
    next_sp.enter_vec = next_sp.rotateVector(numpy.array([next_sp.radius, 0, 0]), next_sp.enter_angle)
    self.t_vec = (next_sp.pos + next_sp.enter_vec) - (self.pos + self.leave_vec)
    # Calculate twist along the tangent vector: the angle of the leave
    # radial direction measured in the frame built from the tangent (z) and
    # the next sprocket's plane normal.
    l_vec = self.leave_vec / numpy.linalg.norm(self.leave_vec)
    if not self.ccw:
        l_vec = -l_vec
    z_vec = self.t_vec / numpy.linalg.norm(self.t_vec)
    y_vec = numpy.cross(z_vec, next_sp.z_vec)
    y_vec = y_vec / numpy.linalg.norm(y_vec)
    x_vec = numpy.cross(y_vec, z_vec)
    self.t_twist = math.atan2(numpy.dot(l_vec, x_vec), numpy.dot(l_vec, y_vec))
def getLength(self):
    # Total length handled by this sprocket: the arc around the sprocket
    # plus the tangent segment to the next one (computed in adjustAngles()).
    return self.length
def getMatrix(self, distance):
    """
    Return a 4 x 4 transform matrix for the point at 'distance' along this
    sprocket's stretch of the belt: first the arc around the sprocket
    (0 <= distance < sp_length), then the straight tangent segment to the
    next sprocket.
    """
    # On the sprocket.
    if (distance < self.sp_length) or (self.t_vec is None):
        angle = self.enter_angle
        if (distance < 0):
            # Negative distances measure backwards from the leave point.
            angle = self.leave_angle
        # Convert arc length to an angle, following the winding direction.
        if self.ccw:
            angle += distance / self.radius
        else:
            angle -= distance / self.radius
        # Radial vector to the point on the rim.
        y_vec = self.rotateVector(numpy.array([self.radius, 0, 0]), angle)
        pos = self.pos + y_vec
        y_vec = y_vec/numpy.linalg.norm(y_vec)
        if not self.ccw:
            y_vec = -y_vec
        z_vec = numpy.cross(self.z_vec, y_vec)
        x_vec = numpy.cross(y_vec, z_vec)
        return geometry.vectorsToMatrix(pos, x_vec, y_vec, z_vec)
    # Between this sprocket and the next sprocket.
    else:
        # Fraction (0..1) of the way along the tangent segment.
        dist = (distance - self.sp_length)/numpy.linalg.norm(self.t_vec)
        pos = self.pos + self.leave_vec + dist * self.t_vec
        # Linearly interpolate the cross-section twist along the segment.
        twist = dist * self.t_twist
        z_vec = self.t_vec / numpy.linalg.norm(self.t_vec)
        y_vec = numpy.cross(z_vec, self.z_vec)
        y_vec = y_vec/numpy.linalg.norm(y_vec)
        x_vec = numpy.cross(y_vec, z_vec)
        m = geometry.vectorsToMatrix(pos, x_vec, y_vec, z_vec)
        if (twist == 0.0):
            return m
        else:
            # Apply the twist as an extra rotation about the local z-axis.
            return numpy.dot(m, geometry.rotationMatrixZ(twist)).view(lcadTypes.LCadMatrix)
def nextSprocket(self, next_sp):
"""
Calculate sprocket coordinate system.
z_vec points up.
y_vec is in the plane defined by z_vec and the centers of the current and the next sprocket.
x_vec is perpendicular to y_vec and z_vec.
"""
self.n_vec = next_sp.pos - self.pos
self.n_vec = self.n_vec/numpy.linalg.norm(self.n_vec)
self.x_vec | |
"福建省厦门市杏林区", "350206", "福建省厦门市湖里区",
"350211", "福建省厦门市集美区", "350221", "福建省同安县", "3503", "福建省莆田市",
"350301", "福建省莆田市市辖区", "350302", "福建省莆田市城厢区", "350303",
"福建省莆田市涵江区", "350321", "福建省莆田县", "350322", "福建省仙游县", "3504",
"福建省三明市", "350401", "福建省三明市市辖区", "350402", "福建省三明市梅列区", "350403",
"福建省三明市三元区", "350421", "福建省明溪县", "350423", "福建省清流县", "350424",
"福建省宁化县", "350425", "福建省大田县", "350426", "福建省尤溪县", "350427",
"福建省沙县", "350428", "福建省将乐县", "350429", "福建省泰宁县", "350430",
"福建省建宁县", "350481", "福建省永安市", "3505", "福建省泉州市", "350501",
"福建省泉州市市辖区", "350502", "福建省泉州市鲤城区", "350521", "福建省惠安县", "350524",
"福建省安溪县", "350525", "福建省永春县", "350526", "福建省德化县", "350527",
"福建省金门县", "350581", "福建省石狮市", "350582", "福建省晋江市", "350583",
"福建省南安市", "3506", "福建省漳州市", "350601", "福建省漳州市市辖区", "350602",
"福建省漳州市芗城区", "350622", "福建省云霄县", "350623", "福建省漳浦县", "350624",
"福建省诏安县", "350625", "福建省长泰县", "350626", "福建省东山县", "350627",
"福建省南靖县", "350628", "福建省平和县", "350629", "福建省华安县", "350681",
"福建省龙海市", "3521", "福建省南平地区", "352101", "福建省南平市", "352102",
"福建省邵武市", "352103", "福建省武夷山市", "352104", "福建省建瓯市", "352121",
"福建省顺昌县", "352122", "福建省建阳县", "352124", "福建省浦城县", "352127",
"福建省光泽县", "352128", "福建省松溪县", "352129", "福建省政和县", "3522",
"福建省宁德地区", "352201", "福建省宁德市", "352202", "福建省福安市", "352224",
"福建省福鼎县", "352225", "福建省霞浦县", "352227", "福建省古田县", "352228",
"福建省屏南县", "352229", "福建省寿宁县", "352230", "福建省周宁县", "352231",
"福建省柘荣县", "3526", "福建省龙岩地区", "352601", "福建省龙岩市", "352602",
"福建省漳平市", "352622", "福建省长汀县", "352623", "福建省永定县", "352624",
"福建省上杭县", "352625", "福建省武平县", "352627", "福建省连城县"
, "36", "江西省", "3601", "江西省南昌市", "360101", "江西省南昌市市辖区", "360102",
"江西省南昌市东湖区", "360103", "江西省南昌市西湖区", "360104", "江西省南昌市青云谱区",
"360105", "江西省南昌市湾里区", "360111", "江西省南昌市郊区", "360121", "江西省南昌县",
"360122", "江西省新建县", "360123", "江西省安义县", "360124", "江西省进贤县", "3602",
"江西省景德镇市", "360201", "江西省景德镇市市辖区", "360202", "江西省景德镇市昌江区",
"360203", "江西省景德镇市珠山区", "360222", "江西省浮梁县", "360281", "江西省乐平市",
"3603", "江西省萍乡市", "360301", "江西省萍乡市市辖区", "360302", "江西省萍乡市安源区",
"360311", "江西省萍乡市上栗区", "360312", "江西省萍乡市芦溪区", "360313",
"江西省萍乡市湘东区", "360321", "江西省莲花县", "3604", "江西省九江市", "360401",
"江西省九江市市辖区", "360402", "江西省九江市庐山区", "360403", "江西省浔阳县", "360421",
"江西省九江县", "360423", "江西省武宁县", "360424", "江西省修水县", "360425",
"江西省永修县", "360426", "江西省德安县", "360427", "江西省星子县", "360428",
"江西省都昌县", "360429", "江西省湖口县", "360430", "江西省彭泽县", "360481",
"江西省瑞昌市", "3605", "江西省新余市", "360501", "江西省新余市市辖区", "360502",
"江西省新余市渝水区", "360521", "江西省分宜县", "3606", "江西省鹰潭市", "360601",
"江西省鹰潭市市辖区", "360602", "江西省鹰潭市月湖区", "360621", "江西省贵溪县", "360622",
"江西省余江县", "3621", "江西省赣州地区", "362101", "江西省赣州市", "362121", "江西省赣县",
"362122", "江西省南康县", "362123", "江西省信丰县", "362124", "江西省大余县",
"362125", "江西省上犹县", "362126", "江西省崇义县", "362127", "江西省安远县",
"362128", "江西省龙南县", "362129", "江西省定南县", "362130", "江西省全南县",
"362131", "江西省宁都县", "362132", "江西省于都县", "362133", "江西省兴国县",
"362134", "江西省瑞金县", "362135", "江西省会昌县", "362136", "江西省寻乌县",
"362137", "江西省石城县", "3622", "江西省宜春地区", "362201", "江西省宜春市",
"362202", "江西省丰城市", "362203", "江西省樟树市", "362204", "江西省高安市",
"362226", "江西省奉新县", "362227", "江西省万载县", "362228", "江西省上高县",
"362229", "江西省宜丰县", "362232", "江西省靖安县", "362233", "江西省铜鼓县", "3623",
"江西省上饶地区", "362301", "江西省上饶市", "362302", "江西省德兴市", "362321",
"江西省上饶县", "362322", "江西省广丰县", "362323", "江西省玉山县", "362324",
"江西省铅山县", "362325", "江西省横峰县", "362326", "江西省弋阳县", "362329",
"江西省余干县", "362330", "江西省波阳县", "362331", "江西省万年县", "362334",
"江西省婺源县", "3624", "江西省吉安地区", "362401", "江西省吉安市", "362402",
"江西省井岗山市", "362421", "江西省吉安县", "362422", "江西省吉水县", "362423",
"江西省峡江县", "362424", "江西省新干县", "362425", "江西省永丰县", "362426",
"江西省泰和县", "362427", "江西省遂川县", "362428", "江西省万安县", "362429",
"江西省安福县", "362430", "江西省永新县", "362432", "江西省宁冈县", "3625",
"江西省抚州地区", "362502", "江西省临川市", "362522", "江西省南城县", "362523",
"江西省黎川县", "362524", "江西省南丰县", "362525", "江西省崇仁县", "362526",
"江西省乐安县", "362527", "江西省宜黄县", "362528", "江西省金溪县", "362529",
"江西省资溪县", "362531", "江西省东乡县", "362532", "江西省广昌县"
, "37", "山东省", "3701", "山东省济南市", "370101", "山东省济南市市辖区", "370102",
"山东省济南市历下区", "370103", "山东省济南市市中区", "370104", "山东省济南市槐荫区",
"370105", "山东省济南市天桥区", "370112", "山东省济南市历城区", "370123", "山东省长清县",
"370124", "山东省平阴县", "370125", "山东省商河县", "370126", "山东省济阳县",
"370181", "山东省章丘市", "3702", "山东省青岛市", "370201", "山东省青岛市市辖区",
"370202", "山东省青岛市市南区", "370203", "山东省青岛市市北区", "370204",
"山东省青岛市台东区", "370205", "山东省青岛市四方区", "370206", "山东省青岛市沧口区",
"370211", "山东省青岛市黄岛区", "370212", "山东省青岛市崂山区", "370281", "山东省胶州市",
"370282", "山东省即墨市", "370283", "山东省平度市", "370284", "山东省胶南市",
"370285", "山东省菜西市", "3703", "山东省淄博市", "370301", "山东省淄博市市辖区",
"370302", "山东省淄博市淄川区", "370303", "山东省淄博市张店区", "370304",
"山东省淄博市博山区", "370305", "山东省淄博市临淄区", "370306", "山东省淄博市周村区",
"370321", "山东省桓台县", "370322", "山东省高青县", "370323", "山东省沂源县", "3704",
"山东省枣庄市", "370401", "山东省枣庄市市辖区", "370402", "山东省枣庄市市中区", "370403",
"山东省枣庄市薛城区", "370404", "山东省枣庄市峄城区", "370405", "山东省枣庄市台儿庄区",
"370406", "山东省枣庄市山亭区", "370481", "山东省滕州市", "3705", "山东省东营市",
"370501", "山东省东营市市辖区", "370502", "山东省东营市东营区", "370503",
"山东省东营市河口区", "370521", "山东省垦利县", "370522", "山东省利津县", "370523",
"山东省广饶县", "3706", "山东省烟台市", "370601", "山东省烟台市市辖区", "370602",
"山东省烟台市芝罘区", "370611", "山东省烟台市福山区", "370628", "山东省栖霞县", "370629",
"山东省海阳县", "370631", "山东省牟平县", "370634", "山东省长岛县", "370681",
"山东省龙口市", "370682", "山东省莱阳市", "370683", "山东省莱州市", "370684",
"山东省蓬莱市", "370685", "山东省招远市", "3707", "山东省潍坊市", "370701",
"山东省潍坊市市辖区", "370702", "山东省潍坊市潍城区", "370703", "山东省潍坊市寒亭区",
"370704", "山东省潍坊市坊子区", "370722", "山东省安丘县", "370724", "山东省临朐县",
"370725", "山东省昌乐县", "370726", "山东省昌邑县", "370727", "山东省高密县",
"370781", "山东省青州市", "370782", "山东省诸城市", "370783", "山东省寿光市", "3708",
"山东省济宁市", "370801", "山东省济宁市市辖区", "370802", "山东省济宁市市中区", "370811",
"山东省济宁市任城区", "370826", "山东省微山县", "370827", "山东省鱼台县", "370828",
"山东省金乡县", "370829", "山东省嘉祥县", "370830", "山东省汶上县", "370831",
"山东省泗水县", "370832", "山东省梁山县", "370881", "山东省曲阜市", "370882",
"山东省兖州市", "370883", "山东省邹城市", "3709", "山东省泰安市", "370901",
"山东省泰安市市辖区", "370902", "山东省泰安市泰山区", "370911", "山东省泰安市郊区", "370921",
"山东省宁阳县", "370923", "山东省东平县", "370982", "山东省新泰市", "370983",
"山东省肥城市", "3710", "山东省威海市", "371001", "山东省威海市市辖区", "371002",
"山东省威海市环翠区", "371081", "山东省文登市", "371082", "山东省荣城市", "371083",
"山东省乳山市", "3711", "山东省日照市", "371101", "山东省日照市市辖区", "371102",
"山东省日照市东港区", "371121", "山东省五莲县", "371122", "山东省莒县", "3712",
"山东省莱芜市", "371201", "山东省莱芜市市辖区", "371202", "山东省莱芜市莱城区", "371203",
"山东省莱芜市钢城区", "3723", "山东省滨州地区", "372301", "山东省滨州市", "372321",
"山东省惠民县", "372323", "山东省阳信县", "372324", "山东省无棣县", "372325",
"山东省沾化县", "372328", "山东省博兴县", "372330", "山东省邹平县", "3724",
"山东省德州地区", "372401", "山东省德州市", "372402", "山东省乐陵市", "372403",
"山东省禹城市", "372421", "山东省陵县", "372422", "山东省平原县", "372423",
"山东省夏津县", "372424", "山东省武城县", "372425", "山东省齐河县", "372428",
"山东省临邑县", "372431", "山东省宁津县", "372432", "山东省庆云县", "3725",
"山东省聊城地区", "372501", "山东省聊城市", "372502", "山东省临清市", "372522",
"山东省阳谷县", "372523", "山东省莘县", "372524", "山东省茌平县", "372525",
"山东省东阿县", "372526", "山东省冠县", "372527", "山东省高唐县", "3728", "山东省临沂地区",
"372801", "山东省临沂市", "372822", "山东省郯城县", "372823", "山东省苍山县",
"372824", "山东省莒南县", "372827", "山东省沂水县", "372829", "山东省蒙阴县",
"372830", "山东省平邑县", "372831", "山东省费县", "372832", "山东省沂南县",
"372833", "山东省临沭县", "3729", "山东省菏泽地区", "372901", "山东省菏泽市",
"372922", "山东省曹县", "372923", "山东省定陶县", "372924", "山东省成武县",
"372925", "山东省单县", "372926", "山东省巨野县", "372928", "山东省郓城县",
"372929", "山东省鄄城县", "372930", "山东省东明县"
, "41", "河南省", "4101", "河南省郑州市", "410101", "河南省郑州市市辖区", "410102",
"河南省郑州市中原区", "410103", "河南省郑州市二七区", "410104", "河南省郑州市管城回族区",
"410105", "河南省郑州市金水区", "410106", "河南省郑州市上街区", "410108",
"河南省郑州市邙山区", "410121", "河南省荥阳县", "410122", "河南省中牟县", "410123",
"河南省新郑县", "410125", "河南省登封县", "410126", "河南省密县", "410181",
"河南省巩义市", "4102", "河南省开封市", "410201", "河南省开封市市辖区", "410202",
"河南省开封市龙亭区", "410203", "河南省开封市顺河回族区", "410204", "河南省开封市鼓楼区",
"410205", "河南省开封市南关区", "410211", "河南省开封市郊区", "410221", "河南省杞县",
"410222", "河南省通许县", "410223", "河南省尉氏县", "410224", "河南省开封县",
"410225", "河南省兰考县", "4103", "河南省洛阳市", "410301", "河南省洛阳市市辖区",
"410302", "河南省洛阳市老城区", "410303", "河南省洛阳市西工区", "410304",
"河南省洛阳市廛河回族区", "410305", "河南省洛阳市涧西区", "410306", "河南省洛阳市吉利区",
"410311", "河南省洛阳市郊区", "410322", "河南省孟津县", "410323", "河南省新安县",
"410324", "河南省栾川县", "410325", "河南省嵩县", "410326", "河南省汝阳县",
"410327", "河南省宜阳县", "410328", "河南省洛宁县", "410329", "河南省伊川县",
"410381", "河南省偃师市", "4104", "河南省平顶山市", "410401", "河南省平顶山市市辖区",
"410402", "河南省平顶山市新华区", "410403", "河南省平顶山市卫东区", "410411",
"河南省平顶山市郊区", "410421", "河南省宝丰县", "410422", "河南省叶县", "410423",
"河南省鲁山县", "410425", "河南省郏县", "410426", "河南省襄城县", "410481",
"河南省舞钢市", "410482", "河南省汝州市", "4105", "河南省安阳市", "410501",
"河南省安阳市市辖区", "410502", "河南省安阳市文峰区", "410503", "河南省安阳市北关区",
"410504", "河南省安阳市铁西区", "410511", "河南省安阳市郊区", "410521", "河南省林县",
"410522", "河南省安阳县", "410523", "河南省汤阴县", "410526", "河南省滑县",
"410527", "河南省内黄县", "4106", "河南省鹤壁市", "410601", "河南省鹤壁市市辖区",
"410602", "河南省鹤壁市鹤山区", "410603", "河南省鹤壁市山城区", "410611", "河南省鹤壁市郊区",
"410621", "河南省浚县", "410622", "河南省淇县", "4107", "河南省新乡市 ",
"410701", "河南省新乡市市辖区", "410702", "河南省新乡市红旗区", "410703",
"河南省新乡市新华区", "410704", "河南省新乡市北站区", "410711", "河南省新乡市郊区", "410721",
"河南省新乡县", "410724", "河南省获嘉县", "410725", "河南省原阳县", "410726",
"河南省延津县", "410727", "河南省封丘县", "410728", "河南省长恒县", "410781",
"河南省卫辉市", "410782", "河南省辉县市", "4108", "河南省焦作市", "410801",
"河南省焦作市市辖区", "410802", "河南省焦作市解放区", "410803", "河南省焦作市中站区",
"410804", "河南省焦作市马村区", "410811", "河南省焦作市山阳区", "410821", "河南省修武县",
"410822", "河南省博爱县", "410823", "河南省武陟县", "410825", "河南省温县",
"410826", "河南省孟县", "410881", "河南省济源市", "410882", "河南省沁阳市", "4109",
"河南省濮阳市", "410901", "河南省濮阳市市辖区", "410902", "河南省濮阳市市区", "410922",
"河南省清丰县", "410923", "河南省南乐县", "410926", "河南省范县", "410927",
"河南省台前县", "410928", "河南省濮阳县", "4110", "河南省许昌市", "411001",
"河南省许昌市市辖区", "411002", "河南省许昌市魏都区", "411023", "河南省许昌县", "411024",
"河南省鄢陵县", "411081", "河南省禹州市", "411082", "河南省长葛市", "4111", "河南省漯河市",
"411101", "河南省漯河市市辖区", "411102", "河南省漯河市源仁区", "411121", "河南省舞阳县",
"411122", "河南省临颖县", "411123", "河南省郾城县", "4112", "河南省三门峡市",
"411201", "河南省三门峡市市辖区", "411202", "河南省三门峡市湖滨区", "411221", "河南省渑池县",
"411222", "河南省陕县", "411224", "河南省卢氏县", "411281", "河南省义马市",
"411282", "河南省灵宝市", "4123", "河南省商丘地区", "412301", "河南省商丘市",
"412321", "河南省虞城县", "412322", "河南省商丘县", "412323", "河南省民权县",
"412324", "河南省宁陵县", "412325", "河南省睢县", "412326", "河南省夏邑县",
"412327", "河南省柘城县", "412328", "河南省永城县", "4127", "河南省周口地区",
"412701", "河南省周口市", "412702", "河南省项城市", "412721", "河南省扶沟县",
"412722", "河南省西华县", "412723", "河南省商水县", "412724", "河南省太康县",
"412725", "河南省鹿邑县", "412726", "河南省郸城县", "412727", "河南省淮阳县",
"412728", "河南省沈丘县", "4128", "河南省驻马店地区", "412801", "河南省驻马店市",
"412821", "河南省确山县", "412822", "河南省泌阳县", "412823", "河南省遂平县",
"412824", "河南省西平县", "412825", "河南省上蔡县", "412826", "河南省汝南县",
"412827", "河南省平舆县", "412828", "河南省新蔡县", "412829", "河南省正阳县", "4129",
"河南省南阳地区", "412901", "河南省南阳市", "412902", "河南省邓州市", "412921",
"河南省南召县", "412922", "河南省方城县", "412923", "河南省西峡县", "412924",
"河南省南阳县", "412925", "河南省镇平县", "412926", "河南省内乡县", "412927",
"河南省淅川县", "412928", "河南省社旗县", "412929", "河南省唐河县", "412931",
"河南省新野县", "412932", "河南省桐柏县", "4130", "河南省信阳地区", "413001",
"河南省信阳市", "413021", "河南省息县", "413022", "河南省淮滨县", "413023",
"河南省信阳县", "413024", "河南省横川县", "413025", "河南省光山县", "413026",
"河南省固始县", "413027", "河南省商城县", "413028", "河南省罗山县", "413029", "河南省新县"
, "42", "湖北省", "4201", "湖北省武汉市", "420101", "湖北省武汉市市辖区", "420102",
"湖北省武汉市江岸区", "420103", "湖北省武汉市江汉区", "420104", "湖北省武汉市乔口区",
"420105", "湖北省武汉市汉阳区", "420106", "湖北省武汉市武昌区", "420107",
"湖北省武汉市青山区", "420111", "湖北省武汉市洪山区", "420112", "湖北省武汉市东西湖区",
"420113", "湖北省武汉市汉南区", "420114", "湖北省蔡甸区", "420122", "湖北省武昌县",
"420123", "湖北省黄陂县", "420124", "湖北省新洲县", "4202", "湖北省黄石市", "420201",
"湖北省黄石市市辖区", "420202", "湖北省黄石市黄石港区", "420203", "湖北省黄石市石灰窑区",
"420204", "湖北省黄石市下陆区", "420205", "湖北省黄石市铁山区", "420221", "湖北省大冶县",
"4203", "湖北省十堰市", "420301", "湖北省十堰市市辖区", "420302", "湖北省十堰市茅箭区",
"420303", "湖北省十堰市张湾区", "4204", "湖北省沙市市", "420400", "湖北省沙市市",
"4205", "湖北省宜昌市", "420501", "湖北省宜昌市市辖区", "420502", "湖北省宜昌市西陵区",
"420503", "湖北省宜昌市伍家岗区", "420504", "湖北省宜昌市点军区", "420521", "湖北省宜昌县",
"420523", "湖北省枝江县", "420525", "湖北省远安县", "420526", "湖北省兴山县",
"420527", "湖北省秭归县", "420528", "湖北省长阳土家族自治县", "420529",
"湖北省五峰土家族自治县", "420581", "湖北省枝城市", "420582", "湖北省当阳市", "4206",
"湖北省襄樊市", "420601", "湖北省襄樊市市辖区", "420602", "湖北省襄樊市襄城区", "420603",
"湖北省襄樊市樊东区", "420604", "湖北省襄樊市樊西区", "420605", "湖北省襄樊市郊区", "420621",
"湖北省襄阳县", "420623", "湖北省宜城县", "420624", "湖北省南漳县", "420625",
"湖北省谷城县", "420626", "湖北省保康县", "420681", "湖北省随州市", "420682",
"湖北省老河口市", "420683", "湖北省枣阳市", "4207", "湖北省鄂州市", "420701",
"湖北省鄂州市市辖区", "420702", "湖北省鄂州市梁子湖区", "420703", "湖北省鄂州市谷容区",
"420704", "湖北省鄂州市鄂城区", "4208", "湖北省荆门市", "420801", "湖北省荆门市市辖区",
"420802", "湖北省荆门市东宝区", "420803", "湖北省荆门市沙洋区", "4209", "湖北省孝感市",
"420901", "湖北省孝感市市辖区", "420902", "湖北省孝感市孝南区", "420903",
"湖北省孝感市孝昌区", "420922", "湖北省大悟县", "420923", "湖北省云梦县", "420924",
"湖北省汉川县", "420981", "湖北省应城市", "420982", "湖北省安陆市", "420983",
"湖北省广水市", "4221", "湖北省黄冈地区", "422101", "湖北省麻城市", "422102",
"湖北省武穴市 ", "422103", "湖北省黄州市", "422123", "湖北省红安县", "422125",
"湖北省罗田县", "422126", "湖北省英山县", "422127", "湖北省浠水县", "422128",
"湖北省蕲春县", "422130", "湖北省黄梅县", "4223", "湖北省咸宁地区", "422301",
"湖北省咸宁市", "422302", "湖北省蒲圻市", "422322", "湖北省嘉鱼县", "422324",
"湖北省通城县", "422325", "湖北省崇阳县", "422326", "湖北省通山县", "422327",
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# FILE: Base.py
#
# A base class for making asynchronous web service requests
#
# Copyright by Author. All rights reserved. Not for reuse without
# express permissions.
#
import sys, gc, json, time, logging, random, copy
import requests
from requests.exceptions import HTTPError, URLRequired
from threading import Thread, Semaphore
from infx.common.AsyncTimer import AsyncTimer
class Base(Thread):
def __init__(self,
             name="Base",
             logger=None,
             args=(),
             kwargs=None):
    """Base thread for asynchronous web service requests.

    name   -- thread name passed to threading.Thread.
    logger -- optional logging.Logger; a default is configured when omitted.
    args   -- positional args passed through to threading.Thread.
    kwargs -- keyword args passed through to threading.Thread (None -> {}).
    """
    # BUG FIX: "kwargs" previously defaulted to a shared mutable {}; use a
    # None sentinel so each call gets its own dict.
    if kwargs is None:
        kwargs = {}
    Thread.__init__(self, name=name, args=args, kwargs=kwargs)
    # First disable the "requests" chatty log messages.
    requests_logger = logging.getLogger("requests")
    requests_logger.setLevel("ERROR")
    self.requests_session = None
    if( logger ):
        self.logger = logger
    else:
        fmt='[%(asctime)s][%(module)s:%(funcName)s():%(lineno)d] %(levelname)s:%(message)s'
        logging.basicConfig(format=fmt,
                            level=logging.INFO)
        self.logger = logging.getLogger(__name__)
    self.async_timer = None
    self.msem = Semaphore(1)       # semaphore for the message buffer
    self.rsem = Semaphore(1)       # semaphore for the request activity
    self.message_queue = []
    self.pqsem = Semaphore(1)      # semaphore for queue of prior requests
    self.prior_requests = []       # a queue/list of prior requests
    self.rqsem = Semaphore(1)      # semaphore to prevent changes to the request_data
    self._request_data = {}        # the request_data that is to be used for the request
    self.headers = {}              # HTTP headers sent with requests
    self.max_prior_requests = 25   # bound on the prior_requests queue
    self.max_queue_len = 8000      # max message queue length
    self.querying = False          # True while a query is in process
    self.running = False           # True while the thread is running
    self.my_receiver = None        # optional receiver for result pages
    self.request_pages = 10        # when paged, how many pages to request
    self.domain = None             # the domain, url prefix, for this request
    self.req_params = {}           # request keyword parameters
    self.throttling = False
    self.last_throttle_check = None
    self.throttle_wait = 0
    self.auth_obj = None           # optional authentication helper
    self.debug_output = False
    self.reset_requests_session()
##
# Resets the requests http session for this object, closing any
# existing session first.
#
def reset_requests_session(self):
    """Close any existing session and build a fresh one with pooled
    HTTP and HTTPS adapters (100 connections, 3 retries)."""
    if( self.requests_session ):
        self.requests_session.close()
        self.requests_session = None
        gc.collect()
    self.requests_session = requests.Session()
    # Mount one pooled adapter per scheme.
    for scheme in ('http://', 'https://'):
        adapter = requests.adapters.HTTPAdapter(pool_connections=100,
                                                pool_maxsize=100,
                                                max_retries=3)
        self.requests_session.mount(scheme, adapter)
    # Keep the auth helper's session in sync with ours.
    if( self.auth_obj ):
        self.auth_obj.reset_requests_session()
    return
##
# Sets a "fake" user agent for an http request. The default is a
# python specific script user agent. Some web services will deny
# script based requests.
#
def set_user_agent(self, agent="random"):
    """Set (or clear) the User-Agent header. agent may be one of the
    known browser names, "random" to pick one, or a falsy / unknown
    value to remove the header."""
    known = {
        "mozilla": "Mozilla/5.0 (Windows; U; Windows NT 6.1; rv:2.2) Gecko/20110201",
        "safari": "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7_3; en-US) AppleWebKit/535.20 (KHTML, like Gecko) Version/5.1 Safari/535.20",
        "chrome": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/32.0.1664.3 Safari/537.36",
        "opera": "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en-US) Presto/2.9.168 Version/11.52",
        "konqueror": "Mozilla/5.0 (X11) KHTML/4.9.1 (like Gecko) Konqueror/4.9",
        "ie": "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
        "android": "Mozilla/5.0 (Linux; U; Android 2.3.5; en-US; HTC Vision Build/GRI40) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
    }
    if( agent == "random" ):
        # Same RNG draw as the original implementation.
        agents = ['ie','mozilla','safari','opera','konqueror','chrome','android']
        agent = agents[random.randint(0,6)]
    ua = known.get(agent) if agent else None
    if( ua ):
        self.headers["User-Agent"] = ua
    else:
        # Falsy or unrecognized agent name: remove any existing header.
        if( "User-Agent" in self.headers ):
            del self.headers["User-Agent"]
##
# Returns a copy of the http headers information
#
def get_request_headers(self):
    """Return a shallow copy of the HTTP request headers."""
    return dict(self.headers)
##
# Sets the domain for an http request
#
def set_request_domain(self, domain=None):
    """Set the URL prefix for requests; an http:// scheme is prepended
    when none is present. Passing None/empty clears the domain."""
    if( not domain ):
        self.domain = None
    elif( domain.startswith(("http://", "https://")) ):
        self.domain = domain
    else:
        self.domain = "http://" + domain
##
# Get the current domain for this http request
#
def get_request_domain(self):
    # The value set by set_request_domain(), or None when unset.
    return self.domain
##
# Sets the value for an http request keyword; passing val=None removes
# the keyword instead.
#
def set_request_param(self, kw=None, val=None):
    assert kw is not None
    # BUG FIX: the original tested "if( val ):", so valid falsy values
    # (0, "", False) silently *removed* the parameter instead of setting
    # it. Only None removes now.
    if( val is not None ):
        self.req_params[kw] = val
    elif( kw in self.req_params ):
        del self.req_params[kw]
##
# Removes the value for an http request keyword
#
def remove_request_param(self, kw=None):
    """Delete the parameter if present; a no-op otherwise."""
    assert kw is not None
    self.req_params.pop(kw, None)
##
# Returns a copy of the http request parameters, keys and values
#
def get_request_params(self):
    # BUG FIX: the comment promises a copy (and the sibling
    # get_request_headers() returns one), but the original returned the
    # live dict, letting callers mutate internal state. Return a shallow
    # copy instead.
    return self.req_params.copy()
##
# Clears the http request parameters
#
def clear_request_params(self):
    # Rebinds to a fresh dict rather than clearing in place.
    self.req_params = {}
##
# Returns the value for one http request keyword
#
def get_request_param(self, kw=None):
    """Return the parameter's value, or None when it is not set."""
    assert kw is not None
    return self.req_params.get(kw)
##
# This sets the request info prior to making the request. String params
# are folded into the domain as a query string; dict params are kept
# separate. The payload is only recorded for POST requests.
#
def set_request(self, domain=None, params=None, method="GET", headers=None, payload=None):
    self.rqsem.acquire()
    self._request_data = {}
    if( domain ):
        self._request_data['domain'] = domain
    self._request_data['params'] = None
    self._request_data['method'] = None
    self._request_data['headers'] = None
    self._request_data['payload'] = None
    if( params ):
        self._request_data['params'] = params
        # BUG FIX: the original tested "params is str" — an identity
        # comparison against the type object, which is never true for an
        # actual string — so string params were never merged into the
        # domain. isinstance() is what was intended.
        if( isinstance(params, str) ):
            self._request_data['params'] = None
            if( params.startswith('?') or domain.endswith('?') ):
                self._request_data['domain'] = domain+params
            else:
                self._request_data['domain'] = domain+"?"+params
    if( method ):
        self._request_data['method'] = method.upper()
        if( self._request_data['method']=="POST" ):
            self._request_data['payload'] = payload
    if( headers ):
        self._request_data['headers'] = headers
    else:
        self._request_data['headers'] = self.headers
    self.rqsem.release()
    return
##
# Pushes the information of the request onto the prior_requests queue
#
def push_request_info(self, request=None):
    """Snapshot the given request (or the pending _request_data) and
    append it to prior_requests, keeping at most max_prior_requests
    entries by dropping the oldest."""
    self.rqsem.acquire()
    info = copy.copy(request) if request else copy.copy(self._request_data)
    self.rqsem.release()
    # Result fields, filled in later by whoever processes the request.
    for field in ('response', 'warning', 'error', 'success'):
        info[field] = None
    self.pqsem.acquire()
    if( len(self.prior_requests) >= self.max_prior_requests ):
        # Queue full: drop the oldest entry to keep the length bounded.
        self.prior_requests = self.prior_requests[1:]
    self.prior_requests.append(info)
    self.pqsem.release()
    return
##
# Returns the information of the last request from prior_requests
#
def get_request_info(self):
    """Return (without removing) the most recent prior_requests entry,
    or None when the queue is empty."""
    with self.pqsem:
        if self.prior_requests:
            return self.prior_requests[-1]
        return None
##
# Pops the information of the last request from prior_requests
#
def pop_request_info(self):
    """Remove and return the most recent prior_requests entry, or None."""
    with self.pqsem:
        if self.prior_requests:
            return self.prior_requests.pop()
        return None
##
# Returns the number of request info items that can be reviewed
#
def has_request_info(self):
    """Return the current length of the prior_requests queue."""
    with self.pqsem:
        return len(self.prior_requests)
##
# Clears the prior_requests set
#
def clear_request_info(self):
    """Discard all stored prior request information."""
    with self.pqsem:
        self.prior_requests = []
##
# Looks for a specific header key in an http response header and
# returns that value if it exists
#
def get_header_value(self, headers=None, key=None):
    """Return the value for 'key' from either a requests
    CaseInsensitiveDict or a raw header string blob; None when absent."""
    val = None
    if( headers ):
        if( isinstance(headers, requests.structures.CaseInsensitiveDict) ):
            # Normalize "Key:" style names to plain keys.
            k = key.lower().replace(':','')
            # BUG FIX (portability): the original used the Python-2-only
            # "except KeyError, ke" syntax; dict.get() is equivalent
            # (missing key -> None) and version-agnostic.
            val = headers.get(k)
        else:
            # Raw header blob: scan line by line for the first line that
            # starts with the key, and strip the key text from it.
            hstr = str(headers).replace("\r",'')
            for line in hstr.split("\n"):
                if( line.startswith(key) ):
                    val = line.replace(key,'')
                    break
    return val
##
# Sets the authorization object for this object, if this is
# not set, then this is not handled as an authenticated request
#
def set_auth_obj(self, obj=None):
    self.auth_obj = obj
    # Let the auth helper (re)build its own HTTP session immediately.
    if( self.auth_obj ):
        self.auth_obj.reset_requests_session()
##
# Returns the authorization object for this object (None when the
# request is unauthenticated)
#
def get_auth_obj(self):
    return self.auth_obj
##
# Sets the receiver for this web service data request.
# If the web service request has a receiver object then that
# will be where the pages of the request are posted. Otherwise
# items will be placed into the existing message queue.
#
def set_receiver(self, obj=None):
    self.my_receiver = obj
##
# Get the receiver for this web service object (None when results go
# to the internal message queue instead)
#
def get_receiver(self):
    return self.my_receiver
##
# Sets the number of pages that will be retrieved for this web service
# request. This is only used for a paged request. Some requests are not
# paged - in those cases this value should be ignored.
#
def set_pages_to_request(self, r=25):
    # NOTE(review): the default here (25) differs from the initial value
    # assigned in __init__ (10) — confirm which is intended.
    self.request_pages = r
##
# If this is in the process of making a query, then this is True.
#
def query_in_process(self):
    return self.querying
##
# Status of the query thread: True while the thread's main loop is active.
#
def is_running(self):
    return self.running
##
# Sets whether or not we are going to be throttling these queries
#
def set_throttling(self, tr=False):
    self.throttling = tr
##
# How long a wait might be
#
def _throttling(self, qs=None):
    """Return a wait time (seconds) based on how quickly the previous
    query ran: the faster the query, the longer the suggested pause.
    Returns 0.0 when throttling is disabled or the query took >= 2s."""
    if( not self.throttling ):
        return 0.0
    if( qs < 0.5 ):
        return 2.5
    if( qs < 1.0 ):
        return 1.5
    if( qs < 2.0 ):
        return 0.75
    return 0.0
##
# Forces a wait to help throttle overly active queries. If the
# do_wait is set to True (default) then this forces a sleep,
| |
<gh_stars>10-100
# ro_annotation.py
"""
Research Object annotation read, write, decode functions
"""
__author__ = "<NAME> (<EMAIL>)"
__copyright__ = "Copyright 2011-2013, University of Oxford"
__license__ = "MIT (http://opensource.org/licenses/MIT)"
import sys
import os
import os.path
import datetime
import logging
import re
import urlparse
log = logging.getLogger(__name__)
import rdflib
#from rdflib.namespace import RDF, RDFS
#from rdflib import URIRef, Namespace, BNode
#from rdflib import Literal
import ro_settings
import ro_manifest
from ro_namespaces import RDF, RDFS, RO, AO, ORE, DCTERMS, ROTERMS
from ro_uriutils import resolveUri, resolveFileAsUri
from ro_prefixes import prefix_dict
# Default list of annotation types
annotationTypes = (
[
{ "name": "type", "prefix": "dcterms", "localName": "type", "type": "string"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.type
, "label": "Type"
, "description": "Word or brief phrase describing type of Research Object component"
}
, { "name": "keywords", "prefix": "dcterms", "localName": "subject", "type": "termlist"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.subject
, "label": "Keywords"
, "description": "List of key words or phrases associated with a Research Object component"
}
, { "name": "description", "prefix": "dcterms", "localName": "description", "type": "text"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.description
, "label": "Description"
, "description": "Extended description of Research Object component"
}
, { "name": "format", "prefix": "dcterms", "localName": "format", "type": "string"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.format
, "label": "Data format"
, "description": "String indicating the data format of a Research Object component"
}
, { "name": "note", "prefix": "dcterms", "localName": "note", "type": "text"
, "baseUri": ROTERMS.baseUri, "fullUri": ROTERMS.note
, "label": "Note"
, "description": "String indicating some information about a Research Object component"
}
, { "name": "title", "prefix": "dcterms", "localName": "title", "type": "string"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.title
, "label": "Title"
, "description": "Title of Research Object component"
}
, { "name": "created", "prefix": "dcterms", "localName": "created", "type": "datetime"
, "baseUri": DCTERMS.baseUri, "fullUri": DCTERMS.created
, "label": "Creation time"
, "description": "Date and time that Research Object component was created"
}
, { "name": "rdf:type", "prefix": "rdf", "localName": "type", "type": "resource"
, "baseUri": RDF.baseUri, "fullUri": RDF.type
, "label": "RDF type"
, "description": "RDF type of the annotated object"
}
, { "name": "rdfs:seeAlso", "prefix": "rdfs", "localName": "seeAlso", "type": "resource"
, "baseUri": RDFS.baseUri, "fullUri": RDFS.seeAlso
, "label": "See also"
, "description": "Related resource with further information"
}
])
# Default list of annotation prefixes
annotationPrefixes = prefix_dict.copy()
annotationPrefixes.update({'ex': "http://example.org/ro/annotation#"})
# Annotation support functions
def getResourceNameString(ro_config, rname, base=None):
    """
    Return a URI string corresponding to the supplied resource name.

    A name of the form "prefix:local" is first expanded as a CURIE using the
    configured annotation prefixes.  If the result is still a relative
    reference, it is resolved against `base`, or against the current
    directory when no base is given.
    """
    parts = rname.split(":")
    if len(parts) == 2:
        # Try to expand the name as a CURIE
        prefixes = ro_config["annotationPrefixes"]
        if parts[0] in prefixes:
            rname = prefixes[parts[0]] + parts[1]
    if urlparse.urlsplit(rname).scheme == "":
        # No scheme: treat as a relative reference
        rname = resolveUri(rname, base) if base else resolveFileAsUri(rname)
    return rname
def getAnnotationByName(ro_config, aname, defaultType="string"):
    """
    Resolve a command-line attribute name to a (predicate URIRef, value type)
    pair.  Predefined annotation types from the configuration take priority;
    any other name is turned into a URI under the default ROTERMS base.
    """
    valtype = defaultType
    predicate = None
    for atype in ro_config["annotationTypes"]:
        if atype["name"] == aname:
            # Predefined annotation name
            predicate = atype["fullUri"]
            valtype = atype["type"]
            break
    if predicate is None:
        # Not predefined: build a URI from the raw name
        predicate = getResourceNameString(ro_config, aname, base=ROTERMS.defaultBase+"#")
    return (rdflib.URIRef(predicate), valtype)
def getAnnotationByUri(ro_config, auri, defaultType="string"):
    """
    Given an attribute URI from the manifest graph, return an
    (attribute name, value type) pair for displaying the attribute.

    Lookup order: predefined annotation types, then a CURIE built from the
    configured prefixes, then the full URI wrapped in angle brackets.
    """
    # Look for predefined name
    for atype in ro_config["annotationTypes"]:
        if str(atype["fullUri"]) == str(auri):
            return (atype["name"], atype["type"])
    # Look for CURIE match.
    # .items() (not the Python-2-only .iteritems()) keeps this 2/3 compatible.
    for (pref, puri) in ro_config["annotationPrefixes"].items():
        if auri.startswith(puri):
            return (pref+":"+auri[len(puri):], defaultType)
    # return full URI in angle brackets
    return ("<"+str(auri)+">", defaultType)
def getAnnotationNameByUri(ro_config, uri):
    """
    Return just the display name for an attribute URI from the manifest
    graph (see getAnnotationByUri for the full lookup rules).
    """
    (name, _valtype) = getAnnotationByUri(ro_config, uri)
    return name
def makeAnnotationFilename(rodir, afile):
    """Return the absolute path of annotation file *afile* inside the RO
    manifest/metadata directory of *rodir*."""
    manifest_dir = os.path.join(os.path.abspath(rodir), ro_settings.MANIFEST_DIR+"/")
    return os.path.join(manifest_dir, afile)
def makeComponentFilename(rodir, rofile):
    """Return the path of component *rofile* within RO directory *rodir*."""
    component_path = os.path.join(rodir, rofile)
    log.debug("makeComponentFilename: %s, %s"%(rodir, rofile))
    return component_path
def readAnnotationBody(rodir, annotationfile):
    """
    Read an annotation body from the indicated file and return an RDF graph
    of the annotation values, or None when the file does not exist.
    """
    log.debug("readAnnotationBody: %s, %s"%(rodir, annotationfile))
    annotationfilename = makeComponentFilename(rodir, annotationfile)
    if not os.path.exists(annotationfilename): return None
    # Guess the serialization from the file extension; RDF/XML by default.
    # Raw string for the regex: "\." in a plain literal is an invalid
    # escape sequence on Python 3.
    annotationformat = "xml"
    if re.search(r"\.(ttl|n3)$", annotationfile): annotationformat="n3"
    rdfGraph = rdflib.Graph()
    rdfGraph.parse(annotationfilename, format=annotationformat)
    return rdfGraph
def createAnnotationGraphBody(ro_config, ro_dir, rofile, anngraph):
    """
    Create a new annotation body for a single resource in a research object,
    based on a supplied graph value.

    Existing annotations for the same resource are not touched; if an
    annotation is being added or replaced, it is the calling program's
    responsibility to update the manifest to reference the active
    annotations.  A new name is allocated for the created annotation graph,
    which is returned as the result of this function.

    ro_config   is the research object manager configuration, supplied as a
                dictionary
    ro_dir      is the research object root directory
    rofile      is the name of the Research Object component to be annotated,
                possibly relative to the RO root directory.
    anngraph    is an annotation graph that is to be saved.

    Returns the name of the annotation body created relative to the RO
    manifest and metadata directory.
    """
    # Determine a fresh name for the annotation body:
    # Ann-YYYYMMDD-NNNN-<suffix>.rdf, where NNNN increments until unused.
    log.debug("createAnnotationGraphBody: %s, %s"%(ro_dir, rofile))
    annotation_filename = None
    name_index = 0
    name_suffix = os.path.basename(rofile)
    if name_suffix in [".",""]:
        # Annotating the RO itself: fall back to the RO directory name
        name_suffix = os.path.basename(os.path.normpath(ro_dir))
    today = datetime.date.today()
    while annotation_filename is None:   # "is None", not "== None"
        name_index += 1
        name = ("Ann-%04d%02d%02d-%04d-%s.rdf"%
                (today.year, today.month, today.day, name_index, name_suffix))
        if not os.path.exists(makeAnnotationFilename(ro_dir, name)):
            annotation_filename = name
    # Serialize the annotation graph into the newly named body file
    log.debug("createAnnotationGraphBody: %s"%(annotation_filename))
    anngraph.serialize(destination=makeAnnotationFilename(ro_dir, annotation_filename),
        format='xml', base=ro_manifest.getRoUri(ro_dir), xml_base="..")
    return annotation_filename
def createAnnotationBody(ro_config, ro_dir, rofile, attrdict, defaultType="string"):
    """
    Create a new annotation body for a single resource in a research object.

    Existing annotations for the same resource are not touched; if an
    annotation is being added or replaced, it is the calling program's
    responsibility to update the manifest to reference the active
    annotations.  A new name is allocated for the created annotation, which
    is returned as the result of this function.

    ro_config   is the research object manager configuration, supplied as a
                dictionary
    ro_dir      is the research object root directory
    rofile      is the name of the Research Object component to be annotated,
                possibly relative to the RO root directory.
    attrdict    is a dictionary of attributes to be saved in the annotation
                body.  Dictionary keys are attribute names that can be
                resolved via getAnnotationByName.

    Returns the name of the annotation body created relative to the RO
    manifest and metadata directory.
    """
    # Build one triple per supplied attribute, then persist the graph
    anngraph = rdflib.Graph()
    subject = ro_manifest.getComponentUri(ro_dir, rofile)
    for (attrname, attrvalue) in attrdict.items():
        (predicate, valtype) = getAnnotationByName(ro_config, attrname, defaultType)
        anngraph.add((subject, predicate, makeAnnotationValue(ro_config, attrvalue, valtype)))
    # Write graph and return filename
    return createAnnotationGraphBody(ro_config, ro_dir, rofile, anngraph)
def _addAnnotationBodyToRoGraph(ro_graph, ro_dir, rofile, annfile):
    """
    Add a new annotation body to an RO graph.

    ro_graph    graph to which the annotation is added
    ro_dir      the research object directory
    rofile      the research object file being annotated
    annfile     the base file name of the annotation body to be added

    The triples produced mirror this RDF/XML shape:
      <ore:aggregates>
        <ro:AggregatedAnnotation>
          <ro:annotatesAggregatedResource rdf:resource="data/..." />
          <ao:body rdf:resource=".ro/(annotation).rdf" />
        </ro:AggregatedAnnotation>
      </ore:aggregates>
    """
    ann_node = rdflib.BNode()
    target_uri = ro_manifest.getComponentUri(ro_dir, rofile)
    body_uri = ro_manifest.getComponentUri(ro_dir, ro_settings.MANIFEST_DIR+"/"+annfile)
    ro_uri = ro_manifest.getComponentUri(ro_dir, ".")
    ro_graph.add((ann_node, RDF.type, RO.AggregatedAnnotation))
    ro_graph.add((ann_node, RO.annotatesAggregatedResource, target_uri))
    ro_graph.add((ann_node, AO.body, body_uri))
    ro_graph.add((ro_uri, ORE.aggregates, ann_node))
    return
def _removeAnnotationBodyFromRoGraph(ro_graph, annbody):
    """
    Remove all references to an annotation body from an RO graph.

    ro_graph    graph from which the annotation is removed
    annbody     the annotation body node to be removed
    """
    # Drop every triple emitted by the body node, then the aggregation
    # triple that points at it.
    ro_graph.remove((annbody, None, None))
    ro_graph.remove((None, ORE.aggregates, annbody))
    return
def _addSimpleAnnotation(ro_config, ro_dir, rofile, attrname, attrvalue):
    """
    Add a simple annotation to a file in a research object.

    ro_config   research object manager configuration (a dictionary)
    ro_dir      research object root directory
    rofile      file or resource to be annotated, possibly relative to the RO
    attrname    attribute name in a form recognized by getAnnotationByName
    attrvalue   value to be associated with the attribute
    """
    # Create the body file, then link it from the manifest graph
    annfile = createAnnotationBody(ro_config, ro_dir, rofile, {attrname: attrvalue})
    manifest_graph = ro_manifest.readManifestGraph(ro_dir)
    _addAnnotationBodyToRoGraph(manifest_graph, ro_dir, rofile, annfile)
    ro_manifest.writeManifestGraph(ro_dir, manifest_graph)
    return
def _removeSimpleAnnotation(ro_config, ro_dir, rofile, attrname, attrvalue):
"""
Remove a simple annotation or multiple matching annotations a research object.
ro_config is the research object manager configuration, supplied as a dictionary
ro_dir is the | |
import copy
import crc32
import hashlib
import json
import random
import socket
# from multiprocessing.process import Process
# from multiprocessing.queues import Queue
import threading
import time
import uuid
import zlib
from random import Random
from TestInput import TestInputServer
from TestInput import TestInputSingleton
from Queue import Queue
from BucketLib.BucketOperations import BucketHelper
from Cb_constants import constants
from common_lib import sleep
from global_vars import logger
from mc_bin_client import MemcachedClient, MemcachedError
from mc_ascii_client import MemcachedAsciiClient
from memcached.helper.old_kvstore import ClientKeyValueStore
from membase.api.rest_client import RestConnection
from memcacheConstants import ERR_NOT_MY_VBUCKET, ERR_ETMPFAIL, ERR_EINVAL
# from perf_engines import mcsoda
try:
import concurrent.futures
except ImportError:
print("WARNING: Failed to import concurrent.futures module")
class MemcachedClientHelperException(Exception):
    """Raised by MemcachedClientHelper for invalid arguments or failures.

    Carries an error code alongside the human-readable message."""
    def __init__(self, errorcode, message):
        super(MemcachedClientHelperException, self).__init__(errorcode, message)
        self.errorcode = errorcode
        self._message = message
        self._args = (errorcode, message)
class MemcachedClientHelper(object):
    """Static helpers that create memcached clients and load-generation
    worker threads for a Couchbase bucket.

    value_size_distribution maps a document size in bytes to the fraction
    of the workload using that size, e.g. {10: 0.1, 20: 0.2, 40: 0.8}.
    """
    @staticmethod
    def create_threads(servers=None, name='default', ram_load_ratio=-1,
                       number_of_items=-1, value_size_distribution=None,
                       number_of_threads=50, override_vBucketId=-1, write_only=False,
                       async_write=False, delete_ratio=0, expiry_ratio=0):
        """Build (but do not start) WorkerThread objects that together load
        either `ram_load_ratio` percent of the bucket's free RAM or
        `number_of_items` documents, spread across `number_of_threads`
        threads using the given value-size distribution.

        Raises MemcachedClientHelperException when `servers` is missing or
        neither load target is supplied.
        """
        log = logger.get("test")
        if not servers:
            # Fixed from misspelled "MemcachedClientHelperExcetion", which
            # raised NameError instead of the intended exception.
            raise MemcachedClientHelperException(errorcode='invalid_argument',
                                                 message="servers is not set")
        if ram_load_ratio < 0 and number_of_items < 0:
            raise MemcachedClientHelperException(errorcode='invalid_argument',
                                                 message="ram_load_ratio or number_of_items must be specified")
        if not value_size_distribution:
            value_size_distribution = {16: 0.25, 128: 0.25, 512: 0.25, 1024: 0.25}
        # Renamed from "list" to avoid shadowing the builtin
        value_specs = []
        if ram_load_ratio >= 0:
            info = BucketHelper(servers[0]).get_bucket(name)
            emptySpace = info.stats.ram - info.stats.memUsed
            space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
            log.info('space_to_fill : {0}, emptySpace : {1}'
                     .format(space_to_fill, emptySpace))
            for size, probability in value_size_distribution.items():
                # +250 approximates per-document metadata overhead
                how_many = int(space_to_fill / (size + 250) * probability)
                payload_generator = DocumentGenerator.make_docs(number_of_items,
                    {"name": "user-${prefix}", "payload": "memcached-json-${prefix}-${padding}",
                     "size": size, "seed": str(uuid.uuid4())})
                value_specs.append({'size': size, 'value': payload_generator, 'how_many': how_many})
        else:
            for size, probability in value_size_distribution.items():
                # NOTE(review): this pre-divides by number_of_threads and the
                # totals are divided per-thread again below; create_threads_for_load_bucket
                # divides only once.  Preserved as-is — confirm intent.
                how_many = ((number_of_items / number_of_threads) * probability)
                payload_generator = DocumentGenerator.make_docs(
                    number_of_items, {"name": "user-${prefix}",
                                      "payload": "memcached-json-${prefix}-${padding}",
                                      "size": size,
                                      "seed": str(uuid.uuid4())})
                value_specs.append({'size': size, 'value': payload_generator, 'how_many': how_many})
        for item in value_specs:
            # Floor-divide so the per-thread count stays integral on
            # Python 3 ("/=" would leave a float there).
            item['how_many'] = int(item['how_many']) // int(number_of_threads)
            # at least one element for each value size
            if item['how_many'] < 1:
                item['how_many'] = 1
            msg = "each thread will send {0} items with value of size : {1}"
            log.info(msg.format(item['how_many'], item['size']))
        threads = []
        for i in range(0, int(number_of_threads)):
            # choose one of the servers at random for each worker
            thread = WorkerThread(
                serverInfo=MemcachedClientHelper.random_pick(servers),
                name=name, values_list=value_specs,
                override_vBucketId=override_vBucketId,
                write_only=write_only,
                async_write=async_write, delete_ratio=delete_ratio,
                expiry_ratio=expiry_ratio)
            threads.append(thread)
        return threads

    @staticmethod
    def create_threads_for_load_bucket(serverInfo=None, name='default',
                                       ram_load_ratio=-1, number_of_items=-1,
                                       value_size_distribution=None,
                                       number_of_threads=50,
                                       override_vBucketId=-1,
                                       write_only=False,
                                       delete_ratio=0, expiry_ratio=0):
        """Like create_threads, but every worker targets the single
        `serverInfo` node and payloads are fixed '*'-filled strings.

        Raises MemcachedClientHelperException on missing arguments.
        """
        log = logger.get("test")
        if not serverInfo:
            raise MemcachedClientHelperException(errorcode='invalid_argument',
                                                 message="serverInfo is not set")
        if ram_load_ratio < 0 and number_of_items < 0:
            raise MemcachedClientHelperException(errorcode='invalid_argument',
                                                 message="ram_load_ratio or number_of_items must be specified")
        if not value_size_distribution:
            value_size_distribution = {16: 0.33, 128: 0.33, 1024: 0.33}
        value_specs = []
        if ram_load_ratio >= 0:
            info = BucketHelper(serverInfo).get_bucket(name)
            emptySpace = info.stats.ram - info.stats.memUsed
            space_to_fill = (int((emptySpace * ram_load_ratio) / 100.0))
            log.info('space_to_fill : {0}, emptySpace : {1}'
                     .format(space_to_fill, emptySpace))
            for size, probability in value_size_distribution.items():
                # let's assume overhead per key is ~250 bytes
                how_many = int(space_to_fill / (size + 250) * probability)
                payload = MemcachedClientHelper.create_value('*', size)
                value_specs.append({'size': size, 'value': payload, 'how_many': how_many})
        else:
            for size, probability in value_size_distribution.items():
                how_many = (number_of_items * probability)
                payload = MemcachedClientHelper.create_value('*', size)
                value_specs.append({'size': size, 'value': payload, 'how_many': how_many})
        for item in value_specs:
            # Floor-divide keeps the count an int on Python 3 as well
            item['how_many'] = int(item['how_many']) // int(number_of_threads)
            # at least one element for each value size
            if item['how_many'] < 1:
                item['how_many'] = 1
            msg = "each thread will send {0} items with value of size : {1}"
            log.info(msg.format(item['how_many'], item['size']))
        threads = []
        for _ in range(0, int(number_of_threads)):
            thread = WorkerThread(
                serverInfo=serverInfo, name=name, values_list=value_specs,
                override_vBucketId=override_vBucketId, write_only=write_only,
                delete_ratio=delete_ratio,
                expiry_ratio=expiry_ratio)
            threads.append(thread)
        return threads

    @staticmethod
    def load_bucket_and_return_the_keys(servers=None, name='default',
                                        ram_load_ratio=-1, number_of_items=-1,
                                        value_size_distribution=None,
                                        number_of_threads=50,
                                        override_vBucketId=-1,
                                        write_only=False,
                                        delete_ratio=0, expiry_ratio=0):
        """Run the worker threads to completion and return the tuple
        (inserted_keys, rejected_keys) accumulated across all threads."""
        inserted_keys = []
        rejected_keys = []
        log = logger.get("test")
        threads = MemcachedClientHelper.create_threads(
            servers, name, ram_load_ratio, number_of_items,
            value_size_distribution, number_of_threads, override_vBucketId,
            write_only=write_only, delete_ratio=delete_ratio,
            expiry_ratio=expiry_ratio)
        # we can start them!
        for thread in threads:
            thread.start()
        log.info("waiting for all worker thread to finish their work...")
        [thread.join() for thread in threads]
        log.info("worker threads are done...")
        inserted_count = 0
        rejected_count = 0
        deleted_count = 0
        expired_count = 0
        for thread in threads:
            t_inserted, t_rejected = thread.keys_set()
            inserted_count += thread.inserted_keys_count()
            rejected_count += thread.rejected_keys_count()
            deleted_count += thread._delete_count
            expired_count += thread._expiry_count
            inserted_keys.extend(t_inserted)
            rejected_keys.extend(t_rejected)
        msg = "inserted keys count : {0} , rejected keys count : {1}"
        log.info(msg.format(inserted_count, rejected_count))
        msg = "deleted keys count : {0} , expired keys count : {1}"
        log.info(msg.format(deleted_count, expired_count))
        return inserted_keys, rejected_keys

    @staticmethod
    def load_bucket(servers,
                    name='default',
                    ram_load_ratio=-1,
                    number_of_items=-1,
                    value_size_distribution=None,
                    number_of_threads=50,
                    override_vBucketId=-1,
                    write_only=False):
        """Run the worker threads to completion and return the tuple
        (inserted_keys_count, rejected_keys_count)."""
        inserted_keys_count = 0
        rejected_keys_count = 0
        log = logger.get("test")
        threads = MemcachedClientHelper.create_threads(
            servers, name, ram_load_ratio, number_of_items,
            value_size_distribution, number_of_threads, override_vBucketId,
            write_only)
        # we can start them!
        for thread in threads:
            thread.start()
        log.info("waiting for all worker thread to finish their work...")
        [thread.join() for thread in threads]
        log.info("worker threads are done...")
        for thread in threads:
            inserted_keys_count += thread.inserted_keys_count()
            rejected_keys_count += thread.rejected_keys_count()
        msg = "inserted keys count : {0} , rejected keys count : {1}"
        log.info(msg.format(inserted_keys_count, rejected_keys_count))
        return inserted_keys_count, rejected_keys_count

    @staticmethod
    def create_value(pattern, size):
        """Return a string of exactly `size` characters by repeating
        `pattern` and appending a prefix of it for the remainder.

        Integer division ("//") keeps this valid on Python 3, where "/"
        yields a float and float * str raises TypeError.
        """
        return (pattern * (size // len(pattern))) + pattern[0:(size % len(pattern))]

    @staticmethod
    def random_pick(list):
        """Return a uniformly random element of `list`, or None when empty."""
        if list:
            if len(list) > 1:
                return list[Random().randint(0, len(list) - 1)]
            return list[0]
        # raise array empty ?
        return None

    @staticmethod
    def direct_client(server, bucket, timeout=30,
                      admin_user='Administrator', admin_pass='password'):
        """Create an authenticated MemcachedClient bound directly to the
        node's memcached port, with the given bucket selected."""
        log = logger.get("test")
        rest = RestConnection(server)
        node = None
        try:
            node = rest.get_nodes_self()
        except ValueError:
            log.info("could not connect to server {0}, will try scanning all nodes".format(server))
        if not node:
            # Fall back to scanning the cluster for a matching ip:port
            nodes = rest.get_nodes()
            for n in nodes:
                if n.ip == server.ip and n.port == server.port:
                    node = n
        if isinstance(server, dict):
            log.info("dict:{0}".format(server))
            log.info("creating direct client {0}:{1} {2}"
                     .format(server["ip"], node.memcached, bucket.name))
        else:
            log.info("creating direct client {0}:{1} {2}"
                     .format(server.ip, node.memcached, bucket.name))
        BucketHelper(server).vbucket_map_ready(bucket, 60)
        vBuckets = BucketHelper(server).get_vbuckets(bucket)
        if isinstance(server, dict):
            client = MemcachedClient(server["ip"], node.memcached, timeout=timeout)
        else:
            client = MemcachedClient(server.ip, node.memcached, timeout=timeout)
        if vBuckets is not None:
            client.vbucket_count = len(vBuckets)
        else:
            client.vbucket_count = 0
        # todo raise exception for not bucket_info
        bucket_name = bucket.name.encode('ascii')
        client.sasl_auth_plain(admin_user, admin_pass)
        client.bucket_select(bucket_name)
        return client

    @staticmethod
    def proxy_client(server, bucket, timeout=30, force_ascii=False):
        """Create a client through the node's proxy port for `bucket`,
        using the ASCII protocol when requested via test params or
        `force_ascii`."""
        # for this bucket on this node what is the proxy ?
        bucket_info = BucketHelper(server).get_bucket_json(bucket.name)
        nodes = bucket_info["nodes"]
        log = logger.get("test")
        if (TestInputSingleton.input
                and "ascii" in TestInputSingleton.input.test_params
                and TestInputSingleton.input.test_params["ascii"].lower() == "true") or force_ascii:
            ascii = True
        else:
            ascii = False
        for _ in nodes:
            BucketHelper(server).vbucket_map_ready(bucket, 60)
            vBuckets = BucketHelper(server).get_vbuckets(bucket)
            if ascii:
                log.info("creating ascii client {0}:{1} {2}"
                         .format(server.ip, constants.memcached_port, bucket))
                client = MemcachedAsciiClient(server.ip, constants.memcached_port,
                                              timeout=timeout)
            else:
                if isinstance(server, dict):
                    log.info("creating proxy client {0}:{1} {2}"
                             .format(server["ip"], constants.memcached_port, bucket))
                    client = MemcachedClient(server["ip"], constants.memcached_port,
                                             timeout=timeout)
                else:
                    log.info("creating proxy client {0}:{1} {2}"
                             .format(server.ip, constants.memcached_port, bucket))
                    client = MemcachedClient(server.ip, constants.memcached_port,
                                             timeout=timeout)
                client.vbucket_count = len(vBuckets)
            # returns on the first node, as in the original implementation
            return client
        if isinstance(server, dict):
            raise Exception("unable to find {0} in get_nodes()"
                            .format(server["ip"]))
        else:
            raise Exception("unable to find {0} in get_nodes()"
                            .format(server.ip))

    @staticmethod
    def flush_bucket(server, bucket, admin_user='cbadminbucket', admin_pass='password'):
        """Flush `bucket`, retrying up to 5 times when memcached raises
        (e.g. temporary OOM)."""
        client = MemcachedClientHelper.direct_client(server, bucket, admin_user=admin_user, admin_pass=admin_pass)
        retry_attempt = 5
        while retry_attempt > 0:
            try:
                client.flush()
                logger.get("test").info("Bucket %s flushed" % bucket)
                break
            except MemcachedError:
                retry_attempt -= 1
                sleep(5, "Flush raised memcached error. Will retry..")
        client.close()
        return
class ReaderThread(object):
    """Synchronously GETs previously loaded keys until the queue signals a
    stop or the keyset is exhausted; read failures are only counted."""

    def __init__(self, info, keyset, queue):
        self.info = info
        self.keyset = keyset
        self.queue = queue
        self.error_seen = 0
        self.aborted = False

    def abort(self):
        """Mark this reader as aborted."""
        self.aborted = True

    def _saw_error(self, key):
        # Just count failures; per-key error logging proved too noisy.
        self.error_seen += 1

    def start(self):
        """Read keys until the queue is non-empty or no keys remain."""
        client = MemcachedClientHelper.direct_client(
            self.info["server"],
            self.info['name'],
            admin_user='cbadminbucket',
            admin_pass='password')
        sleep(5, "Wait for MC client to be acquired", log_type="infra")
        while self.queue.empty() and self.keyset:
            selected = MemcachedClientHelper.random_pick(self.keyset)
            selected['how_many'] -= 1
            if selected['how_many'] < 1:
                self.keyset.remove(selected)
            key = "{0}-{1}-{2}".format(self.info['baseuuid'],
                                       selected['size'],
                                       int(selected['how_many']))
            try:
                client.send_get(key)
            except Exception:
                self._saw_error(key)
        client.close()
# mutation ? let' do two cycles , first run and then try to mutate all those itesm
# and return
class WorkerThread(threading.Thread):
# too flags : stop after x errors
# slow down after every seeing y errors
# value_list is a list of document generators
def __init__(self, serverInfo, name, values_list,
ignore_how_many_errors=5000, override_vBucketId=-1,
terminate_in_minutes=120, write_only=False,
async_write=False, delete_ratio=0, expiry_ratio=0):
threading.Thread.__init__(self)
self.serverInfo = serverInfo
self.name = name
self.values_list = []
self.values_list.extend(copy.deepcopy(values_list))
self._value_list_copy = []
self._value_list_copy.extend(copy.deepcopy(values_list))
self._inserted_keys_count = 0
self._rejected_keys = []
self._rejected_keys_count = 0
self._delete_ratio = delete_ratio
self._expiry_ratio = expiry_ratio
self._delete_count = 0
self._expiry_count = 0
self._delete = []
self.ignore_how_many_errors = ignore_how_many_errors
self.override_vBucketId = override_vBucketId
self.terminate_in_minutes = terminate_in_minutes
self._base_uuid = uuid.uuid4()
self.queue = Queue()
# let's create a read_thread
self.info = {'server': serverInfo,
'name': self.name,
'baseuuid': self._base_uuid}
self.write_only = write_only
self.aborted = False
self.async_write = async_write
self.log = logger.get("test")
| |
setting.
:type ui_name: Array of String
| ``api version min:`` 2.4
| ``api version max:`` 2.4
| ``required:`` False
| ``default:`` None
:param visible: A flag indicating if this setting visible.
:type visible: Boolean
| ``api version min:`` 2.5
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param visible: A flag indicating if this setting visible.
:type visible: Array of Boolean
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 0
:param start: The record number to return in the selected page of data. It will always appear, although it may not be the first record. See the :limit for more information.
:type start: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` 1000
:param limit: The size of the page of data, that is, the maximum number of records returned. The limit size will be used to break the data up into pages and the first page with the start record will be returned. So if you have 100 records and use a :limit of 10 and a :start of 10, you will get records 10-19. The maximum limit is 10000.
:type limit: Integer
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` id
:param sort: The data field(s) to use for sorting the output. Default is id. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name.
:type sort: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` asc
:param dir: The direction(s) in which to sort the data. Default is 'asc'. Valid values are 'asc' and 'desc'.
:type dir: Array of String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param select: The list of attributes to return for each AdvSettingDef. Valid values are id, name, setting_type, category, visible, description, default_value, allow_empty, feature, display_hints, ui_name. If empty or omitted, all attributes will be returned.
:type select: Array
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_field: The field name for NIOS GOTO that is used for locating a row position of records.
:type goto_field: String
| ``api version min:`` 2.8
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param goto_value: The value of goto_field for NIOS GOTO that is used for locating a row position of records.
:type goto_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param query: This value will be matched against adv setting defs, looking to see if one or more of the listed attributes contain the passed value. You may also surround the value with '/' and '/' to perform a regular expression search rather than a containment operation. Any record that matches will be returned. The attributes searched are: allow_empty, category, default_value, description, display_hints, feature, id, name, setting_type, ui_name, visible.
:type query: String
| ``api version min:`` 2.3
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param xml_filter: A SetFilter XML structure to further refine the search. The SetFilter will be applied AFTER any search query or field values, but before any limit options. The limit and pagination will be enforced after the filter. Remind that this kind of filter may be costly and inefficient if not associated with a database filtering.
:type xml_filter: String
**Outputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:return adv_setting_defs: An array of the AdvSettingDef objects that match the specified input criteria.
:rtype adv_setting_defs: Array of AdvSettingDef
"""
return self.api_list_request(self._get_method_fullname("search"), kwargs)
def find(self, **kwargs):
"""Lists the available adv setting defs matching the input specification. This provides the most flexible search specification of all the query mechanisms, enabling searching using comparison operations other than equality. However, it is more complex to use and will not perform as efficiently as the index or search methods. In the input descriptions below, 'field names' refers to the following fields: allow_empty, category, default_value, description, display_hints, feature, id, name, setting_type, ui_name, visible.
**Inputs**
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_allow_empty: The operator to apply to the field allow_empty. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. allow_empty: A flag indicating if this setting can be empty. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_allow_empty: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_allow_empty: If op_allow_empty is specified, the field named in this input will be compared to the value in allow_empty using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_allow_empty must be specified if op_allow_empty is specified.
:type val_f_allow_empty: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_allow_empty: If op_allow_empty is specified, this value will be compared to the value in allow_empty using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_allow_empty must be specified if op_allow_empty is specified.
:type val_c_allow_empty: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_category: The operator to apply to the field category. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. category: The category of this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_category: If op_category is specified, the field named in this input will be compared to the value in category using the specified operator. That is, the value in this input will be treated as another field name, rather than a constant value. Either this field or val_c_category must be specified if op_category is specified.
:type val_f_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_c_category: If op_category is specified, this value will be compared to the value in category using the specified operator. The value in this input will be treated as an explicit constant value. Either this field or val_f_category must be specified if op_category is specified.
:type val_c_category: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param op_default_value: The operator to apply to the field default_value. Valid values are: =, <>, rlike, not rlike, >, >=, <, <=, like, not like, is null, is not null, between. default_value: Default value for this setting. For the between operator the value will be treated as an Array if comma delimited string is passed, and it must contain an even number of values.
:type op_default_value: String
| ``api version min:`` None
| ``api version max:`` None
| ``required:`` False
| ``default:`` None
:param val_f_default_value: If op_default_value is specified, the field named in this input will be compared to the value in default_value using the specified operator. That is, the value in this input will be | |
import xarray as xr
import numpy as np
class SplitAndStandardize:
"""Class instantiation of SplitAndStandardize:
Here we will be preprocessing data for deep learning model training.
This module includes methods for training and testing data splits and standardization.
Attributes:
climate (str): The climate period to derive deep learning data for; ``current`` or ``future``.
variable (str): Variable to run script the for, which can include ``TK``, ``EV``, ``EU``, ``QVAPOR``,
``PRESS``, ``W_vert``, ``WMAX``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, or ``MASK``.
percent_split (float): Percentage of total data to assign as training data. The remaining data will be
assigned as testing data. For example, 0.6 is 60% training data, 40% testing data.
working_directory (str): The directory path to where the produced files will be saved and worked from.
threshold1 (int): The threshold used for the chosen classification method (e.g., 75 UH25).
mask (boolean): Whether the threshold was applied within the storm patch mask or not. Defaults to ``False``.
unbalanced (boolean): Whether training data will be artificially balanced (``False``) or left unbalanced (``True``). Defaults to ``False``.
currenttrain_futuretest (boolean):
Raises:
Exceptions: Checks whether correct values were input for climate, variable, and percent_split.
"""
def __init__(self, climate, variable, percent_split, working_directory, threshold1, mask=False, unbalanced=False,
currenttrain_futuretest=False, kfold_total=5, kfold_indx=None, use_kfold=False):
# assigning class attributes
if climate!='current' and climate!='future':
raise Exception("Please enter current or future for climate option.")
else:
self.climate=climate
# variable name checks and string automatic assignments
if variable!='TK' and variable!='EV' and variable!='EU' and variable!='QVAPOR' and variable!='PRESS' and variable!='W_vert' \
and variable!='WMAX' and variable!='DBZ' and variable!='CTT' and variable!='UH25' and variable!='UH03' and variable!='MASK':
raise Exception("Please enter TK, EV, EU, QVAPOR, PRESS, W_vert, UH25, UH03, MAXW, CTT, DBZ, or MASK as variable.")
else:
self.variable=variable
# temperature at 1, 3, 5, and 7 km
if self.variable=="TK":
self.choice_var1="temp_sev_1"
self.choice_var3="temp_sev_3"
self.choice_var5="temp_sev_5"
self.choice_var7="temp_sev_7"
self.attrs_array=np.array(["tk_1km", "tk_3km", "tk_5km", "tk_7km"])
self.single=False
# v-wind at 1, 3, 5, and 7 km
if self.variable=="EV":
self.choice_var1="evwd_sev_1"
self.choice_var3="evwd_sev_3"
self.choice_var5="evwd_sev_5"
self.choice_var7="evwd_sev_7"
self.attrs_array=np.array(["ev_1km", "ev_3km", "ev_5km", "ev_7km"])
self.single=False
# u-wind at 1, 3, 5, and 7 km
if self.variable=="EU":
self.choice_var1="euwd_sev_1"
self.choice_var3="euwd_sev_3"
self.choice_var5="euwd_sev_5"
self.choice_var7="euwd_sev_7"
self.attrs_array=np.array(["eu_1km", "eu_3km", "eu_5km", "eu_7km"])
self.single=False
# water vapor at 1, 3, 5, and 7 km
if self.variable=="QVAPOR":
self.choice_var1="qvap_sev_1"
self.choice_var3="qvap_sev_3"
self.choice_var5="qvap_sev_5"
self.choice_var7="qvap_sev_7"
self.attrs_array=np.array(["qv_1km", "qv_3km", "qv_5km", "qv_7km"])
self.single=False
# pressure at 1, 3, 5, and 7 km
if self.variable=="PRESS":
self.choice_var1="pres_sev_1"
self.choice_var3="pres_sev_3"
self.choice_var5="pres_sev_5"
self.choice_var7="pres_sev_7"
self.attrs_array=np.array(["pr_1km", "pr_3km", "pr_5km", "pr_7km"])
self.single=False
# w-wind at 1, 3, 5, and 7 km
if self.variable=="W_vert":
self.choice_var1="wwnd_sev_1"
self.choice_var3="wwnd_sev_3"
self.choice_var5="wwnd_sev_5"
self.choice_var7="wwnd_sev_7"
self.attrs_array=np.array(["ww_1km", "ww_3km", "ww_5km", "ww_7km"])
self.single=False
# max-w
if self.variable=="WMAX":
self.choice_var1="maxw_sev_1"
self.attrs_array=np.array(["maxw"])
self.single=True
# dbz
if self.variable=="DBZ":
self.choice_var1="dbzs_sev_1"
self.attrs_array=np.array(["dbzs"])
self.single=True
# cloud top temperature
if self.variable=="CTT":
self.choice_var1="ctts_sev_1"
self.attrs_array=np.array(["ctts"])
self.single=True
# 2-5 km updraft helicity
if self.variable=="UH25":
self.choice_var1="uh25_sev_1"
self.attrs_array=np.array(["uh25"])
self.single=True
# 0-3 km updraft helicity
if self.variable=="UH03":
self.choice_var1="uh03_sev_1"
self.attrs_array=np.array(["uh03"])
self.single=True
# storm masks
if self.variable=="MASK":
self.choice_var1="mask_sev_1"
self.attrs_array=np.array(["mask"])
self.single=True
# percent splitting for train and test sets
if percent_split>=1:
raise Exception("Percent split should be a float less than 1.")
if percent_split<1:
self.percent_split=percent_split
# assign class attributes
self.working_directory=working_directory
self.threshold1=threshold1
self.unbalanced=unbalanced
self.mask=mask
# mask option string naming for files
if not self.mask:
self.mask_str='nomask'
if self.mask:
self.mask_str='mask'
# boolean for training with current, testing with future, standardization
self.currenttrain_futuretest=currenttrain_futuretest
if self.currenttrain_futuretest:
if self.climate == 'current':
raise Exception("Set currenttrain_futuretest to False!")
# for k-fold cross validation
self.use_kfold=use_kfold
if self.use_kfold:
self.kfold_total=kfold_total
self.kfold_indx=kfold_indx
def variable_translate(self):
"""Variable name for the respective filenames.
Returns:
variable (str): The variable string used to save files.
Raises:
ValueError: Input variable must be from available list.
"""
var={
'EU':'EU',
'EV':'EV',
'TK':'TK',
'QVAPOR':'QVAPOR',
'WMAX':'MAXW',
'W_vert':'W',
'PRESS':'P',
'DBZ':'DBZ',
'CTT':'CTT',
'UH25':'UH25',
'UH03':'UH03',
'MASK':'MASK'
}
try:
out=var[self.variable]
return out
except:
raise ValueError("Please enter ``TK``, ``EU``, ``EV``, ``QVAPOR``, ``PRESS``, ``DBZ``, ``CTT``, ``UH25``, ``UH03``, ``W_vert``, ``WMAX``, or ``MASK`` as variable.")
def open_above_threshold(self):
    """Open and concatenate the six analysis months of threshold-exceedance files.

    Returns:
        xarray.Dataset: Six months of data concatenated along ``patch``.
    """
    # December through May, in analysis order
    months = ['12', '01', '02', '03', '04', '05']
    datasets = [
        xr.open_mfdataset(
            f"/{self.working_directory}/{self.climate}_uh{self.threshold1}_{self.mask_str}_{month}.nc",
            parallel=True, combine='by_coords')
        for month in months]
    data = xr.concat(datasets, dim='patch')
    # close the per-month handles (these are large files!); Dataset.close()
    # returns None, so the original's rebind-to-close was a no-op assignment
    for monthly in datasets:
        monthly.close()
    return data
def open_below_threshold(self):
    """Open and concatenate the six analysis months of threshold non-exceedance files.

    Returns:
        xarray.Dataset: Six months of data concatenated along ``patch``.
    """
    # December through May, in analysis order
    months = ['12', '01', '02', '03', '04', '05']
    datasets = [
        xr.open_mfdataset(
            f"/{self.working_directory}/{self.climate}_nonuh{self.threshold1}_{self.mask_str}_{month}.nc",
            parallel=True, combine='by_coords')
        for month in months]
    data = xr.concat(datasets, dim='patch')
    # close the per-month handles (these are large files!)
    for monthly in datasets:
        monthly.close()
    return data
def grab_variables(self, data):
"""Eagerly load variable data. This function converts dask arrays into numpy arrays.
Args:
data (Xarray dataset): The original Xarray dataset containing dask arrays.
Returns:
data_1, data_2, data_3, data_4 or data_1 (numpy array(s)): Input data as numpy arrays.
"""
# if variable file contains 4 heights
if not self.single:
data_1=data[self.choice_var1].values
data_2=data[self.choice_var3].values
data_3=data[self.choice_var5].values
data_4=data[self.choice_var7].values
return data_1, data_2, data_3, data_4
# if variable file is single height
if self.single:
data_1=data[self.choice_var1].values
return data_1
def create_traintest_data(self, data_b, data_a, return_label=False):
    """Balance and split patch data into training and testing groups.

    Data is permuted with fixed seeds (0 for selection, 10 for the final
    shuffle) so the split is reproducible and data/labels stay aligned.
    The training group draws the same number of samples from each class
    — both sized by ``self.percent_split`` of the ``data_a`` population —
    to artificially balance rare above-threshold events. The testing group
    keeps a class ratio resembling the original populations.

    NOTE(review): the original docstring described ``data_b`` as the
    threshold-exceeding data, but the code below labels ``data_a`` samples
    as 1 ("above") and ``data_b`` samples as 0 ("below") — confirm which
    argument order callers actually use.

    Args:
        data_b (numpy array): Patches given label 0 by this routine.
        data_a (numpy array): Patches given label 1 by this routine; its
            size also determines both training-group sample counts.
        return_label (boolean): Whether to return the label data or not.
            Defaults to ``False``.

    Returns:
        train_data, test_data or train_data, test_data, train_label,
        test_label (numpy arrays): The training and testing data, and if
        return_label=``True``, the labels for supervised learning.
    """
    # train above (stratified sampling)
    np.random.seed(0)
    select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
    train_above=data_a[select_data]
    # train below (stratified sampling); note the size comes from data_a so
    # the training set is class-balanced
    np.random.seed(0)
    select_data=np.random.permutation(data_b.shape[0])[:int(data_a.shape[0]*self.percent_split)]
    train_below=data_b[select_data]
    # test above (stratified sampling): the remainder of data_a
    np.random.seed(0)
    select_data=np.random.permutation(data_a.shape[0])[int(data_a.shape[0]*self.percent_split):]
    test_above=data_a[select_data]
    # generate end index for test below (stratified sampling); scales the
    # below-class test count by data_b's population so the test ratio
    # resembles the original data — verify formula against documentation
    indx_below=int((((data_a.shape[0]*(1-self.percent_split))*data_b.shape[0])/data_a.shape[0])+(data_a.shape[0]*(1-self.percent_split)))
    # test below (stratified sampling): start past the slots used for training
    np.random.seed(0)
    select_data=np.random.permutation(data_b.shape[0])[int(data_a.shape[0] * self.percent_split):indx_below]
    test_below=data_b[select_data]
    train_data=np.vstack([train_above, train_below])
    if return_label:
        train_above_label=np.ones(train_above.shape[0])
        train_below_label=np.zeros(train_below.shape[0])
        train_label=np.hstack([train_above_label, train_below_label])
    test_data=np.vstack([test_above, test_below])
    if return_label:
        test_above_label=np.ones(test_above.shape[0])
        test_below_label=np.zeros(test_below.shape[0])
        test_label=np.hstack([test_above_label, test_below_label])
    # finally, permute the data that has been merged and properly balanced;
    # the same seed (10) is reused so data and labels shuffle identically
    np.random.seed(10)
    train_data=np.random.permutation(train_data)
    np.random.seed(10)
    test_data=np.random.permutation(test_data)
    if not return_label:
        return train_data, test_data
    if return_label:
        np.random.seed(10)
        train_label=np.random.permutation(train_label)
        np.random.seed(10)
        test_label=np.random.permutation(test_label)
        return train_data, test_data, train_label, test_label
def create_traintest_unbalanced(self, data_b, data_a, return_label=False):
"""This function performs creates and permutes training and testing data.
Args:
data_b (numpy array): Concatenated six months of data exceeding the threshold.
data_a (numpy array): Concatenated six months of data below the threshold.
return_label (boolean): Whether to return the label data or not. Defaults to ``False``.
Returns:
train_data, test_data or train_data, test_data, train_label, test_label (numpy arrays): The training and testing data, and if
return_label=``True``, the training and testing data labels for supervised learning.
"""
# train above UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_a.shape[0])[:int(data_a.shape[0]*self.percent_split)]
train_above=data_a[select_data]
# train below UH threshold (stratified sampling)
np.random.seed(0)
select_data=np.random.permutation(data_b.shape[0])[:int(data_b.shape[0]*self.percent_split)]
train_below=data_b[select_data]
# test | |
# Repository: Truth0906/PTTLibrary
import time
import progressbar
import threading
import re
try:
from . import data_type
from . import config
from . import lib_util
from . import i18n
from . import connect_core
from . import log
from . import screens
from . import exceptions
from . import command
from . import check_value
from . import version
from . import _api_post
from . import _api_get_time
except ModuleNotFoundError:
import data_type
import config
import lib_util
import i18n
import connect_core
import log
import screens
import exceptions
import command
import check_value
import version
import _api_post
import _api_get_time
class API:
def __init__(
        self,
        language: int = 0,
        log_level: int = 0,
        screen_timeout: int = 0,
        screen_long_timeout: int = 0,
        screen_post_timeout: int = 0,
        connect_mode: int = 0,
        port: int = 0,
        log_handler=None,
        host: int = 0):
    """Initialise a PyPtt API session.

    Validates every option, resolves 0 ("use default") values from the
    configuration, sets up logging and i18n, and creates the connection
    core. No network connection is attempted here.

    Args:
        language: i18n language id; 0 means "use the config default".
        log_level: log.level value; 0 means "use the config default".
        screen_timeout: screen timeout; 0 keeps the config default.
        screen_long_timeout: long-operation timeout; 0 keeps the default.
        screen_post_timeout: post-operation timeout; 0 keeps the default.
        connect_mode: connect_core.connect_mode value; 0 means default.
        port: TCP port; 0 means the config default.
        log_handler: optional callable that receives each log line.
        host: data_type.host_type value, or a host string.

    Raises:
        TypeError: an argument has the wrong type, or log_handler is not
            callable.
        ValueError: an argument is outside its accepted range.
    """
    # mailbox-full flag and logged-in user id; filled in after login
    self._mailbox_full = False
    self._ID = None
    if log_handler is not None and not callable(log_handler):
        raise TypeError('[PyPtt] log_handler is must callable!!')
    if log_handler is not None:
        has_log_handler = True
        set_log_handler_result = True
        try:
            # probe the handler with the banner; a TypeError means its
            # signature is incompatible, so fall back to print() below
            if log_level != log.level.SILENT:
                log_handler(f'PyPtt v {version.V}')
                log_handler('Developed by CodingMan')
        except TypeError:
            log_handler = None
            set_log_handler_result = False
    else:
        has_log_handler = False
        if log_level != log.level.SILENT:
            print(f'PyPtt v {version.V}')
            print('Developed by CodingMan')
    # session state defaults
    self._login_status = False
    self.unregistered_user = True
    self.registered_user = False
    self.process_picks = 0
    self.config = config.Config()
    # argument type validation
    if not isinstance(language, int):
        raise TypeError('[PyPtt] language must be integer')
    if not isinstance(log_level, int):
        raise TypeError('[PyPtt] log_level must be integer')
    if not isinstance(screen_timeout, int):
        raise TypeError('[PyPtt] screen_timeout must be integer')
    if not isinstance(screen_long_timeout, int):
        raise TypeError('[PyPtt] screen_long_timeout must be integer')
    if (not isinstance(host, int)) and (not isinstance(host, str)):
        raise TypeError('[PyPtt] host must be integer or string')
    # non-zero timeouts override the config defaults
    if screen_timeout != 0:
        self.config.screen_timeout = screen_timeout
    if screen_long_timeout != 0:
        self.config.screen_long_timeout = screen_long_timeout
    if screen_post_timeout != 0:
        self.config.screen_post_timeout = screen_post_timeout
    # resolve log level: 0 -> default, out-of-range -> error
    if log_level == 0:
        log_level = self.config.log_level
    elif not lib_util.check_range(log.level, log_level):
        raise ValueError('[PyPtt] Unknown log_level', log_level)
    else:
        self.config.log_level = log_level
    # resolve language the same way, then load the i18n strings
    if language == 0:
        language = self.config.language
    elif not lib_util.check_range(i18n.language, language):
        raise ValueError('[PyPtt] Unknown language', language)
    else:
        self.config.language = language
    i18n.load(self.config.language)
    if log_handler is not None:
        self.config.log_handler = log_handler
        log.show_value(
            self.config,
            log.level.INFO,
            i18n.log_handler,
            i18n.Init)
    elif has_log_handler and not set_log_handler_result:
        # a handler was supplied but rejected during the probe above
        log.show_value(
            self.config,
            log.level.INFO,
            i18n.log_handler,
            [
                i18n.Init,
                i18n.Fail
            ])
    if self.config.language == i18n.language.CHINESE:
        log.show_value(
            self.config, log.level.INFO, [
                i18n.ChineseTranditional,
                i18n.languageModule
            ],
            i18n.Init)
    elif self.config.language == i18n.language.ENGLISH:
        log.show_value(
            self.config, log.level.INFO, [
                i18n.English,
                i18n.languageModule
            ],
            i18n.Init)
    ##################
    # resolve integer hosts against the known host types; string hosts
    # are passed through unchanged
    if isinstance(host, int):
        if host == 0:
            host = self.config.host
        elif not lib_util.check_range(data_type.host_type, host):
            raise ValueError('[PyPtt] Unknown host', host)
    # elif isinstance(host, str):
    #     pass
    self.config.host = host
    if self.config.host == data_type.host_type.PTT1:
        log.show_value(
            self.config,
            log.level.INFO,
            [
                i18n.Connect,
                i18n.host
            ],
            i18n.PTT)
    elif self.config.host == data_type.host_type.PTT2:
        log.show_value(
            self.config,
            log.level.INFO,
            [
                i18n.Connect,
                i18n.host
            ],
            i18n.PTT2)
    elif self.config.host == data_type.host_type.LOCALHOST:
        log.show_value(
            self.config,
            log.level.INFO,
            [
                i18n.Connect,
                i18n.host
            ],
            i18n.Localhost)
    else:
        log.show_value(
            self.config,
            log.level.INFO,
            [
                i18n.Connect,
                i18n.host
            ],
            self.config.host)
    ##################
    # an integer host (official PTT endpoint) only supports websocket;
    # a custom string host may also use telnet
    if isinstance(host, int):
        connect_core.connect_mode.min_value = connect_core.connect_mode.WEBSOCKET
        connect_core.connect_mode.max_value = connect_core.connect_mode.WEBSOCKET
    elif isinstance(host, str):
        connect_core.connect_mode.min_value = connect_core.connect_mode.TELNET
        connect_core.connect_mode.max_value = connect_core.connect_mode.WEBSOCKET
    check_value.check(self.config, int, 'connect_mode', connect_mode)
    if connect_mode == 0:
        connect_mode = self.config.connect_mode
    elif not lib_util.check_range(connect_core.connect_mode, connect_mode):
        raise ValueError('[PyPtt] Unknown connect_mode', connect_mode)
    else:
        self.config.connect_mode = connect_mode
    check_value.check(self.config, int, 'port', port)
    if port == 0:
        port = self.config.port
    elif not 0 < port < 65535:
        raise ValueError('[PyPtt] Unknown port', port)
    else:
        self.config.port = port
    self.connect_core = connect_core.API(self.config)
    # caches: boards known to exist, per-board info, moderator lists
    self._exist_board_list = list()
    self._board_info_list = dict()
    self._ModeratorList = dict()
    self._LastThrowWaterBallTime = 0
    # remember the creating thread; _one_thread() enforces single-thread use
    self._ThreadID = threading.get_ident()
    self._goto_board_list = list()
    self._board_info_list = dict()  # NOTE(review): duplicates the assignment a few lines up
    log.show_value(
        self.config,
        log.level.DEBUG,
        'ThreadID',
        self._ThreadID)
    log.show_value(
        self.config,
        log.level.INFO,
        [
            i18n.Library,
            ' v ' + version.V,
        ],
        i18n.Init)
def _one_thread(self) -> None:
    """Enforce single-thread use of this API instance.

    Raises:
        exceptions.MultiThreadOperated: when called from a thread other
            than the one that created the instance.
    """
    thread_id = threading.get_ident()
    if thread_id == self._ThreadID:
        return
    log.show_value(
        self.config,
        log.level.DEBUG,
        'ThreadID',
        self._ThreadID)
    log.show_value(
        self.config,
        log.level.DEBUG,
        'Current thread id',
        thread_id)
    raise exceptions.MultiThreadOperated()
def get_version(self) -> str:
    """Return the PyPtt version string recorded in the configuration."""
    self._one_thread()
    current_version = self.config.Version
    return current_version
def _login(
        self,
        ptt_id: str,
        password: str,
        kick_other_login: bool = False) -> None:
    """Internal login implementation; delegates to _api_loginout.login.

    Args:
        ptt_id: PTT account id.
        password: PTT account password.  (The annotation here was a
            redaction artifact ``<PASSWORD>`` — restored to ``str``.)
        kick_other_login: Kick any other active session first.
    """
    # package-relative import when installed as a package; plain import when
    # run from the source directory (mirrors the module header)
    try:
        from . import _api_loginout
    except ModuleNotFoundError:
        import _api_loginout
    return _api_loginout.login(
        self,
        ptt_id,
        password,
        kick_other_login)
def login(
        self,
        ptt_id: str,
        password: str,
        kick_other_login: bool = False) -> None:
    """Log in to PTT, retrying once when the first attempt raises LoginError."""
    self._one_thread()
    self.config.log_last_value = None
    check_value.check(self.config, str, 'ID', ptt_id)
    check_value.check(self.config, str, 'Password', password)
    check_value.check(self.config, bool, 'kick_other_login', kick_other_login)
    # one automatic retry on LoginError; the second failure propagates
    for attempt in range(2):
        try:
            return self._login(
                ptt_id,
                password,
                kick_other_login=kick_other_login)
        except exceptions.LoginError:
            if attempt == 1:
                raise
def logout(self) -> None:
    """Log out from PTT. Silently returns when no session is logged in."""
    self._one_thread()
    if not self._login_status:
        return
    self.config.log_last_value = None
    # package-relative import when installed as a package; plain import when
    # run from the source directory (mirrors the module header)
    try:
        from . import _api_loginout
    except ModuleNotFoundError:
        import _api_loginout
    return _api_loginout.logout(self)
def log(self, *msg) -> None:
    """Emit *msg* (joined with spaces) through the PyPtt logger at OUTSIDE level."""
    self._one_thread()
    current_msg = ' '.join(str(part) for part in msg)
    log.log(self.config, log.level.OUTSIDE, current_msg)
def get_time(self) -> str:
    """Return the current PTT system time string.

    Raises:
        exceptions.Requirelogin: when no session is logged in.
    """
    self._one_thread()
    if not self._login_status:
        raise exceptions.Requirelogin(i18n.Requirelogin)
    self.config.log_last_value = None
    return _api_get_time.get_time(self)
def get_post(
        self,
        board: str,
        post_aid: str = None,
        post_index: int = 0,
        search_type: int = 0,
        search_condition: str = None,
        search_list: list = None,
        query: bool = False) -> data_type.PostInfo:
    """Fetch a single post from *board*, by AID or by index.

    Exactly one of ``post_aid`` or ``post_index`` must be given. Search
    options narrow the index space before lookup. Transient parse errors
    are retried once.

    Args:
        board: Board name (must be non-empty).
        post_aid: Article id; mutually exclusive with post_index and with
            search_condition.
        post_index: 1-based post index; mutually exclusive with post_aid.
        search_type: data_type.post_search_type value; required when
            search_condition is given.
        search_condition: Search string (for PUSH, an int in -100..110).
        search_list: Optional list of (search_type, condition) filters.
        query: Query mode flag, passed through to the fetch.

    Returns:
        data_type.PostInfo: the fetched post (may fail format check after
        the retry; the last attempt's result is returned).

    Raises:
        exceptions.Requirelogin: when no session is logged in.
        ValueError: for any inconsistent parameter combination.
    """
    self._one_thread()
    if not self._login_status:
        raise exceptions.Requirelogin(i18n.Requirelogin)
    self.config.log_last_value = None
    check_value.check(self.config, str, 'Board', board)
    if post_aid is not None:
        check_value.check(self.config, str, 'PostAID', post_aid)
    check_value.check(self.config, int, 'PostIndex', post_index)
    check_value.check(self.config, int, 'SearchType', search_type,
                      value_class=data_type.post_search_type)
    if search_condition is not None:
        check_value.check(self.config, str,
                          'SearchCondition', search_condition)
    if search_list is not None:
        # BUG FIX: the original passed search_condition here, so the list
        # argument was never actually type-checked
        check_value.check(self.config, list,
                          'search_list', search_list)
    if len(board) == 0:
        raise ValueError(log.merge(
            self.config,
            [
                i18n.Board,
                i18n.ErrorParameter,
                board
            ]))
    if post_index != 0 and isinstance(post_aid, str):
        raise ValueError(log.merge(
            self.config,
            [
                'PostIndex',
                'PostAID',
                i18n.ErrorParameter,
                i18n.BothInput
            ]))
    if post_index == 0 and post_aid is None:
        raise ValueError(log.merge(
            self.config,
            [
                'PostIndex',
                'PostAID',
                i18n.ErrorParameter
            ]))
    if search_condition is not None and search_type == 0:
        raise ValueError(log.merge(
            self.config,
            [
                'SearchType',
                i18n.ErrorParameter,
            ]))
    if search_type == data_type.post_search_type.PUSH:
        try:
            push_score = int(search_condition)
        # TypeError covers search_condition=None, which int() rejects
        except (TypeError, ValueError):
            raise ValueError(log.merge(
                self.config,
                [
                    'SearchCondition',
                    i18n.ErrorParameter,
                ]))
        if not (-100 <= push_score <= 110):
            raise ValueError(log.merge(
                self.config,
                [
                    'SearchCondition',
                    i18n.ErrorParameter,
                ]))
    if post_aid is not None and search_condition is not None:
        raise ValueError(log.merge(
            self.config,
            [
                'PostAID',
                'SearchCondition',
                i18n.ErrorParameter,
                i18n.BothInput,
            ]))
    if post_index != 0:
        newest_index = self._get_newest_index(
            data_type.index_type.BBS,
            board=board,
            search_type=search_type,
            search_condition=search_condition,
            search_list=search_list)
        if post_index < 1 or newest_index < post_index:
            raise ValueError(log.merge(
                self.config,
                [
                    'PostIndex',
                    i18n.ErrorParameter,
                    i18n.OutOfRange,
                    f'0 ~ {newest_index} but get {post_index}'
                ]))
    self._check_board(board)
    # fetch with a single retry on transient parse/lookup failures
    for i in range(2):
        need_continue = False
        post = None
        try:
            post = self._get_post(
                board,
                post_aid,
                post_index,
                search_type,
                search_condition,
                search_list,
                query)
        except (exceptions.ParseError,
                exceptions.UnknownError,
                exceptions.NoSuchBoard,
                exceptions.NoMatchTargetError):
            if i == 1:
                raise
            need_continue = True
        if post is None:
            need_continue = True
        elif not post.pass_format_check:
            need_continue = True
        if need_continue:
            log.log(
                self.config,
                log.level.DEBUG,
                'Wait for retry repost')
            time.sleep(0.1)
            continue
        break
    return post
def _check_board(
        self,
        board: str,
        check_moderator: bool = False) -> data_type.BoardInfo:
    """Ensure *board* exists, caching its info and moderator list.

    Args:
        board: Board name (matched case-insensitively).
        check_moderator: Also require the logged-in user to be a moderator.

    Returns:
        data_type.BoardInfo: the cached board info.

    Raises:
        exceptions.NeedModeratorPermission: check_moderator is True and the
            current user is not a moderator of the board.
    """
    board_key = board.lower()
    if board_key not in self._exist_board_list:
        board_info = self._get_board_info(board, False)
        self._exist_board_list.append(board_key)
        # cache info once (the original assigned this entry twice) and keep
        # a lower-cased moderator list for fast permission checks
        self._board_info_list[board_key] = board_info
        self._ModeratorList[board_key] = [
            moderator.lower() for moderator in board_info.moderators]
    if check_moderator:
        if self._ID.lower() not in self._ModeratorList[board_key]:
            raise exceptions.NeedModeratorPermission(board)
    return self._board_info_list[board_key]
def _get_post(
        self,
        board: str,
        post_aid: str = None,
        post_index: int = 0,
        search_type: int = 0,
        search_condition: str = None,
        search_list: list = None,
        query: bool = False) -> data_type.PostInfo:
    """Internal single-fetch implementation; delegates to _api_get_post.

    Parameters mirror get_post(); no validation is performed here —
    callers are expected to have validated already.
    """
    # package-relative import when installed as a package; plain import when
    # run from the source directory (mirrors the module header)
    try:
        from . import _api_get_post
    except ModuleNotFoundError:
        import _api_get_post
    return _api_get_post.get_post(
        self,
        board,
        post_aid,
        post_index,
        search_type,
        search_condition,
        search_list,
        query)
def _get_newest_index(
        self,
        index_type: int,
        search_type: int = 0,
        search_condition: str = None,
        search_list: list = None,
        board: str = None) -> int:
    """Internal newest-index lookup; delegates to _api_get_newest_index.

    Only index_type is validated here; the public wrapper validates the
    remaining parameters.
    """
    check_value.check(
        self.config, int, 'index_type',
        index_type, value_class=data_type.index_type)
    # package-relative import when installed as a package; plain import when
    # run from the source directory (mirrors the module header)
    try:
        from . import _api_get_newest_index
    except ModuleNotFoundError:
        import _api_get_newest_index
    return _api_get_newest_index.get_newest_index(
        self,
        index_type,
        search_type,
        search_condition,
        search_list,
        board)
def get_newest_index(
self,
index_type: int,
board: str = None,
search_type: int = 0,
search_condition: str = None,
search_list: list = None) -> int:
self._one_thread()
if index_type == data_type.index_type.BBS or index_type == data_type.index_type.MAIL:
if not self._login_status:
raise exceptions.Requirelogin(i18n.Requirelogin)
if index_type == data_type.index_type.BBS:
check_value.check(
self.config, int, 'SearchType', search_type,
value_class=data_type.post_search_type)
if index_type == data_type.index_type.MAIL:
if self.unregistered_user:
raise exceptions.UnregisteredUser(lib_util.get_current_func_name())
check_value.check(
self.config, int, 'SearchType', search_type,
value_class=data_type.mail_search_type)
self.config.log_last_value = None
if search_condition is not None:
check_value.check(
self.config, str,
'SearchCondition', search_condition)
if search_list is not None:
check_value.check(
self.config, list,
'search_list', search_list)
check_value.check(self.config, int, 'SearchType', search_type)
return self._get_newest_index(
index_type,
search_type,
search_condition,
search_list,
| |
# Copyright (C) 2019 Red Hat, Inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_config import cfg
from oslo_log import log as logging
from nova.compute import manager as compute_manager
from nova.compute import resource_tracker as rt
from nova import context
from nova import objects
from nova import test
from nova.tests.functional import integrated_helpers
from nova.tests.functional.libvirt import base
from nova.tests.unit.virt.libvirt import fake_os_brick_connector
from nova.tests.unit.virt.libvirt import fakelibvirt
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class NUMALiveMigrationBase(base.ServersTestBase,
                            integrated_helpers.InstanceHelperMixin):
    """Base for all the test classes here. Gives us the NUMATopologyFilter and
    small helper methods.
    """
    api_major_version = 'v2.1'
    microversion = 'latest'
    ADDITIONAL_FILTERS = ['NUMATopologyFilter']
    ADMIN_API = True

    def setUp(self):
        """Monkeypatch cleanup flags and the os-brick connector so the
        post-live-migration cleanup code path under test always runs."""
        super(NUMALiveMigrationBase, self).setUp()
        # NOTE(artom) There's a specific code path that we want to test.
        # There's an instance.save() call in the compute manager's
        # post_live_migration_at_destination(), and another instance.save()
        # call in the libvirt driver's cleanup(), as called from
        # _post_live_migration() in the compute manager. We want to make sure
        # the latter does not clobber any NUMA topology information saved by
        # the former. In order to trigger that code path, two things need to
        # happen. First, the do_cleanup variable needs to be True, in order for
        # driver.cleanup() to actually get called by _post_live_migration().
        # Second, destroy_disks needs to be True as well, in order for
        # cleanup() to enter the code block containing the instance.save()
        # call. Both do_cleanup and destroy_disks are set by
        # _live_migration_cleanup_flags(), so we just monkeypatch it to return
        # what we want regardless of any shared storage configuration.
        self.useFixture(fixtures.MonkeyPatch(
            'nova.compute.manager.ComputeManager.'
            '_live_migration_cleanup_flags',
            lambda *args, **kwargs: (True, True)))
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.connector',
            fake_os_brick_connector))

    def _migrate_stub(self, domain, destination, params, flags):
        """Default stub: fail loudly if a test triggers a live migration
        without supplying its own migrateToURI3 replacement."""
        raise test.TestingException('_migrate_stub() must be implemented in '
                                    ' tests that expect the live migration '
                                    ' to start.')

    def get_host(self, server_id):
        """Return the hypervisor host the server is currently on."""
        server = self.api.get_server(server_id)
        return server['OS-EXT-SRV-ATTR:host']

    def _get_migration_context(self, instance_uuid):
        """Return the instance's MigrationContext from the DB (admin ctxt)."""
        ctxt = context.get_admin_context()
        return objects.MigrationContext.get_by_instance_uuid(ctxt,
                                                             instance_uuid)

    def _assert_instance_pinned_cpus(self, uuid, instance_cpus, host_cpus):
        """Assert the instance's single NUMA cell pins instance_cpus onto
        host_cpus (order-insensitive)."""
        ctxt = context.get_admin_context()
        topology = objects.InstanceNUMATopology.get_by_instance_uuid(
            ctxt, uuid)
        self.assertEqual(1, len(topology.cells))
        # NOTE(artom) DictOfIntegersField has strings as keys, need to convert
        self.assertCountEqual([str(cpu) for cpu in instance_cpus],
                              topology.cells[0].cpu_pinning_raw.keys())
        self.assertCountEqual(host_cpus,
                              topology.cells[0].cpu_pinning_raw.values())

    def _assert_host_consumed_cpus(self, host, cpus):
        """Assert the host's first NUMA cell has exactly cpus pinned."""
        ctxt = context.get_admin_context()
        topology = objects.NUMATopology.obj_from_db_obj(
            objects.ComputeNode.get_by_nodename(ctxt, host).numa_topology)
        self.assertCountEqual(cpus, topology.cells[0].pinned_cpus)
class NUMALiveMigrationPositiveBase(NUMALiveMigrationBase):
    """Base for all tests that expect the live migration to actually start.
    Sets up an "environment" with two computes, each with 4 CPUs spead evenly
    across 2 NUMA nodes.
    """

    def setUp(self):
        """Route fakelibvirt's migrateToURI3 to this class's _migrate_stub."""
        super(NUMALiveMigrationPositiveBase, self).setUp()
        self.useFixture(fixtures.MonkeyPatch(
            'nova.tests.unit.virt.libvirt.fakelibvirt.Domain.migrateToURI3',
            self._migrate_stub))
        # set True by subclasses' stubs so tests can assert the stub ran
        self.migrate_stub_ran = False

    def start_computes_and_servers(self):
        """Start two computes (host_a/host_b) and boot one 2-pCPU server on
        each; both land on host CPUs 0,1 of their respective hosts."""
        # Start 2 computes
        self.start_compute(
            hostname='host_a',
            host_info=fakelibvirt.HostInfo(
                cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
        self.start_compute(
            hostname='host_b',
            host_info=fakelibvirt.HostInfo(
                cpu_nodes=1, cpu_sockets=1, cpu_cores=4, cpu_threads=1))
        # Create a 2-CPU flavor
        extra_spec = {'hw:cpu_policy': 'dedicated'}
        flavor = self._create_flavor(vcpu=2, extra_spec=extra_spec)
        # Boot 2 servers with 2 CPUs each, one on host_a and one on host_b.
        # Given the cpu_dedicated_set we set earlier, they should both be on
        # CPUs 0,1.
        for server_name, host in [('server_a', 'host_a'),
                                  ('server_b', 'host_b')]:
            server = self._create_server(flavor_id=flavor, host=host,
                                         networks='none')
            setattr(self, server_name,
                    self._wait_for_state_change(server, 'ACTIVE'))
            self.assertEqual(host, self.get_host(server['id']))
            self._assert_instance_pinned_cpus(server['id'], [0, 1], [0, 1])

    def _rpc_pin_host(self, hostname):
        """Pin hostname's compute RPC API to version 5.2 to simulate an
        older-RPC destination (5.3 must not be sendable)."""
        ctxt = context.get_admin_context()
        dest_mgr = self.computes[hostname].manager
        dest_mgr.compute_rpcapi = integrated_helpers.StubComputeRPCAPI(
            '5.2')
        self.assertFalse(
            dest_mgr.compute_rpcapi.router.client(
                ctxt).can_send_version('5.3'))
class NUMALiveMigrationPositiveTests(NUMALiveMigrationPositiveBase):
    """Tests that expect the live migration to succeed. Stubs out fakelibvirt's
    migrateToURI3() with a stub that "succeeds" the migration.
    """
    def _migrate_stub(self, domain, destination, params, flags):
        """This method is designed to stub out libvirt's migrateToURI3 in order
        to test periodics running during the live migration. It also has the
        nice side effect of giving us access to the destination XML so that we
        can assert stuff about it. Because migrateToURI3 is spawned in a
        background thread, this method does not block the upper Nova layers.
        Because we don't want Nova to think the live migration has finished
        until this method is done, the last thing we do is make fakelibvirt's
        Domain.jobStats() return VIR_DOMAIN_JOB_COMPLETED.
        """
        self.assertIsInstance(
            self._get_migration_context(self.server_a['id']),
            objects.MigrationContext)
        # During the migration, server_a is consuming CPUs 0,1 on host_a, while
        # all 4 of host_b's CPUs are consumed by server_b and the incoming
        # migration.
        self._assert_host_consumed_cpus('host_a', [0, 1])
        self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
        host_a_rp = self._get_provider_uuid_by_name('host_a')
        host_b_rp = self._get_provider_uuid_by_name('host_b')
        usages_a = self._get_provider_usages(host_a_rp)
        usages_b = self._get_provider_usages(host_b_rp)
        # Placement PCPU usage mirrors the pinned-CPU picture above.
        self.assertEqual(2, usages_a['PCPU'])
        self.assertEqual(4, usages_b['PCPU'])
        # In a real live migration, libvirt and QEMU on the source and
        # destination talk it out, resulting in the instance starting to exist
        # on the destination. Fakelibvirt cannot do that, so we have to
        # manually create the "incoming" instance on the destination
        # fakelibvirt.
        dest = self.computes['host_b']
        dest.driver._host.get_connection().createXML(
            params['destination_xml'],
            'fake-createXML-doesnt-care-about-flags')
        # The resource update periodic task should not change the consumed
        # CPUs, as the migration is still happening. The test should still pass
        # without running periodics, this just makes sure updating available
        # resources does the right thing.
        self._run_periodics()
        self._assert_host_consumed_cpus('host_a', [0, 1])
        self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
        # Signal job completion so Nova considers the migration finished.
        source = self.computes['host_a']
        conn = source.driver._host.get_connection()
        dom = conn.lookupByUUIDString(self.server_a['id'])
        dom.complete_job()
        self.migrate_stub_ran = True
    def _test(self, pin_dest):
        """Live migrate the server on host_a to host_b.

        :param pin_dest: if True, RPC-pin host_b to 5.2 via _rpc_pin_host()
                         before triggering the live migration.
        """
        # Make sure instances initially land on "overlapping" CPUs on both
        # hosts and boot 2 instances.
        self.flags(cpu_dedicated_set='0,1', group='compute')
        self.start_computes_and_servers()
        # Increase cpu_dedicated_set to 0-3, expecting the live migrated server
        # to end up on 2,3.
        self.flags(cpu_dedicated_set='0-3', group='compute')
        self.computes['host_a'] = self.restart_compute_service(
            self.computes['host_a'])
        self.computes['host_b'] = self.restart_compute_service(
            self.computes['host_b'])
        # Live migrate, RPC-pinning the destination host if asked
        if pin_dest:
            self._rpc_pin_host('host_b')
        self._live_migrate(self.server_a, 'completed')
        self.assertEqual('host_b', self.get_host(self.server_a['id']))
        self.assertIsNone(self._get_migration_context(self.server_a['id']))
        # At this point host_a should have no CPUs consumed (server_a has moved
        # to host_b), and host_b should have all of its CPUs consumed. In
        # addition, server_a should be pinned to 2,3 because 0,1 are used up by
        # server_b on host_b. Check this, then run periodics and check again.
        # Running periodics is not necessary for the test to pass, but it's
        # good to know it does the right thing.
        self._assert_host_consumed_cpus('host_a', [])
        self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
        self._assert_instance_pinned_cpus(self.server_a['id'],
                                          [0, 1], [2, 3])
        self._run_periodics()
        self._assert_host_consumed_cpus('host_a', [])
        self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
        self._assert_instance_pinned_cpus(self.server_a['id'],
                                          [0, 1], [2, 3])
        self.assertTrue(self.migrate_stub_ran)
        # TODO(artom) It'd be a good idea to live migrate in the other
        # direction here.
    def test_numa_live_migration(self):
        self._test(pin_dest=False)
    def test_numa_live_migration_dest_pinned(self):
        self._test(pin_dest=True)
    def test_bug_1843639(self):
        """Live migrations in 'accepted' status were not considered in progress
        before the fix for 1845146 merged, and were ignored by the update
        available resources periodic task. From the task's POV, live-migrating
        instances with migration status 'accepted' were considered to be on the
        source, and any resource claims on the destination would get
        erroneously removed. For that to happen, the task had to run at just
        the "right" time, when the migration was in 'accepted' and had not yet
        been moved to 'queued' by live_migration() in the compute manager.
        This test triggers this race by wrapping around live_migration() and
        running the update available resources periodic task while the
        migration is still in 'accepted'.
        """
        self.live_migration_ran = False
        orig_live_migration = compute_manager.ComputeManager.live_migration
        def live_migration(*args, **kwargs):
            # Run the periodic while the migration is still 'accepted'.
            self._run_periodics()
            # During the migration, server_a is consuming CPUs 0,1 on host_a,
            # while all 4 of host_b's CPUs are consumed by server_b and the
            # incoming migration.
            self._assert_host_consumed_cpus('host_a', [0, 1])
            self._assert_host_consumed_cpus('host_b', [0, 1, 2, 3])
            # The migration should also be in 'accepted' at this point in time.
            ctxt = context.get_admin_context()
            self.assertIsInstance(
                objects.Migration.get_by_instance_and_status(
                    ctxt, self.server_a['id'], 'accepted'),
                objects.Migration)
            self.live_migration_ran = True
            return orig_live_migration(*args, **kwargs)
        self.useFixture(fixtures.MonkeyPatch(
            'nova.compute.manager.ComputeManager.live_migration',
            live_migration))
        self._test(pin_dest=False)
        self.assertTrue(self.live_migration_ran)
class NUMALiveMigrationRollbackTests(NUMALiveMigrationPositiveBase):
"""Tests that expect the live migration to fail, and exist to test the
rollback code. Stubs out fakelibvirt's migrateToURI3() with a stub that
"fails" the migration.
"""
def _migrate_stub(self, domain, destination, params, flags):
"""Designed to stub fakelibvirt's migrateToURI3 and "fail" the
live migration by monkeypatching jobStats() to return an error.
| |
import numpy as np
import sys
import string
# Grid dimensions: M rows, N columns.
M=4
N=5
# Grid holding the clue sequences parsed from the input file
# (creer_sequence() reads rows 0..M-1 for row clues and rows
# M..M+N-1 for column clues — confirm against the file format).
s_grille=np.full((M+N,M+N),0)
# Playing grid filled with -1 / 0 / 1 (see the cell-state legend below).
grille=np.full((M,N), -1)
# Sample clue sequences (appear unused by the visible code) — TODO confirm.
sequence1=[1,1]
sequence2=[2,1]
# Cell state legend:
# uncolored -1
# white      0
# black      1
def lire_fichier(s_grille):
    """Parse the puzzle file named by sys.argv[1] into ``s_grille``.

    Digits '1'-'9' found in the text are stored one per cell; a '#' appears
    to act as a separator line (it sets ``bool`` so that the line counter is
    not advanced for that line).  Returns the (mutated) ``s_grille``.

    NOTE(review): this parser has several suspicious spots, flagged inline.
    """
    #file=sys.argv[1:]
    try:
        in_file = open(sys.argv[1], "r")
    # NOTE(review): bare except also swallows IndexError when no filename
    # argument is supplied; the error message is misleading in that case.
    except:
        sys.exit("ERROR. Can't read supplied filename.")
    text = in_file.read()
    lg=len(text)
    i=0
    # nextline flags "we just crossed a newline".  Because of the no-op
    # comparisons below it is never reset once set.
    nextline=0
    line=0     # current row in s_grille
    colonne=0  # current column in s_grille
    bool=0     # set when '#' was seen (NOTE(review): shadows the builtin)
    j=0
    # NOTE(review): `i < lg-1` never examines the final character of the file.
    while(i<lg-1):
        if(text[i]=='\n'):
            nextline=1
            if(bool==1):
                bool=0
            else:
                line=line+1
            i=i+1
            colonne=0
            continue
        else:
            if nextline==1:
                # NOTE(review): compares ONE character against the 4-char
                # string "0x20" — never true; probably meant ' ' (space).
                if text[i]=="0x20":
                    if text[i+1]!="0x20" and text[i+1]!="\n":
                        s_grille[line][colonne]=0
                        colonne=colonne+1
                        # NOTE(review): no-op comparison; `nextline = 1`
                        # was probably intended (same for the ones below).
                        nextline==1
                    else:
                        nextline==1
                elif (text[i]>='1' and text[i]<='9'):
                    s_grille[line][colonne]=text[i]
                    colonne=colonne+1
                    nextline==0
                elif text[i]=='#':
                    j=line-1
                    bool=1
                    nextline==0
                else:
                    nextline==0
            # Before the first newline nextline is still 0, so first-line
            # digits land here; colonne is NOT advanced in this branch, so
            # successive digits overwrite the same cell — TODO confirm intent.
            if nextline==0:
                #print("hi")
                if (text[i]>='1' and text[i]<='9'):
                    s_grille[line][colonne]=text[i]
        i=i+1
    #print(s_grille)
    in_file.close()
    return s_grille
def compare_block_ligne(grille, i, j, sl, n=None):
    """Check whether a run of `sl` black cells can lie in row `i` starting
    at column `j` (cells are scanned left-to-right from `j`).

    A placement is rejected when any scanned cell is already white (0), or
    when a neighbor of the starting cell (column j-1 or j+1) is already
    black (1).

    :param grille: 2-D grid of -1 (unknown) / 0 (white) / 1 (black)
    :param i: row index
    :param j: starting column of the candidate block
    :param sl: block length
    :param n: row length; defaults to the module-level grid width N
              (the parameter removes the hard dependency on that global)
    :returns: True if the block fits, False otherwise
    """
    if n is None:
        n = N  # backward-compatible fallback to the global grid width
    # Reject if a black cell touches the start of the candidate block.
    # NOTE(review): the j+1 check looks at a cell *inside* the block when
    # sl > 1 rather than the cell just past its end; behavior preserved.
    if ((j + 1 < n) and (grille[i][j + 1] == 1)) or \
            ((j - 1 >= 0) and (grille[i][j - 1] == 1)):
        return False
    # Every cell of the run must be non-white and inside the grid.
    while 0 <= j < n and sl > 0:
        if grille[i][j] == 0:
            return False
        j = j + 1
        sl = sl - 1
    return True
def compare_block_colonne(grille, i, j, sl, m=None):
    """Check whether a run of `sl` black cells can lie in column `j`
    starting at row `i` (cells are scanned top-to-bottom from `i`).

    A placement is rejected when any scanned cell is already white (0), or
    when a neighbor of the starting cell (row i-1 or i+1) is already
    black (1).

    :param grille: 2-D grid of -1 (unknown) / 0 (white) / 1 (black)
    :param i: starting row of the candidate block
    :param j: column index
    :param sl: block length
    :param m: column height; defaults to the module-level grid height M
              (the parameter removes the hard dependency on that global)
    :returns: True if the block fits, False otherwise
    """
    if m is None:
        m = M  # backward-compatible fallback to the global grid height
    # Reject if a black cell touches the start of the candidate block.
    # NOTE(review): the i+1 check looks at a cell *inside* the block when
    # sl > 1 rather than the cell just past its end; behavior preserved.
    if ((i + 1 < m) and (grille[i + 1][j] == 1)) or \
            ((i - 1 >= 0) and (grille[i - 1][j] == 1)):
        return False
    # Every cell of the run must be non-white and inside the grid.
    while 0 <= i < m and sl > 0:
        if grille[i][j] == 0:
            return False
        i = i + 1
        sl = sl - 1
    return True
def coloriage_possible_ligne(grille, sequence, i, j, l, cl):
    """Decide whether the first `l` clues of `sequence` can still be placed
    in row `i` when cell (i, j) is given color `cl`.

    Per the callers in coloreLig(), cl is 0 (white) or 1 (black).  Handles
    the trivial/bound cases here and delegates the general case to
    coloriage_possible_ligne_rec().  Returns True or False.
    """
    # Argument sanity checks:
    # case a: l exceeds the number of elements in the sequence
    # case b: i is not between 0 and N-1
    #         NOTE(review): i is a ROW index — the bound should arguably be
    #         M-1, not N-1; with M=4 < N=5 this check is merely too lenient.
    # case c: j < 0
    if (len(sequence)<l) or (i<0) or (i>N-1) or(j<0):
        return False
    # case 1: l == 0 (no clue left):
    # - true if j == 0
    # - false otherwise
    if (l==0):
        if (j==0):
            return True
        print("1false")
        return False
    else:
        val=sequence[l-1]
        print("s", sequence[l-1])
        # case 2a: j < sl - 1 (not enough room left of j for the last clue)
        if (j<(sequence[l-1]-1)):
            print("2false")
            return False
        # case 2b: j == sl - 1 (the clue would exactly fill columns 0..j)
        # - true if l == 1 and no cell in the span blocks it
        # - false otherwise
        elif (j==(sequence[l-1]-1)):
            cpt=j
            bool=0  # NOTE(review): shadows the builtin `bool`
            while(j>=0):
                if grille[i][j]==0 or cl==0:
                    bool=1
                    break
                j=j-1
            print(l, bool)
            if l==1 and bool==0:
                print("ABC true")
                return True
            print("3false")
            return False
        else:
            # case 2c: general position — recurse
            return coloriage_possible_ligne_rec(grille, sequence, i, j, l, -1, cl )#, case_j ,nb_block)
def coloriage_possible_ligne_rec(grille, sequence, i, j, l, check ,cl):#, case_j ,nb_block):
    """Recursive helper for coloriage_possible_ligne().

    Tries to place clue `l` (1-based index into `sequence`) ending at
    column `j` of row `i`, then recurses leftwards on the remaining clues.
    `check == -1` marks the first call, where the color of cell (i, j) is
    forced to `cl`; later calls (`check == 0`) consider both block offsets
    (compare_1 / compare_2).  Returns True or False; the debug prints tag
    each failure site with a distinct label.
    """
    # Success: all clues placed and we did not run past the left edge.
    if (l==0) and j>=-1 :
        print("ABC True")
        return True
    # Ran off the left edge with clues remaining.
    if j<0:
        print(i, j, l)
        print(grille)
        print("0false")
        return False
    # For the first iteration we do not yet know whether the cell is
    # white or black.
    print(grille)
    if check ==-1:
        # Block span depends on the forced color: white (cl==0) shifts the
        # candidate block one cell further left.
        if cl==0:
            compare=compare_block_ligne(grille, i, j-sequence[l-1], sequence[l-1])
        else:
            compare=compare_block_ligne(grille, i, j-sequence[l-1]+1, sequence[l-1])
        print("i, j", i, j,"compare:", compare, "l", l)
        if grille[i][j]==-1:
            if not (compare):
                print("4false")
                return False
            else:
                if(j==0) and l==1 and sequence[0]==1:
                    return True
                print("here i j", i ,j-(sequence[l-1])-(1-cl)-1)
                # Clamp the jump so the recursion can still hit j == -1.
                if (j-(sequence[l-1])-(1-cl)-1<-1):
                    return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1]), l-1, 0, cl)
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-(1-cl)-1, l-1, 0, cl)
        elif grille[i][j]==1:
            if(j==0) and l==1 and sequence[0]==1:
                return True
            # Cell is already black but we were asked to place white.
            if cl==0:
                return False
            if compare:
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
            return False
        elif grille[i][j]==0:
            if(j==0) and l==1 and sequence[0]==1:
                return False
            # Cell is already white but we were asked to place black.
            if cl==1:
                return False
            if compare:
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1 ,0, cl)
            return False
        else:
            # NOTE(review): prefer raising/sys.exit over bare exit() here.
            print("Syntaxe erreur valeur different que -1 0 1")
            exit()
    else:
        # Subsequent iterations: test both possible block positions, ending
        # at j (compare_2) or one cell earlier (compare_1).
        compare_1=compare_block_ligne(grille, i, j-sequence[l-1], sequence[l-1])
        compare_2=compare_block_ligne(grille, i, j-sequence[l-1]+1, sequence[l-1])
        print("i, j", i, j,"compare1:", compare_1, "l",l)
        print("i, j", i, j,"compare2:", compare_2, "l",l)
        if grille[i][j]==-1:
            if(j==0) and l==1 and sequence[0]==1:
                return True
            #print(i,j-sequence[l-1] ,sequence[l-1])
            if grille[i][j-sequence[l-1]-1]==1 and compare_1:
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1, 0, cl)
            elif grille[i][j-sequence[l-1]]==1 and compare_2:
                #if(j==0):
                #    return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1]), l-1 ,0, cl)
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
            elif not (compare_1 or compare_2):
                print("6false")
                return False
            else:
                if grille[i][j-sequence[l-1]-1]==0:
                    # NOTE(review): this branch crashes if reached —
                    # len(sequence[l-1]) calls len() on an int (TypeError),
                    # and `list[i][...]` indexes the BUILTIN `list` instead
                    # of a grid.  Probably meant sequence[l-1] and grille.
                    l=len(sequence[l-1])
                    while(l>=0):
                        list[i][j-(sequence[l-1])+l]=1
                        l=l-1
                    return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
                else:
                    print("or")
                    if (j==0) and sequence[l-1]==1:
                        print("ABC True")
                        return True
                    # Ambiguous: accept either placement.
                    return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl) or coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1, 0, cl)
        elif grille[i][j]==1:
            if(j==0) and l==1 and sequence[0]==1:
                return True
            if compare_2:
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-1, l-1 ,0, cl)
            else:
                print("7false")
                return False
        elif grille[i][j]==0:
            if(j==0) and l==1 and sequence[0]==1:
                return False
            if compare_1:
                return coloriage_possible_ligne_rec(grille, sequence, i ,j-(sequence[l-1])-2, l-1 ,0, cl)
            else:
                print("8false")
                return False
        else:
            print("Syntaxe erreur valeur different que -1 0 1")
            exit()
def coloriage_possible_colonne(grille, sequence, i, j, l ,cl):
    """Column counterpart of coloriage_possible_ligne(): decide whether the
    first `l` clues of `sequence` can still be placed in column `j` when
    cell (i, j) is given color `cl` (0 = white, 1 = black, per coloreCol()).

    Delegates the general case to coloriage_possible_colonne_rec().
    """
    # Argument sanity checks:
    # case a: l exceeds the number of elements in the sequence
    # case b: i is not between 0 and N-1
    #         NOTE(review): i is a ROW index, so the natural bound here is
    #         M-1; with M=4 < N=5 the N-1 check is merely too lenient.
    # case c: j < 0
    if (len(sequence)<l) or (i<0) or (i>N-1) or(j<0):
        return False
    # case 1: l == 0 (no clue left):
    # - true if i == 0
    # - false otherwise
    if (l==0):
        if (i==0):
            return True
        print("11false")
        return False
    else:
        print("i")
        val=sequence[l-1]
        # case 2a: i < sl - 1 (not enough room above i for the last clue)
        if (i<(sequence[l-1]-1)):
            print("22false")
            return False
        # case 2b: i == sl - 1 (the clue would exactly fill rows 0..i)
        # - true if l == 1 and no cell in the span blocks it
        # - false otherwise
        elif (i==(sequence[l-1]-1)):
            cpt=i
            bool=0  # NOTE(review): shadows the builtin `bool`
            while(i>=0):
                if grille[i][j]==0 or cl==0:
                    bool=1
                    break
                i=i-1
            if l==1 and bool==0:
                print("ABC true")
                return True
            print("33false")
            return False
        else:
            # case 2c: general position — recurse
            return coloriage_possible_colonne_rec(grille, sequence, i, j, l, -1 ,cl)#, case_j ,nb_block)
def coloriage_possible_colonne_rec(grille, sequence, i, j, l, check, cl):#, case_j ,nb_block):
    """Recursive helper for coloriage_possible_colonne(); mirrors
    coloriage_possible_ligne_rec() but walks upwards along column `j`.

    NOTE(review): several spots below still call the LIGNE recursion or
    index the grid by columns — flagged inline; they look like copy-paste
    leftovers from the row variant and should be confirmed.
    """
    # Success: all clues placed and we did not run past the top edge.
    if (l==0) and (i>=-1):
        print("ABC true")
        return True
    # Ran off the top edge with clues remaining.
    if i<0:
        print("44false")
        return False
    # For the first iteration we do not yet know whether the cell is
    # white or black.
    print(grille)
    if check ==-1:
        # Block span depends on the forced color: white (cl==0) shifts the
        # candidate block one cell further up.
        if cl==0:
            compare=compare_block_colonne(grille, i-sequence[l-1], j, sequence[l-1])
        else:
            compare=compare_block_colonne(grille, i-sequence[l-1]+1, j, sequence[l-1])
        print("i, j", i, j,"compare:", compare, "l", l)
        if grille[i][j]==-1:
            if not (compare):
                print("55false")
                return False
            else:
                if(i==0) and l==1 and sequence[0]==1:
                    return True
                print("here i j", i-(sequence[l-1])-(1-cl)-1 ,j)
                # NOTE(review): both recursive calls below go to the LIGNE
                # variant — probably meant coloriage_possible_colonne_rec.
                if (i-(sequence[l-1])-(1-cl)-1<-1):
                    return coloriage_possible_ligne_rec(grille, sequence, i-(sequence[l-1]) ,j, l-1, 0, cl)
                return coloriage_possible_ligne_rec(grille, sequence, i-(sequence[l-1])-(1-cl)-1 ,j, l-1, 0, cl)
        elif grille[i][j]==1:
            if(i==0) and l==1 and sequence[0]==1:
                return True
            if compare:
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
            else:
                ##print("77false")
                return False
        elif grille[i][j]==0:
            # NOTE(review): the row variant only rejects here when cl==1;
            # this unconditional False may be intentional — confirm.
            return False
        else:
            print("Syntaxe erreur valeur different que -1 0 1")
            exit()
    else:
        # Subsequent iterations: test both possible block positions, ending
        # at i (compare_2) or one cell earlier (compare_1).
        compare_1=compare_block_colonne(grille, i-sequence[l-1], j, sequence[l-1])
        compare_2=compare_block_colonne(grille, i-sequence[l-1]+1, j, sequence[l-1])
        print("i, j", i, j,"compare1:", compare_1, "l",l)
        print("i, j", i, j,"compare2:", compare_2, "l",l)
        if grille[i][j]==-1:
            # NOTE(review): grille[i][j-...] steps along COLUMNS although
            # this is the column routine — grille[i-...][j] looks intended.
            if grille[i][j-sequence[l-1]-1]==1 and compare_1:
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1, 0, cl)
            elif grille[i][j-sequence[l-1]]==1 and compare_2:
                if(i==0):
                    # NOTE(review): calls the LIGNE recursion — confirm.
                    return coloriage_possible_ligne_rec(grille, sequence, i-(sequence[l-1]) ,j, l-1 ,0, cl)
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
            elif not (compare_1 or compare_2):
                print("66false")
                return False
            else:
                if grille[i][j-sequence[l-1]-1]==0:
                    return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
                else:
                    if (j==0) and sequence[l-1]==1:
                        print("ABC True")
                        return True
                    # Ambiguous: accept either placement.
                    return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl) or coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1, 0, cl)
        elif grille[i][j]==1:
            if(i==0) and l==1 and sequence[0]==1:
                return True
            if compare_2:
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-1 ,j, l-1 ,0, cl)
            else:
                print("77false")
                return False
        elif grille[i][j]==0:
            if(i==0) and l==1 and sequence[0]==1:
                return False
            if compare_1:
                return coloriage_possible_colonne_rec(grille, sequence, i-(sequence[l-1])-2 ,j, l-1 ,0, cl)
            else:
                print("88false")
                return False
        else:
            print("Syntaxe erreur valeur different que -1 0 1")
            exit()
def dupliquer(grille):
    """Return an independent integer copy of `grille`.

    np.array() both copies and converts, replacing the element-by-element
    loop into a fixed M x N buffer; this also removes the dependency on the
    module-level M/N globals and works for any rectangular grid.

    :param grille: 2-D array-like of cell states (-1 / 0 / 1)
    :returns: a new numpy int array with the same shape and contents;
              mutating it does not affect `grille`
    """
    return np.array(grille, dtype=int)
def creer_sequence(indice, direction):
    """Build the clue sequence for one line from the global ``s_grille``.

    direction == 1: row clues, read from s_grille[indice][0:M].
    direction == 2: column clues, read from s_grille[indice + M][0:N].
    Zeros are skipped except that the very first cell is always kept
    (``init`` flag), so a leading 0 can appear in the result.  Any other
    direction returns an empty list.

    NOTE(review): rows scan M entries and columns scan N — for an M x N
    grid the opposite bounds would be expected; confirm the file layout.
    """
    init=1
    k=0
    sequence=[]
    #print(s_grille)
    if direction==1:
        while(k<M):
            if(s_grille[indice][k]!=0 or init==1):
                sequence.append(s_grille[indice][k])
                #print("this",indice, k)
                #print(s_grille[indice][k])
                init=0
            k=k+1
    elif direction==2:
        while(k<N):
            if(s_grille[indice+M][k]!=0 or init==1):
                sequence.append(s_grille[indice+M][k])
                init=0
            k=k+1
    return sequence
def coloreLig(grille, i):
    """Try to color row `i` of `grille` from its clue sequence.

    Scans the row right-to-left: for each position it asks whether a white
    and/or a black placement of the current (last) clue is possible.  When
    only black is possible the cells are written into `grille` and the clue
    is consumed.  Returns a (bool, grille) tuple; (False, grille) signals a
    contradiction.

    NOTE(review): several suspicious spots are flagged inline.
    """
    sequence=creer_sequence(i,1)# 1 means row, 2 means column
    l=len(sequence)
    a=0
    # NOTE(review): somme_sequence is computed but never used afterwards.
    somme_sequence=0
    while (a<l):
        somme_sequence=somme_sequence+sequence[a]
        a=a+1
    j=N-1
    bool=0  # NOTE(review): shadows the builtin; set once a black fit is found
    print("----------------------",sequence)
    while(j>=0 and l>0):
        print("i",i, "j", j, "l", l)
        # Can the last remaining clue be placed if (i, j) is white / black?
        resultat_blanc=(coloriage_possible_ligne(grille, sequence, i, j, l, 0))
        print("noir")
        resultat_noir=(coloriage_possible_ligne(grille, sequence, i, j, l, 1) )
        print("resultat_blanc, resultat_noir",resultat_blanc, resultat_noir)
        k=j
        if resultat_noir==True:
            bool=1
            if resultat_blanc==False:
                # Only black fits: commit the clue's cells to the grid.
                s=sequence[l-1]
                print(l-1)
                while(s>0):
                    print("in while")
                    print(sequence)
                    grille[i][k]=1
                    k=k-1
                    s=s-1
                del sequence[l-1]
            else:
                # Both colors fit: look left for an existing black run.
                nb=j-1
                # NOTE(review): min/max shadow builtins, and the inner tests
                # compare the CELL VALUE (grille[i][nb]) against the stored
                # INDEX bounds — `nb > max` / `nb < min` look intended.
                min=j
                max=-1
                while(nb>=0):
                    #print(grille[i][nb], nb)
                    if grille[i][nb]==1:
                        if(grille[i][nb]>max):
                            max=nb
                        if(grille[i][nb]<min):
                            min=nb
                    nb=nb-1
                print("max",max)
                print("min",min)
                l=len(sequence)
                print("************l",l,"max-min+1", max-min+1)
                print((l==1 and max-min+1==sequence[l-1]))
                # Consume the clue unless the found run exactly matches it.
                if not (l==1 and max-min+1==sequence[l-1]):
                    print("why?")
                    del sequence[l-1]
                print("fin")
        # Contradiction right at the row's rightmost cell: give up.
        if resultat_noir==False and resultat_blanc==False and j==N-1:
            print(i, j)
            return (False, grille)
        j=k-1
        l=len(sequence)
        if(j<0 and l>0):
            del sequence[l-1]
            # NOTE(review): resets a COLUMN index with the row count M
            # (columns run 0..N-1) — confirm N-1 was not intended.
            j=M-1
        l=len(sequence)
    if(bool==1):
        return (True,grille)
    print("what")
    return (False, grille)
    # NOTE(review): unreachable, and `resultat` is never defined here.
    return resultat
def coloreCol(grille, j):
sequence=creer_sequence(j,2)# 1 signifie ligne 2 signifie colonne
l=len(sequence)
i=M-1
bool=0
print("----------------------",sequence)
while(i>=0 and l>0):
bool_del=0
print("i",i, "j", j, "l", l)
resultat_blanc=(coloriage_possible_colonne(grille, sequence, i, j, l, 0))
print("noir")
resultat_noir=(coloriage_possible_colonne(grille, sequence, i, j, l, 1) )
print("resultat_blanc, resultat_noir",resultat_blanc, | |
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
11: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
12: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
13: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
14: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
15: {'desc': '',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
16: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
17: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
18: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
19: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
20: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
21: {'desc': 'bad AMC',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
22: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
23: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
24: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
25: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
26: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
27: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
28: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
29: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
30: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
31: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
32: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
33: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120},
34: {'desc': 'clean',
'files': ['tvd', 'c3d', 'amc'],
'fps': 120}}},
107: {'desc': 'Walking with obstacles 1',
'motions': {1: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': '',
'files': ['tvd', 'c3d', 'amc', 'avi'],
'fps': 120}}},
108: {'desc': 'Walking with obstacles 2',
'motions': {1: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
23: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
24: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
25: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
26: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
27: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
28: {'desc': '',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
111: {'desc': '<NAME>',
'motions': {1: {'desc': 'Walk backwards',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Bow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Crawling',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Curtsey',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Dance',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Get up from floor',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Get up from floor',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Get up from floor',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'Get up from chair',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'get up from chair',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'get up from chair',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Lay down',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'March',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Mope',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'Motorcycle',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': 'Peekaboo',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': 'Pick up',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': 'Pick up',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': 'Punch Kick',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': 'Ring around the rosie',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': 'Roll over',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': 'Range of motion',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
23: {'desc': 'Run',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
24: {'desc': 'Run',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
25: {'desc': 'Shrug',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
26: {'desc': 'Walk sideways',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
27: {'desc': 'Walking up stairs',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
28: {'desc': 'Standing still',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
29: {'desc': 'Stepping over',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
30: {'desc': 'Stepping over',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
31: {'desc': 'Stepping up / stepping down',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
32: {'desc': 'Stretch and yawn',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
33: {'desc': 'Throwing',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
34: {'desc': 'Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
35: {'desc': 'Walk',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
36: {'desc': 'Walk and carry',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
37: {'desc': 'Wave',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
38: {'desc': 'Yoga',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
39: {'desc': 'Yoga',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
40: {'desc': 'Yoga',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
41: {'desc': 'Yoga',
'files': ['c3d', 'amc', 'avi'],
'fps': 120}}},
113: {'desc': 'Post pregnant woman',
'motions': {1: {'desc': 'Walk backwards',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
2: {'desc': 'Bow',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
3: {'desc': 'Curtsey',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
4: {'desc': 'Dance',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
5: {'desc': 'Walk digital 8',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
6: {'desc': 'Walk figure 8',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
7: {'desc': 'Run',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
8: {'desc': 'Lay down and get up',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
9: {'desc': 'March',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
10: {'desc': 'Walk, mope around',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
11: {'desc': 'Motorcycle',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
12: {'desc': 'Peekaboo',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
13: {'desc': 'Punch and kick',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
14: {'desc': 'Ring around the rosie',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
15: {'desc': 'Sit in chair and get up',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
16: {'desc': 'Shrug',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
17: {'desc': 'Walk sideway',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
18: {'desc': 'Walk sideways',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
19: {'desc': 'Walking up and down stairs',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
20: {'desc': 'Walk up and down stairs',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
21: {'desc': 'Standing Still',
'files': ['c3d', 'amc', 'avi'],
'fps': 120},
22: {'desc': 'Step over',
'files': ['c3d', 'amc', 'avi'],
| |
*bo, int bolen, int dimslow, int dimmid, int dimfast, int padding){
/* safety check on args */
size_t els, ele;
void *array;
char byteorder[15];
if(len == elsize*elements && elements==dimfast*dimmid*dimslow){
array = data;
els = elsize;
ele = elements;
strncpy(byteorder,bo,bolen<15?bolen:14);
byteorder[bolen<15?bolen:14] = 0;
cbf_failnez(cbf_set_realarray_wdims_sf (self, compression, binary_id,
(void *) data, (size_t) elsize, (size_t) elements, (const char *)byteorder,
(size_t) dimslow, (size_t) dimmid, (size_t) dimfast, (size_t)padding));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_realarray_wdims_sf",
[ "int compression", "int binary_id","(binary) String data",
"int elsize","int elements", "String byteorder", "int dimslow", "int dimmid", "int dimfast", "int padding"],[]],
"cbf_set_realarray_wdims_fs":["""
/* CBFlib must NOT modify the data string nor the byteorder string
which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_realarray_wdims_fs;
%apply (char *STRING, int LENGTH) { (char *bo, int bolen) } set_realarray_wdims_fs;
void set_realarray_wdims_fs(unsigned int compression, int binary_id,
char *data, int len, int elsize, int elements,
char *bo, int bolen, int dimfast, int dimmid, int dimslow, int padding){
/* safety check on args */
size_t els, ele;
void *array;
char byteorder[15];
if(len == elsize*elements && elements==dimfast*dimmid*dimslow){
array = data;
els = elsize;
ele = elements;
strncpy(byteorder,bo,bolen<15?bolen:14);
byteorder[bolen<15?bolen:14] = 0;
cbf_failnez(cbf_set_realarray_wdims_fs (self, compression, binary_id,
(void *) data, (size_t) elsize, (size_t) elements, (const char *)byteorder,
(size_t) dimfast, (size_t) dimmid, (size_t) dimslow, (size_t)padding));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_realarray_wdims_fs",
[ "int compression", "int binary_id","(binary) String data",
"int elsize","int elements", "String byteorder", "int dimfast", "int dimmid", "int dimslow", "int padding"],[]],
"cbf_set_image":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_image;
void set_image(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimslow, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_image (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimslow, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_image",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimslow", "int dimfast"],[]],
"cbf_set_image_fs":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_image;
void set_image_fs(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimfast, int ndimslow){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_image (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimfast, (size_t)ndimslow));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_image_fs",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimfast", "int dimslow"],[]],
"cbf_set_image_sf":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_image_sf;
void set_image_sf(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimslow, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_image_sf (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimslow, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_image_sf",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimslow", "int dimfast"],[]],
"cbf_set_real_image":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_real_image;
void set_real_image(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int ndimslow, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_real_image (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, (size_t) ndimslow, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_real_image",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int dimslow", "int dimfast"],[]],
"cbf_set_real_image_fs":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_real_image;
void set_real_image_fs(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int ndimfast, int ndimslow){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_real_image_fs (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, (size_t) ndimfast, (size_t)ndimslow));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_real_image_fs",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int dimfast", "int dimslow"],[]],
"cbf_set_real_image_sf":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_real_image_sf;
void set_real_image_sf(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int ndimslow, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_real_image_sf (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, (size_t) ndimslow, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_real_image_sf",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int dimslow", "int dimfast"],[]],
"cbf_set_3d_image":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_3d_image;
void set_3d_image(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimslow, int ndimmid, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimmid*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_3d_image (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimslow, (size_t) ndimmid, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_3d_image",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimslow", "int dimmid", "int dimfast"],[]],
"cbf_set_3d_image_fs":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_3d_image;
void set_3d_image_fs(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimfast, int ndimmid, int ndimslow){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimmid*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_3d_image_fs (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimfast, (size_t) ndimmid, (size_t)ndimslow));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_3d_image_fs",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimfast", "int dimmid", "int dimslow"],[]],
"cbf_set_3d_image_sf":["""
/* CBFlib must NOT modify the data string which belongs to the scripting
language we will get and check the length via a typemap */
%apply (char *STRING, int LENGTH) { (char *data, int len) } set_3d_image;
void set_3d_image_sf(unsigned int element_number,
unsigned int compression,
char *data, int len, int elsize, int elsign, int ndimslow, int ndimmid, int ndimfast){
/* safety check on args */
size_t els;
unsigned int reserved;
void *array;
if(len == elsize*ndimslow*ndimmid*ndimfast){
array = data;
els = elsize;
reserved = 0;
cbf_failnez(cbf_set_3d_image_sf (self, reserved, element_number, compression,
(void *) data, (size_t) elsize, elsign, (size_t) ndimslow, (size_t) ndimmid, (size_t)ndimfast));
}else{
cbf_failnez(CBF_ARGUMENT);
}
}
""","set_3d_image_sf",
[ "int element_number","int compression","(binary) String data",
"int elsize", "int elsign", "int dimslow", | |
= lambda: or_clear(e)
def OrEvent(*events):
    """Return a threading.Event mirroring the logical OR of *events*.

    Each source event is patched via ``orify`` so that setting or
    clearing any of them re-evaluates the combined state.
    """
    combined = threading.Event()

    def _sync():
        # Mirror the OR of all source events onto the combined event.
        if any(evt.is_set() for evt in events):
            combined.set()
        else:
            combined.clear()

    for evt in events:
        orify(evt, _sync)
    _sync()
    return combined
def any_event(*events, **kwargs):
    """Block until any of *events* is set, or the timeout elapses.

    :param timeout: seconds to wait (keyword only; default 0, i.e. a
        single non-blocking poll).
    :return: the result of ``Event.wait`` on the combined event.
    """
    wait_seconds = kwargs.pop("timeout", 0)
    combined = OrEvent(*events)
    return combined.wait(wait_seconds)
class WampWebSocketClient(WebSocketBaseClient):
"""
:type _session_id: int
:type _call_reqs: Dict[int, CallRequest]
:type _publish_reqs: Dict[int, PublishRequest]
:type _register_reqs: Dict[int, RegisterRequest]
:type _subscriptions: Dict[int, list[Subscription]]
:type _subscribe_reqs: Dict[int, SubscribeRequest]
:type _invocations: Dict[int, InvocationRequest]
"""
debug = False
    def __init__(self, url, config=None, timeout=5.0, max_threads=None, serializers=None):
        """
        :param url: WebSocket URL of the WAMP router.
        :param config: types.ComponentConfig; defaults to realm 'realm1'.
        :param timeout: seconds to wait for join/replies throughout the client.
        :param max_threads: size of the message-handling thread pool;
            defaults to 5x CPU count (falling back to 4).
        :param serializers: explicit list of WAMP serializers; when None,
            MsgPack and JSON are tried in that order.
        """
        if serializers is None:
            serializers = []
            # try MsgPack WAMP serializer
            try:
                from beltway.wamp.serializer import MsgPackSerializer
                serializers.append(MsgPackSerializer(batched=False))  # batched not supported
            except ImportError:
                pass
            # try JSON WAMP serializer
            try:
                from beltway.wamp.serializer import JsonSerializer
                serializers.append(JsonSerializer(batched=False))  # batched not supported
            except ImportError:
                pass
            if not serializers:
                raise Exception(u'Could not import any WAMP serializer')
        # Serializer id -> serializer, for lookup once a subprotocol is chosen.
        self._serializers = {}
        for ser in serializers:
            self._serializers[ser.SERIALIZER_ID] = ser
        # One WebSocket subprotocol offered per serializer, e.g. 'wamp.2.json'.
        protocols = [u'wamp.2.{}'.format(ser.SERIALIZER_ID) for ser in serializers]
        super(WampWebSocketClient, self).__init__(url=url, protocols=protocols, timeout=timeout)
        self.config = config or types.ComponentConfig(realm='realm1')
        self.timeout = timeout
        if max_threads is None:
            # the python2.7 compat ThreadPoolExecutor fix
            # If max_workers is None or not given, it will default to the number
            # of processors on the machine, multiplied by 5, assuming that ThreadPoolExecutor
            # is often used to overlap I/O instead of CPU work and the number of
            # workers should be higher than the number of workers for ProcessPoolExecutor.
            try:
                import multiprocessing
                max_threads = multiprocessing.cpu_count() * 5
            except (ImportError, NotImplementedError):
                # going with 4 threads otherwise it likely won't work
                max_threads = 4
        # Serializes access to send() so frames from different threads don't interleave.
        self.send_message_lock = threading.RLock()
        self.listener_thread = threading.Thread(target=self.run, name='wamp-ws-listener')
        self.executor = ThreadPoolExecutor(max_workers=max_threads)
        self._session_id = None
        self._goodbye_sent = False
        self._transport_is_closing = False
        self._joined_realm = None
        # outstanding requests
        self._publish_reqs = {}
        self._subscribe_reqs = {}
        self._unsubscribe_reqs = {}
        self._call_reqs = {}
        self._register_reqs = {}
        self._unregister_reqs = {}
        # subscriptions in place
        self._subscriptions = {}
        # registrations in place
        self._registrations = {}
        # incoming invocations
        self._invocations = {}
        # Negotiated serializer -- None until set elsewhere (presumably once
        # the server picks a subprotocol; confirm against the handshake code).
        self._serializer = None
        self._request_id_gen = IdGenerator()
        # Signalled by the message handlers when the session is welcomed/aborted.
        self.joined_event = threading.Event()
        self.aborted_event = threading.Event()
        # mapping of exception classes to WAMP error URIs
        self._ecls_to_uri_pat = {}
        # mapping of WAMP error URIs to exception classes
        self._uri_to_ecls = {
            ApplicationError.INVALID_PAYLOAD: SerializationError
        }
def run(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
We start with a small size of two bytes to be read
from the connection so that we can quickly parse an
incoming frame header. Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
Note that we perform some automatic opererations:
* On a closing message, we respond with a closing
message and finally close the connection
* We respond to pings with pong messages.
* Whenever an error is raised by the stream parsing,
we initiate the closing of the connection with the
appropiate error code.
This method is blocking and should likely be run
in a thread.
"""
self.sock.setblocking(True)
# self.sock.settimeout(0.5)
with Heartbeat(self, frequency=self.heartbeat_freq):
s = self.stream
try:
self.opened()
while not self.terminated:
if self.once() is False:
break
finally:
self.terminate()
def once(self):
"""
Performs the operation of reading from the underlying
connection in order to feed the stream of bytes.
We start with a small size of two bytes to be read
from the connection so that we can quickly parse an
incoming frame header. Then the stream indicates
whatever size must be read from the connection since
it knows the frame payload length.
It returns `False` if an error occurred at the
socket level or during the bytes processing. Otherwise,
it returns `True`.
"""
if self.terminated:
log.debug("WebSocket is already terminated")
return False
try:
b = self.sock.recv(self.reading_buffer_size)
# This will only make sense with secure sockets.
if self._is_secure:
b += self._get_from_pending()
except socket.timeout:
# Just return True in this case, so we can loop again.
pass
except (socket.error, OSError, pyOpenSSLError) as e:
self.unhandled_error(e)
return False
else:
if not self.process(b):
return False
return True
    def run_forever(self):
        """
        Simply blocks the thread until the
        websocket has terminated.

        Joins the listener thread in 0.1s slices so the loop re-checks
        ``self.terminated`` periodically instead of blocking forever.
        """
        while not self.terminated:
            self.listener_thread.join(timeout=0.1)
    def handshake_ok(self):
        """
        Called when the upgrade handshake has completed
        successfully.
        Starts the client's listener thread, then triggers
        :meth:`on_connect` (which joins the configured realm).
        """
        super(WampWebSocketClient, self).handshake_ok()
        self.listener_thread.start()
        self.on_connect()
def join(self, realm, authmethods=None, authid=None):
"""
Implements :func:`autobahn.wamp.interfaces.ISession.join`
"""
log.debug("Joining realm.", realm=realm)
if self._session_id:
raise ProtocolError("already joined")
self._goodbye_sent = False
msg = message.Hello(realm, role.DEFAULT_CLIENT_ROLES, authmethods, authid)
self._joined_realm = realm
self.send_message(msg)
any_event(self.joined_event, self.aborted_event, timeout=self.timeout)
if self.aborted_event.is_set():
self._bailout(CloseStatusCode.INTERNAL_ERROR, reason=self._close_details.reason)
raise ApplicationError(self._close_details.reason, self._close_details.message)
elif not self.joined_event.is_set():
msg = "Timeout waiting for JOIN message."
self._bailout(CloseStatusCode.INTERNAL_ERROR, reason=msg)
raise TimeoutError(msg)
def closed(self, code, reason=None):
# TODO: Add a reconnect here if the close was not deliberately triggered.
log.debug("WAMP client closed.".format(code, reason), code=code, reason=reason)
    def send_message(self, msg):
        """Serialize a WAMP message with the active serializer and send it,
        holding the send lock so concurrent senders cannot interleave frames."""
        # log.debug("Sending message: {!r}".format(msg))
        payload, is_binary = self._serializer.serialize(msg)
        with self.send_message_lock:
            self.send(payload, binary=is_binary)
    def on_connect(self):
        # Immediately join the configured realm once the transport is up.
        self.join(realm=self.config.realm)
    def on_join(self, details):
        # Subclass hook; no-op by default.
        pass
    def on_challenge(self, challenge):
        # Subclass hook for authentication challenges; no-op by default.
        pass
    def on_leave(self, details):
        # Subclass hook; no-op by default.
        pass
def _bailout(self, code, reason=None):
"""
Parameters:
code: CloseStatusCode
"""
log.debug("Failing WAMP-over-WebSocket transport.".format(code, reason), code=code, reason=reason)
self.close(code.value, reason)
    def received_message(self, payload):
        """
        Callback from :func:`autobahn.websocket.interfaces.IWebSocketChannel.onMessage`

        Deserializes the frame into WAMP messages, enforces the
        session-state rules (which message classes are legal before vs.
        after the session is established) and dispatches each message to
        ``handle_message`` on the thread pool.  Protocol violations or
        any other failure tear the transport down via :meth:`_bailout`.
        """
        # TODO: This should spin off into its own thread to handle the messages
        # Even better would be a thread pool.
        data = None
        if payload.is_binary:
            data = payload.data
        else: # text payload
            data = str(payload)
        try:
            for msg in self._serializer.unserialize(data):
                if self.debug:
                    log.debug("Parsed message: {}".format(msg))
                if not self._session_id:
                    # No session yet: only the out-of-session message classes are legal.
                    if msg.__class__ not in message.OUT_OF_SESSION_RECV_MESSAGES:
                        raise ProtocolError("Received {0} message, and session is not yet established".format(msg.__class__))
                else:
                    if msg.__class__ not in message.IN_SESSION_RECV_MESSAGES:
                        if msg.__class__ in message.OUT_OF_SESSION_RECV_MESSAGES:
                            raise ProtocolError("Received {0} message, but session is already established.".format(msg.__class__))
                        else:
                            raise ProtocolError("Unsupported message {0}".format(msg.__class__))
                # Hand off to a worker thread so the reader loop is not blocked.
                self.executor.submit(self.handle_message, msg)
        except ProtocolError as e:
            reason = "WAMP Protocol Error ({0})".format(e)
            log.error(reason, exc_info=True)
            self._bailout(CloseStatusCode.PROTOCOL_ERROR, reason=reason)
        except Exception as e:
            reason = "WAMP Internal Error ({0})".format(e)
            log.error(reason, exc_info=True)
            self._bailout(CloseStatusCode.INTERNAL_ERROR, reason=reason)
    def register(self, endpoint, procedure=None, options=None):
        """
        Implements :func:`autobahn.wamp.interfaces.ICallee.register`

        Either registers a single callable under *procedure*, or -- when
        *endpoint* is an object -- registers every method on its class
        that carries ``_wampuris`` decoration.

        :param endpoint: a callable, or an object with decorated methods.
        :param procedure: URI to register a plain callable under.
        :param options: types.RegisterOptions or None.
        :return: the registration reply (single callable), or a list of
            replies (object endpoint).
        :raises TransportLost: if the transport is already terminated.
        """
        assert (callable(endpoint) and procedure is not None) or hasattr(endpoint, '__class__')
        assert procedure is None or isinstance(procedure, string_types)
        assert options is None or isinstance(options, types.RegisterOptions)
        if self.terminated:
            raise TransportLost()
        def _register_fn(obj, fn, procedure, options, resolve_future=True):
            # Send one REGISTER request; by default block on the reply future.
            request_id = self._request_id_gen.next()
            on_reply = Future()
            endpoint_obj = Endpoint(fn, obj, options.details_arg if options else None)
            self._register_reqs[request_id] = RegisterRequest(request_id, on_reply, procedure, endpoint_obj)
            if options:
                msg = message.Register(request_id, procedure, **options.message_attr())
            else:
                msg = message.Register(request_id, procedure)
            self.send_message(msg)
            return on_reply.result(timeout=self.timeout) if resolve_future else on_reply
        if callable(endpoint):
            # register a single callable
            return _register_fn(None, endpoint, procedure, options)
        else:
            # register all methods on an object decorated with "wamp.register"
            on_replies = []
            for k in inspect.getmembers(endpoint.__class__, is_method_or_function):
                proc = k[1]
                if "_wampuris" in proc.__dict__:
                    for pat in proc.__dict__["_wampuris"]:
                        if pat.is_endpoint():
                            uri = pat.uri()
                            # Per-URI options override the call-level options.
                            proc_options = proc.__dict__["_wampoptions"].get(uri, options)
                            on_replies.append(_register_fn(endpoint, proc, uri, proc_options))
            return on_replies
def _unregister(self, registration):
"""
Called from :meth:`autobahn.wamp.protocol.Registration.unregister`
"""
assert (isinstance(registration, Registration))
assert registration.active
assert (registration.id in self._registrations)
if self.terminated:
raise TransportLost()
request_id = self._request_id_gen.next()
on_reply = Future()
self._unregister_reqs[request_id] = UnregisterRequest(request_id, on_reply, registration.id)
msg = message.Unregister(request_id, registration.id)
self.send_message(msg)
return on_reply.result(timeout=self.timeout)
def publish(self, topic, *args, **kwargs):
"""
Implements :func:`autobahn.wamp.interfaces.IPublisher.publish`
"""
assert isinstance(topic, string_types)
if self.terminated:
raise TransportLost()
request_id = self._request_id_gen.next()
if 'options' in kwargs and isinstance(kwargs['options'], types.PublishOptions):
options = kwargs.pop('options')
msg = message.Publish(request_id, topic, args=args, kwargs=kwargs, **options.message_attr())
else:
options = None
msg = message.Publish(request_id, topic, args=args, kwargs=kwargs)
on_reply = None
if options and options.acknowledge:
# only acknowledged publications expect a reply ..
on_reply = Future()
self._publish_reqs[request_id] = PublishRequest(request_id, on_reply)
try:
# Notes:
#
# * this might raise autobahn.wamp.exception.SerializationError
# when the user payload cannot be serialized
# * we have to setup a PublishRequest() in _publish_reqs _before_
# calling transpor.send(), because a mock- or side-by-side transport
# will immediately lead on an incoming WAMP message in onMessage()
#
self.send_message(msg)
except Exception as e:
if request_id in self._publish_reqs:
del self._publish_reqs[request_id]
raise e
if on_reply:
return on_reply.result(timeout=self.timeout)
def subscribe(self, handler, topic=None, options=None):
"""
:param handler: Callable.
:param topic:
:param options:
:type options: types.SubscribeOptions
:return:
"""
assert (callable(handler) and topic is not None) | |
divide 3 * 2^8,
# which forces some rounding to happen.
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.rebalance()
rb.validate()
# Older versions of the ring builder code would round down when
# computing parts_wanted, while the new code rounds up. Make sure we
# can handle a ring built by the old method.
#
# This code mimics the old _set_parts_wanted.
weight_of_one_part = rb.weight_of_one_part()
for dev in rb._iter_devs():
if not dev['weight']:
dev['parts_wanted'] = -rb.parts * rb.replicas
else:
dev['parts_wanted'] = (
int(weight_of_one_part * dev['weight']) -
dev['parts'])
rb.pretend_min_part_hours_passed()
rb.rebalance() # this crashes unless rebalance resets parts_wanted
rb.validate()
def test_add_replicas_then_rebalance_respects_weight(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 1, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdi'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 3,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdl'})
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 96, 1: 96,
2: 32, 3: 32,
4: 96, 5: 96,
6: 32, 7: 32,
8: 96, 9: 96,
10: 32, 11: 32})
rb.replicas *= 2
rb.rebalance(seed=1)
r = rb.get_ring()
counts = {}
for part2dev_id in r._replica2part2dev_id:
for dev_id in part2dev_id:
counts[dev_id] = counts.get(dev_id, 0) + 1
self.assertEqual(counts, {0: 192, 1: 192,
2: 64, 3: 64,
4: 192, 5: 192,
6: 64, 7: 64,
8: 192, 9: 192,
10: 64, 11: 64})
def test_overload(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdg'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdh'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 1, 'weight': 1,
'ip': '127.0.0.1', 'port': 10001, 'device': 'sdi'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdj'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdk'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 2, 'weight': 2,
'ip': '127.0.0.2', 'port': 10002, 'device': 'sdl'})
rb.rebalance(seed=12345)
rb.validate()
# sanity check: balance respects weights, so default
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 192)
self.assertEqual(part_counts[1], 192)
self.assertEqual(part_counts[2], 384)
# Devices 0 and 1 take 10% more than their fair shares by weight since
# overload is 10% (0.1).
rb.set_overload(0.1)
rb.pretend_min_part_hours_passed()
rb.rebalance()
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 212)
self.assertEqual(part_counts[1], 211)
self.assertEqual(part_counts[2], 345)
# Now, devices 0 and 1 take 50% more than their fair shares by
# weight.
rb.set_overload(0.5)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 256)
self.assertEqual(part_counts[1], 256)
self.assertEqual(part_counts[2], 256)
# Devices 0 and 1 may take up to 75% over their fair share, but the
# placement algorithm only wants to spread things out evenly between
# all drives, so the devices stay at 50% more.
rb.set_overload(0.75)
for _ in range(3):
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='zone')
self.assertEqual(part_counts[0], 256)
self.assertEqual(part_counts[1], 256)
self.assertEqual(part_counts[2], 256)
def test_unoverload(self):
# Start off needing overload to balance, then add capacity until we
# don't need overload any more and see that things still balance.
# Overload doesn't prevent optimal balancing.
rb = ring.RingBuilder(8, 3, 1)
rb.set_overload(0.125)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 1,
'ip': '127.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 10, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.add_dev({'id': 11, 'region': 0, 'zone': 0, 'weight': 2,
'ip': '127.0.0.3', 'port': 10000, 'device': 'sdc'})
rb.rebalance(seed=12345)
# sanity check: our overload is big enough to balance things
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 216)
self.assertEqual(part_counts['127.0.0.2'], 216)
self.assertEqual(part_counts['127.0.0.3'], 336)
# Add some weight: balance improves
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 1.22)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 238)
self.assertEqual(part_counts['127.0.0.2'], 237)
self.assertEqual(part_counts['127.0.0.3'], 293)
# Even out the weights: balance becomes perfect
for dev in rb.devs:
if dev['ip'] in ('127.0.0.1', '127.0.0.2'):
rb.set_dev_weight(dev['id'], 2)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts['127.0.0.1'], 256)
self.assertEqual(part_counts['127.0.0.2'], 256)
self.assertEqual(part_counts['127.0.0.3'], 256)
# Add a new server: balance stays optimal
rb.add_dev({'id': 12, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdd'})
rb.add_dev({'id': 13, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sde'})
rb.add_dev({'id': 14, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
rb.add_dev({'id': 15, 'region': 0, 'zone': 0,
'weight': 2,
'ip': '127.0.0.4', 'port': 10000, 'device': 'sdf'})
# we're moving more than 1/3 of the replicas but fewer than 2/3, so
# we have to do this twice
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
rb.pretend_min_part_hours_passed()
rb.rebalance(seed=12345)
expected = {
'127.0.0.1': 192,
'127.0.0.2': 192,
'127.0.0.3': 192,
'127.0.0.4': 192,
}
part_counts = self._partition_counts(rb, key='ip')
self.assertEqual(part_counts, expected)
def test_overload_keeps_balanceable_things_balanced_initially(self):
rb = ring.RingBuilder(8, 3, 1)
rb.add_dev({'id': 0, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 1, 'region': 0, 'zone': 0, 'weight': 8,
'ip': '10.0.0.1', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 2, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 3, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.2', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 4, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 5, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.3', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 6, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 7, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.4', 'port': 10000, 'device': 'sdb'})
rb.add_dev({'id': 8, 'region': 0, 'zone': 0, 'weight': 4,
'ip': '10.0.0.5', 'port': 10000, 'device': 'sda'})
rb.add_dev({'id': 9, 'region': 0, 'zone': 0, 'weight': 4,
| |
import os
import json
from numpy import genfromtxt
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
# there is a bug in log files, some of them have __ instead of _
EPISODE_LOG_NAME = "episode_logfile__average.csv"    # double-underscore variant
EPISODE_LOG_NAME_2 = "episode_logfile_average.csv"   # single-underscore variant
# Prefix of the per-run JSON metadata file.
METADATA_FILE_NAME = "metadata"
# Sub-directory of each run folder that holds the raw CSV log files.
RAW_LOGFILES_FOLDER = "raw_logfiles"
def read_metadata_file(path):
    """Locate and parse the JSON metadata file under *path*.

    Any ``*.json`` file whose name starts with ``METADATA_FILE_NAME`` is
    considered a match; if several exist, the last one found wins.

    :param path: directory (tree) to search.
    :return: the parsed JSON content.
    :raises ValueError: if no metadata file is found.  (The previous
        message wrongly referenced 'episode_logfile__average.csv'.)
    """
    metadata_filename = None
    for json_file in get_json_files(path):
        if json_file.startswith(METADATA_FILE_NAME):
            metadata_filename = json_file
    if metadata_filename is None:
        raise ValueError("No metadata JSON file found in: %s" % path)
    metadata_path = os.path.join(path, metadata_filename)
    with open(metadata_path, 'r') as f:
        return json.load(f)
def get_json_files(path):
    """Recursively collect the names of all ``*.json`` files under *path*.

    Only basenames are returned, without their directory components.
    """
    return [name
            for _root, _dirs, names in os.walk(path)
            for name in names
            if name.endswith(".json")]
def get_csv_files(path):
    """Recursively collect the names of all ``*.csv`` files under *path*.

    Only basenames are returned, without their directory components.
    """
    return [name
            for _root, _dirs, names in os.walk(path)
            for name in names
            if name.endswith(".csv")]
def get_subfolders(path):
    """Return the directory entries of *path*.

    NOTE(review): despite the name this is a plain ``os.listdir``; plain
    files are returned too, not only sub-directories.
    """
    return os.listdir(path)
def load_episode_average_log(path, folder_name):
    """Load the per-episode average log CSV for one simulation run.

    Searches ``path/folder_name/raw_logfiles`` for either spelling of
    the average-episode log file (EPISODE_LOG_NAME has a double
    underscore due to a logger bug; EPISODE_LOG_NAME_2 is the fixed one).

    :param path: base directory containing the run folders.
    :param folder_name: name of the run folder to read.
    :return: pandas DataFrame with the log contents.
    :raises ValueError: if neither CSV file name is present.
    """
    log_dir = os.path.join(path, folder_name, RAW_LOGFILES_FOLDER)
    episode_average_log_name = None
    for csv_file in get_csv_files(log_dir):
        if csv_file in (EPISODE_LOG_NAME, EPISODE_LOG_NAME_2):
            episode_average_log_name = csv_file
    if episode_average_log_name is None:
        raise ValueError("The file does not exist: 'episode_logfile__average.csv'")
    # (Removed a leftover debug print of the row count and the
    # commented-out genfromtxt alternative.)
    return pd.read_csv(os.path.join(log_dir, episode_average_log_name), sep=',')
def load_time_step_data(time_step_path, vehicles=(1,), episode=1):
    """Load per-timestep log CSVs for the given vehicles and episode.

    :param time_step_path: directory containing the ``vehicle_<n>`` folders.
    :param vehicles: iterable of vehicle numbers (default: just vehicle 1).
        The previous default was a mutable list (``[1]``); a tuple avoids
        the shared-mutable-default pitfall while behaving identically.
    :param episode: episode number whose log file is read.
    :return: dict mapping vehicle number -> pandas DataFrame.
    """
    time_step_data = {}
    for v in vehicles:
        vehicle_dir = "vehicle_" + str(v)
        log_name = "timestep_logfile_{}_episode_{}.csv".format(vehicle_dir, episode)
        log_path = os.path.join(time_step_path, vehicle_dir, log_name)
        time_step_data[v] = pd.read_csv(log_path, sep=',')
    return time_step_data
def load_espisodes_data(simulation_path_base, file="episode_logfile_average.csv", subfolders=None):
    """Load the episode log CSV from every run folder.

    :param simulation_path_base: directory holding one folder per run.
    :param file: CSV file name to read from each run's raw_logfiles dir.
    :param subfolders: explicit list of run folder names; when None,
        every entry of *simulation_path_base* is used.
    :return: dict mapping folder name -> pandas DataFrame.
    """
    folders = get_subfolders(simulation_path_base) if subfolders is None else subfolders
    return {
        folder: pd.read_csv(
            os.path.join(simulation_path_base, folder, "raw_logfiles", file),
            sep=',')
        for folder in folders
    }
def ewma(x, alpha):
    '''
    Returns the exponentially weighted moving average of x.

    Builds the full (n, n) lower-triangular weight matrix, so memory use
    is O(n^2): fine for short series, use a vectorized/streaming variant
    for long ones.

    Parameters:
    -----------
    x : array-like
    alpha : float {0 <= alpha <= 1}

    Returns:
    --------
    ewma: numpy array
        the exponentially weighted moving average
    '''
    # Coerce x to an array
    x = np.array(x)
    n = x.size
    # Decay base (1-alpha) and the matrix of powers to raise it by:
    # w[i, j] = (1-alpha)**(i-j) for j <= i, zero above the diagonal.
    w0 = np.ones(shape=(n, n)) * (1 - alpha)
    p = np.vstack([np.arange(i, i - n, -1) for i in range(n)])
    w = np.tril(w0 ** p, 0)
    # Weighted average of all samples up to each index.  (The original
    # indexed x[::np.newaxis], i.e. x[::None] -- an accidental no-op
    # copy; plain x is equivalent and clearer.)
    return np.dot(w, x) / w.sum(axis=1)
def ewma_vectorized_safe(data, alpha, row_size=None, dtype=None, order='C', out=None):
    """
    Reshapes data before calculating EWMA, then iterates once over the rows
    to calculate the offset without precision issues
    :param data: Input data, will be flattened.
    :param alpha: scalar float in range (0,1)
        The alpha parameter for the moving average.
    :param row_size: int, optional
        The row size to use in the computation. High row sizes need higher precision,
        low values will impact performance. The optimal value depends on the
        platform and the alpha being used. Higher alpha values require lower
        row size. Default depends on dtype.
    :param dtype: optional
        Data type used for calculations. Defaults to float64 unless
        data.dtype is float32, then it will use float32.
    :param order: {'C', 'F', 'A'}, optional
        Order to use when flattening the data. Defaults to 'C'.
    :param out: ndarray, or None, optional
        A location into which the result is stored. If provided, it must have
        the same shape as the desired output. If not provided or `None`,
        a freshly-allocated array is returned.
    :return: The flattened result.
    """
    # FIX: np.array(..., copy=False) raises under NumPy 2.x when a copy is
    # required (e.g. list input); np.asarray has the intended semantics.
    data = np.asarray(data)
    if dtype is None:
        if data.dtype == np.float32:
            dtype = np.float32
        else:
            # FIX: `np.float` was removed in NumPy 1.24; use the concrete
            # float64 dtype (consistent with ewma_vectorized below).
            dtype = np.float64
    else:
        dtype = np.dtype(dtype)
    row_size = int(row_size) if row_size is not None else get_max_row_size(alpha, dtype)
    if data.size <= row_size:
        # The normal function can handle this input, use that
        return ewma_vectorized(data, alpha, dtype=dtype, order=order, out=out)
    if data.ndim > 1:
        # flatten input
        data = np.reshape(data, -1, order=order)
    if out is None:
        out = np.empty_like(data, dtype=dtype)
    else:
        assert out.shape == data.shape
        assert out.dtype == dtype
    row_n = int(data.size // row_size)  # the number of rows to use
    trailing_n = int(data.size % row_size)  # the amount of data leftover
    first_offset = data[0]
    if trailing_n > 0:
        # set temporary results to slice view of out parameter
        out_main_view = np.reshape(out[:-trailing_n], (row_n, row_size))
        data_main_view = np.reshape(data[:-trailing_n], (row_n, row_size))
    else:
        out_main_view = out
        data_main_view = data
    # get all the scaled cumulative sums with 0 offset
    ewma_vectorized_2d(data_main_view, alpha, axis=1, offset=0, dtype=dtype,
                       order='C', out=out_main_view)
    scaling_factors = (1 - alpha) ** np.arange(1, row_size + 1)
    last_scaling_factor = scaling_factors[-1]
    # create offset array
    offsets = np.empty(out_main_view.shape[0], dtype=dtype)
    offsets[0] = first_offset
    # iteratively calculate offset for each row
    for i in range(1, out_main_view.shape[0]):
        offsets[i] = offsets[i - 1] * last_scaling_factor + out_main_view[i - 1, -1]
    # add the offsets to the result
    out_main_view += offsets[:, np.newaxis] * scaling_factors[np.newaxis, :]
    if trailing_n > 0:
        # process trailing data in the 2nd slice of the out parameter
        ewma_vectorized(data[-trailing_n:], alpha, offset=out_main_view[-1, -1],
                        dtype=dtype, order='C', out=out[-trailing_n:])
    return out
def get_max_row_size(alpha, dtype=float):
    """Largest EWMA row size whose scaling factors stay representable.

    Rows longer than log(tiny) / log(1 - alpha) would underflow the
    (1 - alpha)**k scaling factors for the given dtype. No measurable
    accuracy impact was observed at this bound; substitute
    np.finfo(dtype).eps for a more conservative limit.
    """
    assert 0. <= alpha < 1.
    # Smallest positive normal number representable in this dtype.
    underflow_bound = np.finfo(dtype).tiny
    limit = np.log(underflow_bound) / np.log(1 - alpha)
    # If this produces an OverflowError, make the bound larger.
    return int(limit) + 1
def ewma_vectorized(data, alpha, offset=None, dtype=None, order='C', out=None):
    """
    Calculates the exponential moving average over a vector.
    Will fail for large inputs.
    :param data: Input data
    :param alpha: scalar float in range (0,1)
        The alpha parameter for the moving average.
    :param offset: optional
        The offset for the moving average, scalar. Defaults to data[0].
    :param dtype: optional
        Data type used for calculations. Defaults to float64 unless
        data.dtype is float32, then it will use float32.
    :param order: {'C', 'F', 'A'}, optional
        Order to use when flattening the data. Defaults to 'C'.
    :param out: ndarray, or None, optional
        A location into which the result is stored. If provided, it must have
        the same shape as the input. If not provided or `None`,
        a freshly-allocated array is returned.
    """
    # FIX: np.array(..., copy=False) raises under NumPy 2.x when a copy is
    # required; np.asarray has the intended no-copy-if-possible semantics.
    data = np.asarray(data)
    if dtype is None:
        if data.dtype == np.float32:
            dtype = np.float32
        else:
            dtype = np.float64
    else:
        dtype = np.dtype(dtype)
    if data.ndim > 1:
        # flatten input
        # FIX: was `data.reshape(-1, order)` — ndarray.reshape only accepts
        # `order` as a keyword argument; the positional form raises TypeError.
        data = data.reshape(-1, order=order)
    if out is None:
        out = np.empty_like(data, dtype=dtype)
    else:
        assert out.shape == data.shape
        assert out.dtype == dtype
    if data.size < 1:
        # empty input, return empty array
        return out
    if offset is None:
        offset = data[0]
    alpha = np.asarray(alpha).astype(dtype, copy=False)
    # scaling_factors -> 0 as len(data) gets large
    # this leads to divide-by-zeros below
    scaling_factors = np.power(1. - alpha, np.arange(data.size + 1, dtype=dtype),
                               dtype=dtype)
    # create cumulative sum array
    np.multiply(data, (alpha * scaling_factors[-2]) / scaling_factors[:-1],
                dtype=dtype, out=out)
    np.cumsum(out, dtype=dtype, out=out)
    # cumsums / scaling
    out /= scaling_factors[-2::-1]
    if offset != 0:
        offset = np.asarray(offset).astype(dtype, copy=False)
        # add offsets
        out += offset * scaling_factors[1:]
    return out
def ewma_vectorized_2d(data, alpha, axis=None, offset=None, dtype=None, order='C', out=None):
"""
Calculates the exponential moving average over a given axis.
:param data: Input data, must be 1D or 2D array.
:param alpha: scalar float in range (0,1)
The alpha parameter for the moving average.
:param axis: The axis to apply the moving average on.
If axis==None, the data is flattened.
:param offset: optional
The offset for the moving average. Must be scalar or a
vector with one element for each row of data. If set to None,
defaults to the first value of each row.
:param dtype: optional
Data type used for calculations. Defaults to float64 unless
data.dtype is float32, then it will use float32.
:param order: {'C', 'F', 'A'}, optional
Order to use when flattening the data. Ignored if axis is not None.
:param out: ndarray, or None, optional
A location into which the result is stored. If provided, it must have
the same shape as the desired | |
Leakage': 0.00611897,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage with power gating': 0.00348781,
'Renaming Unit/Peak Dynamic': 4.56169,
'Renaming Unit/Runtime Dynamic': 0.947164,
'Renaming Unit/Subthreshold Leakage': 0.070483,
'Renaming Unit/Subthreshold Leakage with power gating': 0.0362779,
'Runtime Dynamic': 11.9519,
'Subthreshold Leakage': 6.21877,
'Subthreshold Leakage with power gating': 2.58311},
{'Area': 32.0201,
'Execution Unit/Area': 7.68434,
'Execution Unit/Complex ALUs/Area': 0.235435,
'Execution Unit/Complex ALUs/Gate Leakage': 0.0132646,
'Execution Unit/Complex ALUs/Peak Dynamic': 0.0724568,
'Execution Unit/Complex ALUs/Runtime Dynamic': 0.259599,
'Execution Unit/Complex ALUs/Subthreshold Leakage': 0.20111,
'Execution Unit/Complex ALUs/Subthreshold Leakage with power gating': 0.0754163,
'Execution Unit/Floating Point Units/Area': 4.6585,
'Execution Unit/Floating Point Units/Gate Leakage': 0.0656156,
'Execution Unit/Floating Point Units/Peak Dynamic': 0.334203,
'Execution Unit/Floating Point Units/Runtime Dynamic': 0.304033,
'Execution Unit/Floating Point Units/Subthreshold Leakage': 0.994829,
'Execution Unit/Floating Point Units/Subthreshold Leakage with power gating': 0.373061,
'Execution Unit/Gate Leakage': 0.120359,
'Execution Unit/Instruction Scheduler/Area': 1.66526,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Area': 0.275653,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Gate Leakage': 0.000977433,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Peak Dynamic': 1.04181,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Runtime Dynamic': 0.225491,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage': 0.0143453,
'Execution Unit/Instruction Scheduler/FP Instruction Window/Subthreshold Leakage with power gating': 0.00810519,
'Execution Unit/Instruction Scheduler/Gate Leakage': 0.00568913,
'Execution Unit/Instruction Scheduler/Instruction Window/Area': 0.805223,
'Execution Unit/Instruction Scheduler/Instruction Window/Gate Leakage': 0.00414562,
'Execution Unit/Instruction Scheduler/Instruction Window/Peak Dynamic': 1.6763,
'Execution Unit/Instruction Scheduler/Instruction Window/Runtime Dynamic': 0.363708,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage': 0.0625755,
'Execution Unit/Instruction Scheduler/Instruction Window/Subthreshold Leakage with power gating': 0.0355964,
'Execution Unit/Instruction Scheduler/Peak Dynamic': 3.82262,
'Execution Unit/Instruction Scheduler/ROB/Area': 0.584388,
'Execution Unit/Instruction Scheduler/ROB/Gate Leakage': 0.00056608,
'Execution Unit/Instruction Scheduler/ROB/Peak Dynamic': 1.10451,
'Execution Unit/Instruction Scheduler/ROB/Runtime Dynamic': 0.183588,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage': 0.00906853,
'Execution Unit/Instruction Scheduler/ROB/Subthreshold Leakage with power gating': 0.00364446,
'Execution Unit/Instruction Scheduler/Runtime Dynamic': 0.772787,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage': 0.0859892,
'Execution Unit/Instruction Scheduler/Subthreshold Leakage with power gating': 0.047346,
'Execution Unit/Integer ALUs/Area': 0.47087,
'Execution Unit/Integer ALUs/Gate Leakage': 0.0265291,
'Execution Unit/Integer ALUs/Peak Dynamic': 0.206659,
'Execution Unit/Integer ALUs/Runtime Dynamic': 0.101344,
'Execution Unit/Integer ALUs/Subthreshold Leakage': 0.40222,
'Execution Unit/Integer ALUs/Subthreshold Leakage with power gating': 0.150833,
'Execution Unit/Peak Dynamic': 4.84889,
'Execution Unit/Register Files/Area': 0.570804,
'Execution Unit/Register Files/Floating Point RF/Area': 0.208131,
'Execution Unit/Register Files/Floating Point RF/Gate Leakage': 0.000232788,
'Execution Unit/Register Files/Floating Point RF/Peak Dynamic': 0.063138,
'Execution Unit/Register Files/Floating Point RF/Runtime Dynamic': 0.0094581,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage': 0.00399698,
'Execution Unit/Register Files/Floating Point RF/Subthreshold Leakage with power gating': 0.00176968,
'Execution Unit/Register Files/Gate Leakage': 0.000622708,
'Execution Unit/Register Files/Integer RF/Area': 0.362673,
'Execution Unit/Register Files/Integer RF/Gate Leakage': 0.00038992,
'Execution Unit/Register Files/Integer RF/Peak Dynamic': 0.0978419,
'Execution Unit/Register Files/Integer RF/Runtime Dynamic': 0.0699485,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage': 0.00614175,
'Execution Unit/Register Files/Integer RF/Subthreshold Leakage with power gating': 0.00246675,
'Execution Unit/Register Files/Peak Dynamic': 0.16098,
'Execution Unit/Register Files/Runtime Dynamic': 0.0794066,
'Execution Unit/Register Files/Subthreshold Leakage': 0.0101387,
'Execution Unit/Register Files/Subthreshold Leakage with power gating': 0.00423643,
'Execution Unit/Results Broadcast Bus/Area Overhead': 0.0390912,
'Execution Unit/Results Broadcast Bus/Gate Leakage': 0.00537402,
'Execution Unit/Results Broadcast Bus/Peak Dynamic': 0.224244,
'Execution Unit/Results Broadcast Bus/Runtime Dynamic': 0.570166,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage': 0.081478,
'Execution Unit/Results Broadcast Bus/Subthreshold Leakage with power gating': 0.0305543,
'Execution Unit/Runtime Dynamic': 2.08734,
'Execution Unit/Subthreshold Leakage': 1.79543,
'Execution Unit/Subthreshold Leakage with power gating': 0.688821,
'Gate Leakage': 0.368936,
'Instruction Fetch Unit/Area': 5.85939,
'Instruction Fetch Unit/Branch Predictor/Area': 0.138516,
'Instruction Fetch Unit/Branch Predictor/Chooser/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Chooser/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Chooser/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Chooser/Runtime Dynamic': 0.00132942,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Chooser/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/Gate Leakage': 0.000757657,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Area': 0.0435221,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Gate Leakage': 0.000278362,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Peak Dynamic': 0.0168831,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Runtime Dynamic': 0.00132942,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage': 0.00759719,
'Instruction Fetch Unit/Branch Predictor/Global Predictor/Subthreshold Leakage with power gating': 0.0039236,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Area': 0.0257064,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Gate Leakage': 0.000154548,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Peak Dynamic': 0.0142575,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Runtime Dynamic': 0.00119871,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage': 0.00384344,
'Instruction Fetch Unit/Branch Predictor/L1_Local Predictor/Subthreshold Leakage with power gating': 0.00198631,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Area': 0.0151917,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Gate Leakage': 8.00196e-05,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Peak Dynamic': 0.00527447,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Runtime Dynamic': 0.00048635,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage': 0.00181347,
'Instruction Fetch Unit/Branch Predictor/L2_Local Predictor/Subthreshold Leakage with power gating': 0.000957045,
'Instruction Fetch Unit/Branch Predictor/Peak Dynamic': 0.0597838,
'Instruction Fetch Unit/Branch Predictor/RAS/Area': 0.0105732,
'Instruction Fetch Unit/Branch Predictor/RAS/Gate Leakage': 4.63858e-05,
'Instruction Fetch Unit/Branch Predictor/RAS/Peak Dynamic': 0.0117602,
'Instruction Fetch Unit/Branch Predictor/RAS/Runtime Dynamic': 0.00100482,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage': 0.000932505,
'Instruction Fetch Unit/Branch Predictor/RAS/Subthreshold Leakage with power gating': 0.000494733,
'Instruction Fetch Unit/Branch Predictor/Runtime Dynamic': 0.00486237,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage': 0.0199703,
'Instruction Fetch Unit/Branch Predictor/Subthreshold Leakage with power gating': 0.0103282,
'Instruction Fetch Unit/Branch Target Buffer/Area': 0.64954,
'Instruction Fetch Unit/Branch Target Buffer/Gate Leakage': 0.00272758,
'Instruction Fetch Unit/Branch Target Buffer/Peak Dynamic': 0.177867,
'Instruction Fetch Unit/Branch Target Buffer/Runtime Dynamic': 0.0112891,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage': 0.0811682,
'Instruction Fetch Unit/Branch Target Buffer/Subthreshold Leakage with power gating': 0.0435357,
'Instruction Fetch Unit/Gate Leakage': 0.0589979,
'Instruction Fetch Unit/Instruction Buffer/Area': 0.0226323,
'Instruction Fetch Unit/Instruction Buffer/Gate Leakage': 6.83558e-05,
'Instruction Fetch Unit/Instruction Buffer/Peak Dynamic': 0.606827,
'Instruction Fetch Unit/Instruction Buffer/Runtime Dynamic': 0.0672432,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage': 0.00151885,
'Instruction Fetch Unit/Instruction Buffer/Subthreshold Leakage with power gating': 0.000701682,
'Instruction Fetch Unit/Instruction Cache/Area': 3.14635,
'Instruction Fetch Unit/Instruction Cache/Gate Leakage': 0.029931,
'Instruction Fetch Unit/Instruction Cache/Peak Dynamic': 4.27725,
'Instruction Fetch Unit/Instruction Cache/Runtime Dynamic': 0.189823,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage': 0.367022,
'Instruction Fetch Unit/Instruction Cache/Subthreshold Leakage with power gating': 0.180386,
'Instruction Fetch Unit/Instruction Decoder/Area': 1.85799,
'Instruction Fetch Unit/Instruction Decoder/Gate Leakage': 0.0222493,
'Instruction Fetch Unit/Instruction Decoder/Peak Dynamic': 1.37404,
'Instruction Fetch Unit/Instruction Decoder/Runtime Dynamic': 0.228388,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage': 0.442943,
'Instruction Fetch Unit/Instruction Decoder/Subthreshold Leakage with power gating': 0.166104,
'Instruction Fetch Unit/Peak Dynamic': 6.70335,
'Instruction Fetch Unit/Runtime Dynamic': 0.501606,
'Instruction Fetch Unit/Subthreshold Leakage': 0.932286,
'Instruction Fetch Unit/Subthreshold Leakage with power gating': 0.40843,
'L2/Area': 4.53318,
'L2/Gate Leakage': 0.015464,
'L2/Peak Dynamic': 0.0173392,
'L2/Runtime Dynamic': 0.00368444,
'L2/Subthreshold Leakage': 0.834142,
'L2/Subthreshold Leakage with power gating': 0.401066,
'Load Store Unit/Area': 8.80901,
'Load Store Unit/Data Cache/Area': 6.84535,
'Load Store Unit/Data Cache/Gate Leakage': 0.0279261,
'Load Store Unit/Data Cache/Peak Dynamic': 3.71942,
'Load Store Unit/Data Cache/Runtime Dynamic': 1.19902,
'Load Store Unit/Data Cache/Subthreshold Leakage': 0.527675,
'Load Store Unit/Data Cache/Subthreshold Leakage with power gating': 0.25085,
'Load Store Unit/Gate Leakage': 0.0350888,
'Load Store Unit/LoadQ/Area': 0.0836782,
'Load Store Unit/LoadQ/Gate Leakage': 0.00059896,
'Load Store Unit/LoadQ/Peak Dynamic': 0.0803085,
'Load Store Unit/LoadQ/Runtime Dynamic': 0.0803084,
'Load Store Unit/LoadQ/Subthreshold Leakage': 0.00941961,
'Load Store Unit/LoadQ/Subthreshold Leakage with power gating': 0.00536918,
'Load Store Unit/Peak Dynamic': 4.09866,
'Load Store Unit/Runtime Dynamic': 1.67538,
'Load Store Unit/StoreQ/Area': 0.322079,
'Load Store Unit/StoreQ/Gate Leakage': 0.00329971,
'Load Store Unit/StoreQ/Peak Dynamic': 0.198027,
'Load Store Unit/StoreQ/Runtime Dynamic': 0.396053,
'Load Store Unit/StoreQ/Subthreshold Leakage': 0.0345621,
'Load Store Unit/StoreQ/Subthreshold Leakage with power gating': 0.0197004,
'Load Store Unit/Subthreshold Leakage': 0.591321,
'Load Store Unit/Subthreshold Leakage with power gating': 0.283293,
'Memory Management Unit/Area': 0.4339,
'Memory Management Unit/Dtlb/Area': 0.0879726,
'Memory Management Unit/Dtlb/Gate Leakage': 0.00088729,
'Memory Management Unit/Dtlb/Peak Dynamic': 0.0702804,
'Memory Management Unit/Dtlb/Runtime Dynamic': 0.070445,
'Memory Management Unit/Dtlb/Subthreshold Leakage': 0.0155699,
'Memory Management Unit/Dtlb/Subthreshold Leakage with power gating': 0.00887485,
'Memory Management Unit/Gate Leakage': 0.00808595,
'Memory Management Unit/Itlb/Area': 0.301552,
'Memory Management Unit/Itlb/Gate Leakage': 0.00393464,
'Memory Management Unit/Itlb/Peak Dynamic': 0.265944,
'Memory Management Unit/Itlb/Runtime Dynamic': 0.031402,
'Memory Management Unit/Itlb/Subthreshold Leakage': 0.0413758,
'Memory Management Unit/Itlb/Subthreshold Leakage with power gating': 0.0235842,
'Memory Management Unit/Peak Dynamic': 0.542782,
'Memory Management Unit/Runtime Dynamic': 0.101847,
'Memory Management Unit/Subthreshold Leakage': 0.0766103,
'Memory Management Unit/Subthreshold Leakage with power gating': 0.0398333,
'Peak Dynamic': 19.8005,
'Renaming Unit/Area': 0.303608,
'Renaming Unit/FP Front End RAT/Area': 0.131045,
'Renaming Unit/FP Front End RAT/Gate Leakage': 0.00351123,
'Renaming Unit/FP Front End RAT/Peak Dynamic': 2.51468,
'Renaming Unit/FP Front End RAT/Runtime Dynamic': 0.166088,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage': 0.0308571,
'Renaming Unit/FP Front End RAT/Subthreshold Leakage with power gating': 0.0175885,
'Renaming Unit/Free List/Area': 0.0340654,
'Renaming Unit/Free List/Gate Leakage': 2.5481e-05,
'Renaming Unit/Free List/Peak Dynamic': 0.0306032,
'Renaming Unit/Free List/Runtime Dynamic': 0.0121948,
'Renaming Unit/Free List/Subthreshold Leakage': 0.000370144,
'Renaming Unit/Free List/Subthreshold Leakage with power gating': 0.000201064,
'Renaming Unit/Gate Leakage': 0.00708398,
'Renaming Unit/Int Front End RAT/Area': 0.0941223,
'Renaming Unit/Int Front End RAT/Gate Leakage': 0.000283242,
'Renaming Unit/Int Front End RAT/Peak Dynamic': 0.731965,
'Renaming Unit/Int Front End RAT/Runtime Dynamic': 0.112569,
'Renaming Unit/Int Front End RAT/Subthreshold Leakage': | |
different ETags.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
content1 = b'some interesting content.'
content_cksum1 = hashlib.new(digest_algo, content1)
content2 = b'Some great content.'
content_cksum2 = hashlib.new(digest_algo, content2)
self.client.put(path1, data=content1, content_type='text/plain')
self.client.put(path2, data=content2, content_type='text/plain')
get_rsp1 = self.client.get(path1)
get_rsp2 = self.client.get(path2)
assert get_rsp1.headers.get('etag') != get_rsp2.headers.get('etag')
assert get_rsp1.headers.get('digest') != get_rsp2.headers.get('digest')
def test_etag_update(self):
"""
Verify that ETag and digest change when the resource is updated.
The headers should NOT change if the same binary content is
re-submitted.
"""
path = f'/ldp/{uuid4()}'
content1 = uuid4().bytes
content_cksum1 = hashlib.new(digest_algo, content1)
content2 = uuid4().bytes
content_cksum2 = hashlib.new(digest_algo, content2)
self.client.put(path, data=content1, content_type='text/plain')
get_rsp = self.client.get(path)
assert content_cksum1.hexdigest() == \
get_rsp.headers.get('etag').strip('"')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum1.digest()).decode()
put_rsp = self.client.put(
path, data=content2, content_type='text/plain')
assert content_cksum2.hexdigest() == \
put_rsp.headers.get('etag').strip('"')
assert put_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum2.digest()).decode()
get_rsp = self.client.get(path)
assert content_cksum2.hexdigest() == \
get_rsp.headers.get('etag').strip('"')
assert get_rsp.headers.get('digest') == \
f'{digest_algo.upper()}=' + b64encode(content_cksum2.digest()).decode()
def test_etag_rdf(self):
"""
Verify that LDP-RS resources don't get an ETag.
TODO This is by design for now; when a reliable hashing method
for a graph is devised, this test should change.
"""
path = f'/ldp/{uuid4()}'
put_rsp = self.client.put(path, content_type='text/turtle')
assert not put_rsp.headers.get('etag')
assert not put_rsp.headers.get('digest')
get_rsp = self.client.get(path)
assert not get_rsp.headers.get('etag')
assert not get_rsp.headers.get('digest')
def test_digest_put(self):
"""
Test the ``Digest`` header with PUT to verify content integrity.
"""
path1 = f'/ldp/{uuid4()}'
path2 = f'/ldp/{uuid4()}'
path3 = f'/ldp/{uuid4()}'
content = uuid4().bytes
content_sha1 = sha1(content).hexdigest()
content_sha256 = sha256(content).hexdigest()
content_blake2b = blake2b(content).hexdigest()
assert self.client.put(path1, data=content, headers={
'digest': 'sha1=abcd'}).status_code == 409
assert self.client.put(path1, data=content, headers={
'digest': f'sha1={content_sha1}'}).status_code == 201
assert self.client.put(path2, data=content, headers={
'digest': f'SHA1={content_sha1}'}).status_code == 201
assert self.client.put(path3, data=content, headers={
'digest': f'SHA256={content_sha256}'}).status_code == 201
assert self.client.put(path3, data=content, headers={
'digest': f'blake2b={content_blake2b}'}).status_code == 204
def test_digest_post(self):
"""
Test the ``Digest`` header with POST to verify content integrity.
"""
path = '/ldp'
content = uuid4().bytes
content_sha1 = sha1(content).hexdigest()
content_sha256 = sha256(content).hexdigest()
content_blake2b = blake2b(content).hexdigest()
assert self.client.post(path, data=content, headers={
'digest': 'sha1=abcd'}).status_code == 409
assert self.client.post(path, data=content, headers={
'digest': f'sha1={content_sha1}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'SHA1={content_sha1}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'SHA256={content_sha256}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'blake2b={content_blake2b}'}).status_code == 201
assert self.client.post(path, data=content, headers={
'digest': f'bogusmd={content_blake2b}'}).status_code == 400
bencoded = b64encode(content_blake2b.encode())
assert self.client.post(
path, data=content,
headers={'digest': f'blake2b={bencoded}'}
).status_code == 400
@pytest.mark.usefixtures('client_class')
class TestETagCondHeaders:
    """
    Test Digest and ETag headers.

    NOTE(review): the test methods in this class are order-dependent — the
    *_put tests reuse the fixed-path resources created by the *_get tests
    earlier in the class. Do not reorder or isolate them.
    """
    def test_if_match_get(self):
        """
        Test the If-Match header on GET requests.
        Test providing single and multiple ETags.
        """
        # Fixed path (not a uuid): test_if_match_put reuses this resource.
        path = '/ldp/test_if_match1'
        content = uuid4().bytes
        content_cksum = hashlib.new(digest_algo, content).hexdigest()
        bogus_cksum = uuid4().hex
        self.client.put(
            path, data=content, headers={'content-type': 'text/plain'})
        # Matching ETag: request proceeds.
        get_rsp = self.client.get(path, headers={
            'if-match': f'"{content_cksum}"'})
        assert get_rsp.status_code == 200
        # Non-matching ETag: precondition failed.
        get_rsp = self.client.get(path, headers={
            'if-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 412
        # Any one matching ETag in the list is sufficient.
        get_rsp = self.client.get(path, headers={
            'if-match': f'"{content_cksum}", "{bogus_cksum}"'})
        assert get_rsp.status_code == 200
    def test_if_match_put(self):
        """
        Test the If-Match header on PUT requests.
        Test providing single and multiple ETags.

        Depends on the resource created by test_if_match_get at the same
        path; ``old_cksum`` is that resource's current ETag.
        """
        path = '/ldp/test_if_match1'
        content = uuid4().bytes
        content_cksum = hashlib.new(digest_algo, content).hexdigest()
        bogus_cksum = uuid4().hex
        get_rsp = self.client.get(path)
        old_cksum = get_rsp.headers.get('etag')
        # New content's checksum does not match the stored ETag yet.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{content_cksum}"'})
        assert put_rsp.status_code == 412
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{content_cksum}", "{bogus_cksum}"'})
        assert put_rsp.status_code == 412
        # The current ETag is in the list, so the update goes through.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{old_cksum}", "{bogus_cksum}"'})
        assert put_rsp.status_code == 204
        # Now contents have changed.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{old_cksum}"'})
        assert put_rsp.status_code == 412
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{content_cksum}"'})
        assert put_rsp.status_code == 204
        # Exactly the same content was uploaded, so the ETag should not have
        # changed.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': f'"{content_cksum}"'})
        assert put_rsp.status_code == 204
        # Catch-all: Proceed if resource exists at the given location.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': '*'})
        assert put_rsp.status_code == 204
        # This is wrong syntax. It will not update because the literal asterisk
        # won't match.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': '"*"'})
        assert put_rsp.status_code == 412
        # Test delete.
        del_rsp = self.client.delete(path, headers={
            'if-match': f'"{old_cksum}"', 'Prefer': 'no-tombstone'})
        assert del_rsp.status_code == 412
        del_rsp = self.client.delete(path, headers={
            'if-match': f'"{content_cksum}"', 'Prefer': 'no-tombstone'})
        assert del_rsp.status_code == 204
        # Resource is gone now, so the catch-all precondition fails.
        put_rsp = self.client.put(path, data=content, headers={
            'if-match': '*'})
        assert put_rsp.status_code == 412
    def test_if_none_match_get(self):
        """
        Test the If-None-Match header on GET requests.
        Test providing single and multiple ETags.
        """
        # Fixed path: test_if_none_match_put reuses this resource.
        path = '/ldp/test_if_none_match1'
        content = uuid4().bytes
        content_cksum = hashlib.new(digest_algo, content).hexdigest()
        bogus_cksum = uuid4().hex
        self.client.put(
            path, data=content, headers={'content-type': 'text/plain'})
        # Matching ETag: client's copy is current -> 304.
        get_rsp1 = self.client.get(path, headers={
            'if-none-match': f'"{content_cksum}"'})
        assert get_rsp1.status_code == 304
        # Non-matching ETag: full response.
        get_rsp2 = self.client.get(path, headers={
            'if-none-match': f'"{bogus_cksum}"'})
        assert get_rsp2.status_code == 200
        # Any one matching ETag in the list triggers 304.
        get_rsp3 = self.client.get(path, headers={
            'if-none-match': f'"{content_cksum}", "{bogus_cksum}"'})
        assert get_rsp3.status_code == 304
        # 404 has precedence on ETag handling.
        get_rsp = self.client.get('/ldp/bogus', headers={
            'if-none-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 404
        get_rsp = self.client.get('/ldp/bogus', headers={
            'if-none-match': f'"{content_cksum}"'})
        assert get_rsp.status_code == 404
    def test_if_none_match_put(self):
        """
        Test the If-None-Match header on PUT requests.
        Test providing single and multiple ETags.
        Uses a previously created resource.
        """
        path = '/ldp/test_if_none_match1'
        content = uuid4().bytes
        content_cksum = hashlib.new(digest_algo, content).hexdigest()
        bogus_cksum = uuid4().hex
        get_rsp = self.client.get(path)
        old_cksum = get_rsp.headers.get('etag')
        # Current ETag matches -> precondition (none match) fails.
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': f'"{old_cksum}"'})
        assert put_rsp.status_code == 412
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': f'"{old_cksum}", "{bogus_cksum}"'})
        assert put_rsp.status_code == 412
        # No ETag in the list matches -> update proceeds.
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': f'"{bogus_cksum}"'})
        assert put_rsp.status_code == 204
        # Now contents have changed.
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': f'"{content_cksum}"'})
        assert put_rsp.status_code == 412
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': f'"{old_cksum}"'})
        assert put_rsp.status_code == 204
        # Catch-all: fail if any resource exists at the given location.
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': '*'})
        assert put_rsp.status_code == 412
        # Test delete.
        del_rsp = self.client.delete(path, headers={
            'if-none-match': f'"{content_cksum}"', 'Prefer': 'no-tombstone'})
        assert del_rsp.status_code == 412
        del_rsp = self.client.delete(path, headers={
            'if-none-match': f'"{bogus_cksum}"', 'Prefer': 'no-tombstone'})
        assert del_rsp.status_code == 204
        # Resource is gone, so the catch-all allows creation (201).
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': '*'})
        assert put_rsp.status_code == 201
        # This is wrong syntax. It will update because the literal asterisk
        # won't match.
        put_rsp = self.client.put(path, data=content, headers={
            'if-none-match': '"*"'})
        assert put_rsp.status_code == 204
    def test_etag_notfound(self):
        """
        Verify that 404 and 410 have precedence on ETag handling.
        """
        path = f'/ldp/{uuid4()}'
        bogus_cksum = uuid4().hex
        # Resource never existed: always 404, regardless of preconditions.
        get_rsp = self.client.get(path, headers={
            'if-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 404
        get_rsp = self.client.get(path, headers={
            'if-match': '*'})
        assert get_rsp.status_code == 404
        get_rsp = self.client.get(path, headers={
            'if-none-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 404
        # Create and delete -> tombstone: always 410.
        self.client.put(path)
        self.client.delete(path)
        get_rsp = self.client.get(path, headers={
            'if-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 410
        get_rsp = self.client.get(path, headers={
            'if-none-match': f'"{bogus_cksum}"'})
        assert get_rsp.status_code == 410
        get_rsp = self.client.get(path, headers={
            'if-match': '*'})
        assert get_rsp.status_code == 410
@pytest.mark.usefixtures('client_class')
class TestModifyTimeCondHeaders:
"""
Test time-related conditional headers.
"""
    @pytest.fixture(scope='class')
    def timeframe(self):
        """
        Times used in these tests: UTC midnight of today, yesterday, tomorrow.

        Also creates one resource up front; the class's tests reuse it via
        the returned path.
        """
        today = arrow.utcnow().floor('day')
        yesterday = today.shift(days=-1)
        tomorrow = today.shift(days=1)
        # Fresh resource whose modification time falls between the bounds.
        path = f'/ldp/{uuid4()}'
        self.client.put(path)
        return path, today, yesterday, tomorrow
    def test_nothing(self):
        """
        For some reason, without this the fixture won't initialize properly.
        """
        # HACK: dummy request that forces the app/client context to spin up
        # before the tests that actually use the class-scoped fixture.
        self.client.get('/')
def test_if_modified_since(self, timeframe):
"""
Test various uses of the If-Modified-Since header.
"""
path, today, yesterday, tomorrow = timeframe
assert self.client.head(
path, headers={'if-modified-since': http_date(today.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-modified-since': http_date(today.timestamp)}
).status_code == 200
assert self.client.head(
path, headers={'if-modified-since': http_date(yesterday.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-modified-since': http_date(yesterday.timestamp)}
).status_code == 200
assert self.client.head(
path, headers={'if-modified-since': http_date(tomorrow.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-modified-since': http_date(tomorrow.timestamp)}
).status_code == 304
def test_if_unmodified_since(self, timeframe):
"""
Test various uses of the If-Unmodified-Since header.
"""
path, today, yesterday, tomorrow = timeframe
assert self.client.head(
path, headers={'if-unmodified-since': http_date(today.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-unmodified-since': http_date(today.timestamp)}
).status_code == 304
assert self.client.head(
path, headers={'if-unmodified-since': http_date(yesterday.timestamp)}
).status_code == 304
assert self.client.get(
path, headers={'if-unmodified-since': http_date(yesterday.timestamp)}
).status_code == 304
assert self.client.head(
path, headers={'if-unmodified-since': http_date(tomorrow.timestamp)}
).status_code == 200
assert self.client.get(
path, headers={'if-unmodified-since': http_date(tomorrow.timestamp)}
).status_code == 200
def test_time_range(self, timeframe):
"""
Test conditions inside and outside of a time range.
"""
path, today, yesterday, tomorrow = timeframe
# Send me the resource if it has been modified between yesterday
# and tomorrow.
assert self.client.get(path, headers={
'if-modified-since': http_date(yesterday.timestamp),
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 200
# Send me the resource if it has been modified between today
# and tomorrow.
assert self.client.get(path, headers={
'if-modified-since': http_date(today.timestamp),
'if-unmodified-since': http_date(tomorrow.timestamp),
}).status_code == 200
# Send me the resource if it has been modified between yesterday
# and | |
"Reconstructed Augmented Input MRBrainS Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Input ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Input ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Normalized ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Normalized ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Ground Truth ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Ground Truth ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Segmented ABIDE Image", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True,
"title": "Reconstructed Segmented ABIDE Image"}},
every=10), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Conv1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Conv1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer1 FM", PlotType.IMAGES_PLOT,
params={"nrow": 8, "opts": {"store_history": True,
"title": "Layer1 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer2 FM", PlotType.IMAGES_PLOT,
params={"nrow": 12, "opts": {"store_history": True,
"title": "Layer2 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Layer3 FM", PlotType.IMAGES_PLOT,
params={"nrow": 16, "opts": {"store_history": True,
"title": "Layer3 FM"}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Per-Dataset Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Reconstructed Images Histograms", PlotType.IMAGE_PLOT,
params={"opts": {"store_history": True}}, every=5), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
Checkpoint(save_folder, monitor_fn=lambda model_trainer: model_trainer.valid_loss, delta=0.01,
mode=MonitorMode.MIN), Event.ON_EPOCH_END) \
.with_event_handler(PlotAvgGradientPerLayer(visdom_logger, every=25), Event.ON_TRAIN_BATCH_END)
return trainer
elif self._trainer == TrainerType.DCGAN_Multimodal:
trainer = DCGANMultimodalTrainer(training_config, model_trainers, dataloaders[0], dataloaders[1],
dataloaders[2],
reconstruction_datasets, normalized_reconstructor, input_reconstructor,
segmentation_reconstructor, augmented_input_reconstructor,
gt_reconstructor,
run_config, dataset_configs, save_folder) \
.with_event_handler(PrintTrainingStatus(every=25), Event.ON_BATCH_END) \
.with_event_handler(PrintMonitors(every=25), Event.ON_BATCH_END) \
.with_event_handler(PlotMonitors(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(PlotLR(visdom_logger), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Generated Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Generated Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 0".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 0".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Input Batch Process {} Channel 1".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Input Patches Process {} Channel 1".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Segmented Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Test Segmented Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Segmented Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Training Segmentation Ground Truth Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Segmentation Ground Truth Batch Process {}".format(
run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Ground Truth Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Training Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Training Label Map Patches Process {}".format(
run_config.local_rank)}},
every=500), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Validation Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Validation Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_VALID_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger,
"Test Label Map Batch Process {}".format(run_config.local_rank),
PlotType.IMAGES_PLOT,
params={"nrow": 4,
"opts": {"store_history": True,
"title": "Test Label Map Patches Process {}".format(
run_config.local_rank)}},
every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Generated Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Generated Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Inputs Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Background Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "Background Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "CSF Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "CSF Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "GM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "GM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "WM Input Intensity Histogram", PlotType.HISTOGRAM_PLOT,
params={"opts": {"title": "WM Input Intensity Histogram",
"store_history": True,
"numbins": 128}}, every=100), Event.ON_TEST_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Pie Plot", PlotType.PIE_PLOT,
params={"opts": {"title": "Classification hit per classes",
"legend": list(map(lambda key: key,
dataset_configs.keys())) + [
"Fake Class"]}},
every=25), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Pie Plot True", PlotType.PIE_PLOT,
params={"opts": {"title": "Batch data distribution",
"legend": list(map(lambda key: key,
dataset_configs.keys())) + [
"Fake Class"]}},
every=25), Event.ON_TRAIN_BATCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Mean Hausdorff Distance", PlotType.LINE_PLOT,
params={"opts": {"title": "Mean Hausdorff Distance",
"legend": ["Test"]}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Metric Table", PlotType.TEXT_PLOT,
params={"opts": {"title": "Metric Table"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Per-Dataset Metric Table", PlotType.TEXT_PLOT,
params={"opts": {"title": "Per-Dataset Metric Table"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Jensen-Shannon Table", PlotType.TEXT_PLOT,
params={"opts": {"title": "Jensen-Shannon Divergence"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "iSEG Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "iSEG Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "MRBrainS Confusion Matrix", PlotType.HEATMAP_PLOT,
params={"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "MRBrainS Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "ABIDE Confusion Matrix", PlotType.HEATMAP_PLOT,
params={
"opts": {"columnnames": ["VM", "GM", "CSF", "Background"],
"rownames": ["Background", "CSF", "GM", "WM"],
"title": "ABIDE Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Discriminator Confusion Matrix", PlotType.HEATMAP_PLOT,
params={"opts": {
"columnnames": ["Generated"] + list(reversed(list(dataset_configs.keys()))),
"rownames": list(dataset_configs.keys()) + ["Generated"],
"title": "Discriminator Confusion Matrix"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomVariables(visdom_logger, "Discriminator Confusion Matrix Training", PlotType.HEATMAP_PLOT,
params={"opts": {
"columnnames": ["Generated"] + list(reversed(list(dataset_configs.keys()))),
"rownames": list(dataset_configs.keys()) + ["Generated"],
"title": "Discriminator Confusion Matrix Training"}},
every=1), Event.ON_TRAIN_EPOCH_END) \
.with_event_handler(PlotCustomVariables(visdom_logger, "Runtime", PlotType.TEXT_PLOT,
params={"opts": {"title": "Runtime"}},
every=1), Event.ON_TEST_EPOCH_END) \
.with_event_handler(PlotCustomLoss(visdom_logger, "Discriminator Loss", every=1),
Event.ON_EPOCH_END) \
.with_event_handler(PlotCustomLoss(visdom_logger, "Total Loss", every=1), Event.ON_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Jensen-Shannon Divergence", every=1,
params={"title": "Jensen-Shannon Divergence on test data per Epoch",
"legend": ["Inputs", "Normalized"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Per Dataset Mean Hausdorff Distance", every=1,
params={"title": "Per Dataset Mean Hausdorff Distance",
"legend": list(dataset_configs.keys())}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger, "Dice score per class per epoch", every=1,
params={"title": "Dice score on test patches per class per epoch",
"legend": ["CSF", "GM", "WM"]}), Event.ON_TEST_EPOCH_END) \
.with_event_handler(
PlotCustomLinePlotWithLegend(visdom_logger,
"Hausdorff | |
# ==============================================================================
# Copyright (c) 2022 The PersFormer Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit import script
import torchvision.models as models
import geffnet
#__all__ = ['mish','Mish']
class WSConv2d(nn.Conv2d):
    """2-D convolution with Weight Standardization.

    On every forward pass the kernel is standardized per output channel
    (zero mean, unit-ish std) before the convolution is applied; this pairs
    well with GroupNorm-based blocks.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=True):
        # BUG FIX: this method was previously named `__init___` (three
        # trailing underscores), so it never overrode nn.Conv2d.__init__ and
        # was silently dead code.
        super(WSConv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
                                       padding, dilation, groups, bias)

    def forward(self, x):
        weight = self.weight
        # Per-output-channel mean over (in_channels, kH, kW), kept broadcastable.
        weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True).mean(dim=3, keepdim=True)
        weight = weight - weight_mean
        # Per-output-channel std of the centered kernel; 1e-5 avoids division
        # by zero for (near-)constant kernels.
        std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
        weight = weight / std.expand_as(weight)
        return F.conv2d(x, weight, self.bias, self.stride, self.padding, self.dilation, self.groups)
def conv_ws(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True):
    """Factory for a weight-standardized 2-D convolution (see WSConv2d)."""
    layer = WSConv2d(in_channels, out_channels, kernel_size,
                     stride=stride, padding=padding, dilation=dilation,
                     groups=groups, bias=bias)
    return layer
'''
class Mish(nn.Module):
def __init__(self):
super(Mish, self).__init__()
def forward(self, x):
return x*torch.tanh(F.softplus(x))
'''
@script
def _mish_jit_fwd(x): return x.mul(torch.tanh(F.softplus(x)))
@script
def _mish_jit_bwd(x, grad_output):
x_sigmoid = torch.sigmoid(x)
x_tanh_sp = F.softplus(x).tanh()
return grad_output.mul(x_tanh_sp + x * x_sigmoid * (1 - x_tanh_sp * x_tanh_sp))
class MishJitAutoFn(torch.autograd.Function):
    """Custom autograd Function for Mish using the scripted fwd/bwd kernels.

    The input is saved in forward so the analytic gradient can be evaluated
    in backward without re-running the forward graph.
    """

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        return _mish_jit_fwd(x)

    @staticmethod
    def backward(ctx, grad_output):
        # BUG FIX: `ctx.saved_variables` is a long-deprecated alias of
        # `ctx.saved_tensors` and is gone from modern PyTorch; use the
        # documented attribute.
        x = ctx.saved_tensors[0]
        return _mish_jit_bwd(x, grad_output)
# Cell
# Functional form of Mish, x * tanh(softplus(x)), routed through the custom
# autograd Function so the hand-written backward is used.
def mish(x): return MishJitAutoFn.apply(x)
class Mish(nn.Module):
    """Module form of the Mish activation: ``x * tanh(softplus(x))``.

    The ``inplace`` argument is accepted only for signature compatibility
    with other activations; the computation is always out-of-place.
    """

    def __init__(self, inplace: bool = False):
        super().__init__()

    def forward(self, x):
        return MishJitAutoFn.apply(x)
######################################################################################################################
######################################################################################################################
# pre-activation based upsampling conv block
class upConvLayer(nn.Module):
    """Pre-activation upsampling block: norm -> act -> bilinear upsample -> 3x3 WS-conv."""

    def __init__(self, in_channels, out_channels, scale_factor, norm, act, num_groups):
        super(upConvLayer, self).__init__()
        if act == 'ELU':
            activation = nn.ELU()
        elif act == 'Mish':
            activation = Mish()
        else:
            activation = nn.ReLU(True)
        # Construction and registration order (conv before norm) is kept
        # identical to the original so state_dict keys and parameter
        # initialization order are unchanged.
        self.conv = conv_ws(in_channels=in_channels, out_channels=out_channels,
                            kernel_size=3, stride=1, padding=1, bias=False)
        if norm == 'GN':
            self.norm = nn.GroupNorm(num_groups=num_groups, num_channels=in_channels)
        else:
            self.norm = nn.BatchNorm2d(in_channels, eps=0.001, momentum=0.1,
                                       affine=True, track_running_stats=True)
        self.act = activation
        self.scale_factor = scale_factor

    def forward(self, x):
        # Pre-activation ordering: normalize and activate before upsampling.
        out = self.act(self.norm(x))
        out = F.interpolate(out, scale_factor=self.scale_factor, mode='bilinear')
        return self.conv(out)
# pre-activation based conv block
class myConv(nn.Module):
    """Pre-activation convolution block: norm -> activation -> WS-conv."""

    def __init__(self, in_ch, out_ch, kSize, stride=1,
                 padding=0, dilation=1, bias=True, norm='GN', act='ELU', num_groups=32):
        super(myConv, self).__init__()
        if act == 'ELU':
            activation = nn.ELU()
        elif act == 'Mish':
            activation = Mish()
        else:
            activation = nn.ReLU(True)
        if norm == 'GN':
            normalization = nn.GroupNorm(num_groups=num_groups, num_channels=in_ch)
        else:
            normalization = nn.BatchNorm2d(in_ch, eps=0.001, momentum=0.1,
                                           affine=True, track_running_stats=True)
        # Same Sequential ordering as before: norm -> act -> conv.
        self.module = nn.Sequential(
            normalization,
            activation,
            conv_ws(in_ch, out_ch, kernel_size=kSize, stride=stride,
                    padding=padding, dilation=dilation, groups=1, bias=bias),
        )

    def forward(self, x):
        return self.module(x)
# Deep Feature Extractor
class deepFeatureExtractor_ResNext101(nn.Module):
    """Multi-scale feature extractor wrapping torchvision's ResNeXt-101 (32x8d).

    ``forward`` returns the intermediate feature maps produced by the layers
    named in ``layerList``; ``dimList`` records the channel count of each map.
    """
    def __init__(self, lv6 = False):
        super(deepFeatureExtractor_ResNext101, self).__init__()
        # Spatial size of the collected features relative to the input:
        # after passing ReLU : H/2 x W/2
        # after passing Layer1 : H/4 x W/4
        # after passing Layer2 : H/8 x W/8
        # after passing Layer3 : H/16 x W/16
        self.encoder = models.resnext101_32x8d(pretrained=True)
        # Parameters whose names contain any of these substrings are frozen:
        # the first two layer1 blocks and every batch-norm parameter ('.bn').
        self.fixList = ['layer1.0','layer1.1','.bn']
        self.lv6 = lv6  # whether the deepest (H/32, layer4) features are exposed
        if lv6 is True:
            self.layerList = ['relu','layer1','layer2','layer3', 'layer4']
            self.dimList = [64, 256, 512, 1024, 2048]
        else:
            # layer4 and the classifier head are unused in this mode.
            del self.encoder.layer4
            del self.encoder.fc
            self.layerList = ['relu','layer1','layer2','layer3']
            self.dimList = [64, 256, 512, 1024]
        # Freeze the stem convolution plus everything matched by fixList.
        for name, parameters in self.encoder.named_parameters():
            if name == 'conv1.weight':
                parameters.requires_grad = False
            if any(x in name for x in self.fixList):
                parameters.requires_grad = False
    def forward(self, x):
        """Run the encoder and collect the feature maps named in layerList."""
        out_featList = []
        feature = x
        for k, v in self.encoder._modules.items():
            if k == 'avgpool':
                # Everything from avgpool onwards belongs to the classifier.
                break
            feature = v(feature)
            if any(x in k for x in self.layerList):
                out_featList.append(feature)
        return out_featList
    def freeze_bn(self, enable=False):
        """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
        # Put all BatchNorm2d layers in eval (or train) mode and toggle
        # gradient flow through their affine parameters.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.train() if enable else module.eval()
                module.weight.requires_grad = enable
                module.bias.requires_grad = enable
class deepFeatureExtractor_VGG19(nn.Module):
    """Multi-scale feature extractor wrapping torchvision's VGG-19 (batch-norm).

    ``forward`` returns the feature maps produced right after the
    ``features`` indices in ``layerList``; ``dimList`` records their channels.
    """
    def __init__(self, lv6 = False):
        super(deepFeatureExtractor_VGG19, self).__init__()
        self.lv6 = lv6  # whether the deepest (H/32) features are exposed
        # Spatial size after the listed ``features`` indices:
        # after passing 6th layer : H/2 x W/2
        # after passing 13th layer : H/4 x W/4
        # after passing 26th layer : H/8 x W/8
        # after passing 39th layer : H/16 x W/16
        # after passing 52th layer : H/32 x W/32
        self.encoder = models.vgg19_bn(pretrained=True)
        # The pooled head and classifier are never used.
        del self.encoder.avgpool
        del self.encoder.classifier
        if lv6 is True:
            self.dimList = [64, 128, 256, 512, 512]
            self.layerList = [6, 13, 26, 39, 52]
        else:
            self.dimList = [64, 128, 256, 512]
            self.layerList = [6, 13, 26, 39]
            # Drop the last 13 feature layers (everything past index 39).
            for i in range(13):
                del self.encoder.features[-1]
        '''
        self.fixList = ['.bn']
        for name, parameters in self.encoder.named_parameters():
            if any(x in name for x in self.fixList):
                parameters.requires_grad = False
        '''
    def forward(self, x):
        """Run ``encoder.features`` sequentially, tapping layerList indices."""
        out_featList = []
        feature = x
        for i in range(len(self.encoder.features)):
            feature = self.encoder.features[i](feature)
            if i in self.layerList:
                out_featList.append(feature)
        return out_featList
    def freeze_bn(self, enable=False):
        """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
        # Put all BatchNorm2d layers in eval (or train) mode and toggle
        # gradient flow through their affine parameters.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.train() if enable else module.eval()
                module.weight.requires_grad = enable
                module.bias.requires_grad = enable
class deepFeatureExtractor_DenseNet161(nn.Module):
    """Multi-scale feature extractor wrapping torchvision's DenseNet-161.

    Features are tapped at conv0, after each transition's 1x1 conv (before
    its stride-2 pool), and optionally after denseblock4; ``dimList``
    records the channel counts of the tapped maps.
    """
    def __init__(self, lv6 = False):
        super(deepFeatureExtractor_DenseNet161, self).__init__()
        self.encoder = models.densenet161(pretrained=True)
        self.lv6 = lv6  # whether denseblock4's output is also exposed
        # The classifier and the final norm layer are never used.
        del self.encoder.classifier
        del self.encoder.features.norm5
        if lv6 is True:
            self.dimList = [96, 192, 384, 1056, 2208]
        else:
            self.dimList = [96, 192, 384, 1056]
            del self.encoder.features.denseblock4
    def forward(self, x):
        """Run ``encoder.features`` module-by-module, tapping feature maps."""
        out_featList = []
        feature = x
        for k, v in self.encoder.features._modules.items():
            if ('transition' in k):
                # Unroll the transition so the feature can be captured after
                # its conv but before the stride-2 pooling.
                feature = v.norm(feature)
                feature = v.relu(feature)
                feature = v.conv(feature)
                out_featList.append(feature)
                feature = v.pool(feature)
            elif k == 'conv0':
                feature = v(feature)
                out_featList.append(feature)
            elif k == 'denseblock4' and (self.lv6 is True):
                feature = v(feature)
                out_featList.append(feature)
            else:
                feature = v(feature)
        return out_featList
    def freeze_bn(self, enable=False):
        """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
        # Put all BatchNorm2d layers in eval (or train) mode and toggle
        # gradient flow through their affine parameters.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.train() if enable else module.eval()
                module.weight.requires_grad = enable
                module.bias.requires_grad = enable
class deepFeatureExtractor_InceptionV3(nn.Module):
def __init__(self, lv6 = False):
super(deepFeatureExtractor_InceptionV3, self).__init__()
self.encoder = models.inception_v3(pretrained=True)
self.encoder.aux_logits = False
self.lv6 = lv6
del self.encoder.AuxLogits
del self.encoder.fc
if lv6 is True:
self.layerList = ['Conv2d_2b_3x3','Conv2d_4a_3x3','Mixed_5d','Mixed_6e','Mixed_7c']
self.dimList = [64, 192, 288, 768, 2048]
else:
self.layerList = ['Conv2d_2b_3x3','Conv2d_4a_3x3','Mixed_5d','Mixed_6e']
self.dimList = [64, 192, 288, 768]
del self.encoder.Mixed_7a
del self.encoder.Mixed_7b
del self.encoder.Mixed_7c
def forward(self, x):
out_featList = []
feature = x
for k, v in self.encoder._modules.items():
feature = v(feature)
if k in ['Conv2d_2b_3x3', 'Conv2d_ta_3x3']:
feature = F.max_pool2d(feature, kernel_size=3, stride=2)
if any(x in k for x in self.layerList):
out_featList.append(feature)
return out_featList
def freeze_bn(self, enable=False):
""" Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
for module in self.modules():
if isinstance(module, nn.BatchNorm2d):
module.train() if enable else module.eval()
module.weight.requires_grad = enable
module.bias.requires_grad = enable
class deepFeatureExtractor_MobileNetV2(nn.Module):
    """Multi-scale feature extractor wrapping torchvision's MobileNetV2.

    ``forward`` returns the feature maps at the ``features`` indices in
    ``layerList``; ``dimList`` records their channel counts.
    """
    def __init__(self):
        super(deepFeatureExtractor_MobileNetV2, self).__init__()
        # Spatial size after the listed taps:
        # after passing 1th : H/2 x W/2
        # after passing 2th : H/4 x W/4
        # after passing 3th : H/8 x W/8
        # after passing 4th : H/16 x W/16
        # after passing 5th : H/32 x W/32
        self.encoder = models.mobilenet_v2(pretrained=True)
        del self.encoder.classifier  # the classification head is never used
        self.layerList = [1, 3, 6, 13, 18]
        # NOTE(review): torchvision's mobilenet_v2 last feature layer
        # (features[18]) outputs 1280 channels, not 960 — verify the last
        # entry against whatever consumes dimList before relying on it.
        self.dimList = [16, 24, 32, 96, 960]
    def forward(self, x):
        """Run ``encoder.features`` sequentially, tapping layerList indices."""
        out_featList = []
        feature = x
        for i in range(len(self.encoder.features)):
            feature = self.encoder.features[i](feature)
            if i in self.layerList:
                out_featList.append(feature)
        return out_featList
    def freeze_bn(self, enable=False):
        """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
        # Put all BatchNorm2d layers in eval (or train) mode and toggle
        # gradient flow through their affine parameters.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.train() if enable else module.eval()
                module.weight.requires_grad = enable
                module.bias.requires_grad = enable
class deepFeatureExtractor_ResNet101(nn.Module):
    """Multi-scale feature extractor wrapping torchvision's ResNet-101.

    ``forward`` returns the intermediate feature maps produced by the layers
    named in ``layerList``; ``dimList`` records the channel count of each map.
    """
    def __init__(self, lv6 = False):
        super(deepFeatureExtractor_ResNet101, self).__init__()
        # Spatial size of the collected features relative to the input:
        # after passing ReLU : H/2 x W/2
        # after passing Layer1 : H/4 x W/4
        # after passing Layer2 : H/8 x W/8
        # after passing Layer3 : H/16 x W/16
        self.encoder = models.resnet101(pretrained=True)
        # Parameters whose names contain any of these substrings are frozen:
        # the first two layer1 blocks and every batch-norm parameter ('.bn').
        self.fixList = ['layer1.0','layer1.1','.bn']
        if lv6 is True:
            self.layerList = ['relu','layer1','layer2','layer3', 'layer4']
            self.dimList = [64, 256, 512, 1024,2048]
        else:
            # layer4 and the classifier head are unused in this mode.
            del self.encoder.layer4
            del self.encoder.fc
            self.layerList = ['relu','layer1','layer2','layer3']
            self.dimList = [64, 256, 512, 1024]
        # Freeze the stem convolution plus everything matched by fixList.
        for name, parameters in self.encoder.named_parameters():
            if name == 'conv1.weight':
                parameters.requires_grad = False
            if any(x in name for x in self.fixList):
                parameters.requires_grad = False
    def forward(self, x):
        """Run the encoder and collect the feature maps named in layerList."""
        out_featList = []
        feature = x
        for k, v in self.encoder._modules.items():
            if k == 'avgpool':
                # Everything from avgpool onwards belongs to the classifier.
                break
            feature = v(feature)
            if any(x in k for x in self.layerList):
                out_featList.append(feature)
        return out_featList
    def freeze_bn(self, enable=False):
        """ Adapted from https://discuss.pytorch.org/t/how-to-train-with-frozen-batchnorm/12106/8 """
        # Put all BatchNorm2d layers in eval (or train) mode and toggle
        # gradient flow through their affine parameters.
        for module in self.modules():
            if isinstance(module, nn.BatchNorm2d):
                module.train() if enable else module.eval()
                module.weight.requires_grad = enable
                module.bias.requires_grad = enable
class deepFeatureExtractor_EfficientNet(nn.Module):
def __init__(self, architecture="EfficientNet-B5", lv6=False, lv5=False, lv4=False, lv3=False):
super(deepFeatureExtractor_EfficientNet, self).__init__()
assert architecture in | |
postprocessing
result = self._repetition_postcall(sds, node, result)
if space:
# XXX maybe try to get something more informative from the
# processing node (e.g. in 0.5 it used to be 'chunks'->'chunks'
# to indicate what was trained and what was tested. Now it is
# more tricky, because `node` could be anything
result.set_attr(space, (i,))
# store
results.append(result)
if ca.is_enabled("stats") and node.ca.has_key("stats") \
and node.ca.is_enabled("stats"):
if not ca.is_set('stats'):
# create empty stats container of matching type
ca.stats = node.ca['stats'].value.__class__()
# harvest summary stats
ca['stats'].value.__iadd__(node.ca['stats'].value)
# charge condition attribute
self.ca.repetition_results = results
# stack all results into a single Dataset
if concat_as == 'samples':
results = vstack(results)
elif concat_as == 'features':
results = hstack(results)
else:
raise ValueError("Unkown concatenation mode '%s'" % concat_as)
# no need to store the raw results, since the Measure class will
# automatically store them in a CA
return results
    def _repetition_postcall(self, ds, node, result):
        """Post-processing handler for each repetition.

        Maybe overwritten in subclasses to harvest additional data.

        Parameters
        ----------
        ds : Dataset
          Input dataset for the node for this repetition
        node : Node
          Node after having processed the input dataset
        result : Dataset
          Output dataset of the node for this repetition.

        Returns
        -------
        dataset
          The result dataset.
        """
        # base implementation is a deliberate no-op hook: the result is
        # passed through unchanged
        return result
    def _untrain(self):
        """Untrain this measure and the embedded node."""
        # wipe the wrapped node's learned state first, then let the
        # superclass reset this measure's own state
        self._node.untrain()
        super(RepeatedMeasure, self)._untrain()
node = property(fget=lambda self: self._node)
generator = property(fget=lambda self: self._generator)
callback = property(fget=lambda self: self._callback)
concat_as = property(fget=lambda self: self._concat_as)
class CrossValidation(RepeatedMeasure):
    """Cross-validate a learner's transfer on datasets.

    A generator is used to resample a dataset into multiple instances (e.g.
    sets of dataset partitions for leave-one-out folding). For each dataset
    instance a transfer measure is computed by splitting the dataset into
    two parts (defined by the dataset generators output space) and train a
    custom learner on the first part and run it on the next. An arbitrary error
    function can be used to determine the learner's error when predicting the
    dataset part that has been unseen during training.
    """
    training_stats = ConditionalAttribute(enabled=False, doc=
        """Summary statistics about the training status of the learner
        across all cross-validation folds.""")
    # TODO move conditional attributes from CVTE into this guy
    def __init__(self, learner, generator, errorfx=mean_mismatch_error,
                 splitter=None, **kwargs):
        """
        Parameters
        ----------
        learner : Learner
          Any trainable node that shall be run on the dataset folds.
        generator : Node
          Generator used to resample the input dataset into multiple instances
          (i.e. partitioning it). The number of datasets yielded by this
          generator determines the number of cross-validation folds.
          IMPORTANT: The ``space`` of this generator determines the attribute
          that will be used to split all generated datasets into training and
          testing sets.
        errorfx : Node or callable
          Custom implementation of an error function. The callable needs to
          accept two arguments (1. predicted values, 2. target values). If not
          a Node, it gets wrapped into a `BinaryFxNode`.
        splitter : Splitter or None
          A Splitter instance to split the dataset into training and testing
          part. The first split will be used for training and the second for
          testing -- all other splits will be ignored. If None, a default
          splitter is auto-generated using the ``space`` setting of the
          ``generator``. The default splitter is configured to return the
          ``1``-labeled partition of the input dataset at first, and the
          ``2``-labeled partition second. This behavior corresponds to most
          Partitioners that label the taken-out portion ``2`` and the remainder
          with ``1``.
        """
        # compile the appropriate repeated measure to do cross-validation from
        # pieces
        if errorfx is not None:
            # error node -- postproc of transfer measure
            if isinstance(errorfx, Node):
                enode = errorfx
            else:
                # wrap into BinaryFxNode
                enode = BinaryFxNode(errorfx, learner.get_space())
        else:
            enode = None
        if splitter is None:
            # default splitter splits into "1" and "2" partition.
            # that will effectively ignore 'deselected' samples (e.g. by
            # Balancer). It is done this way (and not by ignoring '0' samples
            # because it is guaranteed to yield two splits) and is more likely
            # to fail in visible ways if the attribute does not have 0,1,2
            # values at all (i.e. a literal train/test/spareforlater attribute)
            splitter = Splitter(generator.get_space(), attr_values=(1,2))
        # transfer measure to wrap the learner
        # splitter used the output space of the generator to know what to split
        tm = TransferMeasure(learner, splitter, postproc=enode)
        space = kwargs.pop('space', 'sa.cvfolds')
        # and finally the repeated measure to perform the x-val
        RepeatedMeasure.__init__(self, tm, generator, space=space,
                                 **kwargs)
        for ca in ['stats', 'training_stats']:
            if self.ca.is_enabled(ca):
                # enforce ca if requested
                tm.ca.enable(ca)
        if self.ca.is_enabled('training_stats'):
            # also enable training stats in the learner
            learner.ca.enable('training_stats')
    def __repr__(self, prefixes=None):
        # 'prefixes' formerly defaulted to a mutable list ([]), which is
        # shared across calls; use the None-sentinel idiom instead
        if prefixes is None:
            prefixes = []
        return super(CrossValidation, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['learner', 'splitter'])
            + _repr_attrs(self, ['errorfx'], default=mean_mismatch_error)
            + _repr_attrs(self, ['space'], default='sa.cvfolds')
            )
    def _call(self, ds):
        # always untrain to wipe out previous stats
        self.untrain()
        return super(CrossValidation, self)._call(ds)
    def _repetition_postcall(self, ds, node, result):
        # harvest the per-fold training statistics from the transfer measure
        # local binding
        ca = self.ca
        if ca.is_enabled("training_stats"):
            if not ca.is_set('training_stats'):
                # create empty stats container of matching type
                ca.training_stats = node.ca['training_stats'].value.__class__()
            # harvest summary stats
            ca['training_stats'].value.__iadd__(node.ca['training_stats'].value)
        return result
    transfermeasure = property(fget=lambda self:self._node)
    # XXX Well, those properties are defined to match available
    # attributes to constructor arguments. Unfortunately our
    # hierarchy/API is not ideal at this point
    learner = property(fget=lambda self: self.transfermeasure.measure)
    splitter = property(fget=lambda self: self.transfermeasure.splitter)
    errorfx = property(fget=lambda self: self.transfermeasure.postproc)
class TransferMeasure(Measure):
    """Train and run a measure on two different parts of a dataset.

    Upon calling a TransferMeasure instance with a dataset the input dataset
    is passed to a `Splitter` that will generate dataset subsets. The first
    generated dataset is used to train an arbitrary embedded `Measure`. Once
    trained, the measure is then called with the second generated dataset
    and the result is returned.
    """
    stats = ConditionalAttribute(enabled=False, doc=
        """Optional summary statistics about the transfer performance""")
    training_stats = ConditionalAttribute(enabled=False, doc=
        """Summary statistics about the training status of the learner""")
    is_trained = True
    """Indicate that this measure is always trained."""
    def __init__(self, measure, splitter, **kwargs):
        """
        Parameters
        ----------
        measure: Measure
          This measure instance is trained on the first dataset and called with
          the second.
        splitter: Splitter
          This splitter instance has to generate at least two dataset splits
          when called with the input dataset. The first split is used to train
          the measure, the second split is used to run the trained measure.
        """
        Measure.__init__(self, **kwargs)
        self.__measure = measure
        self.__splitter = splitter
    def __repr__(self, prefixes=None):
        # 'prefixes' formerly defaulted to a mutable list ([]), which is
        # shared across calls; use the None-sentinel idiom instead
        if prefixes is None:
            prefixes = []
        return super(TransferMeasure, self).__repr__(
            prefixes=prefixes
            + _repr_attrs(self, ['measure', 'splitter'])
            )
    def _call(self, ds):
        # local binding
        measure = self.__measure
        splitter = self.__splitter
        ca = self.ca
        space = self.get_space()
        # generate the training and testing dataset subsequently to reduce the
        # memory footprint, i.e. the splitter might generate copies of the data
        # and so creates one at a time instead of two (for train and test) at
        # once
        # activate the dataset splitter; use the builtin next() (Py2.6+/Py3)
        # instead of the Python-2-only generator .next() method
        dsgen = splitter.generate(ds)
        dstrain = next(dsgen)
        if space:
            # get unique chunks for training set
            train_chunks = ','.join([str(i)
                for i in dstrain.get_attr(splitter.get_space())[0].unique])
        # ask splitter for first part
        measure.train(dstrain)
        # cleanup to free memory
        del dstrain
        # TODO get training confusion/stats
        # run with second
        dstest = next(dsgen)
        if space:
            # get unique chunks for testing set
            test_chunks = ','.join([str(i)
                for i in dstest.get_attr(splitter.get_space())[0].unique])
        res = measure(dstest)
        if space:
            # will broadcast to desired length
            res.set_attr(space, ("%s->%s" % (train_chunks, test_chunks),))
        # cleanup to free memory
        del dstest
        # compute measure stats
        if ca.is_enabled('stats'):
            if not hasattr(measure, '__summary_class__'):
                warning('%s has no __summary_class__ attribute -- '
                        'necessary for computing transfer stats' % measure)
            else:
                stats = measure.__summary_class__(
                    # hmm, might be unsupervised, i.e no targets...
                    targets=res.sa[measure.get_space()].value,
                    # XXX this should really accept the full dataset
                    predictions=res.samples[:, 0],
                    estimates=measure.ca.get('estimates', None))
                ca.stats = stats
        if ca.is_enabled('training_stats'):
            if measure.ca.has_key("training_stats") \
                   and measure.ca.is_enabled("training_stats"):
                ca.training_stats = measure.ca.training_stats
            else:
                warning("'training_stats' conditional attribute was enabled, "
                        "but the assigned measure '%s' either doesn't support "
                        "it, or it is disabled" % measure)
        return res
    measure = property(fget=lambda self:self.__measure)
    splitter = property(fget=lambda self:self.__splitter)
class | |
def test_impl(S):
return S.mean()
for data in self._mean_data_samples():
with self.subTest(data=data):
S = pd.Series(data)
self._check_mean(test_impl, S)
    # Compares Series.mean(skipna=...) against pandas for both skipna values.
    @skip_sdc_jit("Series.mean() any parameters unsupported")
    def test_series_mean_skipna(self):
        def test_impl(S, skipna):
            return S.mean(skipna=skipna)
        for skipna in [True, False]:
            for data in self._mean_data_samples():
                S = pd.Series(data)
                self._check_mean(test_impl, S, skipna)
    # Compares jitted Series.var() with pandas on data containing a NaN.
    def test_series_var1(self):
        def test_impl(S):
            return S.var()
        hpat_func = self.jit(test_impl)
        S = pd.Series([np.nan, 2., 3.])
        self.assertEqual(hpat_func(S), test_impl(S))
    # Compares jitted Series.min() with pandas across NaN/inf/integer samples.
    def test_series_min(self):
        def test_impl(S):
            return S.min()
        hpat_func = self.jit(test_impl)
        # TODO type_min/type_max
        for input_data in [
            [np.nan, 2., np.nan, 3., np.inf, 1, -1000],
            [8, 31, 1123, -1024],
            [2., 3., 1, -1000, np.inf],
            [np.nan, np.nan, np.inf, np.nan],
        ]:
            S = pd.Series(input_data)
            result_ref = test_impl(S)
            result = hpat_func(S)
            self.assertEqual(result, result_ref)
    # Checks Series.min(skipna=...) parity with pandas for both skipna values.
    @skip_sdc_jit("Series.min() any parameters unsupported")
    def test_series_min_param(self):
        def test_impl(S, param_skipna):
            return S.min(skipna=param_skipna)
        hpat_func = self.jit(test_impl)
        for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
                                         ([2., 3., 1, np.inf, -1000], False)]:
            S = pd.Series(input_data)
            result_ref = test_impl(S, param_skipna)
            result = hpat_func(S, param_skipna)
            self.assertEqual(result, result_ref)
    # Documents a known failure: skipna=False with NaN data (assertEqual on NaN).
    @unittest.expectedFailure
    def test_series_min_param_fail(self):
        def test_impl(S, param_skipna):
            return S.min(skipna=param_skipna)
        hpat_func = self.jit(test_impl)
        cases = [
            ([2., 3., 1, np.inf, -1000, np.nan], False), # min == np.nan
        ]
        for input_data, param_skipna in cases:
            S = pd.Series(input_data)
            result_ref = test_impl(S, param_skipna)
            result = hpat_func(S, param_skipna)
            self.assertEqual(result, result_ref)
    # Compares jitted Series.max() with pandas across many NaN/inf edge cases.
    def test_series_max(self):
        def test_impl(S):
            return S.max()
        hpat_func = self.jit(test_impl)
        # TODO type_min/type_max
        for input_data in [
            [np.nan, 2., np.nan, 3., np.inf, 1, -1000],
            [8, 31, 1123, -1024],
            [2., 3., 1, -1000, np.inf],
            [np.inf, np.inf, np.inf, np.inf],
            [np.inf, np.nan, np.nan, np.nan],
            [np.nan, np.nan, np.nan, np.nan],
            [np.nan, 1.0, np.nan, np.nan],
            [np.nan, 1.0, 1.0, np.nan],
            [np.nan, np.nan, 1.0, np.nan],
            [np.nan, np.nan, 1.0, np.nan, np.nan],
            [np.nan, np.nan, np.inf, np.nan],
            [np.nan, np.nan, np.inf, np.nan, np.nan],
            [np.nan, np.nan, np.nan, np.inf],
            np.arange(11),
        ]:
            with self.subTest(data=input_data):
                S = pd.Series(input_data)
                result_ref = test_impl(S)
                result = hpat_func(S)
                np.testing.assert_equal(result, result_ref)
    # Checks Series.max(skipna=...) parity with pandas for both skipna values.
    @skip_sdc_jit("Series.max() any parameters unsupported")
    def test_series_max_param(self):
        def test_impl(S, param_skipna):
            return S.max(skipna=param_skipna)
        hpat_func = self.jit(test_impl)
        for input_data, param_skipna in [([np.nan, 2., np.nan, 3., 1, -1000, np.inf], True),
                                         ([2., 3., 1, np.inf, -1000], False)]:
            S = pd.Series(input_data)
            result_ref = test_impl(S, param_skipna)
            result = hpat_func(S, param_skipna)
            self.assertEqual(result, result_ref)
    # Checks value_counts() on int/float series (order-insensitive via sort_index).
    @skip_sdc_jit('Old-style value_counts implementation doesn\'t handle numpy.nan values')
    def test_series_value_counts_number(self):
        def test_impl(S):
            return S.value_counts()
        input_data = [test_global_input_data_integer64, test_global_input_data_float64]
        extras = [[1, 2, 3, 1, 1, 3], [0.1, 0., 0.1, 0.1]]
        hpat_func = self.jit(test_impl)
        for data_to_test, extra in zip(input_data, extras):
            for d in data_to_test:
                data = d + extra
                with self.subTest(series_data=data):
                    S = pd.Series(data)
                    # use sort_index() due to possible different order of values with the same counts in results
                    result_ref = test_impl(S).sort_index()
                    result = hpat_func(S).sort_index()
                    pd.testing.assert_series_equal(result, result_ref)
    # Compares jitted Series.value_counts() with pandas on boolean data.
    @skip_sdc_jit('Fails to compile with latest Numba')
    def test_series_value_counts_boolean(self):
        def test_impl(S):
            return S.value_counts()
        input_data = [True, False, True, True, False]
        sdc_func = self.jit(test_impl)
        S = pd.Series(input_data)
        result_ref = test_impl(S)
        result = sdc_func(S)
        pd.testing.assert_series_equal(result, result_ref)
    # Checks value_counts(sort=True) ordering for both ascending directions.
    @skip_sdc_jit('Bug in old-style value_counts implementation for ascending param support')
    def test_series_value_counts_sort(self):
        def test_impl(S, value):
            return S.value_counts(sort=True, ascending=value)
        hpat_func = self.jit(test_impl)
        data = [1, 0, 0, 1, 1, -1, 0, -1, 0]
        for ascending in (False, True):
            with self.subTest(ascending=ascending):
                S = pd.Series(data)
                # to test sorting of result series works correctly do not use sort_index() on results!
                # instead ensure that there are no elements with the same frequency in the data
                result_ref = test_impl(S, ascending)
                result = hpat_func(S, ascending)
                pd.testing.assert_series_equal(result, result_ref)
    # Checks value_counts(dropna=False) keeps NaN buckets, matching pandas.
    @skip_sdc_jit('Old-style value_counts implementation doesn\'t handle numpy.nan values')
    def test_series_value_counts_numeric_dropna_false(self):
        def test_impl(S):
            return S.value_counts(dropna=False)
        data_to_test = [[1, 2, 3, 1, 1, 3],
                        [1, 2, 3, np.nan, 1, 3, np.nan, np.inf],
                        [0.1, 3., np.nan, 3., 0.1, 3., np.nan, np.inf, 0.1, 0.1]]
        hpat_func = self.jit(test_impl)
        for data in data_to_test:
            with self.subTest(series_data=data):
                S = pd.Series(data)
                pd.testing.assert_series_equal(hpat_func(S), test_impl(S))
    # Checks value_counts(dropna=False) on string series containing None.
    @skip_sdc_jit('Old-style value_counts implementation doesn\'t handle None values in string series')
    def test_series_value_counts_str_dropna_false(self):
        def test_impl(S):
            return S.value_counts(dropna=False)
        data_to_test = [['a', '', 'a', '', 'b', None, 'a', '', None, 'b'],
                        ['dog', None, 'NaN', '', 'cat', None, 'cat', None, 'dog', ''],
                        ['dog', 'NaN', '', 'cat', 'cat', 'dog', '']]
        hpat_func = self.jit(test_impl)
        for data in data_to_test:
            with self.subTest(series_data=data):
                S = pd.Series(data)
                # use sort_index() due to possible different order of values with the same counts in results
                result_ref = test_impl(S).sort_index()
                result = hpat_func(S).sort_index()
                pd.testing.assert_series_equal(result, result_ref)
    # Checks sorted value_counts() on string data for both sort directions.
    @skip_sdc_jit('Old-style value_counts implementation doesn\'t handle sort argument')
    def test_series_value_counts_str_sort(self):
        def test_impl(S, ascending):
            return S.value_counts(sort=True, ascending=ascending)
        data_to_test = [['a', '', 'a', '', 'b', None, 'a', '', 'a', 'b'],
                        ['dog', 'cat', 'cat', 'cat', 'dog']]
        hpat_func = self.jit(test_impl)
        for data in data_to_test:
            for ascending in (True, False):
                with self.subTest(series_data=data, ascending=ascending):
                    S = pd.Series(data)
                    # to test sorting of result series works correctly do not use sort_index() on results!
                    # instead ensure that there are no elements with the same frequency in the data
                    result_ref = test_impl(S, ascending)
                    result = hpat_func(S, ascending)
                    pd.testing.assert_series_equal(result, result_ref)
    # Checks value_counts() on series carrying an explicit integer index.
    @skip_sdc_jit("Fails to compile with latest Numba")
    def test_series_value_counts_index(self):
        def test_impl(S):
            return S.value_counts()
        hpat_func = self.jit(test_impl)
        for data in test_global_input_data_integer64:
            with self.subTest(series_data=data):
                index = np.arange(start=1, stop=len(data) + 1)
                S = pd.Series(data, index=index)
                pd.testing.assert_series_equal(hpat_func(S).sort_index(), test_impl(S).sort_index())
    # Checks value_counts() when the Series is constructed inside jitted code.
    def test_series_value_counts_no_unboxing(self):
        def test_impl():
            S = pd.Series([1, 2, 3, 1, 1, 3])
            return S.value_counts()
        hpat_func = self.jit(test_impl)
        pd.testing.assert_series_equal(hpat_func(), test_impl())
    # Distributed Series.max() on an index-less Series; also asserts no
    # replicated arrays/parfors remain after the distribution passes.
    @skip_numba_jit
    def test_series_dist_input1(self):
        """Verify distribution of a Series without index"""
        def test_impl(S):
            return S.max()
        hpat_func = self.jit(distributed={'S'})(test_impl)
        n = 111
        S = pd.Series(np.arange(n))
        start, end = get_start_end(n)
        self.assertEqual(hpat_func(S[start:end]), test_impl(S))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    # Distributed Series.max() with an integer index.
    @skip_sdc_jit("Fails to compile with latest Numba")
    @skip_numba_jit
    def test_series_dist_input2(self):
        """Verify distribution of a Series with integer index"""
        def test_impl(S):
            return S.max()
        hpat_func = self.jit(distributed={'S'})(test_impl)
        n = 111
        S = pd.Series(np.arange(n), 1 + np.arange(n))
        start, end = get_start_end(n)
        self.assertEqual(hpat_func(S[start:end]), test_impl(S[start:end]))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    # Distributed Series.max() with a string index (skipped: order-dependent).
    @unittest.skip("Passed if run single")
    def test_series_dist_input3(self):
        """Verify distribution of a Series with string index"""
        def test_impl(S):
            return S.max()
        hpat_func = self.jit(distributed={'S'})(test_impl)
        n = 111
        S = pd.Series(np.arange(n), ['abc{}'.format(id) for id in range(n)])
        start, end = get_start_end(n)
        self.assertEqual(hpat_func(S[start:end]), test_impl(S))
        self.assertEqual(count_array_REPs(), 0)
        self.assertEqual(count_parfor_REPs(), 0)
    # Checks indexing into a (Series, int, Series) tuple inside jitted code.
    def test_series_tuple_input1(self):
        def test_impl(s_tup):
            return s_tup[0].max()
        hpat_func = self.jit(test_impl)
        n = 111
        S = pd.Series(np.arange(n))
        S2 = pd.Series(np.arange(n) + 1.0)
        s_tup = (S, 1, S2)
        self.assertEqual(hpat_func(s_tup), test_impl(s_tup))
    # Distributed variant of the tuple-of-Series test (currently skipped).
    @unittest.skip("pending handling of build_tuple in dist pass")
    def test_series_tuple_input_dist1(self):
        def test_impl(s_tup):
            return s_tup[0].max()
        hpat_func = self.jit(locals={'s_tup:input': 'distributed'})(test_impl)
        n = 111
        S = pd.Series(np.arange(n))
        S2 = pd.Series(np.arange(n) + 1.0)
        start, end = get_start_end(n)
        s_tup = (S, 1, S2)
        h_s_tup = (S[start:end], 1, S2[start:end])
        self.assertEqual(hpat_func(h_s_tup), test_impl(s_tup))
    # Compares pd.concat of two Series (values only) with pandas.
    @skip_numba_jit
    def test_series_concat1(self):
        def test_impl(S1, S2):
            return pd.concat([S1, S2]).values
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([1.0, 2., 3., 4., 5.])
        S2 = pd.Series([6., 7.])
        np.testing.assert_array_equal(hpat_func(S1, S2), test_impl(S1, S2))
    # Compares Series.combine with a lambda against pandas (unequal lengths).
    @skip_numba_jit
    def test_series_combine(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([1.0, 2., 3., 4., 5.])
        S2 = pd.Series([6.0, 21., 3.6, 5.])
        pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
    # Checks Series.combine with mixed float64/float32 operand series.
    @skip_numba_jit
    def test_series_combine_float3264(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([np.float64(1), np.float64(2),
                        np.float64(3), np.float64(4), np.float64(5)])
        S2 = pd.Series([np.float32(1), np.float32(2),
                        np.float32(3), np.float32(4), np.float32(5)])
        pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
    # Expects AssertionError when combining an int series with a longer float one.
    @skip_numba_jit
    def test_series_combine_assert1(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([1, 2, 3])
        S2 = pd.Series([6., 21., 3., 5.])
        with self.assertRaises(AssertionError):
            hpat_func(S1, S2)
    # Same as assert1 but with operand order reversed (float first, int second).
    @skip_numba_jit
    def test_series_combine_assert2(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([6., 21., 3., 5.])
        S2 = pd.Series([1, 2, 3])
        with self.assertRaises(AssertionError):
            hpat_func(S1, S2)
    # Checks Series.combine with a third positional argument (fill value 16).
    @skip_numba_jit
    def test_series_combine_integer(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b, 16)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([1, 2, 3, 4, 5])
        S2 = pd.Series([6, 21, 3, 5])
        pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
    # Checks Series.combine across float and int operand series.
    @skip_numba_jit
    def test_series_combine_different_types(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([6.1, 21.2, 3.3, 5.4, 6.7])
        S2 = pd.Series([1, 2, 3, 4, 5])
        pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
    # Checks Series.combine on equal-length integer series.
    @skip_numba_jit
    def test_series_combine_integer_samelen(self):
        def test_impl(S1, S2):
            return S1.combine(S2, lambda a, b: 2 * a + b)
        hpat_func = self.jit(test_impl)
        S1 = pd.Series([1, 2, 3, 4, 5])
        S2 = pd.Series([6, 21, 17, -5, 4])
        pd.testing.assert_series_equal(hpat_func(S1, S2), test_impl(S1, S2))
@skip_numba_jit
def test_series_combine_samelen(self):
def test_impl(S1, S2):
return S1.combine(S2, lambda a, b: 2 * a + b)
hpat_func = | |
sorted(feature_list, key=operator.itemgetter(0))
return(feature_list)
def check_hit_within_hit(intersect_left, left_range, intersect_right, right_range):
    """Return True when either hit interval lies entirely inside the other's range."""
    left_inside_right = (intersect_left[0] in right_range
                         and intersect_left[1] in right_range)
    right_inside_left = (intersect_right[0] in left_range
                         and intersect_right[1] in left_range)
    return left_inside_right or right_inside_left
def get_qualifiers(cds_qualifiers, trna_qualifiers, rrna_qualifiers, feature):
    '''
    Takes a list of possible qualifier IDs and attempts
    to find them in the feature given.
    If the qualifier is present, appends to a list, otherwise
    skips and keeps going.
    Returns a list of qualifiers found in that feature.
    Features of types other than CDS/tRNA/rRNA yield an empty list.
    '''
    return_quals = []
    if feature.type == 'CDS':
        qualifier_list = cds_qualifiers
    elif feature.type == 'tRNA':
        qualifier_list = trna_qualifiers
    elif feature.type == 'rRNA':
        qualifier_list = rrna_qualifiers
    else:
        # BUGFIX: previously qualifier_list was left unbound for any other
        # feature type, raising UnboundLocalError in the loop below
        qualifier_list = []
    for qual in qualifier_list:
        try:
            return_quals.append(feature.qualifiers[qual][0])
        except KeyError:
            # qualifier absent from this feature -- skip and keep going
            pass
    return return_quals
def get_orientation(left_coords, right_coords):
    '''
    :param left_coords: list of coordinates for left end of hit
    :param right_coords: list of coordinates for right end of hit
    :return: an ISHit object initialised with orientation and left/right positions
    '''
    # x must always be the smallest position, and y the largest position
    # regardless of orientation
    # NOTE(review): this test uses 'or', so one coordinate pair in ascending
    # order is enough to classify the hit as forward -- confirm that 'or'
    # (rather than 'and') is the intended logic here.
    if left_coords[0] < right_coords[0] or left_coords[1] < right_coords[1]:
        smallest = min(right_coords[0], left_coords[1])
        biggest = max(right_coords[0], left_coords[1])
        new_hit = ISHit(smallest, biggest)
        # we are in forward orientation
        new_hit.orientation = 'F'
    else:
        smallest = min(left_coords[0], right_coords[1])
        biggest = max(left_coords[0], right_coords[1])
        new_hit = ISHit(smallest, biggest)
        # we are in reverse orientation
        new_hit.orientation = 'R'
    return new_hit
def doBlast(blast_input, blast_output, database):
    '''
    Perform a BLAST using the NCBI command line tools
    in BioPython.

    Builds a nucleotide BLAST database from ``database``, then runs blastn of
    ``blast_input`` against it, writing tabular (outfmt 6) results to
    ``blast_output``.
    '''
    # NOTE(review): these argument lists only work if run_command joins them
    # into a single shell string (shell=True): '-dbtype nucl' is one token,
    # the -outfmt value carries embedded double quotes, and '>' is a shell
    # redirection. Confirm run_command's behaviour before changing this.
    run_command(['makeblastdb', '-dbtype nucl', '-in', database], shell=True)
    run_command(['blastn', '-query', blast_input, '-db', database, '-outfmt "6 qseqid qlen sacc pident length slen sstart send evalue bitscore qcovs"', '>', blast_output], shell=True)
def check_seq_between(genbank_seq, insertion, start, end, name, temp):
    '''
    Check the sequence between two ends to see
    if it matches the IS query or not, and what
    the coverage and %ID to the query.
    :param genbank_seq: Whole sequence from genbank file
    :param insertion: IS query object to BLAST against
    :param start: Smallest coordinate, to extract sequence
    :param end: Largest coordinate, to extract sequence
    :param name: prefix for the file of this sequence
    :param temp: folder for the file of this sequence to go to
    :return: If there is a BLAST hit, return a dictionary with the 'coverage'
        and 'per_id' values of the top hit; otherwise return a dictionary with
        both values set to 0.
    '''
    # Get sequence between left and right ends
    seq_between = genbank_seq[start:end]
    # Turn the sequence into a fasta file
    seq_between = SeqRecord(Seq(str(seq_between)), id=name)
    out_seq_between = os.path.join(temp, name + '.fasta')
    out_insertion = os.path.join(temp, name + 'ISseq.fasta')
    SeqIO.write(seq_between, out_seq_between, 'fasta')
    SeqIO.write(insertion, out_insertion, 'fasta')
    blast_out = os.path.join(temp, name + '_out.txt')
    # Perform the BLAST
    doBlast(out_seq_between, blast_out, out_insertion)
    # Only want the top hit, so set count variable to 0
    first_result = 0
    # Open the BLAST output file
    with open(blast_out) as summary:
        for line in summary:
            # Get coverage and % ID for top hit
            if first_result == 0:
                info = line.strip().split('\t')
                hit = {'coverage': float(info[-1]), 'per_id': float(info[3])}
                first_result += 1
            # NOTE(review): this return sits inside the loop, so only the
            # first line of BLAST output is ever examined -- confirm this
            # early return is intended.
            return hit
    # If there is no hit, just return zeros
    return {'coverage': 0, 'per_id': 0}
def check_unpaired_hits(line_check, ref_gbk_obj, ref_feature_list, is_query_obj, min_range, max_range, novel_gap_size,
                        tmp_output_folder):
    '''
    Classify unpaired BED-closest hits against the IS query.

    :param line_check: iterable of tab-split BED 'closest' rows; field 6 is the gap size
    :param ref_gbk_obj: reference genbank record (provides .seq and features)
    :param ref_feature_list: feature list used to look up flanking genes
    :param is_query_obj: IS query sequence object (BLAST subject)
    :param min_range: lower bound on gap/IS-length ratio for a possible full IS
    :param max_range: upper bound on gap/IS-length ratio for a possible full IS
    :param novel_gap_size: gaps at or below this size are called novel hits
    :param tmp_output_folder: folder for temporary BLAST files
    :return: tuple (list of ISHit objects kept, list of RemovedHit objects)
    '''
    # intialise a list of all the hits found in this file
    IS_hit_list = []
    # intialise list of hits to remove
    removed_hit_list = []
    # get length of IS
    is_query_length = len(is_query_obj.seq)
    # go through each line
    for info in line_check:
        # get the distance between the hits
        gap = int(info[6])
        # separate out info on the left and right sides of the hit
        intersect_left = [int(info[1]), int(info[2])]
        intersect_right = [int(info[4]), int(info[5])]
        # TODO: check_hit_within_hit
        # get the orientation and the IS hit object
        new_hit = get_orientation(intersect_left, intersect_right)
        # if the gap is small, it's a novel hit
        if gap <= novel_gap_size:
            new_hit.hit_type = 'novel'
            new_hit.confidence_level = 'unpaired'
            new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
            IS_hit_list.append(new_hit)
        # if the gap is big enough, could be the IS itself, so do a BLAST check
        elif float(gap) / is_query_length >= min_range and float(gap) / is_query_length <= max_range:
            new_hit = get_orientation(intersect_left, intersect_right)
            seq_check_results = check_seq_between(ref_gbk_obj.seq, is_query_obj, new_hit.x,
                                                  new_hit.y, 'tmp_seq', tmp_output_folder)
            # if it's a good hit, add it (>=80% identity and >=80% coverage)
            if len(seq_check_results) != 0 and seq_check_results['per_id'] >= 80 and seq_check_results[
                'coverage'] >= 80:
                # get the flanking genes
                new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
                # make sure its a confident, novel hit
                new_hit.hit_type = 'known'
                new_hit.confidence_level = 'unpaired'
                new_hit.per_id = str(seq_check_results['per_id'])
                new_hit.coverage = str(seq_check_results['coverage'])
                # add it to the list
                IS_hit_list.append(new_hit)
            # if the thresholds are low, then mark it as a possible related IS
            elif len(seq_check_results) != 0 and seq_check_results['per_id'] >= 50 and seq_check_results[
                'coverage'] >= 50:
                new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
                # mark it as a possible related IS, but confident
                new_hit.hit_type = 'possible related IS'
                new_hit.confidence_level = 'unpaired'
                new_hit.per_id = str(seq_check_results['per_id'])
                new_hit.coverage = str(seq_check_results['coverage'])
                # add it to the list
                IS_hit_list.append(new_hit)
            # otherwise this is a spurious result, remove
            else:
                removed_hit = RemovedHit(intersect_left, intersect_right)
                removed_hit.reason = 'Sequence between does not match IS query'
                removed_hit.comparison_type = 'BED closest, unpaired'
                removed_hit.per_id = str(seq_check_results['per_id'])
                removed_hit.coverage = str(seq_check_results['coverage'])
                removed_hit_list.append(removed_hit)
        # the gap is too small to be the IS, but larger than a novel hit
        # NOTE(review): this branch combines '<= min_range' with '< max_range';
        # a ratio exactly equal to min_range is caught by the BLAST branch
        # above first -- confirm the boundary handling is intended.
        elif float(gap) / is_query_length <= min_range and float(gap) / is_query_length < max_range:
            new_hit = get_orientation(intersect_left, intersect_right)
            # add the relevant information
            new_hit.hit_type = 'novel'
            new_hit.confidence_level = 'unpaired'
            new_hit.get_flanking_genes(ref_gbk_obj, ref_feature_list)
            # add it to the list
            IS_hit_list.append(new_hit)
        # otherwise remove!
        else:
            removed_hit = RemovedHit(intersect_left, intersect_right)
            removed_hit.reason = 'Sequence between is not large enough to be IS query'
            removed_hit.comparison_type = 'BED closest, unpaired'
            removed_hit_list.append(removed_hit)
    return IS_hit_list, removed_hit_list
def write_typing_output(IShits, removedhits, cds_feature_info, rrna_feature_info, trna_feature_info, output_table):
with open(output_table, 'w') as out:
# set the header and write it to the output file
header = ["region", "orientation", "x", "y", "gap", "call", "percent_ID", "percent_cov", "left_gene", "left_description", "left_strand",
"left_distance", "right_gene", "right_description", "right_strand", "right_distance", "gene_interruption"]
out.write('\t'.join(header) + '\n')
# if there are no hits, record this and exit the function
if len(IShits) == 0:
out.write('No hits found')
out.close()
return
# sort IS hits by left position, ascending order
IShits.sort(key=lambda x: x.x)
# loop through each hit
region = 1
for IShit in IShits:
region_num = 'region_%s' % region
region += 1
call_type = IShit.hit_type
if IShit.confidence_level == 'imprecise':
call_type = call_type + '*'
elif IShit.confidence_level == 'unpaired':
call_type = call_type + '?'
# calculate gap distance
IShit.get_gap_distance()
# determine if gene is interrupted or not
IShit.determine_interrupted()
# get qualifiers for left and right genes
# TODO: make sure this qualifier call is robust
if IShit.left_feature.type == 'CDS':
try:
left_description = IShit.left_feature.qualifiers[cds_feature_info][0]
except KeyError:
left_description = ''
logging.warning('No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --cds.' % IShit.left_feature.qualifiers['locus_tag'][0])
elif IShit.left_feature.type == 'rRNA':
try:
left_description = IShit.left_feature.qualifiers[rrna_feature_info][0]
except KeyError:
left_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --rrna.' % IShit.left_feature.qualifiers['locus_tag'][0])
elif IShit.left_feature.type == 'tRNA':
try:
left_description = IShit.left_feature.qualifiers[trna_feature_info][0]
except KeyError:
left_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --trna.' % IShit.left_feature.qualifiers['locus_tag'][0])
if IShit.right_feature.type == 'CDS':
try:
right_description = IShit.right_feature.qualifiers[cds_feature_info][0]
except KeyError:
right_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --cds.' % IShit.right_feature.qualifiers['locus_tag'][0])
elif IShit.right_feature.type == 'rRNA':
try:
right_description = IShit.right_feature.qualifiers[rrna_feature_info][0]
except KeyError:
right_description = ''
logging.warning(
'No qualifier was found for gene %s. By default, this is the "product" qualifier for the feature. '
'If you would like to use a different qualifier, '
'please supply it to --rrna.' % IShit.right_feature.qualifiers['locus_tag'][0])
elif IShit.right_feature.type == 'tRNA':
try:
right_description = IShit.right_feature.qualifiers[trna_feature_info][0]
except KeyError:
right_description | |
# <filename>deploy/virenv/bin/gdb.py
#!/home/liuwei/virenv/bin/python
from __future__ import print_function
from ptrace import PtraceError
from ptrace.debugger import (PtraceDebugger, Application,
ProcessExit, NewProcessEvent, ProcessSignal,
ProcessExecution, ProcessError)
from optparse import OptionParser
from os import getpid
from sys import stdout, stderr, exit
from logging import getLogger, info, warning, error
from ptrace.version import VERSION, WEBSITE
from ptrace.error import PTRACE_ERRORS, writeError
from ptrace.binding import HAS_PTRACE_SINGLESTEP
from ptrace.disasm import HAS_DISASSEMBLER
from ptrace.ctypes_tools import (truncateWord,
formatWordHex, formatAddress, formatAddressRange, word2bytes)
from ptrace.process_tools import dumpProcessInfo
from ptrace.tools import inverseDict
from ptrace.func_call import FunctionCallOptions
from ptrace.signames import signalName, SIGNAMES
from ptrace.six import PY3, binary_type
from signal import SIGTRAP, SIGINT
from ptrace.terminal import enableEchoMode, terminalWidth
from errno import ESRCH
from ptrace.cpu_info import CPU_POWERPC
from ptrace.debugger import ChildError
from ptrace.debugger.memory_mapping import readProcessMappings
from ptrace.os_tools import RUNNING_PYTHON3
# Python 2/3 compatibility shim: alias the Python-2-only builtins so the
# rest of the module can use unichr()/raw_input() unconditionally.
try:
    unichr
    raw_input
except NameError:
    # Python 3
    unichr = chr
    raw_input = input
import re
if stdout.isatty():
    try:
        # Use readline for better raw_input()
        import readline
    except ImportError:
        pass
# Match a register name: $eax, $gp0, $orig_eax
REGISTER_REGEX = re.compile(r"\$[a-z]+[a-z0-9_]+")
#BYTES_REGEX = re.compile(r"""(?:'([^'\\]*)'|"([^"\\]*)")""")
# Reverse mapping of the ptrace signal-name table
SIGNALS = inverseDict(SIGNAMES)  # name -> signum
# (command, help text) pairs displayed by the "help" command; the actual
# dispatch happens in Gdb.execute()
COMMANDS = (
    # trace instructions
    ("cont", "continue execution"),
    ("step", "execute one instruction (do not enter in a call)"),
    ("stepi", "execute one instruction (enter the call)"),
    ("until", "execute code until specified address (until <address>)"),
    ("set", "set register value (set <register>=<value>)"),
    ("sys", "continue execution to next syscall"),
    ("signal", "send a signal to the process (signal <signum>)"),
    ("signals", "display signals"),
    # current process info
    ("regs", "display registers"),
    ("where", "display true code content (show breakpoints effects on code). eg. 'where $eip', 'where $eip $eip+20'"),
    ("print", "display a value (print <value>)"),
    ("hexdump", "dump memory as specified address or address range (hexdump <address> or hexdump <start> <stop>)"),
    ("where2", "display original code content (don't show effects of breakpoint on code)"),
    ("stack", "display stack content"),
    ("backtrace", "dump the backtrace"),
    ("proc", "display process information"),
    ("maps", "display memory mappings"),
    # breakpoints
    ("break", "set a breakpoint (break <address>)"),
    ("breakpoints", "display breakpoints"),
    ("delete", "delete a breakpoint (delete <address>)"),
    # processes
    ("attach", 'attach a new process (eg. "attach 2390")'),
    ("proclist", "list of traced processes"),
    ("switch", "switch active process (switch or switch <pid>)"),
    ("follow", r'''follow a term (eg. "follow '\x12\x14\x27\x13'")'''),
    ("showfollow", 'show all "followed" terms'),
    ("resetfollow", 'reset all "followed" terms'),
    ("xray", 'show addresses of (and possible pointers to) "followed" terms'),
    # other
    ("dbginfo", "information about the debugger"),
    ("quit", "quit debugger"),
    ("help", "display this help"),
)
def formatAscii(data):
    """Render a byte sequence as printable ASCII; non-printable bytes become '.'."""
    def asciiChar(code):
        # Printable ASCII is 0x20 (space) .. 0x7e ('~')
        return unichr(code) if 32 <= code <= 126 else '.'
    if RUNNING_PYTHON3:
        # Iterating bytes yields ints on Python 3
        return u''.join(asciiChar(code) for code in data)
    # Python 2: iterating a str yields 1-char strings
    return u''.join(asciiChar(ord(octet)) for octet in data)
def formatHexa(data):
    """Render a byte sequence as space-separated two-digit hex values."""
    if RUNNING_PYTHON3:
        codes = (octet for octet in data)
    else:
        codes = (ord(octet) for octet in data)
    return u' '.join(u"%02x" % code for code in codes)
# finds possible pointer values in process memory space,
# pointing to address
def getPointers(process, address):
    """Yield every address in *process* memory whose content is a word
    equal to *address* (i.e. a possible pointer to it)."""
    needle = word2bytes(address)
    for mapping in readProcessMappings(process):
        for hit in mapping.search(needle):
            yield hit
class Gdb(Application):
def __init__(self):
    """Set up the debugger application: parse options, configure logging,
    and initialize per-session state."""
    Application.__init__(self)
    # Parse self.options
    self.parseOptions()
    # Setup output (log)
    self.setupLog()
    # Last signal received per process; replayed on the next "cont"
    self.last_signal = {}
    # We assume user wants all possible information
    self.syscall_options = FunctionCallOptions(
        write_types=True,
        write_argname=True,
        write_address=True,
    )
    # FIXME: Remove self.breaks!
    self.breaks = dict()
    # Byte strings registered with the "follow" command (see addFollowTerm)
    self.followterms = []
def setupLog(self):
    """Route log output to stdout (delegates to Application._setupLog)."""
    self._setupLog(stdout)
def parseOptions(self):
    """Parse command-line arguments into self.options / self.program.

    Exits with the usage message when neither --pid nor a program to
    execute was supplied.
    """
    parser = OptionParser(usage="%prog [options] -- program [arg1 arg2 ...]")
    self.createCommonOptions(parser)
    self.createLogOptions(parser)
    self.options, self.program = parser.parse_args()
    if self.options.pid is None and not self.program:
        parser.print_help()
        exit(1)
    self.processOptions()
    # Only prefix messages with the PID when fork following is enabled
    self.show_pid = self.options.fork
def _continueProcess(self, process, signum=None):
    """Resume *process*, delivering *signum* or its recorded pending signal.

    When a signal is delivered, the pending entry for the process is
    dropped so it is not replayed twice.
    """
    if not signum:
        # Fall back to the signal recorded for this process, if any
        signum = self.last_signal.get(process)
    if signum:
        error("Send %s to %s" % (signalName(signum), process))
        process.cont(signum)
        self.last_signal.pop(process, None)
    else:
        process.cont()
def cont(self, signum=None):
    """Continue all traced processes and wait for the next signal.

    *signum* (if set) is delivered to the active process; the other
    processes are resumed with their own pending signal, if any.
    Returns None (no error message) so execute() treats it as success.
    """
    for process in self.debugger:
        process.syscall_state.clear()
        if process == self.process:
            self._continueProcess(process, signum)
        else:
            self._continueProcess(process)
    # Wait for a process signal
    signal = self.debugger.waitSignals()
    process = signal.process
    # Hit breakpoint?
    if signal.signum == SIGTRAP:
        # NOTE(review): queries self.process here rather than the process
        # that actually raised the signal -- confirm this is intended when
        # tracing multiple processes.
        ip = self.process.getInstrPointer()
        if not CPU_POWERPC:
            # On x86 the INT3 has already executed: step back so ip points
            # at the breakpoint instruction itself
            ip -= 1
        breakpoint = self.process.findBreakpoint(ip)
        if breakpoint:
            error("Stopped at %s" % breakpoint)
            # Restore the original instruction and rewind the instruction
            # pointer so execution can resume from it
            breakpoint.desinstall(set_ip=True)
    else:
        self.processSignal(signal)
    return None
def readRegister(self, regs):
    """Regex-substitution callback: replace a '$name' match by the
    register's current value (as a decimal string)."""
    register_name = regs.group(0)[1:]  # strip the leading '$'
    return str(self.process.getreg(register_name))
def parseInteger(self, text):
    """Evaluate *text* as an integer expression.

    Supports register names ($eax), hexadecimal constants, and basic
    arithmetic. A leading '*' dereferences the result as a word read
    from the process memory. Raises ValueError on invalid input.
    """
    # Remove spaces and convert to lower case
    text = text.strip()
    if " " in text:
        raise ValueError("Space are forbidden: %r" % text)
    text = text.lower()
    # Replace registers by their value
    orig_text = text
    text = REGISTER_REGEX.sub(self.readRegister, text)
    # Replace hexadecimal numbers by decimal numbers
    def readHexadecimal(regs):
        text = regs.group(0)
        if text.startswith("0x"):
            text = text[2:]
        elif not re.search("[a-f]", text):
            # Digits only and no 0x prefix: already decimal, keep as-is
            return text
        value = int(text, 16)
        return str(value)
    text = re.sub(r"(?:0x)?[0-9a-f]+", readHexadecimal, text)
    # Reject invalid characters
    if not re.match(r"^[()<>+*/&0-9-]+$", text):
        raise ValueError("Invalid expression: %r" % orig_text)
    # Use integer division (a//b) instead of float division (a/b)
    text = text.replace("/", "//")
    # Finally, evaluate the expression
    is_pointer = text.startswith("*")
    if is_pointer:
        text = text[1:]
    try:
        # eval() is constrained by the character whitelist above, so no
        # names, attributes or calls can be injected
        value = eval(text)
        value = truncateWord(value)
    except SyntaxError:
        raise ValueError("Invalid expression: %r" % orig_text)
    if is_pointer:
        value = self.process.readWord(value)
    return value
def parseIntegers(self, text):
    """Parse a whitespace-separated list of integer expressions.

    Each item is evaluated with parseInteger() (registers, hexadecimal
    constants and arithmetic are allowed). Raises ValueError if any
    item is invalid.
    """
    # str.split() with no separator never yields empty or padded items,
    # so the previous per-item strip() was redundant.
    return [self.parseInteger(item) for item in text.split()]
def parseBytes(self, text):
    """Evaluate *text* as a quoted bytes literal (eg. "'\\x01ab'").

    Raises TypeError if the evaluated value is not a bytes string.
    """
    # FIXME: Validate input
    # if not BYTES_REGEX.match(text):
    #     raise ValueError('Follow text must be enclosed in quotes!')
    if PY3:
        # Prefix with 'b' so the literal evaluates to bytes on Python 3
        text = 'b' + text.lstrip()
    # NOTE(review): eval() on raw user input -- acceptable for an
    # interactive debugger prompt, but do not reuse on untrusted data.
    value = eval(text)
    if not isinstance(value, binary_type):
        raise TypeError("Input is not a bytes string!")
    return value
def addFollowTerm(self, text):
    """Parse *text* as a bytes literal (eg. '\\x01\\x14') and register it
    as a followed term for the xray command."""
    self.followterms.append(self.parseBytes(text))
def showFollowTerms(self):
    """Print the list of followed byte-string terms."""
    print(self.followterms)
def _xray(self):
    """Yield (process, mapping, address, term) for every occurrence of a
    followed term in the memory of any traced process."""
    for needle in self.followterms:
        for proc in self.debugger:
            for mapping in readProcessMappings(proc):
                for hit in mapping.search(needle):
                    yield (proc, mapping, hit, needle)
# displays the offsets of all terms found in the process memory mappings
# along with possible addresses of pointers pointing to these terms
def xray(self):
    """Print each followed-term occurrence together with the addresses of
    possible pointers to it."""
    for proc, mapping, hit, term in self._xray():
        ptr_list = [formatAddress(ptr_addr)
                    for ptr_addr in getPointers(proc, hit)]
        print("term[%s] pid[%i] %s %s pointers: %s" % (
            repr(term), proc.pid, mapping,
            formatAddress(hit),
            " ".join(ptr_list)))
def execute(self, command):
    """Dispatch one interactive command line.

    Returns True on success, False on error (the error message, if any,
    is printed to stderr). Branch order matters for prefix matches.
    """
    errmsg = None
    if command == "cont":
        errmsg = self.cont()
    elif command == "proc":
        self.procInfo()
    elif command == "proclist":
        self.procList()
    elif command.startswith("attach "):
        errmsg = self.attachProcess(command[7:])
    elif command == "regs":
        self.process.dumpRegs()
    elif command == "stack":
        self.process.dumpStack()
    elif command == "backtrace":
        errmsg = self.backtrace()
    # The "where" test requires a trailing space, so "where2" falls
    # through to its own branch below
    elif command == "where" or command.startswith("where "):
        errmsg = self.where(command[6:])
    elif command == "where2" or command.startswith("where2 "):
        errmsg = self.where(command[7:], manage_bp=True)
    elif command == "maps":
        self.process.dumpMaps()
    elif command == "dbginfo":
        self.debuggerInfo()
    elif command == "step":
        errmsg = self.step(False)
    elif command == "stepi":
        errmsg = self.step(True)
    elif command == "sys":
        errmsg = self.syscallTrace()
    elif command == "help":
        self.help()
    elif command.startswith("set "):
        errmsg = self.set(command)
    elif command.startswith("until "):
        errmsg = self.until(command[6:])
    elif command.startswith("switch") or command == "switch":
        errmsg = self.switch(command[6:])
    elif command.startswith("break "):
        errmsg = self.breakpoint(command[6:])
    elif command.startswith("breakpoints"):
        self.displayBreakpoints()
    elif command.startswith("signals"):
        self.displaySignals()
    elif command.startswith("delete "):
        errmsg = self.delete(command[7:])
    elif command.startswith("hexdump "):
        errmsg = self.hexdump(command[8:])
    elif command.startswith("signal "):
        errmsg = self.signal(command[7:])
    elif command.startswith("print "):
        errmsg = self.print_(command[6:])
    elif command.startswith("follow "):
        errmsg = self.addFollowTerm(command[7:])
    elif command == "showfollow":
        self.showFollowTerms()
    elif command == "resetfollow":
        self.followterms = []
    elif command == "xray":
        self.xray()
    else:
        errmsg = "Unknown command: %r" % command
    if errmsg:
        print(errmsg, file=stderr)
        return False
    return True
def parseSignum(self, command):
    """Convert *command* to a signal number.

    Accepts a full signal name ("SIGTERM"), a short name ("TERM"), or
    any integer expression understood by parseInteger().
    Raises ValueError when nothing matches.
    """
    # Try the exact name first, then with the conventional SIG prefix
    for name in (command, "SIG" + command):
        try:
            return SIGNALS[name]
        except KeyError:
            pass
    try:
        return self.parseInteger(command)
    except ValueError:
        # Re-raise with a clearer message; the previous code bound the
        # original exception to an unused variable.
        raise ValueError("Invalid signal number: %r" % command)
def signal(self, command):
    """Deliver the signal described by *command* to the active process
    and continue execution.

    Returns an error message string on invalid input, or the result of
    cont() otherwise.
    """
    try:
        signum = self.parseSignum(command)
    except ValueError as err:
        return str(err)
    last_process = self.process
    try:
        errmsg = self.cont(signum)
        return errmsg
    finally:
        # Drop the recorded pending signal so it is not delivered twice;
        # cont() may have changed self.process, hence last_process.
        try:
            del self.last_signal[last_process]
        except KeyError:
            pass
def print_(self, command):
    """Evaluate *command* and display the value in decimal and hex.

    Also reports every memory mapping that contains the value when it is
    interpreted as an address. Returns an error message string on
    invalid input, or None on success.
    """
    try:
        value = self.parseInteger(command)
    except ValueError as err:
        return str(err)
    error("Decimal: %s" % value)
    error("Hexadecimal: %s" % formatWordHex(value))
    # Loop variable renamed: "map" shadowed the builtin of the same name.
    for mapping in self.process.readMappings():
        if value in mapping:
            error("Address is part of mapping: %s" % mapping)
    return None
def hexdump(self, command):
max_line = 20
width = (terminalWidth() - len(formatAddress(1)) - 3) // 4
width = max(width, 1)
limited = None
parts = command.split(" ", 1)
if 1 < len(parts):
try:
start_address = self.parseInteger(parts[0])
end_address = self.parseInteger(parts[1])
if end_address <= start_address:
raise ValueError('End address (%s) is smaller than start address(%s)!'
% (formatAddress(end_address), formatAddress(start_address)))
except ValueError as err:
return str(err)
size = end_address - start_address
max_size = width*max_line
if max_size < size:
limited = max_size
end_address | |
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 3 14:47:12 2021
@author: dwinge
"""
import matplotlib.pyplot as plt
#import pandas as pd
import networkx as nx
import matplotlib.colors as mcolors
# Define parameters
my_dpi = 300  # default figure resolution (dots per inch)
prop_cycle = plt.rcParams['axes.prop_cycle']
colors = prop_cycle.by_key()['color']  # default matplotlib color cycle
# Figure sizes: Nature journal column widths converted from mm to inches
inchmm = 25.4  # mm per inch; NOTE(review): unused -- the literals below repeat 25.4
nature_single = 89.0 / 25.4
nature_double = 183.0 / 25.4
nature_full = 247.0 / 25.4
# Plot options: apply a common sans-serif font to all figures
font = {'family' : 'sans',
        'weight' : 'normal',
        'size' : 10}
plt.rc('font', **font)
def name_edges(weights, layers):
    """Translate a weight object's numerical edge indices into named edges.

    Returns a dict keyed by channel; each value is a list of
    (from_name, to_name, weight) tuples, where node names come from the
    source and target layers' get_node_name().
    """
    numeric = weights.get_edges()
    src = weights.from_layer
    dst = weights.to_layer
    named = {}
    for channel in numeric:
        named[channel] = [
            (layers[src].get_node_name(e[0], src),
             layers[dst].get_node_name(e[1], dst),
             e[2])
            for e in numeric[channel]
        ]
    return named
def name_nodes(layers):
    """Collect each layer's node names, keyed by layer index."""
    return {key: layers[key].get_names(key) for key in layers}
def retrieve_G(layers, weights):
    """Build a directed graph from the named nodes and the weighted,
    color-tagged edges of the network description."""
    nodes = name_nodes(layers)
    G = nx.DiGraph()
    for layer_key in nodes:
        G.add_nodes_from(nodes[layer_key], subset=layer_key)
    for weight_key in weights:
        channel_edges = name_edges(weights[weight_key], layers)
        for channel in channel_edges:
            G.add_weighted_edges_from(channel_edges[channel], color=channel)
    return G
def visualize_network(layers, weights, exclude_nodes={}, node_size=600,
                      layout='multipartite', show_edge_labels=True,
                      shell_order=None, savefig=False, font_scaling=6,
                      arrow_size=20):
    """Draw the layered network as a colored directed graph and return it.

    NOTE(review): exclude_nodes uses a mutable default; it is only
    iterated here, but callers should not rely on mutating it.
    """
    edges = {}
    for key in weights:
        edges[key] = name_edges(weights[key], layers)
    nodes = name_nodes(layers)
    # Drop any nodes the caller asked to hide
    for key in exclude_nodes:
        for node in exclude_nodes[key]:
            nodes[key].remove(node)
    # Construct a graph
    G = nx.DiGraph()
    for key in nodes:
        G.add_nodes_from(nodes[key], subset=key)
    for edge_set in edges.values():
        for key in edge_set:
            G.add_weighted_edges_from(edge_set[key], color=key)
    # Node shading keyed on the first character of the node name
    # (I=input, H=hidden, O=output; anything else gets 0.45)
    val_map = {'I': 0.5,
               'H': 0.6,
               'O': 0.7}
    values = [val_map.get(node[0], 0.45) for node in G.nodes()]
    edge_labels = dict([((u, v,), f"{d['weight']:.1f}")
                        for u, v, d in G.edges(data=True)])
    edge_colors = ['tab:'+d['color'] for u, v, d in G.edges(data=True)]
    edge_weights = [d['weight'] for u, v, d in G.edges(data=True)]
    # Try new way of constructing this
    #edge_labels = dict([((n1, n2), f'{n1}->{n2}')
    #                    for n1, n2 in G.edges])
    #red_edges = [('I1','H0')]
    red_edges = []
    #edge_colors = ['black' if not edge in red_edges else 'red' for edge in G.edges()]
    black_edges = [edge for edge in G.edges() if edge not in red_edges]
    # Select the node layout
    if layout == 'multipartite':
        pos = nx.multipartite_layout(G)
    elif layout == 'spring':
        pos = nx.spring_layout(G, iterations=500, threshold=1e-5)
    elif layout == 'circular':
        pos = nx.circular_layout(G)
    elif layout == 'spiral':
        pos = nx.spiral_layout(G)
    elif layout == 'kamada_kawai':
        pos = nx.kamada_kawai_layout(G)
    elif layout == 'shell':
        nlist = []
        # Combine the output and input layer on the same circle
        if shell_order is None:
            # Number of layers
            P = len(nodes.keys())
            for key in nodes:
                if key < P-1:
                    nlist.append(nodes[key])
                else:
                    nlist[0] += nodes[key]
            # Reverse list to have input + output as outer layer
            nlist = nlist[::-1]
        else:
            # shell_order entries may be layer keys or lists of layer keys
            # that share one shell
            for entry in shell_order:
                if type(entry) is list:
                    nlist.append(nodes[entry[0]])
                    for k in range(1, len(entry)):
                        nlist[-1] += nodes[entry[k]]
                else:
                    nlist.append(nodes[entry])
        pos = nx.shell_layout(G, nlist=nlist)
    else:
        print('Sorry, layout not implemented, reverting back to multipartite')
        pos = nx.multipartite_layout(G)
    # Try simple scaling
    c = node_size/600
    nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('Blues'),
                           node_color=values, vmin=0., vmax=1.0,
                           node_size=node_size)
    nx.draw_networkx_labels(G, pos, font_size=(6+c*font_scaling))
    nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r',
                           arrows=True, arrowsize=arrow_size, node_size=node_size)
    nx.draw_networkx_edges(G, pos, edgelist=black_edges, edge_color=edge_colors,
                           arrows=True, arrowsize=arrow_size, node_size=node_size,
                           width=edge_weights,
                           connectionstyle='arc3,rad=.2')
    # connectionstyle='arc3,rad=0.2')
    if show_edge_labels:
        nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
    # There is an interface to graphviz .dot files provided by
    #nx.drawing.nx_pydot.write_dot(G, 'graph.dot')
    # to generate a png, run dot -Tpng graph.dot > graph.png
    if savefig:
        #nx.drawing.nx_pydot.write_dot(G, 'network_layout.dot')
        plt.savefig('network_layout.png', dpi=300)
    plt.show()
    return G
def simple_paths(G, source, target):
    """Return a generator over all simple paths from *source* to *target* in *G*."""
    return nx.all_simple_paths(G, source, target)
def movie_maker(movie_series, layers, weights, exclude_nodes={}, node_size=600, layout='multipartite', show_edge_labels=True, shell_order=None):
    """Animate node activity over time and save the result as movie.mp4.

    *movie_series* is a DataFrame with a 'Time' column plus one column
    per node; node alpha and edge width are scaled by the recorded
    currents. NOTE(review): the DataFrame columns are renamed in place
    (inplace=True), so the caller's object is modified.
    """
    from matplotlib.animation import FuncAnimation
    # Setup the network plot from the layers and weights
    edges = {}
    for key in weights:
        edges[key] = name_edges(weights[key], layers)
    nodes = name_nodes(layers)
    for key in exclude_nodes:
        for node in exclude_nodes[key]:
            nodes[key].remove(node)
    # Construct a graph
    G = nx.DiGraph()
    for key in nodes:
        G.add_nodes_from(nodes[key], subset=key)
    for edge_set in edges.values():
        for key in edge_set:
            G.add_weighted_edges_from(edge_set[key], color=key)
    # Small loop here to set the colors
    val_map = {0: 'tab:blue',
               1: 'tab:red',
               2: 'tab:green'}
    values = []
    for node in G.nodes():
        if node[0] == 'H':
            values.append(val_map[0])
        elif node[0] == 'K':
            values.append(val_map[1])
        else:
            # Color by the digit in the second character of the name
            values.append(val_map.get(int(node[1])))
    edge_labels = dict([((u, v,), f"{d['weight']:.1f}")
                        for u, v, d in G.edges(data=True)])
    edge_colors = ['tab:'+d['color'] for u, v, d in G.edges(data=True)]
    # Specified dynamically
    #edge_weights=[d['weight'] for u,v,d in G.edges(data=True)]
    if layout == 'multipartite':
        pos = nx.multipartite_layout(G)
    elif layout == 'spring':
        pos = nx.spring_layout(G)
    elif layout == 'circular':
        pos = nx.circular_layout(G)
    elif layout == 'spiral':
        pos = nx.spiral_layout(G)
    elif layout == 'kamada_kawai':
        pos = nx.kamada_kawai_layout(G)
    elif layout == 'shell':
        nlist = []
        # Combine the output and input layer on the same circle
        if shell_order is None:
            # Number of layers
            P = len(nodes.keys())
            for key in nodes:
                if key < P-1:
                    nlist.append(nodes[key])
                else:
                    nlist[0] += nodes[key]
            # Reverse list to have input + output as outer layer
            nlist = nlist[::-1]
        else:
            for entry in shell_order:
                if type(entry) is list:
                    nlist.append(nodes[entry[0]])
                    for k in range(1, len(entry)):
                        nlist[-1] += nodes[entry[k]]
                else:
                    nlist.append(nodes[entry])
        pos = nx.shell_layout(G, nlist=nlist)
    else:
        print('Sorry, layout not implemented, reverting back to multipartite')
        pos = nx.multipartite_layout(G)
    # Try simple scaling
    c = node_size/600
    # Need a way to scale the alpha of each node depending on its activity
    # Start by renaming the DataFrame columns for easy access
    # (strip everything from the first '-' on, e.g. 'H0-Pout' -> 'H0')
    short_names = []
    for name in movie_series.columns:
        idx = name.find('-')
        if idx == -1: idx = None
        short_names.append(name[:idx])
    changes = dict(zip(movie_series.columns, short_names))
    movie_series.rename(columns=changes, inplace=True)
    # Check maximum current in movies_series
    Imax = max(movie_series.max())  # .max() gives a column max
    # Now the alpha can be retrieved as a list
    tseries = movie_series['Time']  # easier to call
    idx = tseries.first_valid_index()
    alpha_P = []
    for node in G.nodes():
        # Calculate transparancy normalized to 1.
        alpha_P.append(movie_series[node][idx]/Imax)
    # At this point we have G and pos which is what we need NEW:
    # Create a fixed size figure
    # fig, ax = plt.subplots(figsize=(5,5))
    fig = plt.figure(figsize=(5, 7))
    ax1 = plt.subplot(411)
    ax2 = plt.subplot(4, 1, (2, 4), aspect=1.)
    def init():
        # Static top panel: output power versus time
        movie_series.plot(x='Time', y='O0', ax=ax1)
        ax1.set_xlabel('Time (ns)')
        ax1.set_ylabel('O0-Pout (nA)')
    def update(idx):
        # Redraw the network panel for frame *idx*
        ax2.clear()
        try:
            # Remove the marker dot from the previous frame
            ax1.lines[1].remove()
        except:
            # NOTE(review): bare except silently swallows all errors here
            pass
        # Draw a dot to mark our point
        t = tseries.loc[idx]
        Pout = movie_series['O0'].loc[idx]
        ax1.plot(t, Pout, 'ro', ms=5.)
        # Update our values of alpha
        alpha_P = []
        for node in G.nodes():
            # Calculate transparancy normalized to 1.
            alpha_P.append(max(movie_series[node][idx], 0)/Imax)
        # Scale also edges by the activity of the sending node
        edge_weights = [d['weight']*movie_series[u][idx]/Imax for u, v, d in G.edges(data=True)]
        nx.draw_networkx_edges(G, pos, edgelist=G.edges(), edge_color=edge_colors,
                               arrows=True, arrowsize=5, node_size=node_size,
                               width=edge_weights,
                               connectionstyle='arc3,rad=.2')
        try:
            allnodes = nx.draw_networkx_nodes(G, pos, cmap=plt.get_cmap('Blues'),
                                              node_color=values, vmin=0., vmax=1.0,
                                              node_size=node_size, alpha=alpha_P)
        except ValueError:
            print(f'Encountered error at t={float(tseries.loc[idx]):.1f} ns, idx={idx}')
            print('Values are:')
            print(values)
            print('Alphas are:')
            print(alpha_P)
        # NOTE(review): if the except branch ran, allnodes is unbound and
        # the next line raises NameError -- confirm intended.
        allnodes.set_edgecolor("black")
        nx.draw_networkx_labels(G, pos, font_size=(6+c*2))
        #nx.draw_networkx_edges(G, pos, edgelist=red_edges, edge_color='r',
        #                       arrows=True, arrowsize=20,node_size=node_size)
        # connectionstyle='arc3,rad=0.2')
        if show_edge_labels:
            nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
        ax1.set_title(f't={float(tseries.loc[idx]):.1f} ns')
    # Create the animation
    ani = FuncAnimation(fig,
                        update,
                        frames=range(tseries.first_valid_index(), tseries.last_valid_index()),
                        repeat=False,
                        init_func=init)
    ani.save('movie.mp4')
    # Show animation in the end
    plt.show()
def visualize_scaled_result(res, columns, scaling=None, time_interval=None):
    """Plot *columns* of *res* against time, each multiplied by the
    corresponding factor in *scaling* (no scaling when None)."""
    # Work on a copy so the caller's DataFrame is left untouched
    rescaled = res.copy()
    if scaling is not None:
        for pos, col in enumerate(columns):
            rescaled[col] *= scaling[pos]
    # Delegate the actual plotting
    visualize_dynamic_result(rescaled, columns, time_interval)
def visualize_dynamic_result(res, columns, time_interval=None):
    """Plot the given *columns* of *res* against time, optionally
    restricted to the closed window time_interval=[t0, t1]."""
    if time_interval is not None:
        in_window = ((res["Time"] >= time_interval[0])
                     & (res["Time"] <= time_interval[1]))
        window = res[in_window]
    else:
        window = res
    # Pretty generic plot function
    window.plot(x='Time', y=columns,
                xlabel='Time (ns)', ylabel='Voltage/Current (V/nA)')
    plt.gca().grid(True)
def plot_sum_nodes(res, layers, quantity, time_interval=None) :
import pandas as pd
# First we select the correct time span
if time_interval is not None :
select_res = res[(res["Time"]>=time_interval[0]) & (res["Time"]<=time_interval[1])]
else :
select_res = res
# Pick out the time vector
time = res['Time']
# Construct a df for the wanted values
df = pd.DataFrame(columns=layers)
for char in layers :
regex = char + '.\d?-' + quantity # Example 'H.\d?-Pout'
df[char] = select_res.filter(regex=regex).sum(axis=1)
df.insert(0,'Time',time)
df.plot(x = | |
import os
import utils.file_utils as file
import utils.system_utils as system
import utils.db_utils as db
# Canonical English book names keyed by their 3-letter lowercase
# abbreviation, grouped by testament (39 Old Testament + 27 New
# Testament books).
BIBLE_BOOKS = {
    'oldTestament': {
        'gen': 'Genesis',
        'exo': 'Exodus',
        'lev': 'Leviticus',
        'num': 'Numbers',
        'deu': 'Deuteronomy',
        'jos': 'Joshua',
        'jdg': 'Judges',
        'rut': 'Ruth',
        '1sa': '1 Samuel',
        '2sa': '2 Samuel',
        '1ki': '1 Kings',
        '2ki': '2 Kings',
        '1ch': '1 Chronicles',
        '2ch': '2 Chronicles',
        'ezr': 'Ezra',
        'neh': 'Nehemiah',
        'est': 'Esther',
        'job': 'Job',
        'psa': 'Psalms',
        'pro': 'Proverbs',
        'ecc': 'Ecclesiastes',
        'sng': 'Song of Solomon',
        'isa': 'Isaiah',
        'jer': 'Jeremiah',
        'lam': 'Lamentations',
        'ezk': 'Ezekiel',
        'dan': 'Daniel',
        'hos': 'Hosea',
        'jol': 'Joel',
        'amo': 'Amos',
        'oba': 'Obadiah',
        'jon': 'Jonah',
        'mic': 'Micah',
        'nam': 'Nahum',
        'hab': 'Habakkuk',
        'zep': 'Zephaniah',
        'hag': 'Haggai',
        'zec': 'Zechariah',
        'mal': 'Malachi',
    },
    'newTestament': {
        'mat': 'Matthew',
        'mrk': 'Mark',
        'luk': 'Luke',
        'jhn': 'John',
        'act': 'Acts',
        'rom': 'Romans',
        '1co': '1 Corinthians',
        '2co': '2 Corinthians',
        'gal': 'Galatians',
        'eph': 'Ephesians',
        'php': 'Philippians',
        'col': 'Colossians',
        '1th': '1 Thessalonians',
        '2th': '2 Thessalonians',
        '1ti': '1 Timothy',
        '2ti': '2 Timothy',
        'tit': 'Titus',
        'phm': 'Philemon',
        'heb': 'Hebrews',
        'jas': 'James',
        '1pe': '1 Peter',
        '2pe': '2 Peter',
        '1jn': '1 John',
        '2jn': '2 John',
        '3jn': '3 John',
        'jud': 'Jude',
        'rev': 'Revelation',
    },
}
# Two-digit book index (as a zero-padded string) keyed by the 3-letter
# abbreviation. The numbering jumps from 39 ('mal') to 41 ('mat'):
# presumably this follows the USFM convention that reserves 40 --
# TODO confirm against the data source these indices address.
BIBLES_ABBRV_INDEX = {
    'gen': '01',
    'exo': '02',
    'lev': '03',
    'num': '04',
    'deu': '05',
    'jos': '06',
    'jdg': '07',
    'rut': '08',
    '1sa': '09',
    '2sa': '10',
    '1ki': '11',
    '2ki': '12',
    '1ch': '13',
    '2ch': '14',
    'ezr': '15',
    'neh': '16',
    'est': '17',
    'job': '18',
    'psa': '19',
    'pro': '20',
    'ecc': '21',
    'sng': '22',
    'isa': '23',
    'jer': '24',
    'lam': '25',
    'ezk': '26',
    'dan': '27',
    'hos': '28',
    'jol': '29',
    'amo': '30',
    'oba': '31',
    'jon': '32',
    'mic': '33',
    'nam': '34',
    'hab': '35',
    'zep': '36',
    'hag': '37',
    'zec': '38',
    'mal': '39',
    'mat': '41',
    'mrk': '42',
    'luk': '43',
    'jhn': '44',
    'act': '45',
    'rom': '46',
    '1co': '47',
    '2co': '48',
    'gal': '49',
    'eph': '50',
    'php': '51',
    'col': '52',
    '1th': '53',
    '2th': '54',
    '1ti': '55',
    '2ti': '56',
    'tit': '57',
    'phm': '58',
    'heb': '59',
    'jas': '60',
    '1pe': '61',
    '2pe': '62',
    '1jn': '63',
    '2jn': '64',
    '3jn': '65',
    'jud': '66',
    'rev': '67',
}
# export const ALL_BIBLE_BOOKS = {
# ...BIBLE_BOOKS.oldTestament,
# ...BIBLE_BOOKS.newTestament,
# };
BOOK_CHAPTER_VERSES = {
'gen': {
'1': '31',
'2': '25',
'3': '24',
'4': '26',
'5': '32',
'6': '22',
'7': '24',
'8': '22',
'9': '29',
'10': '32',
'11': '32',
'12': '20',
'13': '18',
'14': '24',
'15': '21',
'16': '16',
'17': '27',
'18': '33',
'19': '38',
'20': '18',
'21': '34',
'22': '24',
'23': '20',
'24': '67',
'25': '34',
'26': '35',
'27': '46',
'28': '22',
'29': '35',
'30': '43',
'31': '55',
'32': '32',
'33': '20',
'34': '31',
'35': '29',
'36': '43',
'37': '36',
'38': '30',
'39': '23',
'40': '23',
'41': '57',
'42': '38',
'43': '34',
'44': '34',
'45': '28',
'46': '34',
'47': '31',
'48': '22',
'49': '33',
'50': '26',
},
'exo': {
'1': '22',
'2': '25',
'3': '22',
'4': '31',
'5': '23',
'6': '30',
'7': '25',
'8': '32',
'9': '35',
'10': '29',
'11': '10',
'12': '51',
'13': '22',
'14': '31',
'15': '27',
'16': '36',
'17': '16',
'18': '27',
'19': '25',
'20': '26',
'21': '36',
'22': '31',
'23': '33',
'24': '18',
'25': '40',
'26': '37',
'27': '21',
'28': '43',
'29': '46',
'30': '38',
'31': '18',
'32': '35',
'33': '23',
'34': '35',
'35': '35',
'36': '38',
'37': '29',
'38': '31',
'39': '43',
'40': '38',
},
'lev': {
'1': '17',
'2': '16',
'3': '17',
'4': '35',
'5': '19',
'6': '30',
'7': '38',
'8': '36',
'9': '24',
'10': '20',
'11': '47',
'12': '8',
'13': '59',
'14': '57',
'15': '33',
'16': '34',
'17': '16',
'18': '30',
'19': '37',
'20': '27',
'21': '24',
'22': '33',
'23': '44',
'24': '23',
'25': '55',
'26': '46',
'27': '34',
},
'num': {
'1': '54',
'2': '34',
'3': '51',
'4': '49',
'5': '31',
'6': '27',
'7': '89',
'8': '26',
'9': '23',
'10': '36',
'11': '35',
'12': '16',
'13': '33',
'14': '45',
'15': '41',
'16': '50',
'17': '13',
'18': '32',
'19': '22',
'20': '29',
'21': '35',
'22': '41',
'23': '30',
'24': '25',
'25': '18',
'26': '65',
'27': '23',
'28': '31',
'29': '40',
'30': '16',
'31': '54',
'32': '42',
'33': '56',
'34': '29',
'35': '34',
'36': '13',
},
'deu': {
'1': '46',
'2': '37',
'3': '29',
'4': '49',
'5': '33',
'6': '25',
'7': '26',
'8': '20',
'9': '29',
'10': '22',
'11': '32',
'12': '32',
'13': '18',
'14': '29',
'15': '23',
'16': '22',
'17': '20',
'18': '22',
'19': '21',
'20': '20',
'21': '23',
'22': '30',
'23': '25',
'24': '22',
'25': '19',
'26': '19',
'27': '26',
'28': '68',
'29': '29',
'30': '20',
'31': '30',
'32': '52',
'33': '29',
'34': '12',
},
'jos': {
'1': '18',
'2': '24',
'3': '17',
'4': '24',
'5': '15',
'6': '27',
'7': '26',
'8': '35',
'9': '27',
'10': '43',
'11': '23',
'12': '24',
'13': '33',
'14': '15',
'15': '63',
'16': '10',
'17': '18',
'18': '28',
'19': '51',
'20': '9',
'21': '45',
'22': '34',
'23': '16',
'24': '33',
},
'jdg': {
'1': '36',
'2': '23',
'3': '31',
'4': '24',
'5': '31',
'6': '40',
'7': '25',
'8': '35',
'9': '57',
'10': '18',
'11': '40',
'12': '15',
'13': '25',
'14': '20',
'15': '20',
'16': '31',
'17': '13',
'18': '31',
'19': '30',
'20': '48',
'21': '25',
},
'rut': {
'1': '22',
'2': '23',
'3': '18',
'4': '22',
},
'1sa': {
'1': '28',
'2': '36',
'3': '21',
'4': '22',
'5': '12',
'6': '21',
'7': '17',
'8': '22',
'9': '27',
'10': '27',
'11': '15',
'12': '25',
'13': '23',
'14': '52',
'15': '35',
'16': '23',
'17': '58',
'18': '30',
'19': '24',
'20': '42',
'21': '15',
'22': '23',
'23': '29',
'24': '22',
'25': '44',
'26': '25',
'27': '12',
'28': '25',
'29': '11',
'30': '31',
'31': '13',
},
'2sa': {
'1': '27',
'2': '32',
'3': '39',
'4': '12',
'5': '25',
'6': '23',
'7': '29',
'8': '18',
'9': '13',
'10': '19',
'11': '27',
'12': '31',
'13': '39',
'14': '33',
'15': '37',
'16': '23',
'17': '29',
'18': '33',
'19': '43',
'20': '26',
'21': '22',
'22': '51',
'23': '39',
'24': '25',
},
'1ki': {
'1': '53',
'2': '46',
'3': '28',
'4': '34',
'5': '18',
'6': '38',
'7': '51',
'8': '66',
'9': '28',
'10': '29',
'11': '43',
'12': '33',
'13': '34',
'14': '31',
'15': '34',
'16': '34',
'17': '24',
'18': '46',
'19': '21',
'20': '43',
'21': '29',
'22': '53',
},
'2ki': {
'1': '18',
'2': '25',
'3': '27',
'4': '44',
'5': '27',
'6': '33',
'7': '20',
'8': '29',
'9': '37',
'10': '36',
'11': '21',
'12': '21',
'13': '25',
'14': '29',
'15': '38',
'16': '20',
'17': '41',
'18': '37',
'19': '37',
'20': '21',
'21': '26',
'22': '20',
'23': '37',
'24': '20',
'25': '30',
},
'1ch': {
'1': '54',
'2': '55',
'3': '24',
'4': '43',
'5': '26',
'6': '81',
'7': '40',
'8': '40',
'9': '44',
'10': '14',
'11': '47',
'12': '40',
'13': '14',
'14': '17',
'15': '29',
'16': '43',
'17': '27',
'18': '17',
'19': '19',
'20': '8',
'21': '30',
'22': '19',
'23': '32',
'24': '31',
'25': '31',
'26': '32',
'27': '34',
'28': '21',
'29': '30',
},
'2ch': {
'1': '17',
'2': '18',
'3': '17',
'4': '22',
'5': '14',
'6': '42',
'7': '22',
'8': '18',
'9': '31',
'10': '19',
'11': '23',
'12': '16',
'13': '22',
'14': '15',
'15': '19',
'16': '14',
'17': '19',
'18': '34',
'19': '11',
'20': '37',
'21': '20',
'22': '12',
'23': '21',
'24': '27',
'25': '28',
'26': '23',
'27': '9',
'28': '27',
'29': '36',
'30': '27',
'31': '21',
'32': '33',
'33': '25',
'34': '33',
'35': '27',
'36': '23',
},
'ezr': {
'1': '11',
'2': '70',
'3': '13',
'4': '24',
'5': '17',
'6': '22',
'7': '28',
'8': '36',
'9': '15',
'10': '44',
},
'neh': {
'1': '11',
'2': '20',
'3': '32',
'4': '23',
'5': '19',
'6': '19',
'7': '73',
'8': '18',
'9': '38',
'10': '39',
'11': '36',
'12': '47',
'13': '31',
},
'est': {
'1': '22',
'2': '23',
'3': '15',
'4': '17',
'5': '14',
'6': '14',
'7': '10',
'8': '17',
'9': '32',
'10': '3',
},
'job': {
'1': '22',
'2': '13',
'3': '26',
'4': '21',
'5': '27',
'6': '30',
'7': '21',
'8': '22',
'9': '35',
'10': '22',
'11': '20',
'12': '25',
'13': '28',
'14': '22',
'15': '35',
'16': '22',
'17': '16',
'18': '21',
'19': '29',
'20': '29',
'21': | |
self._segment_path = lambda: "linktrace-reply"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply, ['raw_data'], name, value)
# Auto-generated YDK binding for the CFM linktrace-reply frame header.
# Do not hand-edit the structure: the Entity machinery introspects these
# exact attribute/leaf definitions.
class Header(Entity):
    """
    Frame header

    .. attribute:: level

    MD level

    **type**\: :py:class:`CfmBagMdLevel <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmBagMdLevel>`

    .. attribute:: version

    Version

    **type**\: int

    **range:** 0..255

    .. attribute:: use_fdb_only

    Use filtering DB only

    **type**\: bool

    .. attribute:: forwarded

    LTR was forwarded

    **type**\: bool

    .. attribute:: terminal_mep

    Terminal MEP reached

    **type**\: bool

    .. attribute:: transaction_id

    Transaction ID

    **type**\: int

    **range:** 0..4294967295

    .. attribute:: ttl

    TTL

    **type**\: int

    **range:** 0..255

    .. attribute:: relay_action

    Relay action

    **type**\: :py:class:`CfmPmRelayAction <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmRelayAction>`

    """

    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.Header, self).__init__()

        self.yang_name = "header"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf name -> (YLeaf descriptor, [python type]) mapping consumed
        # by the Entity base class
        self._leafs = OrderedDict([
            ('level', (YLeaf(YType.enumeration, 'level'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmBagMdLevel', '')])),
            ('version', (YLeaf(YType.uint8, 'version'), ['int'])),
            ('use_fdb_only', (YLeaf(YType.boolean, 'use-fdb-only'), ['bool'])),
            ('forwarded', (YLeaf(YType.boolean, 'forwarded'), ['bool'])),
            ('terminal_mep', (YLeaf(YType.boolean, 'terminal-mep'), ['bool'])),
            ('transaction_id', (YLeaf(YType.uint32, 'transaction-id'), ['int'])),
            ('ttl', (YLeaf(YType.uint8, 'ttl'), ['int'])),
            ('relay_action', (YLeaf(YType.enumeration, 'relay-action'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmRelayAction', '')])),
        ])
        self.level = None
        self.version = None
        self.use_fdb_only = None
        self.forwarded = None
        self.terminal_mep = None
        self.transaction_id = None
        self.ttl = None
        self.relay_action = None
        self._segment_path = lambda: "header"
        self._is_frozen = True

    def __setattr__(self, name, value):
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.Header, ['level', 'version', 'use_fdb_only', 'forwarded', 'terminal_mep', 'transaction_id', 'ttl', 'relay_action'], name, value)
# Generated YDK binding: Sender ID TLV carried in a linktrace reply.
class SenderId(Entity):
    """
    Sender ID TLV
    .. attribute:: chassis_id
    Chassis ID
    **type**\: :py:class:`ChassisId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId>`
    .. attribute:: management_address_domain
    Management address domain
    **type**\: str
    **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
    .. attribute:: management_address
    Management address
    **type**\: str
    **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "sender-id"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Child container: nested chassis-id node.
        self._child_classes = OrderedDict([("chassis-id", ("chassis_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId))])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('management_address_domain', (YLeaf(YType.str, 'management-address-domain'), ['str'])),
            ('management_address', (YLeaf(YType.str, 'management-address'), ['str'])),
        ])
        self.management_address_domain = None
        self.management_address = None
        # Child container is always instantiated, with parent back-reference.
        self.chassis_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId()
        self.chassis_id.parent = self
        self._children_name_map["chassis_id"] = "chassis-id"
        self._segment_path = lambda: "sender-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId, ['management_address_domain', 'management_address'], name, value)
# Generated YDK binding: chassis ID portion of the Sender ID TLV.
class ChassisId(Entity):
    """
    Chassis ID
    .. attribute:: chassis_id_value
    Chassis ID (Current)
    **type**\: :py:class:`ChassisIdValue <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId.ChassisIdValue>`
    .. attribute:: chassis_id_type
    Chassis ID Type
    **type**\: :py:class:`CfmPmChassisIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmChassisIdFmt>`
    .. attribute:: chassis_id_type_value
    Chassis ID Type
    **type**\: int
    **range:** 0..255
    .. attribute:: chassis_id
    Chassis ID (Deprecated)
    **type**\: str
    **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "chassis-id"
        self.yang_parent_name = "sender-id"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Child container: current-format chassis ID value.
        self._child_classes = OrderedDict([("chassis-id-value", ("chassis_id_value", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId.ChassisIdValue))])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('chassis_id_type', (YLeaf(YType.enumeration, 'chassis-id-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmChassisIdFmt', '')])),
            ('chassis_id_type_value', (YLeaf(YType.uint8, 'chassis-id-type-value'), ['int'])),
            ('chassis_id', (YLeaf(YType.str, 'chassis-id'), ['str'])),
        ])
        self.chassis_id_type = None
        self.chassis_id_type_value = None
        # Deprecated string form (per the docstring above); prefer chassis_id_value.
        self.chassis_id = None
        self.chassis_id_value = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId.ChassisIdValue()
        self.chassis_id_value.parent = self
        self._children_name_map["chassis_id_value"] = "chassis-id-value"
        self._segment_path = lambda: "chassis-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId, ['chassis_id_type', 'chassis_id_type_value', 'chassis_id'], name, value)
# Generated YDK binding: union-style chassis ID value (string, MAC, or raw bytes).
class ChassisIdValue(Entity):
    """
    Chassis ID (Current)
    .. attribute:: chassis_id_format
    ChassisIDFormat
    **type**\: :py:class:`CfmPmIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmIdFmt>`
    .. attribute:: chassis_id_string
    Chassis ID String
    **type**\: str
    .. attribute:: chassis_id_mac
    Chassis ID MAC Address
    **type**\: str
    **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
    .. attribute:: chassis_id_raw
    Raw Chassis ID
    **type**\: str
    **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId.ChassisIdValue, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "chassis-id-value"
        self.yang_parent_name = "chassis-id"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('chassis_id_format', (YLeaf(YType.enumeration, 'chassis-id-format'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmIdFmt', '')])),
            ('chassis_id_string', (YLeaf(YType.str, 'chassis-id-string'), ['str'])),
            ('chassis_id_mac', (YLeaf(YType.str, 'chassis-id-mac'), ['str'])),
            ('chassis_id_raw', (YLeaf(YType.str, 'chassis-id-raw'), ['str'])),
        ])
        # Which of the three representations is populated depends on chassis_id_format.
        self.chassis_id_format = None
        self.chassis_id_string = None
        self.chassis_id_mac = None
        self.chassis_id_raw = None
        self._segment_path = lambda: "chassis-id-value"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.SenderId.ChassisId.ChassisIdValue, ['chassis_id_format', 'chassis_id_string', 'chassis_id_mac', 'chassis_id_raw'], name, value)
# Generated YDK binding: Egress ID TLV with last/next egress identifier containers.
class EgressId(Entity):
    """
    Egress ID TLV
    .. attribute:: last_egress_id
    Last egress ID
    **type**\: :py:class:`LastEgressId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.LastEgressId>`
    .. attribute:: next_egress_id
    Next egress ID
    **type**\: :py:class:`NextEgressId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.NextEgressId>`
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "egress-id"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([("last-egress-id", ("last_egress_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.LastEgressId)), ("next-egress-id", ("next_egress_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.NextEgressId))])
        # This node has no leafs of its own, only the two child containers.
        self._leafs = OrderedDict()
        self.last_egress_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.LastEgressId()
        self.last_egress_id.parent = self
        self._children_name_map["last_egress_id"] = "last-egress-id"
        self.next_egress_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.NextEgressId()
        self.next_egress_id.parent = self
        self._children_name_map["next_egress_id"] = "next-egress-id"
        self._segment_path = lambda: "egress-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId, [], name, value)
# Generated YDK binding: last egress identifier (unique ID + MAC address).
class LastEgressId(Entity):
    """
    Last egress ID
    .. attribute:: unique_id
    Unique ID
    **type**\: int
    **range:** 0..65535
    .. attribute:: mac_address
    MAC address
    **type**\: str
    **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.LastEgressId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "last-egress-id"
        self.yang_parent_name = "egress-id"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('unique_id', (YLeaf(YType.uint16, 'unique-id'), ['int'])),
            ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
        ])
        self.unique_id = None
        self.mac_address = None
        self._segment_path = lambda: "last-egress-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.LastEgressId, ['unique_id', 'mac_address'], name, value)
# Generated YDK binding: next egress identifier (unique ID + MAC address).
class NextEgressId(Entity):
    """
    Next egress ID
    .. attribute:: unique_id
    Unique ID
    **type**\: int
    **range:** 0..65535
    .. attribute:: mac_address
    MAC address
    **type**\: str
    **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.NextEgressId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "next-egress-id"
        self.yang_parent_name = "egress-id"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        self._child_classes = OrderedDict([])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('unique_id', (YLeaf(YType.uint16, 'unique-id'), ['int'])),
            ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
        ])
        self.unique_id = None
        self.mac_address = None
        self._segment_path = lambda: "next-egress-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.EgressId.NextEgressId, ['unique_id', 'mac_address'], name, value)
# Generated YDK binding: Reply Ingress TLV of a linktrace reply.
class ReplyIngress(Entity):
    """
    Reply ingress TLV
    .. attribute:: port_id
    Port ID
    **type**\: :py:class:`PortId <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId>`
    .. attribute:: action
    Reply ingress action
    **type**\: :py:class:`CfmPmIngressAction <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmIngressAction>`
    .. attribute:: mac_address
    MAC address
    **type**\: str
    **pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "reply-ingress"
        self.yang_parent_name = "linktrace-reply"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Child container: nested port-id node.
        self._child_classes = OrderedDict([("port-id", ("port_id", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId))])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('action', (YLeaf(YType.enumeration, 'action'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmIngressAction', '')])),
            ('mac_address', (YLeaf(YType.str, 'mac-address'), ['str'])),
        ])
        self.action = None
        self.mac_address = None
        self.port_id = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId()
        self.port_id.parent = self
        self._children_name_map["port_id"] = "port-id"
        self._segment_path = lambda: "reply-ingress"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress, ['action', 'mac_address'], name, value)
# Generated YDK binding: port ID portion of the Reply Ingress TLV.
class PortId(Entity):
    """
    Port ID
    .. attribute:: port_id_value
    Port ID (Current)
    **type**\: :py:class:`PortIdValue <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId.PortIdValue>`
    .. attribute:: port_id_type
    Port ID type
    **type**\: :py:class:`CfmPmPortIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmPortIdFmt>`
    .. attribute:: port_id_type_value
    Port ID type value
    **type**\: int
    **range:** 0..255
    .. attribute:: port_id
    Port ID (Deprecated)
    **type**\: str
    **pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
    """

    # YANG module prefix and revision this binding was generated from.
    _prefix = 'ethernet-cfm-oper'
    _revision = '2017-10-06'

    def __init__(self):
        super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId, self).__init__()

        # Static YANG schema metadata for this node.
        self.yang_name = "port-id"
        self.yang_parent_name = "reply-ingress"
        self.is_top_level_class = False
        self.has_list_ancestor = True
        self.ylist_key_names = []
        # Child container: current-format port ID value.
        self._child_classes = OrderedDict([("port-id-value", ("port_id_value", Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId.PortIdValue))])
        # Leaf attribute name -> (YLeaf schema descriptor, accepted Python types).
        self._leafs = OrderedDict([
            ('port_id_type', (YLeaf(YType.enumeration, 'port-id-type'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmPortIdFmt', '')])),
            ('port_id_type_value', (YLeaf(YType.uint8, 'port-id-type-value'), ['int'])),
            ('port_id', (YLeaf(YType.str, 'port-id'), ['str'])),
        ])
        self.port_id_type = None
        self.port_id_type_value = None
        # Deprecated string form (per the docstring above); prefer port_id_value.
        self.port_id = None
        self.port_id_value = Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId.PortIdValue()
        self.port_id_value.parent = self
        self._children_name_map["port_id_value"] = "port-id-value"
        self._segment_path = lambda: "port-id"
        self._is_frozen = True

    def __setattr__(self, name, value):
        # Route attribute writes through YDK so leaf assignments are tracked/validated.
        self._perform_setattr(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId, ['port_id_type', 'port_id_type_value', 'port_id'], name, value)
class PortIdValue(Entity):
"""
Port ID (Current)
.. attribute:: port_id_format
PortIDFormat
**type**\: :py:class:`CfmPmIdFmt <ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper.CfmPmIdFmt>`
.. attribute:: port_id_string
Port ID String
**type**\: str
.. attribute:: port_id_mac
Port ID MAC Address
**type**\: str
**pattern:** [0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2}){5}
.. attribute:: port_id_raw
Raw Port ID
**type**\: str
**pattern:** ([0\-9a\-fA\-F]{2}(\:[0\-9a\-fA\-F]{2})\*)?
"""
_prefix = 'ethernet-cfm-oper'
_revision = '2017-10-06'
def __init__(self):
super(Cfm.Global.TracerouteCaches.TracerouteCache.LinktraceReply.ReplyIngress.PortId.PortIdValue, self).__init__()
self.yang_name = "port-id-value"
self.yang_parent_name = "port-id"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('port_id_format', (YLeaf(YType.enumeration, 'port-id-format'), [('ydk.models.cisco_ios_xr.Cisco_IOS_XR_ethernet_cfm_oper', 'CfmPmIdFmt', '')])),
('port_id_string', (YLeaf(YType.str, 'port-id-string'), ['str'])),
('port_id_mac', (YLeaf(YType.str, 'port-id-mac'), ['str'])),
('port_id_raw', (YLeaf(YType.str, 'port-id-raw'), ['str'])),
])
self.port_id_format = None
self.port_id_string = None
self.port_id_mac = None
self.port_id_raw = None
self._segment_path = lambda: | |
<gh_stars>1-10
class MapInterpolation:
"""
Create a MapInterpolation class where the data for drawing an interpolation
map can be downloaded, modified, or provided manually.
Keyword arguments:
country -- a country or a list of countries for which the shapefile will
be drawn. Alternatively, if you pass the 'EUROPE' value, all European countries
will be drawn on the map (default None)
dataframe -- data for interpolation (default None)
shapefile_path -- a path to the non-default shapefile from which boundaries
will be drawn (default None)
crs -- the imported shapefile's coordinates system (default None).
This argument is required when specifying a path to the non-default shapefile
(in the 'shapefile_path' argument). If the 'shapefile_path' argument is
specified and the 'crs' argument is None, the boundaries will be drawn
for the EPSG:4326 coordinates system. If that is not a valid coordinates system,
the coordinates on the map may be bizarre
---------------METHODS---------------
draw()
d_imgw_data()
d_wmo_data()
import_global_df()
-------------------------------------
---------------DATA STRUCTURE---------------
The supported data structure for MapInterpolation.dataframe is 3 columns of
pandas.DataFrame object.
1st column: values,
2nd column: longitude,
3rd column: latitude
Exemplary dataframe:
import pandas as pd
dataframe = pd.DataFrame({
'values': [7.9, 7.6, 7.4, 8.0, 8.6, 7.7, 8.4],
'longitude': [19.4, 18.6, 16.2, 19.8, 14.6, 21.0, 16.9],
'latitude': [54.2, 54.4, 54.2, 50.1, 53.4, 52.2, 51.1]
})
The exemplary data is the air temperature and comes from a couple of synoptic
stations from Poland.
--------------------------------------------
---------------NOTE THAT---------------
You can select country boundaries from the default shapefile by setting the
'country' argument. The default shapefile comes from the Natural Earth Data
website (https://www.naturalearthdata.com/about/terms-of-use/), which shares
many shapefiles for free use. However, the 'shapefile_path' and 'crs'
arguments allow you to select non-default boundaries from your PC. If you
specify the non-default shapefile, the 'country' argument does not change
anything.
---------------------------------------
"""
def __init__(
    self, country=None, dataframe=None,
    shapefile_path=None, crs=None,
):
    """Store drawing configuration; nothing is downloaded or drawn here."""
    self.country = country  # country/countries (or 'EUROPE') to draw from the default shapefile
    self.dataframe = dataframe  # pandas.DataFrame: values, longitude, latitude (see class docstring)
    self.shapefile_path = shapefile_path  # optional path to a non-default shapefile
    self.crs = crs  # coordinates system for the non-default shapefile (EPSG:4326 if None)
def draw(
self, levels=None, cmap='jet',
fill_contours=True, show_contours=False, show_clabels=False,
show_cbar=True, show_grid=False, show_frame=True, show_coordinates=True, show_ticks=True, add_shape=None,
save=None, **kwargs
):
"""
Specify which elements are to be drawn and draw an interpolation map.
Keyword arguments:
levels -- levels for which interpolated data will be displayed. If
None, the levels will be adjusted automatically, but it may result in
poor map's look, so it is recommended to specify the levels on your
own. Exemplary 'levels' input: numpy.arange(5, 15, 0.5) (default None)
cmap -- a colormap which will be used for displaying interpolated
data. To see the available colormaps, see: https://matplotlib.org/stable
/tutorials/colors/colormaps.html (default for default style 'jet';
default for retro style 'Greys_r')
fill_contours -- if the interpolated contours are to be filled with
the selected colormap (default True)
show_contours -- if the interpolated contours are to be shown (default
False)
show_clabels -- if the interpolated contours are to be labeled (default
False)
show_cbar -- if a colorbar for the interpolated data is to be shown
(default True)
show_grid -- if a grid for the coordinates is to be shown (default
False)
show_frame -- if the frame of the axis is to be shown (default True)
show_coordinates -- if the coordinates are to be shown (default True)
show_ticks -- if the x and y-ticks are to be shown (default True)
add_shape -- if additional shapes are to be drawn. The argument takes
a dictionary in which keys are the paths to the additional shapefile and
values are style/coordinates system settings - the value must be a single
string in which commas separate consecutive style/coordinates system
arguments. There are 5 settings that may be changed: coordinates system
(crs), linewidth (lw or linewidth), linestyle (ls or linestyle), fill color
(fc or fill_color), boundaries color (c or color). The coordinates' system
must be passed in EPSG code (if the coordinates' system is not specified,
then EPSG:4326 code will be taken). Exemplary 'add_shape' input: {'home/
python/test.shp': 'crs=2180, ls=dotted, lw=2, fc=black, c=yellow'}. Note
that: inside the string which specifies settings no quotes should be used
(default None)
save -- if the interpolation map is to be saved. A string in which
file name must be passed, for example: 'interpolated_map.png'. Note that
other picture formats can also be passed, e.g. 'interpolated_map.jpg'
(default None)
**kwargs -- the rest of arguments which are less relevant than the
above arguments (for setting the interpolation map style)
**kwargs:
title -- the map title (default None)
title_bold -- if the map title font weight is to be bold (default
False)
title_x_position -- the map title relative position on the x-axis of
the figure (default 0.13)
title_y_position -- the map title relative position on the y-axis of
the figure (default 0.06)
title_ha -- the map title horizontal alignment. Valid inputs are:
'left', 'right', 'center' (default 'left')
xlabel -- the map x-axis title (default None)
xlabel_bold -- if the map x-axis title font weight is to be bold
(default True)
ylabel -- the map y-axis title (default None)
ylabel_bold -- if the map y-axis title font weight is to be bold
(default True)
text_size -- the map title text size. Another text elements will be
adjusted automatically respectively to the 'text_size' value (default
for default style: 8; default for retro style: 10)
numcols -- number of columns for creating an interpolation grid.
Depending on the figure's aspect ratio, the contours' shape may be changed
slightly (default 240)
numrows -- number of rows for creating an interpolation grid.
Depending on the figure's aspect ratio, the contours' shape may be changed
slightly (default 240)
interpolation_method -- the interpolation method to be applied.
Available interpolation methods: 'linear', 'nearest', 'cubic' (default
'cubic')
interpolation_within_levels -- if interpolated values must be within
the given levels range specified in the 'levels' argument. It may be handy
when the interpolation process returns values which are not physically possible, e.g.
negative values for the number of cases. It is also useful when white
polygons appear on the map, which means that the given levels range does
not cover all interpolated values (default False)
extrapolation_into_zoomed_area -- if the 'zoom_in' argument is
specified, the extrapolation process will be conducted to the corners of
the zoomed-in area. It is handy when a country with the overseas territories
was chosen and the data concerns only a specific part of the shapefile -
in such case, the map may be zoomed in the proper area and the extrapolation
will be conducted to the corners of the zoomed-in area, not to the corners
of the whole shapefile (default True)
contours_levels -- non-default levels for the contours. If the argument
is None, then the 'contours_levels' argument will be the same as the
'levels' argument (default None)
clabels_levels -- non-default levels for the contour labels. If the
argument is None, then the 'clabels_levels' argument will be the same as
the 'contours_levels' argument (default None)
clabels_add -- add non-default contour labels to the map. Non-default
contour labels can be placed by specifying coordinates in a list of tuples,
in which tuples are x and y coordinates (e.g. [(15, 50)] will place a
label on the closest contour to the given coordinates, in the closest point
to the given coordinates). Note that the 'clabels_add' argument can be
used even when the 'show_contours' argument is set to False. In such case,
the labels will be placed on the invisible contours that respond to the
'levels' argument (default None)
clabels_inline_spacing -- the spacing between text on the contours and
the contours. Generally, the more characters the text on the contour has,
the less 'clabels_inline_spacing' value should be. Note the that DPI value
also affects the spacing between the text on the contours and the contours,
so the space may be different while previewing the map and while saving the
map (default 0)
clabels_decimal_place -- decimal places of the contour labels (default
0)
xticks -- non-default x ticks. Available input: a list of ints/floats
(default | |
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2015-2021 <NAME> (The Compiler) <<EMAIL>>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <https://www.gnu.org/licenses/>.
"""Simple history which gets written to disk."""
import os
import time
import contextlib
from typing import cast, Mapping, MutableSequence
from PyQt5.QtCore import pyqtSlot, QUrl, pyqtSignal
from PyQt5.QtWidgets import QProgressDialog, QApplication
from qutebrowser.config import config
from qutebrowser.api import cmdutils
from qutebrowser.utils import utils, log, usertypes, message, qtutils
from qutebrowser.misc import objects, sql
web_history = cast('WebHistory', None)
class HistoryProgress:

    """Progress dialog for history imports/conversions.

    The Qt dialog is only created once start() is called.  Before that,
    tick() and finish() are harmless (tick only bumps the internal counter),
    which keeps WebHistory simple: it can call these methods unconditionally,
    even for very small imports where no dialog should be shown.
    """

    def __init__(self):
        self._progress = None
        self._value = 0

    def start(self, text):
        """Start showing a progress dialog."""
        dialog = QProgressDialog()
        dialog.setMaximum(0)  # unknown
        dialog.setMinimumDuration(0)
        dialog.setLabelText(text)
        dialog.setCancelButton(None)
        dialog.setAutoClose(False)
        dialog.show()
        self._progress = dialog
        QApplication.processEvents()

    def set_maximum(self, maximum):
        """Set the progress maximum as soon as we know about it."""
        assert self._progress is not None
        self._progress.setMaximum(maximum)
        QApplication.processEvents()

    def tick(self):
        """Increase the displayed progress value."""
        self._value += 1
        if self._progress is not None:
            self._progress.setValue(self._value)
            QApplication.processEvents()

    def finish(self):
        """Finish showing the progress dialog.

        After this is called, the object can be reused.
        """
        if self._progress is not None:
            self._progress.hide()
class CompletionMetaInfo(sql.SqlTable):

    """Table containing meta-information for the completion."""

    # Known meta keys and their default values; any other key is rejected.
    KEYS = {
        'excluded_patterns': '',
        'force_rebuild': False,
    }

    def __init__(self, parent=None):
        # NOTE(review): 'parent' is accepted but not forwarded to the base
        # class - confirm this is intentional.
        self._fields = ['key', 'value']
        self._constraints = {'key': 'PRIMARY KEY'}
        super().__init__(
            "CompletionMetaInfo", self._fields, constraints=self._constraints)
        # Only (re)seed defaults when the DB schema version changed.
        if sql.user_version_changed():
            self._init_default_values()

    def _check_key(self, key):
        # Guard against typos: only the keys declared in KEYS are valid.
        if key not in self.KEYS:
            raise KeyError(key)

    def try_recover(self):
        """Try recovering the table structure.

        This should be called if getting a value via __getattr__ failed. In theory, this
        should never happen, in practice, it does.
        """
        # Force-recreate the table, then re-seed the default key/value pairs.
        self._create_table(self._fields, constraints=self._constraints, force=True)
        self._init_default_values()

    def _init_default_values(self):
        # Insert defaults for any known key that is missing from the table.
        for key, default in self.KEYS.items():
            if key not in self:
                self[key] = default

    def __contains__(self, key):
        """Check whether a (valid) meta key exists in the table."""
        self._check_key(key)
        query = self.contains_query('key')
        return query.run(val=key).value()

    def __getitem__(self, key):
        """Return the stored value for a (valid) meta key."""
        self._check_key(key)
        query = sql.Query('SELECT value FROM CompletionMetaInfo '
                          'WHERE key = :key')
        return query.run(key=key).value()

    def __setitem__(self, key, value):
        """Store a value for a (valid) meta key, overwriting any old value."""
        self._check_key(key)
        self.insert({'key': key, 'value': value}, replace=True)
class CompletionHistory(sql.SqlTable):

    """History table which keeps only the newest entry for each URL."""

    def __init__(self, parent=None):
        columns = ['url', 'title', 'last_atime']
        constraints = {
            'url': 'PRIMARY KEY',
            'title': 'NOT NULL',
            'last_atime': 'NOT NULL',
        }
        super().__init__("CompletionHistory", columns,
                         constraints=constraints, parent=parent)
        # Index last_atime so time-ordered lookups don't need a full scan.
        self.create_index('CompletionHistoryAtimeIndex', 'last_atime')
class WebHistory(sql.SqlTable):
"""The global history of visited pages.
Attributes:
completion: A CompletionHistory instance.
metainfo: A CompletionMetaInfo instance.
_progress: A HistoryProgress instance.
"""
# All web history cleared
history_cleared = pyqtSignal()
# one url cleared
url_cleared = pyqtSignal(QUrl)
def __init__(self, progress, parent=None):
    """Set up the History table and its completion companion tables.

    Args:
        progress: A HistoryProgress instance, used when a (possibly long)
                  completion rebuild has to run.
        parent: Qt parent object, forwarded to the underlying SqlTable.
    """
    super().__init__("History", ['url', 'title', 'atime', 'redirect'],
                     constraints={'url': 'NOT NULL',
                                  'title': 'NOT NULL',
                                  'atime': 'NOT NULL',
                                  'redirect': 'NOT NULL'},
                     parent=parent)
    self._progress = progress
    # Store the last saved url to avoid duplicate immediate saves.
    self._last_url = None
    self.completion = CompletionHistory(parent=self)
    self.metainfo = CompletionMetaInfo(parent=self)
    try:
        rebuild_completion = self.metainfo['force_rebuild']
    except sql.BugError:  # pragma: no cover
        # Meta table is broken; recreate it and retry once.
        log.sql.warning("Failed to access meta info, trying to recover...",
                        exc_info=True)
        self.metainfo.try_recover()
        rebuild_completion = self.metainfo['force_rebuild']
    if sql.user_version_changed():
        # If the DB user version changed, run a full cleanup and rebuild the
        # completion history.
        #
        # In the future, this could be improved to only be done when actually needed
        # - but version changes happen very infrequently, rebuilding everything
        # gives us less corner-cases to deal with, and we can run a VACUUM to make
        # things smaller.
        self._cleanup_history()
        rebuild_completion = True
    # Get a string of all patterns
    patterns = config.instance.get_str('completion.web_history.exclude')
    # If patterns changed, update them in database and rebuild completion
    if self.metainfo['excluded_patterns'] != patterns:
        self.metainfo['excluded_patterns'] = patterns
        rebuild_completion = True
    if rebuild_completion and self:
        # If no history exists, we don't need to spawn a dialog for
        # cleaning it up.
        self._rebuild_completion()
    self.create_index('HistoryIndex', 'url')
    self.create_index('HistoryAtimeIndex', 'atime')
    # Prepared queries, reused by __contains__ / entries_between / entries_before.
    self._contains_query = self.contains_query('url')
    self._between_query = sql.Query('SELECT * FROM History '
                                    'where not redirect '
                                    'and not url like "qute://%" '
                                    'and atime > :earliest '
                                    'and atime <= :latest '
                                    'ORDER BY atime desc')
    self._before_query = sql.Query('SELECT * FROM History '
                                   'where not redirect '
                                   'and not url like "qute://%" '
                                   'and atime <= :latest '
                                   'ORDER BY atime desc '
                                   'limit :limit offset :offset')
def __repr__(self):
    """Debug representation including the number of stored rows."""
    return utils.get_repr(self, length=len(self))
def __contains__(self, url):
    """Check whether the given URL string exists in the history table."""
    return self._contains_query.run(val=url).value()
@contextlib.contextmanager
def _handle_sql_errors(self):
    """Context manager turning known SQL errors into user-visible messages.

    Only sql.KnownError is swallowed (and reported via message.error);
    unexpected database errors propagate to the caller.
    """
    try:
        yield
    except sql.KnownError as e:
        message.error(f"Failed to write history: {e.text()}")
def _is_excluded_from_completion(self, url):
    """Check if the given URL is excluded from the completion."""
    exclude_patterns = config.cache['completion.web_history.exclude']
    for exclude_pattern in exclude_patterns:
        if exclude_pattern.matches(url):
            return True
    return False
def _is_excluded_entirely(self, url):
    """Check if the given URL is excluded from the entire history.

    This is the case for URLs which can't be visited at a later point; or which are
    usually excessively long.

    NOTE: If you add new filters here, it might be a good idea to adjust the
    _USER_VERSION code and _cleanup_history so that older histories get cleaned up
    accordingly as well.
    """
    scheme = url.scheme()
    if scheme in {'data', 'view-source'}:
        return True
    if scheme == 'qute' and url.host() in {'back', 'pdfjs'}:
        return True
    return False
def _cleanup_history(self):
    """Do a one-time cleanup of the entire history.

    This is run only once after the v2.0.0 upgrade, based on the database's
    user_version.
    """
    # Same URL categories as _is_excluded_entirely, expressed as SQL LIKE terms.
    terms = [
        'data:%',
        'view-source:%',
        'qute://back%',
        'qute://pdfjs%',
    ]
    where_clause = ' OR '.join(f"url LIKE '{term}'" for term in terms)
    q = sql.Query(f'DELETE FROM History WHERE {where_clause}')
    entries = q.run()
    log.sql.debug(f"Cleanup removed {entries.rows_affected()} items")
def _rebuild_completion(self):
    """Repopulate the CompletionHistory table from the full History table.

    Shows a progress dialog (via self._progress) since this can take a while
    for large histories.
    """
    # If this process was interrupted, make sure we trigger a rebuild again
    # at the next start.
    self.metainfo['force_rebuild'] = True
    data: Mapping[str, MutableSequence[str]] = {
        'url': [],
        'title': [],
        'last_atime': []
    }
    self._progress.start(
        "<b>Rebuilding completion...</b><br>"
        "This is a one-time operation and happens because the database version "
        "or <i>completion.web_history.exclude</i> was changed."
    )
    # Delete old entries
    self.completion.delete_all()
    QApplication.processEvents()
    # Select the latest entry for each url
    q = sql.Query('SELECT url, title, max(atime) AS atime FROM History '
                  'WHERE NOT redirect '
                  'GROUP BY url ORDER BY atime asc')
    result = q.run()
    QApplication.processEvents()
    entries = list(result)
    self._progress.set_maximum(len(entries))
    for entry in entries:
        self._progress.tick()
        url = QUrl(entry.url)
        if self._is_excluded_from_completion(url):
            continue
        data['url'].append(self._format_completion_url(url))
        data['title'].append(entry.title)
        data['last_atime'].append(entry.atime)
    # Maximum 0 = "unknown duration" while the final steps run.
    self._progress.set_maximum(0)
    # We might have caused fragmentation - let's clean up.
    sql.Query('VACUUM').run()
    QApplication.processEvents()
    self.completion.insert_batch(data, replace=True)
    QApplication.processEvents()
    self._progress.finish()
    # Rebuild finished cleanly - clear the restart flag set above.
    self.metainfo['force_rebuild'] = False
def get_recent(self):
    """Return the 100 most recent history entries, newest first."""
    return self.select(
        sort_by='atime',
        sort_order='desc',
        limit=100,
    )
def entries_between(self, earliest, latest):
    """Iterate non-redirect, non-qute entries between two timestamps.

    Args:
        earliest: Lower bound; earlier timestamps are omitted.
        latest: Upper bound; later timestamps are omitted.
    """
    query = self._between_query
    query.run(earliest=earliest, latest=latest)
    return iter(query)
def entries_before(self, latest, limit, offset):
    """Iterate non-redirect, non-qute entries occurring before a timestamp.

    Args:
        latest: Omit timestamps more recent than this.
        limit: Maximum number of entries to include.
        offset: Number of entries to skip.
    """
    query = self._before_query
    query.run(latest=latest, limit=limit, offset=offset)
    return iter(query)
def clear(self):
    """Clear all browsing history.

    Deletes both the main History table and the completion table, then
    notifies listeners via the history_cleared signal.
    """
    with self._handle_sql_errors():
        self.delete_all()
        self.completion.delete_all()
    self.history_cleared.emit()
    # Forget the last-added URL so the next visit is recorded again.
    self._last_url = None
def delete_url(self, url):
    """Remove all history entries with the given url.

    Args:
        url: URL string to delete.

    Raises an error (via qtutils.ensure_valid) if the URL is invalid.
    """
    qurl = QUrl(url)
    qtutils.ensure_valid(qurl)
    # Remove from both the main history table and the completion table;
    # each stores the URL in its own formatting.
    self.delete('url', self._format_url(qurl))
    self.completion.delete('url', self._format_completion_url(qurl))
    # Forget the last-added URL so a revisit is recorded again.
    if self._last_url == url:
        self._last_url = None
    self.url_cleared.emit(qurl)
@pyqtSlot(QUrl, QUrl, str)
def add_from_tab(self, url, requested_url, title):
    """Add a new history entry as slot, called from a BrowserTab.

    Args:
        url: The URL the page actually ended up at.
        requested_url: The URL that was originally requested (may differ
            after redirects).
        title: The page title.
    """
    if self._is_excluded_entirely(url) or self._is_excluded_entirely(requested_url):
        return
    if url.isEmpty():
        # things set via setHtml
        return
    no_formatting = QUrl.UrlFormattingOption(0)
    if (requested_url.isValid() and
            not requested_url.matches(url, no_formatting)):
        # If the url of the page is different than the url of the link
        # originally clicked, save them both.
        self.add_url(requested_url, title, redirect=True)
    # Suppress duplicate successive entries for the same URL.
    if url != self._last_url:
        self.add_url(url, title)
        self._last_url = url
def add_url(self, url, title="", *, redirect=False, atime=None):
"""Called via add_from_tab when a URL should be | |
vertex3 = vertices[_i2]
subdivisions.append(Triangle(triangle.vertex1, vertex1, vertex3, color))
subdivisions.append(Triangle(triangle.vertex2, vertex2, vertex1, color))
subdivisions.append(Triangle(triangle.vertex3, vertex3, vertex2, color))
subdivisions.append(Triangle(vertex1, vertex2, vertex3, color))
triangles = subdivisions
#print(triangles)
return triangles
def FibonnaciSphereTriangles(color=(255, 255, 255), n=50):
    """Build a rough triangle mesh from a Fibonacci-sphere point set.

    Points are distributed on the unit sphere using the golden-angle
    spiral; consecutive point triples are then stitched into triangles.

    :param color: RGB color applied to every triangle
    :param n: number of points to place on the sphere (must be >= 2)
    :return: list of Triangle objects (empty for n < 3)

    NOTE(review): stitching consecutive spiral points is a placeholder
    triangulation, not a watertight sphere surface (function is WIP).
    """
    if n < 2:
        # Avoid the division by zero below for degenerate inputs.
        return []
    vertices = []
    # Golden angle in radians: pi * (3 - sqrt(5)) ~= 2.39996.
    # (The previous code halved this, clustering the points.)
    golden_angle = pi * (3 - sqrt(5))
    for i in range(n):
        y = 1 - (i / float(n - 1)) * 2  # y goes from 1 to -1
        radius = sqrt(1 - y * y)        # radius of the circle at height y
        theta = golden_angle * i        # spiral around the axis
        vertices.append(Vector3(cos(theta) * radius, y, sin(theta) * radius))
    triangles = []
    # range(len - 2) covers every consecutive triple; the old range(len - 3)
    # dropped the final triangle.
    for i in range(len(vertices) - 2):
        triangles.append(Triangle(vertices[i], vertices[i + 1], vertices[i + 2], color))
    return triangles
import colorsys
def hsv_to_rgb(h, s, v):
    """Convert an HSV color (components in [0, 1]) to a 0-255 integer RGB tuple."""
    r, g, b = colorsys.hsv_to_rgb(h, s, v)
    return (round(r * 255), round(g * 255), round(b * 255))
def DrawTriangle(screen, triangle, fill, wireframe, vertices, radius, verticeColor, wireframeColor, lineWidth):
    """Render one projected triangle: optional fill, wireframe edges and vertex markers."""
    corners = (triangle.vertex1, triangle.vertex2, triangle.vertex3)
    if fill == True:
        pygame.draw.polygon(screen, triangle.color, triangle.GetPolygons())
    if wireframe == True:
        # Draw the three edges in order v1-v2, v2-v3, v3-v1.
        points = [corner.GetTuple() for corner in corners]
        for start, end in zip(points, points[1:] + points[:1]):
            pygame.draw.line(screen, wireframeColor, start, end, lineWidth)
    if vertices == True:
        # Per-triangle vertex color only when explicitly requested.
        marker = (255, 255, 255) if verticeColor == False else triangle.verticeColor
        for corner in corners:
            pygame.draw.circle(screen, marker, corner.GetTuple(), radius)
def hsv2rgb(h, s, v):
    """Convert HSV in [0, 1] to a 0-255 integer RGB tuple.

    NOTE(review): duplicate of hsv_to_rgb above; kept because existing
    callers may use either name.
    """
    channels = colorsys.hsv_to_rgb(h, s, v)
    return tuple(round(channel * 255) for channel in channels)
def LoadMesh(objectPath, color=(255, 255, 255)):
    """Load a Wavefront .obj file and return its faces as a list of Triangles.

    Only vertex ('v') and face ('f') records are used; texture/normal
    references inside face entries ("v/vt/vn") are ignored.  Faces with
    more than three vertices are fan-triangulated, and negative (relative)
    OBJ indices are supported.

    :param objectPath: path of the .obj file to read
    :param color: RGB color applied to every triangle
    :return: list of Triangle objects
    """
    vert_data = []
    triangle_indices = []
    with open(objectPath, 'r') as objectFile:
        for raw_line in objectFile:
            # split() without an argument tolerates repeated blanks and
            # strips the trailing newline, unlike split(" ").
            fields = raw_line.split()
            if not fields:
                continue
            if fields[0] == 'v':
                vert_data.append(Vector3(float(fields[1]), float(fields[2]), float(fields[3])))
            elif fields[0] == 'f':
                indices = [int(el.split('/')[0]) for el in fields[1:]]
                # Fan-triangulate faces with more than three vertices
                # (the old code silently dropped the extra vertices).
                for k in range(1, len(indices) - 1):
                    triangle_indices.append([indices[0], indices[k], indices[k + 1]])

    def _vert(idx):
        # OBJ indices are 1-based; negative indices count from the end,
        # which maps directly onto Python's negative indexing.
        return vert_data[idx - 1] if idx > 0 else vert_data[idx]

    meshData = []
    for t in triangle_indices:
        meshData.append(Triangle(_vert(t[0]), _vert(t[1]), _vert(t[2]), color))
    return meshData
def translateValue(value, min1, max1, min2, max2):
    """Linearly remap ``value`` from the range [min1, max1] to [min2, max2]."""
    fraction = (value - min1) / (max1 - min1)
    return min2 + (max2 - min2) * fraction
def SignedDist(pos, normal, p):
    """Signed distance from point ``pos`` to the plane through ``p`` with normal ``normal``.

    Positive values lie on the side the normal points toward.  The result
    is a true distance only if ``normal`` is unit length; otherwise it is
    scaled by the normal's magnitude.
    """
    # (A previously computed, unused Normalize(pos) local was removed.)
    return (normal.x * pos.x + normal.y * pos.y + normal.z * pos.z
            - dotProduct(normal, p))
def TriangleClipped(pos, normal, triangle, outTriangle, clippingDebug=False):
    """Clip ``triangle`` against the plane through ``pos`` with normal ``normal``.

    The clipped triangle(s) are written into the pre-allocated
    ``outTriangle`` list; the number of output triangles is returned:
      0 -- fully outside the plane (caller should discard it),
      1 -- fully inside (passed through), or clipped to one smaller triangle,
      2 -- two vertices inside: the remaining quad is split into two triangles.

    When ``clippingDebug`` is true, newly clipped triangles get fixed debug
    colors (module-level ``red``/``blue``/``green``) instead of the source
    color.  NOTE(review): ``normal`` is used unnormalized here (the
    Normalize call is commented out) -- confirm callers pass unit normals.
    """
    #normal = Normalize(normal)
    # Partition the three vertices by the sign of their distance to the
    # plane; >= 0 counts as inside.
    insidePoints, insideCount = [None for _ in range(3)], 0
    outsidePoints, outsideCount = [None for _ in range(3)], 0
    d0 = SignedDist(triangle.vertex1, normal, pos)
    d1 = SignedDist(triangle.vertex2, normal, pos)
    d2 = SignedDist(triangle.vertex3, normal, pos)
    if d0 >= 0:
        insidePoints[insideCount] = triangle.vertex1
        insideCount += 1
    else:
        outsidePoints[outsideCount] = triangle.vertex1
        outsideCount += 1
    if d1 >= 0:
        insidePoints[insideCount] = triangle.vertex2
        insideCount += 1
    else:
        outsidePoints[outsideCount] = triangle.vertex2
        outsideCount += 1
    if d2 >= 0:
        insidePoints[insideCount] = triangle.vertex3
        insideCount += 1
    else:
        outsidePoints[outsideCount] = triangle.vertex3
        outsideCount += 1
    if insideCount == 0:
        # Everything behind the plane: nothing survives.
        return 0
    if insideCount == 3:
        # Everything in front: pass the triangle through unchanged.
        outTriangle[0] = triangle
        return 1
    if insideCount == 1 and outsideCount == 2:
        # One survivor: result is a smaller triangle whose two new
        # vertices lie where the edges cross the plane.
        # outTriangle[0].color = (0, 255,24)
        outTriangle[0].color = triangle.color if clippingDebug==False else red
        outTriangle[0].vertex1 = insidePoints[0]
        outTriangle[0].vertex2 = PlaneLineIntersection(pos, normal, insidePoints[0], outsidePoints[0])
        outTriangle[0].vertex3 = PlaneLineIntersection(pos, normal, insidePoints[0], outsidePoints[1])
        return 1
    if insideCount == 2 and outsideCount == 1:
        # Two survivors: the clipped shape is a quad, emitted as two
        # triangles that share the first intersection point.
        # outTriangle[0].color = (55, 60, 255)
        # outTriangle[1].color = (255,51, 12)
        outTriangle[0].color = triangle.color if clippingDebug==False else blue
        outTriangle[1].color = triangle.color if clippingDebug==False else green
        outTriangle[0].vertex1 = insidePoints[1]
        outTriangle[0].vertex2 = insidePoints[0]
        outTriangle[0].vertex3 = PlaneLineIntersection(pos, normal, insidePoints[0], outsidePoints[0])
        outTriangle[1].vertex1 = insidePoints[1]
        outTriangle[1].vertex2 = outTriangle[0].vertex3
        outTriangle[1].vertex3 = PlaneLineIntersection(pos, normal, insidePoints[1], outsidePoints[0])
        return 2
def DrawAxis(screen, camera, scale=3, center=None, Xaxis=True, Yaxis=True, Zaxis=True, stroke=5):
    """Draw the world X (red), Z (blue) and Y (green) axes from ``center``."""
    if center is None:
        center = Point(Vector3(0, 0, 0))
    origin = center.update(screen, camera).GetTuple()
    # Axis endpoints, drawn in the original order X, Z, Y.
    tips = []
    if Xaxis:
        tips.append(Point(Vector3(scale, 0, 0), (255, 0, 0)))
    if Zaxis:
        tips.append(Point(Vector3(0, 0, scale), (0, 0, 255)))
    if Yaxis:
        tips.append(Point(Vector3(0, scale, 0), (0, 255, 0)))
    for tip in tips:
        projected = tip.update(screen, camera, True)
        pygame.draw.line(screen, tip.color, origin, projected.GetTuple(), stroke)
def PointAt(current, next, up) -> Matrix:
    """Build a "point-at" transform located at ``current`` and facing ``next``.

    Rows hold the right / up / forward basis vectors plus the translation.
    """
    forward = Normalize(next - current)
    # Remove the forward component from 'up' to keep the basis orthogonal.
    new_up = up - forward * dotProduct(up, forward)
    right = crossProduct(new_up, forward)
    m = Matrix()
    m.val = [
        [right.x, right.y, right.z, 0.0],
        [new_up.x, new_up.y, new_up.z, 0.0],
        [forward.x, forward.y, forward.z, 0.0],
        [current.x, current.y, current.z, 1.0],
    ]
    return m
# TODO: perhaps this function should take in a Matrix as arg
# TODO: should move into matrix.Matrix class?
def Shearing(
    xy: float, xz: float, yx: float, yz: float, zx: float, zy: float
) -> Matrix:
    """Return a 4x4 shear matrix; each argument couples one axis to another
    (e.g. ``xy`` shears x in proportion to y)."""
    shear = Matrix()
    shear.val = [
        [1, xy, xz, 0.0],
        [yx, 1, yz, 0.0],
        [zx, zy, 1, 0.0],
        [0.0, 0.0, 0.0, 1],
    ]
    return shear
class Triangle:
    """A colored triangle defined by three vertices.

    Vertices are whatever point type the renderer uses; by draw time they
    are expected to expose projected 2D ``x``/``y`` attributes.
    """

    def __init__(self, v1=None, v2=None, v3=None, color=(255, 255, 255)):
        self.vertex1 = v1
        self.vertex2 = v2
        self.vertex3 = v3
        self.color = color
        # Color used when drawing vertex markers; defaults to the face color.
        self.verticeColor = color

    def Shade(self, val):
        """Return ``self.color`` scaled by ``val``, clamped per channel to 0..255."""
        def _scale(channel):
            scaled = channel * val
            if scaled > 255:
                return 255
            if scaled < 0:
                return 0
            return int(scaled)
        return (_scale(self.color[0]), _scale(self.color[1]), _scale(self.color[2]))

    def GetPolygons(self):
        """Return the three vertices as a list of integer (x, y) tuples."""
        return [(int(self.vertex1.x), int(self.vertex1.y)),
                (int(self.vertex2.x), int(self.vertex2.y)),
                (int(self.vertex3.x), int(self.vertex3.y))]

    def __repr__(self):
        # Debug representation.  (Fixed: the old f-string accidentally
        # wrapped the color in a set literal.)
        return (f"triangle-> {self.vertex1}, {self.vertex2}, "
                f"{self.vertex3}, color={self.color}")
import colorsys
class Vector2:
    """A simple 2D vector supporting +, -, * and / with vectors or scalars."""

    def __init__(self, x=0, y=0):
        self.x = x
        self.y = y

    def __add__(self, other):
        if type(other) is Vector2:
            return Vector2(self.x + other.x, self.y + other.y)
        return Vector2(self.x + other, self.y + other)

    def __sub__(self, other):
        if type(other) is Vector2:
            return Vector2(self.x - other.x, self.y - other.y)
        return Vector2(self.x - other, self.y - other)

    def __mul__(self, other):
        # Component-wise with another Vector2, uniform scale otherwise.
        if type(other) is Vector2:
            return Vector2(self.x * other.x, self.y * other.y)
        return Vector2(self.x * other, self.y * other)

    def __truediv__(self, other):
        if type(other) is Vector2:
            return Vector2(self.x / other.x, self.y / other.y)
        return Vector2(self.x / other, self.y / other)

    def __repr__(self):
        return f"vec2-> ({self.x}, {self.y})"
def toVector3(matrix):
    """Extract the first row of a 4x4 Matrix as a Vector3 (w component dropped)."""
    row = matrix.val[0]
    return Vector3(row[0], row[1], row[2])
def crossProduct(a, b):
    """Return the cross product ``a x b`` as a new Vector3."""
    return Vector3(
        a.y * b.z - a.z * b.y,
        a.z * b.x - a.x * b.z,
        a.x * b.y - a.y * b.x,
    )
def dotProduct(a, b):
    """Return the scalar (dot) product of two 3D vectors."""
    return sum((a.x * b.x, a.y * b.y, a.z * b.z))
def GetMagnitude(a):
    """Length of a Vector3, or of the x/y components for any other vector type."""
    if type(a) == Vector3:
        return sqrt(a.x ** 2 + a.y ** 2 + a.z ** 2)
    return sqrt(a.x ** 2 + a.y ** 2)
def Normalize(a):
    """Return a unit-length copy of ``a`` (zero vector if ``a`` has zero length)."""
    magnitude = GetMagnitude(a)
    if not magnitude:
        return Vector3()
    return Vector3(a.x / magnitude, a.y / magnitude, a.z / magnitude)
def PlaneLineIntersection(pos, normal, lineStart, lineEnd):
    """Return the point where segment ``lineStart``->``lineEnd`` crosses the
    plane through ``pos`` with normal ``normal``."""
    unit_normal = Normalize(normal)
    plane_d = -dotProduct(unit_normal, pos)
    start_dist = dotProduct(lineStart, unit_normal)
    end_dist = dotProduct(lineEnd, unit_normal)
    # Parametric position of the intersection along the segment.
    t = (-plane_d - start_dist) / (end_dist - start_dist)
    return lineStart + (lineEnd - lineStart) * t
class Scene:
def __init__(self, world=[]):
self.world = world
def update(self, dt, camera, light, screen, showAxis=False,
fill=True, wireframe=False, vertices=False, depth=True,
clippingDebug=False, showNormals=False,
radius=8, verticeColor=False,
wireframeColor=(255, 255, 255),ChangingColor=0, lineWidth=1):
camera.HandleInput(dt)
camera.direction = Vector3(0, 0, 1)
camera.up = Vector3(0, 1, 0)
camera.target = Vector3(0, 0, 1)
camera.rotation = Matrix.rotation_y(camera.yaw)
camera.direction = multiplyMatrixVector(camera.target, camera.rotation)
camera.target = camera.position + camera.direction
lookAtMatrix = PointAt(camera.position, camera.target, camera.up)
camera.viewMatrix = QuickInverse(lookAtMatrix)
camera.target= Vector3(0, 0, 1)
triangles = []
origins = []
for ob in self.world:
triangles += ob.update(screen,fill, wireframe, dt, camera, light, depth, clippingDebug, ChangingColor)
# sort the triangles list based on the average of their
# z coordinate -> painters algorithm
def Zsort(val):
return (val.vertex1.z + val.vertex2.z + val.vertex3.z) / 3.0
triangles.sort(key=Zsort)
normals_length = 250
normals = []
for projected in reversed(triangles):
origin = (projected.vertex1+projected.vertex2+projected.vertex3)/3
line1 = projected.vertex2 - projected.vertex1
line2 = projected.vertex3 - projected.vertex1
normal = crossProduct(line1, line2) * normals_length
DrawTriangle(screen, projected, fill, wireframe,vertices, radius, verticeColor, wireframeColor, lineWidth)
origins.append(origin)
normals.append(normal)
if showAxis:
DrawAxis(screen, camera)
if showNormals == True: #---to fix later
# get the normal vectors
for i, n in enumerate(normals):
endPoint = origins[i] + (n)
#pygame.draw.circle(screen, (0,255, 0), endPoint.GetTuple(), 10)
pygame.draw.line(screen, (0, 255, 0), origins[i].GetTuple(), | |
# COVID19_prediction/COVID_model/model_util.py
# -*- coding: utf-8 -*-
"""
Created on Wed Sep 9 17:18:28 2020
@author: XT
"""
from __future__ import print_function
import random
import joblib
import numpy as np
import pandas as pd
import requests
from sklearn import metrics
SR = 16000 # sample rate
import os # noqa: E402
import sys # noqa: E402
import librosa # noqa: E402
import model_params as params # noqa: E402
sys.path.append("../vggish")
from vggish_input import waveform_to_examples # noqa: E402
SR_VGG = params.SR_VGG
def get_resort(files):
"""Re-sort the files under data path.
:param files: file list
:type files: list
:return: alphabetic orders
:rtype: list
"""
name_dict = {}
for sample in files:
name = sample.lower()
name_dict[name] = sample
re_file = [name_dict[s] for s in sorted(name_dict.keys())]
np.random.seed(222)
np.random.shuffle(re_file)
return re_file
def get_resort_test(files):
    """Re-sort the files under data path.

    Case-insensitive sort that returns the original spellings; unlike
    get_resort, no shuffle is applied.

    :param files: file list
    :type files: list
    :return: alphabetic orders
    :rtype: list
    """
    by_lower = {sample.lower(): sample for sample in files}
    return [by_lower[key] for key in sorted(by_lower)]
def get_aug(y, type):
    """Augment an audio waveform for training.

    :param y: raw audio samples
    :type y: np.ndarray
    :param type: augmentation kind: 'noise' adds Gaussian noise,
        'pitchspeed' applies a random pitch shift of +/- 6 semitones
    :type type: str
    :return: augmented signal, re-normalized to peak amplitude 1
    :rtype: np.ndarray
    :raises ValueError: if ``type`` names no known augmentation
    """
    if type == "noise":
        y_aug = y + 0.005 * np.random.normal(0, 1, len(y))
    elif type == "pitchspeed":
        step = np.random.uniform(-6, 6)
        y_aug = librosa.effects.pitch_shift(y, SR, step)
    else:
        # Previously an unknown type crashed later with a NameError;
        # fail fast with a clear message instead.
        raise ValueError(f"unknown augmentation type: {type!r}")
    return y_aug / np.max(np.abs(y_aug))  # re-normalize the sound
def spec_augment(spec: np.ndarray, num_mask=2, freq_masking_max_percentage=0.1, time_masking_max_percentage=0.2):
    """SpecAugment-style masking: zero out random frequency and time bands.

    Operates on a copy; the input spectrogram is left untouched.
    Expects ``spec`` shaped (frames, frequency bins).
    """
    spec = spec.copy()
    for _ in range(num_mask):
        num_frames, num_freqs = spec.shape
        # Random-width frequency band, placed uniformly.
        band = int(random.uniform(0.0, freq_masking_max_percentage) * num_freqs)
        start = int(np.random.uniform(low=0.0, high=num_freqs - band))
        spec[:, start:start + band] = 0
        # Random-width time band, placed uniformly.
        band = int(random.uniform(0.0, time_masking_max_percentage) * num_frames)
        start = int(np.random.uniform(low=0.0, high=num_frames - band))
        spec[start:start + band, :] = 0
    return spec
def load_data(data_path, is_aug):
    """Load data for training, validation and testing.

    :param data_path: path prefix; '<prefix>_covid.pk' and
        '<prefix>_noncovid.pk' (plus '_augnoise.pk'/'_augpitch.pk' when
        augmenting) are read
    :type data_path: str
    :param is_aug: whether to append pre-computed augmented samples
        (training split only)
    :type is_aug: bool
    :return: train, validation and test batches
    :rtype: list
    """
    print("start to load data:", data_path)
    # NOTE(review): these file handles are never closed explicitly.
    data = joblib.load(open(data_path + "_covid.pk", "rb"))  # load positive samples
    data2 = joblib.load(open(data_path + "_noncovid.pk", "rb"))  # load negative samples
    data.update(data2)
    # Label convention throughout: [0, 1] = COVID-positive, [1, 0] = negative.
    train_task = []
    covidcnt = 0
    noncvcnt = 0
    for uid in get_resort(data["train_covid_id"]):
        for temp in data["train_covid_id"][uid]:
            train_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["train_noncovid_id"]):
        for temp in data["train_noncovid_id"][uid]:
            train_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    total = len(train_task)
    # upsampling by repeating some covid to balance the class
    # NOTE(review): assumes noncvcnt >= covidcnt and
    # (noncvcnt - covidcnt) * 2 <= covidcnt; otherwise np.random.choice
    # with replace=False raises -- confirm against the dataset.
    np.random.seed(1)
    add_covid = np.random.choice(range(covidcnt), (noncvcnt - covidcnt) * 2, replace=False)
    add_sample = [train_task[i] for i in add_covid]
    train_task = train_task + add_sample
    total = len(train_task)
    print("add covid:", noncvcnt - covidcnt, "total:", total)
    # The string literal below is intentionally dead code preserving an
    # alternative down-sampling strategy.
    """
    #down sample
    np.random.seed(1)
    add_covid = np.random.choice(range(covidcnt, covidcnt + noncvcnt), covidcnt, replace=False)
    add_sample = [train_task[i] for i in add_covid]
    train_task = train_task[:covidcnt] + add_sample
    print('delete noncovid:', noncvcnt-covidcnt)
    total = len(train_task)
    """
    if is_aug:  # only works for train
        # Append pre-computed noise/pitch augmentations, upsampling the
        # augmented covid samples with the same ratio as above.
        for i, type in enumerate(["_augnoise.pk", "_augpitch.pk"]):  #
            data_aug = joblib.load(open(data_path + type, "rb"))
            aug_covid = data_aug["covid"]
            aug_noncovid = data_aug["noncovid"]
            np.random.seed(i + 2)  # random and different
            add_covid = np.random.choice(range(covidcnt), (noncvcnt - covidcnt) * 2, replace=False)
            add_sample = [aug_covid[i] for i in add_covid]
            train_task = train_task + aug_covid + add_sample + aug_noncovid
    # Validation split (no balancing or augmentation).
    vad_task = []
    covidcnt = 0
    noncvcnt = 0
    for uid in get_resort(data["vad_covid_id"]):
        for temp in data["vad_covid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["vad_noncovid_id"]):
        for temp in data["vad_noncovid_id"][uid]:
            vad_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    # Test split.
    test_task = []
    covidcnt = 0
    noncvcnt = 0
    for uid in get_resort(data["test_covid_id"]):
        for temp in data["test_covid_id"][uid]:
            test_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [0, 1]}
            )
            covidcnt += 1
    for uid in get_resort(data["test_noncovid_id"]):
        for temp in data["test_noncovid_id"][uid]:
            test_task.append(
                {"breath": temp["breath"], "cough": temp["cough"], "voice": temp["voice"], "label": [1, 0]}
            )
            noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    # NOTE(review): the first five test samples are duplicated here
    # (presumably batch-size padding) -- confirm downstream expectation.
    test_task = test_task + test_task[:5]
    # suffle samples
    np.random.seed(222)
    np.random.shuffle(train_task)
    np.random.seed(222)
    np.random.shuffle(vad_task)
    np.random.seed(222)
    np.random.shuffle(test_task)
    return train_task, vad_task, test_task
def load_vad_data(data_path):
    """Load validation data only (train + vad splits, labeled).

    :param data_path: path prefix; '<prefix>_covid.pk' and
        '<prefix>_noncovid.pk' are read
    :return: deterministically shuffled list of
        {'breath', 'cough', 'voice', 'label'} dicts, where label [0, 1]
        means COVID-positive and [1, 0] means negative
    """
    print("start to load data:", data_path)
    # Context managers close the pickle files deterministically
    # (the previous code leaked the open file objects).
    with open(data_path + "_covid.pk", "rb") as f:
        data = joblib.load(f)
    with open(data_path + "_noncovid.pk", "rb") as f:
        data.update(joblib.load(f))
    vad_task = []
    covidcnt = 0
    noncvcnt = 0
    # Same collection logic for all four splits; only label/counter differ.
    for split, label, is_covid in (
        ("train_covid_id", [0, 1], True),
        ("train_noncovid_id", [1, 0], False),
        ("vad_covid_id", [0, 1], True),
        ("vad_noncovid_id", [1, 0], False),
    ):
        for uid in get_resort(data[split]):
            for temp in data[split][uid]:
                vad_task.append({
                    "breath": temp["breath"],
                    "cough": temp["cough"],
                    "voice": temp["voice"],
                    "label": list(label),  # fresh list per sample
                })
                if is_covid:
                    covidcnt += 1
                else:
                    noncvcnt += 1
    print("covid:", covidcnt, "non-covid:", noncvcnt)
    # Fixed seed keeps the shuffled order reproducible across runs.
    np.random.seed(222)
    np.random.shuffle(vad_task)
    return vad_task
def load_test_data(data_path):
    """Load test data only.

    :param data_path: path prefix; '<prefix>_covid.pk' and
        '<prefix>_noncovid.pk' are read
    :return: list of {'breath', 'cough', 'voice', 'label'} dicts in
        deterministic (case-insensitively sorted) order, positives first
    """
    print("start to load data:", data_path)
    # Context managers close the pickle files deterministically
    # (the previous code leaked the open file objects).
    with open(data_path + "_covid.pk", "rb") as f:
        data = joblib.load(f)
    with open(data_path + "_noncovid.pk", "rb") as f:
        data.update(joblib.load(f))
    test_task = []
    for split, label in (
        ("test_covid_id", [0, 1]),
        ("test_noncovid_id", [1, 0]),
    ):
        for uid in get_resort_test(data[split]):
            for temp in data[split][uid]:
                test_task.append({
                    "breath": temp["breath"],
                    "cough": temp["cough"],
                    "voice": temp["voice"],
                    "label": list(label),  # fresh list per sample
                })
    return test_task
def load_test_dict_data(data_path):
    """Load a pickled dict of labeled test data.

    :param data_path: full path of the pickle file
    :return: the deserialized dict
    """
    print("start to load data:", data_path)
    # Close the file handle deterministically (previous code leaked it).
    with open(data_path, "rb") as f:
        return joblib.load(f)
def get_input(sample):
    """Transfer one sample's audio into spectrogram frames plus labels.

    Each modality is converted to VGGish input frames and concatenated;
    the returned indices delimit the modalities inside the concatenated
    tensor: breath occupies [0, index), cough [index, index2), voice the
    rest.
    """
    vgg_b = waveform_to_examples(sample["breath"], SR_VGG)
    vgg_c = waveform_to_examples(sample["cough"], SR_VGG)
    vgg_v = waveform_to_examples(sample["voice"], SR_VGG)
    index = vgg_b.shape[0]
    index2 = vgg_c.shape[0] + index
    vgg_input = np.concatenate((vgg_b, vgg_c, vgg_v), axis=0)
    labels = sample["label"]
    # NOTE(review): symptoms are hard-coded to all-ones instead of using
    # sample['sym'] -- confirm this placeholder is intentional.
    symptoms = [[1] * 13]  # sample['sym']
    return vgg_input, [[index]], [[index2]], labels, symptoms
def get_metrics(probs, labels):
    """Calculate classification metrics from per-class probabilities.

    :param probs: list of [p_negative, p_positive] probability pairs
    :type probs: list
    :param labels: list of 0/1 ground-truth labels
    :type labels: list
    :return: (AUC, sensitivity, specificity, specificity at 90% sensitivity)
    """
    probs = np.array(probs)
    probs = np.squeeze(probs)
    # Threshold at 0.5 on the negative-class probability.
    predicted = []
    for i in range(len(probs)):
        if probs[i][0] > 0.5:
            predicted.append(0)
        else:
            predicted.append(1)
    label = np.array(labels)
    label = np.squeeze(label)
    predicted = np.array(predicted)
    predicted = np.squeeze(predicted)
    # pre = metrics.precision_score(label, predicted)
    # acc = metrics.accuracy_score(label, predicted)
    auc = metrics.roc_auc_score(label, probs[:, 1])
    # NOTE(review): precision/recall are computed but unused.
    precision, recall, _ = metrics.precision_recall_curve(label, probs[:, 1])
    # rec = metrics.recall_score(label, predicted)
    TN, FP, FN, TP = metrics.confusion_matrix(label, predicted).ravel()
    # Sensitivity, hit rate, recall, or true positive rate
    TPR = TP / (TP + FN)
    # Specificity or true negative rate
    TNR = TN / (TN + FP)
    # PPV = TP/(TP + FP)
    # NPV = TN/(TN + FN)
    fpr, tpr, thresholds = metrics.roc_curve(label, probs[:, 1])
    # Last ROC point before sensitivity exceeds 0.9.
    # NOTE(review): if the very first ROC point already has tpr > 0.9 this
    # becomes -1, silently picking the final (worst) point -- confirm.
    index = np.where(tpr > 0.9)[0][0] - 1
    print(
        "AUC:"
        + "{:.2f}".format(auc)
        + " Sensitivity:"
        + "{:.2f}".format(TPR)
        + " Specificity:"
        + "{:.2f}".format(TNR)
        + " spe@90%sen:"
        + "{:.2f}".format(1 - fpr[index])
    )
    return auc, TPR, TNR, 1 - fpr[index]
def get_metrics_t(probs, label):
    """Compute AUC, sensitivity and specificity at a fixed 0.5 threshold.

    :param probs: positive-class probabilities
    :param label: 0/1 ground-truth labels
    :return: (auc, sensitivity, specificity)
    """
    predicted = [1 if p > 0.5 else 0 for p in probs]
    auc = metrics.roc_auc_score(label, probs)
    TN, FP, FN, TP = metrics.confusion_matrix(label, predicted).ravel()
    sensitivity = TP * 1.0 / (TP + FN)  # true positive rate
    specificity = TN * 1.0 / (TN + FP)  # true negative rate
    return auc, sensitivity, specificity
def get_CI(data, AUC, Sen, Spe):
AUCs = []
TPRs = []
TNRs = []
for s in range(1000):
np.random.seed(s) # Para2
sample = np.random.choice(range(len(data)), len(data), replace=True)
samples = [data[i] for i in sample]
sample_pro = [x[0] for x in samples]
sample_label = [x[1] for x in samples]
try:
get_metrics_t(sample_pro, sample_label)
except ValueError:
np.random.seed(1001) # Para2
sample = np.random.choice(range(len(data)), len(data), replace=True)
samples = [data[i] for i in sample]
sample_pro = [x[0] for x in samples]
sample_label = [x[1] for x in samples]
else:
auc, TPR, TNR = get_metrics_t(sample_pro, sample_label)
AUCs.append(auc)
TPRs.append(TPR)
TNRs.append(TNR)
q_0 = pd.DataFrame(np.array(AUCs)).quantile(0.025)[0] # 2.5% percentile
q_1 = pd.DataFrame(np.array(AUCs)).quantile(0.975)[0] # 97.5% percentile
q_2 = pd.DataFrame(np.array(TPRs)).quantile(0.025)[0] # 2.5% percentile
q_3 | |
point to return a set of response records. When the results of a DescribeClusterSnapshots request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.
:type OwnerAccount: string
:param OwnerAccount: The AWS customer account used to create or copy the snapshot. Use this field to filter the results to snapshots owned by a particular account. To describe snapshots you own, either specify your AWS customer account, or do not specify the parameter.
:type TagKeys: list
:param TagKeys: A tag key or keys for which you want to return all matching cluster snapshots that are associated with the specified key or keys. For example, suppose that you have snapshots that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag keys associated with them.
(string) --
:type TagValues: list
:param TagValues: A tag value or values for which you want to return all matching cluster snapshots that are associated with the specified tag value or values. For example, suppose that you have snapshots that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the snapshots that have either or both of these tag values associated with them.
(string) --
:rtype: dict
:return: {
'Marker': 'string',
'Snapshots': [
{
'SnapshotIdentifier': 'string',
'ClusterIdentifier': 'string',
'SnapshotCreateTime': datetime(2015, 1, 1),
'Status': 'string',
'Port': 123,
'AvailabilityZone': 'string',
'ClusterCreateTime': datetime(2015, 1, 1),
'MasterUsername': 'string',
'ClusterVersion': 'string',
'SnapshotType': 'string',
'NodeType': 'string',
'NumberOfNodes': 123,
'DBName': 'string',
'VpcId': 'string',
'Encrypted': True|False,
'KmsKeyId': 'string',
'EncryptedWithHSM': True|False,
'AccountsWithRestoreAccess': [
{
'AccountId': 'string',
'AccountAlias': 'string'
},
],
'OwnerAccount': 'string',
'TotalBackupSizeInMegaBytes': 123.0,
'ActualIncrementalBackupSizeInMegaBytes': 123.0,
'BackupProgressInMegaBytes': 123.0,
'CurrentBackupRateInMegaBytesPerSecond': 123.0,
'EstimatedSecondsToCompletion': 123,
'ElapsedTimeInSeconds': 123,
'SourceRegion': 'string',
'Tags': [
{
'Key': 'string',
'Value': 'string'
},
],
'RestorableNodeTypes': [
'string',
],
'EnhancedVpcRouting': True|False
},
]
}
:returns:
CreateClusterSnapshot and CopyClusterSnapshot returns status as "creating".
DescribeClusterSnapshots returns status as "creating", "available", "final snapshot", or "failed".
DeleteClusterSnapshot returns status as "deleted".
"""
pass
def describe_cluster_subnet_groups(ClusterSubnetGroupName=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
    """
    Returns one or more cluster subnet group objects, which contain metadata about your cluster subnet groups. By default, this operation returns information about all cluster subnet groups that are defined in you AWS account.
    If you specify both tag keys and tag values in the same request, Amazon Redshift returns all subnet groups that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all subnet groups that have any combination of those values are returned.
    If both tag keys and values are omitted from the request, subnet groups are returned regardless of whether they have tag keys or values associated with them.
    See also: AWS API Documentation

    :example: response = client.describe_cluster_subnet_groups(
        ClusterSubnetGroupName='string',
        MaxRecords=123,
        Marker='string',
        TagKeys=[
            'string',
        ],
        TagValues=[
            'string',
        ]
    )

    :type ClusterSubnetGroupName: string
    :param ClusterSubnetGroupName: The name of the cluster subnet group for which information is requested.

    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
        Default: 100
        Constraints: minimum 20, maximum 100.

    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterSubnetGroups request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    :type TagKeys: list
    :param TagKeys: A tag key or keys for which you want to return all matching cluster subnet groups that are associated with the specified key or keys. For example, suppose that you have subnet groups that are tagged with keys called owner and environment . If you specify both of these tag keys in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag keys associated with them.
        (string) --

    :type TagValues: list
    :param TagValues: A tag value or values for which you want to return all matching cluster subnet groups that are associated with the specified tag value or values. For example, suppose that you have subnet groups that are tagged with values called admin and test . If you specify both of these tag values in the request, Amazon Redshift returns a response with the subnet groups that have either or both of these tag values associated with them.
        (string) --

    :rtype: dict
    :return: {
        'Marker': 'string',
        'ClusterSubnetGroups': [
            {
                'ClusterSubnetGroupName': 'string',
                'Description': 'string',
                'VpcId': 'string',
                'SubnetGroupStatus': 'string',
                'Subnets': [
                    {
                        'SubnetIdentifier': 'string',
                        'SubnetAvailabilityZone': {
                            'Name': 'string'
                        },
                        'SubnetStatus': 'string'
                    },
                ],
                'Tags': [
                    {
                        'Key': 'string',
                        'Value': 'string'
                    },
                ]
            },
        ]
    }
    """
    # Auto-generated documentation stub (pyboto3-style); the actual
    # operation is dispatched dynamically by botocore at runtime.
    pass
def describe_cluster_versions(ClusterVersion=None, ClusterParameterGroupFamily=None, MaxRecords=None, Marker=None):
    """
    Returns descriptions of the available Amazon Redshift cluster versions. You can call this operation even before creating any clusters to learn more about the Amazon Redshift versions. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
    See also: AWS API Documentation

    :example: response = client.describe_cluster_versions(
        ClusterVersion='string',
        ClusterParameterGroupFamily='string',
        MaxRecords=123,
        Marker='string'
    )

    :type ClusterVersion: string
    :param ClusterVersion: The specific cluster version to return.
        Example: 1.0

    :type ClusterParameterGroupFamily: string
    :param ClusterParameterGroupFamily: The name of a specific cluster parameter group family to return details for.
        Constraints:
        Must be 1 to 255 alphanumeric characters
        First character must be a letter
        Cannot end with a hyphen or contain two consecutive hyphens

    :type MaxRecords: integer
    :param MaxRecords: The maximum number of response records to return in each call. If the number of remaining response records exceeds the specified MaxRecords value, a value is returned in a marker field of the response. You can retrieve the next set of records by retrying the command with the returned marker value.
        Default: 100
        Constraints: minimum 20, maximum 100.

    :type Marker: string
    :param Marker: An optional parameter that specifies the starting point to return a set of response records. When the results of a DescribeClusterVersions request exceed the value specified in MaxRecords , AWS returns a value in the Marker field of the response. You can retrieve the next set of response records by providing the returned marker value in the Marker parameter and retrying the request.

    :rtype: dict
    :return: {
        'Marker': 'string',
        'ClusterVersions': [
            {
                'ClusterVersion': 'string',
                'ClusterParameterGroupFamily': 'string',
                'Description': 'string'
            },
        ]
    }
    """
    # Auto-generated documentation stub (pyboto3-style); the actual
    # operation is dispatched dynamically by botocore at runtime.
    pass
def describe_clusters(ClusterIdentifier=None, MaxRecords=None, Marker=None, TagKeys=None, TagValues=None):
"""
Returns properties of provisioned clusters including general cluster properties, cluster database properties, maintenance and backup properties, and security and access properties. This operation supports pagination. For more information about managing clusters, go to Amazon Redshift Clusters in the Amazon Redshift Cluster Management Guide .
If you specify both tag keys and tag values in the same request, Amazon Redshift returns all clusters that match any combination of the specified keys and values. For example, if you have owner and environment for tag keys, and admin and test for tag values, all clusters that have any combination of those values | |
of psychedelia in your sound
code. Calling this function with `flip` set to non-zero reverses the
chunks's usual channels. If `flip` is zero, the effect is unregistered.
This uses the `Mix_RegisterEffect` API internally, and thus is probably
more CPU intensive than having the user just plug in his speakers
correctly. `Mix_SetReverseStereo` returns without registering the
effect function if the audio device is not configured for stereo
output.
If you specify `MIX_CHANNEL_POST` for `channel`, then this the effect
is used on the final mixed stream before sending it on to the audio
device (a posteffect).
:Parameters:
- `channel`: int
- `flip`: int
''',
args=['channel', 'flip'],
arg_types=[c_int, c_int],
return_type=c_int,
error_return=0)
Mix_ReserveChannels = _dll.function(
'Mix_ReserveChannels',
'''Reserve the first channels (0 to n-1) for the application.
If reserved, a channel will not be allocated dynamically to a sample
if requested with one of the ``Mix_Play*`` functions.
:Parameters:
- `num`: int
:rtype: int
:return: the number of reserved channels
''',
args=['num'],
arg_types=[c_int],
return_type=c_int)
# ctypes binding for Mix_GroupChannel; error_return=0 because the C function
# reports failure with 0.  Fixed user-visible typo in the doc string
# ("Assing" -> "Assign").
Mix_GroupChannel = _dll.function(
    'Mix_GroupChannel',
    '''Assign a channel to a group.

    A tag can be assigned to several mixer channels, to form groups
    of channels. If `tag` is -1, the tag is removed (actually -1 is the
    tag used to represent the group of all the channels).

    :Parameters:
     - `channel`: int
     - `tag`: int
    ''',
    args=['channel', 'tag'],
    arg_types=[c_int, c_int],
    return_type=c_int,
    error_return=0)
Mix_GroupChannels = _dll.function(
'Mix_GroupChannels',
'''Assign several consecutive channels to a group.
A tag can be assigned to several mixer channels, to form groups
of channels. If `tag` is -1, the tag is removed (actually -1 is the
tag used to represent the group of all the channels).
:Parameters:
- `channel_from`: int
- `channel_to`: int
- `tag`: int
''',
args=['channel_from', 'channel_to', 'tag'],
arg_types=[c_int, c_int, c_int],
return_type=c_int,
error_return=0)
Mix_GroupAvailable = _dll.function(
'Mix_GroupAvailable',
'''Find the first available channel in a group of channels.
:Parameters:
- `tag`: int
:rtype: int
:return: a channel, or -1 if none are available.
''',
args=['tag'],
arg_types=[c_int],
return_type=c_int)
Mix_GroupCount = _dll.function(
'Mix_GroupCount',
'''Get the number of channels in a group.
If `tag` is -1, returns the total number of channels.
:Parameters:
- `tag`: int
:rtype: int
''',
args=['tag'],
arg_types=[c_int],
return_type=c_int)
Mix_GroupOldest = _dll.function(
'Mix_GroupOldest',
'''Find the "oldest" sample playing in a group of channels.
:Parameters:
- `tag`: int
:rtype: int
''',
args=['tag'],
arg_types=[c_int],
return_type=c_int)
Mix_GroupNewer = _dll.function(
'Mix_GroupNewer',
'''Find the "most recent" (i.e., last) sample playing in a group of
channels.
:Parameters:
- `tag`: int
:rtype: int
''',
args=['tag'],
arg_types=[c_int],
return_type=c_int)
def Mix_PlayChannel(channel, chunk, loops):
    """Play an audio chunk on a specific channel.

    :Parameters:
        `channel` : int
            If -1, play on the first free channel.
        `chunk` : `Mix_Chunk`
            Chunk to play
        `loops` : int
            If greater than zero, the number of times to play the sound;
            if -1, loop infinitely.

    :rtype: int
    :return: the channel that was used to play the sound.
    """
    # Untimed playback is simply timed playback with no expiry (-1 ticks).
    no_time_limit = -1
    return Mix_PlayChannelTimed(channel, chunk, loops, no_time_limit)
Mix_PlayChannelTimed = _dll.function(
'Mix_PlayChannelTimed',
'''Play an audio chunk on a specific channel for a specified amount of
time.
:Parameters:
`channel` : int
If -1, play on the first free channel.
`chunk` : `Mix_Chunk`
Chunk to play
`loops` : int
If greater than zero, the number of times to play the sound;
if -1, loop infinitely.
`ticks` : int
Maximum number of milliseconds to play sound for.
:rtype: int
:return: the channel that was used to play the sound.
''',
args=['channel', 'chunk', 'loops', 'ticks'],
arg_types=[c_int, POINTER(Mix_Chunk), c_int, c_int],
return_type=c_int)
Mix_PlayMusic = _dll.function(
'Mix_PlayMusic',
'''Play a music chunk.
:Parameters:
`music` : ``Mix_Music``
Chunk to play
`loops` : int
If greater than zero, the number of times to play the sound;
if -1, loop infinitely.
''',
args=['music', 'loops'],
arg_types=[_Mix_Music, c_int],
return_type=c_int,
error_return=-1)
Mix_FadeInMusic = _dll.function(
'Mix_FadeInMusic',
'''Fade in music over a period of time.
:Parameters:
`music` : ``Mix_Music``
Chunk to play
`loops` : int
If greater than zero, the number of times to play the sound;
if -1, loop infinitely.
`ms` : int
Number of milliseconds to fade up over.
''',
args=['music', 'loops', 'ms'],
arg_types=[_Mix_Music, c_int, c_int],
return_type=c_int,
error_return=-1)
Mix_FadeInMusicPos = _dll.function(
'Mix_FadeInMusicPos',
'''Fade in music at an offset over a period of time.
:Parameters:
`music` : ``Mix_Music``
Chunk to play
`loops` : int
If greater than zero, the number of times to play the sound;
if -1, loop infinitely.
`ms` : int
Number of milliseconds to fade up over.
`position` : float
Position within music to start at. Currently implemented
only for MOD, OGG and MP3.
:see: Mix_SetMusicPosition
''',
args=['music', 'loops', 'ms', 'position'],
arg_types=[_Mix_Music, c_int, c_int, c_double],
return_type=c_int,
error_return=-1)
def Mix_FadeInChannel(channel, chunk, loops, ms):
    """Fade in a channel.

    :Parameters:
        `channel` : int
            If -1, play on the first free channel.
        `chunk` : `Mix_Chunk`
            Chunk to play
        `loops` : int
            If greater than zero, the number of times to play the sound;
            if -1, loop infinitely.
        `ms` : int
            Number of milliseconds to fade up over.

    :rtype: int
    :return: the channel that was used to play the sound.
    """
    # Fix: propagate the channel number, as Mix_PlayChannel does for
    # Mix_PlayChannelTimed.  Previously the result was silently dropped and
    # callers always received None.
    return Mix_FadeInChannelTimed(channel, chunk, loops, ms, -1)
# Fix: this function plays a Mix_Chunk, not music.  The arg name ('music')
# and type (_Mix_Music) were a copy-paste from the music bindings above and
# would marshal a chunk pointer incorrectly.  The C signature is
# int Mix_FadeInChannelTimed(int, Mix_Chunk*, int, int, int) — compare
# Mix_PlayChannelTimed above.
Mix_FadeInChannelTimed = _dll.function(
    'Mix_FadeInChannelTimed',
    '''Fade in a channel and play for a specified amount of time.

    :Parameters:
        `channel` : int
            If -1, play on the first free channel.
        `chunk` : `Mix_Chunk`
            Chunk to play
        `loops` : int
            If greater than zero, the number of times to play the sound;
            if -1, loop infinitely.
        `ms` : int
            Number of milliseconds to fade up over.
        `ticks` : int
            Maximum number of milliseconds to play sound for.
    ''',
    args=['channel', 'chunk', 'loops', 'ms', 'ticks'],
    arg_types=[c_int, POINTER(Mix_Chunk), c_int, c_int, c_int],
    return_type=c_int,
    error_return=-1)
Mix_Volume = _dll.function(
'Mix_Volume',
'''Set the volume in the range of 0-128 of a specific channel.
:Parameters:
`channel` : int
If -1, set the volume for all channels
`volume` : int
Volume to set, in the range 0-128, or -1 to just return the
current volume.
:rtype: int
:return: the original volume.
''',
args=['channel', 'volume'],
arg_types=[c_int, c_int],
return_type=c_int)
Mix_VolumeChunk = _dll.function(
'Mix_VolumeChunk',
'''Set the volume in the range of 0-128 of a chunk.
:Parameters:
`chunk` : `Mix_Chunk`
Chunk to set volume.
`volume` : int
Volume to set, in the range 0-128, or -1 to just return the
current volume.
:rtype: int
:return: the original volume.
''',
args=['chunk', 'volume'],
arg_types=[POINTER(Mix_Chunk), c_int],
return_type=c_int)
Mix_VolumeMusic = _dll.function(
'Mix_VolumeMusic',
'''Set the volume in the range of 0-128 of the music.
:Parameters:
`volume` : int
Volume to set, in the range 0-128, or -1 to just return the
current volume.
:rtype: int
:return: the original volume.
''',
args=['volume'],
arg_types=[c_int],
return_type=c_int)
Mix_HaltChannel = _dll.function(
'Mix_HaltChannel',
'''Halt playing of a particular channel.
:Parameters:
- `channel`: int
''',
args=['channel'],
arg_types=[c_int],
return_type=None)
Mix_HaltGroup = _dll.function(
'Mix_HaltGroup',
'''Halt playing of a particular group.
:Parameters:
- `tag`: int
''',
args=['tag'],
arg_types=[c_int],
return_type=None)
Mix_HaltMusic = _dll.function(
'Mix_HaltMusic',
'''Halt playing music.
''',
args=[],
arg_types=[],
return_type=None)
# Fixed user-visible typo in the doc string ("afte" -> "after").
Mix_ExpireChannel = _dll.function(
    'Mix_ExpireChannel',
    '''Change the expiration delay for a particular channel.

    The sample will stop playing after the `ticks` milliseconds have
    elapsed, or remove the expiration if `ticks` is -1.

    :Parameters:
     - `channel`: int
     - `ticks`: int

    :rtype: int
    :return: the number of channels affected.
    ''',
    args=['channel', 'ticks'],
    arg_types=[c_int, c_int],
    return_type=c_int)
Mix_FadeOutChannel = _dll.function(
'Mix_FadeOutChannel',
'''Halt a channel, fading it out progressively until it's silent.
The `ms` parameter indicates the number of milliseconds the fading
will take.
:Parameters:
- `channel`: int
- `ms`: int
''',
args=['channel', 'ms'],
arg_types=[c_int, c_int],
return_type=None)
Mix_FadeOutGroup = _dll.function(
'Mix_FadeOutGroup',
'''Halt a group, fading it out progressively until it's silent.
The `ms` parameter indicates the number of milliseconds the fading
will take.
:Parameters:
- `tag`: int
- `ms`: int
''',
args=['tag', 'ms'],
arg_types=[c_int, c_int],
return_type=None)
Mix_FadeOutMusic = _dll.function(
'Mix_FadeOutMusic',
'''Halt playing music, fading it out progressively until it's silent.
The `ms` parameter indicates the number of milliseconds the fading
will take.
:Parameters:
- `ms`: int
''',
args=['ms'],
arg_types=[c_int],
return_type=None)
Mix_FadingMusic = _dll.function(
'Mix_FadingMusic',
'''Query the fading status of the music.
:rtype: int
:return: one of MIX_NO_FADING, MIX_FADING_OUT, MIX_FADING_IN.
''',
args=[],
arg_types=[],
return_type=c_int)
Mix_FadingChannel = _dll.function(
'Mix_FadingChannel',
'''Query the fading status of a channel.
:Parameters:
- `channel`: int
:rtype: int
:return: one of MIX_NO_FADING, MIX_FADING_OUT, MIX_FADING_IN.
''',
args=['channel'],
arg_types=[c_int],
return_type=c_int)
Mix_Pause = _dll.function(
'Mix_Pause',
'''Pause a particular channel.
:Parameters:
| |
import unittest
import threading
import socket
import re
import random
from time import sleep
from coapthon.resource_directory.resourceDirectory import ResourceDirectory
from coapthon.messages.response import Response
from coapthon.messages.request import Request
from coapthon import defines
from coapthon.serializer import Serializer
from pymongo import MongoClient
from coapthon.client.helperclient import HelperClient
__author__ = '<NAME>'
class ResourceDirectoryTest(unittest.TestCase):
    def setUp(self):
        """Start a ResourceDirectory server on localhost and reset state."""
        self.server_address = ("127.0.0.1", 5683)
        # Random starting message-id so consecutive runs don't reuse MIDs.
        self.current_mid = random.randint(1, 1000)
        self.server = ResourceDirectory("127.0.0.1", 5683, start_mongo=False)
        self.server_thread = threading.Thread(target=self.server.listen, args=(10,))
        self.server_thread.start()
        # Each test starts from an empty resources collection.
        self.delete_database()
    def tearDown(self):
        """Stop the server and wait (bounded) for the listener thread to exit."""
        self.server.close()
        # Bounded join: the listen() loop polls with a timeout, so give it
        # time to notice the close without hanging the test run forever.
        self.server_thread.join(timeout=25)
        self.server = None
@staticmethod
def delete_database():
database = defines.MONGO_DATABASE
connection = MongoClient(defines.MONGO_HOST, defines.MONGO_PORT, username=defines.MONGO_USER,
password=defines.MONGO_PWD, authSource=database, authMechanism='SCRAM-SHA-1')
collection = connection[database].resources
try:
collection.delete_many({})
except:
print("Error in delete_database")
@staticmethod
def parse_core_link_format(link_format):
data = []
while len(link_format) > 0:
pattern = "<([^>]*)>;"
result = re.match(pattern, link_format)
path = result.group(1)
link_format = link_format[result.end(1) + 2:]
pattern = "([^<,])*"
result = re.match(pattern, link_format)
attributes = result.group(0)
dict_att = {}
if len(attributes) > 0:
attributes = attributes.split(";")
for att in attributes:
a = att.split("=")
if len(a) > 1:
if a[1].isdigit():
a[1] = int(a[1])
else:
a[1] = a[1].replace('"', '')
dict_att[a[0]] = a[1]
else:
dict_att[a[0]] = a[0]
link_format = link_format[result.end(0) + 1:]
tmp = {'path': path}
dict_att.update(tmp)
data.append(dict_att)
return data
    def _test_check(self, message_list, timeout=0):
        """Send each request over UDP and assert the response matches.

        :param message_list: list of (request, expected_response) pairs;
            either element may be None to skip sending / checking.
        :param timeout: seconds to sleep before each send (used by the
            expiry tests to let registrations lapse).
        """
        serializer = Serializer()
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        for message, expected in message_list:
            if message is not None:
                datagram = serializer.serialize(message)
                sleep(timeout)
                sock.sendto(datagram, message.destination)
            if expected is not None:
                datagram, source = sock.recvfrom(4096)
                received_message = serializer.deserialize(datagram, source)
                # Only the fields set on the expected message are compared.
                if expected.type is not None:
                    self.assertEqual(received_message.type, expected.type)
                if expected.mid is not None:
                    self.assertEqual(received_message.mid, expected.mid)
                self.assertEqual(received_message.code, expected.code)
                if expected.source is not None:
                    self.assertEqual(received_message.source, source)
                if expected.token is not None:
                    self.assertEqual(received_message.token, expected.token)
                if expected.content_type is not None:
                    self.assertEqual(received_message.content_type, expected.content_type)
                if expected.payload is not None:
                    # Link-format payloads are compared structurally (order
                    # independent), pairing entries by path; 'lt' values are
                    # skipped since the remaining lifetime is time-dependent.
                    expected_list = self.parse_core_link_format(expected.payload)
                    received_list = self.parse_core_link_format(received_message.payload)
                    all_list = []
                    for expected_elem in expected_list:
                        for received_elem in received_list:
                            if expected_elem['path'] == received_elem['path']:
                                all_list_elem = (expected_elem, received_elem)
                                all_list.append(all_list_elem)
                    for data in all_list:
                        for k in data[1]:
                            self.assertIn(k, data[0])
                            if (k != "lt") and (k in data[0]):
                                self.assertEqual(data[0][k], data[1][k])
                else:
                    self.assertEqual(expected.payload, received_message.payload)
        sock.close()
def test_uri_discovery(self):
print("Uri discovery")
path = ".well-known/core"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '</rd-lookup/res>;rt="core.rd-lookup-res";ct=40,</rd>;rt="core.rd";ct=40,' \
'</rd-lookup/ep>;rt="core.rd-lookup-ep";ct=40'
self.current_mid += 1
self._test_check([(req, expected)])
def test_registration(self):
print("Registration")
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = defines.Content_types["application/link-format"]
req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CREATED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
    def test_lookup_res(self):
        """Register via HelperClient, then look the resource up by rt filter."""
        print("Resource lookup")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        client.post(path, payload, None, None, **ct)
        client.stop()
        # Lookup filtered by endpoint name and resource type.
        path = "rd-lookup/res?ep=node1&rt=temperature-c"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        # The RD rewrites the relative link against the registered 'con' base.
        expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
                           'if="sensor";anchor="coap://spurious.example.com:5683"'
        self.current_mid += 1
        self._test_check([(req, expected)])
    def test_lookup_ep(self):
        """Register with an endpoint type, then look the endpoint up by et."""
        print("Endpoint lookup")
        client = HelperClient(self.server_address)
        path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
        ct = {'content_type': defines.Content_types["application/link-format"]}
        payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
                  '</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        response = client.post(path, payload, None, None, **ct)
        # The registration resource path is server-assigned.
        loc_path = response.location_path
        client.stop()
        path = "rd-lookup/ep?et=oic.d.sensor"
        req = Request()
        req.code = defines.Codes.GET.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = 0
        req.payload = None
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.CONTENT.number
        expected.token = None
        expected.content_type = defines.Content_types["application/link-format"]
        # lt=500 here is only a placeholder: _test_check skips 'lt' values.
        expected.payload = '</' + loc_path + '>;con="coap://local-proxy-old.example.com:5683";ep="node1";' \
                           'et="oic.d.sensor";lt=500'
        self.current_mid += 1
        self._test_check([(req, expected)])
def test_update(self):
print("Update")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path + "?con=coaps://new.example.com:5684"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CHANGED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_read_endpoint_links(self):
print("Read endpoint links")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683",' \
'<coap://local-proxy-old.example.com:5683/sensors/light>;ct=41;rt="light-lux";if="sensor"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_delete(self):
print("Delete")
client = HelperClient(self.server_address)
path = "rd?ep=endpoint1<=500&con=coap://local-proxy-old.example.com:5683&et=oic.d.sensor"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
loc_path = response.location_path
client.stop()
path = loc_path
req = Request()
req.code = defines.Codes.DELETE.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.DELETED.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
def test_lookup_expired_res(self):
print("Expired resource lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/res?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)], 61)
def test_lookup_expired_ep(self):
print("Expired endpoint lookup")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd-lookup/ep?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = None
self.current_mid += 1
# After 61 seconds the resource will be expired
self._test_check([(req, expected)], 61)
def test_update_expired(self):
print("Update expired registration resource")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
response = client.post(path, payload, None, None, **ct)
# After 61 seconds the resource will be expired
sleep(61)
loc_path = response.location_path
client.post(loc_path, None)
client.stop()
path = "rd-lookup/res?ep=node1&rt=temperature-c"
req = Request()
req.code = defines.Codes.GET.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = 0
req.payload = None
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.CONTENT.number
expected.token = None
expected.content_type = defines.Content_types["application/link-format"]
expected.payload = '<coap://local-proxy-old.example.com:5683/sensors/temp>;ct=41;rt="temperature-c";' \
'if="sensor";anchor="coap://spurious.example.com:5683"'
self.current_mid += 1
self._test_check([(req, expected)])
def test_wrong_ep(self):
print("Endpoint name already exists")
client = HelperClient(self.server_address)
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683<=60"
ct = {'content_type': defines.Content_types["application/link-format"]}
payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";anchor="coap://spurious.example.com:5683",' \
'</sensors/light>;ct=41;rt="light-lux";if="sensor"'
client.post(path, payload, None, None, **ct)
client.stop()
path = "rd?ep=node1&con=coap://local-proxy-old.example.com:5683"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = self.server_address
req.content_type = defines.Content_types["application/link-format"]
req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
expected = Response()
expected.type = defines.Types["ACK"]
expected._mid = self.current_mid
expected.code = defines.Codes.SERVICE_UNAVAILABLE.number
expected.token = None
expected.content_type = 0
expected.payload = None
self.current_mid += 1
self._test_check([(req, expected)])
    def test_no_ep(self):
        """Registration without the mandatory 'ep' parameter must be 4.00."""
        print("Registration without endpoint name")
        path = "rd?con=coap://local-proxy-old.example.com:5683"
        req = Request()
        req.code = defines.Codes.POST.number
        req.uri_path = path
        req.type = defines.Types["CON"]
        req._mid = self.current_mid
        req.destination = self.server_address
        req.content_type = defines.Content_types["application/link-format"]
        req.payload = '</sensors/temp>;ct=41;rt="temperature-c";if="sensor";' \
                      'anchor="coap://spurious.example.com:5683",</sensors/light>;ct=41;rt="light-lux";if="sensor"'
        expected = Response()
        expected.type = defines.Types["ACK"]
        expected._mid = self.current_mid
        expected.code = defines.Codes.BAD_REQUEST.number
        expected.token = None
        expected.content_type = 0
        expected.payload = None
        self.current_mid += 1
        self._test_check([(req, expected)])
def test_update_res_not_found(self):
print("Resource not found on update")
path = "rd/4521?con=coaps://new.example.com:5684"
req = Request()
req.code = defines.Codes.POST.number
req.uri_path = path
req.type = defines.Types["CON"]
req._mid = self.current_mid
req.destination = | |
"""
Implementation of textX language in Arpeggio.
The idea for this language is shamelessly stolen from the Xtext language but
there are some differences in both syntax and semantics. To make things clear I
have named this language textX ;)
"""
from __future__ import unicode_literals
import re
from arpeggio import StrMatch, Optional, ZeroOrMore, OneOrMore, Sequence,\
OrderedChoice, UnorderedGroup, Not, And, RegExMatch, Match, NoMatch, EOF, \
ParsingExpression, ParserPython, PTNodeVisitor, visit_parse_tree
from arpeggio.export import PMDOTExporter
from arpeggio import RegExMatch as _
from .exceptions import TextXError, TextXSyntaxError, TextXSemanticError
from .const import MULT_ONE, MULT_ZEROORMORE, MULT_ONEORMORE, \
MULT_OPTIONAL, RULE_COMMON, RULE_MATCH, RULE_ABSTRACT, mult_lt
import sys
# Py2/py3 string-type alias.  Fixed: comparing the free-form string
# `sys.version` against '3' is fragile (misorders "10.x" and depends on the
# full version banner); `sys.version_info` compares numerically.
if sys.version_info[0] < 3:
    text = unicode  # noqa
else:
    text = str
# textX grammar
# Arpeggio PEG rules describing the textX grammar itself.  Each function
# returns a parsing-expression definition; arpeggio resolves the referenced
# rule functions by name when ParserPython is constructed.
def textx_model(): return (ZeroOrMore(import_or_reference_stm),
                           ZeroOrMore(textx_rule), EOF)
def import_or_reference_stm(): return [import_stm, reference_stm]
def import_stm(): return 'import', grammar_to_import
def reference_stm(): return ('reference', language_name,
                             Optional(language_alias))
def language_alias(): return 'as', ident
def language_name(): return _(r'(\w|-)+')
def grammar_to_import(): return _(r'(\w|\.)+')
# Rules
def textx_rule(): return rule_name, Optional(rule_params), ":", textx_rule_body, ";"
def rule_params(): return '[', rule_param, ZeroOrMore(',', rule_param), ']'
def rule_param(): return param_name, Optional('=', string_value)
def param_name(): return ident
def textx_rule_body(): return choice
def choice(): return sequence, ZeroOrMore("|", sequence)
def sequence(): return OneOrMore(repeatable_expr)
# An expression with an optional repetition operator; the trailing '-' is
# the match-suppression marker.
def repeatable_expr(): return expression, Optional(repeat_operator), Optional('-')
def expression(): return [assignment, (Optional(syntactic_predicate),
                                       [simple_match, rule_ref,
                                        bracketed_choice])]
def bracketed_choice(): return '(', choice, ')'
def repeat_operator(): return ['*', '?', '+', '#'], Optional(repeat_modifiers)
# e.g. *[','] — separator and/or 'eolterm' modifiers for a repetition.
def repeat_modifiers(): return '[', OneOrMore([simple_match,
                                               'eolterm']), ']'
def syntactic_predicate(): return ['!', '&']
def simple_match(): return [str_match, re_match]
# Assignment
def assignment(): return attribute, assignment_op, assignment_rhs
def attribute(): return ident
# '=' plain, '*=' zero-or-more, '+=' one-or-more, '?=' boolean assignment.
def assignment_op(): return ["=", "*=", "+=", "?="]
def assignment_rhs(): return [simple_match, reference], Optional(repeat_modifiers)
# References
def reference(): return [rule_ref, obj_ref]
def rule_ref(): return ident
# Link rule reference: [TargetClass] or [TargetClass|rule_to_match_name].
def obj_ref(): return '[', class_name, Optional('|', obj_ref_rule), ']'
def rule_name(): return ident
def obj_ref_rule(): return ident
def class_name(): return qualified_ident
def str_match(): return string_value
# Regex match delimited by slashes; escaped slashes allowed inside.
def re_match(): return _(r"/((?:(?:\\/)|[^/])*)/")
def ident(): return _(r'\w+')
def qualified_ident(): return _(r'\w+(\.\w+)?')
def integer(): return _(r'[-+]?[0-9]+')
# Single- or double-quoted string with backslash-escaped quotes.
def string_value(): return [_(r"'((\\')|[^'])*'"),
                            _(r'"((\\")|[^"])*"')]
# Comments
def comment(): return [comment_line, comment_block]
def comment_line(): return _(r'//.*?$')
def comment_block(): return _(r'/\*(.|\n)*?\*/')
# Special rules - primitive types
# Root match rules for textX's built-in base types, used directly by model
# parsers.  The FLOAT/STRICTFLOAT look-around suffixes anchor the number at
# a word boundary without consuming it.
ID = _(r'[^\d\W]\w*\b', rule_name='ID', root=True)
BOOL = _(r'(True|true|False|false|0|1)\b', rule_name='BOOL', root=True)
INT = _(r'[-+]?[0-9]+\b', rule_name='INT', root=True)
FLOAT = _(r'[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(?<=[\w\.])(?![\w\.])',
          'FLOAT', root=True)
# STRICTFLOAT requires a '.' or exponent so plain integers don't match;
# NUMBER therefore tries STRICTFLOAT before falling back to INT.
STRICTFLOAT = _(r'[+-]?(((\d+\.(\d*)?|\.\d+)([eE][+-]?\d+)?)|((\d+)([eE][+-]?\d+)))(?<=[\w\.])(?![\w\.])',
                'STRICTFLOAT', root=True)
STRING = _(r'("(\\"|[^"])*")|(\'(\\\'|[^\'])*\')', 'STRING', root=True)
NUMBER = OrderedChoice(nodes=[STRICTFLOAT, INT], rule_name='NUMBER', root=True)
BASETYPE = OrderedChoice(nodes=[NUMBER, FLOAT, BOOL, ID, STRING],
                         rule_name='BASETYPE', root=True)
# A dummy rule for generic type. This rule should never be used for parsing.
OBJECT = _(r'', rule_name='OBJECT', root=True)
BASE_TYPE_RULES = {rule.rule_name: rule
                   for rule in [ID, BOOL, INT, FLOAT, STRICTFLOAT,
                                STRING, NUMBER, BASETYPE]}
BASE_TYPE_NAMES = list(BASE_TYPE_RULES.keys())
ALL_TYPE_NAMES = BASE_TYPE_NAMES + ['OBJECT']
PRIMITIVE_PYTHON_TYPES = [int, float, text, bool]
# Pre-compile the regex-based rules so the first parse doesn't pay for it.
for regex in [ID, BOOL, INT, FLOAT, STRICTFLOAT, STRING]:
    regex.compile()
def python_type(textx_type_name):
    """Return the Python type corresponding to a base textX type name.

    Names that are not base types (i.e. user-defined rules) are returned
    unchanged.
    """
    type_map = {
        'ID': text,
        'BOOL': bool,
        'INT': int,
        'FLOAT': float,
        'STRICTFLOAT': float,
        'STRING': text,
        'NUMBER': float,
        'BASETYPE': text,
    }
    return type_map.get(textx_type_name, textx_type_name)
class RuleCrossRef(object):
    """
    Placeholder for a cross reference between PEG rules, created while the
    meta-model parser is being built so that forward references resolve in a
    later pass.

    Attributes:
        rule_name(str): A name of the PEG rule that should be used to match
            this cross-ref. For link rule references it will be ID by default.
        cls(str or ClassCrossRef): Target class which is matched by the
            rule_name rule or which name is matched by the rule_name rule
            (for link rule references). Used for rule references in the RHS
            of the assignments to determine attribute type.
        position(int): A position in the input string of this cross-ref.
        suppress(bool): Whether the matched value is suppressed; starts False.
    """
    def __init__(self, rule_name, cls, position):
        self.position = position
        self.cls = cls
        self.rule_name = rule_name
        self.suppress = False

    def __str__(self):
        return self.rule_name

    def __unicode__(self):
        # Python 2 compatibility: delegate to __str__.
        return str(self)
class ClassCrossRef(object):
    """
    Deferred reference to a meta-model class.

    References are resolved after semantic analysis of the meta-model
    parse tree; once resolution finishes the meta-model is fully linked.

    Attributes:
        cls_name(str): Name of the target meta-model class.
        position(int): Offset of this cross-ref in the input string.
    """
    def __init__(self, cls_name, position=0):
        self.position = position
        self.cls_name = cls_name
class TextXVisitor(PTNodeVisitor):
def __init__(self, grammar_parser, metamodel):
self.grammar_parser = grammar_parser
self.metamodel = metamodel
self.debug = metamodel.debug
# Prepare regex used in keyword-like strmatch detection.
# See visit_str_match
flags = 0
if metamodel.ignore_case:
flags = re.IGNORECASE
self.keyword_regex = re.compile(r'[^\d\W]\w*', flags)
super(TextXVisitor, self).__init__()
def visit_textx_model(self, node, children):
if 'Comment' in self.metamodel:
comments_model = self.metamodel['Comment']._tx_peg_rule
else:
comments_model = None
root_rule = children[0]
from .model import get_model_parser
model_parser = get_model_parser(root_rule, comments_model,
ignore_case=self.metamodel.ignore_case,
skipws=self.metamodel.skipws,
ws=self.metamodel.ws,
autokwd=self.metamodel.autokwd,
memoization=self.metamodel.memoization,
debug=self.metamodel.debug,
file=self.metamodel.file)
model_parser.metamodel = self.metamodel
return model_parser
    def second_textx_model(self, model_parser):
        """Cross reference resolving for parser model.

        Second visitor pass.  Order matters: PEG rule cross-refs must be
        resolved before rule types can be determined, and rule types must
        be known before class cross-refs can be resolved.
        """
        if self.grammar_parser.debug:
            self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")

        self._resolve_rule_refs(self.grammar_parser, model_parser)
        self._determine_rule_types(model_parser.metamodel)
        self._resolve_cls_refs(self.grammar_parser, model_parser)

        return model_parser
    def _resolve_rule_refs(self, grammar_parser, model_parser):
        """Resolves parser ParsingExpression crossrefs.

        Replaces every RuleCrossRef placeholder reachable from the parser
        model (and from every meta-class rule) with the real PEG rule.
        """

        def _resolve_rule(rule):
            """
            Recursively resolve peg rule references.

            Args:
                rule(ParsingExpression or RuleCrossRef)
            """
            # Cross-refs are always processed; real rules already visited
            # are skipped so cyclic grammars terminate.
            if not isinstance(rule, RuleCrossRef) and rule in resolved_rules:
                return rule
            resolved_rules.add(rule)

            if grammar_parser.debug:
                grammar_parser.dprint("Resolving rule: {}".format(rule))

            if type(rule) is RuleCrossRef:
                rule_name = rule.rule_name
                suppress = rule.suppress
                if rule_name in model_parser.metamodel:
                    # Swap in the referenced rule, chasing chained
                    # cross-refs and caching the result on the meta-class.
                    rule = model_parser.metamodel[rule_name]._tx_peg_rule
                    if type(rule) is RuleCrossRef:
                        rule = _resolve_rule(rule)
                        model_parser.metamodel[rule_name]._tx_peg_rule = rule
                    if suppress:
                        # Special case. Suppression on rule reference:
                        # wrap in a suppressing Sequence but keep the
                        # original target meta-class.
                        _tx_class = rule._tx_class
                        rule = Sequence(nodes=[rule],
                                        rule_name=rule_name,
                                        suppress=suppress)
                        rule._tx_class = _tx_class
                else:
                    line, col = grammar_parser.pos_to_linecol(rule.position)
                    raise TextXSemanticError(
                        'Unexisting rule "{}" at position {}.'
                        .format(rule.rule_name,
                                (line, col)), line, col)

            assert isinstance(rule, ParsingExpression),\
                "{}:{}".format(type(rule), text(rule))

            # Recurse into subrules, and resolve rules.
            for idx, child in enumerate(rule.nodes):
                if child not in resolved_rules:
                    child = _resolve_rule(child)
                    rule.nodes[idx] = child

            return rule

        # Two pass resolving -- presumably so refs only reachable through
        # rules resolved late in pass one get fixed up in pass two.
        for i in range(2):
            if grammar_parser.debug:
                grammar_parser.dprint("RESOLVING RULE CROSS-REFS - PASS {}"
                                      .format(i + 1))
            resolved_rules = set()
            _resolve_rule(model_parser.parser_model)

            # Resolve rules of all meta-classes to handle unreferenced
            # rules also.
            for cls in model_parser.metamodel:
                cls._tx_peg_rule = _resolve_rule(cls._tx_peg_rule)
    def _determine_rule_types(self, metamodel):
        """Determine textX rule/metaclass types.

        Classifies every meta-class as common (has attributes), abstract
        (references at least one non-match rule) or match, and collects
        the 'inherited by' class lists for abstract rules.  Runs to a
        fixpoint to support circular rule references.
        """

        def _determine_rule_type(cls):
            """
            Determine rule type (abstract, match, common) and inherited
            classes.
            """
            # Each outer pass visits every class at most once.
            if cls in resolved_classes:
                return
            resolved_classes.add(cls)

            # If there are attributes collected than this is a common
            # rule
            if len(cls._tx_attrs) > 0:
                if cls._tx_type != RULE_COMMON:
                    cls._tx_type = RULE_COMMON
                    has_change[0] = True
                return

            rule = cls._tx_peg_rule

            # Check if this rule is abstract. Abstract are root rules which
            # haven't got any attributes and reference at least one non-match
            # rule.
            abstract = False
            if rule.rule_name and cls.__name__ != rule.rule_name:
                # Special case. Body of the rule is a single rule reference and
                # the referenced rule is not match rule.
                target_cls = metamodel[rule.rule_name]
                _determine_rule_type(target_cls)
                abstract = target_cls._tx_type != RULE_MATCH
            else:
                # Find at least one referenced rule that is not match rule by
                # going down the parser model and finding root rules.
                def _has_nonmatch_ref(rule):
                    # NOTE(review): implicitly returns None (falsy) when no
                    # non-match reference is found.
                    for r in rule.nodes:
                        if r.root:
                            _determine_rule_type(r._tx_class)
                            result = r._tx_class._tx_type != RULE_MATCH
                        else:
                            result = _has_nonmatch_ref(r)
                        if result:
                            return True
                abstract = _has_nonmatch_ref(rule)

            if abstract and cls._tx_type != RULE_ABSTRACT:
                cls._tx_type = RULE_ABSTRACT
                has_change[0] = True

                # Add inherited classes to this rule's meta-class
                if rule.rule_name and cls.__name__ != rule.rule_name:
                    if rule._tx_class not in cls._tx_inh_by:
                        cls._tx_inh_by.append(rule._tx_class)
                else:
                    # Recursively append all referenced classes.
                    def _add_reffered_classes(rule, inh_by, start=False):
                        # Stop at root rules (except the starting one) and
                        # record their non-match classes as inheritors.
                        if rule.root and not start:
                            if hasattr(rule, '_tx_class'):
                                _determine_rule_type(rule._tx_class)
                                if rule._tx_class._tx_type != RULE_MATCH and\
                                        rule._tx_class not in inh_by:
                                    inh_by.append(rule._tx_class)
                        else:
                            for r in rule.nodes:
                                _add_reffered_classes(r, inh_by)

                    if type(rule) is OrderedChoice:
                        for r in rule.nodes:
                            _add_reffered_classes(r, cls._tx_inh_by)
                    else:
                        _add_reffered_classes(rule, cls._tx_inh_by, start=True)

        # Multi-pass rule type resolving to support circular rule references.
        # `has_change` is a list to support outer scope variable change in
        # Python 2.x
        has_change = [True]
        while has_change[0]:
            has_change[0] = False
            resolved_classes = set()
            for cls in metamodel:
                _determine_rule_type(cls)
def _resolve_cls_refs(self, grammar_parser, model_parser):
resolved_classes = {}
def _resolve_cls(cls):
if cls in resolved_classes:
return resolved_classes[cls]
metamodel = model_parser.metamodel
to_resolve = cls
if isinstance(cls, ClassCrossRef):
try:
cls = metamodel[cls.cls_name]
except KeyError:
line, col = grammar_parser.pos_to_linecol(cls.position)
raise TextXSemanticError(
'Unknown class/rule "{}".'.format(cls.cls_name),
line=line, col=col, filename=metamodel.file_name)
resolved_classes[to_resolve] = cls
if cls._tx_type == RULE_ABSTRACT:
# Resolve inherited classes
for idx, inh in enumerate(cls._tx_inh_by):
inh = _resolve_cls(inh)
cls._tx_inh_by[idx] = inh
else:
# If this is not abstract class than it must be common or
# match. Resolve referred classes.
for attr in cls._tx_attrs.values():
attr.cls = _resolve_cls(attr.cls)
# If target cls is of a base type or match rule
# then attr can not be a reference.
if attr.cls.__name__ in BASE_TYPE_NAMES \
or attr.cls._tx_type == RULE_MATCH:
attr.ref = False
attr.cont = True
attr.is_base_type = True
else:
attr.ref = True
attr.is_base_type = False
| |
from tally import tally
from collections import OrderedDict
import json
from cktgen.transformation import Rect, Transformation, Tech
class GR:
    """A single global-route segment: a wire of width `width` on routing
    layer `layer`, spanning `rect` and carrying net `netName`."""

    def __init__(self, netName=None, layer=None, width=None, rect=None):
        self.rect = rect
        self.width = width
        self.layer = layer
        self.netName = netName
def encode_GR( tech, obj):
    """JSON-encoder hook for GR objects.

    Converts a GR's global-route grid coordinates into physical
    coordinates using the tech's pitches and half-grid sizes, and returns
    a plain serializable dict.  Raises TypeError for non-GR objects so it
    can be passed as `default=` to json.dumps.
    """
    #
    # Very broken ---
    # remove *2 and //2 later
    #
    if isinstance(obj, GR):
        # Convert global route coords to physical coords
        if obj.layer in tech.verticalMetals:
            # Vertical wire: must be a zero-width column in GR coords.
            assert obj.rect.llx == obj.rect.urx
            # Center x on the track of this GR column (poly pitch units),
            # then widen by half the wire width on each side.
            xc = tech.pitchPoly*(tech.halfXGRGrid*2*obj.rect.llx + tech.halfXGRGrid)
            llx = xc - obj.width//2
            urx = xc + obj.width//2
            # Extend y to just inside the GR cell boundary on both ends --
            # TODO confirm the intended overhang; author marked this math
            # "Very broken" above.
            lly = tech.pitchDG*(tech.halfYGRGrid*2*obj.rect.lly + tech.halfYGRGrid - (tech.halfYGRGrid - 1))
            ury = tech.pitchDG*(tech.halfYGRGrid*2*obj.rect.ury + tech.halfYGRGrid + (tech.halfYGRGrid - 1))
        elif obj.layer in tech.horizontalMetals:
            # Horizontal wire: zero-height row in GR coords; mirror of the
            # vertical case with x/y roles swapped.
            assert obj.rect.lly == obj.rect.ury
            yc = tech.pitchDG*(tech.halfYGRGrid*2*obj.rect.lly + tech.halfYGRGrid)
            lly = yc - obj.width//2
            ury = yc + obj.width//2
            llx = tech.pitchPoly*(tech.halfXGRGrid*2*obj.rect.llx + tech.halfXGRGrid - (tech.halfXGRGrid - 1))
            urx = tech.pitchPoly*(tech.halfXGRGrid*2*obj.rect.urx + tech.halfXGRGrid + (tech.halfXGRGrid - 1))
        else:
            raise RuntimeError(repr(obj) + ("is not horizontal nor vertical (%d,%d,%d,%d)." % (obj.rect.llx,obj.rect.lly,obj.rect.urx,obj.rect.ury)))
        return { "netName" : obj.netName, "layer" : obj.layer, "width" : obj.width, "rect" : [llx, lly, urx, ury]}
    else:
        raise TypeError(repr(obj) + " is not JSON serializable.")
class Grid:
def __init__( self, nx, ny, gridFactor=4):
self.nx = nx
self.ny = ny
self.gridFactor = gridFactor
self.nets = OrderedDict()
self.layers = ['metal2','metal3']
self.routes = OrderedDict()
print( "Grid size:", self.nx, self.ny)
    def dumpJSON( self, fp, tech):
        """Serialize all realized wires to file object `fp` as JSON.

        NOTE(review): iterates self.wires, which is never assigned in this
        chunk -- presumably populated by a solution-extraction step defined
        elsewhere; verify before relying on this.  `tech` is unused here.
        """
        wires = []
        # self.wires: {net: {layer: [GR, ...]}} -- flatten to one list.
        for (k,v) in self.wires.items():
            for (ly,grs) in v.items():
                for gr in grs:
                    wires.append( { "layer": gr.layer, "net_name": gr.netName, "width": gr.width, "rect": gr.rect.toList()})
        data = { "wires": wires}
        fp.write( json.dumps( data, indent=2) + "\n")
def addTerminal( self, net_nm, x, y):
if net_nm not in self.nets: self.nets[net_nm] = set()
self.nets[net_nm].add( ( x, y))
def idx( self, x, y):
return self.ny*x + y
def allRasterPoints( self):
for x in range(self.nx):
for y in range(self.ny):
for (k,v) in self.per_net_grid.items():
for (ly,bv) in v.items():
yield x,y,k,ly,bv
    def cleanAntennas( self):
        """Three-phase pass that freezes the current SAT solution and then
        strips removable metal ("antennas") site by site.

        Assumes the solver has already produced a model, so r.val() and
        bv.val() return concrete booleans.
        """
        # Phase 1: pin every route-selection variable to its current value.
        print( "Phase 1: cleanAntennas: force all routing decision to remain.")
        for (k,v) in self.routes.items():
            for r in v:
                if r.val() is True:
                    self.s.emit_always( r.var())
                elif r.val() is False:
                    self.s.emit_never( r.var())
        self.s.solve()
        assert self.s.state == 'SAT'

        # Phase 2: pin every currently-unoccupied site to stay empty.
        print( "Phase 2: cleanAntennas: force all empty sites to remain empty.")
        for (x,y,k,ly,bv) in self.allRasterPoints():
            if bv.val( self.idx(x,y)) is False:
                self.s.emit_never( bv.var( self.idx(x,y)))
        self.s.solve()
        assert self.s.state == 'SAT'

        # Phase 3: for each occupied site, test under an assumption whether
        # the instance stays SAT with that site empty; if so, force it
        # empty permanently (the metal there was unneeded antenna).
        print( "Phase 3: cleanAntennas: one by one, check if a site can be made empty, then force it to remain empty.")
        for (x,y,k,ly,bv) in self.allRasterPoints():
            if bv.val( self.idx(x,y)) is True:
                self.s.solve( assumptions=[-bv.var( self.idx(x,y))])
                if self.s.state == 'SAT':
                    print( "Removing antenna from %s %s %d %d" % (k,ly,x,y))
                    self.s.emit_never( bv.var( self.idx(x,y)))
        self.s.solve()
        assert self.s.state == 'SAT'
    def genMaxCapacityConstraints( self, max_capacity):
        """Bound the number of nets occupying each (layer, site).

        For every layer and grid point, a tally circuit counts how many
        nets claim the site; forbidding the last tally output limits the
        count to at most `max_capacity` (presumed tally semantics --
        confirm against the tally library).
        """
        self.max_capacity_constraints = OrderedDict()
        for ly in self.layers:
            self.max_capacity_constraints[ly] = OrderedDict()
            for x in range(self.nx):
                for y in range(self.ny):
                    outs_bv = tally.BitVec( self.s, 'cap_%s_%d_%d' % (ly,x,y), max_capacity+1)
                    self.max_capacity_constraints[ly][(x,y)] = outs_bv
                    outs = [ outs_bv.var( i) for i in range(max_capacity+1)]
                    # One input bit per net: does net k occupy (x, y) on ly?
                    inps = [ self.per_net_grid[k][ly].var( self.idx(x,y)) for k in self.nets.keys()]
                    self.s.emit_tally( inps, outs)
                    # Forbid the occupancy count from exceeding max_capacity.
                    self.s.emit_never( outs_bv.var( max_capacity))
    def genDifferentNetMaxCapacityConstraints( self, max_capacity):
        """Bound cross-layer crowding between different nets at a site.

        For every grid point, every ordered pair of distinct layers
        (l, ll), and every net k: count k's occupancy on l together with
        every *other* net's occupancy on ll, and forbid that count from
        exceeding `max_capacity`.
        """
        for x in range(self.nx):
            for y in range(self.ny):
                # All ordered pairs of distinct layers.
                for (l,ll) in ( (l, ll) for l in self.layers for ll in self.layers if l != ll):
                    for k in self.per_net_grid.keys():
                        # k itself on layer l, plus every other net on ll.
                        inps = [ self.per_net_grid[k ][l ].var( self.idx(x,y))] + \
                               [ self.per_net_grid[kk][ll].var( self.idx(x,y)) for kk in self.per_net_grid.keys() if k != kk]
                        outs_bv = tally.BitVec( self.s, 'cap2_%s_%s_%s_%d_%d' % (l,ll,k,x,y), max_capacity+1)
                        outs = [ outs_bv.var( i) for i in range( max_capacity+1)]
                        self.s.emit_tally( inps, outs)
                        self.s.emit_never( outs_bv.var( max_capacity))
    def genRoutes( self):
        """For each net with two or more distinct terminals, enumerate
        trunk-and-stub route candidates and require at least one be chosen.

        Each candidate is guarded by a BitVar: a trunk spanning the net's
        terminal bounding box plus a stub from every terminal to the
        trunk.  Vertical trunks use metal3 with metal2 stubs; horizontal
        trunks use metal2 with metal3 stubs.
        """
        hly = "metal2"  # horizontal routing layer
        vly = "metal3"  # vertical routing layer
        for (k,v) in self.nets.items():
            v = list(set(v))  # drop duplicate terminal positions
            if len(v) < 2: continue  # nothing to route
            self.routes[k] = []
            for xy in v:
                assert 0 <= xy[0] < self.nx, ( k, v, self.nx, self.ny)
                assert 0 <= xy[1] < self.ny, ( k, v, self.nx, self.ny)
            # Bounding box of the net's terminals.
            minx = min( xy[0] for xy in v)
            maxx = max( xy[0] for xy in v)
            miny = min( xy[1] for xy in v)
            maxy = max( xy[1] for xy in v)
            # step in x: one candidate per column -- vertical trunk at x,
            # horizontal stubs to every terminal off that column.
            if minx < maxx:
                for x in range(minx,maxx+1):
                    r = tally.BitVar( self.s, '%s_route_x_%d' % ( k, x))
                    self.routes[k].append( r)
                    self.emitWire( k, r, vly, x, miny, x, maxy) # trunk
                    for (xx,yy) in v: # stubs
                        if x != xx:
                            self.emitWire( k, r, hly, min(x,xx), yy, max(x,xx), yy)
            # step in y: mirror image -- horizontal trunk, vertical stubs.
            if miny < maxy:
                for y in range(miny,maxy+1):
                    r = tally.BitVar( self.s, '%s_route_y_%d' % ( k, y))
                    self.routes[k].append( r)
                    self.emitWire( k, r, hly, minx, y, maxx, y) # trunk
                    for (xx,yy) in v: # stubs
                        if y != yy:
                            self.emitWire( k, r, vly, xx, min(y,yy), xx, max(y,yy))
            # At least one candidate must be realized (the list is empty
            # when all terminals share one grid point).
            if self.routes[k]:
                self.s.emit_at_least_one( [ bv.var() for bv in self.routes[k]])
    def genSymmetricRoutes( self, n0, n1):
        """Generate mirror-image route candidates for a symmetric pair of
        two-terminal nets, mirrored about a vertical axis.

        Each candidate BitVar simultaneously wires net k0 and the
        x-mirrored copy for net k1 (x -> xdist - x), so any selected
        solution is symmetric by construction.  Exactly one candidate is
        realized.

        NOTE(review): only self.routes[k0] receives selection variables;
        k1 has no independent entry -- confirm downstream consumers
        (e.g. cleanAntennas) expect this.
        """
        hly = "metal2"  # horizontal layer
        vly = "metal3"  # vertical layer
        (k0,v0) = n0
        (k1,v1) = n1
        assert len(v0) == 2
        assert len(v1) == 2
        # check assumed mirroring about a vertical line
        v0 = list(v0)
        v1 = list(v1)
        # Order each net's two terminals by ascending x.
        if v0[0][0] > v0[1][0]:
            v0[0],v0[1] = v0[1],v0[0]
        if v1[0][0] > v1[1][0]:
            v1[0],v1[1] = v1[1],v1[0]
        # y coords the same
        assert v0[0][1] == v1[1][1]
        assert v0[1][1] == v1[0][1]
        # x coords flip: the map x -> xdist - x swaps the two nets.
        xdist = v1[1][0] - v0[0][0]
        assert v0[0][0] == xdist - v1[1][0]
        assert v0[1][0] == xdist - v1[0][0]

        def allStepX( k, v, kk, vv):
            # One candidate per column x between the terminals: horizontal
            # approach, vertical jog at x, horizontal exit.  kk gets the
            # x-mirrored copy of each wire (vv unused; mirror comes from
            # xdist).
            x0,y0 = v[0]
            x1,y1 = v[1]
            if x0 > x1:
                x0,y0,x1,y1 = x1,y1,x0,y0
            for x in range(x0,x1+1):
                r = tally.BitVar( self.s, '%s_%s_symmetric_route_x_%d' % ( k, kk, x))
                self.routes[k].append( r)
                if x != x0: self.emitWire( k, r, hly, x0, y0, x, y0)
                self.emitWire( k, r, vly, x, y0, x, y1)
                if x != x1: self.emitWire( k, r, hly, x, y1, x1, y1)
                if x != x0: self.emitWire( kk, r, hly, xdist - x, y0, xdist - x0, y0)
                self.emitWire( kk, r, vly, xdist - x, y0, xdist - x, y1)
                if x != x1: self.emitWire( kk, r, hly, xdist - x1, y1, xdist - x, y1)

        def allStepY( k, v, kk, vv):
            # One candidate per row y between the terminals: vertical
            # approach, horizontal jog at y, vertical exit; kk mirrored
            # in x as above.
            x0,y0 = v[0]
            x1,y1 = v[1]
            if y0 > y1:
                x0,y0,x1,y1 = x1,y1,x0,y0
            for y in range(y0,y1+1):
                r = tally.BitVar( self.s, '%s_%s_symmetric_route_y_%d' % ( k, kk, y))
                self.routes[k].append( r)
                if y != y0: self.emitWire( k, r, vly, x0, y0, x0, y)
                self.emitWire( k, r, hly, x0, y, x1, y)
                if y != y1: self.emitWire( k, r, vly, x1, y, x1, y1)
                if y != y0: self.emitWire( kk, r, vly, xdist - x0, y0, xdist - x0, y)
                self.emitWire( kk, r, hly, xdist - x1, y, xdist - x0, y)
                if y != y1: self.emitWire( kk, r, vly, xdist - x1, y, xdist - x1, y1)

        self.routes[k0] = []
        allStepX( k0, v0, k1, v1)
        allStepY( k0, v0, k1, v1)
        # Exactly one symmetric candidate is realized.
        self.s.emit_exactly_one( [ bv.var() for bv in self.routes[k0]])
def semantic( self, max_capacity=1, different_net_max_capacity=None):
self.s = tally.Tally()
self.per_net_grid = OrderedDict()
for k in list(self.nets.keys()) + ['!kor']:
self.per_net_grid[k] = OrderedDict()
for ly in self.layers:
self.per_net_grid[k][ly] = tally.BitVec( self.s, k + '_' + ly, self.nx * self.ny)
self.genMaxCapacityConstraints( max_capacity=max_capacity)
if different_net_max_capacity is not None:
self.genDifferentNetMaxCapacityConstraints( different_net_max_capacity)
self.genRoutes()
def semanticSymmetric( self, max_capacity=1, different_net_max_capacity=None):
self.s = tally.Tally()
self.per_net_grid = OrderedDict()
for k in list(self.nets.keys()) + ['!kor']:
self.per_net_grid[k] = OrderedDict()
for ly in self.layers:
self.per_net_grid[k][ly] = tally.BitVec( self.s, k + '_' + ly, self.nx * self.ny)
self.genMaxCapacityConstraints( max_capacity=max_capacity)
if different_net_max_capacity is not None:
self.genDifferentNetMaxCapacityConstraints( different_net_max_capacity)
items = list(self.nets.items())
assert len(items) == 2
self.genSymmetricRoutes( items[0], items[1])
def emitWire( self, k, r, ly, x0, y0, x1, y1):
print( "Call emitWire", k, ly, x0, x0, x1, y1)
assert 0 <= x0 < self.nx, (x0,y0,x1,y1,self.nx,self.ny)
assert 0 <= x1 < self.nx, (x0,y0,x1,y1,self.nx,self.ny)
assert 0 <= y0 < self.ny, (x0,y0,x1,y1,self.nx,self.ny)
assert 0 <= y1 < self.ny, (x0,y0,x1,y1,self.nx,self.ny)
if x0 != x1:
assert y0 == y1
if x0 > x1: x0,x1 = x1,x0
for x in range( x0, x1+1):
print( k, x, y0, y1)
self.s.emit_implies( r.var(), self.per_net_grid[k][ly].var( self.idx(x,y0)))
if y0 != y1:
assert x0 == x1
if y0 > y1: y0,y1 = y1,y0
for y in range( y0, y1+1):
| |
account, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns (container_count, object_count) for an account.
:param account: Account on which to get the information.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
resp = self.make_request('HEAD', path, {}, acceptable_statuses)
if not resp.status_int // 100 == 2:
return (0, 0)
return (int(resp.headers.get('x-account-container-count', 0)),
int(resp.headers.get('x-account-object-count', 0)))
def get_account_metadata(
self, account, metadata_prefix='', acceptable_statuses=(2,)):
"""
Gets account metadata.
:param account: Account on which to get the metadata.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns : Returns dict of account metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
return self._get_metadata(path, metadata_prefix, acceptable_statuses)
def set_account_metadata(
self, account, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets account metadata. A call to this will add to the account
metadata and not overwrite all of it with values in the metadata dict.
To clear an account metadata value, pass an empty string as
the value for the key in the metadata dict.
:param account: Account on which to get the metadata.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# container methods
def container_exists(self, account, container):
"""
Checks to see if a container exists.
:param account: The container's account.
:param container: Container to check.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
:returns : True if container exists, false otherwise.
"""
path = self.make_path(account, container)
resp = self.make_request('HEAD', path, {}, (2, HTTP_NOT_FOUND))
return not resp.status_int == HTTP_NOT_FOUND
def create_container(
self, account, container, headers=None, acceptable_statuses=(2,)):
"""
Creates container.
:param account: The container's account.
:param container: Container to create.
:param headers: Defaults to empty dict.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container)
self.make_request('PUT', path, headers, acceptable_statuses)
def delete_container(
self, account, container, acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Deletes a container.
:param account: The container's account.
:param container: Container to delete.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self.make_request('DELETE', path, {}, acceptable_statuses)
def get_container_metadata(
self, account, container, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Gets container metadata.
:param account: The container's account.
:param container: Container to get metadata on.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:returns : Returns dict of container metadata. Keys will be lowercase.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._get_metadata(path, metadata_prefix, acceptable_statuses)
def iter_objects(
self, account, container, marker='', end_marker='',
acceptable_statuses=(2, HTTP_NOT_FOUND)):
"""
Returns an iterator of object dicts from a container.
:param account: The container's account.
:param container: Container to iterate objects on.
:param marker: Prefix of first desired item, defaults to ''.
:param end_marker: Last item returned will be 'less' than this,
defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
return self._iter_items(path, marker, end_marker, acceptable_statuses)
def set_container_metadata(
self, account, container, metadata, metadata_prefix='',
acceptable_statuses=(2,)):
"""
Sets container metadata. A call to this will add to the container
metadata and not overwrite all of it with values in the metadata dict.
To clear a container metadata value, pass an empty string as the value
for the key in the metadata dict.
:param account: The container's account.
:param container: Container to set metadata on.
:param metadata: Dict of metadata to set.
:param metadata_prefix: Prefix used to set metadata values in headers
of requests, used to prefix keys in metadata
when setting metadata, defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container)
self._set_metadata(
path, metadata, metadata_prefix, acceptable_statuses)
# object methods
def delete_object(
self, account, container, obj,
acceptable_statuses=(2, HTTP_NOT_FOUND),
headers=None):
"""
Deletes an object.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2, HTTP_NOT_FOUND).
:param headers: extra headers to send with request
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
self.make_request('DELETE', path, (headers or {}), acceptable_statuses)
def get_object_metadata(
self, account, container, obj, metadata_prefix='',
acceptable_statuses=(2,), headers=None):
"""
Gets object metadata.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param metadata_prefix: Used to filter values from the headers
returned. Will strip that prefix from the
keys in the dict returned. Defaults to ''.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:param headers: extra headers to send with request
:returns : Dict of object metadata.
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
path = self.make_path(account, container, obj)
return self._get_metadata(path, metadata_prefix, acceptable_statuses,
headers=headers)
def get_object(self, account, container, obj, headers,
acceptable_statuses=(2,)):
"""
Returns a 3-tuple (status, headers, iterator of object body)
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request('GET', path, headers, acceptable_statuses)
return (resp.status_int, resp.headers, resp.app_iter)
def iter_object_lines(
self, account, container, obj, headers=None,
acceptable_statuses=(2,)):
"""
Returns an iterator of object lines from an uncompressed or compressed
text object.
Uncompress object as it is read if the object's name ends with '.gz'.
:param account: The object's account.
:param container: The object's container.
:param obj: The object.
:param acceptable_statuses: List of status for valid responses,
defaults to (2,).
:raises UnexpectedResponse: Exception raised when requests fail
to get a response with an acceptable status
:raises Exception: Exception is raised when code fails in an
unexpected way.
"""
headers = headers or {}
path = self.make_path(account, container, obj)
resp = self.make_request('GET', path, headers, acceptable_statuses)
if not resp.status_int // 100 == 2:
return
last_part = ''
compressed = obj.endswith('.gz')
# magic in the following zlib.decompressobj argument is courtesy of
# Python decompressing gzip chunk-by-chunk
# http://stackoverflow.com/questions/2423866
d = zlib.decompressobj(16 + | |
# A "block" below is a group of lines serving one function.
# OPIN blocks: input-validation loops that keep the user from entering
# invalid menu options.

# Program introduction.
print ("BIENVENIDO AL PROGRAMA DE TRANSFORMACION DE CADENAS")
print ("")
print ("Ingrese a continuación la cadena de caracteres que desea modificar:")
cadena = input()  # read the string to edit
Cadena_Inicial = cadena  # keep the user's original string

# Reject empty input: at least one character is required.
while len(cadena) == 0:
    print ("Debe ingresar al menos un caracter")
    cadena = input()  # ask for the string again

# Menu text, the set of valid options, and the reference alphabet used by
# the Caesar-cipher options (7/8).
# Fix: menu title typo "PRICIPAL" -> "PRINCIPAL".
menu = "MENU PRINCIPAL\n\n0- Modo Comando\n1 – Cambiar todas las letras de minúsculas a MAYÚSCULAS.\n2 – Cambiar de minúscula a Mayúscula la primera letra de todas las palabras.\n3 – Cambiar de MAYÚSCULAS a minúsculas todas las letras.\n4 – Quitar todos los espacios a la izquierda del texto.\n5 – Quitar todos los espacios a la derecha del texto.\n6 – Reemplazar todas las ocurrencias de un carácter o palabra por otro\n7 – Cifrado utilizando cifrado Cesar.\n8 – Descifrado utilizando cifrado Cesar.\n9 – Salir\n10- Mostrar MENU\n11- Contacto"
opciones = ["0","1","2","3","4","5","6","7","8","9","10","11"]
cadena_comparadora= "abcdefghijklmnñopqrstuvwxyzABCDEFGHIJKLMNÑOPQRSTUVWXYZ .,_1234567890><!#$%&/()=?¡¿´+*[]{}_:;áéíóú"
x = 10  # start on option 10 so the program begins by showing the menu
while x != 9: #mientras la opciion ingresada no sea la salida, se produce un bucle que permite hacer varias modificaciones a la cadena
if x==11 : #contacto
print ("")
print ("Contacto:\nPor consultas comuníquese via mail con nosotros: <NAME> (<EMAIL>) , <NAME>(<EMAIL>)\nMuchas gracias")
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x == 10: #mostrar menu
print ("")
print (menu)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x == 1: #opcion 1
cadena = cadena.upper()
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==2: #opcion 2
cadena = cadena.title ()
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==3: #opcion 3
cadena = cadena.lower ()
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==4 : #opcion 4
cadena = cadena.lstrip()
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==5 : #opcion 5
cadena = cadena.rstrip()
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x == 6 : #opcion 6
modificado = input("Ingrese caracter o palabra a sustituir ")
modificador = input ("Ingrese caracter o palabra que sustituye ")
if modificado in cadena:
cadena = cadena.split(modificado)
cadena = modificador.join(cadena)
#OPIN
else:
print ("La palabra a sustituir no existe en la cadena")
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==7: #opcion 7
numeros = []
for n in range (0,98):
numeros.append(str(n))
despl = input("Ingrese el desplazamiento de cifrado ")
#OPIN
while str(despl)not in numeros:
print ("La opción que Ud. ingresó no es válida")
despl = input("Ingrese el desplazamiento de cifrado ")
a = int(despl)
cifrada = ""
for i in range (len(cadena)):
x= cadena_comparadora.find(cadena[i])
while x+a >= 97: #en esta opcion, se puede ingresar un valor de cifrado mayor a 97
x -= 97
cifrada += cadena_comparadora[x+a]
cadena = cifrada
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x ==8 : #opcion 8
numeros = []
for n in range (0,98):
numeros.append(str(n))
despl = input("Ingrese el desplazamiento de cifrado")
#OPIN
while str(despl)not in numeros:
print ("La opción que Ud. ingresó no es válida")
despl = input("Ingrese el desplazamiento de cifrado ")
a = int(despl)
cifrada = ""
for i in range (len(cadena)):
x= cadena_comparadora.find(cadena[i])
while x+a >= 97: #en esta opcion, se puede ingresar un valor de cifrado mayor a 97
x -= 97
cifrada += cadena_comparadora[x-a]
cadena = cifrada
print (cadena)
opcion = input("Ingrese la opción deseada: ")
#OPIN
while opcion not in opciones:
print ("La opción que Ud. ingresó no es válida")
opcion = input("Ingrese la opción deseada: ")
x = int(opcion)
elif x == 0:  # Option 0: command mode — a mini pipe-separated command language over `cadena`.
    # (Indentation reconstructed — the source arrived whitespace-stripped;
    # NOTE(review): nesting of a few statements below is inferred, confirm
    # against the original file.)
    print("Modo Comando")
    comando = input("Ingrese los comandos deseados: ")  # the command line is a (str)
    if comando == "10":
        # "10" exits command mode back to the main menu.
        x = 10
    else:
        comando = str(comando)  # NOTE(review): input() already returns str
        comando = list(comando)
        comando = "".join(comando)  # NOTE(review): list+join round-trip is a no-op
        comando = comando.split("|")
        # "intercambio" block: build the list of every pairwise combination of
        # the comparison alphabet with itself, joined by ">" — i.e. every
        # valid "a>b" substitution command.
        intercambio = []
        lista_abc = list(cadena_comparadora)
        for i in lista_abc:
            for j in lista_abc:
                intercambio.append(str(str(i) + ">" + str(j)))
        # "cif" block: every valid cipher command "cif 0" .. "cif 97".
        # In this command, shifts greater than 97 are not allowed.
        cif = []
        cif1 = ["cif "]
        for i in cif1:
            for numero in range(0, 98):
                cif.append(str(str(i) + str(numero)))
        # "decif" block: same as the cif block, for deciphering.
        decif = []
        decif1 = ["decif "]
        for i in decif1:
            for numero in range(0, 98):
                decif.append(str(str(i) + str(numero)))
        lista_de_comandos = ["mM","Mm","aA","-espacio","más>+","cif ","decif ","salir"]+ intercambio+cif+decif
        # OPIN — `verificador` walks the entered commands; if any entry is
        # invalid (verificador < len(comando)) the whole line is re-entered.
        # NOTE(review): verificador is never reset after a bad entry, so
        # counts from the rejected line still satisfy the exit condition.
        verificador = 0
        while verificador < len(comando):
            for i in range(len(comando)):
                if comando[i] in lista_de_comandos:
                    verificador += 1
                else:
                    print("Error: Verifique los comandos ingresados")
                    comando = input("Ingrese los comandos deseados: ")
                    comando = list(comando)
                    comando = "".join(comando)
                    comando = comando.split("|")  # drop the pipes to keep only the commands
        for i in range(len(comando)):  # run the entered commands in input order
            # Command-mode dispatch: compare each command against the options.
            if comando[i] == "mM":
                cadena = cadena.upper()
            elif comando[i] == "Mm":
                cadena = cadena.lower()
            elif comando[i] == "aA":
                cadena = cadena.title()
            elif comando[i] == "-espacio":
                cadena = cadena.strip()
            elif comando[i] in intercambio:
                # "a>b": replace every occurrence of a with b via split/join.
                lista1 = comando[i]
                lista1 = lista1.split(">")
                aux = cadena.split(lista1[0])
                aux2 = lista1[1].join(aux)
                cadena = aux2
            elif comando[i] == "más>+":
                if "más" in cadena:
                    cadena = cadena.split("más")
                    cadena = "+".join(cadena)
                # OPIN — case where the string does not contain "más" (with accent).
                else:
                    print("La palabra a sustituir no existe en la cadena")
                # NOTE(review): placement of these two prints is ambiguous in
                # the stripped source; reconstructed at branch level.
                print("")
                print(cadena)
            elif comando[i] in cif:
                cad = cadena
                lista2 = comando[i]
                lista2 = lista2.split("cif ")
                lista3 = lista2[1]
                # BUG(review): lista3[0] keeps only the FIRST digit, so
                # "cif 25" shifts by 2, not 25 — int(lista3) was likely meant.
                avance = int(lista3[0])
                cifrada = ""
                # NOTE(review): this i shadows the outer command index;
                # harmless only because the outer for reassigns i each pass.
                for i in range(len(cadena)):
                    adelanto = cadena_comparadora.find(cad[i])
                    # NOTE(review): no wrap-around here — adelanto+avance can
                    # run past the end of the alphabet and raise IndexError.
                    cifrada += cadena_comparadora[adelanto + avance]
                cadena = cifrada
            elif comando[i] in decif:
                cad = cadena
                lista2 = comando[i]
                lista2 = lista2.split("decif ")
                lista3 = lista2[1]
                # BUG(review): same first-digit truncation as the cif branch.
                avance = int(lista3[0])
                cifrada = ""
for i in |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.